diff --git a/.bazelrc b/.bazelrc index ef005ed0d364..4703f459f8a6 100644 --- a/.bazelrc +++ b/.bazelrc @@ -5,11 +5,18 @@ startup --host_jvm_args="-Xmx8g" build --announce_rc build --color=yes -build:production --config=lsan --strip=never --copt=-O3 +build:production --config=lsan --copt=-O3 # C/C++ CONFIGS build --cxxopt=-std=c++14 -build --compilation_mode=dbg +# Create debug information only for magma binaries (not for external dependencies). +# --compilation_mode=dbg would also create debug information of external dependencies +# and increase the size of artifacts drastically. +# Needs --strip=never so that debug information is not removed by the linker. +# See https://bazel.build/docs/user-manual#compilation-mode and +# https://bazel.build/docs/user-manual#strip +build --strip=never +build --per_file_copt=^lte/gateway/c/.*$@-g # DEFAULT TEST CONFIGURATION # Please read the GH issue #13073 before adding "test" options. diff --git a/.devcontainer/bazel-base/Dockerfile b/.devcontainer/bazel-base/Dockerfile index 02da2f7fd13d..fa5082179023 100644 --- a/.devcontainer/bazel-base/Dockerfile +++ b/.devcontainer/bazel-base/Dockerfile @@ -19,7 +19,6 @@ RUN echo "Install general purpose packages" && \ apt-get install -y --no-install-recommends \ apt-transport-https \ apt-utils \ - # dependencies of FreeDiameter bison \ build-essential \ ca-certificates \ @@ -29,39 +28,30 @@ RUN echo "Install general purpose packages" && \ git \ gnupg2 \ g++ \ - # dependency of mobilityd (tests) - iproute2 \ - # dependency of python services (e.g. magmad) - iputils-ping \ + iproute2 `# dependency of mobilityd (tests)` \ + iputils-ping `# dependency of python services (e.g. 
magmad)` \ flex \ libconfig-dev \ - # dependency of @sentry_native//:sentry - libcurl4-openssl-dev \ - # dependencies of oai/mme + libcurl4-openssl-dev `# dependency of @sentry_native//:sentry` \ libczmq-dev \ libgcrypt-dev \ libgmp3-dev \ libidn11-dev \ libsctp1 \ libsqlite3-dev \ - # dependency of sctpd - libsctp-dev \ + libsctp-dev `# dependency of sctpd` \ libssl-dev \ - # dependency of pip systemd - libsystemd-dev \ + libsystemd-dev `# dependency of pip systemd` \ lld \ - # dependency of python services (e.g. magmad) - net-tools \ - # dependency of python services (e.g. pipelined) - netbase \ + net-tools `# dependency of python services (e.g. magmad)` \ + netbase `# dependency of python services (e.g. pipelined)` \ python${PYTHON_VERSION} \ python-is-python3 \ + python3-distutils `# dependency of bazel pip_parse rule` \ software-properties-common \ - # dependency of python services (e.g. magmad) - systemd \ + systemd `# dependency of python services (e.g. magmad)` \ unzip \ - # dependency of liagent - uuid-dev \ + uuid-dev `# dependency of liagent` \ vim \ wget \ zip @@ -82,31 +72,6 @@ RUN apt-get install -y --no-install-recommends \ libpcap-dev=1.9.1-3 \ libmnl-dev=1.0.4-2 -## Install Fmt (Folly Dep) -RUN git clone https://github.com/fmtlib/fmt.git && \ - cd fmt && \ - mkdir _build && \ - cd _build && \ - cmake -DBUILD_SHARED_LIBS=ON -DFMT_TEST=0 .. && \ - make -j"$(nproc)" && \ - make install && \ - cd / && \ - rm -rf fmt - -# Facebook Folly C++ lib -# Note: "Because folly does not provide any ABI compatibility guarantees from -# commit to commit, we generally recommend building folly as a static library." -# Here we checkout the hash for v2021.02.22.00 (arbitrary recent version) -RUN git clone --depth 1 --branch v2021.02.15.00 https://github.com/facebook/folly && \ - cd /folly && \ - mkdir _build && \ - cd _build && \ - cmake -DBUILD_SHARED_LIBS=ON .. 
&& \ - make -j"$(nproc)" && \ - make install && \ - cd / && \ - rm -rf folly - # setup magma artifactories and install magma dependencies RUN wget -qO - https://artifactory.magmacore.org:443/artifactory/api/gpg/key/public | apt-key add - && \ add-apt-repository 'deb https://artifactory.magmacore.org/artifactory/debian-test focal-ci main' && \ @@ -114,6 +79,7 @@ RUN wget -qO - https://artifactory.magmacore.org:443/artifactory/api/gpg/key/pub apt-get update -y && \ apt-get install -y --no-install-recommends \ bcc-tools \ + libfolly-dev \ liblfds710 \ oai-asn1c \ oai-gnutls \ diff --git a/.github/workflows/backport-pull-request.yml b/.github/workflows/backport-pull-request.yml index 72bd12b045b2..89f2f10ae51b 100644 --- a/.github/workflows/backport-pull-request.yml +++ b/.github/workflows/backport-pull-request.yml @@ -33,6 +33,10 @@ jobs: ) ) steps: + - run: | + echo '{ + "prTitle": "{commitMessages} [backport to {targetBranch}]" + }' > .backportrc.json - name: Backport Action uses: sqren/backport-github-action@f54e19901f2a57f8b82360f2490d47ee82ec82c6 # pin@v8.9.3 with: diff --git a/.github/workflows/bazel.yml b/.github/workflows/bazel.yml index b82b3bf4fcb2..dd92cde0bf9d 100644 --- a/.github/workflows/bazel.yml +++ b/.github/workflows/bazel.yml @@ -63,11 +63,10 @@ jobs: bazel_build_and_test: needs: path_filter - # Only run workflow if this is a scheduled run on master branch, - # if the workflow has been triggered manually or if it is a pull_request - # that skip-duplicate-action wants to run again. + # Only run workflow if this is a push to the magma repository, + # if the workflow has been triggered manually or if it is a pull_request. 
if: | - (github.event_name == 'schedule' && github.ref == 'refs/heads/master') || + (github.event_name == 'push' && github.repository_owner == 'magma') || needs.path_filter.outputs.files_changed == 'true' || github.event_name == 'workflow_dispatch' strategy: @@ -163,7 +162,7 @@ jobs: echo "Available storage:" df -h - name: Notify failure to slack - if: failure() && (github.event_name == 'push' || github.event_name == 'schedule') && github.repository_owner == 'magma' + if: failure() && github.event_name == 'push' && github.repository_owner == 'magma' uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7 # pin@v2.2.0 env: SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK_BAZEL_CI }} @@ -219,11 +218,10 @@ jobs: bazel_package: needs: path_filter - # Only run workflow if this is a scheduled run on master branch, - # if the workflow has been triggered manually or if it is a pull_request - # that skip-duplicate-action wants to run again. + # Only run workflow if this is a push to the magma repository, + # if the workflow has been triggered manually or if it is a pull_request. 
if: | - (github.event_name == 'schedule' && github.ref == 'refs/heads/master') || + (github.event_name == 'push' && github.repository_owner == 'magma') || needs.path_filter.outputs.files_changed == 'true' || github.event_name == 'workflow_dispatch' name: Bazel Package Job @@ -266,7 +264,7 @@ jobs: echo "Available storage:" df -h - name: Notify failure to slack - if: failure() && (github.event_name == 'push' || github.event_name == 'schedule') && github.repository_owner == 'magma' + if: failure() && github.event_name == 'push' && github.repository_owner == 'magma' uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7 # pin@v2.2.0 env: SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK_BAZEL_CI }} @@ -288,7 +286,7 @@ jobs: run: | ./bazel/scripts/check_py_bazel.sh - name: Notify failure to slack - if: failure() && (github.event_name == 'push' || github.event_name == 'schedule') && github.repository_owner == 'magma' + if: failure() && github.event_name == 'push' && github.repository_owner == 'magma' uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7 # pin@v2.2.0 env: SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK_BAZEL_CI }} @@ -310,7 +308,7 @@ jobs: run: | ./bazel/scripts/check_c_cpp_bazel.sh - name: Notify failure to slack - if: failure() && (github.event_name == 'push' || github.event_name == 'schedule') && github.repository_owner == 'magma' + if: failure() && github.event_name == 'push' && github.repository_owner == 'magma' uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7 # pin@v2.2.0 env: SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK_BAZEL_CI }} diff --git a/.github/workflows/build_all.yml b/.github/workflows/build_all.yml index fb98b7442716..a01d1fbe441c 100644 --- a/.github/workflows/build_all.yml +++ b/.github/workflows/build_all.yml @@ -140,7 +140,7 @@ jobs: run: | pip3 install --upgrade pip pip3 install ansible fabric3 jsonpickle requests PyYAML - vagrant plugin install vagrant-vbguest vagrant-vbguest 
vagrant-mutate + vagrant plugin install vagrant-disksize vagrant-vbguest vagrant-mutate vagrant-reload - name: Open up network interfaces for VM run: | sudo mkdir -p /etc/vbox/ diff --git a/.github/workflows/cloud-workflow.yml b/.github/workflows/cloud-workflow.yml index aa529ad7e790..bf3db57caac6 100644 --- a/.github/workflows/cloud-workflow.yml +++ b/.github/workflows/cloud-workflow.yml @@ -40,7 +40,14 @@ jobs: with: filters: | filesChanged: - - [".github/workflows/cloud-workflow.yml", "lte/protos/**", "cwf/cloud/**", "feg/cloud/**", "lte/cloud/**", "orc8r/**"] + - ".github/workflows/cloud-workflow.yml" + - "lte/protos/**" + - "cwf/cloud/**" + - "feg/cloud/**" + - "lte/cloud/**" + - "orc8r/**" + - "dp/cloud/**" + - "dp/protos/**" - name: Save should_not_skip output if: always() run: | diff --git a/.github/workflows/cwf-integ-test.yml b/.github/workflows/cwf-integ-test.yml index 5743fbfd291d..00c9cf038e72 100644 --- a/.github/workflows/cwf-integ-test.yml +++ b/.github/workflows/cwf-integ-test.yml @@ -95,7 +95,7 @@ jobs: run: | pip3 install --upgrade pip pip3 install ansible fabric3 jsonpickle requests PyYAML firebase_admin - vagrant plugin install vagrant-vbguest + vagrant plugin install vagrant-vbguest vagrant-reload vagrant-disksize - uses: actions/download-artifact@f023be2c48cc18debc3bacd34cb396e0295e2869 # pin@v2 with: name: docker-images diff --git a/.github/workflows/dp-workflow.yml b/.github/workflows/dp-workflow.yml index 7746823cf7f4..a56581385c97 100644 --- a/.github/workflows/dp-workflow.yml +++ b/.github/workflows/dp-workflow.yml @@ -31,7 +31,6 @@ jobs: runs-on: ubuntu-latest outputs: cc: ${{ steps.filter.outputs.cc }} - am: ${{ steps.filter.outputs.am }} rc: ${{ steps.filter.outputs.rc }} db: ${{ steps.filter.outputs.db }} helm: ${{ steps.filter.outputs.helm }} @@ -52,14 +51,6 @@ jobs: - 'dp/protos/**' - 'dp/cloud/go/protos/**' - 'dp/cloud/go/active_mode_controller/protos' - am: - - '.github/workflows/dp-workflow.yml' - - 
'dp/cloud/go/active_mode_controller/**' - - 'dp/cloud/docker/go/active_mode_controller/**' - - 'dp/cloud/python/magma/db_service/**' - - 'dp/protos/**' - - 'dp/cloud/go/protos/**' - - 'dp/cloud/go/active_mode_controller/protos' rc: - '.github/workflows/dp-workflow.yml' - 'dp/cloud/python/magma/radio_controller/**' @@ -139,47 +130,6 @@ jobs: with: flags: unittests,configuration-controller name: codecov-configuration-controller - fail_ci_if_error: false - verbose: true - - active_mode_controller_unit_tests: - needs: path_filter - if: ${{ needs.path_filter.outputs.am == 'true' }} - name: "Active mode controller unit tests" - runs-on: ubuntu-latest - - defaults: - run: - working-directory: dp/cloud/go/active_mode_controller - env: - GO_VERSION: 1.18.3 - - steps: - - name: Checkout code - uses: actions/checkout@7884fcad6b5d53d10323aee724dc68d8b9096a2e # pin@v2 - - - name: Set up Go ${{ env.GO_VERSION }} - uses: actions/setup-go@b22fbbc2921299758641fab08929b4ac52b32923 # pin@v3 - with: - go-version: ${{ env.GO_VERSION }} - - - name: Run Go linter - uses: golangci/golangci-lint-action@5c56cd6c9dc07901af25baab6f2b0d9f3b7c3018 # pin@v2 - with: - version: v1.46.2 - working-directory: dp/cloud/go/active_mode_controller - skip-go-installation: true - - - name: Run Go tests - run: | - go test ./... 
-v -race -coverprofile=coverage.txt -covermode=atomic - - - name: Upload coverage to Codecov - uses: codecov/codecov-action@f32b3a3741e1053eb607407145bc9619351dc93b # pin@v2 - with: - flags: unittests,active-mode-controller - name: codecov-active-mode-controller - fail_ci_if_error: false verbose: true radio_controller_unit_tests: @@ -258,7 +208,6 @@ jobs: with: flags: unittests,radio-controller name: codecov-radio-controller - fail_ci_if_error: false verbose: true db_migration_check: @@ -354,7 +303,6 @@ jobs: with: flags: unittests,db-service name: codecov-db-service - fail_ci_if_error: false verbose: true integration_tests_orc8r: diff --git a/.github/workflows/federated-integ-test.yml b/.github/workflows/federated-integ-test.yml index dfc46550850c..90e5b4867778 100644 --- a/.github/workflows/federated-integ-test.yml +++ b/.github/workflows/federated-integ-test.yml @@ -95,11 +95,11 @@ jobs: run: | pip3 install --upgrade pip pip3 install ansible fabric3 jsonpickle requests PyYAML firebase_admin - vagrant plugin install vagrant-vbguest vagrant-scp + vagrant plugin install vagrant-vbguest vagrant-disksize vagrant-scp vagrant-reload - name: Vagrant Host prerequisites for federated integ test run: | cd ${{ env.AGW_ROOT }} && fab open_orc8r_port_in_vagrant - cd ${{ env.MAGMA_ROOT }} && mkdir -p .cache/test_certs/ && mkdir -p .cache/feg/ && touch snowflake + cd ${{ env.MAGMA_ROOT }} && mkdir -p .cache/test_certs/ && mkdir -p .cache/feg/ cd ${{ env.MAGMA_ROOT }}/.cache/feg/ && touch snowflake - name: Open up network interfaces for VM run: | diff --git a/.github/workflows/golang-build-test.yml b/.github/workflows/golang-build-test.yml index f39ca14b3214..b190e09fa930 100644 --- a/.github/workflows/golang-build-test.yml +++ b/.github/workflows/golang-build-test.yml @@ -219,7 +219,6 @@ jobs: uses: codecov/codecov-action@f32b3a3741e1053eb607407145bc9619351dc93b # pin@v2 with: flags: src_go - fail_ci_if_error: true verbose: true - name: Extract commit title id: commit diff 
--git a/.github/workflows/lte-integ-test-bazel.yml b/.github/workflows/lte-integ-test-bazel.yml index 396d598451dc..053fd309bcb1 100644 --- a/.github/workflows/lte-integ-test-bazel.yml +++ b/.github/workflows/lte-integ-test-bazel.yml @@ -29,12 +29,8 @@ jobs: lte-integ-test-bazel: if: github.repository_owner == 'magma' || github.event_name == 'workflow_dispatch' runs-on: macos-12 - env: - SHA: ${{ github.event.workflow_run.head_commit.id || github.sha }} steps: - - uses: actions/checkout@7884fcad6b5d53d10323aee724dc68d8b9096a2e # pin@v2 - with: - ref: ${{ env.SHA }} + - uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # pin@v3 - name: Cache magma-dev-box uses: actions/cache@0865c47f36e68161719c5b124609996bb5c40129 # pin@v3 with: @@ -66,33 +62,30 @@ jobs: run: | pip3 install --upgrade pip pip3 install ansible fabric3 jsonpickle requests PyYAML firebase_admin - vagrant plugin install vagrant-vbguest + vagrant plugin install vagrant-vbguest vagrant-disksize vagrant-reload - name: Open up network interfaces for VM run: | sudo mkdir -p /etc/vbox/ - sudo touch /etc/vbox/networks.conf - sudo sh -c "echo '* 192.168.0.0/16' > /etc/vbox/networks.conf" - sudo sh -c "echo '* 3001::/64' >> /etc/vbox/networks.conf" - - name: Prepare the integ test + echo '* 192.168.0.0/16' | sudo tee /etc/vbox/networks.conf + echo '* 3001::/64' | sudo tee -a /etc/vbox/networks.conf + - name: Provision the magma-dev VM run: | cd lte/gateway export MAGMA_DEV_CPUS=3 export MAGMA_DEV_MEMORY_MB=9216 - fab bazel_integ_test_pre_build - - name: Build all services with bazel + fab provision_magma_dev_vm + - name: Build all services and scripts with bazel run: | cd lte/gateway vagrant ssh -c 'cd ~/magma; bazel/scripts/remote_cache_bazelrc_setup.sh "${{ env.CACHE_KEY }}" "${{ env.REMOTE_DOWNLOAD_OPTIMIZATION }}" "${{ secrets.BAZEL_REMOTE_PASSWORD }}";' magma vagrant ssh -c 'sudo sed -i "s@#precedence ::ffff:0:0/96 100@precedence ::ffff:0:0/96 100@" /etc/gai.conf;' magma vagrant ssh -c 'cd 
~/magma; bazel build --profile=bazel_profile_lte_integ_tests `bazel query "kind(.*_binary, //orc8r/... union //lte/... union //feg/...)"`;' magma vagrant ssh -c 'sudo sed -i "s@precedence ::ffff:0:0/96 100@#precedence ::ffff:0:0/96 100@" /etc/gai.conf;' magma - - name: Run the sudo tests - id: sudo_tests + - name: Linking bazel-built script executables to '/usr/local/bin/' run: | cd lte/gateway - vagrant ssh -c 'cd ~/magma; bazel/scripts/run_sudo_tests.sh --retry-on-failure --retry-attempts 1;' magma + vagrant ssh -c 'cd ~/magma; bazel/scripts/link_scripts_for_bazel_integ_tests.sh;' magma - name: Run the integ test - if: ${{ success() || steps.sudo_tests.conclusion == 'failure' }} run: | cd lte/gateway export MAGMA_DEV_CPUS=3 @@ -104,35 +97,6 @@ jobs: with: name: Bazel profile lte integ tests path: bazel_profile_lte_integ_tests - - name: Get test results - if: always() - run: | - cd lte/gateway - fab get_test_summaries:dst_path="test-results" - ls -R - - name: Upload test results - uses: actions/upload-artifact@3cea5372237819ed00197afe530f5a7ea3e805c8 # pin@v3 - if: always() - with: - name: test-results - path: lte/gateway/test-results/**/*.xml - - name: Get test logs - if: failure() - run: | - cd lte/gateway - fab get_test_logs:dst_path=./logs.tar.gz - - name: Upload test logs - uses: actions/upload-artifact@3cea5372237819ed00197afe530f5a7ea3e805c8 # pin@v3 - if: failure() - with: - name: test-logs - path: lte/gateway/logs.tar.gz - - name: Publish Unit Test Results - if: always() - uses: EnricoMi/publish-unit-test-result-action/composite@7377632048da85434c30810c38353542d3162dc4 # pin@v1 - with: - files: lte/gateway/test-results/**/*.xml - check_run_annotations: all tests - name: Notify failure to slack if: failure() && github.repository_owner == 'magma' uses: Ilshidur/action-slack@689ad44a9c9092315abd286d0e3a9a74d31ab78a # pin@2.1.0 diff --git a/.github/workflows/lte-integ-test-magma-deb.yml b/.github/workflows/lte-integ-test-magma-deb.yml new file mode 100644 
index 000000000000..63f4060b136c --- /dev/null +++ b/.github/workflows/lte-integ-test-magma-deb.yml @@ -0,0 +1,64 @@ +# Copyright 2022 The Magma Authors. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +name: LTE integ test magma-deb + +on: + workflow_dispatch: null + workflow_run: + workflows: + - build-all + branches: + - master + types: + - completed + +jobs: + lte-integ-test-magma-deb: + if: github.repository_owner == 'magma' || github.event_name == 'workflow_dispatch' + runs-on: macos-12 + steps: + - uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # pin@v3 + - name: Cache magma-deb-box + uses: actions/cache@0865c47f36e68161719c5b124609996bb5c40129 # pin@v3 + with: + path: ~/.vagrant.d/boxes/magmacore-VAGRANTSLASH-magma_deb + key: vagrant-box-magma-deb-focal64-20220804.0.0 + - name: Cache magma-test-box + uses: actions/cache@0865c47f36e68161719c5b124609996bb5c40129 # pin@v3 + with: + path: ~/.vagrant.d/boxes/magmacore-VAGRANTSLASH-magma_test + key: vagrant-box-magma-test + - name: Cache magma-trfserver-box + uses: actions/cache@0865c47f36e68161719c5b124609996bb5c40129 # pin@v3 + with: + path: ~/.vagrant.d/boxes/magmacore-VAGRANTSLASH-magma_trfserver + key: vagrant-box-magma-trfserver-v20220722 + - uses: actions/setup-python@7f80679172b057fc5e90d70d197929d454754a5a # pin@v2 + with: + python-version: '3.8.10' + - name: Install pre requisites + run: | + pip3 install --upgrade pip + pip3 install ansible fabric3 jsonpickle requests PyYAML + vagrant plugin install vagrant-vbguest vagrant-disksize vagrant-reload + - name: Open up network 
interfaces for VM + run: | + sudo mkdir -p /etc/vbox/ + echo '* 192.168.0.0/16' | sudo tee /etc/vbox/networks.conf + echo '* 3001::/64' | sudo tee -a /etc/vbox/networks.conf + - name: Run the integ test + env: + MAGMA_DEV_CPUS: 3 + MAGMA_DEV_MEMORY_MB: 9216 + run: | + cd lte/gateway + fab integ_test_deb_installation diff --git a/.github/workflows/lte-integ-test.yml b/.github/workflows/lte-integ-test.yml index 8a5d06826f19..def198cfaa16 100644 --- a/.github/workflows/lte-integ-test.yml +++ b/.github/workflows/lte-integ-test.yml @@ -63,7 +63,7 @@ jobs: run: | pip3 install --upgrade pip pip3 install ansible fabric3 jsonpickle requests PyYAML firebase_admin - vagrant plugin install vagrant-vbguest + vagrant plugin install vagrant-vbguest vagrant-disksize vagrant-reload - name: Open up network interfaces for VM run: | sudo mkdir -p /etc/vbox/ diff --git a/.github/workflows/rebase.yml b/.github/workflows/rebase.yml deleted file mode 100644 index c5dfa8e38480..000000000000 --- a/.github/workflows/rebase.yml +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright 2022 The Magma Authors. -# -# This source code is licensed under the BSD-style license found in the -# LICENSE file in the root directory of this source tree. -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -name: Automatic Rebase -on: - issue_comment: - types: [ created ] -jobs: - # This job is based on https://github.com/marketplace/actions/automatic-rebase - rebase: - name: Rebase - if: github.event.issue.pull_request != '' && contains(github.event.comment.body, '/rebase') - runs-on: ubuntu-latest - steps: - - name: Checkout the latest code - uses: actions/checkout@7884fcad6b5d53d10323aee724dc68d8b9096a2e # pin@v2 - with: - token: ${{ secrets.PAT_TOKEN }} - fetch-depth: 0 # otherwise, you will fail to push refs to dest repo - - name: Automatic Rebase - uses: cirrus-actions/rebase@7cea12ac34ab078fa37e87798d8986185afa7bf2 # pin@1.4 - env: - GITHUB_TOKEN: ${{ secrets.PAT_TOKEN }} diff --git a/.github/workflows/sudo-python-tests.yml b/.github/workflows/sudo-python-tests.yml new file mode 100644 index 000000000000..b7883a336d4b --- /dev/null +++ b/.github/workflows/sudo-python-tests.yml @@ -0,0 +1,80 @@ +# Copyright 2022 The Magma Authors. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +name: Sudo python tests + +on: + workflow_dispatch: null + workflow_run: + workflows: + - build-all + branches: + - master + types: + - completed + +env: + CACHE_KEY: magma-dev-vm + REMOTE_DOWNLOAD_OPTIMIZATION: false + +jobs: + sudo-python-tests: + if: github.repository_owner == 'magma' || github.event_name == 'workflow_dispatch' + runs-on: macos-12 + steps: + - uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # pin@v3 + - name: Cache magma-dev-box + uses: actions/cache@0865c47f36e68161719c5b124609996bb5c40129 # pin@v3 + with: + path: ~/.vagrant.d/boxes/magmacore-VAGRANTSLASH-magma_dev + key: vagrant-box-magma-dev-v1.2.20220801 + - name: Log in to vagrant cloud + run: | + if [[ -n "${{ secrets.VAGRANT_TOKEN }}" ]] + then + echo "Logging in to vagrant cloud to mitigate rate limiting." + vagrant cloud auth login --token "${{ secrets.VAGRANT_TOKEN }}" + else + echo "Vagrant cloud token is not configured. Skipping login." + fi + - uses: actions/setup-python@7f80679172b057fc5e90d70d197929d454754a5a # pin@v2 + with: + python-version: '3.8.10' + - name: Install pre requisites + run: | + pip3 install --upgrade pip + pip3 install ansible fabric3 jsonpickle requests PyYAML firebase_admin + vagrant plugin install vagrant-vbguest vagrant-disksize vagrant-reload + - name: Open up network interfaces for VM + run: | + sudo mkdir -p /etc/vbox/ + echo '* 192.168.0.0/16' | sudo tee /etc/vbox/networks.conf + echo '* 3001::/64' | sudo tee -a /etc/vbox/networks.conf + - name: Provision the magma-dev VM + run: | + cd lte/gateway + export MAGMA_DEV_CPUS=3 + export MAGMA_DEV_MEMORY_MB=9216 + fab provision_magma_dev_vm + - name: Run the sudo python tests + run: | + cd lte/gateway + vagrant ssh -c 'cd ~/magma; bazel/scripts/remote_cache_bazelrc_setup.sh "${{ env.CACHE_KEY }}" "${{ env.REMOTE_DOWNLOAD_OPTIMIZATION }}" "${{ secrets.BAZEL_REMOTE_PASSWORD }}";' magma + vagrant ssh -c 'cd ~/magma; bazel/scripts/run_sudo_tests.sh --retry-on-failure --retry-attempts 1;' magma + 
- name: Notify failure to slack + if: failure() && github.repository_owner == 'magma' + uses: Ilshidur/action-slack@689ad44a9c9092315abd286d0e3a9a74d31ab78a # pin@2.1.0 + env: + SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK_BAZEL_CI }} + SLACK_USERNAME: "Sudo python tests" + SLACK_AVATAR: ":boom:" + with: + args: "Sudo python tests failed in run: https://github.com/${{github.repository}}/actions/runs/${{github.run_id}}" diff --git a/WORKSPACE.bazel b/WORKSPACE.bazel index dc313daec155..8aef3d0dcc36 100644 --- a/WORKSPACE.bazel +++ b/WORKSPACE.bazel @@ -150,12 +150,13 @@ load("//bazel:python_repositories.bzl", "python_repositories") python_repositories() +# TODO: GH13522 upgrade to >0.7.0 when landed - see issue http_archive( name = "rules_pkg", - sha256 = "8a298e832762eda1830597d64fe7db58178aa84cd5926d76d5b744d6558941c2", + sha256 = "bdac8d3d178467c89f246e1e894b59c26c784569e91798901fb81291de834708", + strip_prefix = "rules_pkg-7f7bcf9c93bed9ee693b5bfedde5d72f9a2d6ea4", urls = [ - "https://mirror.bazel.build/github.com/bazelbuild/rules_pkg/releases/download/0.7.0/rules_pkg-0.7.0.tar.gz", - "https://github.com/bazelbuild/rules_pkg/releases/download/0.7.0/rules_pkg-0.7.0.tar.gz", + "https://github.com/bazelbuild/rules_pkg/archive/7f7bcf9c93bed9ee693b5bfedde5d72f9a2d6ea4.zip", ], ) diff --git a/dp/cloud/docker/go/active_mode_controller/Dockerfile b/bazel/deb_build.bzl similarity index 58% rename from dp/cloud/docker/go/active_mode_controller/Dockerfile rename to bazel/deb_build.bzl index fe458be7978e..798f0f13e3cd 100644 --- a/dp/cloud/docker/go/active_mode_controller/Dockerfile +++ b/bazel/deb_build.bzl @@ -9,13 +9,15 @@ # See the License for the specific language governing permissions and # limitations under the License. -ARG GOLANG_VERSION="1.18.3" -FROM golang:${GOLANG_VERSION}-alpine3.16 AS build -COPY dp/cloud/go/active_mode_controller /active_mode_controller -WORKDIR /active_mode_controller/cmd -RUN go build +""" +Constants for building debian packages. 
+""" -FROM alpine:3.14.3 as final -COPY --from=build /active_mode_controller/cmd/cmd /active_mode_controller/cmd -WORKDIR /active_mode_controller -CMD ["./cmd"] +PY_VERSION = "python3.8" + +PY_PKG_LOC = "dist-packages" + +PY_DEST = "/usr/local/lib/{version}/{pkg_loc}".format( + pkg_loc = PY_PKG_LOC, + version = PY_VERSION, +) diff --git a/bazel/external/requirements.in b/bazel/external/requirements.in index 1e6bad371143..21128f29fcb4 100644 --- a/bazel/external/requirements.in +++ b/bazel/external/requirements.in @@ -1,7 +1,7 @@ # requirements.in setuptools==49.6.0 -grpcio-tools -grpcio +grpcio-tools<1.49.0 +grpcio<1.49.0 redis>=3.5 protobuf six @@ -25,7 +25,8 @@ bravado_core jsonschema==3.2.0 psutil systemd-python -cryptography +# cryptography<38.0.0 because of runtime issues when starting magmad +cryptography<38.0.0 # h2>=3,<4 is requirement of aioh2 (loaded via bazel) h2>=3,<4 # priority==1.3.0 is requirement of aioh2 (loaded via bazel) diff --git a/bazel/external/requirements.txt b/bazel/external/requirements.txt index 259aaafcc69a..4cf8dc2a0a0d 100644 --- a/bazel/external/requirements.txt +++ b/bazel/external/requirements.txt @@ -99,13 +99,13 @@ attrs==22.1.0 \ # aiohttp # jsonschema # pytest -bravado-core==5.17.0 \ - --hash=sha256:b3b06ae86d3c80de5694340e55df7c9097857ff965b76642979e2a961f332abf \ - --hash=sha256:fa53e796ea574f905635a43871439a44713c2ef128c62a8fcc1d0ca8765cf855 +bravado-core==5.17.1 \ + --hash=sha256:0da9c6f3814734622a55db3f62d08db6e188b25f3ebd087de370c91afb66a7f4 \ + --hash=sha256:e231567cdc471337d23dfc950c45c5914ade8a78cde7ccf2ebb9433fcda29f40 # via -r requirements.in -certifi==2022.6.15 \ - --hash=sha256:84c85a9078b11105f04f3036a9482ae10e4621616db313fe045dd24743a0820d \ - --hash=sha256:fe86415d55e84719d75f8b69414f6438ac3547d2078ab91b67e779ef69378412 +certifi==2022.9.14 \ + --hash=sha256:36973885b9542e6bd01dea287b2b4b3b21236307c56324fcc3f1160f2d655ed5 \ + --hash=sha256:e232343de1ab72c2aa521b625c80f699e356830fd0e2c620b465b304b17b0516 # via # 
-r requirements.in # requests @@ -382,160 +382,159 @@ frozenlist==1.3.1 \ # via # aiohttp # aiosignal -greenlet==1.1.2 \ - --hash=sha256:0051c6f1f27cb756ffc0ffbac7d2cd48cb0362ac1736871399a739b2885134d3 \ - --hash=sha256:00e44c8afdbe5467e4f7b5851be223be68adb4272f44696ee71fe46b7036a711 \ - --hash=sha256:013d61294b6cd8fe3242932c1c5e36e5d1db2c8afb58606c5a67efce62c1f5fd \ - --hash=sha256:049fe7579230e44daef03a259faa24511d10ebfa44f69411d99e6a184fe68073 \ - --hash=sha256:14d4f3cd4e8b524ae9b8aa567858beed70c392fdec26dbdb0a8a418392e71708 \ - --hash=sha256:166eac03e48784a6a6e0e5f041cfebb1ab400b394db188c48b3a84737f505b67 \ - --hash=sha256:17ff94e7a83aa8671a25bf5b59326ec26da379ace2ebc4411d690d80a7fbcf23 \ - --hash=sha256:1e12bdc622676ce47ae9abbf455c189e442afdde8818d9da983085df6312e7a1 \ - --hash=sha256:21915eb821a6b3d9d8eefdaf57d6c345b970ad722f856cd71739493ce003ad08 \ - --hash=sha256:288c6a76705dc54fba69fbcb59904ae4ad768b4c768839b8ca5fdadec6dd8cfd \ - --hash=sha256:2bde6792f313f4e918caabc46532aa64aa27a0db05d75b20edfc5c6f46479de2 \ - --hash=sha256:32ca72bbc673adbcfecb935bb3fb1b74e663d10a4b241aaa2f5a75fe1d1f90aa \ - --hash=sha256:356b3576ad078c89a6107caa9c50cc14e98e3a6c4874a37c3e0273e4baf33de8 \ - --hash=sha256:40b951f601af999a8bf2ce8c71e8aaa4e8c6f78ff8afae7b808aae2dc50d4c40 \ - --hash=sha256:572e1787d1460da79590bf44304abbc0a2da944ea64ec549188fa84d89bba7ab \ - --hash=sha256:58df5c2a0e293bf665a51f8a100d3e9956febfbf1d9aaf8c0677cf70218910c6 \ - --hash=sha256:64e6175c2e53195278d7388c454e0b30997573f3f4bd63697f88d855f7a6a1fc \ - --hash=sha256:7227b47e73dedaa513cdebb98469705ef0d66eb5a1250144468e9c3097d6b59b \ - --hash=sha256:7418b6bfc7fe3331541b84bb2141c9baf1ec7132a7ecd9f375912eca810e714e \ - --hash=sha256:7cbd7574ce8e138bda9df4efc6bf2ab8572c9aff640d8ecfece1b006b68da963 \ - --hash=sha256:7ff61ff178250f9bb3cd89752df0f1dd0e27316a8bd1465351652b1b4a4cdfd3 \ - --hash=sha256:833e1551925ed51e6b44c800e71e77dacd7e49181fdc9ac9a0bf3714d515785d \ - 
--hash=sha256:8639cadfda96737427330a094476d4c7a56ac03de7265622fcf4cfe57c8ae18d \ - --hash=sha256:8c5d5b35f789a030ebb95bff352f1d27a93d81069f2adb3182d99882e095cefe \ - --hash=sha256:8c790abda465726cfb8bb08bd4ca9a5d0a7bd77c7ac1ca1b839ad823b948ea28 \ - --hash=sha256:8d2f1fb53a421b410751887eb4ff21386d119ef9cde3797bf5e7ed49fb51a3b3 \ - --hash=sha256:903bbd302a2378f984aef528f76d4c9b1748f318fe1294961c072bdc7f2ffa3e \ - --hash=sha256:93f81b134a165cc17123626ab8da2e30c0455441d4ab5576eed73a64c025b25c \ - --hash=sha256:95e69877983ea39b7303570fa6760f81a3eec23d0e3ab2021b7144b94d06202d \ - --hash=sha256:9633b3034d3d901f0a46b7939f8c4d64427dfba6bbc5a36b1a67364cf148a1b0 \ - --hash=sha256:97e5306482182170ade15c4b0d8386ded995a07d7cc2ca8f27958d34d6736497 \ - --hash=sha256:9f3cba480d3deb69f6ee2c1825060177a22c7826431458c697df88e6aeb3caee \ - --hash=sha256:aa5b467f15e78b82257319aebc78dd2915e4c1436c3c0d1ad6f53e47ba6e2713 \ - --hash=sha256:abb7a75ed8b968f3061327c433a0fbd17b729947b400747c334a9c29a9af6c58 \ - --hash=sha256:aec52725173bd3a7b56fe91bc56eccb26fbdff1386ef123abb63c84c5b43b63a \ - --hash=sha256:b11548073a2213d950c3f671aa88e6f83cda6e2fb97a8b6317b1b5b33d850e06 \ - --hash=sha256:b1692f7d6bc45e3200844be0dba153612103db241691088626a33ff1f24a0d88 \ - --hash=sha256:b336501a05e13b616ef81ce329c0e09ac5ed8c732d9ba7e3e983fcc1a9e86965 \ - --hash=sha256:b8c008de9d0daba7b6666aa5bbfdc23dcd78cafc33997c9b7741ff6353bafb7f \ - --hash=sha256:b92e29e58bef6d9cfd340c72b04d74c4b4e9f70c9fa7c78b674d1fec18896dc4 \ - --hash=sha256:be5f425ff1f5f4b3c1e33ad64ab994eed12fc284a6ea71c5243fd564502ecbe5 \ - --hash=sha256:dd0b1e9e891f69e7675ba5c92e28b90eaa045f6ab134ffe70b52e948aa175b3c \ - --hash=sha256:e30f5ea4ae2346e62cedde8794a56858a67b878dd79f7df76a0767e356b1744a \ - --hash=sha256:e6a36bb9474218c7a5b27ae476035497a6990e21d04c279884eb10d9b290f1b1 \ - --hash=sha256:e859fcb4cbe93504ea18008d1df98dee4f7766db66c435e4882ab35cf70cac43 \ - --hash=sha256:eb6ea6da4c787111adf40f697b4e58732ee0942b5d3bd8f435277643329ba627 \ - 
--hash=sha256:ec8c433b3ab0419100bd45b47c9c8551248a5aee30ca5e9d399a0b57ac04651b \ - --hash=sha256:eff9d20417ff9dcb0d25e2defc2574d10b491bf2e693b4e491914738b7908168 \ - --hash=sha256:f0214eb2a23b85528310dad848ad2ac58e735612929c8072f6093f3585fd342d \ - --hash=sha256:f276df9830dba7a333544bd41070e8175762a7ac20350786b322b714b0e654f5 \ - --hash=sha256:f3acda1924472472ddd60c29e5b9db0cec629fbe3c5c5accb74d6d6d14773478 \ - --hash=sha256:f70a9e237bb792c7cc7e44c531fd48f5897961701cdaa06cf22fc14965c496cf \ - --hash=sha256:f9d29ca8a77117315101425ec7ec2a47a22ccf59f5593378fc4077ac5b754fce \ - --hash=sha256:fa877ca7f6b48054f847b61d6fa7bed5cebb663ebc55e018fda12db09dcc664c \ - --hash=sha256:fdcec0b8399108577ec290f55551d926d9a1fa6cad45882093a7a07ac5ec147b +greenlet==1.1.3 \ + --hash=sha256:0118817c9341ef2b0f75f5af79ac377e4da6ff637e5ee4ac91802c0e379dadb4 \ + --hash=sha256:048d2bed76c2aa6de7af500ae0ea51dd2267aec0e0f2a436981159053d0bc7cc \ + --hash=sha256:07c58e169bbe1e87b8bbf15a5c1b779a7616df9fd3e61cadc9d691740015b4f8 \ + --hash=sha256:095a980288fe05adf3d002fbb180c99bdcf0f930e220aa66fcd56e7914a38202 \ + --hash=sha256:0b181e9aa6cb2f5ec0cacc8cee6e5a3093416c841ba32c185c30c160487f0380 \ + --hash=sha256:1626185d938d7381631e48e6f7713e8d4b964be246073e1a1d15c2f061ac9f08 \ + --hash=sha256:184416e481295832350a4bf731ba619a92f5689bf5d0fa4341e98b98b1265bd7 \ + --hash=sha256:1dd51d2650e70c6c4af37f454737bf4a11e568945b27f74b471e8e2a9fd21268 \ + --hash=sha256:1ec2779774d8e42ed0440cf8bc55540175187e8e934f2be25199bf4ed948cd9e \ + --hash=sha256:2cf45e339cabea16c07586306a31cfcc5a3b5e1626d365714d283732afed6809 \ + --hash=sha256:2fb0aa7f6996879551fd67461d5d3ab0c3c0245da98be90c89fcb7a18d437403 \ + --hash=sha256:44b4817c34c9272c65550b788913620f1fdc80362b209bc9d7dd2f40d8793080 \ + --hash=sha256:466ce0928e33421ee84ae04c4ac6f253a3a3e6b8d600a79bd43fd4403e0a7a76 \ + --hash=sha256:4f166b4aca8d7d489e82d74627a7069ab34211ef5ebb57c300ec4b9337b60fc0 \ + 
--hash=sha256:510c3b15587afce9800198b4b142202b323bf4b4b5f9d6c79cb9a35e5e3c30d2 \ + --hash=sha256:5b756e6730ea59b2745072e28ad27f4c837084688e6a6b3633c8b1e509e6ae0e \ + --hash=sha256:5fbe1ab72b998ca77ceabbae63a9b2e2dc2d963f4299b9b278252ddba142d3f1 \ + --hash=sha256:6200a11f003ec26815f7e3d2ded01b43a3810be3528dd760d2f1fa777490c3cd \ + --hash=sha256:65ad1a7a463a2a6f863661329a944a5802c7129f7ad33583dcc11069c17e622c \ + --hash=sha256:694ffa7144fa5cc526c8f4512665003a39fa09ef00d19bbca5c8d3406db72fbe \ + --hash=sha256:6f5d4b2280ceea76c55c893827961ed0a6eadd5a584a7c4e6e6dd7bc10dfdd96 \ + --hash=sha256:7532a46505470be30cbf1dbadb20379fb481244f1ca54207d7df3bf0bbab6a20 \ + --hash=sha256:76a53bfa10b367ee734b95988bd82a9a5f0038a25030f9f23bbbc005010ca600 \ + --hash=sha256:77e41db75f9958f2083e03e9dd39da12247b3430c92267df3af77c83d8ff9eed \ + --hash=sha256:7a43bbfa9b6cfdfaeefbd91038dde65ea2c421dc387ed171613df340650874f2 \ + --hash=sha256:7b41d19c0cfe5c259fe6c539fd75051cd39a5d33d05482f885faf43f7f5e7d26 \ + --hash=sha256:7c5227963409551ae4a6938beb70d56bf1918c554a287d3da6853526212fbe0a \ + --hash=sha256:870a48007872d12e95a996fca3c03a64290d3ea2e61076aa35d3b253cf34cd32 \ + --hash=sha256:88b04e12c9b041a1e0bcb886fec709c488192638a9a7a3677513ac6ba81d8e79 \ + --hash=sha256:8c287ae7ac921dfde88b1c125bd9590b7ec3c900c2d3db5197f1286e144e712b \ + --hash=sha256:903fa5716b8fbb21019268b44f73f3748c41d1a30d71b4a49c84b642c2fed5fa \ + --hash=sha256:9537e4baf0db67f382eb29255a03154fcd4984638303ff9baaa738b10371fa57 \ + --hash=sha256:9951dcbd37850da32b2cb6e391f621c1ee456191c6ae5528af4a34afe357c30e \ + --hash=sha256:9b2f7d0408ddeb8ea1fd43d3db79a8cefaccadd2a812f021333b338ed6b10aba \ + --hash=sha256:9c88e134d51d5e82315a7c32b914a58751b7353eb5268dbd02eabf020b4c4700 \ + --hash=sha256:9fae214f6c43cd47f7bef98c56919b9222481e833be2915f6857a1e9e8a15318 \ + --hash=sha256:a3a669f11289a8995d24fbfc0e63f8289dd03c9aaa0cc8f1eab31d18ca61a382 \ + --hash=sha256:aa741c1a8a8cc25eb3a3a01a62bdb5095a773d8c6a86470bde7f607a447e7905 \ + 
--hash=sha256:b0877a9a2129a2c56a2eae2da016743db7d9d6a05d5e1c198f1b7808c602a30e \ + --hash=sha256:bcb6c6dd1d6be6d38d6db283747d07fda089ff8c559a835236560a4410340455 \ + --hash=sha256:caff52cb5cd7626872d9696aee5b794abe172804beb7db52eed1fd5824b63910 \ + --hash=sha256:cbc1eb55342cbac8f7ec159088d54e2cfdd5ddf61c87b8bbe682d113789331b2 \ + --hash=sha256:cd16a89efe3a003029c87ff19e9fba635864e064da646bc749fc1908a4af18f3 \ + --hash=sha256:ce5b64dfe8d0cca407d88b0ee619d80d4215a2612c1af8c98a92180e7109f4b5 \ + --hash=sha256:d58a5a71c4c37354f9e0c24c9c8321f0185f6945ef027460b809f4bb474bfe41 \ + --hash=sha256:db41f3845eb579b544c962864cce2c2a0257fe30f0f1e18e51b1e8cbb4e0ac6d \ + --hash=sha256:db5b25265010a1b3dca6a174a443a0ed4c4ab12d5e2883a11c97d6e6d59b12f9 \ + --hash=sha256:dd0404d154084a371e6d2bafc787201612a1359c2dee688ae334f9118aa0bf47 \ + --hash=sha256:de431765bd5fe62119e0bc6bc6e7b17ac53017ae1782acf88fcf6b7eae475a49 \ + --hash=sha256:df02fdec0c533301497acb0bc0f27f479a3a63dcdc3a099ae33a902857f07477 \ + --hash=sha256:e8533f5111704d75de3139bf0b8136d3a6c1642c55c067866fa0a51c2155ee33 \ + --hash=sha256:f2f908239b7098799b8845e5936c2ccb91d8c2323be02e82f8dcb4a80dcf4a25 \ + --hash=sha256:f8bfd36f368efe0ab2a6aa3db7f14598aac454b06849fb633b762ddbede1db90 \ + --hash=sha256:ffe73f9e7aea404722058405ff24041e59d31ca23d1da0895af48050a07b6932 # via eventlet -grpcio==1.47.0 \ - --hash=sha256:0425b5577be202d0a4024536bbccb1b052c47e0766096e6c3a5789ddfd5f400d \ - --hash=sha256:06c0739dff9e723bca28ec22301f3711d85c2e652d1c8ae938aa0f7ad632ef9a \ - --hash=sha256:08307dc5a6ac4da03146d6c00f62319e0665b01c6ffe805cfcaa955c17253f9c \ - --hash=sha256:090dfa19f41efcbe760ae59b34da4304d4be9a59960c9682b7eab7e0b6748a79 \ - --hash=sha256:0a24b50810aae90c74bbd901c3f175b9645802d2fbf03eadaf418ddee4c26668 \ - --hash=sha256:0cd44d78f302ff67f11a8c49b786c7ccbed2cfef6f4fd7bb0c3dc9255415f8f7 \ - --hash=sha256:0d8a7f3eb6f290189f48223a5f4464c99619a9de34200ce80d5092fb268323d2 \ - 
--hash=sha256:14d2bc74218986e5edf5527e870b0969d63601911994ebf0dce96288548cf0ef \ - --hash=sha256:1bb9afa85e797a646bfcd785309e869e80a375c959b11a17c9680abebacc0cb0 \ - --hash=sha256:1ec63bbd09586e5cda1bdc832ae6975d2526d04433a764a1cc866caa399e50d4 \ - --hash=sha256:2061dbe41e43b0a5e1fd423e8a7fb3a0cf11d69ce22d0fac21f1a8c704640b12 \ - --hash=sha256:324e363bad4d89a8ec7124013371f268d43afd0ac0fdeec1b21c1a101eb7dafb \ - --hash=sha256:35dfd981b03a3ec842671d1694fe437ee9f7b9e6a02792157a2793b0eba4f478 \ - --hash=sha256:43857d06b2473b640467467f8f553319b5e819e54be14c86324dad83a0547818 \ - --hash=sha256:4706c78b0c183dca815bbb4ef3e8dd2136ccc8d1699f62c585e75e211ad388f6 \ - --hash=sha256:4d9ad7122f60157454f74a850d1337ba135146cef6fb7956d78c7194d52db0fe \ - --hash=sha256:544da3458d1d249bb8aed5504adf3e194a931e212017934bf7bfa774dad37fb3 \ - --hash=sha256:55782a31ec539f15b34ee56f19131fe1430f38a4be022eb30c85e0b0dcf57f11 \ - --hash=sha256:55cd8b13c5ef22003889f599b8f2930836c6f71cd7cf3fc0196633813dc4f928 \ - --hash=sha256:5dbba95fab9b35957b4977b8904fc1fa56b302f9051eff4d7716ebb0c087f801 \ - --hash=sha256:5f57b9b61c22537623a5577bf5f2f970dc4e50fac5391090114c6eb3ab5a129f \ - --hash=sha256:64e097dd08bb408afeeaee9a56f75311c9ca5b27b8b0278279dc8eef85fa1051 \ - --hash=sha256:664a270d3eac68183ad049665b0f4d0262ec387d5c08c0108dbcfe5b351a8b4d \ - --hash=sha256:668350ea02af018ca945bd629754d47126b366d981ab88e0369b53bc781ffb14 \ - --hash=sha256:67cd275a651532d28620eef677b97164a5438c5afcfd44b15e8992afa9eb598c \ - --hash=sha256:68b5e47fcca8481f36ef444842801928e60e30a5b3852c9f4a95f2582d10dcb2 \ - --hash=sha256:7191ffc8bcf8a630c547287ab103e1fdf72b2e0c119e634d8a36055c1d988ad0 \ - --hash=sha256:815089435d0f113719eabf105832e4c4fa1726b39ae3fb2ca7861752b0f70570 \ - --hash=sha256:8dbef03853a0dbe457417c5469cb0f9d5bf47401b49d50c7dad3c495663b699b \ - --hash=sha256:91cd292373e85a52c897fa5b4768c895e20a7dc3423449c64f0f96388dd1812e \ - --hash=sha256:9298d6f2a81f132f72a7e79cbc90a511fffacc75045c2b10050bb87b86c8353d \ - 
--hash=sha256:96cff5a2081db82fb710db6a19dd8f904bdebb927727aaf4d9c427984b79a4c1 \ - --hash=sha256:9e63e0619a5627edb7a5eb3e9568b9f97e604856ba228cc1d8a9f83ce3d0466e \ - --hash=sha256:a278d02272214ec33f046864a24b5f5aab7f60f855de38c525e5b4ef61ec5b48 \ - --hash=sha256:a6b2432ac2353c80a56d9015dfc5c4af60245c719628d4193ecd75ddf9cd248c \ - --hash=sha256:b821403907e865e8377af3eee62f0cb233ea2369ba0fcdce9505ca5bfaf4eeb3 \ - --hash=sha256:b88bec3f94a16411a1e0336eb69f335f58229e45d4082b12d8e554cedea97586 \ - --hash=sha256:bfdb8af4801d1c31a18d54b37f4e49bb268d1f485ecf47f70e78d56e04ff37a7 \ - --hash=sha256:c79996ae64dc4d8730782dff0d1daacc8ce7d4c2ba9cef83b6f469f73c0655ce \ - --hash=sha256:cc34d182c4fd64b6ff8304a606b95e814e4f8ed4b245b6d6cc9607690e3ef201 \ - --hash=sha256:d0d481ff55ea6cc49dab2c8276597bd4f1a84a8745fedb4bc23e12e9fb9d0e45 \ - --hash=sha256:e9723784cf264697024778dcf4b7542c851fe14b14681d6268fb984a53f76df1 \ - --hash=sha256:f4508e8abd67ebcccd0fbde6e2b1917ba5d153f3f20c1de385abd8722545e05f \ - --hash=sha256:f515782b168a4ec6ea241add845ccfebe187fc7b09adf892b3ad9e2592c60af1 \ - --hash=sha256:f89de64d9eb3478b188859214752db50c91a749479011abd99e248550371375f \ - --hash=sha256:fcd5d932842df503eb0bf60f9cc35e6fe732b51f499e78b45234e0be41b0018d +grpcio==1.48.1 \ + --hash=sha256:1471e6f25a8e47d9f88499f48c565fc5b2876e8ee91bfb0ff33eaadd188b7ea6 \ + --hash=sha256:19f9c021ae858d3ef6d5ec4c0acf3f0b0a61e599e5aa36c36943c209520a0e66 \ + --hash=sha256:1c924d4e0493fd536ba3b82584b370e8b3c809ef341f9f828cff2dc3c761b3ab \ + --hash=sha256:1d065f40fe74b52b88a6c42d4373a0983f1b0090f952a0747f34f2c11d6cbc64 \ + --hash=sha256:1ff1be0474846ed15682843b187e6062f845ddfeaceb2b28972073f474f7b735 \ + --hash=sha256:2563357697f5f2d7fd80c1b07a57ef4736551327ad84de604e7b9f6c1b6b4e20 \ + --hash=sha256:2b6c336409937fd1cd2bf78eb72651f44d292d88da5e63059a4e8bd01b9d7411 \ + --hash=sha256:3340cb2224cc397954def015729391d85fb31135b5a7efca363e73e6f1b0e908 \ + 
--hash=sha256:346bef672a1536d59437210f16af35389d715d2b321bfe4899b3d6476a196706 \ + --hash=sha256:3d319a0c89ffac9b8dfc75bfe727a4c835d18bbccc14203b20eb5949c6c7d87d \ + --hash=sha256:460f5bec23fffa3c041aeba1f93a0f06b7a29e6a4da3658a52e1a866494920ab \ + --hash=sha256:4786323555a9f2c6380cd9a9922bcfd42165a51d68d242eebfcdfdc667651c96 \ + --hash=sha256:53b6306f9473020bc47ddf64ca704356466e63d5f88f5c2a7bf0a4692e7f03c4 \ + --hash=sha256:53fa2fc1a1713195fa7acf7443a6f59b6ac7837607690f813c66cc18a9cb8135 \ + --hash=sha256:598c8c42420443c55431eba1821c7a2f72707f1ff674a4de9e0bb03282923cfb \ + --hash=sha256:5a6a750c8324f3974e95265d3f9a0541573c537af1f67b3f6f46bf9c0b2e1b36 \ + --hash=sha256:5d81cd3c161291339ed3b469250c2f5013c3083dea7796e93aedff8f05fdcec1 \ + --hash=sha256:626822d799d8fab08f07c8d95ef5c36213d24143f7cad3f548e97413db9f4110 \ + --hash=sha256:660217eccd2943bf23ea9a36e2a292024305aec04bf747fbcff1f5032b83610e \ + --hash=sha256:741eeff39a26d26da2b6d74ff0559f882ee95ee4e3b20c0b4b829021cb917f96 \ + --hash=sha256:7cee20a4f873d61274d70c28ff63d19677d9eeea869c6a9cbaf3a00712336b6c \ + --hash=sha256:8bbaa6647986b874891bc682a1093df54cbdb073b5d4b844a2b480c47c7ffafd \ + --hash=sha256:934aad7350d9577f4275e787f3d91d3c8ff4efffa8d6b807d343d3c891ff53eb \ + --hash=sha256:9477967e605ba08715dcc769b5ee0f0d8b22bda40ef25a0df5a8759e5a4d21a5 \ + --hash=sha256:97dc35a99c61d5f35ec6457d3df0a4695ba9bb04a35686e1c254462b15c53f98 \ + --hash=sha256:9d116106cf220c79e91595523c893f1cf09ec0c2ea49de4fb82152528b7e6833 \ + --hash=sha256:9fba1d0ba7cf56811728f1951c800a9aca6677e86433c5e353f2cc2c4039fda6 \ + --hash=sha256:a15409bc1d05c52ecb00f5e42ab8ff280e7149f2eb854728f628fb2a0a161a5b \ + --hash=sha256:a1b81849061c67c2ffaa6ed27aa3d9b0762e71e68e784e24b0330b7b1c67470a \ + --hash=sha256:a5edbcb8289681fcb5ded7542f2b7dd456489e83007a95e32fcaf55e9f18603e \ + --hash=sha256:a661d4b9b314327dec1e92ed57e591e8e5eb055700e0ba9e9687f734d922dcb6 \ + --hash=sha256:b005502c59835f9ba3c3f8742f64c19eeb3db41eae1a89b035a559b39b421803 \ + 
--hash=sha256:b01faf7934c606d5050cf055c1d03943180f23d995d68d04cf50c80d1ef2c65a \ + --hash=sha256:b0fa666fecdb1b118d37823937e9237afa17fe734fc4dbe6dd642e1e4cca0246 \ + --hash=sha256:c54734a6eb3be544d332e65c846236d02e5fc71325e8c53af91e83a46b87b506 \ + --hash=sha256:c6b6969c529521c86884a13745a4b68930db1ef2e051735c0f479d0a7adb25b6 \ + --hash=sha256:ca382028cdfd2d79b7704b2acb8ae1fb54e9e1a03a6765e1895ba89a6fcfaba1 \ + --hash=sha256:ca5209ef89f7607be47a308fa92308cf079805ed556ecda672f00039a26e366f \ + --hash=sha256:d03009a26f7edca9f0a581aa5d3153242b815b858cb4790e34a955afb303c6ba \ + --hash=sha256:d751f8beb383c4a5a95625d7ccc1ab183b98b02c6a88924814ea7fbff530872d \ + --hash=sha256:dad2501603f954f222a6e555413c454a5f8d763ab910fbab3855bcdfef6b3148 \ + --hash=sha256:dbba883c2b6d63949bc98ab1950bc22cf7c8d4e8cb68de6edde49d3cccd8fd26 \ + --hash=sha256:e02f6ba10a3d4e289fa7ae91b301783a750d118b60f17924ca05e506c7d29bc8 \ + --hash=sha256:f0ef1dafb4eadeaca58aec8c721a5a73d551064b0c63d57fa003e233277c642e \ + --hash=sha256:f29627d66ae816837fd32c9450dc9c54780962cd74d034513ed829ba3ab46652 \ + --hash=sha256:f3a99ed422c38bd1bc893cb2cb2cea6d64173ec30927f699e95f5f58bdf625cf # via # -r requirements.in # grpcio-tools -grpcio-tools==1.47.0 \ - --hash=sha256:058060fbc5a60a1c6cc2cbb3d99f730825ba249917978d48b7d0fd8f2caf01da \ - --hash=sha256:05b495ed997a9afc9016c696ed7fcd35678a7276fe0bd8b95743a382363ad2b4 \ - --hash=sha256:0b32002ff4ae860c85feb2aca1b752eb4518e7781c5770b869e7b2dfa9d92cbe \ - --hash=sha256:0eced69e159b3fdd7597d85950f56990e0aa81c11a20a7785fb66f0e47c46b57 \ - --hash=sha256:156b5f6654fea51983fd9257d47f1ad7bfb2a1d09ed471e610a7b34b97d40802 \ - --hash=sha256:18548f35b0657422d5d40e6fa89994469f4bb77df09f8133ecdccec0e31fc72c \ - --hash=sha256:1a0a91941f6f2a4d97e843a5d9ad7ccccf702af2d9455932f18cf922e65af95e \ - --hash=sha256:2364ac3bd7266752c9971dbef3f79d21cd958777823512faa93473cbd973b8f1 \ - --hash=sha256:2a6a6e5e08866d643b84c89140bbe504f864f11b87bfff7a5f2af94c5a2be18d \ - 
--hash=sha256:2c5c50886e6e79af5387c6514eb19f1f6b1a0b4eb787f1b7a8f21a74e2444102 \ - --hash=sha256:3edb04d102e0d6f0149d93fe8cf69a38c20a2259a913701a4c35c119049c8404 \ - --hash=sha256:3fccc282ee97211a33652419dcdfd24a9a60bbd2d56f5c5dd50c7186a0f4d978 \ - --hash=sha256:441a0a378117447c089b944f325f11039329d8aa961ecdb8226c5dd84af6f003 \ - --hash=sha256:45ceb73a97e2d7ff719fc12c02f1ef13014c47bad60a864313da88ccd90cdf36 \ - --hash=sha256:498c0bae4975683a5a33b72cf1bd64703b34c826871fd3ee8d295407cd5211ec \ - --hash=sha256:4eced9e0674bfb5c528a3bf2ea2b8596da133148b3e0718915792074204ea226 \ - --hash=sha256:51352070f13ea3346b5f5ca825f2203528b8218fffc6ac6d951216f812272d8b \ - --hash=sha256:53c47b08ee2f59a89e8df5f3c09850d7fac264754cbaeabae65f6fbf78d80536 \ - --hash=sha256:5c8ab9b541a869d3b4ef34c291fbfb6ec78ad728e04737fddd91eac3c2193459 \ - --hash=sha256:6804cbd92b9069ae9189d65300e456bcc3945f6ae196d2af254e9635b9c3ef0d \ - --hash=sha256:6c66094fd79ee98bcb504e9f1a3fa6e7ebfd246b4e3d8132227e5020b5633988 \ - --hash=sha256:6d41ec06f2ccc8adcd400a63508ea8e008fb03f270e0031ff2de047def2ada9d \ - --hash=sha256:74f607b9084b5325a997d9ae57c0814955e19311111568d029b2a6a66f4869ec \ - --hash=sha256:7589d6f56e633378047274223f0a75534b2cd7c598f9f2894cb4854378b8b00b \ - --hash=sha256:759064fc8439bbfe5402b2fd3b0685f4ffe07d7cc6a64908c2f88a7c80449ce4 \ - --hash=sha256:7be45d69f0eed912df2e92d94958d1a3e72617469ec58ffcac3e2eb153a7057e \ - --hash=sha256:7fd10683f4f03400536e7a026de9929430ee198c2cbdf2c584edfa909ccc8993 \ - --hash=sha256:818fca1c7dd4ad1c9c01f91ba37006964f4c57c93856fa4ebd7d5589132844d6 \ - --hash=sha256:84e38f46af513a6f62a3d482160fcb94063dbc9fdd1452d09f8010422f144de1 \ - --hash=sha256:93d08c02bd82e423353399582f22493a191db459c3f34031b583f13bcf42b95e \ - --hash=sha256:94114e01c4508d904825bd984e3d2752c0b0e6eb714ac08b99f73421691cf931 \ - --hash=sha256:9ab78cd16b4ac7c6b79c8be194c67e03238f6378694133ce3ce9b123caf24ed5 \ - --hash=sha256:9dd6e26e3e0555deadcb52b087c6064e4fd02c09180b42e96c66260137d26b50 \ - 
--hash=sha256:a93263955da8d6e449d7ceb84af4e84b82fa760fd661b4ef4549929d9670ab8e \ - --hash=sha256:ac5c6aef72618ebc5ee9ad725dd53e1c145ef420b79d21a7c43ca80658d3d8d4 \ - --hash=sha256:ae53ae35a9761ceea50a502addb7186c5188969d63ad21cf12e00d939db5b967 \ - --hash=sha256:b2fa3c545c8aa1e8c33ca04b1424be3ff77da631faf37db3350d7459c3bdedde \ - --hash=sha256:c2c280197d68d5a28f5b90adf755bd9e28c99f3e47ad4edcfe20497cf3456e1d \ - --hash=sha256:ca548afcfa0ffc47c3cf9eeede81adde15c321bfe897085e90ce8913615584ae \ - --hash=sha256:ccc8ce33bd31bf12649541b5857fabfee7dd84b04138336a27bf46a28d150c11 \ - --hash=sha256:dc6567d652c6b70d8c03f4e450a694e62b4d69a400752f8b9c3c8b659dd6b06a \ - --hash=sha256:dd5d330230038374e64fc652fc4c1b25d457a8b67b9069bfce83a17ab675650b \ - --hash=sha256:e1de1f139f05ab6bbdabc58b06f6ebb5940a92214bbc7246270299387d0af2ae \ - --hash=sha256:f19191460435f8bc72450cf26ac0559726f98c49ad9b0969db3db8ba51be98c8 \ - --hash=sha256:f64b5378484be1d6ce59311f86174be29c8ff98d8d90f589e1c56d5acae67d3c \ - --hash=sha256:fb44ae747fd299b6513420cb6ead50491dc3691d17da48f28fcc5ebf07f47741 +grpcio-tools==1.48.1 \ + --hash=sha256:00b6592b04732648238dcabffd86202211e1df90e26bb4dbe2fa4882e8bf662c \ + --hash=sha256:04c263723dcc1ef4c3ae0a0d0c0a7d572bca49153e02fc026ed485fc2373128f \ + --hash=sha256:09c295fae4fc646376b02f81a763d32900bdd16b85a8555b912d2f7472a83ad3 \ + --hash=sha256:1178f2ea531f80cc2027ec64728df6ffc8e98cf1df61652a496eafd612127183 \ + --hash=sha256:1464248c6089d3c19ba82d29b515b11a43737ef73b08f73895c16959a4d537df \ + --hash=sha256:1cb5802dc1eb33e1c5ccc86d48af6c95cfb10975851f2e4b3e5547d2e7502829 \ + --hash=sha256:247035b1d59135c13b74b203a23069564ca8202d27b21578ced6426c06e08e98 \ + --hash=sha256:24bf144bafd8cdcdedcd738a3234346c4d8a3e82aee74429842fffb56920bcbf \ + --hash=sha256:289bb31d4ab25c49125f49bd7b048e8ce02b147ad296e8fce087814d034faf21 \ + --hash=sha256:2bf1d3b44132bf5ec8397fdee70102edff7cf311483d58926cae3a564310d121 \ + 
--hash=sha256:306bd078d20739026e481ef9f353e5b3025a761b443e96a47e5c5db7e3cdcd8a \ + --hash=sha256:31877d879023bd81344d31eba6e301f1239c2ccfc5fd28d62fac2cb117194a81 \ + --hash=sha256:42d0b2f488158cd12ed5848fc8e4b536214ff5a4f5d6ecc5ebe5edbbf064ae50 \ + --hash=sha256:491e87352e0be4c0e72dbecb0a52a53aa75f6a4d1dc9e53067c1cbc8833c4f5a \ + --hash=sha256:4dfcfb3f89b0e8c98e1a65f9c0e04f5bfe69ea7eaaa22b4b99c2e3895512afc8 \ + --hash=sha256:5037a919a2689ed6f8c2a3b9cf6f1431caf0a93c730bde38f0960143f0072540 \ + --hash=sha256:5090ef3fa0288977b25fa1c5c0d15ca5d751ff206f906966c64eb3200ed6fa13 \ + --hash=sha256:55b7de7f48c6f500c1252e6b96279c93d5bc24bcef618fd6587563e8c854527f \ + --hash=sha256:590a1bc3e5997e78b57085178d8de202de977d1f01984ac907a3ffca225bb729 \ + --hash=sha256:610da6278696e8f6d7831ecb9e513099f1f52dae3f40876a6049f634d4933919 \ + --hash=sha256:647ab95b7cecb5de392da3d70f0d49731cb90f1bd590d37a83ed6cfe06cb2a31 \ + --hash=sha256:6c2a2796e4b6d7ae5378d8fd848a64b9541524ad5f6eea4583b879fa53d97883 \ + --hash=sha256:739e3c49091175c117a26be4099145cdf85638a991d9bad8a5f9f6614ea31d61 \ + --hash=sha256:75fbf37461c337184624bec30db3a59c47514812b2f8f43a99818e44fb63243a \ + --hash=sha256:7d4ada8dc77f5db280fa476cd28263f7d7de96c4fce10f5559cde620a3363a95 \ + --hash=sha256:7dc607ee397eed06b126cadf6a86bde46bcc7da9b9a1d1ad5338b9afa6085d07 \ + --hash=sha256:7e3230dd4f1bd79251ef086222041954ca5c27650a3d7f6589f972292461c8bf \ + --hash=sha256:832935ea7ebb20f99bd8184fd70e0d111202043165207423f926dad22b255583 \ + --hash=sha256:87676b55aaf171c4922682e307e4fb8d6007c5e79bcdf1653cdf0868da17ae2a \ + --hash=sha256:8d7fc42b0443e8d16d740e463ea0eb77b68dec6423783738bf1e480e6475e32c \ + --hash=sha256:8ec634738c4d6220d96b293caff3115a0573c52d97581654c0a47b09d70ea958 \ + --hash=sha256:920d6381ea5bc8ebf3303e57bcc7ed3a0735508f881e70a5e08a88d1a98ab34d \ + --hash=sha256:92a7182bd96b8105a5372e8e656ab8bde8f607c5092278aba2ec2f83b8c9ab28 \ + --hash=sha256:951c3701157627f90ec6831ba502babb10aa0dd465a2cb00fe450a061ef10251 \ + 
--hash=sha256:a0e2c5ba0a4a181eb379415affba9dc122f3f2eecdd459cc98fe311fdad9cf96 \ + --hash=sha256:a53b033e38e60671c0c22a296ae9fd2b025a196e153c6d6b9a3aa20b11bedfcf \ + --hash=sha256:b53f48cb31a6a59ea01a371ab58bd8fa83fae9c1cfafab8acb954a70f757c6b1 \ + --hash=sha256:b5da8227eeefc957c41d7c59919d35e8a1d8275941e9839566b29eb368940bee \ + --hash=sha256:c4ec8447af615563a5da884b9ed93b41716b80bb01ac9183f09046ac48dbacb5 \ + --hash=sha256:d454f4ef35b070b5bf39b65fec30b26b1f22da5b9efa4a716f7518308713ceab \ + --hash=sha256:d9b854dec3e81fa5c2d601facb3b801a60bd5cbe295ef7e788a3e5824799183e \ + --hash=sha256:e47c663303ebc7b409f4da9073e6bef05e97b74ee47caafc08e0a2e223b95100 \ + --hash=sha256:eb04964c52721cb841a863a6a523bd06328a42d718371bc55d257f93390b6799 \ + --hash=sha256:ebe91db084c22143bc50967670f201bb5bf776b46227b62a97b0a75b0046aac5 \ + --hash=sha256:f432ae6593373cf46a0144aa6392875c5b3fd9d48bf27b5ced503c466ef496ea \ + --hash=sha256:fe071e1e13088752b2e96c6caadb719d39c80d768e441e3fc73b08f8b87560ae # via -r requirements.in h2==3.2.0 \ --hash=sha256:61e0f6601fa709f35cdb730863b4e5ec7ad449792add80d1410d4174ed139af5 \ @@ -592,9 +591,9 @@ hyperframe==5.2.0 \ --hash=sha256:5187962cb16dcc078f23cb5a4b110098d546c3f41ff2d4038a9896893bbd0b40 \ --hash=sha256:a9f5c17f2cc3c719b917c4f33ed1c61bd1f8dfac4b1bd23b7c80b3400971b41f # via h2 -idna==3.3 \ - --hash=sha256:84d9dd047ffa80596e0f246e2eab0b391788b0503584e8945f2368256d2735ff \ - --hash=sha256:9d643ff0a55b762d5cdb124b8eaa99c66322e2157b69160bc32796e824360e6d +idna==3.4 \ + --hash=sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4 \ + --hash=sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2 # via # jsonschema # requests @@ -976,7 +975,7 @@ oslo-config==8.8.0 \ --hash=sha256:96933d3011dae15608a11616bfb00d947e22da3cb09b6ff37ddd7576abd4764c \ --hash=sha256:b1e2a398450ea35a8e5630d8b23057b8939838c4433cd25a20cc3a36d5df9e3b # via -r requirements.in -oslo-i18n==5.1.0 \ +oslo.i18n==5.1.0 \ 
--hash=sha256:6bf111a6357d5449640852de4640eae4159b5562bbba4c90febb0034abc095d0 \ --hash=sha256:75086cfd898819638ca741159f677e2073a78ca86a9c9be8d38b46800cdf2dc9 # via oslo-config @@ -998,7 +997,7 @@ pbr==5.10.0 \ --hash=sha256:cfcc4ff8e698256fc17ea3ff796478b050852585aa5bae79ecd05b2ab7b39b9a \ --hash=sha256:da3e18aac0a3c003e9eea1a81bd23e5a3a75d745670dcf736317b7d966887fdf # via - # oslo-i18n + # oslo.i18n # stevedore pluggy==1.0.0 \ --hash=sha256:4224373bacce55f955a878bf9cfa763c1e360858e330072059e10bad68531159 \ @@ -1008,70 +1007,69 @@ priority==1.3.0 \ --hash=sha256:6bc1961a6d7fcacbfc337769f1a382c8e746566aaa365e78047abe9f66b2ffbe \ --hash=sha256:be4fcb94b5e37cdeb40af5533afe6dd603bd665fe9c8b3052610fc1001d5d1eb # via -r requirements.in -prometheus-client==0.3.1 \ +prometheus_client==0.3.1 \ --hash=sha256:17bc24c09431644f7c65d7bce9f4237252308070b6395d6d8e87767afe867e24 # via -r requirements.in -protobuf==3.20.1 \ - --hash=sha256:06059eb6953ff01e56a25cd02cca1a9649a75a7e65397b5b9b4e929ed71d10cf \ - --hash=sha256:097c5d8a9808302fb0da7e20edf0b8d4703274d140fd25c5edabddcde43e081f \ - --hash=sha256:284f86a6207c897542d7e956eb243a36bb8f9564c1742b253462386e96c6b78f \ - --hash=sha256:32ca378605b41fd180dfe4e14d3226386d8d1b002ab31c969c366549e66a2bb7 \ - --hash=sha256:3cc797c9d15d7689ed507b165cd05913acb992d78b379f6014e013f9ecb20996 \ - --hash=sha256:62f1b5c4cd6c5402b4e2d63804ba49a327e0c386c99b1675c8a0fefda23b2067 \ - --hash=sha256:69ccfdf3657ba59569c64295b7d51325f91af586f8d5793b734260dfe2e94e2c \ - --hash=sha256:6f50601512a3d23625d8a85b1638d914a0970f17920ff39cec63aaef80a93fb7 \ - --hash=sha256:7403941f6d0992d40161aa8bb23e12575637008a5a02283a930addc0508982f9 \ - --hash=sha256:755f3aee41354ae395e104d62119cb223339a8f3276a0cd009ffabfcdd46bb0c \ - --hash=sha256:77053d28427a29987ca9caf7b72ccafee011257561259faba8dd308fda9a8739 \ - --hash=sha256:7e371f10abe57cee5021797126c93479f59fccc9693dafd6bd5633ab67808a91 \ - --hash=sha256:9016d01c91e8e625141d24ec1b20fed584703e527d28512aa8c8707f105a683c 
\ - --hash=sha256:9be73ad47579abc26c12024239d3540e6b765182a91dbc88e23658ab71767153 \ - --hash=sha256:adc31566d027f45efe3f44eeb5b1f329da43891634d61c75a5944e9be6dd42c9 \ - --hash=sha256:adfc6cf69c7f8c50fd24c793964eef18f0ac321315439d94945820612849c388 \ - --hash=sha256:af0ebadc74e281a517141daad9d0f2c5d93ab78e9d455113719a45a49da9db4e \ - --hash=sha256:cb29edb9eab15742d791e1025dd7b6a8f6fcb53802ad2f6e3adcb102051063ab \ - --hash=sha256:cd68be2559e2a3b84f517fb029ee611546f7812b1fdd0aa2ecc9bc6ec0e4fdde \ - --hash=sha256:cdee09140e1cd184ba9324ec1df410e7147242b94b5f8b0c64fc89e38a8ba531 \ - --hash=sha256:db977c4ca738dd9ce508557d4fce0f5aebd105e158c725beec86feb1f6bc20d8 \ - --hash=sha256:dd5789b2948ca702c17027c84c2accb552fc30f4622a98ab5c51fcfe8c50d3e7 \ - --hash=sha256:e250a42f15bf9d5b09fe1b293bdba2801cd520a9f5ea2d7fb7536d4441811d20 \ - --hash=sha256:ff8d8fa42675249bb456f5db06c00de6c2f4c27a065955917b28c4f15978b9c3 +protobuf==3.20.2 \ + --hash=sha256:03d76b7bd42ac4a6e109742a4edf81ffe26ffd87c5993126d894fe48a120396a \ + --hash=sha256:09e25909c4297d71d97612f04f41cea8fa8510096864f2835ad2f3b3df5a5559 \ + --hash=sha256:18e34a10ae10d458b027d7638a599c964b030c1739ebd035a1dfc0e22baa3bfe \ + --hash=sha256:291fb4307094bf5ccc29f424b42268640e00d5240bf0d9b86bf3079f7576474d \ + --hash=sha256:2c0b040d0b5d5d207936ca2d02f00f765906622c07d3fa19c23a16a8ca71873f \ + --hash=sha256:384164994727f274cc34b8abd41a9e7e0562801361ee77437099ff6dfedd024b \ + --hash=sha256:3cb608e5a0eb61b8e00fe641d9f0282cd0eedb603be372f91f163cbfbca0ded0 \ + --hash=sha256:5d9402bf27d11e37801d1743eada54372f986a372ec9679673bfcc5c60441151 \ + --hash=sha256:712dca319eee507a1e7df3591e639a2b112a2f4a62d40fe7832a16fd19151750 \ + --hash=sha256:7a5037af4e76c975b88c3becdf53922b5ffa3f2cddf657574a4920a3b33b80f3 \ + --hash=sha256:8228e56a865c27163d5d1d1771d94b98194aa6917bcfb6ce139cbfa8e3c27334 \ + --hash=sha256:84a1544252a933ef07bb0b5ef13afe7c36232a774affa673fc3636f7cee1db6c \ + 
--hash=sha256:84fe5953b18a383fd4495d375fe16e1e55e0a3afe7b4f7b4d01a3a0649fcda9d \ + --hash=sha256:9c673c8bfdf52f903081816b9e0e612186684f4eb4c17eeb729133022d6032e3 \ + --hash=sha256:9f876a69ca55aed879b43c295a328970306e8e80a263ec91cf6e9189243c613b \ + --hash=sha256:a9e5ae5a8e8985c67e8944c23035a0dff2c26b0f5070b2f55b217a1c33bbe8b1 \ + --hash=sha256:b4fdb29c5a7406e3f7ef176b2a7079baa68b5b854f364c21abe327bbeec01cdb \ + --hash=sha256:c184485e0dfba4dfd451c3bd348c2e685d6523543a0f91b9fd4ae90eb09e8422 \ + --hash=sha256:c9cdf251c582c16fd6a9f5e95836c90828d51b0069ad22f463761d27c6c19019 \ + --hash=sha256:e39cf61bb8582bda88cdfebc0db163b774e7e03364bbf9ce1ead13863e81e359 \ + --hash=sha256:e8fbc522303e09036c752a0afcc5c0603e917222d8bedc02813fd73b4b4ed804 \ + --hash=sha256:f34464ab1207114e73bba0794d1257c150a2b89b7a9faf504e00af7c9fd58978 \ + --hash=sha256:f52dabc96ca99ebd2169dadbe018824ebda08a795c7684a0b7d203a290f3adb0 # via # -r requirements.in # grpcio-tools -psutil==5.9.1 \ - --hash=sha256:068935df39055bf27a29824b95c801c7a5130f118b806eee663cad28dca97685 \ - --hash=sha256:0904727e0b0a038830b019551cf3204dd48ef5c6868adc776e06e93d615fc5fc \ - --hash=sha256:0f15a19a05f39a09327345bc279c1ba4a8cfb0172cc0d3c7f7d16c813b2e7d36 \ - --hash=sha256:19f36c16012ba9cfc742604df189f2f28d2720e23ff7d1e81602dbe066be9fd1 \ - --hash=sha256:20b27771b077dcaa0de1de3ad52d22538fe101f9946d6dc7869e6f694f079329 \ - --hash=sha256:28976df6c64ddd6320d281128817f32c29b539a52bdae5e192537bc338a9ec81 \ - --hash=sha256:29a442e25fab1f4d05e2655bb1b8ab6887981838d22effa2396d584b740194de \ - --hash=sha256:3054e923204b8e9c23a55b23b6df73a8089ae1d075cb0bf711d3e9da1724ded4 \ - --hash=sha256:32c52611756096ae91f5d1499fe6c53b86f4a9ada147ee42db4991ba1520e574 \ - --hash=sha256:3a76ad658641172d9c6e593de6fe248ddde825b5866464c3b2ee26c35da9d237 \ - --hash=sha256:44d1826150d49ffd62035785a9e2c56afcea66e55b43b8b630d7706276e87f22 \ - --hash=sha256:4b6750a73a9c4a4e689490ccb862d53c7b976a2a35c4e1846d049dcc3f17d83b \ - 
--hash=sha256:56960b9e8edcca1456f8c86a196f0c3d8e3e361320071c93378d41445ffd28b0 \ - --hash=sha256:57f1819b5d9e95cdfb0c881a8a5b7d542ed0b7c522d575706a80bedc848c8954 \ - --hash=sha256:58678bbadae12e0db55186dc58f2888839228ac9f41cc7848853539b70490021 \ - --hash=sha256:645bd4f7bb5b8633803e0b6746ff1628724668681a434482546887d22c7a9537 \ - --hash=sha256:799759d809c31aab5fe4579e50addf84565e71c1dc9f1c31258f159ff70d3f87 \ - --hash=sha256:79c9108d9aa7fa6fba6e668b61b82facc067a6b81517cab34d07a84aa89f3df0 \ - --hash=sha256:91c7ff2a40c373d0cc9121d54bc5f31c4fa09c346528e6a08d1845bce5771ffc \ - --hash=sha256:9272167b5f5fbfe16945be3db475b3ce8d792386907e673a209da686176552af \ - --hash=sha256:944c4b4b82dc4a1b805329c980f270f170fdc9945464223f2ec8e57563139cf4 \ - --hash=sha256:a6a11e48cb93a5fa606306493f439b4aa7c56cb03fc9ace7f6bfa21aaf07c453 \ - --hash=sha256:a8746bfe4e8f659528c5c7e9af5090c5a7d252f32b2e859c584ef7d8efb1e689 \ - --hash=sha256:abd9246e4cdd5b554a2ddd97c157e292ac11ef3e7af25ac56b08b455c829dca8 \ - --hash=sha256:b14ee12da9338f5e5b3a3ef7ca58b3cba30f5b66f7662159762932e6d0b8f680 \ - --hash=sha256:b88f75005586131276634027f4219d06e0561292be8bd6bc7f2f00bdabd63c4e \ - --hash=sha256:c7be9d7f5b0d206f0bbc3794b8e16fb7dbc53ec9e40bbe8787c6f2d38efcf6c9 \ - --hash=sha256:d2d006286fbcb60f0b391741f520862e9b69f4019b4d738a2a45728c7e952f1b \ - --hash=sha256:db417f0865f90bdc07fa30e1aadc69b6f4cad7f86324b02aa842034efe8d8c4d \ - --hash=sha256:e7e10454cb1ab62cc6ce776e1c135a64045a11ec4c6d254d3f7689c16eb3efd2 \ - --hash=sha256:f65f9a46d984b8cd9b3750c2bdb419b2996895b005aefa6cbaba9a143b1ce2c5 \ - --hash=sha256:fea896b54f3a4ae6f790ac1d017101252c93f6fe075d0e7571543510f11d2676 +psutil==5.9.2 \ + --hash=sha256:14b29f581b5edab1f133563272a6011925401804d52d603c5c606936b49c8b97 \ + --hash=sha256:256098b4f6ffea6441eb54ab3eb64db9ecef18f6a80d7ba91549195d55420f84 \ + --hash=sha256:39ec06dc6c934fb53df10c1672e299145ce609ff0611b569e75a88f313634969 \ + 
--hash=sha256:404f4816c16a2fcc4eaa36d7eb49a66df2d083e829d3e39ee8759a411dbc9ecf \ + --hash=sha256:42638876b7f5ef43cef8dcf640d3401b27a51ee3fa137cb2aa2e72e188414c32 \ + --hash=sha256:4642fd93785a29353d6917a23e2ac6177308ef5e8be5cc17008d885cb9f70f12 \ + --hash=sha256:4fb54941aac044a61db9d8eb56fc5bee207db3bc58645d657249030e15ba3727 \ + --hash=sha256:561dec454853846d1dd0247b44c2e66a0a0c490f937086930ec4b8f83bf44f06 \ + --hash=sha256:5d39e3a2d5c40efa977c9a8dd4f679763c43c6c255b1340a56489955dbca767c \ + --hash=sha256:614337922702e9be37a39954d67fdb9e855981624d8011a9927b8f2d3c9625d9 \ + --hash=sha256:67b33f27fc0427483b61563a16c90d9f3b547eeb7af0ef1b9fe024cdc9b3a6ea \ + --hash=sha256:68b35cbff92d1f7103d8f1db77c977e72f49fcefae3d3d2b91c76b0e7aef48b8 \ + --hash=sha256:7cbb795dcd8ed8fd238bc9e9f64ab188f3f4096d2e811b5a82da53d164b84c3f \ + --hash=sha256:8f024fbb26c8daf5d70287bb3edfafa22283c255287cf523c5d81721e8e5d82c \ + --hash=sha256:91aa0dac0c64688667b4285fa29354acfb3e834e1fd98b535b9986c883c2ce1d \ + --hash=sha256:94e621c6a4ddb2573d4d30cba074f6d1aa0186645917df42c811c473dd22b339 \ + --hash=sha256:9770c1d25aee91417eba7869139d629d6328a9422ce1cdd112bd56377ca98444 \ + --hash=sha256:b1928b9bf478d31fdffdb57101d18f9b70ed4e9b0e41af751851813547b2a9ab \ + --hash=sha256:b2f248ffc346f4f4f0d747ee1947963613216b06688be0be2e393986fe20dbbb \ + --hash=sha256:b315febaebae813326296872fdb4be92ad3ce10d1d742a6b0c49fb619481ed0b \ + --hash=sha256:b3591616fa07b15050b2f87e1cdefd06a554382e72866fcc0ab2be9d116486c8 \ + --hash=sha256:b4018d5f9b6651f9896c7a7c2c9f4652e4eea53f10751c4e7d08a9093ab587ec \ + --hash=sha256:d75291912b945a7351d45df682f9644540d564d62115d4a20d45fa17dc2d48f8 \ + --hash=sha256:dc9bda7d5ced744622f157cc8d8bdd51735dafcecff807e928ff26bdb0ff097d \ + --hash=sha256:e3ac2c0375ef498e74b9b4ec56df3c88be43fe56cac465627572dbfb21c4be34 \ + --hash=sha256:e4c4a7636ffc47b7141864f1c5e7d649f42c54e49da2dd3cceb1c5f5d29bfc85 \ + --hash=sha256:ed29ea0b9a372c5188cdb2ad39f937900a10fb5478dc077283bf86eeac678ef1 \ + 
--hash=sha256:f40ba362fefc11d6bea4403f070078d60053ed422255bd838cd86a40674364c9 \ + --hash=sha256:f4cb67215c10d4657e320037109939b1c1d2fd70ca3d76301992f89fe2edb1f1 \ + --hash=sha256:f7929a516125f62399d6e8e026129c8835f6c5a3aab88c3fff1a05ee8feb840d \ + --hash=sha256:fd331866628d18223a4265371fd255774affd86244fc307ef66eaf00de0633d5 \ + --hash=sha256:feb861a10b6c3bb00701063b37e4afc754f8217f0f09c42280586bd6ac712b5c # via -r requirements.in py==1.11.0 \ --hash=sha256:51c75c4126074b472f746a24399ad32f6053d1b34b68d2fa41e558e6f4a98719 \ @@ -1174,32 +1172,32 @@ pyparsing==3.0.9 \ pyroute2==0.6.13 \ --hash=sha256:b03d49a581945fec2b1ec7d1d5125c6f40ba04ed11affc90c4caddc019e25792 # via -r requirements.in -pyroute2-core==0.6.13 \ +pyroute2.core==0.6.13 \ --hash=sha256:227dfd9f19888ddd1341966822ffd5880db9e3c89375096418c660ff4d1a11d0 # via # pyroute2 - # pyroute2-ethtool - # pyroute2-ipdb - # pyroute2-ipset - # pyroute2-ndb - # pyroute2-nftables - # pyroute2-nslink -pyroute2-ethtool==0.6.13 \ + # pyroute2.ethtool + # pyroute2.ipdb + # pyroute2.ipset + # pyroute2.ndb + # pyroute2.nftables + # pyroute2.nslink +pyroute2.ethtool==0.6.13 \ --hash=sha256:0a687fea0fcd77d9074c7c18ba35d9b9f70e4217ebe68a687e200408473a3bd4 # via pyroute2 -pyroute2-ipdb==0.6.13 \ +pyroute2.ipdb==0.6.13 \ --hash=sha256:bbbbb75d13be96e4549cf70eb94fd70b2e1736ea301ac6b683f56aa1acd84d5a # via pyroute2 -pyroute2-ipset==0.6.13 \ +pyroute2.ipset==0.6.13 \ --hash=sha256:28a254f622a18976d0683603d5aefda5ab7c8528fa9e36beb85bce52026f7866 # via pyroute2 -pyroute2-ndb==0.6.13 \ +pyroute2.ndb==0.6.13 \ --hash=sha256:09b1f55f26043ce64c933e8224fd08444a498f381e5dc483bc9f428cbaf0901a # via pyroute2 -pyroute2-nftables==0.6.13 \ +pyroute2.nftables==0.6.13 \ --hash=sha256:c94bd740d50b03a1a8d9654f769e77afc77a75e05fc5887dd0551e3970f86592 # via pyroute2 -pyroute2-nslink==0.6.13 \ +pyroute2.nslink==0.6.13 \ --hash=sha256:86ed506cadccb154cd27aebb3dbf73ebb723c391104e7f0f3bc2c4a39c62366c # via pyroute2 pyrsistent==0.18.1 \ @@ -1228,9 +1226,9 
@@ pyrsistent==0.18.1 \ pystemd==0.10.0 \ --hash=sha256:d74a814bfda01085db1a8ad90be3cb27daf23a51ab6b03e7e29ec811fa2ae859 # via -r requirements.in -pytest==7.1.2 \ - --hash=sha256:13d0e3ccfc2b6e26be000cb6568c832ba67ba32e719443bfe725814d3c42433c \ - --hash=sha256:a06a0425453864a270bc45e71f783330a7428defb4230fb5e6a731fde06ecd45 +pytest==7.1.3 \ + --hash=sha256:1377bda3466d70b55e3f5cecfa55bb7cfcf219c7964629b967c37cf0bda818b7 \ + --hash=sha256:4f365fec2dff9c1162f834d9f18af1ba13062db0c708bf7b946f8a5c76180c39 # via # -r requirements.in # pytest-cov @@ -1256,6 +1254,7 @@ pytz==2022.2.1 \ # bravado-core # spyne pyyaml==6.0 \ + --hash=sha256:01b45c0191e6d66c470b6cf1b9531a771a83c1c4208272ead47a3ae4f2f603bf \ --hash=sha256:0283c35a6a9fbf047493e3a0ce8d79ef5030852c51e9d911a27badfde0605293 \ --hash=sha256:055d937d65826939cb044fc8c9b08889e8c743fdc6a32b33e2390f66013e449b \ --hash=sha256:07751360502caac1c067a8132d150cf3d61339af5691fe9e87803040dbc5db57 \ @@ -1267,26 +1266,32 @@ pyyaml==6.0 \ --hash=sha256:277a0ef2981ca40581a47093e9e2d13b3f1fbbeffae064c1d21bfceba2030287 \ --hash=sha256:2cd5df3de48857ed0544b34e2d40e9fac445930039f3cfe4bcc592a1f836d513 \ --hash=sha256:40527857252b61eacd1d9af500c3337ba8deb8fc298940291486c465c8b46ec0 \ + --hash=sha256:432557aa2c09802be39460360ddffd48156e30721f5e8d917f01d31694216782 \ --hash=sha256:473f9edb243cb1935ab5a084eb238d842fb8f404ed2193a915d1784b5a6b5fc0 \ --hash=sha256:48c346915c114f5fdb3ead70312bd042a953a8ce5c7106d5bfb1a5254e47da92 \ --hash=sha256:50602afada6d6cbfad699b0c7bb50d5ccffa7e46a3d738092afddc1f9758427f \ --hash=sha256:68fb519c14306fec9720a2a5b45bc9f0c8d1b9c72adf45c37baedfcd949c35a2 \ --hash=sha256:77f396e6ef4c73fdc33a9157446466f1cff553d979bd00ecb64385760c6babdc \ + --hash=sha256:81957921f441d50af23654aa6c5e5eaf9b06aba7f0a19c18a538dc7ef291c5a1 \ --hash=sha256:819b3830a1543db06c4d4b865e70ded25be52a2e0631ccd2f6a47a2822f2fd7c \ --hash=sha256:897b80890765f037df3403d22bab41627ca8811ae55e9a722fd0392850ec4d86 \ 
--hash=sha256:98c4d36e99714e55cfbaaee6dd5badbc9a1ec339ebfc3b1f52e293aee6bb71a4 \ --hash=sha256:9df7ed3b3d2e0ecfe09e14741b857df43adb5a3ddadc919a2d94fbdf78fea53c \ --hash=sha256:9fa600030013c4de8165339db93d182b9431076eb98eb40ee068700c9c813e34 \ --hash=sha256:a80a78046a72361de73f8f395f1f1e49f956c6be882eed58505a15f3e430962b \ + --hash=sha256:afa17f5bc4d1b10afd4466fd3a44dc0e245382deca5b3c353d8b757f9e3ecb8d \ --hash=sha256:b3d267842bf12586ba6c734f89d1f5b871df0273157918b0ccefa29deb05c21c \ --hash=sha256:b5b9eccad747aabaaffbc6064800670f0c297e52c12754eb1d976c57e4f74dcb \ + --hash=sha256:bfaef573a63ba8923503d27530362590ff4f576c626d86a9fed95822a8255fd7 \ --hash=sha256:c5687b8d43cf58545ade1fe3e055f70eac7a5a1a0bf42824308d868289a95737 \ --hash=sha256:cba8c411ef271aa037d7357a2bc8f9ee8b58b9965831d9e51baf703280dc73d3 \ --hash=sha256:d15a181d1ecd0d4270dc32edb46f7cb7733c7c508857278d3d378d14d606db2d \ + --hash=sha256:d4b0ba9512519522b118090257be113b9468d804b19d63c71dbcf4a48fa32358 \ --hash=sha256:d4db7c7aef085872ef65a8fd7d6d09a14ae91f691dec3e87ee5ee0539d516f53 \ --hash=sha256:d4eccecf9adf6fbcc6861a38015c2a64f38b9d94838ac1810a9023a0609e1b78 \ --hash=sha256:d67d839ede4ed1b28a4e8909735fc992a923cdb84e618544973d7dfc71540803 \ --hash=sha256:daf496c58a8c52083df09b80c860005194014c3698698d1a57cbcfa182142a3a \ + --hash=sha256:dbad0e9d368bb989f4515da330b88a057617d16b6a8245084f1b05400f24609f \ --hash=sha256:e61ceaab6f49fb8bdfaa0f92c4b57bcfbea54c09277b1b4f7ac376bfb7a7c174 \ --hash=sha256:f84fbc98b019fef2ee9a1cb3ce93e3187a6df0b2538a651bfb890254ba9f90b5 # via @@ -1306,7 +1311,7 @@ redis-collections==0.11.0 \ --hash=sha256:0f6cda00666fdd26e3b8ca47da13a653eaf4cc4e45470a3b09f17d65061fea8a \ --hash=sha256:d23e8c0f6bf50de10c98a14a3b636ff1bb21119386f884f2641c906832bc4ec9 # via -r requirements.in -repoze-lru==0.7 \ +repoze.lru==0.7 \ --hash=sha256:0429a75e19380e4ed50c0694e26ac8819b4ea7851ee1fc7583c8572db80aff77 \ --hash=sha256:f77bf0e1096ea445beadd35f3479c5cff2aa1efe604a133e67150bc8630a62ea # via routes @@ 
-1315,6 +1320,7 @@ requests==2.28.1 \ --hash=sha256:8fefa2a1a1365bf5520aac41836fbee479da67864514bdb821f31ce07ce65349 # via # -r requirements.in + # bravado-core # docker # oslo-config rfc3986==2.0.0 \ @@ -1433,15 +1439,16 @@ stevedore==4.0.0 \ strict-rfc3339==0.7 \ --hash=sha256:5cad17bedfc3af57b399db0fed32771f18fc54bbd917e85546088607ac5e1277 # via jsonschema -swagger-spec-validator==2.7.4 \ - --hash=sha256:2aee5e1fc0503be9f8299378b10c92169572781573c6de3315e831fd0559ba73 \ - --hash=sha256:4e373a4db5262e7257fde17d84c5c0178327b8057985ab1be63f580bfa009855 +swagger-spec-validator==2.7.6 \ + --hash=sha256:73f33e631a58f407265f2f813d194f2762a2b86f9aa905e7eee3df9b7f9428d3 \ + --hash=sha256:ff55d671f4cf8a386e7ecda60267d6cdd2cfbe0b3521a8ccf09b0669cbb72ab6 # via bravado-core systemd-python==234 \ --hash=sha256:fd0e44bf70eadae45aadc292cb0a7eb5b0b6372cd1b391228047d33895db83e7 # via -r requirements.in -termcolor==1.1.0 \ - --hash=sha256:1d6d69ce66211143803fbc56652b41d73b4a400a2891d7bf7a1cdf4c02de613b +termcolor==2.0.1 \ + --hash=sha256:6b2cf769e93364a2676e1de56a7c0cff2cf5bd07f37e9cc80b0dd6320ebfe388 \ + --hash=sha256:7e597f9de8e001a3208c4132938597413b9da45382b6f1d150cff8d062b7aaa3 # via fire tinyrpc==1.1.4 \ --hash=sha256:c99f412e5d9849c2deb468ea37fee2faf12fbc95bdd3616ae5c276ea195ed6bd @@ -1474,9 +1481,9 @@ webob==1.8.7 \ --hash=sha256:73aae30359291c14fa3b956f8b5ca31960e420c28c1bec002547fb04928cf89b \ --hash=sha256:b64ef5141be559cfade448f044fa45c2260351edcb6a8ef6b7e00c7dcef0c323 # via -r requirements.in -websocket-client==1.3.3 \ - --hash=sha256:5d55652dc1d0b3c734f044337d929aaf83f4f9138816ec680c1aefefb4dc4877 \ - --hash=sha256:d58c5f284d6a9bf8379dab423259fe8f85b70d5fa5d2916d5791a84594b122b1 +websocket-client==1.4.1 \ + --hash=sha256:398909eb7e261f44b8f4bd474785b6ec5f5b499d4953342fe9755e01ef624090 \ + --hash=sha256:f9611eb65c8241a67fb373bef040b3cf8ad377a9f6546a12b620b6511e8ea9ef # via docker werkzeug==2.2.2 \ 
--hash=sha256:7ea2d48322cc7c0f8b3a215ed73eabd7b5d75d0b50e31ab006286ccff9e00b8f \ diff --git a/bazel/external/system_libraries.BUILD b/bazel/external/system_libraries.BUILD index 1563ddab37f4..222fdf1c5cd6 100644 --- a/bazel/external/system_libraries.BUILD +++ b/bazel/external/system_libraries.BUILD @@ -115,3 +115,8 @@ native_binary( src = "usr/local/bin/asn1c", out = "asn1c", ) + +cc_library( + name = "libsqlite3-dev", + linkopts = ["-lsqlite3"], +) diff --git a/bazel/runfiles.bzl b/bazel/runfiles.bzl new file mode 100644 index 000000000000..fa0768a58090 --- /dev/null +++ b/bazel/runfiles.bzl @@ -0,0 +1,128 @@ +# Copyright 2022 The Magma Authors. + +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Inspired by https://github.com/aspect-build/rules_container/blob/main/language/runfiles.bzl by @thesayyn +# Contributed to https://github.com/aspect-build/rules_container under Apache-2.0 + +""" +This file represents a workaround for the current state of https://github.com/bazelbuild/rules_pkg. +Dependencies are not packaged (even internal dependencies). The rule here expands all +internal and external dependencies into a PackageFilesInfo so that the files can be used in +package rules. + +The dependencies are not put into service specific paths. This means, that, e.g., pip dependencies and +proto files that are used by multiple services are only added once in the packaging process. + +This rule is currently only applied to python dependencies (go and c/c++ dependencies are linked statically +into the binaries). 
+ +Additionally this rule +* renames the relative path of files so that they can be found correctly in the target system (usually + packaged into the "dist-packages" folder of the used python interpreter) +* excludes files that are not needed during runtime +""" + +load("@rules_pkg//:providers.bzl", "PackageFilesInfo") + +STRIP_PATHS = [ + "lte/gateway/python/", + "orc8r/gateway/python/", + "lte/swagger/specs_root/", + "orc8r/swagger/specs_root/", +] + +# beware: order matters here, e.g., "lte/protos/oai/" needs to be before "lte/protos/" +STRIP_PATHS_PROTOS = [ + "dp/protos/", + "feg/protos/", + "lte/protos/oai/", + "lte/protos/", + "orc8r/protos/", + "orc8r/swagger/magmad_events_v1", +] + +EXCLUDES = [ + # external protobuf is only needed during compile time + "../com_google_protobuf", + # external grpc is only needed during compile time + "../com_github_grpc_grpc", + # bazel compiled grpc library + "_solib_k8/libexternal_Scom_Ugithub_Ugrpc_Ugrpc_Slibgrpc.so", +] + +def _is_excluded(file): + for exclude in EXCLUDES: + if file.short_path.startswith(exclude): + return True + return False + +def _runfile_path(file): + path = file.short_path + if path.startswith("../"): + return _strip_external(path) + return _strip_internal(path, file) + +def _strip_external(path): + path_clean = path.replace("../", "") + + # removes the first folder + path_wo_first_folder = path_clean.partition("/")[2] + + # special case: grpc is packaged in subfolders (stripped here) + if path_wo_first_folder.startswith("src/python/grpcio/"): + return path_wo_first_folder.replace("src/python/grpcio/", "") + + return path_wo_first_folder + +def _strip_internal(path, file): + for prefix in STRIP_PATHS: + if path.startswith(prefix): + # lte/gateway/python/magma/foo/bar.py -> magma/foo/bar.py + return path.replace(prefix, "", 1) + + for prefix in STRIP_PATHS_PROTOS: + if path.startswith(prefix): + # lte/protos/target_name/lte/protos/foo_pb2.py -> lte/protos/foo_pb2.py + return path.replace(prefix, "", 
1).replace(file.owner.name + "/", "", 1) + + print("Unhandled path: " + path) # buildifier: disable=print + + return "FIXME" # needs to be handled + +def _runfiles_impl(ctx): + py_infos = [target[PyInfo] for target in ctx.attr.targets] + def_infos = [target[DefaultInfo] for target in ctx.attr.targets] + + files = depset(transitive = [py_info.transitive_sources for py_info in py_infos] + [def_info.default_runfiles.files for def_info in def_infos]) + file_map = {} + mapped_files = [] + + for file in files.to_list(): + if not _is_excluded(file): + file_map[_runfile_path(file)] = file + mapped_files = mapped_files + [file] + + files = depset(transitive = [files]) + + return [ + PackageFilesInfo( + dest_src_map = file_map, + attributes = {"mode": "0755"}, + ), + DefaultInfo(files = depset(mapped_files)), + ] + +expand_runfiles = rule( + implementation = _runfiles_impl, + attrs = { + "targets": attr.label_list(providers = [PyInfo]), + }, +) diff --git a/bazel/scripts/check_c_cpp_bazel.sh b/bazel/scripts/check_c_cpp_bazel.sh index 06de77109169..2ef63c9f7d00 100755 --- a/bazel/scripts/check_c_cpp_bazel.sh +++ b/bazel/scripts/check_c_cpp_bazel.sh @@ -37,9 +37,6 @@ DENY_LIST_NOT_YET_BAZELIFIED=( # this needs to be refactored when make is not used anymore "./lte/gateway/python/magma/pipelined/ebpf/ebpf_ul_handler.c" "./lte/gateway/python/magma/pipelined/ebpf/ebpf_dl_handler.c" - # TODO: GH12771 add MME_BENCHMARK support and bazelify files - "./lte/gateway/c/core/oai/tasks/mme_app/experimental/mme_app_serialization.hpp" - "./lte/gateway/c/core/oai/tasks/mme_app/experimental/mme_app_serialization.cpp" ) DENY_LIST=( "${DENY_LIST_NOT_RELEVANT[@]}" "${DENY_LIST_NOT_YET_BAZELIFIED[@]}" ) diff --git a/bazel/scripts/check_py_bazel.sh b/bazel/scripts/check_py_bazel.sh index 68dd635150e8..c4570ffb3b9f 100755 --- a/bazel/scripts/check_py_bazel.sh +++ b/bazel/scripts/check_py_bazel.sh @@ -57,7 +57,6 @@ DENY_LIST_NOT_YET_BAZELIFIED=( "./lte/gateway/python/integ_tests/cloud" 
"./lte/gateway/python/integ_tests/cloud_tests" "./lte/gateway/python/integ_tests/federated_tests" - "./lte/gateway/python/integ_tests/gxgy_tests" "./lte/gateway/python/integ_tests/s1aptests/workflow" "./lte/gateway/python/integ_tests/s1aptests/test_agw_offload_mixed_idle_active_multiue.py" "./lte/gateway/python/integ_tests/s1aptests/test_attach_detach_two_pdns_with_tcptraffic.py" diff --git a/bazel/scripts/link_scripts_for_bazel_integ_tests.sh b/bazel/scripts/link_scripts_for_bazel_integ_tests.sh new file mode 100755 index 000000000000..dc53028b262e --- /dev/null +++ b/bazel/scripts/link_scripts_for_bazel_integ_tests.sh @@ -0,0 +1,66 @@ +#!/usr/bin/env bash + +################################################################################ +# Copyright 2022 The Magma Authors. + +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +################################################################################ + +set -euo pipefail + +############################################################################### +# FUNCTION DECLARATIONS +############################################################################### + +get_python_scripts() { + echo "Collecting script targets..." + mapfile -t PYTHON_SCRIPTS < <(bazel query "kind(.*_binary, \ + //orc8r/gateway/python/scripts/... union \ + //lte/gateway/python/scripts/... 
)") +} + +format_targets_to_paths() { + for INDEX in "${!PYTHON_SCRIPTS[@]}" + do + # Strip leading '//' + PYTHON_SCRIPTS[INDEX]="${PYTHON_SCRIPTS[INDEX]/\/\//}" + # Replace ':' with '/' + PYTHON_SCRIPTS[INDEX]="${PYTHON_SCRIPTS[INDEX]/://}" + done +} + +create_links() { + echo "Linking bazel-built script executables to '/usr/local/bin/'..." + for PYTHON_SCRIPT in "${PYTHON_SCRIPTS[@]}" + do + sudo ln -sf "/home/vagrant/magma/bazel-bin/${PYTHON_SCRIPT}" "/usr/local/bin/$(basename "${PYTHON_SCRIPT}").py" + done + echo "Linking finished." +} + +mock_virtualenv() { + # The virtualenv is not needed with bazel. Until the switchover + # to bazel is complete, this creates an empty file that can + # be sourced, without failure, in the LTE integration tests. + # See https://github.com/magma/magma/issues/13807 + mkdir -p /home/vagrant/build/python/bin/ + touch /home/vagrant/build/python/bin/activate +} + +############################################################################### +# SCRIPT SECTION +############################################################################### + +PYTHON_SCRIPTS=() + +get_python_scripts +format_targets_to_paths +create_links +mock_virtualenv diff --git a/example/gateway/configs/streamer.yml b/bazel/test/BUILD.bazel similarity index 73% rename from example/gateway/configs/streamer.yml rename to bazel/test/BUILD.bazel index d39195867d9b..99eaca117c62 100644 --- a/example/gateway/configs/streamer.yml +++ b/bazel/test/BUILD.bazel @@ -1,6 +1,4 @@ ---- -# -# Copyright 2020 The Magma Authors. +# Copyright 2022 The Magma Authors. # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. @@ -11,6 +9,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-reconnect_sec: 60 +load("//bazel/test:runfiles_test.bzl", "runfiles_test_suite") -stream_timeout: 150 +runfiles_test_suite( + name = "runfiles_test_suite_target", +) diff --git a/bazel/test/runfiles_test.bzl b/bazel/test/runfiles_test.bzl new file mode 100644 index 000000000000..a40e92090e59 --- /dev/null +++ b/bazel/test/runfiles_test.bzl @@ -0,0 +1,123 @@ +# Copyright 2022 The Magma Authors. + +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Tests for runfiles.bzl. +See https://bazel.build/rules/testing for general bazel rule testing documentation. +""" + +load("@bazel_skylib//lib:unittest.bzl", "analysistest", "asserts") +load("@rules_pkg//:providers.bzl", "PackageFilesInfo") +load("//bazel:runfiles.bzl", "expand_runfiles") + +# test suite + +def runfiles_test_suite(name): + _setup_empty_targets_returns_empty_providers_test() + _setup_targets_are_correctly_expanded_test() + + native.test_suite( + name = name, + tests = [ + ":empty_targets_returns_empty_providers_test", + ":targets_are_correctly_expanded_test", + ], + ) + +# setup for rule to be tested + +def _setup_empty_targets_returns_empty_providers_test(): + expand_runfiles( + name = "expand_empty_targets", + tags = ["manual"], # should only be built here + ) + + rule_empty_targets_returns_empty_providers_test( + name = "empty_targets_returns_empty_providers_test", + target_under_test = ":expand_empty_targets", + ) + +def _setup_targets_are_correctly_expanded_test(): + # testing an actual magma target instead of an artificial one + # mconfigs proto should be sufficiently stable + expand_runfiles( + 
name = "expand_targets", + tags = ["manual"], # should only be built here + targets = ["//lte/protos:mconfigs_python_proto"], + ) + + rule_targets_are_correctly_expanded_test( + name = "targets_are_correctly_expanded_test", + target_under_test = ":expand_targets", + ) + +# asserts + +def _empty_targets_returns_empty_providers_test_impl(ctx): + env = analysistest.begin(ctx) + + target_under_test = analysistest.target_under_test(env) + + asserts.equals( + env, + expected = {"mode": "0755"}, + actual = target_under_test[PackageFilesInfo].attributes, + ) + asserts.equals( + env, + expected = {}, + actual = target_under_test[PackageFilesInfo].dest_src_map, + ) + asserts.equals( + env, + expected = depset([]), + actual = target_under_test[DefaultInfo].files, + ) + + return analysistest.end(env) + +expected_mapping = ( + "{" + + '"orc8r/protos/common_pb2.py": , ' + + '"lte/protos/mconfig/mconfigs_pb2.py": ' + + "}" +) + +expected_depset = ( + "depset([" + + ", " + + "" + + "])" +) + +def _targets_are_correctly_expanded_test_impl(ctx): + env = analysistest.begin(ctx) + + target_under_test = analysistest.target_under_test(env) + + asserts.equals( + env, + expected = expected_mapping, + actual = str(target_under_test[PackageFilesInfo].dest_src_map), + ) + asserts.equals( + env, + expected = expected_depset, + actual = str(target_under_test[DefaultInfo].files), + ) + + return analysistest.end(env) + +# creating rules for asserts + +rule_empty_targets_returns_empty_providers_test = analysistest.make(_empty_targets_returns_empty_providers_test_impl) + +rule_targets_are_correctly_expanded_test = analysistest.make(_targets_are_correctly_expanded_test_impl) diff --git a/ci-scripts/JenkinsFile-GitLab b/ci-scripts/JenkinsFile-GitLab index 4820ab6b6071..56622e2e0d48 100644 --- a/ci-scripts/JenkinsFile-GitLab +++ b/ci-scripts/JenkinsFile-GitLab @@ -374,7 +374,7 @@ pipeline { echo "Maybe we could not generate the coverage HTML report" } } - sh('cd lte/gateway && vagrant ssh magma -c 
"cd magma/lte/gateway && make stop"') + sh('cd lte/gateway && vagrant ssh magma -c "sudo service magma@* stop"') // Retrieving the sys logs and mme log for more debugging. sh('cd lte/gateway && vagrant ssh magma -c "sudo cat /var/log/syslog" > ${WORKSPACE}/archives/magma_dev_syslog.log') sh('cd lte/gateway && vagrant ssh magma -c "sudo cat /var/log/envoy.log" > ${WORKSPACE}/archives/magma_dev_envoy.log') @@ -503,7 +503,7 @@ pipeline { post { always { script { - sh('cd lte/gateway && vagrant ssh magma -c "cd magma/lte/gateway && make stop"') + sh('cd lte/gateway && vagrant ssh magma -c "sudo service magma@* stop"') // Stopping capture sh('cd lte/gateway && vagrant ssh magma -c "sudo pkill tcpdump"') // Retrieving the sys logs and mme log for more debugging. diff --git a/ci-scripts/teravm/README.md b/ci-scripts/teravm/README.md deleted file mode 100644 index f6b1449a0d87..000000000000 --- a/ci-scripts/teravm/README.md +++ /dev/null @@ -1,66 +0,0 @@ - -# TeraVM script for CI -TeraVM is a test environment that is being used for checking Magma regression. -This script allows CI to update Magma gateways on different environments -and run TeraVM (NG40) tests - -## Configuration -### Fill in config file -Please gather all the information required on `fabfile_teravm_setup.json` file. - -On `general` you will find a set of variables that define APT repo and -location of some docker components as well as user names to access VMs and the -default list of tests that should be run. - -On the `setups` section you may define IP configuration of as many setups as you -need as long as they have the same `general` configuration. - -### Password-les ssh -In order for this script to work, fab needs to be able to access ssh without using -passowrd. So ssh password-less (private/public key based) is required. - -For that reason you will need to generate a private/public ssh key on -the machine where this script is running (if you haven't yet). 
-``` -ssh-keygen -t rsa -b 4096 -C "your_email@domain.com" -``` - -Then append the public key (.pub) to AGW, FEG and NG40 VMs on ` ~/.ssh/authorized_keys` -file - -Test from the VM running this script using -``` -ssh remote_username@server_ip_address -``` - -## Update -### Update AGW -To update AGW run the following command -``` -fab upgrade_teravm_agw:setup_1,9cdb9470 -``` -Where `setup_1` is the setups key you use on config json file -Where `9cdb9470` is the github hash. - -### Update FEG -To update FEG run the following command -``` -fab upgrade_teravm_feg:setup_1,9cdb9470 -``` -Where `setup_1` is the setups key you use on config json file -Where `9cdb9470` is the github hash. - -## Run Test -To run the test on NG40 you can use -``` -fab run_3gpp_tests:setup_1 -``` -Where `setup_1` is the setups key you use on config json file - -## Update and Run Test -You can also do all 3 steps at the same time using -``` -fab upgrade_and_run_3gpp_tests:setup_1,9cdb9470 -``` -Where `setup_1` is the setups key you use on config json file -Where `9cdb9470` is the github hash. diff --git a/ci-scripts/teravm/fabfile.py b/ci-scripts/teravm/fabfile.py deleted file mode 100644 index 90c110268856..000000000000 --- a/ci-scripts/teravm/fabfile.py +++ /dev/null @@ -1,440 +0,0 @@ -""" -Copyright 2020 The Magma Authors. - -This source code is licensed under the BSD-style license found in the -LICENSE file in the root directory of this source tree. - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-""" - -import collections -import distutils.util -import json -import os -import re -import sys -import time - -from fabric.api import cd, env, hide, local, run, settings -from fabric.operations import put, sudo -from fabric.utils import abort, fastprint - -CONFIG_FILE = "fabfile_teravm_conf.json" - -with open(CONFIG_FILE) as config_file: - config = json.load(config_file) - -VM_IP_MAP = config["setups"] -NG40_TEST_FILES = config["general"]["ng40_test_files"] -DEFAULT_KEY_FILENAME = config["general"]["key_filename"] -FEG_DOCKER_COMPOSE_GIT = config["general"]["feg_docker_compose_git"] -AGW_ATP_PUBKEY = config["general"]["agw_apt_etagecom_pubkey"] -AGW_APT_SOURCE = config["general"]["agw_apt_etagecom_source"] -AGW_APT_BRANCH = config["general"]["agw_apt_etagecom_branch"] -AGW_ATP_FILE = config["general"]["agw_apt_source_file"] - -fastprint("Configuration loaded\n") - - -# Both authorized key based ssh and cert-based ssh are setup from magma-driver -# to ag, feg, controller,and proxy in teravm so no need to provide password -# for ssh commands.Looks like fab env.key_filename only works with authorized -# key based ssh. A bash script can take advantage of cert-based ssh. - -def upgrade_to_latest_and_run_3gpp_tests( - setup, - key_filename=DEFAULT_KEY_FILENAME, - custom_test_file=NG40_TEST_FILES, - upgrade_agw="True", - upgrade_feg="True", -): - latest_tag = _get_latest_agw_tag(setup, key_filename) - latest_hash = _parse_hash_from_tag(latest_tag) - - upgrade_and_run_3gpp_tests( - setup, latest_hash, key_filename, - custom_test_file, upgrade_agw, upgrade_feg, - ) - - -def upgrade_and_run_3gpp_tests( - setup, - hash=None, - key_filename=DEFAULT_KEY_FILENAME, - custom_test_file=NG40_TEST_FILES, - upgrade_agw="True", - upgrade_feg="True", -): - """ - Runs upgrade and s6a and gxgy tests once. 
This is run in the cron job on - magma-driver: - fab upgrade_and_run_3gpp_tests: 2>&1 | tee /tmp/teravm_cronjob.log - - key_filename: path to where the private key is for authorized-key based - ssh. The public key counterpart needs to in the authorized_keys file on - the remote host. If empty file name is passed, password-based ssh will - work instead. This can be used if the script is run manually. - - custom_test_file: a 3gpp test file to run. The default uses s6a and gxgy - """ - err = upgrade_teravm( - setup, hash, key_filename, - upgrade_agw, upgrade_feg, - ) - if err: - sys.exit(1) - - fastprint("\nSleeping for 30 seconds to make sure system is read\n\n") - time.sleep(30) - - verdicts = run_3gpp_tests(setup, key_filename, custom_test_file) - - -def upgrade_teravm_latest( - setup, - key_filename=DEFAULT_KEY_FILENAME, - upgrade_agw="True", - upgrade_feg="True", -): - latest_tag = _get_latest_agw_tag(setup, key_filename) - latest_hash = _parse_hash_from_tag(latest_tag) - - return upgrade_teravm(setup, latest_hash, key_filename, upgrade_agw, upgrade_feg) - - -def upgrade_teravm( - setup, - hash=None, - key_filename=DEFAULT_KEY_FILENAME, - upgrade_agw="True", - upgrade_feg="True", -): - """ - Upgrade teravm vms feg, agw. - This will be run by a cron job on magma-driver(192.168.60.109) in teraVM. - magma-driver is the control vm in teraVM. It will run a cron job that - upgrades and runs teraVM tests automatically. - - Alternatively, this script can be run from a local machine that is on TIP - lab VPN to 192.168.60.0/24. When run manually, a hash can be provided to - specify what are the hash of the images that it should pull and use to - upgrade test vms. - - hash: a hash to identify what images to pull from s3 bucket and use - for upgrading. If None, try find the most recent hash. - - key_filename: path to where the private key is for authorized-key based - ssh. The public key counterpart needs to in the authorized_keys file on - the remote host. 
If empty file name is passed, password-based ssh will - work instead. This can be used if the script is run manually. - """ - upgrade_feg = _prep_bool_arg(upgrade_feg) - upgrade_agw = _prep_bool_arg(upgrade_agw) - - if upgrade_agw: - upgrade_teravm_agw(setup, hash, key_filename) - - if upgrade_feg: - upgrade_teravm_feg(setup, hash, key_filename) - - -def upgrade_teravm_agw(setup, hash, key_filename=DEFAULT_KEY_FILENAME): - """ - Upgrade teravm agw to image with the given hash. - hash: a hash to identify what version from APT to use for upgrading. - If not hash provided or "latest" is passed, it will install latest - on the repository. - - key_filename: path to where the private key is for authorized-key based - ssh. The public key counterpart needs to in the authorized_keys file on - the remote host. If empty file name is passed, password-based ssh will - work instead. This can be used if the script is run manually. - """ - - fastprint("\nUpgrade teraVM AGW to %s\n" % hash) - _setup_env("magma", VM_IP_MAP[setup]["gateway"], key_filename) - err = _set_magma_apt_repo() - if err: - sys.exit(1) - sudo("apt update") - fastprint("Install version with hash %s\n" % hash) - # Get the whole version string containing that hash and 'apt install' it - with settings(abort_exception=FabricException): - try: - if hash is None or hash.lower() == "latest": - # install latest on the repository - sudo("apt install -f -y --allow-downgrades -o Dpkg::Options::=\"--force-confnew\" magma") - else: - sudo( - "version=$(" - "apt-cache madison magma | grep {hash} | awk 'NR==1{{print $3}}');" - "apt install -f -y --allow-downgrades -o Dpkg::Options::=\"--force-confnew\" magma=$version".format( - hash=hash, - ), - ) - # restart sctpd to force clean start - sudo("service sctpd restart") - - except Exception: - err = ( - "Error during install of version {} on AGW. " - "Maybe the version doesn't exist. 
Not installing.\n".format(hash) - ) - fastprint(err) - sys.exit(1) - - -def upgrade_teravm_agw_AWS(setup, hash, key_filename=DEFAULT_KEY_FILENAME): - """ - Upgrade teravm agw to image with the given hash. - hash: a hash to identify what images to pull from s3 bucket and use - for upgrading. If None, try find the most recent hash. - - key_filename: path to where the private key is for authorized-key based - ssh. The public key counterpart needs to in the authorized_keys file on - the remote host. If empty file name is passed, password-based ssh will - work instead. This can be used if the script is run manually. - """ - fastprint("\nUpgrade teraVM AGW through AWSto %s\n" % hash) - _setup_env("magma", VM_IP_MAP[setup]["gateway"], key_filename) - try: - image = _get_gateway_image(hash) - except Exception: - fastprint("Image %s not found. Not updating AGW \n" % hash) - return - _fetch_image("ag", "gateway/%s" % image) - - with cd("/tmp/images"): - run("tar -xzf %s" % image) - # --fix-broken to avoid the case where a previous manual - # install didn't leave missing libraries. - sudo( - "apt --fix-broken -y install -o " - 'Dpkg::Options::="--force-confnew" --assume-yes --force-yes', - ) - sudo("apt-get update -y") - sudo("apt-get autoremove -y") - sudo( - "apt --fix-broken -y install -o " - 'Dpkg::Options::="--force-confnew" --assume-yes --force-yes', - ) - sudo("dpkg --force-confnew -i magma*.deb") - sudo("apt-get install -f -y") - sudo("systemctl stop magma@*") - sudo("systemctl restart magma@magmad") - - -def upgrade_teravm_feg(setup, hash, key_filename=DEFAULT_KEY_FILENAME): - """ - Upgrade teravm feg to the image with the given hash. - - hash: a hash to identify what images to pull from s3 bucket and use - for upgrading. If None, try find the most recent hash. - - key_filename: path to where the private key is for authorized-key based - ssh. The public key counterpart needs to in the authorized_keys file on - the remote host. 
IIf empty file name is passed, password-based ssh will - work instead. This can be used if the script is run manually. - """ - for feg_ip in VM_IP_MAP[setup]["feg"]: - fastprint("\nUpgrade teraVM FEG (%s) to %s\n" % (feg_ip, hash)) - _setup_env("magma", feg_ip, key_filename) - - with cd("/var/opt/magma/docker"), settings(abort_exception=FabricException): - sudo("docker-compose down") - sudo("cp docker-compose.yml docker-compose.yml.backup") - sudo("cp .env .env.backup") - sudo('sed -i "s/IMAGE_VERSION=.*/IMAGE_VERSION=%s/g" .env' % hash) - if len(_check_disk_space()) != 0: - fastprint("Disk space alert: cleaning docker images\n") - sudo("docker system prune --all --force") - try: - # TODO: obtain .yml file from jfrog artifact instead of git master - sudo("wget -O docker-compose.yml %s" % FEG_DOCKER_COMPOSE_GIT) - sudo("docker-compose up -d") - except Exception: - err = ( - "Error during install of version {}. Maybe the image " - "doesn't exist. Reverting to the original " - "config\n".format(hash) - ) - fastprint(err) - with hide("running", "stdout"): - sudo("mv docker-compose.yml.backup docker-compose.yml") - sudo("mv .env.backup .env") - sudo("docker-compose up -d") - sys.exit(1) - - -def run_3gpp_tests( - setup, key_filename=DEFAULT_KEY_FILENAME, test_files=NG40_TEST_FILES, -): - """ - Run teravm s6a and gxgy test cases. Usage: 'fab run_3gpp_tests:' for - default key filename and default test files. - - key_filename: path to where the private key is for authorized-key based - ssh. The public key counterpart needs to in the authorized_keys file on - the remote host. If empty file name is passed, password-based ssh will - work instead. This can be used if the script is run manually. - - test_file: a test file to use instead of the s6a and gxgy defaults. 
- """ - if isinstance(test_files, str): - test_files = [test_files] - test_output = [] - - _setup_env("ng40", VM_IP_MAP[setup]["ng40"], key_filename) - - with cd("/home/ng40/magma/automation"): - for test_file in test_files: - fastprint("Check ng40 status (if any test is currently running\n") - run("ng40test state.ntl") - fastprint("Run test for file %s\n" % (test_file)) - with hide("warnings", "running", "stdout"), settings(warn_only=True): - output = run("ng40test %s" % test_file) - test_output.append(output) - fastprint("Done with file %s\n" % (test_file)) - - verdicts = _parse_stats(test_output) - fastprint("Results of test:\n") - _prettyprint_stats(verdicts) - return verdicts - - -def _set_magma_apt_repo(): - err = None - with settings(abort_exception=FabricException): - try: - # add repo to source file (same as add-apt-repo - repo_apt_string = "deb {} {}".format(AGW_APT_SOURCE, AGW_APT_BRANCH) - ignore_comments = "/^[[:space:]]*#/!" - sudo("touch {}".format(AGW_ATP_FILE)) - # Replace non commented lines with the wrong repo, or add it if missing - sudo( - "grep -q '{source}' {sFile} && " - "sed -i '{ign_com}s,.*{source}.*,{repo},g' {sFile} || " - "echo '{repo}' >> {sFile} ".format( - ign_com=ignore_comments, - source=AGW_APT_SOURCE, - repo=repo_apt_string, - sFile=AGW_ATP_FILE, - ), - ) - except Exception: - err = "Error changing ATP repo\n" - fastprint(err) - return err - - -def _parse_stats(teravm_raw_result): - """ - Gets stats from teraVM result output string - - teravm_test_result: output comming from the teravm stdout - """ - verdicts = collections.defaultdict(list) - - pattern = r"Verdict\((?P\w+)\) = VERDICT_(?P\w+)" - for fileResults in teravm_raw_result: - for line in fileResults.splitlines(): - match = re.match(pattern, line) - if match: - verdict = match.groupdict()["verdict"] - verdicts[verdict].append(line) - return verdicts - - -def _prettyprint_stats(verdict): - for result, test_list in verdict.items(): - for result in test_list: - 
fastprint("%s\n" % (result)) - - -def _check_disk_space(threshold=80, drive_prefix="/dev/sd"): - over_threshold = {} - with hide("running", "stdout", "stderr"), settings(warn_only=True): - columns = sudo("df -hP | awk 'NR>1{print $1,$5}' | sed -e's/%//g'") - - for line in columns.split("\n"): - line = line.split(" ") - if len(line) != 2: - continue - dev = line[0] - dev.strip() - percentage = int(line[1]) - if dev.startswith(drive_prefix) and percentage >= threshold: - over_threshold[dev] = percentage - - return over_threshold - - -def _get_gateway_image(hash): - output = local( - "aws s3 ls s3://magma-images/gateway/ " - "| grep %s.deps.tar.gz | sort -r | head -1" % hash, - capture=True, - ) - if len(output) == 0: - raise Exception("No gateway image found with hash %s" % hash) - else: - return output.rsplit(" ", 1)[1] - - -def _get_latest_agw_tag(setup, key_filename): - _setup_env("magma", VM_IP_MAP[setup]["gateway"], key_filename) - err = _set_magma_apt_repo() - if err: - sys.exit(1) - sudo("apt update") - tag = sudo( - "apt-cache madison magma | awk 'NR==1{{print substr ($3,1)}}'", - ) - fastprint("Latest tag of AGW is %s \n" % tag) - - return tag - - -def _parse_hash_from_tag(tag): - split_tag = tag.split("-") - if len(split_tag) != 3: - fastprint("not valid tag %s\n" % split_tag) - sys.exit(1) - fastprint("Latest hash is %s \n" % split_tag[2]) - return split_tag[2] - - -def _fetch_image(name, image): - """ - Fetches the image from s3 and copies the image to /tmp/images in the VM - """ - # Make local directory - local("rm -rf /tmp/%s-images" % name) - local("mkdir -p /tmp/%s-images" % name) - # Fetch image from s3 - local("aws s3 cp 's3://magma-images/%s' /tmp/%s-images" % (image, name)) - # create /tmp/images directory on remote host - # env has to be set up before calling this function - _setup_env("magma", VM_IP_MAP["setup_1"]["gateway"], DEFAULT_KEY_FILENAME) - run("rm -rf /tmp/images") - run("mkdir -p /tmp/images") - # copy images from local /tmp to 
corresponding remote /tmp/images - put("/tmp/%s-images/*" % name, "/tmp/images/") - - -def _setup_env(username, remote_machine_ip, key_filename): - env.key_filename = [key_filename] - env.host_string = "%s@%s" % (username, remote_machine_ip) - env.user = username - - -def _prep_bool_arg(arg): - return bool(distutils.util.strtobool(str(arg))) - - -class FabricException(Exception): - pass diff --git a/ci-scripts/teravm/fabfile_teravm_conf.json b/ci-scripts/teravm/fabfile_teravm_conf.json deleted file mode 100644 index ac8119a9fc26..000000000000 --- a/ci-scripts/teravm/fabfile_teravm_conf.json +++ /dev/null @@ -1,33 +0,0 @@ -{ - "general": { - "ng40_test_files": [ - "", - "" - ], - "magma_username": "", - "ng40_username" : "", - "key_filename" : "", - "feg_docker_compose_git" : "https://", - "agw_apt_etagecom_source": "http://", - "agw_apt_etagecom_branch": "http://", - "agw_apt_etagecom_pubkey": "http://", - "agw_apt_source_file": "" - }, - "setups" :{ - "setup_1": { - "_description": "", - "proxy": "192.0.0.1", - "feg": ["192.0.0.1","192.0.0.2"], - "gateway": "192.0.0.1", - "ng40": "192.0.0.1" - }, - "setup_2": { - "_description": "", - "proxy": "192.0.0.1", - "feg": ["192.0.0.1"], - "gateway": "192.0.0.1", - "ng40": "192.0.0.1" - } - } -} - diff --git a/codecov.yml b/codecov.yml index 98a7f1d8bd87..d8003774f294 100644 --- a/codecov.yml +++ b/codecov.yml @@ -1,29 +1,10 @@ comment: false github_checks: annotations: false -coverage: - status: - project: - default: - target: auto - threshold: 0% - cloud_lint: - target: auto - threshold: 0% - flags: - - cloud_lint - feg-lint: - target: auto - threshold: 0% - flags: - - feg-lint - lte-test: - target: auto - threshold: 0% - flags: - - lte-test - c_cpp: - target: auto - threshold: 0% - flags: - - c_cpp +flag_management: + default_rules: + carryforward: true + statuses: + - type: project + target: auto + threshold: 0% diff --git a/cwf/cloud/helm/cwf-orc8r/Chart.yaml b/cwf/cloud/helm/cwf-orc8r/Chart.yaml index 
45ce9e03ef6c..b24c150b667c 100644 --- a/cwf/cloud/helm/cwf-orc8r/Chart.yaml +++ b/cwf/cloud/helm/cwf-orc8r/Chart.yaml @@ -10,10 +10,10 @@ # limitations under the License. apiVersion: v2 -appVersion: "1.0" +appVersion: "1.8.0" description: A Helm chart for magma orchestrator's cwf module name: cwf-orc8r -version: 0.2.2 +version: 1.8.0 engine: gotpl sources: - https://github.com/magma/magma diff --git a/cwf/gateway/deploy/roles/ovs/files/nx_actions.py b/cwf/gateway/deploy/roles/ovs/files/nx_actions.py deleted file mode 100644 index 5e8ca8e125c1..000000000000 --- a/cwf/gateway/deploy/roles/ovs/files/nx_actions.py +++ /dev/null @@ -1,3443 +0,0 @@ -# Copyright (C) 2015 Nippon Telegraph and Telephone Corporation. -# Copyright (C) 2015 YAMAMOTO Takashi -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import struct - -import six -from ryu import utils -from ryu.lib import type_desc -from ryu.lib.pack_utils import msg_pack_into -from ryu.ofproto import nicira_ext, ofproto_common -from ryu.ofproto.ofproto_parser import StringifyMixin - - -def generate(ofp_name, ofpp_name): - import sys - - ofp = sys.modules[ofp_name] - ofpp = sys.modules[ofpp_name] - - class _NXFlowSpec(StringifyMixin): - _hdr_fmt_str = '!H' # 2 bit 0s, 1 bit src, 2 bit dst, 11 bit n_bits - _dst_type = None - _subclasses = {} - _TYPE = { - 'nx-flow-spec-field': [ - 'src', - 'dst', - ], - } - - def __init__(self, src, dst, n_bits): - self.src = src - self.dst = dst - self.n_bits = n_bits - - @classmethod - def register(cls, subcls): - assert issubclass(subcls, cls) - assert subcls._dst_type not in cls._subclasses - cls._subclasses[subcls._dst_type] = subcls - - @classmethod - def parse(cls, buf): - (hdr,) = struct.unpack_from(cls._hdr_fmt_str, buf, 0) - rest = buf[struct.calcsize(cls._hdr_fmt_str):] - if hdr == 0: - return None, rest # all-0 header is no-op for padding - src_type = (hdr >> 13) & 0x1 - dst_type = (hdr >> 11) & 0x3 - n_bits = hdr & 0x3ff - subcls = cls._subclasses[dst_type] - if src_type == 0: # subfield - src = cls._parse_subfield(rest) - rest = rest[6:] - elif src_type == 1: # immediate - src_len = (n_bits + 15) // 16 * 2 - src_bin = rest[:src_len] - src = type_desc.IntDescr(size=src_len).to_user(src_bin) - rest = rest[src_len:] - if dst_type == 0: # match - dst = cls._parse_subfield(rest) - rest = rest[6:] - elif dst_type == 1: # load - dst = cls._parse_subfield(rest) - rest = rest[6:] - elif dst_type == 2: # output - dst = '' # empty - return subcls(src=src, dst=dst, n_bits=n_bits), rest - - def serialize(self): - buf = bytearray() - if isinstance(self.src, tuple): - src_type = 0 # subfield - else: - src_type = 1 # immediate - # header - val = (src_type << 13) | (self._dst_type << 11) | self.n_bits - msg_pack_into(self._hdr_fmt_str, buf, 0, val) - # src - if src_type == 0: # 
subfield - buf += self._serialize_subfield(self.src) - elif src_type == 1: # immediate - src_len = (self.n_bits + 15) // 16 * 2 - buf += type_desc.IntDescr(size=src_len).from_user(self.src) - # dst - if self._dst_type == 0: # match - buf += self._serialize_subfield(self.dst) - elif self._dst_type == 1: # load - buf += self._serialize_subfield(self.dst) - elif self._dst_type == 2: # output - pass # empty - return buf - - @staticmethod - def _parse_subfield(buf): - (n, len) = ofp.oxm_parse_header(buf, 0) - assert len == 4 # only 4-bytes NXM/OXM are defined - field = ofp.oxm_to_user_header(n) - rest = buf[len:] - (ofs,) = struct.unpack_from('!H', rest, 0) - return (field, ofs) - - @staticmethod - def _serialize_subfield(subfield): - (field, ofs) = subfield - buf = bytearray() - n = ofp.oxm_from_user_header(field) - ofp.oxm_serialize_header(n, buf, 0) - assert len(buf) == 4 # only 4-bytes NXM/OXM are defined - msg_pack_into('!H', buf, 4, ofs) - return buf - - class NXFlowSpecMatch(_NXFlowSpec): - """ - Specification for adding match criterion - - This class is used by ``NXActionLearn``. - - For the usage of this class, please refer to ``NXActionLearn``. - - ================ ====================================================== - Attribute Description - ================ ====================================================== - src OXM/NXM header and Start bit for source field - dst OXM/NXM header and Start bit for destination field - n_bits The number of bits from the start bit - ================ ====================================================== - """ - # Add a match criteria - # an example of the corresponding ovs-ofctl syntax: - # NXM_OF_VLAN_TCI[0..11] - _dst_type = 0 - - class NXFlowSpecLoad(_NXFlowSpec): - """ - Add NXAST_REG_LOAD actions - - This class is used by ``NXActionLearn``. - - For the usage of this class, please refer to ``NXActionLearn``. 
- - ================ ====================================================== - Attribute Description - ================ ====================================================== - src OXM/NXM header and Start bit for source field - dst OXM/NXM header and Start bit for destination field - n_bits The number of bits from the start bit - ================ ====================================================== - """ - # Add NXAST_REG_LOAD actions - # an example of the corresponding ovs-ofctl syntax: - # NXM_OF_ETH_DST[]=NXM_OF_ETH_SRC[] - _dst_type = 1 - - class NXFlowSpecOutput(_NXFlowSpec): - """ - Add an OFPAT_OUTPUT action - - This class is used by ``NXActionLearn``. - - For the usage of this class, please refer to ``NXActionLearn``. - - ================ ====================================================== - Attribute Description - ================ ====================================================== - src OXM/NXM header and Start bit for source field - dst Must be '' - n_bits The number of bits from the start bit - ================ ====================================================== - """ - # Add an OFPAT_OUTPUT action - # an example of the corresponding ovs-ofctl syntax: - # output:NXM_OF_IN_PORT[] - _dst_type = 2 - - def __init__(self, src, n_bits, dst=''): - assert dst == '' - super(NXFlowSpecOutput, self).__init__( - src=src, dst=dst, - n_bits=n_bits, - ) - - class NXAction(ofpp.OFPActionExperimenter): - _fmt_str = '!H' # subtype - _subtypes = {} - _experimenter = ofproto_common.NX_EXPERIMENTER_ID - - def __init__(self): - super(NXAction, self).__init__(self._experimenter) - self.subtype = self._subtype - - @classmethod - def parse(cls, buf): - fmt_str = NXAction._fmt_str - (subtype,) = struct.unpack_from(fmt_str, buf, 0) - subtype_cls = cls._subtypes.get(subtype) - rest = buf[struct.calcsize(fmt_str):] - if subtype_cls is None: - return NXActionUnknown(subtype, rest) - return subtype_cls.parser(rest) - - def serialize(self, buf, offset): - data = 
self.serialize_body() - payload_offset = ( - ofp.OFP_ACTION_EXPERIMENTER_HEADER_SIZE + - struct.calcsize(NXAction._fmt_str) - ) - self.len = utils.round_up(payload_offset + len(data), 8) - super(NXAction, self).serialize(buf, offset) - msg_pack_into( - NXAction._fmt_str, - buf, - offset + ofp.OFP_ACTION_EXPERIMENTER_HEADER_SIZE, - self.subtype, - ) - buf += data - - @classmethod - def register(cls, subtype_cls): - assert subtype_cls._subtype is not cls._subtypes - cls._subtypes[subtype_cls._subtype] = subtype_cls - - class NXActionUnknown(NXAction): - def __init__( - self, subtype, data=None, - type_=None, len_=None, experimenter=None, - ): - self._subtype = subtype - super(NXActionUnknown, self).__init__() - self.data = data - - @classmethod - def parser(cls, buf): - return cls(data=buf) - - def serialize_body(self): - # fixup - return bytearray() if self.data is None else self.data - - # For OpenFlow1.0 only - class NXActionSetQueue(NXAction): - r""" - Set queue action - - This action sets the queue that should be used to queue - when packets are output. - - And equivalent to the followings action of ovs-ofctl command. - - .. - set_queue:queue - .. - - +-------------------------+ - | **set_queue**\:\ *queue*| - +-------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - queue_id Queue ID for the packets - ================ ====================================================== - - .. note:: - This actions is supported by - ``OFPActionSetQueue`` - in OpenFlow1.2 or later. 
- - Example:: - - actions += [parser.NXActionSetQueue(queue_id=10)] - """ - _subtype = nicira_ext.NXAST_SET_QUEUE - - # queue_id - _fmt_str = '!2xI' - - def __init__( - self, queue_id, - type_=None, len_=None, vendor=None, subtype=None, - ): - super(NXActionSetQueue, self).__init__() - self.queue_id = queue_id - - @classmethod - def parser(cls, buf): - (queue_id,) = struct.unpack_from(cls._fmt_str, buf, 0) - return cls(queue_id) - - def serialize_body(self): - data = bytearray() - msg_pack_into(self._fmt_str, data, 0, self.queue_id) - return data - - class NXActionPopQueue(NXAction): - """ - Pop queue action - - This action restors the queue to the value it was before any - set_queue actions were applied. - - And equivalent to the followings action of ovs-ofctl command. - - .. - pop_queue - .. - - +---------------+ - | **pop_queue** | - +---------------+ - - Example:: - - actions += [parser.NXActionPopQueue()] - """ - _subtype = nicira_ext.NXAST_POP_QUEUE - - _fmt_str = '!6x' - - def __init__( - self, - type_=None, len_=None, experimenter=None, subtype=None, - ): - super(NXActionPopQueue, self).__init__() - - @classmethod - def parser(cls, buf): - return cls() - - def serialize_body(self): - data = bytearray() - msg_pack_into(self._fmt_str, data, 0) - return data - - class NXActionRegLoad(NXAction): - r""" - Load literal value action - - This action loads a literal value into a field or part of a field. - - And equivalent to the followings action of ovs-ofctl command. - - .. - load:value->dst[start..end] - .. - - +-----------------------------------------------------------------+ - | **load**\:\ *value*\->\ *dst*\ **[**\ *start*\..\ *end*\ **]** | - +-----------------------------------------------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - ofs_nbits Start and End for the OXM/NXM field. 
- Setting method refer to the ``nicira_ext.ofs_nbits`` - dst OXM/NXM header for destination field - value OXM/NXM value to be loaded - ================ ====================================================== - - Example:: - - actions += [parser.NXActionRegLoad( - ofs_nbits=nicira_ext.ofs_nbits(4, 31), - dst="eth_dst", - value=0x112233)] - """ - _subtype = nicira_ext.NXAST_REG_LOAD - _fmt_str = '!HIQ' # ofs_nbits, dst, value - _TYPE = { - 'ascii': [ - 'dst', - ], - } - - def __init__( - self, ofs_nbits, dst, value, - type_=None, len_=None, experimenter=None, - subtype=None, - ): - super(NXActionRegLoad, self).__init__() - self.ofs_nbits = ofs_nbits - self.dst = dst - self.value = value - - @classmethod - def parser(cls, buf): - (ofs_nbits, dst, value) = struct.unpack_from( - cls._fmt_str, buf, 0, - ) - # Right-shift instead of using oxm_parse_header for simplicity... - dst_name = ofp.oxm_to_user_header(dst >> 9) - return cls(ofs_nbits, dst_name, value) - - def serialize_body(self): - hdr_data = bytearray() - n = ofp.oxm_from_user_header(self.dst) - ofp.oxm_serialize_header(n, hdr_data, 0) - (dst_num,) = struct.unpack_from('!I', six.binary_type(hdr_data), 0) - - data = bytearray() - msg_pack_into( - self._fmt_str, data, 0, - self.ofs_nbits, dst_num, self.value, - ) - return data - - class NXActionRegLoad2(NXAction): - r""" - Load literal value action - - This action loads a literal value into a field or part of a field. - - And equivalent to the followings action of ovs-ofctl command. - - .. - set_field:value[/mask]->dst - .. 
- - +------------------------------------------------------------+ - | **set_field**\:\ *value*\ **[**\/\ *mask*\ **]**\->\ *dst* | - +------------------------------------------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - value OXM/NXM value to be loaded - mask Mask for destination field - dst OXM/NXM header for destination field - ================ ====================================================== - - Example:: - - actions += [parser.NXActionRegLoad2(dst="tun_ipv4_src", - value="192.168.10.0", - mask="255.255.255.0")] - """ - _subtype = nicira_ext.NXAST_REG_LOAD2 - _TYPE = { - 'ascii': [ - 'dst', - 'value', - ], - } - - def __init__( - self, dst, value, mask=None, - type_=None, len_=None, experimenter=None, subtype=None, - ): - super(NXActionRegLoad2, self).__init__() - self.dst = dst - self.value = value - self.mask = mask - - @classmethod - def parser(cls, buf): - (n, uv, mask, _len) = ofp.oxm_parse(buf, 0) - dst, value = ofp.oxm_to_user(n, uv, mask) - - if isinstance(value, (tuple, list)): - return cls(dst, value[0], value[1]) - else: - return cls(dst, value, None) - - def serialize_body(self): - data = bytearray() - if self.mask is None: - value = self.value - else: - value = (self.value, self.mask) - self._TYPE['ascii'].append('mask') - - n, value, mask = ofp.oxm_from_user(self.dst, value) - len_ = ofp.oxm_serialize(n, value, mask, data, 0) - msg_pack_into("!%dx" % (14 - len_), data, len_) - - return data - - class NXActionNote(NXAction): - r""" - Note action - - This action does nothing at all. - - And equivalent to the followings action of ovs-ofctl command. - - .. - note:[hh].. - .. - - +-----------------------------------+ - | **note**\:\ **[**\ *hh*\ **]**\.. 
| - +-----------------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - note A list of integer type values - ================ ====================================================== - - Example:: - - actions += [parser.NXActionNote(note=[0xaa,0xbb,0xcc,0xdd])] - """ - _subtype = nicira_ext.NXAST_NOTE - - # note - _fmt_str = '!%dB' - - # set the integer array in a note - def __init__( - self, - note, - type_=None, len_=None, experimenter=None, subtype=None, - ): - super(NXActionNote, self).__init__() - self.note = note - - @classmethod - def parser(cls, buf): - note = struct.unpack_from( - cls._fmt_str % len(buf), buf, 0, - ) - return cls(list(note)) - - def serialize_body(self): - assert isinstance(self.note, (tuple, list)) - for n in self.note: - assert isinstance(n, six.integer_types) - - pad = (len(self.note) + nicira_ext.NX_ACTION_HEADER_0_SIZE) % 8 - if pad: - self.note += [0x0 for i in range(8 - pad)] - note_len = len(self.note) - data = bytearray() - msg_pack_into( - self._fmt_str % note_len, data, 0, - *self.note, - ) - return data - - class _NXActionSetTunnelBase(NXAction): - # _subtype, _fmt_str must be attributes of subclass. - - def __init__( - self, - tun_id, - type_=None, len_=None, experimenter=None, subtype=None, - ): - super(_NXActionSetTunnelBase, self).__init__() - self.tun_id = tun_id - - @classmethod - def parser(cls, buf): - (tun_id,) = struct.unpack_from( - cls._fmt_str, buf, 0, - ) - return cls(tun_id) - - def serialize_body(self): - data = bytearray() - msg_pack_into( - self._fmt_str, data, 0, - self.tun_id, - ) - return data - - class NXActionSetTunnel(_NXActionSetTunnelBase): - r""" - Set Tunnel action - - This action sets the identifier (such as GRE) to the specified id. - - And equivalent to the followings action of ovs-ofctl command. - - .. 
note:: - This actions is supported by - ``OFPActionSetField`` - in OpenFlow1.2 or later. - - .. - set_tunnel:id - .. - - +------------------------+ - | **set_tunnel**\:\ *id* | - +------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - tun_id Tunnel ID(32bits) - ================ ====================================================== - - Example:: - - actions += [parser.NXActionSetTunnel(tun_id=0xa)] - """ - _subtype = nicira_ext.NXAST_SET_TUNNEL - - # tun_id - _fmt_str = '!2xI' - - class NXActionSetTunnel64(_NXActionSetTunnelBase): - r""" - Set Tunnel action - - This action outputs to a port that encapsulates - the packet in a tunnel. - - And equivalent to the followings action of ovs-ofctl command. - - .. note:: - This actions is supported by - ``OFPActionSetField`` - in OpenFlow1.2 or later. - - .. - set_tunnel64:id - .. - - +--------------------------+ - | **set_tunnel64**\:\ *id* | - +--------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - tun_id Tunnel ID(64bits) - ================ ====================================================== - - Example:: - - actions += [parser.NXActionSetTunnel64(tun_id=0xa)] - """ - _subtype = nicira_ext.NXAST_SET_TUNNEL64 - - # tun_id - _fmt_str = '!6xQ' - - class NXActionRegMove(NXAction): - r""" - Move register action - - This action copies the src to dst. - - And equivalent to the followings action of ovs-ofctl command. - - .. - move:src[start..end]->dst[start..end] - .. 
- - +--------------------------------------------------------+ - | **move**\:\ *src*\ **[**\ *start*\..\ *end*\ **]**\->\ | - | *dst*\ **[**\ *start*\..\ *end* \ **]** | - +--------------------------------------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - src_field OXM/NXM header for source field - dst_field OXM/NXM header for destination field - n_bits Number of bits - src_ofs Starting bit offset in source - dst_ofs Starting bit offset in destination - ================ ====================================================== - - .. CAUTION:: - **src_start**\ and \ **src_end**\ difference and \ **dst_start**\ - and \ **dst_end**\ difference must be the same. - - Example:: - - actions += [parser.NXActionRegMove(src_field="reg0", - dst_field="reg1", - n_bits=5, - src_ofs=0 - dst_ofs=10)] - """ - _subtype = nicira_ext.NXAST_REG_MOVE - _fmt_str = '!HHH' # n_bits, src_ofs, dst_ofs - # Followed by OXM fields (src, dst) and padding to 8 bytes boundary - _TYPE = { - 'ascii': [ - 'src_field', - 'dst_field', - ], - } - - def __init__( - self, src_field, dst_field, n_bits, src_ofs=0, dst_ofs=0, - type_=None, len_=None, experimenter=None, subtype=None, - ): - super(NXActionRegMove, self).__init__() - self.n_bits = n_bits - self.src_ofs = src_ofs - self.dst_ofs = dst_ofs - self.src_field = src_field - self.dst_field = dst_field - - @classmethod - def parser(cls, buf): - (n_bits, src_ofs, dst_ofs) = struct.unpack_from( - cls._fmt_str, buf, 0, - ) - rest = buf[struct.calcsize(NXActionRegMove._fmt_str):] - - # src field - (n, len) = ofp.oxm_parse_header(rest, 0) - src_field = ofp.oxm_to_user_header(n) - rest = rest[len:] - # dst field - (n, len) = ofp.oxm_parse_header(rest, 0) - dst_field = ofp.oxm_to_user_header(n) - rest = rest[len:] - # ignore padding - return cls( - src_field, dst_field=dst_field, n_bits=n_bits, - src_ofs=src_ofs, 
dst_ofs=dst_ofs, - ) - - def serialize_body(self): - # fixup - data = bytearray() - msg_pack_into( - self._fmt_str, data, 0, - self.n_bits, self.src_ofs, self.dst_ofs, - ) - # src field - n = ofp.oxm_from_user_header(self.src_field) - ofp.oxm_serialize_header(n, data, len(data)) - # dst field - n = ofp.oxm_from_user_header(self.dst_field) - ofp.oxm_serialize_header(n, data, len(data)) - return data - - class NXActionResubmit(NXAction): - r""" - Resubmit action - - This action searches one of the switch's flow tables. - - And equivalent to the followings action of ovs-ofctl command. - - .. - resubmit:port - .. - - +------------------------+ - | **resubmit**\:\ *port* | - +------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - in_port New in_port for checking flow table - ================ ====================================================== - - Example:: - - actions += [parser.NXActionResubmit(in_port=8080)] - """ - _subtype = nicira_ext.NXAST_RESUBMIT - - # in_port - _fmt_str = '!H4x' - - def __init__( - self, - in_port=0xfff8, - type_=None, len_=None, experimenter=None, subtype=None, - ): - super(NXActionResubmit, self).__init__() - self.in_port = in_port - - @classmethod - def parser(cls, buf): - (in_port,) = struct.unpack_from( - cls._fmt_str, buf, 0, - ) - return cls(in_port) - - def serialize_body(self): - data = bytearray() - msg_pack_into( - self._fmt_str, data, 0, - self.in_port, - ) - return data - - class NXActionResubmitTable(NXAction): - r""" - Resubmit action - - This action searches one of the switch's flow tables. - - And equivalent to the followings action of ovs-ofctl command. - - .. - resubmit([port],[table]) - .. 
- - +------------------------------------------------+ - | **resubmit(**\[\ *port*\]\,[\ *table*\]\ **)** | - +------------------------------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - in_port New in_port for checking flow table - table_id Checking flow tables - ================ ====================================================== - - Example:: - - actions += [parser.NXActionResubmit(in_port=8080, - table_id=10)] - """ - _subtype = nicira_ext.NXAST_RESUBMIT_TABLE - - # in_port, table_id - _fmt_str = '!HB3x' - - def __init__( - self, - in_port=0xfff8, - table_id=0xff, - type_=None, len_=None, experimenter=None, subtype=None, - ): - super(NXActionResubmitTable, self).__init__() - self.in_port = in_port - self.table_id = table_id - - @classmethod - def parser(cls, buf): - ( - in_port, - table_id, - ) = struct.unpack_from( - cls._fmt_str, buf, 0, - ) - return cls(in_port, table_id) - - def serialize_body(self): - data = bytearray() - msg_pack_into( - self._fmt_str, data, 0, - self.in_port, self.table_id, - ) - return data - - class NXActionOutputReg(NXAction): - r""" - Add output action - - This action outputs the packet to the OpenFlow port number read from - src. - - And equivalent to the followings action of ovs-ofctl command. - - .. - output:src[start...end] - .. - - +-------------------------------------------------------+ - | **output**\:\ *src*\ **[**\ *start*\...\ *end*\ **]** | - +-------------------------------------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - ofs_nbits Start and End for the OXM/NXM field. 
- Setting method refer to the ``nicira_ext.ofs_nbits`` - src OXM/NXM header for source field - max_len Max length to send to controller - ================ ====================================================== - - Example:: - - actions += [parser.NXActionOutputReg( - ofs_nbits=nicira_ext.ofs_nbits(4, 31), - src="reg0", - max_len=1024)] - """ - _subtype = nicira_ext.NXAST_OUTPUT_REG - - # ofs_nbits, src, max_len - _fmt_str = '!H4sH6x' - _TYPE = { - 'ascii': [ - 'src', - ], - } - - def __init__( - self, - ofs_nbits, - src, - max_len, - type_=None, len_=None, experimenter=None, subtype=None, - ): - super(NXActionOutputReg, self).__init__() - self.ofs_nbits = ofs_nbits - self.src = src - self.max_len = max_len - - @classmethod - def parser(cls, buf): - (ofs_nbits, oxm_data, max_len) = struct.unpack_from( - cls._fmt_str, buf, 0, - ) - (n, len_) = ofp.oxm_parse_header(oxm_data, 0) - src = ofp.oxm_to_user_header(n) - return cls( - ofs_nbits, - src, - max_len, - ) - - def serialize_body(self): - data = bytearray() - src = bytearray() - oxm = ofp.oxm_from_user_header(self.src) - ofp.oxm_serialize_header(oxm, src, 0), - msg_pack_into( - self._fmt_str, data, 0, - self.ofs_nbits, - six.binary_type(src), - self.max_len, - ) - return data - - class NXActionOutputReg2(NXAction): - r""" - Add output action - - This action outputs the packet to the OpenFlow port number read from - src. - - And equivalent to the followings action of ovs-ofctl command. - - .. - output:src[start...end] - .. - - +-------------------------------------------------------+ - | **output**\:\ *src*\ **[**\ *start*\...\ *end*\ **]** | - +-------------------------------------------------------+ - - .. NOTE:: - Like the ``NXActionOutputReg`` but organized so - that there is room for a 64-bit experimenter OXM as 'src'. 
- - ================ ====================================================== - Attribute Description - ================ ====================================================== - ofs_nbits Start and End for the OXM/NXM field. - Setting method refer to the ``nicira_ext.ofs_nbits`` - src OXM/NXM header for source field - max_len Max length to send to controller - ================ ====================================================== - - Example:: - - actions += [parser.NXActionOutputReg2( - ofs_nbits=nicira_ext.ofs_nbits(4, 31), - src="reg0", - max_len=1024)] - """ - _subtype = nicira_ext.NXAST_OUTPUT_REG2 - - # ofs_nbits, src, max_len - _fmt_str = '!HH4s' - _TYPE = { - 'ascii': [ - 'src', - ], - } - - def __init__( - self, - ofs_nbits, - src, - max_len, - type_=None, len_=None, experimenter=None, subtype=None, - ): - super(NXActionOutputReg2, self).__init__() - self.ofs_nbits = ofs_nbits - self.src = src - self.max_len = max_len - - @classmethod - def parser(cls, buf): - ( - ofs_nbits, - max_len, - oxm_data, - ) = struct.unpack_from( - cls._fmt_str, buf, 0, - ) - (n, len_) = ofp.oxm_parse_header(oxm_data, 0) - src = ofp.oxm_to_user_header(n) - return cls( - ofs_nbits, - src, - max_len, - ) - - def serialize_body(self): - data = bytearray() - oxm_data = bytearray() - oxm = ofp.oxm_from_user_header(self.src) - ofp.oxm_serialize_header(oxm, oxm_data, 0), - msg_pack_into( - self._fmt_str, data, 0, - self.ofs_nbits, - self.max_len, - six.binary_type(oxm_data), - ) - offset = len(data) - msg_pack_into("!%dx" % (14 - offset), data, offset) - return data - - class NXActionLearn(NXAction): - r""" - Adds or modifies flow action - - This action adds or modifies a flow in OpenFlow table. - - And equivalent to the followings action of ovs-ofctl command. - - .. - learn(argument[,argument]...) - .. 
- - +---------------------------------------------------+ - | **learn(**\ *argument*\[,\ *argument*\]...\ **)** | - +---------------------------------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - table_id The table in which the new flow should be inserted - specs Adds a match criterion to the new flow - - Please use the - ``NXFlowSpecMatch`` - in order to set the following format - - .. - field=value - field[start..end]=src[start..end] - field[start..end] - .. - - | *field*\=\ *value* - | *field*\ **[**\ *start*\..\ *end*\ **]**\ =\ - *src*\ **[**\ *start*\..\ *end*\ **]** - | *field*\ **[**\ *start*\..\ *end*\ **]** - | - - Please use the - ``NXFlowSpecLoad`` - in order to set the following format - - .. - load:value->dst[start..end] - load:src[start..end]->dst[start..end] - .. - - | **load**\:\ *value*\ **->**\ *dst*\ - **[**\ *start*\..\ *end*\ **]** - | **load**\:\ *src*\ **[**\ *start*\..\ *end*\ - **] ->**\ *dst*\ **[**\ *start*\..\ *end*\ **]** - | - - Please use the - ``NXFlowSpecOutput`` - in order to set the following format - - .. - output:field[start..end] - .. - - | **output:**\ field\ **[**\ *start*\..\ *end*\ **]** - - idle_timeout Idle time before discarding(seconds) - hard_timeout Max time before discarding(seconds) - priority Priority level of flow entry - cookie Cookie for new flow - flags send_flow_rem - fin_idle_timeout Idle timeout after FIN(seconds) - fin_hard_timeout Hard timeout after FIN(seconds) - ================ ====================================================== - - .. CAUTION:: - The arguments specify the flow's match fields, actions, - and other properties, as follows. - At least one match criterion and one action argument - should ordinarily be specified. 
- - Example:: - - actions += [ - parser.NXActionLearn(able_id=10, - specs=[parser.NXFlowSpecMatch(src=0x800, - dst=('eth_type_nxm', 0), - n_bits=16), - parser.NXFlowSpecMatch(src=('reg1', 1), - dst=('reg2', 3), - n_bits=5), - parser.NXFlowSpecMatch(src=('reg3', 1), - dst=('reg3', 1), - n_bits=5), - parser.NXFlowSpecLoad(src=0, - dst=('reg4', 3), - n_bits=5), - parser.NXFlowSpecLoad(src=('reg5', 1), - dst=('reg6', 3), - n_bits=5), - parser.NXFlowSpecOutput(src=('reg7', 1), - dst="", - n_bits=5)], - idle_timeout=180, - hard_timeout=300, - priority=1, - cookie=0x64, - flags=ofproto.OFPFF_SEND_FLOW_REM, - fin_idle_timeout=180, - fin_hard_timeout=300)] - """ - _subtype = nicira_ext.NXAST_LEARN - - # idle_timeout, hard_timeout, priority, cookie, flags, - # table_id, pad, fin_idle_timeout, fin_hard_timeout - _fmt_str = '!HHHQHBxHH' - # Followed by flow_mod_specs - - def __init__( - self, - table_id, - specs, - idle_timeout=0, - hard_timeout=0, - priority=ofp.OFP_DEFAULT_PRIORITY, - cookie=0, - flags=0, - fin_idle_timeout=0, - fin_hard_timeout=0, - type_=None, len_=None, experimenter=None, subtype=None, - ): - super(NXActionLearn, self).__init__() - self.idle_timeout = idle_timeout - self.hard_timeout = hard_timeout - self.priority = priority - self.cookie = cookie - self.flags = flags - self.table_id = table_id - self.fin_idle_timeout = fin_idle_timeout - self.fin_hard_timeout = fin_hard_timeout - self.specs = specs - - @classmethod - def parser(cls, buf): - ( - idle_timeout, - hard_timeout, - priority, - cookie, - flags, - table_id, - fin_idle_timeout, - fin_hard_timeout, - ) = struct.unpack_from( - cls._fmt_str, buf, 0, - ) - rest = buf[struct.calcsize(cls._fmt_str):] - # specs - specs = [] - while len(rest) > 0: - spec, rest = _NXFlowSpec.parse(rest) - if spec is None: - continue - specs.append(spec) - return cls( - idle_timeout=idle_timeout, - hard_timeout=hard_timeout, - priority=priority, - cookie=cookie, - flags=flags, - table_id=table_id, - 
fin_idle_timeout=fin_idle_timeout, - fin_hard_timeout=fin_hard_timeout, - specs=specs, - ) - - def serialize_body(self): - # fixup - data = bytearray() - msg_pack_into( - self._fmt_str, data, 0, - self.idle_timeout, - self.hard_timeout, - self.priority, - self.cookie, - self.flags, - self.table_id, - self.fin_idle_timeout, - self.fin_hard_timeout, - ) - for spec in self.specs: - data += spec.serialize() - return data - - class NXActionExit(NXAction): - """ - Halt action - - This action causes OpenvSwitch to immediately halt - execution of further actions. - - And equivalent to the followings action of ovs-ofctl command. - - .. - exit - .. - - +----------+ - | **exit** | - +----------+ - - Example:: - - actions += [parser.NXActionExit()] - """ - _subtype = nicira_ext.NXAST_EXIT - - _fmt_str = '!6x' - - def __init__( - self, - type_=None, len_=None, experimenter=None, subtype=None, - ): - super(NXActionExit, self).__init__() - - @classmethod - def parser(cls, buf): - return cls() - - def serialize_body(self): - data = bytearray() - msg_pack_into(self._fmt_str, data, 0) - return data - - # For OpenFlow1.0 only - class NXActionDecTtl(NXAction): - """ - Decrement IP TTL action - - This action decrements TTL of IPv4 packet or - hop limit of IPv6 packet. - - And equivalent to the followings action of ovs-ofctl command. - - .. - dec_ttl - .. - - +-------------+ - | **dec_ttl** | - +-------------+ - - .. NOTE:: - This actions is supported by - ``OFPActionDecNwTtl`` - in OpenFlow1.2 or later. 
- - Example:: - - actions += [parser.NXActionDecTtl()] - """ - _subtype = nicira_ext.NXAST_DEC_TTL - - _fmt_str = '!6x' - - def __init__( - self, - type_=None, len_=None, vendor=None, subtype=None, - ): - super(NXActionDecTtl, self).__init__() - - @classmethod - def parser(cls, buf): - return cls() - - def serialize_body(self): - data = bytearray() - msg_pack_into(self._fmt_str, data, 0) - return data - - class NXActionController(NXAction): - r""" - Send packet in message action - - This action sends the packet to the OpenFlow controller as - a packet in message. - - And equivalent to the followings action of ovs-ofctl command. - - .. - controller(key=value...) - .. - - +----------------------------------------------+ - | **controller(**\ *key*\=\ *value*\...\ **)** | - +----------------------------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - max_len Max length to send to controller - controller_id Controller ID to send packet-in - reason Reason for sending the message - ================ ====================================================== - - Example:: - - actions += [ - parser.NXActionController(max_len=1024, - controller_id=1, - reason=ofproto.OFPR_INVALID_TTL)] - """ - _subtype = nicira_ext.NXAST_CONTROLLER - - # max_len, controller_id, reason - _fmt_str = '!HHBx' - - def __init__( - self, - max_len, - controller_id, - reason, - type_=None, len_=None, experimenter=None, subtype=None, - ): - super(NXActionController, self).__init__() - self.max_len = max_len - self.controller_id = controller_id - self.reason = reason - - @classmethod - def parser(cls, buf): - ( - max_len, - controller_id, - reason, - ) = struct.unpack_from( - cls._fmt_str, buf, - ) - return cls( - max_len, - controller_id, - reason, - ) - - def serialize_body(self): - data = bytearray() - msg_pack_into( - self._fmt_str, data, 0, - self.max_len, 
- self.controller_id, - self.reason, - ) - return data - - class NXActionController2(NXAction): - r""" - Send packet in message action - - This action sends the packet to the OpenFlow controller as - a packet in message. - - And equivalent to the followings action of ovs-ofctl command. - - .. - controller(key=value...) - .. - - +----------------------------------------------+ - | **controller(**\ *key*\=\ *value*\...\ **)** | - +----------------------------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - max_len Max length to send to controller - controller_id Controller ID to send packet-in - reason Reason for sending the message - userdata Additional data to the controller in the packet-in - message - pause Flag to pause pipeline to resume later - ================ ====================================================== - - Example:: - - actions += [ - parser.NXActionController(max_len=1024, - controller_id=1, - reason=ofproto.OFPR_INVALID_TTL, - userdata=[0xa,0xb,0xc], - pause=True)] - """ - _subtype = nicira_ext.NXAST_CONTROLLER2 - _fmt_str = '!6x' - _PACK_STR = '!HH' - - def __init__( - self, - type_=None, len_=None, vendor=None, subtype=None, - **kwargs - ): - super(NXActionController2, self).__init__() - - for arg in kwargs: - if arg in NXActionController2Prop._NAMES: - setattr(self, arg, kwargs[arg]) - - @classmethod - def parser(cls, buf): - cls_data = {} - offset = 6 - buf_len = len(buf) - while buf_len > offset: - (type_, length) = struct.unpack_from(cls._PACK_STR, buf, offset) - offset += 4 - try: - subcls = NXActionController2Prop._TYPES[type_] - except KeyError: - subcls = NXActionController2PropUnknown - data, size = subcls.parser_prop(buf[offset:], length - 4) - offset += size - cls_data[subcls._arg_name] = data - return cls(**cls_data) - - def serialize_body(self): - body = bytearray() - 
msg_pack_into(self._fmt_str, body, 0) - prop_list = [] - for arg in self.__dict__: - if arg in NXActionController2Prop._NAMES: - prop_list.append(( - NXActionController2Prop._NAMES[arg], - self.__dict__[arg], - )) - prop_list.sort(key=lambda x: x[0].type) - - for subcls, value in prop_list: - body += subcls.serialize_prop(value) - - return body - - class NXActionController2Prop(object): - _TYPES = {} - _NAMES = {} - - @classmethod - def register_type(cls, type_): - def _register_type(subcls): - subcls.type = type_ - NXActionController2Prop._TYPES[type_] = subcls - NXActionController2Prop._NAMES[subcls._arg_name] = subcls - return subcls - - return _register_type - - class NXActionController2PropUnknown(NXActionController2Prop): - - @classmethod - def parser_prop(cls, buf, length): - size = 4 - return buf, size - - @classmethod - def serialize_prop(cls, argment): - data = bytearray() - return data - - @NXActionController2Prop.register_type(nicira_ext.NXAC2PT_MAX_LEN) - class NXActionController2PropMaxLen(NXActionController2Prop): - # max_len - _fmt_str = "!H2x" - _arg_name = "max_len" - - @classmethod - def parser_prop(cls, buf, length): - size = 4 - (max_len,) = struct.unpack_from( - cls._fmt_str, buf, 0, - ) - return max_len, size - - @classmethod - def serialize_prop(cls, max_len): - data = bytearray() - msg_pack_into( - "!HHH2x", data, 0, - nicira_ext.NXAC2PT_MAX_LEN, - 8, - max_len, - ) - return data - - @NXActionController2Prop.register_type(nicira_ext.NXAC2PT_CONTROLLER_ID) - class NXActionController2PropControllerId(NXActionController2Prop): - # controller_id - _fmt_str = "!H2x" - _arg_name = "controller_id" - - @classmethod - def parser_prop(cls, buf, length): - size = 4 - (controller_id,) = struct.unpack_from( - cls._fmt_str, buf, 0, - ) - return controller_id, size - - @classmethod - def serialize_prop(cls, controller_id): - data = bytearray() - msg_pack_into( - "!HHH2x", data, 0, - nicira_ext.NXAC2PT_CONTROLLER_ID, - 8, - controller_id, - ) - return data 
- - @NXActionController2Prop.register_type(nicira_ext.NXAC2PT_REASON) - class NXActionController2PropReason(NXActionController2Prop): - # reason - _fmt_str = "!B3x" - _arg_name = "reason" - - @classmethod - def parser_prop(cls, buf, length): - size = 4 - (reason,) = struct.unpack_from( - cls._fmt_str, buf, 0, - ) - return reason, size - - @classmethod - def serialize_prop(cls, reason): - data = bytearray() - msg_pack_into( - "!HHB3x", data, 0, - nicira_ext.NXAC2PT_REASON, - 5, - reason, - ) - return data - - @NXActionController2Prop.register_type(nicira_ext.NXAC2PT_USERDATA) - class NXActionController2PropUserData(NXActionController2Prop): - # userdata - _fmt_str = "!B" - _arg_name = "userdata" - - @classmethod - def parser_prop(cls, buf, length): - userdata = [] - offset = 0 - - while offset < length: - u = struct.unpack_from(cls._fmt_str, buf, offset) - userdata.append(u[0]) - offset += 1 - - user_size = utils.round_up(length, 4) - - if user_size > 4 and (user_size % 8) == 0: - size = utils.round_up(length, 4) + 4 - else: - size = utils.round_up(length, 4) - - return userdata, size - - @classmethod - def serialize_prop(cls, userdata): - data = bytearray() - user_buf = bytearray() - user_offset = 0 - for user in userdata: - msg_pack_into( - '!B', user_buf, user_offset, - user, - ) - user_offset += 1 - - msg_pack_into( - "!HH", data, 0, - nicira_ext.NXAC2PT_USERDATA, - 4 + user_offset, - ) - data += user_buf - - if user_offset > 4: - user_len = utils.round_up(user_offset, 4) - brank_size = 0 - if (user_len % 8) == 0: - brank_size = 4 - msg_pack_into( - "!%dx" % (user_len - user_offset + brank_size), - data, 4 + user_offset, - ) - else: - user_len = utils.round_up(user_offset, 4) - - msg_pack_into( - "!%dx" % (user_len - user_offset), - data, 4 + user_offset, - ) - return data - - @NXActionController2Prop.register_type(nicira_ext.NXAC2PT_PAUSE) - class NXActionController2PropPause(NXActionController2Prop): - _arg_name = "pause" - - @classmethod - def 
parser_prop(cls, buf, length): - pause = True - size = 4 - return pause, size - - @classmethod - def serialize_prop(cls, pause): - data = bytearray() - msg_pack_into( - "!HH4x", data, 0, - nicira_ext.NXAC2PT_PAUSE, - 4, - ) - return data - - class NXActionDecTtlCntIds(NXAction): - r""" - Decrement TTL action - - This action decrements TTL of IPv4 packet or - hop limits of IPv6 packet. - - And equivalent to the followings action of ovs-ofctl command. - - .. - dec_ttl(id1[,id2]...) - .. - - +-------------------------------------------+ - | **dec_ttl(**\ *id1*\[,\ *id2*\]...\ **)** | - +-------------------------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - cnt_ids Controller ids - ================ ====================================================== - - Example:: - - actions += [parser.NXActionDecTtlCntIds(cnt_ids=[1,2,3])] - - .. NOTE:: - If you want to set the following ovs-ofctl command. - Please use ``OFPActionDecNwTtl``. 
- - +-------------+ - | **dec_ttl** | - +-------------+ - """ - _subtype = nicira_ext.NXAST_DEC_TTL_CNT_IDS - - # controllers - _fmt_str = '!H4x' - _fmt_len = 6 - - def __init__( - self, - cnt_ids, - type_=None, len_=None, experimenter=None, subtype=None, - ): - super(NXActionDecTtlCntIds, self).__init__() - - self.cnt_ids = cnt_ids - - @classmethod - def parser(cls, buf): - (controllers,) = struct.unpack_from( - cls._fmt_str, buf, - ) - - offset = cls._fmt_len - cnt_ids = [] - - for i in range(0, controllers): - id_ = struct.unpack_from('!H', buf, offset) - cnt_ids.append(id_[0]) - offset += 2 - - return cls(cnt_ids) - - def serialize_body(self): - assert isinstance(self.cnt_ids, (tuple, list)) - for i in self.cnt_ids: - assert isinstance(i, six.integer_types) - - controllers = len(self.cnt_ids) - - data = bytearray() - msg_pack_into( - self._fmt_str, data, 0, - controllers, - ) - offset = self._fmt_len - - for id_ in self.cnt_ids: - msg_pack_into('!H', data, offset, id_) - offset += 2 - - id_len = ( - utils.round_up(controllers, 4) - - controllers - ) - - if id_len != 0: - msg_pack_into('%dx' % id_len * 2, data, offset) - - return data - - # Use in only OpenFlow1.0 - class NXActionMplsBase(NXAction): - # ethertype - _fmt_str = '!H4x' - - def __init__( - self, - ethertype, - type_=None, len_=None, vendor=None, subtype=None, - ): - super(NXActionMplsBase, self).__init__() - self.ethertype = ethertype - - @classmethod - def parser(cls, buf): - (ethertype,) = struct.unpack_from( - cls._fmt_str, buf, - ) - return cls(ethertype) - - def serialize_body(self): - data = bytearray() - msg_pack_into( - self._fmt_str, data, 0, - self.ethertype, - ) - return data - - # For OpenFlow1.0 only - class NXActionPushMpls(NXActionMplsBase): - r""" - Push MPLS action - - This action pushes a new MPLS header to the packet. - - And equivalent to the followings action of ovs-ofctl command. - - .. - push_mpls:ethertype - .. 
- - +-------------------------------+ - | **push_mpls**\:\ *ethertype* | - +-------------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - ethertype Ether type(The value must be either 0x8847 or 0x8848) - ================ ====================================================== - - .. NOTE:: - This actions is supported by - ``OFPActionPushMpls`` - in OpenFlow1.2 or later. - - Example:: - - match = parser.OFPMatch(dl_type=0x0800) - actions += [parser.NXActionPushMpls(ethertype=0x8847)] - """ - _subtype = nicira_ext.NXAST_PUSH_MPLS - - # For OpenFlow1.0 only - class NXActionPopMpls(NXActionMplsBase): - r""" - Pop MPLS action - - This action pops the MPLS header from the packet. - - And equivalent to the followings action of ovs-ofctl command. - - .. - pop_mpls:ethertype - .. - - +------------------------------+ - | **pop_mpls**\:\ *ethertype* | - +------------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - ethertype Ether type - ================ ====================================================== - - .. NOTE:: - This actions is supported by - ``OFPActionPopMpls`` - in OpenFlow1.2 or later. - - Example:: - - match = parser.OFPMatch(dl_type=0x8847) - actions += [parser.NXActionPushMpls(ethertype=0x0800)] - """ - _subtype = nicira_ext.NXAST_POP_MPLS - - # For OpenFlow1.0 only - class NXActionSetMplsTtl(NXAction): - r""" - Set MPLS TTL action - - This action sets the MPLS TTL. - - And equivalent to the followings action of ovs-ofctl command. - - .. - set_mpls_ttl:ttl - .. 
- - +---------------------------+ - | **set_mpls_ttl**\:\ *ttl* | - +---------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - ttl MPLS TTL - ================ ====================================================== - - .. NOTE:: - This actions is supported by - ``OFPActionSetMplsTtl`` - in OpenFlow1.2 or later. - - Example:: - - actions += [parser.NXActionSetMplsTil(ttl=128)] - """ - _subtype = nicira_ext.NXAST_SET_MPLS_TTL - - # ethertype - _fmt_str = '!B5x' - - def __init__( - self, - ttl, - type_=None, len_=None, vendor=None, subtype=None, - ): - super(NXActionSetMplsTtl, self).__init__() - self.ttl = ttl - - @classmethod - def parser(cls, buf): - (ttl,) = struct.unpack_from( - cls._fmt_str, buf, - ) - return cls(ttl) - - def serialize_body(self): - data = bytearray() - msg_pack_into( - self._fmt_str, data, 0, - self.ttl, - ) - return data - - # For OpenFlow1.0 only - class NXActionDecMplsTtl(NXAction): - """ - Decrement MPLS TTL action - - This action decrements the MPLS TTL. - - And equivalent to the followings action of ovs-ofctl command. - - .. - dec_mpls_ttl - .. - - +------------------+ - | **dec_mpls_ttl** | - +------------------+ - - .. NOTE:: - This actions is supported by - ``OFPActionDecMplsTtl`` - in OpenFlow1.2 or later. - - Example:: - - actions += [parser.NXActionDecMplsTil()] - """ - _subtype = nicira_ext.NXAST_DEC_MPLS_TTL - - # ethertype - _fmt_str = '!6x' - - def __init__( - self, - type_=None, len_=None, vendor=None, subtype=None, - ): - super(NXActionDecMplsTtl, self).__init__() - - @classmethod - def parser(cls, buf): - return cls() - - def serialize_body(self): - data = bytearray() - msg_pack_into(self._fmt_str, data, 0) - return data - - # For OpenFlow1.0 only - class NXActionSetMplsLabel(NXAction): - r""" - Set MPLS Lavel action - - This action sets the MPLS Label. 
- - And equivalent to the followings action of ovs-ofctl command. - - .. - set_mpls_label:label - .. - - +-------------------------------+ - | **set_mpls_label**\:\ *label* | - +-------------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - label MPLS Label - ================ ====================================================== - - .. NOTE:: - This actions is supported by - ``OFPActionSetField(mpls_label=label)`` - in OpenFlow1.2 or later. - - Example:: - - actions += [parser.NXActionSetMplsLabel(label=0x10)] - """ - _subtype = nicira_ext.NXAST_SET_MPLS_LABEL - - # ethertype - _fmt_str = '!2xI' - - def __init__( - self, - label, - type_=None, len_=None, vendor=None, subtype=None, - ): - super(NXActionSetMplsLabel, self).__init__() - self.label = label - - @classmethod - def parser(cls, buf): - (label,) = struct.unpack_from( - cls._fmt_str, buf, - ) - return cls(label) - - def serialize_body(self): - data = bytearray() - msg_pack_into( - self._fmt_str, data, 0, - self.label, - ) - return data - - # For OpenFlow1.0 only - class NXActionSetMplsTc(NXAction): - r""" - Set MPLS Tc action - - This action sets the MPLS Tc. - - And equivalent to the followings action of ovs-ofctl command. - - .. - set_mpls_tc:tc - .. - - +-------------------------+ - | **set_mpls_tc**\:\ *tc* | - +-------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - tc MPLS Tc - ================ ====================================================== - - .. NOTE:: - This actions is supported by - ``OFPActionSetField(mpls_label=tc)`` - in OpenFlow1.2 or later. 
- - Example:: - - actions += [parser.NXActionSetMplsLabel(tc=0x10)] - """ - _subtype = nicira_ext.NXAST_SET_MPLS_TC - - # ethertype - _fmt_str = '!B5x' - - def __init__( - self, - tc, - type_=None, len_=None, vendor=None, subtype=None, - ): - super(NXActionSetMplsTc, self).__init__() - self.tc = tc - - @classmethod - def parser(cls, buf): - (tc,) = struct.unpack_from( - cls._fmt_str, buf, - ) - return cls(tc) - - def serialize_body(self): - data = bytearray() - msg_pack_into( - self._fmt_str, data, 0, - self.tc, - ) - return data - - class NXActionStackBase(NXAction): - # start, field, end - _fmt_str = '!H4sH' - _TYPE = { - 'ascii': [ - 'field', - ], - } - - def __init__( - self, - field, - start, - end, - type_=None, len_=None, experimenter=None, subtype=None, - ): - super(NXActionStackBase, self).__init__() - self.field = field - self.start = start - self.end = end - - @classmethod - def parser(cls, buf): - (start, oxm_data, end) = struct.unpack_from( - cls._fmt_str, buf, 0, - ) - (n, len_) = ofp.oxm_parse_header(oxm_data, 0) - field = ofp.oxm_to_user_header(n) - return cls(field, start, end) - - def serialize_body(self): - data = bytearray() - oxm_data = bytearray() - oxm = ofp.oxm_from_user_header(self.field) - ofp.oxm_serialize_header(oxm, oxm_data, 0) - msg_pack_into( - self._fmt_str, data, 0, - self.start, - six.binary_type(oxm_data), - self.end, - ) - offset = len(data) - msg_pack_into("!%dx" % (12 - offset), data, offset) - return data - - class NXActionStackPush(NXActionStackBase): - r""" - Push field action - - This action pushes field to top of the stack. - - And equivalent to the followings action of ovs-ofctl command. - - .. - pop:dst[start...end] - .. 
- - +----------------------------------------------------+ - | **pop**\:\ *dst*\ **[**\ *start*\...\ *end*\ **]** | - +----------------------------------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - field OXM/NXM header for source field - start Start bit for source field - end End bit for source field - ================ ====================================================== - - Example:: - - actions += [parser.NXActionStackPush(field="reg2", - start=0, - end=5)] - """ - _subtype = nicira_ext.NXAST_STACK_PUSH - - class NXActionStackPop(NXActionStackBase): - r""" - Pop field action - - This action pops field from top of the stack. - - And equivalent to the followings action of ovs-ofctl command. - - .. - pop:src[start...end] - .. - - +----------------------------------------------------+ - | **pop**\:\ *src*\ **[**\ *start*\...\ *end*\ **]** | - +----------------------------------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - field OXM/NXM header for destination field - start Start bit for destination field - end End bit for destination field - ================ ====================================================== - - Example:: - - actions += [parser.NXActionStackPop(field="reg2", - start=0, - end=5)] - """ - _subtype = nicira_ext.NXAST_STACK_POP - - class NXActionSample(NXAction): - r""" - Sample packets action - - This action samples packets and sends one sample for - every sampled packet. - - And equivalent to the followings action of ovs-ofctl command. - - .. - sample(argument[,argument]...) - .. 
- - +----------------------------------------------------+ - | **sample(**\ *argument*\[,\ *argument*\]...\ **)** | - +----------------------------------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - probability The number of sampled packets - collector_set_id The unsigned 32-bit integer identifier of - the set of sample collectors to send sampled packets - to - obs_domain_id The Unsigned 32-bit integer Observation Domain ID - obs_point_id The unsigned 32-bit integer Observation Point ID - ================ ====================================================== - - Example:: - - actions += [parser.NXActionSample(probability=3, - collector_set_id=1, - obs_domain_id=2, - obs_point_id=3,)] - """ - _subtype = nicira_ext.NXAST_SAMPLE - - # probability, collector_set_id, obs_domain_id, obs_point_id - _fmt_str = '!HIII' - - def __init__( - self, - probability, - collector_set_id=0, - obs_domain_id=0, - obs_point_id=0, - type_=None, len_=None, experimenter=None, subtype=None, - ): - super(NXActionSample, self).__init__() - self.probability = probability - self.collector_set_id = collector_set_id - self.obs_domain_id = obs_domain_id - self.obs_point_id = obs_point_id - - @classmethod - def parser(cls, buf): - ( - probability, - collector_set_id, - obs_domain_id, - obs_point_id, - ) = struct.unpack_from( - cls._fmt_str, buf, 0, - ) - return cls( - probability, - collector_set_id, - obs_domain_id, - obs_point_id, - ) - - def serialize_body(self): - data = bytearray() - msg_pack_into( - self._fmt_str, data, 0, - self.probability, - self.collector_set_id, - self.obs_domain_id, - self.obs_point_id, - ) - return data - - class NXActionSample2(NXAction): - r""" - Sample packets action - - This action samples packets and sends one sample for - every sampled packet. - 'sampling_port' can be equal to ingress port or one of egress ports. 
- - And equivalent to the followings action of ovs-ofctl command. - - .. - sample(argument[,argument]...) - .. - - +----------------------------------------------------+ - | **sample(**\ *argument*\[,\ *argument*\]...\ **)** | - +----------------------------------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - probability The number of sampled packets - collector_set_id The unsigned 32-bit integer identifier of - the set of sample collectors to send sampled packets to - obs_domain_id The Unsigned 32-bit integer Observation Domain ID - obs_point_id The unsigned 32-bit integer Observation Point ID - sampling_port Sampling port number - ================ ====================================================== - - Example:: - - actions += [parser.NXActionSample2(probability=3, - collector_set_id=1, - obs_domain_id=2, - obs_point_id=3, - apn_mac_addr=0a:00:27:00:00:05, - msisdn=magmaIsTheBest, - apn_name=big_tower123, - pdp_start_epoch=100, - sampling_port=8080)] - """ - _subtype = nicira_ext.NXAST_SAMPLE2 - - # probability, collector_set_id, obs_domain_id, - # obs_point_id, msisdn, apn_mac_addr, apn_name, sampling_port - _fmt_str = '!HIIIL16s6B24s8s6x' - - def __init__( - self, - probability, - msisdn, - apn_mac_addr, - apn_name, - pdp_start_epoch, - collector_set_id=0, - obs_domain_id=0, - obs_point_id=0, - sampling_port=0, - type_=None, len_=None, experimenter=None, subtype=None, - ): - super(NXActionSample2, self).__init__() - self.probability = probability - self.collector_set_id = collector_set_id - self.obs_domain_id = obs_domain_id - self.obs_point_id = obs_point_id - self.sampling_port = sampling_port - - self.msisdn = msisdn.encode('ascii') - self.apn_mac_addr = apn_mac_addr - self.apn_name = apn_name.encode('ascii') - self.pdp_start_epoch = pdp_start_epoch.to_bytes(8, byteorder='little') - - @classmethod - def 
parser(cls, buf): - ( - probability, - collector_set_id, - obs_domain_id, - obs_point_id, - sampling_port, - msisdn, - apn_mac_addr, - apn_name, - pdp_start_epoch, - ) = struct.unpack_from( - cls._fmt_str, buf, 0, - ) - - return cls( - probability, - msisdn, - apn_mac_addr, - apn_name, - pdp_start_epoch, - collector_set_id, - obs_domain_id, - obs_point_id, - sampling_port, - ) - - def serialize_body(self): - data = bytearray() - msg_pack_into( - self._fmt_str, data, 0, - self.probability, - self.collector_set_id, - self.obs_domain_id, - self.obs_point_id, - self.sampling_port, - self.msisdn, - *self.apn_mac_addr, - self.apn_name, - self.pdp_start_epoch, - ) - - return data - - class NXActionFinTimeout(NXAction): - r""" - Change TCP timeout action - - This action changes the idle timeout or hard timeout or - both, of this OpenFlow rule when the rule matches a TCP - packet with the FIN or RST flag. - - And equivalent to the followings action of ovs-ofctl command. - - .. - fin_timeout(argument[,argument]...) - .. 
- - +---------------------------------------------------------+ - | **fin_timeout(**\ *argument*\[,\ *argument*\]...\ **)** | - +---------------------------------------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - fin_idle_timeout Causes the flow to expire after the given number - of seconds of inactivity - fin_idle_timeout Causes the flow to expire after the given number - of second, regardless of activity - ================ ====================================================== - - Example:: - - match = parser.OFPMatch(ip_proto=6, eth_type=0x0800) - actions += [parser.NXActionFinTimeout(fin_idle_timeout=30, - fin_hard_timeout=60)] - """ - _subtype = nicira_ext.NXAST_FIN_TIMEOUT - - # fin_idle_timeout, fin_hard_timeout - _fmt_str = '!HH2x' - - def __init__( - self, - fin_idle_timeout, - fin_hard_timeout, - type_=None, len_=None, experimenter=None, subtype=None, - ): - super(NXActionFinTimeout, self).__init__() - self.fin_idle_timeout = fin_idle_timeout - self.fin_hard_timeout = fin_hard_timeout - - @classmethod - def parser(cls, buf): - ( - fin_idle_timeout, - fin_hard_timeout, - ) = struct.unpack_from( - cls._fmt_str, buf, 0, - ) - return cls( - fin_idle_timeout, - fin_hard_timeout, - ) - - def serialize_body(self): - data = bytearray() - msg_pack_into( - self._fmt_str, data, 0, - self.fin_idle_timeout, - self.fin_hard_timeout, - ) - return data - - class NXActionConjunction(NXAction): - r""" - Conjunctive matches action - - This action ties groups of individual OpenFlow flows into - higher-level conjunctive flows. - Please refer to the ovs-ofctl command manual for details. - - And equivalent to the followings action of ovs-ofctl command. - - .. - conjunction(id,k/n) - .. 
- - +--------------------------------------------------+ - | **conjunction(**\ *id*\,\ *k*\ **/**\ *n*\ **)** | - +--------------------------------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - clause Number assigned to the flow's dimension - n_clauses Specify the conjunctive flow's match condition - id\_ Conjunction ID - ================ ====================================================== - - Example:: - - actions += [parser.NXActionConjunction(clause=1, - n_clauses=2, - id_=10)] - """ - _subtype = nicira_ext.NXAST_CONJUNCTION - - # clause, n_clauses, id - _fmt_str = '!BBI' - - def __init__( - self, - clause, - n_clauses, - id_, - type_=None, len_=None, experimenter=None, subtype=None, - ): - super(NXActionConjunction, self).__init__() - self.clause = clause - self.n_clauses = n_clauses - self.id = id_ - - @classmethod - def parser(cls, buf): - ( - clause, - n_clauses, - id_, - ) = struct.unpack_from( - cls._fmt_str, buf, 0, - ) - return cls(clause, n_clauses, id_) - - def serialize_body(self): - data = bytearray() - msg_pack_into( - self._fmt_str, data, 0, - self.clause, - self.n_clauses, - self.id, - ) - return data - - class NXActionMultipath(NXAction): - r""" - Select multipath link action - - This action selects multipath link based on the specified parameters. - Please refer to the ovs-ofctl command manual for details. - - And equivalent to the followings action of ovs-ofctl command. - - .. - multipath(fields, basis, algorithm, n_links, arg, dst[start..end]) - .. 
- - +-------------------------------------------------------------+ - | **multipath(**\ *fields*\, \ *basis*\, \ *algorithm*\, | - | *n_links*\, \ *arg*\, \ *dst*\[\ *start*\..\ *end*\]\ **)** | - +-------------------------------------------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - fields One of NX_HASH_FIELDS_* - basis Universal hash parameter - algorithm One of NX_MP_ALG_*. - max_link Number of output links - arg Algorithm-specific argument - ofs_nbits Start and End for the OXM/NXM field. - Setting method refer to the ``nicira_ext.ofs_nbits`` - dst OXM/NXM header for source field - ================ ====================================================== - - Example:: - - actions += [parser.NXActionMultipath( - fields=nicira_ext.NX_HASH_FIELDS_SYMMETRIC_L4, - basis=1024, - algorithm=nicira_ext.NX_MP_ALG_HRW, - max_link=5, - arg=0, - ofs_nbits=nicira_ext.ofs_nbits(4, 31), - dst="reg2")] - """ - _subtype = nicira_ext.NXAST_MULTIPATH - - # fields, basis, algorithm, max_link, - # arg, ofs_nbits, dst - _fmt_str = '!HH2xHHI2xH4s' - _TYPE = { - 'ascii': [ - 'dst', - ], - } - - def __init__( - self, - fields, - basis, - algorithm, - max_link, - arg, - ofs_nbits, - dst, - type_=None, len_=None, experimenter=None, subtype=None, - ): - super(NXActionMultipath, self).__init__() - self.fields = fields - self.basis = basis - self.algorithm = algorithm - self.max_link = max_link - self.arg = arg - self.ofs_nbits = ofs_nbits - self.dst = dst - - @classmethod - def parser(cls, buf): - ( - fields, - basis, - algorithm, - max_link, - arg, - ofs_nbits, - oxm_data, - ) = struct.unpack_from( - cls._fmt_str, buf, 0, - ) - (n, len_) = ofp.oxm_parse_header(oxm_data, 0) - dst = ofp.oxm_to_user_header(n) - return cls( - fields, - basis, - algorithm, - max_link, - arg, - ofs_nbits, - dst, - ) - - def serialize_body(self): - data = 
bytearray() - dst = bytearray() - oxm = ofp.oxm_from_user_header(self.dst) - ofp.oxm_serialize_header(oxm, dst, 0), - msg_pack_into( - self._fmt_str, data, 0, - self.fields, - self.basis, - self.algorithm, - self.max_link, - self.arg, - self.ofs_nbits, - six.binary_type(dst), - ) - - return data - - class _NXActionBundleBase(NXAction): - # algorithm, fields, basis, slave_type, n_slaves - # ofs_nbits - _fmt_str = '!HHHIHH' - - def __init__( - self, algorithm, fields, basis, slave_type, n_slaves, - ofs_nbits, dst, slaves, - ): - super(_NXActionBundleBase, self).__init__() - self.len = utils.round_up( - nicira_ext.NX_ACTION_BUNDLE_0_SIZE + len(slaves) * 2, 8, - ) - - self.algorithm = algorithm - self.fields = fields - self.basis = basis - self.slave_type = slave_type - self.n_slaves = n_slaves - self.ofs_nbits = ofs_nbits - self.dst = dst - - assert isinstance(slaves, (list, tuple)) - for s in slaves: - assert isinstance(s, six.integer_types) - - self.slaves = slaves - - @classmethod - def parser(cls, buf): - # Add dst ('I') to _fmt_str - ( - algorithm, fields, basis, - slave_type, n_slaves, ofs_nbits, dst, - ) = struct.unpack_from( - cls._fmt_str + 'I', buf, 0, - ) - - offset = ( - nicira_ext.NX_ACTION_BUNDLE_0_SIZE - - nicira_ext.NX_ACTION_HEADER_0_SIZE - 8 - ) - - if dst != 0: - (n, len_) = ofp.oxm_parse_header(buf, offset) - dst = ofp.oxm_to_user_header(n) - - slave_offset = ( - nicira_ext.NX_ACTION_BUNDLE_0_SIZE - - nicira_ext.NX_ACTION_HEADER_0_SIZE - ) - - slaves = [] - for i in range(0, n_slaves): - s = struct.unpack_from('!H', buf, slave_offset) - slaves.append(s[0]) - slave_offset += 2 - - return cls( - algorithm, fields, basis, slave_type, - n_slaves, ofs_nbits, dst, slaves, - ) - - def serialize_body(self): - data = bytearray() - slave_offset = ( - nicira_ext.NX_ACTION_BUNDLE_0_SIZE - - nicira_ext.NX_ACTION_HEADER_0_SIZE - ) - self.n_slaves = len(self.slaves) - for s in self.slaves: - msg_pack_into('!H', data, slave_offset, s) - slave_offset += 2 - pad_len 
= ( - utils.round_up(self.n_slaves, 4) - - self.n_slaves - ) - - if pad_len != 0: - msg_pack_into('%dx' % pad_len * 2, data, slave_offset) - - msg_pack_into( - self._fmt_str, data, 0, - self.algorithm, self.fields, self.basis, - self.slave_type, self.n_slaves, - self.ofs_nbits, - ) - offset = ( - nicira_ext.NX_ACTION_BUNDLE_0_SIZE - - nicira_ext.NX_ACTION_HEADER_0_SIZE - 8 - ) - - if self.dst == 0: - msg_pack_into('I', data, offset, self.dst) - else: - oxm_data = ofp.oxm_from_user_header(self.dst) - ofp.oxm_serialize_header(oxm_data, data, offset) - return data - - class NXActionBundle(_NXActionBundleBase): - r""" - Select bundle link action - - This action selects bundle link based on the specified parameters. - Please refer to the ovs-ofctl command manual for details. - - And equivalent to the followings action of ovs-ofctl command. - - .. - bundle(fields, basis, algorithm, slave_type, slaves:[ s1, s2,...]) - .. - - +-----------------------------------------------------------+ - | **bundle(**\ *fields*\, \ *basis*\, \ *algorithm*\, | - | *slave_type*\, \ *slaves*\:[ \ *s1*\, \ *s2*\,...]\ **)** | - +-----------------------------------------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - algorithm One of NX_MP_ALG_*. - fields One of NX_HASH_FIELDS_* - basis Universal hash parameter - slave_type Type of slaves(must be NXM_OF_IN_PORT) - n_slaves Number of slaves - ofs_nbits Start and End for the OXM/NXM field. 
(must be zero) - dst OXM/NXM header for source field(must be zero) - slaves List of slaves - ================ ====================================================== - - - Example:: - - actions += [parser.NXActionBundle( - algorithm=nicira_ext.NX_MP_ALG_HRW, - fields=nicira_ext.NX_HASH_FIELDS_ETH_SRC, - basis=0, - slave_type=nicira_ext.NXM_OF_IN_PORT, - n_slaves=2, - ofs_nbits=0, - dst=0, - slaves=[2, 3])] - """ - _subtype = nicira_ext.NXAST_BUNDLE - - def __init__( - self, algorithm, fields, basis, slave_type, n_slaves, - ofs_nbits, dst, slaves, - ): - # NXAST_BUNDLE actions should have 'sofs_nbits' and 'dst' zeroed. - super(NXActionBundle, self).__init__( - algorithm, fields, basis, slave_type, n_slaves, - ofs_nbits=0, dst=0, slaves=slaves, - ) - - class NXActionBundleLoad(_NXActionBundleBase): - r""" - Select bundle link action - - This action has the same behavior as the bundle action, - with one exception. - Please refer to the ovs-ofctl command manual for details. - - And equivalent to the followings action of ovs-ofctl command. - - .. - bundle_load(fields, basis, algorithm, slave_type, - dst[start..end], slaves:[ s1, s2,...]) - .. - - +-----------------------------------------------------------+ - | **bundle_load(**\ *fields*\, \ *basis*\, \ *algorithm*\, | - | *slave_type*\, \ *dst*\[\ *start*\... \*emd*\], | - | \ *slaves*\:[ \ *s1*\, \ *s2*\,...]\ **)** | | - +-----------------------------------------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - algorithm One of NX_MP_ALG_*. - fields One of NX_HASH_FIELDS_* - basis Universal hash parameter - slave_type Type of slaves(must be NXM_OF_IN_PORT) - n_slaves Number of slaves - ofs_nbits Start and End for the OXM/NXM field. 
- Setting method refer to the ``nicira_ext.ofs_nbits`` - dst OXM/NXM header for source field - slaves List of slaves - ================ ====================================================== - - - Example:: - - actions += [parser.NXActionBundleLoad( - algorithm=nicira_ext.NX_MP_ALG_HRW, - fields=nicira_ext.NX_HASH_FIELDS_ETH_SRC, - basis=0, - slave_type=nicira_ext.NXM_OF_IN_PORT, - n_slaves=2, - ofs_nbits=nicira_ext.ofs_nbits(4, 31), - dst="reg0", - slaves=[2, 3])] - """ - _subtype = nicira_ext.NXAST_BUNDLE_LOAD - _TYPE = { - 'ascii': [ - 'dst', - ], - } - - def __init__( - self, algorithm, fields, basis, slave_type, n_slaves, - ofs_nbits, dst, slaves, - ): - super(NXActionBundleLoad, self).__init__( - algorithm, fields, basis, slave_type, n_slaves, - ofs_nbits, dst, slaves, - ) - - class NXActionCT(NXAction): - r""" - Pass traffic to the connection tracker action - - This action sends the packet through the connection tracker. - - And equivalent to the followings action of ovs-ofctl command. - - .. - ct(argument[,argument]...) - .. - - +------------------------------------------------+ - | **ct(**\ *argument*\[,\ *argument*\]...\ **)** | - +------------------------------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - flags Zero or more(Unspecified flag bits must be zero.) - zone_src OXM/NXM header for source field - zone_ofs_nbits Start and End for the OXM/NXM field. - Setting method refer to the ``nicira_ext.ofs_nbits``. - If you need set the Immediate value for zone, - zone_src must be set to None or empty character string. - recirc_table Recirculate to a specific table - alg Well-known port number for the protocol - actions Zero or more actions may immediately follow this - action - ================ ====================================================== - - .. 
NOTE:: - - If you set number to zone_src, - Traceback occurs when you run the to_jsondict. - - Example:: - - match = parser.OFPMatch(eth_type=0x0800, ct_state=(0,32)) - actions += [parser.NXActionCT( - flags = 1, - zone_src = "reg0", - zone_ofs_nbits = nicira_ext.ofs_nbits(4, 31), - recirc_table = 4, - alg = 0, - actions = [])] - """ - _subtype = nicira_ext.NXAST_CT - - # flags, zone_src, zone_ofs_nbits, recirc_table, - # pad, alg - _fmt_str = '!H4sHB3xH' - _TYPE = { - 'ascii': [ - 'zone_src', - ], - } - - # Followed by actions - - def __init__( - self, - flags, - zone_src, - zone_ofs_nbits, - recirc_table, - alg, - actions, - type_=None, len_=None, experimenter=None, subtype=None, - ): - super(NXActionCT, self).__init__() - self.flags = flags - self.zone_src = zone_src - self.zone_ofs_nbits = zone_ofs_nbits - self.recirc_table = recirc_table - self.alg = alg - self.actions = actions - - @classmethod - def parser(cls, buf): - ( - flags, - oxm_data, - zone_ofs_nbits, - recirc_table, - alg, - ) = struct.unpack_from( - cls._fmt_str, buf, 0, - ) - rest = buf[struct.calcsize(cls._fmt_str):] - - # OXM/NXM field - if oxm_data == b'\x00' * 4: - zone_src = "" - else: - (n, len_) = ofp.oxm_parse_header(oxm_data, 0) - zone_src = ofp.oxm_to_user_header(n) - - # actions - actions = [] - while len(rest) > 0: - action = ofpp.OFPAction.parser(rest, 0) - actions.append(action) - rest = rest[action.len:] - - return cls( - flags, zone_src, zone_ofs_nbits, recirc_table, - alg, actions, - ) - - def serialize_body(self): - data = bytearray() - # If zone_src is zero, zone_ofs_nbits is zone_imm - if not self.zone_src: - zone_src = b'\x00' * 4 - elif isinstance(self.zone_src, six.integer_types): - zone_src = struct.pack("!I", self.zone_src) - else: - zone_src = bytearray() - oxm = ofp.oxm_from_user_header(self.zone_src) - ofp.oxm_serialize_header(oxm, zone_src, 0) - - msg_pack_into( - self._fmt_str, data, 0, - self.flags, - six.binary_type(zone_src), - self.zone_ofs_nbits, - 
self.recirc_table, - self.alg, - ) - for a in self.actions: - a.serialize(data, len(data)) - return data - - class NXActionCTClear(NXAction): - """ - Clear connection tracking state action - - This action clears connection tracking state from packets. - - And equivalent to the followings action of ovs-ofctl command. - - .. - ct_clear - .. - - +--------------+ - | **ct_clear** | - +--------------+ - - Example:: - - actions += [parser.NXActionCTClear()] - """ - _subtype = nicira_ext.NXAST_CT_CLEAR - - _fmt_str = '!6x' - - def __init__( - self, - type_=None, len_=None, experimenter=None, subtype=None, - ): - super(NXActionCTClear, self).__init__() - - @classmethod - def parser(cls, buf): - return cls() - - def serialize_body(self): - data = bytearray() - msg_pack_into(self._fmt_str, data, 0) - return data - - class NXActionNAT(NXAction): - r""" - Network address translation action - - This action sends the packet through the connection tracker. - - And equivalent to the followings action of ovs-ofctl command. - - .. NOTE:: - The following command image does not exist in ovs-ofctl command - manual and has been created from the command response. - - .. - nat(src=ip_min-ip_max : proto_min-proto-max) - .. - - +--------------------------------------------------+ - | **nat(src**\=\ *ip_min*\ **-**\ *ip_max*\ **:** | - | *proto_min*\ **-**\ *proto-max*\ **)** | - +--------------------------------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - flags Zero or more(Unspecified flag bits must be zero.) - range_ipv4_min Range ipv4 address minimun - range_ipv4_max Range ipv4 address maximun - range_ipv6_min Range ipv6 address minimun - range_ipv6_max Range ipv6 address maximun - range_proto_min Range protocol minimum - range_proto_max Range protocol maximun - ================ ====================================================== - - .. 
CAUTION:: - ``NXActionNAT`` must be defined in the actions in the - ``NXActionCT``. - - Example:: - - match = parser.OFPMatch(eth_type=0x0800) - actions += [ - parser.NXActionCT( - flags = 1, - zone_src = "reg0", - zone_ofs_nbits = nicira_ext.ofs_nbits(4, 31), - recirc_table = 255, - alg = 0, - actions = [ - parser.NXActionNAT( - flags = 1, - range_ipv4_min = "10.1.12.0", - range_ipv4_max = "10.1.13.255", - range_ipv6_min = "", - range_ipv6_max = "", - range_proto_min = 1, - range_proto_max = 1023 - ) - ] - ) - ] - """ - _subtype = nicira_ext.NXAST_NAT - - # pad, flags, range_present - _fmt_str = '!2xHH' - # Followed by optional parameters - - _TYPE = { - 'ascii': [ - 'range_ipv4_max', - 'range_ipv4_min', - 'range_ipv6_max', - 'range_ipv6_min', - ], - } - - def __init__( - self, - flags, - range_ipv4_min='', - range_ipv4_max='', - range_ipv6_min='', - range_ipv6_max='', - range_proto_min=None, - range_proto_max=None, - type_=None, len_=None, experimenter=None, subtype=None, - ): - super(NXActionNAT, self).__init__() - self.flags = flags - self.range_ipv4_min = range_ipv4_min - self.range_ipv4_max = range_ipv4_max - self.range_ipv6_min = range_ipv6_min - self.range_ipv6_max = range_ipv6_max - self.range_proto_min = range_proto_min - self.range_proto_max = range_proto_max - - @classmethod - def parser(cls, buf): - ( - flags, - range_present, - ) = struct.unpack_from( - cls._fmt_str, buf, 0, - ) - rest = buf[struct.calcsize(cls._fmt_str):] - # optional parameters - kwargs = dict() - if range_present & nicira_ext.NX_NAT_RANGE_IPV4_MIN: - kwargs['range_ipv4_min'] = type_desc.IPv4Addr.to_user(rest[:4]) - rest = rest[4:] - if range_present & nicira_ext.NX_NAT_RANGE_IPV4_MAX: - kwargs['range_ipv4_max'] = type_desc.IPv4Addr.to_user(rest[:4]) - rest = rest[4:] - if range_present & nicira_ext.NX_NAT_RANGE_IPV6_MIN: - kwargs['range_ipv6_min'] = ( - type_desc.IPv6Addr.to_user(rest[:16]) - ) - rest = rest[16:] - if range_present & nicira_ext.NX_NAT_RANGE_IPV6_MAX: - 
kwargs['range_ipv6_max'] = ( - type_desc.IPv6Addr.to_user(rest[:16]) - ) - rest = rest[16:] - if range_present & nicira_ext.NX_NAT_RANGE_PROTO_MIN: - kwargs['range_proto_min'] = type_desc.Int2.to_user(rest[:2]) - rest = rest[2:] - if range_present & nicira_ext.NX_NAT_RANGE_PROTO_MAX: - kwargs['range_proto_max'] = type_desc.Int2.to_user(rest[:2]) - - return cls(flags, **kwargs) - - def serialize_body(self): - # Pack optional parameters first, as range_present needs - # to be calculated. - optional_data = b'' - range_present = 0 - if self.range_ipv4_min != '': - range_present |= nicira_ext.NX_NAT_RANGE_IPV4_MIN - optional_data += type_desc.IPv4Addr.from_user( - self.range_ipv4_min, - ) - if self.range_ipv4_max != '': - range_present |= nicira_ext.NX_NAT_RANGE_IPV4_MAX - optional_data += type_desc.IPv4Addr.from_user( - self.range_ipv4_max, - ) - if self.range_ipv6_min != '': - range_present |= nicira_ext.NX_NAT_RANGE_IPV6_MIN - optional_data += type_desc.IPv6Addr.from_user( - self.range_ipv6_min, - ) - if self.range_ipv6_max != '': - range_present |= nicira_ext.NX_NAT_RANGE_IPV6_MAX - optional_data += type_desc.IPv6Addr.from_user( - self.range_ipv6_max, - ) - if self.range_proto_min is not None: - range_present |= nicira_ext.NX_NAT_RANGE_PROTO_MIN - optional_data += type_desc.Int2.from_user( - self.range_proto_min, - ) - if self.range_proto_max is not None: - range_present |= nicira_ext.NX_NAT_RANGE_PROTO_MAX - optional_data += type_desc.Int2.from_user( - self.range_proto_max, - ) - - data = bytearray() - msg_pack_into( - self._fmt_str, data, 0, - self.flags, - range_present, - ) - msg_pack_into( - '!%ds' % len(optional_data), data, len(data), - optional_data, - ) - - return data - - class NXActionOutputTrunc(NXAction): - r""" - Truncate output action - - This action truncate a packet into the specified size and outputs it. - - And equivalent to the followings action of ovs-ofctl command. - - .. - output(port=port,max_len=max_len) - .. 
- - +--------------------------------------------------------------+ - | **output(port**\=\ *port*\,\ **max_len**\=\ *max_len*\ **)** | - +--------------------------------------------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - port Output port - max_len Max bytes to send - ================ ====================================================== - - Example:: - - actions += [parser.NXActionOutputTrunc(port=8080, - max_len=1024)] - """ - _subtype = nicira_ext.NXAST_OUTPUT_TRUNC - - # port, max_len - _fmt_str = '!HI' - - def __init__( - self, - port, - max_len, - type_=None, len_=None, experimenter=None, subtype=None, - ): - super(NXActionOutputTrunc, self).__init__() - self.port = port - self.max_len = max_len - - @classmethod - def parser(cls, buf): - ( - port, - max_len, - ) = struct.unpack_from( - cls._fmt_str, buf, 0, - ) - return cls(port, max_len) - - def serialize_body(self): - data = bytearray() - msg_pack_into( - self._fmt_str, data, 0, - self.port, - self.max_len, - ) - return data - - class NXActionEncapEther(NXAction): - """ - Encap Ether - - This action encaps package with ethernet - - And equivalent to the followings action of ovs-ofctl command. 
- - :: - - encap(ethernet) - - Example:: - - actions += [parser.NXActionEncapEther()] - """ - _subtype = nicira_ext.NXAST_RAW_ENCAP - - _fmt_str = '!HI' - - def __init__( - self, - type_=None, len_=None, vendor=None, subtype=None, - ): - super(NXActionEncapEther, self).__init__() - self.hdr_size = 0 - self.new_pkt_type = 0x00000000 - - @classmethod - def parser(cls, buf): - return cls() - - def serialize_body(self): - data = bytearray() - msg_pack_into(self._fmt_str, data, 0, self.hdr_size, self.new_pkt_type) - return data - - class NXActionEncapNsh(NXAction): - """ - Encap nsh - - This action encaps package with nsh - - And equivalent to the followings action of ovs-ofctl command. - - :: - - encap(nsh(md_type=1)) - - Example:: - - actions += [parser.NXActionEncapNsh()] - """ - _subtype = nicira_ext.NXAST_RAW_ENCAP - - _fmt_str = '!HI' - - def __init__( - self, - type_=None, len_=None, vendor=None, subtype=None, - ): - super(NXActionEncapNsh, self).__init__() - self.hdr_size = hdr_size - self.new_pkt_type = 0x0001894F - - @classmethod - def parser(cls, buf): - return cls() - - def serialize_body(self): - data = bytearray() - msg_pack_into(self._fmt_str, data, 0, self.hdr_size, self.new_pkt_type) - return data - - class NXActionDecNshTtl(NXAction): - """ - Decrement NSH TTL action - - This action decrements the TTL in the Network Service Header(NSH). - - This action was added in OVS v2.9. - - And equivalent to the followings action of ovs-ofctl command. 
- - :: - - dec_nsh_ttl - - Example:: - - actions += [parser.NXActionDecNshTtl()] - """ - _subtype = nicira_ext.NXAST_DEC_NSH_TTL - - _fmt_str = '!6x' - - def __init__( - self, - type_=None, len_=None, vendor=None, subtype=None, - ): - super(NXActionDecNshTtl, self).__init__() - - @classmethod - def parser(cls, buf): - return cls() - - def serialize_body(self): - data = bytearray() - msg_pack_into(self._fmt_str, data, 0) - return data - - def add_attr(k, v): - v.__module__ = ofpp.__name__ # Necessary for stringify stuff - setattr(ofpp, k, v) - - add_attr('NXAction', NXAction) - add_attr('NXActionUnknown', NXActionUnknown) - - classes = [ - 'NXActionSetQueue', - 'NXActionPopQueue', - 'NXActionRegLoad', - 'NXActionRegLoad2', - 'NXActionNote', - 'NXActionSetTunnel', - 'NXActionSetTunnel64', - 'NXActionRegMove', - 'NXActionResubmit', - 'NXActionResubmitTable', - 'NXActionOutputReg', - 'NXActionOutputReg2', - 'NXActionLearn', - 'NXActionExit', - 'NXActionDecTtl', - 'NXActionController', - 'NXActionController2', - 'NXActionDecTtlCntIds', - 'NXActionPushMpls', - 'NXActionPopMpls', - 'NXActionSetMplsTtl', - 'NXActionDecMplsTtl', - 'NXActionSetMplsLabel', - 'NXActionSetMplsTc', - 'NXActionStackPush', - 'NXActionStackPop', - 'NXActionSample', - 'NXActionSample2', - 'NXActionFinTimeout', - 'NXActionConjunction', - 'NXActionMultipath', - 'NXActionBundle', - 'NXActionBundleLoad', - 'NXActionCT', - 'NXActionCTClear', - 'NXActionNAT', - 'NXActionOutputTrunc', - '_NXFlowSpec', # exported for testing - 'NXFlowSpecMatch', - 'NXFlowSpecLoad', - 'NXFlowSpecOutput', - 'NXActionEncapNsh', - 'NXActionEncapEther', - 'NXActionDecNshTtl', - ] - vars = locals() - for name in classes: - cls = vars[name] - add_attr(name, cls) - if issubclass(cls, NXAction): - NXAction.register(cls) - if issubclass(cls, _NXFlowSpec): - _NXFlowSpec.register(cls) diff --git a/cwf/gateway/docker/python/Dockerfile b/cwf/gateway/docker/python/Dockerfile index 58eb65bf27ac..eccd7139b669 100644 --- 
a/cwf/gateway/docker/python/Dockerfile +++ b/cwf/gateway/docker/python/Dockerfile @@ -67,6 +67,7 @@ RUN apt-get -y update && apt-get -y install \ pkg-config \ python-cffi \ python3-pip \ + python3-ryu \ python3.8 \ python3.8-dev \ redis-server \ @@ -86,7 +87,6 @@ RUN python3.8 -m pip install --no-cache-dir \ fire \ envoy \ glob2 \ - ryu \ flask \ aiodns \ pymemoize \ @@ -114,8 +114,6 @@ RUN python3.8 -m pip install --no-cache-dir \ aioeventlet@git+https://github.com/magma/deb-python-aioeventlet@86130360db113430370ed6c64d42aee3b47cd619 \ jsonpickle -COPY cwf/gateway/deploy/roles/ovs/files/nx_actions.py /usr/local/lib/python3.8/dist-packages/ryu/ofproto/ - # Temporary workaround to restore uplink bridge flows RUN mkdir -p /var/opt/magma/scripts COPY cwf/gateway/deploy/roles/cwag/files/add_uplink_bridge_flows.sh /var/opt/magma/scripts diff --git a/cwf/gateway/integ_tests/gy_enforcement_test.go b/cwf/gateway/integ_tests/gy_enforcement_test.go index 99ae321d5abc..9c9d1ecbaaa5 100644 --- a/cwf/gateway/integ_tests/gy_enforcement_test.go +++ b/cwf/gateway/integ_tests/gy_enforcement_test.go @@ -115,15 +115,15 @@ func provisionRestrictRules(t *testing.T, tr *TestRunner, ruleManager *RuleManag tr.WaitForPoliciesToSync() } -// - Set an expectation for a CCR-I to be sent up to OCS, to which it will -// respond with a quota grant of 4M. -// Generate traffic and assert the CCR-I is received. -// - Set an expectation for a CCR-U with >80% of data usage to be sent up to -// OCS, to which it will response with more quota. -// Generate traffic and assert the CCR-U is received with final quota grant. -// - Generate 5M traffic to exceed 100% of the quota and trigger session termination -// - Assert that UE flows are deleted. -// - Expect a CCR-T, trigger a UE disconnect, and assert the CCR-T is received. +// - Set an expectation for a CCR-I to be sent up to OCS, to which it will +// respond with a quota grant of 4M. +// Generate traffic and assert the CCR-I is received. 
+// - Set an expectation for a CCR-U with >80% of data usage to be sent up to +// OCS, to which it will response with more quota. +// Generate traffic and assert the CCR-U is received with final quota grant. +// - Generate 5M traffic to exceed 100% of the quota and trigger session termination +// - Assert that UE flows are deleted. +// - Expect a CCR-T, trigger a UE disconnect, and assert the CCR-T is received. func TestGyCreditExhaustionWithCRRU(t *testing.T) { fmt.Println("\nRunning TestGyCreditExhaustionWithCRRU...") tr, ruleManager, ue := ocsTestSetup(t) @@ -274,12 +274,12 @@ func TestGyCreditValidityTime(t *testing.T) { tr.AssertEventuallyAllRulesRemovedAfterDisconnect(imsi) } -// - Set an expectation for a CCR-I to be sent up to OCS, to which it will -// respond with a quota grant of 4M. -// Generate traffic and assert the CCR-I is received. -// - Generate 5M traffic to exceed 100% of the quota and trigger session termination -// - Assert that UE flows are deleted. -// - Expect a CCR-T, trigger a UE disconnect, and assert the CCR-T is received. +// - Set an expectation for a CCR-I to be sent up to OCS, to which it will +// respond with a quota grant of 4M. +// Generate traffic and assert the CCR-I is received. +// - Generate 5M traffic to exceed 100% of the quota and trigger session termination +// - Assert that UE flows are deleted. +// - Expect a CCR-T, trigger a UE disconnect, and assert the CCR-T is received. func TestGyCreditExhaustionWithoutCCRU(t *testing.T) { fmt.Println("\nRunning TestGyCreditExhaustionWithoutCCRU...") @@ -342,9 +342,9 @@ func TestGyCreditExhaustionWithoutCCRU(t *testing.T) { tr.AssertAllGyExpectationsMetNoError() } -// - Set an expectation for a CCR-I to be sent up to OCS, to which it will -// NOT respond with any answer. -// - Asset that authentication fails and that no rules were installed +// - Set an expectation for a CCR-I to be sent up to OCS, to which it will +// NOT respond with any answer. 
+// - Asset that authentication fails and that no rules were installed func TestGyLinksFailureOCStoFEG(t *testing.T) { fmt.Println("\nRunning TestGyLinksFailureOCStoFEG...") @@ -378,22 +378,24 @@ func TestGyLinksFailureOCStoFEG(t *testing.T) { assert.Empty(t, recordsBySubID["IMSI"+ue.Imsi]) } -// - Set an expectation for a CCR-I to be sent up to OCS, to which it will -// respond with a quota grant of 4M and final action set to redirect. -// Generate traffic and assert the CCR-I is received. -// - Generate 5M traffic to exceed 100% of the quota to trigger redirection. -// - When redirection happens, redirect rule is installed on top of the actual rules -// Assert that UE flows are NOT deleted and data was passed -// - Assert redirect rule is installed -// - Send a Charging ReAuth request to top up quota and assert that the -// response is successful -// - Assert that CCR-U was is generated -// - Assert the redirect rule is gone -// - Generate traffic and assert that UE flows are NOT deleted and data was passed. -// - Expect a CCR-T, trigger a UE disconnect, and assert the CCR-T is received. +// - Set an expectation for a CCR-I to be sent up to OCS, to which it will +// respond with a quota grant of 4M and final action set to redirect. +// Generate traffic and assert the CCR-I is received. +// - Generate 5M traffic to exceed 100% of the quota to trigger redirection. +// - When redirection happens, redirect rule is installed on top of the actual rules +// Assert that UE flows are NOT deleted and data was passed +// - Assert redirect rule is installed +// - Send a Charging ReAuth request to top up quota and assert that the +// response is successful +// - Assert that CCR-U was is generated +// - Assert the redirect rule is gone +// - Generate traffic and assert that UE flows are NOT deleted and data was passed. +// - Expect a CCR-T, trigger a UE disconnect, and assert the CCR-T is received. +// // NOTE : the test is only verifying that session was not terminated. 
-// Improvement is needed to validate that ovs rule is well added and -// traffic is being redirected. +// +// Improvement is needed to validate that ovs rule is well added and +// traffic is being redirected. func TestGyCreditExhaustionRedirect(t *testing.T) { fmt.Println("\nRunning TestGyCreditExhaustionRedirect...") @@ -409,7 +411,7 @@ func TestGyCreditExhaustionRedirect(t *testing.T) { quotaGrant := &fegprotos.QuotaGrant{ RatingGroup: 1, GrantedServiceUnit: &fegprotos.Octets{ - TotalOctets: 4 * MegaBytes, + TotalOctets: 1 * MegaBytes, }, IsFinalCredit: true, FinalUnitIndication: &fegprotos.FinalUnitIndication{ @@ -447,19 +449,17 @@ func TestGyCreditExhaustionRedirect(t *testing.T) { // we need to generate over 100% of the quota to trigger a session redirection req := &cwfprotos.GenTrafficRequest{ - Imsi: imsi, - Volume: &wrappers.StringValue{Value: "10M"}, - //Bitrate: &wrappers.StringValue{Value: "100M"}, + Imsi: imsi, + Volume: &wrappers.StringValue{Value: "2M"}, Timeout: 60, } - //time.Sleep(500 * time.Microsecond) _, err := tr.GenULTraffic(req) assert.NoError(t, err) // Check that enforcement stats flow was not removed and data was passed assert.Eventually(t, - tr.WaitForEnforcementStatsForRuleGreaterThan(imsi, "static-pass-all-ocs2", 3*MegaBytes), time.Minute, 2*time.Second) + tr.WaitForEnforcementStatsForRuleGreaterThan(imsi, "static-pass-all-ocs2", 1*MegaBytes), time.Minute, 2*time.Second) // Wait for service deactivation assert.Eventually(t, tr.WaitForEnforcementStatsForRule(imsi, "redirect"), time.Minute, 2*time.Second) @@ -481,7 +481,7 @@ func TestGyCreditExhaustionRedirect(t *testing.T) { // we need to generate more traffic req = &cwfprotos.GenTrafficRequest{ Imsi: imsi, - Volume: &wrappers.StringValue{Value: "2M"}, + Volume: &wrappers.StringValue{Value: "0.5M"}, Bitrate: &wrappers.StringValue{Value: "30M"}, Timeout: 60, } @@ -491,7 +491,11 @@ func TestGyCreditExhaustionRedirect(t *testing.T) { // Check that enforcement stats flow was not removed and 
data was passed assert.Eventually(t, - tr.WaitForEnforcementStatsForRuleGreaterThan(imsi, "static-pass-all-ocs2", 1*MegaBytes), time.Minute, 2*time.Second) + tr.WaitForEnforcementStatsForRuleGreaterThan(imsi, "static-pass-all-ocs2", 0.25*MegaBytes), time.Minute, 2*time.Second) + + // Check that no redirect rule is installed when quota grant is not exhausted + time.Sleep(15 * time.Second) + assert.False(t, tr.WaitForEnforcementStatsForRule(imsi, "redirect")()) // When we initiate a UE disconnect, we expect a terminate request to go up terminateRequest := fegprotos.NewGyCCRequest(imsi, fegprotos.CCRequestType_TERMINATION) @@ -587,7 +591,7 @@ func TestGyAbortSessionRequest(t *testing.T) { err = setNewOCSConfig( &fegprotos.OCSConfig{ - MaxUsageOctets: &fegprotos.Octets{TotalOctets: 8 * MegaBytes}, //we generate more then 5Mbyte traffic, if this is set below 7MB this session will terminate before the ASR goes through + MaxUsageOctets: &fegprotos.Octets{TotalOctets: 8 * MegaBytes}, // we generate more then 5Mbyte traffic, if this is set below 7MB this session will terminate before the ASR goes through MaxUsageTime: ReAuthMaxUsageTimeSec, ValidityTime: ReAuthValidityTime, }, @@ -655,16 +659,16 @@ func TestGyAbortSessionRequest(t *testing.T) { tr.AssertEventuallyAllRulesRemovedAfterDisconnect(imsi) } -// - Set an expectation for a CCR-I to be sent up to OCS, to which it will -// respond with a quota grant of 4M and final action set to redirect. -// Generate traffic and assert the CCR-I is received. -// - Generate 5M traffic to exceed 100% of the quota to trigger service restriction. -// - Assert that UE flows are NOT deleted and data was passed. -// - Generate an additional 2M traffic and assert that only Gy flows matched. -// - Send a Charging ReAuth request to top up quota and assert that the -// response is successful -// - Assert that CCR-U was is generated -// - Generate 2M traffic and assert that UE flows are NOT deleted and data was passed. 
+// - Set an expectation for a CCR-I to be sent up to OCS, to which it will +// respond with a quota grant of 4M and final action set to redirect. +// Generate traffic and assert the CCR-I is received. +// - Generate 5M traffic to exceed 100% of the quota to trigger service restriction. +// - Assert that UE flows are NOT deleted and data was passed. +// - Generate an additional 2M traffic and assert that only Gy flows matched. +// - Send a Charging ReAuth request to top up quota and assert that the +// response is successful +// - Assert that CCR-U was is generated +// - Generate 2M traffic and assert that UE flows are NOT deleted and data was passed. func TestGyCreditExhaustionRestrict(t *testing.T) { fmt.Println("\nRunning TestGyCreditExhaustionRestrict...") @@ -780,15 +784,15 @@ func TestGyCreditExhaustionRestrict(t *testing.T) { tr.AssertEventuallyAllRulesRemovedAfterDisconnect(imsi) } -// - Send a CCA-I with valid credit for a RG but with 4012 error code (transient) -// - Assert that UE flows for that RG are deleted -// - Generate an additional 2M traffic and assert that only Gy flows matched. -// - Assert that Redirect flows are installed -// - Send a Charging ReAuth request to top up quota and assert that the -// response is successful -// - Assert that CCR-U was is generated -// - Generate 2M traffic and assert that UE flows are reinstalled for RG -// and traffic goes through them. +// - Send a CCA-I with valid credit for a RG but with 4012 error code (transient) +// - Assert that UE flows for that RG are deleted +// - Generate an additional 2M traffic and assert that only Gy flows matched. +// - Assert that Redirect flows are installed +// - Send a Charging ReAuth request to top up quota and assert that the +// response is successful +// - Assert that CCR-U was is generated +// - Generate 2M traffic and assert that UE flows are reinstalled for RG +// and traffic goes through them. 
func TestGyCreditTransientErrorRestrict(t *testing.T) { fmt.Println("\nRunning TestGyCreditTransientErrorRestrict...") @@ -896,22 +900,22 @@ func TestGyCreditTransientErrorRestrict(t *testing.T) { // TODO: uncomment once we fix passing the ip to pipelined for cwf // Check that enforcement stats flow was not removed and data passed - //tr.AssertPolicyUsage(imsi, "static-pass-all-ocs1", uint64(math.Round(1.5*MegaBytes)), 3*MegaBytes+Buffer) - //assert.Nil(t, policyUsage["IMSI"+imsi]["restrict-pass-user"], fmt.Sprintf("Policy usage restrict-pass-user for imsi: %v was NOT removed", imsi)) + // tr.AssertPolicyUsage(imsi, "static-pass-all-ocs1", uint64(math.Round(1.5*MegaBytes)), 3*MegaBytes+Buffer) + // assert.Nil(t, policyUsage["IMSI"+imsi]["restrict-pass-user"], fmt.Sprintf("Policy usage restrict-pass-user for imsi: %v was NOT removed", imsi)) // trigger disconnection tr.DisconnectAndAssertSuccess(imsi) tr.AssertEventuallyAllRulesRemovedAfterDisconnect(imsi) } -// - Set an expectation for a CCR-I to be sent up to OCS, to which it will -// respond with a quota grant of 4M with two rules. -// - Generate traffic and assert the CCR-I is received. -// - Set an expectation for a CCR-U with >80% of data usage to be sent up to -// OCS, to which it will respond with an ERROR CODE -// - Send an CCA-U with a 4012 code transient failure which should trigger suspend that credit -// - Assert that UE flows for one rule are delete. -// - Assert that UE flows for the other rule are still valid +// - Set an expectation for a CCR-I to be sent up to OCS, to which it will +// respond with a quota grant of 4M with two rules. +// - Generate traffic and assert the CCR-I is received. +// - Set an expectation for a CCR-U with >80% of data usage to be sent up to +// OCS, to which it will respond with an ERROR CODE +// - Send an CCA-U with a 4012 code transient failure which should trigger suspend that credit +// - Assert that UE flows for one rule are delete. 
+// - Assert that UE flows for the other rule are still valid func TestGyWithTransientErrorCode(t *testing.T) { fmt.Println("\nRunning TestGyWithTransientErrorCode...") @@ -999,13 +1003,13 @@ func TestGyWithTransientErrorCode(t *testing.T) { tr.AssertEventuallyAllRulesRemovedAfterDisconnect(imsi) } -// - Set an expectation for a CCR-I to be sent up to OCS, to which it will -// respond with a quota grant of 4M. -// Generate traffic and assert the CCR-I is received. -// - Generate traffic over 80% and under 100% not to trigger termination -// - Send an CCA-U with a 5xxx code which should trigger termination -// - Assert that UE flows are deleted. -// - Expect a CCR-T, trigger a UE disconnect, and assert the CCR-T is received. +// - Set an expectation for a CCR-I to be sent up to OCS, to which it will +// respond with a quota grant of 4M. +// Generate traffic and assert the CCR-I is received. +// - Generate traffic over 80% and under 100% not to trigger termination +// - Send an CCA-U with a 5xxx code which should trigger termination +// - Assert that UE flows are deleted. +// - Expect a CCR-T, trigger a UE disconnect, and assert the CCR-T is received. func TestGyWithPermanentErrorCode(t *testing.T) { fmt.Println("\nRunning TestGyWithPermanentErrorCode...") diff --git a/cwf/gateway/integ_tests/test_runner.go b/cwf/gateway/integ_tests/test_runner.go index f0765cdcdd5c..139be70cc263 100644 --- a/cwf/gateway/integ_tests/test_runner.go +++ b/cwf/gateway/integ_tests/test_runner.go @@ -257,11 +257,11 @@ func (tr *TestRunner) GenULTraffic(req *cwfprotos.GenTrafficRequest) (*cwfprotos // - If we spend more than wait for time, and we haven't reached totalVolume // // Arguments -// - req: request to pas to UEsim -// - ruleID: name of the rule to monitor -// - totalVolume: total used by that rule. 
Note that if hte rule was used before, you have to add it's previous usage -// So if the rule already used 1Mb and you want to send 1Mb more, you will have to use 2M as min -// - waitFor time out the UE will be sending data +// - req: request to pas to UEsim +// - ruleID: name of the rule to monitor +// - totalVolume: total used by that rule. Note that if hte rule was used before, you have to add it's previous usage +// So if the rule already used 1Mb and you want to send 1Mb more, you will have to use 2M as min +// - waitFor time out the UE will be sending data func (tr *TestRunner) GenULTrafficBasedOnPolicyUsage(req *cwfprotos.GenTrafficRequest, ruleID string, totalVolume uint64, waitFor time.Duration) (*cwfprotos.GenTrafficResponse, error) { fmt.Printf("************* Checking rule %s exists before generating traffic for UE\n", ruleID) diff --git a/cwf/gateway/services/uesim/servicers/uesim.go b/cwf/gateway/services/uesim/servicers/uesim.go index 1a12e8f3c018..fa862542d768 100644 --- a/cwf/gateway/services/uesim/servicers/uesim.go +++ b/cwf/gateway/services/uesim/servicers/uesim.go @@ -214,10 +214,10 @@ func (srv *UESimServer) Disconnect(ctx context.Context, id *cwfprotos.Disconnect // GenTraffic generates traffic using a remote iperf server. The command to be sent is configured using GenTrafficRequest // Note that GenTrafficRequest have parameter that configures iperf client itself, and parameters that configure UESim // Configuration parameters related to the UESim client itself (not iperf) are: -// - timeout: if different than 0 stops iperf externally after n seconds. Use it to avoid the test to hang on a unreachable server -// If the test timesout it will be counted as an error. By default this is 0 (DISABLED) -// - disableServerReachabilityCheck: enables/disables the function to request the server to send the UE small packets to check if -// the server is alive. 
By default this is ENABLED +// - timeout: if different than 0 stops iperf externally after n seconds. Use it to avoid the test to hang on a unreachable server +// If the test timesout it will be counted as an error. By default this is 0 (DISABLED) +// - disableServerReachabilityCheck: enables/disables the function to request the server to send the UE small packets to check if +// the server is alive. By default this is ENABLED func (srv *UESimServer) GenTraffic(ctx context.Context, req *cwfprotos.GenTrafficRequest) (*cwfprotos.GenTrafficResponse, error) { if req == nil { return &cwfprotos.GenTrafficResponse{}, fmt.Errorf("Nil GenTrafficRequest provided") diff --git a/docs/readmes/basics/prerequisites.md b/docs/readmes/basics/prerequisites.md index 902997e6b0c1..a56fa7cb92e4 100644 --- a/docs/readmes/basics/prerequisites.md +++ b/docs/readmes/basics/prerequisites.md @@ -42,7 +42,7 @@ Development can occur from multiple OS's, where **macOS** and **Ubuntu** are **e pyenv install 3.8.10 pyenv global 3.8.10 pip3 install ansible fabric3 jsonpickle requests PyYAML - vagrant plugin install vagrant-vbguest + vagrant plugin install vagrant-vbguest vagrant-disksize vagrant-reload ``` **Note**: In the case where installation of `fabric3` through pip was unsuccessful, @@ -150,7 +150,7 @@ Development can occur from multiple OS's, where **macOS** and **Ubuntu** are **e 5. Install `vagrant` necessary plugin. ```bash - vagrant plugin install vagrant-vbguest + vagrant plugin install vagrant-vbguest vagrant-disksize vagrant-reload ``` Make sure `virtualbox` is the default provider for `vagrant` by adding the following line to your `.bashrc` (or equivalent) and restart your shell: `export VAGRANT_DEFAULT_PROVIDER="virtualbox"`. 
diff --git a/docs/readmes/feg/s1ap_federated_tests.md b/docs/readmes/feg/s1ap_federated_tests.md index 70b26c4a5358..da81950491d7 100644 --- a/docs/readmes/feg/s1ap_federated_tests.md +++ b/docs/readmes/feg/s1ap_federated_tests.md @@ -118,9 +118,9 @@ You can then [run the tests manually](#run-tests-manually). If you want to build the environment manually, you can carry out the following steps. -*Note that commands for the AGW have to be run inside the Vagrant VM. For this reason, +*Note that commands for the AGW and FeG have to be run inside the Vagrant VM. For this reason, all such commands include the `vagrant ssh magma` command first. To leave -Vagrant, just type `exit`. FeG and Orc8r will need to be run on the +Vagrant, just type `exit`. Orc8r will need to be run on the host itself (no Vagrant involved).* - AGW: @@ -148,7 +148,7 @@ vagrant ssh magma # inside vagrant vm cd magma/lte/gateway/python/integ_tests/federated_tests/docker docker-compose build -docker-compose up -d +./run.py ``` - Orc8r: diff --git a/docs/readmes/lte/s1ap_tests.md b/docs/readmes/lte/s1ap_tests.md index ea73b2810935..3d03731c1dca 100644 --- a/docs/readmes/lte/s1ap_tests.md +++ b/docs/readmes/lte/s1ap_tests.md @@ -19,23 +19,44 @@ interfere with the test scenario. Our VM-only tests use 3 Vagrant-managed VMs hosted on the local device (laptop): -- *magma*, i.e. magma-dev or gateway +- *magma* (a.k.a. magma-dev) or *magma_deb*, both of which act as a gateway, with the difference +being the way magma is installed on them (see [below](#gateway-vm-setup) for more details) - *magma_test*, i.e. s1ap_tester - *magma_trfserver*, i.e. an Iperf server to generate uplink/downlink traffic ## Gateway-only tests These tests use all 3 VMs listed above. 
The *magma_test* VM abstracts away the -UE and eNodeB, the *magma_trfserver* emulates the Internet, while the *magma* VM +UE and eNodeB, the *magma_trfserver* emulates the Internet, while the *magma*/*magma_deb* VM acts as the gateway between *magma_test* and *magma_trfserver*. ### Gateway VM setup +There are two options for setting up the gateway VM, with the difference being the magma installation +method: it can either be installed via `make` or from debian packages, the latter being the +method by which magma is usually deployed. For everyday development, the `make` installation is +recommended, while the debian installation is useful for testing packages before release. + +> **Warning**: These two VMs use the same network configuration, so one must only run one of them +at a time. + +#### Make installation + Spin up and provision the gateway VM, then make and start its services: 1. From `magma/lte/gateway` on the host machine: `vagrant up magma && vagrant ssh magma` 1. Now in the gateway VM: `cd $MAGMA_ROOT/lte/gateway && make run` +#### Debian installation + +Spin up the *magma_deb* VM. The services start automatically: + +1. From `magma/lte/gateway` on the host machine: `vagrant up magma_deb && vagrant ssh magma_deb` +1. To check the services are running, run `systemctl list-units --type=service magma@*` + +> **Warning**: During provisioning, the latest magma gateway debian build from the magma artifactory +is installed. That is, the deployed gateway might not match your local repository state. + ### Test VM setup Spin up and provision the s1ap tester's VM, make, then make in the integ_tests directory. @@ -65,7 +86,7 @@ setup. Look at the section below on running traffic tests. ### Running uplink/downlink traffic tests -1. On the *magma* VM, run, `disable-tcp-checksumming` +1. On the *magma* or *magma_deb* VM, run, `disable-tcp-checksumming` 1. On the *magma_test* VM, `disable-tcp-checksumming` @@ -110,7 +131,7 @@ sctpd.service 1. 
Clean up all the state in redis: `redis-cli -p 6380 FLUSHALL`. This might throw a "Could not connect" error if magma@redis service is not running. Start the redis service with `sudo service magma@redis start` and then try again. -1. `cd $MAGMA_ROOT/lte/gateway; make restart` +1. `magma-restart` On test VM: @@ -138,7 +159,7 @@ service file `/etc/systemd/system/magma@mme.service` (you will need sudo privile throw a "Could not connect" error if magma@redis service is not running. Start the redis service with `sudo service magma@redis start` and then try again. -1. `cd $MAGMA_ROOT/lte/gateway; make restart` +1. `magma-restart` On test VM: @@ -167,7 +188,7 @@ service file `/etc/systemd/system/magma@mme.service` (you will need sudo privile throw a "Could not connect" error if magma@redis service is not running. Start the redis service with `sudo service magma@redis start` and then try again. -1. `cd $MAGMA_ROOT/lte/gateway; make restart` +1. `magma-restart` On test VM: diff --git a/docs/readmes/orc8r/deploy_install.md b/docs/readmes/orc8r/deploy_install.md index 81d6c2740f08..972682b77cd5 100644 --- a/docs/readmes/orc8r/deploy_install.md +++ b/docs/readmes/orc8r/deploy_install.md @@ -136,7 +136,7 @@ override the following parameters - `orc8r_db_engine_version` on fresh Orc8r installs, target Postgres `12.6` Make sure that the `source` variables for the module definitions point to -`github.com/magma/magma//orc8r/cloud/deploy/terraform/MODULE?ref=v1.6`. +`github.com/magma/magma//orc8r/cloud/deploy/terraform/MODULE?ref=v1.8`. Adjust any other parameters as you see fit. Check the READMEs for the relevant Terraform modules to see additional variables that can be set. 
You can [override values](./deploy_terraform_options.md#override-terraform-module-values) diff --git a/docs/readmes/orc8r/deploy_intro.md b/docs/readmes/orc8r/deploy_intro.md index 59a9ba34db6a..16c236c1b4e2 100644 --- a/docs/readmes/orc8r/deploy_intro.md +++ b/docs/readmes/orc8r/deploy_intro.md @@ -64,6 +64,15 @@ To target a specific release, checkout the Magma repository's relevant release b Values for recent Orchestrator releases are summarized below +### v1.8.0 + +Verified with Terraform version `1.0.11`. + +- `v1.8` [patch branch](https://github.com/magma/magma/tree/v1.8) +- `github.com/magma/magma//orc8r/cloud/deploy/terraform/orc8r-aws?ref=v1.8` +Terraform module source +- `1.8.0` Helm chart version + ### v1.6.0 Verified with Terraform version `0.15.0`. diff --git a/dp/cloud/configs/dp.yml b/dp/cloud/configs/dp.yml index bc774d332b81..e233a0a17279 100644 --- a/dp/cloud/configs/dp.yml +++ b/dp/cloud/configs/dp.yml @@ -13,7 +13,15 @@ # interval after which cbsd is considered inactive # time is measured since last get state grpc request to # radio controller service in domain proxy -cbsd_inactivity_interval_sec: 14400 - -# URL where DP logs are sent -log_consumer_url: "http://domain-proxy-fluentd:9888/dp" +dp_backend: + cbsd_inactivity_interval_sec: 14400 + log_consumer_url: "http://domain-proxy-fluentd:9888/dp" +active_mode_controller: + dial_timeout_sec: 60 + heartbeat_send_timeout_sec: 10 + request_timeout_sec: 5 + request_processing_interval_sec: 10 + polling_interval: 10 + grpc_service: 'domain-proxy-radio-controller' + grpc_port: 50053 + cbsd_inactivity_interval_sec: 14400 diff --git a/dp/cloud/docker/python/radio_controller/Dockerfile b/dp/cloud/docker/python/radio_controller/Dockerfile index bc03729cb36f..d7d1e9dc8084 100644 --- a/dp/cloud/docker/python/radio_controller/Dockerfile +++ b/dp/cloud/docker/python/radio_controller/Dockerfile @@ -1,59 +1,5 @@ -ARG ENV=standard -FROM python:3.9.2-slim-buster as protos-generator - -RUN apt-get update && apt-get 
install -y --no-install-recommends curl zip make unzip -RUN curl -Lfs https://github.com/protocolbuffers/protobuf/releases/download/v3.10.0/protoc-3.10.0-linux-x86_64.zip \ - -o protoc3.zip -RUN unzip protoc3.zip -d protoc3 &&\ - mv protoc3/bin/protoc /bin/protoc &&\ - chmod a+rx /bin/protoc &&\ - mkdir -p /magma &&\ - mv ./protoc3/include/google /magma/google &&\ - rm -rf protoc3.zip protoc3 -RUN pip3 install --no-cache-dir protobuf==3.19.1 setuptools==49.6.0 grpcio==1.37.1 grpcio-tools==1.37.1 -ENV MAGMA_ROOT=/magma -ENV PYTHONPATH=$MAGMA_ROOT:$MAGMA_ROOT/build/gen -ENV PYTHON_BUILD=$MAGMA_ROOT/build -COPY ./protos $MAGMA_ROOT/protos/ -COPY ./orc8r/protos $MAGMA_ROOT/orc8r/protos/ -COPY ./lte/protos $MAGMA_ROOT/lte/protos/ -COPY ./feg/protos $MAGMA_ROOT/feg/protos/ -COPY ./cwf/protos $MAGMA_ROOT/cwf/protos/ -COPY ./dp $MAGMA_ROOT/dp/ -WORKDIR /magma/dp -RUN mkdir -p $PYTHON_BUILD && make protos - -FROM python:3.9.2-slim-buster as standard-version +# TODO remove AMC from deployment scripts -ENV RC_DIRECTORY=dp/cloud/python/magma/radio_controller -ENV DB_DIRECTORY=dp/cloud/python/magma/db_service -ENV FC_DIRECTORY=dp/cloud/python/magma/fluentd_client -ENV MC_DIRECTORY=dp/cloud/python/magma/metricsd_client -ENV GW_COMMON_DIRECTORY=orc8r/gateway/python/magma/common -COPY $RC_DIRECTORY/requirements.txt \ - /$RC_DIRECTORY/requirements.txt -RUN pip3 install --upgrade pip --no-cache-dir -r /$RC_DIRECTORY/requirements.txt - -#FROM standard-version as tests-version -# -#COPY $RC_DIRECTORY/tests/requirements.txt \ -# /$RC_DIRECTORY/tests/requirements.txt -#WORKDIR /$RC_DIRECTORY -#RUN pip3 install --upgrade pip --no-cache-dir -r tests/requirements.txt - -# hadolint ignore=DL3006 -FROM ${ENV}-version as final +FROM python:3.9.2-slim-buster as protos-generator -ENV PYTHONPATH=/magma/build/gen:/dp/cloud/python:/orc8r/gateway/python -COPY $RC_DIRECTORY /$RC_DIRECTORY/ -COPY $DB_DIRECTORY /$DB_DIRECTORY/ -COPY $FC_DIRECTORY /$FC_DIRECTORY/ -COPY $MC_DIRECTORY /$MC_DIRECTORY/ 
-COPY dp/cloud/python/magma/mappings /dp/cloud/python/magma/mappings/ -COPY $GW_COMMON_DIRECTORY/metrics_export.py /$GW_COMMON_DIRECTORY/metrics_export.py -COPY $GW_COMMON_DIRECTORY/__init__.py /$GW_COMMON_DIRECTORY/__init__.py -COPY --from=protos-generator /magma/build/gen /magma/build/gen -WORKDIR /$RC_DIRECTORY -EXPOSE 50053 -ENTRYPOINT ["python"] -CMD ["run.py"] +CMD ["sleep", "infinity"] \ No newline at end of file diff --git a/dp/cloud/go/active_mode_controller/cmd/main.go b/dp/cloud/go/active_mode_controller/cmd/main.go deleted file mode 100644 index f5ffdfffe329..000000000000 --- a/dp/cloud/go/active_mode_controller/cmd/main.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Copyright 2022 The Magma Authors. - -This source code is licensed under the BSD-style license found in the -LICENSE file in the root directory of this source tree. - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package main - -import ( - "context" - "log" - "math/rand" - "os" - - "magma/dp/cloud/go/active_mode_controller/config" - "magma/dp/cloud/go/active_mode_controller/internal/app" - "magma/dp/cloud/go/active_mode_controller/internal/signal" - "magma/dp/cloud/go/active_mode_controller/internal/time" -) - -func main() { - cfg, err := config.Read() - if err != nil { - log.Printf("failed to read config: %s", err) - os.Exit(1) - } - clock := &time.Clock{} - seed := rand.NewSource(clock.Now().Unix()) - a := app.NewApp( - app.WithConfig(cfg), - app.WithClock(clock), - app.WithRNG(rand.New(seed)), - ) - ctx := context.Background() - if err := signal.Run(ctx, a); err != nil && err != context.Canceled { - log.Printf("failed to stop app: %s", err) - os.Exit(1) - } -} diff --git a/dp/cloud/go/active_mode_controller/config/config.go b/dp/cloud/go/active_mode_controller/config/config.go deleted file mode 100644 index 56d6542d48c6..000000000000 --- a/dp/cloud/go/active_mode_controller/config/config.go +++ /dev/null @@ -1,78 +0,0 @@ -/* -Copyright 2022 The Magma Authors. - -This source code is licensed under the BSD-style license found in the -LICENSE file in the root directory of this source tree. - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package config - -import ( - "time" - - "github.com/kelseyhightower/envconfig" -) - -type Config struct { - DialTimeout time.Duration - HeartbeatSendTimeout time.Duration - RequestTimeout time.Duration - RequestProcessingInterval time.Duration - PollingInterval time.Duration - GrpcService string - GrpcPort int - CbsdInactivityTimeout time.Duration -} - -// Unfortunately it was decided that timeouts should not have units -// and this is why this proxy config is needed -type appConfig struct { - DialTimeoutSec int `envconfig:"DIAL_TIMEOUT_SEC"` - HeartbeatSendTimeoutSec int `envconfig:"HEARTBEAT_SEND_TIMEOUT_SEC"` - RequestTimeoutSec int `envconfig:"REQUEST_TIMEOUT_SEC"` - RequestProcessingIntervalSec int `envconfig:"REQUEST_PROCESSING_INTERVAL_SEC"` - PollingIntervalSec int `envconfig:"POLLING_INTERVAL_SEC"` - GrpcService string `envconfig:"GRPC_SERVICE"` - GrpcPort int `envconfig:"GRPC_PORT"` - CbsdInactivityTimeoutSec int `envconfig:"CBSD_INACTIVITY_TIMEOUT_SEC"` -} - -func Read() (*Config, error) { - cfg := &appConfig{ - DialTimeoutSec: 60, - HeartbeatSendTimeoutSec: 10, - RequestTimeoutSec: 5, - RequestProcessingIntervalSec: 10, - PollingIntervalSec: 10, - GrpcService: "domain-proxy-radio-controller", - GrpcPort: 50053, - CbsdInactivityTimeoutSec: 4 * 60 * 60, - } - if err := envconfig.Process("", cfg); err != nil { - return nil, err - } - return toAppConfig(cfg), nil -} - -func toAppConfig(c *appConfig) *Config { - return &Config{ - DialTimeout: secToDuration(c.DialTimeoutSec), - HeartbeatSendTimeout: secToDuration(c.HeartbeatSendTimeoutSec), - RequestTimeout: secToDuration(c.RequestTimeoutSec), - RequestProcessingInterval: secToDuration(c.RequestProcessingIntervalSec), - PollingInterval: secToDuration(c.PollingIntervalSec), - GrpcService: c.GrpcService, - GrpcPort: c.GrpcPort, - CbsdInactivityTimeout: secToDuration(c.CbsdInactivityTimeoutSec), - } -} - -func secToDuration(s int) time.Duration { - return time.Duration(s) * time.Second -} diff --git 
a/dp/cloud/go/active_mode_controller/config/config_test.go b/dp/cloud/go/active_mode_controller/config/config_test.go deleted file mode 100644 index 84fe3d1f3f8f..000000000000 --- a/dp/cloud/go/active_mode_controller/config/config_test.go +++ /dev/null @@ -1,107 +0,0 @@ -/* -Copyright 2022 The Magma Authors. - -This source code is licensed under the BSD-style license found in the -LICENSE file in the root directory of this source tree. - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package config_test - -import ( - "os" - "testing" - "time" - - "github.com/stretchr/testify/suite" - - "magma/dp/cloud/go/active_mode_controller/config" -) - -const ( - dialTimeoutKey = "DIAL_TIMEOUT_SEC" - heartbeatSendTimeoutKey = "HEARTBEAT_SEND_TIMEOUT_SEC" - requestTimeoutKey = "REQUEST_TIMEOUT_SEC" - requestProcessingIntervalKey = "REQUEST_PROCESSING_INTERVAL_SEC" - pollingIntervalKey = "POLLING_INTERVAL_SEC" - grpcServiceKey = "GRPC_SERVICE" - grpcPortKey = "GRPC_PORT" - cbsdInactivityTimeoutKey = "CBSD_INACTIVITY_TIMEOUT_SEC" -) - -func TestConfigTestSuite(t *testing.T) { - suite.Run(t, &ConfigTestSuite{}) -} - -type ConfigTestSuite struct { - suite.Suite - env map[string]string -} - -func (s *ConfigTestSuite) SetupTest() { - s.env = map[string]string{ - dialTimeoutKey: os.Getenv(dialTimeoutKey), - heartbeatSendTimeoutKey: os.Getenv(heartbeatSendTimeoutKey), - requestTimeoutKey: os.Getenv(requestTimeoutKey), - requestProcessingIntervalKey: os.Getenv(requestProcessingIntervalKey), - pollingIntervalKey: os.Getenv(pollingIntervalKey), - grpcServiceKey: os.Getenv(grpcServiceKey), - grpcPortKey: os.Getenv(grpcPortKey), - cbsdInactivityTimeoutKey: os.Getenv(cbsdInactivityTimeoutKey), - } -} - -func (s 
*ConfigTestSuite) TearDownTest() { - for key, value := range s.env { - err := os.Setenv(key, value) - s.NoError(err) - } -} - -func (s *ConfigTestSuite) TestReadDefaultValues() { - actual, err := config.Read() - s.NoError(err) - - expected := &config.Config{ - DialTimeout: time.Second * 60, - HeartbeatSendTimeout: time.Second * 10, - RequestTimeout: time.Second * 5, - RequestProcessingInterval: time.Second * 10, - PollingInterval: time.Second * 10, - GrpcService: "domain-proxy-radio-controller", - GrpcPort: 50053, - CbsdInactivityTimeout: time.Hour * 4, - } - s.Equal(expected, actual) -} - -func (s *ConfigTestSuite) TestReadFromEnv() { - s.NoError(os.Setenv(dialTimeoutKey, "1")) - s.NoError(os.Setenv(heartbeatSendTimeoutKey, "4")) - s.NoError(os.Setenv(requestTimeoutKey, "2")) - s.NoError(os.Setenv(requestProcessingIntervalKey, "5")) - s.NoError(os.Setenv(pollingIntervalKey, "3")) - s.NoError(os.Setenv(grpcServiceKey, "some_grpc_service")) - s.NoError(os.Setenv(grpcPortKey, "1234")) - s.NoError(os.Setenv(cbsdInactivityTimeoutKey, "6")) - - actual, err := config.Read() - s.NoError(err) - - expected := &config.Config{ - DialTimeout: time.Second * 1, - HeartbeatSendTimeout: time.Second * 4, - RequestTimeout: time.Second * 2, - RequestProcessingInterval: time.Second * 5, - PollingInterval: time.Second * 3, - GrpcService: "some_grpc_service", - GrpcPort: 1234, - CbsdInactivityTimeout: time.Second * 6, - } - s.Equal(expected, actual) -} diff --git a/dp/cloud/go/active_mode_controller/go.mod b/dp/cloud/go/active_mode_controller/go.mod deleted file mode 100644 index e0eec7a7de98..000000000000 --- a/dp/cloud/go/active_mode_controller/go.mod +++ /dev/null @@ -1,22 +0,0 @@ -module magma/dp/cloud/go/active_mode_controller - -go 1.18 - -require ( - github.com/golang/protobuf v1.5.0 - github.com/kelseyhightower/envconfig v1.4.0 - github.com/stretchr/testify v1.7.0 - golang.org/x/exp v0.0.0-20220713135740-79cabaa25d75 - google.golang.org/grpc v1.40.0 - google.golang.org/protobuf 
v1.27.1 -) - -require ( - github.com/davecgh/go-spew v1.1.0 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - golang.org/x/net v0.0.0-20200822124328-c89045814202 // indirect - golang.org/x/sys v0.0.0-20211019181941-9d821ace8654 // indirect - golang.org/x/text v0.3.0 // indirect - google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 // indirect - gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c // indirect -) diff --git a/dp/cloud/go/active_mode_controller/go.sum b/dp/cloud/go/active_mode_controller/go.sum deleted file mode 100644 index f52a6b1f7fba..000000000000 --- a/dp/cloud/go/active_mode_controller/go.sum +++ /dev/null @@ -1,127 +0,0 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod 
h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod 
h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0 h1:LUVKkCeviFUMKqHa4tXIIij/lbhnMbP7Fn5wKdKkRh4= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8= -github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= 
-github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20220713135740-79cabaa25d75 h1:x03zeu7B2B11ySp+daztnwM5oBJ/8wGUSqrwcw9L0RA= -golang.org/x/exp v0.0.0-20220713135740-79cabaa25d75/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20200822124328-c89045814202 h1:VvcQYSHwXgi7W+TpUR6A9g6Up98WAHf3f/ulnJ62IyA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod 
h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20211019181941-9d821ace8654 h1:id054HUawV2/6IGm2IV8KZQjqtwAOo2CYlOToYqa0d0= -golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors 
v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.40.0 h1:AGJ0Ih4mHjSeibYkFGh1dD9KJ/eOtZ93I6hoHhukQ5Q= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod 
h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/dp/cloud/go/active_mode_controller/internal/app/app.go b/dp/cloud/go/active_mode_controller/internal/app/app.go deleted file mode 100644 index 9e8ebf19c4e1..000000000000 --- a/dp/cloud/go/active_mode_controller/internal/app/app.go +++ /dev/null @@ -1,141 +0,0 @@ -/* -Copyright 2022 The Magma Authors. 
- -This source code is licensed under the BSD-style license found in the -LICENSE file in the root directory of this source tree. - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package app - -import ( - "context" - "fmt" - "log" - "net" - "time" - - "google.golang.org/grpc" - - "magma/dp/cloud/go/active_mode_controller/config" - "magma/dp/cloud/go/active_mode_controller/internal/message_generator" - "magma/dp/cloud/go/active_mode_controller/protos/active_mode" -) - -type App struct { - additionalGrpcOpts []grpc.DialOption - clock Clock - rng message_generator.RNG - cfg *config.Config -} - -func NewApp(options ...Option) *App { - a := &App{} - for _, o := range options { - o(a) - } - return a -} - -type Clock interface { - Now() time.Time - Tick(duration time.Duration) *time.Ticker -} - -type Option func(*App) - -type Dialer func(context.Context, string) (net.Conn, error) - -func WithDialer(dialer Dialer) Option { - return func(a *App) { - a.additionalGrpcOpts = append(a.additionalGrpcOpts, grpc.WithContextDialer(dialer)) - } -} - -func WithRNG(rng message_generator.RNG) Option { - return func(a *App) { - a.rng = rng - } -} - -func WithClock(clock Clock) Option { - return func(a *App) { - a.clock = clock - } -} - -func WithConfig(cfg *config.Config) Option { - return func(a *App) { - a.cfg = cfg - } -} - -func (a *App) Run(ctx context.Context) error { - conn, err := a.connect(ctx) - if err != nil { - return err - } - defer conn.Close() - client := active_mode.NewActiveModeControllerClient(conn) - ticker := a.clock.Tick(a.cfg.PollingInterval) - defer ticker.Stop() - generator := newGenerator(a.cfg, a.rng) - for { - select { - case <-ctx.Done(): - return ctx.Err() - case <-ticker.C: - state, err := 
a.getState(ctx, client) - if err != nil { - log.Printf("failed to get state: %s", err) - continue - } - messages := generator.GenerateMessages(state, a.clock.Now()) - for _, msg := range messages { - if err := a.sendMessage(ctx, client, msg); err != nil { - log.Printf("failed to send message '%s': %s", msg, err) - } - } - } - } -} - -func newGenerator(cfg *config.Config, rng message_generator.RNG) messageGenerator { - return message_generator.NewMessageGenerator( - cfg.HeartbeatSendTimeout+cfg.PollingInterval+cfg.RequestProcessingInterval, - cfg.CbsdInactivityTimeout, - rng, - ) -} - -type messageGenerator interface { - GenerateMessages(*active_mode.State, time.Time) []message_generator.Message -} - -func (a *App) connect(ctx context.Context) (*grpc.ClientConn, error) { - opts := []grpc.DialOption{grpc.WithInsecure(), grpc.WithBlock()} - opts = append(opts, a.additionalGrpcOpts...) - dialCtx, cancel := context.WithTimeout(ctx, a.cfg.DialTimeout) - defer cancel() - addr := fmt.Sprintf("%s:%d", a.cfg.GrpcService, a.cfg.GrpcPort) - return grpc.DialContext(dialCtx, addr, opts...) 
-} - -func (a *App) getState(ctx context.Context, c active_mode.ActiveModeControllerClient) (*active_mode.State, error) { - log.Printf("getting state") - reqCtx, cancel := context.WithTimeout(ctx, a.cfg.RequestTimeout) - defer cancel() - return c.GetState(reqCtx, &active_mode.GetStateRequest{}) -} - -func (a *App) sendMessage(ctx context.Context, client active_mode.ActiveModeControllerClient, msg message_generator.Message) error { - log.Printf("sending message: %s", msg) - reqCtx, cancel := context.WithTimeout(ctx, a.cfg.RequestTimeout) - defer cancel() - return msg.Send(reqCtx, client) -} diff --git a/dp/cloud/go/active_mode_controller/internal/app/app_test.go b/dp/cloud/go/active_mode_controller/internal/app/app_test.go deleted file mode 100644 index 9aae6b60cbc4..000000000000 --- a/dp/cloud/go/active_mode_controller/internal/app/app_test.go +++ /dev/null @@ -1,305 +0,0 @@ -/* -Copyright 2022 The Magma Authors. - -This source code is licensed under the BSD-style license found in the -LICENSE file in the root directory of this source tree. - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package app_test - -import ( - "context" - "errors" - "fmt" - "net" - "strings" - "testing" - "time" - - "github.com/golang/protobuf/ptypes/empty" - "github.com/stretchr/testify/suite" - "google.golang.org/grpc" - "google.golang.org/grpc/test/bufconn" - - "magma/dp/cloud/go/active_mode_controller/config" - "magma/dp/cloud/go/active_mode_controller/internal/app" - "magma/dp/cloud/go/active_mode_controller/internal/test_utils/builders" - "magma/dp/cloud/go/active_mode_controller/protos/active_mode" -) - -const ( - bufferSize = 16 - timeout = time.Millisecond * 50 - heartbeatTimeout = time.Second * 10 - pollingTimeout = time.Second * 20 -) - -func TestAppTestSuite(t *testing.T) { - suite.Run(t, &AppTestSuite{}) -} - -type AppTestSuite struct { - suite.Suite - clock *stubClock - activeModeController *stubActiveModeControllerService - appDone chan error - cancel context.CancelFunc - dialer app.Dialer - grpcServerDone chan error - grpcServer *grpc.Server -} - -func (s *AppTestSuite) SetupTest() { - s.clock = &stubClock{ticker: make(chan time.Time, bufferSize)} - s.activeModeController = &stubActiveModeControllerService{ - requests: make(chan *active_mode.RequestPayload, bufferSize), - states: make(chan *active_mode.State, bufferSize), - err: make(chan error, bufferSize), - } - s.givenGrpcServer() - s.givenAppRunning() -} - -func (s *AppTestSuite) TearDownTest() { - s.whenAppWasShutdown() - s.thenAppWasShutdown() - s.thenNoOtherRequestWasReceived() - - s.whenGrpcServerWasShutdown() - s.thenGrpcServerWasShutdown() -} - -func (s *AppTestSuite) TestGetStateAndSendRequests() { - s.givenState(buildSomeState("some")) - s.whenTickerFired() - s.thenRequestsWereEventuallyReceived(getExpectedRequests("some")) -} - -// TODO cleanup this -func (s *AppTestSuite) TestCalculateHeartbeatDeadline() { - const interval = 50 * time.Second - const delta = heartbeatTimeout + pollingTimeout - now := s.clock.Now() - base := now.Add(delta - interval) - timestamps := 
[]time.Time{base.Add(time.Second), base} - s.givenState(buildStateWithAuthorizedGrants("some", interval, timestamps...)) - s.whenTickerFired() - s.thenRequestsWereEventuallyReceived(getExpectedHeartbeatRequests("some", "1")) -} - -func (s *AppTestSuite) TestAppWorkInALoop() { - s.givenState(buildSomeState("some")) - s.whenTickerFired() - s.thenRequestsWereEventuallyReceived(getExpectedRequests("some")) - - s.givenState(buildSomeState("other")) - s.whenTickerFired() - s.thenRequestsWereEventuallyReceived(getExpectedRequests("other")) -} - -func (s *AppTestSuite) TestContinueWhenFailedToGetState() { - s.givenStateError(errors.New("some error")) - s.whenTickerFired() - - s.givenState(buildSomeState("some")) - s.whenTickerFired() - s.thenRequestsWereEventuallyReceived(getExpectedRequests("some")) -} - -func (s *AppTestSuite) givenAppRunning() { - s.appDone = make(chan error) - ctx, cancel := context.WithCancel(context.Background()) - s.cancel = cancel - a := app.NewApp( - app.WithDialer(s.dialer), - app.WithClock(s.clock), - app.WithRNG(stubRNG{}), - app.WithConfig(&config.Config{ - DialTimeout: timeout, - HeartbeatSendTimeout: heartbeatTimeout, - RequestTimeout: timeout, - PollingInterval: pollingTimeout, - RequestProcessingInterval: timeout, - GrpcService: "", - GrpcPort: 0, - CbsdInactivityTimeout: timeout, - }), - ) - go func() { - s.appDone <- a.Run(ctx) - }() -} - -func (s *AppTestSuite) givenGrpcServer() { - listener := bufconn.Listen(bufferSize) - s.grpcServer = grpc.NewServer() - active_mode.RegisterActiveModeControllerServer(s.grpcServer, s.activeModeController) - s.grpcServerDone = make(chan error) - go func() { - s.grpcServerDone <- s.grpcServer.Serve(listener) - }() - s.dialer = func(_ context.Context, _ string) (net.Conn, error) { - return listener.Dial() - } -} - -func (s *AppTestSuite) givenState(state *active_mode.State) { - s.activeModeController.states <- state - s.activeModeController.err <- nil -} - -func (s *AppTestSuite) givenStateError(err 
error) { - s.activeModeController.states <- nil - s.activeModeController.err <- err -} - -func (s *AppTestSuite) whenAppWasShutdown() { - s.cancel() -} - -func (s *AppTestSuite) whenGrpcServerWasShutdown() { - s.grpcServer.Stop() -} - -func (s *AppTestSuite) whenTickerFired() { - s.clock.ticker <- time.Time{} -} - -func (s *AppTestSuite) thenAppWasShutdown() { - select { - case err := <-s.appDone: - s.EqualError(err, context.Canceled.Error()) - return - case <-time.After(timeout): - s.Fail("failed to stop app") - } -} - -func (s *AppTestSuite) thenGrpcServerWasShutdown() { - select { - case err := <-s.grpcServerDone: - s.NoError(err) - return - case <-time.After(timeout): - s.Fail("failed to stop grpc server") - } -} - -func (s *AppTestSuite) thenRequestsWereEventuallyReceived(expectedRequests []*active_mode.RequestPayload) { - timer := time.After(timeout) - for _, expected := range expectedRequests { - select { - case actual := <-s.activeModeController.requests: - s.JSONEq(expected.Payload, actual.Payload) - case <-timer: - s.Fail("Waiting for requests timed out") - } - } -} - -func (s *AppTestSuite) thenNoOtherRequestWasReceived() { - select { - case actual := <-s.activeModeController.requests: - s.Failf("Expected no more requests, got: %s", actual.Payload) - default: - } -} - -func buildSomeState(names ...string) *active_mode.State { - cbsds := make([]*active_mode.Cbsd, len(names)) - for i, name := range names { - cbsds[i] = builders.NewCbsdBuilder(). - WithState(active_mode.CbsdState_Unregistered). - WithName(name). - Build() - } - return &active_mode.State{Cbsds: cbsds} -} - -func buildStateWithAuthorizedGrants(name string, interval time.Duration, timestamps ...time.Time) *active_mode.State { - b := builders.NewCbsdBuilder(). - WithName(name). - WithChannel(builders.SomeChannel). - WithAvailableFrequencies(builders.NoAvailableFrequencies). 
- WithCarrierAggregation() - for i, timestamp := range timestamps { - b.WithGrant(&active_mode.Grant{ - Id: fmt.Sprintf("%d", i), - State: active_mode.GrantState_Authorized, - HeartbeatIntervalSec: int64(interval / time.Second), - LastHeartbeatTimestamp: timestamp.Unix(), - LowFrequencyHz: int64(3550+10*i) * 1e6, - HighFrequencyHz: int64(3550+10*(i+1)) * 1e6, - }) - } - return &active_mode.State{Cbsds: []*active_mode.Cbsd{b.Build()}} -} - -func getExpectedRequests(name string) []*active_mode.RequestPayload { - const template = `{"registrationRequest":[%s]}` - request := fmt.Sprintf(template, getExpectedSingleRequest(name)) - return []*active_mode.RequestPayload{{Payload: request}} -} - -func getExpectedSingleRequest(name string) string { - const template = `{"userId":"%[1]s","fccId":"%[1]s","cbsdSerialNumber":"%[1]s"}` - return fmt.Sprintf(template, name) -} - -func getExpectedHeartbeatRequests(id string, grantIds ...string) []*active_mode.RequestPayload { - if len(grantIds) == 0 { - return nil - } - reqs := make([]string, len(grantIds)) - for i, grantId := range grantIds { - reqs[i] = getExpectedHeartbeatRequest(id, grantId) - } - const template = `{"heartbeatRequest":[%s]}` - payload := fmt.Sprintf(template, strings.Join(reqs, ",")) - return []*active_mode.RequestPayload{{Payload: payload}} -} - -func getExpectedHeartbeatRequest(id string, grantId string) string { - const template = `{"cbsdId":"%s","grantId":"%s","operationState":"AUTHORIZED"}` - return fmt.Sprintf(template, id, grantId) -} - -type stubRNG struct{} - -func (stubRNG) Int() int { - return 0 -} - -type stubClock struct { - ticker chan time.Time -} - -func (s *stubClock) Now() time.Time { - return time.Unix(builders.Now, 0) -} - -func (s *stubClock) Tick(_ time.Duration) *time.Ticker { - return &time.Ticker{C: s.ticker} -} - -type stubActiveModeControllerService struct { - active_mode.UnimplementedActiveModeControllerServer - requests chan *active_mode.RequestPayload - states chan *active_mode.State 
- err chan error -} - -func (s *stubActiveModeControllerService) GetState(_ context.Context, _ *active_mode.GetStateRequest) (*active_mode.State, error) { - return <-s.states, <-s.err -} - -func (s *stubActiveModeControllerService) UploadRequests(_ context.Context, in *active_mode.RequestPayload) (*empty.Empty, error) { - s.requests <- in - return &empty.Empty{}, nil -} diff --git a/dp/cloud/go/active_mode_controller/internal/message_generator/message/delete.go b/dp/cloud/go/active_mode_controller/internal/message_generator/message/delete.go deleted file mode 100644 index 024a7bfa0a33..000000000000 --- a/dp/cloud/go/active_mode_controller/internal/message_generator/message/delete.go +++ /dev/null @@ -1,39 +0,0 @@ -/* -Copyright 2022 The Magma Authors. - -This source code is licensed under the BSD-style license found in the -LICENSE file in the root directory of this source tree. - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package message - -import ( - "context" - "fmt" - - "magma/dp/cloud/go/active_mode_controller/protos/active_mode" -) - -func NewDeleteMessage(id int64) *deleteMessage { - return &deleteMessage{id: id} -} - -type deleteMessage struct { - id int64 -} - -func (d *deleteMessage) Send(ctx context.Context, client active_mode.ActiveModeControllerClient) error { - req := &active_mode.DeleteCbsdRequest{Id: d.id} - _, err := client.DeleteCbsd(ctx, req) - return err -} - -func (d *deleteMessage) String() string { - return fmt.Sprintf("delete: %d", d.id) -} diff --git a/dp/cloud/go/active_mode_controller/internal/message_generator/message/delete_test.go b/dp/cloud/go/active_mode_controller/internal/message_generator/message/delete_test.go deleted file mode 100644 index 5ee18d0fc0b3..000000000000 --- a/dp/cloud/go/active_mode_controller/internal/message_generator/message/delete_test.go +++ /dev/null @@ -1,56 +0,0 @@ -/* -Copyright 2022 The Magma Authors. - -This source code is licensed under the BSD-style license found in the -LICENSE file in the root directory of this source tree. - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package message_test - -import ( - "context" - "fmt" - "testing" - - "github.com/golang/protobuf/ptypes/empty" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "google.golang.org/grpc" - - "magma/dp/cloud/go/active_mode_controller/internal/message_generator/message" - "magma/dp/cloud/go/active_mode_controller/protos/active_mode" -) - -const id = 123 - -func TestDeleteMessageString(t *testing.T) { - m := message.NewDeleteMessage(id) - expected := fmt.Sprintf("delete: %d", id) - assert.Equal(t, expected, m.String()) -} - -func TestDeleteMessageSend(t *testing.T) { - client := &stubDeleteClient{} - - m := message.NewDeleteMessage(id) - require.NoError(t, m.Send(context.Background(), client)) - - expected := &active_mode.DeleteCbsdRequest{Id: id} - assert.Equal(t, expected, client.req) -} - -type stubDeleteClient struct { - active_mode.ActiveModeControllerClient - req *active_mode.DeleteCbsdRequest -} - -func (s *stubDeleteClient) DeleteCbsd(_ context.Context, in *active_mode.DeleteCbsdRequest, _ ...grpc.CallOption) (*empty.Empty, error) { - s.req = in - return &empty.Empty{}, nil -} diff --git a/dp/cloud/go/active_mode_controller/internal/message_generator/message/relinquish.go b/dp/cloud/go/active_mode_controller/internal/message_generator/message/relinquish.go deleted file mode 100644 index 5187ed8ffed7..000000000000 --- a/dp/cloud/go/active_mode_controller/internal/message_generator/message/relinquish.go +++ /dev/null @@ -1,40 +0,0 @@ -/* -Copyright 2022 The Magma Authors. - -This source code is licensed under the BSD-style license found in the -LICENSE file in the root directory of this source tree. - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package message - -import ( - "context" - "fmt" - - "magma/dp/cloud/go/active_mode_controller/protos/active_mode" -) - -func NewRelinquishMessage(id int64) *relinquishMessage { - return &relinquishMessage{id: id} -} - -type relinquishMessage struct { - id int64 - delta int64 -} - -func (u *relinquishMessage) Send(ctx context.Context, client active_mode.ActiveModeControllerClient) error { - req := &active_mode.AcknowledgeCbsdRelinquishRequest{Id: u.id} - _, err := client.AcknowledgeCbsdRelinquish(ctx, req) - return err -} - -func (u *relinquishMessage) String() string { - return fmt.Sprintf("relinquish: %d", u.id) -} diff --git a/dp/cloud/go/active_mode_controller/internal/message_generator/message/relinquish_test.go b/dp/cloud/go/active_mode_controller/internal/message_generator/message/relinquish_test.go deleted file mode 100644 index 2bd222ab69f1..000000000000 --- a/dp/cloud/go/active_mode_controller/internal/message_generator/message/relinquish_test.go +++ /dev/null @@ -1,54 +0,0 @@ -/* -Copyright 2022 The Magma Authors. - -This source code is licensed under the BSD-style license found in the -LICENSE file in the root directory of this source tree. - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package message_test - -import ( - "context" - "fmt" - "testing" - - "github.com/golang/protobuf/ptypes/empty" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "google.golang.org/grpc" - - "magma/dp/cloud/go/active_mode_controller/internal/message_generator/message" - "magma/dp/cloud/go/active_mode_controller/protos/active_mode" -) - -func TestRelinquishMessageString(t *testing.T) { - m := message.NewRelinquishMessage(id) - expected := fmt.Sprintf("relinquish: %d", id) - assert.Equal(t, expected, m.String()) -} - -func TestRelinquishMessageSend(t *testing.T) { - client := &stubRelinquishClient{} - - m := message.NewRelinquishMessage(id) - require.NoError(t, m.Send(context.Background(), client)) - - expected := &active_mode.AcknowledgeCbsdRelinquishRequest{Id: id} - assert.Equal(t, expected, client.req) -} - -type stubRelinquishClient struct { - active_mode.ActiveModeControllerClient - req *active_mode.AcknowledgeCbsdRelinquishRequest -} - -func (s *stubRelinquishClient) AcknowledgeCbsdRelinquish(_ context.Context, in *active_mode.AcknowledgeCbsdRelinquishRequest, _ ...grpc.CallOption) (*empty.Empty, error) { - s.req = in - return &empty.Empty{}, nil -} diff --git a/dp/cloud/go/active_mode_controller/internal/message_generator/message/sas.go b/dp/cloud/go/active_mode_controller/internal/message_generator/message/sas.go deleted file mode 100644 index aaad1f05c25d..000000000000 --- a/dp/cloud/go/active_mode_controller/internal/message_generator/message/sas.go +++ /dev/null @@ -1,39 +0,0 @@ -/* -Copyright 2022 The Magma Authors. - -This source code is licensed under the BSD-style license found in the -LICENSE file in the root directory of this source tree. - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package message - -import ( - "context" - "fmt" - - "magma/dp/cloud/go/active_mode_controller/protos/active_mode" -) - -func NewSasMessage(data string) *sasMessage { - return &sasMessage{data: data} -} - -type sasMessage struct { - data string -} - -func (s *sasMessage) Send(ctx context.Context, client active_mode.ActiveModeControllerClient) error { - payload := &active_mode.RequestPayload{Payload: s.data} - _, err := client.UploadRequests(ctx, payload) - return err -} - -func (s *sasMessage) String() string { - return fmt.Sprintf("request: %s", s.data) -} diff --git a/dp/cloud/go/active_mode_controller/internal/message_generator/message/sas_test.go b/dp/cloud/go/active_mode_controller/internal/message_generator/message/sas_test.go deleted file mode 100644 index be66db4dd186..000000000000 --- a/dp/cloud/go/active_mode_controller/internal/message_generator/message/sas_test.go +++ /dev/null @@ -1,56 +0,0 @@ -/* -Copyright 2022 The Magma Authors. - -This source code is licensed under the BSD-style license found in the -LICENSE file in the root directory of this source tree. - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package message_test - -import ( - "context" - "fmt" - "testing" - - "github.com/golang/protobuf/ptypes/empty" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "google.golang.org/grpc" - - "magma/dp/cloud/go/active_mode_controller/internal/message_generator/message" - "magma/dp/cloud/go/active_mode_controller/protos/active_mode" -) - -const data = "some data" - -func TestSasMessageString(t *testing.T) { - m := message.NewSasMessage(data) - expected := fmt.Sprintf("request: %s", data) - assert.Equal(t, expected, m.String()) -} - -func TestSasMessageSend(t *testing.T) { - client := &stubRequestsClient{} - - m := message.NewSasMessage(data) - require.NoError(t, m.Send(context.Background(), client)) - - expected := &active_mode.RequestPayload{Payload: data} - assert.Equal(t, expected, client.req) -} - -type stubRequestsClient struct { - active_mode.ActiveModeControllerClient - req *active_mode.RequestPayload -} - -func (s *stubRequestsClient) UploadRequests(_ context.Context, in *active_mode.RequestPayload, _ ...grpc.CallOption) (*empty.Empty, error) { - s.req = in - return &empty.Empty{}, nil -} diff --git a/dp/cloud/go/active_mode_controller/internal/message_generator/message/store_available_frequencies.go b/dp/cloud/go/active_mode_controller/internal/message_generator/message/store_available_frequencies.go deleted file mode 100644 index a8f905bf7c88..000000000000 --- a/dp/cloud/go/active_mode_controller/internal/message_generator/message/store_available_frequencies.go +++ /dev/null @@ -1,58 +0,0 @@ -/* -Copyright 2022 The Magma Authors. - -This source code is licensed under the BSD-style license found in the -LICENSE file in the root directory of this source tree. - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package message - -import ( - "context" - "strconv" - "strings" - - "magma/dp/cloud/go/active_mode_controller/protos/active_mode" -) - -func NewStoreAvailableFrequenciesMessage(id int64, frequencies []uint32) *storeAvailableFrequenciesMessage { - return &storeAvailableFrequenciesMessage{ - id: id, - frequencies: frequencies, - } -} - -type storeAvailableFrequenciesMessage struct { - id int64 - frequencies []uint32 -} - -func (s *storeAvailableFrequenciesMessage) Send(ctx context.Context, client active_mode.ActiveModeControllerClient) error { - req := &active_mode.StoreAvailableFrequenciesRequest{ - Id: s.id, - AvailableFrequencies: s.frequencies, - } - _, err := client.StoreAvailableFrequencies(ctx, req) - return err -} - -func (s *storeAvailableFrequenciesMessage) String() string { - b := strings.Builder{} - _, _ = b.WriteString("store available frequencies: ") - _, _ = b.WriteString(strconv.FormatInt(s.id, 10)) - _, _ = b.WriteString(" (") - for i, f := range s.frequencies { - _, _ = b.WriteString(strconv.FormatUint(uint64(f), 2)) - if i != len(s.frequencies)-1 { - _, _ = b.WriteString(", ") - } - } - _, _ = b.WriteString(")") - return b.String() -} diff --git a/dp/cloud/go/active_mode_controller/internal/message_generator/message/store_available_frequencies_test.go b/dp/cloud/go/active_mode_controller/internal/message_generator/message/store_available_frequencies_test.go deleted file mode 100644 index b85665afd26d..000000000000 --- a/dp/cloud/go/active_mode_controller/internal/message_generator/message/store_available_frequencies_test.go +++ /dev/null @@ -1,60 +0,0 @@ -/* -Copyright 2022 The Magma Authors. - -This source code is licensed under the BSD-style license found in the -LICENSE file in the root directory of this source tree. 
- -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package message_test - -import ( - "context" - "fmt" - "testing" - - "github.com/golang/protobuf/ptypes/empty" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "google.golang.org/grpc" - - "magma/dp/cloud/go/active_mode_controller/internal/message_generator/message" - "magma/dp/cloud/go/active_mode_controller/protos/active_mode" -) - -func TestStoreAvailableFrequenciesMessageString(t *testing.T) { - m := message.NewStoreAvailableFrequenciesMessage(id, freqs) - msg := "store available frequencies: %d (1110, 1100, 1100, 1000)" - expected := fmt.Sprintf(msg, id) - assert.Equal(t, expected, m.String()) -} - -func TestStoreAvailableFrequenciesMessageSend(t *testing.T) { - client := &stubStoreClient{} - - m := message.NewStoreAvailableFrequenciesMessage(id, freqs) - require.NoError(t, m.Send(context.Background(), client)) - - expected := &active_mode.StoreAvailableFrequenciesRequest{ - Id: id, - AvailableFrequencies: freqs, - } - assert.Equal(t, expected, client.req) -} - -var freqs = []uint32{0b1110, 0b1100, 0b1100, 0b1000} - -type stubStoreClient struct { - active_mode.ActiveModeControllerClient - req *active_mode.StoreAvailableFrequenciesRequest -} - -func (s *stubStoreClient) StoreAvailableFrequencies(_ context.Context, in *active_mode.StoreAvailableFrequenciesRequest, _ ...grpc.CallOption) (*empty.Empty, error) { - s.req = in - return &empty.Empty{}, nil -} diff --git a/dp/cloud/go/active_mode_controller/internal/message_generator/message/update.go b/dp/cloud/go/active_mode_controller/internal/message_generator/message/update.go deleted file mode 100644 index 2242c899992d..000000000000 --- 
a/dp/cloud/go/active_mode_controller/internal/message_generator/message/update.go +++ /dev/null @@ -1,40 +0,0 @@ -/* -Copyright 2022 The Magma Authors. - -This source code is licensed under the BSD-style license found in the -LICENSE file in the root directory of this source tree. - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package message - -import ( - "context" - "fmt" - - "magma/dp/cloud/go/active_mode_controller/protos/active_mode" -) - -func NewUpdateMessage(id int64) *updateMessage { - return &updateMessage{id: id} -} - -type updateMessage struct { - id int64 - delta int64 -} - -func (u *updateMessage) Send(ctx context.Context, client active_mode.ActiveModeControllerClient) error { - req := &active_mode.AcknowledgeCbsdUpdateRequest{Id: u.id} - _, err := client.AcknowledgeCbsdUpdate(ctx, req) - return err -} - -func (u *updateMessage) String() string { - return fmt.Sprintf("update: %d", u.id) -} diff --git a/dp/cloud/go/active_mode_controller/internal/message_generator/message/update_test.go b/dp/cloud/go/active_mode_controller/internal/message_generator/message/update_test.go deleted file mode 100644 index d2ecb23f890a..000000000000 --- a/dp/cloud/go/active_mode_controller/internal/message_generator/message/update_test.go +++ /dev/null @@ -1,54 +0,0 @@ -/* -Copyright 2022 The Magma Authors. - -This source code is licensed under the BSD-style license found in the -LICENSE file in the root directory of this source tree. - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package message_test - -import ( - "context" - "fmt" - "testing" - - "github.com/golang/protobuf/ptypes/empty" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "google.golang.org/grpc" - - "magma/dp/cloud/go/active_mode_controller/internal/message_generator/message" - "magma/dp/cloud/go/active_mode_controller/protos/active_mode" -) - -func TestUpdateMessageString(t *testing.T) { - m := message.NewUpdateMessage(id) - expected := fmt.Sprintf("update: %d", id) - assert.Equal(t, expected, m.String()) -} - -func TestUpdateMessageSend(t *testing.T) { - client := &stubUpdateClient{} - - m := message.NewUpdateMessage(id) - require.NoError(t, m.Send(context.Background(), client)) - - expected := &active_mode.AcknowledgeCbsdUpdateRequest{Id: id} - assert.Equal(t, expected, client.req) -} - -type stubUpdateClient struct { - active_mode.ActiveModeControllerClient - req *active_mode.AcknowledgeCbsdUpdateRequest -} - -func (s *stubUpdateClient) AcknowledgeCbsdUpdate(_ context.Context, in *active_mode.AcknowledgeCbsdUpdateRequest, _ ...grpc.CallOption) (*empty.Empty, error) { - s.req = in - return &empty.Empty{}, nil -} diff --git a/dp/cloud/go/active_mode_controller/internal/message_generator/message_generator.go b/dp/cloud/go/active_mode_controller/internal/message_generator/message_generator.go deleted file mode 100644 index b56bbba11e71..000000000000 --- a/dp/cloud/go/active_mode_controller/internal/message_generator/message_generator.go +++ /dev/null @@ -1,198 +0,0 @@ -/* -Copyright 2022 The Magma Authors. - -This source code is licensed under the BSD-style license found in the -LICENSE file in the root directory of this source tree. 
- -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package message_generator - -import ( - "context" - "fmt" - "time" - - "magma/dp/cloud/go/active_mode_controller/internal/message_generator/message" - "magma/dp/cloud/go/active_mode_controller/internal/message_generator/sas" - "magma/dp/cloud/go/active_mode_controller/internal/message_generator/sas/eirp" - "magma/dp/cloud/go/active_mode_controller/internal/message_generator/sas/grant" - "magma/dp/cloud/go/active_mode_controller/internal/message_generator/sas_helpers" - "magma/dp/cloud/go/active_mode_controller/protos/active_mode" -) - -type messageGenerator struct { - heartbeatTimeout time.Duration - inactivityTimeout time.Duration - rng RNG -} - -type RNG interface { - Int() int -} - -func NewMessageGenerator( - heartbeatTimeout time.Duration, - inactivityTimeout time.Duration, - rng RNG, -) *messageGenerator { - return &messageGenerator{ - heartbeatTimeout: heartbeatTimeout, - inactivityTimeout: inactivityTimeout, - rng: rng, - } -} - -type Message interface { - fmt.Stringer - Send(context.Context, active_mode.ActiveModeControllerClient) error -} - -func (m *messageGenerator) GenerateMessages(state *active_mode.State, now time.Time) []Message { - var requests []*sas.Request - var msgs []Message - for _, cbsd := range state.Cbsds { - g := m.getPerCbsdMessageGenerator(cbsd, now) - reqs := g.sas.GenerateRequests(cbsd) - requests = append(requests, reqs...) - msgs = append(msgs, g.action.generateActions(cbsd)...) 
- } - payloads := sas_helpers.Build(requests) - for _, payload := range payloads { - msgs = append(msgs, message.NewSasMessage(payload)) - } - return msgs -} - -// TODO make this more readable -func (m *messageGenerator) getPerCbsdMessageGenerator(cbsd *active_mode.Cbsd, now time.Time) *perCbsdMessageGenerator { - generator := &perCbsdMessageGenerator{ - sas: &noRequestGenerator{}, - action: &noMessageGenerator{}, - } - isActive := cbsd.LastSeenTimestamp >= now.Add(-m.inactivityTimeout).Unix() - if cbsd.State == active_mode.CbsdState_Unregistered { - if cbsd.DbData.IsDeleted { - generator.action = &deleteMessageGenerator{} - } else if cbsd.DbData.ShouldDeregister { - generator.action = &updateMessageGenerator{} - } else if isActive && cbsd.DesiredState == active_mode.CbsdState_Registered { - generator.sas = &sas.RegistrationRequestGenerator{} - } - } else if cbsd.DbData.IsDeleted || - cbsd.DbData.ShouldDeregister || - cbsd.DesiredState == active_mode.CbsdState_Unregistered { - generator.sas = &sas.DeregistrationRequestGenerator{} - } else if cbsd.DbData.ShouldRelinquish { - if len(cbsd.Grants) == 0 { - generator.action = &relinquishMessageGenerator{} - } else { - generator.sas = &sas.RelinquishmentRequestGenerator{} - } - } else if !isActive { - generator.sas = &sas.RelinquishmentRequestGenerator{} - } else if len(cbsd.Channels) == 0 { - generator.sas = &sas.SpectrumInquiryRequestGenerator{} - } else if len(cbsd.GrantSettings.AvailableFrequencies) == 0 { - generator.action = &availableFrequenciesMessageGenerator{} - } else { - nextSend := now.Add(m.heartbeatTimeout).Unix() - generator.sas = &grantManager{ - nextSendTimestamp: nextSend, - rng: m.rng, - } - } - return generator -} - -type grantManager struct { - nextSendTimestamp int64 - rng RNG -} - -func (g *grantManager) GenerateRequests(cbsd *active_mode.Cbsd) []*sas.Request { - grants := grant.GetFrequencyGrantMapping(cbsd.Grants) - calc := eirp.NewCalculator(cbsd.InstallationParams.AntennaGainDbi, 
cbsd.EirpCapabilities) - processors := grant.Processors[*sas.Request]{ - Del: &sas.RelinquishmentProcessor{ - CbsdId: cbsd.CbsdId, - Grants: grants, - }, - Keep: &sas.HeartbeatProcessor{ - NextSendTimestamp: g.nextSendTimestamp, - CbsdId: cbsd.CbsdId, - Grants: grants, - }, - Add: &sas.GrantProcessor{ - CbsdId: cbsd.CbsdId, - Calc: calc, - Channels: cbsd.Channels, - }, - } - requests := grant.ProcessGrants( - cbsd.Grants, cbsd.Preferences, cbsd.GrantSettings, - processors, g.rng.Int(), - ) - if len(requests) > 0 { - return requests - } - gen := sas.SpectrumInquiryRequestGenerator{} - return gen.GenerateRequests(cbsd) -} - -type perCbsdMessageGenerator struct { - sas sasRequestGenerator - action actionMessageGenerator -} - -type sasRequestGenerator interface { - GenerateRequests(cbsd *active_mode.Cbsd) []*sas.Request -} - -type actionMessageGenerator interface { - generateActions(cbsd *active_mode.Cbsd) []Message -} - -type noRequestGenerator struct{} - -func (*noRequestGenerator) GenerateRequests(_ *active_mode.Cbsd) []*sas.Request { - return nil -} - -type noMessageGenerator struct{} - -func (*noMessageGenerator) generateActions(_ *active_mode.Cbsd) []Message { - return nil -} - -type deleteMessageGenerator struct{} - -func (*deleteMessageGenerator) generateActions(data *active_mode.Cbsd) []Message { - return []Message{message.NewDeleteMessage(data.DbData.Id)} -} - -type updateMessageGenerator struct{} - -func (*updateMessageGenerator) generateActions(data *active_mode.Cbsd) []Message { - return []Message{message.NewUpdateMessage(data.DbData.Id)} -} - -type relinquishMessageGenerator struct{} - -func (*relinquishMessageGenerator) generateActions(data *active_mode.Cbsd) []Message { - return []Message{message.NewRelinquishMessage(data.DbData.Id)} -} - -type availableFrequenciesMessageGenerator struct{} - -func (*availableFrequenciesMessageGenerator) generateActions(data *active_mode.Cbsd) []Message { - calc := 
eirp.NewCalculator(data.InstallationParams.AntennaGainDbi, data.EirpCapabilities) - frequencies := grant.CalcAvailableFrequencies(data.Channels, calc) - msg := message.NewStoreAvailableFrequenciesMessage(data.DbData.Id, frequencies) - return []Message{msg} -} diff --git a/dp/cloud/go/active_mode_controller/internal/message_generator/message_generator_test.go b/dp/cloud/go/active_mode_controller/internal/message_generator/message_generator_test.go deleted file mode 100644 index 2ff2713441c7..000000000000 --- a/dp/cloud/go/active_mode_controller/internal/message_generator/message_generator_test.go +++ /dev/null @@ -1,410 +0,0 @@ -/* -Copyright 2022 The Magma Authors. - -This source code is licensed under the BSD-style license found in the -LICENSE file in the root directory of this source tree. - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package message_generator_test - -import ( - "context" - "testing" - "time" - - "github.com/golang/protobuf/ptypes/empty" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "google.golang.org/grpc" - - "magma/dp/cloud/go/active_mode_controller/internal/message_generator" - "magma/dp/cloud/go/active_mode_controller/internal/test_utils/builders" - "magma/dp/cloud/go/active_mode_controller/protos/active_mode" -) - -func TestGenerateMessages(t *testing.T) { - const timeout = 100 * time.Second - now := time.Unix(builders.Now, 0) - data := []struct { - name string - cbsd *active_mode.Cbsd - expectedMessages []any - }{{ - name: "Should do nothing for unregistered non active cbsd", - cbsd: builders.NewCbsdBuilder(). - Inactive(). - WithState(active_mode.CbsdState_Unregistered). 
- Build(), - }, { - name: "Should do nothing when inactive cbsd has no grants", - cbsd: builders.NewCbsdBuilder(). - Inactive(). - Build(), - }, { - name: "Should generate deregistration request for non active registered cbsd if desired", - cbsd: builders.NewCbsdBuilder(). - Inactive(). - WithDesiredState(active_mode.CbsdState_Unregistered). - Build(), - expectedMessages: []any{ - &active_mode.RequestPayload{ - Payload: `{ - "deregistrationRequest": [ - { - "cbsdId": "some_cbsd_id" - } - ] -}`, - }, - }, - }, { - name: "Should generate registration request for active non registered cbsd", - cbsd: builders.NewCbsdBuilder(). - WithState(active_mode.CbsdState_Unregistered). - Build(), - expectedMessages: []any{ - &active_mode.RequestPayload{ - Payload: `{ - "registrationRequest": [ - { - "userId": "some_user_id", - "fccId": "some_fcc_id", - "cbsdSerialNumber": "some_serial_number" - } -] -}`, - }, - }, - }, { - name: "Should generate spectrum inquiry request when there are no available channels", - cbsd: builders.NewCbsdBuilder(). - Build(), - expectedMessages: []any{getSpectrumInquiryRequest()}, - }, { - name: "Should set available frequencies when they are nil but there are channels", - cbsd: builders.NewCbsdBuilder(). - WithChannel(&active_mode.Channel{ - LowFrequencyHz: 3590e6, - HighFrequencyHz: 3610e6, - }). - Build(), - expectedMessages: []any{ - &active_mode.StoreAvailableFrequenciesRequest{ - Id: builders.DbId, - AvailableFrequencies: []uint32{ - 1<<9 | 1<<10 | 1<<11, - 1<<9 | 1<<10 | 1<<11, - 1 << 10, - 1 << 10, - }, - }, - }, - }, { - name: "Should generate spectrum inquiry request when no suitable available frequencies", - cbsd: builders.NewCbsdBuilder(). - WithChannel(builders.SomeChannel). - WithAvailableFrequencies([]uint32{0, 0, 0, 0}). - Build(), - expectedMessages: []any{getSpectrumInquiryRequest()}, - }, { - name: "Should generate grant request when there are available frequencies and channels", - cbsd: builders.NewCbsdBuilder(). 
- WithChannel(builders.SomeChannel). - WithAvailableFrequencies([]uint32{0, 1 << 15, 0, 0}). - Build(), - expectedMessages: []any{ - &active_mode.RequestPayload{ - Payload: `{ - "grantRequest": [ - { - "cbsdId": "some_cbsd_id", - "operationParam": { - "maxEirp": 35, - "operationFrequencyRange": { - "lowFrequency": 3620000000, - "highFrequency": 3630000000 - } - } - } - ] -}`, - }, - }, - }, { - name: "Should request two grants in carrier aggregation mode", - cbsd: builders.NewCbsdBuilder(). - WithChannel(builders.SomeChannel). - WithAvailableFrequencies([]uint32{0, 0, 0, 1<<10 | 1<<20}). - WithCarrierAggregation(). - Build(), - expectedMessages: []any{ - &active_mode.RequestPayload{ - Payload: `{ - "grantRequest": [ - { - "cbsdId": "some_cbsd_id", - "operationParam": { - "maxEirp": 31, - "operationFrequencyRange": { - "lowFrequency": 3590000000, - "highFrequency": 3610000000 - } - } - }, - { - "cbsdId": "some_cbsd_id", - "operationParam": { - "maxEirp": 31, - "operationFrequencyRange": { - "lowFrequency": 3640000000, - "highFrequency": 3660000000 - } - } - } - ] -}`, - }, - }, - }, { - name: "Should send heartbeat message for grant in granted state", - cbsd: builders.NewCbsdBuilder(). - WithChannel(builders.SomeChannel). - WithAvailableFrequencies(builders.NoAvailableFrequencies). - WithGrant(&active_mode.Grant{ - Id: builders.GrantId, - State: active_mode.GrantState_Granted, - LowFrequencyHz: 3590e6, - HighFrequencyHz: 3610e6, - }). - Build(), - expectedMessages: []any{ - &active_mode.RequestPayload{ - Payload: `{ - "heartbeatRequest": [ - { - "cbsdId": "some_cbsd_id", - "grantId": "some_grant_id", - "operationState": "GRANTED" - } - ] -}`, - }, - }, - }, { - name: "Should send relinquish message for unsync grant", - cbsd: builders.NewCbsdBuilder(). - WithChannel(builders.SomeChannel). - WithAvailableFrequencies(builders.NoAvailableFrequencies). 
- WithGrant(&active_mode.Grant{ - Id: builders.GrantId, - State: active_mode.GrantState_Unsync, - LowFrequencyHz: 3590e6, - HighFrequencyHz: 3610e6, - }). - Build(), - expectedMessages: []any{ - &active_mode.RequestPayload{ - Payload: `{ - "relinquishmentRequest": [ - { - "cbsdId": "some_cbsd_id", - "grantId": "some_grant_id" - } - ] -}`, - }, - }, - }, { - name: "Should send relinquish message when inactive for too long", - cbsd: builders.NewCbsdBuilder(). - Inactive(). - WithGrant(&active_mode.Grant{ - Id: builders.GrantId, - State: active_mode.GrantState_Authorized, - LowFrequencyHz: 3590e6, - HighFrequencyHz: 3610e6, - }). - Build(), - expectedMessages: []any{ - &active_mode.RequestPayload{ - Payload: `{ - "relinquishmentRequest": [ - { - "cbsdId": "some_cbsd_id", - "grantId": "some_grant_id" - } - ] -}`, - }, - }, - }, { - name: "Should send relinquish message when requested", - cbsd: builders.NewCbsdBuilder(). - ForRelinquish(). - WithGrant(&active_mode.Grant{ - Id: builders.GrantId, - State: active_mode.GrantState_Authorized, - LowFrequencyHz: 3590e6, - HighFrequencyHz: 3610e6, - }). - Build(), - expectedMessages: []any{ - &active_mode.RequestPayload{ - Payload: `{ - "relinquishmentRequest": [ - { - "cbsdId": "some_cbsd_id", - "grantId": "some_grant_id" - } - ] -}`, - }, - }, - }, { - name: "Should deregister deleted cbsd", - cbsd: builders.NewCbsdBuilder(). - Deleted(). - Build(), - expectedMessages: []any{ - &active_mode.RequestPayload{ - Payload: `{ - "deregistrationRequest": [ - { - "cbsdId": "some_cbsd_id" - } - ] -}`, - }, - }, - }, { - name: "Should delete unregistered cbsd marked as deleted", - cbsd: builders.NewCbsdBuilder(). - WithState(active_mode.CbsdState_Unregistered). - Deleted(). - Build(), - expectedMessages: []any{ - &active_mode.DeleteCbsdRequest{Id: 123}, - }, - }, { - name: "Should deregister updated cbsd", - cbsd: builders.NewCbsdBuilder(). - ForDeregistration(). 
- Build(), - expectedMessages: []any{ - &active_mode.RequestPayload{ - Payload: `{ - "deregistrationRequest": [ - { - "cbsdId": "some_cbsd_id" - } - ] -}`, - }, - }, - }, { - name: "Should acknowledge update of unregistered cbsd marked as updated", - cbsd: builders.NewCbsdBuilder(). - WithState(active_mode.CbsdState_Unregistered). - ForDeregistration(). - Build(), - expectedMessages: []any{ - &active_mode.AcknowledgeCbsdUpdateRequest{Id: 123}, - }, - }, { - name: "Should acknowledge relinquish when there are no grants", - cbsd: builders.NewCbsdBuilder(). - ForRelinquish(). - Build(), - expectedMessages: []any{ - &active_mode.AcknowledgeCbsdRelinquishRequest{Id: 123}, - }, - }} - for _, tt := range data { - t.Run(tt.name, func(t *testing.T) { - g := message_generator.NewMessageGenerator(0, timeout, &stubRNG{}) - state := &active_mode.State{Cbsds: []*active_mode.Cbsd{tt.cbsd}} - msgs := g.GenerateMessages(state, now) - client := &stubActiveModeControllerClient{} - for _, msg := range msgs { - _ = msg.Send(context.Background(), client) - } - require.Len(t, client.messages, len(tt.expectedMessages)) - for i := range tt.expectedMessages { - assertMessageEqual(t, tt.expectedMessages[i], client.messages[i]) - } - }) - } -} - -func assertMessageEqual(t *testing.T, expected any, actual any) { - switch e := expected.(type) { - case *active_mode.RequestPayload: - a := actual.(*active_mode.RequestPayload) - assert.JSONEq(t, e.Payload, a.Payload) - default: - assert.Equal(t, expected, actual) - } -} - -func getSpectrumInquiryRequest() *active_mode.RequestPayload { - return &active_mode.RequestPayload{ - Payload: `{ - "spectrumInquiryRequest": [ - { - "cbsdId": "some_cbsd_id", - "inquiredSpectrum": [ - { - "lowFrequency": 3550000000, - "highFrequency": 3700000000 - } - ] - } - ] -}`, - } -} - -type stubRNG struct{} - -func (s *stubRNG) Int() int { - return 0 -} - -type stubActiveModeControllerClient struct { - messages []any -} - -func (s *stubActiveModeControllerClient) 
GetState(_ context.Context, _ *active_mode.GetStateRequest, _ ...grpc.CallOption) (*active_mode.State, error) { - panic("not implemented") -} - -func (s *stubActiveModeControllerClient) DeleteCbsd(_ context.Context, in *active_mode.DeleteCbsdRequest, _ ...grpc.CallOption) (*empty.Empty, error) { - s.messages = append(s.messages, in) - return nil, nil -} - -func (s *stubActiveModeControllerClient) AcknowledgeCbsdUpdate(_ context.Context, in *active_mode.AcknowledgeCbsdUpdateRequest, _ ...grpc.CallOption) (*empty.Empty, error) { - s.messages = append(s.messages, in) - return nil, nil -} - -func (s *stubActiveModeControllerClient) AcknowledgeCbsdRelinquish(_ context.Context, in *active_mode.AcknowledgeCbsdRelinquishRequest, _ ...grpc.CallOption) (*empty.Empty, error) { - s.messages = append(s.messages, in) - return nil, nil -} - -func (s *stubActiveModeControllerClient) StoreAvailableFrequencies(_ context.Context, in *active_mode.StoreAvailableFrequenciesRequest, _ ...grpc.CallOption) (*empty.Empty, error) { - s.messages = append(s.messages, in) - return nil, nil -} - -func (s *stubActiveModeControllerClient) UploadRequests(_ context.Context, in *active_mode.RequestPayload, _ ...grpc.CallOption) (*empty.Empty, error) { - s.messages = append(s.messages, in) - return nil, nil -} diff --git a/dp/cloud/go/active_mode_controller/internal/message_generator/sas/registration.go b/dp/cloud/go/active_mode_controller/internal/message_generator/sas/registration.go deleted file mode 100644 index 0ab4f11193f1..000000000000 --- a/dp/cloud/go/active_mode_controller/internal/message_generator/sas/registration.go +++ /dev/null @@ -1,79 +0,0 @@ -/* -Copyright 2022 The Magma Authors. - -This source code is licensed under the BSD-style license found in the -LICENSE file in the root directory of this source tree. 
- -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package sas - -import ( - "encoding/json" - "strings" - - "magma/dp/cloud/go/active_mode_controller/protos/active_mode" -) - -type RegistrationRequestGenerator struct{} - -func (*RegistrationRequestGenerator) GenerateRequests(cbsd *active_mode.Cbsd) []*Request { - req := buildRegistrationRequest(cbsd) - return []*Request{asRequest(Registration, req)} -} - -func buildRegistrationRequest(cbsd *active_mode.Cbsd) *registrationRequest { - settings := cbsd.GetSasSettings() - if !settings.GetSingleStepEnabled() { - return ®istrationRequest{ - UserId: settings.GetUserId(), - FccId: settings.GetFccId(), - CbsdSerialNumber: settings.GetSerialNumber(), - } - } - installation := cbsd.GetInstallationParams() - return ®istrationRequest{ - UserId: settings.GetUserId(), - FccId: settings.GetFccId(), - CbsdSerialNumber: settings.GetSerialNumber(), - CbsdCategory: strings.ToUpper(settings.GetCbsdCategory()), - AirInterface: &airInterface{RadioTechnology: "E_UTRA"}, - InstallationParam: &installationParam{ - Latitude: installation.GetLatitudeDeg(), - Longitude: installation.GetLongitudeDeg(), - Height: installation.GetHeightM(), - HeightType: strings.ToUpper(installation.GetHeightType()), - IndoorDeployment: installation.GetIndoorDeployment(), - AntennaGain: installation.GetAntennaGainDbi(), - }, - MeasCapability: json.RawMessage("[]"), - } -} - -type registrationRequest struct { - UserId string `json:"userId"` - FccId string `json:"fccId"` - CbsdSerialNumber string `json:"cbsdSerialNumber"` - CbsdCategory string `json:"cbsdCategory,omitempty"` - AirInterface *airInterface `json:"airInterface,omitempty"` - InstallationParam *installationParam 
`json:"installationParam,omitempty"` - MeasCapability json.RawMessage `json:"measCapability,omitempty"` -} - -type airInterface struct { - RadioTechnology string `json:"radioTechnology"` -} - -type installationParam struct { - Latitude float32 `json:"latitude"` - Longitude float32 `json:"longitude"` - Height float32 `json:"height"` - HeightType string `json:"heightType"` - IndoorDeployment bool `json:"indoorDeployment"` - AntennaGain float32 `json:"antennaGain"` -} diff --git a/dp/cloud/go/active_mode_controller/internal/message_generator/sas/request.go b/dp/cloud/go/active_mode_controller/internal/message_generator/sas/request.go deleted file mode 100644 index e20291b0d1f3..000000000000 --- a/dp/cloud/go/active_mode_controller/internal/message_generator/sas/request.go +++ /dev/null @@ -1,59 +0,0 @@ -/* -Copyright 2022 The Magma Authors. - -This source code is licensed under the BSD-style license found in the -LICENSE file in the root directory of this source tree. - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package sas - -import "encoding/json" - -type Request struct { - Data json.RawMessage - Type RequestType -} -type RequestType uint8 - -const ( - Registration RequestType = iota - SpectrumInquiry - Grant - Heartbeat - Relinquishment - Deregistration - RequestTypeCount -) - -func (r RequestType) String() string { - var pref string - switch r { - case Registration: - pref = "registration" - case SpectrumInquiry: - pref = "spectrumInquiry" - case Grant: - pref = "grant" - case Heartbeat: - pref = "heartbeat" - case Relinquishment: - pref = "relinquishment" - case Deregistration: - pref = "deregistration" - } - return pref + "Request" -} - -func asRequest(requestType RequestType, data interface{}) *Request { - b, _ := json.Marshal(data) - return &Request{ - Type: requestType, - Data: b, - } -} diff --git a/dp/cloud/go/active_mode_controller/internal/message_generator/sas_helpers/builder.go b/dp/cloud/go/active_mode_controller/internal/message_generator/sas_helpers/builder.go deleted file mode 100644 index 7c3ce5dd082a..000000000000 --- a/dp/cloud/go/active_mode_controller/internal/message_generator/sas_helpers/builder.go +++ /dev/null @@ -1,44 +0,0 @@ -/* -Copyright 2022 The Magma Authors. - -This source code is licensed under the BSD-style license found in the -LICENSE file in the root directory of this source tree. - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package sas_helpers - -import ( - "encoding/json" - - "magma/dp/cloud/go/active_mode_controller/internal/message_generator/sas" -) - -func Build(reqs []*sas.Request) []string { - byType := [sas.RequestTypeCount][]json.RawMessage{} - for _, r := range reqs { - if r != nil { - byType[r.Type] = append(byType[r.Type], r.Data) - } - } - payloads := make([]string, 0, len(byType)) - for k, v := range byType { - if len(v) != 0 { - payloads = append(payloads, toRequest(sas.RequestType(k), v)) - } - } - return payloads -} - -func toRequest(requestType sas.RequestType, reqs []json.RawMessage) string { - data := map[string][]json.RawMessage{ - requestType.String(): reqs, - } - payload, _ := json.Marshal(data) - return string(payload) -} diff --git a/dp/cloud/go/active_mode_controller/internal/message_generator/sas_helpers/builder_test.go b/dp/cloud/go/active_mode_controller/internal/message_generator/sas_helpers/builder_test.go deleted file mode 100644 index ed0b161d8622..000000000000 --- a/dp/cloud/go/active_mode_controller/internal/message_generator/sas_helpers/builder_test.go +++ /dev/null @@ -1,58 +0,0 @@ -/* -Copyright 2022 The Magma Authors. - -This source code is licensed under the BSD-style license found in the -LICENSE file in the root directory of this source tree. - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package sas_helpers_test - -import ( - "fmt" - "testing" - - "github.com/stretchr/testify/assert" - - "magma/dp/cloud/go/active_mode_controller/internal/message_generator/sas" - "magma/dp/cloud/go/active_mode_controller/internal/message_generator/sas_helpers" -) - -func TestBuild(t *testing.T) { - const someDeregistrationRequest = `{"cbsdId":"someId"}` - const otherDeregistrationRequest = `{"cbsdId":"otherId"}` - const someHeartbeatRequest = `{"cbsdId":"someId","grantId":"grantId"}` - const someRegistrationRequest = `{"key":"value"}` - requests := []*sas.Request{{ - Type: sas.Deregistration, - Data: []byte(someDeregistrationRequest), - }, { - Type: sas.Heartbeat, - Data: []byte(someHeartbeatRequest), - }, { - Type: sas.Deregistration, - Data: []byte(otherDeregistrationRequest), - }, { - Type: sas.Registration, - Data: []byte(someRegistrationRequest), - }} - actual := sas_helpers.Build(requests) - expected := []string{ - fmt.Sprintf(`{"%s":[%s]}`, sas.Registration, someRegistrationRequest), - fmt.Sprintf(`{"%s":[%s]}`, sas.Heartbeat, someHeartbeatRequest), - fmt.Sprintf(`{"%s":[%s,%s]}`, sas.Deregistration, - someDeregistrationRequest, otherDeregistrationRequest), - } - assert.Equal(t, expected, actual) -} - -func TestSkipNil(t *testing.T) { - requests := []*sas.Request{nil, nil} - actual := sas_helpers.Build(requests) - assert.Empty(t, actual) -} diff --git a/dp/cloud/go/active_mode_controller/internal/signal/signal.go b/dp/cloud/go/active_mode_controller/internal/signal/signal.go deleted file mode 100644 index 813c9f3ca547..000000000000 --- a/dp/cloud/go/active_mode_controller/internal/signal/signal.go +++ /dev/null @@ -1,36 +0,0 @@ -/* -Copyright 2022 The Magma Authors. - -This source code is licensed under the BSD-style license found in the -LICENSE file in the root directory of this source tree. 
- -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package signal - -import ( - "context" - "os" - "os/signal" - "syscall" -) - -type app interface { - Run(ctx context.Context) error -} - -func Run(ctx context.Context, app app) error { - c := make(chan os.Signal, 1) - signal.Notify(c, syscall.SIGINT, syscall.SIGTERM) - appCtx, cancel := context.WithCancel(ctx) - go func() { - <-c - cancel() - }() - return app.Run(appCtx) -} diff --git a/dp/cloud/go/active_mode_controller/internal/signal/signal_test.go b/dp/cloud/go/active_mode_controller/internal/signal/signal_test.go deleted file mode 100644 index b9d112be6fb1..000000000000 --- a/dp/cloud/go/active_mode_controller/internal/signal/signal_test.go +++ /dev/null @@ -1,81 +0,0 @@ -/* -Copyright 2022 The Magma Authors. - -This source code is licensed under the BSD-style license found in the -LICENSE file in the root directory of this source tree. - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package signal_test - -import ( - "context" - "os" - "syscall" - "testing" - "time" - - "github.com/stretchr/testify/assert" - - "magma/dp/cloud/go/active_mode_controller/internal/signal" -) - -const ( - timeout = time.Millisecond * 10 -) - -func TestRun(t *testing.T) { - app, errs := whenAppWasStarted() - thenAppWasStarted(t, app) - - whenSignalWasSent(t) - thenAppWasShutdown(t, errs) -} - -func whenAppWasStarted() (*stubApp, chan error) { - errs := make(chan error) - app := &stubApp{running: make(chan struct{})} - go func() { - errs <- signal.Run(context.Background(), app) - }() - return app, errs -} - -func thenAppWasStarted(t *testing.T, s *stubApp) { - select { - case <-s.running: - return - case <-time.After(timeout): - assert.Fail(t, "app failed to start") - } -} - -func whenSignalWasSent(t *testing.T) { - proc, err := os.FindProcess(os.Getpid()) - assert.NoError(t, err) - assert.NoError(t, proc.Signal(syscall.SIGTERM)) -} - -func thenAppWasShutdown(t *testing.T, errs chan error) { - select { - case err := <-errs: - assert.NoError(t, err) - case <-time.After(timeout): - assert.Fail(t, "app failed to stop") - } -} - -type stubApp struct { - running chan struct{} -} - -func (s *stubApp) Run(ctx context.Context) error { - close(s.running) - <-ctx.Done() - return nil -} diff --git a/dp/cloud/go/active_mode_controller/internal/test_utils/builders/cbsd.go b/dp/cloud/go/active_mode_controller/internal/test_utils/builders/cbsd.go deleted file mode 100644 index 9967d298ae3c..000000000000 --- a/dp/cloud/go/active_mode_controller/internal/test_utils/builders/cbsd.go +++ /dev/null @@ -1,133 +0,0 @@ -/* -Copyright 2022 The Magma Authors. - -This source code is licensed under the BSD-style license found in the -LICENSE file in the root directory of this source tree. 
- -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package builders - -import "magma/dp/cloud/go/active_mode_controller/protos/active_mode" - -const ( - Now = 1000 - DbId = 123 - CbsdId = "some_cbsd_id" - GrantId = "some_grant_id" -) - -var ( - SomeChannel = &active_mode.Channel{ - LowFrequencyHz: 3550e6, - HighFrequencyHz: 3700e6, - } - NoAvailableFrequencies = []uint32{0, 0, 0, 0} -) - -type cbsdBuilder struct { - cbsd *active_mode.Cbsd -} - -func NewCbsdBuilder() *cbsdBuilder { - return &cbsdBuilder{ - cbsd: &active_mode.Cbsd{ - CbsdId: CbsdId, - State: active_mode.CbsdState_Registered, - DesiredState: active_mode.CbsdState_Registered, - LastSeenTimestamp: Now, - SasSettings: &active_mode.SasSettings{ - SingleStepEnabled: false, - CbsdCategory: "A", - SerialNumber: "some_serial_number", - FccId: "some_fcc_id", - UserId: "some_user_id", - }, - InstallationParams: &active_mode.InstallationParams{ - AntennaGainDbi: 15, - }, - EirpCapabilities: &active_mode.EirpCapabilities{ - MinPower: 0, - MaxPower: 30, - NumberOfPorts: 1, - }, - DbData: &active_mode.DatabaseCbsd{ - Id: DbId, - }, - Preferences: &active_mode.FrequencyPreferences{ - BandwidthMhz: 20, - }, - GrantSettings: &active_mode.GrantSettings{ - MaxIbwMhz: 150, - }, - }, - } -} - -func (c *cbsdBuilder) Build() *active_mode.Cbsd { - return c.cbsd -} - -func (c *cbsdBuilder) Inactive() *cbsdBuilder { - c.cbsd.LastSeenTimestamp = 0 - return c -} - -func (c *cbsdBuilder) WithState(state active_mode.CbsdState) *cbsdBuilder { - c.cbsd.State = state - return c -} - -func (c *cbsdBuilder) WithDesiredState(state active_mode.CbsdState) *cbsdBuilder { - c.cbsd.DesiredState = state - return c -} - -func (c *cbsdBuilder) Deleted() *cbsdBuilder { - 
c.cbsd.DbData.IsDeleted = true - return c -} - -func (c *cbsdBuilder) ForDeregistration() *cbsdBuilder { - c.cbsd.DbData.ShouldDeregister = true - return c -} - -func (c *cbsdBuilder) ForRelinquish() *cbsdBuilder { - c.cbsd.DbData.ShouldRelinquish = true - return c -} - -func (c *cbsdBuilder) WithChannel(channel *active_mode.Channel) *cbsdBuilder { - c.cbsd.Channels = append(c.cbsd.Channels, channel) - return c -} - -func (c *cbsdBuilder) WithGrant(grant *active_mode.Grant) *cbsdBuilder { - c.cbsd.Grants = append(c.cbsd.Grants, grant) - return c -} - -func (c *cbsdBuilder) WithAvailableFrequencies(frequencies []uint32) *cbsdBuilder { - c.cbsd.GrantSettings.AvailableFrequencies = frequencies - return c -} - -func (c *cbsdBuilder) WithCarrierAggregation() *cbsdBuilder { - c.cbsd.GrantSettings.GrantRedundancyEnabled = true - c.cbsd.GrantSettings.CarrierAggregationEnabled = true - return c -} - -func (c *cbsdBuilder) WithName(name string) *cbsdBuilder { - c.cbsd.CbsdId = name - c.cbsd.SasSettings.SerialNumber = name - c.cbsd.SasSettings.FccId = name - c.cbsd.SasSettings.UserId = name - return c -} diff --git a/dp/cloud/go/go.mod b/dp/cloud/go/go.mod index 3f6d709ede8c..98f95c0d2e47 100644 --- a/dp/cloud/go/go.mod +++ b/dp/cloud/go/go.mod @@ -32,8 +32,10 @@ require ( github.com/labstack/echo/v4 v4.7.2 github.com/olivere/elastic/v7 v7.0.6 github.com/stretchr/testify v1.7.0 + golang.org/x/exp v0.0.0-20220907003533-145caa8ea1d0 google.golang.org/grpc v1.48.0 google.golang.org/protobuf v1.28.0 + gopkg.in/DATA-DOG/go-sqlmock.v1 v1.3.0 magma/orc8r/cloud/go v0.0.0-00010101000000-000000000000 magma/orc8r/lib/go v0.0.0-00010101000000-000000000000 magma/orc8r/lib/go/protos v0.0.0 @@ -92,7 +94,7 @@ require ( golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10 // indirect golang.org/x/text v0.3.7 // indirect golang.org/x/time v0.0.0-20220722155302-e5dcc9cfc0b9 // indirect - golang.org/x/tools v0.1.8 // indirect + golang.org/x/tools v0.1.12 // indirect google.golang.org/genproto 
v0.0.0-20211208223120-3a66f561d7aa // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect diff --git a/dp/cloud/go/go.sum b/dp/cloud/go/go.sum index dbe64dfecb5f..f3b4430574e0 100644 --- a/dp/cloud/go/go.sum +++ b/dp/cloud/go/go.sum @@ -551,6 +551,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20220907003533-145caa8ea1d0 h1:17k44ji3KFYG94XS5QEFC8pyuOlMh3IoR+vkmTZmJJs= +golang.org/x/exp v0.0.0-20220907003533-145caa8ea1d0/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -758,8 +760,8 @@ golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.1.8 h1:P1HhGGuLW4aAclzjtmJdf0mJOjVUZUzOTqkAkWL+l6w= -golang.org/x/tools v0.1.8/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= +golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= 
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -854,6 +856,7 @@ google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/DATA-DOG/go-sqlmock.v1 v1.3.0 h1:FVCohIoYO7IJoDDVpV2pdq7SgrMH6wHnuTyrdrxJNoY= +gopkg.in/DATA-DOG/go-sqlmock.v1 v1.3.0/go.mod h1:OdE7CF6DbADk7lN8LIKRzRJTTZXIjtWgA5THM5lhBAw= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/dp/cloud/go/services/dp/active_mode_controller/action_generator/action/action.go b/dp/cloud/go/services/dp/active_mode_controller/action_generator/action/action.go new file mode 100644 index 000000000000..01b99d7b3ae9 --- /dev/null +++ b/dp/cloud/go/services/dp/active_mode_controller/action_generator/action/action.go @@ -0,0 +1,47 @@ +/* +Copyright 2022 The Magma Authors. + +This source code is licensed under the BSD-style license found in the +LICENSE file in the root directory of this source tree. + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package action + +import ( + sq "github.com/Masterminds/squirrel" + + "magma/dp/cloud/go/services/dp/storage" + "magma/dp/cloud/go/services/dp/storage/db" +) + +type Delete struct { + Id int64 +} + +func (d *Delete) Do(runner sq.BaseRunner, manager storage.AmcManager) error { + cbsd := &storage.DBCbsd{Id: db.MakeInt(d.Id)} + return manager.DeleteCbsd(runner, cbsd) +} + +type Update struct { + Data *storage.DBCbsd + Mask db.FieldMask +} + +func (u *Update) Do(runner sq.BaseRunner, manager storage.AmcManager) error { + return manager.UpdateCbsd(runner, u.Data, u.Mask) +} + +type Request struct { + Data *storage.MutableRequest +} + +func (r *Request) Do(runner sq.BaseRunner, manager storage.AmcManager) error { + return manager.CreateRequest(runner, r.Data) +} diff --git a/dp/cloud/go/services/dp/active_mode_controller/action_generator/action/action_test.go b/dp/cloud/go/services/dp/active_mode_controller/action_generator/action/action_test.go new file mode 100644 index 000000000000..e3ebbcc373a9 --- /dev/null +++ b/dp/cloud/go/services/dp/active_mode_controller/action_generator/action/action_test.go @@ -0,0 +1,108 @@ +/* +Copyright 2022 The Magma Authors. + +This source code is licensed under the BSD-style license found in the +LICENSE file in the root directory of this source tree. + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package action_test + +import ( + "testing" + + "github.com/Masterminds/squirrel" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "magma/dp/cloud/go/services/dp/active_mode_controller/action_generator/action" + "magma/dp/cloud/go/services/dp/storage" + "magma/dp/cloud/go/services/dp/storage/db" +) + +func TestRequest(t *testing.T) { + m := &stubAmcManager{} + data := &storage.MutableRequest{ + Request: &storage.DBRequest{ + CbsdId: db.MakeInt(123), + Payload: `{"some":"request"}`, + }, + RequestType: &storage.DBRequestType{ + Name: db.MakeString("some type"), + }, + } + + a := action.Request{Data: data} + require.NoError(t, a.Do(nil, m)) + + assert.Equal(t, m.action, createRequest) + assert.Equal(t, m.request, data) +} + +func TestUpdate(t *testing.T) { + m := &stubAmcManager{} + data := &storage.DBCbsd{ + AvailableFrequencies: []uint32{1, 2, 3, 4}, + } + mask := db.NewIncludeMask("available_frequencies") + + a := action.Update{Data: data, Mask: mask} + require.NoError(t, a.Do(nil, m)) + + assert.Equal(t, m.action, updateCbsd) + assert.Equal(t, m.cbsd, data) + assert.Equal(t, m.mask, mask) +} + +func TestDelete(t *testing.T) { + m := &stubAmcManager{} + + a := action.Delete{Id: 123} + require.NoError(t, a.Do(nil, m)) + + assert.Equal(t, m.action, deleteCbsd) + assert.Equal(t, &storage.DBCbsd{Id: db.MakeInt(123)}, m.cbsd) +} + +type actionType uint8 + +const ( + createRequest actionType = iota + deleteCbsd + updateCbsd +) + +type stubAmcManager struct { + action actionType + request *storage.MutableRequest + cbsd *storage.DBCbsd + mask db.FieldMask +} + +func (s *stubAmcManager) GetState(_ squirrel.BaseRunner) ([]*storage.DetailedCbsd, error) { + return nil, nil +} + +func (s *stubAmcManager) CreateRequest(_ squirrel.BaseRunner, request *storage.MutableRequest) error { + s.action = createRequest + s.request = request + return nil +} + +func (s *stubAmcManager) DeleteCbsd(_ squirrel.BaseRunner, cbsd *storage.DBCbsd) error { + 
s.action = deleteCbsd + s.cbsd = cbsd + return nil +} + +func (s *stubAmcManager) UpdateCbsd(_ squirrel.BaseRunner, cbsd *storage.DBCbsd, mask db.FieldMask) error { + s.action = updateCbsd + s.cbsd = cbsd + s.mask = mask + return nil +} diff --git a/dp/cloud/go/services/dp/active_mode_controller/action_generator/generators.go b/dp/cloud/go/services/dp/active_mode_controller/action_generator/generators.go new file mode 100644 index 000000000000..3856f258241e --- /dev/null +++ b/dp/cloud/go/services/dp/active_mode_controller/action_generator/generators.go @@ -0,0 +1,127 @@ +package action_generator + +import ( + sq "github.com/Masterminds/squirrel" + + "magma/dp/cloud/go/services/dp/active_mode_controller/action_generator/action" + "magma/dp/cloud/go/services/dp/active_mode_controller/action_generator/sas" + "magma/dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/eirp" + "magma/dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/grant" + "magma/dp/cloud/go/services/dp/storage" + "magma/dp/cloud/go/services/dp/storage/db" +) + +type Action interface { + Do(sq.BaseRunner, storage.AmcManager) error +} + +type actionGeneratorPerCbsd interface { + generateActions(*storage.DetailedCbsd) []Action +} + +type nothingGenerator struct{} + +func (*nothingGenerator) generateActions(cbsd *storage.DetailedCbsd) []Action { + return nil +} + +type sasRequestGenerator struct { + g sasGenerator +} + +type sasGenerator interface { + GenerateRequests(*storage.DetailedCbsd) []*storage.MutableRequest +} + +func (s *sasRequestGenerator) generateActions(cbsd *storage.DetailedCbsd) []Action { + reqs := s.g.GenerateRequests(cbsd) + actions := make([]Action, 0, len(reqs)) + for _, r := range reqs { + if r != nil { + r.Request.CbsdId = cbsd.Cbsd.Id + actions = append(actions, &action.Request{Data: r}) + } + } + return actions +} + +type deleteGenerator struct{} + +func (*deleteGenerator) generateActions(cbsd *storage.DetailedCbsd) []Action { + act := 
&action.Delete{Id: cbsd.Cbsd.Id.Int64} + return []Action{act} +} + +type acknowledgeDeregisterGenerator struct{} + +func (a *acknowledgeDeregisterGenerator) generateActions(cbsd *storage.DetailedCbsd) []Action { + data := &storage.DBCbsd{ + Id: cbsd.Cbsd.Id, + ShouldDeregister: db.MakeBool(false), + } + mask := db.NewIncludeMask("should_deregister") + act := &action.Update{Data: data, Mask: mask} + return []Action{act} +} + +type acknowledgeRelinquishGenerator struct{} + +func (a *acknowledgeRelinquishGenerator) generateActions(cbsd *storage.DetailedCbsd) []Action { + data := &storage.DBCbsd{ + Id: cbsd.Cbsd.Id, + ShouldRelinquish: db.MakeBool(false), + } + mask := db.NewIncludeMask("should_relinquish") + act := &action.Update{Data: data, Mask: mask} + return []Action{act} +} + +type storeAvailableFrequenciesGenerator struct{} + +func (s *storeAvailableFrequenciesGenerator) generateActions(cbsd *storage.DetailedCbsd) []Action { + calc := eirp.NewCalculator(cbsd.Cbsd) + frequencies := grant.CalcAvailableFrequencies(cbsd.Cbsd.Channels, calc) + data := &storage.DBCbsd{ + Id: cbsd.Cbsd.Id, + AvailableFrequencies: frequencies, + } + mask := db.NewIncludeMask("available_frequencies") + act := &action.Update{Data: data, Mask: mask} + return []Action{act} +} + +type grantManager struct { + nextSendTimestamp int64 + rng RNG +} + +func (g *grantManager) GenerateRequests(cbsd *storage.DetailedCbsd) []*storage.MutableRequest { + grants := grant.GetFrequencyGrantMapping(cbsd.Grants) + calc := eirp.NewCalculator(cbsd.Cbsd) + processors := grant.Processors[*storage.MutableRequest]{ + Del: &sas.RelinquishmentProcessor{ + CbsdId: cbsd.Cbsd.CbsdId.String, + Grants: grants, + }, + Keep: &sas.HeartbeatProcessor{ + NextSendTimestamp: g.nextSendTimestamp, + CbsdId: cbsd.Cbsd.CbsdId.String, + Grants: grants, + }, + Add: &sas.GrantProcessor{ + CbsdId: cbsd.Cbsd.CbsdId.String, + Calc: calc, + Channels: cbsd.Cbsd.Channels, + }, + } + dbGrants := make([]*storage.DBGrant, len(cbsd.Grants)) + 
for i, gt := range cbsd.Grants { + dbGrants[i] = gt.Grant + } + requests := grant.ProcessGrants(cbsd.Cbsd, dbGrants, processors, g.rng.Int()) + if len(requests) > 0 { + return requests + } + gen := sas.SpectrumInquiryRequestGenerator{} + return gen.GenerateRequests(cbsd) +} diff --git a/dp/cloud/go/services/dp/active_mode_controller/action_generator/message_generator.go b/dp/cloud/go/services/dp/active_mode_controller/action_generator/message_generator.go new file mode 100644 index 000000000000..370fd55946e7 --- /dev/null +++ b/dp/cloud/go/services/dp/active_mode_controller/action_generator/message_generator.go @@ -0,0 +1,78 @@ +/* +Copyright 2022 The Magma Authors. + +This source code is licensed under the BSD-style license found in the +LICENSE file in the root directory of this source tree. + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package action_generator + +import ( + "time" + + "magma/dp/cloud/go/services/dp/active_mode_controller/action_generator/sas" + "magma/dp/cloud/go/services/dp/storage" +) + +type ActionGenerator struct { + HeartbeatTimeout time.Duration + InactivityTimeout time.Duration + Rng RNG +} + +type RNG interface { + Int() int +} + +func (a *ActionGenerator) GenerateActions(cbsds []*storage.DetailedCbsd, now time.Time) []Action { + actions := make([]Action, 0, len(cbsds)) + for _, cbsd := range cbsds { + g := a.getPerCbsdMessageGenerator(cbsd, now) + actions = append(actions, g.generateActions(cbsd)...) 
+ } + return actions +} + +// TODO make this more readable +func (a *ActionGenerator) getPerCbsdMessageGenerator(cbsd *storage.DetailedCbsd, now time.Time) actionGeneratorPerCbsd { + isActive := now.Sub(cbsd.Cbsd.LastSeen.Time) <= a.InactivityTimeout + if cbsd.CbsdState.Name.String == "unregistered" { + if cbsd.Cbsd.IsDeleted.Bool { + return &deleteGenerator{} + } else if cbsd.Cbsd.ShouldDeregister.Bool { + return &acknowledgeDeregisterGenerator{} + } else if isActive && cbsd.DesiredState.Name.String == "registered" { + return &sasRequestGenerator{g: &sas.RegistrationRequestGenerator{}} + } + } else if cbsd.Cbsd.IsDeleted.Bool || + cbsd.Cbsd.ShouldDeregister.Bool || + cbsd.DesiredState.Name.String == "unregistered" { + return &sasRequestGenerator{g: &sas.DeregistrationRequestGenerator{}} + } else if cbsd.Cbsd.ShouldRelinquish.Bool { + if len(cbsd.Grants) == 0 { + return &acknowledgeRelinquishGenerator{} + } else { + return &sasRequestGenerator{g: &sas.RelinquishmentRequestGenerator{}} + } + } else if !isActive { + return &sasRequestGenerator{g: &sas.RelinquishmentRequestGenerator{}} + } else if len(cbsd.Cbsd.Channels) == 0 { + return &sasRequestGenerator{g: &sas.SpectrumInquiryRequestGenerator{}} + } else if len(cbsd.Cbsd.AvailableFrequencies) == 0 { + return &storeAvailableFrequenciesGenerator{} + } else { + nextSend := now.Add(a.HeartbeatTimeout).Unix() + gm := &grantManager{ + nextSendTimestamp: nextSend, + rng: a.Rng, + } + return &sasRequestGenerator{g: gm} + } + return ¬hingGenerator{} +} diff --git a/dp/cloud/go/services/dp/active_mode_controller/action_generator/message_generator_test.go b/dp/cloud/go/services/dp/active_mode_controller/action_generator/message_generator_test.go new file mode 100644 index 000000000000..3ba1e510c9c4 --- /dev/null +++ b/dp/cloud/go/services/dp/active_mode_controller/action_generator/message_generator_test.go @@ -0,0 +1,480 @@ +/* +Copyright 2022 The Magma Authors. 
+ +This source code is licensed under the BSD-style license found in the +LICENSE file in the root directory of this source tree. + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package action_generator_test + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "magma/dp/cloud/go/services/dp/active_mode_controller/action_generator" + "magma/dp/cloud/go/services/dp/active_mode_controller/action_generator/action" + "magma/dp/cloud/go/services/dp/active_mode_controller/action_generator/sas" + "magma/dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/frequency" + "magma/dp/cloud/go/services/dp/storage" + "magma/dp/cloud/go/services/dp/storage/db" +) + +func TestGenerateMessages(t *testing.T) { + const timeout = 100 * time.Second + now := time.Unix(currentTimestamp, 0) + data := []struct { + name string + cbsd *storage.DetailedCbsd + expected []action_generator.Action + }{{ + name: "Should do nothing for unregistered non active cbsd", + cbsd: NewCbsdBuilder(). + Inactive(). + WithState(unregistered). + Build(), + }, { + name: "Should do nothing when inactive cbsd has no grants", + cbsd: NewCbsdBuilder(). + Inactive(). + Build(), + }, { + name: "Should generate deregistration request for non active registered cbsd if desired", + cbsd: NewCbsdBuilder(). + Inactive(). + WithDesiredState(unregistered). + Build(), + expected: []action_generator.Action{ + makeRequest(sas.Deregistration, &sas.DeregistrationRequest{ + CbsdId: cbsdId, + }), + }, + }, { + name: "Should generate registration request for active non registered cbsd", + cbsd: NewCbsdBuilder(). + WithState(unregistered). 
+ Build(), + expected: []action_generator.Action{ + makeRequest(sas.Registration, &sas.RegistrationRequest{ + UserId: "some_user_id", + FccId: "some_fcc_id", + CbsdSerialNumber: "some_serial_number", + }), + }, + }, { + name: "Should generate spectrum inquiry request when there are no available channels", + cbsd: NewCbsdBuilder(). + Build(), + expected: []action_generator.Action{ + makeRequest(sas.SpectrumInquiry, &sas.SpectrumInquiryRequest{ + CbsdId: cbsdId, + InquiredSpectrum: []*sas.FrequencyRange{{ + LowFrequency: frequency.LowestHz, + HighFrequency: frequency.HighestHz, + }}, + }), + }, + }, { + name: "Should set available frequencies when they are nil but there are channels", + cbsd: NewCbsdBuilder(). + WithChannel(storage.Channel{ + LowFrequencyHz: 3590e6, + HighFrequencyHz: 3610e6, + MaxEirp: 37, + }). + Build(), + expected: []action_generator.Action{ + &action.Update{ + Data: &storage.DBCbsd{ + Id: db.MakeInt(dbId), + AvailableFrequencies: []uint32{ + 1<<9 | 1<<10 | 1<<11, + 1<<9 | 1<<10 | 1<<11, + 1 << 10, + 1 << 10, + }, + }, + Mask: db.NewIncludeMask("available_frequencies"), + }, + }, + }, { + name: "Should generate spectrum inquiry request when no suitable available frequencies", + cbsd: NewCbsdBuilder(). + WithChannel(someChannel). + WithAvailableFrequencies([]uint32{0, 0, 0, 0}). + Build(), + expected: []action_generator.Action{ + makeRequest(sas.SpectrumInquiry, &sas.SpectrumInquiryRequest{ + CbsdId: cbsdId, + InquiredSpectrum: []*sas.FrequencyRange{{ + LowFrequency: frequency.LowestHz, + HighFrequency: frequency.HighestHz, + }}, + }), + }, + }, { + name: "Should generate grant request when there are available frequencies and channels", + cbsd: NewCbsdBuilder(). + WithChannel(someChannel). + WithAvailableFrequencies([]uint32{0, 1 << 15, 0, 0}). 
+ Build(), + expected: []action_generator.Action{ + makeRequest(sas.Grant, &sas.GrantRequest{ + CbsdId: cbsdId, + OperationParam: &sas.OperationParam{ + MaxEirp: 35, + OperationFrequencyRange: &sas.FrequencyRange{ + LowFrequency: 3620e6, + HighFrequency: 3630e6, + }, + }, + }), + }, + }, { + name: "Should request two grants in carrier aggregation mode", + cbsd: NewCbsdBuilder(). + WithChannel(someChannel). + WithAvailableFrequencies([]uint32{0, 0, 0, 1<<10 | 1<<20}). + WithCarrierAggregation(). + Build(), + expected: []action_generator.Action{ + makeRequest(sas.Grant, &sas.GrantRequest{ + CbsdId: cbsdId, + OperationParam: &sas.OperationParam{ + MaxEirp: 31, + OperationFrequencyRange: &sas.FrequencyRange{ + LowFrequency: 3590e6, + HighFrequency: 3610e6, + }, + }, + }), + makeRequest(sas.Grant, &sas.GrantRequest{ + CbsdId: cbsdId, + OperationParam: &sas.OperationParam{ + MaxEirp: 31, + OperationFrequencyRange: &sas.FrequencyRange{ + LowFrequency: 3640e6, + HighFrequency: 3660e6, + }, + }, + }), + }, + }, { + name: "Should send heartbeat message for grant in granted state", + cbsd: NewCbsdBuilder(). + WithChannel(someChannel). + WithAvailableFrequencies(noAvailableFrequencies). + WithGrant(granted, someGrant). + Build(), + expected: []action_generator.Action{ + makeRequest(sas.Heartbeat, &sas.HeartbeatRequest{ + CbsdId: cbsdId, + GrantId: grantId, + OperationState: "GRANTED", + }), + }, + }, { + name: "Should not send anything if heartbeat is not needed yet", + cbsd: NewCbsdBuilder(). + WithChannel(someChannel). + WithAvailableFrequencies(noAvailableFrequencies). + WithGrant(authorized, &storage.DBGrant{ + GrantId: db.MakeString(grantId), + HeartbeatIntervalSec: db.MakeInt(int64(timeout/time.Second) + 1), + LastHeartbeatRequestTime: db.MakeTime(now), + LowFrequencyHz: db.MakeInt(3590e6), + HighFrequencyHz: db.MakeInt(3610e6), + }). + Build(), + expected: nil, + }, { + name: "Should send heartbeat request if necessary", + cbsd: NewCbsdBuilder(). 
+ WithChannel(someChannel). + WithAvailableFrequencies(noAvailableFrequencies). + WithGrant(authorized, &storage.DBGrant{ + GrantId: db.MakeString(grantId), + HeartbeatIntervalSec: db.MakeInt(int64(timeout / time.Second)), + LastHeartbeatRequestTime: db.MakeTime(now), + LowFrequencyHz: db.MakeInt(3590e6), + HighFrequencyHz: db.MakeInt(3610e6), + }). + Build(), + expected: []action_generator.Action{ + makeRequest(sas.Heartbeat, &sas.HeartbeatRequest{ + CbsdId: cbsdId, + GrantId: grantId, + OperationState: "AUTHORIZED", + }), + }, + }, { + name: "Should send relinquish message for unsync grant", + cbsd: NewCbsdBuilder(). + WithChannel(someChannel). + WithAvailableFrequencies(noAvailableFrequencies). + WithGrant(unsync, someGrant). + Build(), + expected: []action_generator.Action{ + makeRequest(sas.Relinquishment, &sas.RelinquishmentRequest{ + CbsdId: cbsdId, + GrantId: grantId, + }), + }, + }, { + name: "Should send relinquish message when inactive for too long", + cbsd: NewCbsdBuilder(). + Inactive(). + WithGrant(authorized, someGrant). + Build(), + expected: []action_generator.Action{ + makeRequest(sas.Relinquishment, &sas.RelinquishmentRequest{ + CbsdId: cbsdId, + GrantId: grantId, + }), + }, + }, { + name: "Should send relinquish message when requested", + cbsd: NewCbsdBuilder(). + ForRelinquish(). + WithGrant(authorized, someGrant). + Build(), + expected: []action_generator.Action{ + makeRequest(sas.Relinquishment, &sas.RelinquishmentRequest{ + CbsdId: cbsdId, + GrantId: grantId, + }), + }, + }, { + name: "Should deregister deleted cbsd", + cbsd: NewCbsdBuilder(). + Deleted(). + Build(), + expected: []action_generator.Action{ + makeRequest(sas.Deregistration, &sas.DeregistrationRequest{ + CbsdId: cbsdId, + }), + }, + }, { + name: "Should delete unregistered cbsd marked as deleted", + cbsd: NewCbsdBuilder(). + WithState(unregistered). + Deleted(). 
+ Build(), + expected: []action_generator.Action{ + &action.Delete{Id: dbId}, + }, + }, { + name: "Should deregister updated cbsd", + cbsd: NewCbsdBuilder(). + ForDeregistration(). + Build(), + expected: []action_generator.Action{ + makeRequest(sas.Deregistration, &sas.DeregistrationRequest{ + CbsdId: cbsdId, + }), + }, + }, { + name: "Should acknowledge update of unregistered cbsd marked as updated", + cbsd: NewCbsdBuilder(). + WithState(unregistered). + ForDeregistration(). + Build(), + expected: []action_generator.Action{ + &action.Update{ + Data: &storage.DBCbsd{ + Id: db.MakeInt(dbId), + ShouldDeregister: db.MakeBool(false), + }, + Mask: db.NewIncludeMask("should_deregister"), + }, + }, + }, { + name: "Should acknowledge relinquish when there are no grants", + cbsd: NewCbsdBuilder(). + ForRelinquish(). + Build(), + expected: []action_generator.Action{ + &action.Update{ + Data: &storage.DBCbsd{ + Id: db.MakeInt(dbId), + ShouldRelinquish: db.MakeBool(false), + }, + Mask: db.NewIncludeMask("should_relinquish"), + }, + }, + }} + for _, tt := range data { + t.Run(tt.name, func(t *testing.T) { + g := &action_generator.ActionGenerator{ + HeartbeatTimeout: timeout, + InactivityTimeout: timeout, + Rng: &stubRNG{}, + } + + cbsds := []*storage.DetailedCbsd{tt.cbsd} + actual := g.GenerateActions(cbsds, now) + + require.Len(t, actual, len(tt.expected)) + for i := range tt.expected { + assert.Equal(t, tt.expected[i], actual[i]) + } + }) + } +} + +func makeRequest(requestType string, payload any) action_generator.Action { + req := &storage.MutableRequest{ + Request: &storage.DBRequest{ + CbsdId: db.MakeInt(dbId), + Payload: payload, + }, + RequestType: &storage.DBRequestType{ + Name: db.MakeString(requestType), + }, + } + return &action.Request{Data: req} +} + +type stubRNG struct{} + +func (s *stubRNG) Int() int { + return 0 +} + +const ( + currentTimestamp = 1000 + dbId = 123 + cbsdId = "some_cbsd_id" + grantId = "some_grant_id" + + registered = "registered" + unregistered 
= "unregistered" + + granted = "granted" + authorized = "authorized" + unsync = "unsync" +) + +var ( + someChannel = storage.Channel{ + LowFrequencyHz: 3550e6, + HighFrequencyHz: 3700e6, + MaxEirp: 37, + } + someGrant = &storage.DBGrant{ + GrantId: db.MakeString(grantId), + LowFrequencyHz: db.MakeInt(3590e6), + HighFrequencyHz: db.MakeInt(3610e6), + } + noAvailableFrequencies = []uint32{0, 0, 0, 0} +) + +type cbsdBuilder struct { + cbsd *storage.DetailedCbsd +} + +func NewCbsdBuilder() *cbsdBuilder { + return &cbsdBuilder{ + cbsd: &storage.DetailedCbsd{ + Cbsd: &storage.DBCbsd{ + Id: db.MakeInt(dbId), + CbsdId: db.MakeString(cbsdId), + UserId: db.MakeString("some_user_id"), + FccId: db.MakeString("some_fcc_id"), + CbsdSerialNumber: db.MakeString("some_serial_number"), + LastSeen: db.MakeTime(time.Unix(currentTimestamp, 0)), + PreferredBandwidthMHz: db.MakeInt(20), + MinPower: db.MakeFloat(0), + MaxPower: db.MakeFloat(30), + AntennaGainDbi: db.MakeFloat(15), + NumberOfPorts: db.MakeInt(1), + SingleStepEnabled: db.MakeBool(false), + CbsdCategory: db.MakeString("A"), + MaxIbwMhx: db.MakeInt(150), + }, + CbsdState: &storage.DBCbsdState{ + Name: db.MakeString(registered), + }, + DesiredState: &storage.DBCbsdState{ + Name: db.MakeString(registered), + }, + }, + } +} + +func (c *cbsdBuilder) Build() *storage.DetailedCbsd { + return c.cbsd +} + +func (c *cbsdBuilder) Inactive() *cbsdBuilder { + c.cbsd.Cbsd.LastSeen = db.MakeTime(time.Unix(0, 0)) + return c +} + +func (c *cbsdBuilder) WithState(state string) *cbsdBuilder { + c.cbsd.CbsdState.Name = db.MakeString(state) + return c +} + +func (c *cbsdBuilder) WithDesiredState(state string) *cbsdBuilder { + c.cbsd.DesiredState.Name = db.MakeString(state) + return c +} + +func (c *cbsdBuilder) Deleted() *cbsdBuilder { + c.cbsd.Cbsd.IsDeleted = db.MakeBool(true) + return c +} + +func (c *cbsdBuilder) ForDeregistration() *cbsdBuilder { + c.cbsd.Cbsd.ShouldDeregister = db.MakeBool(true) + return c +} + +func (c *cbsdBuilder) 
ForRelinquish() *cbsdBuilder { + c.cbsd.Cbsd.ShouldRelinquish = db.MakeBool(true) + return c +} + +func (c *cbsdBuilder) WithChannel(channel storage.Channel) *cbsdBuilder { + c.cbsd.Cbsd.Channels = append(c.cbsd.Cbsd.Channels, channel) + return c +} + +func (c *cbsdBuilder) WithGrant(state string, grant *storage.DBGrant) *cbsdBuilder { + g := &storage.DetailedGrant{ + Grant: grant, + GrantState: &storage.DBGrantState{ + Name: db.MakeString(state), + }, + } + c.cbsd.Grants = append(c.cbsd.Grants, g) + return c +} + +func (c *cbsdBuilder) WithAvailableFrequencies(frequencies []uint32) *cbsdBuilder { + c.cbsd.Cbsd.AvailableFrequencies = frequencies + return c +} + +func (c *cbsdBuilder) WithCarrierAggregation() *cbsdBuilder { + c.cbsd.Cbsd.GrantRedundancy = db.MakeBool(true) + c.cbsd.Cbsd.CarrierAggregationEnabled = db.MakeBool(true) + return c +} + +func (c *cbsdBuilder) WithName(name string) *cbsdBuilder { + c.cbsd.Cbsd.CbsdId = db.MakeString(name) + c.cbsd.Cbsd.CbsdSerialNumber = db.MakeString(name) + c.cbsd.Cbsd.FccId = db.MakeString(name) + c.cbsd.Cbsd.UserId = db.MakeString(name) + return c +} diff --git a/dp/cloud/go/active_mode_controller/internal/message_generator/sas/deregistration.go b/dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/deregistration.go similarity index 61% rename from dp/cloud/go/active_mode_controller/internal/message_generator/sas/deregistration.go rename to dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/deregistration.go index 4dad08498e6e..0911ea83e5b8 100644 --- a/dp/cloud/go/active_mode_controller/internal/message_generator/sas/deregistration.go +++ b/dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/deregistration.go @@ -13,17 +13,20 @@ limitations under the License. 
package sas -import "magma/dp/cloud/go/active_mode_controller/protos/active_mode" +import ( + "magma/dp/cloud/go/services/dp/storage" +) type DeregistrationRequestGenerator struct{} -func (*DeregistrationRequestGenerator) GenerateRequests(cbsd *active_mode.Cbsd) []*Request { - req := &deregistrationRequest{ - CbsdId: cbsd.CbsdId, +func (*DeregistrationRequestGenerator) GenerateRequests(cbsd *storage.DetailedCbsd) []*storage.MutableRequest { + payload := &DeregistrationRequest{ + CbsdId: cbsd.Cbsd.CbsdId.String, } - return []*Request{asRequest(Deregistration, req)} + req := makeRequest(Deregistration, payload) + return []*storage.MutableRequest{req} } -type deregistrationRequest struct { +type DeregistrationRequest struct { CbsdId string `json:"cbsdId"` } diff --git a/dp/cloud/go/active_mode_controller/internal/message_generator/sas/deregistration_test.go b/dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/deregistration_test.go similarity index 71% rename from dp/cloud/go/active_mode_controller/internal/message_generator/sas/deregistration_test.go rename to dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/deregistration_test.go index 062f29e2fd27..10b31bc9650a 100644 --- a/dp/cloud/go/active_mode_controller/internal/message_generator/sas/deregistration_test.go +++ b/dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/deregistration_test.go @@ -16,14 +16,16 @@ package sas_test import ( "testing" - "magma/dp/cloud/go/active_mode_controller/internal/message_generator/sas" - "magma/dp/cloud/go/active_mode_controller/protos/active_mode" + "magma/dp/cloud/go/services/dp/active_mode_controller/action_generator/sas" + "magma/dp/cloud/go/services/dp/storage" + "magma/dp/cloud/go/services/dp/storage/db" ) func TestDeregistrationRequestGenerator(t *testing.T) { - cbsd := &active_mode.Cbsd{CbsdId: "some_id"} + cbsd := &storage.DBCbsd{CbsdId: db.MakeString("some_id")} + data := &storage.DetailedCbsd{Cbsd: cbsd} g := 
&sas.DeregistrationRequestGenerator{} - actual := g.GenerateRequests(cbsd) + actual := g.GenerateRequests(data) expected := []*request{{ requestType: "deregistrationRequest", data: `{ diff --git a/dp/cloud/go/active_mode_controller/internal/message_generator/sas/eirp/calculator.go b/dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/eirp/calculator.go similarity index 72% rename from dp/cloud/go/active_mode_controller/internal/message_generator/sas/eirp/calculator.go rename to dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/eirp/calculator.go index b557e4629502..d63b19f0ba86 100644 --- a/dp/cloud/go/active_mode_controller/internal/message_generator/sas/eirp/calculator.go +++ b/dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/eirp/calculator.go @@ -18,7 +18,7 @@ import ( "golang.org/x/exp/constraints" - "magma/dp/cloud/go/active_mode_controller/protos/active_mode" + "magma/dp/cloud/go/services/dp/storage" ) type calculator struct { @@ -28,12 +28,12 @@ type calculator struct { noPorts float64 } -func NewCalculator(antennaGain float32, capabilities *active_mode.EirpCapabilities) *calculator { +func NewCalculator(cbsd *storage.DBCbsd) *calculator { return &calculator{ - minPower: float64(capabilities.GetMinPower()), - maxPower: float64(capabilities.GetMaxPower()), - antennaGain: float64(antennaGain), - noPorts: float64(capabilities.GetNumberOfPorts()), + minPower: cbsd.MinPower.Float64, + maxPower: cbsd.MaxPower.Float64, + antennaGain: cbsd.AntennaGainDbi.Float64, + noPorts: float64(cbsd.NumberOfPorts.Int64), } } @@ -41,7 +41,7 @@ func (c *calculator) CalcLowerBound(bandwidthHz int) float64 { return math.Ceil(c.calcEirp(c.minPower, bandwidthHz)) } -func (c *calculator) CalcUpperBoundForRange(channels []*active_mode.Channel, low int64, high int64) float64 { +func (c *calculator) CalcUpperBoundForRange(channels []storage.Channel, low int64, high int64) float64 { eirp := c.calcUpperBound(int(high - low)) return 
calculateMaxEirp(channels, low, high, eirp) } @@ -60,7 +60,7 @@ const ( maxSASEirp = 37 ) -func calculateMaxEirp(channels []*active_mode.Channel, lowFrequencyHz int64, highFrequencyHz int64, maxEirp float64) float64 { +func calculateMaxEirp(channels []storage.Channel, lowFrequencyHz int64, highFrequencyHz int64, maxEirp float64) float64 { bw := int((highFrequencyHz - lowFrequencyHz) / 1e6) eirps := make([]float64, bw+1) for i := range eirps { @@ -78,15 +78,12 @@ func calculateMaxEirp(channels []*active_mode.Channel, lowFrequencyHz int64, hig return eirp } -func updateMaxEirpsForChannel(c *active_mode.Channel, eirps []float64, lowFrequencyHz int64, highFrequencyHz int64) { +func updateMaxEirpsForChannel(c storage.Channel, eirps []float64, lowFrequencyHz int64, highFrequencyHz int64) { low := max(lowFrequencyHz, c.LowFrequencyHz) high := min(highFrequencyHz, c.HighFrequencyHz) l := int((low - lowFrequencyHz + 1e6 - 1) / 1e6) r := int((high - lowFrequencyHz) / 1e6) - eirp := float64(maxSASEirp) - if c.MaxEirp != nil { - eirp = float64(c.MaxEirp.Value) - } + eirp := c.MaxEirp for ; l <= r; l++ { eirps[l] = max(eirps[l], eirp) } diff --git a/dp/cloud/go/active_mode_controller/internal/message_generator/sas/eirp/calculator_test.go b/dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/eirp/calculator_test.go similarity index 70% rename from dp/cloud/go/active_mode_controller/internal/message_generator/sas/eirp/calculator_test.go rename to dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/eirp/calculator_test.go index ccfe7dd099d8..81a5952e5057 100644 --- a/dp/cloud/go/active_mode_controller/internal/message_generator/sas/eirp/calculator_test.go +++ b/dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/eirp/calculator_test.go @@ -17,19 +17,20 @@ import ( "testing" "github.com/stretchr/testify/assert" - "google.golang.org/protobuf/types/known/wrapperspb" - 
"magma/dp/cloud/go/active_mode_controller/internal/message_generator/sas/eirp" - "magma/dp/cloud/go/active_mode_controller/protos/active_mode" + "magma/dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/eirp" + "magma/dp/cloud/go/services/dp/storage" + "magma/dp/cloud/go/services/dp/storage/db" ) func TestCalcLowerBound(t *testing.T) { - ec := &active_mode.EirpCapabilities{ - MinPower: 0, - MaxPower: 20, - NumberOfPorts: 2, + cbsd := &storage.DBCbsd{ + MinPower: db.MakeFloat(0), + MaxPower: db.MakeFloat(20), + AntennaGainDbi: db.MakeFloat(15), + NumberOfPorts: db.MakeInt(2), } - c := eirp.NewCalculator(15, ec) + c := eirp.NewCalculator(cbsd) actual := c.CalcLowerBound(10 * 1e6) assert.Equal(t, 9.0, actual) @@ -38,17 +39,17 @@ func TestCalcLowerBound(t *testing.T) { func TestCalculateUpperBoundForRange(t *testing.T) { data := []struct { name string - channels []*active_mode.Channel + channels []storage.Channel lowFrequencyHz int64 highFrequencyHz int64 maxEirp float64 expected float64 }{{ name: "Should calculate eirp for channel matching exactly", - channels: []*active_mode.Channel{{ + channels: []storage.Channel{{ LowFrequencyHz: 3595e6, HighFrequencyHz: 3605e6, - MaxEirp: wrapperspb.Float(20), + MaxEirp: 20, }}, lowFrequencyHz: 3595e6, highFrequencyHz: 3605e6, @@ -56,14 +57,14 @@ func TestCalculateUpperBoundForRange(t *testing.T) { expected: 20, }, { name: "Should calculate eirp for non overlapping channels", - channels: []*active_mode.Channel{{ + channels: []storage.Channel{{ LowFrequencyHz: 3590e6, HighFrequencyHz: 3600e6, - MaxEirp: wrapperspb.Float(25), + MaxEirp: 25, }, { LowFrequencyHz: 3600e6, HighFrequencyHz: 3610e6, - MaxEirp: wrapperspb.Float(20), + MaxEirp: 20, }}, lowFrequencyHz: 3590e6, highFrequencyHz: 3610e6, @@ -71,18 +72,18 @@ func TestCalculateUpperBoundForRange(t *testing.T) { expected: 20, }, { name: "Should calculate eirp for overlapping channels", - channels: []*active_mode.Channel{{ + channels: []storage.Channel{{ 
LowFrequencyHz: 3585e6, HighFrequencyHz: 3595e6, - MaxEirp: wrapperspb.Float(25), + MaxEirp: 25, }, { LowFrequencyHz: 3590e6, HighFrequencyHz: 3600e6, - MaxEirp: wrapperspb.Float(15), + MaxEirp: 15, }, { LowFrequencyHz: 3595e6, HighFrequencyHz: 3615e6, - MaxEirp: wrapperspb.Float(20), + MaxEirp: 20, }}, lowFrequencyHz: 3590e6, highFrequencyHz: 3610e6, @@ -90,10 +91,10 @@ func TestCalculateUpperBoundForRange(t *testing.T) { expected: 20, }, { name: "Should use given max eirp is it is smallest", - channels: []*active_mode.Channel{{ + channels: []storage.Channel{{ LowFrequencyHz: 3590e6, HighFrequencyHz: 3610e6, - MaxEirp: wrapperspb.Float(25), + MaxEirp: 25, }}, lowFrequencyHz: 3590e6, highFrequencyHz: 3610e6, @@ -101,9 +102,10 @@ func TestCalculateUpperBoundForRange(t *testing.T) { expected: 20, }, { name: "Should use max sas eirp by default", - channels: []*active_mode.Channel{{ + channels: []storage.Channel{{ LowFrequencyHz: 3590e6, HighFrequencyHz: 3610e6, + MaxEirp: 37, // TODO can this be null? 
}}, lowFrequencyHz: 3590e6, highFrequencyHz: 3610e6, @@ -111,13 +113,14 @@ func TestCalculateUpperBoundForRange(t *testing.T) { expected: 37, }, { name: "Should skip outside channels", - channels: []*active_mode.Channel{{ + channels: []storage.Channel{{ LowFrequencyHz: 3590e6, HighFrequencyHz: 3610e6, + MaxEirp: 37, }, { LowFrequencyHz: 3550e6, HighFrequencyHz: 3570e6, - MaxEirp: wrapperspb.Float(20), + MaxEirp: 20, }}, lowFrequencyHz: 3590e6, highFrequencyHz: 3610e6, @@ -126,11 +129,11 @@ func TestCalculateUpperBoundForRange(t *testing.T) { }} for _, tt := range data { t.Run(tt.name, func(t *testing.T) { - c := &active_mode.EirpCapabilities{ - MaxPower: float32(tt.maxEirp), - NumberOfPorts: int32(tt.highFrequencyHz-tt.lowFrequencyHz) / 1e6, + cbsd := &storage.DBCbsd{ + MaxPower: db.MakeFloat(tt.maxEirp), + NumberOfPorts: db.MakeInt((tt.highFrequencyHz - tt.lowFrequencyHz) / 1e6), } - calc := eirp.NewCalculator(0, c) + calc := eirp.NewCalculator(cbsd) actual := calc.CalcUpperBoundForRange(tt.channels, tt.lowFrequencyHz, tt.highFrequencyHz) assert.Equal(t, tt.expected, actual) }) diff --git a/dp/cloud/go/active_mode_controller/internal/message_generator/sas/frequency/const.go b/dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/frequency/const.go similarity index 100% rename from dp/cloud/go/active_mode_controller/internal/message_generator/sas/frequency/const.go rename to dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/frequency/const.go diff --git a/dp/cloud/go/active_mode_controller/internal/message_generator/sas/grant.go b/dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/grant.go similarity index 67% rename from dp/cloud/go/active_mode_controller/internal/message_generator/sas/grant.go rename to dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/grant.go index 6a592e3c0c90..1edb71a9b487 100644 --- a/dp/cloud/go/active_mode_controller/internal/message_generator/sas/grant.go +++ 
b/dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/grant.go @@ -14,42 +14,42 @@ limitations under the License. package sas import ( - "magma/dp/cloud/go/active_mode_controller/protos/active_mode" + "magma/dp/cloud/go/services/dp/storage" ) type GrantProcessor struct { CbsdId string Calc eirpCalculator - Channels []*active_mode.Channel + Channels []storage.Channel } type eirpCalculator interface { - CalcUpperBoundForRange([]*active_mode.Channel, int64, int64) float64 + CalcUpperBoundForRange([]storage.Channel, int64, int64) float64 } -func (g *GrantProcessor) ProcessGrant(frequency int64, bandwidth int64) *Request { +func (g *GrantProcessor) ProcessGrant(frequency int64, bandwidth int64) *storage.MutableRequest { low := frequency - bandwidth/2 high := frequency + bandwidth/2 maxEirp := g.Calc.CalcUpperBoundForRange(g.Channels, low, high) - req := &grantRequest{ + payload := &GrantRequest{ CbsdId: g.CbsdId, - OperationParam: &operationParam{ + OperationParam: &OperationParam{ MaxEirp: maxEirp, - OperationFrequencyRange: &frequencyRange{ + OperationFrequencyRange: &FrequencyRange{ LowFrequency: low, HighFrequency: high, }, }, } - return asRequest(Grant, req) + return makeRequest(Grant, payload) } -type grantRequest struct { +type GrantRequest struct { CbsdId string `json:"cbsdId"` - OperationParam *operationParam `json:"operationParam"` + OperationParam *OperationParam `json:"operationParam"` } -type operationParam struct { +type OperationParam struct { MaxEirp float64 `json:"maxEirp"` - OperationFrequencyRange *frequencyRange `json:"operationFrequencyRange"` + OperationFrequencyRange *FrequencyRange `json:"operationFrequencyRange"` } diff --git a/dp/cloud/go/active_mode_controller/internal/message_generator/sas/grant/available_frequencies.go b/dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/grant/available_frequencies.go similarity index 79% rename from 
dp/cloud/go/active_mode_controller/internal/message_generator/sas/grant/available_frequencies.go rename to dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/grant/available_frequencies.go index 329ff28e9c98..53108de653b8 100644 --- a/dp/cloud/go/active_mode_controller/internal/message_generator/sas/grant/available_frequencies.go +++ b/dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/grant/available_frequencies.go @@ -16,17 +16,17 @@ package grant import ( "sort" - "magma/dp/cloud/go/active_mode_controller/protos/active_mode" + "magma/dp/cloud/go/services/dp/storage" ) -func CalcAvailableFrequencies(channels []*active_mode.Channel, calc eirpCalculator) []uint32 { +func CalcAvailableFrequencies(channels []storage.Channel, calc eirpCalculator) []uint32 { masks := make([]uint32, 4) sort.Slice(channels, func(i, j int) bool { return channels[i].LowFrequencyHz < channels[j].LowFrequencyHz }) for i := 0; i < 4; i++ { bw := (i + 1) * unitToHz - minEirp := float32(calc.CalcLowerBound(bw)) + minEirp := calc.CalcLowerBound(bw) masks[i] = calcAvailableFrequenciesForBandwidth(channels, minEirp, int64(bw)) } return masks @@ -36,10 +36,10 @@ type eirpCalculator interface { CalcLowerBound(bandwidthHz int) float64 } -func calcAvailableFrequenciesForBandwidth(channels []*active_mode.Channel, minEirp float32, band int64) uint32 { +func calcAvailableFrequenciesForBandwidth(channels []storage.Channel, minEirp float64, band int64) uint32 { mask, begin, end := uint32(0), int64(0), int64(0) for _, c := range channels { - if c.MaxEirp != nil && c.MaxEirp.Value < minEirp { + if c.MaxEirp < minEirp { continue } if c.LowFrequencyHz > end { diff --git a/dp/cloud/go/active_mode_controller/internal/message_generator/sas/grant/available_frequencies_test.go b/dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/grant/available_frequencies_test.go similarity index 82% rename from 
dp/cloud/go/active_mode_controller/internal/message_generator/sas/grant/available_frequencies_test.go rename to dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/grant/available_frequencies_test.go index 2cad86e556de..f3ccb6b60409 100644 --- a/dp/cloud/go/active_mode_controller/internal/message_generator/sas/grant/available_frequencies_test.go +++ b/dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/grant/available_frequencies_test.go @@ -17,16 +17,15 @@ import ( "testing" "github.com/stretchr/testify/assert" - "google.golang.org/protobuf/types/known/wrapperspb" - "magma/dp/cloud/go/active_mode_controller/internal/message_generator/sas/grant" - "magma/dp/cloud/go/active_mode_controller/protos/active_mode" + "magma/dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/grant" + "magma/dp/cloud/go/services/dp/storage" ) func TestCalcAvailableFrequencies(t *testing.T) { testData := []struct { name string - channels []*active_mode.Channel + channels []storage.Channel eirps []float64 expected []uint32 }{{ @@ -36,9 +35,10 @@ func TestCalcAvailableFrequencies(t *testing.T) { expected: []uint32{0, 0, 0, 0}, }, { name: "Should handle single channel", - channels: []*active_mode.Channel{{ + channels: []storage.Channel{{ LowFrequencyHz: 3590e6, HighFrequencyHz: 3610e6, + MaxEirp: 37, }}, eirps: []float64{0, 0, 0, 0}, expected: []uint32{ @@ -49,12 +49,14 @@ func TestCalcAvailableFrequencies(t *testing.T) { }, }, { name: "Should handle joined channels", - channels: []*active_mode.Channel{{ + channels: []storage.Channel{{ LowFrequencyHz: 3590e6, HighFrequencyHz: 3600e6, + MaxEirp: 37, }, { LowFrequencyHz: 3600e6, HighFrequencyHz: 3610e6, + MaxEirp: 37, }}, eirps: []float64{0, 0, 0, 0}, expected: []uint32{ @@ -65,12 +67,14 @@ func TestCalcAvailableFrequencies(t *testing.T) { }, }, { name: "Should handle disjoint channels", - channels: []*active_mode.Channel{{ + channels: []storage.Channel{{ LowFrequencyHz: 3590e6, HighFrequencyHz: 
3600e6, + MaxEirp: 37, }, { LowFrequencyHz: 3610e6, HighFrequencyHz: 3620e6, + MaxEirp: 37, }}, eirps: []float64{0, 0, 0, 0}, expected: []uint32{ @@ -81,12 +85,14 @@ func TestCalcAvailableFrequencies(t *testing.T) { }, }, { name: "Should handle nested channels", - channels: []*active_mode.Channel{{ + channels: []storage.Channel{{ LowFrequencyHz: 3595e6, HighFrequencyHz: 3605e6, + MaxEirp: 37, }, { LowFrequencyHz: 3590e6, HighFrequencyHz: 3610e6, + MaxEirp: 37, }}, eirps: []float64{0, 0, 0, 0}, expected: []uint32{ @@ -97,9 +103,10 @@ func TestCalcAvailableFrequencies(t *testing.T) { }, }, { name: "Should handle borders", - channels: []*active_mode.Channel{{ + channels: []storage.Channel{{ LowFrequencyHz: 3550e6, HighFrequencyHz: 3700e6, + MaxEirp: 37, }}, eirps: []float64{0, 0, 0, 0}, expected: []uint32{ @@ -111,12 +118,14 @@ func TestCalcAvailableFrequencies(t *testing.T) { }, { name: "Should calculate channels not aligned to multiple of 5MHz", - channels: []*active_mode.Channel{{ + channels: []storage.Channel{{ LowFrequencyHz: 3591e6, HighFrequencyHz: 3600e6, + MaxEirp: 37, }, { LowFrequencyHz: 3610e6, HighFrequencyHz: 3629e6, + MaxEirp: 37, }}, eirps: []float64{0, 0, 0, 0}, expected: []uint32{ @@ -127,23 +136,23 @@ func TestCalcAvailableFrequencies(t *testing.T) { }, }, { name: "Should skip channels with too low eirp", - channels: []*active_mode.Channel{{ + channels: []storage.Channel{{ LowFrequencyHz: 3590e6, HighFrequencyHz: 3610e6, - MaxEirp: wrapperspb.Float(-1), + MaxEirp: -1, }}, eirps: []float64{0, 0, 0, 0}, expected: []uint32{0, 0, 0, 0}, }, { name: "Should use correct eirp per bandwidth", - channels: []*active_mode.Channel{{ + channels: []storage.Channel{{ LowFrequencyHz: 3590e6, HighFrequencyHz: 3600e6, - MaxEirp: wrapperspb.Float(5), + MaxEirp: 5, }, { LowFrequencyHz: 3600e6, HighFrequencyHz: 3610e6, - MaxEirp: wrapperspb.Float(10), + MaxEirp: 10, }}, eirps: []float64{11, 10, 9, 5}, expected: []uint32{ diff --git 
a/dp/cloud/go/active_mode_controller/internal/message_generator/sas/grant/frequency.go b/dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/grant/frequency.go similarity index 65% rename from dp/cloud/go/active_mode_controller/internal/message_generator/sas/grant/frequency.go rename to dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/grant/frequency.go index 09abebf366c8..b614781a64dc 100644 --- a/dp/cloud/go/active_mode_controller/internal/message_generator/sas/grant/frequency.go +++ b/dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/grant/frequency.go @@ -13,12 +13,14 @@ limitations under the License. package grant -import "magma/dp/cloud/go/active_mode_controller/protos/active_mode" +import ( + "magma/dp/cloud/go/services/dp/storage" +) -func GetFrequencyGrantMapping(grants []*active_mode.Grant) map[int64]*active_mode.Grant { - m := make(map[int64]*active_mode.Grant, len(grants)) +func GetFrequencyGrantMapping(grants []*storage.DetailedGrant) map[int64]*storage.DetailedGrant { + m := make(map[int64]*storage.DetailedGrant, len(grants)) for _, g := range grants { - m[(g.HighFrequencyHz+g.LowFrequencyHz)/2] = g + m[(g.Grant.HighFrequencyHz.Int64+g.Grant.LowFrequencyHz.Int64)/2] = g } return m } diff --git a/dp/cloud/go/active_mode_controller/internal/message_generator/sas/grant/frequency_test.go b/dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/grant/frequency_test.go similarity index 56% rename from dp/cloud/go/active_mode_controller/internal/message_generator/sas/grant/frequency_test.go rename to dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/grant/frequency_test.go index 543c36e5b1eb..90f3448907dc 100644 --- a/dp/cloud/go/active_mode_controller/internal/message_generator/sas/grant/frequency_test.go +++ b/dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/grant/frequency_test.go @@ -18,22 +18,27 @@ import ( "github.com/stretchr/testify/assert" - 
"magma/dp/cloud/go/active_mode_controller/internal/message_generator/sas/grant" - "magma/dp/cloud/go/active_mode_controller/protos/active_mode" + "magma/dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/grant" + "magma/dp/cloud/go/services/dp/storage" + "magma/dp/cloud/go/services/dp/storage/db" ) func TestGetFrequencyGrantMapping(t *testing.T) { - grants := []*active_mode.Grant{{ - Id: "some_id", - LowFrequencyHz: 3580e6, - HighFrequencyHz: 3590e6, + grants := []*storage.DetailedGrant{{ + Grant: &storage.DBGrant{ + GrantId: db.MakeString("some_id"), + LowFrequencyHz: db.MakeInt(3580e6), + HighFrequencyHz: db.MakeInt(3590e6), + }, }, { - Id: "other_id", - LowFrequencyHz: 3590e6, - HighFrequencyHz: 3610e6, + Grant: &storage.DBGrant{ + GrantId: db.MakeString("other_id"), + LowFrequencyHz: db.MakeInt(3590e6), + HighFrequencyHz: db.MakeInt(3610e6), + }, }} actual := grant.GetFrequencyGrantMapping(grants) - expected := map[int64]*active_mode.Grant{ + expected := map[int64]*storage.DetailedGrant{ 3585e6: grants[0], 3600e6: grants[1], } diff --git a/dp/cloud/go/active_mode_controller/internal/message_generator/sas/grant/per_bandwidth.go b/dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/grant/per_bandwidth.go similarity index 100% rename from dp/cloud/go/active_mode_controller/internal/message_generator/sas/grant/per_bandwidth.go rename to dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/grant/per_bandwidth.go diff --git a/dp/cloud/go/active_mode_controller/internal/message_generator/sas/grant/per_bandwidth_test.go b/dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/grant/per_bandwidth_test.go similarity index 98% rename from dp/cloud/go/active_mode_controller/internal/message_generator/sas/grant/per_bandwidth_test.go rename to dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/grant/per_bandwidth_test.go index 289d5cf1a5c0..8cd9cf487df8 100644 --- 
a/dp/cloud/go/active_mode_controller/internal/message_generator/sas/grant/per_bandwidth_test.go +++ b/dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/grant/per_bandwidth_test.go @@ -18,7 +18,7 @@ import ( "github.com/stretchr/testify/assert" - "magma/dp/cloud/go/active_mode_controller/internal/message_generator/sas/grant" + "magma/dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/grant" ) const msg = "\nexpected: %b\nactual: %b" diff --git a/dp/cloud/go/active_mode_controller/internal/message_generator/sas/grant/preferences.go b/dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/grant/preferences.go similarity index 86% rename from dp/cloud/go/active_mode_controller/internal/message_generator/sas/grant/preferences.go rename to dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/grant/preferences.go index 293fd4a5ded3..63f367aff468 100644 --- a/dp/cloud/go/active_mode_controller/internal/message_generator/sas/grant/preferences.go +++ b/dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/grant/preferences.go @@ -13,7 +13,9 @@ limitations under the License. 
package grant -import "magma/dp/cloud/go/active_mode_controller/protos/active_mode" +import ( + "magma/dp/cloud/go/services/dp/storage" +) type SelectionData struct { BandwidthHz int64 @@ -27,10 +29,10 @@ const ( BestEffort ) -func PickBandwidthSelectionOrder(settings *active_mode.GrantSettings, maxBandwidthHz int64, oldBandwidthHz int64) []*SelectionData { +func PickBandwidthSelectionOrder(cbsd *storage.DBCbsd, maxBandwidthHz int64, oldBandwidthHz int64) []*SelectionData { if oldBandwidthHz != 0 { redundancy := NoRedundancy - if settings.GrantRedundancyEnabled { + if cbsd.GrantRedundancy.Bool { redundancy = BestEffort } return []*SelectionData{{ @@ -39,10 +41,10 @@ func PickBandwidthSelectionOrder(settings *active_mode.GrantSettings, maxBandwid }} } order := bandwidthSelectionOrder[2] - if settings.GrantRedundancyEnabled { + if cbsd.GrantRedundancy.Bool { order = bandwidthSelectionOrder[1] } - if settings.CarrierAggregationEnabled { + if cbsd.CarrierAggregationEnabled.Bool { order = bandwidthSelectionOrder[0] } return filterBandwidth(order, maxBandwidthHz) diff --git a/dp/cloud/go/active_mode_controller/internal/message_generator/sas/grant/preferences_test.go b/dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/grant/preferences_test.go similarity index 81% rename from dp/cloud/go/active_mode_controller/internal/message_generator/sas/grant/preferences_test.go rename to dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/grant/preferences_test.go index 19599b66caef..729eb6415904 100644 --- a/dp/cloud/go/active_mode_controller/internal/message_generator/sas/grant/preferences_test.go +++ b/dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/grant/preferences_test.go @@ -18,20 +18,21 @@ import ( "github.com/stretchr/testify/assert" - "magma/dp/cloud/go/active_mode_controller/internal/message_generator/sas/grant" - "magma/dp/cloud/go/active_mode_controller/protos/active_mode" + 
"magma/dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/grant" + "magma/dp/cloud/go/services/dp/storage" + "magma/dp/cloud/go/services/dp/storage/db" ) func TestPickBandwidthSelectionOrder(t *testing.T) { testData := []struct { name string - settings *active_mode.GrantSettings + settings *storage.DBCbsd maxBandwidthHz int64 oldBandwidthHz int64 expected []*grant.SelectionData }{{ name: "Should pick no redundancy order when redundancy is disabled", - settings: &active_mode.GrantSettings{}, + settings: &storage.DBCbsd{}, maxBandwidthHz: 20e6, expected: []*grant.SelectionData{ {BandwidthHz: 20e6, UseRedundancy: grant.NoRedundancy}, @@ -41,8 +42,8 @@ func TestPickBandwidthSelectionOrder(t *testing.T) { }, }, { name: "Should pick best effort order for redundancy without carrier aggregation", - settings: &active_mode.GrantSettings{ - GrantRedundancyEnabled: true, + settings: &storage.DBCbsd{ + GrantRedundancy: db.MakeBool(true), }, maxBandwidthHz: 20e6, expected: []*grant.SelectionData{ @@ -53,9 +54,9 @@ func TestPickBandwidthSelectionOrder(t *testing.T) { }, }, { name: "Should pick custom order for carrier aggregation", - settings: &active_mode.GrantSettings{ - GrantRedundancyEnabled: true, - CarrierAggregationEnabled: true, + settings: &storage.DBCbsd{ + GrantRedundancy: db.MakeBool(true), + CarrierAggregationEnabled: db.MakeBool(true), }, maxBandwidthHz: 20e6, expected: []*grant.SelectionData{ @@ -67,9 +68,9 @@ func TestPickBandwidthSelectionOrder(t *testing.T) { }, }, { name: "Should filter out too large bandwidths", - settings: &active_mode.GrantSettings{ - GrantRedundancyEnabled: true, - CarrierAggregationEnabled: true, + settings: &storage.DBCbsd{ + GrantRedundancy: db.MakeBool(true), + CarrierAggregationEnabled: db.MakeBool(true), }, maxBandwidthHz: 10e6, expected: []*grant.SelectionData{ @@ -79,7 +80,7 @@ func TestPickBandwidthSelectionOrder(t *testing.T) { }, }, { name: "Should pick no redundancy for existing bandwidth without redundancy", - 
settings: &active_mode.GrantSettings{}, + settings: &storage.DBCbsd{}, maxBandwidthHz: 20e6, oldBandwidthHz: 15e6, expected: []*grant.SelectionData{ @@ -87,8 +88,8 @@ func TestPickBandwidthSelectionOrder(t *testing.T) { }, }, { name: "Should pick best effort for existing bandwidth with redundancy", - settings: &active_mode.GrantSettings{ - GrantRedundancyEnabled: true, + settings: &storage.DBCbsd{ + GrantRedundancy: db.MakeBool(true), }, maxBandwidthHz: 20e6, oldBandwidthHz: 15e6, diff --git a/dp/cloud/go/active_mode_controller/internal/message_generator/sas/grant/selection.go b/dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/grant/selection.go similarity index 65% rename from dp/cloud/go/active_mode_controller/internal/message_generator/sas/grant/selection.go rename to dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/grant/selection.go index 74302bd871c3..52c67573e187 100644 --- a/dp/cloud/go/active_mode_controller/internal/message_generator/sas/grant/selection.go +++ b/dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/grant/selection.go @@ -16,8 +16,8 @@ package grant import ( "math/bits" - "magma/dp/cloud/go/active_mode_controller/internal/message_generator/sas/frequency" - "magma/dp/cloud/go/active_mode_controller/protos/active_mode" + "magma/dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/frequency" + "magma/dp/cloud/go/services/dp/storage" ) type Processor[T any] interface { @@ -30,22 +30,19 @@ type Processors[T any] struct { Add Processor[T] } -func ProcessGrants[T any]( - grants []*active_mode.Grant, pref *active_mode.FrequencyPreferences, - settings *active_mode.GrantSettings, processors Processors[T], index int, -) []T { +func ProcessGrants[T any](cbsd *storage.DBCbsd, grants []*storage.DBGrant, processors Processors[T], index int) []T { oldBw, oldGrants := calculateOldGrants(grants) - bw, newGrants := selectGrants(pref, settings, oldGrants, oldBw, index) + bw, newGrants := 
selectGrants(cbsd, oldGrants, oldBw, index) return processGrants(processors, oldGrants, newGrants, bw) } -func calculateOldGrants(grants []*active_mode.Grant) (int64, uint32) { +func calculateOldGrants(grants []*storage.DBGrant) (int64, uint32) { mask := uint32(0) bw := int64(0) for i, g := range grants { - mask |= hzToMask((g.HighFrequencyHz + g.LowFrequencyHz) / 2) + mask |= hzToMask((g.HighFrequencyHz.Int64 + g.LowFrequencyHz.Int64) / 2) if i == 0 { - bw = g.HighFrequencyHz - g.LowFrequencyHz + bw = g.HighFrequencyHz.Int64 - g.LowFrequencyHz.Int64 } } return bw, mask @@ -57,14 +54,11 @@ func hzToMask(hz int64) uint32 { const unitToHz = 5e6 -func selectGrants( - pref *active_mode.FrequencyPreferences, settings *active_mode.GrantSettings, - oldGrants uint32, oldBandwidthHz int64, index int, -) (int64, uint32) { - prefMask := preferencesToMask(pref.FrequenciesMhz) - order := PickBandwidthSelectionOrder(settings, int64(pref.BandwidthMhz)*1e6, oldBandwidthHz) +func selectGrants(cbsd *storage.DBCbsd, oldGrants uint32, oldBandwidthHz int64, index int) (int64, uint32) { + prefMask := preferencesToMask(cbsd.PreferredFrequenciesMHz) + order := PickBandwidthSelectionOrder(cbsd, cbsd.PreferredBandwidthMHz.Int64*1e6, oldBandwidthHz) for _, o := range order { - newGrants := selectGrantsForBandwidth(o, settings, oldGrants, prefMask, index) + newGrants := selectGrantsForBandwidth(o, cbsd, oldGrants, prefMask, index) if newGrants != 0 { return o.BandwidthHz, newGrants } @@ -72,18 +66,18 @@ func selectGrants( return 0, 0 } -func preferencesToMask(frequenciesMHz []int32) []uint32 { +func preferencesToMask(frequenciesMHz []int64) []uint32 { masks := make([]uint32, len(frequenciesMHz)) for i, f := range frequenciesMHz { - masks[i] = hzToMask(int64(f) * 1e6) + masks[i] = hzToMask(f * 1e6) } return masks } -func selectGrantsForBandwidth(data *SelectionData, settings *active_mode.GrantSettings, grants uint32, pref []uint32, index int) uint32 { +func selectGrantsForBandwidth(data 
*SelectionData, cbsd *storage.DBCbsd, grants uint32, pref []uint32, index int) uint32 { minWidth := int(data.BandwidthHz/unitToHz - 1) - maxWidth := int((int64(settings.MaxIbwMhz)*1e6 - data.BandwidthHz) / unitToHz) - available := settings.AvailableFrequencies[minWidth] + maxWidth := int((cbsd.MaxIbwMhx.Int64*1e6 - data.BandwidthHz) / unitToHz) + available := cbsd.AvailableFrequencies[minWidth] if minWidth > maxWidth { maxWidth = minWidth } diff --git a/dp/cloud/go/active_mode_controller/internal/message_generator/sas/grant/selection_test.go b/dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/grant/selection_test.go similarity index 50% rename from dp/cloud/go/active_mode_controller/internal/message_generator/sas/grant/selection_test.go rename to dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/grant/selection_test.go index 9059dacc001c..2c4dc153eeaa 100644 --- a/dp/cloud/go/active_mode_controller/internal/message_generator/sas/grant/selection_test.go +++ b/dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/grant/selection_test.go @@ -18,38 +18,33 @@ import ( "github.com/stretchr/testify/assert" - "magma/dp/cloud/go/active_mode_controller/internal/message_generator/sas/grant" - "magma/dp/cloud/go/active_mode_controller/protos/active_mode" + "magma/dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/grant" + "magma/dp/cloud/go/services/dp/storage" + "magma/dp/cloud/go/services/dp/storage/db" ) func TestProcessGrants(t *testing.T) { testData := []struct { name string - grants []*active_mode.Grant - pref *active_mode.FrequencyPreferences - settings *active_mode.GrantSettings + cbsd *storage.DBCbsd + grants []*storage.DBGrant expected []grantData }{{ - name: "Should do nothing when no grants or available frequencies", - grants: nil, - pref: &active_mode.FrequencyPreferences{ - BandwidthMhz: 20, - }, - settings: &active_mode.GrantSettings{ - MaxIbwMhz: 150, - AvailableFrequencies: []uint32{0, 0, 0, 
0}, + name: "Should do nothing when no grants or available frequencies", + cbsd: &storage.DBCbsd{ + PreferredBandwidthMHz: db.MakeInt(20), + MaxIbwMhx: db.MakeInt(150), + AvailableFrequencies: []uint32{0, 0, 0, 0}, }, expected: []grantData{}, }, { - name: "Should select only one grant in no redundancy mode", - grants: nil, - pref: &active_mode.FrequencyPreferences{ - BandwidthMhz: 20, - }, - settings: &active_mode.GrantSettings{ - MaxIbwMhz: 150, - AvailableFrequencies: allAvailable, + name: "Should select only one grant in no redundancy mode", + cbsd: &storage.DBCbsd{ + PreferredBandwidthMHz: db.MakeInt(20), + MaxIbwMhx: db.MakeInt(150), + AvailableFrequencies: allAvailable, }, + grants: nil, expected: []grantData{{ action: add, frequency: 3560e6, @@ -57,33 +52,29 @@ func TestProcessGrants(t *testing.T) { }}, }, { name: "Should select keep existing grant in no redundancy mode", - grants: []*active_mode.Grant{{ - LowFrequencyHz: 3590e6, - HighFrequencyHz: 3610e6, + cbsd: &storage.DBCbsd{ + PreferredBandwidthMHz: db.MakeInt(20), + MaxIbwMhx: db.MakeInt(150), + AvailableFrequencies: allAvailable, + }, + grants: []*storage.DBGrant{{ + LowFrequencyHz: db.MakeInt(3590e6), + HighFrequencyHz: db.MakeInt(3610e6), }}, - pref: &active_mode.FrequencyPreferences{ - BandwidthMhz: 20, - }, - settings: &active_mode.GrantSettings{ - MaxIbwMhz: 150, - AvailableFrequencies: allAvailable, - }, expected: []grantData{{ action: keep, frequency: 3600e6, bandwidth: 20e6, }}, }, { - name: "Should select grants for redundancy", - grants: nil, - pref: &active_mode.FrequencyPreferences{ - BandwidthMhz: 20, - }, - settings: &active_mode.GrantSettings{ - GrantRedundancyEnabled: true, - MaxIbwMhz: 150, - AvailableFrequencies: allAvailable, + name: "Should select grants for redundancy", + cbsd: &storage.DBCbsd{ + GrantRedundancy: db.MakeBool(true), + PreferredBandwidthMHz: db.MakeInt(20), + MaxIbwMhx: db.MakeInt(150), + AvailableFrequencies: allAvailable, }, + grants: nil, expected: []grantData{{ 
action: add, frequency: 3560e6, @@ -94,17 +85,15 @@ func TestProcessGrants(t *testing.T) { bandwidth: 20e6, }}, }, { - name: "Should use custom ordering in carrier aggregation mode", - grants: nil, - pref: &active_mode.FrequencyPreferences{ - BandwidthMhz: 15, - }, - settings: &active_mode.GrantSettings{ - GrantRedundancyEnabled: true, - CarrierAggregationEnabled: true, - MaxIbwMhz: 150, + name: "Should use custom ordering in carrier aggregation mode", + cbsd: &storage.DBCbsd{ + GrantRedundancy: db.MakeBool(true), + CarrierAggregationEnabled: db.MakeBool(true), + PreferredBandwidthMHz: db.MakeInt(15), + MaxIbwMhx: db.MakeInt(150), AvailableFrequencies: allAvailable, }, + grants: nil, expected: []grantData{{ action: add, frequency: 3555e6, @@ -115,17 +104,15 @@ func TestProcessGrants(t *testing.T) { bandwidth: 10e6, }}, }, { - name: "Should use frequency and bandwidth preferences", - grants: nil, - pref: &active_mode.FrequencyPreferences{ - BandwidthMhz: 15, - FrequenciesMhz: []int32{3570}, - }, - settings: &active_mode.GrantSettings{ - GrantRedundancyEnabled: true, - MaxIbwMhz: 150, - AvailableFrequencies: allAvailable, + name: "Should use frequency and bandwidth preferences", + cbsd: &storage.DBCbsd{ + GrantRedundancy: db.MakeBool(true), + PreferredBandwidthMHz: db.MakeInt(15), + PreferredFrequenciesMHz: []int64{3570}, + MaxIbwMhx: db.MakeInt(150), + AvailableFrequencies: allAvailable, }, + grants: nil, expected: []grantData{{ action: add, frequency: 3570e6, @@ -136,33 +123,29 @@ func TestProcessGrants(t *testing.T) { bandwidth: 15e6, }}, }, { - name: "Should add only one grant if only available in standard redundancy", - grants: nil, - pref: &active_mode.FrequencyPreferences{ - BandwidthMhz: 20, - }, - settings: &active_mode.GrantSettings{ - GrantRedundancyEnabled: true, - MaxIbwMhz: 30, - AvailableFrequencies: allAvailable, + name: "Should add only one grant if only available in standard redundancy", + cbsd: &storage.DBCbsd{ + GrantRedundancy: db.MakeBool(true), 
+ PreferredBandwidthMHz: db.MakeInt(20), + MaxIbwMhx: db.MakeInt(30), + AvailableFrequencies: allAvailable, }, + grants: nil, expected: []grantData{{ action: add, frequency: 3560e6, bandwidth: 20e6, }}, }, { - name: "Should go to next bandwidth if only one available in carrier aggregation mode", - grants: nil, - pref: &active_mode.FrequencyPreferences{ - BandwidthMhz: 20, - }, - settings: &active_mode.GrantSettings{ - GrantRedundancyEnabled: true, - CarrierAggregationEnabled: true, - MaxIbwMhz: 150, + name: "Should go to next bandwidth if only one available in carrier aggregation mode", + cbsd: &storage.DBCbsd{ + GrantRedundancy: db.MakeBool(true), + CarrierAggregationEnabled: db.MakeBool(true), + PreferredBandwidthMHz: db.MakeInt(20), + MaxIbwMhx: db.MakeInt(150), AvailableFrequencies: []uint32{0, 1 << 10, 1 << 10, 0}, }, + grants: nil, expected: []grantData{{ action: add, frequency: 3600e6, @@ -176,7 +159,7 @@ func TestProcessGrants(t *testing.T) { Del: &stubGrantProcessor{action: del}, Add: &stubGrantProcessor{action: add}, } - actual := grant.ProcessGrants[grantData](tt.grants, tt.pref, tt.settings, p, 0) + actual := grant.ProcessGrants[grantData](tt.cbsd, tt.grants, p, 0) assert.Equal(t, tt.expected, actual) }) } diff --git a/dp/cloud/go/active_mode_controller/internal/message_generator/sas/grant_test.go b/dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/grant_test.go similarity index 78% rename from dp/cloud/go/active_mode_controller/internal/message_generator/sas/grant_test.go rename to dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/grant_test.go index 991c2c633c14..9feb80a25942 100644 --- a/dp/cloud/go/active_mode_controller/internal/message_generator/sas/grant_test.go +++ b/dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/grant_test.go @@ -17,10 +17,9 @@ import ( "testing" "github.com/stretchr/testify/assert" - "google.golang.org/protobuf/types/known/wrapperspb" - 
"magma/dp/cloud/go/active_mode_controller/internal/message_generator/sas" - "magma/dp/cloud/go/active_mode_controller/protos/active_mode" + "magma/dp/cloud/go/services/dp/active_mode_controller/action_generator/sas" + "magma/dp/cloud/go/services/dp/storage" ) func TestGrantProcessor(t *testing.T) { @@ -32,10 +31,10 @@ func TestGrantProcessor(t *testing.T) { calc := &stubEirpCalculator{ eirp: eirp, } - channels := []*active_mode.Channel{{ + channels := []storage.Channel{{ LowFrequencyHz: 3550e6, HighFrequencyHz: 3700e6, - MaxEirp: wrapperspb.Float(eirp), + MaxEirp: eirp, }} p := &sas.GrantProcessor{ CbsdId: "some_id", @@ -63,13 +62,13 @@ func TestGrantProcessor(t *testing.T) { } type stubEirpCalculator struct { - channels []*active_mode.Channel + channels []storage.Channel low int64 high int64 eirp float64 } -func (s *stubEirpCalculator) CalcUpperBoundForRange(channels []*active_mode.Channel, low int64, high int64) float64 { +func (s *stubEirpCalculator) CalcUpperBoundForRange(channels []storage.Channel, low int64, high int64) float64 { s.channels = channels s.low = low s.high = high diff --git a/dp/cloud/go/active_mode_controller/internal/message_generator/sas/heartbeat.go b/dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/heartbeat.go similarity index 53% rename from dp/cloud/go/active_mode_controller/internal/message_generator/sas/heartbeat.go rename to dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/heartbeat.go index 79dc4422e6a8..a14fcc62f431 100644 --- a/dp/cloud/go/active_mode_controller/internal/message_generator/sas/heartbeat.go +++ b/dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/heartbeat.go @@ -16,43 +16,48 @@ package sas import ( "strings" - "magma/dp/cloud/go/active_mode_controller/protos/active_mode" + "magma/dp/cloud/go/services/dp/storage" ) type HeartbeatProcessor struct { NextSendTimestamp int64 CbsdId string - Grants map[int64]*active_mode.Grant + Grants map[int64]*storage.DetailedGrant 
} -func (h *HeartbeatProcessor) ProcessGrant(frequency int64, _ int64) *Request { +func (h *HeartbeatProcessor) ProcessGrant(frequency int64, _ int64) *storage.MutableRequest { grant := h.Grants[frequency] - if grant.State == active_mode.GrantState_Unsync { - req := &relinquishmentRequest{ + if grant.GrantState.Name.String == unsync { + req := &RelinquishmentRequest{ CbsdId: h.CbsdId, - GrantId: grant.Id, + GrantId: grant.Grant.GrantId.String, } - return asRequest(Relinquishment, req) + return makeRequest(Relinquishment, req) } - if grant.State == active_mode.GrantState_Granted || - shouldSendNow(grant, h.NextSendTimestamp) { - req := &heartbeatRequest{ + if grant.GrantState.Name.String == granted || + shouldSendNow(grant.Grant, h.NextSendTimestamp) { + req := &HeartbeatRequest{ CbsdId: h.CbsdId, - GrantId: grant.Id, - OperationState: strings.ToUpper(grant.State.String()), + GrantId: grant.Grant.GrantId.String, + OperationState: strings.ToUpper(grant.GrantState.Name.String), } - return asRequest(Heartbeat, req) + return makeRequest(Heartbeat, req) } return nil } -type heartbeatRequest struct { +type HeartbeatRequest struct { CbsdId string `json:"cbsdId"` GrantId string `json:"grantId"` OperationState string `json:"operationState"` } -func shouldSendNow(grant *active_mode.Grant, nextSendTimestamp int64) bool { - deadline := grant.HeartbeatIntervalSec + grant.LastHeartbeatTimestamp +const ( + granted = "granted" + unsync = "unsync" +) + +func shouldSendNow(grant *storage.DBGrant, nextSendTimestamp int64) bool { + deadline := grant.HeartbeatIntervalSec.Int64 + grant.LastHeartbeatRequestTime.Time.Unix() return deadline <= nextSendTimestamp } diff --git a/dp/cloud/go/active_mode_controller/internal/message_generator/sas/heartbeat_test.go b/dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/heartbeat_test.go similarity index 56% rename from dp/cloud/go/active_mode_controller/internal/message_generator/sas/heartbeat_test.go rename to 
dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/heartbeat_test.go index 984c37d04c3e..a4359ff00f39 100644 --- a/dp/cloud/go/active_mode_controller/internal/message_generator/sas/heartbeat_test.go +++ b/dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/heartbeat_test.go @@ -16,9 +16,11 @@ package sas_test import ( "fmt" "testing" + "time" - "magma/dp/cloud/go/active_mode_controller/internal/message_generator/sas" - "magma/dp/cloud/go/active_mode_controller/protos/active_mode" + "magma/dp/cloud/go/services/dp/active_mode_controller/action_generator/sas" + "magma/dp/cloud/go/services/dp/storage" + "magma/dp/cloud/go/services/dp/storage/db" ) const ( @@ -38,40 +40,56 @@ const ( func TestHeartbeatRequestGenerator(t *testing.T) { data := []struct { name string - grant *active_mode.Grant + grant *storage.DetailedGrant expected *request }{{ name: "Should generate heartbeat immediately when grant is not authorized yet", - grant: &active_mode.Grant{ - Id: someGrantId, - State: active_mode.GrantState_Granted, - HeartbeatIntervalSec: heartbeatInterval, - LastHeartbeatTimestamp: nextSend, + grant: &storage.DetailedGrant{ + Grant: &storage.DBGrant{ + GrantId: db.MakeString(someGrantId), + HeartbeatIntervalSec: db.MakeInt(heartbeatInterval), + LastHeartbeatRequestTime: db.MakeTime(time.Unix(nextSend, 0)), + }, + GrantState: &storage.DBGrantState{ + Name: db.MakeString("granted"), + }, }, expected: getHeartbeatRequest(granted), }, { name: "Should generate heartbeat when timeout has expired", - grant: &active_mode.Grant{ - Id: someGrantId, - State: active_mode.GrantState_Authorized, - HeartbeatIntervalSec: heartbeatInterval, - LastHeartbeatTimestamp: nextSend - heartbeatInterval, + grant: &storage.DetailedGrant{ + Grant: &storage.DBGrant{ + GrantId: db.MakeString(someGrantId), + HeartbeatIntervalSec: db.MakeInt(heartbeatInterval), + LastHeartbeatRequestTime: db.MakeTime(time.Unix(nextSend-heartbeatInterval, 0)), + }, + GrantState: 
&storage.DBGrantState{ + Name: db.MakeString("authorized"), + }, }, expected: getHeartbeatRequest(authorized), }, { name: "Should not generate heartbeat request when timeout has not expired yet", - grant: &active_mode.Grant{ - Id: someGrantId, - State: active_mode.GrantState_Authorized, - HeartbeatIntervalSec: heartbeatInterval, - LastHeartbeatTimestamp: nextSend - heartbeatInterval + 1, + grant: &storage.DetailedGrant{ + Grant: &storage.DBGrant{ + GrantId: db.MakeString(someGrantId), + HeartbeatIntervalSec: db.MakeInt(heartbeatInterval), + LastHeartbeatRequestTime: db.MakeTime(time.Unix(nextSend-heartbeatInterval+1, 0)), + }, + GrantState: &storage.DBGrantState{ + Name: db.MakeString("authorized"), + }, }, expected: nil, }, { name: "Should generate relinquish request for unsync grant", - grant: &active_mode.Grant{ - Id: "some_grant_id", - State: active_mode.GrantState_Unsync, + grant: &storage.DetailedGrant{ + Grant: &storage.DBGrant{ + GrantId: db.MakeString(someGrantId), + }, + GrantState: &storage.DBGrantState{ + Name: db.MakeString("unsync"), + }, }, expected: getRelinquishmentRequest(), }} @@ -80,7 +98,7 @@ func TestHeartbeatRequestGenerator(t *testing.T) { p := sas.HeartbeatProcessor{ NextSendTimestamp: nextSend, CbsdId: someCbsdId, - Grants: map[int64]*active_mode.Grant{ + Grants: map[int64]*storage.DetailedGrant{ frequency: tt.grant, }, } diff --git a/dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/registration.go b/dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/registration.go new file mode 100644 index 000000000000..020bcd5766cb --- /dev/null +++ b/dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/registration.go @@ -0,0 +1,78 @@ +/* +Copyright 2022 The Magma Authors. + +This source code is licensed under the BSD-style license found in the +LICENSE file in the root directory of this source tree. 
+ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sas + +import ( + "encoding/json" + "strings" + + "magma/dp/cloud/go/services/dp/storage" +) + +type RegistrationRequestGenerator struct{} + +func (*RegistrationRequestGenerator) GenerateRequests(cbsd *storage.DetailedCbsd) []*storage.MutableRequest { + payload := buildRegistrationRequest(cbsd.Cbsd) + req := makeRequest(Registration, payload) + return []*storage.MutableRequest{req} +} + +func buildRegistrationRequest(cbsd *storage.DBCbsd) *RegistrationRequest { + if !cbsd.SingleStepEnabled.Bool { + return &RegistrationRequest{ + UserId: cbsd.UserId.String, + FccId: cbsd.FccId.String, + CbsdSerialNumber: cbsd.CbsdSerialNumber.String, + } + } + return &RegistrationRequest{ + UserId: cbsd.UserId.String, + FccId: cbsd.FccId.String, + CbsdSerialNumber: cbsd.CbsdSerialNumber.String, + CbsdCategory: strings.ToUpper(cbsd.CbsdCategory.String), + AirInterface: &AirInterface{RadioTechnology: "E_UTRA"}, + InstallationParam: &InstallationParam{ + Latitude: cbsd.LatitudeDeg.Float64, + Longitude: cbsd.LongitudeDeg.Float64, + Height: cbsd.HeightM.Float64, + HeightType: strings.ToUpper(cbsd.HeightType.String), + IndoorDeployment: cbsd.IndoorDeployment.Bool, + AntennaGain: cbsd.AntennaGainDbi.Float64, + }, + MeasCapability: json.RawMessage("[]"), + } +} + +type RegistrationRequest struct { + UserId string `json:"userId"` + FccId string `json:"fccId"` + CbsdSerialNumber string `json:"cbsdSerialNumber"` + CbsdCategory string `json:"cbsdCategory,omitempty"` + AirInterface *AirInterface `json:"airInterface,omitempty"` + InstallationParam *InstallationParam `json:"installationParam,omitempty"` + MeasCapability json.RawMessage `json:"measCapability,omitempty"` +} + 
+type AirInterface struct { + RadioTechnology string `json:"radioTechnology"` +} + +type InstallationParam struct { + Latitude float64 `json:"latitude"` + Longitude float64 `json:"longitude"` + Height float64 `json:"height"` + HeightType string `json:"heightType"` + IndoorDeployment bool `json:"indoorDeployment"` + AntennaGain float64 `json:"antennaGain"` +} diff --git a/dp/cloud/go/active_mode_controller/internal/message_generator/sas/registration_test.go b/dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/registration_test.go similarity index 61% rename from dp/cloud/go/active_mode_controller/internal/message_generator/sas/registration_test.go rename to dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/registration_test.go index 251db2bcb16b..704cb268334c 100644 --- a/dp/cloud/go/active_mode_controller/internal/message_generator/sas/registration_test.go +++ b/dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/registration_test.go @@ -14,28 +14,28 @@ limitations under the License. 
package sas_test import ( + "encoding/json" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "magma/dp/cloud/go/active_mode_controller/internal/message_generator/sas" - "magma/dp/cloud/go/active_mode_controller/protos/active_mode" + "magma/dp/cloud/go/services/dp/active_mode_controller/action_generator/sas" + "magma/dp/cloud/go/services/dp/storage" + "magma/dp/cloud/go/services/dp/storage/db" ) func TestRegistrationRequestGenerator(t *testing.T) { data := []struct { name string - cbsd *active_mode.Cbsd + cbsd *storage.DBCbsd expected string }{{ name: "Should generate multi step registration request", - cbsd: &active_mode.Cbsd{ - SasSettings: &active_mode.SasSettings{ - UserId: "some_user_id", - FccId: "some_fcc_id", - SerialNumber: "some_serial_number", - }, + cbsd: &storage.DBCbsd{ + UserId: db.MakeString("some_user_id"), + FccId: db.MakeString("some_fcc_id"), + CbsdSerialNumber: db.MakeString("some_serial_number"), }, expected: `{ "userId": "some_user_id", @@ -44,22 +44,18 @@ func TestRegistrationRequestGenerator(t *testing.T) { }`, }, { name: "Should generate cpi less single step registration request", - cbsd: &active_mode.Cbsd{ - SasSettings: &active_mode.SasSettings{ - SingleStepEnabled: true, - CbsdCategory: "a", - SerialNumber: "some_serial_number", - FccId: "some_fcc_id", - UserId: "some_user_id", - }, - InstallationParams: &active_mode.InstallationParams{ - LatitudeDeg: 12, - LongitudeDeg: 34, - HeightM: 5, - HeightType: "agl", - IndoorDeployment: true, - AntennaGainDbi: 15, - }, + cbsd: &storage.DBCbsd{ + SingleStepEnabled: db.MakeBool(true), + CbsdCategory: db.MakeString("a"), + CbsdSerialNumber: db.MakeString("some_serial_number"), + FccId: db.MakeString("some_fcc_id"), + UserId: db.MakeString("some_user_id"), + LatitudeDeg: db.MakeFloat(12), + LongitudeDeg: db.MakeFloat(34), + HeightM: db.MakeFloat(5), + HeightType: db.MakeString("agl"), + IndoorDeployment: db.MakeBool(true), + AntennaGainDbi: db.MakeFloat(15), }, 
expected: `{ "userId": "some_user_id", @@ -83,7 +79,8 @@ func TestRegistrationRequestGenerator(t *testing.T) { g := &sas.RegistrationRequestGenerator{} for _, tt := range data { t.Run(tt.name, func(t *testing.T) { - actual := g.GenerateRequests(tt.cbsd) + data := &storage.DetailedCbsd{Cbsd: tt.cbsd} + actual := g.GenerateRequests(data) expected := []*request{{ requestType: "registrationRequest", data: tt.expected, @@ -98,7 +95,7 @@ type request struct { data string } -func assertRequestsEqual(t *testing.T, expected []*request, actual []*sas.Request) { +func assertRequestsEqual(t *testing.T, expected []*request, actual []*storage.MutableRequest) { require.Len(t, actual, len(expected)) for i := range actual { args := []any{"at %d", i} @@ -106,11 +103,12 @@ func assertRequestsEqual(t *testing.T, expected []*request, actual []*sas.Reques } } -func assertRequestEqual(t *testing.T, expected *request, actual *sas.Request, args ...any) { +func assertRequestEqual(t *testing.T, expected *request, actual *storage.MutableRequest, args ...any) { if expected == nil { assert.Nil(t, actual, args...) return } - assert.Equal(t, expected.requestType, actual.Type.String(), args...) - assert.JSONEq(t, expected.data, string(actual.Data), args...) + assert.Equal(t, expected.requestType, actual.RequestType.Name.String, args...) + actualPayload, _ := json.Marshal(actual.Request.Payload) + assert.JSONEq(t, expected.data, string(actualPayload), args...) 
} diff --git a/dp/cloud/go/active_mode_controller/internal/message_generator/sas/relinquishment.go b/dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/relinquishment.go similarity index 53% rename from dp/cloud/go/active_mode_controller/internal/message_generator/sas/relinquishment.go rename to dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/relinquishment.go index 4bd1be182405..348dbc9e23b8 100644 --- a/dp/cloud/go/active_mode_controller/internal/message_generator/sas/relinquishment.go +++ b/dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/relinquishment.go @@ -13,38 +13,38 @@ limitations under the License. package sas -import "magma/dp/cloud/go/active_mode_controller/protos/active_mode" +import ( + "magma/dp/cloud/go/services/dp/storage" +) type RelinquishmentRequestGenerator struct{} -func (*RelinquishmentRequestGenerator) GenerateRequests(cbsd *active_mode.Cbsd) []*Request { - grants := cbsd.Grants - cbsdId := cbsd.CbsdId - reqs := make([]*Request, 0, len(grants)) - for _, grant := range grants { - req := &relinquishmentRequest{ - CbsdId: cbsdId, - GrantId: grant.Id, +func (*RelinquishmentRequestGenerator) GenerateRequests(cbsd *storage.DetailedCbsd) []*storage.MutableRequest { + reqs := make([]*storage.MutableRequest, len(cbsd.Grants)) + for i, grant := range cbsd.Grants { + payload := &RelinquishmentRequest{ + CbsdId: cbsd.Cbsd.CbsdId.String, + GrantId: grant.Grant.GrantId.String, } - reqs = append(reqs, asRequest(Relinquishment, req)) + reqs[i] = makeRequest(Relinquishment, payload) } return reqs } -type relinquishmentRequest struct { +type RelinquishmentRequest struct { CbsdId string `json:"cbsdId"` GrantId string `json:"grantId"` } type RelinquishmentProcessor struct { CbsdId string - Grants map[int64]*active_mode.Grant + Grants map[int64]*storage.DetailedGrant } -func (r *RelinquishmentProcessor) ProcessGrant(frequency int64, _ int64) *Request { - req := &relinquishmentRequest{ +func (r 
*RelinquishmentProcessor) ProcessGrant(frequency int64, _ int64) *storage.MutableRequest { + payload := &RelinquishmentRequest{ CbsdId: r.CbsdId, - GrantId: r.Grants[frequency].Id, + GrantId: r.Grants[frequency].Grant.GrantId.String, } - return asRequest(Relinquishment, req) + return makeRequest(Relinquishment, payload) } diff --git a/dp/cloud/go/active_mode_controller/internal/message_generator/sas/relinquishment_test.go b/dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/relinquishment_test.go similarity index 67% rename from dp/cloud/go/active_mode_controller/internal/message_generator/sas/relinquishment_test.go rename to dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/relinquishment_test.go index 862bfabce824..a7167f80d9f1 100644 --- a/dp/cloud/go/active_mode_controller/internal/message_generator/sas/relinquishment_test.go +++ b/dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/relinquishment_test.go @@ -16,19 +16,24 @@ package sas_test import ( "testing" - "magma/dp/cloud/go/active_mode_controller/internal/message_generator/sas" - "magma/dp/cloud/go/active_mode_controller/protos/active_mode" + "magma/dp/cloud/go/services/dp/active_mode_controller/action_generator/sas" + "magma/dp/cloud/go/services/dp/storage" + "magma/dp/cloud/go/services/dp/storage/db" ) func TestRelinquishmentRequestGenerator(t *testing.T) { - cbsd := &active_mode.Cbsd{ - CbsdId: "some_cbsd_id", - Grants: []*active_mode.Grant{{ - Id: "some_grant_id", + data := &storage.DetailedCbsd{ + Cbsd: &storage.DBCbsd{ + CbsdId: db.MakeString("some_cbsd_id"), + }, + Grants: []*storage.DetailedGrant{{ + Grant: &storage.DBGrant{ + GrantId: db.MakeString("some_grant_id"), + }, }}, } g := &sas.RelinquishmentRequestGenerator{} - actual := g.GenerateRequests(cbsd) + actual := g.GenerateRequests(data) expected := []*request{getRelinquishmentRequest()} assertRequestsEqual(t, expected, actual) } @@ -37,8 +42,12 @@ func TestRelinquishmentProcessor(t 
*testing.T) { const frequency = 3600e6 p := &sas.RelinquishmentProcessor{ CbsdId: "some_cbsd_id", - Grants: map[int64]*active_mode.Grant{ - frequency: {Id: "some_grant_id"}, + Grants: map[int64]*storage.DetailedGrant{ + frequency: { + Grant: &storage.DBGrant{ + GrantId: db.MakeString("some_grant_id"), + }, + }, }, } actual := p.ProcessGrant(frequency, 20e6) diff --git a/dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/request.go b/dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/request.go new file mode 100644 index 000000000000..a71c5a5bceb7 --- /dev/null +++ b/dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/request.go @@ -0,0 +1,39 @@ +/* +Copyright 2022 The Magma Authors. + +This source code is licensed under the BSD-style license found in the +LICENSE file in the root directory of this source tree. + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package sas + +import ( + "magma/dp/cloud/go/services/dp/storage" + "magma/dp/cloud/go/services/dp/storage/db" +) + +const ( + Registration = "registrationRequest" + SpectrumInquiry = "spectrumInquiryRequest" + Grant = "grantRequest" + Heartbeat = "heartbeatRequest" + Relinquishment = "relinquishmentRequest" + Deregistration = "deregistrationRequest" +) + +func makeRequest(requestType string, data any) *storage.MutableRequest { + return &storage.MutableRequest{ + Request: &storage.DBRequest{ + Payload: data, + }, + RequestType: &storage.DBRequestType{ + Name: db.MakeString(requestType), + }, + } +} diff --git a/dp/cloud/go/active_mode_controller/internal/message_generator/sas/spectrum_inquiry.go b/dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/spectrum_inquiry.go similarity index 51% rename from dp/cloud/go/active_mode_controller/internal/message_generator/sas/spectrum_inquiry.go rename to dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/spectrum_inquiry.go index 4a2e160d8788..88f568793792 100644 --- a/dp/cloud/go/active_mode_controller/internal/message_generator/sas/spectrum_inquiry.go +++ b/dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/spectrum_inquiry.go @@ -13,32 +13,31 @@ limitations under the License. 
package sas -import "magma/dp/cloud/go/active_mode_controller/protos/active_mode" +import ( + "magma/dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/frequency" + "magma/dp/cloud/go/services/dp/storage" +) type SpectrumInquiryRequestGenerator struct{} -func (*SpectrumInquiryRequestGenerator) GenerateRequests(cbsd *active_mode.Cbsd) []*Request { - req := &spectrumInquiryRequest{ - CbsdId: cbsd.GetCbsdId(), - InquiredSpectrum: []*frequencyRange{{ - LowFrequency: lowestFrequencyHz, - HighFrequency: highestFrequencyHz, +func (*SpectrumInquiryRequestGenerator) GenerateRequests(cbsd *storage.DetailedCbsd) []*storage.MutableRequest { + payload := &SpectrumInquiryRequest{ + CbsdId: cbsd.Cbsd.CbsdId.String, + InquiredSpectrum: []*FrequencyRange{{ + LowFrequency: frequency.LowestHz, + HighFrequency: frequency.HighestHz, }}, } - return []*Request{asRequest(SpectrumInquiry, req)} + req := makeRequest(SpectrumInquiry, payload) + return []*storage.MutableRequest{req} } -const ( - lowestFrequencyHz int64 = 3550 * 1e6 - highestFrequencyHz int64 = 3700 * 1e6 -) - -type spectrumInquiryRequest struct { +type SpectrumInquiryRequest struct { CbsdId string `json:"cbsdId"` - InquiredSpectrum []*frequencyRange `json:"inquiredSpectrum"` + InquiredSpectrum []*FrequencyRange `json:"inquiredSpectrum"` } -type frequencyRange struct { +type FrequencyRange struct { LowFrequency int64 `json:"lowFrequency"` HighFrequency int64 `json:"highFrequency"` } diff --git a/dp/cloud/go/active_mode_controller/internal/message_generator/sas/spectrum_inquiry_test.go b/dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/spectrum_inquiry_test.go similarity index 74% rename from dp/cloud/go/active_mode_controller/internal/message_generator/sas/spectrum_inquiry_test.go rename to dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/spectrum_inquiry_test.go index 1cdfc7c57e9a..260a2b07503b 100644 --- 
a/dp/cloud/go/active_mode_controller/internal/message_generator/sas/spectrum_inquiry_test.go +++ b/dp/cloud/go/services/dp/active_mode_controller/action_generator/sas/spectrum_inquiry_test.go @@ -16,14 +16,16 @@ package sas_test import ( "testing" - "magma/dp/cloud/go/active_mode_controller/internal/message_generator/sas" - "magma/dp/cloud/go/active_mode_controller/protos/active_mode" + "magma/dp/cloud/go/services/dp/active_mode_controller/action_generator/sas" + "magma/dp/cloud/go/services/dp/storage" + "magma/dp/cloud/go/services/dp/storage/db" ) func TestSpectrumInquiryRequestGenerator(t *testing.T) { - cbsd := &active_mode.Cbsd{CbsdId: "some_id"} + cbsd := &storage.DBCbsd{CbsdId: db.MakeString("some_id")} + data := &storage.DetailedCbsd{Cbsd: cbsd} g := &sas.SpectrumInquiryRequestGenerator{} - actual := g.GenerateRequests(cbsd) + actual := g.GenerateRequests(data) expected := []*request{{ requestType: "spectrumInquiryRequest", data: `{ diff --git a/dp/cloud/go/services/dp/active_mode_controller/app.go b/dp/cloud/go/services/dp/active_mode_controller/app.go new file mode 100644 index 000000000000..c0e67732fd1a --- /dev/null +++ b/dp/cloud/go/services/dp/active_mode_controller/app.go @@ -0,0 +1,115 @@ +/* +Copyright 2022 The Magma Authors. + +This source code is licensed under the BSD-style license found in the +LICENSE file in the root directory of this source tree. + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package active_mode_controller + +import ( + "context" + "database/sql" + "time" + + "github.com/golang/glog" + + "magma/dp/cloud/go/services/dp/active_mode_controller/action_generator" + "magma/dp/cloud/go/services/dp/storage" +) + +type App struct { + db *sql.DB + clock Clock + rng action_generator.RNG + heartbeatSendTimeout time.Duration + pollingInterval time.Duration + cbsdInactivityTimeout time.Duration + amcManager storage.AmcManager +} + +func NewApp(options ...Option) *App { + a := &App{} + for _, o := range options { + o(a) + } + return a +} + +type Clock interface { + Now() time.Time + Tick(duration time.Duration) *time.Ticker +} + +type Option func(*App) + +func WithDb(db *sql.DB) Option { + return func(a *App) { a.db = db } +} + +func WithAmcManager(manager storage.AmcManager) Option { + return func(a *App) { a.amcManager = manager } +} + +func WithRNG(rng action_generator.RNG) Option { + return func(a *App) { a.rng = rng } +} + +func WithClock(clock Clock) Option { + return func(a *App) { a.clock = clock } +} + +func WithHeartbeatSendTimeout(sendTimeout time.Duration, sendInterval time.Duration) Option { + return func(a *App) { a.heartbeatSendTimeout = sendTimeout + sendInterval } +} + +func WithPollingInterval(interval time.Duration) Option { + return func(a *App) { a.pollingInterval = interval } +} + +func WithCbsdInactivityTimeout(timeout time.Duration) Option { + return func(a *App) { a.cbsdInactivityTimeout = timeout } +} + +func (a *App) Run(ctx context.Context) error { + ticker := a.clock.Tick(a.pollingInterval) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return ctx.Err() + case <-ticker.C: + _, err := storage.WithinTx(a.db, a.getStateAndProcessData) + if err != nil { + glog.Errorf("failed to process data: %s", err) + } + } + } +} + +// TODO add context +func (a *App) getStateAndProcessData(tx *sql.Tx) (any, error) { + state, err := a.amcManager.GetState(tx) + if err != nil { + return nil, err + } + generator := 
&action_generator.ActionGenerator{ + HeartbeatTimeout: a.heartbeatSendTimeout + a.pollingInterval, + InactivityTimeout: a.cbsdInactivityTimeout, + Rng: a.rng, + } + now := a.clock.Now() + actions := generator.GenerateActions(state, now) + for _, act := range actions { + if err := act.Do(tx, a.amcManager); err != nil { + return nil, err + } + } + return nil, nil +} diff --git a/lte/gateway/python/integ_tests/gxgy_tests/__init__.py b/dp/cloud/go/services/dp/active_mode_controller/app_test.go similarity index 80% rename from lte/gateway/python/integ_tests/gxgy_tests/__init__.py rename to dp/cloud/go/services/dp/active_mode_controller/app_test.go index 5c6cb646cadc..e1c4efdfce15 100644 --- a/lte/gateway/python/integ_tests/gxgy_tests/__init__.py +++ b/dp/cloud/go/services/dp/active_mode_controller/app_test.go @@ -1,5 +1,5 @@ -""" -Copyright 2020 The Magma Authors. +/* +Copyright 2022 The Magma Authors. This source code is licensed under the BSD-style license found in the LICENSE file in the root directory of this source tree. @@ -9,4 +9,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-""" +*/ + +package active_mode_controller_test + +// TODO migrate AMC app tests diff --git a/dp/cloud/go/active_mode_controller/protos/active_mode/active_mode.pb.go b/dp/cloud/go/services/dp/active_mode_controller/protos/active_mode/active_mode.pb.go similarity index 99% rename from dp/cloud/go/active_mode_controller/protos/active_mode/active_mode.pb.go rename to dp/cloud/go/services/dp/active_mode_controller/protos/active_mode/active_mode.pb.go index 3129fbcfc1cd..23030f33f813 100644 --- a/dp/cloud/go/active_mode_controller/protos/active_mode/active_mode.pb.go +++ b/dp/cloud/go/services/dp/active_mode_controller/protos/active_mode/active_mode.pb.go @@ -1362,11 +1362,12 @@ var file_dp_protos_active_mode_proto_rawDesc = []byte{ 0x62, 0x6c, 0x65, 0x46, 0x72, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x42, - 0x3d, 0x5a, 0x3b, 0x6d, 0x61, 0x67, 0x6d, 0x61, 0x2f, 0x64, 0x70, 0x2f, 0x63, 0x6c, 0x6f, 0x75, - 0x64, 0x2f, 0x67, 0x6f, 0x2f, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x6f, 0x64, 0x65, - 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x2f, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x73, 0x2f, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x49, 0x5a, 0x47, 0x6d, 0x61, 0x67, 0x6d, 0x61, 0x2f, 0x64, 0x70, 0x2f, 0x63, 0x6c, 0x6f, 0x75, + 0x64, 0x2f, 0x67, 0x6f, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x64, 0x70, + 0x2f, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x5f, 0x63, 0x6f, 0x6e, + 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x2f, 0x61, + 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, } var ( diff --git 
a/dp/cloud/go/active_mode_controller/internal/time/clock.go b/dp/cloud/go/services/dp/active_mode_controller/time/clock.go similarity index 100% rename from dp/cloud/go/active_mode_controller/internal/time/clock.go rename to dp/cloud/go/services/dp/active_mode_controller/time/clock.go diff --git a/dp/cloud/go/services/dp/builders/builders.go b/dp/cloud/go/services/dp/builders/builders.go index 571393896379..025430d1b3fd 100644 --- a/dp/cloud/go/services/dp/builders/builders.go +++ b/dp/cloud/go/services/dp/builders/builders.go @@ -36,7 +36,7 @@ const ( someFccId = "some_fcc_id" someUserId = "some_user_id" catB = "b" - someModel = "some_model" + catA = "a" ) type DBCbsdBuilder struct { @@ -50,7 +50,7 @@ func NewDBCbsdBuilder() *DBCbsdBuilder { FccId: db.MakeString(someFccId), CbsdSerialNumber: db.MakeString(someSerialNumber), PreferredBandwidthMHz: db.MakeInt(20), - PreferredFrequenciesMHz: db.MakeString("[3600]"), + PreferredFrequenciesMHz: []int64{3600}, MinPower: db.MakeFloat(10), MaxPower: db.MakeFloat(20), NumberOfPorts: db.MakeInt(2), @@ -99,7 +99,7 @@ func (b *DBCbsdBuilder) WithUserId(id string) *DBCbsdBuilder { } func (b *DBCbsdBuilder) WithAntennaGain(gain float64) *DBCbsdBuilder { - b.Cbsd.AntennaGain = db.MakeFloat(gain) + b.Cbsd.AntennaGainDbi = db.MakeFloat(gain) return b } @@ -108,38 +108,46 @@ func (b *DBCbsdBuilder) WithLatitude(lat float64) *DBCbsdBuilder { return b } +func (b *DBCbsdBuilder) WithEirpCapabilities(minPower float64, maxPower float64, numberOfPorts int64) *DBCbsdBuilder { + b.Cbsd.MinPower = db.MakeFloat(minPower) + b.Cbsd.MaxPower = db.MakeFloat(maxPower) + b.Cbsd.NumberOfPorts = db.MakeInt(numberOfPorts) + return b +} + func (b *DBCbsdBuilder) WithLongitude(lon float64) *DBCbsdBuilder { b.Cbsd.LongitudeDeg = db.MakeFloat(lon) return b } -func (b *DBCbsdBuilder) WithNumberOfPorts(num int64) *DBCbsdBuilder { - b.Cbsd.NumberOfPorts = db.MakeInt(num) +func (b *DBCbsdBuilder) WithLastSeen(t int64) *DBCbsdBuilder { + b.Cbsd.LastSeen = 
db.MakeTime(time.Unix(t, 0).UTC()) return b } -func (b *DBCbsdBuilder) WithMaxPower(pow float64) *DBCbsdBuilder { - b.Cbsd.MaxPower = db.MakeFloat(pow) +func (b *DBCbsdBuilder) WithStateId(t int64) *DBCbsdBuilder { + b.Cbsd.StateId = db.MakeInt(t) return b } -func (b *DBCbsdBuilder) WithMinPower(pow float64) *DBCbsdBuilder { - b.Cbsd.MinPower = db.MakeFloat(pow) +func (b *DBCbsdBuilder) WithDesiredStateId(t int64) *DBCbsdBuilder { + b.Cbsd.DesiredStateId = db.MakeInt(t) return b } -func (b *DBCbsdBuilder) WithLastSeen(t int64) *DBCbsdBuilder { - b.Cbsd.LastSeen = db.MakeTime(time.Unix(t, 0).UTC()) +func (b *DBCbsdBuilder) WithChannels(channels []storage.Channel) *DBCbsdBuilder { + b.Cbsd.Channels = channels return b } -func (b *DBCbsdBuilder) WithStateId(t int64) *DBCbsdBuilder { - b.Cbsd.StateId = db.MakeInt(t) +func (b *DBCbsdBuilder) WithPreferences(bandwidthMhz int64, frequenciesMhz []int64) *DBCbsdBuilder { + b.Cbsd.PreferredBandwidthMHz = db.MakeInt(bandwidthMhz) + b.Cbsd.PreferredFrequenciesMHz = frequenciesMhz return b } -func (b *DBCbsdBuilder) WithDesiredStateId(t int64) *DBCbsdBuilder { - b.Cbsd.DesiredStateId = db.MakeInt(t) +func (b *DBCbsdBuilder) WithAvailableFrequencies(frequenciesMhz []uint32) *DBCbsdBuilder { + b.Cbsd.AvailableFrequencies = frequenciesMhz return b } @@ -154,7 +162,7 @@ func (b *DBCbsdBuilder) WithFullInstallationParam() *DBCbsdBuilder { b.Cbsd.IndoorDeployment = db.MakeBool(true) b.Cbsd.HeightM = db.MakeFloat(12.5) b.Cbsd.HeightType = db.MakeString("agl") - b.Cbsd.AntennaGain = db.MakeFloat(4.5) + b.Cbsd.AntennaGainDbi = db.MakeFloat(4.5) return b } @@ -204,13 +212,8 @@ func (b *DBCbsdBuilder) WithShouldDeregister(should bool) *DBCbsdBuilder { return b } -func (b *DBCbsdBuilder) WithPreferredBandwidthMHz(bandwidth int64) *DBCbsdBuilder { - b.Cbsd.PreferredBandwidthMHz = db.MakeInt(bandwidth) - return b -} - -func (b *DBCbsdBuilder) WithPreferredFrequenciesMHz(freq string) *DBCbsdBuilder { - b.Cbsd.PreferredFrequenciesMHz = 
db.MakeString(freq) +func (b *DBCbsdBuilder) WithShouldRelinquish(should bool) *DBCbsdBuilder { + b.Cbsd.ShouldRelinquish = db.MakeBool(should) return b } @@ -231,12 +234,13 @@ func NewDBGrantBuilder() *DBGrantBuilder { func (b *DBGrantBuilder) WithDefaultTestValues() *DBGrantBuilder { b.Grant = &storage.DBGrant{ - GrantExpireTime: db.MakeTime(time.Unix(123, 0).UTC()), - TransmitExpireTime: db.MakeTime(time.Unix(456, 0).UTC()), - LowFrequency: db.MakeInt(3590 * 1e6), - HighFrequency: db.MakeInt(3610 * 1e6), - MaxEirp: db.MakeFloat(35), - GrantId: db.MakeString("some_grant_id"), + GrantExpireTime: db.MakeTime(time.Unix(123, 0).UTC()), + TransmitExpireTime: db.MakeTime(time.Unix(456, 0).UTC()), + LowFrequencyHz: db.MakeInt(3590 * 1e6), + HighFrequencyHz: db.MakeInt(3610 * 1e6), + MaxEirp: db.MakeFloat(35), + GrantId: db.MakeString("some_grant_id"), + HeartbeatIntervalSec: db.MakeInt(1), } return b } @@ -267,8 +271,8 @@ func (b *DBGrantBuilder) WithGrantId(id string) *DBGrantBuilder { } func (b *DBGrantBuilder) WithFrequency(frequencyMHz int64) *DBGrantBuilder { - b.Grant.LowFrequency = db.MakeInt((frequencyMHz - 10) * 1e6) - b.Grant.HighFrequency = db.MakeInt((frequencyMHz + 10) * 1e6) + b.Grant.LowFrequencyHz = db.MakeInt((frequencyMHz - 10) * 1e6) + b.Grant.HighFrequencyHz = db.MakeInt((frequencyMHz + 10) * 1e6) return b } @@ -282,6 +286,11 @@ func (b *DBGrantBuilder) WithTransmitExpireTime(t time.Time) *DBGrantBuilder { return b } +func (b *DBGrantBuilder) WithLastHeartbeatTime(t time.Time) *DBGrantBuilder { + b.Grant.LastHeartbeatRequestTime = db.MakeTime(t) + return b +} + type CbsdStateResultBuilder struct { Result *protos.CBSDStateResult } @@ -413,8 +422,8 @@ func (b *DetailedDBCbsdBuilder) WithGrant(state string, frequencyMHz int64, gran Grant: &storage.DBGrant{ GrantExpireTime: db.MakeTime(grantExpireTime), TransmitExpireTime: db.MakeTime(transmitExpireTime), - LowFrequency: db.MakeInt((frequencyMHz - 10) * 1e6), - HighFrequency: db.MakeInt((frequencyMHz + 
10) * 1e6), + LowFrequencyHz: db.MakeInt((frequencyMHz - 10) * 1e6), + HighFrequencyHz: db.MakeInt((frequencyMHz + 10) * 1e6), MaxEirp: db.MakeFloat(35), }, GrantState: &storage.DBGrantState{ @@ -425,6 +434,23 @@ func (b *DetailedDBCbsdBuilder) WithGrant(state string, frequencyMHz int64, gran return b } +func (b *DetailedDBCbsdBuilder) WithAmcGrant(state string, frequencyMHz int64, lastHeartbeatTime time.Time, grantId string, heartbeatInterval int64) *DetailedDBCbsdBuilder { + grant := &storage.DetailedGrant{ + Grant: &storage.DBGrant{ + GrantId: db.MakeString(grantId), + LowFrequencyHz: db.MakeInt((frequencyMHz - 10) * 1e6), + HighFrequencyHz: db.MakeInt((frequencyMHz + 10) * 1e6), + LastHeartbeatRequestTime: db.MakeTime(lastHeartbeatTime), + HeartbeatIntervalSec: db.MakeInt(heartbeatInterval), + }, + GrantState: &storage.DBGrantState{ + Name: db.MakeString(state), + }, + } + b.Details.Grants = append(b.Details.Grants, grant) + return b +} + type DetailedProtoCbsdBuilder struct { Details *protos.CbsdDetails } @@ -705,3 +731,18 @@ func (b *DPLogBuilder) WithLogMessage(m string) *DPLogBuilder { b.Log.LogMessage = m return b } + +func NewRequestBuilder(id int64, cbsdId int64, typeId int64, payload string) *RequestBuilder { + return &RequestBuilder{ + Request: &storage.DBRequest{ + Id: db.MakeInt(id), + TypeId: db.MakeInt(typeId), + CbsdId: db.MakeInt(cbsdId), + Payload: db.MakeString(payload), + }, + } +} + +type RequestBuilder struct { + Request *storage.DBRequest +} diff --git a/dp/cloud/go/services/dp/config.go b/dp/cloud/go/services/dp/config.go index 266aa2b6ca54..263086867142 100644 --- a/dp/cloud/go/services/dp/config.go +++ b/dp/cloud/go/services/dp/config.go @@ -14,6 +14,23 @@ limitations under the License. package dp type Config struct { + // TODO cleanup config (common fields, separate packages, etc...) 
+ DpBackend *BackendConfig `yaml:"dp_backend"` + ActiveModeController *AmcConfig `yaml:"active_mode_controller"` +} + +type BackendConfig struct { CbsdInactivityIntervalSec int `yaml:"cbsd_inactivity_interval_sec"` LogConsumerUrl string `yaml:"log_consumer_url"` } + +type AmcConfig struct { + DialTimeoutSec int `yaml:"dial_timeout_sec"` + HeartbeatSendTimeoutSec int `yaml:"heartbeat_send_timeout_sec"` + RequestTimeoutSec int `yaml:"request_timeout_sec"` + RequestProcessingIntervalSec int `yaml:"request_processing_interval_sec"` + PollingIntervalSec int `yaml:"polling_interval"` // TODO add sec to deployment scripts + GrpcService string `yaml:"grpc_service"` + GrpcPort int `yaml:"grpc_port"` + CbsdInactivityTimeoutSec int `yaml:"cbsd_inactivity_interval_sec"` // TODO temporary fix to make integration tests pass +} diff --git a/dp/cloud/go/services/dp/dp/main.go b/dp/cloud/go/services/dp/dp/main.go index 5598292f311b..88317226b856 100644 --- a/dp/cloud/go/services/dp/dp/main.go +++ b/dp/cloud/go/services/dp/dp/main.go @@ -13,6 +13,9 @@ limitations under the License. 
package main import ( + "context" + "database/sql" + "math/rand" "time" "github.com/golang/glog" @@ -20,6 +23,8 @@ import ( "magma/dp/cloud/go/dp" "magma/dp/cloud/go/protos" dp_service "magma/dp/cloud/go/services/dp" + "magma/dp/cloud/go/services/dp/active_mode_controller" + amc_time "magma/dp/cloud/go/services/dp/active_mode_controller/time" "magma/dp/cloud/go/services/dp/logs_pusher" "magma/dp/cloud/go/services/dp/obsidian/cbsd" dp_log "magma/dp/cloud/go/services/dp/obsidian/log" @@ -52,13 +57,51 @@ func main() { } cbsdStore := dp_storage.NewCbsdManager(db, sqorc.GetSqlBuilder(), sqorc.GetErrorChecker(), sqorc.GetSqlLocker()) - interval := time.Second * time.Duration(serviceConfig.CbsdInactivityIntervalSec) - logConsumerUrl := serviceConfig.LogConsumerUrl + dpCfg := serviceConfig.DpBackend + interval := time.Second * time.Duration(dpCfg.CbsdInactivityIntervalSec) + logConsumerUrl := dpCfg.LogConsumerUrl protos.RegisterCbsdManagementServer(srv.GrpcServer, servicers.NewCbsdManager(cbsdStore, interval, logConsumerUrl, logs_pusher.PushDPLog)) + cancel, errs := startAmc(db, serviceConfig.ActiveModeController) + err = srv.Run() if err != nil { glog.Fatalf("Error while running %s service amd echo server: %s", dp_service.ServiceName, err) } + + stopAmc(cancel, errs) +} + +func startAmc(db *sql.DB, cfg *dp_service.AmcConfig) (context.CancelFunc, chan error) { + clock := &amc_time.Clock{} + seed := rand.NewSource(clock.Now().Unix()) + amcManager := dp_storage.NewAmcManager(db, sqorc.GetSqlBuilder(), sqorc.GetErrorChecker(), sqorc.GetSqlLocker()) + app := active_mode_controller.NewApp( + active_mode_controller.WithDb(db), + active_mode_controller.WithAmcManager(amcManager), + active_mode_controller.WithClock(clock), + active_mode_controller.WithRNG(rand.New(seed)), + active_mode_controller.WithHeartbeatSendTimeout( + secToDuration(cfg.HeartbeatSendTimeoutSec), + secToDuration(cfg.RequestProcessingIntervalSec)), + 
active_mode_controller.WithPollingInterval(secToDuration(cfg.PollingIntervalSec)), + active_mode_controller.WithCbsdInactivityTimeout(secToDuration(cfg.CbsdInactivityTimeoutSec)), + ) + errs := make(chan error, 1) + ctx, cancel := context.WithCancel(context.Background()) + go func() { errs <- app.Run(ctx) }() + return cancel, errs +} + +func secToDuration(s int) time.Duration { + return time.Second * time.Duration(s) +} + +func stopAmc(cancel context.CancelFunc, errs chan error) { + cancel() + err := <-errs + if err != nil && err != context.Canceled { + glog.Fatalf("Error while shutting down amc: %s", err) + } } diff --git a/dp/cloud/go/services/dp/servicers/cbsd_manager.go b/dp/cloud/go/services/dp/servicers/cbsd_manager.go index 7bcc1091c4ca..602c45e9f295 100644 --- a/dp/cloud/go/services/dp/servicers/cbsd_manager.go +++ b/dp/cloud/go/services/dp/servicers/cbsd_manager.go @@ -84,8 +84,8 @@ func (c *cbsdManager) EnodebdUpdateCbsd(ctx context.Context, request *protos.Eno for _, grant := range details.Grants { channels = append(channels, &protos.LteChannel{ - LowFrequencyHz: grant.Grant.LowFrequency.Int64, - HighFrequencyHz: grant.Grant.HighFrequency.Int64, + LowFrequencyHz: grant.Grant.LowFrequencyHz.Int64, + HighFrequencyHz: grant.Grant.HighFrequencyHz.Int64, MaxEirpDbmMhz: float32(grant.Grant.MaxEirp.Float64), }) } @@ -152,6 +152,7 @@ func (c *cbsdManager) RelinquishCbsd(_ context.Context, request *protos.Relinqui } func (c *cbsdManager) sendLog(ctx context.Context, source interface{}, name string, from string, to string, details *storage.DetailedCbsd) { + // TODO maybe we don't have to marshal msg msg, _ := json.Marshal(source) log := &logs_pusher.DPLog{ EventTimestamp: clock.Now().UTC().Unix(), @@ -211,7 +212,6 @@ func buildCbsd(data *protos.CbsdData) *storage.DBCbsd { capabilities := data.GetCapabilities() preferences := data.GetPreferences() installationParam := data.GetInstallationParam() - b, _ := json.Marshal(preferences.GetFrequenciesMhz()) cbsd := 
&storage.DBCbsd{ UserId: db.MakeString(data.GetUserId()), FccId: db.MakeString(data.GetFccId()), @@ -220,7 +220,7 @@ func buildCbsd(data *protos.CbsdData) *storage.DBCbsd { MaxPower: db.MakeFloat(capabilities.GetMaxPower()), NumberOfPorts: db.MakeInt(capabilities.GetNumberOfAntennas()), PreferredBandwidthMHz: db.MakeInt(preferences.GetBandwidthMhz()), - PreferredFrequenciesMHz: db.MakeString(string(b)), + PreferredFrequenciesMHz: preferences.GetFrequenciesMhz(), SingleStepEnabled: db.MakeBool(data.GetSingleStepEnabled()), CbsdCategory: db.MakeString(data.GetCbsdCategory()), CarrierAggregationEnabled: db.MakeBool(data.GetCarrierAggregationEnabled()), @@ -238,14 +238,12 @@ func setInstallationParam(cbsd *storage.DBCbsd, params *protos.InstallationParam cbsd.HeightM = dbFloat64OrNil(params.HeightM) cbsd.HeightType = dbStringOrNil(params.HeightType) cbsd.IndoorDeployment = dbBoolOrNil(params.IndoorDeployment) - cbsd.AntennaGain = dbFloat64OrNil(params.AntennaGain) + cbsd.AntennaGainDbi = dbFloat64OrNil(params.AntennaGain) } } func cbsdFromDatabase(data *storage.DetailedCbsd, inactivityInterval time.Duration) *protos.CbsdDetails { isActive := clock.Since(data.Cbsd.LastSeen.Time) < inactivityInterval - var frequencies []int64 - _ = json.Unmarshal([]byte(data.Cbsd.PreferredFrequenciesMHz.String), &frequencies) return &protos.CbsdDetails{ Id: data.Cbsd.Id.Int64, Data: &protos.CbsdData{ @@ -262,7 +260,7 @@ func cbsdFromDatabase(data *storage.DetailedCbsd, inactivityInterval time.Durati }, Preferences: &protos.FrequencyPreferences{ BandwidthMhz: data.Cbsd.PreferredBandwidthMHz.Int64, - FrequenciesMhz: frequencies, + FrequenciesMhz: data.Cbsd.PreferredFrequenciesMHz, }, DesiredState: data.DesiredState.Name.String, InstallationParam: getInstallationParam(data.Cbsd), @@ -283,7 +281,7 @@ func getInstallationParam(c *storage.DBCbsd) *protos.InstallationParam { p.IndoorDeployment = protoBoolOrNil(c.IndoorDeployment) p.HeightM = protoDoubleOrNil(c.HeightM) p.HeightType = 
protoStringOrNil(c.HeightType) - p.AntennaGain = protoDoubleOrNil(c.AntennaGain) + p.AntennaGain = protoDoubleOrNil(c.AntennaGainDbi) return p } @@ -291,8 +289,8 @@ func grantsFromDatabase(grants []*storage.DetailedGrant) []*protos.GrantDetails const mega int64 = 1e6 res := make([]*protos.GrantDetails, len(grants)) for i, g := range grants { - bw := (g.Grant.HighFrequency.Int64 - g.Grant.LowFrequency.Int64) / mega - freq := (g.Grant.HighFrequency.Int64 + g.Grant.LowFrequency.Int64) / (mega * 2) + bw := (g.Grant.HighFrequencyHz.Int64 - g.Grant.LowFrequencyHz.Int64) / mega + freq := (g.Grant.HighFrequencyHz.Int64 + g.Grant.LowFrequencyHz.Int64) / (mega * 2) res[i] = &protos.GrantDetails{ BandwidthMhz: bw, FrequencyMhz: freq, diff --git a/dp/cloud/go/services/dp/servicers/cbsd_manager_test.go b/dp/cloud/go/services/dp/servicers/cbsd_manager_test.go index 4653aad8d7a9..749ba7824320 100644 --- a/dp/cloud/go/services/dp/servicers/cbsd_manager_test.go +++ b/dp/cloud/go/services/dp/servicers/cbsd_manager_test.go @@ -692,7 +692,7 @@ func (s *stubCbsdManager) EnodebdUpdateCbsd(data *storage.DBCbsd) (*storage.Deta return nil, s.err } s.details.Cbsd.CbsdCategory = data.CbsdCategory - s.details.Cbsd.AntennaGain = data.AntennaGain + s.details.Cbsd.AntennaGainDbi = data.AntennaGainDbi s.details.Cbsd.LatitudeDeg = data.LatitudeDeg s.details.Cbsd.LongitudeDeg = data.LongitudeDeg s.details.Cbsd.HeightType = data.HeightType diff --git a/dp/cloud/go/services/dp/storage/amc_manager.go b/dp/cloud/go/services/dp/storage/amc_manager.go new file mode 100644 index 000000000000..f27a74ab38c8 --- /dev/null +++ b/dp/cloud/go/services/dp/storage/amc_manager.go @@ -0,0 +1,226 @@ +/* +Copyright 2022 The Magma Authors. + +This source code is licensed under the BSD-style license found in the +LICENSE file in the root directory of this source tree. 
+ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package storage + +import ( + "context" + "database/sql" + + sq "github.com/Masterminds/squirrel" + + "magma/dp/cloud/go/services/dp/storage/db" + "magma/orc8r/cloud/go/sqorc" +) + +// AmcManager is supposed to be a library that will replace radio controller +// it is not supposed to be a service but rather an interface to database +// could be implemented in this file as separate struct or combined with cbsd manager +// also its methods are supposed to be used in transaction (they should start a new one) +type AmcManager interface { + // GetState is equivalent to GetState grpc method + // it should return list of all feasible cbsd with grants + // cbsd is considered feasible if and only if + // - it has no pending requests + // - one of the following conditions is satisfied + // - it has all necessary parameters to perform sas requests (registration/grant) + // - it has some pending db action (e.g. it needs to be deleted) + GetState(sq.BaseRunner) ([]*DetailedCbsd, error) + CreateRequest(sq.BaseRunner, *MutableRequest) error + DeleteCbsd(sq.BaseRunner, *DBCbsd) error + UpdateCbsd(sq.BaseRunner, *DBCbsd, db.FieldMask) error +} + +type MutableRequest struct { + Request *DBRequest + RequestType *DBRequestType +} + +// WithinTx is used to call AmcManager function inside single transaction. 
+func WithinTx[T any](db *sql.DB, f func(tx *sql.Tx) (T, error)) (res T, err error) { + tx, err := db.BeginTx(context.Background(), nil) + if err != nil { + return res, err + } + + defer func() { + switch err { + case nil: + err = tx.Commit() + default: + rollbackErr := tx.Rollback() + if rollbackErr != nil { + err = rollbackErr + } + } + }() + + res, err = f(tx) + return res, err +} + +func NewAmcManager(db *sql.DB, builder sqorc.StatementBuilder, errorChecker sqorc.ErrorChecker, locker sqorc.Locker) *amcManager { + return &amcManager{ + &dpManager{ + db: db, + builder: builder, + cache: &enumCache{cache: map[string]map[string]int64{}}, + errorChecker: errorChecker, + locker: locker, + }, + } +} + +type amcManager struct { + *dpManager +} + +// CreateRequest inserts given request into the DB. +func (m *amcManager) CreateRequest(tx sq.BaseRunner, data *MutableRequest) error { + builder := m.builder.RunWith(tx) + + desiredTypeId, err := m.cache.getValue(builder, &DBRequestType{}, data.RequestType.Name.String) + if err != nil { + return err + } + data.Request.TypeId = db.MakeInt(desiredTypeId) + + _, err = db.NewQuery(). + WithBuilder(builder). + From(data.Request). + Insert(db.NewIncludeMask("type_id", "cbsd_id", "payload")) + return err +} + +// DeleteCbsd removes given CBSD from the DB. +func (m *amcManager) DeleteCbsd(tx sq.BaseRunner, cbsd *DBCbsd) error { + builder := m.builder.RunWith(tx) + where := sq.Eq{"id": cbsd.Id} + return db.NewQuery(). + WithBuilder(builder). + From(cbsd). + Where(where). + Delete() +} + +// UpdateCbsd update CBSD in the DB with given mask. +func (m *amcManager) UpdateCbsd(tx sq.BaseRunner, cbsd *DBCbsd, mask db.FieldMask) error { + builder := m.builder.RunWith(tx) + _, err := db.NewQuery(). + WithBuilder(builder). + From(cbsd). + Select(db.NewIncludeMask()). + Where(sq.Eq{"id": cbsd.Id}). 
+ Update(mask) + return err +} + +func (m *amcManager) GetState(tx sq.BaseRunner) ([]*DetailedCbsd, error) { + runner := m.getQueryRunner(tx) + return runner.getState() +} + +func notNull(fields ...string) sq.Sqlizer { + filters := make(sq.And, len(fields)) + for i, f := range fields { + filters[i] = sq.NotEq{f: nil} + } + return filters +} + +func (r *queryRunner) getState() ([]*DetailedCbsd, error) { + multiStepFields := []string{"fcc_id", "user_id", "number_of_ports", "min_power", "max_power", "antenna_gain"} + singleStepFields := append(multiStepFields, "latitude_deg", "longitude_deg", "height_m", "height_type") + res, err := db.NewQuery(). + WithBuilder(r.builder). + From(&DBCbsd{}). + Select(db.NewExcludeMask("network_id", "state_id", "desired_state_id")). + Join(db.NewQuery(). + From(&DBCbsdState{}). + As("t1"). + On(db.On(CbsdTable, "state_id", "t1", "id")). + Select(db.NewIncludeMask("name"))). + Join(db.NewQuery(). + From(&DBCbsdState{}). + As("t2"). + On(db.On(CbsdTable, "desired_state_id", "t2", "id")). + Select(db.NewIncludeMask("name"))). + Join(db.NewQuery(). + From(&DBGrant{}). + On(db.On(CbsdTable, "id", GrantTable, "cbsd_id")). + Select(db.NewIncludeMask("grant_id", "heartbeat_interval", "last_heartbeat_request_time", "low_frequency", "high_frequency")). + Join(db.NewQuery(). + From(&DBGrantState{}). + On(db.On(GrantTable, "state_id", GrantStateTable, "id")). + Select(db.NewIncludeMask("name"))). + Nullable()). + Nullable(). + Join(db.NewQuery(). + From(&DBRequest{}). + On(db.On(CbsdTable, "id", RequestTable, "cbsd_id")). + Select(db.NewIncludeMask()). + Join(db.NewQuery(). + From(&DBRequestType{}). + On(db.On(RequestTable, "type_id", RequestTypeTable, "id")). + Select(db.NewIncludeMask())). + Nullable()). + Nullable(). 
+ Where(sq.And{ + sq.Eq{RequestTable + ".id": nil}, + sq.Or{ + sq.Eq{CbsdTable + ".should_deregister": true}, + sq.Eq{"should_relinquish": true}, + sq.Eq{"is_deleted": true}, + sq.And{ + sq.Eq{"single_step_enabled": false}, + notNull(multiStepFields...), + }, + sq.And{ + sq.Eq{"single_step_enabled": true}, + sq.Eq{"cbsd_category": "a"}, + sq.Eq{"indoor_deployment": true}, + notNull(singleStepFields...), + }, + }, + }). + OrderBy(CbsdTable+".id", db.OrderAsc). + List() + + if err != nil { + return nil, err + } + + cbsds := make([]*DetailedCbsd, 0, len(res)) + + cbsdId := int64(-1) + cbsdIndex := -1 + + for i, models := range res { + cbsd := models[0].(*DBCbsd) + if cbsd.Id.Int64 != cbsdId { + cbsds = append(cbsds, convertCbsdToDetails(models)) + cbsdIndex += 1 + } + grant := res[i][3] + if grant.(*DBGrant).LowFrequencyHz.Valid { + grantState := res[i][4] + cbsds[cbsdIndex].Grants = append(cbsds[cbsdIndex].Grants, &DetailedGrant{ + Grant: grant.(*DBGrant), + GrantState: grantState.(*DBGrantState), + }) + } + cbsdId = cbsd.Id.Int64 + } + + return cbsds, nil +} diff --git a/dp/cloud/go/services/dp/storage/amc_manager_test.go b/dp/cloud/go/services/dp/storage/amc_manager_test.go new file mode 100644 index 000000000000..d396c2f67fbc --- /dev/null +++ b/dp/cloud/go/services/dp/storage/amc_manager_test.go @@ -0,0 +1,1017 @@ +package storage_test + +import ( + "database/sql" + "errors" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" + "gopkg.in/DATA-DOG/go-sqlmock.v1" + + b "magma/dp/cloud/go/services/dp/builders" + "magma/dp/cloud/go/services/dp/storage" + "magma/dp/cloud/go/services/dp/storage/db" + "magma/dp/cloud/go/services/dp/storage/dbtest" + "magma/orc8r/cloud/go/clock" + "magma/orc8r/cloud/go/sqorc" +) + +const ( + requestPayload = "some payload" + someGrantId = "some_grant_id" +) + +func TestAmcManager(t *testing.T) { + suite.Run(t, &AmcManagerTestSuite{}) +} + +type AmcManagerTestSuite struct { + suite.Suite + 
database *sql.DB + amcManager storage.AmcManager + resourceManager dbtest.ResourceManager + enumMaps map[string]map[string]int64 +} + +func (s *AmcManagerTestSuite) SetupSuite() { + builder := sqorc.GetSqlBuilder() + errorChecker := sqorc.SQLiteErrorChecker{} + locker := sqorc.GetSqlLocker() + database, err := sqorc.Open("sqlite3", ":memory:") + s.Require().NoError(err) + s.database = database + s.amcManager = storage.NewAmcManager(database, builder, errorChecker, locker) + + s.resourceManager = dbtest.NewResourceManager(s.T(), s.database, builder) + err = s.resourceManager.CreateTables( + &storage.DBCbsdState{}, + &storage.DBCbsd{}, + &storage.DBGrantState{}, + &storage.DBGrant{}, + &storage.DBRequest{}, + &storage.DBRequestType{}, + ) + s.Require().NoError(err) + err = s.resourceManager.InsertResources( + db.NewExcludeMask("id"), + &storage.DBCbsdState{Name: db.MakeString(unregistered)}, + &storage.DBCbsdState{Name: db.MakeString(registered)}, + &storage.DBGrantState{Name: db.MakeString(idle)}, + &storage.DBGrantState{Name: db.MakeString(granted)}, + &storage.DBGrantState{Name: db.MakeString(authorized)}, + &storage.DBRequestType{Name: db.MakeString(grant)}, + ) + s.Require().NoError(err) + s.enumMaps = map[string]map[string]int64{} + for _, model := range []db.Model{ + &storage.DBCbsdState{}, + &storage.DBGrantState{}, + &storage.DBRequestType{}, + } { + table := model.GetMetadata().Table + s.enumMaps[table] = s.getNameIdMapping(model) + } +} + +func (s *AmcManagerTestSuite) TearDownTest() { + clock.UnfreezeClock(s.T()) + err := s.resourceManager.DropResources( + &storage.DBCbsd{}, + &storage.DBGrant{}, + &storage.DBRequest{}, + ) + s.Require().NoError(err) +} + +func (s *AmcManagerTestSuite) TestCreateRequest() { + request := storage.MutableRequest{ + Request: &storage.DBRequest{ + CbsdId: db.MakeInt(1), + Payload: requestPayload, + }, + RequestType: &storage.DBRequestType{ + Name: db.MakeString(grant), + }, + } + + _, err := storage.WithinTx(s.database, 
func(tx *sql.Tx) (interface{}, error) { + return nil, s.amcManager.CreateRequest(tx, &request) + }) + s.Require().NoError(err) + + err = s.resourceManager.InTransaction(func() { + actual, err := db.NewQuery(). + WithBuilder(s.resourceManager.GetBuilder()). + From(&storage.DBRequest{}). + Select(db.NewIncludeMask("type_id", "cbsd_id", "payload")). + Fetch() + s.Require().NoError(err) + + expected := []db.Model{&storage.DBRequest{ + CbsdId: db.MakeInt(1), + TypeId: db.MakeInt(1), + Payload: requestPayload, + }} + s.Assert().Equal(expected, actual) + }) + s.Require().NoError(err) +} + +func (s *AmcManagerTestSuite) TestDeleteCbsd() { + stateId := s.enumMaps[storage.CbsdStateTable][unregistered] + cbsd1 := storage.DBCbsd{ + Id: db.MakeInt(1), + NetworkId: db.MakeString(someNetwork), + StateId: db.MakeInt(stateId), + DesiredStateId: db.MakeInt(stateId), + PreferredBandwidthMHz: db.MakeInt(20), + } + cbsd2 := storage.DBCbsd{ + Id: db.MakeInt(2), + NetworkId: db.MakeString(someNetwork), + StateId: db.MakeInt(stateId), + DesiredStateId: db.MakeInt(stateId), + PreferredBandwidthMHz: db.MakeInt(20), + } + err := s.resourceManager.InsertResources(db.NewExcludeMask(), &cbsd1, &cbsd2) + s.Require().NoError(err) + + _, err = storage.WithinTx(s.database, func(tx *sql.Tx) (interface{}, error) { + return nil, s.amcManager.DeleteCbsd(tx, &cbsd1) + }) + s.Require().NoError(err) + + // only cbsd2 should exist + err = s.resourceManager.InTransaction(func() { + actual, err := db.NewQuery(). + WithBuilder(s.resourceManager.GetBuilder()). + From(&storage.DBCbsd{}). + Select(db.NewIncludeMask("id")). 
+ Fetch() + s.Require().NoError(err) + + expected := []db.Model{&storage.DBCbsd{Id: db.MakeInt(2)}} + s.Assert().Equal(expected, actual) + }) + s.Require().NoError(err) + + // delete on not existing cbsd should not return error + _, err = storage.WithinTx(s.database, func(tx *sql.Tx) (interface{}, error) { + return nil, s.amcManager.DeleteCbsd(tx, &cbsd1) + }) + s.Require().NoError(err) +} + +func (s *AmcManagerTestSuite) TestUpdateCbsd() { + stateId := s.enumMaps[storage.CbsdStateTable][unregistered] + cbsd := storage.DBCbsd{ + Id: db.MakeInt(1), + NetworkId: db.MakeString(someNetwork), + StateId: db.MakeInt(stateId), + DesiredStateId: db.MakeInt(stateId), + PreferredBandwidthMHz: db.MakeInt(20), + } + err := s.resourceManager.InsertResources(db.NewExcludeMask(), &cbsd) + s.Require().NoError(err) + + cbsdUpdate := storage.DBCbsd{ + Id: db.MakeInt(1), + PreferredBandwidthMHz: db.MakeInt(30), + MinPower: db.MakeFloat(0), + MaxPower: db.MakeFloat(20), + } + mask := db.NewIncludeMask("preferred_bandwidth_mhz", "min_power", "max_power") + _, err = storage.WithinTx(s.database, func(tx *sql.Tx) (interface{}, error) { + return nil, s.amcManager.UpdateCbsd(tx, &cbsdUpdate, mask) + }) + s.Require().NoError(err) + + err = s.resourceManager.InTransaction(func() { + actual, err := db.NewQuery(). + WithBuilder(s.resourceManager.GetBuilder()). + From(&storage.DBCbsd{}). + Select(db.NewIncludeMask("id", "preferred_bandwidth_mhz", "min_power", "max_power")). 
+ Fetch() + s.Require().NoError(err) + + expected := []db.Model{&cbsdUpdate} + s.Assert().Equal(expected, actual) + }) + s.Require().NoError(err) +} + +func TestWithinTx(t *testing.T) { + testData := []struct { + name string + prepareMockFunc func(sqlmock.Sqlmock) + wrappedFunc func(*sql.Tx) (any, error) + resultCheckFunc func(any, error) + }{{ + name: "test working insert", + prepareMockFunc: func(mock sqlmock.Sqlmock) { + mock.ExpectBegin() + mock.ExpectExec("INSERT INTO table").WillReturnResult(sqlmock.NewResult(1, 1)) + mock.ExpectCommit() + mock.ExpectClose() + }, + wrappedFunc: func(tx *sql.Tx) (any, error) { + res, _ := tx.Exec("INSERT INTO table (\"field\") VALUES (1);") + lastId, err := res.LastInsertId() + return lastId, err + }, + resultCheckFunc: func(res any, err error) { + assert.Equal(t, int64(1), res) + assert.NoError(t, err) + }, + }, { + name: "test wrapped func error", + prepareMockFunc: func(mock sqlmock.Sqlmock) { + mock.ExpectBegin() + mock.ExpectExec("INSERT INTO table").WillReturnError(errors.New("exec error")) + mock.ExpectRollback() + mock.ExpectClose() + }, + wrappedFunc: func(tx *sql.Tx) (any, error) { + res, err := tx.Exec("INSERT INTO table (\"field\") VALUES (1);") + return res, err + }, + resultCheckFunc: func(res any, err error) { + assert.Equal(t, nil, res) + assert.Errorf(t, err, "exec error") + }, + }, { + name: "test commit error", + prepareMockFunc: func(mock sqlmock.Sqlmock) { + mock.ExpectBegin() + mock.ExpectExec("INSERT INTO table").WillReturnResult(sqlmock.NewResult(1, 1)) + mock.ExpectCommit().WillReturnError(errors.New("commit error")) + mock.ExpectClose() + }, + wrappedFunc: func(tx *sql.Tx) (any, error) { + res, _ := tx.Exec("INSERT INTO table (\"field\") VALUES (1);") + lastId, err := res.LastInsertId() + return lastId, err + }, + resultCheckFunc: func(res any, err error) { + assert.Equal(t, int64(1), res) + assert.Errorf(t, err, "commit error") + }, + }, { + name: "test begin transaction error", + prepareMockFunc: 
func(mock sqlmock.Sqlmock) { + mock.ExpectBegin().WillReturnError(errors.New("begin error")) + mock.ExpectClose() + }, + wrappedFunc: func(tx *sql.Tx) (any, error) { + return nil, nil + }, + resultCheckFunc: func(res any, err error) { + assert.Equal(t, nil, res) + assert.Errorf(t, err, "begin error") + }, + }, { + name: "test transaction rollback error", + prepareMockFunc: func(mock sqlmock.Sqlmock) { + mock.ExpectBegin() + mock.ExpectRollback().WillReturnError(errors.New("rollback error")) + mock.ExpectClose() + }, + wrappedFunc: func(tx *sql.Tx) (any, error) { + return nil, errors.New("an error") + }, + resultCheckFunc: func(res any, err error) { + assert.Equal(t, nil, res) + assert.Errorf(t, err, "rollback error") + }, + }} + + for _, tc := range testData { + database, mock, err := sqlmock.New() + assert.NoError(t, err) + + t.Run(tc.name, func(t *testing.T) { + tc.prepareMockFunc(mock) + res, err := storage.WithinTx(database, tc.wrappedFunc) + tc.resultCheckFunc(res, err) + }) + + err = database.Close() + assert.NoError(t, err) + } +} + +func (s *AmcManagerTestSuite) TestGetState() { + registeredId := s.enumMaps[storage.CbsdStateTable][registered] + grantedId := s.enumMaps[storage.GrantStateTable][granted] + authorizedId := s.enumMaps[storage.GrantStateTable][authorized] + grantReqId := s.enumMaps[storage.RequestTypeTable]["grant"] + preferences := []uint32{0b10101100, 0b00110, 0b0100000, 0b11010} + availableFreqs := []uint32{0b10111100, 0b010110, 0b01001011, 0b11110} + testCases := []struct { + name string + input []db.Model + expected []*storage.DetailedCbsd + }{{ + name: "test get basic state", + input: []db.Model{ + b.NewDBCbsdBuilder(). + WithNetworkId(someNetwork). + WithSerialNumber(someSerialNumber). + WithId(0). + WithCbsdId(someCbsdIdStr). + WithStateId(registeredId). + WithDesiredStateId(registeredId). + WithAntennaGain(20). + Cbsd, + }, + expected: []*storage.DetailedCbsd{ + b.NewDetailedDBCbsdBuilder(). + WithCbsd( + b.NewDBCbsdBuilder(). 
+ WithId(0). + WithSerialNumber(someSerialNumber). + WithCbsdId(someCbsdIdStr). + WithIndoorDeployment(false). + WithShouldDeregister(false). + WithShouldRelinquish(false). + WithIsDeleted(false). + WithAntennaGain(20). + Cbsd, + registered, + registered). + Details, + }, + }, { + name: "test get state with frequency preferences", + input: []db.Model{ + b.NewDBCbsdBuilder(). + WithNetworkId(someNetwork). + WithSerialNumber(someSerialNumber+"1"). + WithId(1). + WithCbsdId(someCbsdIdStr). + WithStateId(registeredId). + WithCbsdCategory("a"). + WithAntennaGain(20). + WithDesiredStateId(registeredId). + WithPreferences(15, []int64{3600, 3580, 3620}). + WithAvailableFrequencies(preferences). + Cbsd, + }, + expected: []*storage.DetailedCbsd{ + b.NewDetailedDBCbsdBuilder(). + WithCbsd( + b.NewDBCbsdBuilder(). + WithId(1). + WithSerialNumber(someSerialNumber+"1"). + WithCbsdId(someCbsdIdStr). + WithCbsdCategory("a"). + WithAntennaGain(20). + WithPreferences(15, []int64{3600, 3580, 3620}). + WithAvailableFrequencies(preferences). + WithIndoorDeployment(false). + WithShouldDeregister(false). + WithShouldRelinquish(false). + WithIsDeleted(false). + Cbsd, + registered, + registered). + Details, + }, + }, { + name: "test get state with grants", + input: []db.Model{ + b.NewDBCbsdBuilder(). + WithId(2). + WithNetworkId(someNetwork). + WithSerialNumber(someSerialNumber+"2"). + WithCbsdId(someCbsdIdStr). + WithStateId(registeredId). + WithCbsdCategory("a"). + WithAntennaGain(20). + WithDesiredStateId(registeredId). + WithPreferences(15, []int64{3600, 3580, 3620}). + WithAvailableFrequencies(availableFreqs). + Cbsd, + b.NewDBGrantBuilder(). + WithDefaultTestValues(). + WithCbsdId(2). + WithStateId(grantedId). + WithLastHeartbeatTime(time.Unix(111, 0).UTC()). + Grant, + b.NewDBGrantBuilder(). + WithDefaultTestValues(). + WithCbsdId(2). + WithStateId(authorizedId). + WithLastHeartbeatTime(time.Unix(112, 0).UTC()). 
+ Grant, + }, + expected: []*storage.DetailedCbsd{ + b.NewDetailedDBCbsdBuilder(). + WithCbsd( + b.NewDBCbsdBuilder(). + WithId(2). + WithSerialNumber(someSerialNumber+"2"). + WithCbsdId(someCbsdIdStr). + WithCbsdCategory("a"). + WithAntennaGain(20). + WithPreferences(15, []int64{3600, 3580, 3620}). + WithAvailableFrequencies(availableFreqs). + WithIndoorDeployment(false). + WithShouldDeregister(false). + WithShouldRelinquish(false). + WithIsDeleted(false). + Cbsd, + registered, + registered). + WithAmcGrant(granted, 3600, time.Unix(111, 0).UTC(), someGrantId, 1). + WithAmcGrant(authorized, 3600, time.Unix(112, 0).UTC(), someGrantId, 1). + Details, + }, + }, { + name: "test get state with channels", + input: []db.Model{ + b.NewDBCbsdBuilder(). + WithId(3). + WithNetworkId(someNetwork). + WithSerialNumber(someSerialNumber+"3"). + WithCbsdId(someCbsdIdStr). + WithStateId(registeredId). + WithCbsdCategory("a"). + WithAntennaGain(20). + WithDesiredStateId(registeredId). + WithPreferences(15, []int64{3600, 3580, 3620}). + WithAvailableFrequencies(availableFreqs). + WithChannels([]storage.Channel{ + { + LowFrequencyHz: 3590, + HighFrequencyHz: 3610, + MaxEirp: 15, + }, + { + LowFrequencyHz: 3600, + HighFrequencyHz: 3620, + }, + }). + Cbsd, + }, + expected: []*storage.DetailedCbsd{ + b.NewDetailedDBCbsdBuilder(). + WithCbsd( + b.NewDBCbsdBuilder(). + WithId(3). + WithSerialNumber(someSerialNumber+"3"). + WithCbsdId(someCbsdIdStr). + WithCbsdCategory("a"). + WithAntennaGain(20). + WithPreferences(15, []int64{3600, 3580, 3620}). + WithAvailableFrequencies(availableFreqs). + WithIndoorDeployment(false). + WithShouldDeregister(false). + WithShouldRelinquish(false). + WithIsDeleted(false). + WithChannels([]storage.Channel{ + { + LowFrequencyHz: 3590, + HighFrequencyHz: 3610, + MaxEirp: 15, + }, + { + LowFrequencyHz: 3600, + HighFrequencyHz: 3620, + }, + }). + Cbsd, + registered, + registered). 
+ Details, + }, + }, { + name: "test get state for cbsd marked for deletion", + input: []db.Model{ + b.NewDBCbsdBuilder(). + WithId(4). + WithNetworkId(someNetwork). + WithSerialNumber(someSerialNumber + "4"). + WithCbsdId(someCbsdIdStr). + WithStateId(registeredId). + WithDesiredStateId(registeredId). + WithIsDeleted(true). + Cbsd, + }, + expected: []*storage.DetailedCbsd{ + b.NewDetailedDBCbsdBuilder(). + WithCbsd( + b.NewDBCbsdBuilder(). + WithId(4). + WithSerialNumber(someSerialNumber+"4"). + WithCbsdId(someCbsdIdStr). + WithIndoorDeployment(false). + WithShouldDeregister(false). + WithShouldRelinquish(false). + WithIsDeleted(true). + Cbsd, + registered, + registered). + Details, + }, + }, { + name: "test get state for cbsd marked for update", + input: []db.Model{ + b.NewDBCbsdBuilder(). + WithId(5). + WithNetworkId(someNetwork). + WithSerialNumber(someSerialNumber + "5"). + WithCbsdId(someCbsdIdStr). + WithStateId(registeredId). + WithDesiredStateId(registeredId). + WithShouldDeregister(true). + Cbsd, + }, + expected: []*storage.DetailedCbsd{ + b.NewDetailedDBCbsdBuilder(). + WithCbsd( + b.NewDBCbsdBuilder(). + WithId(5). + WithSerialNumber(someSerialNumber+"5"). + WithCbsdId(someCbsdIdStr). + WithIndoorDeployment(false). + WithShouldDeregister(true). + WithShouldRelinquish(false). + WithIsDeleted(false). + Cbsd, + registered, + registered). + Details, + }, + }, { + name: "test get state for cbsd marked for relinquish", + input: []db.Model{ + b.NewDBCbsdBuilder(). + WithId(6). + WithNetworkId(someNetwork). + WithSerialNumber(someSerialNumber + "6"). + WithCbsdId(someCbsdIdStr). + WithStateId(registeredId). + WithDesiredStateId(registeredId). + WithShouldRelinquish(true). + Cbsd, + }, + expected: []*storage.DetailedCbsd{ + b.NewDetailedDBCbsdBuilder(). + WithCbsd( + b.NewDBCbsdBuilder(). + WithId(6). + WithSerialNumber(someSerialNumber+"6"). + WithCbsdId(someCbsdIdStr). + WithIndoorDeployment(false). + WithShouldDeregister(false). + WithShouldRelinquish(true). 
+ WithIsDeleted(false). + Cbsd, + registered, + registered). + Details, + }, + }, { + name: "test get state for cbsd with last seen", + input: []db.Model{ + b.NewDBCbsdBuilder(). + WithId(7). + WithNetworkId(someNetwork). + WithSerialNumber(someSerialNumber + "7"). + WithCbsdId(someCbsdIdStr). + WithStateId(registeredId). + WithDesiredStateId(registeredId). + WithAntennaGain(20). + WithLastSeen(1). + Cbsd, + }, + expected: []*storage.DetailedCbsd{ + b.NewDetailedDBCbsdBuilder(). + WithCbsd( + b.NewDBCbsdBuilder(). + WithId(7). + WithSerialNumber(someSerialNumber+"7"). + WithCbsdId(someCbsdIdStr). + WithIndoorDeployment(false). + WithShouldDeregister(false). + WithShouldRelinquish(false). + WithIsDeleted(false). + WithAntennaGain(20). + WithLastSeen(1). + Cbsd, + registered, + registered). + Details, + }, + }, { + name: "test get state for cbsd with pending requests", + input: []db.Model{ + b.NewDBCbsdBuilder(). + WithId(8). + WithNetworkId(someNetwork). + WithSerialNumber(someSerialNumber + "8"). + WithCbsdId(someCbsdIdStr). + WithStateId(registeredId). + WithDesiredStateId(registeredId). + WithAntennaGain(20). + Cbsd, + b.NewRequestBuilder(1, 8, grantReqId, "{'some': 'payload'}").Request, + }, + expected: []*storage.DetailedCbsd{}, + }, { + name: "test get state with multiple cbsds", + input: []db.Model{ + b.NewDBCbsdBuilder(). + WithNetworkId(someNetwork). + WithSerialNumber(someSerialNumber + "9"). + WithId(9). + WithCbsdId(someCbsdIdStr). + WithStateId(registeredId). + WithDesiredStateId(registeredId). + WithAntennaGain(20). + Cbsd, + b.NewDBCbsdBuilder(). + WithNetworkId(someNetwork). + WithSerialNumber(someSerialNumber + "10"). + WithId(10). + WithCbsdId(someCbsdIdStr). + WithStateId(registeredId). + WithDesiredStateId(registeredId). + WithAntennaGain(20). + Cbsd, + }, + expected: []*storage.DetailedCbsd{ + b.NewDetailedDBCbsdBuilder(). + WithCbsd( + b.NewDBCbsdBuilder(). + WithId(9). + WithSerialNumber(someSerialNumber+"9"). + WithCbsdId(someCbsdIdStr). 
+ WithIndoorDeployment(false). + WithShouldDeregister(false). + WithShouldRelinquish(false). + WithIsDeleted(false). + WithAntennaGain(20). + Cbsd, + registered, + registered). + Details, + b.NewDetailedDBCbsdBuilder(). + WithCbsd( + b.NewDBCbsdBuilder(). + WithId(10). + WithSerialNumber(someSerialNumber+"10"). + WithCbsdId(someCbsdIdStr). + WithIndoorDeployment(false). + WithShouldDeregister(false). + WithShouldRelinquish(false). + WithIsDeleted(false). + WithAntennaGain(20). + Cbsd, + registered, + registered). + Details, + }, + }, { + name: "test get state with single step enabled", + input: []db.Model{ + b.NewDBCbsdBuilder(). + WithNetworkId(someNetwork). + WithSerialNumber(someSerialNumber + "11"). + WithId(11). + WithCbsdId(someCbsdIdStr). + WithStateId(registeredId). + WithDesiredStateId(registeredId). + WithSingleStepEnabled(true). + WithFullInstallationParam(). + WithCbsdCategory("a"). + Cbsd, + }, + expected: []*storage.DetailedCbsd{ + b.NewDetailedDBCbsdBuilder(). + WithCbsd( + b.NewDBCbsdBuilder(). + WithId(11). + WithSerialNumber(someSerialNumber+"11"). + WithCbsdId(someCbsdIdStr). + WithIndoorDeployment(false). + WithShouldDeregister(false). + WithShouldRelinquish(false). + WithIsDeleted(false). + WithSingleStepEnabled(true). + WithFullInstallationParam(). + WithCbsdCategory("a"). + Cbsd, + registered, + registered). + Details, + }, + }, { + name: "test get state with single step enabled", + input: []db.Model{ + b.NewDBCbsdBuilder(). + WithNetworkId(someNetwork). + WithSerialNumber(someSerialNumber + "12"). + WithId(12). + WithCbsdId(someCbsdIdStr). + WithStateId(registeredId). + WithDesiredStateId(registeredId). + WithSingleStepEnabled(true). + WithFullInstallationParam(). + WithCbsdCategory("a"). + Cbsd, + }, + expected: []*storage.DetailedCbsd{ + b.NewDetailedDBCbsdBuilder(). + WithCbsd( + b.NewDBCbsdBuilder(). + WithSerialNumber(someSerialNumber+"12"). + WithId(12). + WithCbsdId(someCbsdIdStr). + WithShouldDeregister(false). 
+ WithShouldRelinquish(false). + WithIsDeleted(false). + WithSingleStepEnabled(true). + WithFullInstallationParam(). + WithCbsdCategory("a"). + Cbsd, + registered, + registered). + Details, + }, + }, { + name: "test not get state without registration params", + input: []db.Model{ + b.NewDBCbsdBuilder(). + Empty(). + WithNetworkId(someNetwork). + WithPreferences(15, []int64{3600, 3580, 3620}). + WithStateId(registeredId). + WithEirpCapabilities(1, 2, 2). + WithDesiredStateId(registeredId). + Cbsd, + }, + expected: []*storage.DetailedCbsd{}, + }, { + name: "test not get state without eirp capabilities", + input: []db.Model{ + b.NewDBCbsdBuilder(). + Empty(). + WithFccId(someFccId). + WithUserId(someUserId). + WithNetworkId(someNetwork). + WithCbsdId(someCbsdIdStr). + WithEirpCapabilities(1, 2, 2). + WithSerialNumber(someSerialNumber+"14"). + WithPreferences(15, []int64{3600, 3580, 3620}). + WithStateId(registeredId). + WithDesiredStateId(registeredId). + Cbsd, + }, + expected: []*storage.DetailedCbsd{}, + }, { + name: "test not get state with single step enabled and no installation params", + input: []db.Model{ + b.NewDBCbsdBuilder(). + WithNetworkId(someNetwork). + WithSerialNumber(someSerialNumber+"14"). + WithFccId(someFccId). + WithUserId(someUserId). + WithNetworkId(someNetwork). + WithCbsdId(someCbsdIdStr). + WithStateId(registeredId). + WithEirpCapabilities(1, 2, 2). + WithDesiredStateId(registeredId). + WithSingleStepEnabled(true). + WithCbsdCategory("a"). + Cbsd, + }, + expected: []*storage.DetailedCbsd{}, + }, { + name: "test not get state with single step enabled, category A and outdoor", + input: []db.Model{ + b.NewDBCbsdBuilder(). + WithNetworkId(someNetwork). + WithSerialNumber(someSerialNumber+"15"). + WithFccId(someFccId). + WithUserId(someUserId). + WithNetworkId(someNetwork). + WithCbsdId(someCbsdIdStr). + WithEirpCapabilities(1, 2, 2). + WithStateId(registeredId). + WithDesiredStateId(registeredId). + WithSingleStepEnabled(true). 
+ WithCbsdCategory("a"). + WithFullInstallationParam(). + WithIndoorDeployment(false). + Cbsd, + }, + expected: []*storage.DetailedCbsd{}, + }, { + name: "test get state for multiple radios with multiple grants", + input: []db.Model{ + b.NewDBCbsdBuilder(). + WithId(15). + WithNetworkId(someNetwork). + WithSerialNumber(someSerialNumber + "15"). + WithCbsdId(someCbsdIdStr). + WithStateId(registeredId). + WithAntennaGain(20). + WithDesiredStateId(registeredId). + Cbsd, + b.NewDBGrantBuilder(). + WithDefaultTestValues(). + WithStateId(grantedId). + WithCbsdId(15). + WithLastHeartbeatTime(time.Unix(113, 0).UTC()). + Grant, + b.NewDBGrantBuilder(). + WithDefaultTestValues(). + WithStateId(grantedId). + WithCbsdId(15). + WithLastHeartbeatTime(time.Unix(114, 0).UTC()). + Grant, + b.NewDBCbsdBuilder(). + WithId(16). + WithNetworkId(someNetwork). + WithSerialNumber(someSerialNumber + "16"). + WithCbsdId(someCbsdIdStr). + WithStateId(registeredId). + WithAntennaGain(20). + WithDesiredStateId(registeredId). + Cbsd, + b.NewDBGrantBuilder(). + WithDefaultTestValues(). + WithStateId(authorizedId). + WithCbsdId(16). + WithLastHeartbeatTime(time.Unix(115, 0).UTC()). + Grant, + b.NewDBGrantBuilder(). + WithDefaultTestValues(). + WithStateId(grantedId). + WithCbsdId(16). + WithLastHeartbeatTime(time.Unix(116, 0).UTC()). + Grant, + b.NewDBGrantBuilder(). + WithDefaultTestValues(). + WithStateId(grantedId). + WithCbsdId(16). + WithLastHeartbeatTime(time.Unix(117, 0).UTC()). + Grant, + b.NewDBGrantBuilder(). + WithDefaultTestValues(). + WithStateId(grantedId). + WithCbsdId(16). + WithLastHeartbeatTime(time.Unix(118, 0).UTC()). + Grant, + b.NewDBCbsdBuilder(). + WithId(17). + WithNetworkId(someNetwork). + WithSerialNumber(someSerialNumber + "17"). + WithCbsdId(someCbsdIdStr). + WithStateId(registeredId). + WithAntennaGain(20). + WithDesiredStateId(registeredId). + Cbsd, + b.NewDBCbsdBuilder(). + WithId(18). + WithNetworkId(someNetwork). + WithSerialNumber(someSerialNumber + "18"). 
+ WithCbsdId(someCbsdIdStr). + WithStateId(registeredId). + WithAntennaGain(20). + WithDesiredStateId(registeredId). + Cbsd, + b.NewDBGrantBuilder(). + WithDefaultTestValues(). + WithStateId(grantedId). + WithCbsdId(18). + WithLastHeartbeatTime(time.Unix(119, 0).UTC()). + Grant, + b.NewDBGrantBuilder(). + WithDefaultTestValues(). + WithStateId(authorizedId). + WithCbsdId(18). + WithLastHeartbeatTime(time.Unix(120, 0).UTC()). + Grant, + }, + expected: []*storage.DetailedCbsd{ + b.NewDetailedDBCbsdBuilder(). + WithCbsd( + b.NewDBCbsdBuilder(). + WithId(15). + WithSerialNumber(someSerialNumber+"15"). + WithCbsdId(someCbsdIdStr). + WithAntennaGain(20). + WithIndoorDeployment(false). + WithShouldDeregister(false). + WithShouldRelinquish(false). + WithIsDeleted(false). + Cbsd, + registered, + registered). + WithAmcGrant(granted, 3600, time.Unix(113, 0).UTC(), someGrantId, 1). + WithAmcGrant(granted, 3600, time.Unix(114, 0).UTC(), someGrantId, 1). + Details, + b.NewDetailedDBCbsdBuilder(). + WithCbsd( + b.NewDBCbsdBuilder(). + WithId(16). + WithSerialNumber(someSerialNumber+"16"). + WithCbsdId(someCbsdIdStr). + WithAntennaGain(20). + WithIndoorDeployment(false). + WithShouldDeregister(false). + WithShouldRelinquish(false). + WithIsDeleted(false). + Cbsd, + registered, + registered). + WithAmcGrant(authorized, 3600, time.Unix(115, 0).UTC(), someGrantId, 1). + WithAmcGrant(granted, 3600, time.Unix(116, 0).UTC(), someGrantId, 1). + WithAmcGrant(granted, 3600, time.Unix(117, 0).UTC(), someGrantId, 1). + WithAmcGrant(granted, 3600, time.Unix(118, 0).UTC(), someGrantId, 1). + Details, + b.NewDetailedDBCbsdBuilder(). + WithCbsd( + b.NewDBCbsdBuilder(). + WithId(17). + WithSerialNumber(someSerialNumber+"17"). + WithCbsdId(someCbsdIdStr). + WithAntennaGain(20). + WithIndoorDeployment(false). + WithShouldDeregister(false). + WithShouldRelinquish(false). + WithIsDeleted(false). + Cbsd, + registered, + registered). + Details, + b.NewDetailedDBCbsdBuilder(). 
+ WithCbsd( + b.NewDBCbsdBuilder(). + WithId(18). + WithSerialNumber(someSerialNumber+"18"). + WithCbsdId(someCbsdIdStr). + WithAntennaGain(20). + WithIndoorDeployment(false). + WithShouldDeregister(false). + WithShouldRelinquish(false). + WithIsDeleted(false). + Cbsd, + registered, + registered). + WithAmcGrant(granted, 3600, time.Unix(119, 0).UTC(), someGrantId, 1). + WithAmcGrant(authorized, 3600, time.Unix(120, 0).UTC(), someGrantId, 1). + Details, + }, + }} + for _, tc := range testCases { + s.Run(tc.name, func() { + s.givenResourcesInserted(tc.input...) + + actual, err := storage.WithinTx(s.database, func(tx *sql.Tx) (interface{}, error) { + actual, err := s.amcManager.GetState(tx) + return actual, err + }) + + s.Require().NoError(err) + s.Assert().Equal(tc.expected, actual) + + err = s.resourceManager.DropResources( + &storage.DBCbsd{}, + &storage.DBGrant{}, + ) + s.Require().NoError(err) + + }) + } +} + +func (s *AmcManagerTestSuite) givenResourcesInserted(models ...db.Model) { + err := s.resourceManager.InsertResources(db.NewExcludeMask(), models...) + s.Require().NoError(err) +} + +func (s *AmcManagerTestSuite) getNameIdMapping(model db.Model) map[string]int64 { + var resources [][]db.Model + err := s.resourceManager.InTransaction(func() { + var err error + resources, err = db.NewQuery(). + WithBuilder(s.resourceManager.GetBuilder()). + From(model). + Select(db.NewExcludeMask()). 
+ List() + s.Require().NoError(err) + }) + s.Require().NoError(err) + m := make(map[string]int64, len(resources)) + for _, r := range resources { + enum := r[0].(storage.EnumModel) + m[enum.GetName()] = enum.GetId() + } + return m +} diff --git a/dp/cloud/go/services/dp/storage/cbsd_manager.go b/dp/cloud/go/services/dp/storage/cbsd_manager.go index a4428269c6d6..1192e5413a18 100644 --- a/dp/cloud/go/services/dp/storage/cbsd_manager.go +++ b/dp/cloud/go/services/dp/storage/cbsd_manager.go @@ -63,20 +63,18 @@ type DetailedGrant struct { func NewCbsdManager(db *sql.DB, builder sqorc.StatementBuilder, errorChecker sqorc.ErrorChecker, locker sqorc.Locker) *cbsdManager { return &cbsdManager{ - db: db, - builder: builder, - cache: &enumCache{cache: map[string]map[string]int64{}}, - errorChecker: errorChecker, - locker: locker, + &dpManager{ + db: db, + builder: builder, + cache: &enumCache{cache: map[string]map[string]int64{}}, + errorChecker: errorChecker, + locker: locker, + }, } } type cbsdManager struct { - db *sql.DB - builder sqorc.StatementBuilder - cache *enumCache - errorChecker sqorc.ErrorChecker - locker sqorc.Locker + *dpManager } type enumCache struct { @@ -85,7 +83,7 @@ type enumCache struct { func (c *cbsdManager) CreateCbsd(networkId string, data *MutableCbsd) error { _, err := sqorc.ExecInTx(c.db, nil, nil, func(tx *sql.Tx) (interface{}, error) { - runner := c.getInTransactionManager(tx) + runner := c.getQueryRunner(tx) err := runner.createCbsd(networkId, data) return nil, err }) @@ -94,7 +92,7 @@ func (c *cbsdManager) CreateCbsd(networkId string, data *MutableCbsd) error { func (c *cbsdManager) UpdateCbsd(networkId string, id int64, data *MutableCbsd) error { _, err := sqorc.ExecInTx(c.db, nil, nil, func(tx *sql.Tx) (interface{}, error) { - runner := c.getInTransactionManager(tx) + runner := c.getQueryRunner(tx) err := runner.updateCbsd(networkId, id, data) return nil, err }) @@ -104,7 +102,7 @@ func (c *cbsdManager) UpdateCbsd(networkId string, id 
int64, data *MutableCbsd) func (c *cbsdManager) EnodebdUpdateCbsd(data *DBCbsd) (*DetailedCbsd, error) { grantJoinClause := getGrantJoinClauseForEnodebdUpdate() result, err := sqorc.ExecInTx(c.db, nil, nil, func(tx *sql.Tx) (interface{}, error) { - runner := c.getInTransactionManager(tx) + runner := c.getQueryRunner(tx) err := runner.enodebdUpdateCbsd(data) if err != nil { return nil, err @@ -119,7 +117,7 @@ func (c *cbsdManager) EnodebdUpdateCbsd(data *DBCbsd) (*DetailedCbsd, error) { func (c *cbsdManager) DeleteCbsd(networkId string, id int64) error { _, err := sqorc.ExecInTx(c.db, nil, nil, func(tx *sql.Tx) (interface{}, error) { - runner := c.getInTransactionManager(tx) + runner := c.getQueryRunner(tx) data := &DBCbsd{IsDeleted: db.MakeBool(true)} err := runner.updateField(networkId, id, "is_deleted", data) return nil, err @@ -129,7 +127,7 @@ func (c *cbsdManager) DeleteCbsd(networkId string, id int64) error { func (c *cbsdManager) FetchCbsd(networkId string, id int64) (*DetailedCbsd, error) { cbsd, err := sqorc.ExecInTx(c.db, nil, nil, func(tx *sql.Tx) (interface{}, error) { - runner := c.getInTransactionManager(tx) + runner := c.getQueryRunner(tx) grantJoinClause := db.On(GrantTable, "state_id", GrantStateTable, "id") return runner.fetchDetailedCbsd(getCbsdFiltersWithId(networkId, id), grantJoinClause) }) @@ -141,7 +139,7 @@ func (c *cbsdManager) FetchCbsd(networkId string, id int64) (*DetailedCbsd, erro func (c *cbsdManager) ListCbsd(networkId string, pagination *Pagination, filter *CbsdFilter) (*DetailedCbsdList, error) { cbsds, err := sqorc.ExecInTx(c.db, nil, nil, func(tx *sql.Tx) (interface{}, error) { - runner := c.getInTransactionManager(tx) + runner := c.getQueryRunner(tx) return runner.listDetailedCbsd(networkId, pagination, filter) }) if err != nil { @@ -152,7 +150,7 @@ func (c *cbsdManager) ListCbsd(networkId string, pagination *Pagination, filter func (c *cbsdManager) DeregisterCbsd(networkId string, id int64) error { _, err := 
sqorc.ExecInTx(c.db, nil, nil, func(tx *sql.Tx) (interface{}, error) { - runner := c.getInTransactionManager(tx) + runner := c.getQueryRunner(tx) data := &DBCbsd{ShouldDeregister: db.MakeBool(true)} err := runner.updateField(networkId, id, "should_deregister", data) return nil, err @@ -162,7 +160,7 @@ func (c *cbsdManager) DeregisterCbsd(networkId string, id int64) error { func (c *cbsdManager) RelinquishCbsd(networkId string, id int64) error { _, err := sqorc.ExecInTx(c.db, nil, nil, func(tx *sql.Tx) (interface{}, error) { - runner := c.getInTransactionManager(tx) + runner := c.getQueryRunner(tx) data := &DBCbsd{ShouldRelinquish: db.MakeBool(true)} err := runner.updateField(networkId, id, "should_relinquish", data) return nil, err @@ -170,26 +168,12 @@ func (c *cbsdManager) RelinquishCbsd(networkId string, id int64) error { return makeError(err, c.errorChecker) } -func (c *cbsdManager) getInTransactionManager(tx sq.BaseRunner) *cbsdManagerInTransaction { - return &cbsdManagerInTransaction{ - builder: c.builder.RunWith(tx), - cache: c.cache, - locker: c.locker, - } -} - -type cbsdManagerInTransaction struct { - builder sq.StatementBuilderType - cache *enumCache - locker sqorc.Locker -} - -func (c *cbsdManagerInTransaction) createCbsd(networkId string, data *MutableCbsd) error { - unregisteredState, err := c.cache.getValue(c.builder, &DBCbsdState{}, "unregistered") +func (r *queryRunner) createCbsd(networkId string, data *MutableCbsd) error { + unregisteredState, err := r.cache.getValue(r.builder, &DBCbsdState{}, "unregistered") if err != nil { return err } - desiredState, err := c.cache.getValue(c.builder, &DBCbsdState{}, data.DesiredState.Name.String) + desiredState, err := r.cache.getValue(r.builder, &DBCbsdState{}, data.DesiredState.Name.String) if err != nil { return err } @@ -199,7 +183,7 @@ func (c *cbsdManagerInTransaction) createCbsd(networkId string, data *MutableCbs columns := append(getCbsdWriteFields(), "state_id", "network_id") mask := 
db.NewIncludeMask(columns...) _, err = db.NewQuery(). - WithBuilder(c.builder). + WithBuilder(r.builder). From(data.Cbsd). Insert(mask) return err @@ -245,12 +229,12 @@ func getEnodebdWritableFields() []string { } } -func (c *cbsdManagerInTransaction) updateCbsd(networkId string, id int64, data *MutableCbsd) error { +func (r *queryRunner) updateCbsd(networkId string, id int64, data *MutableCbsd) error { mask := db.NewIncludeMask("id") - if _, err := c.selectForUpdateIfCbsdExists(mask, getCbsdFiltersWithId(networkId, id)); err != nil { + if _, err := r.selectForUpdateIfCbsdExists(mask, getCbsdFiltersWithId(networkId, id)); err != nil { return err } - desiredState, err := c.cache.getValue(c.builder, &DBCbsdState{}, data.DesiredState.Name.String) + desiredState, err := r.cache.getValue(r.builder, &DBCbsdState{}, data.DesiredState.Name.String) if err != nil { return err } @@ -259,7 +243,7 @@ func (c *cbsdManagerInTransaction) updateCbsd(networkId string, id int64, data * columns := append(getCbsdWriteFields(), "should_deregister") mask = db.NewIncludeMask(columns...) _, err = db.NewQuery(). - WithBuilder(c.builder). + WithBuilder(r.builder). From(data.Cbsd). Select(db.NewIncludeMask()). Where(sq.Eq{"id": id}). @@ -267,12 +251,12 @@ func (c *cbsdManagerInTransaction) updateCbsd(networkId string, id int64, data * return err } -func (c *cbsdManagerInTransaction) enodebdUpdateCbsd(data *DBCbsd) error { +func (r *queryRunner) enodebdUpdateCbsd(data *DBCbsd) error { identifiers := []string{"cbsd_serial_number", "network_id"} maskFields := append(identifiers, getEnodebdWritableFields()...) mask := db.NewIncludeMask(maskFields...) filters := sq.Eq{"cbsd_serial_number": data.CbsdSerialNumber} - cbsd, err := c.selectForUpdateIfCbsdExists(mask, filters) + cbsd, err := r.selectForUpdateIfCbsdExists(mask, filters) if err != nil { return err @@ -287,7 +271,7 @@ func (c *cbsdManagerInTransaction) enodebdUpdateCbsd(data *DBCbsd) error { } _, err = db.NewQuery(). 
- WithBuilder(c.builder). + WithBuilder(r.builder). From(data). Select(db.NewIncludeMask("id")). Where(filters). @@ -296,13 +280,13 @@ func (c *cbsdManagerInTransaction) enodebdUpdateCbsd(data *DBCbsd) error { return err } -func (c *cbsdManagerInTransaction) selectForUpdateIfCbsdExists(mask db.FieldMask, filters sq.Eq) (*DBCbsd, error) { +func (r *queryRunner) selectForUpdateIfCbsdExists(mask db.FieldMask, filters sq.Eq) (*DBCbsd, error) { res, err := db.NewQuery(). - WithBuilder(c.builder). + WithBuilder(r.builder). From(&DBCbsd{}). Select(mask). Where(filters). - Lock(c.locker.WithLock()). + Lock(r.locker.WithLock()). Fetch() if err != nil { return nil, err @@ -310,14 +294,14 @@ func (c *cbsdManagerInTransaction) selectForUpdateIfCbsdExists(mask db.FieldMask return res[0].(*DBCbsd), nil } -func (c *cbsdManagerInTransaction) updateField(networkId string, id int64, field string, data *DBCbsd) error { +func (r *queryRunner) updateField(networkId string, id int64, field string, data *DBCbsd) error { mask := db.NewIncludeMask("id") - if _, err := c.selectForUpdateIfCbsdExists(mask, getCbsdFiltersWithId(networkId, id)); err != nil { + if _, err := r.selectForUpdateIfCbsdExists(mask, getCbsdFiltersWithId(networkId, id)); err != nil { return err } mask = db.NewIncludeMask(field) _, err := db.NewQuery(). - WithBuilder(c.builder). + WithBuilder(r.builder). From(data). Select(db.NewIncludeMask()). Where(sq.Eq{"id": id}). @@ -328,15 +312,15 @@ func (c *cbsdManagerInTransaction) updateField(networkId string, id int64, field return nil } -func (c *cbsdManagerInTransaction) fetchDetailedCbsd(filter sq.Eq, grantJoinClause sq.Sqlizer) (*DetailedCbsd, error) { - rawCbsd, err := buildDetailedCbsdQuery(c.builder). +func (r *queryRunner) fetchDetailedCbsd(filter sq.Eq, grantJoinClause sq.Sqlizer) (*DetailedCbsd, error) { + rawCbsd, err := buildDetailedCbsdQuery(r.builder). Where(filter). 
Fetch() if err != nil { return nil, err } cbsd := convertCbsdToDetails(rawCbsd) - if err := getGrantsForCbsds(c.builder, grantJoinClause, cbsd); err != nil { + if err := getGrantsForCbsds(r.builder, grantJoinClause, cbsd); err != nil { return nil, err } return cbsd, nil @@ -356,7 +340,8 @@ func buildDetailedCbsdQuery(builder sq.StatementBuilderType) *db.Query { From(&DBCbsd{}). Select(db.NewExcludeMask( "state_id", "desired_state_id", - "is_deleted", "should_deregister", "should_relinquish")). + "is_deleted", "should_deregister", "should_relinquish", + "available_frequencies", "channels")). Join(db.NewQuery(). From(&DBCbsdState{}). As("t1"). @@ -407,12 +392,12 @@ func buildDetailedGrantQuery(builder sq.StatementBuilderType, on sq.Sqlizer) *db Select(db.NewIncludeMask("name"))) } -func (c *cbsdManagerInTransaction) listDetailedCbsd(networkId string, pagination *Pagination, filter *CbsdFilter) (*DetailedCbsdList, error) { - count, err := countCbsds(networkId, filter, c.builder) +func (r *queryRunner) listDetailedCbsd(networkId string, pagination *Pagination, filter *CbsdFilter) (*DetailedCbsdList, error) { + count, err := countCbsds(networkId, filter, r.builder) if err != nil { return nil, err } - query := buildDetailedCbsdQuery(c.builder) + query := buildDetailedCbsdQuery(r.builder) res, err := buildPagination(query, pagination). Where(getCbsdFilters(networkId, filter)). OrderBy(CbsdTable+".id", db.OrderAsc). 
@@ -425,7 +410,7 @@ func (c *cbsdManagerInTransaction) listDetailedCbsd(networkId string, pagination cbsds[i] = convertCbsdToDetails(models) } on := db.On(GrantTable, "state_id", GrantStateTable, "id") - if err := getGrantsForCbsds(c.builder, on, cbsds...); err != nil { + if err := getGrantsForCbsds(r.builder, on, cbsds...); err != nil { return nil, err } return &DetailedCbsdList{ diff --git a/dp/cloud/go/services/dp/storage/cbsd_manager_test.go b/dp/cloud/go/services/dp/storage/cbsd_manager_test.go index b103793c5d59..724b046a0060 100644 --- a/dp/cloud/go/services/dp/storage/cbsd_manager_test.go +++ b/dp/cloud/go/services/dp/storage/cbsd_manager_test.go @@ -33,8 +33,10 @@ import ( const ( registered = "registered" unregistered = "unregistered" + idle = "idle" someCbsdIdStr = "some_cbsd_id" authorized = "authorized" + granted = "granted" someNetwork = "some_network" otherNetwork = "other_network_id" someCbsdId = 123 @@ -44,6 +46,7 @@ const ( someSerialNumber = "some_serial_number" anotherSerialNumber = "another_serial_number" nowTimestamp = 12345678 + grant = "grant" ) func TestCbsdManager(t *testing.T) { @@ -182,9 +185,10 @@ func (s *CbsdManagerTestSuite) TestUpdateCbsd() { WithFccId(fmt.Sprintf("%snew2", cbsdBuilder.Cbsd.FccId.String)). WithSerialNumber(fmt.Sprintf("%snew3", cbsdBuilder.Cbsd.CbsdSerialNumber.String)). WithAntennaGain(1). - WithMaxPower(cbsdBuilder.Cbsd.MaxPower.Float64+2). - WithMinPower(cbsdBuilder.Cbsd.MinPower.Float64+3). - WithNumberOfPorts(cbsdBuilder.Cbsd.NumberOfPorts.Int64+4). + WithEirpCapabilities( + cbsdBuilder.Cbsd.MinPower.Float64+3, + cbsdBuilder.Cbsd.MaxPower.Float64+2, + cbsdBuilder.Cbsd.NumberOfPorts.Int64+4). WithSingleStepEnabled(true). WithIndoorDeployment(true). WithCarrierAggregationEnabled(true). 
@@ -1095,6 +1099,7 @@ func (s *CbsdManagerTestSuite) thenCbsdIs(expected *storage.DBCbsd) { cbsd.IsDeleted = db.MakeBool(false) cbsd.ShouldDeregister = db.MakeBool(false) cbsd.ShouldRelinquish = db.MakeBool(false) + cbsd.Channels = []storage.Channel{} expected := []db.Model{ cbsd, &storage.DBCbsdState{Name: db.MakeString(unregistered)}, diff --git a/dp/cloud/go/services/dp/storage/db/join.go b/dp/cloud/go/services/dp/storage/db/join.go index e99dcb4a5994..e724560088b8 100644 --- a/dp/cloud/go/services/dp/storage/db/join.go +++ b/dp/cloud/go/services/dp/storage/db/join.go @@ -41,6 +41,7 @@ type queryVisitor interface { type columnNamesCollector struct { args []*arg masks []indexMask + nCols int } func collectColumns(q *Query) *columnNamesCollector { @@ -69,9 +70,9 @@ func getTableName(arg *arg) string { return arg.metadata.Table } -func (c *columnNamesCollector) getPointers() ([]Model, []interface{}) { +func (c *columnNamesCollector) getPointers() ([]Model, []any) { models := make([]Model, len(c.args)) - var pointers []interface{} + pointers := make([]any, 0, c.nCols) for i, arg := range c.args { models[i] = arg.metadata.CreateObject() fields := c.masks[i].filterPointers(models[i]) @@ -82,7 +83,9 @@ func (c *columnNamesCollector) getPointers() ([]Model, []interface{}) { func (c *columnNamesCollector) preVisit(q *Query) { c.args = append(c.args, q.arg) - c.masks = append(c.masks, makeIndexMask(q.arg.metadata, q.arg.outputMask)) + mask := makeIndexMask(q.arg.metadata, q.arg.outputMask) + c.masks = append(c.masks, mask) + c.nCols += len(mask) } func (*columnNamesCollector) postVisit(_ *Query) {} @@ -91,7 +94,7 @@ type joinClause struct { query *Query } -func (j *joinClause) ToSql() (string, []interface{}, error) { +func (j *joinClause) ToSql() (string, []any, error) { b := &joinBuilder{ sql: strings.Builder{}, } @@ -101,7 +104,7 @@ func (j *joinClause) ToSql() (string, []interface{}, error) { type joinBuilder struct { sql strings.Builder - args []interface{} + args 
[]any err error } @@ -163,9 +166,9 @@ func (im indexMask) filterColumns(metadata *ModelMetadata) []string { return columns } -func (im indexMask) filterPointers(model Model) []interface{} { +func (im indexMask) filterPointers(model Model) []any { fields := model.Fields() - pointers := make([]interface{}, len(im)) + pointers := make([]any, len(im)) for i, j := range im { pointers[i] = fields[j].ptr() } diff --git a/dp/cloud/go/services/dp/storage/db/query_test.go b/dp/cloud/go/services/dp/storage/db/query_test.go index 1f6596e11654..19b9ff169bb6 100644 --- a/dp/cloud/go/services/dp/storage/db/query_test.go +++ b/dp/cloud/go/services/dp/storage/db/query_test.go @@ -40,7 +40,7 @@ func (s *QueryTestSuite) SetupSuite() { database, err := sqorc.Open("sqlite3", ":memory:") s.Require().NoError(err) s.resourceManager = dbtest.NewResourceManager(s.T(), database, builder) - err = s.resourceManager.CreateTables(&someModel{}, &otherModel{}, &anotherModel{}, &modelWithUniqueFields{}) + err = s.resourceManager.CreateTables(&someModel{}, &otherModel{}, &anotherModel{}) s.Require().NoError(err) } @@ -626,6 +626,9 @@ func getOtherModel() *otherModel { name: db.MakeString("pqr"), flag: db.MakeBool(false), date: db.MakeTime(time.Unix(2e6, 0).UTC()), + list: []int{1, 2, 3}, + object: map[string]int{"a": 1, "b": 2}, + custom: &customType{A: 123, B: "xyz"}, } } @@ -636,6 +639,14 @@ type otherModel struct { name sql.NullString flag sql.NullBool date sql.NullTime + list []int + object map[string]int + custom *customType +} + +type customType struct { + A int `json:"a"` + B string `json:"b"` } func (o *otherModel) GetMetadata() *db.ModelMetadata { @@ -672,6 +683,21 @@ func (o *otherModel) GetMetadata() *db.ModelMetadata { SqlType: sqorc.ColumnTypeDatetime, Nullable: true, }, + { + Name: "list", + SqlType: sqorc.ColumnTypeText, + Nullable: true, + }, + { + Name: "object", + SqlType: sqorc.ColumnTypeText, + Nullable: true, + }, + { + Name: "custom", + SqlType: sqorc.ColumnTypeText, + 
Nullable: true, + }, }, CreateObject: func() db.Model { return &otherModel{} @@ -687,6 +713,9 @@ func (o *otherModel) Fields() []db.BaseType { db.StringType{X: &o.name}, db.BoolType{X: &o.flag}, db.TimeType{X: &o.date}, + db.JsonType{X: &o.list}, + db.JsonType{X: &o.object}, + db.JsonType{X: &o.custom}, } } @@ -738,42 +767,3 @@ func (a *anotherModel) Fields() []db.BaseType { db.IntType{X: &a.defaultValue}, } } - -type modelWithUniqueFields struct { - id sql.NullInt64 - uniqueField sql.NullInt64 - anotherUniqueFied sql.NullInt64 -} - -func (m *modelWithUniqueFields) GetMetadata() *db.ModelMetadata { - return &db.ModelMetadata{ - Table: "unique_table", - Properties: []*db.Field{ - { - Name: "id", - SqlType: sqorc.ColumnTypeInt, - }, - { - Name: "unique_field", - SqlType: sqorc.ColumnTypeInt, - Unique: true, - }, - { - Name: "another_unique_fied", - SqlType: sqorc.ColumnTypeInt, - Unique: true, - }, - }, - CreateObject: func() db.Model { - return &modelWithUniqueFields{} - }, - } -} - -func (m *modelWithUniqueFields) Fields() []db.BaseType { - return []db.BaseType{ - db.IntType{X: &m.id}, - db.IntType{X: &m.uniqueField}, - db.IntType{X: &m.anotherUniqueFied}, - } -} diff --git a/dp/cloud/go/services/dp/storage/db/types.go b/dp/cloud/go/services/dp/storage/db/types.go index ea0f16f44b4e..59b48159f1dd 100644 --- a/dp/cloud/go/services/dp/storage/db/types.go +++ b/dp/cloud/go/services/dp/storage/db/types.go @@ -15,40 +15,67 @@ package db import ( "database/sql" + "encoding/json" + "fmt" ) type BaseType interface { - value() interface{} - ptr() interface{} + value() any + ptr() any isNull() bool } type IntType struct{ X *sql.NullInt64 } -func (x IntType) value() interface{} { return *x.X } -func (x IntType) ptr() interface{} { return x.X } -func (x IntType) isNull() bool { return !x.X.Valid } +func (x IntType) value() any { return *x.X } +func (x IntType) ptr() any { return x.X } +func (x IntType) isNull() bool { return !x.X.Valid } type FloatType struct{ X 
*sql.NullFloat64 } -func (x FloatType) value() interface{} { return *x.X } -func (x FloatType) ptr() interface{} { return x.X } -func (x FloatType) isNull() bool { return !x.X.Valid } +func (x FloatType) value() any { return *x.X } +func (x FloatType) ptr() any { return x.X } +func (x FloatType) isNull() bool { return !x.X.Valid } type StringType struct{ X *sql.NullString } -func (x StringType) value() interface{} { return *x.X } -func (x StringType) ptr() interface{} { return x.X } -func (x StringType) isNull() bool { return !x.X.Valid } +func (x StringType) value() any { return *x.X } +func (x StringType) ptr() any { return x.X } +func (x StringType) isNull() bool { return !x.X.Valid } type BoolType struct{ X *sql.NullBool } -func (x BoolType) value() interface{} { return *x.X } -func (x BoolType) ptr() interface{} { return x.X } -func (x BoolType) isNull() bool { return !x.X.Valid } +func (x BoolType) value() any { return *x.X } +func (x BoolType) ptr() any { return x.X } +func (x BoolType) isNull() bool { return !x.X.Valid } type TimeType struct{ X *sql.NullTime } -func (x TimeType) value() interface{} { return *x.X } -func (x TimeType) ptr() interface{} { return x.X } -func (x TimeType) isNull() bool { return !x.X.Valid } +func (x TimeType) value() any { return *x.X } +func (x TimeType) ptr() any { return x.X } +func (x TimeType) isNull() bool { return !x.X.Valid } + +type JsonType struct{ X any } + +func (x JsonType) value() any { + b, _ := json.Marshal(x.X) + return b +} +func (x JsonType) ptr() any { return jsonScanner{x: x.X} } +func (x JsonType) isNull() bool { return x.X == nil } + +type jsonScanner struct{ x any } + +func (j jsonScanner) Scan(value any) error { + if value == nil { + return nil + } + switch v := value.(type) { + case []byte: + return json.Unmarshal(v, j.x) + case string: + return json.Unmarshal([]byte(v), j.x) + default: + return fmt.Errorf("unexpected type: %T", v) + } +} diff --git a/dp/cloud/go/services/dp/storage/diff.go 
b/dp/cloud/go/services/dp/storage/diff.go index b78f9d01e3a0..bcf93b29ae4b 100644 --- a/dp/cloud/go/services/dp/storage/diff.go +++ b/dp/cloud/go/services/dp/storage/diff.go @@ -11,19 +11,14 @@ const ( func ShouldEnodebdUpdateInstallationParams(prev *DBCbsd, next *DBCbsd) bool { // TODO this should probably moved out from storage - return canUpdate(prev) && - (paramsChanges(next, prev) || coordinatesChanged(next, prev)) -} - -func canUpdate(prev *DBCbsd) bool { - return !prev.CpiDigitalSignature.Valid + return paramsChanges(next, prev) || coordinatesChanged(next, prev) } func paramsChanges(prev *DBCbsd, next *DBCbsd) bool { return prev.HeightM != next.HeightM || prev.HeightType != next.HeightType || prev.IndoorDeployment != next.IndoorDeployment || - prev.AntennaGain != next.AntennaGain + prev.AntennaGainDbi != next.AntennaGainDbi } func coordinatesChanged(prev *DBCbsd, next *DBCbsd) bool { diff --git a/dp/cloud/go/services/dp/storage/diff_test.go b/dp/cloud/go/services/dp/storage/diff_test.go index 241ec06e889a..7edaad34bc96 100644 --- a/dp/cloud/go/services/dp/storage/diff_test.go +++ b/dp/cloud/go/services/dp/storage/diff_test.go @@ -20,145 +20,111 @@ func TestShouldENodeBDUpdateInstallationParams(t *testing.T) { }{{ name: "should update if installation parameters have changes", prev: &storage.DBCbsd{ - AntennaGain: db.MakeFloat(10), - CbsdCategory: sql.NullString{}, - LatitudeDeg: db.MakeFloat(50), - LongitudeDeg: db.MakeFloat(100), - HeightM: db.MakeFloat(5), - HeightType: db.MakeString("AGL"), - IndoorDeployment: db.MakeBool(false), - CpiDigitalSignature: sql.NullString{}, - SingleStepEnabled: db.MakeBool(true), + AntennaGainDbi: db.MakeFloat(10), + CbsdCategory: sql.NullString{}, + LatitudeDeg: db.MakeFloat(50), + LongitudeDeg: db.MakeFloat(100), + HeightM: db.MakeFloat(5), + HeightType: db.MakeString("AGL"), + IndoorDeployment: db.MakeBool(false), + SingleStepEnabled: db.MakeBool(true), }, next: &storage.DBCbsd{ - AntennaGain: db.MakeFloat(20), - 
CbsdCategory: db.MakeString("A"), - LatitudeDeg: db.MakeFloat(50), - LongitudeDeg: db.MakeFloat(100), - HeightM: db.MakeFloat(8), - HeightType: db.MakeString("AGL"), - IndoorDeployment: db.MakeBool(true), - CpiDigitalSignature: sql.NullString{}, + AntennaGainDbi: db.MakeFloat(20), + CbsdCategory: db.MakeString("A"), + LatitudeDeg: db.MakeFloat(50), + LongitudeDeg: db.MakeFloat(100), + HeightM: db.MakeFloat(8), + HeightType: db.MakeString("AGL"), + IndoorDeployment: db.MakeBool(true), }, expected: true, }, { name: "should not update all parameters are the same", prev: &storage.DBCbsd{ - AntennaGain: db.MakeFloat(20), - CbsdCategory: db.MakeString("A"), - LatitudeDeg: db.MakeFloat(50), - LongitudeDeg: db.MakeFloat(100), - HeightM: db.MakeFloat(8), - HeightType: db.MakeString("AGL"), - IndoorDeployment: db.MakeBool(true), - SingleStepEnabled: db.MakeBool(true), - CpiDigitalSignature: sql.NullString{}, + AntennaGainDbi: db.MakeFloat(20), + CbsdCategory: db.MakeString("A"), + LatitudeDeg: db.MakeFloat(50), + LongitudeDeg: db.MakeFloat(100), + HeightM: db.MakeFloat(8), + HeightType: db.MakeString("AGL"), + IndoorDeployment: db.MakeBool(true), + SingleStepEnabled: db.MakeBool(true), }, next: &storage.DBCbsd{ - AntennaGain: db.MakeFloat(20), - CbsdCategory: db.MakeString("A"), - LatitudeDeg: db.MakeFloat(50), - LongitudeDeg: db.MakeFloat(100), - HeightM: db.MakeFloat(8), - HeightType: db.MakeString("AGL"), - IndoorDeployment: db.MakeBool(true), - CpiDigitalSignature: sql.NullString{}, - }, - expected: false, - }, { - name: "should not update if cbsd had cpi signature", - prev: &storage.DBCbsd{ - AntennaGain: db.MakeFloat(10), - CbsdCategory: sql.NullString{}, - LatitudeDeg: db.MakeFloat(50), - LongitudeDeg: db.MakeFloat(100), - HeightM: db.MakeFloat(5), - HeightType: db.MakeString("AGL"), - IndoorDeployment: db.MakeBool(false), - SingleStepEnabled: db.MakeBool(true), - CpiDigitalSignature: db.MakeString("some signature"), - }, - next: &storage.DBCbsd{ - AntennaGain: 
db.MakeFloat(20), - CbsdCategory: db.MakeString("A"), - LatitudeDeg: db.MakeFloat(50), - LongitudeDeg: db.MakeFloat(100), - HeightM: db.MakeFloat(8), - HeightType: db.MakeString("AGL"), - IndoorDeployment: db.MakeBool(true), - CpiDigitalSignature: sql.NullString{}, + AntennaGainDbi: db.MakeFloat(20), + CbsdCategory: db.MakeString("A"), + LatitudeDeg: db.MakeFloat(50), + LongitudeDeg: db.MakeFloat(100), + HeightM: db.MakeFloat(8), + HeightType: db.MakeString("AGL"), + IndoorDeployment: db.MakeBool(true), }, expected: false, }, { name: "should update if any of coordinates are empty", prev: &storage.DBCbsd{ - AntennaGain: db.MakeFloat(20), - CbsdCategory: db.MakeString("A"), - LatitudeDeg: sql.NullFloat64{}, - LongitudeDeg: sql.NullFloat64{}, - HeightM: db.MakeFloat(8), - HeightType: db.MakeString("AGL"), - SingleStepEnabled: db.MakeBool(true), - IndoorDeployment: db.MakeBool(true), - CpiDigitalSignature: sql.NullString{}, + AntennaGainDbi: db.MakeFloat(20), + CbsdCategory: db.MakeString("A"), + LatitudeDeg: sql.NullFloat64{}, + LongitudeDeg: sql.NullFloat64{}, + HeightM: db.MakeFloat(8), + HeightType: db.MakeString("AGL"), + SingleStepEnabled: db.MakeBool(true), + IndoorDeployment: db.MakeBool(true), }, next: &storage.DBCbsd{ - AntennaGain: db.MakeFloat(20), - CbsdCategory: db.MakeString("A"), - LatitudeDeg: sql.NullFloat64{}, - LongitudeDeg: sql.NullFloat64{}, - HeightM: db.MakeFloat(8), - HeightType: db.MakeString("AGL"), - IndoorDeployment: db.MakeBool(true), - CpiDigitalSignature: sql.NullString{}, + AntennaGainDbi: db.MakeFloat(20), + CbsdCategory: db.MakeString("A"), + LatitudeDeg: sql.NullFloat64{}, + LongitudeDeg: sql.NullFloat64{}, + HeightM: db.MakeFloat(8), + HeightType: db.MakeString("AGL"), + IndoorDeployment: db.MakeBool(true), }, expected: true, }, { name: "should not update if coordinates changed less than 10m", prev: &storage.DBCbsd{ - AntennaGain: db.MakeFloat(20), - CbsdCategory: db.MakeString("A"), - LatitudeDeg: db.MakeFloat(50), - LongitudeDeg: 
db.MakeFloat(100), - HeightM: db.MakeFloat(8), - HeightType: db.MakeString("AGL"), - SingleStepEnabled: db.MakeBool(true), - IndoorDeployment: db.MakeBool(true), - CpiDigitalSignature: sql.NullString{}, + AntennaGainDbi: db.MakeFloat(20), + CbsdCategory: db.MakeString("A"), + LatitudeDeg: db.MakeFloat(50), + LongitudeDeg: db.MakeFloat(100), + HeightM: db.MakeFloat(8), + HeightType: db.MakeString("AGL"), + SingleStepEnabled: db.MakeBool(true), + IndoorDeployment: db.MakeBool(true), }, next: &storage.DBCbsd{ - AntennaGain: db.MakeFloat(20), - CbsdCategory: db.MakeString("A"), - LatitudeDeg: db.MakeFloat(50.00006), - LongitudeDeg: db.MakeFloat(100.0001), - HeightM: db.MakeFloat(8), - HeightType: db.MakeString("AGL"), - IndoorDeployment: db.MakeBool(true), - CpiDigitalSignature: sql.NullString{}, + AntennaGainDbi: db.MakeFloat(20), + CbsdCategory: db.MakeString("A"), + LatitudeDeg: db.MakeFloat(50.00006), + LongitudeDeg: db.MakeFloat(100.0001), + HeightM: db.MakeFloat(8), + HeightType: db.MakeString("AGL"), + IndoorDeployment: db.MakeBool(true), }, expected: false, }, { name: "should update if coordinates changed more than 10m", prev: &storage.DBCbsd{ - AntennaGain: db.MakeFloat(20), - CbsdCategory: db.MakeString("A"), - LatitudeDeg: db.MakeFloat(50), - LongitudeDeg: db.MakeFloat(100), - HeightM: db.MakeFloat(8), - HeightType: db.MakeString("AGL"), - SingleStepEnabled: db.MakeBool(true), - IndoorDeployment: db.MakeBool(true), - CpiDigitalSignature: sql.NullString{}, + AntennaGainDbi: db.MakeFloat(20), + CbsdCategory: db.MakeString("A"), + LatitudeDeg: db.MakeFloat(50), + LongitudeDeg: db.MakeFloat(100), + HeightM: db.MakeFloat(8), + HeightType: db.MakeString("AGL"), + SingleStepEnabled: db.MakeBool(true), + IndoorDeployment: db.MakeBool(true), }, next: &storage.DBCbsd{ - AntennaGain: db.MakeFloat(20), - CbsdCategory: db.MakeString("A"), - LatitudeDeg: db.MakeFloat(50.00007), - LongitudeDeg: db.MakeFloat(100.0001), - HeightM: db.MakeFloat(8), - HeightType: 
db.MakeString("AGL"), - IndoorDeployment: db.MakeBool(true), - CpiDigitalSignature: sql.NullString{}, + AntennaGainDbi: db.MakeFloat(20), + CbsdCategory: db.MakeString("A"), + LatitudeDeg: db.MakeFloat(50.00007), + LongitudeDeg: db.MakeFloat(100.0001), + HeightM: db.MakeFloat(8), + HeightType: db.MakeString("AGL"), + IndoorDeployment: db.MakeBool(true), }, expected: true, }} diff --git a/dp/cloud/go/services/dp/storage/dp_manager.go b/dp/cloud/go/services/dp/storage/dp_manager.go new file mode 100644 index 000000000000..b8e3e394126a --- /dev/null +++ b/dp/cloud/go/services/dp/storage/dp_manager.go @@ -0,0 +1,31 @@ +package storage + +import ( + "database/sql" + + sq "github.com/Masterminds/squirrel" + + "magma/orc8r/cloud/go/sqorc" +) + +type dpManager struct { + db *sql.DB + builder sqorc.StatementBuilder + cache *enumCache + errorChecker sqorc.ErrorChecker + locker sqorc.Locker +} + +type queryRunner struct { + builder sq.StatementBuilderType + cache *enumCache + locker sqorc.Locker +} + +func (m *dpManager) getQueryRunner(tx sq.BaseRunner) *queryRunner { + return &queryRunner{ + builder: m.builder.RunWith(tx), + cache: m.cache, + locker: m.locker, + } +} diff --git a/dp/cloud/go/services/dp/storage/models.go b/dp/cloud/go/services/dp/storage/models.go index 3c609f73a073..fdbd6695ffc3 100644 --- a/dp/cloud/go/services/dp/storage/models.go +++ b/dp/cloud/go/services/dp/storage/models.go @@ -21,10 +21,12 @@ import ( ) const ( - GrantStateTable = "grant_states" - GrantTable = "grants" - CbsdStateTable = "cbsd_states" - CbsdTable = "cbsds" + RequestTypeTable = "request_types" + RequestTable = "requests" + GrantStateTable = "grant_states" + GrantTable = "grants" + CbsdStateTable = "cbsd_states" + CbsdTable = "cbsds" ) type EnumModel interface { @@ -32,6 +34,82 @@ type EnumModel interface { GetName() string } +type DBRequestType struct { + Id sql.NullInt64 + Name sql.NullString +} + +func (rt *DBRequestType) Fields() []db.BaseType { + return []db.BaseType{ + 
db.IntType{X: &rt.Id}, + db.StringType{X: &rt.Name}, + } +} + +func (rt *DBRequestType) GetMetadata() *db.ModelMetadata { + return &db.ModelMetadata{ + Table: RequestTypeTable, + Properties: []*db.Field{{ + Name: "id", + SqlType: sqorc.ColumnTypeInt, + }, { + Name: "name", + SqlType: sqorc.ColumnTypeText, + }}, + CreateObject: func() db.Model { + return &DBRequestType{} + }, + } +} + +func (rt *DBRequestType) GetId() int64 { + return rt.Id.Int64 +} + +func (rt *DBRequestType) GetName() string { + return rt.Name.String +} + +type DBRequest struct { + Id sql.NullInt64 + TypeId sql.NullInt64 + CbsdId sql.NullInt64 + Payload any +} + +func (r *DBRequest) Fields() []db.BaseType { + return []db.BaseType{ + db.IntType{X: &r.Id}, + db.IntType{X: &r.TypeId}, + db.IntType{X: &r.CbsdId}, + db.JsonType{X: &r.Payload}, + } +} + +func (r *DBRequest) GetMetadata() *db.ModelMetadata { + return &db.ModelMetadata{ + Table: RequestTable, + Properties: []*db.Field{{ + Name: "id", + SqlType: sqorc.ColumnTypeInt, + }, { + Name: "type_id", + SqlType: sqorc.ColumnTypeInt, + Relation: RequestTypeTable, + }, { + Name: "cbsd_id", + SqlType: sqorc.ColumnTypeInt, + Relation: CbsdTable, + }, { + Name: "payload", + SqlType: sqorc.ColumnTypeText, + }}, + CreateObject: func() db.Model { + return &DBRequest{} + }, + } +} + type DBGrantState struct { Id sql.NullInt64 Name sql.NullString @@ -47,16 +125,13 @@ func (gs *DBGrantState) Fields() []db.BaseType { func (gs *DBGrantState) GetMetadata() *db.ModelMetadata { return &db.ModelMetadata{ Table: GrantStateTable, - Properties: []*db.Field{ - { - Name: "id", - SqlType: sqorc.ColumnTypeInt, - }, - { - Name: "name", - SqlType: sqorc.ColumnTypeText, - }, - }, + Properties: []*db.Field{{ + Name: "id", + SqlType: sqorc.ColumnTypeInt, + }, { + Name: "name", + SqlType: sqorc.ColumnTypeText, + }}, CreateObject: func() db.Model { return &DBGrantState{} }, @@ -72,17 +147,18 @@ func (gs *DBGrantState) GetName() string { } type DBGrant struct { - Id sql.NullInt64 
- StateId sql.NullInt64 - CbsdId sql.NullInt64 - GrantId sql.NullString - GrantExpireTime sql.NullTime - TransmitExpireTime sql.NullTime - HeartbeatInterval sql.NullInt64 - ChannelType sql.NullString - LowFrequency sql.NullInt64 - HighFrequency sql.NullInt64 - MaxEirp sql.NullFloat64 + Id sql.NullInt64 + StateId sql.NullInt64 + CbsdId sql.NullInt64 + GrantId sql.NullString + GrantExpireTime sql.NullTime + TransmitExpireTime sql.NullTime + HeartbeatIntervalSec sql.NullInt64 + LastHeartbeatRequestTime sql.NullTime + ChannelType sql.NullString + LowFrequencyHz sql.NullInt64 + HighFrequencyHz sql.NullInt64 + MaxEirp sql.NullFloat64 } func (g *DBGrant) Fields() []db.BaseType { @@ -93,10 +169,11 @@ func (g *DBGrant) Fields() []db.BaseType { db.StringType{X: &g.GrantId}, db.TimeType{X: &g.GrantExpireTime}, db.TimeType{X: &g.TransmitExpireTime}, - db.IntType{X: &g.HeartbeatInterval}, + db.IntType{X: &g.HeartbeatIntervalSec}, + db.TimeType{X: &g.LastHeartbeatRequestTime}, db.StringType{X: &g.ChannelType}, - db.IntType{X: &g.LowFrequency}, - db.IntType{X: &g.HighFrequency}, + db.IntType{X: &g.LowFrequencyHz}, + db.IntType{X: &g.HighFrequencyHz}, db.FloatType{X: &g.MaxEirp}, } } @@ -104,59 +181,51 @@ func (g *DBGrant) Fields() []db.BaseType { func (g *DBGrant) GetMetadata() *db.ModelMetadata { return &db.ModelMetadata{ Table: GrantTable, - Properties: []*db.Field{ - { - Name: "id", - SqlType: sqorc.ColumnTypeInt, - }, - { - Name: "state_id", - SqlType: sqorc.ColumnTypeInt, - Relation: GrantStateTable, - }, - { - Name: "cbsd_id", - SqlType: sqorc.ColumnTypeInt, - Nullable: true, - Relation: CbsdTable, - }, - { - Name: "grant_id", - SqlType: sqorc.ColumnTypeText, - }, - { - Name: "grant_expire_time", - SqlType: sqorc.ColumnTypeDatetime, - Nullable: true, - }, - { - Name: "transmit_expire_time", - SqlType: sqorc.ColumnTypeDatetime, - Nullable: true, - }, - { - Name: "heartbeat_interval", - SqlType: sqorc.ColumnTypeInt, - Nullable: true, - }, - { - Name: "channel_type", - 
SqlType: sqorc.ColumnTypeText, - Nullable: true, - }, - { - Name: "low_frequency", - SqlType: sqorc.ColumnTypeInt, - }, - { - Name: "high_frequency", - SqlType: sqorc.ColumnTypeInt, - }, - { - Name: "max_eirp", - SqlType: sqorc.ColumnTypeReal, - }, - }, + Properties: []*db.Field{{ + Name: "id", + SqlType: sqorc.ColumnTypeInt, + }, { + Name: "state_id", + SqlType: sqorc.ColumnTypeInt, + Relation: GrantStateTable, + }, { + Name: "cbsd_id", + SqlType: sqorc.ColumnTypeInt, + Nullable: true, + Relation: CbsdTable, + }, { + Name: "grant_id", + SqlType: sqorc.ColumnTypeText, + }, { + Name: "grant_expire_time", + SqlType: sqorc.ColumnTypeDatetime, + Nullable: true, + }, { + Name: "transmit_expire_time", + SqlType: sqorc.ColumnTypeDatetime, + Nullable: true, + }, { + Name: "heartbeat_interval", + SqlType: sqorc.ColumnTypeInt, + Nullable: true, + }, { + Name: "last_heartbeat_request_time", + SqlType: sqorc.ColumnTypeDatetime, + Nullable: true, + }, { + Name: "channel_type", + SqlType: sqorc.ColumnTypeText, + Nullable: true, + }, { + Name: "low_frequency", + SqlType: sqorc.ColumnTypeInt, + }, { + Name: "high_frequency", + SqlType: sqorc.ColumnTypeInt, + }, { + Name: "max_eirp", + SqlType: sqorc.ColumnTypeReal, + }}, CreateObject: func() db.Model { return &DBGrant{} }, @@ -178,16 +247,13 @@ func (cs *DBCbsdState) Fields() []db.BaseType { func (cs *DBCbsdState) GetMetadata() *db.ModelMetadata { return &db.ModelMetadata{ Table: "cbsd_states", - Properties: []*db.Field{ - { - Name: "id", - SqlType: sqorc.ColumnTypeInt, - }, - { - Name: "name", - SqlType: sqorc.ColumnTypeText, - }, - }, + Properties: []*db.Field{{ + Name: "id", + SqlType: sqorc.ColumnTypeInt, + }, { + Name: "name", + SqlType: sqorc.ColumnTypeText, + }}, CreateObject: func() db.Model { return &DBCbsdState{} }, @@ -213,10 +279,10 @@ type DBCbsd struct { CbsdSerialNumber sql.NullString LastSeen sql.NullTime PreferredBandwidthMHz sql.NullInt64 - PreferredFrequenciesMHz sql.NullString + PreferredFrequenciesMHz []int64 
MinPower sql.NullFloat64 MaxPower sql.NullFloat64 - AntennaGain sql.NullFloat64 + AntennaGainDbi sql.NullFloat64 NumberOfPorts sql.NullInt64 IsDeleted sql.NullBool ShouldDeregister sql.NullBool @@ -227,17 +293,19 @@ type DBCbsd struct { LongitudeDeg sql.NullFloat64 HeightM sql.NullFloat64 HeightType sql.NullString - HorizontalAccuracyM sql.NullFloat64 - AntennaAzimuthDeg sql.NullInt64 - AntennaDowntiltDeg sql.NullInt64 - AntennaBeamwidthDeg sql.NullInt64 - AntennaModel sql.NullString - EirpCapabilityDbmMhz sql.NullInt64 IndoorDeployment sql.NullBool - CpiDigitalSignature sql.NullString CarrierAggregationEnabled sql.NullBool GrantRedundancy sql.NullBool MaxIbwMhx sql.NullInt64 + AvailableFrequencies []uint32 + Channels []Channel +} + +type Channel struct { + // TODO some of the fields may not be required + LowFrequencyHz int64 `json:"low_frequency"` + HighFrequencyHz int64 `json:"high_frequency"` + MaxEirp float64 `json:"max_eirp"` } func (c *DBCbsd) Fields() []db.BaseType { @@ -252,10 +320,10 @@ func (c *DBCbsd) Fields() []db.BaseType { db.StringType{X: &c.CbsdSerialNumber}, db.TimeType{X: &c.LastSeen}, db.IntType{X: &c.PreferredBandwidthMHz}, - db.StringType{X: &c.PreferredFrequenciesMHz}, + db.JsonType{X: &c.PreferredFrequenciesMHz}, db.FloatType{X: &c.MinPower}, db.FloatType{X: &c.MaxPower}, - db.FloatType{X: &c.AntennaGain}, + db.FloatType{X: &c.AntennaGainDbi}, db.IntType{X: &c.NumberOfPorts}, db.BoolType{X: &c.IsDeleted}, db.BoolType{X: &c.ShouldDeregister}, @@ -266,206 +334,147 @@ func (c *DBCbsd) Fields() []db.BaseType { db.FloatType{X: &c.LongitudeDeg}, db.FloatType{X: &c.HeightM}, db.StringType{X: &c.HeightType}, - db.FloatType{X: &c.HorizontalAccuracyM}, - db.IntType{X: &c.AntennaAzimuthDeg}, - db.IntType{X: &c.AntennaDowntiltDeg}, - db.IntType{X: &c.AntennaBeamwidthDeg}, - db.StringType{X: &c.AntennaModel}, - db.IntType{X: &c.EirpCapabilityDbmMhz}, db.BoolType{X: &c.IndoorDeployment}, - db.StringType{X: &c.CpiDigitalSignature}, db.BoolType{X: 
&c.CarrierAggregationEnabled}, db.BoolType{X: &c.GrantRedundancy}, db.IntType{X: &c.MaxIbwMhx}, + db.JsonType{X: &c.AvailableFrequencies}, + db.JsonType{X: &c.Channels}, } } func (c *DBCbsd) GetMetadata() *db.ModelMetadata { return &db.ModelMetadata{ Table: CbsdTable, - Properties: []*db.Field{ - { - Name: "id", - SqlType: sqorc.ColumnTypeInt, - }, - { - Name: "network_id", - SqlType: sqorc.ColumnTypeText, - }, - { - Name: "state_id", - SqlType: sqorc.ColumnTypeInt, - Relation: CbsdStateTable, - }, - { - Name: "desired_state_id", - SqlType: sqorc.ColumnTypeInt, - Relation: CbsdStateTable, - }, - { - Name: "cbsd_id", - SqlType: sqorc.ColumnTypeText, - Nullable: true, - }, - { - Name: "user_id", - SqlType: sqorc.ColumnTypeText, - Nullable: true, - }, - { - Name: "fcc_id", - SqlType: sqorc.ColumnTypeText, - Nullable: true, - }, - { - Name: "cbsd_serial_number", - SqlType: sqorc.ColumnTypeText, - Nullable: true, - Unique: true, - }, - { - Name: "last_seen", - SqlType: sqorc.ColumnTypeDatetime, - Nullable: true, - }, - { - Name: "preferred_bandwidth_mhz", - SqlType: sqorc.ColumnTypeInt, - }, - { - Name: "preferred_frequencies_mhz", - SqlType: sqorc.ColumnTypeText, - }, - { - Name: "min_power", - SqlType: sqorc.ColumnTypeReal, - Nullable: true, - }, - { - Name: "max_power", - SqlType: sqorc.ColumnTypeReal, - Nullable: true, - }, - { - Name: "antenna_gain", - SqlType: sqorc.ColumnTypeReal, - Nullable: true, - }, - { - Name: "number_of_ports", - SqlType: sqorc.ColumnTypeInt, - Nullable: true, - }, - { - Name: "is_deleted", - SqlType: sqorc.ColumnTypeBool, - HasDefault: true, - DefaultValue: false, - }, - { - Name: "should_deregister", - SqlType: sqorc.ColumnTypeBool, - HasDefault: true, - DefaultValue: false, - }, - { - Name: "should_relinquish", - SqlType: sqorc.ColumnTypeBool, - HasDefault: true, - DefaultValue: false, - }, - { - Name: "single_step_enabled", - SqlType: sqorc.ColumnTypeBool, - HasDefault: true, - DefaultValue: false, - }, - { - Name: "cbsd_category", - 
SqlType: sqorc.ColumnTypeText, - HasDefault: true, - DefaultValue: "b", - }, - { - Name: "latitude_deg", - SqlType: sqorc.ColumnTypeReal, - Nullable: true, - }, - { - Name: "longitude_deg", - SqlType: sqorc.ColumnTypeReal, - Nullable: true, - }, - { - Name: "height_m", - SqlType: sqorc.ColumnTypeInt, - Nullable: true, - }, - { - Name: "height_type", - SqlType: sqorc.ColumnTypeText, - Nullable: true, - }, - { - Name: "horizontal_accuracy_m", - SqlType: sqorc.ColumnTypeReal, - Nullable: true, - }, - { - Name: "antenna_azimuth_deg", - SqlType: sqorc.ColumnTypeInt, - Nullable: true, - }, - { - Name: "antenna_downtilt_deg", - SqlType: sqorc.ColumnTypeInt, - Nullable: true, - }, - { - Name: "antenna_beamwidth_deg", - SqlType: sqorc.ColumnTypeInt, - Nullable: true, - }, - { - Name: "antenna_model", - SqlType: sqorc.ColumnTypeText, - Nullable: true, - }, - { - Name: "eirp_capability_dbm_mhz", - SqlType: sqorc.ColumnTypeInt, - Nullable: true, - }, - { - Name: "indoor_deployment", - SqlType: sqorc.ColumnTypeBool, - HasDefault: true, - DefaultValue: false, - }, - { - Name: "cpi_digital_signature", - SqlType: sqorc.ColumnTypeText, - Nullable: true, - }, - { - Name: "carrier_aggregation_enabled", - SqlType: sqorc.ColumnTypeBool, - HasDefault: true, - DefaultValue: false, - }, - { - Name: "grant_redundancy", - SqlType: sqorc.ColumnTypeBool, - HasDefault: true, - DefaultValue: true, - }, - { - Name: "max_ibw_mhz", - SqlType: sqorc.ColumnTypeInt, - HasDefault: true, - DefaultValue: 150, - }, - }, + Properties: []*db.Field{{ + Name: "id", + SqlType: sqorc.ColumnTypeInt, + }, { + Name: "network_id", + SqlType: sqorc.ColumnTypeText, + }, { + Name: "state_id", + SqlType: sqorc.ColumnTypeInt, + Relation: CbsdStateTable, + }, { + Name: "desired_state_id", + SqlType: sqorc.ColumnTypeInt, + Relation: CbsdStateTable, + }, { + Name: "cbsd_id", + SqlType: sqorc.ColumnTypeText, + Nullable: true, + }, { + Name: "user_id", + SqlType: sqorc.ColumnTypeText, + Nullable: true, + }, { + Name: 
"fcc_id", + SqlType: sqorc.ColumnTypeText, + Nullable: true, + }, { + Name: "cbsd_serial_number", + SqlType: sqorc.ColumnTypeText, + Nullable: true, + Unique: true, + }, { + Name: "last_seen", + SqlType: sqorc.ColumnTypeDatetime, + Nullable: true, + }, { + Name: "preferred_bandwidth_mhz", + SqlType: sqorc.ColumnTypeInt, + }, { + Name: "preferred_frequencies_mhz", + SqlType: sqorc.ColumnTypeText, + }, { + Name: "min_power", + SqlType: sqorc.ColumnTypeReal, + Nullable: true, + }, { + Name: "max_power", + SqlType: sqorc.ColumnTypeReal, + Nullable: true, + }, { + Name: "antenna_gain", + SqlType: sqorc.ColumnTypeReal, + Nullable: true, + }, { + Name: "number_of_ports", + SqlType: sqorc.ColumnTypeInt, + Nullable: true, + }, { + Name: "is_deleted", + SqlType: sqorc.ColumnTypeBool, + HasDefault: true, + DefaultValue: false, + }, { + Name: "should_deregister", + SqlType: sqorc.ColumnTypeBool, + HasDefault: true, + DefaultValue: false, + }, { + Name: "should_relinquish", + SqlType: sqorc.ColumnTypeBool, + HasDefault: true, + DefaultValue: false, + }, { + Name: "single_step_enabled", + SqlType: sqorc.ColumnTypeBool, + HasDefault: true, + DefaultValue: false, + }, { + Name: "cbsd_category", + SqlType: sqorc.ColumnTypeText, + HasDefault: true, + DefaultValue: "b", + }, { + Name: "latitude_deg", + SqlType: sqorc.ColumnTypeReal, + Nullable: true, + }, { + Name: "longitude_deg", + SqlType: sqorc.ColumnTypeReal, + Nullable: true, + }, { + Name: "height_m", + SqlType: sqorc.ColumnTypeInt, + Nullable: true, + }, { + Name: "height_type", + SqlType: sqorc.ColumnTypeText, + Nullable: true, + }, { + Name: "indoor_deployment", + SqlType: sqorc.ColumnTypeBool, + HasDefault: true, + DefaultValue: false, + }, { + Name: "carrier_aggregation_enabled", + SqlType: sqorc.ColumnTypeBool, + HasDefault: true, + DefaultValue: false, + }, { + Name: "grant_redundancy", + SqlType: sqorc.ColumnTypeBool, + HasDefault: true, + DefaultValue: true, + }, { + Name: "max_ibw_mhz", + SqlType: 
sqorc.ColumnTypeInt, + HasDefault: true, + DefaultValue: 150, + }, { + Name: "available_frequencies", + SqlType: sqorc.ColumnTypeText, + Nullable: true, + }, { + Name: "channels", + SqlType: sqorc.ColumnTypeText, + Nullable: false, + HasDefault: true, + DefaultValue: "'[]'", + }}, CreateObject: func() db.Model { return &DBCbsd{} }, diff --git a/dp/cloud/go/services/dp/storage/models_test.go b/dp/cloud/go/services/dp/storage/models_test.go index 70f76ad9a84d..39e3ba77f0c3 100644 --- a/dp/cloud/go/services/dp/storage/models_test.go +++ b/dp/cloud/go/services/dp/storage/models_test.go @@ -24,6 +24,8 @@ import ( ) func TestFields(t *testing.T) { + dbRequestType := &storage.DBRequestType{} + dbRequest := &storage.DBRequest{} dbGrant := &storage.DBGrant{} dbCbsd := &storage.DBCbsd{} dbCbsdState := &storage.DBCbsdState{} @@ -32,82 +34,89 @@ func TestFields(t *testing.T) { name string model db.Model expected []db.BaseType - }{ - { - name: "check field names for DBGrantState", - model: dbGrantState, - expected: []db.BaseType{ - db.IntType{X: &dbGrantState.Id}, - db.StringType{X: &dbGrantState.Name}, - }, + }{{ + name: "check field names for DBRequestType", + model: dbRequestType, + expected: []db.BaseType{ + db.IntType{X: &dbRequestType.Id}, + db.StringType{X: &dbRequestType.Name}, }, - { - name: "check field names for DBGrant", - model: dbGrant, - expected: []db.BaseType{ - db.IntType{X: &dbGrant.Id}, - db.IntType{X: &dbGrant.StateId}, - db.IntType{X: &dbGrant.CbsdId}, - db.StringType{X: &dbGrant.GrantId}, - db.TimeType{X: &dbGrant.GrantExpireTime}, - db.TimeType{X: &dbGrant.TransmitExpireTime}, - db.IntType{X: &dbGrant.HeartbeatInterval}, - db.StringType{X: &dbGrant.ChannelType}, - db.IntType{X: &dbGrant.LowFrequency}, - db.IntType{X: &dbGrant.HighFrequency}, - db.FloatType{X: &dbGrant.MaxEirp}, - }, + }, { + name: "check field names for DBRequest", + model: dbRequest, + expected: []db.BaseType{ + db.IntType{X: &dbRequest.Id}, + db.IntType{X: &dbRequest.TypeId}, + 
db.IntType{X: &dbRequest.CbsdId}, + db.JsonType{X: &dbRequest.Payload}, }, - { - name: "check field names for DBCbsdState", - model: dbCbsdState, - expected: []db.BaseType{ - db.IntType{X: &dbCbsdState.Id}, - db.StringType{X: &dbCbsdState.Name}, - }, + }, { + name: "check field names for DBGrantState", + model: dbGrantState, + expected: []db.BaseType{ + db.IntType{X: &dbGrantState.Id}, + db.StringType{X: &dbGrantState.Name}, }, - { - name: "check field names for DBCbsd", - model: dbCbsd, - expected: []db.BaseType{ - db.IntType{X: &dbCbsd.Id}, - db.StringType{X: &dbCbsd.NetworkId}, - db.IntType{X: &dbCbsd.StateId}, - db.IntType{X: &dbCbsd.DesiredStateId}, - db.StringType{X: &dbCbsd.CbsdId}, - db.StringType{X: &dbCbsd.UserId}, - db.StringType{X: &dbCbsd.FccId}, - db.StringType{X: &dbCbsd.CbsdSerialNumber}, - db.TimeType{X: &dbCbsd.LastSeen}, - db.IntType{X: &dbCbsd.PreferredBandwidthMHz}, - db.StringType{X: &dbCbsd.PreferredFrequenciesMHz}, - db.FloatType{X: &dbCbsd.MinPower}, - db.FloatType{X: &dbCbsd.MaxPower}, - db.FloatType{X: &dbCbsd.AntennaGain}, - db.IntType{X: &dbCbsd.NumberOfPorts}, - db.BoolType{X: &dbCbsd.IsDeleted}, - db.BoolType{X: &dbCbsd.ShouldDeregister}, - db.BoolType{X: &dbCbsd.ShouldRelinquish}, - db.BoolType{X: &dbCbsd.SingleStepEnabled}, - db.StringType{X: &dbCbsd.CbsdCategory}, - db.FloatType{X: &dbCbsd.LatitudeDeg}, - db.FloatType{X: &dbCbsd.LongitudeDeg}, - db.FloatType{X: &dbCbsd.HeightM}, - db.StringType{X: &dbCbsd.HeightType}, - db.FloatType{X: &dbCbsd.HorizontalAccuracyM}, - db.IntType{X: &dbCbsd.AntennaAzimuthDeg}, - db.IntType{X: &dbCbsd.AntennaDowntiltDeg}, - db.IntType{X: &dbCbsd.AntennaBeamwidthDeg}, - db.StringType{X: &dbCbsd.AntennaModel}, - db.IntType{X: &dbCbsd.EirpCapabilityDbmMhz}, - db.BoolType{X: &dbCbsd.IndoorDeployment}, - db.StringType{X: &dbCbsd.CpiDigitalSignature}, - db.BoolType{X: &dbCbsd.CarrierAggregationEnabled}, - db.BoolType{X: &dbCbsd.GrantRedundancy}, - db.IntType{X: &dbCbsd.MaxIbwMhx}, - }, + }, { + name: "check 
field names for DBGrant", + model: dbGrant, + expected: []db.BaseType{ + db.IntType{X: &dbGrant.Id}, + db.IntType{X: &dbGrant.StateId}, + db.IntType{X: &dbGrant.CbsdId}, + db.StringType{X: &dbGrant.GrantId}, + db.TimeType{X: &dbGrant.GrantExpireTime}, + db.TimeType{X: &dbGrant.TransmitExpireTime}, + db.IntType{X: &dbGrant.HeartbeatIntervalSec}, + db.TimeType{X: &dbGrant.LastHeartbeatRequestTime}, + db.StringType{X: &dbGrant.ChannelType}, + db.IntType{X: &dbGrant.LowFrequencyHz}, + db.IntType{X: &dbGrant.HighFrequencyHz}, + db.FloatType{X: &dbGrant.MaxEirp}, }, - } + }, { + name: "check field names for DBCbsdState", + model: dbCbsdState, + expected: []db.BaseType{ + db.IntType{X: &dbCbsdState.Id}, + db.StringType{X: &dbCbsdState.Name}, + }, + }, { + name: "check field names for DBCbsd", + model: dbCbsd, + expected: []db.BaseType{ + db.IntType{X: &dbCbsd.Id}, + db.StringType{X: &dbCbsd.NetworkId}, + db.IntType{X: &dbCbsd.StateId}, + db.IntType{X: &dbCbsd.DesiredStateId}, + db.StringType{X: &dbCbsd.CbsdId}, + db.StringType{X: &dbCbsd.UserId}, + db.StringType{X: &dbCbsd.FccId}, + db.StringType{X: &dbCbsd.CbsdSerialNumber}, + db.TimeType{X: &dbCbsd.LastSeen}, + db.IntType{X: &dbCbsd.PreferredBandwidthMHz}, + db.JsonType{X: &dbCbsd.PreferredFrequenciesMHz}, + db.FloatType{X: &dbCbsd.MinPower}, + db.FloatType{X: &dbCbsd.MaxPower}, + db.FloatType{X: &dbCbsd.AntennaGainDbi}, + db.IntType{X: &dbCbsd.NumberOfPorts}, + db.BoolType{X: &dbCbsd.IsDeleted}, + db.BoolType{X: &dbCbsd.ShouldDeregister}, + db.BoolType{X: &dbCbsd.ShouldRelinquish}, + db.BoolType{X: &dbCbsd.SingleStepEnabled}, + db.StringType{X: &dbCbsd.CbsdCategory}, + db.FloatType{X: &dbCbsd.LatitudeDeg}, + db.FloatType{X: &dbCbsd.LongitudeDeg}, + db.FloatType{X: &dbCbsd.HeightM}, + db.StringType{X: &dbCbsd.HeightType}, + db.BoolType{X: &dbCbsd.IndoorDeployment}, + db.BoolType{X: &dbCbsd.CarrierAggregationEnabled}, + db.BoolType{X: &dbCbsd.GrantRedundancy}, + db.IntType{X: &dbCbsd.MaxIbwMhx}, + db.JsonType{X: 
&dbCbsd.AvailableFrequencies}, + db.JsonType{X: &dbCbsd.Channels}, + }, + }} for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { assert.Equal(t, tc.expected, tc.model.Fields()) @@ -120,292 +129,253 @@ func TestGetMetadata(t *testing.T) { name string model db.Model expected db.ModelMetadata - }{ - { - name: "check ModelMetadata structure for DBGrantState", - model: &storage.DBGrantState{}, - expected: db.ModelMetadata{ - Table: storage.GrantStateTable, - Properties: []*db.Field{ - { - Name: "id", - SqlType: sqorc.ColumnTypeInt, - }, - { - Name: "name", - SqlType: sqorc.ColumnTypeText, - }, - }, - }, + }{{ + name: "check ModelMetadata structure for DBRequestType", + model: &storage.DBRequestType{}, + expected: db.ModelMetadata{ + Table: storage.RequestTypeTable, + Properties: []*db.Field{{ + Name: "id", + SqlType: sqorc.ColumnTypeInt, + }, { + Name: "name", + SqlType: sqorc.ColumnTypeText, + }}, }, - { - name: "check ModelMetadata structure for DBGrant", - model: &storage.DBGrant{}, - expected: db.ModelMetadata{ - Table: storage.GrantTable, - Properties: []*db.Field{ - { - Name: "id", - SqlType: sqorc.ColumnTypeInt, - }, - { - Name: "state_id", - SqlType: sqorc.ColumnTypeInt, - Relation: storage.GrantStateTable, - }, - { - Name: "cbsd_id", - SqlType: sqorc.ColumnTypeInt, - Nullable: true, - Relation: storage.CbsdTable, - }, - { - Name: "grant_id", - SqlType: sqorc.ColumnTypeText, - }, - { - Name: "grant_expire_time", - SqlType: sqorc.ColumnTypeDatetime, - Nullable: true, - }, - { - Name: "transmit_expire_time", - SqlType: sqorc.ColumnTypeDatetime, - Nullable: true, - }, - { - Name: "heartbeat_interval", - SqlType: sqorc.ColumnTypeInt, - Nullable: true, - }, - { - Name: "channel_type", - SqlType: sqorc.ColumnTypeText, - Nullable: true, - }, - { - Name: "low_frequency", - SqlType: sqorc.ColumnTypeInt, - }, - { - Name: "high_frequency", - SqlType: sqorc.ColumnTypeInt, - }, - { - Name: "max_eirp", - SqlType: sqorc.ColumnTypeReal, - }, - }, - }, + }, { + 
name: "check ModelMetadata structure for DBRequest", + model: &storage.DBRequest{}, + expected: db.ModelMetadata{ + Table: storage.RequestTable, + Properties: []*db.Field{{ + Name: "id", + SqlType: sqorc.ColumnTypeInt, + }, { + Name: "type_id", + SqlType: sqorc.ColumnTypeInt, + Relation: storage.RequestTypeTable, + }, { + Name: "cbsd_id", + SqlType: sqorc.ColumnTypeInt, + Relation: storage.CbsdTable, + }, { + Name: "payload", + SqlType: sqorc.ColumnTypeText, + }}, }, - { - name: "check ModelMetadata structure for DBCbsdState", - model: &storage.DBCbsdState{}, - expected: db.ModelMetadata{ - Table: storage.CbsdStateTable, - Properties: []*db.Field{ - { - Name: "id", - SqlType: sqorc.ColumnTypeInt, - }, - { - Name: "name", - SqlType: sqorc.ColumnTypeText, - }, - }, - }, + }, { + name: "check ModelMetadata structure for DBGrantState", + model: &storage.DBGrantState{}, + expected: db.ModelMetadata{ + Table: storage.GrantStateTable, + Properties: []*db.Field{{ + Name: "id", + SqlType: sqorc.ColumnTypeInt, + }, { + Name: "name", + SqlType: sqorc.ColumnTypeText, + }}, }, - { - name: "check ModelMetadata structure for DBCbsd", - model: &storage.DBCbsd{}, - expected: db.ModelMetadata{ - Table: storage.CbsdTable, - Properties: []*db.Field{ - { - Name: "id", - SqlType: sqorc.ColumnTypeInt, - }, - { - Name: "network_id", - SqlType: sqorc.ColumnTypeText, - }, - { - Name: "state_id", - SqlType: sqorc.ColumnTypeInt, - Relation: storage.CbsdStateTable, - }, - { - Name: "desired_state_id", - SqlType: sqorc.ColumnTypeInt, - Relation: storage.CbsdStateTable, - }, - { - Name: "cbsd_id", - SqlType: sqorc.ColumnTypeText, - Nullable: true, - }, - { - Name: "user_id", - SqlType: sqorc.ColumnTypeText, - Nullable: true, - }, - { - Name: "fcc_id", - SqlType: sqorc.ColumnTypeText, - Nullable: true, - }, - { - Name: "cbsd_serial_number", - SqlType: sqorc.ColumnTypeText, - Nullable: true, - Unique: true, - }, - { - Name: "last_seen", - SqlType: sqorc.ColumnTypeDatetime, - Nullable: true, - }, - 
{ - Name: "preferred_bandwidth_mhz", - SqlType: sqorc.ColumnTypeInt, - }, - { - Name: "preferred_frequencies_mhz", - SqlType: sqorc.ColumnTypeText, - }, - { - Name: "min_power", - SqlType: sqorc.ColumnTypeReal, - Nullable: true, - }, - { - Name: "max_power", - SqlType: sqorc.ColumnTypeReal, - Nullable: true, - }, - { - Name: "antenna_gain", - SqlType: sqorc.ColumnTypeReal, - Nullable: true, - }, - { - Name: "number_of_ports", - SqlType: sqorc.ColumnTypeInt, - Nullable: true, - }, - { - Name: "is_deleted", - SqlType: sqorc.ColumnTypeBool, - HasDefault: true, - DefaultValue: false, - }, - { - Name: "should_deregister", - SqlType: sqorc.ColumnTypeBool, - HasDefault: true, - DefaultValue: false, - }, - { - Name: "should_relinquish", - SqlType: sqorc.ColumnTypeBool, - HasDefault: true, - DefaultValue: false, - }, - { - Name: "single_step_enabled", - SqlType: sqorc.ColumnTypeBool, - HasDefault: true, - DefaultValue: false, - }, - { - Name: "cbsd_category", - SqlType: sqorc.ColumnTypeText, - HasDefault: true, - DefaultValue: "b", - }, - { - Name: "latitude_deg", - SqlType: sqorc.ColumnTypeReal, - Nullable: true, - }, - { - Name: "longitude_deg", - SqlType: sqorc.ColumnTypeReal, - Nullable: true, - }, - { - Name: "height_m", - SqlType: sqorc.ColumnTypeInt, - Nullable: true, - }, - { - Name: "height_type", - SqlType: sqorc.ColumnTypeText, - Nullable: true, - }, - { - Name: "horizontal_accuracy_m", - SqlType: sqorc.ColumnTypeReal, - Nullable: true, - }, - { - Name: "antenna_azimuth_deg", - SqlType: sqorc.ColumnTypeInt, - Nullable: true, - }, - { - Name: "antenna_downtilt_deg", - SqlType: sqorc.ColumnTypeInt, - Nullable: true, - }, - { - Name: "antenna_beamwidth_deg", - SqlType: sqorc.ColumnTypeInt, - Nullable: true, - }, - { - Name: "antenna_model", - SqlType: sqorc.ColumnTypeText, - Nullable: true, - }, - { - Name: "eirp_capability_dbm_mhz", - SqlType: sqorc.ColumnTypeInt, - Nullable: true, - }, - { - Name: "indoor_deployment", - SqlType: sqorc.ColumnTypeBool, - HasDefault: 
true, - DefaultValue: false, - }, - { - Name: "cpi_digital_signature", - SqlType: sqorc.ColumnTypeText, - Nullable: true, - }, - { - Name: "carrier_aggregation_enabled", - SqlType: sqorc.ColumnTypeBool, - HasDefault: true, - DefaultValue: false, - }, - { - Name: "grant_redundancy", - SqlType: sqorc.ColumnTypeBool, - HasDefault: true, - DefaultValue: true, - }, - { - Name: "max_ibw_mhz", - SqlType: sqorc.ColumnTypeInt, - HasDefault: true, - DefaultValue: 150, - }, - }, - }, + }, { + name: "check ModelMetadata structure for DBGrant", + model: &storage.DBGrant{}, + expected: db.ModelMetadata{ + Table: storage.GrantTable, + Properties: []*db.Field{{ + Name: "id", + SqlType: sqorc.ColumnTypeInt, + }, { + Name: "state_id", + SqlType: sqorc.ColumnTypeInt, + Relation: storage.GrantStateTable, + }, { + Name: "cbsd_id", + SqlType: sqorc.ColumnTypeInt, + Nullable: true, + Relation: storage.CbsdTable, + }, { + Name: "grant_id", + SqlType: sqorc.ColumnTypeText, + }, { + Name: "grant_expire_time", + SqlType: sqorc.ColumnTypeDatetime, + Nullable: true, + }, { + Name: "transmit_expire_time", + SqlType: sqorc.ColumnTypeDatetime, + Nullable: true, + }, { + Name: "heartbeat_interval", + SqlType: sqorc.ColumnTypeInt, + Nullable: true, + }, { + Name: "last_heartbeat_request_time", + SqlType: sqorc.ColumnTypeDatetime, + Nullable: true, + }, { + Name: "channel_type", + SqlType: sqorc.ColumnTypeText, + Nullable: true, + }, { + Name: "low_frequency", + SqlType: sqorc.ColumnTypeInt, + }, { + Name: "high_frequency", + SqlType: sqorc.ColumnTypeInt, + }, { + Name: "max_eirp", + SqlType: sqorc.ColumnTypeReal, + }}, }, - } + }, { + name: "check ModelMetadata structure for DBCbsdState", + model: &storage.DBCbsdState{}, + expected: db.ModelMetadata{ + Table: storage.CbsdStateTable, + Properties: []*db.Field{{ + Name: "id", + SqlType: sqorc.ColumnTypeInt, + }, { + Name: "name", + SqlType: sqorc.ColumnTypeText, + }}, + }, + }, { + name: "check ModelMetadata structure for DBCbsd", + model: 
&storage.DBCbsd{}, + expected: db.ModelMetadata{ + Table: storage.CbsdTable, + Properties: []*db.Field{{ + Name: "id", + SqlType: sqorc.ColumnTypeInt, + }, { + Name: "network_id", + SqlType: sqorc.ColumnTypeText, + }, { + Name: "state_id", + SqlType: sqorc.ColumnTypeInt, + Relation: storage.CbsdStateTable, + }, { + Name: "desired_state_id", + SqlType: sqorc.ColumnTypeInt, + Relation: storage.CbsdStateTable, + }, { + Name: "cbsd_id", + SqlType: sqorc.ColumnTypeText, + Nullable: true, + }, { + Name: "user_id", + SqlType: sqorc.ColumnTypeText, + Nullable: true, + }, { + Name: "fcc_id", + SqlType: sqorc.ColumnTypeText, + Nullable: true, + }, { + Name: "cbsd_serial_number", + SqlType: sqorc.ColumnTypeText, + Nullable: true, + Unique: true, + }, { + Name: "last_seen", + SqlType: sqorc.ColumnTypeDatetime, + Nullable: true, + }, { + Name: "preferred_bandwidth_mhz", + SqlType: sqorc.ColumnTypeInt, + }, { + Name: "preferred_frequencies_mhz", + SqlType: sqorc.ColumnTypeText, + }, { + Name: "min_power", + SqlType: sqorc.ColumnTypeReal, + Nullable: true, + }, { + Name: "max_power", + SqlType: sqorc.ColumnTypeReal, + Nullable: true, + }, { + Name: "antenna_gain", + SqlType: sqorc.ColumnTypeReal, + Nullable: true, + }, { + Name: "number_of_ports", + SqlType: sqorc.ColumnTypeInt, + Nullable: true, + }, { + Name: "is_deleted", + SqlType: sqorc.ColumnTypeBool, + HasDefault: true, + DefaultValue: false, + }, { + Name: "should_deregister", + SqlType: sqorc.ColumnTypeBool, + HasDefault: true, + DefaultValue: false, + }, { + Name: "should_relinquish", + SqlType: sqorc.ColumnTypeBool, + HasDefault: true, + DefaultValue: false, + }, { + Name: "single_step_enabled", + SqlType: sqorc.ColumnTypeBool, + HasDefault: true, + DefaultValue: false, + }, { + Name: "cbsd_category", + SqlType: sqorc.ColumnTypeText, + HasDefault: true, + DefaultValue: "b", + }, { + Name: "latitude_deg", + SqlType: sqorc.ColumnTypeReal, + Nullable: true, + }, { + Name: "longitude_deg", + SqlType: sqorc.ColumnTypeReal, 
+ Nullable: true, + }, { + Name: "height_m", + SqlType: sqorc.ColumnTypeInt, + Nullable: true, + }, { + Name: "height_type", + SqlType: sqorc.ColumnTypeText, + Nullable: true, + }, { + Name: "indoor_deployment", + SqlType: sqorc.ColumnTypeBool, + HasDefault: true, + DefaultValue: false, + }, { + Name: "carrier_aggregation_enabled", + SqlType: sqorc.ColumnTypeBool, + HasDefault: true, + DefaultValue: false, + }, { + Name: "grant_redundancy", + SqlType: sqorc.ColumnTypeBool, + HasDefault: true, + DefaultValue: true, + }, { + Name: "max_ibw_mhz", + SqlType: sqorc.ColumnTypeInt, + HasDefault: true, + DefaultValue: 150, + }, { + Name: "available_frequencies", + SqlType: sqorc.ColumnTypeText, + Nullable: true, + }, { + Name: "channels", + SqlType: sqorc.ColumnTypeText, + Nullable: false, + HasDefault: true, + DefaultValue: "'[]'", + }}, + }, + }} for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { actual := tc.model.GetMetadata() diff --git a/dp/cloud/helm/dp/charts/domain-proxy/Chart.yaml b/dp/cloud/helm/dp/charts/domain-proxy/Chart.yaml index f06337a7d64a..5848926f6dc0 100644 --- a/dp/cloud/helm/dp/charts/domain-proxy/Chart.yaml +++ b/dp/cloud/helm/dp/charts/domain-proxy/Chart.yaml @@ -11,8 +11,8 @@ # limitations under the License. apiVersion: v2 -appVersion: "0.1.0" -version: 0.1.0 +appVersion: "1.8.0" +version: 1.8.0 description: A Helm chart for magma orchestrator's domain-proxy module. name: domain-proxy engine: gotpl diff --git a/dp/cloud/helm/dp/charts/domain-proxy/README.md b/dp/cloud/helm/dp/charts/domain-proxy/README.md index 5e5a00f7ae34..1b707c8773d2 100644 --- a/dp/cloud/helm/dp/charts/domain-proxy/README.md +++ b/dp/cloud/helm/dp/charts/domain-proxy/README.md @@ -118,34 +118,6 @@ The following table lists the configurable parameters of the Domain-proxy chart | `radio_controller.nodeSelector` | Kubernetes node selection constraint. | `{}` | | `radio_controller.tolerations` | Allow the pods to schedule onto nodes with matching taints. 
| `[]` | | `radio_controller.affinity` | Constrain which nodes your pod is eligible to be scheduled on. | `{}` | -| `active_mode_controller.nameOverride` | Replaces service part of the dp component deployment name. | `""` | -| `active_mode_controller.fullnameOverride` | Completely replaces dp component deployment name. | `""` | -| `active_mode_controller.enabled` | Enables deployment of the given dp component. | `true` | -| `active_mode_controller.name` | Domain proxy component name. | `"active-mode-controller"` | -| `active_mode_controller.image.repository` | Docker image repository. | `"active-mode-controller"` | -| `active_mode_controller.image.tag` | Overrides the image tag whose default is the chart appVersion. | `""` | -| `active_mode_controller.image.pullPolicy` | Default the pull policy of all containers in that pod. | `"IfNotPresent"` | -| `active_mode_controller.replicaCount` | How many replicas of particular component should be created. | `1` | -| `active_mode_controller.imagePullSecrets` | Name of the secret that contains container image registry keys. | `[]` | -| `active_mode_controller.serviceAccount.create` | Specifies whether a service account should be created. | `false` | -| `active_mode_controller.serviceAccount.annotations` | Annotations to add to the service account. | `{}` | -| `active_mode_controller.serviceAccount.name` | The name of the service account to use,If not set and create is true, a name is generated using the fullname template. | `""` | -| `active_mode_controller.podAnnotations` | Additional pod annotations. | `{}` | -| `active_mode_controller.podSecurityContext` | Holds pod-level security attributes. | `{}` | -| `active_mode_controller.securityContext` | Holds security configuration that will be applied to a container. | `{}` | -| `active_mode_controller.resources` | Resource requests and limits of Pod. | `{}` | -| `active_mode_controller.readinessProbe` | Readines probe definition. 
| `{}` | -| `active_mode_controller.livenessProbe` | Livenes probe definition. | `{}` | -| `active_mode_controller.autoscaling.enabled` | Enables horizontal pod autscaler kubernetes resource. | `false` | -| `active_mode_controller.autoscaling.minReplicas` | Minimum number of dp component replicas. | `1` | -| `active_mode_controller.autoscaling.maxReplicas` | Maximum number of dp component replicas. | `100` | -| `active_mode_controller.autoscaling.targetCPUUtilizationPercentage` | Target CPU utilization threshold in perecents when new replica should be created | `80` | -| `active_mode_controller.podDisruptionBudget.enabled` | Creates kubernetes podDisruptionBudget resource. | `false` | -| `active_mode_controller.podDisruptionBudget.minAvailable` | Minimum available pods for dp component. | `1` | -| `active_mode_controller.podDisruptionBudget.maxUnavailable` | Maximum unavailable pods for dp component. | `""` | -| `active_mode_controller.nodeSelector` | Kubernetes node selection constraint. | `{}` | -| `active_mode_controller.tolerations` | Allow the pods to schedule onto nodes with matching taints. | `[]` | -| `active_mode_controller.affinity` | Constrain which nodes your pod is eligible to be scheduled on. | `{}` | | `db_service.database` | | `{}` | | `db_service.enabled` | Enables deployment of the given service. | `true` | | `db_service.nameOverride` | Replaces service part of the dp component deployment name. 
| `""` | diff --git a/dp/cloud/helm/dp/charts/domain-proxy/examples/aws_nginx_values.yaml b/dp/cloud/helm/dp/charts/domain-proxy/examples/aws_nginx_values.yaml index fb3131ea4160..e3f65d6ab4a3 100644 --- a/dp/cloud/helm/dp/charts/domain-proxy/examples/aws_nginx_values.yaml +++ b/dp/cloud/helm/dp/charts/domain-proxy/examples/aws_nginx_values.yaml @@ -135,49 +135,6 @@ dp: affinity: {} - active_mode_controller: - - nameOverride: "" - fullnameOverride: "" - enabled: true - name: active-mode-controller - - image: - repository: active-mode-controller - tag: "" - pullPolicy: IfNotPresent - - replicaCount: 1 - - imagePullSecrets: [] - - serviceAccount: - create: false - annotations: {} - name: "" - - podAnnotations: {} - podSecurityContext: {} - securityContext: {} - resources: {} - readinessProbe: {} - livenessProbe: {} - - autoscaling: - enabled: false - minReplicas: 1 - maxReplicas: 100 - targetCPUUtilizationPercentage: 80 - - podDisruptionBudget: - enabled: false - minAvailable: 1 - maxUnavailable: "" - - nodeSelector: {} - tolerations: [] - affinity: {} - db_service: enabled: true diff --git a/dp/cloud/helm/dp/charts/domain-proxy/examples/minikube_values.yaml b/dp/cloud/helm/dp/charts/domain-proxy/examples/minikube_values.yaml index dce20019f062..e7b17a5f199d 100644 --- a/dp/cloud/helm/dp/charts/domain-proxy/examples/minikube_values.yaml +++ b/dp/cloud/helm/dp/charts/domain-proxy/examples/minikube_values.yaml @@ -144,51 +144,6 @@ dp: affinity: {} - active_mode_controller: - - nameOverride: "" - fullnameOverride: "" - enabled: true - name: active-mode-controller - - image: - repository: active-mode-controller - tag: "" - pullPolicy: IfNotPresent - - replicaCount: 1 - - imagePullSecrets: [] - - serviceAccount: - create: false - annotations: {} - name: "" - - podAnnotations: {} - podSecurityContext: {} - securityContext: {} - - extraEnv: {} - resources: {} - readinessProbe: {} - livenessProbe: {} - - autoscaling: - enabled: false - minReplicas: 1 - maxReplicas: 100 - 
targetCPUUtilizationPercentage: 80 - - podDisruptionBudget: - enabled: false - minAvailable: 1 - maxUnavailable: "" - - nodeSelector: {} - tolerations: [] - affinity: {} - db_service: database: {} diff --git a/dp/cloud/helm/dp/charts/domain-proxy/examples/minikube_values_nginx.yaml b/dp/cloud/helm/dp/charts/domain-proxy/examples/minikube_values_nginx.yaml index bd8ab1ed7801..664f5598d8a2 100644 --- a/dp/cloud/helm/dp/charts/domain-proxy/examples/minikube_values_nginx.yaml +++ b/dp/cloud/helm/dp/charts/domain-proxy/examples/minikube_values_nginx.yaml @@ -134,49 +134,6 @@ dp: affinity: {} - active_mode_controller: - - nameOverride: "" - fullnameOverride: "" - enabled: true - name: active-mode-controller - - image: - repository: active-mode-controller - tag: "" - pullPolicy: IfNotPresent - - replicaCount: 1 - - imagePullSecrets: [] - - serviceAccount: - create: false - annotations: {} - name: "" - - podAnnotations: {} - podSecurityContext: {} - securityContext: {} - resources: {} - readinessProbe: {} - livenessProbe: {} - - autoscaling: - enabled: false - minReplicas: 1 - maxReplicas: 100 - targetCPUUtilizationPercentage: 80 - - podDisruptionBudget: - enabled: false - minAvailable: 1 - maxUnavailable: "" - - nodeSelector: {} - tolerations: [] - affinity: {} - db_service: enabled: true diff --git a/dp/cloud/helm/dp/charts/domain-proxy/examples/orc8r_integration_tests.yaml b/dp/cloud/helm/dp/charts/domain-proxy/examples/orc8r_integration_tests.yaml new file mode 100644 index 000000000000..a798df37e9ae --- /dev/null +++ b/dp/cloud/helm/dp/charts/domain-proxy/examples/orc8r_integration_tests.yaml @@ -0,0 +1,10 @@ +--- +dp: + config: + dp_backend: + cbsd_inactivity_interval_sec: 3 + active_mode_controller: + heartbeat_send_timeout_sec: 30 + request_processing_interval_sec: 1 + polling_interval: 1 + cbsd_inactivity_interval_sec: 3 diff --git a/dp/cloud/helm/dp/charts/domain-proxy/examples/orc8r_minikube_values.yaml 
b/dp/cloud/helm/dp/charts/domain-proxy/examples/orc8r_minikube_values.yaml index 77c968710672..b24398c1f8ce 100644 --- a/dp/cloud/helm/dp/charts/domain-proxy/examples/orc8r_minikube_values.yaml +++ b/dp/cloud/helm/dp/charts/domain-proxy/examples/orc8r_minikube_values.yaml @@ -158,8 +158,6 @@ nms: targetPort: 9443 dp: enabled: true - cbsd_inactivity_interval_sec: 3 - log_consumer_url: "http://domain-proxy-fluentd:9888/dp" service: labels: orc8r.io/obsidian_handlers: "true" diff --git a/dp/cloud/helm/dp/charts/domain-proxy/templates/_helpers.tpl b/dp/cloud/helm/dp/charts/domain-proxy/templates/_helpers.tpl index 6d5a98a561e4..1f449a709427 100644 --- a/dp/cloud/helm/dp/charts/domain-proxy/templates/_helpers.tpl +++ b/dp/cloud/helm/dp/charts/domain-proxy/templates/_helpers.tpl @@ -60,22 +60,6 @@ Configuration controller labels {{ include "domain-proxy.common.metaLabels" . }} {{- end -}} -{{/* -Active mode controller match labels -*/}} -{{- define "domain-proxy.active_mode_controller.matchLabels" -}} -component: {{ .Values.dp.active_mode_controller.name | quote }} -{{ include "domain-proxy.common.matchLabels" . }} -{{- end -}} - -{{/* -Active mode controller labels -*/}} -{{- define "domain-proxy.active_mode_controller.labels" -}} -{{ include "domain-proxy.active_mode_controller.matchLabels" . }} -{{ include "domain-proxy.common.metaLabels" . }} -{{- end -}} - {{/* Radio controller match labels */}} @@ -204,23 +188,6 @@ We truncate at 63 chars because some Kubernetes name fields are limited to this {{- end -}} {{- end -}} -{{/* -Create a fully qualified active_mode_controller name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
-*/}} - -{{- define "domain-proxy.active_mode_controller.fullname" -}} -{{- if .Values.dp.active_mode_controller.fullnameOverride -}} -{{- .Values.dp.active_mode_controller.fullnameOverride | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- $name := default .Chart.Name .Values.dp.nameOverride -}} -{{- if contains $name .Release.Name -}} -{{- printf "%s-%s" .Release.Name .Values.dp.active_mode_controller.name | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- printf "%s-%s-%s" .Release.Name $name .Values.dp.active_mode_controller.name | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- end -}} -{{- end -}} {{/* Create a fully qualified fluentd name. @@ -326,17 +293,6 @@ Create the name of the service account to use for db service {{- end }} {{- end }} -{{/* -Create the name of the service account to use for active mode controller -*/}} -{{- define "domain-proxy.active_mode_controller.serviceAccountName" -}} -{{- if .Values.dp.active_mode_controller.serviceAccount.create }} -{{- default (include "domain-proxy.fullname" .) .Values.dp.active_mode_controller.serviceAccount.name }} -{{- else }} -{{- default "default" .Values.dp.active_mode_controller.serviceAccount.name }} -{{- end }} -{{- end }} - {{/* Create the name of the service account to use for fluentd */}} diff --git a/dp/cloud/helm/dp/charts/domain-proxy/templates/active_mode_controller/deployment.yaml b/dp/cloud/helm/dp/charts/domain-proxy/templates/active_mode_controller/deployment.yaml deleted file mode 100644 index ea50df507f0a..000000000000 --- a/dp/cloud/helm/dp/charts/domain-proxy/templates/active_mode_controller/deployment.yaml +++ /dev/null @@ -1,84 +0,0 @@ -{{/* -# Copyright 2020 The Magma Authors. - -# This source code is licensed under the BSD-style license found in the -# LICENSE file in the root directory of this source tree. 
- -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -*/}} - -{{- if and .Values.dp.create .Values.dp.active_mode_controller.enabled -}} -apiVersion: {{ template "domain-proxy.deployment.apiVersion" . }} -kind: Deployment -metadata: - name: {{ include "domain-proxy.active_mode_controller.fullname" . }} - labels: - {{- include "domain-proxy.active_mode_controller.labels" . | nindent 4 }} -spec: - {{- if not .Values.dp.active_mode_controller.autoscaling.enabled }} - replicas: {{ .Values.dp.active_mode_controller.replicaCount }} - {{- end }} - selector: - matchLabels: - {{- include "domain-proxy.active_mode_controller.matchLabels" . | nindent 6 }} - template: - metadata: - {{- with .Values.dp.active_mode_controller.podAnnotations }} - annotations: - {{- toYaml . | nindent 8 }} - {{- end }} - labels: - {{- include "domain-proxy.active_mode_controller.labels" . | nindent 8 }} - spec: - {{- with .Values.dp.active_mode_controller.imagePullSecrets }} - imagePullSecrets: - {{- toYaml . | nindent 8 }} - {{- end }} - serviceAccountName: {{ include "domain-proxy.active_mode_controller.serviceAccountName" . 
}} - securityContext: - {{- toYaml .Values.dp.active_mode_controller.podSecurityContext | nindent 8 }} - containers: - - name: {{ .Values.dp.active_mode_controller.name }} - securityContext: - {{- toYaml .Values.dp.active_mode_controller.securityContext | nindent 12 }} - image: {{ .Values.dp.active_mode_controller.image.repository -}}:{{- .Values.dp.active_mode_controller.image.tag | default .Chart.AppVersion }} - imagePullPolicy: {{ .Values.dp.active_mode_controller.image.pullPolicy }} - env: - {{- range $key, $value := .Values.dp.active_mode_controller.extraEnv }} - - name: {{ $key }} - value: {{ $value }} - {{- end }} - - name: SERVICE_HOSTNAME - valueFrom: - fieldRef: - fieldPath: status.podIP - envFrom: - - configMapRef: - name: {{ include "domain-proxy.configuration_controller.fullname" . }}-common - {{- if .Values.dp.active_mode_controller.livenessProbe }} - livenessProbe: - {{- toYaml .Values.dp.active_mode_controller.livenessProbe | nindent 12 }} - {{- end }} - {{- if .Values.dp.active_mode_controller.readinessProbe }} - readinessProbe: - {{- toYaml .Values.dp.active_mode_controller.readinessProbe | nindent 12 }} - {{- end }} - resources: - {{- toYaml .Values.dp.active_mode_controller.resources | nindent 12 }} - {{- with .Values.dp.active_mode_controller.nodeSelector }} - nodeSelector: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.dp.active_mode_controller.affinity }} - affinity: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.dp.active_mode_controller.tolerations }} - tolerations: - {{- toYaml . | nindent 8 }} - {{- end }} -{{- end }} diff --git a/dp/cloud/helm/dp/charts/domain-proxy/templates/active_mode_controller/hpa.yaml b/dp/cloud/helm/dp/charts/domain-proxy/templates/active_mode_controller/hpa.yaml deleted file mode 100644 index ccdcf0b37ea2..000000000000 --- a/dp/cloud/helm/dp/charts/domain-proxy/templates/active_mode_controller/hpa.yaml +++ /dev/null @@ -1,43 +0,0 @@ -{{/* -# Copyright 2020 The Magma Authors. 
- -# This source code is licensed under the BSD-style license found in the -# LICENSE file in the root directory of this source tree. - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -*/}} - -{{- if and .Values.dp.create .Values.dp.active_mode_controller.enabled -}} -{{- if .Values.dp.active_mode_controller.autoscaling.enabled }} -apiVersion: autoscaling/v2beta1 -kind: HorizontalPodAutoscaler -metadata: - name: {{ include "domain-proxy.active_mode_controller.fullname" . }} - labels: - {{- include "domain-proxy.active_mode_controller.labels" . | nindent 4 }} -spec: - scaleTargetRef: - apiVersion: apps/v1 - kind: Deployment - name: {{ include "domain-proxy.active_mode_controller.fullname" . }} - minReplicas: {{ .Values.dp.active_mode_controller.autoscaling.minReplicas }} - maxReplicas: {{ .Values.dp.active_mode_controller.autoscaling.maxReplicas }} - metrics: - {{- if .Values.dp.active_mode_controller.autoscaling.targetCPUUtilizationPercentage }} - - type: Resource - resource: - name: cpu - targetAverageUtilization: {{ .Values.dp.active_mode_controller.autoscaling.targetCPUUtilizationPercentage }} - {{- end }} - {{- if .Values.dp.active_mode_controller.autoscaling.targetMemoryUtilizationPercentage }} - - type: Resource - resource: - name: memory - targetAverageUtilization: {{ .Values.dp.active_mode_controller.autoscaling.targetMemoryUtilizationPercentage }} - {{- end }} -{{- end }} -{{- end }} diff --git a/dp/cloud/helm/dp/charts/domain-proxy/templates/active_mode_controller/pdb.yaml b/dp/cloud/helm/dp/charts/domain-proxy/templates/active_mode_controller/pdb.yaml deleted file mode 100644 index dc481eeb38e0..000000000000 --- 
a/dp/cloud/helm/dp/charts/domain-proxy/templates/active_mode_controller/pdb.yaml +++ /dev/null @@ -1,18 +0,0 @@ -{{- if and .Values.dp.create .Values.dp.active_mode_controller.enabled .Values.dp.active_mode_controller.podDisruptionBudget.enabled -}} -apiVersion: policy/v1beta1 -kind: PodDisruptionBudget -metadata: - name: {{ include "domain-proxy.active_mode_controller.fullname" . }} - labels: - {{- include "domain-proxy.active_mode_controller.labels" . | nindent 4 }} -spec: - {{- with .Values.dp.active_mode_controller.podDisruptionBudget.minAvailable }} - minAvailable: {{ . }} - {{- end }} - {{- with .Values.dp.active_mode_controller.podDisruptionBudget.maxUnavailable }} - maxUnavailable: {{ . }} - {{- end }} - selector: - matchLabels: - {{- include "domain-proxy.active_mode_controller.matchLabels" . | nindent 6 }} -{{- end }} diff --git a/dp/cloud/helm/dp/charts/domain-proxy/templates/active_mode_controller/serviceaccount.yaml b/dp/cloud/helm/dp/charts/domain-proxy/templates/active_mode_controller/serviceaccount.yaml deleted file mode 100644 index 16dfbb7d3d47..000000000000 --- a/dp/cloud/helm/dp/charts/domain-proxy/templates/active_mode_controller/serviceaccount.yaml +++ /dev/null @@ -1,14 +0,0 @@ -{{- if and .Values.dp.create .Values.dp.active_mode_controller.enabled -}} -{{- if .Values.dp.active_mode_controller.serviceAccount.create -}} -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ include "domain-proxy.serviceAccountName.active_mode_controller" . }} - labels: - {{- include "domain-proxy.active_mode_controller.labels" . | nindent 4 }} - {{- with .Values.dp.active_mode_controller.serviceAccount.annotations }} - annotations: - {{- toYaml . 
| nindent 4 }} - {{- end }} -{{- end }} -{{- end }} diff --git a/dp/cloud/helm/dp/charts/domain-proxy/values.yaml b/dp/cloud/helm/dp/charts/domain-proxy/values.yaml index 47d5106fa9d0..f3e3390856e7 100644 --- a/dp/cloud/helm/dp/charts/domain-proxy/values.yaml +++ b/dp/cloud/helm/dp/charts/domain-proxy/values.yaml @@ -227,85 +227,6 @@ dp: affinity: {} # Constrain which nodes your pod is eligible to be scheduled on. - active_mode_controller: - - nameOverride: "" # Replaces service part of the dp component deployment name. - fullnameOverride: "" # Completely replaces dp component deployment name. - enabled: true # Enables deployment of the given dp component. - name: active-mode-controller # Domain proxy component name. - - image: - repository: docker.artifactory.magmacore.org/active-mode-controller # Docker image repository. - tag: "1.7.0" - pullPolicy: IfNotPresent # Default the pull policy of all containers in that pod. - - replicaCount: 1 # How many replicas of particular component should be created. - - imagePullSecrets: [] # Name of the secret that contains container image registry keys. - - serviceAccount: - create: false # Specifies whether a service account should be created. - annotations: {} # Annotations to add to the service account. - name: "" # The name of the service account to use,If not set and create is true, a name is generated using the fullname template. - - podAnnotations: {} # Additional pod annotations. - - podSecurityContext: {} # Holds pod-level security attributes. - # fsGroup: 2000 - - securityContext: {} # Holds security configuration that will be applied to a container. - # capabilities: - # drop: - # - ALL - # readOnlyRootFilesystem: true - # runAsNonRoot: true - # runAsUser: 1000 - - resources: {} # Resource requests and limits of Pod. - # We usually recommend not to specify default resources and to leave this as a conscious - # choice for the user. 
This also increases chances charts run on environments with little - # resources, such as Minikube. If you do want to specify resources, uncomment the following - # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - # limits: - # cpu: 100m - # memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi - - readinessProbe: {} # Readines probe definition. - # Example httpGet probe - # httpGet: - # path: / - # port: http - - livenessProbe: {} # Livenes probe definition. - # Example httpget probe - # httpGet: - # path: / - # port: http - - autoscaling: - enabled: false # Enables horizontal pod autscaler kubernetes resource. - minReplicas: 1 # Minimum number of dp component replicas. - maxReplicas: 100 # Maximum number of dp component replicas. - targetCPUUtilizationPercentage: 80 # Target CPU utilization threshold in perecents when new replica should be created - # targetMemoryUtilizationPercentage: 80 # Target CPU utilization threshold in perecents when new replica should be created - # You can use one of these - - # ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget - podDisruptionBudget: - enabled: false # Creates kubernetes podDisruptionBudget resource. - minAvailable: 1 # Minimum available pods for dp component. - maxUnavailable: "" # Maximum unavailable pods for dp component. - # You can use either one. - - nodeSelector: {} # Kubernetes node selection constraint. - - tolerations: [] # Allow the pods to schedule onto nodes with matching taints. - - affinity: {} # Constrain which nodes your pod is eligible to be scheduled on. 
- db_service: dbConnectionPoolSize: "6" # How many database connections are made and maintained diff --git a/dp/cloud/python/magma/configuration_controller/response_processor/strategies/response_processing.py b/dp/cloud/python/magma/configuration_controller/response_processor/strategies/response_processing.py index 9d56cfeef090..cc4bc9a142a4 100644 --- a/dp/cloud/python/magma/configuration_controller/response_processor/strategies/response_processing.py +++ b/dp/cloud/python/magma/configuration_controller/response_processor/strategies/response_processing.py @@ -19,13 +19,7 @@ from magma.configuration_controller.response_processor.response_db_processor import ( ResponseDBProcessor, ) -from magma.db_service.models import ( - DBCbsd, - DBCbsdState, - DBChannel, - DBGrant, - DBGrantState, -) +from magma.db_service.models import DBCbsd, DBCbsdState, DBGrant, DBGrantState from magma.db_service.session_manager import Session from magma.mappings.types import CbsdStates, GrantStates, ResponseCodes @@ -52,7 +46,6 @@ def unregister_cbsd_on_response_condition(process_response_func) -> Callable: Currently a CBSD should be marked as unregistered on Domain Proxy if: * SAS returns a response with responseCode 105 (ResponseCodes.DEREGISTER) * SAS returns a response with responseCode 103 (ResponseCodes.INVALID_VALUE) - and responseData has "cbsdId" listed as the INVALID_VALUE parameter Parameters: process_response_func: Response processing function @@ -61,10 +54,7 @@ def unregister_cbsd_on_response_condition(process_response_func) -> Callable: response processing function wrapper """ def process_response_wrapper(obj: ResponseDBProcessor, response: DBResponse, session: Session) -> None: - if any([ - response.response_code == ResponseCodes.DEREGISTER.value, - _is_response_invalid_value_cbsd_id(response), - ]): + if response.response_code in {ResponseCodes.DEREGISTER.value, ResponseCodes.INVALID_VALUE.value}: logger.info(f'SAS {response.payload} implies CBSD immedaite unregistration') 
_unregister_cbsd(response, session) return @@ -144,18 +134,16 @@ def _create_channels(response: DBResponse, session: Session): "Could not create channel from spectrumInquiryResponse. Response missing 'availableChannel' object", ) return + + channels = [] for ac in available_channels: frequency_range = ac["frequencyRange"] - channel = DBChannel( - cbsd=cbsd, - low_frequency=frequency_range["lowFrequency"], - high_frequency=frequency_range["highFrequency"], - channel_type=ac["channelType"], - rule_applied=ac["ruleApplied"], - max_eirp=ac.get("maxEirp"), - ) - logger.info(f"Creating channel for {cbsd.id=}") - session.add(channel) + channels.append({ + "low_frequency": frequency_range["lowFrequency"], + "high_frequency": frequency_range["highFrequency"], + "max_eirp": ac.get("maxEirp", 37), + }) + cbsd.channels = channels @unregister_cbsd_on_response_condition @@ -370,20 +358,18 @@ def _update_grant_from_response(response: DBResponse, grant: DBGrant) -> None: def _terminate_all_grants_from_response(response: DBResponse, session: Session) -> None: - cbsd_id = response.payload.get( - CBSD_ID, - ) or response.request.payload.get(CBSD_ID) + cbsd_id = response.cbsd_id if not cbsd_id: return - logger.info(f'Terminating all grants for {cbsd_id=}') + with session.no_autoflush: - session.query(DBGrant). \ - filter(DBGrant.cbsd_id == DBCbsd.id, DBCbsd.cbsd_id == cbsd_id). \ - delete(synchronize_session=False) + logger.info(f'Terminating all grants for {cbsd_id=}') + session.query(DBGrant).filter( + DBGrant.cbsd_id == DBCbsd.id, DBCbsd.cbsd_id == cbsd_id, + ).delete(synchronize_session=False) + logger.info(f"Deleting all channels for {cbsd_id=}") - session.query(DBChannel). \ - filter(DBChannel.cbsd_id == DBCbsd.id, DBCbsd.cbsd_id == cbsd_id). 
\ - delete(synchronize_session=False) + session.query(DBCbsd).filter(DBCbsd.cbsd_id == cbsd_id).update({DBCbsd.channels: []}) def _unsync_conflict_from_response(obj: ResponseDBProcessor, response: DBResponse, session: Session) -> None: @@ -410,13 +396,3 @@ def _unregister_cbsd(response: DBResponse, session: Session) -> None: filter(DBCbsdState.name == CbsdStates.UNREGISTERED.value) _terminate_all_grants_from_response(response, session) _update_cbsd(session, where, {"state_id": state_id.subquery()}) - - -def _is_response_invalid_value_cbsd_id(response: DBResponse) -> bool: - if response.response_code != ResponseCodes.INVALID_VALUE.value: - return False - - response_data = response.payload.get( - "response", {}, - ).get("responseData", []) - return CBSD_ID in response_data diff --git a/dp/cloud/python/magma/configuration_controller/tests/unit/test_response_processor.py b/dp/cloud/python/magma/configuration_controller/tests/unit/test_response_processor.py index e2f1a92fe46a..f9dc8023175e 100644 --- a/dp/cloud/python/magma/configuration_controller/tests/unit/test_response_processor.py +++ b/dp/cloud/python/magma/configuration_controller/tests/unit/test_response_processor.py @@ -27,7 +27,6 @@ from magma.db_service.models import ( DBCbsd, DBCbsdState, - DBChannel, DBGrant, DBRequest, DBRequestType, @@ -50,6 +49,7 @@ ) from magma.fixtures.fake_responses.spectrum_inquiry_responses import ( single_channel_for_one_cbsd, + single_channel_for_one_cbsd_with_no_max_eirp, two_channels_for_one_cbsd, zero_channels_for_one_cbsd, ) @@ -75,6 +75,7 @@ HEARTBEAT_REQ = RequestTypes.HEARTBEAT.value GRANT_REQ = RequestTypes.GRANT.value SPECTRUM_INQ_REQ = RequestTypes.SPECTRUM_INQUIRY.value +DEFAULT_MAX_EIRP = 37 _fake_requests_map = { REGISTRATION_REQ: registration_requests, @@ -124,33 +125,15 @@ def test_processor_splits_sas_response_into_separate_db_objects_and_links_them_w ) @parameterized.expand([ - ( - GRANT_REQ, ResponseCodes.SUCCESS.value, None, GrantStates.GRANTED.value, - ), - ( - 
GRANT_REQ, ResponseCodes.INTERFERENCE.value, None, None, - ), - ( - GRANT_REQ, ResponseCodes.GRANT_CONFLICT.value, ['grant1', 'grant2'], GrantStates.UNSYNC.value, - ), - ( - GRANT_REQ, ResponseCodes.TERMINATED_GRANT.value, None, None, - ), - ( - HEARTBEAT_REQ, ResponseCodes.SUCCESS.value, None, GrantStates.AUTHORIZED.value, - ), - ( - HEARTBEAT_REQ, ResponseCodes.TERMINATED_GRANT.value, None, None, - ), - ( - HEARTBEAT_REQ, ResponseCodes.SUSPENDED_GRANT.value, None, GrantStates.GRANTED.value, - ), - ( - HEARTBEAT_REQ, ResponseCodes.UNSYNC_OP_PARAM.value, None, GrantStates.UNSYNC.value, - ), - ( - RELINQUISHMENT_REQ, ResponseCodes.SUCCESS.value, None, None, - ), + (GRANT_REQ, ResponseCodes.SUCCESS.value, None, GrantStates.GRANTED.value), + (GRANT_REQ, ResponseCodes.INTERFERENCE.value, None, None), + (GRANT_REQ, ResponseCodes.GRANT_CONFLICT.value, ['grant1', 'grant2'], GrantStates.UNSYNC.value), + (GRANT_REQ, ResponseCodes.TERMINATED_GRANT.value, None, None), + (HEARTBEAT_REQ, ResponseCodes.SUCCESS.value, None, GrantStates.AUTHORIZED.value), + (HEARTBEAT_REQ, ResponseCodes.TERMINATED_GRANT.value, None, None), + (HEARTBEAT_REQ, ResponseCodes.SUSPENDED_GRANT.value, None, GrantStates.GRANTED.value), + (HEARTBEAT_REQ, ResponseCodes.UNSYNC_OP_PARAM.value, None, GrantStates.UNSYNC.value), + (RELINQUISHMENT_REQ, ResponseCodes.SUCCESS.value, None, None), ]) @responses.activate def test_grant_state_after_response( @@ -180,35 +163,19 @@ def test_grant_state_after_response( ) @parameterized.expand([ - ( - GRANT_REQ, ResponseCodes.SUCCESS.value, None, [GrantStates.GRANTED.value], - ), - ( - GRANT_REQ, ResponseCodes.INTERFERENCE.value, None, [], - ), + (GRANT_REQ, ResponseCodes.SUCCESS.value, None, [GrantStates.GRANTED.value]), + (GRANT_REQ, ResponseCodes.INTERFERENCE.value, None, []), ( GRANT_REQ, ResponseCodes.GRANT_CONFLICT.value, ['test_grant_id_for_1', 'test_grant_id_for_2'], [GrantStates.GRANTED.value, GrantStates.UNSYNC.value], ), - ( - GRANT_REQ, 
ResponseCodes.TERMINATED_GRANT.value, None, [], - ), - ( - HEARTBEAT_REQ, ResponseCodes.SUCCESS.value, None, [GrantStates.AUTHORIZED.value], - ), - ( - HEARTBEAT_REQ, ResponseCodes.TERMINATED_GRANT.value, None, [], - ), - ( - HEARTBEAT_REQ, ResponseCodes.SUSPENDED_GRANT.value, None, [GrantStates.GRANTED.value], - ), - ( - HEARTBEAT_REQ, ResponseCodes.UNSYNC_OP_PARAM.value, None, [GrantStates.UNSYNC.value], - ), - ( - RELINQUISHMENT_REQ, ResponseCodes.SUCCESS.value, None, [], - ), + (GRANT_REQ, ResponseCodes.TERMINATED_GRANT.value, None, []), + (HEARTBEAT_REQ, ResponseCodes.SUCCESS.value, None, [GrantStates.AUTHORIZED.value]), + (HEARTBEAT_REQ, ResponseCodes.TERMINATED_GRANT.value, None, []), + (HEARTBEAT_REQ, ResponseCodes.SUSPENDED_GRANT.value, None, [GrantStates.GRANTED.value]), + (HEARTBEAT_REQ, ResponseCodes.UNSYNC_OP_PARAM.value, None, [GrantStates.UNSYNC.value]), + (RELINQUISHMENT_REQ, ResponseCodes.SUCCESS.value, None, []), ]) @responses.activate def test_preexisting_grant_state_after_response( @@ -332,6 +299,19 @@ def test_channels_created_after_spectrum_inquiry_response(self, response_fixture ).first() self.assertEqual(expected_channels_count, len(cbsd.channels)) + @responses.activate + def test_channel_created_with_default_max_eirp(self): + # Given + db_requests = self._create_db_requests(SPECTRUM_INQ_REQ, spectrum_inquiry_requests) + response = self._prepare_response_from_payload(single_channel_for_one_cbsd_with_no_max_eirp) + + # When + self._process_response(request_type_name=SPECTRUM_INQ_REQ, response=response, db_requests=db_requests) + + # Then + cbsd = self.session.query(DBCbsd).filter(DBCbsd.cbsd_id == "foo").first() + self.assertEqual(DEFAULT_MAX_EIRP, cbsd.channels[0]["max_eirp"]) + @responses.activate def test_old_channels_deleted_after_spectrum_inquiry_response(self): # Given @@ -408,10 +388,15 @@ def test_channel_params_set_on_grant_response(self): (HEARTBEAT_REQ, ResponseCodes.INVALID_VALUE.value, [CBSD_ID], CbsdStates.UNREGISTERED.value), 
(RELINQUISHMENT_REQ, ResponseCodes.INVALID_VALUE.value, [CBSD_ID], CbsdStates.UNREGISTERED.value), (DEREGISTRATION_REQ, ResponseCodes.INVALID_VALUE.value, [CBSD_ID], CbsdStates.UNREGISTERED.value), - (SPECTRUM_INQ_REQ, ResponseCodes.INVALID_VALUE.value, None, CbsdStates.REGISTERED.value), - (GRANT_REQ, ResponseCodes.INVALID_VALUE.value, None, CbsdStates.REGISTERED.value), - (HEARTBEAT_REQ, ResponseCodes.INVALID_VALUE.value, None, CbsdStates.REGISTERED.value), - (RELINQUISHMENT_REQ, ResponseCodes.INVALID_VALUE.value, None, CbsdStates.REGISTERED.value), + (SPECTRUM_INQ_REQ, ResponseCodes.INVALID_VALUE.value, [GRANT_ID], CbsdStates.UNREGISTERED.value), + (GRANT_REQ, ResponseCodes.INVALID_VALUE.value, [GRANT_ID], CbsdStates.UNREGISTERED.value), + (HEARTBEAT_REQ, ResponseCodes.INVALID_VALUE.value, [GRANT_ID], CbsdStates.UNREGISTERED.value), + (RELINQUISHMENT_REQ, ResponseCodes.INVALID_VALUE.value, [GRANT_ID], CbsdStates.UNREGISTERED.value), + (DEREGISTRATION_REQ, ResponseCodes.INVALID_VALUE.value, [GRANT_ID], CbsdStates.UNREGISTERED.value), + (SPECTRUM_INQ_REQ, ResponseCodes.INVALID_VALUE.value, None, CbsdStates.UNREGISTERED.value), + (GRANT_REQ, ResponseCodes.INVALID_VALUE.value, None, CbsdStates.UNREGISTERED.value), + (HEARTBEAT_REQ, ResponseCodes.INVALID_VALUE.value, None, CbsdStates.UNREGISTERED.value), + (RELINQUISHMENT_REQ, ResponseCodes.INVALID_VALUE.value, None, CbsdStates.UNREGISTERED.value), ]) @responses.activate def test_cbsd_state_after_unsuccessful_response_code(self, request_type, response_code, response_data, expected_cbsd_sate): @@ -582,17 +567,13 @@ def _create_channel( cbsd: DBCbsd, low_frequency: int, high_frequency: int, - ) -> DBChannel: - channel = DBChannel( - cbsd=cbsd, - low_frequency=low_frequency, - high_frequency=high_frequency, - channel_type="some_type", - rule_applied="some_rule", - ) - self.session.add(channel) + ) -> None: + channels = [{ + "low_frequency": low_frequency, + "high_frequency": high_frequency, + }] + cbsd.channels = 
channels self.session.commit() - return channel def _create_db_requests_from_fixture(self, request_type, fixture, cbsd_state): db_requests = [] @@ -613,14 +594,18 @@ def _create_db_requests_from_fixture(self, request_type, fixture, cbsd_state): def _create_response_payload_from_db_requests(response_type_name, db_requests, sas_response_code=0, sas_response_data=None): response_payload = {response_type_name: []} for i, db_request in enumerate(db_requests): - cbsd_id = db_request.cbsd.cbsd_id or str(i) response_json = { "response": { "responseCode": sas_response_code, - }, "cbsdId": cbsd_id, + }, } + if sas_response_data: response_json["response"]["responseData"] = sas_response_data + else: + cbsd_id = db_request.cbsd.cbsd_id or str(i) + response_json["cbsdId"] = cbsd_id + if db_request.payload.get(GRANT_ID, ""): response_json[GRANT_ID] = db_request.payload.get(GRANT_ID) elif response_type_name == request_response[GRANT_REQ]: diff --git a/dp/cloud/python/magma/db_service/migrations/versions/020_remove_cpi_related_fields.py b/dp/cloud/python/magma/db_service/migrations/versions/020_remove_cpi_related_fields.py new file mode 100644 index 000000000000..6fc92e4ed611 --- /dev/null +++ b/dp/cloud/python/magma/db_service/migrations/versions/020_remove_cpi_related_fields.py @@ -0,0 +1,40 @@ +"""empty message + +Revision ID: 37bd12af762a +Revises: fa12c537244a +Create Date: 2022-09-06 21:29:41.287889 + +""" +import sqlalchemy as sa +from alembic import op +from sqlalchemy.dialects import postgresql + +# revision identifiers, used by Alembic. +revision = '37bd12af762a' +down_revision = 'fa12c537244a' +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + op.drop_column('cbsds', 'antenna_beamwidth_deg') + op.drop_column('cbsds', 'cpi_digital_signature') + op.drop_column('cbsds', 'horizontal_accuracy_m') + op.drop_column('cbsds', 'antenna_model') + op.drop_column('cbsds', 'eirp_capability_dbm_mhz') + op.drop_column('cbsds', 'antenna_azimuth_deg') + op.drop_column('cbsds', 'antenna_downtilt_deg') + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.add_column('cbsds', sa.Column('antenna_downtilt_deg', sa.INTEGER(), autoincrement=False, nullable=True)) + op.add_column('cbsds', sa.Column('antenna_azimuth_deg', sa.INTEGER(), autoincrement=False, nullable=True)) + op.add_column('cbsds', sa.Column('eirp_capability_dbm_mhz', sa.INTEGER(), autoincrement=False, nullable=True)) + op.add_column('cbsds', sa.Column('antenna_model', sa.VARCHAR(), autoincrement=False, nullable=True)) + op.add_column('cbsds', sa.Column('horizontal_accuracy_m', postgresql.DOUBLE_PRECISION(precision=53), autoincrement=False, nullable=True)) + op.add_column('cbsds', sa.Column('cpi_digital_signature', sa.TEXT(), autoincrement=False, nullable=True)) + op.add_column('cbsds', sa.Column('antenna_beamwidth_deg', sa.INTEGER(), autoincrement=False, nullable=True)) + # ### end Alembic commands ### diff --git a/dp/cloud/python/magma/db_service/migrations/versions/021_467ad00fbc83_remove_channels.py b/dp/cloud/python/magma/db_service/migrations/versions/021_467ad00fbc83_remove_channels.py new file mode 100644 index 000000000000..740ec4f9bb63 --- /dev/null +++ b/dp/cloud/python/magma/db_service/migrations/versions/021_467ad00fbc83_remove_channels.py @@ -0,0 +1,82 @@ +"""Remove channels + +Revision ID: 467ad00fbc83 +Revises: fa12c537244a +Create Date: 2022-09-07 12:29:28.162120 + +""" +import json + +import sqlalchemy as sa +from alembic import op +from sqlalchemy.dialects import postgresql + +# revision identifiers, used by Alembic. 
+revision = '467ad00fbc83' +down_revision = '37bd12af762a' +branch_labels = None +depends_on = None + + +def upgrade(): + """ Upgrade with data migration. """ + op.add_column('cbsds', sa.Column('channels', sa.JSON(), server_default=sa.text("'[]'::json"), nullable=False)) + + # migrate existing channels + conn = op.get_bind() + conn.execute( + "UPDATE channels SET max_eirp = %s WHERE max_eirp IS NULL", 37, + ) + for cbsd in conn.execute('SELECT cbsd_id FROM channels').fetchall(): + cbsd_id = cbsd[0] + channels = conn.execute( + "SELECT low_frequency, high_frequency, max_eirp " + "FROM channels WHERE cbsd_id = %s", + cbsd_id, + ) + channels_data = [dict(c) for c in channels.mappings().all()] + conn.execute("UPDATE cbsds SET channels = %s WHERE id = %s", (json.dumps(channels_data), cbsd_id)) + + op.drop_index('ix_channels_cbsd_id', table_name='channels') + op.drop_table('channels') + + +def downgrade(): + """ Downgrade with data migration. """ + op.create_table( + 'channels', + sa.Column('id', sa.INTEGER(), primary_key=True, autoincrement=True, nullable=False), + sa.Column('cbsd_id', sa.INTEGER(), autoincrement=False, nullable=True), + sa.Column('low_frequency', sa.BIGINT(), autoincrement=False, nullable=False), + sa.Column('high_frequency', sa.BIGINT(), autoincrement=False, nullable=False), + sa.Column('channel_type', sa.VARCHAR(), autoincrement=False, nullable=True), + sa.Column('rule_applied', sa.VARCHAR(), autoincrement=False, nullable=True), + sa.Column( + 'max_eirp', postgresql.DOUBLE_PRECISION(precision=53), autoincrement=False, nullable=True, + ), + sa.Column( + 'created_date', postgresql.TIMESTAMP(timezone=True), + server_default=sa.text('statement_timestamp()'), autoincrement=False, nullable=False, + ), + sa.Column( + 'updated_date', postgresql.TIMESTAMP(timezone=True), + server_default=sa.text('statement_timestamp()'), autoincrement=False, nullable=True, + ), + sa.ForeignKeyConstraint( + ['cbsd_id'], ['cbsds.id'], name='channels_cbsd_id_fkey', 
ondelete='CASCADE', + ), + sa.PrimaryKeyConstraint('id', name='channels_pkey'), + ) + op.create_index('ix_channels_cbsd_id', 'channels', ['cbsd_id'], unique=False) + + # migrate existing channels + conn = op.get_bind() + for cbsd_id, channels in conn.execute("SELECT id, channels FROM cbsds WHERE channels::text <> '[]'").fetchall(): + for channel in channels: + conn.execute( + "INSERT INTO channels (cbsd_id, low_frequency, high_frequency, max_eirp) " + "VALUES (%(cbsd_id)s, %(low_frequency)s, %(high_frequency)s, %(max_eirp)s)", + dict(cbsd_id=cbsd_id, **channel), + ) + + op.drop_column('cbsds', 'channels') diff --git a/dp/cloud/python/magma/db_service/models.py b/dp/cloud/python/magma/db_service/models.py index 8434d0224ba9..8d3b649fae15 100644 --- a/dp/cloud/python/magma/db_service/models.py +++ b/dp/cloud/python/magma/db_service/models.py @@ -21,7 +21,6 @@ ForeignKey, Integer, String, - Text, ) from sqlalchemy import text as sa_text from sqlalchemy.ext.declarative import declarative_base @@ -215,13 +214,6 @@ class DBCbsd(Base): longitude_deg = Column(Float) height_m = Column(Float) height_type = Column(String) - horizontal_accuracy_m = Column(Float) - antenna_azimuth_deg = Column(Integer) - antenna_downtilt_deg = Column(Integer) - antenna_beamwidth_deg = Column(Integer) - antenna_model = Column(String) - eirp_capability_dbm_mhz = Column(Integer) - cpi_digital_signature = Column(Text) indoor_deployment = Column(Boolean, nullable=False, server_default='false') is_deleted = Column(Boolean, nullable=False, server_default='false') should_deregister = Column(Boolean, nullable=False, server_default='false') @@ -243,7 +235,7 @@ class DBCbsd(Base): desired_state = relationship("DBCbsdState", foreign_keys=[desired_state_id]) requests = relationship("DBRequest", back_populates="cbsd") grants = relationship("DBGrant", back_populates="cbsd") - channels = relationship("DBChannel", back_populates="cbsd") + channels = Column(JSON, nullable=False, 
server_default=sa_text("'[]'::json")) def __repr__(self): """ @@ -258,34 +250,3 @@ def __repr__(self): f"cbsd_serial_number='{self.cbsd_serial_number}', " \ f"created_date='{self.created_date}' " \ f"updated_date='{self.updated_date}')>" - - -class DBChannel(Base): - """ - SAS DB Channel class - """ - __tablename__ = "channels" - id = Column(Integer, primary_key=True, autoincrement=True) - cbsd_id = Column(Integer, ForeignKey("cbsds.id", ondelete="CASCADE"), index=True) - low_frequency = Column(BigInteger, nullable=False) - high_frequency = Column(BigInteger, nullable=False) - channel_type = Column(String, nullable=False) - rule_applied = Column(String, nullable=False) - max_eirp = Column(Float) - created_date = Column( - DateTime(timezone=True), - nullable=False, server_default=now(), - ) - updated_date = Column( - DateTime(timezone=True), - server_default=now(), onupdate=now(), - ) - - cbsd = relationship("DBCbsd", back_populates="channels") - - def __repr__(self): - """ - Return string representation of DB object - """ - class_name = self.__class__.__name__ - return f"<{class_name}(id='{self.id}', cbsd_id='{self.cbsd_id}')>" diff --git a/dp/cloud/python/magma/db_service/tests/alembic_testcase.py b/dp/cloud/python/magma/db_service/tests/alembic_testcase.py index 18770b1418fd..e5f4f2bc227c 100644 --- a/dp/cloud/python/magma/db_service/tests/alembic_testcase.py +++ b/dp/cloud/python/magma/db_service/tests/alembic_testcase.py @@ -11,7 +11,7 @@ limitations under the License. 
""" import os -from typing import Any, Optional +from typing import Any, List, Optional import alembic.config import sqlalchemy as sa @@ -62,10 +62,16 @@ def get_table(self, table_name: str) -> sa.Table: return sa.Table(table_name, sa.MetaData(), autoload_with=self.engine) def has_column(self, table: sa.Table, column_name: str) -> bool: - for c in table.columns: - if c.name == column_name: - return True - return False + return self.has_columns(table=table, columns_names=[column_name]) + + def has_columns(self, table: sa.Table, columns_names: List[str]) -> bool: + # TODO add method for checking that none of the columns exists + existing = {c.name for c in table.columns} + + for c in columns_names: + if c not in existing: + return False + return True def upgrade(self, revision=None): revision = revision or self.up_revision diff --git a/dp/cloud/python/magma/db_service/tests/unit/test_00d77c9f7532.py b/dp/cloud/python/magma/db_service/tests/unit/test_00d77c9f7532.py index 5c9bbe118dce..ed5bb949b29d 100644 --- a/dp/cloud/python/magma/db_service/tests/unit/test_00d77c9f7532.py +++ b/dp/cloud/python/magma/db_service/tests/unit/test_00d77c9f7532.py @@ -60,8 +60,7 @@ def test_columns_present_post_upgrade(self): cbsds = self.get_table(CBSDS) # then - for col in NEW_COLUMNS: - self.assertTrue(self.has_column(cbsds, col)) + self.assertTrue(self.has_columns(cbsds, NEW_COLUMNS)) def test_default_values_post_upgrade(self): # given diff --git a/dp/cloud/python/magma/db_service/tests/unit/test_020_remove_cpi_related_fields.py b/dp/cloud/python/magma/db_service/tests/unit/test_020_remove_cpi_related_fields.py new file mode 100644 index 000000000000..237764e2eb8e --- /dev/null +++ b/dp/cloud/python/magma/db_service/tests/unit/test_020_remove_cpi_related_fields.py @@ -0,0 +1,42 @@ +""" +Copyright 2022 The Magma Authors. + +This source code is licensed under the BSD-style license found in the +LICENSE file in the root directory of this source tree. 
+ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" +from magma.db_service.tests.alembic_testcase import AlembicTestCase + +TABLE = 'cbsds' +COLUMNS = [ + 'antenna_beamwidth_deg', 'cpi_digital_signature', + 'horizontal_accuracy_m', 'antenna_model', 'eirp_capability_dbm_mhz', + 'antenna_azimuth_deg', 'antenna_downtilt_deg', +] + + +class TestRemoveCpiRelatedFields(AlembicTestCase): + down_revision = 'fa12c537244a' + up_revision = '37bd12af762a' + + def setUp(self) -> None: + super().setUp() + self.upgrade(self.down_revision) + + def test_upgrade(self): + self.upgrade() + table = self.get_table(TABLE) + has = any(self.has_column(table, c) for c in COLUMNS) + self.assertFalse(has) + + def test_columns_present_post_upgrade(self): + self.upgrade() + self.downgrade() + table = self.get_table(TABLE) + has = self.has_columns(table, COLUMNS) + self.assertTrue(has) diff --git a/dp/cloud/python/magma/db_service/tests/unit/test_021_remove_channels.py b/dp/cloud/python/magma/db_service/tests/unit/test_021_remove_channels.py new file mode 100644 index 000000000000..b3c5a4ebf016 --- /dev/null +++ b/dp/cloud/python/magma/db_service/tests/unit/test_021_remove_channels.py @@ -0,0 +1,161 @@ +""" +Copyright 2022 The Magma Authors. + +This source code is licensed under the BSD-style license found in the +LICENSE file in the root directory of this source tree. + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+""" +from magma.db_service.tests.alembic_testcase import AlembicTestCase +from parameterized import parameterized +from sqlalchemy import select +from sqlalchemy.exc import NoSuchTableError + +DOWN_REVISION = '37bd12af762a' +UP_REVISION = '467ad00fbc83' + +CBSD_STATES_TABLE = 'cbsd_states' +CBSDS_TABLE = 'cbsds' +CHANNELS_TABLE = 'channels' + +CBSDS_CHANNELS_COLUMN = 'channels' + +TEST_STATE_ID = 1 +CBSD_ID = 1 +CHANNEL_ID = 1 +CHANNEL_DATA = { + "low_frequency": 1, + "high_frequency": 2, + "max_eirp": 3.0, +} +INCOMPLETE_CHANNEL_DATA = { + "low_frequency": 1, + "high_frequency": 2, +} +CHANNEL_DATA_WITH_DEFAULT_MAX_EIRP = { + "low_frequency": 1, + "high_frequency": 2, + "max_eirp": 37, +} + + +class TestRemoveChannels(AlembicTestCase): + down_revision = DOWN_REVISION + up_revision = UP_REVISION + + def setUp(self) -> None: + super().setUp() + self.upgrade(self.down_revision) + + def _given_cbsd_created(self, **data): + cbsd_states = self.get_table(CBSD_STATES_TABLE) + self.given_resource_inserted(cbsd_states, id=TEST_STATE_ID, name='some_state') + + cbsds = self.get_table(CBSDS_TABLE) + self.given_resource_inserted(cbsds, **data) + + +class TestRemoveChannelsUpgrade(TestRemoveChannels): + def test_table_removed(self): + # Given + self.assertFalse(self.has_column(self.get_table(CBSDS_TABLE), CBSDS_CHANNELS_COLUMN)) + self.get_table(CHANNELS_TABLE) + + # When + self.upgrade() + + # Then + self.assertTrue(self.has_column(self.get_table(CBSDS_TABLE), CBSDS_CHANNELS_COLUMN)) + with self.assertRaises(NoSuchTableError): + self.get_table(CHANNELS_TABLE) + + def test_default_is_set(self): + # Given + self._given_cbsd_created(id=CBSD_ID, state_id=TEST_STATE_ID, desired_state_id=TEST_STATE_ID) + + # When + self.upgrade() + + # Then + cbsds = self.get_table(CBSDS_TABLE) + data_after = self.engine.execute(cbsds.select()).mappings().one().get(CBSDS_CHANNELS_COLUMN) + self.assertEqual(data_after, []) + + @parameterized.expand([ + (CHANNEL_DATA, CHANNEL_DATA), + 
(INCOMPLETE_CHANNEL_DATA, CHANNEL_DATA_WITH_DEFAULT_MAX_EIRP), + ]) + def test_data_migrated(self, channel_data, expected_channel_data): + # Given + self._given_cbsd_created(id=CBSD_ID, state_id=TEST_STATE_ID, desired_state_id=TEST_STATE_ID) + + channels = self.get_table(CHANNELS_TABLE) + self.given_resource_inserted( + channels, id=CHANNEL_ID, cbsd_id=CBSD_ID, channel_type='channel', rule_applied='rule', **channel_data, + ) + + # When + self.upgrade() + + # Then + cbsds = self.get_table(CBSDS_TABLE) + data_after = self.engine.execute(cbsds.select()).mappings().one().get(CBSDS_CHANNELS_COLUMN) + + self.assertEqual([expected_channel_data], data_after, 'Data was not migrated') + + +class TestRemoveChannelsDowngrade(TestRemoveChannels): + down_revision = DOWN_REVISION + up_revision = UP_REVISION + + def setUp(self) -> None: + super().setUp() + self.upgrade() + + def test_table_added(self): + # Given + self.assertTrue(self.has_column(self.get_table(CBSDS_TABLE), CBSDS_CHANNELS_COLUMN)) + with self.assertRaises(NoSuchTableError): + self.get_table(CHANNELS_TABLE) + + # When + self.downgrade() + + # Then + self.assertFalse(self.has_column(self.get_table(CBSDS_TABLE), CBSDS_CHANNELS_COLUMN)) + + channels = self.get_table(CHANNELS_TABLE) + self.has_columns( + channels, + ['id', 'cbsd_id', 'low_frequency', 'high_frequency', 'max_eirp', 'channel_type', 'rule_applied'], + ) + + def test_data_migrated(self): + # Given + self._given_cbsd_created( + id=CBSD_ID, state_id=TEST_STATE_ID, desired_state_id=TEST_STATE_ID, channels=[CHANNEL_DATA], + ) + + # When + self.downgrade() + + # Then + data_before = [dict(id=CHANNEL_ID, cbsd_id=CBSD_ID, **CHANNEL_DATA)] + + channels = self.get_table(CHANNELS_TABLE) + data_after = [ + dict(r) for r in self.engine.execute( + select( + channels.c.id, + channels.c.cbsd_id, + channels.c.low_frequency, + channels.c.high_frequency, + channels.c.max_eirp, + ), + ).mappings().all() + ] + self.assertEqual(data_before, data_after, 'Data was not migrated') 
diff --git a/dp/cloud/python/magma/fixtures/fake_responses/spectrum_inquiry_responses.py b/dp/cloud/python/magma/fixtures/fake_responses/spectrum_inquiry_responses.py index f12339032cb9..ccbc32470a9c 100644 --- a/dp/cloud/python/magma/fixtures/fake_responses/spectrum_inquiry_responses.py +++ b/dp/cloud/python/magma/fixtures/fake_responses/spectrum_inquiry_responses.py @@ -45,6 +45,27 @@ ], } +single_channel_for_one_cbsd_with_no_max_eirp = { + "spectrumInquiryResponse": [ + { + "response": { + "responseCode": 0, + }, + "cbsdId": "foo", + "availableChannel": [ + { + "frequencyRange": { + "lowFrequency": 1, + "highFrequency": 1, + }, + "channelType": "test", + "ruleApplied": "test", + }, + ], + }, + ], +} + two_channels_for_one_cbsd = { "spectrumInquiryResponse": [ { diff --git a/dp/cloud/python/magma/radio_controller/run.py b/dp/cloud/python/magma/radio_controller/run.py index 6e86839a0c94..bf3f7fc588db 100644 --- a/dp/cloud/python/magma/radio_controller/run.py +++ b/dp/cloud/python/magma/radio_controller/run.py @@ -11,81 +11,11 @@ limitations under the License. 
""" -import logging -from concurrent import futures -from signal import SIGTERM, signal - -import grpc -from apscheduler.schedulers.background import BackgroundScheduler -from apscheduler.triggers.interval import IntervalTrigger -from dp.protos.active_mode_pb2_grpc import ( - add_ActiveModeControllerServicer_to_server, -) -from magma.db_service.models import DBCbsdState, DBRequestType -from magma.db_service.session_manager import SessionManager -from magma.metricsd_client.client import get_metricsd_client, process_metrics -from magma.radio_controller.config import get_config -from magma.radio_controller.services.active_mode_controller.service import ( - ActiveModeControllerService, -) -from sqlalchemy import create_engine - -logging.basicConfig( - level=logging.DEBUG, - datefmt='%Y-%m-%d %H:%M:%S', - format='%(asctime)s %(levelname)-8s %(message)s', -) -logger = logging.getLogger("radio_controller.run") - def run(): - """ - Top-level function for radio controller - """ - logger.info("Starting grpc server") - config = get_config() - scheduler = BackgroundScheduler() - metricsd_client = get_metricsd_client() - scheduler.add_job( - process_metrics, - args=[metricsd_client, config.SERVICE_HOSTNAME, "radio_controller"], - trigger=IntervalTrigger( - seconds=config.METRICS_PROCESSING_INTERVAL_SEC, - ), - max_instances=1, - name="metrics_processing_job", - ) - scheduler.start() - - logger.info(f"grpc port is: {config.GRPC_PORT}") - db_engine = create_engine( - url=config.SQLALCHEMY_DB_URI, - encoding=config.SQLALCHEMY_DB_ENCODING, - echo=config.SQLALCHEMY_ECHO, - future=config.SQLALCHEMY_FUTURE, - pool_size=config.SQLALCHEMY_ENGINE_POOL_SIZE, - max_overflow=config.SQLALCHEMY_ENGINE_MAX_OVERFLOW, - ) - session_manager = SessionManager(db_engine) - with session_manager.session_scope() as session: - request_types = {req_type.name: req_type.id for req_type in session.query(DBRequestType).all()} - server = grpc.server(futures.ThreadPoolExecutor(max_workers=10)) - 
add_ActiveModeControllerServicer_to_server( - ActiveModeControllerService(session_manager=session_manager, request_types_map=request_types), server, - ) - server.add_insecure_port(f"[::]:{config.GRPC_PORT}") - server.start() - logger.info(f"GRPC Server started on port {config.GRPC_PORT}") - - def handle_sigterm(*_): - logger.info("Received shutdown signal") - all_rpcs_done_event = server.stop(30) - all_rpcs_done_event.wait(30) - logger.info("Shut down gracefully") - - signal(SIGTERM, handle_sigterm) - server.wait_for_termination() + # TODO Remove this file and all other radio controller files once DevOps part of RC removal is handled + pass if __name__ == "__main__": - run() + pass diff --git a/dp/cloud/python/magma/radio_controller/services/active_mode_controller/service.py b/dp/cloud/python/magma/radio_controller/services/active_mode_controller/service.py index 2b56c56666b0..c53b523268a0 100644 --- a/dp/cloud/python/magma/radio_controller/services/active_mode_controller/service.py +++ b/dp/cloud/python/magma/radio_controller/services/active_mode_controller/service.py @@ -38,13 +38,7 @@ from dp.protos.active_mode_pb2_grpc import ActiveModeControllerServicer from google.protobuf.empty_pb2 import Empty from google.protobuf.wrappers_pb2 import FloatValue -from magma.db_service.models import ( - DBCbsd, - DBChannel, - DBGrant, - DBRequest, - DBRequestType, -) +from magma.db_service.models import DBCbsd, DBGrant, DBRequest from magma.db_service.session_manager import Session, SessionManager from magma.mappings.cbsd_states import cbsd_state_mapping, grant_state_mapping from magma.radio_controller.metrics import ( @@ -227,7 +221,6 @@ def _list_cbsds(session: Session) -> State: options( joinedload(DBCbsd.state), joinedload(DBCbsd.desired_state), - joinedload(DBCbsd.channels), contains_eager(DBCbsd.grants). joinedload(DBGrant.state), ). @@ -289,10 +282,8 @@ def _build_cbsd(cbsd: DBCbsd) -> Cbsd: # Application may not need those to be sorted. 
# Applying ordering mostly for easier assertions in testing cbsd_db_grants = sorted(cbsd.grants, key=lambda x: x.id) - cbsd_db_channels = sorted(cbsd.channels, key=lambda x: x.id) - grants = [_build_grant(x) for x in cbsd_db_grants] - channels = [_build_channel(x) for x in cbsd_db_channels] + channels = [_build_channel(x) for x in cbsd.channels] last_seen = _to_timestamp(cbsd.last_seen) eirp_capabilities = _build_eirp_capabilities(cbsd) @@ -329,11 +320,11 @@ def _build_grant(grant: DBGrant) -> Grant: ) -def _build_channel(channel: DBChannel) -> Channel: +def _build_channel(channel: dict) -> Channel: return Channel( - low_frequency_hz=channel.low_frequency, - high_frequency_hz=channel.high_frequency, - max_eirp=_make_optional_float(channel.max_eirp), + low_frequency_hz=channel.get('low_frequency'), + high_frequency_hz=channel.get('high_frequency'), + max_eirp=_make_optional_float(channel.get('max_eirp')), ) diff --git a/dp/cloud/python/magma/radio_controller/tests/test_utils/db_cbsd_builder.py b/dp/cloud/python/magma/radio_controller/tests/test_utils/db_cbsd_builder.py index 2203f56143e2..5bd80362b624 100644 --- a/dp/cloud/python/magma/radio_controller/tests/test_utils/db_cbsd_builder.py +++ b/dp/cloud/python/magma/radio_controller/tests/test_utils/db_cbsd_builder.py @@ -16,7 +16,7 @@ from datetime import datetime from typing import List -from magma.db_service.models import DBCbsd, DBChannel, DBGrant, DBRequest +from magma.db_service.models import DBCbsd, DBGrant, DBRequest class DBCbsdBuilder: @@ -151,14 +151,16 @@ def with_channel( low: int, high: int, max_eirp: float = None, ) -> DBCbsdBuilder: - channel = DBChannel( - low_frequency=low, - high_frequency=high, - max_eirp=max_eirp, - channel_type='channel_type', - rule_applied='rule', - ) - self.cbsd.channels.append(channel) + if not self.cbsd.channels: + # Default is set on commit, so it might be None at this point. 
+ self.cbsd.channels = [] + + channel = { + "low_frequency": low, + "high_frequency": high, + "max_eirp": max_eirp, + } + self.cbsd.channels = self.cbsd.channels + [channel] return self def with_request(self, type_id: int, payload: str) -> DBCbsdBuilder: diff --git a/dp/protos/active_mode.proto b/dp/protos/active_mode.proto index c61b36021f17..66f3d8e2f7f1 100644 --- a/dp/protos/active_mode.proto +++ b/dp/protos/active_mode.proto @@ -15,7 +15,7 @@ syntax = "proto3"; import "google/protobuf/empty.proto"; import "google/protobuf/wrappers.proto"; -option go_package = "magma/dp/cloud/go/active_mode_controller/protos/active_mode"; +option go_package = "magma/dp/cloud/go/services/dp/active_mode_controller/protos/active_mode"; service ActiveModeController { rpc GetState(GetStateRequest) returns (State) {} diff --git a/dp/skaffold.yaml b/dp/skaffold.yaml index fb4f201498d4..cc14b443e808 100644 --- a/dp/skaffold.yaml +++ b/dp/skaffold.yaml @@ -11,10 +11,6 @@ build: useBuildkit: true concurrency: 1 artifacts: - - image: active-mode-controller - context: .. - docker: - dockerfile: dp/cloud/docker/go/active_mode_controller/Dockerfile - image: configuration-controller context: .. 
docker: @@ -70,8 +66,6 @@ deploy: namespace: default version: "0.1.0" artifactOverrides: - dp.active_mode_controller: - image: active-mode-controller dp.configuration_controller: image: configuration-controller dp.radio_controller: @@ -96,11 +90,6 @@ profiles: path: /deploy/helm/releases/0/setValueTemplates value: dp: - active_mode_controller: - extraEnv: - POLLING_INTERVAL_SEC: "'1'" - REQUEST_PROCESSING_INTERVAL_SEC: "'1'" - CBSD_INACTIVITY_TIMEOUT_SEC: "'3'" configuration_controller: extraEnv: APP_CONFIG: "TestConfig" @@ -136,6 +125,9 @@ profiles: spec: database: db: dp_test + - op: add + path: /deploy/helm/releases/2/valuesFiles/- + value: ./cloud/helm/dp/charts/domain-proxy/examples/orc8r_integration_tests.yaml - name: integration-tests-dev patches: - op: add @@ -185,7 +177,6 @@ profiles: - lte/** - orc8r/** ignore: - - dp/cloud/go/active_mode_controller/** - lte/cloud/helm/lte-orc8r/charts/** - orc8r/cloud/helm/orc8r/charts/** - "**/*.swp" @@ -304,8 +295,8 @@ profiles: - name: remote-push patches: - op: remove - path: /build/artifacts/4 # remove fake sas image + path: /build/artifacts/3 # remove fake sas image - op: remove - path: /build/artifacts/4 # remove fluentd-forward image + path: /build/artifacts/3 # remove fluentd-forward image - op: remove - path: /build/artifacts/4 # remove postgresql image + path: /build/artifacts/3 # remove postgresql image diff --git a/example/gateway/Makefile b/example/gateway/Makefile deleted file mode 100644 index a55f16f92328..000000000000 --- a/example/gateway/Makefile +++ /dev/null @@ -1,10 +0,0 @@ -.PHONY: all build run - -PIP_INSTALL := $(PYTHON_BUILD)/bin/pip3 install -q -U --cache-dir $(PIP_CACHE_HOME) - -build: - $(MAKE) -C ../../orc8r/gateway/python buildenv - $(PIP_INSTALL) -e python - -run: build - sudo service magma@magmad start diff --git a/example/gateway/Vagrantfile b/example/gateway/Vagrantfile deleted file mode 100644 index 1fc98ff15c75..000000000000 --- a/example/gateway/Vagrantfile +++ /dev/null @@ -1,36 
+0,0 @@ -# Copyright 2020 The Magma Authors. - -# This source code is licensed under the BSD-style license found in the -# LICENSE file in the root directory of this source tree. - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -VAGRANTFILE_API_VERSION = "2" -Vagrant.require_version ">=1.9.1" - -Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| - config.vm.box = "bento/ubuntu-16.04" - config.vm.synced_folder "../..", "/home/vagrant/magma" - config.vm.provider "virtualbox" do |v| - v.linked_clone = true - v.memory = 512 - v.cpus = 1 - end - - config.vm.define :example, primary: true do |example| - example.vm.hostname = "example-dev" - example.vm.network "private_network", ip: "192.168.30.10" - example.vm.provision "ansible" do |ansible| - ansible.host_key_checking = false - ansible.playbook = "deploy/example.dev.yml" - ansible.limit = "all" - ansible.verbose = true - end - end - -end diff --git a/example/gateway/ansible.cfg b/example/gateway/ansible.cfg deleted file mode 100644 index 0eb86914a655..000000000000 --- a/example/gateway/ansible.cfg +++ /dev/null @@ -1,10 +0,0 @@ -[defaults] -# In gather_facts when ansible-playbook starts, innore hardware facts, -# facts from facter and ohai, so gather_facts finish faster -# See https://raw.githubusercontent.com/ansible/ansible/devel/examples/ansible.cfg -gather_subset = !hardware,!facter,!ohai -callbacks_enabled = profile_tasks -roles_path = ../../orc8r/tools/ansible/roles - -[connection] -pipelining=True diff --git a/example/gateway/configs/control_proxy.yml b/example/gateway/configs/control_proxy.yml deleted file mode 100644 index 62ab9032c3fb..000000000000 --- a/example/gateway/configs/control_proxy.yml +++ /dev/null @@ -1,38 +0,0 @@ ---- 
-# -# Copyright 2020 The Magma Authors. - -# This source code is licensed under the BSD-style license found in the -# LICENSE file in the root directory of this source tree. - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# nghttpx config will be generated here and used -nghttpx_config_location: /var/tmp/nghttpx.conf - -# Location for certs -rootca_cert: /var/opt/magma/certs/rootCA.pem -gateway_cert: /var/opt/magma/certs/gateway.crt -gateway_key: /var/opt/magma/certs/gateway.key - -# Listening port of the proxy for local services. The port would be closed -# for the rest of the world. -local_port: 8443 - -# Cloud address for reaching out to the cloud. -cloud_address: controller.magma.test -cloud_port: 9443 - -bootstrap_address: bootstrapper-controller.magma.test -bootstrap_port: 9444 - -# Option to use nghttpx for proxying. If disabled, the individual -# services would establish the TLS connections themselves. 
-proxy_cloud_connections: True - -# Allows http_proxy usage if the environment variable is present -allow_http_proxy: True diff --git a/example/gateway/configs/gateway.mconfig b/example/gateway/configs/gateway.mconfig deleted file mode 100644 index 4fd78cbdaae0..000000000000 --- a/example/gateway/configs/gateway.mconfig +++ /dev/null @@ -1,21 +0,0 @@ -{ - "configs_by_key": { - "control_proxy": { - "@type": "type.googleapis.com/magma.mconfig.ControlProxy", - "logLevel": "INFO" - }, - "magmad": { - "@type": "type.googleapis.com/magma.mconfig.MagmaD", - "logLevel": "INFO", - "checkinInterval": 60, - "checkinTimeout": 10, - "autoupgradeEnabled": false, - "autoupgradePollInterval": 300, - "package_version": "0.0.0-0" - }, - "metricsd": { - "@type": "type.googleapis.com/magma.mconfig.MetricsD", - "logLevel": "INFO" - } - } -} diff --git a/example/gateway/configs/magmad.yml b/example/gateway/configs/magmad.yml deleted file mode 100644 index 9ab6f607f732..000000000000 --- a/example/gateway/configs/magmad.yml +++ /dev/null @@ -1,37 +0,0 @@ ---- -# List of services for magmad to control -magma_services: - - control_proxy - - hello - -# List of services that don't provide service303 interface -non_service303_services: - - control_proxy - -# Init system to use to control services -# Supported systems include: [systemd, runit, docker] -init_system: systemd - -# bootstrap_manager config -bootstrap_config: - # location of the challenge key - challenge_key: /var/opt/magma/certs/gw_challenge.key - -# Flags indicating the magmad features to be enabled -enable_config_streamer: True -enable_upgrade_manager: False -enable_network_monitor: False -enable_sync_rpc: False - -# Modules containing code generated for mconfig protobufs -mconfig_modules: - - orc8r.protos.mconfig.mconfigs_pb2 - -metricsd: - collect_interval: 60 # How frequently to collect metrics samples in seconds - sync_interval: 60 # How frequently to sync to cloud in seconds - grpc_timeout: 10 # Timeout in seconds - # List 
of services for metricsd to poll - services: - - magmad - - hello diff --git a/example/gateway/configs/service_registry.yml b/example/gateway/configs/service_registry.yml deleted file mode 100644 index ef7461eb16c3..000000000000 --- a/example/gateway/configs/service_registry.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- -services: - # NOTE: do NOT include dash(-) in your service name. Use underscore instead. - # Example service name that contains dash: hello-world-blah - # As we use "-" in nghttpx config to connect service name and hostname, - # "-" is used as a delimiter in dispatcher to parse out service names. - magmad: - ip_address: 127.0.0.1 - port: 50052 - control_proxy: - ip_address: 127.0.0.1 - port: 50053 - hello: - ip_address: 127.0.0.1 - port: 9093 diff --git a/example/gateway/deploy/example.dev.yml b/example/gateway/deploy/example.dev.yml deleted file mode 100644 index a088492a76fa..000000000000 --- a/example/gateway/deploy/example.dev.yml +++ /dev/null @@ -1,19 +0,0 @@ ---- -- name: Set up dev build environment on a local machine - hosts: example - become: yes - vars: - magma_root: /home/{{ ansible_user }}/magma - preburn: true - full_provision: true - - roles: - # Role for gateway development. It also incudes required - # roles like pkgrepo, gateway_services, and python_dev. - - role: gateway_dev - vars: - distribution: "xenial" - repo: "dev" - config_dir: "example/gateway/configs" - # Role for running the extra services for the example device. 
- - role: example_services diff --git a/example/gateway/deploy/hosts b/example/gateway/deploy/hosts deleted file mode 100644 index ce26bb2c9d2b..000000000000 --- a/example/gateway/deploy/hosts +++ /dev/null @@ -1,2 +0,0 @@ -[example] -example ansible_ssh_host=192.168.30.10 ansible_ssh_port=22 ansible_user=vagrant diff --git a/example/gateway/fabfile.py b/example/gateway/fabfile.py deleted file mode 100644 index 6e2457873aab..000000000000 --- a/example/gateway/fabfile.py +++ /dev/null @@ -1,22 +0,0 @@ -""" -Copyright 2020 The Magma Authors. - -This source code is licensed under the BSD-style license found in the -LICENSE file in the root directory of this source tree. - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -""" - -import sys - -sys.path.append('../../orc8r') -import tools.fab.dev_utils as dev_utils - - -def register_vm(): - """ Provisions the gateway vm with the cloud vm """ - dev_utils.register_generic_gateway('test', 'example') diff --git a/example/gateway/python/magmaexample/hello/main.py b/example/gateway/python/magmaexample/hello/main.py deleted file mode 100755 index 06c8d0699a20..000000000000 --- a/example/gateway/python/magmaexample/hello/main.py +++ /dev/null @@ -1,34 +0,0 @@ -""" -Copyright 2020 The Magma Authors. - -This source code is licensed under the BSD-style license found in the -LICENSE file in the root directory of this source tree. - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-""" - -from magma.common.service import MagmaService -from magmaexample.hello.rpc_servicer import HelloRpcServicer - - -def main(): - """ main() for hello service """ - service = MagmaService('hello', None) - - # Add all servicers to the server - hello_servicer = HelloRpcServicer() - hello_servicer.add_to_server(service.rpc_server) - - # Run the service loop - service.run() - - # Cleanup the service - service.close() - - -if __name__ == "__main__": - main() diff --git a/example/gateway/python/magmaexample/hello/metrics.py b/example/gateway/python/magmaexample/hello/metrics.py deleted file mode 100644 index 74aae9ad3726..000000000000 --- a/example/gateway/python/magmaexample/hello/metrics.py +++ /dev/null @@ -1,17 +0,0 @@ -""" -Copyright 2020 The Magma Authors. - -This source code is licensed under the BSD-style license found in the -LICENSE file in the root directory of this source tree. - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -""" - -from prometheus_client import Counter - -# Define a new counter, which can be incremented in the rpc servicer -NUM_REQUESTS = Counter('num_requests', 'Total requests') diff --git a/example/gateway/python/magmaexample/hello/rpc_servicer.py b/example/gateway/python/magmaexample/hello/rpc_servicer.py deleted file mode 100644 index b799f86528cf..000000000000 --- a/example/gateway/python/magmaexample/hello/rpc_servicer.py +++ /dev/null @@ -1,37 +0,0 @@ -""" -Copyright 2020 The Magma Authors. - -This source code is licensed under the BSD-style license found in the -LICENSE file in the root directory of this source tree. 
- -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -""" - -from magmaexample.hello import metrics -from protos import hello_pb2, hello_pb2_grpc - - -class HelloRpcServicer(hello_pb2_grpc.HelloServicer): - """ - gRPC based server for Hello service - """ - - def __init__(self): - pass - - def add_to_server(self, server): - """ - Add the servicer to a gRPC server - """ - hello_pb2_grpc.add_HelloServicer_to_server(self, server) - - def SayHello(self, request, context): - """ - Echo the message as the response - """ - metrics.NUM_REQUESTS.inc() - return hello_pb2.HelloReply(greeting=request.greeting) diff --git a/example/gateway/python/setup.py b/example/gateway/python/setup.py deleted file mode 100644 index 6dba540ef62b..000000000000 --- a/example/gateway/python/setup.py +++ /dev/null @@ -1,20 +0,0 @@ -""" -Copyright 2020 The Magma Authors. - -This source code is licensed under the BSD-style license found in the -LICENSE file in the root directory of this source tree. - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -""" - -from setuptools import setup - -setup( - name='magmaexample', - version='0.1', - packages=['magmaexample.hello'], -) diff --git a/feg/cloud/helm/feg-orc8r/Chart.yaml b/feg/cloud/helm/feg-orc8r/Chart.yaml index f41c269246aa..aee4399be5b3 100644 --- a/feg/cloud/helm/feg-orc8r/Chart.yaml +++ b/feg/cloud/helm/feg-orc8r/Chart.yaml @@ -10,10 +10,10 @@ # limitations under the License. 
apiVersion: v2 -appVersion: "1.0" +appVersion: "1.8.0" description: A Helm chart for magma orchestrator's feg module name: feg-orc8r -version: 0.2.5 +version: 1.8.0 engine: gotpl sources: - https://github.com/magma/magma diff --git a/feg/gateway/Makefile b/feg/gateway/Makefile index 32096da88017..57eba6c2eba1 100644 --- a/feg/gateway/Makefile +++ b/feg/gateway/Makefile @@ -30,19 +30,13 @@ test: gotestsum --junitfile /tmp/test-results/aaa-eap.xml --packages magma/feg/gateway/services/eap/... magma/feg/gateway/services/aaa/... -- -tags link_local_service,with_builtin_radius go test -tags cli_test magma/feg/gateway/tools/... -buildenv: stop +buildenv: + sudo service magma@* stop PROTO_LIST="orc8r_protos feg_protos lte_protos" $(MAKE) -C $(MAGMA_ROOT)/orc8r/gateway/python $@ run: buildenv build sudo service magma@magmad start -restart: - sudo service magma@* stop - sudo service magma@magmad start - -stop: - sudo service magma@* stop - clean: $(MAKE) -C $(MAGMA_ROOT)/lte/gateway/python $@ go clean ./... 
diff --git a/feg/gateway/services/envoy_controller/BUILD.bazel b/feg/gateway/services/envoy_controller/BUILD.bazel index 2f54bc719cf7..d9eeee910ff7 100644 --- a/feg/gateway/services/envoy_controller/BUILD.bazel +++ b/feg/gateway/services/envoy_controller/BUILD.bazel @@ -29,5 +29,5 @@ go_library( go_binary( name = "envoy_controller", embed = [":envoy_controller_lib"], - visibility = ["//visibility:private"], + visibility = ["//lte/gateway/release:__pkg__"], ) diff --git a/lte/cloud/go/protos/oai/s1ap_state.pb.go b/lte/cloud/go/protos/oai/s1ap_state.pb.go index d5a51f8fb78f..257d0646e164 100644 --- a/lte/cloud/go/protos/oai/s1ap_state.pb.go +++ b/lte/cloud/go/protos/oai/s1ap_state.pb.go @@ -20,6 +20,61 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +type S1ApUeState int32 + +const ( + S1ApUeState_S1AP_UE_INVALID_STATE S1ApUeState = 0 + S1ApUeState_S1AP_UE_WAITING_ICSR S1ApUeState = 1 ///< Waiting for Initial Context Setup Response + S1ApUeState_S1AP_UE_HANDOVER S1ApUeState = 2 ///< Handover procedure triggered + S1ApUeState_S1AP_UE_CONNECTED S1ApUeState = 3 ///< UE context ready + S1ApUeState_S1AP_UE_WAITING_CRC S1ApUeState = 4 /// UE Context release Procedure initiated , waiting for +) + +// Enum value maps for S1ApUeState. 
+var ( + S1ApUeState_name = map[int32]string{ + 0: "S1AP_UE_INVALID_STATE", + 1: "S1AP_UE_WAITING_ICSR", + 2: "S1AP_UE_HANDOVER", + 3: "S1AP_UE_CONNECTED", + 4: "S1AP_UE_WAITING_CRC", + } + S1ApUeState_value = map[string]int32{ + "S1AP_UE_INVALID_STATE": 0, + "S1AP_UE_WAITING_ICSR": 1, + "S1AP_UE_HANDOVER": 2, + "S1AP_UE_CONNECTED": 3, + "S1AP_UE_WAITING_CRC": 4, + } +) + +func (x S1ApUeState) Enum() *S1ApUeState { + p := new(S1ApUeState) + *p = x + return p +} + +func (x S1ApUeState) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (S1ApUeState) Descriptor() protoreflect.EnumDescriptor { + return file_lte_protos_oai_s1ap_state_proto_enumTypes[0].Descriptor() +} + +func (S1ApUeState) Type() protoreflect.EnumType { + return &file_lte_protos_oai_s1ap_state_proto_enumTypes[0] +} + +func (x S1ApUeState) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use S1ApUeState.Descriptor instead. +func (S1ApUeState) EnumDescriptor() ([]byte, []int) { + return file_lte_protos_oai_s1ap_state_proto_rawDescGZIP(), []int{0} +} + type S1ApTimer struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -204,6 +259,124 @@ func (x *SupportedTaList) GetSupportedTaiItems() []*SupportedTaiItems { return nil } +type ERabAdmittedItem struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ERabId uint32 `protobuf:"varint,1,opt,name=e_rab_id,json=eRabId,proto3" json:"e_rab_id,omitempty"` + TransportLayerAddress []byte `protobuf:"bytes,2,opt,name=transport_layer_address,json=transportLayerAddress,proto3" json:"transport_layer_address,omitempty"` + GtpTeid uint32 `protobuf:"varint,3,opt,name=gtp_teid,json=gtpTeid,proto3" json:"gtp_teid,omitempty"` +} + +func (x *ERabAdmittedItem) Reset() { + *x = ERabAdmittedItem{} + if protoimpl.UnsafeEnabled { + mi := &file_lte_protos_oai_s1ap_state_proto_msgTypes[3] + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ERabAdmittedItem) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ERabAdmittedItem) ProtoMessage() {} + +func (x *ERabAdmittedItem) ProtoReflect() protoreflect.Message { + mi := &file_lte_protos_oai_s1ap_state_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ERabAdmittedItem.ProtoReflect.Descriptor instead. +func (*ERabAdmittedItem) Descriptor() ([]byte, []int) { + return file_lte_protos_oai_s1ap_state_proto_rawDescGZIP(), []int{3} +} + +func (x *ERabAdmittedItem) GetERabId() uint32 { + if x != nil { + return x.ERabId + } + return 0 +} + +func (x *ERabAdmittedItem) GetTransportLayerAddress() []byte { + if x != nil { + return x.TransportLayerAddress + } + return nil +} + +func (x *ERabAdmittedItem) GetGtpTeid() uint32 { + if x != nil { + return x.GtpTeid + } + return 0 +} + +type ERabAdmittedList struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + NoOfItems uint32 `protobuf:"varint,1,opt,name=no_of_items,json=noOfItems,proto3" json:"no_of_items,omitempty"` + Item []*ERabAdmittedItem `protobuf:"bytes,2,rep,name=item,proto3" json:"item,omitempty"` +} + +func (x *ERabAdmittedList) Reset() { + *x = ERabAdmittedList{} + if protoimpl.UnsafeEnabled { + mi := &file_lte_protos_oai_s1ap_state_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ERabAdmittedList) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ERabAdmittedList) ProtoMessage() {} + +func (x *ERabAdmittedList) ProtoReflect() protoreflect.Message { + mi := &file_lte_protos_oai_s1ap_state_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + 
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ERabAdmittedList.ProtoReflect.Descriptor instead. +func (*ERabAdmittedList) Descriptor() ([]byte, []int) { + return file_lte_protos_oai_s1ap_state_proto_rawDescGZIP(), []int{4} +} + +func (x *ERabAdmittedList) GetNoOfItems() uint32 { + if x != nil { + return x.NoOfItems + } + return 0 +} + +func (x *ERabAdmittedList) GetItem() []*ERabAdmittedItem { + if x != nil { + return x.Item + } + return nil +} + type EnbDescription struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -229,7 +402,7 @@ type EnbDescription struct { func (x *EnbDescription) Reset() { *x = EnbDescription{} if protoimpl.UnsafeEnabled { - mi := &file_lte_protos_oai_s1ap_state_proto_msgTypes[3] + mi := &file_lte_protos_oai_s1ap_state_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -242,7 +415,7 @@ func (x *EnbDescription) String() string { func (*EnbDescription) ProtoMessage() {} func (x *EnbDescription) ProtoReflect() protoreflect.Message { - mi := &file_lte_protos_oai_s1ap_state_proto_msgTypes[3] + mi := &file_lte_protos_oai_s1ap_state_proto_msgTypes[5] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -255,7 +428,7 @@ func (x *EnbDescription) ProtoReflect() protoreflect.Message { // Deprecated: Use EnbDescription.ProtoReflect.Descriptor instead. func (*EnbDescription) Descriptor() ([]byte, []int) { - return file_lte_protos_oai_s1ap_state_proto_rawDescGZIP(), []int{3} + return file_lte_protos_oai_s1ap_state_proto_rawDescGZIP(), []int{5} } func (x *EnbDescription) GetEnbId() uint32 { @@ -362,6 +535,7 @@ type UeDescription struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + // Deprecated: Do not use. 
S1UeState int32 `protobuf:"varint,2,opt,name=s1_ue_state,json=s1UeState,proto3" json:"s1_ue_state,omitempty"` // enum s1_ue_state_s EnbUeS1ApId uint32 `protobuf:"varint,3,opt,name=enb_ue_s1ap_id,json=enbUeS1apId,proto3" json:"enb_ue_s1ap_id,omitempty"` // enb_ue_s1ap_id_t MmeUeS1ApId uint32 `protobuf:"varint,4,opt,name=mme_ue_s1ap_id,json=mmeUeS1apId,proto3" json:"mme_ue_s1ap_id,omitempty"` // mme_ue_s1ap_id_t @@ -370,12 +544,14 @@ type UeDescription struct { S1ApUeContextRelTimer *S1ApTimer `protobuf:"bytes,7,opt,name=s1ap_ue_context_rel_timer,json=s1apUeContextRelTimer,proto3" json:"s1ap_ue_context_rel_timer,omitempty"` // struct s1ap_timer_t SctpAssocId uint32 `protobuf:"varint,8,opt,name=sctp_assoc_id,json=sctpAssocId,proto3" json:"sctp_assoc_id,omitempty"` // sctp_assoc_id_t S1ApHandoverState *S1ApHandoverState `protobuf:"bytes,9,opt,name=s1ap_handover_state,json=s1apHandoverState,proto3" json:"s1ap_handover_state,omitempty"` // s1ap_handover_state_t + CompS1ApId uint64 `protobuf:"varint,10,opt,name=comp_s1ap_id,json=compS1apId,proto3" json:"comp_s1ap_id,omitempty"` // sctp_assoc_id & enb_ue_s1ap_id + S1ApUeState S1ApUeState `protobuf:"varint,11,opt,name=s1ap_ue_state,json=s1apUeState,proto3,enum=magma.lte.oai.S1ApUeState" json:"s1ap_ue_state,omitempty"` } func (x *UeDescription) Reset() { *x = UeDescription{} if protoimpl.UnsafeEnabled { - mi := &file_lte_protos_oai_s1ap_state_proto_msgTypes[4] + mi := &file_lte_protos_oai_s1ap_state_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -388,7 +564,7 @@ func (x *UeDescription) String() string { func (*UeDescription) ProtoMessage() {} func (x *UeDescription) ProtoReflect() protoreflect.Message { - mi := &file_lte_protos_oai_s1ap_state_proto_msgTypes[4] + mi := &file_lte_protos_oai_s1ap_state_proto_msgTypes[6] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -401,9 +577,10 @@ func 
(x *UeDescription) ProtoReflect() protoreflect.Message { // Deprecated: Use UeDescription.ProtoReflect.Descriptor instead. func (*UeDescription) Descriptor() ([]byte, []int) { - return file_lte_protos_oai_s1ap_state_proto_rawDescGZIP(), []int{4} + return file_lte_protos_oai_s1ap_state_proto_rawDescGZIP(), []int{6} } +// Deprecated: Do not use. func (x *UeDescription) GetS1UeState() int32 { if x != nil { return x.S1UeState @@ -460,6 +637,20 @@ func (x *UeDescription) GetS1ApHandoverState() *S1ApHandoverState { return nil } +func (x *UeDescription) GetCompS1ApId() uint64 { + if x != nil { + return x.CompS1ApId + } + return 0 +} + +func (x *UeDescription) GetS1ApUeState() S1ApUeState { + if x != nil { + return x.S1ApUeState + } + return S1ApUeState_S1AP_UE_INVALID_STATE +} + type S1ApState struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -473,7 +664,7 @@ type S1ApState struct { func (x *S1ApState) Reset() { *x = S1ApState{} if protoimpl.UnsafeEnabled { - mi := &file_lte_protos_oai_s1ap_state_proto_msgTypes[5] + mi := &file_lte_protos_oai_s1ap_state_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -486,7 +677,7 @@ func (x *S1ApState) String() string { func (*S1ApState) ProtoMessage() {} func (x *S1ApState) ProtoReflect() protoreflect.Message { - mi := &file_lte_protos_oai_s1ap_state_proto_msgTypes[5] + mi := &file_lte_protos_oai_s1ap_state_proto_msgTypes[7] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -499,7 +690,7 @@ func (x *S1ApState) ProtoReflect() protoreflect.Message { // Deprecated: Use S1ApState.ProtoReflect.Descriptor instead. 
func (*S1ApState) Descriptor() ([]byte, []int) { - return file_lte_protos_oai_s1ap_state_proto_rawDescGZIP(), []int{5} + return file_lte_protos_oai_s1ap_state_proto_rawDescGZIP(), []int{7} } func (x *S1ApState) GetEnbs() map[uint32]*EnbDescription { @@ -536,7 +727,7 @@ type S1ApImsiMap struct { func (x *S1ApImsiMap) Reset() { *x = S1ApImsiMap{} if protoimpl.UnsafeEnabled { - mi := &file_lte_protos_oai_s1ap_state_proto_msgTypes[6] + mi := &file_lte_protos_oai_s1ap_state_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -549,7 +740,7 @@ func (x *S1ApImsiMap) String() string { func (*S1ApImsiMap) ProtoMessage() {} func (x *S1ApImsiMap) ProtoReflect() protoreflect.Message { - mi := &file_lte_protos_oai_s1ap_state_proto_msgTypes[6] + mi := &file_lte_protos_oai_s1ap_state_proto_msgTypes[8] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -562,7 +753,7 @@ func (x *S1ApImsiMap) ProtoReflect() protoreflect.Message { // Deprecated: Use S1ApImsiMap.ProtoReflect.Descriptor instead. func (*S1ApImsiMap) Descriptor() ([]byte, []int) { - return file_lte_protos_oai_s1ap_state_proto_rawDescGZIP(), []int{6} + return file_lte_protos_oai_s1ap_state_proto_rawDescGZIP(), []int{8} } // Deprecated: Do not use. 
@@ -585,18 +776,22 @@ type S1ApHandoverState struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - MmeUeS1ApId uint32 `protobuf:"varint,1,opt,name=mme_ue_s1ap_id,json=mmeUeS1apId,proto3" json:"mme_ue_s1ap_id,omitempty"` - SourceEnbId uint32 `protobuf:"varint,2,opt,name=source_enb_id,json=sourceEnbId,proto3" json:"source_enb_id,omitempty"` - TargetEnbId uint32 `protobuf:"varint,3,opt,name=target_enb_id,json=targetEnbId,proto3" json:"target_enb_id,omitempty"` - TargetEnbUeS1ApId uint32 `protobuf:"varint,4,opt,name=target_enb_ue_s1ap_id,json=targetEnbUeS1apId,proto3" json:"target_enb_ue_s1ap_id,omitempty"` // enb_ue_s1ap_id_t - TargetSctpStreamRecv uint32 `protobuf:"varint,5,opt,name=target_sctp_stream_recv,json=targetSctpStreamRecv,proto3" json:"target_sctp_stream_recv,omitempty"` // sctp_stream_id_t - TargetSctpStreamSend uint32 `protobuf:"varint,6,opt,name=target_sctp_stream_send,json=targetSctpStreamSend,proto3" json:"target_sctp_stream_send,omitempty"` // sctp_stream_id_t + MmeUeS1ApId uint32 `protobuf:"varint,1,opt,name=mme_ue_s1ap_id,json=mmeUeS1apId,proto3" json:"mme_ue_s1ap_id,omitempty"` + SourceEnbId uint32 `protobuf:"varint,2,opt,name=source_enb_id,json=sourceEnbId,proto3" json:"source_enb_id,omitempty"` + TargetEnbId uint32 `protobuf:"varint,3,opt,name=target_enb_id,json=targetEnbId,proto3" json:"target_enb_id,omitempty"` + TargetEnbUeS1ApId uint32 `protobuf:"varint,4,opt,name=target_enb_ue_s1ap_id,json=targetEnbUeS1apId,proto3" json:"target_enb_ue_s1ap_id,omitempty"` // enb_ue_s1ap_id_t + TargetSctpStreamRecv uint32 `protobuf:"varint,5,opt,name=target_sctp_stream_recv,json=targetSctpStreamRecv,proto3" json:"target_sctp_stream_recv,omitempty"` // sctp_stream_id_t + TargetSctpStreamSend uint32 `protobuf:"varint,6,opt,name=target_sctp_stream_send,json=targetSctpStreamSend,proto3" json:"target_sctp_stream_send,omitempty"` // sctp_stream_id_t + SourceEnbUeS1ApId uint32 
`protobuf:"varint,7,opt,name=source_enb_ue_s1ap_id,json=sourceEnbUeS1apId,proto3" json:"source_enb_ue_s1ap_id,omitempty"` + SourceSctpStreamRecv uint32 `protobuf:"varint,8,opt,name=source_sctp_stream_recv,json=sourceSctpStreamRecv,proto3" json:"source_sctp_stream_recv,omitempty"` + SourceSctpStreamSend uint32 `protobuf:"varint,9,opt,name=source_sctp_stream_send,json=sourceSctpStreamSend,proto3" json:"source_sctp_stream_send,omitempty"` + ERabAdmittedList *ERabAdmittedList `protobuf:"bytes,10,opt,name=e_rab_admitted_list,json=eRabAdmittedList,proto3" json:"e_rab_admitted_list,omitempty"` } func (x *S1ApHandoverState) Reset() { *x = S1ApHandoverState{} if protoimpl.UnsafeEnabled { - mi := &file_lte_protos_oai_s1ap_state_proto_msgTypes[7] + mi := &file_lte_protos_oai_s1ap_state_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -609,7 +804,7 @@ func (x *S1ApHandoverState) String() string { func (*S1ApHandoverState) ProtoMessage() {} func (x *S1ApHandoverState) ProtoReflect() protoreflect.Message { - mi := &file_lte_protos_oai_s1ap_state_proto_msgTypes[7] + mi := &file_lte_protos_oai_s1ap_state_proto_msgTypes[9] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -622,7 +817,7 @@ func (x *S1ApHandoverState) ProtoReflect() protoreflect.Message { // Deprecated: Use S1ApHandoverState.ProtoReflect.Descriptor instead. 
func (*S1ApHandoverState) Descriptor() ([]byte, []int) { - return file_lte_protos_oai_s1ap_state_proto_rawDescGZIP(), []int{7} + return file_lte_protos_oai_s1ap_state_proto_rawDescGZIP(), []int{9} } func (x *S1ApHandoverState) GetMmeUeS1ApId() uint32 { @@ -667,6 +862,34 @@ func (x *S1ApHandoverState) GetTargetSctpStreamSend() uint32 { return 0 } +func (x *S1ApHandoverState) GetSourceEnbUeS1ApId() uint32 { + if x != nil { + return x.SourceEnbUeS1ApId + } + return 0 +} + +func (x *S1ApHandoverState) GetSourceSctpStreamRecv() uint32 { + if x != nil { + return x.SourceSctpStreamRecv + } + return 0 +} + +func (x *S1ApHandoverState) GetSourceSctpStreamSend() uint32 { + if x != nil { + return x.SourceSctpStreamSend + } + return 0 +} + +func (x *S1ApHandoverState) GetERabAdmittedList() *ERabAdmittedList { + if x != nil { + return x.ERabAdmittedList + } + return nil +} + var File_lte_protos_oai_s1ap_state_proto protoreflect.FileDescriptor var file_lte_protos_oai_s1ap_state_proto_rawDesc = []byte{ @@ -692,78 +915,99 @@ var file_lte_protos_oai_s1ap_state_proto_rawDesc = []byte{ 0x20, 0x2e, 0x6d, 0x61, 0x67, 0x6d, 0x61, 0x2e, 0x6c, 0x74, 0x65, 0x2e, 0x6f, 0x61, 0x69, 0x2e, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x54, 0x61, 0x69, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x52, 0x11, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x54, 0x61, 0x69, 0x49, - 0x74, 0x65, 0x6d, 0x73, 0x22, 0xdd, 0x05, 0x0a, 0x0e, 0x45, 0x6e, 0x62, 0x44, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x15, 0x0a, 0x06, 0x65, 0x6e, 0x62, 0x5f, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x65, 0x6e, 0x62, 0x49, 0x64, 0x12, 0x19, - 0x0a, 0x08, 0x73, 0x31, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x07, 0x73, 0x31, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x65, 0x6e, 0x62, - 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x65, 0x6e, 0x62, - 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2c, 
0x0a, 0x12, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, - 0x70, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x72, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, - 0x52, 0x10, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x50, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x44, - 0x72, 0x78, 0x12, 0x28, 0x0a, 0x10, 0x6e, 0x62, 0x5f, 0x75, 0x65, 0x5f, 0x61, 0x73, 0x73, 0x6f, - 0x63, 0x69, 0x61, 0x74, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0e, 0x6e, 0x62, - 0x55, 0x65, 0x41, 0x73, 0x73, 0x6f, 0x63, 0x69, 0x61, 0x74, 0x65, 0x64, 0x12, 0x22, 0x0a, 0x0d, - 0x73, 0x63, 0x74, 0x70, 0x5f, 0x61, 0x73, 0x73, 0x6f, 0x63, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x73, 0x63, 0x74, 0x70, 0x41, 0x73, 0x73, 0x6f, 0x63, 0x49, 0x64, - 0x12, 0x28, 0x0a, 0x10, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x73, 0x63, 0x74, 0x70, 0x5f, 0x73, 0x74, - 0x72, 0x65, 0x61, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0e, 0x6e, 0x65, 0x78, 0x74, - 0x53, 0x63, 0x74, 0x70, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x1c, 0x0a, 0x09, 0x69, 0x6e, - 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x69, - 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x12, 0x1e, 0x0a, 0x0a, 0x6f, 0x75, 0x74, 0x73, - 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x6f, 0x75, - 0x74, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x12, 0x43, 0x0a, 0x06, 0x75, 0x65, 0x5f, 0x69, - 0x64, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x6d, 0x61, 0x67, 0x6d, 0x61, - 0x2e, 0x6c, 0x74, 0x65, 0x2e, 0x6f, 0x61, 0x69, 0x2e, 0x45, 0x6e, 0x62, 0x44, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x55, 0x65, 0x49, 0x64, 0x73, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x42, 0x02, 0x18, 0x01, 0x52, 0x05, 0x75, 0x65, 0x49, 0x64, 0x73, 0x12, 0x4a, 0x0a, - 0x11, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x61, 0x5f, 0x6c, 0x69, - 0x73, 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x6d, 
0x61, 0x67, 0x6d, 0x61, - 0x2e, 0x6c, 0x74, 0x65, 0x2e, 0x6f, 0x61, 0x69, 0x2e, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, - 0x65, 0x64, 0x54, 0x61, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x0f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, - 0x74, 0x65, 0x64, 0x54, 0x61, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x22, 0x0a, 0x0d, 0x72, 0x61, 0x6e, - 0x5f, 0x63, 0x70, 0x5f, 0x69, 0x70, 0x61, 0x64, 0x64, 0x72, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x0b, 0x72, 0x61, 0x6e, 0x43, 0x70, 0x49, 0x70, 0x61, 0x64, 0x64, 0x72, 0x12, 0x27, 0x0a, - 0x10, 0x72, 0x61, 0x6e, 0x5f, 0x63, 0x70, 0x5f, 0x69, 0x70, 0x61, 0x64, 0x64, 0x72, 0x5f, 0x73, - 0x7a, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0d, 0x72, 0x61, 0x6e, 0x43, 0x70, 0x49, 0x70, - 0x61, 0x64, 0x64, 0x72, 0x53, 0x7a, 0x12, 0x46, 0x0a, 0x09, 0x75, 0x65, 0x5f, 0x69, 0x64, 0x5f, - 0x6d, 0x61, 0x70, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x6d, 0x61, 0x67, 0x6d, - 0x61, 0x2e, 0x6c, 0x74, 0x65, 0x2e, 0x6f, 0x61, 0x69, 0x2e, 0x45, 0x6e, 0x62, 0x44, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x55, 0x65, 0x49, 0x64, 0x4d, 0x61, 0x70, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x75, 0x65, 0x49, 0x64, 0x4d, 0x61, 0x70, 0x1a, 0x38, - 0x0a, 0x0a, 0x55, 0x65, 0x49, 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, - 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3a, 0x0a, 0x0c, 0x55, 0x65, 0x49, 0x64, - 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x3a, 0x02, 0x38, 0x01, 0x22, 0x97, 0x03, 0x0a, 0x0d, 0x55, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 
0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0b, 0x73, 0x31, 0x5f, 0x75, 0x65, 0x5f, - 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x73, 0x31, 0x55, - 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x23, 0x0a, 0x0e, 0x65, 0x6e, 0x62, 0x5f, 0x75, 0x65, - 0x5f, 0x73, 0x31, 0x61, 0x70, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, - 0x65, 0x6e, 0x62, 0x55, 0x65, 0x53, 0x31, 0x61, 0x70, 0x49, 0x64, 0x12, 0x23, 0x0a, 0x0e, 0x6d, - 0x6d, 0x65, 0x5f, 0x75, 0x65, 0x5f, 0x73, 0x31, 0x61, 0x70, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x6d, 0x6d, 0x65, 0x55, 0x65, 0x53, 0x31, 0x61, 0x70, 0x49, 0x64, - 0x12, 0x28, 0x0a, 0x10, 0x73, 0x63, 0x74, 0x70, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, - 0x72, 0x65, 0x63, 0x76, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0e, 0x73, 0x63, 0x74, 0x70, - 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x63, 0x76, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x63, - 0x74, 0x70, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x73, 0x65, 0x6e, 0x64, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0e, 0x73, 0x63, 0x74, 0x70, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, - 0x53, 0x65, 0x6e, 0x64, 0x12, 0x52, 0x0a, 0x19, 0x73, 0x31, 0x61, 0x70, 0x5f, 0x75, 0x65, 0x5f, - 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x72, 0x65, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, - 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6d, 0x61, 0x67, 0x6d, 0x61, 0x2e, - 0x6c, 0x74, 0x65, 0x2e, 0x6f, 0x61, 0x69, 0x2e, 0x53, 0x31, 0x61, 0x70, 0x54, 0x69, 0x6d, 0x65, - 0x72, 0x52, 0x15, 0x73, 0x31, 0x61, 0x70, 0x55, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, - 0x52, 0x65, 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x72, 0x12, 0x22, 0x0a, 0x0d, 0x73, 0x63, 0x74, 0x70, - 0x5f, 0x61, 0x73, 0x73, 0x6f, 0x63, 0x5f, 0x69, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, 0x52, - 0x0b, 0x73, 0x63, 0x74, 0x70, 0x41, 0x73, 0x73, 0x6f, 0x63, 0x49, 0x64, 0x12, 0x50, 0x0a, 0x13, - 0x73, 0x31, 0x61, 0x70, 0x5f, 0x68, 0x61, 0x6e, 
0x64, 0x6f, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, - 0x61, 0x74, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x6d, 0x61, 0x67, 0x6d, - 0x61, 0x2e, 0x6c, 0x74, 0x65, 0x2e, 0x6f, 0x61, 0x69, 0x2e, 0x53, 0x31, 0x61, 0x70, 0x48, 0x61, - 0x6e, 0x64, 0x6f, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x11, 0x73, 0x31, 0x61, - 0x70, 0x48, 0x61, 0x6e, 0x64, 0x6f, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x22, 0xcb, + 0x74, 0x65, 0x6d, 0x73, 0x22, 0x7f, 0x0a, 0x10, 0x45, 0x52, 0x61, 0x62, 0x41, 0x64, 0x6d, 0x69, + 0x74, 0x74, 0x65, 0x64, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x18, 0x0a, 0x08, 0x65, 0x5f, 0x72, 0x61, + 0x62, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x65, 0x52, 0x61, 0x62, + 0x49, 0x64, 0x12, 0x36, 0x0a, 0x17, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, + 0x6c, 0x61, 0x79, 0x65, 0x72, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x15, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x4c, 0x61, + 0x79, 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x67, 0x74, + 0x70, 0x5f, 0x74, 0x65, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x67, 0x74, + 0x70, 0x54, 0x65, 0x69, 0x64, 0x22, 0x67, 0x0a, 0x10, 0x45, 0x52, 0x61, 0x62, 0x41, 0x64, 0x6d, + 0x69, 0x74, 0x74, 0x65, 0x64, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x1e, 0x0a, 0x0b, 0x6e, 0x6f, 0x5f, + 0x6f, 0x66, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, + 0x6e, 0x6f, 0x4f, 0x66, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x33, 0x0a, 0x04, 0x69, 0x74, 0x65, + 0x6d, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x6d, 0x61, 0x67, 0x6d, 0x61, 0x2e, + 0x6c, 0x74, 0x65, 0x2e, 0x6f, 0x61, 0x69, 0x2e, 0x45, 0x52, 0x61, 0x62, 0x41, 0x64, 0x6d, 0x69, + 0x74, 0x74, 0x65, 0x64, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x04, 0x69, 0x74, 0x65, 0x6d, 0x22, 0xdd, + 0x05, 0x0a, 0x0e, 0x45, 0x6e, 0x62, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 
0x69, 0x6f, + 0x6e, 0x12, 0x15, 0x0a, 0x06, 0x65, 0x6e, 0x62, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x05, 0x65, 0x6e, 0x62, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x31, 0x5f, 0x73, + 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x73, 0x31, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x65, 0x6e, 0x62, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x65, 0x6e, 0x62, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2c, + 0x0a, 0x12, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x69, 0x6e, 0x67, + 0x5f, 0x64, 0x72, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x10, 0x64, 0x65, 0x66, 0x61, + 0x75, 0x6c, 0x74, 0x50, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x44, 0x72, 0x78, 0x12, 0x28, 0x0a, 0x10, + 0x6e, 0x62, 0x5f, 0x75, 0x65, 0x5f, 0x61, 0x73, 0x73, 0x6f, 0x63, 0x69, 0x61, 0x74, 0x65, 0x64, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0e, 0x6e, 0x62, 0x55, 0x65, 0x41, 0x73, 0x73, 0x6f, + 0x63, 0x69, 0x61, 0x74, 0x65, 0x64, 0x12, 0x22, 0x0a, 0x0d, 0x73, 0x63, 0x74, 0x70, 0x5f, 0x61, + 0x73, 0x73, 0x6f, 0x63, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x73, + 0x63, 0x74, 0x70, 0x41, 0x73, 0x73, 0x6f, 0x63, 0x49, 0x64, 0x12, 0x28, 0x0a, 0x10, 0x6e, 0x65, + 0x78, 0x74, 0x5f, 0x73, 0x63, 0x74, 0x70, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0e, 0x6e, 0x65, 0x78, 0x74, 0x53, 0x63, 0x74, 0x70, 0x53, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x12, 0x1c, 0x0a, 0x09, 0x69, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x69, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, + 0x6d, 0x73, 0x12, 0x1e, 0x0a, 0x0a, 0x6f, 0x75, 0x74, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, + 0x18, 0x09, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x73, 0x74, 0x72, 0x65, 0x61, + 0x6d, 0x73, 0x12, 0x43, 0x0a, 0x06, 0x75, 0x65, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x0a, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x28, 
0x2e, 0x6d, 0x61, 0x67, 0x6d, 0x61, 0x2e, 0x6c, 0x74, 0x65, 0x2e, 0x6f, + 0x61, 0x69, 0x2e, 0x45, 0x6e, 0x62, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x2e, 0x55, 0x65, 0x49, 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x02, 0x18, 0x01, + 0x52, 0x05, 0x75, 0x65, 0x49, 0x64, 0x73, 0x12, 0x4a, 0x0a, 0x11, 0x73, 0x75, 0x70, 0x70, 0x6f, + 0x72, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x61, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x0b, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x6d, 0x61, 0x67, 0x6d, 0x61, 0x2e, 0x6c, 0x74, 0x65, 0x2e, 0x6f, + 0x61, 0x69, 0x2e, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x54, 0x61, 0x4c, 0x69, + 0x73, 0x74, 0x52, 0x0f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x54, 0x61, 0x4c, + 0x69, 0x73, 0x74, 0x12, 0x22, 0x0a, 0x0d, 0x72, 0x61, 0x6e, 0x5f, 0x63, 0x70, 0x5f, 0x69, 0x70, + 0x61, 0x64, 0x64, 0x72, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x72, 0x61, 0x6e, 0x43, + 0x70, 0x49, 0x70, 0x61, 0x64, 0x64, 0x72, 0x12, 0x27, 0x0a, 0x10, 0x72, 0x61, 0x6e, 0x5f, 0x63, + 0x70, 0x5f, 0x69, 0x70, 0x61, 0x64, 0x64, 0x72, 0x5f, 0x73, 0x7a, 0x18, 0x0d, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x0d, 0x72, 0x61, 0x6e, 0x43, 0x70, 0x49, 0x70, 0x61, 0x64, 0x64, 0x72, 0x53, 0x7a, + 0x12, 0x46, 0x0a, 0x09, 0x75, 0x65, 0x5f, 0x69, 0x64, 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x0e, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x6d, 0x61, 0x67, 0x6d, 0x61, 0x2e, 0x6c, 0x74, 0x65, 0x2e, + 0x6f, 0x61, 0x69, 0x2e, 0x45, 0x6e, 0x62, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x55, 0x65, 0x49, 0x64, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x07, 0x75, 0x65, 0x49, 0x64, 0x4d, 0x61, 0x70, 0x1a, 0x38, 0x0a, 0x0a, 0x55, 0x65, 0x49, 0x64, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, 
0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x1a, 0x3a, 0x0a, 0x0c, 0x55, 0x65, 0x49, 0x64, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xfd, + 0x03, 0x0a, 0x0d, 0x55, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x22, 0x0a, 0x0b, 0x73, 0x31, 0x5f, 0x75, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x05, 0x42, 0x02, 0x18, 0x01, 0x52, 0x09, 0x73, 0x31, 0x55, 0x65, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x12, 0x23, 0x0a, 0x0e, 0x65, 0x6e, 0x62, 0x5f, 0x75, 0x65, 0x5f, 0x73, + 0x31, 0x61, 0x70, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x65, 0x6e, + 0x62, 0x55, 0x65, 0x53, 0x31, 0x61, 0x70, 0x49, 0x64, 0x12, 0x23, 0x0a, 0x0e, 0x6d, 0x6d, 0x65, + 0x5f, 0x75, 0x65, 0x5f, 0x73, 0x31, 0x61, 0x70, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x0b, 0x6d, 0x6d, 0x65, 0x55, 0x65, 0x53, 0x31, 0x61, 0x70, 0x49, 0x64, 0x12, 0x28, + 0x0a, 0x10, 0x73, 0x63, 0x74, 0x70, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x72, 0x65, + 0x63, 0x76, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0e, 0x73, 0x63, 0x74, 0x70, 0x53, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x63, 0x76, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x63, 0x74, 0x70, + 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x73, 0x65, 0x6e, 0x64, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x0e, 0x73, 0x63, 0x74, 0x70, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x53, 0x65, + 0x6e, 0x64, 0x12, 0x52, 0x0a, 0x19, 0x73, 0x31, 0x61, 0x70, 0x5f, 0x75, 0x65, 0x5f, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x72, 0x65, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x72, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6d, 0x61, 0x67, 0x6d, 0x61, 0x2e, 0x6c, 0x74, + 
0x65, 0x2e, 0x6f, 0x61, 0x69, 0x2e, 0x53, 0x31, 0x61, 0x70, 0x54, 0x69, 0x6d, 0x65, 0x72, 0x52, + 0x15, 0x73, 0x31, 0x61, 0x70, 0x55, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x52, 0x65, + 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x72, 0x12, 0x22, 0x0a, 0x0d, 0x73, 0x63, 0x74, 0x70, 0x5f, 0x61, + 0x73, 0x73, 0x6f, 0x63, 0x5f, 0x69, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x73, + 0x63, 0x74, 0x70, 0x41, 0x73, 0x73, 0x6f, 0x63, 0x49, 0x64, 0x12, 0x50, 0x0a, 0x13, 0x73, 0x31, + 0x61, 0x70, 0x5f, 0x68, 0x61, 0x6e, 0x64, 0x6f, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, + 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x6d, 0x61, 0x67, 0x6d, 0x61, 0x2e, + 0x6c, 0x74, 0x65, 0x2e, 0x6f, 0x61, 0x69, 0x2e, 0x53, 0x31, 0x61, 0x70, 0x48, 0x61, 0x6e, 0x64, + 0x6f, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x11, 0x73, 0x31, 0x61, 0x70, 0x48, + 0x61, 0x6e, 0x64, 0x6f, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x20, 0x0a, 0x0c, + 0x63, 0x6f, 0x6d, 0x70, 0x5f, 0x73, 0x31, 0x61, 0x70, 0x5f, 0x69, 0x64, 0x18, 0x0a, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x53, 0x31, 0x61, 0x70, 0x49, 0x64, 0x12, 0x3e, + 0x0a, 0x0d, 0x73, 0x31, 0x61, 0x70, 0x5f, 0x75, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, + 0x0b, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x6d, 0x61, 0x67, 0x6d, 0x61, 0x2e, 0x6c, 0x74, + 0x65, 0x2e, 0x6f, 0x61, 0x69, 0x2e, 0x53, 0x31, 0x61, 0x70, 0x55, 0x65, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x52, 0x0b, 0x73, 0x31, 0x61, 0x70, 0x55, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x22, 0xcb, 0x02, 0x0a, 0x09, 0x53, 0x31, 0x61, 0x70, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x36, 0x0a, 0x04, 0x65, 0x6e, 0x62, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x6d, 0x61, 0x67, 0x6d, 0x61, 0x2e, 0x6c, 0x74, 0x65, 0x2e, 0x6f, 0x61, 0x69, 0x2e, 0x53, 0x31, 0x61, 0x70, 0x53, @@ -807,7 +1051,7 @@ var file_lte_protos_oai_s1ap_state_proto_rawDesc = []byte{ 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 
0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, - 0xa0, 0x02, 0x0a, 0x11, 0x53, 0x31, 0x61, 0x70, 0x48, 0x61, 0x6e, 0x64, 0x6f, 0x76, 0x65, 0x72, + 0x90, 0x04, 0x0a, 0x11, 0x53, 0x31, 0x61, 0x70, 0x48, 0x61, 0x6e, 0x64, 0x6f, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x23, 0x0a, 0x0e, 0x6d, 0x6d, 0x65, 0x5f, 0x75, 0x65, 0x5f, 0x73, 0x31, 0x61, 0x70, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x6d, 0x6d, 0x65, 0x55, 0x65, 0x53, 0x31, 0x61, 0x70, 0x49, 0x64, 0x12, 0x22, 0x0a, 0x0d, 0x73, 0x6f, @@ -825,9 +1069,33 @@ var file_lte_protos_oai_s1ap_state_proto_rawDesc = []byte{ 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x73, 0x63, 0x74, 0x70, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x73, 0x65, 0x6e, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x14, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x53, 0x63, 0x74, 0x70, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x53, 0x65, - 0x6e, 0x64, 0x42, 0x1f, 0x5a, 0x1d, 0x6d, 0x61, 0x67, 0x6d, 0x61, 0x2f, 0x6c, 0x74, 0x65, 0x2f, - 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2f, 0x67, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x2f, - 0x6f, 0x61, 0x69, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x6e, 0x64, 0x12, 0x30, 0x0a, 0x15, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x65, 0x6e, 0x62, + 0x5f, 0x75, 0x65, 0x5f, 0x73, 0x31, 0x61, 0x70, 0x5f, 0x69, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x11, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x6e, 0x62, 0x55, 0x65, 0x53, 0x31, + 0x61, 0x70, 0x49, 0x64, 0x12, 0x35, 0x0a, 0x17, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, + 0x63, 0x74, 0x70, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x72, 0x65, 0x63, 0x76, 0x18, + 0x08, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x14, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x74, + 0x70, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x63, 0x76, 
0x12, 0x35, 0x0a, 0x17, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x63, 0x74, 0x70, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, + 0x6d, 0x5f, 0x73, 0x65, 0x6e, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x14, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x74, 0x70, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x53, 0x65, + 0x6e, 0x64, 0x12, 0x4e, 0x0a, 0x13, 0x65, 0x5f, 0x72, 0x61, 0x62, 0x5f, 0x61, 0x64, 0x6d, 0x69, + 0x74, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1f, 0x2e, 0x6d, 0x61, 0x67, 0x6d, 0x61, 0x2e, 0x6c, 0x74, 0x65, 0x2e, 0x6f, 0x61, 0x69, 0x2e, + 0x45, 0x52, 0x61, 0x62, 0x41, 0x64, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x4c, 0x69, 0x73, 0x74, + 0x52, 0x10, 0x65, 0x52, 0x61, 0x62, 0x41, 0x64, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x4c, 0x69, + 0x73, 0x74, 0x2a, 0x88, 0x01, 0x0a, 0x0b, 0x53, 0x31, 0x61, 0x70, 0x55, 0x65, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x12, 0x19, 0x0a, 0x15, 0x53, 0x31, 0x41, 0x50, 0x5f, 0x55, 0x45, 0x5f, 0x49, 0x4e, + 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x10, 0x00, 0x12, 0x18, 0x0a, + 0x14, 0x53, 0x31, 0x41, 0x50, 0x5f, 0x55, 0x45, 0x5f, 0x57, 0x41, 0x49, 0x54, 0x49, 0x4e, 0x47, + 0x5f, 0x49, 0x43, 0x53, 0x52, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, 0x53, 0x31, 0x41, 0x50, 0x5f, + 0x55, 0x45, 0x5f, 0x48, 0x41, 0x4e, 0x44, 0x4f, 0x56, 0x45, 0x52, 0x10, 0x02, 0x12, 0x15, 0x0a, + 0x11, 0x53, 0x31, 0x41, 0x50, 0x5f, 0x55, 0x45, 0x5f, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, + 0x45, 0x44, 0x10, 0x03, 0x12, 0x17, 0x0a, 0x13, 0x53, 0x31, 0x41, 0x50, 0x5f, 0x55, 0x45, 0x5f, + 0x57, 0x41, 0x49, 0x54, 0x49, 0x4e, 0x47, 0x5f, 0x43, 0x52, 0x43, 0x10, 0x04, 0x42, 0x1f, 0x5a, + 0x1d, 0x6d, 0x61, 0x67, 0x6d, 0x61, 0x2f, 0x6c, 0x74, 0x65, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, + 0x2f, 0x67, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x2f, 0x6f, 0x61, 0x69, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -842,40 +1110,47 @@ func 
file_lte_protos_oai_s1ap_state_proto_rawDescGZIP() []byte { return file_lte_protos_oai_s1ap_state_proto_rawDescData } -var file_lte_protos_oai_s1ap_state_proto_msgTypes = make([]protoimpl.MessageInfo, 14) +var file_lte_protos_oai_s1ap_state_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_lte_protos_oai_s1ap_state_proto_msgTypes = make([]protoimpl.MessageInfo, 16) var file_lte_protos_oai_s1ap_state_proto_goTypes = []interface{}{ - (*S1ApTimer)(nil), // 0: magma.lte.oai.S1apTimer - (*SupportedTaiItems)(nil), // 1: magma.lte.oai.SupportedTaiItems - (*SupportedTaList)(nil), // 2: magma.lte.oai.SupportedTaList - (*EnbDescription)(nil), // 3: magma.lte.oai.EnbDescription - (*UeDescription)(nil), // 4: magma.lte.oai.UeDescription - (*S1ApState)(nil), // 5: magma.lte.oai.S1apState - (*S1ApImsiMap)(nil), // 6: magma.lte.oai.S1apImsiMap - (*S1ApHandoverState)(nil), // 7: magma.lte.oai.S1apHandoverState - nil, // 8: magma.lte.oai.EnbDescription.UeIdsEntry - nil, // 9: magma.lte.oai.EnbDescription.UeIdMapEntry - nil, // 10: magma.lte.oai.S1apState.EnbsEntry - nil, // 11: magma.lte.oai.S1apState.Mmeid2associdEntry - nil, // 12: magma.lte.oai.S1apImsiMap.MmeUeIdImsiMapEntry - nil, // 13: magma.lte.oai.S1apImsiMap.MmeUeS1apIdImsiMapEntry + (S1ApUeState)(0), // 0: magma.lte.oai.S1apUeState + (*S1ApTimer)(nil), // 1: magma.lte.oai.S1apTimer + (*SupportedTaiItems)(nil), // 2: magma.lte.oai.SupportedTaiItems + (*SupportedTaList)(nil), // 3: magma.lte.oai.SupportedTaList + (*ERabAdmittedItem)(nil), // 4: magma.lte.oai.ERabAdmittedItem + (*ERabAdmittedList)(nil), // 5: magma.lte.oai.ERabAdmittedList + (*EnbDescription)(nil), // 6: magma.lte.oai.EnbDescription + (*UeDescription)(nil), // 7: magma.lte.oai.UeDescription + (*S1ApState)(nil), // 8: magma.lte.oai.S1apState + (*S1ApImsiMap)(nil), // 9: magma.lte.oai.S1apImsiMap + (*S1ApHandoverState)(nil), // 10: magma.lte.oai.S1apHandoverState + nil, // 11: magma.lte.oai.EnbDescription.UeIdsEntry + nil, // 12: 
magma.lte.oai.EnbDescription.UeIdMapEntry + nil, // 13: magma.lte.oai.S1apState.EnbsEntry + nil, // 14: magma.lte.oai.S1apState.Mmeid2associdEntry + nil, // 15: magma.lte.oai.S1apImsiMap.MmeUeIdImsiMapEntry + nil, // 16: magma.lte.oai.S1apImsiMap.MmeUeS1apIdImsiMapEntry } var file_lte_protos_oai_s1ap_state_proto_depIdxs = []int32{ - 1, // 0: magma.lte.oai.SupportedTaList.supported_tai_items:type_name -> magma.lte.oai.SupportedTaiItems - 8, // 1: magma.lte.oai.EnbDescription.ue_ids:type_name -> magma.lte.oai.EnbDescription.UeIdsEntry - 2, // 2: magma.lte.oai.EnbDescription.supported_ta_list:type_name -> magma.lte.oai.SupportedTaList - 9, // 3: magma.lte.oai.EnbDescription.ue_id_map:type_name -> magma.lte.oai.EnbDescription.UeIdMapEntry - 0, // 4: magma.lte.oai.UeDescription.s1ap_ue_context_rel_timer:type_name -> magma.lte.oai.S1apTimer - 7, // 5: magma.lte.oai.UeDescription.s1ap_handover_state:type_name -> magma.lte.oai.S1apHandoverState - 10, // 6: magma.lte.oai.S1apState.enbs:type_name -> magma.lte.oai.S1apState.EnbsEntry - 11, // 7: magma.lte.oai.S1apState.mmeid2associd:type_name -> magma.lte.oai.S1apState.Mmeid2associdEntry - 12, // 8: magma.lte.oai.S1apImsiMap.mme_ue_id_imsi_map:type_name -> magma.lte.oai.S1apImsiMap.MmeUeIdImsiMapEntry - 13, // 9: magma.lte.oai.S1apImsiMap.mme_ue_s1ap_id_imsi_map:type_name -> magma.lte.oai.S1apImsiMap.MmeUeS1apIdImsiMapEntry - 3, // 10: magma.lte.oai.S1apState.EnbsEntry.value:type_name -> magma.lte.oai.EnbDescription - 11, // [11:11] is the sub-list for method output_type - 11, // [11:11] is the sub-list for method input_type - 11, // [11:11] is the sub-list for extension type_name - 11, // [11:11] is the sub-list for extension extendee - 0, // [0:11] is the sub-list for field type_name + 2, // 0: magma.lte.oai.SupportedTaList.supported_tai_items:type_name -> magma.lte.oai.SupportedTaiItems + 4, // 1: magma.lte.oai.ERabAdmittedList.item:type_name -> magma.lte.oai.ERabAdmittedItem + 11, // 2: 
magma.lte.oai.EnbDescription.ue_ids:type_name -> magma.lte.oai.EnbDescription.UeIdsEntry + 3, // 3: magma.lte.oai.EnbDescription.supported_ta_list:type_name -> magma.lte.oai.SupportedTaList + 12, // 4: magma.lte.oai.EnbDescription.ue_id_map:type_name -> magma.lte.oai.EnbDescription.UeIdMapEntry + 1, // 5: magma.lte.oai.UeDescription.s1ap_ue_context_rel_timer:type_name -> magma.lte.oai.S1apTimer + 10, // 6: magma.lte.oai.UeDescription.s1ap_handover_state:type_name -> magma.lte.oai.S1apHandoverState + 0, // 7: magma.lte.oai.UeDescription.s1ap_ue_state:type_name -> magma.lte.oai.S1apUeState + 13, // 8: magma.lte.oai.S1apState.enbs:type_name -> magma.lte.oai.S1apState.EnbsEntry + 14, // 9: magma.lte.oai.S1apState.mmeid2associd:type_name -> magma.lte.oai.S1apState.Mmeid2associdEntry + 15, // 10: magma.lte.oai.S1apImsiMap.mme_ue_id_imsi_map:type_name -> magma.lte.oai.S1apImsiMap.MmeUeIdImsiMapEntry + 16, // 11: magma.lte.oai.S1apImsiMap.mme_ue_s1ap_id_imsi_map:type_name -> magma.lte.oai.S1apImsiMap.MmeUeS1apIdImsiMapEntry + 5, // 12: magma.lte.oai.S1apHandoverState.e_rab_admitted_list:type_name -> magma.lte.oai.ERabAdmittedList + 6, // 13: magma.lte.oai.S1apState.EnbsEntry.value:type_name -> magma.lte.oai.EnbDescription + 14, // [14:14] is the sub-list for method output_type + 14, // [14:14] is the sub-list for method input_type + 14, // [14:14] is the sub-list for extension type_name + 14, // [14:14] is the sub-list for extension extendee + 0, // [0:14] is the sub-list for field type_name } func init() { file_lte_protos_oai_s1ap_state_proto_init() } @@ -921,7 +1196,7 @@ func file_lte_protos_oai_s1ap_state_proto_init() { } } file_lte_protos_oai_s1ap_state_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*EnbDescription); i { + switch v := v.(*ERabAdmittedItem); i { case 0: return &v.state case 1: @@ -933,7 +1208,7 @@ func file_lte_protos_oai_s1ap_state_proto_init() { } } file_lte_protos_oai_s1ap_state_proto_msgTypes[4].Exporter = 
func(v interface{}, i int) interface{} { - switch v := v.(*UeDescription); i { + switch v := v.(*ERabAdmittedList); i { case 0: return &v.state case 1: @@ -945,7 +1220,7 @@ func file_lte_protos_oai_s1ap_state_proto_init() { } } file_lte_protos_oai_s1ap_state_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*S1ApState); i { + switch v := v.(*EnbDescription); i { case 0: return &v.state case 1: @@ -957,7 +1232,7 @@ func file_lte_protos_oai_s1ap_state_proto_init() { } } file_lte_protos_oai_s1ap_state_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*S1ApImsiMap); i { + switch v := v.(*UeDescription); i { case 0: return &v.state case 1: @@ -969,6 +1244,30 @@ func file_lte_protos_oai_s1ap_state_proto_init() { } } file_lte_protos_oai_s1ap_state_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*S1ApState); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_lte_protos_oai_s1ap_state_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*S1ApImsiMap); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_lte_protos_oai_s1ap_state_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*S1ApHandoverState); i { case 0: return &v.state @@ -986,13 +1285,14 @@ func file_lte_protos_oai_s1ap_state_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_lte_protos_oai_s1ap_state_proto_rawDesc, - NumEnums: 0, - NumMessages: 14, + NumEnums: 1, + NumMessages: 16, NumExtensions: 0, NumServices: 0, }, GoTypes: file_lte_protos_oai_s1ap_state_proto_goTypes, DependencyIndexes: file_lte_protos_oai_s1ap_state_proto_depIdxs, + EnumInfos: file_lte_protos_oai_s1ap_state_proto_enumTypes, 
MessageInfos: file_lte_protos_oai_s1ap_state_proto_msgTypes, }.Build() File_lte_protos_oai_s1ap_state_proto = out.File diff --git a/lte/cloud/go/services/subscriberdb/servicers/southbound/subscriberdb_servicer.go b/lte/cloud/go/services/subscriberdb/servicers/southbound/subscriberdb_servicer.go index 109e704b5952..fe374288a819 100644 --- a/lte/cloud/go/services/subscriberdb/servicers/southbound/subscriberdb_servicer.go +++ b/lte/cloud/go/services/subscriberdb/servicers/southbound/subscriberdb_servicer.go @@ -75,6 +75,7 @@ func (s *subscriberdbServicer) Sync( ctx context.Context, req *lte_protos.SyncRequest, ) (*lte_protos.SyncResponse, error) { + cloudflag := GetCloudSubscriberDbEnabled(ctx) if !s.DigestsEnabled { return <e_protos.SyncResponse{Resync: true}, nil } @@ -110,12 +111,18 @@ func (s *subscriberdbServicer) Sync( return <e_protos.SyncResponse{Resync: true}, nil } - resync, renewed, deleted, err := s.getSubscribersChangeset(networkID, req.LeafDigests, digestTree.LeafDigests) + resync, renewed, deleted, err := s.getSubscribersChangeset(ctx, networkID, req.LeafDigests, digestTree.LeafDigests) if err != nil { return nil, err } if resync { - return <e_protos.SyncResponse{Resync: true}, nil + + if cloudflag { + return nil, nil + } else { + return <e_protos.SyncResponse{Resync: true}, nil + } + } // Since the cached protos don't contain gateway-specific information, inject @@ -141,7 +148,14 @@ func (s *subscriberdbServicer) Sync( }, Resync: false, } - return res, nil + + if cloudflag { + glog.V(2).Infof("Cloud Authentication enabled, not streaming subscriber data") + return nil, nil + } else { + glog.V(2).Infof("Cloud Authentication disabled, streaming subscriber data") + return res, nil + } } // ListSubscribers returns a page of subscribers and a token to be used on @@ -204,7 +218,15 @@ func (s *subscriberdbServicer) ListSubscribers(ctx context.Context, req *lte_pro LeafDigests: digest.LeafDigests, }, } - return listRes, nil + cloudflag := 
GetCloudSubscriberDbEnabled(ctx) + if cloudflag { + glog.V(2).Infof("Cloud Authentication enabled, not streaming subscriber data") + return nil, nil + } else { + glog.V(2).Infof("Cloud Authentication disabled, streaming subscriber data") + return listRes, nil + } + } func (s *subscriberdbServicer) ListSuciProfiles(ctx context.Context, req *protos.Void) (*lte_protos.SuciProfileList, error) { @@ -234,7 +256,7 @@ func (s *subscriberdbServicer) ListSuciProfiles(ctx context.Context, req *protos // 2. If no resync, the list of subscriber configs to be renewed. // 3. If no resync, the list of subscriber IDs to be deleted. // 4. Any error that occurred. -func (s *subscriberdbServicer) getSubscribersChangeset(networkID string, clientDigests []*protos.LeafDigest, cloudDigests []*protos.LeafDigest) (bool, []*lte_protos.SubscriberData, []string, error) { +func (s *subscriberdbServicer) getSubscribersChangeset(ctx context.Context, networkID string, clientDigests []*protos.LeafDigest, cloudDigests []*protos.LeafDigest) (bool, []*lte_protos.SubscriberData, []string, error) { toRenew, deleted := syncstore.GetLeafDigestsDiff(clientDigests, cloudDigests) if len(toRenew) > s.ChangesetSizeThreshold || len(toRenew) > int(s.MaxProtosLoadSize) { return true, nil, nil, nil @@ -249,7 +271,14 @@ func (s *subscriberdbServicer) getSubscribersChangeset(networkID string, clientD if err != nil { return true, nil, nil, err } - return false, renewed, deleted, nil + cloudflag := GetCloudSubscriberDbEnabled(ctx) + if cloudflag { + glog.V(2).Infof("Cloud Authentication enabled, not streaming subscriber data") + return false, nil, nil, nil + } else { + glog.V(2).Infof("Cloud Authentication disabled, streaming subscriber data") + return false, renewed, deleted, nil + } } func (s *subscriberdbServicer) loadSubscribersPageFromCache(ctx context.Context, networkID string, req *lte_protos.ListSubscribersRequest, gateway *protos.Identity_Gateway) ([]*lte_protos.SubscriberData, string, error) { @@ -271,7 
+300,15 @@ func (s *subscriberdbServicer) loadSubscribersPageFromCache(ctx context.Context, return nil, "", err } - return subProtos, nextToken, nil + cloudflag := GetCloudSubscriberDbEnabled(ctx) + if cloudflag { + glog.V(2).Infof("Cloud Authentication enabled, not streaming subscriber data") + return nil, "", nil + } else { + glog.V(2).Infof("Cloud Authentication disabled, streaming subscriber data") + return subProtos, nextToken, nil + } + } // getDigest returns the digest tree for the network. @@ -309,6 +346,24 @@ func (s *subscriberdbServicer) shouldResync(network string, gateway string) bool return shouldResync } +func GetCloudSubscriberDbEnabled(ctx context.Context) bool { + gateway := protos.GetClientGateway(ctx) + networkID := gateway.NetworkId + network, err := configurator.LoadNetwork(ctx, networkID, false, true, serdes.Network) + if err != nil { + glog.Errorf("Load error for network %s: %+v", networkID, err) + return false + } + nwCellularConfigType, ok := network.Configs[lte.CellularNetworkConfigType] + if !ok { + glog.Errorf("Error fetching cellular configs") + return false + } + nwCellularConfig := nwCellularConfigType.(*lte_models.NetworkCellularConfigs) + EpcConfig := nwCellularConfig.Epc + return EpcConfig.CloudSubscriberdbEnabled +} + func loadAPNs(ctx context.Context, gateway *protos.Identity_Gateway) (map[string]*lte_models.ApnConfiguration, lte_models.ApnResources, error) { networkID := gateway.NetworkId gatewayID := gateway.LogicalId diff --git a/lte/cloud/helm/lte-orc8r/Chart.yaml b/lte/cloud/helm/lte-orc8r/Chart.yaml index 2eeb3767a81b..dd953e1a9a58 100644 --- a/lte/cloud/helm/lte-orc8r/Chart.yaml +++ b/lte/cloud/helm/lte-orc8r/Chart.yaml @@ -10,10 +10,10 @@ # limitations under the License. 
apiVersion: v2 -appVersion: "1.0" +appVersion: "1.8.0" description: A Helm chart for magma orchestrator's lte module name: lte-orc8r -version: 0.2.6 +version: 1.8.0 engine: gotpl sources: - https://github.com/magma/magma diff --git a/lte/gateway/Makefile b/lte/gateway/Makefile index 803e40e0a967..d11fdce5242e 100644 --- a/lte/gateway/Makefile +++ b/lte/gateway/Makefile @@ -1,9 +1,8 @@ -.PHONY: all build clean help log logs run status test +.PHONY: all build clean run test GATEWAY_C_DIR = $(MAGMA_ROOT)/lte/gateway/c GRPC_CPP_PLUGIN_PATH ?= `which grpc_cpp_plugin` BUILD_TYPE ?= Debug -ENABLE_ASAN ?= 0 # FEATURES: What kind of flavours do you want your MME or AGW have in it # MME is MME as described in 3GPP specs, it has at least S1AP, S11, S6a @@ -20,16 +19,6 @@ FEATURES ?= agw_of AVAILABLE_FEATURE_LIST = agw_of mme_oai REQUESTED_FEATURE_LIST = $(sort $(FEATURES)) -ifeq ($(BUILD_TYPE),Debug) - ifeq ($(ENABLE_ASAN),1) - BAZEL_FLAGS := $(BAZEL_FLAGS) --config=asan - endif -else - ## RelWithDebInfo enable LSAN and add debug information - BAZEL_FLAGS = --config=production -endif -$(info BAZEL_FLAGS $(BAZEL_FLAGS)) - # First, check that nothing outside of AVAILABLE_FEATURE_LIST is requested ifneq ($(words $(strip $(filter-out $(AVAILABLE_FEATURE_LIST),$(REQUESTED_FEATURE_LIST)))), 0) $(error Non allowed flags: "$(filter-out $(AVAILABLE_FEATURE_LIST),$(REQUESTED_FEATURE_LIST))") @@ -65,15 +54,11 @@ TEST_FLAG = -DBUILD_TESTS=1 OAI_TEST_FLAGS = -DMME_UNIT_TEST=True OAI_NOTEST_FLAGS = -DMME_UNIT_TEST=False OAI_TESTS ?= ".*" -OAI_BENCHMARK_FLAGS = -DMME_BENCHMARK=True -OAI_NOBENCHMARK_FLAGS = -DMME_BENCHMARK=False all: build build: build_python build_common build_oai build_sctpd build_session_manager build_connection_tracker build_envoy_controller build_li_agent ## Build all -smf_build: build_session_manager ## Build only sessionD component make smf_build - test: test_python test_common test_oai test_sctpd test_session_manager ## Run all tests clean: clean_python 
clean_envoy_controller ## Clean all builds @@ -85,28 +70,9 @@ clean_python: ## Clean Python-only builds clean_envoy_controller: ## Clean envoy controller build rm -rf $(GO_BUILD)/envoy_controller -start: ## Start all services - sudo service magma@magmad start - -stop: ## Stop all services +run: build ## Build and run all services sudo service magma@* stop - -restart: stop start ## Restart all services - -run: build restart ## Build and run all services - -status: ## Status of all services - sudo service magma@* status - -log: ## Follow logs for magmad service - sudo journalctl -fu magma@magmad | egrep 'error|$$' -i --color - -logs: ## Follow logs for all services - sudo journalctl -fu magma@* | egrep 'error|$$' -i --color - -# Ref: https://marmelab.com/blog/2016/02/29/auto-documented-makefile.html -help: ## Show documented commands - @grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-25s\033[0m %s\n", $$1, $$2}' + sudo service magma@magmad start # run_cmake BUILD_DIRECTORY, FILE_DIRECTORY, FLAGS, ENV define run_cmake @@ -117,48 +83,21 @@ cd $(1) && awk '{if (/^CMAKE_EXPORT_COMPILE_COMMANDS/) gsub(/OFF/, "ON"); print} ninja -C $(1) endef -# run_scanbuild BUILD_DIRECTORY, FILE_DIRECTORY, FLAGS -define run_scanbuild -$(eval REPORT_DIR = "$(1)/reports") -mkdir -p $(1) -mkdir -p $(REPORT_DIR) -cd $(1) && scan-build cmake $(2) -DCMAKE_BUILD_TYPE=Debug $(3) -GNinja -scan-build -o $(REPORT_DIR) ninja -C $(1) -cp -r $(REPORT_DIR) $(MAGMA_ROOT) -@echo "Reports in magma/reports/.../index.html" -endef - # run_ctest BUILD_DIRECTORY, TEST_BUILD_DIRECTORY, FILE_DIRECTORY, FLAGS, LIST OF TESTS define run_ctest $(call run_cmake, $(1), $(3), $(4) $(TEST_FLAG)) cd $(2) && ctest --output-on-failure -R $(5) endef -build_python: stop ## Build Python environment +build_python: ## Build Python environment + sudo service magma@* stop make -C $(MAGMA_ROOT)/lte/gateway/python buildenv build_common: ## Build shared libraries $(call 
run_cmake, $(C_BUILD)/magma_common, $(MAGMA_ROOT)/orc8r/gateway/c/common, $(COMMON_FLAGS)) -define copy_bazel_c_build ## Copy Bazel build output to C_BUILD/ -# 1 - source directory, 2 - binary name -mkdir -p $(C_BUILD)/$(1) -sudo cp -f $(MAGMA_ROOT)/bazel-bin/lte/gateway/c/$(1)/$(2) $(C_BUILD)/$(1)/$(2) -endef - - -benchmark_pb: ## Benchmark ProtoBuf, same build_oai but with -DMME_BENCHMARK=True - $(call run_cmake, $(C_BUILD)/core, $(GATEWAY_C_DIR)/core, $(OAI_FLAGS) $(COMMON_FLAGS) $(OAI_NOTEST_FLAGS) $(OAI_BENCHMARK_FLAGS)) - -build_c: ## Build C/C++ targets with Bazel - bazel build $(BAZEL_FLAGS) //lte/gateway/c/session_manager:sessiond //lte/gateway/c/sctpd/src:sctpd //lte/gateway/c/connection_tracker/src:connectiond //lte/gateway/c/li_agent/src:liagentd - $(call copy_bazel_c_build,session_manager,sessiond) - $(call copy_bazel_c_build,sctpd/src,sctpd) - $(call copy_bazel_c_build,connection_tracker/src,connectiond) - $(call copy_bazel_c_build,li_agent/src,liagentd) - build_oai: ## Build OAI - $(call run_cmake, $(C_BUILD)/core, $(GATEWAY_C_DIR)/core, $(OAI_FLAGS) $(COMMON_FLAGS) $(OAI_NOTEST_FLAGS) $(OAI_NOBENCHMARK_FLAGS)) + $(call run_cmake, $(C_BUILD)/core, $(GATEWAY_C_DIR)/core, $(OAI_FLAGS) $(COMMON_FLAGS) $(OAI_NOTEST_FLAGS)) format_all: find $(MAGMA_ROOT)/orc8r/gateway/c/ \( -iname "*.c" -o -iname "*.cpp" -o -iname "*.h" -o -iname "*.hpp" \) -exec \ @@ -188,13 +127,12 @@ build_envoy_controller: ## Build envoy controller build_%: $(call run_cmake, $(C_BUILD)/$*, $(MAGMA_ROOT)/c/$*, $(COMMON_FLAGS)) -scan_oai: ## Scan OAI - $(call run_scanbuild, $(C_BUILD)/scan/core, $(GATEWAY_C_DIR)/core, $(OAI_FLAGS)) - -test_python: stop ## Run all Python-specific tests +test_python: ## Run all Python-specific tests + sudo service magma@* stop make -C $(MAGMA_ROOT)/lte/gateway/python test_all -test_sudo_python: stop ## Run Python tests that require sudo (datapath, etc.) +test_sudo_python: ## Run Python tests that require sudo (datapath, etc.) 
+ sudo service magma@* stop make -C $(MAGMA_ROOT)/lte/gateway/python test_all SKIP_NON_SUDO_TESTS=1 test_python_service: ## Run all Python-specific tests for a given service @@ -203,11 +141,8 @@ ifdef UT_PATH endif make -C $(MAGMA_ROOT)/lte/gateway/python unit_tests MAGMA_SERVICE=$(MAGMA_SERVICE) UT_PATH=$(ut_path) DONT_BUILD_ENV=$(DONT_BUILD_ENV) -test_c: ## Run all Bazel-ified C/C++ tests - bazel test $(BAZEL_FLAGS) -- //orc8r/gateway/c/...:* //lte/gateway/c/...:* -//lte/gateway/c/core/...:* - test_oai: ## Run all OAI-specific tests - $(call run_ctest, $(C_BUILD)/core, $(C_BUILD)/core/oai, $(GATEWAY_C_DIR)/core, $(OAI_FLAGS) $(OAI_TEST_FLAGS) $(OAI_NOBENCHMARK_FLAGS), $(OAI_TESTS)) + $(call run_ctest, $(C_BUILD)/core, $(C_BUILD)/core/oai, $(GATEWAY_C_DIR)/core, $(OAI_FLAGS) $(OAI_TEST_FLAGS), $(OAI_TESTS)) test_oai_runtime: export REPORT_FOLDER=${MAGMA_ROOT}/report/ test_oai_runtime: export UNITTEST_REPORT_FOLDER=${REPORT_FOLDER}/unittest_report/ @@ -216,7 +151,7 @@ test_oai_runtime: export GTEST_OUTPUT=xml:${UNITTEST_REPORT_FOLDER} test_oai_runtime: ## Run all OAI-specific tests with report about the running time mkdir -p ${UNITTEST_REPORT_FOLDER} mkdir -p ${MERGED_REPORT_FOLDER} - -$(call run_ctest, $(C_BUILD)/core, $(C_BUILD)/core/oai, $(GATEWAY_C_DIR)/core, $(OAI_FLAGS) $(OAI_TEST_FLAGS) $(OAI_NOBENCHMARK_FLAGS), $(OAI_TESTS)) + -$(call run_ctest, $(C_BUILD)/core, $(C_BUILD)/core/oai, $(GATEWAY_C_DIR)/core, $(OAI_FLAGS) $(OAI_TEST_FLAGS), $(OAI_TESTS)) python3 python/scripts/runtime_report.py -i .+\\.xml$$ -w ${UNITTEST_REPORT_FOLDER} rm ${UNITTEST_REPORT_FOLDER}/*.xml @@ -244,81 +179,6 @@ coverage_oai: test_oai lcov -r /tmp/coverage_oai.info.raw "/*/test/*" "/usr/*" "/build/*protos*" -o /tmp/coverage_oai.info rm -f `find $(C_BUILD) -name *.gcda` # Clean up any prior coverage data -# format and test c/session_manager -precommit_sm: format_all test_session_manager - -# format and test c/oai -precommit_oai: format_all test_oai - -build_oai_clang: ## Build OAI 
with Clang, store compiler outputs to log - $(call run_cmake, $(C_BUILD)/core, $(GATEWAY_C_DIR)/core, $(OAI_FLAGS) $(COMMON_FLAGS) $(OAI_NOTEST_FLAGS) $(OAI_NOBENCHMARK_FLAGS), CC="clang" CXX="clang++") 2>&1 | tee /tmp/clang-build.oai.log - -# Upload Clang-Warning counts by type to Google Sheet via Google Survey -# Graph available at https://docs.google.com/spreadsheets/d/1ndiIKJNI2IJZBwavnu1x_KwwvlQppnoYEOtYdihqiIQ/edit#gid=734064581 -# TODO: Move to a third party service (I could not find) or make a binary that parses + uploads this without missing new warning types. -clang_warning_oai_upload: build_oai_clang ## Generate and then upload warning statistics to Google Survey for telemetry - curl -L -v -G -Ss \ - --data-urlencode "entry.608561103=$(BRANCH)" \ - --data-urlencode "entry.1257088109=$(REVISION)" \ - --data-urlencode "entry.708918097=OAI" \ - --data-urlencode "entry.1995144005=$(shell grep '\[-Wconstant-conversion\]' /tmp/clang-build.oai.log | wc -l)" \ - --data-urlencode "entry.889426184=$(shell grep '\[-Wdeprecated-declarations\]' /tmp/clang-build.oai.log | wc -l)" \ - --data-urlencode "entry.6143542=$(shell grep '\[-Wenum-conversion\]' /tmp/clang-build.oai.log | wc -l)" \ - --data-urlencode "entry.1953446373=$(shell grep '\[-Wextern-c-compat\]' /tmp/clang-build.oai.log | wc -l)" \ - --data-urlencode "entry.1508747323=$(shell grep '\[-Winconsistent-missing-override\]' /tmp/clang-build.oai.log | wc -l)" \ - --data-urlencode "entry.1682903653=$(shell grep '\[-Winitializer-overrides\]' /tmp/clang-build.oai.log | wc -l)" \ - --data-urlencode "entry.1030484709=$(shell grep '\[-Wnon-c-typedef-for-linkage\]' /tmp/clang-build.oai.log | wc -l)" \ - --data-urlencode "entry.1885910120=$(shell grep '\[-Wnull-dereference\]' /tmp/clang-build.oai.log | wc -l)" \ - --data-urlencode "entry.411691731=$(shell grep '\[-Wparentheses-equality\]' /tmp/clang-build.oai.log | wc -l)" \ - --data-urlencode "entry.872392934=$(shell grep '\[-Wpointer-bool-conversion\]' 
/tmp/clang-build.oai.log | wc -l)" \ - --data-urlencode "entry.1058267165=$(shell grep '\[-Wtautological-constant-out-of-range-compare\]' /tmp/clang-build.oai.log | wc -l)" \ - --data-urlencode "entry.1145616398=$(shell grep '\[-Wtautological-overlap-compare\]' /tmp/clang-build.oai.log | wc -l)" \ - --data-urlencode "entry.1265654844=$(shell grep '\[-Wtautological-pointer-compare\]' /tmp/clang-build.oai.log | wc -l)" \ - --data-urlencode "entry.599425517=$(shell grep '\[-Wtypedef-redefinition\]' /tmp/clang-build.oai.log | wc -l)" \ - https://docs.google.com/forms/d/e/1FAIpQLScKB3nLPASxzr4AXW5_yeHCjkEURY0K9OAFPIyNFzkA5CY_kw/formResponse?usp=pp_url - -# Run clang-tidy -# TODO: CMake config issue - compile_commands.json is only generated if you first make_oai, then test_oai (or do either twice). -# Additionally compile_commands.json is **not capturing all build artifacts this way** -clang_tidy_oai: - mkdir -p $(C_BUILD)/core/oai/build - cd $(C_BUILD)/core/oai/build;cmake $(GATEWAY_C_DIR)/core - sed -i 's/CMAKE_EXPORT_COMPILE_COMMANDS:BOOL=OFF/CMAKE_EXPORT_COMPILE_COMMANDS:BOOL=ON/g' $(C_BUILD)/core/oai/build/CMakeCache.txt - cmake --build $(C_BUILD)/core/oai/build/ - cd $(C_BUILD)/core/oai/build;wget https://raw.githubusercontent.com/llvm-mirror/clang-tools-extra/master/clang-tidy/tool/run-clang-tidy.py;\ - python run-clang-tidy.py -p $(C_BUILD)/core/oai/build/oai -j 2 -checks='-*,clang-analyzer-security*,android-*,cert-*,clang-analyzer-*,concurrency,misc-*,-misc-unused-parameters,bugprone-*' 2>&1 | tee /tmp/clang-tidy-oai.findings - -# Pushes per-finding counts to: -# https://docs.google.com/forms/d/1-45BZTHh4uBBOCqYM4LAD4zFT91B47sEEntHLkQuXWA/edit#responses -clang_tidy_oai_upload: clang_tidy_oai - # Generate a summary of per-finding counts in the log - grep '\]' /tmp/clang-tidy-oai.findings | grep warning: | awk -F'[][]' '{print $$2}' | sort | uniq -c - curl -L -v -G -Ss \ - --data-urlencode "entry.338748281=$(BRANCH)" \ - --data-urlencode 
"entry.1421502557=$(REVISION)" \ - --data-urlencode "entry.1687500610=OAI" \ - --data-urlencode "entry.1690779794=$(shell grep '\[android-cloexec' /tmp/clang-tidy-oai.findings | wc -l)" \ - --data-urlencode "entry.1926965966=$(shell grep '\[bugprone-branch-clone\]' /tmp/clang-tidy-oai.findings | wc -l)" \ - --data-urlencode "entry.951746576=$(shell grep '\[bugprone-macro-parentheses\]' /tmp/clang-tidy-oai.findings | wc -l)" \ - --data-urlencode "entry.593794544=$(shell grep '\[bugprone-narrowing-conversions\]' /tmp/clang-tidy-oai.findings | wc -l)" \ - --data-urlencode "entry.607048444=$(shell grep '\[bugprone-reserved-identifier,cert-dcl37-c,cert-dcl51-cpp\]' /tmp/clang-tidy-oai.findings | wc -l)" \ - --data-urlencode "entry.2081825272=$(shell grep '\[bugprone-signed-char-misuse,cert-str34-c\]' /tmp/clang-tidy-oai.findings | wc -l)" \ - --data-urlencode "entry.268408734=$(shell grep '\[bugprone-sizeof-expression\]' /tmp/clang-tidy-oai.findings | wc -l)" \ - --data-urlencode "entry.1339652996=$(shell grep '\[bugprone-suspicious-string-compare\]' /tmp/clang-tidy-oai.findings | wc -l)" \ - --data-urlencode "entry.1446914229=$(shell grep '\[bugprone-too-small-loop-variable\]' /tmp/clang-tidy-oai.findings | wc -l)" \ - --data-urlencode "entry.718845718=$(shell grep '\[bugprone-undefined-memory-manipulation\]' /tmp/clang-tidy-oai.findings | wc -l)" \ - --data-urlencode "entry.851497596=$(shell grep '\[cert-' /tmp/clang-tidy-oai.findings | wc -l)" \ - --data-urlencode "entry.1963803956=$(shell grep '\[clang-analyzer-core.CallAndMessage\]' /tmp/clang-tidy-oai.findings | wc -l)" \ - --data-urlencode "entry.1218571043=$(shell grep '\[clang-analyzer-core.NullDereference\]' /tmp/clang-tidy-oai.findings | wc -l)" \ - --data-urlencode "entry.1618711162=$(shell grep '\[clang-analyzer-core.uninitialized.Assign\]' /tmp/clang-tidy-oai.findings | wc -l)" \ - --data-urlencode "entry.1062688275=$(shell grep '\[clang-analyzer-deadcode.DeadStores\]' /tmp/clang-tidy-oai.findings | wc 
-l)" \ - --data-urlencode "entry.1790897376=$(shell grep '\[clang-analyzer-optin' /tmp/clang-tidy-oai.findings | wc -l)" \ - --data-urlencode "entry.1730273925=$(shell grep '\[clang-analyzer-security.insecureAPI.strcpy\]' /tmp/clang-tidy-oai.findings | wc -l)" \ - --data-urlencode "entry.806421502=$(shell grep '\[clang-analyzer-unix.Malloc\]' /tmp/clang-tidy-oai.findings | wc -l)" \ - --data-urlencode "entry.999694738=$(shell grep '\[clang-analyzer-unix.MallocSizeof\]' /tmp/clang-tidy-oai.findings | wc -l)" \ - --data-urlencode "entry.740240539=$(shell grep '\[misc-' /tmp/clang-tidy-oai.findings | wc -l)" \ - https://docs.google.com/forms/d/e/1FAIpQLSfHGqOmDhUMAWHSjA_w6NOGqglQBx2IaO1bXLo6zrOE95sRWQ/formResponse?usp=pp_url - ## Generate complete code structural information prior to any test execution base_coverage: build_oai lcov --initial --directory $(C_BUILD) -c --output-file /tmp/coverage_initialize.info.raw diff --git a/lte/gateway/Vagrantfile b/lte/gateway/Vagrantfile index dbb6985a476a..632c66cf474b 100644 --- a/lte/gateway/Vagrantfile +++ b/lte/gateway/Vagrantfile @@ -11,6 +11,12 @@ # See the License for the specific language governing permissions and # limitations under the License. # +# + +# Install vagrant-disksize to allow resizing the vagrant box disk. +unless Vagrant.has_plugin?("vagrant-disksize") + raise Vagrant::Errors::VagrantError.new, "vagrant-disksize plugin is missing. Please install it using 'vagrant plugin install vagrant-disksize' and rerun 'vagrant up'" +end # Vagrantfile API/syntax version. Don't touch unless you know what you're doing! 
VAGRANTFILE_API_VERSION = "2" @@ -23,10 +29,11 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| config.vm.define :magma, primary: true do |magma| magma.vm.box = "magmacore/magma_dev" magma.vm.box_version = "1.2.20220801" + magma.disksize.size = '75GB' + # Enable Dynamic Swap Space to prevent Out of Memory crashes magma.vm.provision :shell, inline: "swapoff -a && fallocate -l 4G /swapfile && chmod 0600 /swapfile && mkswap /swapfile && swapon /swapfile && echo '/swapfile none swap sw 0 0' >> /etc/fstab && swapon -a" magma.vm.provision :shell, inline: "echo vm.swappiness = 10 >> /etc/sysctl.conf && echo vm.vfs_cache_pressure = 50 >> /etc/sysctl.conf && sysctl -p" - magma.vbguest.auto_update = false # Create a private network, which allows host-only access to the machine @@ -229,6 +236,10 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| ["--timeout=30"] ansible.verbose = 'v' end + + # Reload VM to apply correct network configuration + magma_deb.vm.provision :reload + end end diff --git a/lte/gateway/c/connection_tracker/src/BUILD.bazel b/lte/gateway/c/connection_tracker/src/BUILD.bazel index 88e8c5e149a3..8c6a0862f5e1 100644 --- a/lte/gateway/c/connection_tracker/src/BUILD.bazel +++ b/lte/gateway/c/connection_tracker/src/BUILD.bazel @@ -14,6 +14,7 @@ load("@rules_cc//cc:defs.bzl", "cc_binary", "cc_library") cc_binary( name = "connectiond", srcs = ["main.cpp"], + visibility = ["//lte/gateway/release:__pkg__"], deps = [ ":event_tracker", "//lte/protos:mconfigs_cpp_proto", diff --git a/lte/gateway/c/core/BUILD.bazel b/lte/gateway/c/core/BUILD.bazel index c01d3a6fb0fd..f77e913faf0e 100644 --- a/lte/gateway/c/core/BUILD.bazel +++ b/lte/gateway/c/core/BUILD.bazel @@ -82,6 +82,7 @@ MME_SRCS = [ "oai/lib/sms_orc8r_client/SMSOrc8rClient.cpp", "oai/lib/sms_orc8r_client/itti_msg_to_proto_msg.cpp", "oai/lib/sms_orc8r_client/sms_orc8r_client_api.cpp", + "oai/lib/store/sqlite.cpp", "oai/oai_mme/oai_mme_log.c", "oai/tasks/amf/Registration.cpp", 
"oai/tasks/amf/amf_Security_Mode.cpp", @@ -393,6 +394,7 @@ MME_SRCS = [ "oai/tasks/nas5g/src/ies/M5GMaxNumOfSupportedPacketFilters.cpp", "oai/tasks/nas5g/src/ies/M5GMessageType.cpp", "oai/tasks/nas5g/src/ies/M5GNASSecurityAlgorithms.cpp", + "oai/tasks/nas5g/src/ies/M5GNetworkFeatureSupport.cpp", "oai/tasks/nas5g/src/ies/M5GNSSAI.cpp", "oai/tasks/nas5g/src/ies/M5GNasKeySetIdentifier.cpp", "oai/tasks/nas5g/src/ies/M5GPDUAddress.cpp", @@ -635,6 +637,7 @@ MME_HDRS = [ "oai/lib/sms_orc8r_client/SMSOrc8rClient.hpp", "oai/lib/sms_orc8r_client/itti_msg_to_proto_msg.hpp", "oai/lib/sms_orc8r_client/sms_orc8r_client_api.hpp", + "oai/lib/store/sqlite.hpp", "oai/oai_mme/oai_mme.h", "oai/tasks/amf/amf_app_defs.hpp", "oai/tasks/amf/amf_app_state_converter.hpp", @@ -906,6 +909,7 @@ MME_HDRS = [ "oai/tasks/nas5g/include/ies/M5GMessageType.hpp", "oai/tasks/nas5g/include/ies/M5GNASKeySetIdentifier.hpp", "oai/tasks/nas5g/include/ies/M5GNASSecurityAlgorithms.hpp", + "oai/tasks/nas5g/include/ies/M5GNetworkFeatureSupport.hpp", "oai/tasks/nas5g/include/ies/M5GNSSAI.hpp", "oai/tasks/nas5g/include/ies/M5GPDUAddress.hpp", "oai/tasks/nas5g/include/ies/M5GPDUSessionIdentity.hpp", @@ -989,12 +993,10 @@ MME_HDRS = [ "oai/tasks/gtpv1-u/gtpv1u.h", "oai/tasks/gtpv1-u/gtpv1u_sgw_defs.h", "oai/tasks/sgw/pgw_handlers.hpp", - "oai/tasks/sgw/pgw_pcef_emulation.hpp", "oai/tasks/sgw/pgw_pco.hpp", "oai/tasks/sgw/pgw_procedures.hpp", "oai/tasks/sgw/pgw_ue_ip_address_alloc.hpp", "oai/tasks/sgw/s11_causes.hpp", - "oai/tasks/sgw/sgw.hpp", "oai/tasks/sgw/sgw_defs.hpp", "oai/tasks/sgw/sgw_handlers.hpp", "oai/tasks/sgw/sgw_paging.hpp", @@ -1070,6 +1072,7 @@ MME_DEPS = [ "@system_libraries//:libfd", "@system_libraries//:libgnutls", "@system_libraries//:libnettle", + "@system_libraries//:libsqlite3-dev", ] # EMBEDDED_SGW 1 @@ -1100,7 +1103,6 @@ SGW_C = [ "oai/tasks/sgw/mobilityd_ue_ip_address_alloc.cpp", "oai/tasks/sgw/pgw_config.cpp", "oai/tasks/sgw/pgw_handlers.cpp", - "oai/tasks/sgw/pgw_pcef_emulation.cpp", 
"oai/tasks/sgw/pgw_pco.cpp", "oai/tasks/sgw/pgw_procedures.cpp", "oai/tasks/sgw/s11_causes.cpp", @@ -1182,6 +1184,7 @@ cc_binary( srcs = ["oai/oai_mme/oai_mme.c"], copts = ["-DEMBEDDED_SGW=1"], linkstatic = True, + visibility = ["//lte/gateway/release:__pkg__"], deps = [":lib_agw_of"], ) diff --git a/lte/gateway/c/core/oai/CMakeAgwOptions.txt b/lte/gateway/c/core/oai/CMakeAgwOptions.txt index a7f3ebea487a..a90a2bb01572 100644 --- a/lte/gateway/c/core/oai/CMakeAgwOptions.txt +++ b/lte/gateway/c/core/oai/CMakeAgwOptions.txt @@ -4,4 +4,3 @@ add_boolean_option(S6A_OVER_GRPC True "S6a messages sent over gRPC") add_boolean_option(EMBEDDED_SGW True "S11/GTPV2-C interface not present") add_boolean_option(MME_UNIT_TEST False "MME unit testing is enabled") -add_boolean_option(MME_BENCHMARK False "Benchmarking Protobuf is enabled") diff --git a/lte/gateway/c/core/oai/CMakeMmeOptions.txt b/lte/gateway/c/core/oai/CMakeMmeOptions.txt index d2f0e1b50b2d..54d8e9b1c222 100644 --- a/lte/gateway/c/core/oai/CMakeMmeOptions.txt +++ b/lte/gateway/c/core/oai/CMakeMmeOptions.txt @@ -4,4 +4,3 @@ add_boolean_option(S6A_OVER_GRPC False "S6a messages sent over gRPC") add_boolean_option(EMBEDDED_SGW False "S11/GTPV2-C interface present") add_boolean_option(MME_UNIT_TEST False "MME unit testing is enabled") -add_boolean_option(MME_BENCHMARK False "Benchmarking Protobuf is enabled") diff --git a/lte/gateway/c/core/oai/common/itti_free_defined_msg.c b/lte/gateway/c/core/oai/common/itti_free_defined_msg.c index f42927146a28..c9d92a2230f4 100644 --- a/lte/gateway/c/core/oai/common/itti_free_defined_msg.c +++ b/lte/gateway/c/core/oai/common/itti_free_defined_msg.c @@ -222,6 +222,15 @@ void itti_free_msg_content(MessageDef* const message_p) { bdestroy_wrapper( &message_p->ittiMsg.s1ap_handover_request_ack.tgt_src_container); break; + case S1AP_HANDOVER_NOTIFY: { + e_rab_admitted_list_t e_rab_admitted_list = {}; + e_rab_admitted_list = + message_p->ittiMsg.s1ap_handover_notify.e_rab_admitted_list; + 
for (uint8_t idx = 0; idx < e_rab_admitted_list.no_of_items; idx++) { + bdestroy_wrapper( + &e_rab_admitted_list.item[idx].transport_layer_address); + } + } break; case S6A_UPDATE_LOCATION_REQ: case S6A_UPDATE_LOCATION_ANS: case S6A_AUTH_INFO_REQ: diff --git a/lte/gateway/c/core/oai/include/mme_app_messages_def.h b/lte/gateway/c/core/oai/include/mme_app_messages_def.h index 8b3534553c39..bccccf97921c 100644 --- a/lte/gateway/c/core/oai/include/mme_app_messages_def.h +++ b/lte/gateway/c/core/oai/include/mme_app_messages_def.h @@ -60,8 +60,3 @@ MESSAGE_DEF(MME_APP_HANDOVER_REQUEST, itti_mme_app_handover_request_t, mme_app_handover_request) MESSAGE_DEF(MME_APP_HANDOVER_COMMAND, itti_mme_app_handover_command_t, mme_app_handover_command) -#if MME_BENCHMARK -MESSAGE_DEF(MME_APP_TEST_PROTOBUF_SERIALIZATION, - itti_mme_app_test_protobuf_serialization_t, - mme_app_test_protobuf_serialization) -#endif diff --git a/lte/gateway/c/core/oai/include/mme_app_messages_types.h b/lte/gateway/c/core/oai/include/mme_app_messages_types.h index c0bf3a9808bf..a71e985b7dae 100644 --- a/lte/gateway/c/core/oai/include/mme_app_messages_types.h +++ b/lte/gateway/c/core/oai/include/mme_app_messages_types.h @@ -65,10 +65,6 @@ (mSGpTR)->ittiMsg.mme_app_handover_request #define MME_APP_HANDOVER_COMMAND(mSGpTR) \ (mSGpTR)->ittiMsg.mme_app_handover_command -#if MME_BENCHMARK -#define MME_APP_TEST_PROTOBUF_SERIALIZATION(mSGpTR) \ - (mSGpTR)->ittiMsg.mme_app_test_protobuf_serialization -#endif typedef struct itti_mme_app_connection_establishment_cnf_s { mme_ue_s1ap_id_t ue_id; @@ -195,9 +191,4 @@ typedef struct itti_mme_app_handover_command_s { uint32_t target_enb_id; } itti_mme_app_handover_command_t; -#if MME_BENCHMARK -typedef struct itti_mme_app_test_protobuf_serialization_s { - uint num_ues; -} itti_mme_app_test_protobuf_serialization_t; -#endif #endif /* FILE_MME_APP_MESSAGES_TYPES_SEEN */ diff --git a/lte/gateway/c/core/oai/include/mme_config.h b/lte/gateway/c/core/oai/include/mme_config.h 
index 378f4985ba43..3567807818ad 100644 --- a/lte/gateway/c/core/oai/include/mme_config.h +++ b/lte/gateway/c/core/oai/include/mme_config.h @@ -245,9 +245,6 @@ #define MME_CONFIG_STRING_URL_NATIVE "URL_NATIVE" typedef enum { RUN_MODE_TEST = 0, RUN_MODE_OTHER } run_mode_t; -#if MME_BENCHMARK -typedef enum { TEST_SERIALIZATION_PROTOBUF = 0 } test_type_t; -#endif typedef struct eps_network_feature_config_s { uint8_t ims_voice_over_ps_session_in_s1; @@ -394,11 +391,6 @@ typedef struct mme_config_s { uint8_t daylight_saving_time; run_mode_t run_mode; -#if MME_BENCHMARK - // Integer value for testing serialization - test_type_t test_type; - uint32_t test_param; -#endif uint32_t max_enbs; uint32_t max_ues; diff --git a/lte/gateway/c/core/oai/include/mme_init.hpp b/lte/gateway/c/core/oai/include/mme_init.hpp index 22138d8f5a96..78dde5d80b55 100644 --- a/lte/gateway/c/core/oai/include/mme_init.hpp +++ b/lte/gateway/c/core/oai/include/mme_init.hpp @@ -30,7 +30,3 @@ * @returns -1 in case of failure **/ status_code_e s1ap_mme_init(const mme_config_t* mme_config); - -/** \brief S1AP layer top exit - **/ -void s1ap_mme_exit(void); diff --git a/lte/gateway/c/core/oai/include/pgw_types.h b/lte/gateway/c/core/oai/include/pgw_types.h index 334f600096c3..7f1ac29697df 100644 --- a/lte/gateway/c/core/oai/include/pgw_types.h +++ b/lte/gateway/c/core/oai/include/pgw_types.h @@ -15,7 +15,7 @@ * contact@openairinterface.org */ -/*! \file pgw_pcef_emulation.h +/*! 
\file pgw_types.h * \brief * \author Lionel Gauthier * \company Eurecom diff --git a/lte/gateway/c/core/oai/include/s1ap_state.hpp b/lte/gateway/c/core/oai/include/s1ap_state.hpp index e704681b92df..fdbf1d3eb6dc 100644 --- a/lte/gateway/c/core/oai/include/s1ap_state.hpp +++ b/lte/gateway/c/core/oai/include/s1ap_state.hpp @@ -21,9 +21,13 @@ #ifdef __cplusplus extern "C" { #endif - -#include "lte/gateway/c/core/oai/lib/hashtable/hashtable.h" #include "lte/gateway/c/core/oai/include/mme_config.h" +#ifdef __cplusplus +} +#endif + +namespace magma { +namespace lte { int s1ap_state_init(uint32_t max_ues, uint32_t max_enbs, bool use_stateless); @@ -36,12 +40,12 @@ void put_s1ap_state(void); enb_description_t* s1ap_state_get_enb(s1ap_state_t* state, sctp_assoc_id_t assoc_id); -ue_description_t* s1ap_state_get_ue_enbid(sctp_assoc_id_t sctp_assoc_id, - enb_ue_s1ap_id_t enb_ue_s1ap_id); +oai::UeDescription* s1ap_state_get_ue_enbid(sctp_assoc_id_t sctp_assoc_id, + enb_ue_s1ap_id_t enb_ue_s1ap_id); -ue_description_t* s1ap_state_get_ue_mmeid(mme_ue_s1ap_id_t mme_ue_s1ap_id); +oai::UeDescription* s1ap_state_get_ue_mmeid(mme_ue_s1ap_id_t mme_ue_s1ap_id); -ue_description_t* s1ap_state_get_ue_imsi(imsi64_t imsi64); +oai::UeDescription* s1ap_state_get_ue_imsi(imsi64_t imsi64); /** * Return unique composite id for S1AP UE context @@ -62,7 +66,7 @@ void put_s1ap_imsi_map(void); */ s1ap_imsi_map_t* get_s1ap_imsi_map(void); -hash_table_ts_t* get_s1ap_ue_state(void); +map_uint64_ue_description_t* get_s1ap_ue_state(void); int read_s1ap_ue_state_db(void); @@ -70,18 +74,18 @@ void put_s1ap_ue_state(imsi64_t imsi64); void delete_s1ap_ue_state(imsi64_t imsi64); -bool s1ap_ue_compare_by_mme_ue_id_cb(__attribute__((unused)) hash_key_t keyP, - void* elementP, void* parameterP, - void** resultP); +bool s1ap_ue_compare_by_mme_ue_id_cb(__attribute__((unused)) uint64_t keyP, + oai::UeDescription* elementP, + void* parameterP, void** resultP); -bool s1ap_ue_compare_by_imsi(__attribute__((unused)) 
hash_key_t keyP, - void* elementP, void* parameterP, void** resultP); +bool s1ap_ue_compare_by_imsi(__attribute__((unused)) uint64_t keyP, + oai::UeDescription* elementP, void* parameterP, + void** resultP); void remove_ues_without_imsi_from_ue_id_coll(void); void clean_stale_enb_state(s1ap_state_t* state, enb_description_t* new_enb_association); -#ifdef __cplusplus -} -#endif +} // namespace lte +} // namespace magma diff --git a/lte/gateway/c/core/oai/include/s1ap_types.hpp b/lte/gateway/c/core/oai/include/s1ap_types.hpp index dcc1585dd798..2986b9128fa5 100644 --- a/lte/gateway/c/core/oai/include/s1ap_types.hpp +++ b/lte/gateway/c/core/oai/include/s1ap_types.hpp @@ -19,14 +19,16 @@ #include +#include "lte/protos/oai/s1ap_state.pb.h" + #include "lte/gateway/c/core/oai/include/proto_map.hpp" #include "lte/gateway/c/core/oai/lib/3gpp/3gpp_36.401.h" #include "lte/gateway/c/core/oai/lib/3gpp/3gpp_36.413.h" + #ifdef __cplusplus extern "C" { #endif #include "lte/gateway/c/core/oai/common/common_types.h" -#include "lte/gateway/c/core/oai/lib/hashtable/hashtable.h" #ifdef __cplusplus } #endif @@ -53,18 +55,6 @@ typedef struct s1ap_imsi_map_s { magma::proto_map_uint32_uint64_t mme_ueid2imsi_map; } s1ap_imsi_map_t; -enum s1_timer_class_s { - S1AP_INVALID_TIMER_CLASS, - S1AP_ENB_TIMER, - S1AP_UE_TIMER -}; - -/* Timer structure */ -struct s1ap_timer_t { - long id; /* The timer identifier */ - long msec; /* The timer interval value in seconds */ -}; - // The current s1 state of the MME relating to the specific eNB. enum mme_s1_enb_state_s { S1AP_INIT, /// The sctp association has been established but s1 hasn't been @@ -75,60 +65,10 @@ enum mme_s1_enb_state_s { S1AP_SHUTDOWN /// The S1 state is being torn down due to sctp shutdown. 
}; -enum s1_ue_state_s { - S1AP_UE_INVALID_STATE, - S1AP_UE_WAITING_CSR, ///< Waiting for Initial Context Setup Response - S1AP_UE_HANDOVER, ///< Handover procedure triggered - S1AP_UE_CONNECTED, ///< UE context ready - S1AP_UE_WAITING_CRR, /// UE Context release Procedure initiated , waiting for - /// UE context Release Complete -}; - -typedef struct s1ap_handover_state_s { - mme_ue_s1ap_id_t mme_ue_s1ap_id; - uint32_t source_enb_id; - uint32_t target_enb_id; - enb_ue_s1ap_id_t source_enb_ue_s1ap_id : 24; - enb_ue_s1ap_id_t target_enb_ue_s1ap_id : 24; - sctp_stream_id_t source_sctp_stream_recv; ///< source eNB -> MME stream - sctp_stream_id_t target_sctp_stream_recv; ///< target eNB -> MME stream - sctp_stream_id_t source_sctp_stream_send; ///< MME -> source eNB stream - sctp_stream_id_t target_sctp_stream_send; ///< MME -> target eNB stream - e_rab_admitted_list_t e_rab_admitted_list; -} s1ap_handover_state_t; - -/** Main structure representing UE association over s1ap - * Generated every time a new InitialUEMessage is received - **/ -typedef struct ue_description_s { - enum s1_ue_state_s s1_ue_state; ///< S1AP UE state - - enb_ue_s1ap_id_t - enb_ue_s1ap_id : 24; ///< Unique UE id over eNB (24 bits wide) - mme_ue_s1ap_id_t mme_ue_s1ap_id; ///< Unique UE id over MME (32 bits wide) - sctp_assoc_id_t sctp_assoc_id; ///< Assoc id of eNB which this UE is attached - uint64_t comp_s1ap_id; ///< Unique composite UE id (sctp_assoc_id & - ///< enb_ue_s1ap_id) - - /** SCTP stream on which S1 message will be sent/received. - * During an UE S1 connection, a pair of streams is - * allocated and is used during all the connection. - * Stream 0 is reserved for non UE signalling. - * @name sctp stream identifier - **/ - /*@{*/ - sctp_stream_id_t sctp_stream_recv; ///< eNB -> MME stream - sctp_stream_id_t sctp_stream_send; ///< MME -> eNB stream - /*@}*/ - - // UE Context Release procedure guard timer - struct s1ap_timer_t s1ap_ue_context_rel_timer; - - // Handover status. 
We intentionally do not persist all of this state since - // it's time sensitive; if the MME restarts during a HO procedure the RAN - // will abort the procedure due to timeouts, rendering this state useless. - s1ap_handover_state_t s1ap_handover_state; -} ue_description_t; +// Map- Key:comp_s1ap_id of uint64_t, Data: pointer to protobuf object, +// UeDescription +typedef magma::proto_map_s + map_uint64_ue_description_t; /* Maximum no. of Broadcast PLMNs. Value is 6 * 3gpp spec 36.413 section-9.1.8.4 diff --git a/lte/gateway/c/core/oai/include/sgw_context_manager.hpp b/lte/gateway/c/core/oai/include/sgw_context_manager.hpp index 8d6402b53ea1..270c3b142f0f 100644 --- a/lte/gateway/c/core/oai/include/sgw_context_manager.hpp +++ b/lte/gateway/c/core/oai/include/sgw_context_manager.hpp @@ -23,15 +23,18 @@ */ #pragma once +#include + #ifdef __cplusplus extern "C" { #endif - -#include - #include "lte/gateway/c/core/common/common_defs.h" -#include "lte/gateway/c/core/oai/include/spgw_state.hpp" #include "lte/gateway/c/core/oai/lib/3gpp/3gpp_24.007.h" +#ifdef __cplusplus +} +#endif + +#include "lte/gateway/c/core/oai/include/spgw_state.hpp" #define INITIAL_SGW_S8_S1U_TEID 0x7FFFFFFF void sgw_display_sgw_eps_bearer_context( @@ -61,7 +64,3 @@ spgw_ue_context_t* spgw_get_ue_context(imsi64_t imsi64); spgw_ue_context_t* spgw_create_or_get_ue_context(imsi64_t imsi64); status_code_e spgw_update_teid_in_ue_context(imsi64_t imsi64, teid_t teid); - -#ifdef __cplusplus -} -#endif diff --git a/lte/gateway/c/core/oai/include/sgw_s8_state.hpp b/lte/gateway/c/core/oai/include/sgw_s8_state.hpp index 071ca1d8c9dc..f745653ab1cd 100644 --- a/lte/gateway/c/core/oai/include/sgw_s8_state.hpp +++ b/lte/gateway/c/core/oai/include/sgw_s8_state.hpp @@ -13,14 +13,18 @@ limitations under the License. 
#pragma once +#include + #ifdef __cplusplus extern "C" { #endif - -#include #include "lte/gateway/c/core/oai/lib/hashtable/hashtable.h" -#include "lte/gateway/c/core/oai/include/spgw_types.hpp" #include "lte/gateway/c/core/oai/include/sgw_config.h" +#ifdef __cplusplus +} +#endif + +#include "lte/gateway/c/core/oai/include/spgw_types.hpp" // Initializes SGW state struct when task process starts. int sgw_state_init(bool persist_state, const sgw_config_t* config); @@ -71,7 +75,3 @@ void sgw_free_s11_bearer_context_information( * @param spgw_ue_context_t */ void sgw_free_ue_context(spgw_ue_context_t** ue_context_p); - -#ifdef __cplusplus -} -#endif diff --git a/lte/gateway/c/core/oai/include/spgw_state.hpp b/lte/gateway/c/core/oai/include/spgw_state.hpp index 83eb7d6b3e1a..b3189724cecb 100644 --- a/lte/gateway/c/core/oai/include/spgw_state.hpp +++ b/lte/gateway/c/core/oai/include/spgw_state.hpp @@ -17,17 +17,19 @@ #pragma once +#include + #ifdef __cplusplus extern "C" { #endif - -#include - #include "lte/gateway/c/core/oai/lib/hashtable/hashtable.h" - #include "lte/gateway/c/core/oai/include/gtpv1u_types.h" -#include "lte/gateway/c/core/oai/include/spgw_types.hpp" #include "lte/gateway/c/core/oai/include/spgw_config.h" +#ifdef __cplusplus +} +#endif + +#include "lte/gateway/c/core/oai/include/spgw_types.hpp" // Initializes SGW state struct when task process starts. 
int spgw_state_init(bool persist_state, const spgw_config_t* spgw_config_p); @@ -81,17 +83,8 @@ void sgw_free_pdn_connection(sgw_pdn_connection_t* pdn_connection_p); * @param sgw_eps_bearer_ctxt */ void sgw_free_eps_bearer_context(sgw_eps_bearer_ctxt_t** sgw_eps_bearer_ctxt); -/** - * Callback function for pcc_rule hashtables freefunc - * @param rule pcc_rule entry on hashtable - */ -void pgw_free_pcc_rule(void** rule); - /** * Callback function for imsi_ue_context hashtable's freefunc * @param spgw_ue_context_t */ void sgw_free_ue_context(spgw_ue_context_t** ue_context_p); -#ifdef __cplusplus -} -#endif diff --git a/lte/gateway/c/core/oai/include/spgw_types.hpp b/lte/gateway/c/core/oai/include/spgw_types.hpp index 17b227e7e9ee..00b98cc721f8 100644 --- a/lte/gateway/c/core/oai/include/spgw_types.hpp +++ b/lte/gateway/c/core/oai/include/spgw_types.hpp @@ -27,13 +27,20 @@ * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. 
*/ -#ifndef FILE_SPGW_TYPES_SEEN -#define FILE_SPGW_TYPES_SEEN + +#pragma once + +#ifdef __cplusplus +extern "C" { +#endif +#include "lte/gateway/c/core/oai/include/gtpv1u_types.h" +#ifdef __cplusplus +} +#endif #include "lte/gateway/c/core/oai/lib/3gpp/3gpp_23.401.h" #include "lte/gateway/c/core/oai/include/ip_forward_messages_types.h" #include "lte/gateway/c/core/oai/include/sgw_ie_defs.h" -#include "lte/gateway/c/core/oai/include/gtpv1u_types.h" typedef struct s5_create_session_request_s { teid_t context_teid; ///< local SGW S11 Tunnel Endpoint Identifier @@ -98,7 +105,6 @@ typedef struct sgw_state_s { typedef struct spgw_state_s { STAILQ_HEAD(ipv4_list_allocated_s, ipv4_list_elm_s) ipv4_list_allocated; hash_table_ts_t* deactivated_predefined_pcc_rules; - hash_table_ts_t* predefined_pcc_rules; gtpv1u_data_t gtpv1u_data; uint32_t gtpv1u_teid; struct in_addr sgw_ip_address_S1u_S12_S4_up; @@ -109,9 +115,3 @@ void handle_s5_create_session_response( spgw_state_t* state, s_plus_p_gw_eps_bearer_context_information_t* new_bearer_ctxt_info_p, s5_create_session_response_t session_resp); - -status_code_e sgw_handle_sgi_endpoint_created( - spgw_state_t* state, itti_sgi_create_end_point_response_t* const resp_p, - imsi64_t imsi64); - -#endif /* FILE_SPGW_TYPES_SEEN */ diff --git a/lte/gateway/c/core/oai/include/state_manager.hpp b/lte/gateway/c/core/oai/include/state_manager.hpp index dfc7ece61775..e7ac65620846 100644 --- a/lte/gateway/c/core/oai/include/state_manager.hpp +++ b/lte/gateway/c/core/oai/include/state_manager.hpp @@ -17,9 +17,6 @@ #pragma once -#if MME_BENCHMARK -#include -#endif #ifdef __cplusplus extern "C" { #endif @@ -167,42 +164,20 @@ class StateManager { is_initialized, "StateManager init() function should be called to initialize state"); -#if MME_BENCHMARK - auto start = std::chrono::high_resolution_clock::now(); -#endif std::string proto_str; ProtoUe ue_proto = ProtoUe(); StateConverter::ue_to_proto(ue_context, &ue_proto); 
redis_client->serialize(ue_proto, proto_str); std::size_t new_hash = std::hash{}(proto_str); -#if MME_BENCHMARK - auto stop = std::chrono::high_resolution_clock::now(); - std::cout << "TIME PROTOBUF CONVERSION : " - << (std::chrono::duration_cast(stop - - start)) - .count() - << std::endl; -#endif if (new_hash != this->ue_state_hash[imsi_str]) { std::string key = IMSI_PREFIX + imsi_str + ":" + task_name; -#if MME_BENCHMARK - start = std::chrono::high_resolution_clock::now(); -#endif if (redis_client->write_proto_str( key, proto_str, ue_state_version[imsi_str]) != RETURNok) { OAILOG_ERROR(log_task, "Failed to write UE state to db for IMSI %s", imsi_str.c_str()); return; } -#if MME_BENCHMARK - stop = std::chrono::high_resolution_clock::now(); - std::cout << "TIME PROTOBUF REDIS SERIALIZATION : " - << std::chrono::duration_cast(stop - - start) - .count() - << std::endl; -#endif this->ue_state_version[imsi_str]++; this->ue_state_hash[imsi_str] = new_hash; OAILOG_DEBUG(log_task, "Finished writing UE state for IMSI %s", diff --git a/lte/gateway/c/core/oai/lib/3gpp/3gpp_23.401.h b/lte/gateway/c/core/oai/lib/3gpp/3gpp_23.401.h index 508e71669721..2c2e81e13eb6 100644 --- a/lte/gateway/c/core/oai/lib/3gpp/3gpp_23.401.h +++ b/lte/gateway/c/core/oai/lib/3gpp/3gpp_23.401.h @@ -39,8 +39,15 @@ #include +#ifdef __cplusplus +extern "C" { +#endif #include "lte/gateway/c/core/oai/lib/hashtable/hashtable.h" #include "lte/gateway/c/core/oai/lib/hashtable/obj_hashtable.h" +#ifdef __cplusplus +} +#endif + #include "lte/gateway/c/core/oai/common/queue.h" #include "lte/gateway/c/core/oai/lib/3gpp/3gpp_23.003.h" diff --git a/lte/gateway/c/core/oai/lib/CMakeLists.txt b/lte/gateway/c/core/oai/lib/CMakeLists.txt index 858a1fffa364..be74bbfbd8be 100644 --- a/lte/gateway/c/core/oai/lib/CMakeLists.txt +++ b/lte/gateway/c/core/oai/lib/CMakeLists.txt @@ -15,6 +15,7 @@ add_subdirectory(s8_proxy) # LIB_S8_PROXY add_subdirectory(sms_orc8r_client) # LIB_SMS_OCR8R_CLIENT add_subdirectory(event_client) # 
LIB_EVENT_CLIENT add_subdirectory(n11) #LIB_N11 +add_subdirectory(store) #LIB_STORE if (EMBEDDED_SGW) add_subdirectory(openflow) # LIB_OPENFLOW diff --git a/lte/gateway/c/core/oai/lib/mobility_client/MobilityClientAPI.hpp b/lte/gateway/c/core/oai/lib/mobility_client/MobilityClientAPI.hpp index 59b004df394f..bef4d8e318a5 100644 --- a/lte/gateway/c/core/oai/lib/mobility_client/MobilityClientAPI.hpp +++ b/lte/gateway/c/core/oai/lib/mobility_client/MobilityClientAPI.hpp @@ -26,8 +26,12 @@ extern "C" { #include "lte/gateway/c/core/common/dynamic_memory_check.h" #include "lte/gateway/c/core/oai/common/log.h" #include "lte/gateway/c/core/oai/include/ip_forward_messages_types.h" -#include "lte/gateway/c/core/oai/include/spgw_state.hpp" #include "lte/gateway/c/core/oai/lib/itti/intertask_interface.h" +#ifdef __cplusplus +} +#endif + +#include "lte/gateway/c/core/oai/include/spgw_state.hpp" // Status codes from gRPC #define RPC_STATUS_OK 0 @@ -169,8 +173,4 @@ int pgw_handle_allocate_ipv4v6_address(const char* subscriber_id, teid_t context_teid, ebi_t eps_bearer_id); -#ifdef __cplusplus -} -#endif - #endif // RPC_CLIENT_H diff --git a/lte/gateway/c/core/oai/lib/pcef/pcef_handlers.cpp b/lte/gateway/c/core/oai/lib/pcef/pcef_handlers.cpp index 49a21b763764..adb31b5b9f86 100644 --- a/lte/gateway/c/core/oai/lib/pcef/pcef_handlers.cpp +++ b/lte/gateway/c/core/oai/lib/pcef/pcef_handlers.cpp @@ -15,27 +15,27 @@ * contact@openairinterface.org */ -#include +#include "lte/gateway/c/core/oai/lib/pcef/pcef_handlers.hpp" + #include #include +#include +#include "lte/protos/session_manager.pb.h" + #ifdef __cplusplus extern "C" { #endif - #include "lte/gateway/c/core/common/common_defs.h" #include "lte/gateway/c/core/oai/common/conversions.h" #include "lte/gateway/c/core/oai/common/log.h" - +#include "lte/gateway/c/core/oai/lib/itti/itti_types.h" #ifdef __cplusplus } #endif -#include "lte/gateway/c/core/oai/lib/pcef/pcef_handlers.hpp" #include "lte/gateway/c/core/oai/lib/pcef/PCEFClient.hpp" 
#include "lte/gateway/c/core/oai/lib/mobility_client/MobilityClientAPI.hpp" -#include "lte/gateway/c/core/oai/lib/itti/itti_types.h" -#include "lte/protos/session_manager.pb.h" #include "lte/gateway/c/core/oai/include/spgw_types.hpp" extern task_zmq_ctx_t grpc_service_task_zmq_ctx; diff --git a/lte/gateway/c/core/oai/lib/pcef/pcef_handlers.hpp b/lte/gateway/c/core/oai/lib/pcef/pcef_handlers.hpp index 1e0d36788166..eddef1e525a6 100644 --- a/lte/gateway/c/core/oai/lib/pcef/pcef_handlers.hpp +++ b/lte/gateway/c/core/oai/lib/pcef/pcef_handlers.hpp @@ -27,8 +27,11 @@ extern "C" { #include "lte/gateway/c/core/oai/lib/itti/intertask_interface.h" #include "lte/gateway/c/core/oai/common/common_types.h" #include "lte/gateway/c/core/oai/include/ip_forward_messages_types.h" -#include "lte/gateway/c/core/oai/include/spgw_types.hpp" +#ifdef __cplusplus +} +#endif +#include "lte/gateway/c/core/oai/include/spgw_types.hpp" struct pcef_create_session_data { char msisdn[MSISDN_LENGTH + 1]; char imeisv[IMEISV_DIGITS_MAX + 1]; @@ -104,6 +107,3 @@ int get_imeisv_from_session_req( void convert_imeisv_to_string(char* imeisv); bool pcef_delete_dedicated_bearer(const char* imsi, const ebi_list_t ebi_list); -#ifdef __cplusplus -} -#endif diff --git a/lte/gateway/c/core/oai/lib/s6a_proxy/CMakeLists.txt b/lte/gateway/c/core/oai/lib/s6a_proxy/CMakeLists.txt index 01b92f63e9a9..6a2e8c55c85c 100644 --- a/lte/gateway/c/core/oai/lib/s6a_proxy/CMakeLists.txt +++ b/lte/gateway/c/core/oai/lib/s6a_proxy/CMakeLists.txt @@ -31,6 +31,7 @@ add_library(LIB_S6A_PROXY ) target_link_libraries(LIB_S6A_PROXY + LIB_STORE COMMON LIB_MOBILITY_CLIENT ASYNC_GRPC SERVICE_REGISTRY MAGMA_CONFIG LIB_BSTR LIB_HASHTABLE TASK_S6A diff --git a/lte/gateway/c/core/oai/lib/s6a_proxy/S6aClient.cpp b/lte/gateway/c/core/oai/lib/s6a_proxy/S6aClient.cpp index d033c57fb150..e33b32157b21 100644 --- a/lte/gateway/c/core/oai/lib/s6a_proxy/S6aClient.cpp +++ b/lte/gateway/c/core/oai/lib/s6a_proxy/S6aClient.cpp @@ -222,4 +222,80 @@ void 
S6aClient::update_location_request( resp->set_response_reader(std::move(resp_rdr)); } +void S6aClient::convert_ula_to_subscriber_data( + feg::UpdateLocationAnswer response, magma::lte::SubscriberData* sub_data) { + if (response.apn_size() < 1) { + std::cout << "No APN configurations received" << std::endl; + return; + } + std::cout << "Converting ULA TO Subscriber Data object" << std::endl; + for (int i = 0; i < response.apn_size(); i++) { + auto apn = response.apn(i); + auto sub_apn_config = sub_data->mutable_non_3gpp()->add_apn_config(); + if (apn.context_id() != 0) { + sub_apn_config->set_context_id(apn.context_id()); + } + + if (apn.service_selection().size() > 0) { + sub_apn_config->set_service_selection(apn.service_selection()); + } + + if (apn.has_qos_profile()) { + auto qos_profile = sub_apn_config->mutable_qos_profile(); + if (apn.qos_profile().class_id()) { + qos_profile->set_class_id(apn.qos_profile().class_id()); + } + if (apn.qos_profile().priority_level()) { + qos_profile->set_priority_level(apn.qos_profile().priority_level()); + } + if (apn.qos_profile().preemption_capability()) { + qos_profile->set_preemption_capability( + apn.qos_profile().preemption_capability()); + } + if (apn.qos_profile().preemption_vulnerability()) { + qos_profile->set_preemption_vulnerability( + apn.qos_profile().preemption_vulnerability()); + } + } + + if (apn.has_ambr()) { + auto ambr = sub_apn_config->mutable_ambr(); + if (apn.ambr().max_bandwidth_dl() != 0) { + ambr->set_max_bandwidth_dl(apn.ambr().max_bandwidth_dl()); + } + if (apn.ambr().max_bandwidth_ul() != 0) { + ambr->set_max_bandwidth_ul(apn.ambr().max_bandwidth_ul()); + } + + ambr->set_br_unit( + (magma::lte::AggregatedMaximumBitrate_BitrateUnitsAMBR)apn.ambr() + .unit()); + } + + sub_apn_config->set_pdn((magma::lte::APNConfiguration_PDNType)apn.pdn()); + + // Only the first IP is assigned to the subscriber in the current + // implementation + if (apn.served_party_ip_address_size() > 0) { + 
sub_apn_config->set_assigned_static_ip(apn.served_party_ip_address(0)); + } + + if (apn.has_resource()) { + auto resource = sub_apn_config->mutable_resource(); + if (apn.resource().apn_name().size() > 0) { + resource->set_apn_name(apn.resource().apn_name()); + } + if (apn.resource().gateway_ip().size() > 0) { + resource->set_gateway_ip(apn.resource().gateway_ip()); + } + if (apn.resource().gateway_mac().size() > 0) { + resource->set_gateway_mac(apn.resource().gateway_mac()); + } + if (apn.resource().vlan_id() != 0) { + resource->set_vlan_id(apn.resource().vlan_id()); + } + } + } +} + } // namespace magma diff --git a/lte/gateway/c/core/oai/lib/s6a_proxy/S6aClient.hpp b/lte/gateway/c/core/oai/lib/s6a_proxy/S6aClient.hpp index 241a5026835c..5d2bcda23120 100644 --- a/lte/gateway/c/core/oai/lib/s6a_proxy/S6aClient.hpp +++ b/lte/gateway/c/core/oai/lib/s6a_proxy/S6aClient.hpp @@ -23,10 +23,13 @@ #include #include +#include "feg/protos/s6a_proxy.pb.h" #include "feg/protos/s6a_proxy.grpc.pb.h" #include "orc8r/gateway/c/common/async_grpc/GRPCReceiver.hpp" #include "lte/gateway/c/core/oai/include/s6a_messages_types.h" +#include "lte/protos/subscriberdb.pb.h" + extern "C" { #include "lte/gateway/c/core/oai/lib/itti/intertask_interface.h" @@ -75,6 +78,9 @@ class S6aClient : public GRPCReceiver { const s6a_update_location_req_t* const msg, std::function callback); + static void convert_ula_to_subscriber_data(feg::UpdateLocationAnswer response, + lte::SubscriberData* sub_data); + public: S6aClient(S6aClient const&) = delete; void operator=(S6aClient const&) = delete; diff --git a/lte/gateway/c/core/oai/lib/s6a_proxy/s6a_client_api.cpp b/lte/gateway/c/core/oai/lib/s6a_proxy/s6a_client_api.cpp index a7d0b2bfa880..7d2ac3a3a6c0 100644 --- a/lte/gateway/c/core/oai/lib/s6a_proxy/s6a_client_api.cpp +++ b/lte/gateway/c/core/oai/lib/s6a_proxy/s6a_client_api.cpp @@ -30,6 +30,8 @@ #include "lte/gateway/c/core/oai/lib/itti/intertask_interface.h" #include 
"lte/gateway/c/core/oai/lib/itti/intertask_interface_types.h" #include "lte/gateway/c/core/oai/lib/itti/itti_types.h" +#include "lte/gateway/c/core/oai/lib/store/sqlite.hpp" +#include "lte/protos/subscriberdb.pb.h" extern "C" {} @@ -144,6 +146,19 @@ static void s6a_handle_update_location_ans(const std::string& imsi, itti_msg->result.choice.base = DIAMETER_SUCCESS; magma::convert_proto_msg_to_itti_s6a_update_location_ans(response, itti_msg); + + // convert ULA response to SubscriberData and write to subscriberdb + if (S6aClient::get_cloud_subscriberdb_enabled()) { + magma::lte::SubscriberData sub_data = magma::lte::SubscriberData(); + auto sub_id = sub_data.mutable_sid(); + sub_id->set_id(imsi); + sub_id->set_type(magma::lte::SubscriberID::IMSI); + magma::S6aClient::convert_ula_to_subscriber_data(response, &sub_data); + magma::lte::SqliteStore* sqlObj = new magma::lte::SqliteStore( + "/var/opt/magma/", 2); // location is same as SubscriberDB + sqlObj->add_subscriber(sub_data); + } + } else { itti_msg->result.present = S6A_RESULT_EXPERIMENTAL; itti_msg->result.choice.experimental = diff --git a/lte/gateway/c/core/oai/lib/store/CMakeLists.txt b/lte/gateway/c/core/oai/lib/store/CMakeLists.txt new file mode 100644 index 000000000000..58656759a0e8 --- /dev/null +++ b/lte/gateway/c/core/oai/lib/store/CMakeLists.txt @@ -0,0 +1,40 @@ +# Copyright 2022 The Magma Authors. +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +cmake_minimum_required(VERSION 3.7.2) +set(CMAKE_CXX_FLAGS "-Wno-write-strings -Wno-literal-suffix") + +#compile the required protos +set(SUB_LTE_PROTOS subscriberdb apn) +set(SUB_ORC8R_PROTOS digest) + +list(APPEND PROTO_SRCS "") +list(APPEND PROTO_HDRS "") + +create_proto_dir("lte" LTE_OUT_DIR) +create_proto_dir("orc8r" ORC8R_OUT_DIR) + +generate_cpp_protos("${SUB_LTE_PROTOS}" "${PROTO_SRCS}" + "${PROTO_HDRS}" ${LTE_PROTO_DIR} ${LTE_OUT_DIR}) + +generate_cpp_protos("${SUB_ORC8R_PROTOS}" "${PROTO_SRCS}" + "${PROTO_HDRS}" ${ORC8R_PROTO_DIR} ${ORC8R_OUT_DIR}) + +include_directories(${CMAKE_CURRENT_BINARY_DIR}) +include_directories(${LTE_OUT_DIR}) +include_directories(${ORC8R_OUT_DIR}) + +add_library(LIB_STORE sqlite.cpp ${PROTO_SRCS} ${PROTO_HDRS}) +target_link_libraries(LIB_STORE MAGMA_CONFIG COMMON protobuf sqlite3) + +target_include_directories(LIB_STORE PUBLIC + $ENV{MAGMA_ROOT} + ${CMAKE_CURRENT_SOURCE_DIR} + ${MAGMA_COMMON_DIR} + ) diff --git a/lte/gateway/c/core/oai/lib/store/sqlite.cpp b/lte/gateway/c/core/oai/lib/store/sqlite.cpp new file mode 100644 index 000000000000..1f80105fc128 --- /dev/null +++ b/lte/gateway/c/core/oai/lib/store/sqlite.cpp @@ -0,0 +1,145 @@ +/* + * Copyright 2022 The Magma Authors. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "lte/gateway/c/core/oai/lib/store/sqlite.hpp" + +#include <sqlite3.h> + +#include <cmath> +#include <iostream> + +#include "lte/protos/subscriberdb.pb.h" + +using google::protobuf::Message; +namespace magma { +namespace lte { +SqliteStore::SqliteStore(std::string db_location, int sid_digits) { + init_db_connection(db_location, sid_digits); +} +void SqliteStore::init_db_connection(std::string db_location, int sid_digits) { + _sid_digits = sid_digits; + _n_shards = std::pow(10, sid_digits); + _db_locations = _create_db_locations(db_location, _n_shards); + _create_store(); +} + +std::vector<std::string> SqliteStore::_create_db_locations( + std::string db_location, int n_shards) { + // in memory if db_location is not specified + if (db_location.length() == 0) { + db_location = "/var/opt/magma/"; + } + + std::vector<std::string> db_location_list; + for (int shard = 0; shard < n_shards; shard++) { + std::string to_push = "file:" + db_location + "subscriber" + + std::to_string(shard) + ".db?cache=shared"; + db_location_list.push_back(to_push); + std::cout << "Subscriber DB location: " << db_location_list[shard] + << std::endl; + } + return db_location_list; +} + +void SqliteStore::_create_store() { + int rc; + for (auto db_location_s : _db_locations) { + sqlite3* db; + int rc; + const char* db_location = db_location_s.c_str(); + rc = sqlite3_open(db_location, &db); + if (rc) { + std::cout << "Cannot open database " << sqlite3_errmsg(db) << std::endl; + } else { + std::cout << "Database opened successfully at" << db_location + << std::endl; + } + + const char* sql = + "CREATE TABLE IF NOT EXISTS subscriberdb" + "(subscriber_id text PRIMARY KEY, data text)"; + char* zErrMsg; + + rc = sqlite3_exec(db, sql, NULL, 0, &zErrMsg); + if (rc != SQLITE_OK) { + std::cout << "SQL Error " << zErrMsg << std::endl; + sqlite3_free(zErrMsg); + } else { + std::cout << "Table created successfully!!"
<< std::endl; + } + + sqlite3_close(db); + } +} + +void SqliteStore::add_subscriber(const SubscriberData& subscriber_data) { + std::string sid_s = _get_sid(subscriber_data); + const char* sid = sid_s.c_str(); + std::string data_str_s; + subscriber_data.SerializeToString(&data_str_s); + const char* data_str = data_str_s.c_str(); + + std::string db_location_s = _db_locations[_sid2bucket(sid)]; + const char* db_location = db_location_s.c_str(); + sqlite3* db; + int rc_open = sqlite3_open(db_location, &db); + if (rc_open) { + std::cout << "Cannot open database " << sqlite3_errmsg(db) << std::endl; + } else { + std::cout << "Database " << db_location << " opened successfully " + << std::endl; + } + const char* sql = + "INSERT INTO subscriberdb(subscriber_id, data) " + "VALUES (?, ?)"; + sqlite3_stmt* stmt; + const char* pzTail; + int rc_prep = sqlite3_prepare_v2(db, sql, strlen(sql), &stmt, &pzTail); + if (rc_prep == SQLITE_OK) { + sqlite3_bind_text(stmt, 1, sid, strlen(sid), NULL); + sqlite3_bind_blob(stmt, 2, data_str, strlen(data_str), NULL); + sqlite3_step(stmt); + sqlite3_finalize(stmt); + std::cout << "APN information for " << sid + << " has been written to SubscriberDB" << std::endl; + } else { + std::cout << "SQL Error " << std::endl; + } +} + +const char* SqliteStore::_get_sid(const SubscriberData& subscriber_data) { + if (subscriber_data.sid().type() == SubscriberID::IMSI) { + std::cout << "Valid sid: " << subscriber_data.sid().id() << std::endl; + std::string sid_s = "IMSI" + subscriber_data.sid().id(); + return sid_s.c_str(); + } else { + std::cout << "Invalid sid " << subscriber_data.sid().id() << " type " + << subscriber_data.sid().type() << std::endl; + return NULL; + } +} + +int SqliteStore::_sid2bucket(std::string sid) { + int bucket; + try { + bucket = std::stoi(sid.substr(sid.length() - _sid_digits, sid.length())); + } catch (int bucket) { + std::cout << "Last " << _sid_digits << "digits of subscriber id " << sid + << " cannot be mapped to a bucket, 
default to bucket 0" + << std::endl; + bucket = 0; + } + return bucket; +} +} // namespace lte +} // namespace magma diff --git a/lte/gateway/c/core/oai/lib/store/sqlite.hpp b/lte/gateway/c/core/oai/lib/store/sqlite.hpp new file mode 100644 index 000000000000..bba33e22f304 --- /dev/null +++ b/lte/gateway/c/core/oai/lib/store/sqlite.hpp @@ -0,0 +1,47 @@ +/* + * Copyright 2022 The Magma Authors. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include <string> +#include <vector> +#include "lte/protos/subscriberdb.pb.h" + +using google::protobuf::Message; + +namespace magma { +namespace lte { +class SqliteStore { + public: + SqliteStore(std::string db_location, int sid_digits); + + // Initialize data store + void init_db_connection(std::string db_location, int sid_digits = 2); + + // Add subscriber + void add_subscriber(const SubscriberData& subscriber_data); + + // Delete subscriber + void delete_subscriber(); // TODO(vroon2703): add the parameters + + private: + int _sid_digits; + int _n_shards; + std::vector<std::string> _db_locations; + std::vector<std::string> _create_db_locations(std::string db_location, + int _n_shards); + void _create_store(); + const char* _get_sid(const SubscriberData& subscriber_data); + // Map subscriber ID to bucket + int _sid2bucket(std::string sid); +}; +} // namespace lte +} // namespace magma diff --git a/lte/gateway/c/core/oai/oai_mme/oai_mme.c b/lte/gateway/c/core/oai/oai_mme/oai_mme.c index 77db292a6170..46b42441c7ca 100644 --- a/lte/gateway/c/core/oai/oai_mme/oai_mme.c +++ b/lte/gateway/c/core/oai/oai_mme/oai_mme.c @@ -63,9 +63,6 @@ #include
"lte/gateway/c/core/oai/include/service303.hpp" #include "lte/gateway/c/core/oai/common/shared_ts_log.h" #include "lte/gateway/c/core/oai/include/grpc_service.hpp" -#if MME_BENCHMARK -#include "lte/gateway/c/core/oai/tasks/mme_app/experimental/mme_app_serialization.hpp" -#endif static void send_timer_recovery_message(void); @@ -183,14 +180,6 @@ int main(int argc, char* argv[]) { send_timer_recovery_message(); } -#if MME_BENCHMARK - if (mme_config.run_mode == RUN_MODE_TEST) { - if (mme_config.test_type == TEST_SERIALIZATION_PROTOBUF) { - mme_app_schedule_test_protobuf_serialization(mme_config.test_param); - } - } -#endif - /* * Handle signals here */ diff --git a/lte/gateway/c/core/oai/tasks/amf/amf_as.cpp b/lte/gateway/c/core/oai/tasks/amf/amf_as.cpp index 4b3b9de79ca1..77dd9ebae9ee 100644 --- a/lte/gateway/c/core/oai/tasks/amf/amf_as.cpp +++ b/lte/gateway/c/core/oai/tasks/amf/amf_as.cpp @@ -498,6 +498,26 @@ int amf_reg_acceptmsg(const guti_m5_t* guti, const tai_t* tai, .unit = 0; nas_msg->security_protected.plain.amf.msg.registrationacceptmsg.gprs_timer .timervalue = 6; + nas_msg->security_protected.plain.amf.msg.registrationacceptmsg + .network_feature.iei = NETWORK_FEATURE; + nas_msg->security_protected.plain.amf.msg.registrationacceptmsg + .network_feature.len = 2; + nas_msg->security_protected.plain.amf.msg.registrationacceptmsg + .network_feature.MPSI = 0; + nas_msg->security_protected.plain.amf.msg.registrationacceptmsg + .network_feature.IWK_N26 = 0; + nas_msg->security_protected.plain.amf.msg.registrationacceptmsg + .network_feature.EMF = 0; + nas_msg->security_protected.plain.amf.msg.registrationacceptmsg + .network_feature.EMC = 0; + nas_msg->security_protected.plain.amf.msg.registrationacceptmsg + .network_feature.IMS_VoPS_N3GPP = 0; + nas_msg->security_protected.plain.amf.msg.registrationacceptmsg + .network_feature.IMS_VoPS_3GPP = 0; + nas_msg->security_protected.plain.amf.msg.registrationacceptmsg + .network_feature.MCSI = 0; + 
nas_msg->security_protected.plain.amf.msg.registrationacceptmsg + .network_feature.EMCN3 = 0; size += MOBILE_IDENTITY_MAX_LENGTH; size += 20; @@ -1278,6 +1298,14 @@ status_code_e initial_context_setup_request(amf_ue_ngap_id_t ue_id, } } + // UE Aggregate bit rate IE will be added only if PDU Session Resource setup + // transfer IE is added in initial context setup request message. + if (req->PDU_Session_Resource_Setup_Transfer_List.no_of_items > 0) { + // Get the ambr values + amf_smf_context_ue_aggregate_max_bit_rate_get( + amf_ctx, &(req->ue_aggregate_max_bit_rate.dl), + &(req->ue_aggregate_max_bit_rate.ul)); + } if (nas_msg) { req->nas_pdu = nas_msg; } else { diff --git a/lte/gateway/c/core/oai/tasks/amf/amf_session_manager_pco.cpp b/lte/gateway/c/core/oai/tasks/amf/amf_session_manager_pco.cpp index 9373e86aa1c8..0328ac6bb9b4 100644 --- a/lte/gateway/c/core/oai/tasks/amf/amf_session_manager_pco.cpp +++ b/lte/gateway/c/core/oai/tasks/amf/amf_session_manager_pco.cpp @@ -117,6 +117,31 @@ uint16_t sm_process_pco_dns_server_request( (poc_id_resp.length + SM_PCO_IPCP_HDR_LENGTH)); } +uint16_t sm_process_pco_p_cscf_address_request( + protocol_configuration_options_t* const pco_resp) { + OAILOG_FUNC_IN(LOG_AMF_APP); + in_addr_t ipcp_out_dns_prim_ipv4_addr = INADDR_NONE; + pco_protocol_or_container_id_t poc_id_resp = {0}; + uint8_t dns_array[4]; + + amf_config_read_lock(&amf_config); + ipcp_out_dns_prim_ipv4_addr = amf_config.ipv4.default_dns.s_addr; + amf_config_unlock(&amf_config); + + poc_id_resp.id = PCO_CI_P_CSCF_IPV4_ADDRESS_REQUEST; + poc_id_resp.length = 4; + dns_array[0] = (uint8_t)(ipcp_out_dns_prim_ipv4_addr & 0x000000FF); + dns_array[1] = (uint8_t)((ipcp_out_dns_prim_ipv4_addr >> 8) & 0x000000FF); + dns_array[2] = (uint8_t)((ipcp_out_dns_prim_ipv4_addr >> 16) & 0x000000FF); + dns_array[3] = (uint8_t)((ipcp_out_dns_prim_ipv4_addr >> 24) & 0x000000FF); + poc_id_resp.contents = blk2bstr(dns_array, sizeof(dns_array)); + + 
sm_pco_push_protocol_or_container_id(pco_resp, &poc_id_resp); + + OAILOG_FUNC_RETURN(LOG_AMF_APP, + (poc_id_resp.length + SM_PCO_IPCP_HDR_LENGTH)); +} + uint16_t sm_process_pco_request_ipcp( protocol_configuration_options_t* pco_resp, const pco_protocol_or_container_id_t* const poc_id) { @@ -216,6 +241,10 @@ uint16_t sm_process_pco_request(protocol_configuration_options_t* pco_req, pco_resp, &pco_req->protocol_or_container_ids[id]); break; + case PCO_CI_P_CSCF_IPV4_ADDRESS_REQUEST: + length += sm_process_pco_p_cscf_address_request(pco_resp); + break; + case PCO_CI_DNS_SERVER_IPV4_ADDRESS_REQUEST: length += sm_process_pco_dns_server_request(pco_resp); break; diff --git a/lte/gateway/c/core/oai/tasks/amf/amf_smf_send.cpp b/lte/gateway/c/core/oai/tasks/amf/amf_smf_send.cpp index 9b116aad1ff0..d04801f6fc07 100644 --- a/lte/gateway/c/core/oai/tasks/amf/amf_smf_send.cpp +++ b/lte/gateway/c/core/oai/tasks/amf/amf_smf_send.cpp @@ -928,6 +928,16 @@ status_code_e amf_smf_handle_ip_address_response( reinterpret_cast(response_p->pdu_session_id)); return rc; } + if (response_p->result != SGI_STATUS_OK) { + rc = amf_pdu_session_establishment_reject( + ue_context->amf_ue_ngap_id, response_p->pdu_session_id, response_p->pti, + AMF_CAUSE_PROTOCOL_ERROR); + ue_context->amf_context.smf_ctxt_map.erase(response_p->pdu_session_id); + OAILOG_ERROR(LOG_AMF_APP, + "Ip address allocation failed. 
Rejecting with cause %d", + AMF_CAUSE_PROTOCOL_ERROR); + OAILOG_FUNC_RETURN(LOG_AMF_APP, rc); + } rc = amf_update_smf_context_pdu_ip(smf_ctx, &(response_p->paa)); diff --git a/lte/gateway/c/core/oai/tasks/amf/amf_smf_session_qos.cpp b/lte/gateway/c/core/oai/tasks/amf/amf_smf_session_qos.cpp index 0fb38365ccb4..0e46f94c3b0d 100644 --- a/lte/gateway/c/core/oai/tasks/amf/amf_smf_session_qos.cpp +++ b/lte/gateway/c/core/oai/tasks/amf/amf_smf_session_qos.cpp @@ -138,9 +138,10 @@ int amf_smf_session_api_fill_qos_ie_info(std::shared_ptr smf_ctx, qos_rule.len = QOS_DEL_RULE_MIN_LEN; } - if (PDU_SESSION_DEFAULT_QFI == qos_flow_req_item->qos_flow_identifier) { + if (smf_ctx->subscribed_qos.qci == + qos_flow_req_item->qos_flow_identifier) { qos_rule.dqr_bit = QOS_RULE_DQR_BIT_SET; - qos_rule.qos_rule_id = PDU_SESSION_DEFAULT_QFI; + qos_rule.qos_rule_id = qos_flow_req_item->qos_flow_identifier; } else { qos_rule.dqr_bit = 0; qos_rule.qos_rule_id = qos_flow_req_item->qos_flow_identifier; @@ -242,6 +243,12 @@ int amf_smf_session_api_fill_qos_ie_info(std::shared_ptr smf_ctx, flow_des.numOfParams++; } + if (flow_des.numOfParams > 0) { + flow_des.Ebit = 1; + } else { + flow_des.Ebit = 0; + } + if (flow_des.numOfParams) { // Convert Authorized qos into bstring int encoded_result = flow_des.EncodeM5GQosFlowDescription( @@ -278,7 +285,9 @@ void amf_smf_session_set_default_qos_rule(qos_flow_list_t* pti_flow_list) { return; } - qos_flow_req_item->qos_flow_identifier = PDU_SESSION_DEFAULT_QFI; + if (!qos_flow_req_item->qos_flow_identifier) { + qos_flow_req_item->qos_flow_identifier = PDU_SESSION_DEFAULT_QFI; + } qos_flow_req_item->qos_flow_action = policy_action_add; qos_flow_req_item->ul_tft.tftoperationcode = TRAFFIC_FLOW_TEMPLATE_OPCODE_CREATE_NEW_TFT; diff --git a/lte/gateway/c/core/oai/tasks/grpc_service/CMakeLists.txt b/lte/gateway/c/core/oai/tasks/grpc_service/CMakeLists.txt index 9b394a7bcf19..439af2452084 100644 --- a/lte/gateway/c/core/oai/tasks/grpc_service/CMakeLists.txt +++ 
b/lte/gateway/c/core/oai/tasks/grpc_service/CMakeLists.txt @@ -53,8 +53,8 @@ generate_grpc_protos("${SMOSRV_LTE_GRPC_PROTOS}" "${PROTO_SRCS}" "${PROTO_HDRS}" ${LTE_PROTO_DIR} ${LTE_OUT_DIR}) # S1ap Service -set(S1APSRV_LTE_CPP_PROTOS s1ap_service) -set(S1APSRV_LTE_GRPC_PROTOS s1ap_service) +set(S1APSRV_LTE_CPP_PROTOS s1ap_service oai/s1ap_state) +set(S1APSRV_LTE_GRPC_PROTOS s1ap_service oai/s1ap_state) generate_cpp_protos("${S1APSRV_LTE_CPP_PROTOS}" "${PROTO_SRCS}" "${PROTO_HDRS}" ${LTE_PROTO_DIR} ${LTE_OUT_DIR}) generate_grpc_protos("${S1APSRV_LTE_GRPC_PROTOS}" "${PROTO_SRCS}" diff --git a/lte/gateway/c/core/oai/tasks/gtpv1-u/gtpv1u_sgw_defs.h b/lte/gateway/c/core/oai/tasks/gtpv1-u/gtpv1u_sgw_defs.h index b30a65501f24..b112bfbdf334 100644 --- a/lte/gateway/c/core/oai/tasks/gtpv1-u/gtpv1u_sgw_defs.h +++ b/lte/gateway/c/core/oai/tasks/gtpv1-u/gtpv1u_sgw_defs.h @@ -17,11 +17,16 @@ #ifndef FILE_GTPV1U_SGW_DEFS_SEEN #define FILE_GTPV1U_SGW_DEFS_SEEN +#ifdef __cplusplus +extern "C" { +#endif #include "lte/gateway/c/core/oai/include/gtpv1u_types.h" #include "lte/gateway/c/core/oai/include/spgw_config.h" -#include "lte/gateway/c/core/oai/include/spgw_state.hpp" +#ifdef __cplusplus +} +#endif -int gtpv1u_init(spgw_state_t* spgw_state_p, spgw_config_t* spgw_config, +int gtpv1u_init(gtpv1u_data_t* gtpv1u_data, spgw_config_t* spgw_config, bool persist_state); void gtpv1u_exit(void); diff --git a/lte/gateway/c/core/oai/tasks/gtpv1-u/gtpv1u_task.c b/lte/gateway/c/core/oai/tasks/gtpv1-u/gtpv1u_task.c index 88998425c240..02fb0d1e7bdf 100644 --- a/lte/gateway/c/core/oai/tasks/gtpv1-u/gtpv1u_task.c +++ b/lte/gateway/c/core/oai/tasks/gtpv1-u/gtpv1u_task.c @@ -34,9 +34,16 @@ #include "lte/gateway/c/core/oai/lib/itti/intertask_interface_types.h" #include "lte/gateway/c/core/oai/tasks/gtpv1-u/gtp_tunnel_upf.h" #include "lte/gateway/c/core/oai/tasks/gtpv1-u/gtpv1u.h" -#include "lte/gateway/c/core/oai/tasks/gtpv1-u/gtpv1u_sgw_defs.h" #include 
"lte/gateway/c/core/oai/tasks/sgw/pgw_ue_ip_address_alloc.hpp" +#ifdef __cplusplus +extern "C" { +#endif +#include "lte/gateway/c/core/oai/tasks/gtpv1-u/gtpv1u_sgw_defs.h" +#ifdef __cplusplus +} +#endif + const struct gtp_tunnel_ops* gtp_tunnel_ops; static struct in_addr current_ue_net; static int current_ue_net_mask; @@ -106,7 +113,7 @@ static bool ue_ip_is_in_subnet(struct in_addr _net, int mask, } //------------------------------------------------------------------------------ -int gtpv1u_init(spgw_state_t* spgw_state_p, spgw_config_t* spgw_config, +int gtpv1u_init(gtpv1u_data_t* gtpv1u_data, spgw_config_t* spgw_config, bool persist_state) { int rv = 0; struct in_addr netaddr; @@ -151,8 +158,7 @@ int gtpv1u_init(spgw_state_t* spgw_state_p, spgw_config_t* spgw_config, // Init GTP device, using the same MTU as SGi. gtp_tunnel_ops->init(&netaddr, netmask, spgw_config->pgw_config.ipv4.mtu_SGI, - &spgw_state_p->gtpv1u_data.fd0, - &spgw_state_p->gtpv1u_data.fd1u, persist_state); + &gtpv1u_data->fd0, &gtpv1u_data->fd1u, persist_state); // END-GTP quick integration only for evaluation purpose diff --git a/lte/gateway/c/core/oai/tasks/ha/ha_service_handler.cpp b/lte/gateway/c/core/oai/tasks/ha/ha_service_handler.cpp index ea453f5d970a..cb6061f4ce51 100644 --- a/lte/gateway/c/core/oai/tasks/ha/ha_service_handler.cpp +++ b/lte/gateway/c/core/oai/tasks/ha/ha_service_handler.cpp @@ -98,9 +98,15 @@ bool trigger_agw_offload_for_ue(const hash_key_t keyP, void* const elementP, IMSI_STRING_TO_IMSI64(offload_request->imsi, &imsi64); - enb_description_t* enb_ref_p = - s1ap_state_get_enb(s1ap_state, ue_context_p->sctp_assoc_id_key); + enb_description_t* enb_ref_p = magma::lte::s1ap_state_get_enb( + s1ap_state, ue_context_p->sctp_assoc_id_key); + if (!enb_ref_p) { + OAILOG_ERROR_UE(LOG_UTIL, imsi64, + "Failed to find enb_ref_p for assoc_id :%u", + ue_context_p->sctp_assoc_id_key); + return false; + } // Return if this UE does not satisfy any of the filtering criteria if ((imsi64 !=
ue_context_p->emm_context._imsi64) && (offload_request->eNB_id != enb_ref_p->enb_id)) { diff --git a/lte/gateway/c/core/oai/tasks/mme_app/CMakeLists.txt b/lte/gateway/c/core/oai/tasks/mme_app/CMakeLists.txt index f18271b7fe88..86bf703283d6 100644 --- a/lte/gateway/c/core/oai/tasks/mme_app/CMakeLists.txt +++ b/lte/gateway/c/core/oai/tasks/mme_app/CMakeLists.txt @@ -14,14 +14,6 @@ if (EMBEDDED_SGW) set(S11_RELATED_SRCS mme_app_embedded_spgw.c) endif (EMBEDDED_SGW) -list(APPEND MME_BENCHMARK_SRCS "") -if (MME_BENCHMARK) - list(APPEND MME_BENCHMARK_SRCS - "experimental/mme_app_serialization.cpp" - "experimental/mme_app_serialization.hpp" - ) -endif (MME_BENCHMARK) - add_library(TASK_MME_APP mme_app_capabilities.c mme_app_context.c @@ -66,7 +58,6 @@ add_library(TASK_MME_APP ${PROTO_SRCS} ${PROTO_HDRS} ${S11_RELATED_SRCS} - ${MME_BENCHMARK_SRCS} ) target_compile_definitions(TASK_MME_APP PRIVATE diff --git a/lte/gateway/c/core/oai/tasks/mme_app/experimental/mme_app_serialization.cpp b/lte/gateway/c/core/oai/tasks/mme_app/experimental/mme_app_serialization.cpp deleted file mode 100644 index c7c0eab74d8d..000000000000 --- a/lte/gateway/c/core/oai/tasks/mme_app/experimental/mme_app_serialization.cpp +++ /dev/null @@ -1,285 +0,0 @@ -/** - * Copyright 2021 The Magma Authors. - * - * This source code is licensed under the BSD-style license found in the - * LICENSE file in the root directory of this source tree. - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -// --C includes --------------------------------------------------------------- -#include "lte/gateway/c/core/oai/tasks/mme_app/experimental/mme_app_serialization.hpp" -#include -#include // rusage() -#include // rusage() -// --C++ includes ------------------------------------------------------------- -#include -#include -#include -#include -// --Other includes ----------------------------------------------------------- -extern "C" { -#include "lte/gateway/c/core/common/dynamic_memory_check.h" -#include "lte/gateway/c/core/oai/common/common_types.h" -#include "lte/gateway/c/core/oai/common/log.h" -#include "lte/gateway/c/core/oai/include/mme_app_state.hpp" -#include "lte/gateway/c/core/oai/lib/3gpp/3gpp_23.003.h" -#include "lte/gateway/c/core/oai/lib/itti/intertask_interface.h" -#include "lte/gateway/c/core/oai/lib/itti/intertask_interface_types.h" -#include "lte/gateway/c/core/oai/lib/itti/itti_types.h" -#include "lte/gateway/c/core/oai/tasks/nas/emm/emm_data.h" -#include "lte/gateway/c/core/oai/tasks/nas/emm/emm_proc.h" -#include "lte/gateway/c/core/oai/tasks/nas/esm/esm_proc.h" -#include "lte/gateway/c/core/oai/tasks/nas/nas_procedures.h" -} - -#include "lte/gateway/c/core/oai/tasks/mme_app/mme_app_state_manager.hpp" -extern task_zmq_ctx_t main_zmq_ctx; - -using magma::lte::MmeNasStateManager; - -uint64_t kFirstImsi = 1010000000000; - -std::vector mme_app_allocate_ues(uint num_ues); -void mme_app_deallocate_ues(mme_app_desc_t* mme_app_desc, - std::vector* contexts); -void mme_app_insert_ues(mme_app_desc_t* mme_app_desc, - const std::vector& contexts); -void mme_app_serialize_ues(const std::vector& contexts); -void mme_app_deserialize_ues(void); - -void mme_app_schedule_test_protobuf_serialization(uint num_ues) { - MessageDef* message_p = - itti_alloc_new_message(TASK_UNKNOWN, MME_APP_TEST_PROTOBUF_SERIALIZATION); - MME_APP_TEST_PROTOBUF_SERIALIZATION(message_p).num_ues = num_ues; - send_msg_to_task(&main_zmq_ctx, TASK_MME_APP, message_p); - return; -} - 
-std::vector mme_app_allocate_ues(uint num_ues) { - enb_s1ap_id_key_t enb_s1ap_id_key = INVALID_ENB_UE_S1AP_ID_KEY; - unsigned int seed = 0; - enb_ue_s1ap_id_t enb_ue_s1ap_id = rand_r(&seed) & 0X00FFFFFF; - mme_ue_s1ap_id_t mme_ue_s1ap_id = rand_r(&seed); - std::vector contexts; - - contexts.reserve(num_ues); - - for (int i = 0; i < num_ues; i++) { - ue_mm_context_t* ue_mm_context = mme_create_new_ue_context(); - emm_context_t* emm_ctx = &ue_mm_context->emm_context; - esm_context_t* esm_ctx = &emm_ctx->esm_ctx; - - enb_ue_s1ap_id++; - mme_ue_s1ap_id++; - - imsi64_t imsi64 = kFirstImsi + i; - imsi_t imsi = {}; - imsi.u.num.digit1 = (uint8_t)((imsi64_t)(imsi64 / std::pow(10, 14)) % 10); - imsi.u.num.digit2 = (uint8_t)((imsi64_t)(imsi64 / std::pow(10, 13)) % 10); - imsi.u.num.digit3 = (uint8_t)((imsi64_t)(imsi64 / std::pow(10, 12)) % 10); - imsi.u.num.digit4 = (uint8_t)((imsi64_t)(imsi64 / std::pow(10, 11)) % 10); - imsi.u.num.digit5 = (uint8_t)((imsi64_t)(imsi64 / std::pow(10, 10)) % 10); - imsi.u.num.digit6 = (uint8_t)((imsi64_t)(imsi64 / std::pow(10, 9)) % 10); - imsi.u.num.digit7 = (uint8_t)((imsi64_t)(imsi64 / std::pow(10, 8)) % 10); - imsi.u.num.digit8 = (uint8_t)((imsi64_t)(imsi64 / std::pow(10, 7)) % 10); - imsi.u.num.digit9 = (uint8_t)((imsi64_t)(imsi64 / std::pow(10, 6)) % 10); - imsi.u.num.digit10 = (uint8_t)((imsi64_t)(imsi64 / std::pow(10, 5)) % 10); - imsi.u.num.digit11 = (uint8_t)((imsi64_t)(imsi64 / std::pow(10, 4)) % 10); - imsi.u.num.digit12 = (uint8_t)((imsi64_t)(imsi64 / std::pow(10, 3)) % 10); - imsi.u.num.digit13 = (uint8_t)((imsi64_t)(imsi64 / std::pow(10, 2)) % 10); - imsi.u.num.digit14 = (uint8_t)((imsi64_t)(imsi64 / std::pow(10, 1)) % 10); - imsi.u.num.digit15 = (uint8_t)((imsi64_t)(imsi64 / std::pow(10, 0)) % 10); - imsi.u.num.parity = 0xF; - emm_ctx->saved_imsi64 = imsi64; - - guti_t guti = {}; - guti.gummei.plmn.mcc_digit2 = 0; - guti.gummei.plmn.mcc_digit1 = 0; - guti.gummei.plmn.mnc_digit3 = 1; - guti.gummei.plmn.mcc_digit3 = 0xF; - 
guti.gummei.plmn.mnc_digit2 = 1; - guti.gummei.plmn.mnc_digit1 = 0; - guti.gummei.mme_gid = 1; - guti.gummei.mme_code = 1; - guti.m_tmsi = 2106150532 + i; - guti_t old_guti = {}; - old_guti.gummei.plmn.mcc_digit2 = 0; - old_guti.gummei.plmn.mcc_digit1 = 0; - old_guti.gummei.plmn.mnc_digit3 = 0; - old_guti.gummei.plmn.mcc_digit3 = 0; - old_guti.gummei.plmn.mnc_digit2 = 0; - old_guti.gummei.plmn.mnc_digit1 = 0; - old_guti.gummei.mme_gid = 0; - old_guti.gummei.mme_code = 0; - old_guti.m_tmsi = 429496729 + i; - - emm_ctx_set_valid_imsi(emm_ctx, &imsi, imsi64); - emm_ctx_set_valid_guti(emm_ctx, &guti); - emm_ctx_set_valid_old_guti(emm_ctx, &old_guti); - - emm_ctx->emm_cause = -1; - emm_ctx->_emm_fsm_state = EMM_REGISTERED; - - esm_ctx->n_active_ebrs = 2; - esm_ctx->esm_proc_data = - (struct esm_proc_data_s*)calloc(1, sizeof(struct esm_proc_data_s)); - struct esm_proc_data_s* esm_proc_data = esm_ctx->esm_proc_data; - esm_proc_data->pti = 1; - esm_proc_data->request_type = 1; - esm_proc_data->apn = bfromcstr("ims"); - esm_proc_data->pdn_cid = 1; - esm_proc_data->pdn_type = ESM_PDN_TYPE_IPV4; - esm_proc_data->bearer_qos.pci = 1; - esm_proc_data->bearer_qos.pl = 15; - esm_proc_data->bearer_qos.qci = 5; - // no bearer_qos.gbr, bearer_qos.mbr - // no pco - - MME_APP_ENB_S1AP_ID_KEY(ue_mm_context->enb_s1ap_id_key, rand() & 0X0000FFFF, - enb_ue_s1ap_id); - ue_mm_context->enb_ue_s1ap_id = enb_ue_s1ap_id; - ue_mm_context->mme_ue_s1ap_id = mme_ue_s1ap_id; - - contexts.push_back(ue_mm_context); - } - return contexts; -} - -void mme_app_deallocate_ues(mme_app_desc_t* mme_app_desc, - std::vector* contexts) { - while (!contexts.empty()) { - mme_remove_ue_context(&mme_app_desc->mme_ue_contexts, contexts.back()); - contexts.pop_back(); - } -} - -void mme_app_serialize_ues(mme_app_desc_t* mme_app_desc, - const std::vector& contexts) { - for (auto it = contexts.begin(); it != contexts.end(); ++it) { - put_mme_ue_state(mme_app_desc, (*it)->emm_context.saved_imsi64, true); - } -} - -void 
mme_app_insert_ues(mme_app_desc_t* mme_app_desc, - const std::vector& contexts) { - for (auto it = contexts.begin(); it != contexts.end(); ++it) { - if (mme_insert_ue_context(&mme_app_desc->mme_ue_contexts, *it) != - RETURNok) { - OAILOG_ERROR_UE( - LOG_MME_APP, (*it)->emm_context.saved_imsi64, - "Failed to insert UE contxt, MME UE S1AP Id: " MME_UE_S1AP_ID_FMT - "\n", - (*it)->mme_ue_s1ap_id); - return; - } - } -} - -void mme_app_deserialize_ues(void) { - mme_app_desc_t* mme_app_desc2 = get_mme_nas_state(true); - MmeNasStateManager::getInstance().read_ue_state_from_db(); -} - -void log_rusage(const struct rusage& ru, const char* context) { - std::cout << context - << "\tCpu user/system: " << static_cast(ru.ru_utime.tv_sec) - << "." << static_cast(ru.ru_utime.tv_usec) << " / " - << static_cast(ru.ru_stime.tv_sec) << "." - << static_cast(ru.ru_stime.tv_usec) << std::endl; - std::cout << context << "\tMaximum resident set size: " << ru.ru_maxrss - << std::endl; - std::cout << context - << "\tPage reclaims (soft/hard page faults): " << ru.ru_minflt - << " / " << ru.ru_majflt << std::endl; - std::cout << context << "\tBlock operations (input/output): " << ru.ru_inblock - << " / " << ru.ru_oublock << std::endl; - std::cout << context - << "\tContext switches (voluntary/involuntary): " << ru.ru_nvcsw - << " / " << ru.ru_nivcsw << std::endl; -} - -void log_rusage_diff(const struct rusage& ru_first, - const struct rusage& ru_last, const char* context) { - struct rusage ru_diff = {0}; - ru_diff.ru_utime.tv_sec = ru_last.ru_utime.tv_sec - ru_first.ru_utime.tv_sec; - ru_diff.ru_utime.tv_usec = - ru_last.ru_utime.tv_usec - ru_first.ru_utime.tv_usec; - ru_diff.ru_stime.tv_sec = ru_last.ru_stime.tv_sec - ru_first.ru_stime.tv_sec; - ru_diff.ru_stime.tv_usec = - ru_last.ru_stime.tv_usec - ru_first.ru_stime.tv_usec; - - ru_diff.ru_maxrss = ru_last.ru_maxrss - ru_first.ru_maxrss; - ru_diff.ru_minflt = ru_last.ru_minflt - ru_first.ru_minflt; - ru_diff.ru_majflt = ru_last.ru_majflt - 
ru_first.ru_majflt; - ru_diff.ru_inblock = ru_last.ru_inblock - ru_first.ru_inblock; - ru_diff.ru_oublock = ru_last.ru_oublock - ru_first.ru_oublock; - ru_diff.ru_nvcsw = ru_last.ru_nvcsw - ru_first.ru_nvcsw; - ru_diff.ru_nivcsw = ru_last.ru_nivcsw - ru_first.ru_nivcsw; - std::string str3(context); - log_rusage(ru_diff, str3.c_str()); -} - -void mme_app_test_protobuf_serialization(mme_app_desc_t* mme_app_desc, - uint num_ues) { - srand(time(NULL)); - ue_mm_context_t* ue_mm_contexts[num_ues][2] = {}; - - std::vector contexts = mme_app_allocate_ues(num_ues); - - mme_app_insert_ues(mme_app_desc, contexts); - - struct rusage ru_start_ctxt_to_proto, ru_end_ctxt_to_proto; - - getrusage(RUSAGE_SELF, &ru_start_ctxt_to_proto); - auto start_ctxt_to_proto = std::chrono::high_resolution_clock::now(); - mme_app_serialize_ues(mme_app_desc, contexts); - auto end_ctxt_to_proto = std::chrono::high_resolution_clock::now(); - getrusage(RUSAGE_SELF, &ru_end_ctxt_to_proto); - log_rusage_diff(ru_start_ctxt_to_proto, ru_end_ctxt_to_proto, - "RUSAGE Contexts serialization"); - auto duration_ctxt_to_proto = - std::chrono::duration_cast(end_ctxt_to_proto - - start_ctxt_to_proto); - std::cout << "Time taken to serialize contexts " << num_ues - << " UEs : " << duration_ctxt_to_proto.count() << " nanoseconds" - << std::endl; - OAILOG_INFO(LOG_MME_APP, "Time taken to serialize contexts %d UEs: %ld µs\n", - num_ues, duration_ctxt_to_proto.count()); - - struct rusage ru_start_proto_to_ctxt, ru_end_proto_to_ctxt; - getrusage(RUSAGE_SELF, &ru_start_proto_to_ctxt); - auto start_proto_to_ctxt = std::chrono::high_resolution_clock::now(); - mme_app_deserialize_ues(); - auto end_proto_to_ctxt = std::chrono::high_resolution_clock::now(); - getrusage(RUSAGE_SELF, &ru_end_proto_to_ctxt); - log_rusage_diff(ru_start_proto_to_ctxt, ru_end_proto_to_ctxt, - "RUSAGE Contexts deserialization"); - auto duration_proto_to_ctxt = - std::chrono::duration_cast(end_proto_to_ctxt - - start_proto_to_ctxt); - std::cout << 
"Time taken to deserialize contexts " << num_ues - << " UEs : " << duration_proto_to_ctxt.count() << " nanoseconds" - << std::endl; - OAILOG_INFO(LOG_MME_APP, "Time taken to serialize contexts %d UEs : %ld ns\n", - num_ues, duration_proto_to_ctxt.count()); - /*auto imsi_str = MmeNasStateManager::getInstance().get_imsi_str(imsi64); - MmeNasStateManager::getInstance().write_ue_state_to_db( - ue_context, imsi_str); - put_mme_ue_state(mme_app_desc_p, imsi64, force_ue_write); - put_mme_nas_state(); */ - mme_app_desc_t* mme_app_desc_p = get_mme_nas_state(false); - if (!mme_app_desc_p) { - OAILOG_ERROR(LOG_MME_APP, "Failed to fetch mme_app_desc_p \n"); - return; - } - mme_app_deallocate_ues(mme_app_desc_p, &contexts); - - send_terminate_message_fatal(&main_zmq_ctx); - sleep(1); - exit(0); - return; -} diff --git a/lte/gateway/c/core/oai/tasks/mme_app/experimental/mme_app_serialization.hpp b/lte/gateway/c/core/oai/tasks/mme_app/experimental/mme_app_serialization.hpp deleted file mode 100644 index 92a86b325702..000000000000 --- a/lte/gateway/c/core/oai/tasks/mme_app/experimental/mme_app_serialization.hpp +++ /dev/null @@ -1,27 +0,0 @@ -/* -Copyright 2020 The Magma Authors. - -This source code is licensed under the BSD-style license found in the -LICENSE file in the root directory of this source tree. - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ -#pragma once - -#ifdef __cplusplus -extern "C" { -#endif - -// C includes -------------------------------------------------------------- -#include "lte/gateway/c/core/oai/include/mme_app_desc.h" - -void mme_app_schedule_test_protobuf_serialization(uint num_ues); -void mme_app_test_protobuf_serialization(mme_app_desc_t* mme_app_desc, - uint num_ues); -#ifdef __cplusplus -} -#endif diff --git a/lte/gateway/c/core/oai/tasks/mme_app/mme_app_bearer.c b/lte/gateway/c/core/oai/tasks/mme_app/mme_app_bearer.c index 84196d925e37..18319666d0c8 100644 --- a/lte/gateway/c/core/oai/tasks/mme_app/mme_app_bearer.c +++ b/lte/gateway/c/core/oai/tasks/mme_app/mme_app_bearer.c @@ -3703,10 +3703,8 @@ void mme_app_handle_handover_notify( "Id: " MME_UE_S1AP_ID_FMT " (4 or 16 bytes was expected)\n", blength(e_rab_admitted_list.item[i].transport_layer_address), handover_notify_p->mme_ue_s1ap_id); - bdestroy_wrapper(&e_rab_admitted_list.item[i].transport_layer_address); OAILOG_FUNC_OUT(LOG_MME_APP); } - bdestroy_wrapper(&e_rab_admitted_list.item[i].transport_layer_address); s11_modify_bearer_request->bearer_contexts_to_be_modified .num_bearer_context++; diff --git a/lte/gateway/c/core/oai/tasks/mme_app/mme_app_embedded_spgw.c b/lte/gateway/c/core/oai/tasks/mme_app/mme_app_embedded_spgw.c index a8a179a07779..4dfe5d77b3a3 100644 --- a/lte/gateway/c/core/oai/tasks/mme_app/mme_app_embedded_spgw.c +++ b/lte/gateway/c/core/oai/tasks/mme_app/mme_app_embedded_spgw.c @@ -57,7 +57,7 @@ status_code_e mme_config_embedded_spgw_parse_opt_line( spgw_config_init(spgw_config_p); amf_config_init(amf_config_p); - while ((c = getopt(argc, argv, "c:hi:Ks:v:V:p:")) != -1) { + while ((c = getopt(argc, argv, "c:hi:Ks:v:V")) != -1) { switch (c) { case 'c': mme_config_p->config_file = bfromcstr(optarg); @@ -65,16 +65,6 @@ status_code_e mme_config_embedded_spgw_parse_opt_line( bdata(mme_config_p->config_file)); break; -#if MME_BENCHMARK - case 'p': { - mme_config_p->test_param = atoi(optarg); - 
mme_config_p->test_type = TEST_SERIALIZATION_PROTOBUF; - mme_config_p->run_mode = RUN_MODE_TEST; - OAI_FPRINTF_INFO("Test serialization protobuf, parameter %u\n", - mme_config_p->test_param); - } break; -#endif - case 'v': mme_config_p->log_config.asn1_verbosity_level = atoi(optarg); break; @@ -110,9 +100,6 @@ status_code_e mme_config_embedded_spgw_parse_opt_line( break; case 'h': -#if !MME_BENCHMARK - case 'p': -#endif default: usage(argv[0]); exit(0); diff --git a/lte/gateway/c/core/oai/tasks/mme_app/mme_app_main.c b/lte/gateway/c/core/oai/tasks/mme_app/mme_app_main.c index 75d2ba482f8f..2beb0c396228 100644 --- a/lte/gateway/c/core/oai/tasks/mme_app/mme_app_main.c +++ b/lte/gateway/c/core/oai/tasks/mme_app/mme_app_main.c @@ -50,9 +50,6 @@ #include "lte/gateway/c/core/oai/tasks/mme_app/mme_app_ha.hpp" #include "lte/gateway/c/core/oai/tasks/nas/nas_network.h" #include "lte/gateway/c/core/oai/tasks/nas/nas_proc.hpp" -#if MME_BENCHMARK -#include "lte/gateway/c/core/oai/tasks/mme_app/experimental/mme_app_serialization.hpp" -#endif static void check_mme_healthy_and_notify_service(void); static bool is_mme_app_healthy(void); @@ -462,15 +459,6 @@ static int handle_message(zloop_t* loop, zsock_t* reader, void* arg) { mme_app_desc_p, &S1AP_REMOVE_STALE_UE_CONTEXT(received_message_p)); } break; -#if MME_BENCHMARK - case MME_APP_TEST_PROTOBUF_SERIALIZATION: { - mme_app_test_protobuf_serialization( - mme_app_desc_p, - MME_APP_TEST_PROTOBUF_SERIALIZATION(received_message_p).num_ues); - force_ue_write = false; - is_task_state_same = true; - } break; -#endif case TERMINATE_MESSAGE: { itti_free_msg_content(received_message_p); free(received_message_p); diff --git a/lte/gateway/c/core/oai/tasks/mme_app/mme_config.c b/lte/gateway/c/core/oai/tasks/mme_app/mme_config.c index 6ca45b782a42..9100240de082 100644 --- a/lte/gateway/c/core/oai/tasks/mme_app/mme_config.c +++ b/lte/gateway/c/core/oai/tasks/mme_app/mme_config.c @@ -1796,13 +1796,6 @@ void mme_config_display(mme_config_t* 
config_pP) { config_pP->daylight_saving_time); OAILOG_INFO(LOG_CONFIG, "- Run mode .............................: %s\n", (RUN_MODE_TEST == config_pP->run_mode) ? "TEST" : "NORMAL"); -#if MME_BENCHMARK - if (RUN_MODE_TEST == config_pP->run_mode) { - OAILOG_INFO(LOG_CONFIG, - "- Benchmark Test param ...........................: %u\n", - config_pP->test_param); - } -#endif OAILOG_INFO(LOG_CONFIG, "- Max eNBs .............................: %u\n", config_pP->max_enbs); OAILOG_INFO(LOG_CONFIG, "- Max UEs ..............................: %u\n", @@ -2133,7 +2126,7 @@ int mme_config_parse_opt_line(int argc, char* argv[], mme_config_t* config_pP) { /* * Parsing command line */ - while ((c = getopt(argc, argv, "c:s:p:h:v:V")) != -1) { + while ((c = getopt(argc, argv, "c:s:h:v:V")) != -1) { switch (c) { case 'c': { /* @@ -2156,15 +2149,6 @@ int mme_config_parse_opt_line(int argc, char* argv[], mme_config_t* config_pP) { PACKAGE_NAME, PACKAGE_VERSION, PACKAGE_BUGREPORT); } break; -#if MME_BENCHMARK - case 'p': { - config_pP->test_param = atoi(optarg); - config_pP->test_type = TEST_SERIALIZATION_PROTOBUF; - config_pP->run_mode = RUN_MODE_TEST; - OAI_FPRINTF_INFO("Test serialization protobuf, parameter %u\n", - config_pP->test_param); - } break; -#endif case 's': { OAI_FPRINTF_INFO( "Ignoring command line option s as there is no embedded sgw \n"); @@ -2172,9 +2156,6 @@ int mme_config_parse_opt_line(int argc, char* argv[], mme_config_t* config_pP) { case 'h': /* Fall through */ -#if !MME_BENCHMARK - case 'p': /* Fall through */ -#endif default: OAI_FPRINTF_ERR("Unknown command line option %c\n", c); usage(argv[0]); diff --git a/lte/gateway/c/core/oai/tasks/nas5g/CMakeLists.txt b/lte/gateway/c/core/oai/tasks/nas5g/CMakeLists.txt index 14f605154c08..e69b84fcd34e 100644 --- a/lte/gateway/c/core/oai/tasks/nas5g/CMakeLists.txt +++ b/lte/gateway/c/core/oai/tasks/nas5g/CMakeLists.txt @@ -93,6 +93,7 @@ set(libnas_ies_OBJS 
${CMAKE_CURRENT_SOURCE_DIR}/src/ies/M5GMaxNumOfSupportedPacketFilters.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/ies/M5GQosFlowDescriptor.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/ies/M5GQosFlowParam.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/ies/M5GNetworkFeatureSupport.cpp ) add_library(LIB_NAS5G diff --git a/lte/gateway/c/core/oai/tasks/nas5g/include/M5GRegistrationAccept.hpp b/lte/gateway/c/core/oai/tasks/nas5g/include/M5GRegistrationAccept.hpp index 2887f2b4add8..f830533ab875 100644 --- a/lte/gateway/c/core/oai/tasks/nas5g/include/M5GRegistrationAccept.hpp +++ b/lte/gateway/c/core/oai/tasks/nas5g/include/M5GRegistrationAccept.hpp @@ -21,6 +21,7 @@ #include "lte/gateway/c/core/oai/tasks/nas5g/include/ies/M5GNSSAI.hpp" #include "lte/gateway/c/core/oai/tasks/nas5g/include/ies/M5GGprsTimer3.hpp" #include "lte/gateway/c/core/oai/tasks/nas5g/include/ies/M5GTAIList.hpp" +#include "lte/gateway/c/core/oai/tasks/nas5g/include/ies/M5GNetworkFeatureSupport.hpp" namespace magma5g { class RegistrationAcceptMsg { @@ -35,6 +36,7 @@ class RegistrationAcceptMsg { TAIListMsg tai_list; NSSAIMsgList allowed_nssai; GPRSTimer3Msg gprs_timer; + NetworkFeatureSupportMsg network_feature; #define REGISTRATION_ACCEPT_MINIMUM_LENGTH 5 RegistrationAcceptMsg(); diff --git a/lte/gateway/c/core/oai/tasks/nas5g/include/M5gNasMessage.h b/lte/gateway/c/core/oai/tasks/nas5g/include/M5gNasMessage.h index 9c16910f6ad8..7e85d791758d 100644 --- a/lte/gateway/c/core/oai/tasks/nas5g/include/M5gNasMessage.h +++ b/lte/gateway/c/core/oai/tasks/nas5g/include/M5gNasMessage.h @@ -57,6 +57,7 @@ namespace magma5g { #define AUTH_PARAM_RAND 0x21 #define AUTH_PARAM_AUTN 0x20 #define AUTH_RESPONSE_PARAMETER 0x2D +#define NETWORK_FEATURE 0x21 // 5G Session Management IE Types #define REQUEST_PDU_SESSION_TYPE_TYPE 0x90 diff --git a/lte/gateway/c/core/oai/tasks/nas5g/include/ies/M5GNetworkFeatureSupport.hpp b/lte/gateway/c/core/oai/tasks/nas5g/include/ies/M5GNetworkFeatureSupport.hpp new file mode 100644 index 
000000000000..d7d858534bf5 --- /dev/null +++ b/lte/gateway/c/core/oai/tasks/nas5g/include/ies/M5GNetworkFeatureSupport.hpp @@ -0,0 +1,41 @@ +/* + * Copyright 2022 The Magma Authors. + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * */ +#pragma once +#include +#include +namespace magma5g { +class NetworkFeatureSupportMsg { + public: +#define NETWORK_FEATURE_MINIMUM_LENGTH 3 +#define NETWORK_FEATURE_MAXIMUM_LENGTH 5 + + uint8_t iei; + uint8_t len; + uint8_t IMS_VoPS_3GPP : 1; + uint8_t IMS_VoPS_N3GPP : 1; + uint8_t EMC : 2; + uint8_t EMF : 2; + uint8_t IWK_N26 : 1; + uint8_t MPSI : 1; + uint8_t EMCN3 : 1; + uint8_t MCSI : 1; + NetworkFeatureSupportMsg(); + ~NetworkFeatureSupportMsg(); + + int EncodeNetworkFeatureSupportMsg(NetworkFeatureSupportMsg* networkfeature, + uint8_t iei, uint8_t* buffer, + uint32_t len); + + int DecodeNetworkFeatureSupportMsg(NetworkFeatureSupportMsg* networkfeature, + uint8_t iei, uint8_t* buffer, + uint32_t len); +}; +} // namespace magma5g diff --git a/lte/gateway/c/core/oai/tasks/nas5g/src/M5GRegistrationAccept.cpp b/lte/gateway/c/core/oai/tasks/nas5g/src/M5GRegistrationAccept.cpp index 56ae2f9c5ec1..2fd67a1d0941 100644 --- a/lte/gateway/c/core/oai/tasks/nas5g/src/M5GRegistrationAccept.cpp +++ b/lte/gateway/c/core/oai/tasks/nas5g/src/M5GRegistrationAccept.cpp @@ -123,6 +123,13 @@ int RegistrationAcceptMsg::EncodeRegistrationAcceptMsg( return encoded_result; else encoded += encoded_result; + if ((encoded_result = + reg_accept->network_feature.EncodeNetworkFeatureSupportMsg( + ®_accept->network_feature, 0x21, buffer + encoded, + len - 
encoded)) < 0) + return encoded_result; + else + encoded += encoded_result; if ((encoded_result = reg_accept->gprs_timer.EncodeGPRSTimer3Msg( ®_accept->gprs_timer, 0x5E, buffer + encoded, len - encoded)) < 0) return encoded_result; diff --git a/lte/gateway/c/core/oai/tasks/nas5g/src/ies/M5GNetworkFeatureSupport.cpp b/lte/gateway/c/core/oai/tasks/nas5g/src/ies/M5GNetworkFeatureSupport.cpp new file mode 100644 index 000000000000..c459eb5300b2 --- /dev/null +++ b/lte/gateway/c/core/oai/tasks/nas5g/src/ies/M5GNetworkFeatureSupport.cpp @@ -0,0 +1,89 @@ +/* + * Copyright 2022 The Magma Authors. + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * */ + +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif +#include "lte/gateway/c/core/oai/common/log.h" +#ifdef __cplusplus +} +#endif + +#include "lte/gateway/c/core/oai/tasks/nas5g/include/M5GCommonDefs.h" +#include "lte/gateway/c/core/oai/tasks/nas5g/include/ies/M5GNetworkFeatureSupport.hpp" + +namespace magma5g { +NetworkFeatureSupportMsg::NetworkFeatureSupportMsg() {} +NetworkFeatureSupportMsg::~NetworkFeatureSupportMsg() {} + +int NetworkFeatureSupportMsg::DecodeNetworkFeatureSupportMsg( + NetworkFeatureSupportMsg* networkfeature, uint8_t iei, uint8_t* buffer, + uint32_t len) { + int decoded = 0; + + if (iei > 0) { + CHECK_IEI_DECODER(iei, *buffer); + networkfeature->iei = *buffer; + decoded++; + } + networkfeature->len = *(buffer + decoded); + decoded++; + + networkfeature->MPSI = (*(buffer + decoded) >> 7) & 0x01; + networkfeature->IWK_N26 = (*(buffer + decoded) >> 6) & 0x01; + networkfeature->EMF = (*(buffer + decoded) >> 4) & 0x03; + networkfeature->EMC = (*(buffer + decoded) >> 2) & 0x03; + networkfeature->IMS_VoPS_N3GPP = (*(buffer + decoded) >> 1) & 0x01; + networkfeature->IMS_VoPS_3GPP = *(buffer + decoded) & 0x01; + decoded++; + + networkfeature->MCSI = (*(buffer + decoded) >> 1) & 0x01; + networkfeature->EMCN3 = (*(buffer + decoded)) & 0x01; + decoded++; + + return decoded; +} + +int NetworkFeatureSupportMsg::EncodeNetworkFeatureSupportMsg( + NetworkFeatureSupportMsg* networkfeature, uint8_t iei, uint8_t* buffer, + uint32_t len) { + uint32_t encoded = 0; + + // Checking IEI and pointer + CHECK_PDU_POINTER_AND_LENGTH_ENCODER(buffer, NETWORK_FEATURE_MINIMUM_LENGTH, + len); + + if (iei > 0) { + CHECK_IEI_ENCODER(iei, (unsigned char)networkfeature->iei); + *buffer = iei; + encoded++; + } + + *(buffer + encoded) = networkfeature->len; + encoded++; + *(buffer + encoded) = 0x00 | ((networkfeature->MPSI & 0x01) << 7) | + ((networkfeature->IWK_N26 & 0x01) << 6) | + ((networkfeature->EMF & 0x03) << 4) | + ((networkfeature->EMC & 
0x03) << 2) | + ((networkfeature->IMS_VoPS_N3GPP & 0x01) << 1) | + (networkfeature->IMS_VoPS_3GPP & 0x01); + encoded++; + *(buffer + encoded) = 0x00 | ((networkfeature->MCSI & 0x01) << 1) | + (networkfeature->EMCN3 & 0x01); + encoded++; + return encoded; +} + +} // namespace magma5g diff --git a/lte/gateway/c/core/oai/tasks/ngap/ngap_amf_nas_procedures.c b/lte/gateway/c/core/oai/tasks/ngap/ngap_amf_nas_procedures.c index 10aba9304c97..6dde6ed4e5fe 100644 --- a/lte/gateway/c/core/oai/tasks/ngap/ngap_amf_nas_procedures.c +++ b/lte/gateway/c/core/oai/tasks/ngap/ngap_amf_nas_procedures.c @@ -676,9 +676,11 @@ void ngap_handle_conn_est_cnf( session_context->pDUSessionID = pdu_session_item->Pdu_Session_ID; /*NSSAI*/ - session_context->s_NSSAI.sST.size = 1; - session_context->s_NSSAI.sST.buf = (uint8_t*)calloc(1, sizeof(uint8_t)); - session_context->s_NSSAI.sST.buf[0] = 0x11; + Ngap_SST_t* sST = NULL; + sST = &session_context->s_NSSAI.sST; + + INT8_TO_OCTET_STRING( + amf_config.plmn_support_list.plmn_support[0].s_nssai.sst, sST); Ngap_PDUSessionResourceSetupRequestTransfer_t pduSessionResourceSetupRequestTransferIEs = {0}; diff --git a/lte/gateway/c/core/oai/tasks/s1ap/s1ap_mme.cpp b/lte/gateway/c/core/oai/tasks/s1ap/s1ap_mme.cpp index 3bdde84e59fe..4a3f4f574d1a 100644 --- a/lte/gateway/c/core/oai/tasks/s1ap/s1ap_mme.cpp +++ b/lte/gateway/c/core/oai/tasks/s1ap/s1ap_mme.cpp @@ -36,7 +36,6 @@ extern "C" { #include "lte/gateway/c/core/oai/lib/message_utils/service303_message_utils.h" #include "lte/gateway/c/core/common/assertions.h" #include "lte/gateway/c/core/oai/lib/bstr/bstrlib.h" -#include "lte/gateway/c/core/oai/lib/hashtable/hashtable.h" #include "lte/gateway/c/core/oai/include/mme_init.hpp" #ifdef __cplusplus } @@ -58,12 +57,15 @@ extern "C" { #include "lte/gateway/c/core/oai/tasks/s1ap/s1ap_timer.hpp" #include "orc8r/gateway/c/common/service303/MetricsHelpers.hpp" +bool hss_associated = false; +namespace magma { +namespace lte { + static void start_stats_timer(void); 
static int handle_stats_timer(zloop_t* loop, int id, void* arg); static long epc_stats_timer_id; static size_t epc_stats_timer_sec = 60; -bool hss_associated = false; static int indent = 0; task_zmq_ctx_t s1ap_task_zmq_ctx; @@ -71,6 +73,7 @@ bool s1ap_congestion_control_enabled = true; long s1ap_last_msg_latency = 0; long s1ap_zmq_th = LONG_MAX; +static void s1ap_mme_exit(void); //------------------------------------------------------------------------------ static int s1ap_send_init_sctp(void) { // Create and alloc new message @@ -357,7 +360,7 @@ extern "C" status_code_e s1ap_mme_init(const mme_config_t* mme_config_p) { } //------------------------------------------------------------------------------ -void s1ap_mme_exit(void) { +static void s1ap_mme_exit(void) { OAILOG_DEBUG(LOG_S1AP, "Cleaning S1AP\n"); stop_timer(&s1ap_task_zmq_ctx, epc_stats_timer_id); @@ -396,36 +399,43 @@ enb_description_t* s1ap_new_enb(void) { } //------------------------------------------------------------------------------ -ue_description_t* s1ap_new_ue(s1ap_state_t* state, - const sctp_assoc_id_t sctp_assoc_id, - enb_ue_s1ap_id_t enb_ue_s1ap_id) { +oai::UeDescription* s1ap_new_ue(s1ap_state_t* state, + const sctp_assoc_id_t sctp_assoc_id, + enb_ue_s1ap_id_t enb_ue_s1ap_id) { enb_description_t* enb_ref = NULL; - ue_description_t* ue_ref = NULL; + oai::UeDescription* ue_ref = nullptr; enb_ref = s1ap_state_get_enb(state, sctp_assoc_id); DevAssert(enb_ref != NULL); - ue_ref = - reinterpret_cast(calloc(1, sizeof(ue_description_t))); + ue_ref = new oai::UeDescription(); /* - * Something bad happened during malloc... + * Something bad happened during memory allocation... * * * * May be we are running out of memory. * * * * TODO: Notify eNB with a cause like Hardware Failure. 
*/ - DevAssert(ue_ref != NULL); - ue_ref->sctp_assoc_id = sctp_assoc_id; - ue_ref->enb_ue_s1ap_id = enb_ue_s1ap_id; - ue_ref->comp_s1ap_id = - S1AP_GENERATE_COMP_S1AP_ID(sctp_assoc_id, enb_ue_s1ap_id); - - hash_table_ts_t* state_ue_ht = get_s1ap_ue_state(); - hashtable_rc_t hashrc = hashtable_ts_insert( - state_ue_ht, (const hash_key_t)ue_ref->comp_s1ap_id, (void*)ue_ref); + if (ue_ref == nullptr) { + OAILOG_ERROR(LOG_S1AP, + "Failed to allocate memory for protobuf object UeDescription"); + return nullptr; + } + ue_ref->set_sctp_assoc_id(sctp_assoc_id); + ue_ref->set_enb_ue_s1ap_id(enb_ue_s1ap_id); + ue_ref->set_comp_s1ap_id( + S1AP_GENERATE_COMP_S1AP_ID(sctp_assoc_id, enb_ue_s1ap_id)); + + map_uint64_ue_description_t* s1ap_ue_state = get_s1ap_ue_state(); + if (s1ap_ue_state == nullptr) { + OAILOG_ERROR(LOG_S1AP, "Failed to get s1ap_ue_state"); + return nullptr; + } + magma::proto_map_rc_t rc = + s1ap_ue_state->insert(ue_ref->comp_s1ap_id(), ue_ref); - if (HASH_TABLE_OK != hashrc) { + if (rc != magma::PROTO_MAP_OK) { OAILOG_ERROR(LOG_S1AP, "Could not insert UE descr in ue_coll: %s\n", - hashtable_rc_code2string(hashrc)); - free_wrapper((void**)&ue_ref); - return NULL; + magma::map_rc_code2string(rc)); + free_cpp_wrapper(reinterpret_cast(&ue_ref)); + return nullptr; } // Increment number of UE enb_ref->nb_ue_associated++; @@ -435,14 +445,14 @@ ue_description_t* s1ap_new_ue(s1ap_state_t* state, } //------------------------------------------------------------------------------ -void s1ap_remove_ue(s1ap_state_t* state, ue_description_t* ue_ref) { +void s1ap_remove_ue(s1ap_state_t* state, oai::UeDescription* ue_ref) { enb_description_t* enb_ref = NULL; // NULL reference... 
- if (ue_ref == NULL) return; + if (ue_ref == nullptr) return; - mme_ue_s1ap_id_t mme_ue_s1ap_id = ue_ref->mme_ue_s1ap_id; - enb_ref = s1ap_state_get_enb(state, ue_ref->sctp_assoc_id); + mme_ue_s1ap_id_t mme_ue_s1ap_id = ue_ref->mme_ue_s1ap_id(); + enb_ref = s1ap_state_get_enb(state, ue_ref->sctp_assoc_id()); DevAssert(enb_ref->nb_ue_associated > 0); // Updating number of UE enb_ref->nb_ue_associated--; @@ -450,16 +460,21 @@ void s1ap_remove_ue(s1ap_state_t* state, ue_description_t* ue_ref) { OAILOG_TRACE(LOG_S1AP, "Removing UE enb_ue_s1ap_id: " ENB_UE_S1AP_ID_FMT " mme_ue_s1ap_id:" MME_UE_S1AP_ID_FMT " in eNB id : %d\n", - ue_ref->enb_ue_s1ap_id, ue_ref->mme_ue_s1ap_id, enb_ref->enb_id); + ue_ref->enb_ue_s1ap_id(), ue_ref->mme_ue_s1ap_id(), + enb_ref->enb_id); - ue_ref->s1_ue_state = S1AP_UE_INVALID_STATE; - if (ue_ref->s1ap_ue_context_rel_timer.id != S1AP_TIMER_INACTIVE_ID) { - s1ap_stop_timer(ue_ref->s1ap_ue_context_rel_timer.id); - ue_ref->s1ap_ue_context_rel_timer.id = S1AP_TIMER_INACTIVE_ID; + ue_ref->set_s1ap_ue_state(oai::S1AP_UE_INVALID_STATE); + if (ue_ref->s1ap_ue_context_rel_timer().id() != S1AP_TIMER_INACTIVE_ID) { + s1ap_stop_timer(ue_ref->s1ap_ue_context_rel_timer().id()); + ue_ref->mutable_s1ap_ue_context_rel_timer()->set_id(S1AP_TIMER_INACTIVE_ID); } - hash_table_ts_t* state_ue_ht = get_s1ap_ue_state(); - hashtable_ts_free(state_ue_ht, ue_ref->comp_s1ap_id); + map_uint64_ue_description_t* s1ap_ue_state = get_s1ap_ue_state(); + if (s1ap_ue_state == nullptr) { + OAILOG_ERROR(LOG_S1AP, "Failed to get s1ap_ue_state"); + return; + } + s1ap_ue_state->remove(ue_ref->comp_s1ap_id()); state->mmeid2associd.remove(mme_ue_s1ap_id); enb_ref->ue_id_coll.remove(mme_ue_s1ap_id); @@ -512,3 +527,6 @@ static void start_stats_timer(void) { start_timer(&s1ap_task_zmq_ctx, 1000 * epc_stats_timer_sec, TIMER_REPEAT_FOREVER, handle_stats_timer, NULL); } + +} // namespace lte +} // namespace magma diff --git a/lte/gateway/c/core/oai/tasks/s1ap/s1ap_mme.hpp 
b/lte/gateway/c/core/oai/tasks/s1ap/s1ap_mme.hpp index e1268b916b21..70c49a6d72e7 100644 --- a/lte/gateway/c/core/oai/tasks/s1ap/s1ap_mme.hpp +++ b/lte/gateway/c/core/oai/tasks/s1ap/s1ap_mme.hpp @@ -23,8 +23,9 @@ */ #pragma once -#include "lte/gateway/c/core/oai/include/s1ap_state.hpp" -#include "lte/gateway/c/core/oai/include/s1ap_types.hpp" + +#include "lte/protos/oai/s1ap_state.pb.h" + #ifdef __cplusplus extern "C" { #endif @@ -37,8 +38,18 @@ extern "C" { #define S1AP_ZMQ_LATENCY_TH \ s1ap_zmq_th // absolute threshold to be used for initial UE messages +#ifdef __cplusplus +} +#endif + +#include "lte/gateway/c/core/oai/include/s1ap_state.hpp" +#include "lte/gateway/c/core/oai/include/s1ap_types.hpp" + extern bool hss_associated; +namespace magma { +namespace lte { + /** \brief Allocate and add to the list a new eNB descriptor * @returns Reference to the new eNB element in list **/ @@ -49,19 +60,23 @@ enb_description_t* s1ap_new_enb(void); * \param enb_ue_s1ap_id ue ID over S1AP * @returns Reference to the new UE element in list **/ -ue_description_t* s1ap_new_ue(s1ap_state_t* state, - sctp_assoc_id_t sctp_assoc_id, - enb_ue_s1ap_id_t enb_ue_s1ap_id); +oai::UeDescription* s1ap_new_ue(s1ap_state_t* state, + sctp_assoc_id_t sctp_assoc_id, + enb_ue_s1ap_id_t enb_ue_s1ap_id); /** \brief Remove target UE from the list * \param ue_ref UE structure reference to remove **/ -void s1ap_remove_ue(s1ap_state_t* state, ue_description_t* ue_ref); +void s1ap_remove_ue(s1ap_state_t* state, oai::UeDescription* ue_ref); /** \brief Remove target eNB from the list and remove any UE associated * \param enb_ref eNB structure reference to remove **/ void s1ap_remove_enb(s1ap_state_t* state, enb_description_t* enb_ref); -#ifdef __cplusplus -} -#endif + +void free_enb_description(void** ptr); + +void free_ue_description(void** ptr); + +} // namespace lte +} // namespace magma diff --git a/lte/gateway/c/core/oai/tasks/s1ap/s1ap_mme_handlers.cpp 
b/lte/gateway/c/core/oai/tasks/s1ap/s1ap_mme_handlers.cpp index 17562c0fe9a7..8f102193ca1a 100644 --- a/lte/gateway/c/core/oai/tasks/s1ap/s1ap_mme_handlers.cpp +++ b/lte/gateway/c/core/oai/tasks/s1ap/s1ap_mme_handlers.cpp @@ -15,7 +15,7 @@ * contact@openairinterface.org */ -/*! \file s1ap_mme_handlers.c +/*! \file s1ap_mme_handlers.cpp \brief \author Sebastien ROUX, Lionel Gauthier \company Eurecom @@ -38,7 +38,6 @@ extern "C" { #include "lte/gateway/c/core/oai/lib/itti/intertask_interface.h" #include "lte/gateway/c/core/oai/lib/itti/intertask_interface_types.h" #include "lte/gateway/c/core/oai/lib/bstr/bstrlib.h" -#include "lte/gateway/c/core/oai/lib/hashtable/hashtable.h" #ifdef __cplusplus } #endif @@ -104,6 +103,9 @@ extern "C" { #include "lte/gateway/c/core/oai/tasks/s1ap/s1ap_timer.hpp" #include "orc8r/gateway/c/common/service303/MetricsHelpers.hpp" +namespace magma { +namespace lte { + typedef struct arg_s1ap_send_enb_dereg_ind_s { uint8_t current_ue_index; uint32_t handled_ues; @@ -418,7 +420,7 @@ void clean_stale_enb_state(s1ap_state_t* state, stale_enb_association->sctp_assoc_id); // Remove the S1 context for UEs associated with old eNB association if (stale_enb_association->ue_id_coll.size()) { - ue_description_t* ue_ref = NULL; + oai::UeDescription* ue_ref = nullptr; for (auto itr_map = stale_enb_association->ue_id_coll.map->begin(); itr_map != stale_enb_association->ue_id_coll.map->end(); ++itr_map) { ue_ref = s1ap_state_get_ue_mmeid((mme_ue_s1ap_id_t)itr_map->first); @@ -443,32 +445,18 @@ static status_code_e s1ap_clear_ue_ctxt_for_unknown_mme_ue_s1ap_id( OAILOG_FUNC_IN(LOG_S1AP); unsigned int i = 0; unsigned int num_elements = 0; - hash_table_ts_t* hashtblP = get_s1ap_ue_state(); - hash_node_t *node = NULL, *oldnode = NULL; - if (!hashtblP) { - OAILOG_ERROR(LOG_S1AP, "No UEs found in comp_s1ap_id hash list"); + map_uint64_ue_description_t* s1ap_ue_state = get_s1ap_ue_state(); + + if (!s1ap_ue_state) { + OAILOG_ERROR(LOG_S1AP, "s1ap_ue_state map 
doesn't exist"); OAILOG_FUNC_RETURN(LOG_S1AP, RETURNerror); } - while ((num_elements < hashtblP->num_elements) && (i < hashtblP->size)) { - pthread_mutex_lock(&hashtblP->lock_nodes[i]); - if (hashtblP->nodes[i] != NULL) { - node = hashtblP->nodes[i]; - } - while (node) { - num_elements++; - oldnode = node; - node = node->next; - if (oldnode->data && - (sctp_assoc_id == - ((ue_description_t*)oldnode->data)->sctp_assoc_id)) { - pthread_mutex_unlock(&hashtblP->lock_nodes[i]); - s1ap_remove_ue(state, - reinterpret_cast(oldnode->data)); - pthread_mutex_lock(&hashtblP->lock_nodes[i]); - } + + for (auto itr = s1ap_ue_state->map->begin(); itr != s1ap_ue_state->map->end(); + itr++) { + if ((itr->second) && (sctp_assoc_id == itr->second->sctp_assoc_id())) { + s1ap_remove_ue(state, reinterpret_cast(itr->second)); } - pthread_mutex_unlock(&hashtblP->lock_nodes[i]); - i++; } OAILOG_FUNC_RETURN(LOG_S1AP, RETURNok); } @@ -845,7 +833,7 @@ status_code_e s1ap_mme_handle_ue_cap_indication(s1ap_state_t* state, const sctp_assoc_id_t assoc_id, const sctp_stream_id_t stream, S1ap_S1AP_PDU_t* pdu) { - ue_description_t* ue_ref_p = NULL; + oai::UeDescription* ue_ref_p = nullptr; S1ap_UECapabilityInfoIndication_t* container; S1ap_UECapabilityInfoIndicationIEs_t* ie = NULL; status_code_e rc = RETURNok; @@ -868,7 +856,7 @@ status_code_e s1ap_mme_handle_ue_cap_indication(s1ap_state_t* state, if (ie) { mme_ue_s1ap_id = ie->value.choice.MME_UE_S1AP_ID; - if ((ue_ref_p = s1ap_state_get_ue_mmeid(mme_ue_s1ap_id)) == NULL) { + if ((ue_ref_p = s1ap_state_get_ue_mmeid(mme_ue_s1ap_id)) == nullptr) { OAILOG_DEBUG( LOG_S1AP, "No UE is attached to this mme UE s1ap id: " MME_UE_S1AP_ID_FMT "\n", @@ -893,26 +881,26 @@ status_code_e s1ap_mme_handle_ue_cap_indication(s1ap_state_t* state, OAILOG_FUNC_RETURN(LOG_S1AP, RETURNerror); } - if (ue_ref_p->enb_ue_s1ap_id != enb_ue_s1ap_id) { + if (ue_ref_p->enb_ue_s1ap_id() != enb_ue_s1ap_id) { OAILOG_DEBUG_UE(LOG_S1AP, imsi64, "Mismatch in eNB UE S1AP ID, known: " 
ENB_UE_S1AP_ID_FMT ", received: " ENB_UE_S1AP_ID_FMT "\n", - ue_ref_p->enb_ue_s1ap_id, (uint32_t)enb_ue_s1ap_id); + ue_ref_p->enb_ue_s1ap_id(), (uint32_t)enb_ue_s1ap_id); OAILOG_FUNC_RETURN(LOG_S1AP, RETURNerror); } /* * Just display a warning when message received over wrong stream */ - if (ue_ref_p->sctp_stream_recv != stream) { + if (ue_ref_p->sctp_stream_recv() != stream) { OAILOG_ERROR_UE(LOG_S1AP, imsi64, "Received ue capability indication for " "(MME UE S1AP ID/eNB UE S1AP ID) (" MME_UE_S1AP_ID_FMT "/" ENB_UE_S1AP_ID_FMT ") over wrong stream " "expecting %u, received on %u\n", - (uint32_t)mme_ue_s1ap_id, ue_ref_p->enb_ue_s1ap_id, - ue_ref_p->sctp_stream_recv, stream); + (uint32_t)mme_ue_s1ap_id, ue_ref_p->enb_ue_s1ap_id(), + ue_ref_p->sctp_stream_recv(), stream); } /* @@ -933,8 +921,8 @@ status_code_e s1ap_mme_handle_ue_cap_indication(s1ap_state_t* state, return RETURNerror; } ue_cap_ind_p = &message_p->ittiMsg.s1ap_ue_cap_ind; - ue_cap_ind_p->enb_ue_s1ap_id = ue_ref_p->enb_ue_s1ap_id; - ue_cap_ind_p->mme_ue_s1ap_id = ue_ref_p->mme_ue_s1ap_id; + ue_cap_ind_p->enb_ue_s1ap_id = ue_ref_p->enb_ue_s1ap_id(); + ue_cap_ind_p->mme_ue_s1ap_id = ue_ref_p->mme_ue_s1ap_id(); ue_cap_ind_p->radio_capabilities_length = ie->value.choice.UERadioCapability.size; ue_cap_ind_p->radio_capabilities = reinterpret_cast( @@ -965,7 +953,7 @@ status_code_e s1ap_mme_handle_initial_context_setup_response( S1ap_InitialContextSetupResponse_t* container; S1ap_InitialContextSetupResponseIEs_t* ie = NULL; S1ap_E_RABSetupItemCtxtSUResIEs_t* eRABSetupItemCtxtSURes_p = NULL; - ue_description_t* ue_ref_p = NULL; + oai::UeDescription* ue_ref_p = nullptr; MessageDef* message_p = NULL; status_code_e rc = RETURNok; mme_ue_s1ap_id_t mme_ue_s1ap_id = INVALID_MME_UE_S1AP_ID; @@ -982,7 +970,7 @@ status_code_e s1ap_mme_handle_initial_context_setup_response( if (ie) { mme_ue_s1ap_id = ie->value.choice.MME_UE_S1AP_ID; if ((ue_ref_p = s1ap_state_get_ue_mmeid((uint32_t)mme_ue_s1ap_id)) == - NULL) { + 
nullptr) { OAILOG_DEBUG( LOG_S1AP, "No UE is attached to this mme UE s1ap id: " MME_UE_S1AP_ID_FMT @@ -1006,11 +994,11 @@ status_code_e s1ap_mme_handle_initial_context_setup_response( OAILOG_FUNC_RETURN(LOG_S1AP, RETURNerror); } - if (ue_ref_p->enb_ue_s1ap_id != enb_ue_s1ap_id) { + if (ue_ref_p->enb_ue_s1ap_id() != enb_ue_s1ap_id) { OAILOG_DEBUG_UE(LOG_S1AP, imsi64, "Mismatch in eNB UE S1AP ID, known: " ENB_UE_S1AP_ID_FMT " %u(10), received: 0x%06x %u(10)\n", - ue_ref_p->enb_ue_s1ap_id, ue_ref_p->enb_ue_s1ap_id, + ue_ref_p->enb_ue_s1ap_id(), ue_ref_p->enb_ue_s1ap_id(), (uint32_t)enb_ue_s1ap_id, (uint32_t)enb_ue_s1ap_id); OAILOG_FUNC_RETURN(LOG_S1AP, RETURNerror); } @@ -1027,10 +1015,11 @@ status_code_e s1ap_mme_handle_initial_context_setup_response( } else { OAILOG_FUNC_RETURN(LOG_S1AP, RETURNerror); } - ue_ref_p->s1_ue_state = S1AP_UE_CONNECTED; + ue_ref_p->set_s1ap_ue_state(oai::S1AP_UE_CONNECTED); message_p = DEPRECATEDitti_alloc_new_message_fatal( TASK_S1AP, MME_APP_INITIAL_CONTEXT_SETUP_RSP); - MME_APP_INITIAL_CONTEXT_SETUP_RSP(message_p).ue_id = ue_ref_p->mme_ue_s1ap_id; + MME_APP_INITIAL_CONTEXT_SETUP_RSP(message_p).ue_id = + ue_ref_p->mme_ue_s1ap_id(); MME_APP_INITIAL_CONTEXT_SETUP_RSP(message_p).e_rab_setup_list.no_of_items = ie->value.choice.E_RABSetupListCtxtSURes.list.count; for (int item = 0; item < ie->value.choice.E_RABSetupListCtxtSURes.list.count; @@ -1121,7 +1110,7 @@ status_code_e s1ap_mme_handle_ue_context_release_request( S1ap_S1AP_PDU_t* pdu) { S1ap_UEContextReleaseRequest_t* container; S1ap_UEContextReleaseRequest_IEs_t* ie = NULL; - ue_description_t* ue_ref_p = NULL; + oai::UeDescription* ue_ref_p = nullptr; enb_description_t* enb_ref_p = NULL; S1ap_Cause_PR cause_type; long cause_value; @@ -1242,7 +1231,7 @@ status_code_e s1ap_mme_handle_ue_context_release_request( don't care scenario till we add support for dedicated bearers. 
*/ - if ((ue_ref_p = s1ap_state_get_ue_mmeid(mme_ue_s1ap_id)) == NULL) { + if ((ue_ref_p = s1ap_state_get_ue_mmeid(mme_ue_s1ap_id)) == nullptr) { /* * MME doesn't know the MME UE S1AP ID provided. * No need to do anything. Ignore the message @@ -1257,8 +1246,8 @@ status_code_e s1ap_mme_handle_ue_context_release_request( } else { s1ap_imsi_map_t* imsi_map = get_s1ap_imsi_map(); imsi_map->mme_ueid2imsi_map.get(mme_ue_s1ap_id, &imsi64); - if (ue_ref_p->sctp_assoc_id == assoc_id && - ue_ref_p->enb_ue_s1ap_id == enb_ue_s1ap_id) { + if (ue_ref_p->sctp_assoc_id() == assoc_id && + ue_ref_p->enb_ue_s1ap_id() == enb_ue_s1ap_id) { /* * Both eNB UE S1AP ID and MME UE S1AP ID match. * Send a UE context Release Command to eNB after releasing S1-U bearer @@ -1269,8 +1258,8 @@ status_code_e s1ap_mme_handle_ue_context_release_request( OAILOG_FUNC_RETURN(LOG_S1AP, rc); } else if (enb_ref_p->enb_id == - ue_ref_p->s1ap_handover_state.source_enb_id && - ue_ref_p->s1ap_handover_state.source_enb_ue_s1ap_id == + ue_ref_p->s1ap_handover_state().source_enb_id() && + ue_ref_p->s1ap_handover_state().source_enb_ue_s1ap_id() == enb_ue_s1ap_id) { /* * We just handed over from this eNB. @@ -1278,8 +1267,8 @@ status_code_e s1ap_mme_handle_ue_context_release_request( */ rc = s1ap_mme_generate_ue_context_release_command( state, ue_ref_p, S1AP_RADIO_EUTRAN_GENERATED_REASON, imsi64, assoc_id, - ue_ref_p->s1ap_handover_state.source_sctp_stream_send, mme_ue_s1ap_id, - enb_ue_s1ap_id); + ue_ref_p->s1ap_handover_state().source_sctp_stream_send(), + mme_ue_s1ap_id, enb_ue_s1ap_id); OAILOG_FUNC_RETURN(LOG_S1AP, rc); } else { // abnormal case. No need to do anything. 
Ignore the message @@ -1287,7 +1276,7 @@ status_code_e s1ap_mme_handle_ue_context_release_request( LOG_S1AP, imsi64, "UE_CONTEXT_RELEASE_REQUEST ignored, cause mismatch enb_ue_s1ap_id: " "ctxt " ENB_UE_S1AP_ID_FMT " != request " ENB_UE_S1AP_ID_FMT " ", - (uint32_t)ue_ref_p->enb_ue_s1ap_id, (uint32_t)enb_ue_s1ap_id); + (uint32_t)ue_ref_p->enb_ue_s1ap_id(), (uint32_t)enb_ue_s1ap_id); OAILOG_FUNC_RETURN(LOG_S1AP, RETURNerror); } } @@ -1296,7 +1285,7 @@ status_code_e s1ap_mme_handle_ue_context_release_request( //------------------------------------------------------------------------------ status_code_e s1ap_mme_generate_ue_context_release_command( - s1ap_state_t* state, ue_description_t* ue_ref_p, enum s1cause cause, + s1ap_state_t* state, oai::UeDescription* ue_ref_p, enum s1cause cause, imsi64_t imsi64, const sctp_assoc_id_t assoc_id, const sctp_stream_id_t stream, mme_ue_s1ap_id_t mme_ue_s1ap_id, enb_ue_s1ap_id_t enb_ue_s1ap_id) { @@ -1412,15 +1401,15 @@ status_code_e s1ap_mme_generate_ue_context_release_command( } // If cause is S1AP_INVALID_MME_UE_S1AP_ID, then it indicates that s1ap doen't // have valid UE context - if (cause == S1AP_INVALID_MME_UE_S1AP_ID && ue_ref_p == NULL) { + if (cause == S1AP_INVALID_MME_UE_S1AP_ID && ue_ref_p == nullptr) { OAILOG_FUNC_RETURN(LOG_S1AP, RETURNok); } if (rc == RETURNok) { // Start timer to track UE context release complete from eNB - ue_ref_p->s1_ue_state = S1AP_UE_WAITING_CRR; - ue_ref_p->s1ap_ue_context_rel_timer.id = s1ap_start_timer( - ue_ref_p->s1ap_ue_context_rel_timer.msec, TIMER_REPEAT_ONCE, - handle_ue_context_rel_timer_expiry, mme_ue_s1ap_id); + ue_ref_p->set_s1ap_ue_state(oai::S1AP_UE_WAITING_CRC); + ue_ref_p->mutable_s1ap_ue_context_rel_timer()->set_id(s1ap_start_timer( + ue_ref_p->s1ap_ue_context_rel_timer().msec(), TIMER_REPEAT_ONCE, + handle_ue_context_rel_timer_expiry, mme_ue_s1ap_id)); } else { // Remove UE context and inform MME_APP. 
s1ap_mme_release_ue_context(state, ue_ref_p, imsi64); @@ -1432,7 +1421,7 @@ status_code_e s1ap_mme_generate_ue_context_release_command( //------------------------------------------------------------------------------ status_code_e s1ap_mme_generate_ue_context_modification( - ue_description_t* ue_ref_p, + oai::UeDescription* ue_ref_p, const itti_s1ap_ue_context_mod_req_t* const ue_context_mod_req_pP, imsi64_t imsi64) { uint8_t* buffer = NULL; @@ -1443,7 +1432,7 @@ status_code_e s1ap_mme_generate_ue_context_modification( status_code_e rc = RETURNok; OAILOG_FUNC_IN(LOG_S1AP); - if (ue_ref_p == NULL) { + if (ue_ref_p == nullptr) { OAILOG_FUNC_RETURN(LOG_S1AP, RETURNerror); } pdu.present = S1ap_S1AP_PDU_PR_initiatingMessage; @@ -1464,7 +1453,7 @@ status_code_e s1ap_mme_generate_ue_context_modification( ie->criticality = S1ap_Criticality_reject; ie->value.present = S1ap_UEContextModificationRequestIEs__value_PR_MME_UE_S1AP_ID; - ie->value.choice.MME_UE_S1AP_ID = ue_ref_p->mme_ue_s1ap_id; + ie->value.choice.MME_UE_S1AP_ID = ue_ref_p->mme_ue_s1ap_id(); ASN_SEQUENCE_ADD(&container->protocolIEs.list, ie); ie = (S1ap_UEContextModificationRequestIEs_t*)calloc( @@ -1473,7 +1462,7 @@ status_code_e s1ap_mme_generate_ue_context_modification( ie->criticality = S1ap_Criticality_reject; ie->value.present = S1ap_UEContextModificationRequestIEs__value_PR_ENB_UE_S1AP_ID; - ie->value.choice.ENB_UE_S1AP_ID = ue_ref_p->enb_ue_s1ap_id; + ie->value.choice.ENB_UE_S1AP_ID = ue_ref_p->enb_ue_s1ap_id(); ASN_SEQUENCE_ADD(&container->protocolIEs.list, ie); if ((ue_context_mod_req_pP->presencemask & S1AP_UE_CONTEXT_MOD_LAI_PRESENT) == @@ -1547,9 +1536,9 @@ status_code_e s1ap_mme_generate_ue_context_modification( bstring b = blk2bstr(buffer, length); free(buffer); - rc = s1ap_mme_itti_send_sctp_request(&b, ue_ref_p->sctp_assoc_id, - ue_ref_p->sctp_stream_send, - ue_ref_p->mme_ue_s1ap_id); + rc = s1ap_mme_itti_send_sctp_request(&b, ue_ref_p->sctp_assoc_id(), + ue_ref_p->sctp_stream_send(), + 
ue_ref_p->mme_ue_s1ap_id()); OAILOG_FUNC_RETURN(LOG_S1AP, rc); } @@ -1560,12 +1549,12 @@ status_code_e s1ap_handle_ue_context_release_command( const itti_s1ap_ue_context_release_command_t* const ue_context_release_command_pP, imsi64_t imsi64) { - ue_description_t* ue_ref_p = NULL; + oai::UeDescription* ue_ref_p = nullptr; status_code_e rc = RETURNok; OAILOG_FUNC_IN(LOG_S1AP); if ((ue_ref_p = s1ap_state_get_ue_mmeid( - ue_context_release_command_pP->mme_ue_s1ap_id)) == NULL) { + ue_context_release_command_pP->mme_ue_s1ap_id)) == nullptr) { OAILOG_DEBUG_UE(LOG_S1AP, imsi64, "Ignoring UE with mme_ue_s1ap_id " MME_UE_S1AP_ID_FMT " enb_ue_s1ap_id " ENB_UE_S1AP_ID_FMT "\n", @@ -1584,8 +1573,8 @@ status_code_e s1ap_handle_ue_context_release_command( } else { rc = s1ap_mme_generate_ue_context_release_command( state, ue_ref_p, ue_context_release_command_pP->cause, imsi64, - ue_ref_p->sctp_assoc_id, ue_ref_p->sctp_stream_send, - ue_ref_p->mme_ue_s1ap_id, ue_ref_p->enb_ue_s1ap_id); + ue_ref_p->sctp_assoc_id(), ue_ref_p->sctp_stream_send(), + ue_ref_p->mme_ue_s1ap_id(), ue_ref_p->enb_ue_s1ap_id()); } } @@ -1599,7 +1588,7 @@ status_code_e s1ap_handle_ue_context_mod_req( s1ap_state_t* state, const itti_s1ap_ue_context_mod_req_t* const ue_context_mod_req_pP, imsi64_t imsi64) { - ue_description_t* ue_ref_p = NULL; + oai::UeDescription* ue_ref_p = nullptr; status_code_e rc = RETURNok; OAILOG_FUNC_IN(LOG_S1AP); @@ -1608,7 +1597,7 @@ status_code_e s1ap_handle_ue_context_mod_req( return RETURNerror; } if ((ue_ref_p = s1ap_state_get_ue_mmeid( - ue_context_mod_req_pP->mme_ue_s1ap_id)) == NULL) { + ue_context_mod_req_pP->mme_ue_s1ap_id)) == nullptr) { OAILOG_DEBUG_UE(LOG_S1AP, imsi64, "Ignoring UE with mme_ue_s1ap_id " MME_UE_S1AP_ID_FMT " %u(10)\n", @@ -1630,7 +1619,7 @@ status_code_e s1ap_mme_handle_ue_context_release_complete( S1ap_S1AP_PDU_t* pdu) { S1ap_UEContextReleaseComplete_t* container; S1ap_UEContextReleaseComplete_IEs_t* ie = NULL; - ue_description_t* ue_ref_p = NULL; + 
oai::UeDescription* ue_ref_p = nullptr; mme_ue_s1ap_id_t mme_ue_s1ap_id = 0; OAILOG_FUNC_IN(LOG_S1AP); @@ -1646,7 +1635,7 @@ status_code_e s1ap_mme_handle_ue_context_release_complete( OAILOG_FUNC_RETURN(LOG_S1AP, RETURNok); } - if ((ue_ref_p = s1ap_state_get_ue_mmeid(mme_ue_s1ap_id)) == NULL) { + if ((ue_ref_p = s1ap_state_get_ue_mmeid(mme_ue_s1ap_id)) == nullptr) { OAILOG_DEBUG( LOG_S1AP, " UE Context Release commplete: S1 context cleared. Ignore message for " @@ -1654,7 +1643,7 @@ status_code_e s1ap_mme_handle_ue_context_release_complete( (uint32_t)mme_ue_s1ap_id); OAILOG_FUNC_RETURN(LOG_S1AP, RETURNok); } else { - if (ue_ref_p->sctp_assoc_id == assoc_id) { + if (ue_ref_p->sctp_assoc_id() == assoc_id) { OAILOG_INFO(LOG_S1AP, "UE Context Release complete: clearing S1 context for " "ueid " MME_UE_S1AP_ID_FMT "\n", @@ -1664,7 +1653,7 @@ status_code_e s1ap_mme_handle_ue_context_release_complete( s1ap_imsi_map_t* imsi_map = get_s1ap_imsi_map(); imsi_map->mme_ueid2imsi_map.get(mme_ue_s1ap_id, &imsi64); - ue_ref_p->s1_ue_state = S1AP_UE_WAITING_CRR; + ue_ref_p->set_s1ap_ue_state(oai::S1AP_UE_WAITING_CRC); // We can safely remove UE context now and stop timer s1ap_mme_release_ue_context(state, ue_ref_p, imsi64); @@ -1691,7 +1680,7 @@ status_code_e s1ap_mme_handle_initial_context_setup_failure( S1ap_S1AP_PDU_t* pdu) { S1ap_InitialContextSetupFailure_t* container; S1ap_InitialContextSetupFailureIEs_t* ie = NULL; - ue_description_t* ue_ref_p = NULL; + oai::UeDescription* ue_ref_p = nullptr; MessageDef* message_p = NULL; S1ap_Cause_PR cause_type; long cause_value; @@ -1722,7 +1711,7 @@ status_code_e s1ap_mme_handle_initial_context_setup_failure( enb_ue_s1ap_id = (enb_ue_s1ap_id_t)(ie->value.choice.ENB_UE_S1AP_ID & ENB_UE_S1AP_ID_MASK); - if ((ue_ref_p = s1ap_state_get_ue_mmeid(mme_ue_s1ap_id)) == NULL) { + if ((ue_ref_p = s1ap_state_get_ue_mmeid(mme_ue_s1ap_id)) == nullptr) { /* * MME doesn't know the MME UE S1AP ID provided. 
*/ @@ -1734,13 +1723,13 @@ status_code_e s1ap_mme_handle_initial_context_setup_failure( OAILOG_FUNC_RETURN(LOG_S1AP, RETURNerror); } - if (ue_ref_p->enb_ue_s1ap_id != enb_ue_s1ap_id) { + if (ue_ref_p->enb_ue_s1ap_id() != enb_ue_s1ap_id) { // abnormal case. No need to do anything. Ignore the message OAILOG_DEBUG( LOG_S1AP, "INITIAL_CONTEXT_SETUP_FAILURE ignored, mismatch enb_ue_s1ap_id: " "ctxt " ENB_UE_S1AP_ID_FMT " != received " ENB_UE_S1AP_ID_FMT " ", - (uint32_t)ue_ref_p->enb_ue_s1ap_id, (uint32_t)enb_ue_s1ap_id); + (uint32_t)ue_ref_p->enb_ue_s1ap_id(), (uint32_t)enb_ue_s1ap_id); OAILOG_FUNC_RETURN(LOG_S1AP, RETURNerror); } @@ -1817,7 +1806,7 @@ status_code_e s1ap_mme_handle_initial_context_setup_failure( memset((void*)&message_p->ittiMsg.mme_app_initial_context_setup_failure, 0, sizeof(itti_mme_app_initial_context_setup_failure_t)); MME_APP_INITIAL_CONTEXT_SETUP_FAILURE(message_p).mme_ue_s1ap_id = - ue_ref_p->mme_ue_s1ap_id; + ue_ref_p->mme_ue_s1ap_id(); message_p->ittiMsgHeader.imsi = imsi64; rc = send_msg_to_task(&s1ap_task_zmq_ctx, TASK_MME_APP, message_p); @@ -1830,7 +1819,7 @@ status_code_e s1ap_mme_handle_ue_context_modification_response( S1ap_S1AP_PDU_t* pdu) { S1ap_UEContextModificationResponseIEs_t *ie, *ie_enb = NULL; S1ap_UEContextModificationResponse_t* container = NULL; - ue_description_t* ue_ref_p = NULL; + oai::UeDescription* ue_ref_p = nullptr; MessageDef* message_p = NULL; status_code_e rc = RETURNok; imsi64_t imsi64 = INVALID_IMSI64; @@ -1855,7 +1844,7 @@ status_code_e s1ap_mme_handle_ue_context_modification_response( return RETURNerror; } if ((ie) && (ue_ref_p = s1ap_state_get_ue_mmeid( - ie->value.choice.MME_UE_S1AP_ID)) == NULL) { + ie->value.choice.MME_UE_S1AP_ID)) == nullptr) { /* * MME doesn't know the MME UE S1AP ID provided. * No need to do anything. 
Ignore the message @@ -1870,7 +1859,7 @@ status_code_e s1ap_mme_handle_ue_context_modification_response( OAILOG_FUNC_RETURN(LOG_S1AP, RETURNerror); } else { if ((ie_enb) && - (ue_ref_p->enb_ue_s1ap_id == + (ue_ref_p->enb_ue_s1ap_id() == (ie_enb->value.choice.ENB_UE_S1AP_ID & ENB_UE_S1AP_ID_MASK))) { /* * Both eNB UE S1AP ID and MME UE S1AP ID match. @@ -1886,9 +1875,9 @@ status_code_e s1ap_mme_handle_ue_context_modification_response( memset((void*)&message_p->ittiMsg.s1ap_ue_context_mod_response, 0, sizeof(itti_s1ap_ue_context_mod_resp_t)); S1AP_UE_CONTEXT_MODIFICATION_RESPONSE(message_p).mme_ue_s1ap_id = - ue_ref_p->mme_ue_s1ap_id; + ue_ref_p->mme_ue_s1ap_id(); S1AP_UE_CONTEXT_MODIFICATION_RESPONSE(message_p).enb_ue_s1ap_id = - ue_ref_p->enb_ue_s1ap_id; + ue_ref_p->enb_ue_s1ap_id(); message_p->ittiMsgHeader.imsi = imsi64; rc = send_msg_to_task(&s1ap_task_zmq_ctx, TASK_MME_APP, message_p); @@ -1900,7 +1889,7 @@ status_code_e s1ap_mme_handle_ue_context_modification_response( "S1AP_UE_CONTEXT_MODIFICATION_RESPONSE ignored, cause mismatch " "enb_ue_s1ap_id: ctxt" ENB_UE_S1AP_ID_FMT " != request " ENB_UE_S1AP_ID_FMT " ", - (uint32_t)ue_ref_p->enb_ue_s1ap_id, + (uint32_t)ue_ref_p->enb_ue_s1ap_id(), (uint32_t)ie_enb->value.choice.ENB_UE_S1AP_ID); OAILOG_FUNC_RETURN(LOG_S1AP, RETURNerror); } @@ -1915,7 +1904,7 @@ status_code_e s1ap_mme_handle_ue_context_modification_failure( S1ap_S1AP_PDU_t* pdu) { S1ap_UEContextModificationFailureIEs_t *ie, *ie_enb = NULL; S1ap_UEContextModificationFailure_t* container = NULL; - ue_description_t* ue_ref_p = NULL; + oai::UeDescription* ue_ref_p = nullptr; MessageDef* message_p = NULL; status_code_e rc = RETURNok; S1ap_Cause_PR cause_type = {S1ap_Cause_PR_NOTHING}; @@ -1943,7 +1932,7 @@ status_code_e s1ap_mme_handle_ue_context_modification_failure( } if ((ie) && (ue_ref_p = s1ap_state_get_ue_mmeid( - ie->value.choice.MME_UE_S1AP_ID)) == NULL) { + ie->value.choice.MME_UE_S1AP_ID)) == nullptr) { /* * MME doesn't know the MME UE S1AP ID 
provided. * No need to do anything. Ignore the message @@ -1958,7 +1947,7 @@ status_code_e s1ap_mme_handle_ue_context_modification_failure( OAILOG_FUNC_RETURN(LOG_S1AP, RETURNerror); } else { if ((ie_enb) && - (ue_ref_p->enb_ue_s1ap_id == + (ue_ref_p->enb_ue_s1ap_id() == (ie_enb->value.choice.ENB_UE_S1AP_ID & ENB_UE_S1AP_ID_MASK))) { s1ap_imsi_map_t* imsi_map = get_s1ap_imsi_map(); imsi_map->mme_ueid2imsi_map.get(ie->value.choice.MME_UE_S1AP_ID, &imsi64); @@ -2031,9 +2020,9 @@ status_code_e s1ap_mme_handle_ue_context_modification_failure( memset((void*)&message_p->ittiMsg.s1ap_ue_context_mod_response, 0, sizeof(itti_s1ap_ue_context_mod_resp_fail_t)); S1AP_UE_CONTEXT_MODIFICATION_FAILURE(message_p).mme_ue_s1ap_id = - ue_ref_p->mme_ue_s1ap_id; + ue_ref_p->mme_ue_s1ap_id(); S1AP_UE_CONTEXT_MODIFICATION_FAILURE(message_p).enb_ue_s1ap_id = - ue_ref_p->enb_ue_s1ap_id; + ue_ref_p->enb_ue_s1ap_id(); S1AP_UE_CONTEXT_MODIFICATION_FAILURE(message_p).cause = cause_value; message_p->ittiMsgHeader.imsi = imsi64; @@ -2046,7 +2035,7 @@ status_code_e s1ap_mme_handle_ue_context_modification_failure( "S1AP_UE_CONTEXT_MODIFICATION_FAILURE ignored, cause mismatch " "enb_ue_s1ap_id: ctxt " ENB_UE_S1AP_ID_FMT " != request " ENB_UE_S1AP_ID_FMT " ", - (uint32_t)ue_ref_p->enb_ue_s1ap_id, + (uint32_t)ue_ref_p->enb_ue_s1ap_id(), (uint32_t)ie_enb->value.choice.ENB_UE_S1AP_ID); OAILOG_FUNC_RETURN(LOG_S1AP, RETURNerror); } @@ -2069,7 +2058,7 @@ status_code_e s1ap_mme_handle_handover_request_ack( enb_description_t* source_enb = NULL; enb_description_t* target_enb = NULL; uint32_t idx = 0; - ue_description_t* ue_ref_p = NULL; + oai::UeDescription* ue_ref_p = nullptr; mme_ue_s1ap_id_t mme_ue_s1ap_id = INVALID_MME_UE_S1AP_ID; enb_ue_s1ap_id_t tgt_enb_ue_s1ap_id = INVALID_ENB_UE_S1AP_ID; S1ap_HandoverType_t handover_type = -1; @@ -2155,7 +2144,7 @@ status_code_e s1ap_mme_handle_handover_request_ack( // Retrieve the association ID for the eNB that UE is currently connected // (i.e., Source eNB) and 
pull the Source eNB record from s1ap state using // this association - if ((ue_ref_p = s1ap_state_get_ue_mmeid(mme_ue_s1ap_id)) == NULL) { + if ((ue_ref_p = s1ap_state_get_ue_mmeid(mme_ue_s1ap_id)) == nullptr) { OAILOG_ERROR(LOG_S1AP, "MME_UE_S1AP_ID (" MME_UE_S1AP_ID_FMT ") does not point to any valid UE\n", @@ -2169,11 +2158,11 @@ status_code_e s1ap_mme_handle_handover_request_ack( if (!source_enb) { continue; } - if (source_enb->sctp_assoc_id == ue_ref_p->sctp_assoc_id) { + if (source_enb->sctp_assoc_id == ue_ref_p->sctp_assoc_id()) { break; } } - if (source_enb->sctp_assoc_id != ue_ref_p->sctp_assoc_id) { + if (source_enb->sctp_assoc_id != ue_ref_p->sctp_assoc_id()) { OAILOG_ERROR_UE(LOG_S1AP, imsi64, "No source eNB found for UE\n"); OAILOG_FUNC_RETURN(LOG_S1AP, RETURNerror); } @@ -2195,11 +2184,24 @@ status_code_e s1ap_mme_handle_handover_request_ack( // if and when we receive the HANDOVER NOTIFY later in the procedure, so we // need to keep track of this. if (e_rab_list.no_of_items) { - ue_ref_p->s1ap_handover_state.e_rab_admitted_list = e_rab_list; + oai::S1apHandoverState* handover_state = + ue_ref_p->mutable_s1ap_handover_state(); + handover_state->mutable_e_rab_admitted_list()->set_no_of_items( + e_rab_list.no_of_items); + for (uint8_t idx = 0; idx < e_rab_list.no_of_items; idx++) { + oai::ERabAdmittedItem* e_rab_admitted_item = + handover_state->mutable_e_rab_admitted_list()->add_item(); + e_rab_admitted_item->set_e_rab_id(e_rab_list.item[idx].e_rab_id); + e_rab_admitted_item->set_transport_layer_address( + bdata(e_rab_list.item[idx].transport_layer_address), + blength(e_rab_list.item[idx].transport_layer_address)); + e_rab_admitted_item->set_gtp_teid(e_rab_list.item[idx].gtp_teid); + + bdestroy_wrapper(&e_rab_list.item[idx].transport_layer_address); + } } - s1ap_mme_itti_s1ap_handover_request_ack( - mme_ue_s1ap_id, ue_ref_p->enb_ue_s1ap_id, tgt_enb_ue_s1ap_id, + mme_ue_s1ap_id, ue_ref_p->enb_ue_s1ap_id(), tgt_enb_ue_s1ap_id, handover_type, 
source_enb->sctp_assoc_id, tgt_src_container, source_enb->enb_id, target_enb->enb_id, imsi64); @@ -2215,7 +2217,7 @@ status_code_e s1ap_mme_handle_handover_failure(s1ap_state_t* state, S1ap_S1AP_PDU_t out_pdu = {S1ap_S1AP_PDU_PR_NOTHING, {0}}; S1ap_HandoverPreparationFailure_t* out; S1ap_HandoverPreparationFailureIEs_t* hpf_ie = NULL; - ue_description_t* ue_ref_p = NULL; + oai::UeDescription* ue_ref_p = nullptr; mme_ue_s1ap_id_t mme_ue_s1ap_id = INVALID_MME_UE_S1AP_ID; S1ap_Cause_PR cause_type; long cause_value; @@ -2257,7 +2259,7 @@ status_code_e s1ap_mme_handle_handover_failure(s1ap_state_t* state, // to the source eNB. // get UE context - if ((ue_ref_p = s1ap_state_get_ue_mmeid(mme_ue_s1ap_id)) == NULL) { + if ((ue_ref_p = s1ap_state_get_ue_mmeid(mme_ue_s1ap_id)) == nullptr) { OAILOG_ERROR( LOG_S1AP, "could not get ue context for mme_ue_s1ap_id " MME_UE_S1AP_ID_FMT @@ -2266,12 +2268,12 @@ status_code_e s1ap_mme_handle_handover_failure(s1ap_state_t* state, OAILOG_FUNC_RETURN(LOG_S1AP, RETURNerror); } - if (ue_ref_p->s1_ue_state == S1AP_UE_HANDOVER) { + if (ue_ref_p->s1ap_ue_state() == oai::S1AP_UE_HANDOVER) { // this effectively cancels the HandoverPreparation proecedure as we // only send a HandoverCommand if the UE is in the S1AP_UE_HANDOVER // state. - ue_ref_p->s1_ue_state = S1AP_UE_CONNECTED; - ue_ref_p->s1ap_handover_state = (struct s1ap_handover_state_s){0}; + ue_ref_p->set_s1ap_ue_state(oai::S1AP_UE_CONNECTED); + ue_ref_p->mutable_s1ap_handover_state()->Clear(); } else { // Not a failure, but nothing for us to do. 
OAILOG_INFO( @@ -2308,7 +2310,7 @@ status_code_e s1ap_mme_handle_handover_failure(s1ap_state_t* state, hpf_ie->criticality = S1ap_Criticality_ignore; hpf_ie->value.present = S1ap_HandoverPreparationFailureIEs__value_PR_ENB_UE_S1AP_ID; - hpf_ie->value.choice.ENB_UE_S1AP_ID = ue_ref_p->enb_ue_s1ap_id; + hpf_ie->value.choice.ENB_UE_S1AP_ID = ue_ref_p->enb_ue_s1ap_id(); ASN_SEQUENCE_ADD(&out->protocolIEs.list, hpf_ie); // cause (mandatory) @@ -2337,8 +2339,8 @@ status_code_e s1ap_mme_handle_handover_failure(s1ap_state_t* state, "\n", (uint32_t)mme_ue_s1ap_id); - s1ap_mme_itti_send_sctp_request(&b, ue_ref_p->sctp_assoc_id, - ue_ref_p->sctp_stream_send, mme_ue_s1ap_id); + s1ap_mme_itti_send_sctp_request(&b, ue_ref_p->sctp_assoc_id(), + ue_ref_p->sctp_stream_send(), mme_ue_s1ap_id); OAILOG_FUNC_RETURN(LOG_S1AP, RETURNok); } @@ -2352,7 +2354,7 @@ status_code_e s1ap_mme_handle_handover_cancel(s1ap_state_t* state, S1ap_S1AP_PDU_t out_pdu = {S1ap_S1AP_PDU_PR_NOTHING, {0}}; S1ap_HandoverCancelAcknowledge_t* out; S1ap_HandoverCancelAcknowledgeIEs_t* hca_ie = NULL; - ue_description_t* ue_ref_p = NULL; + oai::UeDescription* ue_ref_p = nullptr; e_rab_admitted_list_t e_rab_admitted_list = {0}; mme_ue_s1ap_id_t mme_ue_s1ap_id = INVALID_MME_UE_S1AP_ID; enb_ue_s1ap_id_t enb_ue_s1ap_id = INVALID_ENB_UE_S1AP_ID; @@ -2411,7 +2413,7 @@ status_code_e s1ap_mme_handle_handover_cancel(s1ap_state_t* state, // connected state, and generate a cancel acknowledgement (immediately). 
// get UE context - if ((ue_ref_p = s1ap_state_get_ue_mmeid(mme_ue_s1ap_id)) == NULL) { + if ((ue_ref_p = s1ap_state_get_ue_mmeid(mme_ue_s1ap_id)) == nullptr) { OAILOG_ERROR( LOG_S1AP, "could not get ue context for mme_ue_s1ap_id " MME_UE_S1AP_ID_FMT @@ -2420,19 +2422,15 @@ status_code_e s1ap_mme_handle_handover_cancel(s1ap_state_t* state, OAILOG_FUNC_RETURN(LOG_S1AP, RETURNerror); } - if (ue_ref_p->s1_ue_state == S1AP_UE_HANDOVER) { + if (ue_ref_p->s1ap_ue_state() == oai::S1AP_UE_HANDOVER) { // this effectively cancels the HandoverPreparation proecedure as we // only send a HandoverCommand if the UE is in the S1AP_UE_HANDOVER // state. - ue_ref_p->s1_ue_state = S1AP_UE_CONNECTED; + ue_ref_p->set_s1ap_ue_state(oai::S1AP_UE_CONNECTED); /* Free all the transport layer address pointers in ERAB admitted list * before actually resetting the S1AP handover state */ - e_rab_admitted_list = ue_ref_p->s1ap_handover_state.e_rab_admitted_list; - for (int i = 0; i < e_rab_admitted_list.no_of_items; i++) { - bdestroy_wrapper(&e_rab_admitted_list.item[i].transport_layer_address); - } - ue_ref_p->s1ap_handover_state = (struct s1ap_handover_state_s){0}; + ue_ref_p->mutable_s1ap_handover_state()->Clear(); } else { // Not a failure, but nothing for us to do. OAILOG_INFO( @@ -2498,7 +2496,7 @@ status_code_e s1ap_mme_handle_handover_request( S1ap_HandoverRequestIEs_t* ie = NULL; enb_description_t* target_enb = NULL; sctp_stream_id_t stream = 0x0; - ue_description_t* ue_ref_p = NULL; + oai::UeDescription* ue_ref_p = nullptr; OAILOG_FUNC_IN(LOG_S1AP); if (ho_request_p == NULL) { @@ -2510,7 +2508,7 @@ status_code_e s1ap_mme_handle_handover_request( // get the ue description if ((ue_ref_p = s1ap_state_get_ue_mmeid(ho_request_p->mme_ue_s1ap_id)) == - NULL) { + nullptr) { OAILOG_ERROR( LOG_S1AP, "could not get ue context for mme_ue_s1ap_id " MME_UE_S1AP_ID_FMT @@ -2528,17 +2526,17 @@ status_code_e s1ap_mme_handle_handover_request( // set the recv and send streams for UE on the target. 
stream = target_enb->next_sctp_stream; - ue_ref_p->s1ap_handover_state.target_sctp_stream_recv = stream; - ue_ref_p->s1ap_handover_state.source_sctp_stream_recv = - ue_ref_p->sctp_stream_recv; + ue_ref_p->mutable_s1ap_handover_state()->set_target_sctp_stream_recv(stream); + ue_ref_p->mutable_s1ap_handover_state()->set_source_sctp_stream_recv( + ue_ref_p->sctp_stream_recv()); target_enb->next_sctp_stream += 1; if (target_enb->next_sctp_stream >= target_enb->instreams) { target_enb->next_sctp_stream = 1; } - ue_ref_p->s1ap_handover_state.target_sctp_stream_send = - target_enb->next_sctp_stream; - ue_ref_p->s1ap_handover_state.source_sctp_stream_send = - ue_ref_p->sctp_stream_send; + ue_ref_p->mutable_s1ap_handover_state()->set_target_sctp_stream_send( + target_enb->next_sctp_stream); + ue_ref_p->mutable_s1ap_handover_state()->set_source_sctp_stream_send( + ue_ref_p->sctp_stream_send()); // Build and send PDU pdu.present = S1ap_S1AP_PDU_PR_initiatingMessage; @@ -2959,7 +2957,7 @@ status_code_e s1ap_mme_handle_handover_command( S1ap_S1AP_PDU_t pdu = {S1ap_S1AP_PDU_PR_NOTHING, {0}}; S1ap_HandoverCommand_t* out; S1ap_HandoverCommandIEs_t* ie = NULL; - ue_description_t* ue_ref_p = NULL; + oai::UeDescription* ue_ref_p = nullptr; sctp_stream_id_t stream = 0x0; OAILOG_FUNC_IN(LOG_S1AP); @@ -2969,7 +2967,7 @@ status_code_e s1ap_mme_handle_handover_command( } if ((ue_ref_p = s1ap_state_get_ue_mmeid(ho_command_p->mme_ue_s1ap_id)) == - NULL) { + nullptr) { OAILOG_ERROR( LOG_S1AP, "could not get ue context for mme_ue_s1ap_id " MME_UE_S1AP_ID_FMT @@ -2977,18 +2975,21 @@ status_code_e s1ap_mme_handle_handover_command( (uint32_t)ho_command_p->mme_ue_s1ap_id); OAILOG_FUNC_RETURN(LOG_S1AP, RETURNerror); } else { - stream = ue_ref_p->sctp_stream_send; + stream = ue_ref_p->sctp_stream_send(); } // we're doing handover, update the ue state - ue_ref_p->s1_ue_state = S1AP_UE_HANDOVER; - ue_ref_p->s1ap_handover_state.mme_ue_s1ap_id = ho_command_p->mme_ue_s1ap_id; - 
ue_ref_p->s1ap_handover_state.source_enb_id = ho_command_p->source_enb_id; - ue_ref_p->s1ap_handover_state.target_enb_id = ho_command_p->target_enb_id; - ue_ref_p->s1ap_handover_state.target_enb_ue_s1ap_id = - ho_command_p->tgt_enb_ue_s1ap_id; - ue_ref_p->s1ap_handover_state.source_enb_ue_s1ap_id = - ue_ref_p->enb_ue_s1ap_id; + ue_ref_p->set_s1ap_ue_state(oai::S1AP_UE_HANDOVER); + ue_ref_p->mutable_s1ap_handover_state()->set_mme_ue_s1ap_id( + ho_command_p->mme_ue_s1ap_id); + ue_ref_p->mutable_s1ap_handover_state()->set_source_enb_id( + ho_command_p->source_enb_id); + ue_ref_p->mutable_s1ap_handover_state()->set_target_enb_id( + ho_command_p->target_enb_id); + ue_ref_p->mutable_s1ap_handover_state()->set_target_enb_ue_s1ap_id( + ho_command_p->tgt_enb_ue_s1ap_id); + ue_ref_p->mutable_s1ap_handover_state()->set_source_enb_ue_s1ap_id( + ue_ref_p->enb_ue_s1ap_id()); OAILOG_INFO(LOG_S1AP, "Handover Command received"); pdu.present = S1ap_S1AP_PDU_PR_successfulOutcome; @@ -3058,8 +3059,8 @@ status_code_e s1ap_mme_handle_handover_notify(s1ap_state_t* state, S1ap_HandoverNotify_t* container = NULL; S1ap_HandoverNotifyIEs_t* ie = NULL; enb_description_t* target_enb = NULL; - ue_description_t* src_ue_ref_p = NULL; - ue_description_t* new_ue_ref_p = NULL; + oai::UeDescription* src_ue_ref_p = nullptr; + oai::UeDescription* new_ue_ref_p = nullptr; mme_ue_s1ap_id_t mme_ue_s1ap_id = INVALID_MME_UE_S1AP_ID; enb_ue_s1ap_id_t tgt_enb_ue_s1ap_id = INVALID_ENB_UE_S1AP_ID; ecgi_t ecgi = {.plmn = {0}, .cell_identity = {0}}; @@ -3140,7 +3141,7 @@ status_code_e s1ap_mme_handle_handover_notify(s1ap_state_t* state, imsi_map->mme_ueid2imsi_map.get(mme_ue_s1ap_id, &imsi64); // get existing UE context - if ((src_ue_ref_p = s1ap_state_get_ue_mmeid(mme_ue_s1ap_id)) == NULL) { + if ((src_ue_ref_p = s1ap_state_get_ue_mmeid(mme_ue_s1ap_id)) == nullptr) { OAILOG_ERROR_UE(LOG_S1AP, imsi64, "source MME_UE_S1AP_ID (" MME_UE_S1AP_ID_FMT ") does not point to any valid UE\n", @@ -3150,7 +3151,7 @@ 
status_code_e s1ap_mme_handle_handover_notify(s1ap_state_t* state, // create new UE context, remove the old one. new_ue_ref_p = s1ap_state_get_ue_enbid(target_enb->sctp_assoc_id, tgt_enb_ue_s1ap_id); - if (new_ue_ref_p != NULL) { + if (new_ue_ref_p != nullptr) { OAILOG_ERROR_UE( LOG_S1AP, imsi64, "S1AP:Handover Notify- Received ENB_UE_S1AP_ID is not Unique " @@ -3159,7 +3160,7 @@ status_code_e s1ap_mme_handle_handover_notify(s1ap_state_t* state, OAILOG_FUNC_RETURN(LOG_S1AP, RETURNerror); } if ((new_ue_ref_p = s1ap_new_ue(state, assoc_id, tgt_enb_ue_s1ap_id)) == - NULL) { + nullptr) { // If we failed to allocate a new UE return -1 OAILOG_ERROR_UE( LOG_S1AP, imsi64, @@ -3168,50 +3169,53 @@ status_code_e s1ap_mme_handle_handover_notify(s1ap_state_t* state, tgt_enb_ue_s1ap_id); OAILOG_FUNC_RETURN(LOG_S1AP, RETURNerror); } - new_ue_ref_p->s1_ue_state = S1AP_UE_CONNECTED; // handover has completed - new_ue_ref_p->enb_ue_s1ap_id = tgt_enb_ue_s1ap_id; + new_ue_ref_p->set_s1ap_ue_state( + oai::S1AP_UE_CONNECTED); // handover has completed + new_ue_ref_p->set_enb_ue_s1ap_id(tgt_enb_ue_s1ap_id); // Will be allocated by NAS - new_ue_ref_p->mme_ue_s1ap_id = mme_ue_s1ap_id; - - new_ue_ref_p->s1ap_ue_context_rel_timer.id = - src_ue_ref_p->s1ap_ue_context_rel_timer.id; - new_ue_ref_p->s1ap_ue_context_rel_timer.msec = - src_ue_ref_p->s1ap_ue_context_rel_timer.msec; - new_ue_ref_p->sctp_stream_recv = - src_ue_ref_p->s1ap_handover_state.target_sctp_stream_recv; - new_ue_ref_p->sctp_stream_send = - src_ue_ref_p->s1ap_handover_state.target_sctp_stream_send; - new_ue_ref_p->s1ap_handover_state = src_ue_ref_p->s1ap_handover_state; + new_ue_ref_p->set_mme_ue_s1ap_id(mme_ue_s1ap_id); + + new_ue_ref_p->mutable_s1ap_ue_context_rel_timer()->set_id( + src_ue_ref_p->s1ap_ue_context_rel_timer().id()); + new_ue_ref_p->mutable_s1ap_ue_context_rel_timer()->set_msec( + src_ue_ref_p->s1ap_ue_context_rel_timer().msec()); + new_ue_ref_p->set_sctp_stream_recv( + 
src_ue_ref_p->mutable_s1ap_handover_state()->target_sctp_stream_recv()); + new_ue_ref_p->set_sctp_stream_send( + src_ue_ref_p->mutable_s1ap_handover_state()->target_sctp_stream_send()); + + new_ue_ref_p->mutable_s1ap_handover_state()->MergeFrom( + src_ue_ref_p->s1ap_handover_state()); // generate a message to update bearers s1ap_mme_itti_s1ap_handover_notify( - mme_ue_s1ap_id, src_ue_ref_p->s1ap_handover_state, tgt_enb_ue_s1ap_id, + mme_ue_s1ap_id, src_ue_ref_p->s1ap_handover_state(), tgt_enb_ue_s1ap_id, assoc_id, ecgi, imsi64); // Send context release command to source eNB s1ap_mme_generate_ue_context_release_command( state, src_ue_ref_p, S1AP_SUCCESSFUL_HANDOVER, imsi64, - src_ue_ref_p->sctp_assoc_id, - src_ue_ref_p->s1ap_handover_state.source_sctp_stream_send, + src_ue_ref_p->sctp_assoc_id(), + src_ue_ref_p->s1ap_handover_state().source_sctp_stream_send(), mme_ue_s1ap_id, - src_ue_ref_p->s1ap_handover_state.source_enb_ue_s1ap_id); + src_ue_ref_p->s1ap_handover_state().source_enb_ue_s1ap_id()); /* Remove ue description from source eNB */ s1ap_remove_ue(state, src_ue_ref_p); /* Mapping between mme_ue_s1ap_id, assoc_id and enb_ue_s1ap_id */ magma::proto_map_rc_t rc = - state->mmeid2associd.insert(new_ue_ref_p->mme_ue_s1ap_id, assoc_id); + state->mmeid2associd.insert(new_ue_ref_p->mme_ue_s1ap_id(), assoc_id); - target_enb->ue_id_coll.insert(new_ue_ref_p->mme_ue_s1ap_id, - new_ue_ref_p->comp_s1ap_id); + target_enb->ue_id_coll.insert(new_ue_ref_p->mme_ue_s1ap_id(), + new_ue_ref_p->comp_s1ap_id()); OAILOG_DEBUG_UE( LOG_S1AP, imsi64, "Associated sctp_assoc_id %d, enb_ue_s1ap_id " ENB_UE_S1AP_ID_FMT ", mme_ue_s1ap_id " MME_UE_S1AP_ID_FMT ":%s \n", - assoc_id, new_ue_ref_p->enb_ue_s1ap_id, new_ue_ref_p->mme_ue_s1ap_id, - magma::map_rc_code2string(rc)); + assoc_id, new_ue_ref_p->enb_ue_s1ap_id(), + new_ue_ref_p->mme_ue_s1ap_id(), magma::map_rc_code2string(rc)); } OAILOG_FUNC_RETURN(LOG_S1AP, RETURNok); @@ -3222,7 +3226,7 @@ status_code_e 
s1ap_mme_handle_enb_status_transfer( const sctp_stream_id_t stream, S1ap_S1AP_PDU_t* pdu) { S1ap_ENBStatusTransfer_t* container = NULL; S1ap_ENBStatusTransferIEs_t* ie = NULL; - ue_description_t* ue_ref_p = NULL; + oai::UeDescription* ue_ref_p = nullptr; mme_ue_s1ap_id_t mme_ue_s1ap_id = INVALID_MME_UE_S1AP_ID; enb_description_t* target_enb_association = NULL; uint8_t* buffer = NULL; @@ -3246,7 +3250,7 @@ status_code_e s1ap_mme_handle_enb_status_transfer( } // get the UE and handover state - if ((ue_ref_p = s1ap_state_get_ue_mmeid(mme_ue_s1ap_id)) == NULL) { + if ((ue_ref_p = s1ap_state_get_ue_mmeid(mme_ue_s1ap_id)) == nullptr) { OAILOG_ERROR( LOG_S1AP, "could not get ue context for mme_ue_s1ap_id " MME_UE_S1AP_ID_FMT @@ -3259,14 +3263,14 @@ status_code_e s1ap_mme_handle_enb_status_transfer( "Received eNBStatusTransfer from source enb_id assoc %u for " "ue " MME_UE_S1AP_ID_FMT " to target enb_id %u\n", assoc_id, mme_ue_s1ap_id, - ue_ref_p->s1ap_handover_state.target_enb_ue_s1ap_id); + ue_ref_p->s1ap_handover_state().target_enb_ue_s1ap_id()); // set the target eNB_UE_S1AP_ID S1AP_FIND_PROTOCOLIE_BY_ID(S1ap_ENBStatusTransferIEs_t, ie, container, S1ap_ProtocolIE_ID_id_eNB_UE_S1AP_ID, true); if (ie) { ie->value.choice.ENB_UE_S1AP_ID = - ue_ref_p->s1ap_handover_state.target_enb_ue_s1ap_id; + ue_ref_p->s1ap_handover_state().target_enb_ue_s1ap_id(); } else { OAILOG_FUNC_RETURN(LOG_S1AP, RETURNerror); } @@ -3281,14 +3285,14 @@ status_code_e s1ap_mme_handle_enb_status_transfer( continue; } if (target_enb_association->enb_id == - ue_ref_p->s1ap_handover_state.target_enb_id) { + ue_ref_p->s1ap_handover_state().target_enb_id()) { break; } } if (target_enb_association->enb_id != - ue_ref_p->s1ap_handover_state.target_enb_id) { + ue_ref_p->s1ap_handover_state().target_enb_id()) { OAILOG_ERROR(LOG_S1AP, "No eNB for enb_id %d\n", - ue_ref_p->s1ap_handover_state.target_enb_id); + ue_ref_p->s1ap_handover_state().target_enb_id()); OAILOG_FUNC_RETURN(LOG_S1AP, RETURNerror); } } @@ 
-3303,7 +3307,7 @@ status_code_e s1ap_mme_handle_enb_status_transfer( OAILOG_ERROR( LOG_S1AP, "Failed to encode MME Configuration Transfer message for enb_id %u\n", - ue_ref_p->s1ap_handover_state.target_enb_id); + ue_ref_p->s1ap_handover_state().target_enb_id()); OAILOG_FUNC_RETURN(LOG_S1AP, RETURNerror); } @@ -3312,8 +3316,8 @@ status_code_e s1ap_mme_handle_enb_status_transfer( s1ap_mme_itti_send_sctp_request( &b, target_enb_association->sctp_assoc_id, - ue_ref_p->s1ap_handover_state.target_sctp_stream_recv, - ue_ref_p->mme_ue_s1ap_id); + ue_ref_p->s1ap_handover_state().target_sctp_stream_recv(), + ue_ref_p->mme_ue_s1ap_id()); OAILOG_FUNC_RETURN(LOG_S1AP, RETURNok); } @@ -3326,8 +3330,8 @@ status_code_e s1ap_mme_handle_path_switch_request( S1ap_PathSwitchRequestIEs_t* ie = NULL; S1ap_E_RABToBeSwitchedDLItemIEs_t* eRABToBeSwitchedDlItemIEs_p = NULL; enb_description_t* enb_association = NULL; - ue_description_t* ue_ref_p = NULL; - ue_description_t* new_ue_ref_p = NULL; + oai::UeDescription* ue_ref_p = nullptr; + oai::UeDescription* new_ue_ref_p = nullptr; mme_ue_s1ap_id_t mme_ue_s1ap_id = INVALID_MME_UE_S1AP_ID; enb_ue_s1ap_id_t enb_ue_s1ap_id = INVALID_ENB_UE_S1AP_ID; ecgi_t ecgi = {.plmn = {0}, .cell_identity = {0}}; @@ -3390,7 +3394,7 @@ status_code_e s1ap_mme_handle_path_switch_request( OAILOG_FUNC_RETURN(LOG_S1AP, RETURNerror); } - if ((ue_ref_p = s1ap_state_get_ue_mmeid(mme_ue_s1ap_id)) == NULL) { + if ((ue_ref_p = s1ap_state_get_ue_mmeid(mme_ue_s1ap_id)) == nullptr) { /* * The MME UE S1AP ID provided by eNB doesn't point to any valid UE. * MME ignore this PATH SWITCH REQUEST. 
@@ -3403,7 +3407,7 @@ status_code_e s1ap_mme_handle_path_switch_request( } else { new_ue_ref_p = s1ap_state_get_ue_enbid(enb_association->sctp_assoc_id, enb_ue_s1ap_id); - if (new_ue_ref_p != NULL) { + if (new_ue_ref_p != nullptr) { OAILOG_ERROR_UE( LOG_S1AP, imsi64, "S1AP:Path Switch Request- Received ENB_UE_S1AP_ID is not Unique " @@ -3415,7 +3419,8 @@ status_code_e s1ap_mme_handle_path_switch_request( * Creat New UE Context with target eNB and delete Old UE Context * from source eNB. */ - if ((new_ue_ref_p = s1ap_new_ue(state, assoc_id, enb_ue_s1ap_id)) == NULL) { + if ((new_ue_ref_p = s1ap_new_ue(state, assoc_id, enb_ue_s1ap_id)) == + nullptr) { // If we failed to allocate a new UE return -1 OAILOG_ERROR_UE( LOG_S1AP, imsi64, @@ -3424,18 +3429,18 @@ status_code_e s1ap_mme_handle_path_switch_request( enb_ue_s1ap_id); OAILOG_FUNC_RETURN(LOG_S1AP, RETURNerror); } - new_ue_ref_p->s1_ue_state = ue_ref_p->s1_ue_state; - new_ue_ref_p->enb_ue_s1ap_id = enb_ue_s1ap_id; + new_ue_ref_p->set_s1ap_ue_state(ue_ref_p->s1ap_ue_state()); + new_ue_ref_p->set_enb_ue_s1ap_id(enb_ue_s1ap_id); // Will be allocated by NAS - new_ue_ref_p->mme_ue_s1ap_id = mme_ue_s1ap_id; + new_ue_ref_p->set_mme_ue_s1ap_id(mme_ue_s1ap_id); - new_ue_ref_p->s1ap_ue_context_rel_timer.id = - ue_ref_p->s1ap_ue_context_rel_timer.id; - new_ue_ref_p->s1ap_ue_context_rel_timer.msec = - ue_ref_p->s1ap_ue_context_rel_timer.msec; + new_ue_ref_p->mutable_s1ap_ue_context_rel_timer()->set_id( + ue_ref_p->s1ap_ue_context_rel_timer().id()); + new_ue_ref_p->mutable_s1ap_ue_context_rel_timer()->set_msec( + ue_ref_p->s1ap_ue_context_rel_timer().msec()); // On which stream we received the message - new_ue_ref_p->sctp_stream_recv = stream; - new_ue_ref_p->sctp_stream_send = enb_association->next_sctp_stream; + new_ue_ref_p->set_sctp_stream_recv(stream); + new_ue_ref_p->set_sctp_stream_send(enb_association->next_sctp_stream); enb_association->next_sctp_stream += 1; if (enb_association->next_sctp_stream >= 
enb_association->instreams) { enb_association->next_sctp_stream = 1; @@ -3445,17 +3450,17 @@ status_code_e s1ap_mme_handle_path_switch_request( /* Mapping between mme_ue_s1ap_id, assoc_id and enb_ue_s1ap_id */ magma::proto_map_rc_t rc = - state->mmeid2associd.insert(new_ue_ref_p->mme_ue_s1ap_id, assoc_id); + state->mmeid2associd.insert(new_ue_ref_p->mme_ue_s1ap_id(), assoc_id); - enb_association->ue_id_coll.insert(new_ue_ref_p->mme_ue_s1ap_id, - new_ue_ref_p->comp_s1ap_id); + enb_association->ue_id_coll.insert(new_ue_ref_p->mme_ue_s1ap_id(), + new_ue_ref_p->comp_s1ap_id()); OAILOG_DEBUG_UE( LOG_S1AP, imsi64, "Associated sctp_assoc_id %d, enb_ue_s1ap_id " ENB_UE_S1AP_ID_FMT ", mme_ue_s1ap_id " MME_UE_S1AP_ID_FMT ":%s \n", - assoc_id, new_ue_ref_p->enb_ue_s1ap_id, new_ue_ref_p->mme_ue_s1ap_id, - magma::map_rc_code2string(rc)); + assoc_id, new_ue_ref_p->enb_ue_s1ap_id(), + new_ue_ref_p->mme_ue_s1ap_id(), magma::map_rc_code2string(rc)); S1AP_FIND_PROTOCOLIE_BY_ID(S1ap_PathSwitchRequestIEs_t, ie, container, S1ap_ProtocolIE_ID_id_E_RABToBeSwitchedDLList, @@ -3532,9 +3537,9 @@ status_code_e s1ap_mme_handle_path_switch_request( } s1ap_mme_itti_s1ap_path_switch_request( - assoc_id, enb_association->enb_id, new_ue_ref_p->enb_ue_s1ap_id, - &e_rab_to_be_switched_dl_list, new_ue_ref_p->mme_ue_s1ap_id, &ecgi, &tai, - encryption_algorithm_capabilities, integrity_algorithm_capabilities, + assoc_id, enb_association->enb_id, new_ue_ref_p->enb_ue_s1ap_id(), + &e_rab_to_be_switched_dl_list, new_ue_ref_p->mme_ue_s1ap_id(), &ecgi, + &tai, encryption_algorithm_capabilities, integrity_algorithm_capabilities, imsi64); OAILOG_FUNC_RETURN(LOG_S1AP, RETURNok); @@ -3547,18 +3552,22 @@ static bool s1ap_send_enb_deregistered_ind(__attribute__((unused)) uint64_t const dataP, void* argP, void** resultP) { arg_s1ap_send_enb_dereg_ind_t* arg = (arg_s1ap_send_enb_dereg_ind_t*)argP; - ue_description_t* ue_ref_p = NULL; + oai::UeDescription* ue_ref_p = nullptr; // Ask for the release of each UE context 
associated to the eNB - hash_table_ts_t* s1ap_ue_state = get_s1ap_ue_state(); - hashtable_ts_get(s1ap_ue_state, (const hash_key_t)dataP, (void**)&ue_ref_p); + map_uint64_ue_description_t* s1ap_ue_state = get_s1ap_ue_state(); + if (!s1ap_ue_state) { + OAILOG_ERROR(LOG_S1AP, "Failed to get s1ap_ue_state"); + OAILOG_FUNC_RETURN(LOG_S1AP, false); + } + s1ap_ue_state->get(dataP, &ue_ref_p); if (ue_ref_p) { if (arg->current_ue_index == 0) { arg->message_p = DEPRECATEDitti_alloc_new_message_fatal( TASK_S1AP, S1AP_ENB_DEREGISTERED_IND); OAILOG_DEBUG(LOG_S1AP, "eNB Deregesteration"); } - if (ue_ref_p->mme_ue_s1ap_id == INVALID_MME_UE_S1AP_ID) { + if (ue_ref_p->mme_ue_s1ap_id() == INVALID_MME_UE_S1AP_ID) { /* * Send deregistered ind for this also and let MMEAPP find the context * using enb_ue_s1ap_id_key @@ -3571,9 +3580,9 @@ static bool s1ap_send_enb_deregistered_ind(__attribute__((unused)) "Too many deregistered UEs reported in S1AP_ENB_DEREGISTERED_IND " "message "); S1AP_ENB_DEREGISTERED_IND(arg->message_p) - .mme_ue_s1ap_id[arg->current_ue_index] = ue_ref_p->mme_ue_s1ap_id; + .mme_ue_s1ap_id[arg->current_ue_index] = ue_ref_p->mme_ue_s1ap_id(); S1AP_ENB_DEREGISTERED_IND(arg->message_p) - .enb_ue_s1ap_id[arg->current_ue_index] = ue_ref_p->enb_ue_s1ap_id; + .enb_ue_s1ap_id[arg->current_ue_index] = ue_ref_p->enb_ue_s1ap_id(); arg->handled_ues++; arg->current_ue_index++; @@ -3616,16 +3625,20 @@ bool construct_s1ap_mme_full_reset_req(uint32_t keyP, const uint64_t dataP, void* argP, void** resultP) { arg_s1ap_construct_enb_reset_req_t* arg = reinterpret_cast<arg_s1ap_construct_enb_reset_req_t*>(argP); - ue_description_t* ue_ref = reinterpret_cast<ue_description_t*>(dataP); + oai::UeDescription* ue_ref = reinterpret_cast<oai::UeDescription*>(dataP); - hash_table_ts_t* s1ap_ue_state = get_s1ap_ue_state(); - hashtable_ts_get(s1ap_ue_state, (const hash_key_t)dataP, (void**)&ue_ref); + map_uint64_ue_description_t* s1ap_ue_state = get_s1ap_ue_state(); + if (!s1ap_ue_state) { + OAILOG_ERROR(LOG_S1AP, "Failed to get s1ap_ue_state"); + 
OAILOG_FUNC_RETURN(LOG_S1AP, false); + } + s1ap_ue_state->get(dataP, &ue_ref); uint32_t i = arg->current_ue_index; if (ue_ref) { S1AP_ENB_INITIATED_RESET_REQ(arg->msg).ue_to_reset_list[i].mme_ue_s1ap_id = - ue_ref->mme_ue_s1ap_id; + ue_ref->mme_ue_s1ap_id(); S1AP_ENB_INITIATED_RESET_REQ(arg->msg).ue_to_reset_list[i].enb_ue_s1ap_id = - ue_ref->enb_ue_s1ap_id; + ue_ref->enb_ue_s1ap_id(); } else { OAILOG_TRACE(LOG_S1AP, "No valid UE provided in callback: %p\n", ue_ref); S1AP_ENB_INITIATED_RESET_REQ(arg->msg).ue_to_reset_list[i].mme_ue_s1ap_id = @@ -3802,18 +3815,19 @@ status_code_e s1ap_handle_new_association(s1ap_state_t* state, //------------------------------------------------------------------------------ void s1ap_mme_release_ue_context(s1ap_state_t* state, - ue_description_t* ue_ref_p, imsi64_t imsi64) { + oai::UeDescription* ue_ref_p, + imsi64_t imsi64) { MessageDef* message_p = NULL; OAILOG_FUNC_IN(LOG_S1AP); - if (ue_ref_p == NULL) { - OAILOG_ERROR(LOG_S1AP, "ue_ref_p is NULL\n"); + if (ue_ref_p == nullptr) { + OAILOG_ERROR(LOG_S1AP, "ue_ref_p is nullptr\n"); } // Stop the ue context release timer - s1ap_stop_timer(ue_ref_p->s1ap_ue_context_rel_timer.id); - ue_ref_p->s1ap_ue_context_rel_timer.id = S1AP_TIMER_INACTIVE_ID; + s1ap_stop_timer(ue_ref_p->s1ap_ue_context_rel_timer().id()); + ue_ref_p->mutable_s1ap_ue_context_rel_timer()->set_id(S1AP_TIMER_INACTIVE_ID); OAILOG_DEBUG_UE(LOG_S1AP, imsi64, "Releasing UE Context for UE id %d \n", - ue_ref_p->mme_ue_s1ap_id); + ue_ref_p->mme_ue_s1ap_id()); /* * Remove UE context and inform MME_APP. 
@@ -3823,16 +3837,16 @@ void s1ap_mme_release_ue_context(s1ap_state_t* state, memset((void*)&message_p->ittiMsg.s1ap_ue_context_release_complete, 0, sizeof(itti_s1ap_ue_context_release_complete_t)); S1AP_UE_CONTEXT_RELEASE_COMPLETE(message_p).mme_ue_s1ap_id = - ue_ref_p->mme_ue_s1ap_id; + ue_ref_p->mme_ue_s1ap_id(); message_p->ittiMsgHeader.imsi = imsi64; send_msg_to_task(&s1ap_task_zmq_ctx, TASK_MME_APP, message_p); - if (!(ue_ref_p->s1_ue_state == S1AP_UE_WAITING_CRR)) { + if (!(ue_ref_p->s1ap_ue_state() == oai::S1AP_UE_WAITING_CRC)) { OAILOG_ERROR(LOG_S1AP, "Incorrect S1AP UE state\n"); } OAILOG_DEBUG_UE(LOG_S1AP, imsi64, "Removed S1AP UE " MME_UE_S1AP_ID_FMT "\n", - (uint32_t)ue_ref_p->mme_ue_s1ap_id); + (uint32_t)ue_ref_p->mme_ue_s1ap_id()); s1ap_remove_ue(state, ue_ref_p); OAILOG_FUNC_OUT(LOG_S1AP); @@ -3848,7 +3862,7 @@ status_code_e s1ap_mme_handle_error_ind_message(s1ap_state_t* state, increment_counter("s1ap_error_ind_rcvd", 1, NO_LABELS); S1ap_ErrorIndication_t* container = NULL; S1ap_ErrorIndicationIEs_t* ie = NULL; - ue_description_t* ue_ref_p = NULL; + oai::UeDescription* ue_ref_p = nullptr; enb_ue_s1ap_id_t enb_ue_s1ap_id = INVALID_ENB_UE_S1AP_ID; mme_ue_s1ap_id_t mme_ue_s1ap_id = INVALID_MME_UE_S1AP_ID; S1ap_Cause_PR cause_type; @@ -3885,7 +3899,8 @@ status_code_e s1ap_mme_handle_error_ind_message(s1ap_state_t* state, OAILOG_FUNC_RETURN(LOG_S1AP, RETURNerror); } - if ((ue_ref_p = s1ap_state_get_ue_mmeid((uint32_t)mme_ue_s1ap_id)) == NULL) { + if ((ue_ref_p = s1ap_state_get_ue_mmeid((uint32_t)mme_ue_s1ap_id)) == + nullptr) { OAILOG_WARNING( LOG_S1AP, "No UE is attached to this mme UE s1ap id: " MME_UE_S1AP_ID_FMT @@ -3959,7 +3974,7 @@ status_code_e s1ap_mme_handle_erab_setup_response( OAILOG_FUNC_IN(LOG_S1AP); S1ap_E_RABSetupResponse_t* container = NULL; S1ap_E_RABSetupResponseIEs_t* ie = NULL; - ue_description_t* ue_ref_p = NULL; + oai::UeDescription* ue_ref_p = nullptr; MessageDef* message_p = NULL; enb_ue_s1ap_id_t enb_ue_s1ap_id = 
INVALID_ENB_UE_S1AP_ID; mme_ue_s1ap_id_t mme_ue_s1ap_id = INVALID_MME_UE_S1AP_ID; @@ -3984,7 +3999,8 @@ status_code_e s1ap_mme_handle_erab_setup_response( } else { OAILOG_FUNC_RETURN(LOG_S1AP, RETURNerror); } - if ((ue_ref_p = s1ap_state_get_ue_mmeid((uint32_t)mme_ue_s1ap_id)) == NULL) { + if ((ue_ref_p = s1ap_state_get_ue_mmeid((uint32_t)mme_ue_s1ap_id)) == + nullptr) { OAILOG_DEBUG(LOG_S1AP, "No UE is attached to this mme UE s1ap id: " MME_UE_S1AP_ID_FMT "\n", @@ -3992,21 +4008,21 @@ status_code_e s1ap_mme_handle_erab_setup_response( OAILOG_FUNC_RETURN(LOG_S1AP, RETURNerror); } - if (ue_ref_p->enb_ue_s1ap_id != enb_ue_s1ap_id) { + if (ue_ref_p->enb_ue_s1ap_id() != enb_ue_s1ap_id) { OAILOG_DEBUG(LOG_S1AP, "Mismatch in eNB UE S1AP ID, known: " ENB_UE_S1AP_ID_FMT ", received: " ENB_UE_S1AP_ID_FMT "\n", - ue_ref_p->enb_ue_s1ap_id, enb_ue_s1ap_id); + ue_ref_p->enb_ue_s1ap_id(), enb_ue_s1ap_id); OAILOG_FUNC_RETURN(LOG_S1AP, RETURNerror); } s1ap_imsi_map_t* imsi_map = get_s1ap_imsi_map(); - imsi_map->mme_ueid2imsi_map.get(ue_ref_p->mme_ue_s1ap_id, &imsi64); + imsi_map->mme_ueid2imsi_map.get(ue_ref_p->mme_ue_s1ap_id(), &imsi64); message_p = DEPRECATEDitti_alloc_new_message_fatal(TASK_S1AP, S1AP_E_RAB_SETUP_RSP); - S1AP_E_RAB_SETUP_RSP(message_p).mme_ue_s1ap_id = ue_ref_p->mme_ue_s1ap_id; - S1AP_E_RAB_SETUP_RSP(message_p).enb_ue_s1ap_id = ue_ref_p->enb_ue_s1ap_id; + S1AP_E_RAB_SETUP_RSP(message_p).mme_ue_s1ap_id = ue_ref_p->mme_ue_s1ap_id(); + S1AP_E_RAB_SETUP_RSP(message_p).enb_ue_s1ap_id = ue_ref_p->enb_ue_s1ap_id(); S1AP_E_RAB_SETUP_RSP(message_p).e_rab_setup_list.no_of_items = 0; S1AP_E_RAB_SETUP_RSP(message_p).e_rab_failed_to_setup_list.no_of_items = 0; @@ -4078,7 +4094,7 @@ status_code_e s1ap_mme_handle_enb_reset(s1ap_state_t* state, S1ap_S1AP_PDU_t* pdu) { MessageDef* msg = NULL; itti_s1ap_enb_initiated_reset_req_t* reset_req = NULL; - ue_description_t* ue_ref_p = NULL; + oai::UeDescription* ue_ref_p = nullptr; enb_description_t* enb_association = NULL; 
s1ap_reset_type_t s1ap_reset_type; S1ap_Reset_t* container = NULL; @@ -4236,17 +4252,17 @@ status_code_e s1ap_mme_handle_enb_reset(s1ap_state_t* state, if (s1_sig_conn_id_p->eNB_UE_S1AP_ID != NULL) { enb_ue_s1ap_id_t enb_ue_s1ap_id = (enb_ue_s1ap_id_t) * (s1_sig_conn_id_p->eNB_UE_S1AP_ID); - if (ue_ref_p->enb_ue_s1ap_id == + if (ue_ref_p->enb_ue_s1ap_id() == (enb_ue_s1ap_id & ENB_UE_S1AP_ID_MASK)) { reset_req->ue_to_reset_list[i].mme_ue_s1ap_id = - ue_ref_p->mme_ue_s1ap_id; + ue_ref_p->mme_ue_s1ap_id(); enb_ue_s1ap_id &= ENB_UE_S1AP_ID_MASK; reset_req->ue_to_reset_list[i].enb_ue_s1ap_id = enb_ue_s1ap_id; } else { // mismatch in enb_ue_s1ap_id sent by eNB and stored in S1AP ue // context in EPC. Abnormal case. reset_req->ue_to_reset_list[i].mme_ue_s1ap_id = - ue_ref_p->mme_ue_s1ap_id; + ue_ref_p->mme_ue_s1ap_id(); reset_req->ue_to_reset_list[i].enb_ue_s1ap_id = (enb_ue_s1ap_id_t) * (s1_sig_conn_id_p->eNB_UE_S1AP_ID); OAILOG_ERROR_UE( @@ -4255,11 +4271,11 @@ status_code_e s1ap_mme_handle_enb_reset(s1ap_state_t* state, "%d " "sent by eNB and id %d stored in epc for mme_ue_s1ap_id %d " "\n", - enb_ue_s1ap_id, ue_ref_p->enb_ue_s1ap_id, mme_ue_s1ap_id); + enb_ue_s1ap_id, ue_ref_p->enb_ue_s1ap_id(), mme_ue_s1ap_id); } } else { reset_req->ue_to_reset_list[i].mme_ue_s1ap_id = - ue_ref_p->mme_ue_s1ap_id; + ue_ref_p->mme_ue_s1ap_id(); reset_req->ue_to_reset_list[i].enb_ue_s1ap_id = INVALID_ENB_UE_S1AP_ID; } @@ -4587,7 +4603,7 @@ status_code_e s1ap_mme_handle_erab_modification_indication( status_code_e rc = RETURNok; S1ap_E_RABModificationIndication_t* container = NULL; S1ap_E_RABModificationIndicationIEs_t* ie = NULL; - ue_description_t* ue_ref_p = NULL; + oai::UeDescription* ue_ref_p = nullptr; MessageDef* message_p = NULL; container = @@ -4605,28 +4621,29 @@ status_code_e s1ap_mme_handle_erab_modification_indication( enb_ue_s1ap_id = (enb_ue_s1ap_id_t)(ie->value.choice.ENB_UE_S1AP_ID & ENB_UE_S1AP_ID_MASK); - if ((ue_ref_p = 
s1ap_state_get_ue_mmeid((uint32_t)mme_ue_s1ap_id)) == NULL) { - OAILOG_DEBUG(LOG_S1AP, + if ((ue_ref_p = s1ap_state_get_ue_mmeid((uint32_t)mme_ue_s1ap_id)) == + nullptr) { + OAILOG_ERROR(LOG_S1AP, "No UE is attached to this mme UE s1ap id: " MME_UE_S1AP_ID_FMT " %u(10)\n", (uint32_t)mme_ue_s1ap_id, (uint32_t)mme_ue_s1ap_id); OAILOG_FUNC_RETURN(LOG_S1AP, RETURNerror); } - if (ue_ref_p->enb_ue_s1ap_id != enb_ue_s1ap_id) { + if (ue_ref_p->enb_ue_s1ap_id() != enb_ue_s1ap_id) { OAILOG_DEBUG(LOG_S1AP, "Mismatch in eNB UE S1AP ID, known: " ENB_UE_S1AP_ID_FMT ", received: " ENB_UE_S1AP_ID_FMT "\n", - ue_ref_p->enb_ue_s1ap_id, enb_ue_s1ap_id); + ue_ref_p->enb_ue_s1ap_id(), enb_ue_s1ap_id); OAILOG_FUNC_RETURN(LOG_S1AP, RETURNerror); } message_p = DEPRECATEDitti_alloc_new_message_fatal( TASK_S1AP, S1AP_E_RAB_MODIFICATION_IND); S1AP_E_RAB_MODIFICATION_IND(message_p).mme_ue_s1ap_id = - ue_ref_p->mme_ue_s1ap_id; + ue_ref_p->mme_ue_s1ap_id(); S1AP_E_RAB_MODIFICATION_IND(message_p).enb_ue_s1ap_id = - ue_ref_p->enb_ue_s1ap_id; + ue_ref_p->enb_ue_s1ap_id(); /** Get the bearers to be modified. 
*/ S1AP_FIND_PROTOCOLIE_BY_ID( @@ -4743,7 +4760,7 @@ void s1ap_mme_generate_erab_modification_confirm( s1ap_state_t* state, const itti_s1ap_e_rab_modification_cnf_t* const conf) { uint8_t* buffer_p = NULL; uint32_t length = 0; - ue_description_t* ue_ref = NULL; + oai::UeDescription* ue_ref = nullptr; S1ap_S1AP_PDU_t pdu = {S1ap_S1AP_PDU_PR_NOTHING, {0}}; S1ap_E_RABModificationConfirm_t* out; S1ap_E_RABModificationConfirmIEs_t* ie = NULL; @@ -4751,7 +4768,7 @@ void s1ap_mme_generate_erab_modification_confirm( OAILOG_FUNC_IN(LOG_S1AP); DevAssert(conf != NULL); - if ((ue_ref = s1ap_state_get_ue_mmeid(conf->mme_ue_s1ap_id)) == NULL) { + if ((ue_ref = s1ap_state_get_ue_mmeid(conf->mme_ue_s1ap_id)) == nullptr) { OAILOG_ERROR(LOG_S1AP, "This mme ue s1ap id (" MME_UE_S1AP_ID_FMT ") is not attached to any UE context\n", @@ -4826,13 +4843,13 @@ void s1ap_mme_generate_erab_modification_confirm( LOG_S1AP, "Send S1AP E_RAB_MODIFICATION_CONFIRM Command message MME_UE_S1AP_ID " "= " MME_UE_S1AP_ID_FMT " eNB_UE_S1AP_ID = " ENB_UE_S1AP_ID_FMT "\n", - (mme_ue_s1ap_id_t)ue_ref->mme_ue_s1ap_id, - (enb_ue_s1ap_id_t)ue_ref->enb_ue_s1ap_id); + (mme_ue_s1ap_id_t)ue_ref->mme_ue_s1ap_id(), + (enb_ue_s1ap_id_t)ue_ref->enb_ue_s1ap_id()); bstring b = blk2bstr(buffer_p, length); free(buffer_p); - s1ap_mme_itti_send_sctp_request(&b, ue_ref->sctp_assoc_id, - ue_ref->sctp_stream_send, - ue_ref->mme_ue_s1ap_id); + s1ap_mme_itti_send_sctp_request(&b, ue_ref->sctp_assoc_id(), + ue_ref->sctp_stream_send(), + ue_ref->mme_ue_s1ap_id()); OAILOG_FUNC_OUT(LOG_S1AP); } @@ -5008,15 +5025,15 @@ status_code_e s1ap_handle_path_switch_req_ack( uint8_t* buffer = NULL; uint32_t length = 0; - ue_description_t* ue_ref_p = NULL; + oai::UeDescription* ue_ref_p = nullptr; S1ap_S1AP_PDU_t pdu = {S1ap_S1AP_PDU_PR_NOTHING, {0}}; S1ap_PathSwitchRequestAcknowledge_t* out = NULL; S1ap_PathSwitchRequestAcknowledgeIEs_t* ie = NULL; status_code_e rc = RETURNok; if ((ue_ref_p = s1ap_state_get_ue_mmeid( - 
path_switch_req_ack_p->mme_ue_s1ap_id)) == NULL) { - OAILOG_DEBUG_UE( + path_switch_req_ack_p->mme_ue_s1ap_id)) == nullptr) { + OAILOG_ERROR_UE( LOG_S1AP, imsi64, "could not get ue context for mme_ue_s1ap_id " MME_UE_S1AP_ID_FMT "\n", (uint32_t)path_switch_req_ack_p->mme_ue_s1ap_id); @@ -5038,7 +5055,7 @@ status_code_e s1ap_handle_path_switch_req_ack( ie->criticality = S1ap_Criticality_reject; ie->value.present = S1ap_PathSwitchRequestAcknowledgeIEs__value_PR_MME_UE_S1AP_ID; - ie->value.choice.MME_UE_S1AP_ID = ue_ref_p->mme_ue_s1ap_id; + ie->value.choice.MME_UE_S1AP_ID = ue_ref_p->mme_ue_s1ap_id(); ASN_SEQUENCE_ADD(&out->protocolIEs.list, ie); /* mandatory */ @@ -5048,7 +5065,7 @@ status_code_e s1ap_handle_path_switch_req_ack( ie->criticality = S1ap_Criticality_reject; ie->value.present = S1ap_PathSwitchRequestAcknowledgeIEs__value_PR_ENB_UE_S1AP_ID; - ie->value.choice.ENB_UE_S1AP_ID = ue_ref_p->enb_ue_s1ap_id; + ie->value.choice.ENB_UE_S1AP_ID = ue_ref_p->enb_ue_s1ap_id(); ASN_SEQUENCE_ADD(&out->protocolIEs.list, ie); /** Add the security context. 
*/ @@ -5087,7 +5104,7 @@ status_code_e s1ap_handle_path_switch_req_ack( (uint32_t)path_switch_req_ack_p->mme_ue_s1ap_id); rc = s1ap_mme_itti_send_sctp_request(&b, path_switch_req_ack_p->sctp_assoc_id, - ue_ref_p->sctp_stream_send, + ue_ref_p->sctp_stream_send(), path_switch_req_ack_p->mme_ue_s1ap_id); OAILOG_FUNC_RETURN(LOG_S1AP, rc); @@ -5099,7 +5116,7 @@ status_code_e s1ap_handle_path_switch_req_failure( S1ap_PathSwitchRequestFailure_t* container = NULL; uint8_t* buffer = NULL; uint32_t length = 0; - ue_description_t* ue_ref_p = NULL; + oai::UeDescription* ue_ref_p = nullptr; S1ap_S1AP_PDU_t pdu = {S1ap_S1AP_PDU_PR_NOTHING, {0}}; S1ap_PathSwitchRequestFailureIEs_t* ie = NULL; status_code_e rc = RETURNok; @@ -5108,8 +5125,8 @@ status_code_e s1ap_handle_path_switch_req_failure( mme_ue_s1ap_id = path_switch_req_failure_p->mme_ue_s1ap_id; ue_ref_p = s1ap_state_get_ue_mmeid(mme_ue_s1ap_id); - if (ue_ref_p == NULL) { - OAILOG_DEBUG_UE( + if (ue_ref_p == nullptr) { + OAILOG_ERROR_UE( LOG_S1AP, imsi64, "could not get ue context for mme_ue_s1ap_id " MME_UE_S1AP_ID_FMT "\n", mme_ue_s1ap_id); @@ -5161,8 +5178,8 @@ status_code_e s1ap_handle_path_switch_req_failure( (uint32_t)path_switch_req_failure_p->mme_ue_s1ap_id); rc = s1ap_mme_itti_send_sctp_request( - &b, path_switch_req_failure_p->sctp_assoc_id, ue_ref_p->sctp_stream_send, - path_switch_req_failure_p->mme_ue_s1ap_id); + &b, path_switch_req_failure_p->sctp_assoc_id, + ue_ref_p->sctp_stream_send(), path_switch_req_failure_p->mme_ue_s1ap_id); OAILOG_FUNC_RETURN(LOG_S1AP, rc); } @@ -5205,7 +5222,7 @@ status_code_e s1ap_mme_handle_erab_rel_response(s1ap_state_t* state, OAILOG_FUNC_IN(LOG_S1AP); S1ap_E_RABReleaseResponseIEs_t* ie = NULL; S1ap_E_RABReleaseResponse_t* container = NULL; - ue_description_t* ue_ref_p = NULL; + oai::UeDescription* ue_ref_p = nullptr; MessageDef* message_p = NULL; status_code_e rc = RETURNok; imsi64_t imsi64 = INVALID_IMSI64; @@ -5218,8 +5235,8 @@ status_code_e 
s1ap_mme_handle_erab_rel_response(s1ap_state_t* state, S1ap_ProtocolIE_ID_id_MME_UE_S1AP_ID, true); mme_ue_s1ap_id = ie->value.choice.MME_UE_S1AP_ID; - if ((ie) && - (ue_ref_p = s1ap_state_get_ue_mmeid((uint32_t)mme_ue_s1ap_id)) == NULL) { + if ((ie) && (ue_ref_p = s1ap_state_get_ue_mmeid((uint32_t)mme_ue_s1ap_id)) == + nullptr) { OAILOG_ERROR(LOG_S1AP, "No UE is attached to this mme UE s1ap id: " MME_UE_S1AP_ID_FMT "\n", @@ -5233,11 +5250,11 @@ status_code_e s1ap_mme_handle_erab_rel_response(s1ap_state_t* state, enb_ue_s1ap_id = (enb_ue_s1ap_id_t)(ie->value.choice.ENB_UE_S1AP_ID & ENB_UE_S1AP_ID_MASK); - if ((ie) && ue_ref_p->enb_ue_s1ap_id != enb_ue_s1ap_id) { + if ((ie) && ue_ref_p->enb_ue_s1ap_id() != enb_ue_s1ap_id) { OAILOG_ERROR(LOG_S1AP, "Mismatch in eNB UE S1AP ID, known: " ENB_UE_S1AP_ID_FMT ", received: " ENB_UE_S1AP_ID_FMT "\n", - ue_ref_p->enb_ue_s1ap_id, (enb_ue_s1ap_id_t)enb_ue_s1ap_id); + ue_ref_p->enb_ue_s1ap_id(), (enb_ue_s1ap_id_t)enb_ue_s1ap_id); OAILOG_FUNC_RETURN(LOG_S1AP, RETURNerror); } @@ -5249,8 +5266,8 @@ status_code_e s1ap_mme_handle_erab_rel_response(s1ap_state_t* state, OAILOG_ERROR(LOG_S1AP, "itti_alloc_new_message Failed\n"); OAILOG_FUNC_RETURN(LOG_S1AP, RETURNerror); } - S1AP_E_RAB_REL_RSP(message_p).mme_ue_s1ap_id = ue_ref_p->mme_ue_s1ap_id; - S1AP_E_RAB_REL_RSP(message_p).enb_ue_s1ap_id = ue_ref_p->enb_ue_s1ap_id; + S1AP_E_RAB_REL_RSP(message_p).mme_ue_s1ap_id = ue_ref_p->mme_ue_s1ap_id(); + S1AP_E_RAB_REL_RSP(message_p).enb_ue_s1ap_id = ue_ref_p->enb_ue_s1ap_id(); S1AP_E_RAB_REL_RSP(message_p).e_rab_rel_list.no_of_items = 0; S1AP_E_RAB_REL_RSP(message_p).e_rab_failed_to_rel_list.no_of_items = 0; @@ -5311,7 +5328,7 @@ status_code_e s1ap_mme_remove_stale_ue_context(enb_ue_s1ap_id_t enb_ue_s1ap_id, } status_code_e s1ap_send_mme_ue_context_release(s1ap_state_t* state, - ue_description_t* ue_ref_p, + oai::UeDescription* ue_ref_p, enum s1cause s1_release_cause, S1ap_Cause_t ie_cause, imsi64_t imsi64) { @@ -5325,12 +5342,12 @@ 
status_code_e s1ap_send_mme_ue_context_release(s1ap_state_t* state, } enb_description_t* enb_ref_p = - s1ap_state_get_enb(state, ue_ref_p->sctp_assoc_id); + s1ap_state_get_enb(state, ue_ref_p->sctp_assoc_id()); S1AP_UE_CONTEXT_RELEASE_REQ(message_p).mme_ue_s1ap_id = - ue_ref_p->mme_ue_s1ap_id; + ue_ref_p->mme_ue_s1ap_id(); S1AP_UE_CONTEXT_RELEASE_REQ(message_p).enb_ue_s1ap_id = - ue_ref_p->enb_ue_s1ap_id; + ue_ref_p->enb_ue_s1ap_id(); S1AP_UE_CONTEXT_RELEASE_REQ(message_p).enb_id = enb_ref_p->enb_id; S1AP_UE_CONTEXT_RELEASE_REQ(message_p).relCause = s1_release_cause; S1AP_UE_CONTEXT_RELEASE_REQ(message_p).cause = ie_cause; @@ -5343,7 +5360,7 @@ status_code_e s1ap_send_mme_ue_context_release(s1ap_state_t* state, static int handle_ue_context_rel_timer_expiry(zloop_t* loop, int timer_id, void* arg) { OAILOG_FUNC_IN(LOG_S1AP); - ue_description_t* ue_ref_p = NULL; + oai::UeDescription* ue_ref_p = nullptr; mme_ue_s1ap_id_t mme_ue_s1ap_id = 0; imsi64_t imsi64 = INVALID_IMSI64; s1ap_state_t* state = NULL; @@ -5353,7 +5370,7 @@ static int handle_ue_context_rel_timer_expiry(zloop_t* loop, int timer_id, // Timer handlers need to return 0 to avoid triggering ZMQ thread exit OAILOG_FUNC_RETURN(LOG_S1AP, RETURNok); } - if ((ue_ref_p = s1ap_state_get_ue_mmeid(mme_ue_s1ap_id)) == NULL) { + if ((ue_ref_p = s1ap_state_get_ue_mmeid(mme_ue_s1ap_id)) == nullptr) { OAILOG_ERROR( LOG_S1AP, "Failed to find UE context for mme_ue_s1ap_id " MME_UE_S1AP_ID_FMT, @@ -5363,15 +5380,49 @@ static int handle_ue_context_rel_timer_expiry(zloop_t* loop, int timer_id, } state = get_s1ap_state(false); - ue_ref_p->s1ap_ue_context_rel_timer.id = S1AP_TIMER_INACTIVE_ID; + ue_ref_p->mutable_s1ap_ue_context_rel_timer()->set_id(S1AP_TIMER_INACTIVE_ID); s1ap_imsi_map_t* imsi_map = get_s1ap_imsi_map(); imsi_map->mme_ueid2imsi_map.get(mme_ue_s1ap_id, &imsi64); OAILOG_DEBUG_UE(LOG_S1AP, imsi64, "Expired- UE Context Release Timer for " "mme_ue_s1ap_id " MME_UE_S1AP_ID_FMT, - ue_ref_p->mme_ue_s1ap_id); + 
ue_ref_p->mme_ue_s1ap_id()); // Remove UE context and inform MME_APP. s1ap_mme_release_ue_context(state, ue_ref_p, imsi64); OAILOG_FUNC_RETURN(LOG_S1AP, RETURNok); } + +// Frees the contents of pointer, called while freeing an entry from protobuf +// map +void free_enb_description(void** ptr) { + if (ptr) { + delete *ptr; + *ptr = nullptr; + } +} + +// Frees the contents of UE context, called while freeing an entry from protobuf +// map +void free_ue_description(void** ptr) { + if (ptr) { + oai::UeDescription* ue_context_p = + reinterpret_cast<oai::UeDescription*>(*ptr); + if ((ue_context_p)->has_s1ap_ue_context_rel_timer()) { + ue_context_p->clear_s1ap_ue_context_rel_timer(); + } + if ((ue_context_p)->has_s1ap_handover_state()) { + if ((ue_context_p)->s1ap_handover_state().has_e_rab_admitted_list()) { + (ue_context_p) + ->mutable_s1ap_handover_state() + ->clear_e_rab_admitted_list(); + } + ue_context_p->clear_s1ap_handover_state(); + } + delete ue_context_p; + *ptr = nullptr; + } +} + +} // namespace lte +} // namespace magma diff --git a/lte/gateway/c/core/oai/tasks/s1ap/s1ap_mme_handlers.hpp b/lte/gateway/c/core/oai/tasks/s1ap/s1ap_mme_handlers.hpp index 158971d7ca4e..c720467d2ed8 100644 --- a/lte/gateway/c/core/oai/tasks/s1ap/s1ap_mme_handlers.hpp +++ b/lte/gateway/c/core/oai/tasks/s1ap/s1ap_mme_handlers.hpp @@ -15,8 +15,8 @@ * contact@openairinterface.org */ -#ifndef FILE_S1AP_MME_HANDLERS_SEEN -#define FILE_S1AP_MME_HANDLERS_SEEN +#pragma once + #include #include "S1ap_Cause.h" @@ -27,6 +27,9 @@ #include "lte/gateway/c/core/oai/lib/itti/intertask_interface.h" #include "lte/gateway/c/core/oai/tasks/s1ap/s1ap_mme.hpp" +namespace magma { +namespace lte { + #define MAX_NUM_PARTIAL_S1_CONN_RESET 256 const char* s1_enb_state2str(enum mme_s1_enb_state_s state); @@ -155,7 +158,7 @@ status_code_e s1ap_mme_handle_erab_setup_failure(s1ap_state_t* state, S1ap_S1AP_PDU_t* message); void s1ap_mme_release_ue_context(s1ap_state_t* state, - ue_description_t* ue_ref_p, imsi64_t imsi64); + 
oai::UeDescription* ue_ref_p, imsi64_t imsi64); status_code_e s1ap_mme_handle_error_ind_message(s1ap_state_t* state, const sctp_assoc_id_t assoc_id, @@ -209,12 +212,12 @@ void s1ap_mme_generate_erab_modification_confirm( s1ap_state_t* state, const itti_s1ap_e_rab_modification_cnf_t* const conf); status_code_e s1ap_mme_generate_ue_context_release_command( - s1ap_state_t* state, ue_description_t* ue_ref_p, enum s1cause, + s1ap_state_t* state, oai::UeDescription* ue_ref_p, enum s1cause, imsi64_t imsi64, sctp_assoc_id_t assoc_id, sctp_stream_id_t stream, mme_ue_s1ap_id_t mme_ue_s1ap_id, enb_ue_s1ap_id_t enb_ue_s1ap_id); status_code_e s1ap_mme_generate_ue_context_modification( - ue_description_t* ue_ref_p, + oai::UeDescription* ue_ref_p, const itti_s1ap_ue_context_mod_req_t* const ue_context_mod_req_pP, imsi64_t imsi64); @@ -222,8 +225,10 @@ status_code_e s1ap_mme_remove_stale_ue_context(enb_ue_s1ap_id_t enb_ue_s1ap_id, uint32_t enb_id); status_code_e s1ap_send_mme_ue_context_release(s1ap_state_t* state, - ue_description_t* ue_ref_p, + oai::UeDescription* ue_ref_p, enum s1cause s1_release_cause, S1ap_Cause_t ie_cause, imsi64_t imsi64); -#endif /* FILE_S1AP_MME_HANDLERS_SEEN */ + +} // namespace lte +} // namespace magma diff --git a/lte/gateway/c/core/oai/tasks/s1ap/s1ap_mme_itti_messaging.cpp b/lte/gateway/c/core/oai/tasks/s1ap/s1ap_mme_itti_messaging.cpp index 625f9a6f0e71..dfe7bb429d2c 100644 --- a/lte/gateway/c/core/oai/tasks/s1ap/s1ap_mme_itti_messaging.cpp +++ b/lte/gateway/c/core/oai/tasks/s1ap/s1ap_mme_itti_messaging.cpp @@ -40,6 +40,9 @@ extern "C" { #include "lte/gateway/c/core/oai/include/s1ap_messages_types.h" #include "lte/gateway/c/core/oai/include/sctp_messages_types.h" +namespace magma { +namespace lte { + //------------------------------------------------------------------------------ status_code_e s1ap_mme_itti_send_sctp_request(STOLEN_REF bstring* payload, const sctp_assoc_id_t assoc_id, @@ -368,7 +371,7 @@ status_code_e 
s1ap_mme_itti_s1ap_handover_request_ack( status_code_e s1ap_mme_itti_s1ap_handover_notify( const mme_ue_s1ap_id_t mme_ue_s1ap_id, - const s1ap_handover_state_t handover_state, + const oai::S1apHandoverState handover_state, const enb_ue_s1ap_id_t target_enb_ue_s1ap_id, const sctp_assoc_id_t target_sctp_assoc_id, const ecgi_t ecgi, imsi64_t imsi64) { @@ -380,14 +383,29 @@ status_code_e s1ap_mme_itti_s1ap_handover_notify( } S1AP_HANDOVER_NOTIFY(message_p).mme_ue_s1ap_id = mme_ue_s1ap_id; - S1AP_HANDOVER_NOTIFY(message_p).target_enb_id = handover_state.target_enb_id; + S1AP_HANDOVER_NOTIFY(message_p).target_enb_id = + handover_state.target_enb_id(); S1AP_HANDOVER_NOTIFY(message_p).target_sctp_assoc_id = target_sctp_assoc_id; S1AP_HANDOVER_NOTIFY(message_p).ecgi = ecgi; S1AP_HANDOVER_NOTIFY(message_p).target_enb_ue_s1ap_id = target_enb_ue_s1ap_id; - S1AP_HANDOVER_NOTIFY(message_p).e_rab_admitted_list = - handover_state.e_rab_admitted_list; + e_rab_admitted_list_t* e_rab_admitted_list = + &S1AP_HANDOVER_NOTIFY(message_p).e_rab_admitted_list; + e_rab_admitted_list->no_of_items = + handover_state.e_rab_admitted_list().no_of_items(); + for (uint8_t idx = 0; idx < e_rab_admitted_list->no_of_items; idx++) { + const oai::ERabAdmittedItem& proto_e_rab_item = + handover_state.e_rab_admitted_list().item(idx); + e_rab_admitted_list->item[idx].e_rab_id = proto_e_rab_item.e_rab_id(); + e_rab_admitted_list->item[idx].transport_layer_address = + blk2bstr(proto_e_rab_item.transport_layer_address().c_str(), + proto_e_rab_item.transport_layer_address().length()); + e_rab_admitted_list->item[idx].gtp_teid = proto_e_rab_item.gtp_teid(); + } message_p->ittiMsgHeader.imsi = imsi64; send_msg_to_task(&s1ap_task_zmq_ctx, TASK_MME_APP, message_p); OAILOG_FUNC_RETURN(LOG_S1AP, RETURNok); } + +} // namespace lte +} // namespace magma diff --git a/lte/gateway/c/core/oai/tasks/s1ap/s1ap_mme_itti_messaging.hpp b/lte/gateway/c/core/oai/tasks/s1ap/s1ap_mme_itti_messaging.hpp index 
1b34ef9ccd70..67d130d4c588 100644 --- a/lte/gateway/c/core/oai/tasks/s1ap/s1ap_mme_itti_messaging.hpp +++ b/lte/gateway/c/core/oai/tasks/s1ap/s1ap_mme_itti_messaging.hpp @@ -22,8 +22,7 @@ \email: lionel.gauthier@eurecom.fr */ -#ifndef FILE_S1AP_MME_ITTI_MESSAGING_SEEN -#define FILE_S1AP_MME_ITTI_MESSAGING_SEEN +#pragma once #include #include @@ -41,6 +40,9 @@ #include "lte/gateway/c/core/oai/include/s1ap_state.hpp" +namespace magma { +namespace lte { + extern task_zmq_ctx_t s1ap_task_zmq_ctx; extern long s1ap_last_msg_latency; @@ -100,8 +102,10 @@ status_code_e s1ap_mme_itti_s1ap_handover_request_ack( status_code_e s1ap_mme_itti_s1ap_handover_notify( const mme_ue_s1ap_id_t mme_ue_s1ap_id, - const s1ap_handover_state_t handover_state, + const oai::S1apHandoverState handover_state, const enb_ue_s1ap_id_t target_ue_s1ap_id, const sctp_assoc_id_t target_sctp_assoc_id, const ecgi_t ecgi, imsi64_t imsi64); -#endif /* FILE_S1AP_MME_ITTI_MESSAGING_SEEN */ + +} // namespace lte +} // namespace magma diff --git a/lte/gateway/c/core/oai/tasks/s1ap/s1ap_mme_nas_procedures.cpp b/lte/gateway/c/core/oai/tasks/s1ap/s1ap_mme_nas_procedures.cpp index dbae61dbcef0..e343b0c0ef4d 100644 --- a/lte/gateway/c/core/oai/tasks/s1ap/s1ap_mme_nas_procedures.cpp +++ b/lte/gateway/c/core/oai/tasks/s1ap/s1ap_mme_nas_procedures.cpp @@ -73,7 +73,6 @@ extern "C" { #include "lte/gateway/c/core/oai/lib/3gpp/3gpp_23.003.h" #include "lte/gateway/c/core/oai/lib/3gpp/3gpp_36.413.h" #include "lte/gateway/c/core/oai/lib/bstr/bstrlib.h" -#include "lte/gateway/c/core/oai/lib/hashtable/hashtable.h" #include "lte/gateway/c/core/oai/tasks/s1ap/s1ap_common.hpp" #include "lte/gateway/c/core/oai/tasks/s1ap/s1ap_mme.hpp" #include "lte/gateway/c/core/oai/tasks/s1ap/s1ap_mme_encoder.hpp" @@ -84,10 +83,13 @@ extern "C" { #define EXT_UE_AMBR_UL 10000000000 #define EXT_UE_AMBR_DL 10000000000 + +namespace magma { +namespace lte { + extern bool s1ap_congestion_control_enabled; extern long s1ap_last_msg_latency; extern long 
s1ap_zmq_th; - //------------------------------------------------------------------------------ status_code_e s1ap_mme_handle_initial_ue_message(s1ap_state_t* state, const sctp_assoc_id_t assoc_id, @@ -96,7 +98,7 @@ status_code_e s1ap_mme_handle_initial_ue_message(s1ap_state_t* state, S1ap_InitialUEMessage_t* container = NULL; S1ap_InitialUEMessage_IEs_t *ie = NULL, *ie_e_tmsi = NULL, *ie_csg_id = NULL, *ie_gummei = NULL, *ie_cause = NULL; - ue_description_t* ue_ref = NULL; + oai::UeDescription* ue_ref = nullptr; enb_description_t* eNB_ref = NULL; enb_ue_s1ap_id_t enb_ue_s1ap_id = INVALID_ENB_UE_S1AP_ID; @@ -136,7 +138,7 @@ status_code_e s1ap_mme_handle_initial_ue_message(s1ap_state_t* state, enb_ue_s1ap_id, eNB_ref->sctp_assoc_id); ue_ref = s1ap_state_get_ue_enbid(eNB_ref->sctp_assoc_id, enb_ue_s1ap_id); - if (ue_ref == NULL) { + if (ue_ref == nullptr) { tai_t tai = {0}; gummei_t gummei = {0}; s_tmsi_t s_tmsi = {.mme_code = 0, .m_tmsi = INVALID_M_TMSI}; @@ -149,7 +151,7 @@ status_code_e s1ap_mme_handle_initial_ue_message(s1ap_state_t* state, * * * * Update eNB UE list. * * * * Forward message to NAS. 
*/ - if ((ue_ref = s1ap_new_ue(state, assoc_id, enb_ue_s1ap_id)) == NULL) { + if ((ue_ref = s1ap_new_ue(state, assoc_id, enb_ue_s1ap_id)) == nullptr) { // If we failed to allocate a new UE return -1 OAILOG_ERROR(LOG_S1AP, "Initial UE Message- Failed to allocate S1AP UE Context, " @@ -160,19 +162,19 @@ status_code_e s1ap_mme_handle_initial_ue_message(s1ap_state_t* state, OAILOG_DEBUG(LOG_S1AP, "Creating new UE Ref on S1ap"); - ue_ref->s1_ue_state = S1AP_UE_WAITING_CSR; + ue_ref->set_s1ap_ue_state(oai::S1AP_UE_WAITING_ICSR); - ue_ref->enb_ue_s1ap_id = enb_ue_s1ap_id; + ue_ref->set_enb_ue_s1ap_id(enb_ue_s1ap_id); // Will be allocated by NAS - ue_ref->mme_ue_s1ap_id = INVALID_MME_UE_S1AP_ID; + ue_ref->set_mme_ue_s1ap_id(INVALID_MME_UE_S1AP_ID); - ue_ref->s1ap_ue_context_rel_timer.id = S1AP_TIMER_INACTIVE_ID; - ue_ref->s1ap_ue_context_rel_timer.msec = - 1000 * S1AP_UE_CONTEXT_REL_COMP_TIMER; + ue_ref->mutable_s1ap_ue_context_rel_timer()->set_id(S1AP_TIMER_INACTIVE_ID); + ue_ref->mutable_s1ap_ue_context_rel_timer()->set_msec( + 1000 * S1AP_UE_CONTEXT_REL_COMP_TIMER); // On which stream we received the message - ue_ref->sctp_stream_recv = stream; - ue_ref->sctp_stream_send = eNB_ref->next_sctp_stream; + ue_ref->set_sctp_stream_recv(stream); + ue_ref->set_sctp_stream_send(eNB_ref->next_sctp_stream); /* * Increment the sctp stream for the eNB association. @@ -248,7 +250,7 @@ status_code_e s1ap_mme_handle_initial_ue_message(s1ap_state_t* state, S1ap_ProtocolIE_ID_id_RRC_Establishment_Cause, true); s1ap_mme_itti_s1ap_initial_ue_message( - assoc_id, eNB_ref->enb_id, ue_ref->enb_ue_s1ap_id, + assoc_id, eNB_ref->enb_id, ue_ref->enb_ue_s1ap_id(), ie->value.choice.NAS_PDU.buf, ie->value.choice.NAS_PDU.size, &tai, &ecgi, ie_cause->value.choice.RRC_Establishment_Cause, ie_e_tmsi ? &s_tmsi : NULL, ie_csg_id ? 
&csg_id : NULL, @@ -261,14 +263,14 @@ status_code_e s1ap_mme_handle_initial_ue_message(s1ap_state_t* state, } else { imsi64_t imsi64 = INVALID_IMSI64; s1ap_imsi_map_t* s1ap_imsi_map = get_s1ap_imsi_map(); - s1ap_imsi_map->mme_ueid2imsi_map.get(ue_ref->mme_ue_s1ap_id, &imsi64); + s1ap_imsi_map->mme_ueid2imsi_map.get(ue_ref->mme_ue_s1ap_id(), &imsi64); OAILOG_ERROR_UE( LOG_S1AP, imsi64, "Initial UE Message- Duplicate ENB_UE_S1AP_ID. Ignoring the " "message, eNB UE S1AP ID:" ENB_UE_S1AP_ID_FMT "\n, mme UE s1ap ID: " MME_UE_S1AP_ID_FMT "UE state %u", - enb_ue_s1ap_id, ue_ref->mme_ue_s1ap_id, ue_ref->s1_ue_state); + enb_ue_s1ap_id, ue_ref->mme_ue_s1ap_id(), ue_ref->s1ap_ue_state()); } OAILOG_FUNC_RETURN(LOG_S1AP, RETURNok); @@ -281,7 +283,7 @@ status_code_e s1ap_mme_handle_uplink_nas_transport( S1ap_S1AP_PDU_t* pdu) { S1ap_UplinkNASTransport_t* container = NULL; S1ap_UplinkNASTransport_IEs_t *ie, *ie_nas_pdu = NULL; - ue_description_t* ue_ref = NULL; + oai::UeDescription* ue_ref = nullptr; enb_description_t* enb_ref = NULL; tai_t tai = {0}; ecgi_t ecgi = {.plmn = {0}, .cell_identity = {0}}; @@ -347,7 +349,7 @@ status_code_e s1ap_mme_handle_uplink_nas_transport( } } - if (ue_ref->s1_ue_state != S1AP_UE_CONNECTED) { + if (ue_ref->s1ap_ue_state() != oai::S1AP_UE_CONNECTED) { OAILOG_WARNING(LOG_S1AP, "Received S1AP UPLINK_NAS_TRANSPORT while UE in state != " "S1AP_UE_CONNECTED\n"); @@ -395,7 +397,7 @@ status_code_e s1ap_mme_handle_nas_non_delivery(s1ap_state_t* state, S1ap_S1AP_PDU_t* pdu) { S1ap_NASNonDeliveryIndication_t* container; S1ap_NASNonDeliveryIndication_IEs_t *ie = NULL, *ie_nas_pdu = NULL; - ue_description_t* ue_ref = NULL; + oai::UeDescription* ue_ref = nullptr; imsi64_t imsi64 = INVALID_IMSI64; mme_ue_s1ap_id_t mme_ue_s1ap_id = INVALID_MME_UE_S1AP_ID; enb_ue_s1ap_id_t enb_ue_s1ap_id = INVALID_ENB_UE_S1AP_ID; @@ -445,7 +447,7 @@ status_code_e s1ap_mme_handle_nas_non_delivery(s1ap_state_t* state, " enb_ue_s1ap_id " ENB_UE_S1AP_ID_FMT "\n", mme_ue_s1ap_id, 
enb_ue_s1ap_id); - if ((ue_ref = s1ap_state_get_ue_mmeid(mme_ue_s1ap_id)) == NULL) { + if ((ue_ref = s1ap_state_get_ue_mmeid(mme_ue_s1ap_id)) == nullptr) { OAILOG_DEBUG(LOG_S1AP, "No UE is attached to this mme UE s1ap id: " MME_UE_S1AP_ID_FMT "\n", @@ -456,7 +458,7 @@ status_code_e s1ap_mme_handle_nas_non_delivery(s1ap_state_t* state, s1ap_imsi_map_t* imsi_map = get_s1ap_imsi_map(); imsi_map->mme_ueid2imsi_map.get(mme_ue_s1ap_id, &imsi64); - if (ue_ref->s1_ue_state != S1AP_UE_CONNECTED) { + if (ue_ref->s1ap_ue_state() != oai::S1AP_UE_CONNECTED) { OAILOG_DEBUG_UE( LOG_S1AP, imsi64, "Received S1AP NAS_NON_DELIVERY_INDICATION while UE in state != " @@ -476,7 +478,7 @@ status_code_e s1ap_generate_downlink_nas_transport( s1ap_state_t* state, const enb_ue_s1ap_id_t enb_ue_s1ap_id, const mme_ue_s1ap_id_t ue_id, STOLEN_REF bstring* payload, const imsi64_t imsi64, bool* is_state_same) { - ue_description_t* ue_ref = NULL; + oai::UeDescription* ue_ref = nullptr; uint8_t* buffer_p = NULL; uint32_t length = 0; uint32_t sctp_assoc_id = 0; @@ -536,14 +538,14 @@ status_code_e s1ap_generate_downlink_nas_transport( out = &pdu.choice.initiatingMessage.value.choice.DownlinkNASTransport; - if (ue_ref->s1_ue_state == S1AP_UE_WAITING_CRR) { + if (ue_ref->s1ap_ue_state() == oai::S1AP_UE_WAITING_CRC) { OAILOG_ERROR_UE( LOG_S1AP, imsi64, "Already triggered UE Context Release Command and UE is" - "in S1AP_UE_WAITING_CRR, so dropping the DownlinkNASTransport \n"); + "in S1AP_UE_WAITING_CRC, so dropping the DownlinkNASTransport \n"); OAILOG_FUNC_RETURN(LOG_S1AP, RETURNerror); } else { - ue_ref->s1_ue_state = S1AP_UE_CONNECTED; + ue_ref->set_s1ap_ue_state(oai::S1AP_UE_CONNECTED); } /* * Setting UE informations with the ones found in ue_ref @@ -553,7 +555,7 @@ status_code_e s1ap_generate_downlink_nas_transport( ie->id = S1ap_ProtocolIE_ID_id_MME_UE_S1AP_ID; ie->criticality = S1ap_Criticality_reject; ie->value.present = S1ap_DownlinkNASTransport_IEs__value_PR_MME_UE_S1AP_ID; - 
ie->value.choice.MME_UE_S1AP_ID = ue_ref->mme_ue_s1ap_id; + ie->value.choice.MME_UE_S1AP_ID = ue_ref->mme_ue_s1ap_id(); ASN_SEQUENCE_ADD(&out->protocolIEs.list, ie); /* mandatory */ @@ -562,7 +564,7 @@ status_code_e s1ap_generate_downlink_nas_transport( ie->id = S1ap_ProtocolIE_ID_id_eNB_UE_S1AP_ID; ie->criticality = S1ap_Criticality_reject; ie->value.present = S1ap_DownlinkNASTransport_IEs__value_PR_ENB_UE_S1AP_ID; - ie->value.choice.ENB_UE_S1AP_ID = ue_ref->enb_ue_s1ap_id; + ie->value.choice.ENB_UE_S1AP_ID = ue_ref->enb_ue_s1ap_id(); ASN_SEQUENCE_ADD(&out->protocolIEs.list, ie); /* mandatory */ ie = reinterpret_cast( @@ -590,12 +592,12 @@ status_code_e s1ap_generate_downlink_nas_transport( "Send S1AP DOWNLINK_NAS_TRANSPORT message ue_id = " MME_UE_S1AP_ID_FMT " MME_UE_S1AP_ID = " MME_UE_S1AP_ID_FMT " eNB_UE_S1AP_ID = " ENB_UE_S1AP_ID_FMT "\n", - ue_id, ue_ref->mme_ue_s1ap_id, enb_ue_s1ap_id); + ue_id, ue_ref->mme_ue_s1ap_id(), enb_ue_s1ap_id); bstring b = blk2bstr(buffer_p, length); free(buffer_p); - s1ap_mme_itti_send_sctp_request(&b, ue_ref->sctp_assoc_id, - ue_ref->sctp_stream_send, - ue_ref->mme_ue_s1ap_id); + s1ap_mme_itti_send_sctp_request(&b, ue_ref->sctp_assoc_id(), + ue_ref->sctp_stream_send(), + ue_ref->mme_ue_s1ap_id()); } OAILOG_FUNC_RETURN(LOG_S1AP, RETURNok); @@ -605,7 +607,7 @@ status_code_e s1ap_generate_downlink_nas_transport( status_code_e s1ap_generate_s1ap_e_rab_setup_req( s1ap_state_t* state, itti_s1ap_e_rab_setup_req_t* const e_rab_setup_req) { OAILOG_FUNC_IN(LOG_S1AP); - ue_description_t* ue_ref = NULL; + oai::UeDescription* ue_ref = nullptr; uint8_t* buffer_p = NULL; uint32_t length = 0; uint32_t sctp_assoc_id = 0; @@ -650,7 +652,7 @@ status_code_e s1ap_generate_s1ap_e_rab_setup_req( pdu.choice.initiatingMessage.value.present = S1ap_InitiatingMessage__value_PR_E_RABSetupRequest; out = &pdu.choice.initiatingMessage.value.choice.E_RABSetupRequest; - ue_ref->s1_ue_state = S1AP_UE_CONNECTED; + ue_ref->set_s1ap_ue_state(oai::S1AP_UE_CONNECTED); 
/* * Setting UE information with the ones found in ue_ref */ @@ -659,7 +661,7 @@ status_code_e s1ap_generate_s1ap_e_rab_setup_req( ie->id = S1ap_ProtocolIE_ID_id_MME_UE_S1AP_ID; ie->criticality = S1ap_Criticality_reject; ie->value.present = S1ap_E_RABSetupRequestIEs__value_PR_MME_UE_S1AP_ID; - ie->value.choice.MME_UE_S1AP_ID = ue_ref->mme_ue_s1ap_id; + ie->value.choice.MME_UE_S1AP_ID = ue_ref->mme_ue_s1ap_id(); ASN_SEQUENCE_ADD(&out->protocolIEs.list, ie); /* mandatory */ @@ -668,7 +670,7 @@ status_code_e s1ap_generate_s1ap_e_rab_setup_req( ie->id = S1ap_ProtocolIE_ID_id_eNB_UE_S1AP_ID; ie->criticality = S1ap_Criticality_reject; ie->value.present = S1ap_E_RABSetupRequestIEs__value_PR_ENB_UE_S1AP_ID; - ie->value.choice.ENB_UE_S1AP_ID = ue_ref->enb_ue_s1ap_id; + ie->value.choice.ENB_UE_S1AP_ID = ue_ref->enb_ue_s1ap_id(); ASN_SEQUENCE_ADD(&out->protocolIEs.list, ie); /*eNB * Fill in the NAS pdu @@ -822,13 +824,13 @@ status_code_e s1ap_generate_s1ap_e_rab_setup_req( LOG_S1AP, "Send S1AP E_RABSetup message MME_UE_S1AP_ID = " MME_UE_S1AP_ID_FMT " eNB_UE_S1AP_ID = " ENB_UE_S1AP_ID_FMT "\n", - (mme_ue_s1ap_id_t)ue_ref->mme_ue_s1ap_id, - (enb_ue_s1ap_id_t)ue_ref->enb_ue_s1ap_id); + (mme_ue_s1ap_id_t)ue_ref->mme_ue_s1ap_id(), + (enb_ue_s1ap_id_t)ue_ref->enb_ue_s1ap_id()); bstring b = blk2bstr(buffer_p, length); free(buffer_p); - s1ap_mme_itti_send_sctp_request(&b, ue_ref->sctp_assoc_id, - ue_ref->sctp_stream_send, - ue_ref->mme_ue_s1ap_id); + s1ap_mme_itti_send_sctp_request(&b, ue_ref->sctp_assoc_id(), + ue_ref->sctp_stream_send(), + ue_ref->mme_ue_s1ap_id()); } OAILOG_FUNC_RETURN(LOG_S1AP, RETURNok); @@ -847,7 +849,7 @@ void s1ap_handle_conn_est_cnf( uint8_t* buffer_p = NULL; uint8_t err = 0; uint32_t length = 0; - ue_description_t* ue_ref = NULL; + oai::UeDescription* ue_ref = nullptr; S1ap_InitialContextSetupRequest_t* out; S1ap_InitialContextSetupRequestIEs_t* ie = NULL; S1ap_UEAggregate_MaximumBitrates_ExtIEs_t* ie_ambrext = NULL; @@ -893,7 +895,7 @@ void 
s1ap_handle_conn_est_cnf( ie->criticality = S1ap_Criticality_reject; ie->value.present = S1ap_InitialContextSetupRequestIEs__value_PR_MME_UE_S1AP_ID; - ie->value.choice.MME_UE_S1AP_ID = ue_ref->mme_ue_s1ap_id; + ie->value.choice.MME_UE_S1AP_ID = ue_ref->mme_ue_s1ap_id(); ASN_SEQUENCE_ADD(&out->protocolIEs.list, ie); /* mandatory */ @@ -903,7 +905,7 @@ void s1ap_handle_conn_est_cnf( ie->criticality = S1ap_Criticality_reject; ie->value.present = S1ap_InitialContextSetupRequestIEs__value_PR_ENB_UE_S1AP_ID; - ie->value.choice.ENB_UE_S1AP_ID = ue_ref->enb_ue_s1ap_id; + ie->value.choice.ENB_UE_S1AP_ID = ue_ref->enb_ue_s1ap_id(); ASN_SEQUENCE_ADD(&out->protocolIEs.list, ie); /* mandatory */ @@ -1149,13 +1151,13 @@ void s1ap_handle_conn_est_cnf( LOG_S1AP, imsi64, "Send S1AP_INITIAL_CONTEXT_SETUP_REQUEST message MME_UE_S1AP_ID " "= " MME_UE_S1AP_ID_FMT " eNB_UE_S1AP_ID = " ENB_UE_S1AP_ID_FMT "\n", - (mme_ue_s1ap_id_t)ue_ref->mme_ue_s1ap_id, - (enb_ue_s1ap_id_t)ue_ref->enb_ue_s1ap_id); + (mme_ue_s1ap_id_t)ue_ref->mme_ue_s1ap_id(), + (enb_ue_s1ap_id_t)ue_ref->enb_ue_s1ap_id()); bstring b = blk2bstr(buffer_p, length); free(buffer_p); - s1ap_mme_itti_send_sctp_request(&b, ue_ref->sctp_assoc_id, - ue_ref->sctp_stream_send, - ue_ref->mme_ue_s1ap_id); + s1ap_mme_itti_send_sctp_request(&b, ue_ref->sctp_assoc_id(), + ue_ref->sctp_stream_send(), + ue_ref->mme_ue_s1ap_id()); OAILOG_FUNC_OUT(LOG_S1AP); } @@ -1189,7 +1191,7 @@ void s1ap_handle_mme_ue_id_notification( enb_description_t* enb_ref = s1ap_state_get_enb(state, sctp_assoc_id); if (enb_ref) { - ue_description_t* ue_ref = + oai::UeDescription* ue_ref = s1ap_state_get_ue_enbid(enb_ref->sctp_assoc_id, enb_ue_s1ap_id); if (ue_ref) { if (enb_ref->s1_state == S1AP_RESETING) { @@ -1197,12 +1199,12 @@ void s1ap_handle_mme_ue_id_notification( enb_ref->enb_id); return; } - ue_ref->mme_ue_s1ap_id = mme_ue_s1ap_id; + ue_ref->set_mme_ue_s1ap_id(mme_ue_s1ap_id); magma::proto_map_rc_t rc = state->mmeid2associd.insert(mme_ue_s1ap_id, 
sctp_assoc_id); enb_ref->ue_id_coll.insert((const hash_key_t)mme_ue_s1ap_id, - ue_ref->comp_s1ap_id); + ue_ref->comp_s1ap_id()); OAILOG_DEBUG(LOG_S1AP, "Num elements in ue_id_coll %lu and num ue associated %u", @@ -1233,7 +1235,7 @@ status_code_e s1ap_generate_s1ap_e_rab_rel_cmd( s1ap_state_t* state, itti_s1ap_e_rab_rel_cmd_t* const e_rab_rel_cmd) { OAILOG_FUNC_IN(LOG_S1AP); - ue_description_t* ue_ref = NULL; + oai::UeDescription* ue_ref = nullptr; uint8_t* buffer_p = NULL; uint32_t length = 0; uint32_t id = 0; @@ -1287,7 +1289,7 @@ status_code_e s1ap_generate_s1ap_e_rab_rel_cmd( ie->id = S1ap_ProtocolIE_ID_id_MME_UE_S1AP_ID; ie->criticality = S1ap_Criticality_reject; ie->value.present = S1ap_E_RABReleaseCommandIEs__value_PR_MME_UE_S1AP_ID; - ie->value.choice.MME_UE_S1AP_ID = ue_ref->mme_ue_s1ap_id; + ie->value.choice.MME_UE_S1AP_ID = ue_ref->mme_ue_s1ap_id(); ASN_SEQUENCE_ADD(&out->protocolIEs.list, ie); /* mandatory */ ie = (S1ap_E_RABReleaseCommandIEs_t*)calloc( @@ -1295,9 +1297,9 @@ status_code_e s1ap_generate_s1ap_e_rab_rel_cmd( ie->id = S1ap_ProtocolIE_ID_id_eNB_UE_S1AP_ID; ie->criticality = S1ap_Criticality_reject; ie->value.present = S1ap_E_RABReleaseCommandIEs__value_PR_ENB_UE_S1AP_ID; - ie->value.choice.ENB_UE_S1AP_ID = ue_ref->enb_ue_s1ap_id; + ie->value.choice.ENB_UE_S1AP_ID = ue_ref->enb_ue_s1ap_id(); ASN_SEQUENCE_ADD(&out->protocolIEs.list, ie); - ue_ref->s1_ue_state = S1AP_UE_CONNECTED; + ue_ref->set_s1ap_ue_state(oai::S1AP_UE_CONNECTED); ie = (S1ap_E_RABReleaseCommandIEs_t*)calloc( 1, sizeof(S1ap_E_RABReleaseCommandIEs_t)); @@ -1358,14 +1360,17 @@ status_code_e s1ap_generate_s1ap_e_rab_rel_cmd( "Send S1AP E_RABRelease Command message MME_UE_S1AP_ID " "= " MME_UE_S1AP_ID_FMT " eNB_UE_S1AP_ID = " ENB_UE_S1AP_ID_FMT "\n", - (mme_ue_s1ap_id_t)ue_ref->mme_ue_s1ap_id, - (enb_ue_s1ap_id_t)ue_ref->enb_ue_s1ap_id); + (mme_ue_s1ap_id_t)ue_ref->mme_ue_s1ap_id(), + (enb_ue_s1ap_id_t)ue_ref->enb_ue_s1ap_id()); bstring b = blk2bstr(buffer_p, length); 
free(buffer_p); - s1ap_mme_itti_send_sctp_request(&b, ue_ref->sctp_assoc_id, - ue_ref->sctp_stream_send, - ue_ref->mme_ue_s1ap_id); + s1ap_mme_itti_send_sctp_request(&b, ue_ref->sctp_assoc_id(), + ue_ref->sctp_stream_send(), + ue_ref->mme_ue_s1ap_id()); } OAILOG_FUNC_RETURN(LOG_S1AP, RETURNok); } + +} // namespace lte +} // namespace magma diff --git a/lte/gateway/c/core/oai/tasks/s1ap/s1ap_mme_nas_procedures.hpp b/lte/gateway/c/core/oai/tasks/s1ap/s1ap_mme_nas_procedures.hpp index 23221429f009..a0e8debc74a7 100644 --- a/lte/gateway/c/core/oai/tasks/s1ap/s1ap_mme_nas_procedures.hpp +++ b/lte/gateway/c/core/oai/tasks/s1ap/s1ap_mme_nas_procedures.hpp @@ -22,8 +22,7 @@ \email: lionel.gauthier@eurecom.fr */ -#ifndef FILE_S1AP_MME_NAS_PROCEDURES_SEEN -#define FILE_S1AP_MME_NAS_PROCEDURES_SEEN +#pragma once #include "lte/gateway/c/core/common/common_defs.h" #include "lte/gateway/c/core/oai/common/common_types.h" @@ -33,6 +32,9 @@ #include "lte/gateway/c/core/oai/lib/3gpp/3gpp_36.401.h" #include "lte/gateway/c/core/oai/lib/bstr/bstrlib.h" +namespace magma { +namespace lte { + /** \brief Handle an Initial UE message. 
* \param assocId lower layer assoc id (SCTP) * \param stream SCTP stream on which data had been received @@ -86,4 +88,5 @@ status_code_e s1ap_generate_s1ap_e_rab_setup_req( status_code_e s1ap_generate_s1ap_e_rab_rel_cmd( s1ap_state_t* state, itti_s1ap_e_rab_rel_cmd_t* const e_rab_rel_cmd); -#endif /* FILE_S1AP_MME_NAS_PROCEDURES_SEEN */ +} // namespace lte +} // namespace magma diff --git a/lte/gateway/c/core/oai/tasks/s1ap/s1ap_state.cpp b/lte/gateway/c/core/oai/tasks/s1ap/s1ap_state.cpp index 3066f9c568c0..2ee099e900e8 100644 --- a/lte/gateway/c/core/oai/tasks/s1ap/s1ap_state.cpp +++ b/lte/gateway/c/core/oai/tasks/s1ap/s1ap_state.cpp @@ -32,7 +32,8 @@ extern "C" { #include "lte/gateway/c/core/common/dynamic_memory_check.h" #include "lte/gateway/c/core/oai/tasks/s1ap/s1ap_state_manager.hpp" -using magma::lte::S1apStateManager; +namespace magma { +namespace lte { int s1ap_state_init(uint32_t max_ues, uint32_t max_enbs, bool use_stateless) { S1apStateManager::getInstance().init(max_ues, max_enbs, use_stateless); @@ -58,36 +59,48 @@ enb_description_t* s1ap_state_get_enb(s1ap_state_t* state, return enb; } -ue_description_t* s1ap_state_get_ue_enbid(sctp_assoc_id_t sctp_assoc_id, - enb_ue_s1ap_id_t enb_ue_s1ap_id) { - ue_description_t* ue = nullptr; +oai::UeDescription* s1ap_state_get_ue_enbid(sctp_assoc_id_t sctp_assoc_id, + enb_ue_s1ap_id_t enb_ue_s1ap_id) { + oai::UeDescription* ue = nullptr; - hash_table_ts_t* state_ue_ht = get_s1ap_ue_state(); + map_uint64_ue_description_t* state_ue_map = get_s1ap_ue_state(); + if (!state_ue_map) { + OAILOG_ERROR(LOG_S1AP, "Failed to get s1ap_ue_state"); + return ue; + } uint64_t comp_s1ap_id = S1AP_GENERATE_COMP_S1AP_ID(sctp_assoc_id, enb_ue_s1ap_id); - hashtable_ts_get(state_ue_ht, (const hash_key_t)comp_s1ap_id, (void**)&ue); + state_ue_map->get(comp_s1ap_id, &ue); return ue; } -ue_description_t* s1ap_state_get_ue_mmeid(mme_ue_s1ap_id_t mme_ue_s1ap_id) { - ue_description_t* ue = nullptr; +oai::UeDescription* 
s1ap_state_get_ue_mmeid(mme_ue_s1ap_id_t mme_ue_s1ap_id) { + oai::UeDescription* ue = nullptr; - hash_table_ts_t* state_ue_ht = get_s1ap_ue_state(); - hashtable_ts_apply_callback_on_elements((hash_table_ts_t* const)state_ue_ht, - s1ap_ue_compare_by_mme_ue_id_cb, - &mme_ue_s1ap_id, (void**)&ue); + map_uint64_ue_description_t* state_ue_map = get_s1ap_ue_state(); + if (!state_ue_map) { + OAILOG_ERROR(LOG_S1AP, "Failed to get s1ap_ue_state"); + return ue; + } + state_ue_map->map_apply_callback_on_all_elements( + s1ap_ue_compare_by_mme_ue_id_cb, reinterpret_cast(&mme_ue_s1ap_id), + reinterpret_cast(&ue)); return ue; } -ue_description_t* s1ap_state_get_ue_imsi(imsi64_t imsi64) { - ue_description_t* ue = nullptr; +oai::UeDescription* s1ap_state_get_ue_imsi(imsi64_t imsi64) { + oai::UeDescription* ue = nullptr; - hash_table_ts_t* state_ue_ht = get_s1ap_ue_state(); - hashtable_ts_apply_callback_on_elements((hash_table_ts_t* const)state_ue_ht, - s1ap_ue_compare_by_imsi, &imsi64, - (void**)&ue); + map_uint64_ue_description_t* state_ue_map = get_s1ap_ue_state(); + if (!state_ue_map) { + OAILOG_ERROR(LOG_S1AP, "Failed to get s1ap_ue_state"); + return ue; + } + state_ue_map->map_apply_callback_on_all_elements( + s1ap_ue_compare_by_imsi, reinterpret_cast(&imsi64), + reinterpret_cast(&ue)); return ue; } @@ -100,31 +113,30 @@ s1ap_imsi_map_t* get_s1ap_imsi_map() { return S1apStateManager::getInstance().get_s1ap_imsi_map(); } -bool s1ap_ue_compare_by_mme_ue_id_cb(__attribute__((unused)) - const hash_key_t keyP, - void* const elementP, void* parameterP, - void** resultP) { +bool s1ap_ue_compare_by_mme_ue_id_cb(__attribute__((unused)) uint64_t keyP, + oai::UeDescription* elementP, + void* parameterP, void** resultP) { mme_ue_s1ap_id_t* mme_ue_s1ap_id_p = (mme_ue_s1ap_id_t*)parameterP; - ue_description_t* ue_ref = (ue_description_t*)elementP; - if (*mme_ue_s1ap_id_p == ue_ref->mme_ue_s1ap_id) { + oai::UeDescription* ue_ref = (oai::UeDescription*)elementP; + if (*mme_ue_s1ap_id_p == 
ue_ref->mme_ue_s1ap_id()) { *resultP = elementP; OAILOG_TRACE(LOG_S1AP, "Found ue_ref %p mme_ue_s1ap_id " MME_UE_S1AP_ID_FMT "\n", - ue_ref, ue_ref->mme_ue_s1ap_id); + ue_ref, ue_ref->mme_ue_s1ap_id()); return true; } return false; } -bool s1ap_ue_compare_by_imsi(__attribute__((unused)) const hash_key_t keyP, - void* const elementP, void* parameterP, +bool s1ap_ue_compare_by_imsi(__attribute__((unused)) uint64_t keyP, + oai::UeDescription* elementP, void* parameterP, void** resultP) { imsi64_t imsi64 = INVALID_IMSI64; imsi64_t* target_imsi64 = (imsi64_t*)parameterP; - ue_description_t* ue_ref = (ue_description_t*)elementP; + oai::UeDescription* ue_ref = (oai::UeDescription*)elementP; s1ap_imsi_map_t* imsi_map = get_s1ap_imsi_map(); - imsi_map->mme_ueid2imsi_map.get(ue_ref->mme_ue_s1ap_id, &imsi64); + imsi_map->mme_ueid2imsi_map.get(ue_ref->mme_ue_s1ap_id(), &imsi64); if (*target_imsi64 != INVALID_IMSI64 && *target_imsi64 == imsi64) { *resultP = elementP; @@ -134,16 +146,17 @@ bool s1ap_ue_compare_by_imsi(__attribute__((unused)) const hash_key_t keyP, return false; } -hash_table_ts_t* get_s1ap_ue_state(void) { - return S1apStateManager::getInstance().get_ue_state_ht(); +map_uint64_ue_description_t* get_s1ap_ue_state(void) { + return S1apStateManager::getInstance().get_s1ap_ue_state(); } void put_s1ap_ue_state(imsi64_t imsi64) { if (S1apStateManager::getInstance().is_persist_state_enabled()) { - ue_description_t* ue_ctxt = s1ap_state_get_ue_imsi(imsi64); + oai::UeDescription* ue_ctxt = s1ap_state_get_ue_imsi(imsi64); if (ue_ctxt) { auto imsi_str = S1apStateManager::getInstance().get_imsi_str(imsi64); - S1apStateManager::getInstance().write_ue_state_to_db(ue_ctxt, imsi_str); + S1apStateManager::getInstance().s1ap_write_ue_state_to_db(ue_ctxt, + imsi_str); } } } @@ -155,13 +168,18 @@ void delete_s1ap_ue_state(imsi64_t imsi64) { void remove_ues_without_imsi_from_ue_id_coll() { s1ap_state_t* s1ap_state_p = get_s1ap_state(false); - hash_table_ts_t* s1ap_ue_state = 
get_s1ap_ue_state(); + map_uint64_ue_description_t* s1ap_ue_state = get_s1ap_ue_state(); + + if (!(s1ap_ue_state)) { + OAILOG_ERROR(LOG_S1AP, "Failed to get s1ap_ue_state"); + return; + } std::vector mme_ue_id_no_imsi_list = {}; if (!s1ap_state_p || (s1ap_state_p->enbs.isEmpty())) { return; } s1ap_imsi_map_t* s1ap_imsi_map = get_s1ap_imsi_map(); - ue_description_t* ue_ref_p = NULL; + oai::UeDescription* ue_ref_p = nullptr; // get each eNB in s1ap_state for (auto itr = s1ap_state_p->enbs.map->begin(); @@ -180,8 +198,7 @@ void remove_ues_without_imsi_from_ue_id_coll() { for (auto ue_itr = enb_association_p->ue_id_coll.map->begin(); ue_itr != enb_association_p->ue_id_coll.map->end(); ue_itr++) { // Check if a UE reference exists for this comp_s1ap_id - hashtable_ts_get(s1ap_ue_state, (const hash_key_t)ue_itr->second, - reinterpret_cast(&ue_ref_p)); + s1ap_ue_state->get(ue_itr->second, &ue_ref_p); if (!ue_ref_p) { mme_ue_id_no_imsi_list.push_back(ue_itr->first); OAILOG_DEBUG(LOG_S1AP, @@ -205,3 +222,6 @@ void remove_ues_without_imsi_from_ue_id_coll() { } } } + +} // namespace lte +} // namespace magma diff --git a/lte/gateway/c/core/oai/tasks/s1ap/s1ap_state_converter.cpp b/lte/gateway/c/core/oai/tasks/s1ap/s1ap_state_converter.cpp index 92dc1bc02e91..29581965c201 100644 --- a/lte/gateway/c/core/oai/tasks/s1ap/s1ap_state_converter.cpp +++ b/lte/gateway/c/core/oai/tasks/s1ap/s1ap_state_converter.cpp @@ -17,23 +17,20 @@ */ #include "lte/gateway/c/core/oai/tasks/s1ap/s1ap_state_converter.hpp" -using magma::lte::oai::EnbDescription; -using magma::lte::oai::S1apState; -using magma::lte::oai::UeDescription; - namespace magma { namespace lte { S1apStateConverter::~S1apStateConverter() = default; S1apStateConverter::S1apStateConverter() = default; -void S1apStateConverter::state_to_proto(s1ap_state_t* state, S1apState* proto) { +void S1apStateConverter::state_to_proto(s1ap_state_t* state, + oai::S1apState* proto) { proto->Clear(); // copy over enbs 
state_map_to_proto(state->enbs, proto->mutable_enbs(), - enb_to_proto, LOG_S1AP); + oai::EnbDescription>(state->enbs, proto->mutable_enbs(), + enb_to_proto, LOG_S1AP); // copy over mmeid2associd mme_ue_s1ap_id_t mmeid; @@ -56,9 +53,9 @@ void S1apStateConverter::state_to_proto(s1ap_state_t* state, S1apState* proto) { proto->set_num_enbs(state->num_enbs); } -void S1apStateConverter::proto_to_state(const S1apState& proto, +void S1apStateConverter::proto_to_state(const oai::S1apState& proto, s1ap_state_t* state) { - proto_to_state_map(proto.enbs(), state->enbs, proto_to_enb, LOG_S1AP); @@ -124,60 +121,16 @@ void S1apStateConverter::proto_to_enb(const oai::EnbDescription& proto, proto_to_supported_ta_list(&enb->supported_ta_list, proto.supported_ta_list()); } -void S1apStateConverter::ue_to_proto(const ue_description_t* ue, +void S1apStateConverter::ue_to_proto(const oai::UeDescription* ue, oai::UeDescription* proto) { proto->Clear(); - - proto->set_s1_ue_state(ue->s1_ue_state); - proto->set_enb_ue_s1ap_id(ue->enb_ue_s1ap_id); - proto->set_mme_ue_s1ap_id(ue->mme_ue_s1ap_id); - proto->set_sctp_assoc_id(ue->sctp_assoc_id); - proto->set_sctp_stream_recv(ue->sctp_stream_recv); - proto->set_sctp_stream_send(ue->sctp_stream_send); - proto->mutable_s1ap_ue_context_rel_timer()->set_id( - ue->s1ap_ue_context_rel_timer.id); - proto->mutable_s1ap_ue_context_rel_timer()->set_msec( - ue->s1ap_ue_context_rel_timer.msec); - proto->mutable_s1ap_handover_state()->set_mme_ue_s1ap_id( - ue->s1ap_handover_state.mme_ue_s1ap_id); - proto->mutable_s1ap_handover_state()->set_source_enb_id( - ue->s1ap_handover_state.source_enb_id); - proto->mutable_s1ap_handover_state()->set_target_enb_id( - ue->s1ap_handover_state.target_enb_id); - proto->mutable_s1ap_handover_state()->set_target_enb_ue_s1ap_id( - ue->s1ap_handover_state.target_enb_ue_s1ap_id); - proto->mutable_s1ap_handover_state()->set_target_sctp_stream_recv( - ue->s1ap_handover_state.target_sctp_stream_recv); - 
proto->mutable_s1ap_handover_state()->set_target_sctp_stream_send( - ue->s1ap_handover_state.target_sctp_stream_send); + proto->MergeFrom(*ue); } -void S1apStateConverter::proto_to_ue(const oai::UeDescription& proto, - ue_description_t* ue) { - memset(ue, 0, sizeof(*ue)); - ue->s1_ue_state = (s1_ue_state_s)proto.s1_ue_state(); - ue->enb_ue_s1ap_id = proto.enb_ue_s1ap_id(); - ue->mme_ue_s1ap_id = proto.mme_ue_s1ap_id(); - ue->sctp_assoc_id = proto.sctp_assoc_id(); - ue->sctp_stream_recv = proto.sctp_stream_recv(); - ue->sctp_stream_send = proto.sctp_stream_send(); - ue->s1ap_ue_context_rel_timer.id = proto.s1ap_ue_context_rel_timer().id(); - ue->s1ap_ue_context_rel_timer.msec = proto.s1ap_ue_context_rel_timer().msec(); - ue->s1ap_handover_state.mme_ue_s1ap_id = - proto.s1ap_handover_state().mme_ue_s1ap_id(); - ue->s1ap_handover_state.source_enb_id = - proto.s1ap_handover_state().source_enb_id(); - ue->s1ap_handover_state.target_enb_id = - proto.s1ap_handover_state().target_enb_id(); - ue->s1ap_handover_state.target_enb_ue_s1ap_id = - proto.s1ap_handover_state().target_enb_ue_s1ap_id(); - ue->s1ap_handover_state.target_sctp_stream_recv = - proto.s1ap_handover_state().target_sctp_stream_recv(); - ue->s1ap_handover_state.target_sctp_stream_send = - proto.s1ap_handover_state().target_sctp_stream_send(); - - ue->comp_s1ap_id = - S1AP_GENERATE_COMP_S1AP_ID(ue->sctp_assoc_id, ue->enb_ue_s1ap_id); +void S1apStateConverter::proto_to_ue(const oai::UeDescription& proto, + oai::UeDescription* ue) { + ue->Clear(); + ue->MergeFrom(proto); } void S1apStateConverter::s1ap_imsi_map_to_proto( diff --git a/lte/gateway/c/core/oai/tasks/s1ap/s1ap_state_converter.hpp b/lte/gateway/c/core/oai/tasks/s1ap/s1ap_state_converter.hpp index dae74f8fcb39..3f51ff8107e4 100644 --- a/lte/gateway/c/core/oai/tasks/s1ap/s1ap_state_converter.hpp +++ b/lte/gateway/c/core/oai/tasks/s1ap/s1ap_state_converter.hpp @@ -18,15 +18,12 @@ #pragma once +#include + #ifdef __cplusplus extern "C" { #endif - -#include 
- #include "lte/gateway/c/core/common/assertions.h" -#include "lte/gateway/c/core/oai/lib/hashtable/hashtable.h" - #ifdef __cplusplus } #endif @@ -85,11 +82,11 @@ class S1apStateConverter : StateConverter { static void proto_to_enb(const oai::EnbDescription& proto, enb_description_t* enb); - static void ue_to_proto(const ue_description_t* ue, + static void ue_to_proto(const oai::UeDescription* ue, oai::UeDescription* proto); static void proto_to_ue(const oai::UeDescription& proto, - ue_description_t* ue); + oai::UeDescription* ue); private: S1apStateConverter(); diff --git a/lte/gateway/c/core/oai/tasks/s1ap/s1ap_state_manager.cpp b/lte/gateway/c/core/oai/tasks/s1ap/s1ap_state_manager.cpp index d11f52c12983..f3c4e8bb752b 100644 --- a/lte/gateway/c/core/oai/tasks/s1ap/s1ap_state_manager.cpp +++ b/lte/gateway/c/core/oai/tasks/s1ap/s1ap_state_manager.cpp @@ -27,10 +27,9 @@ constexpr char S1AP_ENB_COLL[] = "s1ap_eNB_coll"; constexpr char S1AP_MME_ID2ASSOC_ID_COLL[] = "s1ap_mme_id2assoc_id_coll"; constexpr char S1AP_MME_UEID2IMSI_MAP[] = "s1ap_mme_ueid2imsi_map"; constexpr char S1AP_IMSI_MAP_TABLE_NAME[] = "s1ap_imsi_map"; +constexpr char S1AP_STATE_UE_MAP[] = "s1ap_state_ue_map"; } // namespace -using magma::lte::oai::UeDescription; - namespace magma { namespace lte { @@ -83,10 +82,18 @@ s1ap_state_t* create_s1ap_state(void) { void S1apStateManager::create_state() { state_cache_p = create_s1ap_state(); + if (!state_cache_p) { + OAILOG_ERROR(LOG_S1AP, "Failed to create s1ap state"); + return; + } - bstring ht_name = bfromcstr(S1AP_ENB_COLL); - state_ue_ht = hashtable_ts_create(max_ues_, nullptr, free_wrapper, ht_name); - bdestroy(ht_name); + state_ue_map.map = new google::protobuf::Map(); + if (!(state_ue_map.map)) { + OAILOG_ERROR(LOG_S1AP, "Failed to allocate memory for state_ue_map "); + return; + } + state_ue_map.set_name(S1AP_STATE_UE_MAP); + state_ue_map.bind_callback(free_ue_description); create_s1ap_imsi_map(); } @@ -135,9 +142,8 @@ void 
S1apStateManager::free_state() { free_s1ap_state(state_cache_p); state_cache_p = nullptr; - if (hashtable_ts_destroy(state_ue_ht) != HASH_TABLE_OK) { - OAILOG_ERROR(LOG_S1AP, - "An error occurred while destroying assoc_id hash table"); + if (state_ue_map.destroy_map() != PROTO_MAP_OK) { + OAILOG_ERROR(LOG_S1AP, "An error occurred while destroying state_ue_map"); } clear_s1ap_imsi_map(); } @@ -151,31 +157,35 @@ status_code_e S1apStateManager::read_ue_state_from_db() { for (const auto& key : keys) { OAILOG_DEBUG(log_task, "Reading UE state from db for %s", key.c_str()); - UeDescription ue_proto = UeDescription(); - auto* ue_context = (ue_description_t*)calloc(1, sizeof(ue_description_t)); + oai::UeDescription ue_proto = oai::UeDescription(); + auto* ue_context = new oai::UeDescription(); + if (!ue_context) { + OAILOG_ERROR(log_task, "Failed to allocate memory for ue context"); + return RETURNerror; + } if (redis_client->read_proto(key, ue_proto) != RETURNok) { return RETURNerror; } - S1apStateConverter::proto_to_ue(ue_proto, ue_context); + ue_context->MergeFrom(ue_proto); - hashtable_rc_t h_rc = hashtable_ts_insert( - state_ue_ht, ue_context->comp_s1ap_id, (void*)ue_context); - if (HASH_TABLE_OK != h_rc) { + proto_map_rc_t rc = + state_ue_map.insert(ue_context->comp_s1ap_id(), ue_context); + if (rc != PROTO_MAP_OK) { OAILOG_ERROR( log_task, "Failed to insert UE state with key comp_s1ap_id " COMP_S1AP_ID_FMT ", ENB UE S1AP Id: " ENB_UE_S1AP_ID_FMT ", MME UE S1AP Id: " MME_UE_S1AP_ID_FMT " (Error Code: %s)\n", - ue_context->comp_s1ap_id, ue_context->enb_ue_s1ap_id, - ue_context->mme_ue_s1ap_id, hashtable_rc_code2string(h_rc)); + ue_context->comp_s1ap_id(), ue_context->enb_ue_s1ap_id(), + ue_context->mme_ue_s1ap_id(), magma::map_rc_code2string(rc)); } else { OAILOG_DEBUG(log_task, "Inserted UE state with key comp_s1ap_id " COMP_S1AP_ID_FMT ", ENB UE S1AP Id: " ENB_UE_S1AP_ID_FMT ", MME UE S1AP Id: " MME_UE_S1AP_ID_FMT, - ue_context->comp_s1ap_id, 
ue_context->enb_ue_s1ap_id, - ue_context->mme_ue_s1ap_id); + ue_context->comp_s1ap_id(), ue_context->enb_ue_s1ap_id(), + ue_context->mme_ue_s1ap_id()); } } #endif @@ -226,5 +236,32 @@ void S1apStateManager::write_s1ap_imsi_map_to_db() { } } +map_uint64_ue_description_t* S1apStateManager::get_s1ap_ue_state() { + return &state_ue_map; +} + +void S1apStateManager::s1ap_write_ue_state_to_db( + const oai::UeDescription* ue_context, const std::string& imsi_str) { + AssertFatal( + is_initialized, + "StateManager init() function should be called to initialize state"); + + std::string proto_str; + redis_client->serialize(*ue_context, proto_str); + std::size_t new_hash = std::hash{}(proto_str); + if (new_hash != this->ue_state_hash[imsi_str]) { + std::string key = IMSI_PREFIX + imsi_str + ":" + task_name; + if (redis_client->write_proto_str(key, proto_str, + ue_state_version[imsi_str]) != RETURNok) { + OAILOG_ERROR(log_task, "Failed to write UE state to db for IMSI %s", + imsi_str.c_str()); + return; + } + this->ue_state_version[imsi_str]++; + this->ue_state_hash[imsi_str] = new_hash; + OAILOG_DEBUG(log_task, "Finished writing UE state for IMSI %s", + imsi_str.c_str()); + } +} } // namespace lte } // namespace magma diff --git a/lte/gateway/c/core/oai/tasks/s1ap/s1ap_state_manager.hpp b/lte/gateway/c/core/oai/tasks/s1ap/s1ap_state_manager.hpp index 8ffbd802dbf3..aa904294be24 100644 --- a/lte/gateway/c/core/oai/tasks/s1ap/s1ap_state_manager.hpp +++ b/lte/gateway/c/core/oai/tasks/s1ap/s1ap_state_manager.hpp @@ -56,9 +56,8 @@ void free_s1ap_state(s1ap_state_t* state_cache_p); * to maintain S1AP task state, allocating and freeing related state structs. 
*/ class S1apStateManager - : public StateManager { + : public StateManager { public: /** * Returns an instance of S1apStateManager, guaranteed to be thread safe and @@ -99,6 +98,9 @@ class S1apStateManager * Returns a pointer to s1ap_imsi_map */ s1ap_imsi_map_t* get_s1ap_imsi_map(); + map_uint64_ue_description_t* get_s1ap_ue_state(); + void s1ap_write_ue_state_to_db(const oai::UeDescription* ue_context, + const std::string& imsi_str); private: S1apStateManager(); @@ -116,6 +118,7 @@ class S1apStateManager uint32_t max_enbs_; std::size_t s1ap_imsi_map_hash_; s1ap_imsi_map_t* s1ap_imsi_map_; + map_uint64_ue_description_t state_ue_map; }; } // namespace lte } // namespace magma diff --git a/lte/gateway/c/core/oai/tasks/s1ap/s1ap_timer.hpp b/lte/gateway/c/core/oai/tasks/s1ap/s1ap_timer.hpp index daceb628692c..64e9c1c90a03 100644 --- a/lte/gateway/c/core/oai/tasks/s1ap/s1ap_timer.hpp +++ b/lte/gateway/c/core/oai/tasks/s1ap/s1ap_timer.hpp @@ -15,10 +15,17 @@ limitations under the License. #include #include "lte/gateway/c/core/oai/include/s1ap_types.hpp" + #ifdef __cplusplus extern "C" { #endif #include "lte/gateway/c/core/oai/lib/itti/intertask_interface.h" +#ifdef __cplusplus +} +#endif + +namespace magma { +namespace lte { typedef struct s1ap_timer_arg_s { mme_ue_s1ap_id_t ue_id; @@ -33,6 +40,5 @@ void s1ap_stop_timer(int timer_id); // These functions are supposed to be used only by expired timers. 
bool s1ap_pop_timer_arg_ue_id(int timer_id, mme_ue_s1ap_id_t* ue_id); -#ifdef __cplusplus -} -#endif +} // namespace lte +} // namespace magma diff --git a/lte/gateway/c/core/oai/tasks/s1ap/s1ap_timer_management.cpp b/lte/gateway/c/core/oai/tasks/s1ap/s1ap_timer_management.cpp index ba669772b15d..78da53e4be53 100644 --- a/lte/gateway/c/core/oai/tasks/s1ap/s1ap_timer_management.cpp +++ b/lte/gateway/c/core/oai/tasks/s1ap/s1ap_timer_management.cpp @@ -26,6 +26,9 @@ extern "C" { // --Other includes // ------------------------------------------------------------- +namespace magma { +namespace lte { + extern task_zmq_ctx_t s1ap_task_zmq_ctx; //------------------------------------------------------------------------------ @@ -33,27 +36,22 @@ int s1ap_start_timer(size_t msec, timer_repeat_t repeat, zloop_timer_fn handler, mme_ue_s1ap_id_t ue_id) { s1ap_timer_arg_t arg; arg.ue_id = ue_id; - return magma::lte::S1apUeContext::Instance().StartTimer(msec, repeat, handler, - arg); + return S1apUeContext::Instance().StartTimer(msec, repeat, handler, arg); } //------------------------------------------------------------------------------ void s1ap_stop_timer(int timer_id) { - magma::lte::S1apUeContext::Instance().StopTimer(timer_id); + S1apUeContext::Instance().StopTimer(timer_id); } //------------------------------------------------------------------------------ bool s1ap_pop_timer_arg_ue_id(int timer_id, mme_ue_s1ap_id_t* ue_id) { s1ap_timer_arg_t arg; - bool result = - magma::lte::S1apUeContext::Instance().PopTimerById(timer_id, &arg); + bool result = S1apUeContext::Instance().PopTimerById(timer_id, &arg); *ue_id = arg.ue_id; return result; } -namespace magma { -namespace lte { - //------------------------------------------------------------------------------ int S1apUeContext::StartTimer(size_t msec, timer_repeat_t repeat, zloop_timer_fn handler, diff --git a/lte/gateway/c/core/oai/tasks/sgw/CMakeLists.txt b/lte/gateway/c/core/oai/tasks/sgw/CMakeLists.txt index 
73581b86abad..69ba4e8fdb9c 100644 --- a/lte/gateway/c/core/oai/tasks/sgw/CMakeLists.txt +++ b/lte/gateway/c/core/oai/tasks/sgw/CMakeLists.txt @@ -22,7 +22,6 @@ add_library(TASK_SGW pgw_pco.cpp mobilityd_ue_ip_address_alloc.cpp sgw_paging.cpp - pgw_pcef_emulation.cpp pgw_procedures.cpp spgw_state.cpp spgw_state_manager.cpp diff --git a/lte/gateway/c/core/oai/tasks/sgw/mobilityd_ue_ip_address_alloc.cpp b/lte/gateway/c/core/oai/tasks/sgw/mobilityd_ue_ip_address_alloc.cpp index 21b3bc56a652..abb2dff7b7b0 100644 --- a/lte/gateway/c/core/oai/tasks/sgw/mobilityd_ue_ip_address_alloc.cpp +++ b/lte/gateway/c/core/oai/tasks/sgw/mobilityd_ue_ip_address_alloc.cpp @@ -17,11 +17,18 @@ #include "lte/gateway/c/core/oai/tasks/sgw/pgw_ue_ip_address_alloc.hpp" -#include "lte/gateway/c/core/oai/common/log.h" #include "lte/gateway/c/core/oai/lib/mobility_client/MobilityClientAPI.hpp" #include "lte/gateway/c/core/oai/include/service303.hpp" #include "orc8r/gateway/c/common/service303/MetricsHelpers.hpp" +#ifdef __cplusplus +extern "C" { +#endif +#include "lte/gateway/c/core/oai/common/log.h" +#ifdef __cplusplus +} +#endif + struct in_addr; void release_ue_ipv4_address(const char* imsi, const char* apn, diff --git a/lte/gateway/c/core/oai/tasks/sgw/pgw_config.cpp b/lte/gateway/c/core/oai/tasks/sgw/pgw_config.cpp index c75ee23c7843..7edad9f67d04 100644 --- a/lte/gateway/c/core/oai/tasks/sgw/pgw_config.cpp +++ b/lte/gateway/c/core/oai/tasks/sgw/pgw_config.cpp @@ -54,7 +54,6 @@ extern "C" { #include "lte/gateway/c/core/common/common_defs.h" #include "lte/gateway/c/core/common/dynamic_memory_check.h" #include "lte/gateway/c/core/oai/lib/bstr/bstrlib.h" -#include "lte/gateway/c/core/oai/tasks/sgw/pgw_pcef_emulation.hpp" #ifdef LIBCONFIG_LONG #define libconfig_int long diff --git a/lte/gateway/c/core/oai/tasks/sgw/pgw_handlers.cpp b/lte/gateway/c/core/oai/tasks/sgw/pgw_handlers.cpp index 5ebc2a8ecb89..6ed500a9c60d 100644 --- a/lte/gateway/c/core/oai/tasks/sgw/pgw_handlers.cpp +++ 
b/lte/gateway/c/core/oai/tasks/sgw/pgw_handlers.cpp @@ -24,6 +24,8 @@ #define PGW #define S5_HANDLERS_C +#include "lte/gateway/c/core/oai/tasks/sgw/pgw_handlers.hpp" + #include #include #include @@ -31,21 +33,7 @@ #include #include -#ifdef __cplusplus -extern "C" { -#endif -#include "lte/gateway/c/core/oai/common/common_types.h" -#include "lte/gateway/c/core/oai/common/log.h" -#include "lte/gateway/c/core/common/assertions.h" -#include "lte/gateway/c/core/common/dynamic_memory_check.h" #include "lte/gateway/c/core/oai/include/sgw_context_manager.hpp" -#include "lte/gateway/c/core/oai/lib/itti/intertask_interface.h" -#include "lte/gateway/c/core/oai/lib/itti/intertask_interface_types.h" -#include "lte/gateway/c/core/oai/lib/itti/itti_types.h" -extern void print_bearer_ids_helper(const ebi_t*, uint32_t); -#ifdef __cplusplus -} -#endif #include "lte/gateway/c/core/common/common_defs.h" #include "lte/gateway/c/core/oai/common/conversions.h" #include "lte/gateway/c/core/oai/include/ip_forward_messages_types.h" @@ -59,15 +47,28 @@ extern void print_bearer_ids_helper(const ebi_t*, uint32_t); #include "lte/gateway/c/core/oai/lib/3gpp/3gpp_23.401.h" #include "lte/gateway/c/core/oai/lib/3gpp/3gpp_24.008.h" #include "lte/gateway/c/core/oai/lib/3gpp/3gpp_29.274.h" -#include "lte/gateway/c/core/oai/lib/hashtable/hashtable.h" #include "lte/gateway/c/core/oai/lib/mobility_client/MobilityClientAPI.hpp" #include "lte/gateway/c/core/oai/lib/pcef/pcef_handlers.hpp" -#include "lte/gateway/c/core/oai/tasks/sgw/pgw_handlers.hpp" #include "lte/gateway/c/core/oai/tasks/sgw/pgw_pco.hpp" #include "lte/gateway/c/core/oai/tasks/sgw/pgw_procedures.hpp" -#include "lte/gateway/c/core/oai/tasks/sgw/sgw_defs.hpp" #include "lte/gateway/c/core/oai/tasks/sgw/sgw_handlers.hpp" +#ifdef __cplusplus +extern "C" { +#endif +#include "lte/gateway/c/core/oai/common/common_types.h" +#include "lte/gateway/c/core/oai/common/log.h" +#include "lte/gateway/c/core/common/assertions.h" +#include 
"lte/gateway/c/core/common/dynamic_memory_check.h" +#include "lte/gateway/c/core/oai/lib/itti/intertask_interface.h" +#include "lte/gateway/c/core/oai/lib/itti/intertask_interface_types.h" +#include "lte/gateway/c/core/oai/lib/itti/itti_types.h" +#include "lte/gateway/c/core/oai/lib/hashtable/hashtable.h" +extern void print_bearer_ids_helper(const ebi_t*, uint32_t); +#ifdef __cplusplus +} +#endif + extern task_zmq_ctx_t sgw_s8_task_zmq_ctx; extern spgw_config_t spgw_config; diff --git a/lte/gateway/c/core/oai/tasks/sgw/pgw_handlers.hpp b/lte/gateway/c/core/oai/tasks/sgw/pgw_handlers.hpp index b731597c9701..27416e21c4c9 100644 --- a/lte/gateway/c/core/oai/tasks/sgw/pgw_handlers.hpp +++ b/lte/gateway/c/core/oai/tasks/sgw/pgw_handlers.hpp @@ -22,8 +22,8 @@ * \email: lionel.gauthier@eurecom.fr */ -#ifndef FILE_PGW_HANDLERS_SEEN -#define FILE_PGW_HANDLERS_SEEN +#pragma once + #include "lte/gateway/c/core/common/common_defs.h" #include "lte/gateway/c/core/oai/include/gx_messages_types.h" #include "lte/gateway/c/core/oai/include/spgw_state.hpp" @@ -65,5 +65,3 @@ status_code_e spgw_build_and_send_s11_deactivate_bearer_req( #ifdef __cplusplus } #endif - -#endif /* FILE_PGW_HANDLERS_SEEN */ diff --git a/lte/gateway/c/core/oai/tasks/sgw/pgw_pcef_emulation.cpp b/lte/gateway/c/core/oai/tasks/sgw/pgw_pcef_emulation.cpp deleted file mode 100644 index d8ed604a302d..000000000000 --- a/lte/gateway/c/core/oai/tasks/sgw/pgw_pcef_emulation.cpp +++ /dev/null @@ -1,552 +0,0 @@ -/* - * Licensed to the OpenAirInterface (OAI) Software Alliance under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The OpenAirInterface Software Alliance licenses this file to You under - * the terms found in the LICENSE file in the root of this source tree. 
- * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - *------------------------------------------------------------------------------- - * For more information about the OpenAirInterface (OAI) Software Alliance: - * contact@openairinterface.org - */ - -/*! \file sgw_handlers.cpp - \brief - \author Lionel Gauthier - \company Eurecom - \email: lionel.gauthier@eurecom.fr - */ -#define SGW - -#include "lte/gateway/c/core/oai/tasks/sgw/pgw_pcef_emulation.hpp" - -#include -#include -#include -#include -#include -#include -#include - -#ifdef __cplusplus -extern "C" { -#endif -#include "lte/gateway/c/core/oai/common/log.h" -#include "lte/gateway/c/core/common/dynamic_memory_check.h" -#include "lte/gateway/c/core/common/assertions.h" -#include "lte/gateway/c/core/oai/common/async_system.h" -#ifdef __cplusplus -} -#endif -#include "lte/gateway/c/core/common/common_defs.h" -#include "lte/gateway/c/core/oai/common/common_types.h" -#include "lte/gateway/c/core/oai/include/pgw_config.h" -#include "lte/gateway/c/core/oai/include/pgw_types.h" -#include "lte/gateway/c/core/oai/include/spgw_config.h" -#include "lte/gateway/c/core/oai/lib/bstr/bstrlib.h" -#include "lte/gateway/c/core/oai/lib/hashtable/hashtable.h" -#include "lte/gateway/c/core/oai/lib/itti/intertask_interface_types.h" - -/* - * Function that adds predefined PCC rules to PGW struct, - * it returns an error or success code after adding rules. 
- */ -status_code_e pgw_pcef_emulation_init(spgw_state_t* state_p, - const pgw_config_t* const pgw_config_p) { - status_code_e rc = RETURNok; - hashtable_rc_t hrc = HASH_TABLE_OK; - - //-------------------------- - // Predefined PCC rules - //-------------------------- - pcc_rule_t* pcc_rule; - // Initializing PCC rules only if PGW state doesn't already contain them - hrc = hashtable_ts_is_key_exists(state_p->deactivated_predefined_pcc_rules, - SDF_ID_GBR_VOLTE_40K); - if (hrc == HASH_TABLE_KEY_NOT_EXISTS) { - pcc_rule = (pcc_rule_t*)calloc(1, sizeof(pcc_rule_t)); - pcc_rule->name = bfromcstr("VOLTE_40K_PCC_RULE"); - pcc_rule->is_activated = false; - pcc_rule->sdf_id = SDF_ID_GBR_VOLTE_40K; - pcc_rule->bearer_qos.pci = PRE_EMPTION_CAPABILITY_ENABLED; - pcc_rule->bearer_qos.pl = 2; - pcc_rule->bearer_qos.pvi = PRE_EMPTION_VULNERABILITY_DISABLED; - pcc_rule->bearer_qos.qci = 1; - pcc_rule->bearer_qos.gbr.br_ul = - 40; // kilobits per second (1 kbps = 1000 bps) - pcc_rule->bearer_qos.gbr.br_dl = - 40; // kilobits per second (1 kbps = 1000 bps) - pcc_rule->bearer_qos.mbr.br_ul = - 40; // kilobits per second (1 kbps = 1000 bps) - pcc_rule->bearer_qos.mbr.br_dl = - 40; // kilobits per second (1 kbps = 1000 bps) - pcc_rule->sdf_template.sdf_filter[0].identifier = PF_ID_VOLTE; - pcc_rule->sdf_template.sdf_filter[0].spare = 0; - pcc_rule->sdf_template.sdf_filter[0].direction = - TRAFFIC_FLOW_TEMPLATE_BIDIRECTIONAL; - pcc_rule->sdf_template.sdf_filter[0].eval_precedence = 2; - pcc_rule->sdf_template.sdf_filter[0].length = 9; - pcc_rule->sdf_template.sdf_filter[0].packetfiltercontents.flags = - TRAFFIC_FLOW_TEMPLATE_IPV4_REMOTE_ADDR_FLAG; - pcc_rule->sdf_template.sdf_filter[0] - .packetfiltercontents.ipv4remoteaddr[0] - .addr = 216; - pcc_rule->sdf_template.sdf_filter[0] - .packetfiltercontents.ipv4remoteaddr[1] - .addr = 58; - pcc_rule->sdf_template.sdf_filter[0] - .packetfiltercontents.ipv4remoteaddr[2] - .addr = 210; - pcc_rule->sdf_template.sdf_filter[0] - 
.packetfiltercontents.ipv4remoteaddr[3] - .addr = 212; - pcc_rule->sdf_template.sdf_filter[0] - .packetfiltercontents.ipv4remoteaddr[0] - .mask = 255; - pcc_rule->sdf_template.sdf_filter[0] - .packetfiltercontents.ipv4remoteaddr[1] - .mask = 255; - pcc_rule->sdf_template.sdf_filter[0] - .packetfiltercontents.ipv4remoteaddr[2] - .mask = 255; - pcc_rule->sdf_template.sdf_filter[0] - .packetfiltercontents.ipv4remoteaddr[3] - .mask = 255; - pcc_rule->sdf_template.number_of_packet_filters = 1; - hrc = hashtable_ts_insert(state_p->deactivated_predefined_pcc_rules, - pcc_rule->sdf_id, pcc_rule); - if (HASH_TABLE_OK != hrc) { - return RETURNerror; - } - } - - hrc = hashtable_ts_is_key_exists(state_p->deactivated_predefined_pcc_rules, - SDF_ID_GBR_VOLTE_64K); - if (hrc == HASH_TABLE_KEY_NOT_EXISTS) { - pcc_rule = (pcc_rule_t*)calloc(1, sizeof(pcc_rule_t)); - pcc_rule->name = bfromcstr("VOLTE_64K_PCC_RULE"); - pcc_rule->is_activated = false; - pcc_rule->sdf_id = SDF_ID_GBR_VOLTE_64K; - pcc_rule->bearer_qos.pci = PRE_EMPTION_CAPABILITY_ENABLED; - pcc_rule->bearer_qos.pl = 2; - pcc_rule->bearer_qos.pvi = PRE_EMPTION_VULNERABILITY_DISABLED; - pcc_rule->bearer_qos.qci = 1; - pcc_rule->bearer_qos.gbr.br_ul = - 64; // kilobits per second (1 kbps = 1000 bps) - pcc_rule->bearer_qos.gbr.br_dl = - 64; // kilobits per second (1 kbps = 1000 bps) - pcc_rule->bearer_qos.mbr.br_ul = - 64; // kilobits per second (1 kbps = 1000 bps) - pcc_rule->bearer_qos.mbr.br_dl = - 64; // kilobits per second (1 kbps = 1000 bps) - pcc_rule->sdf_template.sdf_filter[0].identifier = PF_ID_VOLTE; - pcc_rule->sdf_template.sdf_filter[0].spare = 0; - pcc_rule->sdf_template.sdf_filter[0].direction = - TRAFFIC_FLOW_TEMPLATE_BIDIRECTIONAL; - pcc_rule->sdf_template.sdf_filter[0].eval_precedence = 2; - pcc_rule->sdf_template.sdf_filter[0].length = 9; - pcc_rule->sdf_template.sdf_filter[0].packetfiltercontents.flags = - TRAFFIC_FLOW_TEMPLATE_IPV4_REMOTE_ADDR_FLAG; - pcc_rule->sdf_template.sdf_filter[0] - 
.packetfiltercontents.ipv4remoteaddr[0] - .addr = 216; - pcc_rule->sdf_template.sdf_filter[0] - .packetfiltercontents.ipv4remoteaddr[1] - .addr = 58; - pcc_rule->sdf_template.sdf_filter[0] - .packetfiltercontents.ipv4remoteaddr[2] - .addr = 210; - pcc_rule->sdf_template.sdf_filter[0] - .packetfiltercontents.ipv4remoteaddr[3] - .addr = 212; - pcc_rule->sdf_template.sdf_filter[0] - .packetfiltercontents.ipv4remoteaddr[0] - .mask = 255; - pcc_rule->sdf_template.sdf_filter[0] - .packetfiltercontents.ipv4remoteaddr[1] - .mask = 255; - pcc_rule->sdf_template.sdf_filter[0] - .packetfiltercontents.ipv4remoteaddr[2] - .mask = 255; - pcc_rule->sdf_template.sdf_filter[0] - .packetfiltercontents.ipv4remoteaddr[3] - .mask = 255; - pcc_rule->sdf_template.number_of_packet_filters = 1; - hrc = hashtable_ts_insert(state_p->deactivated_predefined_pcc_rules, - pcc_rule->sdf_id, pcc_rule); - if (HASH_TABLE_OK != hrc) { - return RETURNerror; - } - } - - hrc = hashtable_ts_is_key_exists(state_p->deactivated_predefined_pcc_rules, - SDF_ID_GBR_VILTE_192K); - if (hrc == HASH_TABLE_KEY_NOT_EXISTS) { - pcc_rule = (pcc_rule_t*)calloc(1, sizeof(pcc_rule_t)); - pcc_rule->name = bfromcstr("VILTE_192K_PCC_RULE"); - pcc_rule->is_activated = false; - pcc_rule->sdf_id = SDF_ID_GBR_VILTE_192K; - pcc_rule->bearer_qos.pci = PRE_EMPTION_CAPABILITY_ENABLED; - pcc_rule->bearer_qos.pl = 2; - pcc_rule->bearer_qos.pvi = PRE_EMPTION_VULNERABILITY_DISABLED; - pcc_rule->bearer_qos.qci = 2; - pcc_rule->bearer_qos.gbr.br_ul = - 192; // kilobits per second (1 kbps = 1000 bps) - pcc_rule->bearer_qos.gbr.br_dl = - 192; // kilobits per second (1 kbps = 1000 bps) - pcc_rule->bearer_qos.mbr.br_ul = - 192; // kilobits per second (1 kbps = 1000 bps) - pcc_rule->bearer_qos.mbr.br_dl = - 192; // kilobits per second (1 kbps = 1000 bps) - pcc_rule->sdf_template.sdf_filter[0].identifier = PF_ID_VILTE; - pcc_rule->sdf_template.sdf_filter[0].spare = 0; - pcc_rule->sdf_template.sdf_filter[0].direction = - 
TRAFFIC_FLOW_TEMPLATE_BIDIRECTIONAL; - pcc_rule->sdf_template.sdf_filter[0].eval_precedence = 2; - pcc_rule->sdf_template.sdf_filter[0].length = 9; - pcc_rule->sdf_template.sdf_filter[0].packetfiltercontents.flags = - TRAFFIC_FLOW_TEMPLATE_IPV4_REMOTE_ADDR_FLAG; - pcc_rule->sdf_template.sdf_filter[0] - .packetfiltercontents.ipv4remoteaddr[0] - .addr = 216; - pcc_rule->sdf_template.sdf_filter[0] - .packetfiltercontents.ipv4remoteaddr[1] - .addr = 58; - pcc_rule->sdf_template.sdf_filter[0] - .packetfiltercontents.ipv4remoteaddr[2] - .addr = 210; - pcc_rule->sdf_template.sdf_filter[0] - .packetfiltercontents.ipv4remoteaddr[3] - .addr = 213; - pcc_rule->sdf_template.sdf_filter[0] - .packetfiltercontents.ipv4remoteaddr[0] - .mask = 255; - pcc_rule->sdf_template.sdf_filter[0] - .packetfiltercontents.ipv4remoteaddr[1] - .mask = 255; - pcc_rule->sdf_template.sdf_filter[0] - .packetfiltercontents.ipv4remoteaddr[2] - .mask = 255; - pcc_rule->sdf_template.sdf_filter[0] - .packetfiltercontents.ipv4remoteaddr[3] - .mask = 255; - pcc_rule->sdf_template.number_of_packet_filters = 1; - hrc = hashtable_ts_insert(state_p->deactivated_predefined_pcc_rules, - pcc_rule->sdf_id, pcc_rule); - if (HASH_TABLE_OK != hrc) { - return RETURNerror; - } - } - hrc = hashtable_ts_is_key_exists(state_p->deactivated_predefined_pcc_rules, - SDF_ID_TEST_PING); - if (hrc == HASH_TABLE_KEY_NOT_EXISTS) { - pcc_rule = (pcc_rule_t*)calloc(1, sizeof(pcc_rule_t)); - pcc_rule->name = bfromcstr("TEST_PING_PCC_RULE"); - pcc_rule->is_activated = false; - pcc_rule->sdf_id = SDF_ID_TEST_PING; - pcc_rule->bearer_qos.pci = PRE_EMPTION_CAPABILITY_DISABLED; - pcc_rule->bearer_qos.pl = 15; - pcc_rule->bearer_qos.pvi = PRE_EMPTION_VULNERABILITY_ENABLED; - pcc_rule->bearer_qos.qci = 7; - pcc_rule->bearer_qos.gbr.br_ul = - 0; // kilobits per second (1 kbps = 1000 bps) - pcc_rule->bearer_qos.gbr.br_dl = - 0; // kilobits per second (1 kbps = 1000 bps) - pcc_rule->bearer_qos.mbr.br_ul = - 8; // kilobits per second (1 kbps = 
1000 bps) - pcc_rule->bearer_qos.mbr.br_dl = - 8; // kilobits per second (1 kbps = 1000 bps) - pcc_rule->sdf_template.sdf_filter[0].identifier = PF_ID_PING; - pcc_rule->sdf_template.sdf_filter[0].spare = 0; - pcc_rule->sdf_template.sdf_filter[0].direction = - TRAFFIC_FLOW_TEMPLATE_BIDIRECTIONAL; - pcc_rule->sdf_template.sdf_filter[0].eval_precedence = 2; - pcc_rule->sdf_template.sdf_filter[0].length = 9; - pcc_rule->sdf_template.sdf_filter[0].packetfiltercontents.flags = - TRAFFIC_FLOW_TEMPLATE_PROTOCOL_NEXT_HEADER_FLAG; - pcc_rule->sdf_template.sdf_filter[0] - .packetfiltercontents.protocolidentifier_nextheader = IPPROTO_ICMP; - pcc_rule->sdf_template.number_of_packet_filters = 1; - hrc = hashtable_ts_insert(state_p->deactivated_predefined_pcc_rules, - pcc_rule->sdf_id, pcc_rule); - if (HASH_TABLE_OK != hrc) { - return RETURNerror; - } - } - - hrc = hashtable_ts_is_key_exists(state_p->deactivated_predefined_pcc_rules, - SDF_ID_NGBR_DEFAULT); - if (hrc == HASH_TABLE_KEY_NOT_EXISTS) { - // really necessary ? 
- pcc_rule = (pcc_rule_t*)calloc(1, sizeof(pcc_rule_t)); - pcc_rule->name = bfromcstr("DEFAULT_PCC_RULE"); - pcc_rule->is_activated = false; - pcc_rule->sdf_id = SDF_ID_NGBR_DEFAULT; - pcc_rule->bearer_qos.pci = PRE_EMPTION_CAPABILITY_DISABLED; - pcc_rule->bearer_qos.pl = 15; - pcc_rule->bearer_qos.pvi = PRE_EMPTION_VULNERABILITY_ENABLED; - pcc_rule->bearer_qos.qci = 9; - pcc_rule->bearer_qos.gbr.br_ul = - 0; // kilobits per second (1 kbps = 1000 bps) - pcc_rule->bearer_qos.gbr.br_dl = - 0; // kilobits per second (1 kbps = 1000 bps) - pcc_rule->bearer_qos.mbr.br_ul = - 1000; // kilobits per second (1 kbps = 1000 bps) - pcc_rule->bearer_qos.mbr.br_dl = - 1000; // kilobits per second (1 kbps = 1000 bps) - pcc_rule->sdf_template.sdf_filter[0].identifier = PF_ID_DEFAULT; - pcc_rule->sdf_template.sdf_filter[0].spare = 0; - pcc_rule->sdf_template.sdf_filter[0].direction = - TRAFFIC_FLOW_TEMPLATE_DOWNLINK_ONLY; - pcc_rule->sdf_template.sdf_filter[0].eval_precedence = 2; - pcc_rule->sdf_template.sdf_filter[0].length = 9; - pcc_rule->sdf_template.sdf_filter[0].packetfiltercontents.flags = - TRAFFIC_FLOW_TEMPLATE_IPV4_REMOTE_ADDR_FLAG; - pcc_rule->sdf_template.sdf_filter[0] - .packetfiltercontents.ipv4remoteaddr[0] - .addr = (uint8_t)((pgw_config_p->ue_pool_addr[0].s_addr) & 0x000000FF); - pcc_rule->sdf_template.sdf_filter[0] - .packetfiltercontents.ipv4remoteaddr[1] - .addr = - (uint8_t)((pgw_config_p->ue_pool_addr[0].s_addr >> 8) & 0x000000FF); - pcc_rule->sdf_template.sdf_filter[0] - .packetfiltercontents.ipv4remoteaddr[2] - .addr = - (uint8_t)((pgw_config_p->ue_pool_addr[0].s_addr >> 16) & 0x000000FF); - pcc_rule->sdf_template.sdf_filter[0] - .packetfiltercontents.ipv4remoteaddr[3] - .addr = - (uint8_t)((pgw_config_p->ue_pool_addr[0].s_addr >> 24) & 0x000000FF); - struct in_addr addr_mask = {0}; - addr_mask.s_addr = - htonl(0xFFFFFFFF << (32 - pgw_config_p->ue_pool_mask[0])); - pcc_rule->sdf_template.sdf_filter[0] - .packetfiltercontents.ipv4remoteaddr[0] - .mask = 
(uint8_t)((addr_mask.s_addr) & 0x000000FF); - pcc_rule->sdf_template.sdf_filter[0] - .packetfiltercontents.ipv4remoteaddr[1] - .mask = (uint8_t)((addr_mask.s_addr >> 8) & 0x000000FF); - pcc_rule->sdf_template.sdf_filter[0] - .packetfiltercontents.ipv4remoteaddr[2] - .mask = (uint8_t)((addr_mask.s_addr >> 16) & 0x000000FF); - pcc_rule->sdf_template.sdf_filter[0] - .packetfiltercontents.ipv4remoteaddr[3] - .mask = (uint8_t)((addr_mask.s_addr >> 24) & 0x000000FF); - pcc_rule->sdf_template.number_of_packet_filters = 1; - hrc = hashtable_ts_insert(state_p->deactivated_predefined_pcc_rules, - pcc_rule->sdf_id, pcc_rule); - if (HASH_TABLE_OK != hrc) { - return RETURNerror; - } - } - - for (int i = 0; i < (SDF_ID_MAX - 1); i++) { - if (pgw_config_p->pcef.preload_static_sdf_identifiers[i]) { - pgw_pcef_emulation_apply_rule( - state_p, pgw_config_p->pcef.preload_static_sdf_identifiers[i], - pgw_config_p); - } else - break; - } - - if (pgw_config_p->pcef.automatic_push_dedicated_bearer_sdf_identifier) { - pgw_pcef_emulation_apply_rule( - state_p, - pgw_config_p->pcef.automatic_push_dedicated_bearer_sdf_identifier, - pgw_config_p); - } - return rc; -} - -//------------------------------------------------------------------------------ -// may change sdf_id to PCC_rule name ? 
-void pgw_pcef_emulation_apply_rule(spgw_state_t* state_p, const sdf_id_t sdf_id, - const pgw_config_t* const pgw_config_p) { - pcc_rule_t* pcc_rule = NULL; - hashtable_rc_t hrc = hashtable_ts_get( - state_p->deactivated_predefined_pcc_rules, sdf_id, (void**)&pcc_rule); - - if (HASH_TABLE_OK == hrc) { - if (!pcc_rule->is_activated) { - OAILOG_INFO(LOG_SPGW_APP, "Loading PCC rule %s\n", bdata(pcc_rule->name)); - pcc_rule->is_activated = true; - for (int sdff_i = 0; - sdff_i < pcc_rule->sdf_template.number_of_packet_filters; sdff_i++) { - pgw_pcef_emulation_apply_sdf_filter( - &pcc_rule->sdf_template.sdf_filter[sdff_i], pcc_rule->sdf_id, - pgw_config_p); - } - } - } -} - -//------------------------------------------------------------------------------ -void pgw_pcef_emulation_apply_sdf_filter( - sdf_filter_t* const sdf_f, const sdf_id_t sdf_id, - const pgw_config_t* const pgw_config_p) { - if ((TRAFFIC_FLOW_TEMPLATE_BIDIRECTIONAL == sdf_f->direction) || - (TRAFFIC_FLOW_TEMPLATE_DOWNLINK_ONLY == sdf_f->direction)) { - bstring filter = pgw_pcef_emulation_packet_filter_2_iptable_string( - &sdf_f->packetfiltercontents, sdf_f->direction); - - bstring marking_command = NULL; - if ((TRAFFIC_FLOW_TEMPLATE_IPV4_REMOTE_ADDR_FLAG | - TRAFFIC_FLOW_TEMPLATE_IPV6_REMOTE_ADDR_FLAG) & - sdf_f->packetfiltercontents.flags) { - marking_command = - bformat("iptables -I POSTROUTING -t mangle %s -j MARK --set-mark %d", - bdata(filter), sdf_id); - } else { - // marking_command = bformat("iptables -I PREROUTING -t mangle - // --in-interface %s --dest %"PRIu8".%"PRIu8".%"PRIu8".%"PRIu8"/%"PRIu8" - // %s -j MARK --set-mark %d", - marking_command = - bformat("iptables -I POSTROUTING -t mangle --dest %" PRIu8 ".%" PRIu8 - ".%" PRIu8 ".%" PRIu8 "/%" PRIu8 " %s -j MARK --set-mark %d", - NIPADDR(pgw_config_p->ue_pool_addr[0].s_addr), - pgw_config_p->ue_pool_mask[0], bdata(filter), sdf_id); - } - bdestroy_wrapper(&filter); - async_system_command(TASK_ASYNC_SYSTEM, false, bdata(marking_command)); - 
bdestroy_wrapper(&marking_command); - - // for UE <-> PGW traffic - filter = pgw_pcef_emulation_packet_filter_2_iptable_string( - &sdf_f->packetfiltercontents, sdf_f->direction); - - marking_command = NULL; - if ((TRAFFIC_FLOW_TEMPLATE_IPV4_REMOTE_ADDR_FLAG | - TRAFFIC_FLOW_TEMPLATE_IPV6_REMOTE_ADDR_FLAG) & - sdf_f->packetfiltercontents.flags) { - marking_command = - bformat("iptables -I OUTPUT -t mangle %s -j MARK --set-mark %d", - bdata(filter), sdf_id); - } else { - marking_command = - bformat("iptables -I OUTPUT -t mangle --dest %" PRIu8 ".%" PRIu8 - ".%" PRIu8 ".%" PRIu8 "/%" PRIu8 " %s -j MARK --set-mark %d", - NIPADDR(pgw_config_p->ue_pool_addr[0].s_addr), - pgw_config_p->ue_pool_mask[0], bdata(filter), sdf_id); - } - bdestroy_wrapper(&filter); - async_system_command(TASK_ASYNC_SYSTEM, false, bdata(marking_command)); - bdestroy_wrapper(&marking_command); - } -} - -//------------------------------------------------------------------------------ -bstring pgw_pcef_emulation_packet_filter_2_iptable_string( - packet_filter_contents_t* const packetfiltercontents, uint8_t direction) { - bstring bstr = bfromcstralloc(64, " "); - - if ((TRAFFIC_FLOW_TEMPLATE_DOWNLINK_ONLY == direction) || - (TRAFFIC_FLOW_TEMPLATE_BIDIRECTIONAL == direction)) { - if (TRAFFIC_FLOW_TEMPLATE_IPV4_REMOTE_ADDR_FLAG & - packetfiltercontents->flags) { - bformata(bstr, " --destination %d.%d.%d.%d/%d.%d.%d.%d", - packetfiltercontents->ipv4remoteaddr[0].addr, - packetfiltercontents->ipv4remoteaddr[1].addr, - packetfiltercontents->ipv4remoteaddr[2].addr, - packetfiltercontents->ipv4remoteaddr[3].addr, - packetfiltercontents->ipv4remoteaddr[0].mask, - packetfiltercontents->ipv4remoteaddr[1].mask, - packetfiltercontents->ipv4remoteaddr[2].mask, - packetfiltercontents->ipv4remoteaddr[3].mask); - } else { - bformata(bstr, " --source %d.%d.%d.%d/%d.%d.%d.%d", - packetfiltercontents->ipv4remoteaddr[0].addr, - packetfiltercontents->ipv4remoteaddr[1].addr, - packetfiltercontents->ipv4remoteaddr[2].addr, 
- packetfiltercontents->ipv4remoteaddr[3].addr, - packetfiltercontents->ipv4remoteaddr[0].mask, - packetfiltercontents->ipv4remoteaddr[1].mask, - packetfiltercontents->ipv4remoteaddr[2].mask, - packetfiltercontents->ipv4remoteaddr[3].mask); - } - } - if (TRAFFIC_FLOW_TEMPLATE_IPV6_REMOTE_ADDR_FLAG & - packetfiltercontents->flags) { - Fatal("TODO Implement pgw_pcef_emulation_packet_filter_2_iptable_string"); - } - if (TRAFFIC_FLOW_TEMPLATE_PROTOCOL_NEXT_HEADER_FLAG & - packetfiltercontents->flags) { - bformata(bstr, " --protocol %u", - packetfiltercontents->protocolidentifier_nextheader); - } - if (TRAFFIC_FLOW_TEMPLATE_SINGLE_LOCAL_PORT_FLAG & - packetfiltercontents->flags) { - if ((TRAFFIC_FLOW_TEMPLATE_DOWNLINK_ONLY == direction) || - (TRAFFIC_FLOW_TEMPLATE_BIDIRECTIONAL == direction)) { - bformata(bstr, " --destination-port %" PRIu16 " ", - packetfiltercontents->singlelocalport); - } else if (TRAFFIC_FLOW_TEMPLATE_UPLINK_ONLY == direction) { - bformata(bstr, " --source-port %" PRIu16 " ", - packetfiltercontents->singlelocalport); - } - } - if (TRAFFIC_FLOW_TEMPLATE_LOCAL_PORT_RANGE_FLAG & - packetfiltercontents->flags) { - Fatal("TODO LOCAL_PORT_RANGE"); - } - if (TRAFFIC_FLOW_TEMPLATE_SINGLE_REMOTE_PORT_FLAG & - packetfiltercontents->flags) { - if ((TRAFFIC_FLOW_TEMPLATE_DOWNLINK_ONLY == direction) || - (TRAFFIC_FLOW_TEMPLATE_BIDIRECTIONAL == direction)) { - bformata(bstr, " --source-port %" PRIu16 " ", - packetfiltercontents->singleremoteport); - } else if (TRAFFIC_FLOW_TEMPLATE_UPLINK_ONLY == direction) { - bformata(bstr, " --destination-port %" PRIu16 " ", - packetfiltercontents->singleremoteport); - } - } - if (TRAFFIC_FLOW_TEMPLATE_REMOTE_PORT_RANGE_FLAG & - packetfiltercontents->flags) { - Fatal("TODO REMOTE_PORT_RANGE"); - } - if (TRAFFIC_FLOW_TEMPLATE_SECURITY_PARAMETER_INDEX_FLAG & - packetfiltercontents->flags) { - bformata(bstr, " -m esp --espspi %" PRIu32 " ", - packetfiltercontents->securityparameterindex); - } - if 
(TRAFFIC_FLOW_TEMPLATE_TYPE_OF_SERVICE_TRAFFIC_CLASS_FLAG & - packetfiltercontents->flags) { - // TODO mask - bformata(bstr, " -m tos --tos 0x%02X", - packetfiltercontents->typdeofservice_trafficclass.value); - } - if (TRAFFIC_FLOW_TEMPLATE_FLOW_LABEL_FLAG & packetfiltercontents->flags) { - Fatal("TODO Implement pgw_pcef_emulation_packet_filter_2_iptable_string"); - } - return bstr; -} - -//------------------------------------------------------------------------------ -status_code_e pgw_pcef_get_sdf_parameters(spgw_state_t* state_p, - const sdf_id_t sdf_id, - bearer_qos_t* const bearer_qos, - packet_filter_t* const packet_filter, - uint8_t* const num_pf) { - pcc_rule_t* pcc_rule = NULL; - hashtable_rc_t hrc = hashtable_ts_get( - state_p->deactivated_predefined_pcc_rules, sdf_id, (void**)&pcc_rule); - - if (HASH_TABLE_OK == hrc) { - if (pcc_rule->is_activated) { - memcpy(bearer_qos, &pcc_rule->bearer_qos, sizeof(pcc_rule->bearer_qos)); - memcpy(packet_filter, &pcc_rule->sdf_template.sdf_filter, - sizeof(pcc_rule->sdf_template.sdf_filter[0]) * - pcc_rule->sdf_template.number_of_packet_filters); - *num_pf = pcc_rule->sdf_template.number_of_packet_filters; - return RETURNok; - } - } - memset(bearer_qos, 0, sizeof(*bearer_qos)); - memset(packet_filter, 0, sizeof(*packet_filter)); - *num_pf = 0; - return RETURNerror; -} diff --git a/lte/gateway/c/core/oai/tasks/sgw/pgw_pcef_emulation.hpp b/lte/gateway/c/core/oai/tasks/sgw/pgw_pcef_emulation.hpp deleted file mode 100644 index f0093d7a2ca7..000000000000 --- a/lte/gateway/c/core/oai/tasks/sgw/pgw_pcef_emulation.hpp +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Licensed to the OpenAirInterface (OAI) Software Alliance under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The OpenAirInterface Software Alliance licenses this file to You under - * the terms found in the LICENSE file in the root of this source tree. 
- * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - *------------------------------------------------------------------------------- - * For more information about the OpenAirInterface (OAI) Software Alliance: - * contact@openairinterface.org - */ - -/*! \file pgw_pcef_emulation.hpp - * \brief - * \author Lionel Gauthier - * \company Eurecom - * \email: lionel.gauthier@eurecom.fr - */ - -#ifndef FILE_PGW_PCEF_EMULATION_SEEN -#define FILE_PGW_PCEF_EMULATION_SEEN - -#include -#include - -#include "lte/gateway/c/core/common/common_defs.h" -#include "lte/gateway/c/core/oai/include/pgw_config.h" -#include "lte/gateway/c/core/oai/include/pgw_types.h" -#include "lte/gateway/c/core/oai/include/spgw_state.hpp" -#include "lte/gateway/c/core/oai/lib/bstr/bstrlib.h" -#include "lte/gateway/c/core/oai/lib/gtpv2-c/nwgtpv2c-0.11/include/queue.h" - -status_code_e pgw_pcef_emulation_init(spgw_state_t* state_p, - const pgw_config_t* pgw_config_p); -void pgw_pcef_emulation_apply_rule(spgw_state_t* state_p, sdf_id_t sdf_id, - const pgw_config_t* pgw_config_p); -void pgw_pcef_emulation_apply_sdf_filter(sdf_filter_t* sdf_f, sdf_id_t sdf_id, - const pgw_config_t* pgw_config_p); -bstring pgw_pcef_emulation_packet_filter_2_iptable_string( - packet_filter_contents_t* packetfiltercontents, uint8_t direction); -status_code_e pgw_pcef_get_sdf_parameters(spgw_state_t* state, sdf_id_t sdf_id, - bearer_qos_t* bearer_qos, - packet_filter_t* packet_filter, - uint8_t* num_pf); - -#endif /* FILE_PGW_PCEF_EMULATION_SEEN */ diff --git a/lte/gateway/c/core/oai/tasks/sgw/pgw_pco.hpp b/lte/gateway/c/core/oai/tasks/sgw/pgw_pco.hpp index f538b72ba246..052707b2eb7b 100644 --- a/lte/gateway/c/core/oai/tasks/sgw/pgw_pco.hpp +++ 
b/lte/gateway/c/core/oai/tasks/sgw/pgw_pco.hpp @@ -21,8 +21,8 @@ * \company Eurecom * \email: lionel.gauthier@eurecom.fr */ -#ifndef FILE_PGW_PCO_SEEN -#define FILE_PGW_PCO_SEEN + +#pragma once #include @@ -68,5 +68,3 @@ status_code_e pgw_process_pco_request( const protocol_configuration_options_t* const pco_req, protocol_configuration_options_t* pco_resp, protocol_configuration_options_ids_t* const pco_ids); - -#endif diff --git a/lte/gateway/c/core/oai/tasks/sgw/pgw_procedures.cpp b/lte/gateway/c/core/oai/tasks/sgw/pgw_procedures.cpp index 4b3aa8759f57..9f5222c92193 100644 --- a/lte/gateway/c/core/oai/tasks/sgw/pgw_procedures.cpp +++ b/lte/gateway/c/core/oai/tasks/sgw/pgw_procedures.cpp @@ -27,6 +27,7 @@ #include #include "lte/gateway/c/core/oai/include/sgw_context_manager.hpp" + #ifdef __cplusplus extern "C" { #endif diff --git a/lte/gateway/c/core/oai/tasks/sgw/pgw_procedures.hpp b/lte/gateway/c/core/oai/tasks/sgw/pgw_procedures.hpp index ba2812cb58ad..3b0ad964d9d9 100644 --- a/lte/gateway/c/core/oai/tasks/sgw/pgw_procedures.hpp +++ b/lte/gateway/c/core/oai/tasks/sgw/pgw_procedures.hpp @@ -14,8 +14,8 @@ * For more information about the OpenAirInterface (OAI) Software Alliance: * contact@openairinterface.org */ -#ifndef FILE_PGW_PROCEDURES_SEEN -#define FILE_PGW_PROCEDURES_SEEN + +#pragma once /*! 
\file pgw_procedures.hpp \brief Just a workaround waiting for PCEF implementation @@ -24,10 +24,9 @@ \email: lionel.gauthier@eurecom.fr */ -#include "lte/gateway/c/core/oai/lib/gtpv2-c/nwgtpv2c-0.11/include/queue.h" #include "lte/gateway/c/core/oai/lib/3gpp/3gpp_23.401.h" +#include "lte/gateway/c/core/oai/lib/gtpv2-c/nwgtpv2c-0.11/include/queue.h" #include "lte/gateway/c/core/oai/common/common_types.h" -#include "lte/gateway/c/core/oai/tasks/sgw/pgw_pcef_emulation.hpp" #include "lte/gateway/c/core/oai/include/sgw_context_manager.hpp" typedef enum { @@ -70,5 +69,3 @@ pgw_ni_cbr_proc_t* pgw_create_procedure_create_bearer( sgw_eps_bearer_context_information_t* const ctx_p); void pgw_delete_procedure_create_bearer( s_plus_p_gw_eps_bearer_context_information_t* ctx_p); - -#endif diff --git a/lte/gateway/c/core/oai/tasks/sgw/pgw_ue_ip_address_alloc.hpp b/lte/gateway/c/core/oai/tasks/sgw/pgw_ue_ip_address_alloc.hpp index e84eff16ea6c..99031be857df 100644 --- a/lte/gateway/c/core/oai/tasks/sgw/pgw_ue_ip_address_alloc.hpp +++ b/lte/gateway/c/core/oai/tasks/sgw/pgw_ue_ip_address_alloc.hpp @@ -22,13 +22,11 @@ * \email: */ -#ifndef PGW_UE_IP_ADDRESS_ALLOC_SEEN -#define PGW_UE_IP_ADDRESS_ALLOC_SEEN +#pragma once #include #include -#include "lte/gateway/c/core/oai/include/spgw_state.hpp" #include "lte/gateway/c/core/oai/include/ip_forward_messages_types.h" void release_ue_ipv4_address(const char* imsi, const char* apn, @@ -43,5 +41,3 @@ int get_ip_block(struct in_addr* netaddr, uint32_t* netmask); #endif void release_ue_ipv6_address(const char* imsi, const char* apn, struct in6_addr* addr); - -#endif /*PGW_UE_IP_ADDRESS_ALLOC_SEEN */ diff --git a/lte/gateway/c/core/oai/tasks/sgw/s11_causes.hpp b/lte/gateway/c/core/oai/tasks/sgw/s11_causes.hpp index a3ec7dd35f4e..7c1f9e500d83 100644 --- a/lte/gateway/c/core/oai/tasks/sgw/s11_causes.hpp +++ b/lte/gateway/c/core/oai/tasks/sgw/s11_causes.hpp @@ -21,8 +21,8 @@ * \company Eurecom * \email: lionel.gauthier@eurecom.fr */ -#ifndef 
FILE_S11_CAUSES_SEEN -#define FILE_S11_CAUSES_SEEN + +#pragma once #include @@ -38,5 +38,3 @@ typedef struct SGWCauseMapping_e { } SGWCauseMapping_t; char* sgw_cause_2_string(uint8_t cause_value); - -#endif /* FILE_S11_CAUSES_SEEN */ diff --git a/lte/gateway/c/core/oai/tasks/sgw/sgw.hpp b/lte/gateway/c/core/oai/tasks/sgw/sgw.hpp deleted file mode 100644 index eba88d7c839d..000000000000 --- a/lte/gateway/c/core/oai/tasks/sgw/sgw.hpp +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Licensed to the OpenAirInterface (OAI) Software Alliance under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The OpenAirInterface Software Alliance licenses this file to You under - * the terms found in the LICENSE file in the root of this source tree. - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - *------------------------------------------------------------------------------- - * For more information about the OpenAirInterface (OAI) Software Alliance: - * contact@openairinterface.org - */ - -/*! 
\file sgw_lite.hpp - * \brief - * \author Lionel Gauthier - * \company Eurecom - * \email: lionel.gauthier@eurecom.fr - */ - -#ifndef FILE_SGW_SEEN -#define FILE_SGW_SEEN -#include -#include - -#include "lte/gateway/c/core/oai/lib/bstr/bstrlib.h" -#include "lte/gateway/c/core/oai/lib/gtpv2-c/nwgtpv2c-0.11/include/queue.h" -#include "lte/gateway/c/core/oai/lib/hashtable/hashtable.h" - -#include "lte/gateway/c/core/oai/include/nas/commonDef.h" -#include "lte/gateway/c/core/oai/common/common_types.h" -#include "lte/gateway/c/core/oai/include/sgw_context_manager.hpp" -#include "lte/gateway/c/core/oai/tasks/gtpv1-u/gtpv1u_sgw_defs.h" -#include "lte/gateway/c/core/oai/tasks/sgw/pgw_pcef_emulation.h" - -typedef struct sgw_app_s { - bstring sgw_if_name_S1u_S12_S4_up; - struct in_addr sgw_ip_address_S1u_S12_S4_up; - - bstring sgw_if_name_S11_S4; - struct in_addr sgw_ip_address_S11_S4; - - struct in_addr sgw_ip_address_S5_S8_up; // unused now - - // key is S11 S-GW local teid - hash_table_ts_t* s11teid2mme_hashtable; - - // the key of this hashtable is the S11 s-gw local teid. 
- hash_table_ts_t* s11_bearer_context_information_hashtable; - - gtpv1u_data_t gtpv1u_data; -} sgw_app_t; - -struct ipv4_list_elm_s { - STAILQ_ENTRY(ipv4_list_elm_s) ipv4_entries; - struct in_addr addr; -}; - -typedef struct pgw_app_s { - STAILQ_HEAD(ipv4_list_free_head_s, ipv4_list_elm_s) ipv4_list_free; - STAILQ_HEAD(ipv4_list_allocated_head_s, ipv4_list_elm_s) ipv4_list_allocated; - hash_table_ts_t* deactivated_predefined_pcc_rules; - hash_table_ts_t* predefined_pcc_rules; -} pgw_app_t; - -#endif diff --git a/lte/gateway/c/core/oai/tasks/sgw/sgw_defs.hpp b/lte/gateway/c/core/oai/tasks/sgw/sgw_defs.hpp index b9049b772f7b..36aa783ee6e4 100644 --- a/lte/gateway/c/core/oai/tasks/sgw/sgw_defs.hpp +++ b/lte/gateway/c/core/oai/tasks/sgw/sgw_defs.hpp @@ -22,8 +22,7 @@ * \email: lionel.gauthier@eurecom.fr */ -#ifndef FILE_SGW_DEFS_SEEN -#define FILE_SGW_DEFS_SEEN +#pragma once #include "lte/gateway/c/core/common/common_defs.h" #include "lte/gateway/c/core/oai/include/spgw_config.h" @@ -36,4 +35,3 @@ status_code_e spgw_app_init(spgw_config_t* spgw_config_pP, bool persist_state); #ifdef __cplusplus } #endif -#endif /* FILE_SGW_DEFS_SEEN */ diff --git a/lte/gateway/c/core/oai/tasks/sgw/sgw_handlers.cpp b/lte/gateway/c/core/oai/tasks/sgw/sgw_handlers.cpp index c6ec859527ea..7d4101b20015 100644 --- a/lte/gateway/c/core/oai/tasks/sgw/sgw_handlers.cpp +++ b/lte/gateway/c/core/oai/tasks/sgw/sgw_handlers.cpp @@ -42,9 +42,11 @@ extern "C" { #include "lte/gateway/c/core/oai/tasks/gtpv1-u/gtpv1u.h" #include "lte/gateway/c/core/oai/common/conversions.h" #include "lte/gateway/c/core/common/dynamic_memory_check.h" +#include "lte/gateway/c/core/oai/lib/hashtable/hashtable.h" #ifdef __cplusplus } #endif + #include "lte/gateway/c/core/common/common_defs.h" #include "lte/gateway/c/core/oai/common/common_types.h" #include "lte/gateway/c/core/oai/include/pgw_config.h" @@ -61,15 +63,12 @@ extern "C" { #include "lte/gateway/c/core/oai/lib/3gpp/3gpp_29.274.h" #include 
"lte/gateway/c/core/oai/lib/bstr/bstrlib.h" #include "lte/gateway/c/core/oai/lib/gtpv2-c/nwgtpv2c-0.11/include/queue.h" -#include "lte/gateway/c/core/oai/lib/hashtable/hashtable.h" #include "lte/gateway/c/core/oai/lib/itti/itti_types.h" #include "lte/gateway/c/core/oai/lib/pcef/pcef_handlers.hpp" #include "lte/gateway/c/core/oai/tasks/sgw/pgw_handlers.hpp" -#include "lte/gateway/c/core/oai/tasks/sgw/pgw_pcef_emulation.hpp" #include "lte/gateway/c/core/oai/tasks/sgw/pgw_pco.hpp" #include "lte/gateway/c/core/oai/tasks/sgw/pgw_procedures.hpp" #include "lte/gateway/c/core/oai/tasks/sgw/pgw_ue_ip_address_alloc.hpp" -#include "lte/gateway/c/core/oai/tasks/sgw/sgw_defs.hpp" #include "lte/gateway/c/core/oai/tasks/sgw/sgw_handlers.hpp" #include "orc8r/gateway/c/common/service303/MetricsHelpers.hpp" diff --git a/lte/gateway/c/core/oai/tasks/sgw/sgw_handlers.hpp b/lte/gateway/c/core/oai/tasks/sgw/sgw_handlers.hpp index 1fb7b4532e74..8a652796980c 100644 --- a/lte/gateway/c/core/oai/tasks/sgw/sgw_handlers.hpp +++ b/lte/gateway/c/core/oai/tasks/sgw/sgw_handlers.hpp @@ -22,8 +22,7 @@ * \email: lionel.gauthier@eurecom.fr */ -#ifndef FILE_SGW_HANDLERS_SEEN -#define FILE_SGW_HANDLERS_SEEN +#pragma once #include "lte/gateway/c/core/common/common_defs.h" #include "lte/gateway/c/core/oai/common/common_types.h" @@ -72,7 +71,11 @@ uint32_t spgw_get_new_s1u_teid(spgw_state_t* state); #ifdef __cplusplus extern "C" { #endif + bool is_enb_ip_address_same(const fteid_t* fte_p, ip_address_t* ip_p); +status_code_e sgw_handle_sgi_endpoint_created( + spgw_state_t* state, itti_sgi_create_end_point_response_t* const resp_p, + imsi64_t imsi64); status_code_e send_mbr_failure( log_proto_t module, const itti_s11_modify_bearer_request_t* const modify_bearer_pP, @@ -132,4 +135,3 @@ void generate_dl_flow(packet_filter_contents_t* packet_filter, void sgw_handle_delete_bearer_cmd( itti_s11_delete_bearer_command_t* s11_delete_bearer_command, imsi64_t imsi64); -#endif /* FILE_SGW_HANDLERS_SEEN */ diff --git 
a/lte/gateway/c/core/oai/tasks/sgw/sgw_paging.hpp b/lte/gateway/c/core/oai/tasks/sgw/sgw_paging.hpp index 94b22a16d6cc..49854352518e 100644 --- a/lte/gateway/c/core/oai/tasks/sgw/sgw_paging.hpp +++ b/lte/gateway/c/core/oai/tasks/sgw/sgw_paging.hpp @@ -14,13 +14,12 @@ * For more information about the OpenAirInterface (OAI) Software Alliance: * contact@openairinterface.org */ -#ifndef FILE_SGW_PAGING_SEEN -#define FILE_SGW_PAGING_SEEN + +#pragma once + #include struct in_addr; #define ETH_HEADER_LENGTH 14 void sgw_send_paging_request(const struct in_addr* dest_ip, const struct in6_addr* dest_ipv6); - -#endif diff --git a/lte/gateway/c/core/oai/tasks/sgw/sgw_task.cpp b/lte/gateway/c/core/oai/tasks/sgw/sgw_task.cpp index 1bbe5ddb08d6..00913573b7b3 100644 --- a/lte/gateway/c/core/oai/tasks/sgw/sgw_task.cpp +++ b/lte/gateway/c/core/oai/tasks/sgw/sgw_task.cpp @@ -28,11 +28,10 @@ #include #include -#include "lte/gateway/c/core/common/common_defs.h" -#include "lte/gateway/c/core/common/dynamic_memory_check.h" #ifdef __cplusplus extern "C" { #endif +#include "lte/gateway/c/core/common/dynamic_memory_check.h" #include "lte/gateway/c/core/common/assertions.h" #include "lte/gateway/c/core/oai/common/log.h" #include "lte/gateway/c/core/oai/common/itti_free_defined_msg.h" @@ -40,16 +39,18 @@ extern "C" { #include "lte/gateway/c/core/oai/lib/itti/intertask_interface_types.h" #include "lte/gateway/c/core/oai/include/gtpv1_u_messages_types.h" #include "lte/gateway/c/core/oai/tasks/gtpv1-u/gtpv1u_sgw_defs.h" +#include "lte/gateway/c/core/oai/lib/hashtable/hashtable.h" #ifdef __cplusplus } #endif + +#include "lte/gateway/c/core/common/common_defs.h" #include "lte/gateway/c/core/oai/include/sgw_config.h" +#include "lte/gateway/c/core/oai/include/spgw_state.hpp" #include "lte/gateway/c/core/oai/include/sgw_context_manager.hpp" #include "lte/gateway/c/core/oai/include/spgw_config.h" #include "lte/gateway/c/core/oai/lib/bstr/bstrlib.h" -#include 
"lte/gateway/c/core/oai/lib/hashtable/hashtable.h" #include "lte/gateway/c/core/oai/tasks/sgw/pgw_handlers.hpp" -#include "lte/gateway/c/core/oai/tasks/sgw/pgw_pcef_emulation.hpp" #include "lte/gateway/c/core/oai/tasks/sgw/pgw_ue_ip_address_alloc.hpp" #include "lte/gateway/c/core/oai/tasks/sgw/sgw_defs.hpp" #include "lte/gateway/c/core/oai/tasks/sgw/sgw_handlers.hpp" @@ -240,7 +241,8 @@ static void* spgw_app_thread(__attribute__((unused)) void* args) { } //------------------------------------------------------------------------------ -status_code_e spgw_app_init(spgw_config_t* spgw_config_pP, bool persist_state) { +extern "C" status_code_e spgw_app_init(spgw_config_t* spgw_config_pP, + bool persist_state) { OAILOG_DEBUG(LOG_SPGW_APP, "Initializing SPGW-APP task interface\n"); if (spgw_state_init(persist_state, spgw_config_pP) < 0) { @@ -254,17 +256,13 @@ status_code_e spgw_app_init(spgw_config_t* spgw_config_pP, bool persist_state) { read_spgw_ue_state_db(); #if !MME_UNIT_TEST // No need to initialize OVS data path for unit tests - if (gtpv1u_init(spgw_state_p, spgw_config_pP, persist_state) < 0) { + if (gtpv1u_init(&spgw_state_p->gtpv1u_data, spgw_config_pP, persist_state) < + 0) { OAILOG_ALERT(LOG_SPGW_APP, "Initializing GTPv1-U ERROR\n"); return RETURNerror; } #endif - if (RETURNerror == - pgw_pcef_emulation_init(spgw_state_p, &spgw_config_pP->pgw_config)) { - return RETURNerror; - } - if (itti_create_task(TASK_SPGW_APP, &spgw_app_thread, NULL) < 0) { perror("pthread_create"); OAILOG_ALERT(LOG_SPGW_APP, "Initializing SPGW-APP task interface: ERROR\n"); diff --git a/lte/gateway/c/core/oai/tasks/sgw/spgw_state.cpp b/lte/gateway/c/core/oai/tasks/sgw/spgw_state.cpp index 8db53bea5fe2..91c265716b6f 100644 --- a/lte/gateway/c/core/oai/tasks/sgw/spgw_state.cpp +++ b/lte/gateway/c/core/oai/tasks/sgw/spgw_state.cpp @@ -18,15 +18,19 @@ #include "lte/gateway/c/core/oai/include/spgw_state.hpp" #include -#include "lte/gateway/c/core/oai/common/conversions.h" +#ifdef 
__cplusplus extern "C" { +#endif #include "lte/gateway/c/core/common/assertions.h" #include "lte/gateway/c/core/common/dynamic_memory_check.h" -#include "lte/gateway/c/core/oai/include/sgw_context_manager.hpp" #include "lte/gateway/c/core/oai/lib/bstr/bstrlib.h" +#ifdef __cplusplus } +#endif +#include "lte/gateway/c/core/oai/common/conversions.h" +#include "lte/gateway/c/core/oai/include/sgw_context_manager.hpp" #include "lte/gateway/c/core/oai/tasks/sgw/pgw_procedures.hpp" #include "lte/gateway/c/core/oai/tasks/sgw/spgw_state_manager.hpp" @@ -117,18 +121,6 @@ void sgw_free_eps_bearer_context(sgw_eps_bearer_ctxt_t** sgw_eps_bearer_ctxt) { } } -void pgw_free_pcc_rule(void** rule) { - if (rule) { - auto* pcc_rule = (pcc_rule_t*)*rule; - if (pcc_rule) { - if (pcc_rule->name) { - bdestroy_wrapper(&pcc_rule->name); - } - free_wrapper(rule); - } - } -} - void sgw_free_ue_context(spgw_ue_context_t** ue_context_p) { if (*ue_context_p) { sgw_s11_teid_t* p1 = LIST_FIRST(&(*ue_context_p)->sgw_s11_teid_list); diff --git a/lte/gateway/c/core/oai/tasks/sgw/spgw_state_converter.cpp b/lte/gateway/c/core/oai/tasks/sgw/spgw_state_converter.cpp index e39268b71e87..adfd0bab1285 100644 --- a/lte/gateway/c/core/oai/tasks/sgw/spgw_state_converter.cpp +++ b/lte/gateway/c/core/oai/tasks/sgw/spgw_state_converter.cpp @@ -20,9 +20,10 @@ extern "C" { #include "lte/gateway/c/core/common/dynamic_memory_check.h" #include "lte/gateway/c/core/oai/common/conversions.h" -#include "lte/gateway/c/core/oai/include/sgw_context_manager.hpp" } +#include "lte/gateway/c/core/oai/include/sgw_context_manager.hpp" + using magma::lte::oai::CreateSessionMessage; using magma::lte::oai::GTPV1uData; using magma::lte::oai::PacketFilter; diff --git a/lte/gateway/c/core/oai/tasks/sgw/spgw_state_manager.cpp b/lte/gateway/c/core/oai/tasks/sgw/spgw_state_manager.cpp index b9d71baec99f..7b9bcd375733 100644 --- a/lte/gateway/c/core/oai/tasks/sgw/spgw_state_manager.cpp +++ 
b/lte/gateway/c/core/oai/tasks/sgw/spgw_state_manager.cpp @@ -75,13 +75,6 @@ void SpgwStateManager::create_state() { &state_cache_p->sgw_ipv6_address_S1u_S12_S4_up, sizeof(state_cache_p->gtpv1u_data.sgw_ipv6_address_for_S1u_S12_S4_up)); - // Creating PGW related state structs - state_cache_p->deactivated_predefined_pcc_rules = hashtable_ts_create( - MAX_PREDEFINED_PCC_RULES_HT_SIZE, nullptr, pgw_free_pcc_rule, nullptr); - - state_cache_p->predefined_pcc_rules = hashtable_ts_create( - MAX_PREDEFINED_PCC_RULES_HT_SIZE, nullptr, pgw_free_pcc_rule, nullptr); - state_cache_p->gtpv1u_teid = 0; bdestroy_wrapper(&b); @@ -104,13 +97,6 @@ void SpgwStateManager::free_state() { hashtable_ts_destroy(state_teid_ht_); - if (state_cache_p->deactivated_predefined_pcc_rules) { - hashtable_ts_destroy(state_cache_p->deactivated_predefined_pcc_rules); - } - - if (state_cache_p->predefined_pcc_rules) { - hashtable_ts_destroy(state_cache_p->predefined_pcc_rules); - } free_wrapper((void**)&state_cache_p); } diff --git a/lte/gateway/c/core/oai/tasks/sgw/spgw_state_manager.hpp b/lte/gateway/c/core/oai/tasks/sgw/spgw_state_manager.hpp index b7e4e984fc61..697877498019 100644 --- a/lte/gateway/c/core/oai/tasks/sgw/spgw_state_manager.hpp +++ b/lte/gateway/c/core/oai/tasks/sgw/spgw_state_manager.hpp @@ -24,7 +24,6 @@ namespace { constexpr int SGW_STATE_CONTEXT_HT_MAX_SIZE = 512; -constexpr int MAX_PREDEFINED_PCC_RULES_HT_SIZE = 32; constexpr char S11_BEARER_CONTEXT_INFO_HT_NAME[] = "s11_bearer_context_information_htbl"; constexpr char SPGW_STATE_TABLE_NAME[] = "spgw_state"; diff --git a/lte/gateway/c/core/oai/tasks/sgw_s8/sgw_s8_state_manager.hpp b/lte/gateway/c/core/oai/tasks/sgw_s8/sgw_s8_state_manager.hpp index 7b617af713f6..fdbb8f59637b 100644 --- a/lte/gateway/c/core/oai/tasks/sgw_s8/sgw_s8_state_manager.hpp +++ b/lte/gateway/c/core/oai/tasks/sgw_s8/sgw_s8_state_manager.hpp @@ -21,7 +21,6 @@ limitations under the License. 
namespace { constexpr int SGW_STATE_CONTEXT_HT_MAX_SIZE = 512; -constexpr int MAX_PREDEFINED_PCC_RULES_HT_SIZE = 32; constexpr char S11_BEARER_CONTEXT_INFO_HT_NAME[] = "s11_bearer_context_information_htbl"; constexpr char SGW_STATE_TABLE_NAME[] = "sgw_state"; diff --git a/lte/gateway/c/core/oai/test/amf/test_amf_encode_decode.cpp b/lte/gateway/c/core/oai/test/amf/test_amf_encode_decode.cpp index 42c2c5b8c4d4..912704664ab0 100644 --- a/lte/gateway/c/core/oai/test/amf/test_amf_encode_decode.cpp +++ b/lte/gateway/c/core/oai/test/amf/test_amf_encode_decode.cpp @@ -1576,6 +1576,74 @@ TEST(test_optional_pdu, test_pdu_session_accept_optional) { .pdu_session_estab_accept.authorized_qosrules); } +TEST(test_protocol_configuration_options, test_protocol_configuration_options) { + uint32_t bytes = 0; + uint32_t container_len = 0; + bstring buffer; + amf_nas_message_t msg = {}; + protocol_configuration_options_t* msg_accept_pco = nullptr; + protocol_configuration_options_t* decode_msg_accept_pco = nullptr; + + // downlink nas transport(pdu session accept) + uint8_t pdu[82] = { + 0x7e, 0x00, 0x68, 0x01, 0x00, 0x4a, 0x2e, 0x01, 0x01, 0xc2, 0x11, 0x00, + 0x09, 0x02, 0x00, 0x06, 0x31, 0x31, 0x01, 0x01, 0x02, 0x09, 0x06, 0x0a, + 0x00, 0x01, 0x0a, 0x00, 0x01, 0x29, 0x05, 0x01, 0x05, 0x05, 0x05, 0x1e, + 0x22, 0x04, 0x03, 0x03, 0x06, 0x09, 0x79, 0x00, 0x06, 0x09, 0x20, 0x41, + 0x01, 0x01, 0x09, 0x7b, 0x00, 0x0f, 0x80, 0x00, 0x0c, 0x04, 0xb7, 0x10, + 0x8b, 0x32, 0x00, 0x0d, 0x04, 0xb7, 0x10, 0x8b, 0x32, 0x25, 0x09, 0x08, + 0x49, 0x4e, 0x54, 0x45, 0x52, 0x4e, 0x45, 0x54, 0x12, 0x01}; + + uint32_t len = sizeof(pdu) / sizeof(uint8_t); + + NAS5GPktSnapShot nas5g_pkt_snap; + DLNASTransportMsg pdu_sess_accept; + int decode_res = 0; + memset(&pdu_sess_accept, 0, sizeof(DLNASTransportMsg)); + SmfMsg* smf_msg = &pdu_sess_accept.payload_container.smf_msg; + + msg_accept_pco = + &(smf_msg->msg.pdu_session_estab_accept.protocolconfigurationoptions.pco); + decode_res = + 
pdu_sess_accept.DecodeDLNASTransportMsg(&pdu_sess_accept, pdu, len); + + EXPECT_GT(decode_res, 0); + + buffer = bfromcstralloc(len, "\0"); + bytes = pdu_sess_accept.EncodeDLNASTransportMsg(&pdu_sess_accept, + buffer->data, len); + EXPECT_GT(bytes, 0); + DLNASTransportMsg decode_pdu_sess_accept; + memset(&decode_pdu_sess_accept, 0, sizeof(DLNASTransportMsg)); + decode_res = decode_pdu_sess_accept.DecodeDLNASTransportMsg( + &decode_pdu_sess_accept, pdu, len); + SmfMsg* decode_smf_msg = &decode_pdu_sess_accept.payload_container.smf_msg; + + decode_msg_accept_pco = &(decode_smf_msg->msg.pdu_session_estab_accept + .protocolconfigurationoptions.pco); + + EXPECT_EQ(msg_accept_pco->num_protocol_or_container_id, + decode_msg_accept_pco->num_protocol_or_container_id); + EXPECT_EQ(msg_accept_pco->protocol_or_container_ids[0].id, + decode_msg_accept_pco->protocol_or_container_ids[0].id); + EXPECT_EQ(msg_accept_pco->protocol_or_container_ids[0].length, + decode_msg_accept_pco->protocol_or_container_ids[0].length); + EXPECT_EQ(msg_accept_pco->protocol_or_container_ids[1].id, + decode_msg_accept_pco->protocol_or_container_ids[1].id); + EXPECT_EQ(msg_accept_pco->protocol_or_container_ids[1].length, + decode_msg_accept_pco->protocol_or_container_ids[1].length); + + bdestroy(buffer); + + // Clean up the PCO contents + sm_free_protocol_configuration_options(&decode_msg_accept_pco); + // Clean up the PCO contents + sm_free_protocol_configuration_options(&msg_accept_pco); + + bdestroy(smf_msg->msg.pdu_session_estab_accept.authorized_qosrules); + bdestroy(decode_smf_msg->msg.pdu_session_estab_accept.authorized_qosrules); +} + TEST(test_PDUAddressMsg, test_pdu_session_accept_optional_addressinfo) { paa_t pa = {}; pa.pdn_type = IPv4; @@ -1897,5 +1965,43 @@ TEST(test_qos_rules, test_qos_rules) { qosrules.qos_rule[0].new_qos_rule_pkt_filter[0].contents[0], decoded_qosrules.qos_rule[0].new_qos_rule_pkt_filter[0].contents[0]); } +TEST(test_network_feature, test_network_feature) { + 
NetworkFeatureSupportMsg networkfeature; + NetworkFeatureSupportMsg decoded_networkfeature; + networkfeature.iei = 0x21; + uint8_t iei = networkfeature.iei; + + networkfeature.len = 2; + networkfeature.IMS_VoPS_3GPP = 1; + networkfeature.IMS_VoPS_N3GPP = 0; + networkfeature.EMC = 2; + networkfeature.EMF = 3; + networkfeature.IWK_N26 = 1; + networkfeature.MPSI = 0; + networkfeature.EMCN3 = 1; + networkfeature.MCSI = 0; + + uint8_t network_feature_buffer[4096]; + + int encoded_network_feature = networkfeature.EncodeNetworkFeatureSupportMsg( + &networkfeature, iei, network_feature_buffer, 4096); + EXPECT_EQ(encoded_network_feature, 4); + + int decoded_network_feature = networkfeature.DecodeNetworkFeatureSupportMsg( + &decoded_networkfeature, iei, network_feature_buffer, 4096); + EXPECT_EQ(decoded_network_feature, 4); + + EXPECT_EQ(networkfeature.iei, decoded_networkfeature.iei); + EXPECT_EQ(networkfeature.len, decoded_networkfeature.len); + EXPECT_EQ(networkfeature.IMS_VoPS_3GPP, decoded_networkfeature.IMS_VoPS_3GPP); + EXPECT_EQ(networkfeature.IMS_VoPS_N3GPP, + decoded_networkfeature.IMS_VoPS_N3GPP); + EXPECT_EQ(networkfeature.EMC, decoded_networkfeature.EMC); + EXPECT_EQ(networkfeature.EMF, decoded_networkfeature.EMF); + EXPECT_EQ(networkfeature.IWK_N26, decoded_networkfeature.IWK_N26); + EXPECT_EQ(networkfeature.MPSI, decoded_networkfeature.MPSI); + EXPECT_EQ(networkfeature.EMCN3, decoded_networkfeature.EMCN3); + EXPECT_EQ(networkfeature.MCSI, decoded_networkfeature.MCSI); +} } // namespace magma5g diff --git a/lte/gateway/c/core/oai/test/lib/BUILD.bazel b/lte/gateway/c/core/oai/test/lib/BUILD.bazel index 9d82160e5c5f..364815fd92c2 100644 --- a/lte/gateway/c/core/oai/test/lib/BUILD.bazel +++ b/lte/gateway/c/core/oai/test/lib/BUILD.bazel @@ -37,3 +37,15 @@ cc_test( "@com_google_googletest//:gtest_main", ], ) + +cc_test( + name = "lib_ula_subdata_test", + size = "small", + srcs = [ + "test_ula_subData.cpp", + ], + deps = [ + "//lte/gateway/c/core:lib_agw_of", + 
"@com_google_googletest//:gtest_main", + ], +) diff --git a/lte/gateway/c/core/oai/test/lib/CMakeLists.txt b/lte/gateway/c/core/oai/test/lib/CMakeLists.txt index 769b3766b254..d72904c8b493 100644 --- a/lte/gateway/c/core/oai/test/lib/CMakeLists.txt +++ b/lte/gateway/c/core/oai/test/lib/CMakeLists.txt @@ -23,4 +23,8 @@ add_test(test_bstr bstr_test) add_executable(3gpp_test test_3gpp.cpp) target_link_libraries(3gpp_test LIB_3GPP gmock_main gtest gtest_main gmock) -add_test(test_3gpp 3gpp_test) \ No newline at end of file +add_test(test_3gpp 3gpp_test) + +add_executable(ula_sub_data_test test_ula_subData.cpp) +target_link_libraries(ula_sub_data_test LIB_STORE LIB_S6A_PROXY COMMON TASK_MME_APP gmock_main gtest gtest_main gmock) +add_test(test_ula_subdata ula_sub_data_test) diff --git a/lte/gateway/c/core/oai/test/lib/test_ula_subData.cpp b/lte/gateway/c/core/oai/test/lib/test_ula_subData.cpp new file mode 100644 index 000000000000..13694ffebc1a --- /dev/null +++ b/lte/gateway/c/core/oai/test/lib/test_ula_subData.cpp @@ -0,0 +1,153 @@ +/* + * Copyright 2022 The Magma Authors. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include +#include + +#include "feg/protos/s6a_proxy.pb.h" +#include "lte/gateway/c/core/oai/lib/s6a_proxy/S6aClient.hpp" + +extern "C" { +#include "lte/gateway/c/core/oai/include/mme_config.h" +} + +namespace magma { + +class ULA2SubDataTest : public ::testing::Test { + virtual void SetUp() { mme_config_init(&mme_config); } + + virtual void TearDown() { free_mme_config(&mme_config); } +}; + +void create_ula_object( + magma::feg::UpdateLocationAnswer *ula_object, + google::protobuf::uint32 context_id, std::string service_selection, + google::protobuf::int32 class_id, google::protobuf::uint32 priority_level, + bool preemption_capability, bool preemption_vulnerability, + + std::string apn_name, std::string gateway_mac, std::string gateway_ip, + google::protobuf::uint32 vlan_id, + + google::protobuf::uint32 max_bandwidth_ul, + google::protobuf::uint32 max_bandwidth_dl, apn_ambr_bitrate_unit_t unit, + + pdu_session_type_e pdn, + + std::string served_party_ip_address) { + // create UpdateLocationAnswer object + + magma::feg::UpdateLocationAnswer_APNConfiguration *apn_config = + ula_object->add_apn(); + apn_config->set_context_id(context_id); + apn_config->set_service_selection(service_selection); + auto qos_profile_msg = apn_config->mutable_qos_profile(); + qos_profile_msg->set_class_id(class_id); + qos_profile_msg->set_priority_level(priority_level); + qos_profile_msg->set_preemption_capability(preemption_capability); + qos_profile_msg->set_preemption_vulnerability(preemption_vulnerability); + + auto ambr_msg = apn_config->mutable_ambr(); + ambr_msg->set_max_bandwidth_ul(max_bandwidth_ul); + ambr_msg->set_max_bandwidth_dl(max_bandwidth_dl); + ambr_msg->set_unit( + (magma::feg:: + UpdateLocationAnswer_AggregatedMaximumBitrate_BitrateUnitsAMBR)unit); + + apn_config->set_pdn( + (magma::feg::UpdateLocationAnswer_APNConfiguration_PDNType)pdn); + + auto resource_msg = apn_config->mutable_resource(); + resource_msg->set_apn_name(apn_name); + 
resource_msg->set_gateway_ip(gateway_ip); + resource_msg->set_gateway_mac(gateway_mac); + resource_msg->set_vlan_id(vlan_id); + + apn_config->add_served_party_ip_address(served_party_ip_address); +} +void initSubscriber(magma::lte::SubscriberData *sub_data) { + // initialize subscriberData object + auto sub_id = sub_data->mutable_sid(); + sub_id->set_id("IMSI123123123"); + sub_id->set_type(magma::lte::SubscriberID::IMSI); +} + +TEST(ULA2SubDataTest, TestULAallFields) { + magma::feg::UpdateLocationAnswer ula_object = + magma::feg::UpdateLocationAnswer(); + magma::lte::SubscriberData sub_data = magma::lte::SubscriberData(); + + google::protobuf::uint32 context_id = 1; + std::string service_selection = "abc"; + + google::protobuf::int32 class_id = 1; + google::protobuf::uint32 priority_level = 2; + bool preemption_capability = true; + bool preemption_vulnerability = false; + + std::string apn_name = "apn_name"; + std::string gateway_mac = "A:B:C:D"; + std::string gateway_ip = "0.0.0.0"; + google::protobuf::uint32 vlan_id = 123; + + google::protobuf::uint32 max_bandwidth_ul = 200000; + google::protobuf::uint32 max_bandwidth_dl = 100000; + apn_ambr_bitrate_unit_t unit = BPS; + + pdu_session_type_e pdn = IPV4; + + std::string served_party_ip_address = "123.123.123.123"; + + create_ula_object(&ula_object, context_id, service_selection, class_id, + priority_level, preemption_capability, + preemption_vulnerability, apn_name, gateway_mac, gateway_ip, + vlan_id, max_bandwidth_ul, max_bandwidth_dl, unit, pdn, + served_party_ip_address); + initSubscriber(&sub_data); + + // call data converting function + + S6aClient::convert_ula_to_subscriber_data(ula_object, &sub_data); + + // test equality for each of the fields in the subscriberdata object + + EXPECT_EQ(sub_data.non_3gpp().apn_config(0).context_id(), context_id); + EXPECT_EQ(sub_data.non_3gpp().apn_config(0).service_selection(), + service_selection); + EXPECT_EQ(sub_data.non_3gpp().apn_config(0).qos_profile().class_id(), + 
class_id); + EXPECT_EQ(sub_data.non_3gpp().apn_config(0).qos_profile().priority_level(), + priority_level); + EXPECT_EQ( + sub_data.non_3gpp().apn_config(0).qos_profile().preemption_capability(), + preemption_capability); + EXPECT_EQ(sub_data.non_3gpp() + .apn_config(0) + .qos_profile() + .preemption_vulnerability(), + preemption_vulnerability); + EXPECT_EQ(sub_data.non_3gpp().apn_config(0).ambr().max_bandwidth_dl(), + max_bandwidth_dl); + EXPECT_EQ(sub_data.non_3gpp().apn_config(0).ambr().max_bandwidth_ul(), + max_bandwidth_ul); + EXPECT_EQ(sub_data.non_3gpp().apn_config(0).ambr().br_unit(), unit); + EXPECT_EQ(sub_data.non_3gpp().apn_config(0).pdn(), + (magma::lte::APNConfiguration_PDNType)pdn); + EXPECT_EQ(sub_data.non_3gpp().apn_config(0).resource().apn_name(), apn_name); + EXPECT_EQ(sub_data.non_3gpp().apn_config(0).resource().gateway_ip(), + gateway_ip); + EXPECT_EQ(sub_data.non_3gpp().apn_config(0).resource().gateway_mac(), + gateway_mac); + EXPECT_EQ(sub_data.non_3gpp().apn_config(0).resource().vlan_id(), vlan_id); + EXPECT_EQ(sub_data.non_3gpp().apn_config(0).assigned_static_ip(), + served_party_ip_address); +} +} // namespace magma diff --git a/lte/gateway/c/core/oai/test/s1ap_task/mock_s1ap_op.cpp b/lte/gateway/c/core/oai/test/s1ap_task/mock_s1ap_op.cpp index 1632d538bcc1..f9978ce2d05f 100644 --- a/lte/gateway/c/core/oai/test/s1ap_task/mock_s1ap_op.cpp +++ b/lte/gateway/c/core/oai/test/s1ap_task/mock_s1ap_op.cpp @@ -20,9 +20,7 @@ #include #include "lte/gateway/c/core/oai/tasks/s1ap/s1ap_state_converter.hpp" - -using magma::lte::oai::S1apState; -using magma::lte::oai::UeDescription; +#include "lte/gateway/c/core/oai/tasks/s1ap/s1ap_mme.hpp" namespace magma { namespace lte { @@ -52,14 +50,14 @@ std::vector load_file_into_vector_of_line_content( // samples status_code_e mock_read_s1ap_ue_state_db( const std::vector& ue_samples) { - hash_table_ts_t* state_ue_ht = get_s1ap_ue_state(); - if (!state_ue_ht) { + map_uint64_ue_description_t* state_ue_map = 
get_s1ap_ue_state(); + if (!state_ue_map) { std::cerr << "Cannot get S1AP UE State" << std::endl; return RETURNerror; } for (const auto& name_of_sample_file : ue_samples) { - UeDescription ue_proto = UeDescription(); + oai::UeDescription ue_proto = oai::UeDescription(); std::fstream input(name_of_sample_file.c_str(), std::ios::in | std::ios::binary); if (!ue_proto.ParseFromIstream(&input)) { @@ -68,17 +66,21 @@ status_code_e mock_read_s1ap_ue_state_db( return RETURNerror; } - ue_description_t* ue_context_p = reinterpret_cast( - calloc(1, sizeof(ue_description_t))); + oai::UeDescription* ue_context_p = new oai::UeDescription(); + if (!ue_context_p) { + std::cerr << "Failed to allocate memory for ue_context_p" << std::endl; + return RETURNerror; + } S1apStateConverter::proto_to_ue(ue_proto, ue_context_p); - hashtable_rc_t h_rc = - hashtable_ts_insert(state_ue_ht, ue_context_p->comp_s1ap_id, - reinterpret_cast(ue_context_p)); + proto_map_rc_t rc = state_ue_map->insert( + ue_context_p->comp_s1ap_id(), + reinterpret_cast(ue_context_p)); - if (HASH_TABLE_OK != h_rc) { + if (rc != magma::PROTO_MAP_OK) { std::cerr << "Failed to insert UE state :" << name_of_sample_file << std::endl; + free_ue_description(reinterpret_cast(&ue_context_p)); return RETURNerror; } } @@ -91,7 +93,7 @@ status_code_e mock_read_s1ap_state_db( const std::string& file_name_state_sample) { s1ap_state_t* state_cache_p = get_s1ap_state(false); - S1apState state_proto = S1apState(); + oai::S1apState state_proto = oai::S1apState(); std::ifstream input(file_name_state_sample.c_str(), std::ios::in | std::ios::binary); @@ -164,4 +166,4 @@ std::string decode_msg(const std::vector& encoded_msg) { } } // namespace lte -} // namespace magma \ No newline at end of file +} // namespace magma diff --git a/lte/gateway/c/core/oai/test/s1ap_task/s1ap_mme_test_utils.cpp b/lte/gateway/c/core/oai/test/s1ap_task/s1ap_mme_test_utils.cpp index 040bbca40429..fceb16c626e9 100644 --- 
a/lte/gateway/c/core/oai/test/s1ap_task/s1ap_mme_test_utils.cpp +++ b/lte/gateway/c/core/oai/test/s1ap_task/s1ap_mme_test_utils.cpp @@ -17,7 +17,6 @@ extern "C" { #include "lte/gateway/c/core/oai/lib/3gpp/3gpp_36.401.h" -#include "lte/gateway/c/core/oai/lib/hashtable/hashtable.h" #include "lte/gateway/c/core/oai/common/common_types.h" #include "lte/gateway/c/core/oai/common/conversions.h" #include "lte/gateway/c/core/oai/common/itti_free_defined_msg.h" @@ -31,6 +30,8 @@ extern "C" { namespace magma { namespace lte { +using oai::S1apUeState; +using oai::UeDescription; task_zmq_ctx_t task_zmq_ctx_main_s1ap; status_code_e setup_new_association(s1ap_state_t* state, @@ -429,16 +430,20 @@ bool is_num_enbs_valid(s1ap_state_t* state, uint32_t expected_num_enbs) { } bool is_ue_state_valid(sctp_assoc_id_t assoc_id, enb_ue_s1ap_id_t enb_ue_id, - enum s1_ue_state_s expected_ue_state) { - ue_description_t* ue = nullptr; - hash_table_ts_t* ue_ht = S1apStateManager::getInstance().get_ue_state_ht(); + enum S1apUeState expected_ue_state) { + UeDescription* ue = nullptr; + map_uint64_ue_description_t* state_ue_map = get_s1ap_ue_state(); + if (!state_ue_map) { + std::cerr << "Failed to get s1ap_ue_state" << std::endl; + return false; + } uint64_t comp_s1ap_id = S1AP_GENERATE_COMP_S1AP_ID(assoc_id, enb_ue_id); - hashtable_rc_t ht_rc = hashtable_ts_get(ue_ht, (const hash_key_t)comp_s1ap_id, - reinterpret_cast(&ue)); - if (ht_rc != HASH_TABLE_OK) { + + magma::proto_map_rc_t rc = state_ue_map->get(comp_s1ap_id, &ue); + if (rc != magma::PROTO_MAP_OK) { return false; } - return ue->s1_ue_state == expected_ue_state ? true : false; + return ue->s1ap_ue_state() == expected_ue_state ? 
true : false; } status_code_e simulate_pdu_s1_message(uint8_t* bytes, long bytes_len, diff --git a/lte/gateway/c/core/oai/test/s1ap_task/s1ap_mme_test_utils.h b/lte/gateway/c/core/oai/test/s1ap_task/s1ap_mme_test_utils.h index d37eff0929b4..e4d4a07aa18b 100644 --- a/lte/gateway/c/core/oai/test/s1ap_task/s1ap_mme_test_utils.h +++ b/lte/gateway/c/core/oai/test/s1ap_task/s1ap_mme_test_utils.h @@ -24,6 +24,7 @@ extern "C" { namespace magma { namespace lte { +using oai::S1apUeState; status_code_e setup_new_association(s1ap_state_t* state, sctp_assoc_id_t assoc_id); @@ -85,7 +86,7 @@ bool is_enb_state_valid(s1ap_state_t* state, sctp_assoc_id_t assoc_id, bool is_num_enbs_valid(s1ap_state_t* state, uint32_t expected_num_enbs); bool is_ue_state_valid(sctp_assoc_id_t assoc_id, enb_ue_s1ap_id_t enb_ue_id, - enum s1_ue_state_s expected_ue_state); + enum S1apUeState expected_ue_state); status_code_e simulate_pdu_s1_message(uint8_t* bytes, long bytes_len, s1ap_state_t* state, diff --git a/lte/gateway/c/core/oai/test/s1ap_task/test_s1ap_mme_handlers.cpp b/lte/gateway/c/core/oai/test/s1ap_task/test_s1ap_mme_handlers.cpp index afd9294b29ff..cf6640ad051e 100644 --- a/lte/gateway/c/core/oai/test/s1ap_task/test_s1ap_mme_handlers.cpp +++ b/lte/gateway/c/core/oai/test/s1ap_task/test_s1ap_mme_handlers.cpp @@ -420,14 +420,14 @@ TEST_F(S1apMmeHandlersTest, HandleUECapIndication) { } TEST_F(S1apMmeHandlersTest, GenerateUEContextReleaseCommand) { - ue_description_t ue_ref_p = { - .enb_ue_s1ap_id = 1, - .mme_ue_s1ap_id = 1, - .sctp_assoc_id = assoc_id, - .comp_s1ap_id = S1AP_GENERATE_COMP_S1AP_ID(assoc_id, 1)}; - - ue_ref_p.s1ap_ue_context_rel_timer.id = -1; - ue_ref_p.s1ap_ue_context_rel_timer.msec = 1000; + oai::UeDescription ue_ref_p; + ue_ref_p.set_enb_ue_s1ap_id(1); + ue_ref_p.set_mme_ue_s1ap_id(1); + ue_ref_p.set_sctp_assoc_id(assoc_id); + ue_ref_p.set_comp_s1ap_id(S1AP_GENERATE_COMP_S1AP_ID(assoc_id, 1)); + + ue_ref_p.mutable_s1ap_ue_context_rel_timer()->set_id(-1); + 
ue_ref_p.mutable_s1ap_ue_context_rel_timer()->set_msec(1000); EXPECT_CALL(*sctp_handler, sctpd_send_dl()).Times(2); EXPECT_CALL(*mme_app_handler, mme_app_handle_initial_ue_message()).Times(1); @@ -465,7 +465,7 @@ TEST_F(S1apMmeHandlersTest, GenerateUEContextReleaseCommand) { state, &ue_ref_p, S1AP_INITIAL_CONTEXT_SETUP_FAILED, INVALID_IMSI64, assoc_id, stream_id, 1, 1)); - EXPECT_NE(ue_ref_p.s1ap_ue_context_rel_timer.id, S1AP_TIMER_INACTIVE_ID); + EXPECT_NE(ue_ref_p.s1ap_ue_context_rel_timer().id(), S1AP_TIMER_INACTIVE_ID); // Freeing pdu and payload data ASN_STRUCT_FREE_CONTENTS_ONLY(asn_DEF_S1ap_S1AP_PDU, &pdu_s1); @@ -1047,7 +1047,7 @@ TEST_F(S1apMmeHandlersTest, HandleS1apHandoverCommand) { std::this_thread::sleep_for(std::chrono::milliseconds(200)); // State validation - ASSERT_TRUE(is_ue_state_valid(assoc_id, 1, S1AP_UE_HANDOVER)); + ASSERT_TRUE(is_ue_state_valid(assoc_id, 1, oai::S1AP_UE_HANDOVER)); } TEST_F(S1apMmeHandlersTest, HandleMmeHandover) { @@ -1153,7 +1153,7 @@ TEST_F(S1apMmeHandlersTest, HandleMmeHandover) { assoc_id, stream_id), RETURNok); - ASSERT_TRUE(is_ue_state_valid(assoc_id, 1, S1AP_UE_CONNECTED)); + ASSERT_TRUE(is_ue_state_valid(assoc_id, 1, oai::S1AP_UE_CONNECTED)); // Simulate Attach Complete uint8_t attach_compl_bytes[] = { @@ -1203,9 +1203,9 @@ TEST_F(S1apMmeHandlersTest, HandleMmeHandover) { // Send S1AP_HANDOVER_COMMAND mimicing MME_APP ASSERT_EQ(send_s1ap_mme_handover_command(assoc_id, 7, 1, 2, 10, 2), RETURNok); - ue_description_t* ue_ref_p = s1ap_state_get_ue_mmeid(7); + oai::UeDescription* ue_ref_p = s1ap_state_get_ue_mmeid(7); cv.wait_for(lock, std::chrono::milliseconds(1000)); - ASSERT_EQ(ue_ref_p->s1_ue_state, S1AP_UE_HANDOVER); + ASSERT_EQ(ue_ref_p->s1ap_ue_state(), oai::S1AP_UE_HANDOVER); // Simulate ENB Status Transfer uint8_t enb_transfer[] = {0x00, 0x18, 0x40, 0x24, 0x00, 0x00, 0x03, 0x00, @@ -1231,12 +1231,7 @@ TEST_F(S1apMmeHandlersTest, HandleMmeHandover) { // Free up eRAB data on target eNB ue_ref_p = 
s1ap_state_get_ue_enbid(target_assoc_id, 2); - ASSERT_EQ(ue_ref_p->s1ap_handover_state.target_enb_id, 2); - for (int i = 0; - i < ue_ref_p->s1ap_handover_state.e_rab_admitted_list.no_of_items; i++) { - bdestroy_wrapper(&ue_ref_p->s1ap_handover_state.e_rab_admitted_list.item[i] - .transport_layer_address); - } + ASSERT_EQ(ue_ref_p->s1ap_handover_state().target_enb_id(), 2); } TEST_F(S1apMmeHandlersTest, HandleMmeHandoverFailure) { @@ -1342,7 +1337,7 @@ TEST_F(S1apMmeHandlersTest, HandleMmeHandoverFailure) { assoc_id, stream_id), RETURNok); - ASSERT_TRUE(is_ue_state_valid(assoc_id, 1, S1AP_UE_CONNECTED)); + ASSERT_TRUE(is_ue_state_valid(assoc_id, 1, oai::S1AP_UE_CONNECTED)); // Simulate Attach Complete uint8_t attach_compl_bytes[] = { @@ -1487,7 +1482,7 @@ TEST_F(S1apMmeHandlersTest, HandleMmeHandoverCancel) { assoc_id, stream_id), RETURNok); - ASSERT_TRUE(is_ue_state_valid(assoc_id, 1, S1AP_UE_CONNECTED)); + ASSERT_TRUE(is_ue_state_valid(assoc_id, 1, oai::S1AP_UE_CONNECTED)); // Simulate Attach Complete uint8_t attach_compl_bytes[] = { @@ -1538,9 +1533,9 @@ TEST_F(S1apMmeHandlersTest, HandleMmeHandoverCancel) { ASSERT_EQ(send_s1ap_mme_handover_command(assoc_id, 7, 1, 2, 10, 11), RETURNok); - ue_description_t* ue_ref_p = s1ap_state_get_ue_mmeid(7); + oai::UeDescription* ue_ref_p = s1ap_state_get_ue_mmeid(7); cv.wait_for(lock, std::chrono::milliseconds(1000)); - ASSERT_EQ(ue_ref_p->s1_ue_state, S1AP_UE_HANDOVER); + ASSERT_EQ(ue_ref_p->s1ap_ue_state(), oai::S1AP_UE_HANDOVER); // Simulate Handover Cancel uint8_t hand_cancel[] = {0x00, 0x04, 0x00, 0x15, 0x00, 0x00, 0x03, 0x00, 0x00, @@ -1639,7 +1634,7 @@ TEST_F(S1apMmeHandlersTest, HandleErabSetupResponse) { assoc_id, stream_id), RETURNok); - ASSERT_TRUE(is_ue_state_valid(assoc_id, 1, S1AP_UE_CONNECTED)); + ASSERT_TRUE(is_ue_state_valid(assoc_id, 1, oai::S1AP_UE_CONNECTED)); // Simulate Attach Complete uint8_t attach_compl_bytes[] = { @@ -1765,7 +1760,7 @@ TEST_F(S1apMmeHandlersTest, HandleErrorIndicationMessage) { 
assoc_id, stream_id), RETURNok); - ASSERT_TRUE(is_ue_state_valid(assoc_id, 1, S1AP_UE_CONNECTED)); + ASSERT_TRUE(is_ue_state_valid(assoc_id, 1, oai::S1AP_UE_CONNECTED)); // Simulate Attach Complete uint8_t attach_compl_bytes[] = { @@ -1889,7 +1884,7 @@ TEST_F(S1apMmeHandlersTest, HandleEnbResetPartial) { assoc_id, stream_id), RETURNok); - ASSERT_TRUE(is_ue_state_valid(assoc_id, 1, S1AP_UE_CONNECTED)); + ASSERT_TRUE(is_ue_state_valid(assoc_id, 1, oai::S1AP_UE_CONNECTED)); // Simulate Attach Complete uint8_t attach_compl_bytes[] = { @@ -2114,7 +2109,7 @@ TEST_F(S1apMmeHandlersTest, HandlePathSwitchRequestSuccess) { RETURNok); // State validation - ASSERT_TRUE(is_ue_state_valid(assoc_id, 1, S1AP_UE_CONNECTED)); + ASSERT_TRUE(is_ue_state_valid(assoc_id, 1, oai::S1AP_UE_CONNECTED)); ASSERT_TRUE(is_enb_state_valid(state, assoc_id, S1AP_READY, 1)); // Simulate Attach Complete @@ -2170,7 +2165,7 @@ TEST_F(S1apMmeHandlersTest, HandlePathSwitchRequestSuccess) { RETURNok); // Confirm UE state update (assoc_id, enb_ue_s1ap_id) - ASSERT_TRUE(is_ue_state_valid(switch_assoc_id, 2, S1AP_UE_CONNECTED)); + ASSERT_TRUE(is_ue_state_valid(switch_assoc_id, 2, oai::S1AP_UE_CONNECTED)); // Simulate Detach request PDU payload uint8_t detach_req_bytes[] = { diff --git a/lte/gateway/c/core/oai/test/s1ap_task/test_s1ap_mme_handlers_with_injected_state.cpp b/lte/gateway/c/core/oai/test/s1ap_task/test_s1ap_mme_handlers_with_injected_state.cpp index 849afcb9ff16..09888a137e41 100644 --- a/lte/gateway/c/core/oai/test/s1ap_task/test_s1ap_mme_handlers_with_injected_state.cpp +++ b/lte/gateway/c/core/oai/test/s1ap_task/test_s1ap_mme_handlers_with_injected_state.cpp @@ -122,13 +122,14 @@ class S1apMmeHandlersWithInjectedStatesTest : public ::testing::Test { }; TEST_F(S1apMmeHandlersWithInjectedStatesTest, GenerateUEContextReleaseCommand) { - ue_description_t ue_ref_p = { - .enb_ue_s1ap_id = 1, - .mme_ue_s1ap_id = 99, - .sctp_assoc_id = assoc_id, - .comp_s1ap_id = S1AP_GENERATE_COMP_S1AP_ID(assoc_id, 
1)}; - ue_ref_p.s1ap_ue_context_rel_timer.id = -1; - ue_ref_p.s1ap_ue_context_rel_timer.msec = 1000; + oai::UeDescription ue_ref_p; + ue_ref_p.Clear(); + ue_ref_p.set_enb_ue_s1ap_id(1); + ue_ref_p.set_mme_ue_s1ap_id(99); + ue_ref_p.set_sctp_assoc_id(assoc_id); + ue_ref_p.set_comp_s1ap_id(S1AP_GENERATE_COMP_S1AP_ID(assoc_id, 1)); + ue_ref_p.mutable_s1ap_ue_context_rel_timer()->set_id(-1); + ue_ref_p.mutable_s1ap_ue_context_rel_timer()->set_msec(1000); S1ap_S1AP_PDU_t pdu_s1; memset(&pdu_s1, 0, sizeof(pdu_s1)); @@ -148,7 +149,7 @@ TEST_F(S1apMmeHandlersWithInjectedStatesTest, GenerateUEContextReleaseCommand) { state, &ue_ref_p, S1AP_INITIAL_CONTEXT_SETUP_FAILED, INVALID_IMSI64, assoc_id, stream_id, 99, 1)); - EXPECT_NE(ue_ref_p.s1ap_ue_context_rel_timer.id, S1AP_TIMER_INACTIVE_ID); + EXPECT_NE(ue_ref_p.s1ap_ue_context_rel_timer().id(), S1AP_TIMER_INACTIVE_ID); // State validation ASSERT_TRUE( diff --git a/lte/gateway/c/core/oai/test/s1ap_task/test_s1ap_state_converter.cpp b/lte/gateway/c/core/oai/test/s1ap_task/test_s1ap_state_converter.cpp index 68fa6a9c01f4..3f6d419c79da 100644 --- a/lte/gateway/c/core/oai/test/s1ap_task/test_s1ap_state_converter.cpp +++ b/lte/gateway/c/core/oai/test/s1ap_task/test_s1ap_state_converter.cpp @@ -112,7 +112,7 @@ TEST_F(S1APStateConverterTest, S1apStateConversionExpectedEnbCount) { // Inserting 1 enb association init_state->enbs.insert(enb_association->sctp_assoc_id, enb_association); // state_to_proto should update num_enbs to match expected eNB count on the - // hashtable + // map init_state->num_enbs = 5; oai::S1apState state_proto; @@ -127,31 +127,30 @@ TEST_F(S1APStateConverterTest, S1apStateConversionExpectedEnbCount) { } TEST_F(S1APStateConverterTest, S1apStateConversionUeContext) { - ue_description_t* ue = (ue_description_t*)calloc(1, sizeof(ue_description_t)); - ue_description_t* final_ue = - (ue_description_t*)calloc(1, sizeof(ue_description_t)); + oai::UeDescription* ue = new oai::UeDescription(); + oai::UeDescription* 
final_ue = new oai::UeDescription(); // filling with test values - ue->mme_ue_s1ap_id = 1; - ue->enb_ue_s1ap_id = 1; - ue->sctp_assoc_id = 1; - ue->comp_s1ap_id = S1AP_GENERATE_COMP_S1AP_ID(1, 1); - ue->s1ap_handover_state.mme_ue_s1ap_id = 1; - ue->s1ap_handover_state.source_enb_id = 1; - ue->s1ap_ue_context_rel_timer.id = 1; - ue->s1ap_ue_context_rel_timer.msec = 1000; + ue->set_mme_ue_s1ap_id(1); + ue->set_enb_ue_s1ap_id(1); + ue->set_sctp_assoc_id(1); + ue->set_comp_s1ap_id(S1AP_GENERATE_COMP_S1AP_ID(1, 1)); + ue->mutable_s1ap_handover_state()->set_mme_ue_s1ap_id(1); + ue->mutable_s1ap_handover_state()->set_source_enb_id(1); + ue->mutable_s1ap_ue_context_rel_timer()->set_id(1); + ue->mutable_s1ap_ue_context_rel_timer()->set_msec(1000); oai::UeDescription ue_proto; S1apStateConverter::ue_to_proto(ue, &ue_proto); S1apStateConverter::proto_to_ue(ue_proto, final_ue); - EXPECT_EQ(ue->comp_s1ap_id, final_ue->comp_s1ap_id); - EXPECT_EQ(ue->mme_ue_s1ap_id, final_ue->mme_ue_s1ap_id); - EXPECT_EQ(ue->s1ap_ue_context_rel_timer.id, - final_ue->s1ap_ue_context_rel_timer.id); + EXPECT_EQ(ue->comp_s1ap_id(), final_ue->comp_s1ap_id()); + EXPECT_EQ(ue->mme_ue_s1ap_id(), final_ue->mme_ue_s1ap_id()); + EXPECT_EQ(ue->s1ap_ue_context_rel_timer().id(), + final_ue->s1ap_ue_context_rel_timer().id()); - free_wrapper((void**)&ue); - free_wrapper((void**)&final_ue); + delete ue; + delete final_ue; } } // namespace lte diff --git a/lte/gateway/c/core/oai/test/spgw_task/spgw_procedures_test_fixture.cpp b/lte/gateway/c/core/oai/test/spgw_task/spgw_procedures_test_fixture.cpp index 0548c11cd784..1a3c2324f21d 100644 --- a/lte/gateway/c/core/oai/test/spgw_task/spgw_procedures_test_fixture.cpp +++ b/lte/gateway/c/core/oai/test/spgw_task/spgw_procedures_test_fixture.cpp @@ -26,6 +26,7 @@ extern "C" { #include "lte/gateway/c/core/oai/lib/3gpp/3gpp_23.401.h" } +#include "lte/gateway/c/core/oai/tasks/sgw/sgw_defs.hpp" #include "lte/gateway/c/core/oai/test/spgw_task/spgw_test_util.h" #include 
"lte/gateway/c/core/oai/include/sgw_context_manager.hpp" #include "lte/gateway/c/core/oai/tasks/sgw/pgw_handlers.hpp" diff --git a/lte/gateway/c/core/oai/test/spgw_task/spgw_test_util.h b/lte/gateway/c/core/oai/test/spgw_task/spgw_test_util.h index a811f71dfa08..14d4a1045629 100644 --- a/lte/gateway/c/core/oai/test/spgw_task/spgw_test_util.h +++ b/lte/gateway/c/core/oai/test/spgw_task/spgw_test_util.h @@ -18,10 +18,10 @@ extern "C" { #include "lte/gateway/c/core/oai/lib/itti/intertask_interface.h" #include "lte/gateway/c/core/oai/include/sgw_ie_defs.h" -#include "lte/gateway/c/core/oai/tasks/sgw/pgw_procedures.hpp" -#include "lte/gateway/c/core/oai/include/spgw_state.hpp" } +#include "lte/gateway/c/core/oai/tasks/sgw/pgw_procedures.hpp" +#include "lte/gateway/c/core/oai/include/spgw_state.hpp" #include "lte/gateway/c/core/oai/include/sgw_context_manager.hpp" namespace magma { diff --git a/lte/gateway/c/core/oai/test/spgw_task/test_spgw_procedures_dedicated_bearer.cpp b/lte/gateway/c/core/oai/test/spgw_task/test_spgw_procedures_dedicated_bearer.cpp index c2a56e6e3bc0..3206c287a531 100644 --- a/lte/gateway/c/core/oai/test/spgw_task/test_spgw_procedures_dedicated_bearer.cpp +++ b/lte/gateway/c/core/oai/test/spgw_task/test_spgw_procedures_dedicated_bearer.cpp @@ -23,13 +23,13 @@ extern "C" { #include "lte/gateway/c/core/oai/include/gx_messages_types.h" #include "lte/gateway/c/core/oai/include/ngap_messages_types.h" #include "lte/gateway/c/core/oai/include/s11_messages_types.h" -#include "lte/gateway/c/core/oai/include/spgw_state.hpp" #include "lte/gateway/c/core/oai/lib/3gpp/3gpp_23.401.h" #include "lte/gateway/c/core/oai/lib/3gpp/3gpp_24.007.h" #include "lte/gateway/c/core/oai/lib/3gpp/3gpp_24.008.h" #include "lte/gateway/c/core/oai/lib/3gpp/3gpp_29.274.h" } +#include "lte/gateway/c/core/oai/include/spgw_state.hpp" #include "lte/gateway/c/core/oai/include/sgw_context_manager.hpp" #include "lte/gateway/c/core/oai/include/spgw_types.hpp" #include 
"lte/gateway/c/core/oai/test/spgw_task/spgw_test_util.h" diff --git a/lte/gateway/c/core/oai/test/spgw_task/test_spgw_procedures_session.cpp b/lte/gateway/c/core/oai/test/spgw_task/test_spgw_procedures_session.cpp index 7b66372b8df0..42f966180128 100644 --- a/lte/gateway/c/core/oai/test/spgw_task/test_spgw_procedures_session.cpp +++ b/lte/gateway/c/core/oai/test/spgw_task/test_spgw_procedures_session.cpp @@ -25,6 +25,8 @@ #include "lte/gateway/c/core/oai/tasks/sgw/sgw_handlers.hpp" #include "lte/gateway/c/core/oai/include/sgw_context_manager.hpp" #include "lte/gateway/c/core/oai/include/spgw_types.hpp" +#include "lte/gateway/c/core/oai/include/spgw_state.hpp" +#include "lte/gateway/c/core/oai/lib/3gpp/3gpp_23.401.h" extern "C" { #include "lte/gateway/c/core/common/common_defs.h" @@ -34,8 +36,6 @@ extern "C" { #include "lte/gateway/c/core/oai/include/ip_forward_messages_types.h" #include "lte/gateway/c/core/oai/include/ngap_messages_types.h" #include "lte/gateway/c/core/oai/include/s11_messages_types.h" -#include "lte/gateway/c/core/oai/include/spgw_state.hpp" -#include "lte/gateway/c/core/oai/lib/3gpp/3gpp_23.401.h" #include "lte/gateway/c/core/oai/lib/3gpp/3gpp_29.274.h" } diff --git a/lte/gateway/c/core/oai/test/spgw_task/test_spgw_state_converter.cpp b/lte/gateway/c/core/oai/test/spgw_task/test_spgw_state_converter.cpp index 56c6f465342a..f27f35b4670a 100644 --- a/lte/gateway/c/core/oai/test/spgw_task/test_spgw_state_converter.cpp +++ b/lte/gateway/c/core/oai/test/spgw_task/test_spgw_state_converter.cpp @@ -20,9 +20,9 @@ #include "lte/gateway/c/core/oai/test/spgw_task/state_creators.hpp" #include "lte/gateway/c/core/oai/tasks/sgw/sgw_defs.hpp" #include "lte/protos/oai/mme_nas_state.pb.h" +#include "lte/gateway/c/core/oai/include/spgw_state.hpp" extern "C" { -#include "lte/gateway/c/core/oai/include/spgw_state.hpp" #include "lte/gateway/c/core/oai/lib/message_utils/ie_to_bytes.h" } diff --git a/lte/gateway/c/li_agent/src/BUILD.bazel 
b/lte/gateway/c/li_agent/src/BUILD.bazel index 667563a64def..53ca7686f4c9 100644 --- a/lte/gateway/c/li_agent/src/BUILD.bazel +++ b/lte/gateway/c/li_agent/src/BUILD.bazel @@ -16,6 +16,7 @@ package(default_visibility = ["//lte/gateway/c/li_agent/src:__subpackages__"]) cc_binary( name = "liagentd", srcs = ["main.cpp"], + visibility = ["//lte/gateway/release:__pkg__"], deps = [ ":interface_monitor", "//orc8r/gateway/c/common/config:mconfig_loader", diff --git a/lte/gateway/c/session_manager/BUILD.bazel b/lte/gateway/c/session_manager/BUILD.bazel index 40b055035712..f84d38a6a7f2 100644 --- a/lte/gateway/c/session_manager/BUILD.bazel +++ b/lte/gateway/c/session_manager/BUILD.bazel @@ -427,6 +427,7 @@ cc_binary( # From bazel doc: this flag makes it so all user libraries are linked statically (if a static version is available), # but where system libraries (excluding C/C++ runtime libraries) are linked dynamically linkstatic = True, + visibility = ["//lte/gateway/release:__pkg__"], deps = [ ":operational_states_handler", ":policy_loader", diff --git a/lte/gateway/configs/BUILD.bazel b/lte/gateway/configs/BUILD.bazel new file mode 100644 index 000000000000..f366bf171624 --- /dev/null +++ b/lte/gateway/configs/BUILD.bazel @@ -0,0 +1,72 @@ +# Copyright 2022 The Magma Authors. + +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +load("@rules_pkg//pkg:mappings.bzl", "pkg_filegroup", "pkg_files", "pkg_mklink") + +pkg_filegroup( + name = "magma_config_files", + srcs = [ + ":lte_configs", + ":pipelined_symlink", + ":sessiond_symlink", + ], + visibility = ["//lte/gateway/release:__pkg__"], +) + +pkg_mklink( + name = "pipelined_symlink", + link_name = "/etc/magma/pipelined.yml", + target = "/etc/magma/pipelined.yml_prod", +) + +pkg_mklink( + name = "sessiond_symlink", + link_name = "/etc/magma/sessiond.yml", + target = "/etc/magma/sessiond.yml_prod", +) + +# The files pipelined.yml and sessiond.yml are not packaged for a production .deb. +# Symbolic links are created to pipelined.yml_prod and sessiond.yml_prod. +# TODO control_proxy.yml is packaged as default - but is a possible parameter. +pkg_files( + name = "lte_configs", + srcs = [ + ":control_proxy.yml", + ":ctraced.yml", + ":directoryd.yml", + ":dnsd.yml", + ":dnsmasq.conf", + ":enodebd.yml", + ":eventd.yml", + ":gateway.mconfig", + ":health.yml", + ":kernsnoopd.yml", + ":liagentd.yml", + ":lighttpd.yml", + ":logfiles.txt", + ":magmad.yml", + ":mme.yml", + ":mobilityd.yml", + ":monitord.yml", + ":pipelined.yml_prod", + ":policydb.yml", + ":redirectd.yml", + ":redis.yml", + ":sctpd.yml", + ":service_registry.yml", + ":sessiond.yml_prod", + ":smsd.yml", + ":spgw.yml", + ":state.yml", + ":streamer.yml", + ":subscriberdb.yml", + ], +) diff --git a/lte/gateway/configs/templates/BUILD.bazel b/lte/gateway/configs/templates/BUILD.bazel new file mode 100644 index 000000000000..d380c3e78f7c --- /dev/null +++ b/lte/gateway/configs/templates/BUILD.bazel @@ -0,0 +1,26 @@ +# Copyright 2022 The Magma Authors. + +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+ +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +load("@rules_pkg//pkg:mappings.bzl", "pkg_files") + +pkg_files( + name = "magma_lte_config_template_files", + srcs = [ + ":dnsd.conf.template", + ":hss_oai.json", + ":lighttpd.conf.template", + ":mme.conf.template", + ":mme_fd.conf.template", + ":spgw.conf.template", + ], + prefix = "templates", + visibility = ["//lte/gateway/release:__pkg__"], +) diff --git a/lte/gateway/deploy/agw_install_docker.sh b/lte/gateway/deploy/agw_install_docker.sh index 179b9dec65ae..59514c164055 100755 --- a/lte/gateway/deploy/agw_install_docker.sh +++ b/lte/gateway/deploy/agw_install_docker.sh @@ -79,7 +79,8 @@ EOF fi alias python=python3 - pip3 install ansible + # TODO GH13915 pinned for now because of breaking change in ansible-core 2.13.4 + pip3 install ansible==5.0.1 rm -rf /opt/magma/ git clone "${GIT_URL}" /opt/magma diff --git a/lte/gateway/deploy/agwc-helm-charts/README.md b/lte/gateway/deploy/agwc-helm-charts/README.md index c0214df46bee..e0eeb95e4bc2 100644 --- a/lte/gateway/deploy/agwc-helm-charts/README.md +++ b/lte/gateway/deploy/agwc-helm-charts/README.md @@ -1,4 +1,8 @@ -# AGW Helm Deployment +# AGW Helm Deployment - Experimental + +This folder contains a Helm Chart for the containerized AGW. + +This is currently not in a working state. 
## Configuration diff --git a/lte/gateway/deploy/magma_deb.yml b/lte/gateway/deploy/magma_deb.yml index 362975869c5f..8db02a7f40dd 100644 --- a/lte/gateway/deploy/magma_deb.yml +++ b/lte/gateway/deploy/magma_deb.yml @@ -18,3 +18,4 @@ roles: - role: magma_deb + - role: service_aliases diff --git a/lte/gateway/deploy/magma_dev_focal.yml b/lte/gateway/deploy/magma_dev_focal.yml index 25654266cd71..7090f560c115 100644 --- a/lte/gateway/deploy/magma_dev_focal.yml +++ b/lte/gateway/deploy/magma_dev_focal.yml @@ -43,6 +43,7 @@ - role: bazel - role: fluent_bit - role: pyvenv + - role: service_aliases tasks: # Only run installation for docker diff --git a/lte/gateway/deploy/roles/magma/files/BUILD.bazel b/lte/gateway/deploy/roles/magma/files/BUILD.bazel new file mode 100644 index 000000000000..0f2c7fc8ff04 --- /dev/null +++ b/lte/gateway/deploy/roles/magma/files/BUILD.bazel @@ -0,0 +1,93 @@ +# Copyright 2022 The Magma Authors. + +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +load("@rules_pkg//pkg:mappings.bzl", "pkg_attributes", "pkg_filegroup", "pkg_files") + +pkg_files( + name = "magma_modules_load_conf", + srcs = [":magma_modules_load"], + prefix = "/etc/modules-load.d", + renames = {":magma_modules_load": "magma.conf"}, +) + +pkg_files( + name = "magma_usr_local_bin", + srcs = [ + ":configure_envoy_namespace.sh", + ":coredump", + ":magma-bridge-reset.sh", + ":magma-create-gtp-port.sh", + ":magma-setup-wg.sh", + ":ovs-kmod-upgrade.sh", + ":set_irq_affinity", + ], + attributes = pkg_attributes(mode = "0755"), + prefix = "/usr/local/bin", +) + +pkg_files( + name = "magma_envoy_yaml", + srcs = [":envoy.yaml"], + prefix = "/var/opt/magma", +) + +pkg_files( + name = "magma_logrotate", + srcs = [ + ":logrotate_oai.conf", + ":logrotate_rsyslog.conf", + ], + prefix = "/etc/logrotate.d", + renames = { + ":logrotate_oai.conf": "oai", + ":logrotate_rsyslog.conf": "rsyslog.magma", + }, +) + +pkg_files( + name = "magma_local_cdn", + srcs = [":local-cdn/index.html"], + prefix = "/var/www/local-cdn", +) + +pkg_files( + name = "magma_99_magma_conf", + srcs = [":99-magma.conf"], + prefix = "/etc/sysctl.d", +) + +pkg_files( + name = "magma_magma_ifaces_gtp", + srcs = [":magma_ifaces_gtp"], + prefix = "/etc/network/interfaces.d", + renames = {":magma_ifaces_gtp": "gtp"}, +) + +pkg_files( + name = "magma_20auto_upgrades", + srcs = [":20auto-upgrades"], + prefix = "/etc/apt/apt.conf.d", +) + +pkg_filegroup( + name = "ansible_configs", + srcs = [ + ":magma_20auto_upgrades", + ":magma_99_magma_conf", + ":magma_envoy_yaml", + ":magma_local_cdn", + ":magma_logrotate", + ":magma_magma_ifaces_gtp", + ":magma_modules_load_conf", + ":magma_usr_local_bin", + ], + visibility = ["//lte/gateway/release:__pkg__"], +) diff --git a/lte/gateway/deploy/roles/magma/files/nx_actions.py b/lte/gateway/deploy/roles/magma/files/nx_actions.py deleted file mode 100644 index 32acd38efe6c..000000000000 --- a/lte/gateway/deploy/roles/magma/files/nx_actions.py +++ /dev/null @@ 
-1,3449 +0,0 @@ -# Copyright (C) 2015 Nippon Telegraph and Telephone Corporation. -# Copyright (C) 2015 YAMAMOTO Takashi -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import struct - -import six -from ryu import utils -from ryu.lib import type_desc -from ryu.lib.pack_utils import msg_pack_into -from ryu.ofproto import nicira_ext, ofproto_common -from ryu.ofproto.ofproto_parser import StringifyMixin - - -def generate(ofp_name, ofpp_name): - import sys - - ofp = sys.modules[ofp_name] - ofpp = sys.modules[ofpp_name] - - class _NXFlowSpec(StringifyMixin): - _hdr_fmt_str = '!H' # 2 bit 0s, 1 bit src, 2 bit dst, 11 bit n_bits - _dst_type = None - _subclasses = {} - _TYPE = { - 'nx-flow-spec-field': [ - 'src', - 'dst', - ], - } - - def __init__(self, src, dst, n_bits): - self.src = src - self.dst = dst - self.n_bits = n_bits - - @classmethod - def register(cls, subcls): - assert issubclass(subcls, cls) - assert subcls._dst_type not in cls._subclasses - cls._subclasses[subcls._dst_type] = subcls - - @classmethod - def parse(cls, buf): - (hdr,) = struct.unpack_from(cls._hdr_fmt_str, buf, 0) - rest = buf[struct.calcsize(cls._hdr_fmt_str):] - if hdr == 0: - return None, rest # all-0 header is no-op for padding - src_type = (hdr >> 13) & 0x1 - dst_type = (hdr >> 11) & 0x3 - n_bits = hdr & 0x3ff - subcls = cls._subclasses[dst_type] - if src_type == 0: # subfield - src = cls._parse_subfield(rest) - rest = rest[6:] - elif src_type == 1: # immediate - src_len = (n_bits + 15) 
// 16 * 2 - src_bin = rest[:src_len] - src = type_desc.IntDescr(size=src_len).to_user(src_bin) - rest = rest[src_len:] - if dst_type == 0: # match - dst = cls._parse_subfield(rest) - rest = rest[6:] - elif dst_type == 1: # load - dst = cls._parse_subfield(rest) - rest = rest[6:] - elif dst_type == 2: # output - dst = '' # empty - return subcls(src=src, dst=dst, n_bits=n_bits), rest - - def serialize(self): - buf = bytearray() - if isinstance(self.src, tuple): - src_type = 0 # subfield - else: - src_type = 1 # immediate - # header - val = (src_type << 13) | (self._dst_type << 11) | self.n_bits - msg_pack_into(self._hdr_fmt_str, buf, 0, val) - # src - if src_type == 0: # subfield - buf += self._serialize_subfield(self.src) - elif src_type == 1: # immediate - src_len = (self.n_bits + 15) // 16 * 2 - buf += type_desc.IntDescr(size=src_len).from_user(self.src) - # dst - if self._dst_type == 0: # match - buf += self._serialize_subfield(self.dst) - elif self._dst_type == 1: # load - buf += self._serialize_subfield(self.dst) - elif self._dst_type == 2: # output - pass # empty - return buf - - @staticmethod - def _parse_subfield(buf): - (n, len) = ofp.oxm_parse_header(buf, 0) - assert len == 4 # only 4-bytes NXM/OXM are defined - field = ofp.oxm_to_user_header(n) - rest = buf[len:] - (ofs,) = struct.unpack_from('!H', rest, 0) - return (field, ofs) - - @staticmethod - def _serialize_subfield(subfield): - (field, ofs) = subfield - buf = bytearray() - n = ofp.oxm_from_user_header(field) - ofp.oxm_serialize_header(n, buf, 0) - assert len(buf) == 4 # only 4-bytes NXM/OXM are defined - msg_pack_into('!H', buf, 4, ofs) - return buf - - class NXFlowSpecMatch(_NXFlowSpec): - """ - Specification for adding match criterion - - This class is used by ``NXActionLearn``. - - For the usage of this class, please refer to ``NXActionLearn``. 
- - ================ ====================================================== - Attribute Description - ================ ====================================================== - src OXM/NXM header and Start bit for source field - dst OXM/NXM header and Start bit for destination field - n_bits The number of bits from the start bit - ================ ====================================================== - """ - # Add a match criteria - # an example of the corresponding ovs-ofctl syntax: - # NXM_OF_VLAN_TCI[0..11] - _dst_type = 0 - - class NXFlowSpecLoad(_NXFlowSpec): - """ - Add NXAST_REG_LOAD actions - - This class is used by ``NXActionLearn``. - - For the usage of this class, please refer to ``NXActionLearn``. - - ================ ====================================================== - Attribute Description - ================ ====================================================== - src OXM/NXM header and Start bit for source field - dst OXM/NXM header and Start bit for destination field - n_bits The number of bits from the start bit - ================ ====================================================== - """ - # Add NXAST_REG_LOAD actions - # an example of the corresponding ovs-ofctl syntax: - # NXM_OF_ETH_DST[]=NXM_OF_ETH_SRC[] - _dst_type = 1 - - class NXFlowSpecOutput(_NXFlowSpec): - """ - Add an OFPAT_OUTPUT action - - This class is used by ``NXActionLearn``. - - For the usage of this class, please refer to ``NXActionLearn``. 
- - ================ ====================================================== - Attribute Description - ================ ====================================================== - src OXM/NXM header and Start bit for source field - dst Must be '' - n_bits The number of bits from the start bit - ================ ====================================================== - """ - # Add an OFPAT_OUTPUT action - # an example of the corresponding ovs-ofctl syntax: - # output:NXM_OF_IN_PORT[] - _dst_type = 2 - - def __init__(self, src, n_bits, dst=''): - assert dst == '' - super(NXFlowSpecOutput, self).__init__( - src=src, dst=dst, - n_bits=n_bits, - ) - - class NXAction(ofpp.OFPActionExperimenter): - _fmt_str = '!H' # subtype - _subtypes = {} - _experimenter = ofproto_common.NX_EXPERIMENTER_ID - - def __init__(self): - super(NXAction, self).__init__(self._experimenter) - self.subtype = self._subtype - - @classmethod - def parse(cls, buf): - fmt_str = NXAction._fmt_str - (subtype,) = struct.unpack_from(fmt_str, buf, 0) - subtype_cls = cls._subtypes.get(subtype) - rest = buf[struct.calcsize(fmt_str):] - if subtype_cls is None: - return NXActionUnknown(subtype, rest) - return subtype_cls.parser(rest) - - def serialize(self, buf, offset): - data = self.serialize_body() - payload_offset = ( - ofp.OFP_ACTION_EXPERIMENTER_HEADER_SIZE + - struct.calcsize(NXAction._fmt_str) - ) - self.len = utils.round_up(payload_offset + len(data), 8) - super(NXAction, self).serialize(buf, offset) - msg_pack_into( - NXAction._fmt_str, - buf, - offset + ofp.OFP_ACTION_EXPERIMENTER_HEADER_SIZE, - self.subtype, - ) - buf += data - - @classmethod - def register(cls, subtype_cls): - assert subtype_cls._subtype is not cls._subtypes - cls._subtypes[subtype_cls._subtype] = subtype_cls - - class NXActionUnknown(NXAction): - def __init__( - self, subtype, data=None, - type_=None, len_=None, experimenter=None, - ): - self._subtype = subtype - super(NXActionUnknown, self).__init__() - self.data = data - - 
@classmethod - def parser(cls, buf): - return cls(data=buf) - - def serialize_body(self): - # fixup - return bytearray() if self.data is None else self.data - - # For OpenFlow1.0 only - class NXActionSetQueue(NXAction): - r""" - Set queue action - - This action sets the queue that should be used to queue - when packets are output. - - And equivalent to the followings action of ovs-ofctl command. - - .. - set_queue:queue - .. - - +-------------------------+ - | **set_queue**\:\ *queue*| - +-------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - queue_id Queue ID for the packets - ================ ====================================================== - - .. note:: - This actions is supported by - ``OFPActionSetQueue`` - in OpenFlow1.2 or later. - - Example:: - - actions += [parser.NXActionSetQueue(queue_id=10)] - """ - _subtype = nicira_ext.NXAST_SET_QUEUE - - # queue_id - _fmt_str = '!2xI' - - def __init__( - self, queue_id, - type_=None, len_=None, vendor=None, subtype=None, - ): - super(NXActionSetQueue, self).__init__() - self.queue_id = queue_id - - @classmethod - def parser(cls, buf): - (queue_id,) = struct.unpack_from(cls._fmt_str, buf, 0) - return cls(queue_id) - - def serialize_body(self): - data = bytearray() - msg_pack_into(self._fmt_str, data, 0, self.queue_id) - return data - - class NXActionPopQueue(NXAction): - """ - Pop queue action - - This action restors the queue to the value it was before any - set_queue actions were applied. - - And equivalent to the followings action of ovs-ofctl command. - - .. - pop_queue - .. 
- - +---------------+ - | **pop_queue** | - +---------------+ - - Example:: - - actions += [parser.NXActionPopQueue()] - """ - _subtype = nicira_ext.NXAST_POP_QUEUE - - _fmt_str = '!6x' - - def __init__( - self, - type_=None, len_=None, experimenter=None, subtype=None, - ): - super(NXActionPopQueue, self).__init__() - - @classmethod - def parser(cls, buf): - return cls() - - def serialize_body(self): - data = bytearray() - msg_pack_into(self._fmt_str, data, 0) - return data - - class NXActionRegLoad(NXAction): - r""" - Load literal value action - - This action loads a literal value into a field or part of a field. - - And equivalent to the followings action of ovs-ofctl command. - - .. - load:value->dst[start..end] - .. - - +-----------------------------------------------------------------+ - | **load**\:\ *value*\->\ *dst*\ **[**\ *start*\..\ *end*\ **]** | - +-----------------------------------------------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - ofs_nbits Start and End for the OXM/NXM field. 
- Setting method refer to the ``nicira_ext.ofs_nbits`` - dst OXM/NXM header for destination field - value OXM/NXM value to be loaded - ================ ====================================================== - - Example:: - - actions += [parser.NXActionRegLoad( - ofs_nbits=nicira_ext.ofs_nbits(4, 31), - dst="eth_dst", - value=0x112233)] - """ - _subtype = nicira_ext.NXAST_REG_LOAD - _fmt_str = '!HIQ' # ofs_nbits, dst, value - _TYPE = { - 'ascii': [ - 'dst', - ], - } - - def __init__( - self, ofs_nbits, dst, value, - type_=None, len_=None, experimenter=None, - subtype=None, - ): - super(NXActionRegLoad, self).__init__() - self.ofs_nbits = ofs_nbits - self.dst = dst - self.value = value - - @classmethod - def parser(cls, buf): - (ofs_nbits, dst, value) = struct.unpack_from( - cls._fmt_str, buf, 0, - ) - # Right-shift instead of using oxm_parse_header for simplicity... - dst_name = ofp.oxm_to_user_header(dst >> 9) - return cls(ofs_nbits, dst_name, value) - - def serialize_body(self): - hdr_data = bytearray() - n = ofp.oxm_from_user_header(self.dst) - ofp.oxm_serialize_header(n, hdr_data, 0) - (dst_num,) = struct.unpack_from('!I', six.binary_type(hdr_data), 0) - - data = bytearray() - msg_pack_into( - self._fmt_str, data, 0, - self.ofs_nbits, dst_num, self.value, - ) - return data - - class NXActionRegLoad2(NXAction): - r""" - Load literal value action - - This action loads a literal value into a field or part of a field. - - And equivalent to the followings action of ovs-ofctl command. - - .. - set_field:value[/mask]->dst - .. 
- - +------------------------------------------------------------+ - | **set_field**\:\ *value*\ **[**\/\ *mask*\ **]**\->\ *dst* | - +------------------------------------------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - value OXM/NXM value to be loaded - mask Mask for destination field - dst OXM/NXM header for destination field - ================ ====================================================== - - Example:: - - actions += [parser.NXActionRegLoad2(dst="tun_ipv4_src", - value="192.168.10.0", - mask="255.255.255.0")] - """ - _subtype = nicira_ext.NXAST_REG_LOAD2 - _TYPE = { - 'ascii': [ - 'dst', - 'value', - ], - } - - def __init__( - self, dst, value, mask=None, - type_=None, len_=None, experimenter=None, subtype=None, - ): - super(NXActionRegLoad2, self).__init__() - self.dst = dst - self.value = value - self.mask = mask - - @classmethod - def parser(cls, buf): - (n, uv, mask, _len) = ofp.oxm_parse(buf, 0) - dst, value = ofp.oxm_to_user(n, uv, mask) - - if isinstance(value, (tuple, list)): - return cls(dst, value[0], value[1]) - else: - return cls(dst, value, None) - - def serialize_body(self): - data = bytearray() - if self.mask is None: - value = self.value - else: - value = (self.value, self.mask) - self._TYPE['ascii'].append('mask') - - n, value, mask = ofp.oxm_from_user(self.dst, value) - len_ = ofp.oxm_serialize(n, value, mask, data, 0) - msg_pack_into("!%dx" % (14 - len_), data, len_) - - return data - - class NXActionNote(NXAction): - r""" - Note action - - This action does nothing at all. - - And equivalent to the followings action of ovs-ofctl command. - - .. - note:[hh].. - .. - - +-----------------------------------+ - | **note**\:\ **[**\ *hh*\ **]**\.. 
| - +-----------------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - note A list of integer type values - ================ ====================================================== - - Example:: - - actions += [parser.NXActionNote(note=[0xaa,0xbb,0xcc,0xdd])] - """ - _subtype = nicira_ext.NXAST_NOTE - - # note - _fmt_str = '!%dB' - - # set the integer array in a note - def __init__( - self, - note, - type_=None, len_=None, experimenter=None, subtype=None, - ): - super(NXActionNote, self).__init__() - self.note = note - - @classmethod - def parser(cls, buf): - note = struct.unpack_from( - cls._fmt_str % len(buf), buf, 0, - ) - return cls(list(note)) - - def serialize_body(self): - assert isinstance(self.note, (tuple, list)) - for n in self.note: - assert isinstance(n, six.integer_types) - - pad = (len(self.note) + nicira_ext.NX_ACTION_HEADER_0_SIZE) % 8 - if pad: - self.note += [0x0 for i in range(8 - pad)] - note_len = len(self.note) - data = bytearray() - msg_pack_into( - self._fmt_str % note_len, data, 0, - *self.note, - ) - return data - - class _NXActionSetTunnelBase(NXAction): - # _subtype, _fmt_str must be attributes of subclass. - - def __init__( - self, - tun_id, - type_=None, len_=None, experimenter=None, subtype=None, - ): - super(_NXActionSetTunnelBase, self).__init__() - self.tun_id = tun_id - - @classmethod - def parser(cls, buf): - (tun_id,) = struct.unpack_from( - cls._fmt_str, buf, 0, - ) - return cls(tun_id) - - def serialize_body(self): - data = bytearray() - msg_pack_into( - self._fmt_str, data, 0, - self.tun_id, - ) - return data - - class NXActionSetTunnel(_NXActionSetTunnelBase): - r""" - Set Tunnel action - - This action sets the identifier (such as GRE) to the specified id. - - And equivalent to the followings action of ovs-ofctl command. - - .. 
note:: - This actions is supported by - ``OFPActionSetField`` - in OpenFlow1.2 or later. - - .. - set_tunnel:id - .. - - +------------------------+ - | **set_tunnel**\:\ *id* | - +------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - tun_id Tunnel ID(32bits) - ================ ====================================================== - - Example:: - - actions += [parser.NXActionSetTunnel(tun_id=0xa)] - """ - _subtype = nicira_ext.NXAST_SET_TUNNEL - - # tun_id - _fmt_str = '!2xI' - - class NXActionSetTunnel64(_NXActionSetTunnelBase): - r""" - Set Tunnel action - - This action outputs to a port that encapsulates - the packet in a tunnel. - - And equivalent to the followings action of ovs-ofctl command. - - .. note:: - This actions is supported by - ``OFPActionSetField`` - in OpenFlow1.2 or later. - - .. - set_tunnel64:id - .. - - +--------------------------+ - | **set_tunnel64**\:\ *id* | - +--------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - tun_id Tunnel ID(64bits) - ================ ====================================================== - - Example:: - - actions += [parser.NXActionSetTunnel64(tun_id=0xa)] - """ - _subtype = nicira_ext.NXAST_SET_TUNNEL64 - - # tun_id - _fmt_str = '!6xQ' - - class NXActionRegMove(NXAction): - r""" - Move register action - - This action copies the src to dst. - - And equivalent to the followings action of ovs-ofctl command. - - .. - move:src[start..end]->dst[start..end] - .. 
- - +--------------------------------------------------------+ - | **move**\:\ *src*\ **[**\ *start*\..\ *end*\ **]**\->\ | - | *dst*\ **[**\ *start*\..\ *end* \ **]** | - +--------------------------------------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - src_field OXM/NXM header for source field - dst_field OXM/NXM header for destination field - n_bits Number of bits - src_ofs Starting bit offset in source - dst_ofs Starting bit offset in destination - ================ ====================================================== - - .. CAUTION:: - **src_start**\ and \ **src_end**\ difference and \ **dst_start**\ - and \ **dst_end**\ difference must be the same. - - Example:: - - actions += [parser.NXActionRegMove(src_field="reg0", - dst_field="reg1", - n_bits=5, - src_ofs=0 - dst_ofs=10)] - """ - _subtype = nicira_ext.NXAST_REG_MOVE - _fmt_str = '!HHH' # n_bits, src_ofs, dst_ofs - # Followed by OXM fields (src, dst) and padding to 8 bytes boundary - _TYPE = { - 'ascii': [ - 'src_field', - 'dst_field', - ], - } - - def __init__( - self, src_field, dst_field, n_bits, src_ofs=0, dst_ofs=0, - type_=None, len_=None, experimenter=None, subtype=None, - ): - super(NXActionRegMove, self).__init__() - self.n_bits = n_bits - self.src_ofs = src_ofs - self.dst_ofs = dst_ofs - self.src_field = src_field - self.dst_field = dst_field - - @classmethod - def parser(cls, buf): - (n_bits, src_ofs, dst_ofs) = struct.unpack_from( - cls._fmt_str, buf, 0, - ) - rest = buf[struct.calcsize(NXActionRegMove._fmt_str):] - - # src field - (n, len) = ofp.oxm_parse_header(rest, 0) - src_field = ofp.oxm_to_user_header(n) - rest = rest[len:] - # dst field - (n, len) = ofp.oxm_parse_header(rest, 0) - dst_field = ofp.oxm_to_user_header(n) - rest = rest[len:] - # ignore padding - return cls( - src_field, dst_field=dst_field, n_bits=n_bits, - src_ofs=src_ofs, 
dst_ofs=dst_ofs, - ) - - def serialize_body(self): - # fixup - data = bytearray() - msg_pack_into( - self._fmt_str, data, 0, - self.n_bits, self.src_ofs, self.dst_ofs, - ) - # src field - n = ofp.oxm_from_user_header(self.src_field) - ofp.oxm_serialize_header(n, data, len(data)) - # dst field - n = ofp.oxm_from_user_header(self.dst_field) - ofp.oxm_serialize_header(n, data, len(data)) - return data - - class NXActionResubmit(NXAction): - r""" - Resubmit action - - This action searches one of the switch's flow tables. - - And equivalent to the followings action of ovs-ofctl command. - - .. - resubmit:port - .. - - +------------------------+ - | **resubmit**\:\ *port* | - +------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - in_port New in_port for checking flow table - ================ ====================================================== - - Example:: - - actions += [parser.NXActionResubmit(in_port=8080)] - """ - _subtype = nicira_ext.NXAST_RESUBMIT - - # in_port - _fmt_str = '!H4x' - - def __init__( - self, - in_port=0xfff8, - type_=None, len_=None, experimenter=None, subtype=None, - ): - super(NXActionResubmit, self).__init__() - self.in_port = in_port - - @classmethod - def parser(cls, buf): - (in_port,) = struct.unpack_from( - cls._fmt_str, buf, 0, - ) - return cls(in_port) - - def serialize_body(self): - data = bytearray() - msg_pack_into( - self._fmt_str, data, 0, - self.in_port, - ) - return data - - class NXActionResubmitTable(NXAction): - r""" - Resubmit action - - This action searches one of the switch's flow tables. - - And equivalent to the followings action of ovs-ofctl command. - - .. - resubmit([port],[table]) - .. 
- - +------------------------------------------------+ - | **resubmit(**\[\ *port*\]\,[\ *table*\]\ **)** | - +------------------------------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - in_port New in_port for checking flow table - table_id Checking flow tables - ================ ====================================================== - - Example:: - - actions += [parser.NXActionResubmit(in_port=8080, - table_id=10)] - """ - _subtype = nicira_ext.NXAST_RESUBMIT_TABLE - - # in_port, table_id - _fmt_str = '!HB3x' - - def __init__( - self, - in_port=0xfff8, - table_id=0xff, - type_=None, len_=None, experimenter=None, subtype=None, - ): - super(NXActionResubmitTable, self).__init__() - self.in_port = in_port - self.table_id = table_id - - @classmethod - def parser(cls, buf): - ( - in_port, - table_id, - ) = struct.unpack_from( - cls._fmt_str, buf, 0, - ) - return cls(in_port, table_id) - - def serialize_body(self): - data = bytearray() - msg_pack_into( - self._fmt_str, data, 0, - self.in_port, self.table_id, - ) - return data - - class NXActionOutputReg(NXAction): - r""" - Add output action - - This action outputs the packet to the OpenFlow port number read from - src. - - And equivalent to the followings action of ovs-ofctl command. - - .. - output:src[start...end] - .. - - +-------------------------------------------------------+ - | **output**\:\ *src*\ **[**\ *start*\...\ *end*\ **]** | - +-------------------------------------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - ofs_nbits Start and End for the OXM/NXM field. 
- Setting method refer to the ``nicira_ext.ofs_nbits`` - src OXM/NXM header for source field - max_len Max length to send to controller - ================ ====================================================== - - Example:: - - actions += [parser.NXActionOutputReg( - ofs_nbits=nicira_ext.ofs_nbits(4, 31), - src="reg0", - max_len=1024)] - """ - _subtype = nicira_ext.NXAST_OUTPUT_REG - - # ofs_nbits, src, max_len - _fmt_str = '!H4sH6x' - _TYPE = { - 'ascii': [ - 'src', - ], - } - - def __init__( - self, - ofs_nbits, - src, - max_len, - type_=None, len_=None, experimenter=None, subtype=None, - ): - super(NXActionOutputReg, self).__init__() - self.ofs_nbits = ofs_nbits - self.src = src - self.max_len = max_len - - @classmethod - def parser(cls, buf): - (ofs_nbits, oxm_data, max_len) = struct.unpack_from( - cls._fmt_str, buf, 0, - ) - (n, len_) = ofp.oxm_parse_header(oxm_data, 0) - src = ofp.oxm_to_user_header(n) - return cls( - ofs_nbits, - src, - max_len, - ) - - def serialize_body(self): - data = bytearray() - src = bytearray() - oxm = ofp.oxm_from_user_header(self.src) - ofp.oxm_serialize_header(oxm, src, 0), - msg_pack_into( - self._fmt_str, data, 0, - self.ofs_nbits, - six.binary_type(src), - self.max_len, - ) - return data - - class NXActionOutputReg2(NXAction): - r""" - Add output action - - This action outputs the packet to the OpenFlow port number read from - src. - - And equivalent to the followings action of ovs-ofctl command. - - .. - output:src[start...end] - .. - - +-------------------------------------------------------+ - | **output**\:\ *src*\ **[**\ *start*\...\ *end*\ **]** | - +-------------------------------------------------------+ - - .. NOTE:: - Like the ``NXActionOutputReg`` but organized so - that there is room for a 64-bit experimenter OXM as 'src'. 
- - ================ ====================================================== - Attribute Description - ================ ====================================================== - ofs_nbits Start and End for the OXM/NXM field. - Setting method refer to the ``nicira_ext.ofs_nbits`` - src OXM/NXM header for source field - max_len Max length to send to controller - ================ ====================================================== - - Example:: - - actions += [parser.NXActionOutputReg2( - ofs_nbits=nicira_ext.ofs_nbits(4, 31), - src="reg0", - max_len=1024)] - """ - _subtype = nicira_ext.NXAST_OUTPUT_REG2 - - # ofs_nbits, src, max_len - _fmt_str = '!HH4s' - _TYPE = { - 'ascii': [ - 'src', - ], - } - - def __init__( - self, - ofs_nbits, - src, - max_len, - type_=None, len_=None, experimenter=None, subtype=None, - ): - super(NXActionOutputReg2, self).__init__() - self.ofs_nbits = ofs_nbits - self.src = src - self.max_len = max_len - - @classmethod - def parser(cls, buf): - ( - ofs_nbits, - max_len, - oxm_data, - ) = struct.unpack_from( - cls._fmt_str, buf, 0, - ) - (n, len_) = ofp.oxm_parse_header(oxm_data, 0) - src = ofp.oxm_to_user_header(n) - return cls( - ofs_nbits, - src, - max_len, - ) - - def serialize_body(self): - data = bytearray() - oxm_data = bytearray() - oxm = ofp.oxm_from_user_header(self.src) - ofp.oxm_serialize_header(oxm, oxm_data, 0), - msg_pack_into( - self._fmt_str, data, 0, - self.ofs_nbits, - self.max_len, - six.binary_type(oxm_data), - ) - offset = len(data) - msg_pack_into("!%dx" % (14 - offset), data, offset) - return data - - class NXActionLearn(NXAction): - r""" - Adds or modifies flow action - - This action adds or modifies a flow in OpenFlow table. - - And equivalent to the followings action of ovs-ofctl command. - - .. - learn(argument[,argument]...) - .. 
- - +---------------------------------------------------+ - | **learn(**\ *argument*\[,\ *argument*\]...\ **)** | - +---------------------------------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - table_id The table in which the new flow should be inserted - specs Adds a match criterion to the new flow - - Please use the - ``NXFlowSpecMatch`` - in order to set the following format - - .. - field=value - field[start..end]=src[start..end] - field[start..end] - .. - - | *field*\=\ *value* - | *field*\ **[**\ *start*\..\ *end*\ **]**\ =\ - *src*\ **[**\ *start*\..\ *end*\ **]** - | *field*\ **[**\ *start*\..\ *end*\ **]** - | - - Please use the - ``NXFlowSpecLoad`` - in order to set the following format - - .. - load:value->dst[start..end] - load:src[start..end]->dst[start..end] - .. - - | **load**\:\ *value*\ **->**\ *dst*\ - **[**\ *start*\..\ *end*\ **]** - | **load**\:\ *src*\ **[**\ *start*\..\ *end*\ - **] ->**\ *dst*\ **[**\ *start*\..\ *end*\ **]** - | - - Please use the - ``NXFlowSpecOutput`` - in order to set the following format - - .. - output:field[start..end] - .. - - | **output:**\ field\ **[**\ *start*\..\ *end*\ **]** - - idle_timeout Idle time before discarding(seconds) - hard_timeout Max time before discarding(seconds) - priority Priority level of flow entry - cookie Cookie for new flow - flags send_flow_rem - fin_idle_timeout Idle timeout after FIN(seconds) - fin_hard_timeout Hard timeout after FIN(seconds) - ================ ====================================================== - - .. CAUTION:: - The arguments specify the flow's match fields, actions, - and other properties, as follows. - At least one match criterion and one action argument - should ordinarily be specified. 
- - Example:: - - actions += [ - parser.NXActionLearn(able_id=10, - specs=[parser.NXFlowSpecMatch(src=0x800, - dst=('eth_type_nxm', 0), - n_bits=16), - parser.NXFlowSpecMatch(src=('reg1', 1), - dst=('reg2', 3), - n_bits=5), - parser.NXFlowSpecMatch(src=('reg3', 1), - dst=('reg3', 1), - n_bits=5), - parser.NXFlowSpecLoad(src=0, - dst=('reg4', 3), - n_bits=5), - parser.NXFlowSpecLoad(src=('reg5', 1), - dst=('reg6', 3), - n_bits=5), - parser.NXFlowSpecOutput(src=('reg7', 1), - dst="", - n_bits=5)], - idle_timeout=180, - hard_timeout=300, - priority=1, - cookie=0x64, - flags=ofproto.OFPFF_SEND_FLOW_REM, - fin_idle_timeout=180, - fin_hard_timeout=300)] - """ - _subtype = nicira_ext.NXAST_LEARN - - # idle_timeout, hard_timeout, priority, cookie, flags, - # table_id, pad, fin_idle_timeout, fin_hard_timeout - _fmt_str = '!HHHQHBxHH' - # Followed by flow_mod_specs - - def __init__( - self, - table_id, - specs, - idle_timeout=0, - hard_timeout=0, - priority=ofp.OFP_DEFAULT_PRIORITY, - cookie=0, - flags=0, - fin_idle_timeout=0, - fin_hard_timeout=0, - type_=None, len_=None, experimenter=None, subtype=None, - ): - super(NXActionLearn, self).__init__() - self.idle_timeout = idle_timeout - self.hard_timeout = hard_timeout - self.priority = priority - self.cookie = cookie - self.flags = flags - self.table_id = table_id - self.fin_idle_timeout = fin_idle_timeout - self.fin_hard_timeout = fin_hard_timeout - self.specs = specs - - @classmethod - def parser(cls, buf): - ( - idle_timeout, - hard_timeout, - priority, - cookie, - flags, - table_id, - fin_idle_timeout, - fin_hard_timeout, - ) = struct.unpack_from( - cls._fmt_str, buf, 0, - ) - rest = buf[struct.calcsize(cls._fmt_str):] - # specs - specs = [] - while len(rest) > 0: - spec, rest = _NXFlowSpec.parse(rest) - if spec is None: - continue - specs.append(spec) - return cls( - idle_timeout=idle_timeout, - hard_timeout=hard_timeout, - priority=priority, - cookie=cookie, - flags=flags, - table_id=table_id, - 
fin_idle_timeout=fin_idle_timeout, - fin_hard_timeout=fin_hard_timeout, - specs=specs, - ) - - def serialize_body(self): - # fixup - data = bytearray() - msg_pack_into( - self._fmt_str, data, 0, - self.idle_timeout, - self.hard_timeout, - self.priority, - self.cookie, - self.flags, - self.table_id, - self.fin_idle_timeout, - self.fin_hard_timeout, - ) - for spec in self.specs: - data += spec.serialize() - return data - - class NXActionExit(NXAction): - """ - Halt action - - This action causes OpenvSwitch to immediately halt - execution of further actions. - - And equivalent to the followings action of ovs-ofctl command. - - .. - exit - .. - - +----------+ - | **exit** | - +----------+ - - Example:: - - actions += [parser.NXActionExit()] - """ - _subtype = nicira_ext.NXAST_EXIT - - _fmt_str = '!6x' - - def __init__( - self, - type_=None, len_=None, experimenter=None, subtype=None, - ): - super(NXActionExit, self).__init__() - - @classmethod - def parser(cls, buf): - return cls() - - def serialize_body(self): - data = bytearray() - msg_pack_into(self._fmt_str, data, 0) - return data - - # For OpenFlow1.0 only - class NXActionDecTtl(NXAction): - """ - Decrement IP TTL action - - This action decrements TTL of IPv4 packet or - hop limit of IPv6 packet. - - And equivalent to the followings action of ovs-ofctl command. - - .. - dec_ttl - .. - - +-------------+ - | **dec_ttl** | - +-------------+ - - .. NOTE:: - This actions is supported by - ``OFPActionDecNwTtl`` - in OpenFlow1.2 or later. 
- - Example:: - - actions += [parser.NXActionDecTtl()] - """ - _subtype = nicira_ext.NXAST_DEC_TTL - - _fmt_str = '!6x' - - def __init__( - self, - type_=None, len_=None, vendor=None, subtype=None, - ): - super(NXActionDecTtl, self).__init__() - - @classmethod - def parser(cls, buf): - return cls() - - def serialize_body(self): - data = bytearray() - msg_pack_into(self._fmt_str, data, 0) - return data - - class NXActionController(NXAction): - r""" - Send packet in message action - - This action sends the packet to the OpenFlow controller as - a packet in message. - - And equivalent to the followings action of ovs-ofctl command. - - .. - controller(key=value...) - .. - - +----------------------------------------------+ - | **controller(**\ *key*\=\ *value*\...\ **)** | - +----------------------------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - max_len Max length to send to controller - controller_id Controller ID to send packet-in - reason Reason for sending the message - ================ ====================================================== - - Example:: - - actions += [ - parser.NXActionController(max_len=1024, - controller_id=1, - reason=ofproto.OFPR_INVALID_TTL)] - """ - _subtype = nicira_ext.NXAST_CONTROLLER - - # max_len, controller_id, reason - _fmt_str = '!HHBx' - - def __init__( - self, - max_len, - controller_id, - reason, - type_=None, len_=None, experimenter=None, subtype=None, - ): - super(NXActionController, self).__init__() - self.max_len = max_len - self.controller_id = controller_id - self.reason = reason - - @classmethod - def parser(cls, buf): - ( - max_len, - controller_id, - reason, - ) = struct.unpack_from( - cls._fmt_str, buf, - ) - return cls( - max_len, - controller_id, - reason, - ) - - def serialize_body(self): - data = bytearray() - msg_pack_into( - self._fmt_str, data, 0, - self.max_len, 
- self.controller_id, - self.reason, - ) - return data - - class NXActionController2(NXAction): - r""" - Send packet in message action - - This action sends the packet to the OpenFlow controller as - a packet in message. - - And equivalent to the followings action of ovs-ofctl command. - - .. - controller(key=value...) - .. - - +----------------------------------------------+ - | **controller(**\ *key*\=\ *value*\...\ **)** | - +----------------------------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - max_len Max length to send to controller - controller_id Controller ID to send packet-in - reason Reason for sending the message - userdata Additional data to the controller in the packet-in - message - pause Flag to pause pipeline to resume later - ================ ====================================================== - - Example:: - - actions += [ - parser.NXActionController(max_len=1024, - controller_id=1, - reason=ofproto.OFPR_INVALID_TTL, - userdata=[0xa,0xb,0xc], - pause=True)] - """ - _subtype = nicira_ext.NXAST_CONTROLLER2 - _fmt_str = '!6x' - _PACK_STR = '!HH' - - def __init__( - self, - type_=None, len_=None, vendor=None, subtype=None, - **kwargs - ): - super(NXActionController2, self).__init__() - - for arg in kwargs: - if arg in NXActionController2Prop._NAMES: - setattr(self, arg, kwargs[arg]) - - @classmethod - def parser(cls, buf): - cls_data = {} - offset = 6 - buf_len = len(buf) - while buf_len > offset: - (type_, length) = struct.unpack_from(cls._PACK_STR, buf, offset) - offset += 4 - try: - subcls = NXActionController2Prop._TYPES[type_] - except KeyError: - subcls = NXActionController2PropUnknown - data, size = subcls.parser_prop(buf[offset:], length - 4) - offset += size - cls_data[subcls._arg_name] = data - return cls(**cls_data) - - def serialize_body(self): - body = bytearray() - 
msg_pack_into(self._fmt_str, body, 0) - prop_list = [] - for arg in self.__dict__: - if arg in NXActionController2Prop._NAMES: - prop_list.append(( - NXActionController2Prop._NAMES[arg], - self.__dict__[arg], - )) - prop_list.sort(key=lambda x: x[0].type) - - for subcls, value in prop_list: - body += subcls.serialize_prop(value) - - return body - - class NXActionController2Prop(object): - _TYPES = {} - _NAMES = {} - - @classmethod - def register_type(cls, type_): - def _register_type(subcls): - subcls.type = type_ - NXActionController2Prop._TYPES[type_] = subcls - NXActionController2Prop._NAMES[subcls._arg_name] = subcls - return subcls - - return _register_type - - class NXActionController2PropUnknown(NXActionController2Prop): - - @classmethod - def parser_prop(cls, buf, length): - size = 4 - return buf, size - - @classmethod - def serialize_prop(cls, argment): - data = bytearray() - return data - - @NXActionController2Prop.register_type(nicira_ext.NXAC2PT_MAX_LEN) - class NXActionController2PropMaxLen(NXActionController2Prop): - # max_len - _fmt_str = "!H2x" - _arg_name = "max_len" - - @classmethod - def parser_prop(cls, buf, length): - size = 4 - (max_len,) = struct.unpack_from( - cls._fmt_str, buf, 0, - ) - return max_len, size - - @classmethod - def serialize_prop(cls, max_len): - data = bytearray() - msg_pack_into( - "!HHH2x", data, 0, - nicira_ext.NXAC2PT_MAX_LEN, - 8, - max_len, - ) - return data - - @NXActionController2Prop.register_type(nicira_ext.NXAC2PT_CONTROLLER_ID) - class NXActionController2PropControllerId(NXActionController2Prop): - # controller_id - _fmt_str = "!H2x" - _arg_name = "controller_id" - - @classmethod - def parser_prop(cls, buf, length): - size = 4 - (controller_id,) = struct.unpack_from( - cls._fmt_str, buf, 0, - ) - return controller_id, size - - @classmethod - def serialize_prop(cls, controller_id): - data = bytearray() - msg_pack_into( - "!HHH2x", data, 0, - nicira_ext.NXAC2PT_CONTROLLER_ID, - 8, - controller_id, - ) - return data 
- - @NXActionController2Prop.register_type(nicira_ext.NXAC2PT_REASON) - class NXActionController2PropReason(NXActionController2Prop): - # reason - _fmt_str = "!B3x" - _arg_name = "reason" - - @classmethod - def parser_prop(cls, buf, length): - size = 4 - (reason,) = struct.unpack_from( - cls._fmt_str, buf, 0, - ) - return reason, size - - @classmethod - def serialize_prop(cls, reason): - data = bytearray() - msg_pack_into( - "!HHB3x", data, 0, - nicira_ext.NXAC2PT_REASON, - 5, - reason, - ) - return data - - @NXActionController2Prop.register_type(nicira_ext.NXAC2PT_USERDATA) - class NXActionController2PropUserData(NXActionController2Prop): - # userdata - _fmt_str = "!B" - _arg_name = "userdata" - - @classmethod - def parser_prop(cls, buf, length): - userdata = [] - offset = 0 - - while offset < length: - u = struct.unpack_from(cls._fmt_str, buf, offset) - userdata.append(u[0]) - offset += 1 - - user_size = utils.round_up(length, 4) - - if user_size > 4 and (user_size % 8) == 0: - size = utils.round_up(length, 4) + 4 - else: - size = utils.round_up(length, 4) - - return userdata, size - - @classmethod - def serialize_prop(cls, userdata): - data = bytearray() - user_buf = bytearray() - user_offset = 0 - for user in userdata: - msg_pack_into( - '!B', user_buf, user_offset, - user, - ) - user_offset += 1 - - msg_pack_into( - "!HH", data, 0, - nicira_ext.NXAC2PT_USERDATA, - 4 + user_offset, - ) - data += user_buf - - if user_offset > 4: - user_len = utils.round_up(user_offset, 4) - brank_size = 0 - if (user_len % 8) == 0: - brank_size = 4 - msg_pack_into( - "!%dx" % (user_len - user_offset + brank_size), - data, 4 + user_offset, - ) - else: - user_len = utils.round_up(user_offset, 4) - - msg_pack_into( - "!%dx" % (user_len - user_offset), - data, 4 + user_offset, - ) - return data - - @NXActionController2Prop.register_type(nicira_ext.NXAC2PT_PAUSE) - class NXActionController2PropPause(NXActionController2Prop): - _arg_name = "pause" - - @classmethod - def 
parser_prop(cls, buf, length): - pause = True - size = 4 - return pause, size - - @classmethod - def serialize_prop(cls, pause): - data = bytearray() - msg_pack_into( - "!HH4x", data, 0, - nicira_ext.NXAC2PT_PAUSE, - 4, - ) - return data - - class NXActionDecTtlCntIds(NXAction): - r""" - Decrement TTL action - - This action decrements TTL of IPv4 packet or - hop limits of IPv6 packet. - - And equivalent to the followings action of ovs-ofctl command. - - .. - dec_ttl(id1[,id2]...) - .. - - +-------------------------------------------+ - | **dec_ttl(**\ *id1*\[,\ *id2*\]...\ **)** | - +-------------------------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - cnt_ids Controller ids - ================ ====================================================== - - Example:: - - actions += [parser.NXActionDecTtlCntIds(cnt_ids=[1,2,3])] - - .. NOTE:: - If you want to set the following ovs-ofctl command. - Please use ``OFPActionDecNwTtl``. 
- - +-------------+ - | **dec_ttl** | - +-------------+ - """ - _subtype = nicira_ext.NXAST_DEC_TTL_CNT_IDS - - # controllers - _fmt_str = '!H4x' - _fmt_len = 6 - - def __init__( - self, - cnt_ids, - type_=None, len_=None, experimenter=None, subtype=None, - ): - super(NXActionDecTtlCntIds, self).__init__() - - self.cnt_ids = cnt_ids - - @classmethod - def parser(cls, buf): - (controllers,) = struct.unpack_from( - cls._fmt_str, buf, - ) - - offset = cls._fmt_len - cnt_ids = [] - - for i in range(0, controllers): - id_ = struct.unpack_from('!H', buf, offset) - cnt_ids.append(id_[0]) - offset += 2 - - return cls(cnt_ids) - - def serialize_body(self): - assert isinstance(self.cnt_ids, (tuple, list)) - for i in self.cnt_ids: - assert isinstance(i, six.integer_types) - - controllers = len(self.cnt_ids) - - data = bytearray() - msg_pack_into( - self._fmt_str, data, 0, - controllers, - ) - offset = self._fmt_len - - for id_ in self.cnt_ids: - msg_pack_into('!H', data, offset, id_) - offset += 2 - - id_len = ( - utils.round_up(controllers, 4) - - controllers - ) - - if id_len != 0: - msg_pack_into('%dx' % id_len * 2, data, offset) - - return data - - # Use in only OpenFlow1.0 - class NXActionMplsBase(NXAction): - # ethertype - _fmt_str = '!H4x' - - def __init__( - self, - ethertype, - type_=None, len_=None, vendor=None, subtype=None, - ): - super(NXActionMplsBase, self).__init__() - self.ethertype = ethertype - - @classmethod - def parser(cls, buf): - (ethertype,) = struct.unpack_from( - cls._fmt_str, buf, - ) - return cls(ethertype) - - def serialize_body(self): - data = bytearray() - msg_pack_into( - self._fmt_str, data, 0, - self.ethertype, - ) - return data - - # For OpenFlow1.0 only - class NXActionPushMpls(NXActionMplsBase): - r""" - Push MPLS action - - This action pushes a new MPLS header to the packet. - - And equivalent to the followings action of ovs-ofctl command. - - .. - push_mpls:ethertype - .. 
- - +-------------------------------+ - | **push_mpls**\:\ *ethertype* | - +-------------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - ethertype Ether type(The value must be either 0x8847 or 0x8848) - ================ ====================================================== - - .. NOTE:: - This actions is supported by - ``OFPActionPushMpls`` - in OpenFlow1.2 or later. - - Example:: - - match = parser.OFPMatch(dl_type=0x0800) - actions += [parser.NXActionPushMpls(ethertype=0x8847)] - """ - _subtype = nicira_ext.NXAST_PUSH_MPLS - - # For OpenFlow1.0 only - class NXActionPopMpls(NXActionMplsBase): - r""" - Pop MPLS action - - This action pops the MPLS header from the packet. - - And equivalent to the followings action of ovs-ofctl command. - - .. - pop_mpls:ethertype - .. - - +------------------------------+ - | **pop_mpls**\:\ *ethertype* | - +------------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - ethertype Ether type - ================ ====================================================== - - .. NOTE:: - This actions is supported by - ``OFPActionPopMpls`` - in OpenFlow1.2 or later. - - Example:: - - match = parser.OFPMatch(dl_type=0x8847) - actions += [parser.NXActionPushMpls(ethertype=0x0800)] - """ - _subtype = nicira_ext.NXAST_POP_MPLS - - # For OpenFlow1.0 only - class NXActionSetMplsTtl(NXAction): - r""" - Set MPLS TTL action - - This action sets the MPLS TTL. - - And equivalent to the followings action of ovs-ofctl command. - - .. - set_mpls_ttl:ttl - .. 
- - +---------------------------+ - | **set_mpls_ttl**\:\ *ttl* | - +---------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - ttl MPLS TTL - ================ ====================================================== - - .. NOTE:: - This actions is supported by - ``OFPActionSetMplsTtl`` - in OpenFlow1.2 or later. - - Example:: - - actions += [parser.NXActionSetMplsTil(ttl=128)] - """ - _subtype = nicira_ext.NXAST_SET_MPLS_TTL - - # ethertype - _fmt_str = '!B5x' - - def __init__( - self, - ttl, - type_=None, len_=None, vendor=None, subtype=None, - ): - super(NXActionSetMplsTtl, self).__init__() - self.ttl = ttl - - @classmethod - def parser(cls, buf): - (ttl,) = struct.unpack_from( - cls._fmt_str, buf, - ) - return cls(ttl) - - def serialize_body(self): - data = bytearray() - msg_pack_into( - self._fmt_str, data, 0, - self.ttl, - ) - return data - - # For OpenFlow1.0 only - class NXActionDecMplsTtl(NXAction): - """ - Decrement MPLS TTL action - - This action decrements the MPLS TTL. - - And equivalent to the followings action of ovs-ofctl command. - - .. - dec_mpls_ttl - .. - - +------------------+ - | **dec_mpls_ttl** | - +------------------+ - - .. NOTE:: - This actions is supported by - ``OFPActionDecMplsTtl`` - in OpenFlow1.2 or later. - - Example:: - - actions += [parser.NXActionDecMplsTil()] - """ - _subtype = nicira_ext.NXAST_DEC_MPLS_TTL - - # ethertype - _fmt_str = '!6x' - - def __init__( - self, - type_=None, len_=None, vendor=None, subtype=None, - ): - super(NXActionDecMplsTtl, self).__init__() - - @classmethod - def parser(cls, buf): - return cls() - - def serialize_body(self): - data = bytearray() - msg_pack_into(self._fmt_str, data, 0) - return data - - # For OpenFlow1.0 only - class NXActionSetMplsLabel(NXAction): - r""" - Set MPLS Lavel action - - This action sets the MPLS Label. 
- - And equivalent to the followings action of ovs-ofctl command. - - .. - set_mpls_label:label - .. - - +-------------------------------+ - | **set_mpls_label**\:\ *label* | - +-------------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - label MPLS Label - ================ ====================================================== - - .. NOTE:: - This actions is supported by - ``OFPActionSetField(mpls_label=label)`` - in OpenFlow1.2 or later. - - Example:: - - actions += [parser.NXActionSetMplsLabel(label=0x10)] - """ - _subtype = nicira_ext.NXAST_SET_MPLS_LABEL - - # ethertype - _fmt_str = '!2xI' - - def __init__( - self, - label, - type_=None, len_=None, vendor=None, subtype=None, - ): - super(NXActionSetMplsLabel, self).__init__() - self.label = label - - @classmethod - def parser(cls, buf): - (label,) = struct.unpack_from( - cls._fmt_str, buf, - ) - return cls(label) - - def serialize_body(self): - data = bytearray() - msg_pack_into( - self._fmt_str, data, 0, - self.label, - ) - return data - - # For OpenFlow1.0 only - class NXActionSetMplsTc(NXAction): - r""" - Set MPLS Tc action - - This action sets the MPLS Tc. - - And equivalent to the followings action of ovs-ofctl command. - - .. - set_mpls_tc:tc - .. - - +-------------------------+ - | **set_mpls_tc**\:\ *tc* | - +-------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - tc MPLS Tc - ================ ====================================================== - - .. NOTE:: - This actions is supported by - ``OFPActionSetField(mpls_label=tc)`` - in OpenFlow1.2 or later. 
- - Example:: - - actions += [parser.NXActionSetMplsLabel(tc=0x10)] - """ - _subtype = nicira_ext.NXAST_SET_MPLS_TC - - # ethertype - _fmt_str = '!B5x' - - def __init__( - self, - tc, - type_=None, len_=None, vendor=None, subtype=None, - ): - super(NXActionSetMplsTc, self).__init__() - self.tc = tc - - @classmethod - def parser(cls, buf): - (tc,) = struct.unpack_from( - cls._fmt_str, buf, - ) - return cls(tc) - - def serialize_body(self): - data = bytearray() - msg_pack_into( - self._fmt_str, data, 0, - self.tc, - ) - return data - - class NXActionStackBase(NXAction): - # start, field, end - _fmt_str = '!H4sH' - _TYPE = { - 'ascii': [ - 'field', - ], - } - - def __init__( - self, - field, - start, - end, - type_=None, len_=None, experimenter=None, subtype=None, - ): - super(NXActionStackBase, self).__init__() - self.field = field - self.start = start - self.end = end - - @classmethod - def parser(cls, buf): - (start, oxm_data, end) = struct.unpack_from( - cls._fmt_str, buf, 0, - ) - (n, len_) = ofp.oxm_parse_header(oxm_data, 0) - field = ofp.oxm_to_user_header(n) - return cls(field, start, end) - - def serialize_body(self): - data = bytearray() - oxm_data = bytearray() - oxm = ofp.oxm_from_user_header(self.field) - ofp.oxm_serialize_header(oxm, oxm_data, 0) - msg_pack_into( - self._fmt_str, data, 0, - self.start, - six.binary_type(oxm_data), - self.end, - ) - offset = len(data) - msg_pack_into("!%dx" % (12 - offset), data, offset) - return data - - class NXActionStackPush(NXActionStackBase): - r""" - Push field action - - This action pushes field to top of the stack. - - And equivalent to the followings action of ovs-ofctl command. - - .. - pop:dst[start...end] - .. 
- - +----------------------------------------------------+ - | **pop**\:\ *dst*\ **[**\ *start*\...\ *end*\ **]** | - +----------------------------------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - field OXM/NXM header for source field - start Start bit for source field - end End bit for source field - ================ ====================================================== - - Example:: - - actions += [parser.NXActionStackPush(field="reg2", - start=0, - end=5)] - """ - _subtype = nicira_ext.NXAST_STACK_PUSH - - class NXActionStackPop(NXActionStackBase): - r""" - Pop field action - - This action pops field from top of the stack. - - And equivalent to the followings action of ovs-ofctl command. - - .. - pop:src[start...end] - .. - - +----------------------------------------------------+ - | **pop**\:\ *src*\ **[**\ *start*\...\ *end*\ **]** | - +----------------------------------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - field OXM/NXM header for destination field - start Start bit for destination field - end End bit for destination field - ================ ====================================================== - - Example:: - - actions += [parser.NXActionStackPop(field="reg2", - start=0, - end=5)] - """ - _subtype = nicira_ext.NXAST_STACK_POP - - class NXActionSample(NXAction): - r""" - Sample packets action - - This action samples packets and sends one sample for - every sampled packet. - - And equivalent to the followings action of ovs-ofctl command. - - .. - sample(argument[,argument]...) - .. 
- - +----------------------------------------------------+ - | **sample(**\ *argument*\[,\ *argument*\]...\ **)** | - +----------------------------------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - probability The number of sampled packets - collector_set_id The unsigned 32-bit integer identifier of - the set of sample collectors to send sampled packets - to - obs_domain_id The Unsigned 32-bit integer Observation Domain ID - obs_point_id The unsigned 32-bit integer Observation Point ID - ================ ====================================================== - - Example:: - - actions += [parser.NXActionSample(probability=3, - collector_set_id=1, - obs_domain_id=2, - obs_point_id=3,)] - """ - _subtype = nicira_ext.NXAST_SAMPLE - - # probability, collector_set_id, obs_domain_id, obs_point_id - _fmt_str = '!HIII' - - def __init__( - self, - probability, - collector_set_id=0, - obs_domain_id=0, - obs_point_id=0, - type_=None, len_=None, experimenter=None, subtype=None, - ): - super(NXActionSample, self).__init__() - self.probability = probability - self.collector_set_id = collector_set_id - self.obs_domain_id = obs_domain_id - self.obs_point_id = obs_point_id - - @classmethod - def parser(cls, buf): - ( - probability, - collector_set_id, - obs_domain_id, - obs_point_id, - ) = struct.unpack_from( - cls._fmt_str, buf, 0, - ) - return cls( - probability, - collector_set_id, - obs_domain_id, - obs_point_id, - ) - - def serialize_body(self): - data = bytearray() - msg_pack_into( - self._fmt_str, data, 0, - self.probability, - self.collector_set_id, - self.obs_domain_id, - self.obs_point_id, - ) - return data - - class NXActionSample2(NXAction): - r""" - Sample packets action - - This action samples packets and sends one sample for - every sampled packet. - 'sampling_port' can be equal to ingress port or one of egress ports. 
- - And equivalent to the followings action of ovs-ofctl command. - - .. - sample(argument[,argument]...) - .. - - +----------------------------------------------------+ - | **sample(**\ *argument*\[,\ *argument*\]...\ **)** | - +----------------------------------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - probability The number of sampled packets - collector_set_id The unsigned 32-bit integer identifier of - the set of sample collectors to send sampled packets to - obs_domain_id The Unsigned 32-bit integer Observation Domain ID - obs_point_id The unsigned 32-bit integer Observation Point ID - sampling_port Sampling port number - ================ ====================================================== - - Example:: - - actions += [parser.NXActionSample2(probability=3, - collector_set_id=1, - obs_domain_id=2, - obs_point_id=3, - apn_mac_addr=[10,0,2,0,0,5], - msisdn=b'magmaIsTheBest', - apn_name=b'big_tower123', - pdp_start_epoch=b'90\x00\x00\x00\x00\x00\x00', - sampling_port=8080)] - """ - _subtype = nicira_ext.NXAST_SAMPLE2 - - # probability, collector_set_id, obs_domain_id, - # obs_point_id, msisdn, apn_mac_addr, apn_name, sampling_port - _fmt_str = '!HIIIL16s6B24s8s6x' - - def __init__( - self, - probability, - msisdn, - apn_mac_addr, - apn_name, - pdp_start_epoch, - collector_set_id=0, - obs_domain_id=0, - obs_point_id=0, - sampling_port=0, - type_=None, len_=None, experimenter=None, subtype=None, - ): - super(NXActionSample2, self).__init__() - self.probability = probability - self.collector_set_id = collector_set_id - self.obs_domain_id = obs_domain_id - self.obs_point_id = obs_point_id - self.sampling_port = sampling_port - - self.msisdn = msisdn - self.apn_mac_addr = apn_mac_addr - self.apn_name = apn_name - self.pdp_start_epoch = pdp_start_epoch - - @classmethod - def parser(cls, buf): - ( - probability, - 
collector_set_id, - obs_domain_id, - obs_point_id, - sampling_port, - msisdn, - apn_mac_addr_0, - apn_mac_addr_1, - apn_mac_addr_2, - apn_mac_addr_3, - apn_mac_addr_4, - apn_mac_addr_5, - apn_name, - pdp_start_epoch, - ) = struct.unpack_from( - cls._fmt_str, buf, 0, - ) - - apn_mac_addr = [apn_mac_addr_0, apn_mac_addr_1, apn_mac_addr_2, apn_mac_addr_3, apn_mac_addr_4, apn_mac_addr_5] - return cls( - probability, - msisdn, - apn_mac_addr, - apn_name, - pdp_start_epoch, - collector_set_id, - obs_domain_id, - obs_point_id, - sampling_port, - ) - - def serialize_body(self): - data = bytearray() - msg_pack_into( - self._fmt_str, data, 0, - self.probability, - self.collector_set_id, - self.obs_domain_id, - self.obs_point_id, - self.sampling_port, - self.msisdn, - *self.apn_mac_addr, - self.apn_name, - self.pdp_start_epoch, - ) - - return data - - class NXActionFinTimeout(NXAction): - r""" - Change TCP timeout action - - This action changes the idle timeout or hard timeout or - both, of this OpenFlow rule when the rule matches a TCP - packet with the FIN or RST flag. - - And equivalent to the followings action of ovs-ofctl command. - - .. - fin_timeout(argument[,argument]...) - .. 
- - +---------------------------------------------------------+ - | **fin_timeout(**\ *argument*\[,\ *argument*\]...\ **)** | - +---------------------------------------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - fin_idle_timeout Causes the flow to expire after the given number - of seconds of inactivity - fin_idle_timeout Causes the flow to expire after the given number - of second, regardless of activity - ================ ====================================================== - - Example:: - - match = parser.OFPMatch(ip_proto=6, eth_type=0x0800) - actions += [parser.NXActionFinTimeout(fin_idle_timeout=30, - fin_hard_timeout=60)] - """ - _subtype = nicira_ext.NXAST_FIN_TIMEOUT - - # fin_idle_timeout, fin_hard_timeout - _fmt_str = '!HH2x' - - def __init__( - self, - fin_idle_timeout, - fin_hard_timeout, - type_=None, len_=None, experimenter=None, subtype=None, - ): - super(NXActionFinTimeout, self).__init__() - self.fin_idle_timeout = fin_idle_timeout - self.fin_hard_timeout = fin_hard_timeout - - @classmethod - def parser(cls, buf): - ( - fin_idle_timeout, - fin_hard_timeout, - ) = struct.unpack_from( - cls._fmt_str, buf, 0, - ) - return cls( - fin_idle_timeout, - fin_hard_timeout, - ) - - def serialize_body(self): - data = bytearray() - msg_pack_into( - self._fmt_str, data, 0, - self.fin_idle_timeout, - self.fin_hard_timeout, - ) - return data - - class NXActionConjunction(NXAction): - r""" - Conjunctive matches action - - This action ties groups of individual OpenFlow flows into - higher-level conjunctive flows. - Please refer to the ovs-ofctl command manual for details. - - And equivalent to the followings action of ovs-ofctl command. - - .. - conjunction(id,k/n) - .. 
- - +--------------------------------------------------+ - | **conjunction(**\ *id*\,\ *k*\ **/**\ *n*\ **)** | - +--------------------------------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - clause Number assigned to the flow's dimension - n_clauses Specify the conjunctive flow's match condition - id\_ Conjunction ID - ================ ====================================================== - - Example:: - - actions += [parser.NXActionConjunction(clause=1, - n_clauses=2, - id_=10)] - """ - _subtype = nicira_ext.NXAST_CONJUNCTION - - # clause, n_clauses, id - _fmt_str = '!BBI' - - def __init__( - self, - clause, - n_clauses, - id_, - type_=None, len_=None, experimenter=None, subtype=None, - ): - super(NXActionConjunction, self).__init__() - self.clause = clause - self.n_clauses = n_clauses - self.id = id_ - - @classmethod - def parser(cls, buf): - ( - clause, - n_clauses, - id_, - ) = struct.unpack_from( - cls._fmt_str, buf, 0, - ) - return cls(clause, n_clauses, id_) - - def serialize_body(self): - data = bytearray() - msg_pack_into( - self._fmt_str, data, 0, - self.clause, - self.n_clauses, - self.id, - ) - return data - - class NXActionMultipath(NXAction): - r""" - Select multipath link action - - This action selects multipath link based on the specified parameters. - Please refer to the ovs-ofctl command manual for details. - - And equivalent to the followings action of ovs-ofctl command. - - .. - multipath(fields, basis, algorithm, n_links, arg, dst[start..end]) - .. 
- - +-------------------------------------------------------------+ - | **multipath(**\ *fields*\, \ *basis*\, \ *algorithm*\, | - | *n_links*\, \ *arg*\, \ *dst*\[\ *start*\..\ *end*\]\ **)** | - +-------------------------------------------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - fields One of NX_HASH_FIELDS_* - basis Universal hash parameter - algorithm One of NX_MP_ALG_*. - max_link Number of output links - arg Algorithm-specific argument - ofs_nbits Start and End for the OXM/NXM field. - Setting method refer to the ``nicira_ext.ofs_nbits`` - dst OXM/NXM header for source field - ================ ====================================================== - - Example:: - - actions += [parser.NXActionMultipath( - fields=nicira_ext.NX_HASH_FIELDS_SYMMETRIC_L4, - basis=1024, - algorithm=nicira_ext.NX_MP_ALG_HRW, - max_link=5, - arg=0, - ofs_nbits=nicira_ext.ofs_nbits(4, 31), - dst="reg2")] - """ - _subtype = nicira_ext.NXAST_MULTIPATH - - # fields, basis, algorithm, max_link, - # arg, ofs_nbits, dst - _fmt_str = '!HH2xHHI2xH4s' - _TYPE = { - 'ascii': [ - 'dst', - ], - } - - def __init__( - self, - fields, - basis, - algorithm, - max_link, - arg, - ofs_nbits, - dst, - type_=None, len_=None, experimenter=None, subtype=None, - ): - super(NXActionMultipath, self).__init__() - self.fields = fields - self.basis = basis - self.algorithm = algorithm - self.max_link = max_link - self.arg = arg - self.ofs_nbits = ofs_nbits - self.dst = dst - - @classmethod - def parser(cls, buf): - ( - fields, - basis, - algorithm, - max_link, - arg, - ofs_nbits, - oxm_data, - ) = struct.unpack_from( - cls._fmt_str, buf, 0, - ) - (n, len_) = ofp.oxm_parse_header(oxm_data, 0) - dst = ofp.oxm_to_user_header(n) - return cls( - fields, - basis, - algorithm, - max_link, - arg, - ofs_nbits, - dst, - ) - - def serialize_body(self): - data = 
bytearray() - dst = bytearray() - oxm = ofp.oxm_from_user_header(self.dst) - ofp.oxm_serialize_header(oxm, dst, 0), - msg_pack_into( - self._fmt_str, data, 0, - self.fields, - self.basis, - self.algorithm, - self.max_link, - self.arg, - self.ofs_nbits, - six.binary_type(dst), - ) - - return data - - class _NXActionBundleBase(NXAction): - # algorithm, fields, basis, slave_type, n_slaves - # ofs_nbits - _fmt_str = '!HHHIHH' - - def __init__( - self, algorithm, fields, basis, slave_type, n_slaves, - ofs_nbits, dst, slaves, - ): - super(_NXActionBundleBase, self).__init__() - self.len = utils.round_up( - nicira_ext.NX_ACTION_BUNDLE_0_SIZE + len(slaves) * 2, 8, - ) - - self.algorithm = algorithm - self.fields = fields - self.basis = basis - self.slave_type = slave_type - self.n_slaves = n_slaves - self.ofs_nbits = ofs_nbits - self.dst = dst - - assert isinstance(slaves, (list, tuple)) - for s in slaves: - assert isinstance(s, six.integer_types) - - self.slaves = slaves - - @classmethod - def parser(cls, buf): - # Add dst ('I') to _fmt_str - ( - algorithm, fields, basis, - slave_type, n_slaves, ofs_nbits, dst, - ) = struct.unpack_from( - cls._fmt_str + 'I', buf, 0, - ) - - offset = ( - nicira_ext.NX_ACTION_BUNDLE_0_SIZE - - nicira_ext.NX_ACTION_HEADER_0_SIZE - 8 - ) - - if dst != 0: - (n, len_) = ofp.oxm_parse_header(buf, offset) - dst = ofp.oxm_to_user_header(n) - - slave_offset = ( - nicira_ext.NX_ACTION_BUNDLE_0_SIZE - - nicira_ext.NX_ACTION_HEADER_0_SIZE - ) - - slaves = [] - for i in range(0, n_slaves): - s = struct.unpack_from('!H', buf, slave_offset) - slaves.append(s[0]) - slave_offset += 2 - - return cls( - algorithm, fields, basis, slave_type, - n_slaves, ofs_nbits, dst, slaves, - ) - - def serialize_body(self): - data = bytearray() - slave_offset = ( - nicira_ext.NX_ACTION_BUNDLE_0_SIZE - - nicira_ext.NX_ACTION_HEADER_0_SIZE - ) - self.n_slaves = len(self.slaves) - for s in self.slaves: - msg_pack_into('!H', data, slave_offset, s) - slave_offset += 2 - pad_len 
= ( - utils.round_up(self.n_slaves, 4) - - self.n_slaves - ) - - if pad_len != 0: - msg_pack_into('%dx' % pad_len * 2, data, slave_offset) - - msg_pack_into( - self._fmt_str, data, 0, - self.algorithm, self.fields, self.basis, - self.slave_type, self.n_slaves, - self.ofs_nbits, - ) - offset = ( - nicira_ext.NX_ACTION_BUNDLE_0_SIZE - - nicira_ext.NX_ACTION_HEADER_0_SIZE - 8 - ) - - if self.dst == 0: - msg_pack_into('I', data, offset, self.dst) - else: - oxm_data = ofp.oxm_from_user_header(self.dst) - ofp.oxm_serialize_header(oxm_data, data, offset) - return data - - class NXActionBundle(_NXActionBundleBase): - r""" - Select bundle link action - - This action selects bundle link based on the specified parameters. - Please refer to the ovs-ofctl command manual for details. - - And equivalent to the followings action of ovs-ofctl command. - - .. - bundle(fields, basis, algorithm, slave_type, slaves:[ s1, s2,...]) - .. - - +-----------------------------------------------------------+ - | **bundle(**\ *fields*\, \ *basis*\, \ *algorithm*\, | - | *slave_type*\, \ *slaves*\:[ \ *s1*\, \ *s2*\,...]\ **)** | - +-----------------------------------------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - algorithm One of NX_MP_ALG_*. - fields One of NX_HASH_FIELDS_* - basis Universal hash parameter - slave_type Type of slaves(must be NXM_OF_IN_PORT) - n_slaves Number of slaves - ofs_nbits Start and End for the OXM/NXM field. 
(must be zero) - dst OXM/NXM header for source field(must be zero) - slaves List of slaves - ================ ====================================================== - - - Example:: - - actions += [parser.NXActionBundle( - algorithm=nicira_ext.NX_MP_ALG_HRW, - fields=nicira_ext.NX_HASH_FIELDS_ETH_SRC, - basis=0, - slave_type=nicira_ext.NXM_OF_IN_PORT, - n_slaves=2, - ofs_nbits=0, - dst=0, - slaves=[2, 3])] - """ - _subtype = nicira_ext.NXAST_BUNDLE - - def __init__( - self, algorithm, fields, basis, slave_type, n_slaves, - ofs_nbits, dst, slaves, - ): - # NXAST_BUNDLE actions should have 'sofs_nbits' and 'dst' zeroed. - super(NXActionBundle, self).__init__( - algorithm, fields, basis, slave_type, n_slaves, - ofs_nbits=0, dst=0, slaves=slaves, - ) - - class NXActionBundleLoad(_NXActionBundleBase): - r""" - Select bundle link action - - This action has the same behavior as the bundle action, - with one exception. - Please refer to the ovs-ofctl command manual for details. - - And equivalent to the followings action of ovs-ofctl command. - - .. - bundle_load(fields, basis, algorithm, slave_type, - dst[start..end], slaves:[ s1, s2,...]) - .. - - +-----------------------------------------------------------+ - | **bundle_load(**\ *fields*\, \ *basis*\, \ *algorithm*\, | - | *slave_type*\, \ *dst*\[\ *start*\... \*emd*\], | - | \ *slaves*\:[ \ *s1*\, \ *s2*\,...]\ **)** | | - +-----------------------------------------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - algorithm One of NX_MP_ALG_*. - fields One of NX_HASH_FIELDS_* - basis Universal hash parameter - slave_type Type of slaves(must be NXM_OF_IN_PORT) - n_slaves Number of slaves - ofs_nbits Start and End for the OXM/NXM field. 
- Setting method refer to the ``nicira_ext.ofs_nbits`` - dst OXM/NXM header for source field - slaves List of slaves - ================ ====================================================== - - - Example:: - - actions += [parser.NXActionBundleLoad( - algorithm=nicira_ext.NX_MP_ALG_HRW, - fields=nicira_ext.NX_HASH_FIELDS_ETH_SRC, - basis=0, - slave_type=nicira_ext.NXM_OF_IN_PORT, - n_slaves=2, - ofs_nbits=nicira_ext.ofs_nbits(4, 31), - dst="reg0", - slaves=[2, 3])] - """ - _subtype = nicira_ext.NXAST_BUNDLE_LOAD - _TYPE = { - 'ascii': [ - 'dst', - ], - } - - def __init__( - self, algorithm, fields, basis, slave_type, n_slaves, - ofs_nbits, dst, slaves, - ): - super(NXActionBundleLoad, self).__init__( - algorithm, fields, basis, slave_type, n_slaves, - ofs_nbits, dst, slaves, - ) - - class NXActionCT(NXAction): - r""" - Pass traffic to the connection tracker action - - This action sends the packet through the connection tracker. - - And equivalent to the followings action of ovs-ofctl command. - - .. - ct(argument[,argument]...) - .. - - +------------------------------------------------+ - | **ct(**\ *argument*\[,\ *argument*\]...\ **)** | - +------------------------------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - flags Zero or more(Unspecified flag bits must be zero.) - zone_src OXM/NXM header for source field - zone_ofs_nbits Start and End for the OXM/NXM field. - Setting method refer to the ``nicira_ext.ofs_nbits``. - If you need set the Immediate value for zone, - zone_src must be set to None or empty character string. - recirc_table Recirculate to a specific table - alg Well-known port number for the protocol - actions Zero or more actions may immediately follow this - action - ================ ====================================================== - - .. 
NOTE:: - - If you set number to zone_src, - Traceback occurs when you run the to_jsondict. - - Example:: - - match = parser.OFPMatch(eth_type=0x0800, ct_state=(0,32)) - actions += [parser.NXActionCT( - flags = 1, - zone_src = "reg0", - zone_ofs_nbits = nicira_ext.ofs_nbits(4, 31), - recirc_table = 4, - alg = 0, - actions = [])] - """ - _subtype = nicira_ext.NXAST_CT - - # flags, zone_src, zone_ofs_nbits, recirc_table, - # pad, alg - _fmt_str = '!H4sHB3xH' - _TYPE = { - 'ascii': [ - 'zone_src', - ], - } - - # Followed by actions - - def __init__( - self, - flags, - zone_src, - zone_ofs_nbits, - recirc_table, - alg, - actions, - type_=None, len_=None, experimenter=None, subtype=None, - ): - super(NXActionCT, self).__init__() - self.flags = flags - self.zone_src = zone_src - self.zone_ofs_nbits = zone_ofs_nbits - self.recirc_table = recirc_table - self.alg = alg - self.actions = actions - - @classmethod - def parser(cls, buf): - ( - flags, - oxm_data, - zone_ofs_nbits, - recirc_table, - alg, - ) = struct.unpack_from( - cls._fmt_str, buf, 0, - ) - rest = buf[struct.calcsize(cls._fmt_str):] - - # OXM/NXM field - if oxm_data == b'\x00' * 4: - zone_src = "" - else: - (n, len_) = ofp.oxm_parse_header(oxm_data, 0) - zone_src = ofp.oxm_to_user_header(n) - - # actions - actions = [] - while len(rest) > 0: - action = ofpp.OFPAction.parser(rest, 0) - actions.append(action) - rest = rest[action.len:] - - return cls( - flags, zone_src, zone_ofs_nbits, recirc_table, - alg, actions, - ) - - def serialize_body(self): - data = bytearray() - # If zone_src is zero, zone_ofs_nbits is zone_imm - if not self.zone_src: - zone_src = b'\x00' * 4 - elif isinstance(self.zone_src, six.integer_types): - zone_src = struct.pack("!I", self.zone_src) - else: - zone_src = bytearray() - oxm = ofp.oxm_from_user_header(self.zone_src) - ofp.oxm_serialize_header(oxm, zone_src, 0) - - msg_pack_into( - self._fmt_str, data, 0, - self.flags, - six.binary_type(zone_src), - self.zone_ofs_nbits, - 
self.recirc_table, - self.alg, - ) - for a in self.actions: - a.serialize(data, len(data)) - return data - - class NXActionCTClear(NXAction): - """ - Clear connection tracking state action - - This action clears connection tracking state from packets. - - And equivalent to the followings action of ovs-ofctl command. - - .. - ct_clear - .. - - +--------------+ - | **ct_clear** | - +--------------+ - - Example:: - - actions += [parser.NXActionCTClear()] - """ - _subtype = nicira_ext.NXAST_CT_CLEAR - - _fmt_str = '!6x' - - def __init__( - self, - type_=None, len_=None, experimenter=None, subtype=None, - ): - super(NXActionCTClear, self).__init__() - - @classmethod - def parser(cls, buf): - return cls() - - def serialize_body(self): - data = bytearray() - msg_pack_into(self._fmt_str, data, 0) - return data - - class NXActionNAT(NXAction): - r""" - Network address translation action - - This action sends the packet through the connection tracker. - - And equivalent to the followings action of ovs-ofctl command. - - .. NOTE:: - The following command image does not exist in ovs-ofctl command - manual and has been created from the command response. - - .. - nat(src=ip_min-ip_max : proto_min-proto-max) - .. - - +--------------------------------------------------+ - | **nat(src**\=\ *ip_min*\ **-**\ *ip_max*\ **:** | - | *proto_min*\ **-**\ *proto-max*\ **)** | - +--------------------------------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - flags Zero or more(Unspecified flag bits must be zero.) - range_ipv4_min Range ipv4 address minimun - range_ipv4_max Range ipv4 address maximun - range_ipv6_min Range ipv6 address minimun - range_ipv6_max Range ipv6 address maximun - range_proto_min Range protocol minimum - range_proto_max Range protocol maximun - ================ ====================================================== - - .. 
CAUTION:: - ``NXActionNAT`` must be defined in the actions in the - ``NXActionCT``. - - Example:: - - match = parser.OFPMatch(eth_type=0x0800) - actions += [ - parser.NXActionCT( - flags = 1, - zone_src = "reg0", - zone_ofs_nbits = nicira_ext.ofs_nbits(4, 31), - recirc_table = 255, - alg = 0, - actions = [ - parser.NXActionNAT( - flags = 1, - range_ipv4_min = "10.1.12.0", - range_ipv4_max = "10.1.13.255", - range_ipv6_min = "", - range_ipv6_max = "", - range_proto_min = 1, - range_proto_max = 1023 - ) - ] - ) - ] - """ - _subtype = nicira_ext.NXAST_NAT - - # pad, flags, range_present - _fmt_str = '!2xHH' - # Followed by optional parameters - - _TYPE = { - 'ascii': [ - 'range_ipv4_max', - 'range_ipv4_min', - 'range_ipv6_max', - 'range_ipv6_min', - ], - } - - def __init__( - self, - flags, - range_ipv4_min='', - range_ipv4_max='', - range_ipv6_min='', - range_ipv6_max='', - range_proto_min=None, - range_proto_max=None, - type_=None, len_=None, experimenter=None, subtype=None, - ): - super(NXActionNAT, self).__init__() - self.flags = flags - self.range_ipv4_min = range_ipv4_min - self.range_ipv4_max = range_ipv4_max - self.range_ipv6_min = range_ipv6_min - self.range_ipv6_max = range_ipv6_max - self.range_proto_min = range_proto_min - self.range_proto_max = range_proto_max - - @classmethod - def parser(cls, buf): - ( - flags, - range_present, - ) = struct.unpack_from( - cls._fmt_str, buf, 0, - ) - rest = buf[struct.calcsize(cls._fmt_str):] - # optional parameters - kwargs = dict() - if range_present & nicira_ext.NX_NAT_RANGE_IPV4_MIN: - kwargs['range_ipv4_min'] = type_desc.IPv4Addr.to_user(rest[:4]) - rest = rest[4:] - if range_present & nicira_ext.NX_NAT_RANGE_IPV4_MAX: - kwargs['range_ipv4_max'] = type_desc.IPv4Addr.to_user(rest[:4]) - rest = rest[4:] - if range_present & nicira_ext.NX_NAT_RANGE_IPV6_MIN: - kwargs['range_ipv6_min'] = ( - type_desc.IPv6Addr.to_user(rest[:16]) - ) - rest = rest[16:] - if range_present & nicira_ext.NX_NAT_RANGE_IPV6_MAX: - 
kwargs['range_ipv6_max'] = ( - type_desc.IPv6Addr.to_user(rest[:16]) - ) - rest = rest[16:] - if range_present & nicira_ext.NX_NAT_RANGE_PROTO_MIN: - kwargs['range_proto_min'] = type_desc.Int2.to_user(rest[:2]) - rest = rest[2:] - if range_present & nicira_ext.NX_NAT_RANGE_PROTO_MAX: - kwargs['range_proto_max'] = type_desc.Int2.to_user(rest[:2]) - - return cls(flags, **kwargs) - - def serialize_body(self): - # Pack optional parameters first, as range_present needs - # to be calculated. - optional_data = b'' - range_present = 0 - if self.range_ipv4_min != '': - range_present |= nicira_ext.NX_NAT_RANGE_IPV4_MIN - optional_data += type_desc.IPv4Addr.from_user( - self.range_ipv4_min, - ) - if self.range_ipv4_max != '': - range_present |= nicira_ext.NX_NAT_RANGE_IPV4_MAX - optional_data += type_desc.IPv4Addr.from_user( - self.range_ipv4_max, - ) - if self.range_ipv6_min != '': - range_present |= nicira_ext.NX_NAT_RANGE_IPV6_MIN - optional_data += type_desc.IPv6Addr.from_user( - self.range_ipv6_min, - ) - if self.range_ipv6_max != '': - range_present |= nicira_ext.NX_NAT_RANGE_IPV6_MAX - optional_data += type_desc.IPv6Addr.from_user( - self.range_ipv6_max, - ) - if self.range_proto_min is not None: - range_present |= nicira_ext.NX_NAT_RANGE_PROTO_MIN - optional_data += type_desc.Int2.from_user( - self.range_proto_min, - ) - if self.range_proto_max is not None: - range_present |= nicira_ext.NX_NAT_RANGE_PROTO_MAX - optional_data += type_desc.Int2.from_user( - self.range_proto_max, - ) - - data = bytearray() - msg_pack_into( - self._fmt_str, data, 0, - self.flags, - range_present, - ) - msg_pack_into( - '!%ds' % len(optional_data), data, len(data), - optional_data, - ) - - return data - - class NXActionOutputTrunc(NXAction): - r""" - Truncate output action - - This action truncate a packet into the specified size and outputs it. - - And equivalent to the followings action of ovs-ofctl command. - - .. - output(port=port,max_len=max_len) - .. 
- - +--------------------------------------------------------------+ - | **output(port**\=\ *port*\,\ **max_len**\=\ *max_len*\ **)** | - +--------------------------------------------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - port Output port - max_len Max bytes to send - ================ ====================================================== - - Example:: - - actions += [parser.NXActionOutputTrunc(port=8080, - max_len=1024)] - """ - _subtype = nicira_ext.NXAST_OUTPUT_TRUNC - - # port, max_len - _fmt_str = '!HI' - - def __init__( - self, - port, - max_len, - type_=None, len_=None, experimenter=None, subtype=None, - ): - super(NXActionOutputTrunc, self).__init__() - self.port = port - self.max_len = max_len - - @classmethod - def parser(cls, buf): - ( - port, - max_len, - ) = struct.unpack_from( - cls._fmt_str, buf, 0, - ) - return cls(port, max_len) - - def serialize_body(self): - data = bytearray() - msg_pack_into( - self._fmt_str, data, 0, - self.port, - self.max_len, - ) - return data - - class NXActionEncapEther(NXAction): - """ - Encap Ether - - This action encaps package with ethernet - - And equivalent to the followings action of ovs-ofctl command. 
- - :: - - encap(ethernet) - - Example:: - - actions += [parser.NXActionEncapEther()] - """ - _subtype = nicira_ext.NXAST_RAW_ENCAP - - _fmt_str = '!HI' - - def __init__( - self, - type_=None, len_=None, vendor=None, subtype=None, - ): - super(NXActionEncapEther, self).__init__() - self.hdr_size = 0 - self.new_pkt_type = 0x00000000 - - @classmethod - def parser(cls, buf): - return cls() - - def serialize_body(self): - data = bytearray() - msg_pack_into(self._fmt_str, data, 0, self.hdr_size, self.new_pkt_type) - return data - - class NXActionEncapNsh(NXAction): - """ - Encap nsh - - This action encaps package with nsh - - And equivalent to the followings action of ovs-ofctl command. - - :: - - encap(nsh(md_type=1)) - - Example:: - - actions += [parser.NXActionEncapNsh()] - """ - _subtype = nicira_ext.NXAST_RAW_ENCAP - - _fmt_str = '!HI' - - def __init__( - self, - type_=None, len_=None, vendor=None, subtype=None, - ): - super(NXActionEncapNsh, self).__init__() - self.hdr_size = hdr_size - self.new_pkt_type = 0x0001894F - - @classmethod - def parser(cls, buf): - return cls() - - def serialize_body(self): - data = bytearray() - msg_pack_into(self._fmt_str, data, 0, self.hdr_size, self.new_pkt_type) - return data - - class NXActionDecNshTtl(NXAction): - """ - Decrement NSH TTL action - - This action decrements the TTL in the Network Service Header(NSH). - - This action was added in OVS v2.9. - - And equivalent to the followings action of ovs-ofctl command. 
- - :: - - dec_nsh_ttl - - Example:: - - actions += [parser.NXActionDecNshTtl()] - """ - _subtype = nicira_ext.NXAST_DEC_NSH_TTL - - _fmt_str = '!6x' - - def __init__( - self, - type_=None, len_=None, vendor=None, subtype=None, - ): - super(NXActionDecNshTtl, self).__init__() - - @classmethod - def parser(cls, buf): - return cls() - - def serialize_body(self): - data = bytearray() - msg_pack_into(self._fmt_str, data, 0) - return data - - def add_attr(k, v): - v.__module__ = ofpp.__name__ # Necessary for stringify stuff - setattr(ofpp, k, v) - - add_attr('NXAction', NXAction) - add_attr('NXActionUnknown', NXActionUnknown) - - classes = [ - 'NXActionSetQueue', - 'NXActionPopQueue', - 'NXActionRegLoad', - 'NXActionRegLoad2', - 'NXActionNote', - 'NXActionSetTunnel', - 'NXActionSetTunnel64', - 'NXActionRegMove', - 'NXActionResubmit', - 'NXActionResubmitTable', - 'NXActionOutputReg', - 'NXActionOutputReg2', - 'NXActionLearn', - 'NXActionExit', - 'NXActionDecTtl', - 'NXActionController', - 'NXActionController2', - 'NXActionDecTtlCntIds', - 'NXActionPushMpls', - 'NXActionPopMpls', - 'NXActionSetMplsTtl', - 'NXActionDecMplsTtl', - 'NXActionSetMplsLabel', - 'NXActionSetMplsTc', - 'NXActionStackPush', - 'NXActionStackPop', - 'NXActionSample', - 'NXActionSample2', - 'NXActionFinTimeout', - 'NXActionConjunction', - 'NXActionMultipath', - 'NXActionBundle', - 'NXActionBundleLoad', - 'NXActionCT', - 'NXActionCTClear', - 'NXActionNAT', - 'NXActionOutputTrunc', - '_NXFlowSpec', # exported for testing - 'NXFlowSpecMatch', - 'NXFlowSpecLoad', - 'NXFlowSpecOutput', - 'NXActionEncapNsh', - 'NXActionEncapEther', - 'NXActionDecNshTtl', - ] - vars = locals() - for name in classes: - cls = vars[name] - add_attr(name, cls) - if issubclass(cls, NXAction): - NXAction.register(cls) - if issubclass(cls, _NXFlowSpec): - _NXFlowSpec.register(cls) diff --git a/lte/gateway/deploy/roles/magma/files/nx_actions_3.5.py b/lte/gateway/deploy/roles/magma/files/nx_actions_3.5.py deleted file mode 100644 
index 527078c71807..000000000000 --- a/lte/gateway/deploy/roles/magma/files/nx_actions_3.5.py +++ /dev/null @@ -1,3373 +0,0 @@ -# Copyright (C) 2015 Nippon Telegraph and Telephone Corporation. -# Copyright (C) 2015 YAMAMOTO Takashi -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import struct - -import six -from ryu import utils -from ryu.lib import type_desc -from ryu.lib.pack_utils import msg_pack_into -from ryu.ofproto import nicira_ext, ofproto_common -from ryu.ofproto.ofproto_parser import StringifyMixin - - -def generate(ofp_name, ofpp_name): - import sys - - ofp = sys.modules[ofp_name] - ofpp = sys.modules[ofpp_name] - - class _NXFlowSpec(StringifyMixin): - _hdr_fmt_str = '!H' # 2 bit 0s, 1 bit src, 2 bit dst, 11 bit n_bits - _dst_type = None - _subclasses = {} - _TYPE = { - 'nx-flow-spec-field': [ - 'src', - 'dst', - ], - } - - def __init__(self, src, dst, n_bits): - self.src = src - self.dst = dst - self.n_bits = n_bits - - @classmethod - def register(cls, subcls): - assert issubclass(subcls, cls) - assert subcls._dst_type not in cls._subclasses - cls._subclasses[subcls._dst_type] = subcls - - @classmethod - def parse(cls, buf): - (hdr,) = struct.unpack_from(cls._hdr_fmt_str, buf, 0) - rest = buf[struct.calcsize(cls._hdr_fmt_str):] - if hdr == 0: - return None, rest # all-0 header is no-op for padding - src_type = (hdr >> 13) & 0x1 - dst_type = (hdr >> 11) & 0x3 - n_bits = hdr & 0x3ff - subcls = cls._subclasses[dst_type] - if src_type == 0: # subfield - 
src = cls._parse_subfield(rest) - rest = rest[6:] - elif src_type == 1: # immediate - src_len = (n_bits + 15) // 16 * 2 - src_bin = rest[:src_len] - src = type_desc.IntDescr(size=src_len).to_user(src_bin) - rest = rest[src_len:] - if dst_type == 0: # match - dst = cls._parse_subfield(rest) - rest = rest[6:] - elif dst_type == 1: # load - dst = cls._parse_subfield(rest) - rest = rest[6:] - elif dst_type == 2: # output - dst = '' # empty - return subcls(src=src, dst=dst, n_bits=n_bits), rest - - def serialize(self): - buf = bytearray() - if isinstance(self.src, tuple): - src_type = 0 # subfield - else: - src_type = 1 # immediate - # header - val = (src_type << 13) | (self._dst_type << 11) | self.n_bits - msg_pack_into(self._hdr_fmt_str, buf, 0, val) - # src - if src_type == 0: # subfield - buf += self._serialize_subfield(self.src) - elif src_type == 1: # immediate - src_len = (self.n_bits + 15) // 16 * 2 - buf += type_desc.IntDescr(size=src_len).from_user(self.src) - # dst - if self._dst_type == 0: # match - buf += self._serialize_subfield(self.dst) - elif self._dst_type == 1: # load - buf += self._serialize_subfield(self.dst) - elif self._dst_type == 2: # output - pass # empty - return buf - - @staticmethod - def _parse_subfield(buf): - (n, len) = ofp.oxm_parse_header(buf, 0) - assert len == 4 # only 4-bytes NXM/OXM are defined - field = ofp.oxm_to_user_header(n) - rest = buf[len:] - (ofs,) = struct.unpack_from('!H', rest, 0) - return (field, ofs) - - @staticmethod - def _serialize_subfield(subfield): - (field, ofs) = subfield - buf = bytearray() - n = ofp.oxm_from_user_header(field) - ofp.oxm_serialize_header(n, buf, 0) - assert len(buf) == 4 # only 4-bytes NXM/OXM are defined - msg_pack_into('!H', buf, 4, ofs) - return buf - - class NXFlowSpecMatch(_NXFlowSpec): - """ - Specification for adding match criterion - - This class is used by ``NXActionLearn``. - - For the usage of this class, please refer to ``NXActionLearn``. 
- - ================ ====================================================== - Attribute Description - ================ ====================================================== - src OXM/NXM header and Start bit for source field - dst OXM/NXM header and Start bit for destination field - n_bits The number of bits from the start bit - ================ ====================================================== - """ - # Add a match criteria - # an example of the corresponding ovs-ofctl syntax: - # NXM_OF_VLAN_TCI[0..11] - _dst_type = 0 - - class NXFlowSpecLoad(_NXFlowSpec): - """ - Add NXAST_REG_LOAD actions - - This class is used by ``NXActionLearn``. - - For the usage of this class, please refer to ``NXActionLearn``. - - ================ ====================================================== - Attribute Description - ================ ====================================================== - src OXM/NXM header and Start bit for source field - dst OXM/NXM header and Start bit for destination field - n_bits The number of bits from the start bit - ================ ====================================================== - """ - # Add NXAST_REG_LOAD actions - # an example of the corresponding ovs-ofctl syntax: - # NXM_OF_ETH_DST[]=NXM_OF_ETH_SRC[] - _dst_type = 1 - - class NXFlowSpecOutput(_NXFlowSpec): - """ - Add an OFPAT_OUTPUT action - - This class is used by ``NXActionLearn``. - - For the usage of this class, please refer to ``NXActionLearn``. 
- - ================ ====================================================== - Attribute Description - ================ ====================================================== - src OXM/NXM header and Start bit for source field - dst Must be '' - n_bits The number of bits from the start bit - ================ ====================================================== - """ - # Add an OFPAT_OUTPUT action - # an example of the corresponding ovs-ofctl syntax: - # output:NXM_OF_IN_PORT[] - _dst_type = 2 - - def __init__(self, src, n_bits, dst=''): - assert dst == '' - super(NXFlowSpecOutput, self).__init__( - src=src, dst=dst, - n_bits=n_bits, - ) - - class NXAction(ofpp.OFPActionExperimenter): - _fmt_str = '!H' # subtype - _subtypes = {} - _experimenter = ofproto_common.NX_EXPERIMENTER_ID - - def __init__(self): - super(NXAction, self).__init__(self._experimenter) - self.subtype = self._subtype - - @classmethod - def parse(cls, buf): - fmt_str = NXAction._fmt_str - (subtype,) = struct.unpack_from(fmt_str, buf, 0) - subtype_cls = cls._subtypes.get(subtype) - rest = buf[struct.calcsize(fmt_str):] - if subtype_cls is None: - return NXActionUnknown(subtype, rest) - return subtype_cls.parser(rest) - - def serialize(self, buf, offset): - data = self.serialize_body() - payload_offset = ( - ofp.OFP_ACTION_EXPERIMENTER_HEADER_SIZE + - struct.calcsize(NXAction._fmt_str) - ) - self.len = utils.round_up(payload_offset + len(data), 8) - super(NXAction, self).serialize(buf, offset) - msg_pack_into( - NXAction._fmt_str, - buf, - offset + ofp.OFP_ACTION_EXPERIMENTER_HEADER_SIZE, - self.subtype, - ) - buf += data - - @classmethod - def register(cls, subtype_cls): - assert subtype_cls._subtype is not cls._subtypes - cls._subtypes[subtype_cls._subtype] = subtype_cls - - class NXActionUnknown(NXAction): - def __init__( - self, subtype, data=None, - type_=None, len_=None, experimenter=None, - ): - self._subtype = subtype - super(NXActionUnknown, self).__init__() - self.data = data - - 
@classmethod - def parser(cls, buf): - return cls(data=buf) - - def serialize_body(self): - # fixup - return bytearray() if self.data is None else self.data - - # For OpenFlow1.0 only - class NXActionSetQueue(NXAction): - r""" - Set queue action - - This action sets the queue that should be used to queue - when packets are output. - - And equivalent to the followings action of ovs-ofctl command. - - .. - set_queue:queue - .. - - +-------------------------+ - | **set_queue**\:\ *queue*| - +-------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - queue_id Queue ID for the packets - ================ ====================================================== - - .. note:: - This actions is supported by - ``OFPActionSetQueue`` - in OpenFlow1.2 or later. - - Example:: - - actions += [parser.NXActionSetQueue(queue_id=10)] - """ - _subtype = nicira_ext.NXAST_SET_QUEUE - - # queue_id - _fmt_str = '!2xI' - - def __init__( - self, queue_id, - type_=None, len_=None, vendor=None, subtype=None, - ): - super(NXActionSetQueue, self).__init__() - self.queue_id = queue_id - - @classmethod - def parser(cls, buf): - (queue_id,) = struct.unpack_from(cls._fmt_str, buf, 0) - return cls(queue_id) - - def serialize_body(self): - data = bytearray() - msg_pack_into(self._fmt_str, data, 0, self.queue_id) - return data - - class NXActionPopQueue(NXAction): - """ - Pop queue action - - This action restors the queue to the value it was before any - set_queue actions were applied. - - And equivalent to the followings action of ovs-ofctl command. - - .. - pop_queue - .. 
- - +---------------+ - | **pop_queue** | - +---------------+ - - Example:: - - actions += [parser.NXActionPopQueue()] - """ - _subtype = nicira_ext.NXAST_POP_QUEUE - - _fmt_str = '!6x' - - def __init__( - self, - type_=None, len_=None, experimenter=None, subtype=None, - ): - super(NXActionPopQueue, self).__init__() - - @classmethod - def parser(cls, buf): - return cls() - - def serialize_body(self): - data = bytearray() - msg_pack_into(self._fmt_str, data, 0) - return data - - class NXActionRegLoad(NXAction): - r""" - Load literal value action - - This action loads a literal value into a field or part of a field. - - And equivalent to the followings action of ovs-ofctl command. - - .. - load:value->dst[start..end] - .. - - +-----------------------------------------------------------------+ - | **load**\:\ *value*\->\ *dst*\ **[**\ *start*\..\ *end*\ **]** | - +-----------------------------------------------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - ofs_nbits Start and End for the OXM/NXM field. 
- Setting method refer to the ``nicira_ext.ofs_nbits`` - dst OXM/NXM header for destination field - value OXM/NXM value to be loaded - ================ ====================================================== - - Example:: - - actions += [parser.NXActionRegLoad( - ofs_nbits=nicira_ext.ofs_nbits(4, 31), - dst="eth_dst", - value=0x112233)] - """ - _subtype = nicira_ext.NXAST_REG_LOAD - _fmt_str = '!HIQ' # ofs_nbits, dst, value - _TYPE = { - 'ascii': [ - 'dst', - ], - } - - def __init__( - self, ofs_nbits, dst, value, - type_=None, len_=None, experimenter=None, - subtype=None, - ): - super(NXActionRegLoad, self).__init__() - self.ofs_nbits = ofs_nbits - self.dst = dst - self.value = value - - @classmethod - def parser(cls, buf): - (ofs_nbits, dst, value) = struct.unpack_from( - cls._fmt_str, buf, 0, - ) - # Right-shift instead of using oxm_parse_header for simplicity... - dst_name = ofp.oxm_to_user_header(dst >> 9) - return cls(ofs_nbits, dst_name, value) - - def serialize_body(self): - hdr_data = bytearray() - n = ofp.oxm_from_user_header(self.dst) - ofp.oxm_serialize_header(n, hdr_data, 0) - (dst_num,) = struct.unpack_from('!I', six.binary_type(hdr_data), 0) - - data = bytearray() - msg_pack_into( - self._fmt_str, data, 0, - self.ofs_nbits, dst_num, self.value, - ) - return data - - class NXActionRegLoad2(NXAction): - r""" - Load literal value action - - This action loads a literal value into a field or part of a field. - - And equivalent to the followings action of ovs-ofctl command. - - .. - set_field:value[/mask]->dst - .. 
- - +------------------------------------------------------------+ - | **set_field**\:\ *value*\ **[**\/\ *mask*\ **]**\->\ *dst* | - +------------------------------------------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - value OXM/NXM value to be loaded - mask Mask for destination field - dst OXM/NXM header for destination field - ================ ====================================================== - - Example:: - - actions += [parser.NXActionRegLoad2(dst="tun_ipv4_src", - value="192.168.10.0", - mask="255.255.255.0")] - """ - _subtype = nicira_ext.NXAST_REG_LOAD2 - _TYPE = { - 'ascii': [ - 'dst', - 'value', - ], - } - - def __init__( - self, dst, value, mask=None, - type_=None, len_=None, experimenter=None, subtype=None, - ): - super(NXActionRegLoad2, self).__init__() - self.dst = dst - self.value = value - self.mask = mask - - @classmethod - def parser(cls, buf): - (n, uv, mask, _len) = ofp.oxm_parse(buf, 0) - dst, value = ofp.oxm_to_user(n, uv, mask) - - if isinstance(value, (tuple, list)): - return cls(dst, value[0], value[1]) - else: - return cls(dst, value, None) - - def serialize_body(self): - data = bytearray() - if self.mask is None: - value = self.value - else: - value = (self.value, self.mask) - self._TYPE['ascii'].append('mask') - - n, value, mask = ofp.oxm_from_user(self.dst, value) - len_ = ofp.oxm_serialize(n, value, mask, data, 0) - msg_pack_into("!%dx" % (14 - len_), data, len_) - - return data - - class NXActionNote(NXAction): - r""" - Note action - - This action does nothing at all. - - And equivalent to the followings action of ovs-ofctl command. - - .. - note:[hh].. - .. - - +-----------------------------------+ - | **note**\:\ **[**\ *hh*\ **]**\.. 
| - +-----------------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - note A list of integer type values - ================ ====================================================== - - Example:: - - actions += [parser.NXActionNote(note=[0xaa,0xbb,0xcc,0xdd])] - """ - _subtype = nicira_ext.NXAST_NOTE - - # note - _fmt_str = '!%dB' - - # set the integer array in a note - def __init__( - self, - note, - type_=None, len_=None, experimenter=None, subtype=None, - ): - super(NXActionNote, self).__init__() - self.note = note - - @classmethod - def parser(cls, buf): - note = struct.unpack_from( - cls._fmt_str % len(buf), buf, 0, - ) - return cls(list(note)) - - def serialize_body(self): - assert isinstance(self.note, (tuple, list)) - for n in self.note: - assert isinstance(n, six.integer_types) - - pad = (len(self.note) + nicira_ext.NX_ACTION_HEADER_0_SIZE) % 8 - if pad: - self.note += [0x0 for i in range(8 - pad)] - note_len = len(self.note) - data = bytearray() - msg_pack_into( - self._fmt_str % note_len, data, 0, - *self.note, - ) - return data - - class _NXActionSetTunnelBase(NXAction): - # _subtype, _fmt_str must be attributes of subclass. - - def __init__( - self, - tun_id, - type_=None, len_=None, experimenter=None, subtype=None, - ): - super(_NXActionSetTunnelBase, self).__init__() - self.tun_id = tun_id - - @classmethod - def parser(cls, buf): - (tun_id,) = struct.unpack_from( - cls._fmt_str, buf, 0, - ) - return cls(tun_id) - - def serialize_body(self): - data = bytearray() - msg_pack_into( - self._fmt_str, data, 0, - self.tun_id, - ) - return data - - class NXActionSetTunnel(_NXActionSetTunnelBase): - r""" - Set Tunnel action - - This action sets the identifier (such as GRE) to the specified id. - - And equivalent to the followings action of ovs-ofctl command. - - .. 
note:: - This actions is supported by - ``OFPActionSetField`` - in OpenFlow1.2 or later. - - .. - set_tunnel:id - .. - - +------------------------+ - | **set_tunnel**\:\ *id* | - +------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - tun_id Tunnel ID(32bits) - ================ ====================================================== - - Example:: - - actions += [parser.NXActionSetTunnel(tun_id=0xa)] - """ - _subtype = nicira_ext.NXAST_SET_TUNNEL - - # tun_id - _fmt_str = '!2xI' - - class NXActionSetTunnel64(_NXActionSetTunnelBase): - r""" - Set Tunnel action - - This action outputs to a port that encapsulates - the packet in a tunnel. - - And equivalent to the followings action of ovs-ofctl command. - - .. note:: - This actions is supported by - ``OFPActionSetField`` - in OpenFlow1.2 or later. - - .. - set_tunnel64:id - .. - - +--------------------------+ - | **set_tunnel64**\:\ *id* | - +--------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - tun_id Tunnel ID(64bits) - ================ ====================================================== - - Example:: - - actions += [parser.NXActionSetTunnel64(tun_id=0xa)] - """ - _subtype = nicira_ext.NXAST_SET_TUNNEL64 - - # tun_id - _fmt_str = '!6xQ' - - class NXActionRegMove(NXAction): - r""" - Move register action - - This action copies the src to dst. - - And equivalent to the followings action of ovs-ofctl command. - - .. - move:src[start..end]->dst[start..end] - .. 
- - +--------------------------------------------------------+ - | **move**\:\ *src*\ **[**\ *start*\..\ *end*\ **]**\->\ | - | *dst*\ **[**\ *start*\..\ *end* \ **]** | - +--------------------------------------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - src_field OXM/NXM header for source field - dst_field OXM/NXM header for destination field - n_bits Number of bits - src_ofs Starting bit offset in source - dst_ofs Starting bit offset in destination - ================ ====================================================== - - .. CAUTION:: - **src_start**\ and \ **src_end**\ difference and \ **dst_start**\ - and \ **dst_end**\ difference must be the same. - - Example:: - - actions += [parser.NXActionRegMove(src_field="reg0", - dst_field="reg1", - n_bits=5, - src_ofs=0 - dst_ofs=10)] - """ - _subtype = nicira_ext.NXAST_REG_MOVE - _fmt_str = '!HHH' # n_bits, src_ofs, dst_ofs - # Followed by OXM fields (src, dst) and padding to 8 bytes boundary - _TYPE = { - 'ascii': [ - 'src_field', - 'dst_field', - ], - } - - def __init__( - self, src_field, dst_field, n_bits, src_ofs=0, dst_ofs=0, - type_=None, len_=None, experimenter=None, subtype=None, - ): - super(NXActionRegMove, self).__init__() - self.n_bits = n_bits - self.src_ofs = src_ofs - self.dst_ofs = dst_ofs - self.src_field = src_field - self.dst_field = dst_field - - @classmethod - def parser(cls, buf): - (n_bits, src_ofs, dst_ofs) = struct.unpack_from( - cls._fmt_str, buf, 0, - ) - rest = buf[struct.calcsize(NXActionRegMove._fmt_str):] - - # src field - (n, len) = ofp.oxm_parse_header(rest, 0) - src_field = ofp.oxm_to_user_header(n) - rest = rest[len:] - # dst field - (n, len) = ofp.oxm_parse_header(rest, 0) - dst_field = ofp.oxm_to_user_header(n) - rest = rest[len:] - # ignore padding - return cls( - src_field, dst_field=dst_field, n_bits=n_bits, - src_ofs=src_ofs, 
dst_ofs=dst_ofs, - ) - - def serialize_body(self): - # fixup - data = bytearray() - msg_pack_into( - self._fmt_str, data, 0, - self.n_bits, self.src_ofs, self.dst_ofs, - ) - # src field - n = ofp.oxm_from_user_header(self.src_field) - ofp.oxm_serialize_header(n, data, len(data)) - # dst field - n = ofp.oxm_from_user_header(self.dst_field) - ofp.oxm_serialize_header(n, data, len(data)) - return data - - class NXActionResubmit(NXAction): - r""" - Resubmit action - - This action searches one of the switch's flow tables. - - And equivalent to the followings action of ovs-ofctl command. - - .. - resubmit:port - .. - - +------------------------+ - | **resubmit**\:\ *port* | - +------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - in_port New in_port for checking flow table - ================ ====================================================== - - Example:: - - actions += [parser.NXActionResubmit(in_port=8080)] - """ - _subtype = nicira_ext.NXAST_RESUBMIT - - # in_port - _fmt_str = '!H4x' - - def __init__( - self, - in_port=0xfff8, - type_=None, len_=None, experimenter=None, subtype=None, - ): - super(NXActionResubmit, self).__init__() - self.in_port = in_port - - @classmethod - def parser(cls, buf): - (in_port,) = struct.unpack_from( - cls._fmt_str, buf, 0, - ) - return cls(in_port) - - def serialize_body(self): - data = bytearray() - msg_pack_into( - self._fmt_str, data, 0, - self.in_port, - ) - return data - - class NXActionResubmitTable(NXAction): - r""" - Resubmit action - - This action searches one of the switch's flow tables. - - And equivalent to the followings action of ovs-ofctl command. - - .. - resubmit([port],[table]) - .. 
- - +------------------------------------------------+ - | **resubmit(**\[\ *port*\]\,[\ *table*\]\ **)** | - +------------------------------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - in_port New in_port for checking flow table - table_id Checking flow tables - ================ ====================================================== - - Example:: - - actions += [parser.NXActionResubmit(in_port=8080, - table_id=10)] - """ - _subtype = nicira_ext.NXAST_RESUBMIT_TABLE - - # in_port, table_id - _fmt_str = '!HB3x' - - def __init__( - self, - in_port=0xfff8, - table_id=0xff, - type_=None, len_=None, experimenter=None, subtype=None, - ): - super(NXActionResubmitTable, self).__init__() - self.in_port = in_port - self.table_id = table_id - - @classmethod - def parser(cls, buf): - ( - in_port, - table_id, - ) = struct.unpack_from( - cls._fmt_str, buf, 0, - ) - return cls(in_port, table_id) - - def serialize_body(self): - data = bytearray() - msg_pack_into( - self._fmt_str, data, 0, - self.in_port, self.table_id, - ) - return data - - class NXActionOutputReg(NXAction): - r""" - Add output action - - This action outputs the packet to the OpenFlow port number read from - src. - - And equivalent to the followings action of ovs-ofctl command. - - .. - output:src[start...end] - .. - - +-------------------------------------------------------+ - | **output**\:\ *src*\ **[**\ *start*\...\ *end*\ **]** | - +-------------------------------------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - ofs_nbits Start and End for the OXM/NXM field. 
- Setting method refer to the ``nicira_ext.ofs_nbits`` - src OXM/NXM header for source field - max_len Max length to send to controller - ================ ====================================================== - - Example:: - - actions += [parser.NXActionOutputReg( - ofs_nbits=nicira_ext.ofs_nbits(4, 31), - src="reg0", - max_len=1024)] - """ - _subtype = nicira_ext.NXAST_OUTPUT_REG - - # ofs_nbits, src, max_len - _fmt_str = '!H4sH6x' - _TYPE = { - 'ascii': [ - 'src', - ], - } - - def __init__( - self, - ofs_nbits, - src, - max_len, - type_=None, len_=None, experimenter=None, subtype=None, - ): - super(NXActionOutputReg, self).__init__() - self.ofs_nbits = ofs_nbits - self.src = src - self.max_len = max_len - - @classmethod - def parser(cls, buf): - (ofs_nbits, oxm_data, max_len) = struct.unpack_from( - cls._fmt_str, buf, 0, - ) - (n, len_) = ofp.oxm_parse_header(oxm_data, 0) - src = ofp.oxm_to_user_header(n) - return cls( - ofs_nbits, - src, - max_len, - ) - - def serialize_body(self): - data = bytearray() - src = bytearray() - oxm = ofp.oxm_from_user_header(self.src) - ofp.oxm_serialize_header(oxm, src, 0), - msg_pack_into( - self._fmt_str, data, 0, - self.ofs_nbits, - six.binary_type(src), - self.max_len, - ) - return data - - class NXActionOutputReg2(NXAction): - r""" - Add output action - - This action outputs the packet to the OpenFlow port number read from - src. - - And equivalent to the followings action of ovs-ofctl command. - - .. - output:src[start...end] - .. - - +-------------------------------------------------------+ - | **output**\:\ *src*\ **[**\ *start*\...\ *end*\ **]** | - +-------------------------------------------------------+ - - .. NOTE:: - Like the ``NXActionOutputReg`` but organized so - that there is room for a 64-bit experimenter OXM as 'src'. 
- - ================ ====================================================== - Attribute Description - ================ ====================================================== - ofs_nbits Start and End for the OXM/NXM field. - Setting method refer to the ``nicira_ext.ofs_nbits`` - src OXM/NXM header for source field - max_len Max length to send to controller - ================ ====================================================== - - Example:: - - actions += [parser.NXActionOutputReg2( - ofs_nbits=nicira_ext.ofs_nbits(4, 31), - src="reg0", - max_len=1024)] - """ - _subtype = nicira_ext.NXAST_OUTPUT_REG2 - - # ofs_nbits, src, max_len - _fmt_str = '!HH4s' - _TYPE = { - 'ascii': [ - 'src', - ], - } - - def __init__( - self, - ofs_nbits, - src, - max_len, - type_=None, len_=None, experimenter=None, subtype=None, - ): - super(NXActionOutputReg2, self).__init__() - self.ofs_nbits = ofs_nbits - self.src = src - self.max_len = max_len - - @classmethod - def parser(cls, buf): - ( - ofs_nbits, - max_len, - oxm_data, - ) = struct.unpack_from( - cls._fmt_str, buf, 0, - ) - (n, len_) = ofp.oxm_parse_header(oxm_data, 0) - src = ofp.oxm_to_user_header(n) - return cls( - ofs_nbits, - src, - max_len, - ) - - def serialize_body(self): - data = bytearray() - oxm_data = bytearray() - oxm = ofp.oxm_from_user_header(self.src) - ofp.oxm_serialize_header(oxm, oxm_data, 0), - msg_pack_into( - self._fmt_str, data, 0, - self.ofs_nbits, - self.max_len, - six.binary_type(oxm_data), - ) - offset = len(data) - msg_pack_into("!%dx" % (14 - offset), data, offset) - return data - - class NXActionLearn(NXAction): - r""" - Adds or modifies flow action - - This action adds or modifies a flow in OpenFlow table. - - And equivalent to the followings action of ovs-ofctl command. - - .. - learn(argument[,argument]...) - .. 
- - +---------------------------------------------------+ - | **learn(**\ *argument*\[,\ *argument*\]...\ **)** | - +---------------------------------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - table_id The table in which the new flow should be inserted - specs Adds a match criterion to the new flow - - Please use the - ``NXFlowSpecMatch`` - in order to set the following format - - .. - field=value - field[start..end]=src[start..end] - field[start..end] - .. - - | *field*\=\ *value* - | *field*\ **[**\ *start*\..\ *end*\ **]**\ =\ - *src*\ **[**\ *start*\..\ *end*\ **]** - | *field*\ **[**\ *start*\..\ *end*\ **]** - | - - Please use the - ``NXFlowSpecLoad`` - in order to set the following format - - .. - load:value->dst[start..end] - load:src[start..end]->dst[start..end] - .. - - | **load**\:\ *value*\ **->**\ *dst*\ - **[**\ *start*\..\ *end*\ **]** - | **load**\:\ *src*\ **[**\ *start*\..\ *end*\ - **] ->**\ *dst*\ **[**\ *start*\..\ *end*\ **]** - | - - Please use the - ``NXFlowSpecOutput`` - in order to set the following format - - .. - output:field[start..end] - .. - - | **output:**\ field\ **[**\ *start*\..\ *end*\ **]** - - idle_timeout Idle time before discarding(seconds) - hard_timeout Max time before discarding(seconds) - priority Priority level of flow entry - cookie Cookie for new flow - flags send_flow_rem - fin_idle_timeout Idle timeout after FIN(seconds) - fin_hard_timeout Hard timeout after FIN(seconds) - ================ ====================================================== - - .. CAUTION:: - The arguments specify the flow's match fields, actions, - and other properties, as follows. - At least one match criterion and one action argument - should ordinarily be specified. 
- - Example:: - - actions += [ - parser.NXActionLearn(able_id=10, - specs=[parser.NXFlowSpecMatch(src=0x800, - dst=('eth_type_nxm', 0), - n_bits=16), - parser.NXFlowSpecMatch(src=('reg1', 1), - dst=('reg2', 3), - n_bits=5), - parser.NXFlowSpecMatch(src=('reg3', 1), - dst=('reg3', 1), - n_bits=5), - parser.NXFlowSpecLoad(src=0, - dst=('reg4', 3), - n_bits=5), - parser.NXFlowSpecLoad(src=('reg5', 1), - dst=('reg6', 3), - n_bits=5), - parser.NXFlowSpecOutput(src=('reg7', 1), - dst="", - n_bits=5)], - idle_timeout=180, - hard_timeout=300, - priority=1, - cookie=0x64, - flags=ofproto.OFPFF_SEND_FLOW_REM, - fin_idle_timeout=180, - fin_hard_timeout=300)] - """ - _subtype = nicira_ext.NXAST_LEARN - - # idle_timeout, hard_timeout, priority, cookie, flags, - # table_id, pad, fin_idle_timeout, fin_hard_timeout - _fmt_str = '!HHHQHBxHH' - # Followed by flow_mod_specs - - def __init__( - self, - table_id, - specs, - idle_timeout=0, - hard_timeout=0, - priority=ofp.OFP_DEFAULT_PRIORITY, - cookie=0, - flags=0, - fin_idle_timeout=0, - fin_hard_timeout=0, - type_=None, len_=None, experimenter=None, subtype=None, - ): - super(NXActionLearn, self).__init__() - self.idle_timeout = idle_timeout - self.hard_timeout = hard_timeout - self.priority = priority - self.cookie = cookie - self.flags = flags - self.table_id = table_id - self.fin_idle_timeout = fin_idle_timeout - self.fin_hard_timeout = fin_hard_timeout - self.specs = specs - - @classmethod - def parser(cls, buf): - ( - idle_timeout, - hard_timeout, - priority, - cookie, - flags, - table_id, - fin_idle_timeout, - fin_hard_timeout, - ) = struct.unpack_from( - cls._fmt_str, buf, 0, - ) - rest = buf[struct.calcsize(cls._fmt_str):] - # specs - specs = [] - while len(rest) > 0: - spec, rest = _NXFlowSpec.parse(rest) - if spec is None: - continue - specs.append(spec) - return cls( - idle_timeout=idle_timeout, - hard_timeout=hard_timeout, - priority=priority, - cookie=cookie, - flags=flags, - table_id=table_id, - 
fin_idle_timeout=fin_idle_timeout, - fin_hard_timeout=fin_hard_timeout, - specs=specs, - ) - - def serialize_body(self): - # fixup - data = bytearray() - msg_pack_into( - self._fmt_str, data, 0, - self.idle_timeout, - self.hard_timeout, - self.priority, - self.cookie, - self.flags, - self.table_id, - self.fin_idle_timeout, - self.fin_hard_timeout, - ) - for spec in self.specs: - data += spec.serialize() - return data - - class NXActionExit(NXAction): - """ - Halt action - - This action causes OpenvSwitch to immediately halt - execution of further actions. - - And equivalent to the followings action of ovs-ofctl command. - - .. - exit - .. - - +----------+ - | **exit** | - +----------+ - - Example:: - - actions += [parser.NXActionExit()] - """ - _subtype = nicira_ext.NXAST_EXIT - - _fmt_str = '!6x' - - def __init__( - self, - type_=None, len_=None, experimenter=None, subtype=None, - ): - super(NXActionExit, self).__init__() - - @classmethod - def parser(cls, buf): - return cls() - - def serialize_body(self): - data = bytearray() - msg_pack_into(self._fmt_str, data, 0) - return data - - # For OpenFlow1.0 only - class NXActionDecTtl(NXAction): - """ - Decrement IP TTL action - - This action decrements TTL of IPv4 packet or - hop limit of IPv6 packet. - - And equivalent to the followings action of ovs-ofctl command. - - .. - dec_ttl - .. - - +-------------+ - | **dec_ttl** | - +-------------+ - - .. NOTE:: - This actions is supported by - ``OFPActionDecNwTtl`` - in OpenFlow1.2 or later. 
- - Example:: - - actions += [parser.NXActionDecTtl()] - """ - _subtype = nicira_ext.NXAST_DEC_TTL - - _fmt_str = '!6x' - - def __init__( - self, - type_=None, len_=None, vendor=None, subtype=None, - ): - super(NXActionDecTtl, self).__init__() - - @classmethod - def parser(cls, buf): - return cls() - - def serialize_body(self): - data = bytearray() - msg_pack_into(self._fmt_str, data, 0) - return data - - class NXActionController(NXAction): - r""" - Send packet in message action - - This action sends the packet to the OpenFlow controller as - a packet in message. - - And equivalent to the followings action of ovs-ofctl command. - - .. - controller(key=value...) - .. - - +----------------------------------------------+ - | **controller(**\ *key*\=\ *value*\...\ **)** | - +----------------------------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - max_len Max length to send to controller - controller_id Controller ID to send packet-in - reason Reason for sending the message - ================ ====================================================== - - Example:: - - actions += [ - parser.NXActionController(max_len=1024, - controller_id=1, - reason=ofproto.OFPR_INVALID_TTL)] - """ - _subtype = nicira_ext.NXAST_CONTROLLER - - # max_len, controller_id, reason - _fmt_str = '!HHBx' - - def __init__( - self, - max_len, - controller_id, - reason, - type_=None, len_=None, experimenter=None, subtype=None, - ): - super(NXActionController, self).__init__() - self.max_len = max_len - self.controller_id = controller_id - self.reason = reason - - @classmethod - def parser(cls, buf): - ( - max_len, - controller_id, - reason, - ) = struct.unpack_from( - cls._fmt_str, buf, - ) - return cls( - max_len, - controller_id, - reason, - ) - - def serialize_body(self): - data = bytearray() - msg_pack_into( - self._fmt_str, data, 0, - self.max_len, 
- self.controller_id, - self.reason, - ) - return data - - class NXActionController2(NXAction): - r""" - Send packet in message action - - This action sends the packet to the OpenFlow controller as - a packet in message. - - And equivalent to the followings action of ovs-ofctl command. - - .. - controller(key=value...) - .. - - +----------------------------------------------+ - | **controller(**\ *key*\=\ *value*\...\ **)** | - +----------------------------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - max_len Max length to send to controller - controller_id Controller ID to send packet-in - reason Reason for sending the message - userdata Additional data to the controller in the packet-in - message - pause Flag to pause pipeline to resume later - ================ ====================================================== - - Example:: - - actions += [ - parser.NXActionController(max_len=1024, - controller_id=1, - reason=ofproto.OFPR_INVALID_TTL, - userdata=[0xa,0xb,0xc], - pause=True)] - """ - _subtype = nicira_ext.NXAST_CONTROLLER2 - _fmt_str = '!6x' - _PACK_STR = '!HH' - - def __init__( - self, - type_=None, len_=None, vendor=None, subtype=None, - **kwargs - ): - super(NXActionController2, self).__init__() - - for arg in kwargs: - if arg in NXActionController2Prop._NAMES: - setattr(self, arg, kwargs[arg]) - - @classmethod - def parser(cls, buf): - cls_data = {} - offset = 6 - buf_len = len(buf) - while buf_len > offset: - (type_, length) = struct.unpack_from(cls._PACK_STR, buf, offset) - offset += 4 - try: - subcls = NXActionController2Prop._TYPES[type_] - except KeyError: - subcls = NXActionController2PropUnknown - data, size = subcls.parser_prop(buf[offset:], length - 4) - offset += size - cls_data[subcls._arg_name] = data - return cls(**cls_data) - - def serialize_body(self): - body = bytearray() - 
msg_pack_into(self._fmt_str, body, 0) - prop_list = [] - for arg in self.__dict__: - if arg in NXActionController2Prop._NAMES: - prop_list.append(( - NXActionController2Prop._NAMES[arg], - self.__dict__[arg], - )) - prop_list.sort(key=lambda x: x[0].type) - - for subcls, value in prop_list: - body += subcls.serialize_prop(value) - - return body - - class NXActionController2Prop(object): - _TYPES = {} - _NAMES = {} - - @classmethod - def register_type(cls, type_): - def _register_type(subcls): - subcls.type = type_ - NXActionController2Prop._TYPES[type_] = subcls - NXActionController2Prop._NAMES[subcls._arg_name] = subcls - return subcls - - return _register_type - - class NXActionController2PropUnknown(NXActionController2Prop): - - @classmethod - def parser_prop(cls, buf, length): - size = 4 - return buf, size - - @classmethod - def serialize_prop(cls, argment): - data = bytearray() - return data - - @NXActionController2Prop.register_type(nicira_ext.NXAC2PT_MAX_LEN) - class NXActionController2PropMaxLen(NXActionController2Prop): - # max_len - _fmt_str = "!H2x" - _arg_name = "max_len" - - @classmethod - def parser_prop(cls, buf, length): - size = 4 - (max_len,) = struct.unpack_from( - cls._fmt_str, buf, 0, - ) - return max_len, size - - @classmethod - def serialize_prop(cls, max_len): - data = bytearray() - msg_pack_into( - "!HHH2x", data, 0, - nicira_ext.NXAC2PT_MAX_LEN, - 8, - max_len, - ) - return data - - @NXActionController2Prop.register_type(nicira_ext.NXAC2PT_CONTROLLER_ID) - class NXActionController2PropControllerId(NXActionController2Prop): - # controller_id - _fmt_str = "!H2x" - _arg_name = "controller_id" - - @classmethod - def parser_prop(cls, buf, length): - size = 4 - (controller_id,) = struct.unpack_from( - cls._fmt_str, buf, 0, - ) - return controller_id, size - - @classmethod - def serialize_prop(cls, controller_id): - data = bytearray() - msg_pack_into( - "!HHH2x", data, 0, - nicira_ext.NXAC2PT_CONTROLLER_ID, - 8, - controller_id, - ) - return data 
- - @NXActionController2Prop.register_type(nicira_ext.NXAC2PT_REASON) - class NXActionController2PropReason(NXActionController2Prop): - # reason - _fmt_str = "!B3x" - _arg_name = "reason" - - @classmethod - def parser_prop(cls, buf, length): - size = 4 - (reason,) = struct.unpack_from( - cls._fmt_str, buf, 0, - ) - return reason, size - - @classmethod - def serialize_prop(cls, reason): - data = bytearray() - msg_pack_into( - "!HHB3x", data, 0, - nicira_ext.NXAC2PT_REASON, - 5, - reason, - ) - return data - - @NXActionController2Prop.register_type(nicira_ext.NXAC2PT_USERDATA) - class NXActionController2PropUserData(NXActionController2Prop): - # userdata - _fmt_str = "!B" - _arg_name = "userdata" - - @classmethod - def parser_prop(cls, buf, length): - userdata = [] - offset = 0 - - while offset < length: - u = struct.unpack_from(cls._fmt_str, buf, offset) - userdata.append(u[0]) - offset += 1 - - user_size = utils.round_up(length, 4) - - if user_size > 4 and (user_size % 8) == 0: - size = utils.round_up(length, 4) + 4 - else: - size = utils.round_up(length, 4) - - return userdata, size - - @classmethod - def serialize_prop(cls, userdata): - data = bytearray() - user_buf = bytearray() - user_offset = 0 - for user in userdata: - msg_pack_into( - '!B', user_buf, user_offset, - user, - ) - user_offset += 1 - - msg_pack_into( - "!HH", data, 0, - nicira_ext.NXAC2PT_USERDATA, - 4 + user_offset, - ) - data += user_buf - - if user_offset > 4: - user_len = utils.round_up(user_offset, 4) - brank_size = 0 - if (user_len % 8) == 0: - brank_size = 4 - msg_pack_into( - "!%dx" % (user_len - user_offset + brank_size), - data, 4 + user_offset, - ) - else: - user_len = utils.round_up(user_offset, 4) - - msg_pack_into( - "!%dx" % (user_len - user_offset), - data, 4 + user_offset, - ) - return data - - @NXActionController2Prop.register_type(nicira_ext.NXAC2PT_PAUSE) - class NXActionController2PropPause(NXActionController2Prop): - _arg_name = "pause" - - @classmethod - def 
parser_prop(cls, buf, length): - pause = True - size = 4 - return pause, size - - @classmethod - def serialize_prop(cls, pause): - data = bytearray() - msg_pack_into( - "!HH4x", data, 0, - nicira_ext.NXAC2PT_PAUSE, - 4, - ) - return data - - class NXActionDecTtlCntIds(NXAction): - r""" - Decrement TTL action - - This action decrements TTL of IPv4 packet or - hop limits of IPv6 packet. - - And equivalent to the followings action of ovs-ofctl command. - - .. - dec_ttl(id1[,id2]...) - .. - - +-------------------------------------------+ - | **dec_ttl(**\ *id1*\[,\ *id2*\]...\ **)** | - +-------------------------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - cnt_ids Controller ids - ================ ====================================================== - - Example:: - - actions += [parser.NXActionDecTtlCntIds(cnt_ids=[1,2,3])] - - .. NOTE:: - If you want to set the following ovs-ofctl command. - Please use ``OFPActionDecNwTtl``. 
- - +-------------+ - | **dec_ttl** | - +-------------+ - """ - _subtype = nicira_ext.NXAST_DEC_TTL_CNT_IDS - - # controllers - _fmt_str = '!H4x' - _fmt_len = 6 - - def __init__( - self, - cnt_ids, - type_=None, len_=None, experimenter=None, subtype=None, - ): - super(NXActionDecTtlCntIds, self).__init__() - - self.cnt_ids = cnt_ids - - @classmethod - def parser(cls, buf): - (controllers,) = struct.unpack_from( - cls._fmt_str, buf, - ) - - offset = cls._fmt_len - cnt_ids = [] - - for i in range(0, controllers): - id_ = struct.unpack_from('!H', buf, offset) - cnt_ids.append(id_[0]) - offset += 2 - - return cls(cnt_ids) - - def serialize_body(self): - assert isinstance(self.cnt_ids, (tuple, list)) - for i in self.cnt_ids: - assert isinstance(i, six.integer_types) - - controllers = len(self.cnt_ids) - - data = bytearray() - msg_pack_into( - self._fmt_str, data, 0, - controllers, - ) - offset = self._fmt_len - - for id_ in self.cnt_ids: - msg_pack_into('!H', data, offset, id_) - offset += 2 - - id_len = ( - utils.round_up(controllers, 4) - - controllers - ) - - if id_len != 0: - msg_pack_into('%dx' % id_len * 2, data, offset) - - return data - - # Use in only OpenFlow1.0 - class NXActionMplsBase(NXAction): - # ethertype - _fmt_str = '!H4x' - - def __init__( - self, - ethertype, - type_=None, len_=None, vendor=None, subtype=None, - ): - super(NXActionMplsBase, self).__init__() - self.ethertype = ethertype - - @classmethod - def parser(cls, buf): - (ethertype,) = struct.unpack_from( - cls._fmt_str, buf, - ) - return cls(ethertype) - - def serialize_body(self): - data = bytearray() - msg_pack_into( - self._fmt_str, data, 0, - self.ethertype, - ) - return data - - # For OpenFlow1.0 only - class NXActionPushMpls(NXActionMplsBase): - r""" - Push MPLS action - - This action pushes a new MPLS header to the packet. - - And equivalent to the followings action of ovs-ofctl command. - - .. - push_mpls:ethertype - .. 
- - +-------------------------------+ - | **push_mpls**\:\ *ethertype* | - +-------------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - ethertype Ether type(The value must be either 0x8847 or 0x8848) - ================ ====================================================== - - .. NOTE:: - This actions is supported by - ``OFPActionPushMpls`` - in OpenFlow1.2 or later. - - Example:: - - match = parser.OFPMatch(dl_type=0x0800) - actions += [parser.NXActionPushMpls(ethertype=0x8847)] - """ - _subtype = nicira_ext.NXAST_PUSH_MPLS - - # For OpenFlow1.0 only - class NXActionPopMpls(NXActionMplsBase): - r""" - Pop MPLS action - - This action pops the MPLS header from the packet. - - And equivalent to the followings action of ovs-ofctl command. - - .. - pop_mpls:ethertype - .. - - +------------------------------+ - | **pop_mpls**\:\ *ethertype* | - +------------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - ethertype Ether type - ================ ====================================================== - - .. NOTE:: - This actions is supported by - ``OFPActionPopMpls`` - in OpenFlow1.2 or later. - - Example:: - - match = parser.OFPMatch(dl_type=0x8847) - actions += [parser.NXActionPushMpls(ethertype=0x0800)] - """ - _subtype = nicira_ext.NXAST_POP_MPLS - - # For OpenFlow1.0 only - class NXActionSetMplsTtl(NXAction): - r""" - Set MPLS TTL action - - This action sets the MPLS TTL. - - And equivalent to the followings action of ovs-ofctl command. - - .. - set_mpls_ttl:ttl - .. 
- - +---------------------------+ - | **set_mpls_ttl**\:\ *ttl* | - +---------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - ttl MPLS TTL - ================ ====================================================== - - .. NOTE:: - This actions is supported by - ``OFPActionSetMplsTtl`` - in OpenFlow1.2 or later. - - Example:: - - actions += [parser.NXActionSetMplsTil(ttl=128)] - """ - _subtype = nicira_ext.NXAST_SET_MPLS_TTL - - # ethertype - _fmt_str = '!B5x' - - def __init__( - self, - ttl, - type_=None, len_=None, vendor=None, subtype=None, - ): - super(NXActionSetMplsTtl, self).__init__() - self.ttl = ttl - - @classmethod - def parser(cls, buf): - (ttl,) = struct.unpack_from( - cls._fmt_str, buf, - ) - return cls(ttl) - - def serialize_body(self): - data = bytearray() - msg_pack_into( - self._fmt_str, data, 0, - self.ttl, - ) - return data - - # For OpenFlow1.0 only - class NXActionDecMplsTtl(NXAction): - """ - Decrement MPLS TTL action - - This action decrements the MPLS TTL. - - And equivalent to the followings action of ovs-ofctl command. - - .. - dec_mpls_ttl - .. - - +------------------+ - | **dec_mpls_ttl** | - +------------------+ - - .. NOTE:: - This actions is supported by - ``OFPActionDecMplsTtl`` - in OpenFlow1.2 or later. - - Example:: - - actions += [parser.NXActionDecMplsTil()] - """ - _subtype = nicira_ext.NXAST_DEC_MPLS_TTL - - # ethertype - _fmt_str = '!6x' - - def __init__( - self, - type_=None, len_=None, vendor=None, subtype=None, - ): - super(NXActionDecMplsTtl, self).__init__() - - @classmethod - def parser(cls, buf): - return cls() - - def serialize_body(self): - data = bytearray() - msg_pack_into(self._fmt_str, data, 0) - return data - - # For OpenFlow1.0 only - class NXActionSetMplsLabel(NXAction): - r""" - Set MPLS Lavel action - - This action sets the MPLS Label. 
- - And equivalent to the followings action of ovs-ofctl command. - - .. - set_mpls_label:label - .. - - +-------------------------------+ - | **set_mpls_label**\:\ *label* | - +-------------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - label MPLS Label - ================ ====================================================== - - .. NOTE:: - This actions is supported by - ``OFPActionSetField(mpls_label=label)`` - in OpenFlow1.2 or later. - - Example:: - - actions += [parser.NXActionSetMplsLabel(label=0x10)] - """ - _subtype = nicira_ext.NXAST_SET_MPLS_LABEL - - # ethertype - _fmt_str = '!2xI' - - def __init__( - self, - label, - type_=None, len_=None, vendor=None, subtype=None, - ): - super(NXActionSetMplsLabel, self).__init__() - self.label = label - - @classmethod - def parser(cls, buf): - (label,) = struct.unpack_from( - cls._fmt_str, buf, - ) - return cls(label) - - def serialize_body(self): - data = bytearray() - msg_pack_into( - self._fmt_str, data, 0, - self.label, - ) - return data - - # For OpenFlow1.0 only - class NXActionSetMplsTc(NXAction): - r""" - Set MPLS Tc action - - This action sets the MPLS Tc. - - And equivalent to the followings action of ovs-ofctl command. - - .. - set_mpls_tc:tc - .. - - +-------------------------+ - | **set_mpls_tc**\:\ *tc* | - +-------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - tc MPLS Tc - ================ ====================================================== - - .. NOTE:: - This actions is supported by - ``OFPActionSetField(mpls_label=tc)`` - in OpenFlow1.2 or later. 
- - Example:: - - actions += [parser.NXActionSetMplsLabel(tc=0x10)] - """ - _subtype = nicira_ext.NXAST_SET_MPLS_TC - - # ethertype - _fmt_str = '!B5x' - - def __init__( - self, - tc, - type_=None, len_=None, vendor=None, subtype=None, - ): - super(NXActionSetMplsTc, self).__init__() - self.tc = tc - - @classmethod - def parser(cls, buf): - (tc,) = struct.unpack_from( - cls._fmt_str, buf, - ) - return cls(tc) - - def serialize_body(self): - data = bytearray() - msg_pack_into( - self._fmt_str, data, 0, - self.tc, - ) - return data - - class NXActionStackBase(NXAction): - # start, field, end - _fmt_str = '!H4sH' - _TYPE = { - 'ascii': [ - 'field', - ], - } - - def __init__( - self, - field, - start, - end, - type_=None, len_=None, experimenter=None, subtype=None, - ): - super(NXActionStackBase, self).__init__() - self.field = field - self.start = start - self.end = end - - @classmethod - def parser(cls, buf): - (start, oxm_data, end) = struct.unpack_from( - cls._fmt_str, buf, 0, - ) - (n, len_) = ofp.oxm_parse_header(oxm_data, 0) - field = ofp.oxm_to_user_header(n) - return cls(field, start, end) - - def serialize_body(self): - data = bytearray() - oxm_data = bytearray() - oxm = ofp.oxm_from_user_header(self.field) - ofp.oxm_serialize_header(oxm, oxm_data, 0) - msg_pack_into( - self._fmt_str, data, 0, - self.start, - six.binary_type(oxm_data), - self.end, - ) - offset = len(data) - msg_pack_into("!%dx" % (12 - offset), data, offset) - return data - - class NXActionStackPush(NXActionStackBase): - r""" - Push field action - - This action pushes field to top of the stack. - - And equivalent to the followings action of ovs-ofctl command. - - .. - pop:dst[start...end] - .. 
- - +----------------------------------------------------+ - | **pop**\:\ *dst*\ **[**\ *start*\...\ *end*\ **]** | - +----------------------------------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - field OXM/NXM header for source field - start Start bit for source field - end End bit for source field - ================ ====================================================== - - Example:: - - actions += [parser.NXActionStackPush(field="reg2", - start=0, - end=5)] - """ - _subtype = nicira_ext.NXAST_STACK_PUSH - - class NXActionStackPop(NXActionStackBase): - r""" - Pop field action - - This action pops field from top of the stack. - - And equivalent to the followings action of ovs-ofctl command. - - .. - pop:src[start...end] - .. - - +----------------------------------------------------+ - | **pop**\:\ *src*\ **[**\ *start*\...\ *end*\ **]** | - +----------------------------------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - field OXM/NXM header for destination field - start Start bit for destination field - end End bit for destination field - ================ ====================================================== - - Example:: - - actions += [parser.NXActionStackPop(field="reg2", - start=0, - end=5)] - """ - _subtype = nicira_ext.NXAST_STACK_POP - - class NXActionSample(NXAction): - r""" - Sample packets action - - This action samples packets and sends one sample for - every sampled packet. - - And equivalent to the followings action of ovs-ofctl command. - - .. - sample(argument[,argument]...) - .. 
- - +----------------------------------------------------+ - | **sample(**\ *argument*\[,\ *argument*\]...\ **)** | - +----------------------------------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - probability The number of sampled packets - collector_set_id The unsigned 32-bit integer identifier of - the set of sample collectors to send sampled packets - to - obs_domain_id The Unsigned 32-bit integer Observation Domain ID - obs_point_id The unsigned 32-bit integer Observation Point ID - ================ ====================================================== - - Example:: - - actions += [parser.NXActionSample(probability=3, - collector_set_id=1, - obs_domain_id=2, - obs_point_id=3,)] - """ - _subtype = nicira_ext.NXAST_SAMPLE - - # probability, collector_set_id, obs_domain_id, obs_point_id - _fmt_str = '!HIII' - - def __init__( - self, - probability, - collector_set_id=0, - obs_domain_id=0, - obs_point_id=0, - type_=None, len_=None, experimenter=None, subtype=None, - ): - super(NXActionSample, self).__init__() - self.probability = probability - self.collector_set_id = collector_set_id - self.obs_domain_id = obs_domain_id - self.obs_point_id = obs_point_id - - @classmethod - def parser(cls, buf): - ( - probability, - collector_set_id, - obs_domain_id, - obs_point_id, - ) = struct.unpack_from( - cls._fmt_str, buf, 0, - ) - return cls( - probability, - collector_set_id, - obs_domain_id, - obs_point_id, - ) - - def serialize_body(self): - data = bytearray() - msg_pack_into( - self._fmt_str, data, 0, - self.probability, - self.collector_set_id, - self.obs_domain_id, - self.obs_point_id, - ) - return data - - class NXActionSample2(NXAction): - r""" - Sample packets action - - This action samples packets and sends one sample for - every sampled packet. - 'sampling_port' can be equal to ingress port or one of egress ports. 
- - And equivalent to the followings action of ovs-ofctl command. - - .. - sample(argument[,argument]...) - .. - - +----------------------------------------------------+ - | **sample(**\ *argument*\[,\ *argument*\]...\ **)** | - +----------------------------------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - probability The number of sampled packets - collector_set_id The unsigned 32-bit integer identifier of - the set of sample collectors to send sampled packets to - obs_domain_id The Unsigned 32-bit integer Observation Domain ID - obs_point_id The unsigned 32-bit integer Observation Point ID - sampling_port Sampling port number - ================ ====================================================== - - Example:: - - actions += [parser.NXActionSample2(probability=3, - collector_set_id=1, - obs_domain_id=2, - obs_point_id=3, - apn_mac_addr=[10,0,2,0,0,5], - msisdn=b'magmaIsTheBest', - apn_name=b'big_tower123', - pdp_start_epoch=b'90\x00\x00\x00\x00\x00\x00', - sampling_port=8080)] - """ - _subtype = nicira_ext.NXAST_SAMPLE2 - - # probability, collector_set_id, obs_domain_id, - # obs_point_id, msisdn, apn_mac_addr, apn_name, sampling_port - _fmt_str = '!HIIIL16s6B24s8s6x' - - def __init__( - self, - probability, - msisdn, - apn_mac_addr, - apn_name, - pdp_start_epoch, - collector_set_id=0, - obs_domain_id=0, - obs_point_id=0, - sampling_port=0, - type_=None, len_=None, experimenter=None, subtype=None, - ): - super(NXActionSample2, self).__init__() - self.probability = probability - self.collector_set_id = collector_set_id - self.obs_domain_id = obs_domain_id - self.obs_point_id = obs_point_id - self.sampling_port = sampling_port - - self.msisdn = msisdn - self.apn_mac_addr = apn_mac_addr - self.apn_name = apn_name - self.pdp_start_epoch = pdp_start_epoch - - @classmethod - def parser(cls, buf): - ( - probability, - 
collector_set_id, - obs_domain_id, - obs_point_id, - sampling_port, - msisdn, - apn_mac_addr_0, - apn_mac_addr_1, - apn_mac_addr_2, - apn_mac_addr_3, - apn_mac_addr_4, - apn_mac_addr_5, - apn_name, - pdp_start_epoch, - ) = struct.unpack_from( - cls._fmt_str, buf, 0, - ) - - apn_mac_addr = [apn_mac_addr_0, apn_mac_addr_1, apn_mac_addr_2, apn_mac_addr_3, apn_mac_addr_4, apn_mac_addr_5] - return cls( - probability, - msisdn, - apn_mac_addr, - apn_name, - pdp_start_epoch, - collector_set_id, - obs_domain_id, - obs_point_id, - sampling_port, - ) - - def serialize_body(self): - data = bytearray() - msg_pack_into( - self._fmt_str, data, 0, - self.probability, - self.collector_set_id, - self.obs_domain_id, - self.obs_point_id, - self.sampling_port, - self.msisdn, - *self.apn_mac_addr, - self.apn_name, - self.pdp_start_epoch, - ) - - return data - - class NXActionFinTimeout(NXAction): - r""" - Change TCP timeout action - - This action changes the idle timeout or hard timeout or - both, of this OpenFlow rule when the rule matches a TCP - packet with the FIN or RST flag. - - And equivalent to the followings action of ovs-ofctl command. - - .. - fin_timeout(argument[,argument]...) - .. 
- - +---------------------------------------------------------+ - | **fin_timeout(**\ *argument*\[,\ *argument*\]...\ **)** | - +---------------------------------------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - fin_idle_timeout Causes the flow to expire after the given number - of seconds of inactivity - fin_idle_timeout Causes the flow to expire after the given number - of second, regardless of activity - ================ ====================================================== - - Example:: - - match = parser.OFPMatch(ip_proto=6, eth_type=0x0800) - actions += [parser.NXActionFinTimeout(fin_idle_timeout=30, - fin_hard_timeout=60)] - """ - _subtype = nicira_ext.NXAST_FIN_TIMEOUT - - # fin_idle_timeout, fin_hard_timeout - _fmt_str = '!HH2x' - - def __init__( - self, - fin_idle_timeout, - fin_hard_timeout, - type_=None, len_=None, experimenter=None, subtype=None, - ): - super(NXActionFinTimeout, self).__init__() - self.fin_idle_timeout = fin_idle_timeout - self.fin_hard_timeout = fin_hard_timeout - - @classmethod - def parser(cls, buf): - ( - fin_idle_timeout, - fin_hard_timeout, - ) = struct.unpack_from( - cls._fmt_str, buf, 0, - ) - return cls( - fin_idle_timeout, - fin_hard_timeout, - ) - - def serialize_body(self): - data = bytearray() - msg_pack_into( - self._fmt_str, data, 0, - self.fin_idle_timeout, - self.fin_hard_timeout, - ) - return data - - class NXActionConjunction(NXAction): - r""" - Conjunctive matches action - - This action ties groups of individual OpenFlow flows into - higher-level conjunctive flows. - Please refer to the ovs-ofctl command manual for details. - - And equivalent to the followings action of ovs-ofctl command. - - .. - conjunction(id,k/n) - .. 
- - +--------------------------------------------------+ - | **conjunction(**\ *id*\,\ *k*\ **/**\ *n*\ **)** | - +--------------------------------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - clause Number assigned to the flow's dimension - n_clauses Specify the conjunctive flow's match condition - id\_ Conjunction ID - ================ ====================================================== - - Example:: - - actions += [parser.NXActionConjunction(clause=1, - n_clauses=2, - id_=10)] - """ - _subtype = nicira_ext.NXAST_CONJUNCTION - - # clause, n_clauses, id - _fmt_str = '!BBI' - - def __init__( - self, - clause, - n_clauses, - id_, - type_=None, len_=None, experimenter=None, subtype=None, - ): - super(NXActionConjunction, self).__init__() - self.clause = clause - self.n_clauses = n_clauses - self.id = id_ - - @classmethod - def parser(cls, buf): - ( - clause, - n_clauses, - id_, - ) = struct.unpack_from( - cls._fmt_str, buf, 0, - ) - return cls(clause, n_clauses, id_) - - def serialize_body(self): - data = bytearray() - msg_pack_into( - self._fmt_str, data, 0, - self.clause, - self.n_clauses, - self.id, - ) - return data - - class NXActionMultipath(NXAction): - r""" - Select multipath link action - - This action selects multipath link based on the specified parameters. - Please refer to the ovs-ofctl command manual for details. - - And equivalent to the followings action of ovs-ofctl command. - - .. - multipath(fields, basis, algorithm, n_links, arg, dst[start..end]) - .. 
- - +-------------------------------------------------------------+ - | **multipath(**\ *fields*\, \ *basis*\, \ *algorithm*\, | - | *n_links*\, \ *arg*\, \ *dst*\[\ *start*\..\ *end*\]\ **)** | - +-------------------------------------------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - fields One of NX_HASH_FIELDS_* - basis Universal hash parameter - algorithm One of NX_MP_ALG_*. - max_link Number of output links - arg Algorithm-specific argument - ofs_nbits Start and End for the OXM/NXM field. - Setting method refer to the ``nicira_ext.ofs_nbits`` - dst OXM/NXM header for source field - ================ ====================================================== - - Example:: - - actions += [parser.NXActionMultipath( - fields=nicira_ext.NX_HASH_FIELDS_SYMMETRIC_L4, - basis=1024, - algorithm=nicira_ext.NX_MP_ALG_HRW, - max_link=5, - arg=0, - ofs_nbits=nicira_ext.ofs_nbits(4, 31), - dst="reg2")] - """ - _subtype = nicira_ext.NXAST_MULTIPATH - - # fields, basis, algorithm, max_link, - # arg, ofs_nbits, dst - _fmt_str = '!HH2xHHI2xH4s' - _TYPE = { - 'ascii': [ - 'dst', - ], - } - - def __init__( - self, - fields, - basis, - algorithm, - max_link, - arg, - ofs_nbits, - dst, - type_=None, len_=None, experimenter=None, subtype=None, - ): - super(NXActionMultipath, self).__init__() - self.fields = fields - self.basis = basis - self.algorithm = algorithm - self.max_link = max_link - self.arg = arg - self.ofs_nbits = ofs_nbits - self.dst = dst - - @classmethod - def parser(cls, buf): - ( - fields, - basis, - algorithm, - max_link, - arg, - ofs_nbits, - oxm_data, - ) = struct.unpack_from( - cls._fmt_str, buf, 0, - ) - (n, len_) = ofp.oxm_parse_header(oxm_data, 0) - dst = ofp.oxm_to_user_header(n) - return cls( - fields, - basis, - algorithm, - max_link, - arg, - ofs_nbits, - dst, - ) - - def serialize_body(self): - data = 
bytearray() - dst = bytearray() - oxm = ofp.oxm_from_user_header(self.dst) - ofp.oxm_serialize_header(oxm, dst, 0), - msg_pack_into( - self._fmt_str, data, 0, - self.fields, - self.basis, - self.algorithm, - self.max_link, - self.arg, - self.ofs_nbits, - six.binary_type(dst), - ) - - return data - - class _NXActionBundleBase(NXAction): - # algorithm, fields, basis, slave_type, n_slaves - # ofs_nbits - _fmt_str = '!HHHIHH' - - def __init__( - self, algorithm, fields, basis, slave_type, n_slaves, - ofs_nbits, dst, slaves, - ): - super(_NXActionBundleBase, self).__init__() - self.len = utils.round_up( - nicira_ext.NX_ACTION_BUNDLE_0_SIZE + len(slaves) * 2, 8, - ) - - self.algorithm = algorithm - self.fields = fields - self.basis = basis - self.slave_type = slave_type - self.n_slaves = n_slaves - self.ofs_nbits = ofs_nbits - self.dst = dst - - assert isinstance(slaves, (list, tuple)) - for s in slaves: - assert isinstance(s, six.integer_types) - - self.slaves = slaves - - @classmethod - def parser(cls, buf): - # Add dst ('I') to _fmt_str - ( - algorithm, fields, basis, - slave_type, n_slaves, ofs_nbits, dst, - ) = struct.unpack_from( - cls._fmt_str + 'I', buf, 0, - ) - - offset = ( - nicira_ext.NX_ACTION_BUNDLE_0_SIZE - - nicira_ext.NX_ACTION_HEADER_0_SIZE - 8 - ) - - if dst != 0: - (n, len_) = ofp.oxm_parse_header(buf, offset) - dst = ofp.oxm_to_user_header(n) - - slave_offset = ( - nicira_ext.NX_ACTION_BUNDLE_0_SIZE - - nicira_ext.NX_ACTION_HEADER_0_SIZE - ) - - slaves = [] - for i in range(0, n_slaves): - s = struct.unpack_from('!H', buf, slave_offset) - slaves.append(s[0]) - slave_offset += 2 - - return cls( - algorithm, fields, basis, slave_type, - n_slaves, ofs_nbits, dst, slaves, - ) - - def serialize_body(self): - data = bytearray() - slave_offset = ( - nicira_ext.NX_ACTION_BUNDLE_0_SIZE - - nicira_ext.NX_ACTION_HEADER_0_SIZE - ) - self.n_slaves = len(self.slaves) - for s in self.slaves: - msg_pack_into('!H', data, slave_offset, s) - slave_offset += 2 - pad_len 
= ( - utils.round_up(self.n_slaves, 4) - - self.n_slaves - ) - - if pad_len != 0: - msg_pack_into('%dx' % pad_len * 2, data, slave_offset) - - msg_pack_into( - self._fmt_str, data, 0, - self.algorithm, self.fields, self.basis, - self.slave_type, self.n_slaves, - self.ofs_nbits, - ) - offset = ( - nicira_ext.NX_ACTION_BUNDLE_0_SIZE - - nicira_ext.NX_ACTION_HEADER_0_SIZE - 8 - ) - - if self.dst == 0: - msg_pack_into('I', data, offset, self.dst) - else: - oxm_data = ofp.oxm_from_user_header(self.dst) - ofp.oxm_serialize_header(oxm_data, data, offset) - return data - - class NXActionBundle(_NXActionBundleBase): - r""" - Select bundle link action - - This action selects bundle link based on the specified parameters. - Please refer to the ovs-ofctl command manual for details. - - And equivalent to the followings action of ovs-ofctl command. - - .. - bundle(fields, basis, algorithm, slave_type, slaves:[ s1, s2,...]) - .. - - +-----------------------------------------------------------+ - | **bundle(**\ *fields*\, \ *basis*\, \ *algorithm*\, | - | *slave_type*\, \ *slaves*\:[ \ *s1*\, \ *s2*\,...]\ **)** | - +-----------------------------------------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - algorithm One of NX_MP_ALG_*. - fields One of NX_HASH_FIELDS_* - basis Universal hash parameter - slave_type Type of slaves(must be NXM_OF_IN_PORT) - n_slaves Number of slaves - ofs_nbits Start and End for the OXM/NXM field. 
(must be zero) - dst OXM/NXM header for source field(must be zero) - slaves List of slaves - ================ ====================================================== - - - Example:: - - actions += [parser.NXActionBundle( - algorithm=nicira_ext.NX_MP_ALG_HRW, - fields=nicira_ext.NX_HASH_FIELDS_ETH_SRC, - basis=0, - slave_type=nicira_ext.NXM_OF_IN_PORT, - n_slaves=2, - ofs_nbits=0, - dst=0, - slaves=[2, 3])] - """ - _subtype = nicira_ext.NXAST_BUNDLE - - def __init__( - self, algorithm, fields, basis, slave_type, n_slaves, - ofs_nbits, dst, slaves, - ): - # NXAST_BUNDLE actions should have 'sofs_nbits' and 'dst' zeroed. - super(NXActionBundle, self).__init__( - algorithm, fields, basis, slave_type, n_slaves, - ofs_nbits=0, dst=0, slaves=slaves, - ) - - class NXActionBundleLoad(_NXActionBundleBase): - r""" - Select bundle link action - - This action has the same behavior as the bundle action, - with one exception. - Please refer to the ovs-ofctl command manual for details. - - And equivalent to the followings action of ovs-ofctl command. - - .. - bundle_load(fields, basis, algorithm, slave_type, - dst[start..end], slaves:[ s1, s2,...]) - .. - - +-----------------------------------------------------------+ - | **bundle_load(**\ *fields*\, \ *basis*\, \ *algorithm*\, | - | *slave_type*\, \ *dst*\[\ *start*\... \*emd*\], | - | \ *slaves*\:[ \ *s1*\, \ *s2*\,...]\ **)** | | - +-----------------------------------------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - algorithm One of NX_MP_ALG_*. - fields One of NX_HASH_FIELDS_* - basis Universal hash parameter - slave_type Type of slaves(must be NXM_OF_IN_PORT) - n_slaves Number of slaves - ofs_nbits Start and End for the OXM/NXM field. 
- Setting method refer to the ``nicira_ext.ofs_nbits`` - dst OXM/NXM header for source field - slaves List of slaves - ================ ====================================================== - - - Example:: - - actions += [parser.NXActionBundleLoad( - algorithm=nicira_ext.NX_MP_ALG_HRW, - fields=nicira_ext.NX_HASH_FIELDS_ETH_SRC, - basis=0, - slave_type=nicira_ext.NXM_OF_IN_PORT, - n_slaves=2, - ofs_nbits=nicira_ext.ofs_nbits(4, 31), - dst="reg0", - slaves=[2, 3])] - """ - _subtype = nicira_ext.NXAST_BUNDLE_LOAD - _TYPE = { - 'ascii': [ - 'dst', - ], - } - - def __init__( - self, algorithm, fields, basis, slave_type, n_slaves, - ofs_nbits, dst, slaves, - ): - super(NXActionBundleLoad, self).__init__( - algorithm, fields, basis, slave_type, n_slaves, - ofs_nbits, dst, slaves, - ) - - class NXActionCT(NXAction): - r""" - Pass traffic to the connection tracker action - - This action sends the packet through the connection tracker. - - And equivalent to the followings action of ovs-ofctl command. - - .. - ct(argument[,argument]...) - .. - - +------------------------------------------------+ - | **ct(**\ *argument*\[,\ *argument*\]...\ **)** | - +------------------------------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - flags Zero or more(Unspecified flag bits must be zero.) - zone_src OXM/NXM header for source field - zone_ofs_nbits Start and End for the OXM/NXM field. - Setting method refer to the ``nicira_ext.ofs_nbits``. - If you need set the Immediate value for zone, - zone_src must be set to None or empty character string. - recirc_table Recirculate to a specific table - alg Well-known port number for the protocol - actions Zero or more actions may immediately follow this - action - ================ ====================================================== - - .. 
NOTE:: - - If you set number to zone_src, - Traceback occurs when you run the to_jsondict. - - Example:: - - match = parser.OFPMatch(eth_type=0x0800, ct_state=(0,32)) - actions += [parser.NXActionCT( - flags = 1, - zone_src = "reg0", - zone_ofs_nbits = nicira_ext.ofs_nbits(4, 31), - recirc_table = 4, - alg = 0, - actions = [])] - """ - _subtype = nicira_ext.NXAST_CT - - # flags, zone_src, zone_ofs_nbits, recirc_table, - # pad, alg - _fmt_str = '!H4sHB3xH' - _TYPE = { - 'ascii': [ - 'zone_src', - ], - } - - # Followed by actions - - def __init__( - self, - flags, - zone_src, - zone_ofs_nbits, - recirc_table, - alg, - actions, - type_=None, len_=None, experimenter=None, subtype=None, - ): - super(NXActionCT, self).__init__() - self.flags = flags - self.zone_src = zone_src - self.zone_ofs_nbits = zone_ofs_nbits - self.recirc_table = recirc_table - self.alg = alg - self.actions = actions - - @classmethod - def parser(cls, buf): - ( - flags, - oxm_data, - zone_ofs_nbits, - recirc_table, - alg, - ) = struct.unpack_from( - cls._fmt_str, buf, 0, - ) - rest = buf[struct.calcsize(cls._fmt_str):] - - # OXM/NXM field - if oxm_data == b'\x00' * 4: - zone_src = "" - else: - (n, len_) = ofp.oxm_parse_header(oxm_data, 0) - zone_src = ofp.oxm_to_user_header(n) - - # actions - actions = [] - while len(rest) > 0: - action = ofpp.OFPAction.parser(rest, 0) - actions.append(action) - rest = rest[action.len:] - - return cls( - flags, zone_src, zone_ofs_nbits, recirc_table, - alg, actions, - ) - - def serialize_body(self): - data = bytearray() - # If zone_src is zero, zone_ofs_nbits is zone_imm - if not self.zone_src: - zone_src = b'\x00' * 4 - elif isinstance(self.zone_src, six.integer_types): - zone_src = struct.pack("!I", self.zone_src) - else: - zone_src = bytearray() - oxm = ofp.oxm_from_user_header(self.zone_src) - ofp.oxm_serialize_header(oxm, zone_src, 0) - - msg_pack_into( - self._fmt_str, data, 0, - self.flags, - six.binary_type(zone_src), - self.zone_ofs_nbits, - 
self.recirc_table, - self.alg, - ) - for a in self.actions: - a.serialize(data, len(data)) - return data - - class NXActionCTClear(NXAction): - """ - Clear connection tracking state action - - This action clears connection tracking state from packets. - - And equivalent to the followings action of ovs-ofctl command. - - .. - ct_clear - .. - - +--------------+ - | **ct_clear** | - +--------------+ - - Example:: - - actions += [parser.NXActionCTClear()] - """ - _subtype = nicira_ext.NXAST_CT_CLEAR - - _fmt_str = '!6x' - - def __init__( - self, - type_=None, len_=None, experimenter=None, subtype=None, - ): - super(NXActionCTClear, self).__init__() - - @classmethod - def parser(cls, buf): - return cls() - - def serialize_body(self): - data = bytearray() - msg_pack_into(self._fmt_str, data, 0) - return data - - class NXActionNAT(NXAction): - r""" - Network address translation action - - This action sends the packet through the connection tracker. - - And equivalent to the followings action of ovs-ofctl command. - - .. NOTE:: - The following command image does not exist in ovs-ofctl command - manual and has been created from the command response. - - .. - nat(src=ip_min-ip_max : proto_min-proto-max) - .. - - +--------------------------------------------------+ - | **nat(src**\=\ *ip_min*\ **-**\ *ip_max*\ **:** | - | *proto_min*\ **-**\ *proto-max*\ **)** | - +--------------------------------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - flags Zero or more(Unspecified flag bits must be zero.) - range_ipv4_min Range ipv4 address minimun - range_ipv4_max Range ipv4 address maximun - range_ipv6_min Range ipv6 address minimun - range_ipv6_max Range ipv6 address maximun - range_proto_min Range protocol minimum - range_proto_max Range protocol maximun - ================ ====================================================== - - .. 
CAUTION:: - ``NXActionNAT`` must be defined in the actions in the - ``NXActionCT``. - - Example:: - - match = parser.OFPMatch(eth_type=0x0800) - actions += [ - parser.NXActionCT( - flags = 1, - zone_src = "reg0", - zone_ofs_nbits = nicira_ext.ofs_nbits(4, 31), - recirc_table = 255, - alg = 0, - actions = [ - parser.NXActionNAT( - flags = 1, - range_ipv4_min = "10.1.12.0", - range_ipv4_max = "10.1.13.255", - range_ipv6_min = "", - range_ipv6_max = "", - range_proto_min = 1, - range_proto_max = 1023 - ) - ] - ) - ] - """ - _subtype = nicira_ext.NXAST_NAT - - # pad, flags, range_present - _fmt_str = '!2xHH' - # Followed by optional parameters - - _TYPE = { - 'ascii': [ - 'range_ipv4_max', - 'range_ipv4_min', - 'range_ipv6_max', - 'range_ipv6_min', - ], - } - - def __init__( - self, - flags, - range_ipv4_min='', - range_ipv4_max='', - range_ipv6_min='', - range_ipv6_max='', - range_proto_min=None, - range_proto_max=None, - type_=None, len_=None, experimenter=None, subtype=None, - ): - super(NXActionNAT, self).__init__() - self.flags = flags - self.range_ipv4_min = range_ipv4_min - self.range_ipv4_max = range_ipv4_max - self.range_ipv6_min = range_ipv6_min - self.range_ipv6_max = range_ipv6_max - self.range_proto_min = range_proto_min - self.range_proto_max = range_proto_max - - @classmethod - def parser(cls, buf): - ( - flags, - range_present, - ) = struct.unpack_from( - cls._fmt_str, buf, 0, - ) - rest = buf[struct.calcsize(cls._fmt_str):] - # optional parameters - kwargs = dict() - if range_present & nicira_ext.NX_NAT_RANGE_IPV4_MIN: - kwargs['range_ipv4_min'] = type_desc.IPv4Addr.to_user(rest[:4]) - rest = rest[4:] - if range_present & nicira_ext.NX_NAT_RANGE_IPV4_MAX: - kwargs['range_ipv4_max'] = type_desc.IPv4Addr.to_user(rest[:4]) - rest = rest[4:] - if range_present & nicira_ext.NX_NAT_RANGE_IPV6_MIN: - kwargs['range_ipv6_min'] = ( - type_desc.IPv6Addr.to_user(rest[:16]) - ) - rest = rest[16:] - if range_present & nicira_ext.NX_NAT_RANGE_IPV6_MAX: - 
kwargs['range_ipv6_max'] = ( - type_desc.IPv6Addr.to_user(rest[:16]) - ) - rest = rest[16:] - if range_present & nicira_ext.NX_NAT_RANGE_PROTO_MIN: - kwargs['range_proto_min'] = type_desc.Int2.to_user(rest[:2]) - rest = rest[2:] - if range_present & nicira_ext.NX_NAT_RANGE_PROTO_MAX: - kwargs['range_proto_max'] = type_desc.Int2.to_user(rest[:2]) - - return cls(flags, **kwargs) - - def serialize_body(self): - # Pack optional parameters first, as range_present needs - # to be calculated. - optional_data = b'' - range_present = 0 - if self.range_ipv4_min != '': - range_present |= nicira_ext.NX_NAT_RANGE_IPV4_MIN - optional_data += type_desc.IPv4Addr.from_user( - self.range_ipv4_min, - ) - if self.range_ipv4_max != '': - range_present |= nicira_ext.NX_NAT_RANGE_IPV4_MAX - optional_data += type_desc.IPv4Addr.from_user( - self.range_ipv4_max, - ) - if self.range_ipv6_min != '': - range_present |= nicira_ext.NX_NAT_RANGE_IPV6_MIN - optional_data += type_desc.IPv6Addr.from_user( - self.range_ipv6_min, - ) - if self.range_ipv6_max != '': - range_present |= nicira_ext.NX_NAT_RANGE_IPV6_MAX - optional_data += type_desc.IPv6Addr.from_user( - self.range_ipv6_max, - ) - if self.range_proto_min is not None: - range_present |= nicira_ext.NX_NAT_RANGE_PROTO_MIN - optional_data += type_desc.Int2.from_user( - self.range_proto_min, - ) - if self.range_proto_max is not None: - range_present |= nicira_ext.NX_NAT_RANGE_PROTO_MAX - optional_data += type_desc.Int2.from_user( - self.range_proto_max, - ) - - data = bytearray() - msg_pack_into( - self._fmt_str, data, 0, - self.flags, - range_present, - ) - msg_pack_into( - '!%ds' % len(optional_data), data, len(data), - optional_data, - ) - - return data - - class NXActionOutputTrunc(NXAction): - r""" - Truncate output action - - This action truncate a packet into the specified size and outputs it. - - And equivalent to the followings action of ovs-ofctl command. - - .. - output(port=port,max_len=max_len) - .. 
- - +--------------------------------------------------------------+ - | **output(port**\=\ *port*\,\ **max_len**\=\ *max_len*\ **)** | - +--------------------------------------------------------------+ - - ================ ====================================================== - Attribute Description - ================ ====================================================== - port Output port - max_len Max bytes to send - ================ ====================================================== - - Example:: - - actions += [parser.NXActionOutputTrunc(port=8080, - max_len=1024)] - """ - _subtype = nicira_ext.NXAST_OUTPUT_TRUNC - - # port, max_len - _fmt_str = '!HI' - - def __init__( - self, - port, - max_len, - type_=None, len_=None, experimenter=None, subtype=None, - ): - super(NXActionOutputTrunc, self).__init__() - self.port = port - self.max_len = max_len - - @classmethod - def parser(cls, buf): - ( - port, - max_len, - ) = struct.unpack_from( - cls._fmt_str, buf, 0, - ) - return cls(port, max_len) - - def serialize_body(self): - data = bytearray() - msg_pack_into( - self._fmt_str, data, 0, - self.port, - self.max_len, - ) - return data - - class NXActionDecNshTtl(NXAction): - """ - Decrement NSH TTL action - - This action decrements the TTL in the Network Service Header(NSH). - - This action was added in OVS v2.9. - - And equivalent to the followings action of ovs-ofctl command. 
- - :: - - dec_nsh_ttl - - Example:: - - actions += [parser.NXActionDecNshTtl()] - """ - _subtype = nicira_ext.NXAST_DEC_NSH_TTL - - _fmt_str = '!6x' - - def __init__( - self, - type_=None, len_=None, vendor=None, subtype=None, - ): - super(NXActionDecNshTtl, self).__init__() - - @classmethod - def parser(cls, buf): - return cls() - - def serialize_body(self): - data = bytearray() - msg_pack_into(self._fmt_str, data, 0) - return data - - def add_attr(k, v): - v.__module__ = ofpp.__name__ # Necessary for stringify stuff - setattr(ofpp, k, v) - - add_attr('NXAction', NXAction) - add_attr('NXActionUnknown', NXActionUnknown) - - classes = [ - 'NXActionSetQueue', - 'NXActionPopQueue', - 'NXActionRegLoad', - 'NXActionRegLoad2', - 'NXActionNote', - 'NXActionSetTunnel', - 'NXActionSetTunnel64', - 'NXActionRegMove', - 'NXActionResubmit', - 'NXActionResubmitTable', - 'NXActionOutputReg', - 'NXActionOutputReg2', - 'NXActionLearn', - 'NXActionExit', - 'NXActionDecTtl', - 'NXActionController', - 'NXActionController2', - 'NXActionDecTtlCntIds', - 'NXActionPushMpls', - 'NXActionPopMpls', - 'NXActionSetMplsTtl', - 'NXActionDecMplsTtl', - 'NXActionSetMplsLabel', - 'NXActionSetMplsTc', - 'NXActionStackPush', - 'NXActionStackPop', - 'NXActionSample', - 'NXActionSample2', - 'NXActionFinTimeout', - 'NXActionConjunction', - 'NXActionMultipath', - 'NXActionBundle', - 'NXActionBundleLoad', - 'NXActionCT', - 'NXActionCTClear', - 'NXActionNAT', - 'NXActionOutputTrunc', - '_NXFlowSpec', # exported for testing - 'NXFlowSpecMatch', - 'NXFlowSpecLoad', - 'NXFlowSpecOutput', - 'NXActionDecNshTtl', - ] - vars = locals() - for name in classes: - cls = vars[name] - add_attr(name, cls) - if issubclass(cls, NXAction): - NXAction.register(cls) - if issubclass(cls, _NXFlowSpec): - _NXFlowSpec.register(cls) diff --git a/lte/gateway/deploy/roles/magma/files/service.py b/lte/gateway/deploy/roles/magma/files/service.py deleted file mode 100644 index 1b1e7b7bab47..000000000000 --- 
a/lte/gateway/deploy/roles/magma/files/service.py +++ /dev/null @@ -1,244 +0,0 @@ -# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation. -# Copyright (C) 2014 YAMAMOTO Takashi -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# ofctl service - -import numbers - -from ryu.base import app_manager -from ryu.controller import ofp_event -from ryu.controller.handler import ( - CONFIG_DISPATCHER, - DEAD_DISPATCHER, - MAIN_DISPATCHER, - set_ev_cls, -) - -from . import event, exception - - -class _SwitchInfo(object): - def __init__(self, datapath): - self.datapath = datapath - self.xids = {} - self.barriers = {} - self.results = {} - - -class OfctlService(app_manager.RyuApp): - def __init__(self, *args, **kwargs): - super(OfctlService, self).__init__(*args, **kwargs) - self.name = 'ofctl_service' - self._switches = {} - self._observing_events = {} - - def _observe_msg(self, msg_cls): - assert msg_cls is not None - ev_cls = ofp_event.ofp_msg_to_ev_cls(msg_cls) - self._observing_events.setdefault(ev_cls, 0) - if self._observing_events[ev_cls] == 0: - self.logger.debug('ofctl: start observing %s', ev_cls) - self.register_handler(ev_cls, self._handle_reply) - self.observe_event(ev_cls) - self._observing_events[ev_cls] += 1 - - def _unobserve_msg(self, msg_cls): - assert msg_cls is not None - ev_cls = ofp_event.ofp_msg_to_ev_cls(msg_cls) - assert self._observing_events[ev_cls] > 0 - self._observing_events[ev_cls] -= 1 - if self._observing_events[ev_cls] == 0: - 
self.unregister_handler(ev_cls, self._handle_reply) - self.unobserve_event(ev_cls) - self.logger.debug('ofctl: stop observing %s', ev_cls) - - def _cancel(self, info, barrier_xid, exception): - xid = info.barriers.pop(barrier_xid) - req = info.xids.pop(xid) - msg = req.msg - datapath = msg.datapath - parser = datapath.ofproto_parser - is_barrier = isinstance(msg, parser.OFPBarrierRequest) - - info.results.pop(xid) - - if not is_barrier and req.reply_cls is not None: - self._unobserve_msg(req.reply_cls) - - self.logger.error('failed to send message <%s>', req.msg) - self.reply_to_request(req, event.Reply(exception=exception)) - - @staticmethod - def _is_error(msg): - return ( - ofp_event.ofp_msg_to_ev_cls(type(msg)) == - ofp_event.EventOFPErrorMsg - ) - - @set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER) - def _switch_features_handler(self, ev): - datapath = ev.msg.datapath - id = datapath.id - assert isinstance(id, numbers.Integral) - old_info = self._switches.get(id, None) - new_info = _SwitchInfo(datapath=datapath) - self.logger.debug( - 'add dpid %s datapath %s new_info %s old_info %s', - id, datapath, new_info, old_info, - ) - self._switches[id] = new_info - if old_info: - old_info.datapath.close() - for xid in list(old_info.barriers): - self._cancel( - old_info, xid, exception.InvalidDatapath(result=id), - ) - - @set_ev_cls(ofp_event.EventOFPStateChange, DEAD_DISPATCHER) - def _handle_dead(self, ev): - datapath = ev.datapath - id = datapath.id - self.logger.debug('del dpid %s datapath %s', id, datapath) - if id is None: - return - try: - info = self._switches[id] - except KeyError: - return - if info.datapath is datapath: - self.logger.debug('forget info %s', info) - self._switches.pop(id) - for xid in list(info.barriers): - self._cancel(info, xid, exception.InvalidDatapath(result=id)) - - @set_ev_cls(event.GetDatapathRequest, MAIN_DISPATCHER) - def _handle_get_datapath(self, req): - result = None - if req.dpid is None: - result = [v.datapath 
for v in self._switches.values()] - else: - if req.dpid in self._switches: - result = self._switches[req.dpid].datapath - self.reply_to_request(req, event.Reply(result=result)) - - @set_ev_cls(event.SendMsgRequest, MAIN_DISPATCHER) - def _handle_send_msg(self, req): - msg = req.msg - datapath = msg.datapath - parser = datapath.ofproto_parser - is_barrier = isinstance(msg, parser.OFPBarrierRequest) - - try: - si = self._switches[datapath.id] - except KeyError: - self.logger.error('unknown dpid %s' % (datapath.id,)) - rep = event.Reply( - exception=exception. - InvalidDatapath(result=datapath.id), - ) - self.reply_to_request(req, rep) - return - - def _store_xid(xid, barrier_xid): - assert xid not in si.results - assert xid not in si.xids - assert barrier_xid not in si.barriers - si.results[xid] = [] - si.xids[xid] = req - si.barriers[barrier_xid] = xid - - if is_barrier: - barrier = msg - datapath.set_xid(barrier) - _store_xid(barrier.xid, barrier.xid) - else: - if req.reply_cls is not None: - self._observe_msg(req.reply_cls) - datapath.set_xid(msg) - barrier = datapath.ofproto_parser.OFPBarrierRequest(datapath) - datapath.set_xid(barrier) - _store_xid(msg.xid, barrier.xid) - if not datapath.send_msg(msg): - return self._cancel( - si, barrier.xid, - exception.InvalidDatapath(result=datapath.id), - ) - - if not datapath.send_msg(barrier): - return self._cancel( - si, barrier.xid, - exception.InvalidDatapath(result=datapath.id), - ) - - @set_ev_cls(ofp_event.EventOFPBarrierReply, MAIN_DISPATCHER) - def _handle_barrier(self, ev): - msg = ev.msg - datapath = msg.datapath - parser = datapath.ofproto_parser - try: - si = self._switches[datapath.id] - except KeyError: - self.logger.error('unknown dpid %s', datapath.id) - return - try: - xid = si.barriers.pop(msg.xid) - except KeyError: - self.logger.debug('unknown barrier xid %s', msg.xid) - return - result = si.results.pop(xid) - req = si.xids.pop(xid) - is_barrier = isinstance(req.msg, parser.OFPBarrierRequest) - if 
req.reply_cls is not None and not is_barrier: - self._unobserve_msg(req.reply_cls) - if is_barrier and req.reply_cls == parser.OFPBarrierReply: - rep = event.Reply(result=ev.msg) - elif any(self._is_error(r) for r in result): - rep = event.Reply(exception=exception.OFError(result=result)) - elif req.reply_multi: - rep = event.Reply(result=result) - elif len(result) == 0: - rep = event.Reply() - elif len(result) == 1: - rep = event.Reply(result=result[0]) - else: - rep = event.Reply( - exception=exception. - UnexpectedMultiReply(result=result), - ) - self.reply_to_request(req, rep) - - @set_ev_cls(ofp_event.EventOFPErrorMsg, MAIN_DISPATCHER) - def _handle_reply(self, ev): - msg = ev.msg - datapath = msg.datapath - try: - si = self._switches[datapath.id] - except KeyError: - self.logger.error('unknown dpid %s', datapath.id) - return - try: - req = si.xids[msg.xid] - except KeyError: - self.logger.error('unknown error xid %s', msg.xid) - return - if ((not isinstance(ev, ofp_event.EventOFPErrorMsg)) and - (req.reply_cls is None or not isinstance(ev.msg, req.reply_cls))): - self.logger.error('unexpected reply %s for xid %s', ev, msg.xid) - return - try: - si.results[msg.xid].append(ev.msg) - except KeyError: - self.logger.error('unknown error xid %s', msg.xid) diff --git a/lte/gateway/deploy/roles/magma/files/systemd/BUILD.bazel b/lte/gateway/deploy/roles/magma/files/systemd/BUILD.bazel index adfec038d524..eb453dd7ca36 100644 --- a/lte/gateway/deploy/roles/magma/files/systemd/BUILD.bazel +++ b/lte/gateway/deploy/roles/magma/files/systemd/BUILD.bazel @@ -9,4 +9,44 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-exports_files(["sctpd.service"]) +load("@rules_pkg//pkg:mappings.bzl", "pkg_files") + +pkg_files( + name = "sctpd_service_definition", + srcs = ["sctpd.service"], + prefix = "/etc/systemd/system/", + visibility = ["//lte/gateway/release:__pkg__"], +) + +pkg_files( + name = "magma_lte_service_definitions", + srcs = [ + ":magma_connectiond.service", + ":magma_dnsd.service", + ":magma_dp_envoy.service", + ":magma_envoy_controller.service", + ":magma_lighttpd.service", + ":magma_magmad.service", + ":magma_mme.service", + ":magma_mobilityd.service", + ":magma_pipelined.service", + ":magma_redirectd.service", + ":magma_redis.service", + ":magma_sessiond.service", + ], + renames = { + ":magma_connectiond.service": "magma@connectiond.service", + ":magma_dnsd.service": "magma@dnsd.service", + ":magma_dp_envoy.service": "magma_dp@envoy.service", + ":magma_envoy_controller.service": "magma@envoy_controller.service", + ":magma_lighttpd.service": "magma@lighttpd.service", + ":magma_magmad.service": "magma@magmad.service", + ":magma_mme.service": "magma@mme.service", + ":magma_mobilityd.service": "magma@mobilityd.service", + ":magma_pipelined.service": "magma@pipelined.service", + ":magma_redirectd.service": "magma@redirectd.service", + ":magma_redis.service": "magma@redis.service", + ":magma_sessiond.service": "magma@sessiond.service", + }, + visibility = ["//lte/gateway/release:__pkg__"], +) diff --git a/lte/gateway/deploy/roles/magma/files/systemd/magma_dp_envoy.service b/lte/gateway/deploy/roles/magma/files/systemd/magma_dp_envoy.service index 6f6856501b49..450208f7e8df 100644 --- a/lte/gateway/deploy/roles/magma/files/systemd/magma_dp_envoy.service +++ b/lte/gateway/deploy/roles/magma/files/systemd/magma_dp_envoy.service @@ -19,7 +19,9 @@ Type=simple EnvironmentFile=/etc/environment # Add delay to let envoy-controller init ExecStartPre=/bin/sleep 40 +ExecStartPre=/bin/bash /usr/local/bin/configure_envoy_namespace.sh setup ExecStart=/sbin/ip netns exec envoy_ns1 
/usr/bin/envoy -c /var/opt/magma/envoy.yaml --log-path /var/log/envoy.log +ExecStopPost=/bin/bash /usr/local/bin/configure_envoy_namespace.sh destroy MemoryAccounting=yes StandardOutput=syslog StandardError=syslog diff --git a/lte/gateway/deploy/roles/magma/files/systemd/magma_envoy_controller.service b/lte/gateway/deploy/roles/magma/files/systemd/magma_envoy_controller.service index 0ca453b30ab6..e988edcb8758 100644 --- a/lte/gateway/deploy/roles/magma/files/systemd/magma_envoy_controller.service +++ b/lte/gateway/deploy/roles/magma/files/systemd/magma_envoy_controller.service @@ -19,10 +19,8 @@ Before=magma_dp@envoy.service [Service] Type=simple EnvironmentFile=/etc/environment -ExecStartPre=/bin/bash /usr/local/bin/configure_envoy_namespace.sh setup ExecStart=/usr/local/bin/envoy_controller --log_dir="/var/log" ExecStopPost=/usr/bin/env python3 /usr/local/bin/service_util.py envoy_controller -ExecStopPost=/bin/bash /usr/local/bin/configure_envoy_namespace.sh destroy MemoryAccounting=yes StandardOutput=syslog StandardError=syslog diff --git a/lte/gateway/deploy/roles/magma/tasks/main.yml b/lte/gateway/deploy/roles/magma/tasks/main.yml index 17d7a35ca226..3dd8e14ea7f6 100644 --- a/lte/gateway/deploy/roles/magma/tasks/main.yml +++ b/lte/gateway/deploy/roles/magma/tasks/main.yml @@ -421,33 +421,6 @@ - li_port when: full_provision -- name: Copy ryu ipfix patch for 3.5 - copy: - src: nx_actions.py - dest: /home/vagrant/build/python/lib/python3.5/site-packages/ryu/ofproto/ - when: full_provision - -- name: Copy ryu ipfix patch for 3.8 - copy: - src: nx_actions_3.5.py - dest: /home/vagrant/build/python/lib/python3.8/site-packages/ryu/ofproto/nx_actions.py - when: full_provision - ignore_errors: true - -- name: Copy ryu qfi patch1 for 3.8 - copy: - src: patches/0002-QFI-value-set-in-Openflow-controller-using-RYU.patch - dest: /home/vagrant/build/python/lib/python3.8/site-packages/ryu/ofproto/nicira_ext.py - when: full_provision - ignore_errors: true - -- name: Copy ryu 
qfi patch2 for 3.8 - copy: - src: patches/0003-QFI-value-set-in-Openflow-controller-using-RYU.patch - dest: /home/vagrant/build/python/lib/python3.8/site-packages/ryu/ofproto/nx_match.py - when: full_provision - ignore_errors: true - - name: Change build folder ownership ansible.builtin.file: path: /home/vagrant/build @@ -525,3 +498,30 @@ pkg: - libsqlite3-dev when: full_provision + +- name: Extend sda2 + community.general.parted: + device: /dev/sda + number: 2 + part_end: "100%" + resize: true + state: present + when: full_provision + +- name: Extend sda5 + community.general.parted: + device: /dev/sda + number: 5 + part_end: "100%" + resize: true + state: present + when: full_provision + +- name: Extend lvm volumes + become: yes + block: + - shell: pvresize /dev/sda5 + - shell: lvextend -l +100%FREE /dev/mapper/vgmagma--dev-root + ignore_errors: yes + - shell: resize2fs /dev/mapper/vgmagma--dev-root + when: full_provision diff --git a/lte/gateway/deploy/roles/magma_deb/tasks/main.yml b/lte/gateway/deploy/roles/magma_deb/tasks/main.yml index 2bb8a7ab9ba1..f147faeb0b38 100644 --- a/lte/gateway/deploy/roles/magma_deb/tasks/main.yml +++ b/lte/gateway/deploy/roles/magma_deb/tasks/main.yml @@ -89,6 +89,14 @@ state: link force: yes +- name: Create test certificates directory + file: path='/home/vagrant/magma/.cache/test_certs' state=directory + +- name: Generate the cloud VM's certs if they are not already generated + command: '/home/vagrant/magma/orc8r/tools/ansible/roles/gateway_dev/files/create_rootca /home/vagrant/magma/.cache/test_certs' + args: + creates: '/home/vagrant/magma/.cache/test_certs/rootCA.pem' + - name: Create test certificates directory file: path: /var/opt/magma/certs/ @@ -103,7 +111,6 @@ with_items: - .key - .pem - - .srl - name: Override pipelined and sessiond production configuration file: @@ -125,9 +132,6 @@ - templates/mme.conf.template - gateway.mconfig -- name: Clear existing service files - shell: rm -f /etc/systemd/system/magma* - - name: 
Override production service configurations with test configurations copy: src: "/home/vagrant/magma/{{ item.src }}" @@ -152,7 +156,3 @@ - { src: 'lte/gateway/deploy/roles/magma/files/systemd/magma_sessiond.service', dest: 'magma@sessiond.service' } - { src: 'orc8r/tools/ansible/roles/fluent_bit/files/magma_td-agent-bit.service', dest: 'magma@td-agent-bit.service' } - { src: 'lte/gateway/deploy/roles/magma/files/systemd/magma_dp_envoy.service', dest: 'magma_dp@envoy.service' } - -- name: Reboot the machine (Wait for 180s) - ansible.builtin.reboot: - reboot_timeout: 180 diff --git a/lte/gateway/deploy/roles/magma_deploy/tasks/main.yml b/lte/gateway/deploy/roles/magma_deploy/tasks/main.yml index a7689c1a6a7a..a2098235be5c 100644 --- a/lte/gateway/deploy/roles/magma_deploy/tasks/main.yml +++ b/lte/gateway/deploy/roles/magma_deploy/tasks/main.yml @@ -121,7 +121,6 @@ become: true apt: name: "{{ packages }}" - only_upgrade: yes vars: packages: - graphviz @@ -130,6 +129,17 @@ - openssl - dkms - uuid-runtime + tags: + - agwc + - base + +- name: Ensure ca-certificates is up to date + become: true + apt: + name: "{{ packages }}" + only_upgrade: yes + vars: + packages: - ca-certificates tags: - agwc diff --git a/lte/gateway/deploy/roles/service_aliases/tasks/main.yml b/lte/gateway/deploy/roles/service_aliases/tasks/main.yml new file mode 100644 index 000000000000..7780b77af843 --- /dev/null +++ b/lte/gateway/deploy/roles/service_aliases/tasks/main.yml @@ -0,0 +1,22 @@ +--- +# Copyright 2022 The Magma Authors. + +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +- name: Create convenient aliases for magma services + lineinfile: + dest: /home/{{ ansible_user }}/.bashrc + state: present + line: "{{ item }}" + with_items: + - alias magma-start='sudo service magma@magmad start' + - alias magma-stop='sudo service "magma@*" stop && sudo service sctpd stop && sudo service magma_dp@envoy stop' + - alias magma-restart='magma-stop && magma-start' + - alias magma-status='sudo service "magma*" status' diff --git a/lte/gateway/docker/README.md b/lte/gateway/docker/README.md index 57be63495db1..a434d004faa0 100644 --- a/lte/gateway/docker/README.md +++ b/lte/gateway/docker/README.md @@ -41,6 +41,7 @@ containerized AGW by running the following steps inside the VM: cd $MAGMA_ROOT/lte/gateway && make run # You can skip this if you have built the AGW with make before for component in redis nghttpx td-agent-bit; do cp "${MAGMA_ROOT}"/{orc8r,lte}/gateway/configs/templates/${component}.conf.template; done sudo systemctl stop 'magma@*' 'sctpd' # We don't want the systemd-based AGW to run when we start the containerized AGW +sudo systemctl start magma_dp@envoy cd $MAGMA_ROOT/lte/gateway/docker docker-compose build docker-compose up diff --git a/lte/gateway/docker/docker-compose.dev.yaml b/lte/gateway/docker/docker-compose.dev.yaml index 1a9f0131275e..4d664b9de411 100644 --- a/lte/gateway/docker/docker-compose.dev.yaml +++ b/lte/gateway/docker/docker-compose.dev.yaml @@ -100,3 +100,7 @@ services: liagentd: environment: - MAGMA_DEV_MODE=1 + + envoy_controller: + environment: + - MAGMA_DEV_MODE=1 diff --git a/lte/gateway/docker/docker-compose.override.yaml b/lte/gateway/docker/docker-compose.override.yaml index 86d53581a537..548de6579875 100644 --- a/lte/gateway/docker/docker-compose.override.yaml +++ b/lte/gateway/docker/docker-compose.override.yaml @@ -11,3 +11,8 @@ services: build: context: ${BUILD_CONTEXT} dockerfile: lte/gateway/docker/services/python/Dockerfile + + gateway_go: + build: + context: ${BUILD_CONTEXT} + dockerfile: 
feg/gateway/docker/go/Dockerfile diff --git a/lte/gateway/docker/docker-compose.yaml b/lte/gateway/docker/docker-compose.yaml index a0ed43bd7091..c89598b7051d 100644 --- a/lte/gateway/docker/docker-compose.yaml +++ b/lte/gateway/docker/docker-compose.yaml @@ -32,7 +32,7 @@ x-agw-python-service: &pyservice image: ${DOCKER_REGISTRY}agw_gateway_python${OPTIONAL_ARCH_POSTFIX}:${IMAGE_VERSION} # Generic anchor for lte c services -x-lte-cservice: <ecservice +x-agw-c-service: &cservice <<: *service image: ${DOCKER_REGISTRY}agw_gateway_c${OPTIONAL_ARCH_POSTFIX}:${IMAGE_VERSION} @@ -40,6 +40,11 @@ services: magmad: <<: *pyservice container_name: magmad + healthcheck: + test: ["CMD", "nc", "-zv", "localhost", "50052"] + interval: "4s" + timeout: "4s" + retries: 3 environment: DOCKER_REGISTRY: ${DOCKER_REGISTRY} DOCKER_USERNAME: ${DOCKER_USERNAME} @@ -54,6 +59,7 @@ services: container_name: redis healthcheck: test: ["CMD", "redis-cli", "-p", "6380", "ping"] + interval: "4s" timeout: "4s" retries: 3 command: > @@ -68,6 +74,7 @@ services: - redis healthcheck: test: ["CMD", "nc", "-zv", "localhost", "50067"] + interval: "4s" timeout: "4s" retries: 3 command: /usr/bin/env python3 -m magma.directoryd.main @@ -77,6 +84,7 @@ services: container_name: subscriberdb healthcheck: test: ["CMD", "nc", "-zv", "localhost", "50051"] + interval: "4s" timeout: "4s" retries: 3 command: /usr/bin/env python3 -m magma.subscriberdb.main @@ -86,6 +94,7 @@ services: container_name: enodebd healthcheck: test: ["CMD", "nc", "-zv", "localhost", "60055"] + interval: "4s" timeout: "4s" retries: 3 cap_add: @@ -97,6 +106,7 @@ services: container_name: state healthcheck: test: ["CMD", "nc", "-zv", "localhost", "50074"] + interval: "4s" timeout: "4s" retries: 3 depends_on: @@ -110,6 +120,7 @@ services: - redis healthcheck: test: ["CMD", "nc", "-zv", "localhost", "50068"] + interval: "4s" timeout: "4s" retries: 3 command: /usr/bin/env python3 -m magma.policydb.main @@ -119,6 +130,7 @@ services: container_name: 
health healthcheck: test: ["CMD", "nc", "-zv", "localhost", "50080"] + interval: "4s" timeout: "4s" retries: 3 command: /usr/bin/env python3 -m magma.health.main @@ -128,6 +140,7 @@ services: container_name: monitord healthcheck: test: ["CMD", "nc", "-zv", "localhost", "50076"] + interval: "4s" timeout: "4s" retries: 3 cap_add: @@ -139,6 +152,7 @@ services: container_name: redirectd healthcheck: test: ["CMD", "nc", "-zv", "localhost", "50071"] + interval: "4s" timeout: "4s" retries: 3 command: /usr/bin/env python3 -m magma.redirectd.main @@ -148,6 +162,7 @@ services: container_name: smsd healthcheck: test: ["CMD", "nc", "-zv", "localhost", "50078"] + interval: "4s" timeout: "4s" retries: 3 command: /usr/bin/env python3 -m magma.smsd.main @@ -157,6 +172,7 @@ services: container_name: control_proxy healthcheck: test: ["CMD", "nc", "-zv", "localhost", "8443"] + interval: "4s" timeout: "4s" retries: 3 command: > @@ -169,15 +185,21 @@ services: container_name: ctraced healthcheck: test: ["CMD", "nc", "-zv", "localhost", "50079"] + interval: "4s" timeout: "4s" retries: 3 command: /usr/bin/env python3 -m magma.ctraced.main sctpd: - <<: *ltecservice + <<: *cservice container_name: sctpd ulimits: core: -1 + healthcheck: + test: ["CMD", "test", "-S", "/tmp/sctpd_downstream.sock"] + interval: "4s" + timeout: "4s" + retries: 3 security_opt: - seccomp:unconfined environment: @@ -185,10 +207,11 @@ services: command: /usr/local/bin/sctpd oai_mme: - <<: *ltecservice + <<: *cservice container_name: oai_mme healthcheck: test: ["CMD", "nc", "-zv", "localhost", "50073"] + interval: "4s" timeout: "4s" retries: 3 ulimits: @@ -221,6 +244,7 @@ services: - SYS_NICE healthcheck: test: ["CMD", "nc", "-zv", "localhost", "50063"] + interval: "4s" timeout: "4s" retries: 3 command: > @@ -232,7 +256,7 @@ services: /usr/bin/env python3 -m magma.pipelined.main" sessiond: - <<: *ltecservice + <<: *cservice container_name: sessiond ulimits: core: -1 @@ -244,6 +268,7 @@ services: - directoryd 
healthcheck: test: ["CMD", "nc", "-zv", "localhost", "50065"] + interval: "4s" timeout: "4s" retries: 3 cap_drop: @@ -255,6 +280,7 @@ services: container_name: mobilityd healthcheck: test: ["CMD", "nc", "-zv", "localhost", "60051"] + interval: "4s" timeout: "4s" retries: 3 command: @@ -265,6 +291,7 @@ services: container_name: td-agent-bit healthcheck: test: ["CMD", "nc", "-zv", "localhost", "5140"] + interval: "4s" timeout: "4s" retries: 3 logging: @@ -278,15 +305,17 @@ services: container_name: eventd healthcheck: test: ["CMD", "nc", "-zv", "localhost", "50075"] + interval: "4s" timeout: "4s" retries: 3 command: /usr/bin/env python3 -m magma.eventd.main connectiond: - <<: *ltecservice + <<: *cservice container_name: connectiond healthcheck: test: ["CMD", "nc", "-zv", "localhost", "50082"] + interval: "4s" timeout: "4s" retries: 3 cap_add: @@ -294,11 +323,23 @@ services: command: /usr/local/bin/connectiond liagentd: - <<: *ltecservice + <<: *cservice container_name: liagentd healthcheck: test: ["CMD", "nc", "-zv", "localhost", "50065"] + interval: "4s" timeout: "4s" retries: 3 command: /usr/local/bin/liagentd restart: "no" + + envoy_controller: + <<: *service + image: ${DOCKER_REGISTRY}gateway_go${OPTIONAL_ARCH_POSTFIX}:${IMAGE_VERSION} + container_name: envoy_controller + healthcheck: + test: ["CMD", "nc", "-zv", "localhost", "50081"] + interval: "4s" + timeout: "4s" + retries: 3 + command: /var/opt/magma/bin/envoy_controller diff --git a/lte/gateway/docker/services/c/Dockerfile b/lte/gateway/docker/services/c/Dockerfile index 95184dc7e883..f14b30a182d0 100644 --- a/lte/gateway/docker/services/c/Dockerfile +++ b/lte/gateway/docker/services/c/Dockerfile @@ -49,63 +49,64 @@ RUN apt-get update && \ # Install dependencies required for building RUN apt-get update && apt-get install -y \ - sudo \ - curl \ - wget \ - unzip \ - cmake \ - git \ - build-essential \ autoconf \ - libtool \ - pkg-config \ - libgflags-dev \ - libc++-dev \ - protobuf-compiler \ - ninja-build \ 
autogen \ + build-essential \ ccache \ - libprotoc-dev \ - libxml2-dev \ - libxslt-dev \ - libyaml-cpp-dev \ - nlohmann-json3-dev \ - libgoogle-glog-dev \ - libsctp-dev \ - libpcap-dev \ - libmnl-dev \ - uuid-dev \ - python3-pip \ - libcurl4-openssl-dev \ - libdouble-conversion-dev \ + check \ + cmake \ + curl \ + git \ libboost-chrono-dev \ libboost-context-dev \ libboost-program-options-dev \ libboost-filesystem-dev \ libboost-regex-dev \ - check \ - libssl-dev \ - libsctp-dev \ - libtspi-dev \ + libc++-dev \ libconfig-dev \ - libgmp3-dev \ + libcurl4-openssl-dev \ libczmq-dev \ - libsqlite3-dev + libdouble-conversion-dev \ + libgflags-dev \ + libgmp3-dev \ + libgoogle-glog-dev \ + libmnl-dev \ + libpcap-dev \ + libprotoc-dev \ + libsctp-dev \ + libsqlite3-dev \ + libssl-dev \ + libtspi-dev \ + libtool \ + libxml2-dev \ + libxslt-dev \ + libyaml-cpp-dev \ + ninja-build \ + nlohmann-json3-dev \ + pkg-config \ + protobuf-compiler \ + python3-pip \ + sudo \ + unzip \ + uuid-dev \ + wget \ + && rm -rf /var/lib/apt/lists/* RUN echo "deb https://artifactory.magmacore.org/artifactory/debian-test focal-ci main" > /etc/apt/sources.list.d/magma.list RUN wget -qO - https://artifactory.magmacore.org:443/artifactory/api/gpg/key/public | apt-key add - RUN apt-get update && apt-get install -y \ + grpc-dev \ libfolly-dev \ + liblfds710 \ + magma-cpp-redis \ + magma-libfluid \ oai-asn1c \ oai-freediameter \ oai-gnutls \ oai-nettle \ - magma-cpp-redis \ - magma-libfluid \ - grpc-dev \ prometheus-cpp-dev \ - liblfds710 -RUN rm /etc/apt/sources.list.d/magma.list + && rm -rf /var/lib/apt/lists/* \ + && rm /etc/apt/sources.list.d/magma.list ENV MAGMA_ROOT /magma WORKDIR /magma @@ -169,35 +170,36 @@ ENV TZ=Europe/Paris # Install runtime dependencies RUN apt-get update && apt-get install -y \ - apt-utils \ apt-transport-https \ + apt-utils \ ca-certificates \ - wget \ gnupg \ - sudo \ - netcat \ - libyaml-cpp-dev \ + iproute2 \ + iptables \ libgoogle-glog-dev \ - libprotoc-dev \ 
libmnl-dev \ + libprotoc-dev \ libsctp-dev \ - psmisc \ - openssl \ + libtspi1 \ + libyaml-cpp-dev \ net-tools \ + netcat \ + openssl \ + psmisc \ + sudo \ tshark \ tzdata \ - iproute2 \ - iptables \ - libtspi1 \ + wget \ && rm -rf /var/lib/apt/lists/* RUN echo "deb https://artifactory.magmacore.org/artifactory/debian-test focal-ci main" > /etc/apt/sources.list.d/magma.list RUN wget -qO - https://artifactory.magmacore.org:443/artifactory/api/gpg/key/public | apt-key add - RUN apt-get update && apt-get install -y \ libopenvswitch \ - openvswitch-datapath-dkms \ openvswitch-common \ - openvswitch-switch + openvswitch-datapath-dkms \ + openvswitch-switch \ + && rm -rf /var/lib/apt/lists/* # Copy pre-built shared object files COPY --from=builder /usr/lib/"${CPU_ARCH}"-linux-gnu/liblsan.so.0 /usr/lib/"${CPU_ARCH}"-linux-gnu/ diff --git a/lte/gateway/docker/services/envoy_controller/Dockerfile b/lte/gateway/docker/services/envoy_controller/Dockerfile deleted file mode 100644 index fd490287e693..000000000000 --- a/lte/gateway/docker/services/envoy_controller/Dockerfile +++ /dev/null @@ -1,5 +0,0 @@ -FROM cbuilder:latest - -ENV PATH="/root/go/bin/:$PATH" - -ENTRYPOINT envoy_controller diff --git a/lte/gateway/docker/services/python/Dockerfile b/lte/gateway/docker/services/python/Dockerfile index 8026dee65ac7..acb2f45080ed 100644 --- a/lte/gateway/docker/services/python/Dockerfile +++ b/lte/gateway/docker/services/python/Dockerfile @@ -38,24 +38,25 @@ ARG CODEGEN_VERSION=2.2.3 ARG DEBIAN_FRONTEND=noninteractive RUN apt-get update && apt-get install -y \ - wget \ - ruby \ - sudo \ - ruby-dev \ docker.io \ - python3-pip \ + git \ + lsb-release \ + libsystemd-dev \ + libprotobuf-dev \ + pkg-config \ python3-dev \ python3-eventlet \ - python3-pystemd \ + python3-pip \ python3-protobuf \ - git \ - virtualenv \ - lsb-release \ - openjdk-8-jre-headless \ + python3-pystemd \ + ruby \ + ruby-dev \ + sudo \ openjdk-8-jdk \ - pkg-config \ - libsystemd-dev \ - libprotobuf-dev + 
openjdk-8-jre-headless \ + virtualenv \ + wget \ + && rm -rf /var/lib/apt/lists/* RUN gem install fpm @@ -88,23 +89,25 @@ ARG DEBIAN_FRONTEND=noninteractive RUN apt-get update && apt-get install -y \ apt-transport-https \ ca-certificates \ + ethtool \ + inetutils-ping \ + iproute2 \ + iptables \ + isc-dhcp-client \ + linux-headers-generic \ + net-tools \ netcat \ - sudo \ + nghttp2-proxy \ python3-pip \ python3-venv \ - virtualenv \ python3-eventlet \ python3-pystemd \ python3-jinja2 \ - nghttp2-proxy \ - net-tools \ - inetutils-ping \ redis-server \ + sudo \ + virtualenv \ wget \ - ethtool \ - linux-headers-generic \ - iptables \ - iproute2 + && rm -rf /var/lib/apt/lists/* RUN python3 -m venv $VIRTUAL_ENV @@ -123,7 +126,8 @@ RUN apt-get update && apt-get install -y \ openvswitch-common \ openvswitch-switch \ bcc-tools \ - wireguard + wireguard \ + && rm -rf /var/lib/apt/lists/* COPY --from=builder /build /build COPY --from=builder /magma /magma diff --git a/lte/gateway/fabfile.py b/lte/gateway/fabfile.py index 04dbf4eb856a..3e7c8c05d87e 100644 --- a/lte/gateway/fabfile.py +++ b/lte/gateway/fabfile.py @@ -12,7 +12,6 @@ """ import sys -import time from distutils.util import strtobool from time import sleep @@ -49,6 +48,7 @@ fab dev package upload_to_aws """ +GATEWAY_IP_ADDRESS = "192.168.60.142" AGW_ROOT = "$MAGMA_ROOT/lte/gateway" AGW_PYTHON_ROOT = "$MAGMA_ROOT/lte/gateway/python" FEG_INTEG_TEST_ROOT = AGW_PYTHON_ROOT + "/integ_tests/federated_tests" @@ -309,9 +309,8 @@ def _modify_for_bazel_services(): run("sudo systemctl daemon-reload") -def bazel_integ_test_pre_build( - gateway_host=None, test_host=None, trf_host=None, - destroy_vm='True', provision_vm='True', +def provision_magma_dev_vm( + gateway_host=None, destroy_vm='True', provision_vm='True', ): """ Prepare to run the integration tests on the bazel build services. @@ -322,32 +321,18 @@ def bazel_integ_test_pre_build( gateway_host: The ssh address string of the machine to run the gateway services on. 
Formatted as "host:port". If not specified, defaults to the `magma` vagrant box. - - test_host: The ssh address string of the machine to run the tests on - on. Formatted as "host:port". If not specified, defaults to the - `magma_test` vagrant box. - - trf_host: The ssh address string of the machine to run the TrafficServer - on. Formatted as "host:port". If not specified, defaults to the - `magma_trfserver` vagrant box. """ destroy_vm = bool(strtobool(destroy_vm)) provision_vm = bool(strtobool(provision_vm)) - # Setup the gateway: use the provided gateway if given, else default to the - # vagrant machine - gateway_ip = '192.168.60.142' - if not gateway_host: gateway_host = vagrant_setup( 'magma', destroy_vm, force_provision=provision_vm, ) else: ansible_setup(gateway_host, "dev", "magma_dev.yml") - gateway_ip = gateway_host.split('@')[1].split(':')[0] execute(_dist_upgrade) - execute(_modify_for_bazel_services) def bazel_integ_test_post_build( @@ -384,7 +369,8 @@ def bazel_integ_test_post_build( ansible_setup(gateway_host, "dev", "magma_dev.yml") gateway_ip = gateway_host.split('@')[1].split(':')[0] - execute(_restart_gateway) + execute(_modify_for_bazel_services) + execute(_start_gateway) # Setup the trfserver: use the provided trfserver if given, else default to the # vagrant machine @@ -414,6 +400,25 @@ def bazel_integ_test_post_build( env.hosts = [gateway_host] +def _setup_vm(host, name, ansible_role, ansible_file, destroy_vm, provision_vm): + ip = None + if not host: + host = vagrant_setup( + name, destroy_vm, force_provision=provision_vm, + ) + else: + ansible_setup(host, ansible_role, ansible_file) + ip = host.split('@')[1].split(':')[0] + return host, ip + + +def _setup_gateway(gateway_host, name, ansible_role, ansible_file, destroy_vm, provision_vm): + gateway_host, gateway_ip = _setup_vm(gateway_host, name, ansible_role, ansible_file, destroy_vm, provision_vm) + if gateway_ip is None: + gateway_ip = GATEWAY_IP_ADDRESS + return gateway_host, gateway_ip + + 
def integ_test( gateway_host=None, test_host=None, trf_host=None, destroy_vm='True', provision_vm='True', @@ -441,45 +446,20 @@ def integ_test( # Setup the gateway: use the provided gateway if given, else default to the # vagrant machine - gateway_ip = '192.168.60.142' - - if not gateway_host: - gateway_host = vagrant_setup( - 'magma', destroy_vm, force_provision=provision_vm, - ) - else: - ansible_setup(gateway_host, "dev", "magma_dev.yml") - gateway_ip = gateway_host.split('@')[1].split(':')[0] - + gateway_host, gateway_ip = _setup_gateway(gateway_host, "magma", "dev", "magma_dev.yml", destroy_vm, provision_vm) execute(_dist_upgrade) execute(_build_magma) execute(_run_sudo_python_unit_tests) execute(_start_gateway) - # Run suite of integ tests that are required to be run on the access gateway - # instead of the test VM - # TODO: fix the integration test T38069907 - # execute(_run_local_integ_tests) - # Setup the trfserver: use the provided trfserver if given, else default to the # vagrant machine - if not trf_host: - trf_host = vagrant_setup( - 'magma_trfserver', destroy_vm, force_provision=provision_vm, - ) - else: - ansible_setup(trf_host, "trfserver", "magma_trfserver.yml") + _setup_vm(trf_host, "magma_trfserver", "trfserver", "magma_trfserver.yml", destroy_vm, provision_vm) execute(_start_trfserver) # Run the tests: use the provided test machine if given, else default to # the vagrant machine - if not test_host: - test_host = vagrant_setup( - 'magma_test', destroy_vm, force_provision=provision_vm, - ) - else: - ansible_setup(test_host, "test", "magma_test.yml") - + _setup_vm(test_host, "magma_test", "test", "magma_test.yml", destroy_vm, provision_vm) execute(_make_integ_tests) execute(_run_integ_tests, gateway_ip) @@ -489,6 +469,48 @@ def integ_test( env.hosts = [gateway_host] +def integ_test_deb_installation( + gateway_host=None, test_host=None, trf_host=None, + destroy_vm='True', provision_vm='True', +): + """ + Run the integration tests. 
This defaults to running on local vagrant + machines, but can also be pointed to an arbitrary host (e.g. amazon) by + passing "address:port" as arguments + + gateway_host: The ssh address string of the machine to run the gateway + services on. Formatted as "host:port". If not specified, defaults to + the `magma_deb` vagrant box. + + test_host: The ssh address string of the machine to run the tests on + on. Formatted as "host:port". If not specified, defaults to the + `magma_test` vagrant box. + + trf_host: The ssh address string of the machine to run the TrafficServer + on. Formatted as "host:port". If not specified, defaults to the + `magma_trfserver` vagrant box. + """ + + destroy_vm = bool(strtobool(destroy_vm)) + provision_vm = bool(strtobool(provision_vm)) + + # Set up the gateway: use the provided gateway if given, else default to the + # vagrant machine + _, gateway_ip = _setup_gateway(gateway_host, "magma_deb", "deb", "magma_deb.yml", destroy_vm, provision_vm) + execute(_start_gateway) + + # Set up the trfserver: use the provided trfserver if given, else default to the + # vagrant machine + _setup_vm(trf_host, "magma_trfserver", "trfserver", "magma_trfserver.yml", destroy_vm, provision_vm) + execute(_start_trfserver) + + # Run the tests: use the provided test machine if given, else default to + # the vagrant machine + _setup_vm(test_host, "magma_test", "test", "magma_test.yml", destroy_vm, provision_vm) + execute(_make_integ_tests) + execute(_run_integ_tests, gateway_ip) + + def run_integ_tests(tests=None, federated_mode=False): """ Function is required to run tests only in pre-configured Jenkins env. 
@@ -509,7 +531,7 @@ def run_integ_tests(tests=None, federated_mode=False): $ make integ_test TESTS=s1aptests/test_attach_detach.py $ make fed_integ_test TESTS=federated_tests/s1aptests/test_attach_detach.py """ - test_host = vagrant_setup("magma_test", destroy_vm=False) + vagrant_setup("magma_test", destroy_vm=False) gateway_ip = '192.168.60.142' if tests: tests = "TESTS=" + tests @@ -756,13 +778,6 @@ def _build_magma(): run('make') -def _oai_coverage(): - """ Get the code coverage statistic for OAI """ - - with cd(AGW_ROOT): - run('make coverage_oai') - - def _run_sudo_python_unit_tests(): """ Run the magma unit tests """ with cd(AGW_ROOT): @@ -772,16 +787,7 @@ def _run_sudo_python_unit_tests(): def _start_gateway(): """ Starts the gateway """ - - with cd(AGW_ROOT): - run('make run') - - -def _restart_gateway(): - """ Restart the gateway """ - - with cd(AGW_ROOT): - run('make restart') + run('sudo service magma@magmad start') def _set_service_config_var(service, var_name, value): @@ -822,9 +828,6 @@ def _start_trfserver(): 'nohup sudo /usr/local/bin/traffic_server.py 192.168.60.144 62462 > /dev/null 2>&1";' % (key, host, port), ) - # local( - # 'stty cbreak' - # ) def _make_integ_tests(): diff --git a/lte/gateway/python/BUILD.bazel b/lte/gateway/python/BUILD.bazel index d153f4f7d6c1..a6b9c00bcd5f 100644 --- a/lte/gateway/python/BUILD.bazel +++ b/lte/gateway/python/BUILD.bazel @@ -8,3 +8,35 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+ +load("@rules_pkg//pkg:mappings.bzl", "pkg_filegroup") +load("//bazel:runfiles.bzl", "expand_runfiles") + +LTE_PY_SERVICES = [ + "enodebd", + "health", + "kernsnoopd", + "mobilityd", + "monitord", + "pipelined", + "policydb", + "redirectd", + "smsd", + "subscriberdb", +] + +[ + expand_runfiles( + name = "{py_service}_expanded".format(py_service = py_service), + targets = [ + "//lte/gateway/python/magma/{py_service}:{py_service}".format(py_service = py_service), + ], + ) + for py_service in LTE_PY_SERVICES +] + +pkg_filegroup( + name = "magma_python_lte_services", + srcs = ["{py_service}_expanded".format(py_service = py_service) for py_service in LTE_PY_SERVICES], + visibility = ["//lte/gateway/release:__pkg__"], +) diff --git a/lte/gateway/python/integ_tests/Makefile b/lte/gateway/python/integ_tests/Makefile index 827ee74a721f..08e9752c04ea 100644 --- a/lte/gateway/python/integ_tests/Makefile +++ b/lte/gateway/python/integ_tests/Makefile @@ -88,8 +88,3 @@ ifdef TESTS else $(foreach test,$(NON_SANITY_TESTS),$(call execute_test,$(test));) endif - -local_integ_test: - # check if magma services are running - systemctl is-active --quiet magma@magmad || (echo "Local integ tests should be run on access gw with magma services running"; exit 1) - . 
$(PYTHON_BUILD)/bin/activate; sudo $(BIN)/pytest -s $(LOCAL_INTEG_TESTS) diff --git a/lte/gateway/python/integ_tests/defs.mk b/lte/gateway/python/integ_tests/defs.mk index 4da9b18abbda..2f46877a1e0f 100644 --- a/lte/gateway/python/integ_tests/defs.mk +++ b/lte/gateway/python/integ_tests/defs.mk @@ -131,6 +131,7 @@ s1aptests/test_attach_ul_udp_data_with_sessiond_restart.py \ s1aptests/test_service_req_ul_udp_data_with_mme_restart.py \ s1aptests/test_attach_detach_setsessionrules_tcp_data.py \ s1aptests/test_enable_ipv6_iface.py \ +s1aptests/test_ipv6_non_nat_dp_ul_tcp.py \ s1aptests/test_disable_ipv6_iface.py EXTENDED_TESTS = s1aptests/test_modify_mme_config_for_sanity.py \ @@ -274,7 +275,6 @@ s1aptests/test_restore_config_after_non_sanity.py #s1aptests/test_ipv6_non_nat_dp_dl_tcp.py #s1aptests/test_ipv6_non_nat_dp_ul_udp.py #s1aptests/test_ipv6_non_nat_dp_dl_udp.py -#s1aptests/test_ipv6_non_nat_dp_ul_tcp.py #--------------- # TODO: Add ipv6 tests to integ test suite @@ -308,7 +308,15 @@ s1aptests/test_attach_emergency.py \ s1aptests/test_attach_detach_after_ue_context_release.py \ s1aptests/test_attach_esm_information_wrong_apn.py \ s1aptests/test_attach_detach_secondary_pdn_invalid_apn.py \ -s1aptests/test_standalone_pdn_conn_req_with_apn_correction.py +s1aptests/test_standalone_pdn_conn_req_with_apn_correction.py \ +s1aptests/test_attach_service_without_mac.py \ +s1aptests/test_attach_mme_restart_detach_multi_ue.py \ +s1aptests/test_attach_detach_with_mme_restart.py \ +s1aptests/test_attach_detach_looped.py \ +s1aptests/test_attach_ipv4v6_pdn_type.py \ +s1aptests/test_standalone_pdn_conn_req.py \ +s1aptests/test_attach_detach_dedicated_multi_ue.py \ +s1aptests/test_attach_detach_dedicated_bearer_deactivation_invalid_imsi.py CLOUD_TESTS = cloud_tests/checkin_test.py \ @@ -317,6 +325,3 @@ cloud_tests/config_test.py S1AP_TESTER_CFG=$(MAGMA_ROOT)/lte/gateway/python/integ_tests/data/s1ap_tester_cfg S1AP_TESTER_PYTHON_PATH=$(S1AP_TESTER_ROOT)/bin - -# Local integ tests 
are run on the magma access gateway, not the test VM -LOCAL_INTEG_TESTS = gxgy_tests diff --git a/lte/gateway/python/integ_tests/federated_tests/docker/run.py b/lte/gateway/python/integ_tests/federated_tests/docker/run.py new file mode 100755 index 000000000000..0ee9c1705fec --- /dev/null +++ b/lte/gateway/python/integ_tests/federated_tests/docker/run.py @@ -0,0 +1,72 @@ +#!/usr/bin/env python3 + +""" +Copyright 2022 The Magma Authors. + +This source code is licensed under the BSD-style license found in the +LICENSE file in the root directory of this source tree. + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" + +import argparse +import os +import subprocess +import sys + +MAGMA_ROOT = os.environ["MAGMA_ROOT"] +SNOWFLAKE_PATH = MAGMA_ROOT + '/.cache/feg/' +SNOWFLAKE_FILE = MAGMA_ROOT + '/.cache/feg/snowflake' + + +def main() -> None: + """ create a snowflake file if necessary, then start docker containers """ + args = _parse_args() + if not os.path.isfile(SNOWFLAKE_FILE): + _create_snowflake_file() + _exec_docker_cmd(args) + + +def _create_snowflake_file() -> None: + if os.path.isdir(SNOWFLAKE_FILE): + _exec_cmd(['rm', '-r', SNOWFLAKE_FILE]) + print("Creating snowflake file") + _exec_cmd(['mkdir', '-p', SNOWFLAKE_PATH]) + _exec_cmd(['touch', SNOWFLAKE_FILE]) + + +def _exec_docker_cmd(args) -> None: + cmd = ['docker-compose', 'up', '-d'] + if args.down: + cmd = ['docker-compose', 'down'] + print(f"Running {' '.join(cmd)}...") + _exec_cmd(cmd) + + +def _exec_cmd(cmd) -> None: + try: + subprocess.run(cmd, check=True) + except subprocess.CalledProcessError as err: + sys.exit(err.returncode) + + +def _parse_args() -> argparse.Namespace: + """ Parse the command line args """ + parser = 
argparse.ArgumentParser(description='FeG run tool') + + # Other actions + parser.add_argument( + '--down', '-d', + action='store_true', + help='Stop running containers', + ) + args = parser.parse_args() + return args + + +if __name__ == '__main__': + main() diff --git a/lte/gateway/python/integ_tests/federated_tests/fabfile.py b/lte/gateway/python/integ_tests/federated_tests/fabfile.py index 31f2c2a5df74..81018b3e1ca4 100644 --- a/lte/gateway/python/integ_tests/federated_tests/fabfile.py +++ b/lte/gateway/python/integ_tests/federated_tests/fabfile.py @@ -201,7 +201,7 @@ def build_feg(): with cd(feg_docker_integ_test_path_vagrant): run('docker-compose down') run('docker-compose build') - run('docker-compose up -d') + run('./run.py') def _build_feg_on_host(): @@ -218,7 +218,7 @@ def _build_feg_on_host(): cwd=feg_docker_integ_test_path, ) subprocess.check_call( - 'docker-compose up -d', shell=True, + './run.py', shell=True, cwd=feg_docker_integ_test_path, ) @@ -229,7 +229,7 @@ def start_feg(): """ vagrant_setup('magma', destroy_vm=False) with cd(feg_docker_integ_test_path_vagrant): - run('docker-compose up -d') + run('./run.py') def _start_feg_on_host(): @@ -237,7 +237,7 @@ def _start_feg_on_host(): start FEG locally on Docker """ subprocess.check_call( - 'docker-compose up -d', shell=True, + './run.py', shell=True, cwd=feg_docker_integ_test_path, ) diff --git a/lte/gateway/python/integ_tests/gxgy_tests/policies.py b/lte/gateway/python/integ_tests/gxgy_tests/policies.py deleted file mode 100644 index 7bc59bad6367..000000000000 --- a/lte/gateway/python/integ_tests/gxgy_tests/policies.py +++ /dev/null @@ -1,71 +0,0 @@ -""" -Copyright 2020 The Magma Authors. - -This source code is licensed under the BSD-style license found in the -LICENSE file in the root directory of this source tree. 
- -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -""" -from lte.protos.policydb_pb2 import FlowDescription, FlowMatch, PolicyRule -from magma.pipelined.tests.app.packet_builder import IPPacketBuilder - -MAC_DEST = "5e:cc:cc:b1:49:4b" - - -def create_uplink_rule( - id, rating_group, ip_dest, m_key=None, - priority=10, tracking=PolicyRule.ONLY_OCS, - action=FlowDescription.PERMIT, -): - """ - Create a rule with a single uplink IP flow, useful for testing - Args: - id (string): rule id - rating_group (int): charging key - ip_dest (string): IP destination for rule flow - m_key (optional string): monitoring key, if the rule is tracked by PCRF - priority (int): priority of flow, the greater the higher the priority - tracking (PolicyRule.TrackingType): enum to dictate whether OCS or PCRF - or both is tracking the credit - action: permit or deny - Returns: - PolicyRule with single uplink IP flow - """ - return PolicyRule( - id=id, - priority=priority, - flow_list=[ - FlowDescription( - match=FlowMatch( - ipv4_dst=ip_dest, direction=FlowMatch.UPLINK, - ), - action=action, - ), - ], - tracking_type=tracking, - rating_group=rating_group, - monitoring_key=m_key, - ) - - -def get_packets_for_flows(sub, flows): - """ - Get packets sent from a subscriber to match a set of flows - Args: - sub (SubscriberContext): subscriber to send packets towards - flows ([FlowDescription]): list of flows to send matching packets to - Returns: - list of scapy packets to send - """ - packets = [] - for flow in flows: - packet = IPPacketBuilder()\ - .set_ip_layer(flow.match.ipv4_dst, sub.ip)\ - .set_ether_layer(MAC_DEST, "00:00:00:00:00:00")\ - .build() - packets.append(packet) - return packets diff --git 
a/lte/gateway/python/integ_tests/gxgy_tests/session_manager.py b/lte/gateway/python/integ_tests/gxgy_tests/session_manager.py deleted file mode 100644 index 37479d492912..000000000000 --- a/lte/gateway/python/integ_tests/gxgy_tests/session_manager.py +++ /dev/null @@ -1,159 +0,0 @@ -""" -Copyright 2020 The Magma Authors. - -This source code is licensed under the BSD-style license found in the -LICENSE file in the root directory of this source tree. - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -""" -from lte.protos import session_manager_pb2, session_manager_pb2_grpc -from lte.protos.session_manager_pb2 import ( - ChargingCredit, - CreditUnit, - CreditUpdateResponse, - GrantedUnits, - UsageMonitoringCredit, - UsageMonitoringUpdateResponse, -) -from ryu.lib import hub - - -class MockSessionManager(session_manager_pb2_grpc.CentralSessionControllerServicer): - def __init__(self, *args, **kwargs): - super(MockSessionManager, self).__init__(*args, **kwargs) - self.mock_create_session = None - self.mock_terminate_session = None - self.mock_update_session = None - - def CreateSession(self, req, ctx): - return self.mock_create_session(req, ctx) - - def TerminateSession(self, req, ctx): - return self.mock_terminate_session(req, ctx) - - def UpdateSession(self, req, ctx): - return self.mock_update_session(req, ctx) - - -def get_from_queue(q, retries=10, sleep_time=0.5): - """ - get an object from a hub.Queue by polling - Args: - q (hub.queue): queue to wait on - retries (int): number of times to try getting from the queue - sleep_time (float): amount of seconds to wait between retries - Returns: - The object, or None if it wasn't retrieved in the number of retries - """ - for _ in range(retries): - try: - return 
q.get(block=False) - except hub.QueueEmpty: - hub.sleep(0.5) - continue - return None - - -def get_standard_update_response( - update_complete, monitor_complete, quota, - is_final=False, - success=True, - monitor_action=UsageMonitoringCredit.CONTINUE, -): - """ - Create a CreditUpdateResponse with some useful defaults - Args: - update_complete (hub.Queue): eventlet queue to wait for update responses on - monitor_complete (hub.Queue): eventlet queue to wait for monitor responses on - quota (int): number of bytes to return - is_final (bool): True if these are the last credits to return - success (bool): True if the update was successful - monitor_action (UsageMonitoringCredit.Action): action to take with response, - defaults to CONTINUE - """ - def update_response(*args, **kwargs): - charging_responses = [] - monitor_responses = [] - for update in args[0].updates: - charging_responses.append( - create_update_response( - update.sid, update.usage.charging_key, quota, - is_final=is_final, success=success, - ), - ) - update_complete.put(update) - for monitor in args[0].usage_monitors: - monitor_responses.append( - create_monitor_response( - monitor.sid, monitor.update.monitoring_key, quota, - monitor.update.level, action=monitor_action, - success=success, - ), - ) - monitor_complete.put(monitor) - return session_manager_pb2.UpdateSessionResponse( - responses=charging_responses, - usage_monitor_responses=monitor_responses, - ) - return update_response - - -def create_update_response( - imsi, charging_key, total_quota, - is_final=False, - success=True, -): - """ - Create a CreditUpdateResponse with some useful defaults - Args: - imsi (string): subscriber id - charging_key (int): rating group - quota (int): number of bytes to return - is_final (bool): True if these are the last credits to return - success (bool): True if the update was successful - """ - return CreditUpdateResponse( - success=success, - sid=imsi, - charging_key=charging_key, - credit=ChargingCredit( - 
granted_units=GrantedUnits( - total=CreditUnit(is_valid=True, volume=total_quota), - ), - is_final=is_final, - ), - ) - - -def create_monitor_response( - imsi, m_key, total_quota, level, - action=UsageMonitoringCredit.CONTINUE, - success=True, -): - """ - Create a UsageMonitoringUpdateResponse with some useful defaults - Args: - imsi (string): subscriber id - m_key (string): monitoring key - quota (int): number of bytes to return - level (MonitoringLevel): session level or rule level - action (UsageMonitoringCredit.Action): action to take with response, - defaults to CONTINUE - success (bool): True if the update was successful - """ - return UsageMonitoringUpdateResponse( - success=success, - sid=imsi, - credit=UsageMonitoringCredit( - monitoring_key=m_key, - granted_units=GrantedUnits( - total=CreditUnit(is_valid=True, volume=total_quota), - ), - level=level, - action=action, - ), - ) diff --git a/lte/gateway/python/integ_tests/gxgy_tests/test_credit_tracking.py b/lte/gateway/python/integ_tests/gxgy_tests/test_credit_tracking.py deleted file mode 100644 index 0eb23173ca62..000000000000 --- a/lte/gateway/python/integ_tests/gxgy_tests/test_credit_tracking.py +++ /dev/null @@ -1,392 +0,0 @@ -""" -Copyright 2020 The Magma Authors. - -This source code is licensed under the BSD-style license found in the -LICENSE file in the root directory of this source tree. - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-""" -import unittest -from unittest.mock import Mock - -from lte.protos import session_manager_pb2 -from lte.protos.policydb_pb2 import PolicyRule -from lte.protos.subscriberdb_pb2 import SubscriberID -from magma.pipelined.tests.app.subscriber import SubContextConfig -from ryu.lib import hub - -from .policies import create_uplink_rule, get_packets_for_flows -from .session_manager import ( - create_update_response, - get_from_queue, - get_standard_update_response, -) -from .utils import GxGyTestUtil as TestUtil - - -class CreditTrackingTest(unittest.TestCase): - - @classmethod - def setUpClass(cls): - super(CreditTrackingTest, cls).setUpClass() - # Static policies - cls.test_util = TestUtil() - policy = create_uplink_rule("simple_match", 1, '45.10.0.1') - cls.test_util.static_rules[policy.id] = policy - hub.sleep(2) # wait for static rule to sync - - @classmethod - def tearDownClass(cls): - cls.test_util.cleanup() - - def test_basic_init(self): - """ - Initiate subscriber, return 1 static policy, send traffic to match the - policy, verify update is sent, terminate subscriber - """ - sub1 = SubContextConfig('IMSI001010000088888', '192.168.128.74', 4) - quota = 1024 # bytes - - self.test_util.controller.mock_create_session = Mock( - return_value=session_manager_pb2.CreateSessionResponse( - credits=[create_update_response(sub1.imsi, 1, quota)], - static_rules=[ - session_manager_pb2.StaticRuleInstall( - rule_id="simple_match", - ), - ], - dynamic_rules=[], - usage_monitors=[], - ), - ) - - self.test_util.controller.mock_terminate_session = Mock( - return_value=session_manager_pb2.SessionTerminateResponse(), - ) - - update_complete = hub.Queue() - self.test_util.controller.mock_update_session = Mock( - side_effect=get_standard_update_response( - update_complete, None, quota, is_final=False, - ), - ) - - self.test_util.sessiond.CreateSession( - session_manager_pb2.LocalCreateSessionRequest( - sid=SubscriberID(id=sub1.imsi), - ue_ipv4=sub1.ip, - ), - ) - 
self.assertEqual(self.test_util.controller.mock_create_session.call_count, 1) - - packets = get_packets_for_flows( - sub1, self.test_util.static_rules["simple_match"].flow_list, - ) - packet_count = int(quota / len(packets[0])) + 1 - - self.test_util.thread.run_in_greenthread( - self.test_util.get_packet_sender([sub1], packets, packet_count), - ) - self.assertIsNotNone(get_from_queue(update_complete)) - self.assertEqual(self.test_util.controller.mock_update_session.call_count, 1) - - self.test_util.sessiond.EndSession(SubscriberID(id=sub1.imsi)) - self.assertEqual(self.test_util.controller.mock_terminate_session.call_count, 1) - - def test_input_output(self): - """ - """ - sub1 = SubContextConfig('IMSI001010000088888', '192.168.128.74', 4) - quota = 1024 # bytes - - # return only rx (downlink) packets - self.test_util.controller.mock_create_session = Mock( - return_value=session_manager_pb2.CreateSessionResponse( - credits=[ - session_manager_pb2.CreditUpdateResponse( - success=True, - sid=sub1.imsi, - charging_key=1, - credit=session_manager_pb2.ChargingCredit( - granted_units=session_manager_pb2.GrantedUnits( - rx=session_manager_pb2.CreditUnit( - is_valid=True, - volume=quota, - ), - ), - ), - ), - ], - static_rules=[ - session_manager_pb2.StaticRuleInstall( - rule_id="simple_match", - ), - ], - ), - ) - - self.test_util.controller.mock_terminate_session = Mock( - return_value=session_manager_pb2.SessionTerminateResponse(), - ) - - update_complete = hub.Queue() - self.test_util.controller.mock_update_session = Mock( - side_effect=get_standard_update_response( - update_complete, None, quota, is_final=False, - ), - ) - - self.test_util.sessiond.CreateSession( - session_manager_pb2.LocalCreateSessionRequest( - sid=SubscriberID(id=sub1.imsi), - ue_ipv4=sub1.ip, - ), - ) - self.assertEqual(self.test_util.controller.mock_create_session.call_count, 1) - - packets = get_packets_for_flows( - sub1, self.test_util.static_rules["simple_match"].flow_list, - ) - packet_count 
= int(quota / len(packets[0])) + 1 - - self.test_util.thread.run_in_greenthread( - self.test_util.get_packet_sender([sub1], packets, packet_count), - ) - self.assertIsNone(get_from_queue(update_complete)) - self.assertEqual(self.test_util.controller.mock_update_session.call_count, 0) - - self.test_util.sessiond.EndSession(SubscriberID(id=sub1.imsi)) - self.assertEqual(self.test_util.controller.mock_terminate_session.call_count, 1) - - # now attach with tx (uplink packets) - self.test_util.controller.mock_create_session = Mock( - return_value=session_manager_pb2.CreateSessionResponse( - credits=[ - session_manager_pb2.CreditUpdateResponse( - success=True, - sid=sub1.imsi, - charging_key=1, - credit=session_manager_pb2.ChargingCredit( - granted_units=session_manager_pb2.GrantedUnits( - tx=session_manager_pb2.CreditUnit( - is_valid=True, - volume=quota, - ), - ), - ), - ), - ], - static_rules=[ - session_manager_pb2.StaticRuleInstall( - rule_id="simple_match", - ), - ], - ), - ) - self.test_util.sessiond.CreateSession( - session_manager_pb2.LocalCreateSessionRequest( - sid=SubscriberID(id=sub1.imsi), - ue_ipv4=sub1.ip, - ), - ) - self.test_util.thread.run_in_greenthread( - self.test_util.get_packet_sender([sub1], packets, packet_count), - ) - self.assertIsNotNone(get_from_queue(update_complete)) - self.assertEqual(self.test_util.controller.mock_update_session.call_count, 1) - self.test_util.sessiond.EndSession(SubscriberID(id=sub1.imsi)) - self.assertEqual(self.test_util.controller.mock_terminate_session.call_count, 2) - - def test_out_of_credit(self): - """ - Initiate subscriber, return 1 static policy, send traffic to match the - policy, verify update is sent, return final credits, use up final - credits, ensure that no traffic can be sent - """ - sub1 = SubContextConfig('IMSI001010000088888', '192.168.128.74', 4) - quota = 1024 # bytes - - self.test_util.controller.mock_create_session = Mock( - return_value=session_manager_pb2.CreateSessionResponse( - credits=[ - 
session_manager_pb2.CreditUpdateResponse( - success=True, - sid=sub1.imsi, - charging_key=1, - credit=session_manager_pb2.ChargingCredit( - granted_units=session_manager_pb2.GrantedUnits( - total=session_manager_pb2.CreditUnit( - is_valid=True, - volume=quota, - ), - ), - ), - ), - ], - static_rules=[ - session_manager_pb2.StaticRuleInstall( - rule_id="simple_match", - ), - ], - dynamic_rules=[], - usage_monitors=[], - ), - ) - - self.test_util.controller.mock_terminate_session = Mock( - return_value=session_manager_pb2.SessionTerminateResponse(), - ) - - update_complete = hub.Queue() - self.test_util.controller.mock_update_session = Mock( - side_effect=get_standard_update_response( - update_complete, None, quota, is_final=True, - ), - ) - - self.test_util.sessiond.CreateSession( - session_manager_pb2.LocalCreateSessionRequest( - sid=SubscriberID(id=sub1.imsi), - ue_ipv4=sub1.ip, - ), - ) - self.assertEqual(self.test_util.controller.mock_create_session.call_count, 1) - - packets = get_packets_for_flows( - sub1, self.test_util.static_rules["simple_match"].flow_list, - ) - packet_count = int(quota / len(packets[0])) + 1 - send_packets = self.test_util.get_packet_sender( - [sub1], packets, packet_count, - ) - - self.test_util.thread.run_in_greenthread(send_packets) - self.assertIsNotNone(get_from_queue(update_complete)) - self.assertEqual(self.test_util.controller.mock_update_session.call_count, 1) - - # use up last credits - self.test_util.thread.run_in_greenthread(send_packets) - hub.sleep(3) # wait for sessiond to terminate rule after update - - pkt_diff = self.test_util.thread.run_in_greenthread(send_packets) - self.assertEqual(pkt_diff, 0) - - self.test_util.proxy_responder.ChargingReAuth( - session_manager_pb2.ChargingReAuthRequest( - charging_key=1, - sid=sub1.imsi, - ), - ) - get_from_queue(update_complete) - self.assertEqual(self.test_util.controller.mock_update_session.call_count, 2) - # wait for 1 update to trigger credit request, another to trigger - # 
rule activation - # TODO Add future to track when flows are added/deleted - hub.sleep(5) - pkt_diff = self.test_util.thread.run_in_greenthread(send_packets) - self.assertGreater(pkt_diff, 0) - - self.test_util.sessiond.EndSession(SubscriberID(id=sub1.imsi)) - self.assertEqual(self.test_util.controller.mock_terminate_session.call_count, 1) - - def test_multiple_subscribers(self): - """ - Test credit tracking with multiple rules and 32 subscribers, each using - up their quota and reporting to the OCS - """ - subs = [ - SubContextConfig( - 'IMSI0010100000888{}'.format(i), - '192.168.128.{}'.format(i), - 4, - ) for i in range(32) - ] - quota = 1024 # bytes - - # create some rules - rule1 = create_uplink_rule("rule1", 2, '46.10.0.1') - rule2 = create_uplink_rule( - "rule2", 0, '47.10.0.1', - tracking=PolicyRule.NO_TRACKING, - ) - rule3 = create_uplink_rule("rule3", 3, '49.10.0.1') - self.test_util.static_rules["rule1"] = rule1 - self.test_util.static_rules["rule2"] = rule2 - hub.sleep(2) # wait for policies - - # set up mocks - self.test_util.controller.mock_create_session = Mock( - return_value=session_manager_pb2.CreateSessionResponse( - credits=[ - create_update_response("", 2, quota), - create_update_response("", 3, quota), - ], - static_rules=[ - session_manager_pb2.StaticRuleInstall( - rule_id="rule1", - ), - session_manager_pb2.StaticRuleInstall( - rule_id="rule2", - ), - ], - dynamic_rules=[ - session_manager_pb2.DynamicRuleInstall( - policy_rule=rule3, - ), - ], - ), - ) - self.test_util.controller.mock_terminate_session = Mock( - return_value=session_manager_pb2.SessionTerminateResponse(), - ) - update_complete = hub.Queue() - self.test_util.controller.mock_update_session = Mock( - side_effect=get_standard_update_response( - update_complete, None, quota, is_final=True, - ), - ) - - # initiate sessions - for sub in subs: - self.test_util.sessiond.CreateSession( - session_manager_pb2.LocalCreateSessionRequest( - sid=SubscriberID(id=sub.imsi), - ue_ipv4=sub.ip, - 
), - ) - self.assertEqual( - self.test_util.controller.mock_create_session.call_count, len(subs), - ) - - # send packets towards all 3 rules - flows = [rule.flow_list[0] for rule in [rule1, rule2, rule3]] - packets = [] - for sub in subs: - packets.extend(get_packets_for_flows(sub, flows)) - packet_count = int(quota / len(packets[0])) + 1 - self.test_util.thread.run_in_greenthread( - self.test_util.get_packet_sender(subs, packets, packet_count), - ) - - # wait for responses for keys 2 and 3 (key 1 is not tracked) - expected_keys = {(sub.imsi, key) for sub in subs for key in [2, 3]} - for _ in range(len(expected_keys)): - update = get_from_queue(update_complete) - self.assertIsNotNone(update) - imsiKey = (update.sid, update.usage.charging_key) - self.assertTrue(imsiKey in expected_keys) - expected_keys.remove(imsiKey) - - for sub in subs: - self.test_util.sessiond.EndSession(SubscriberID(id=sub.imsi)) - self.assertEqual( - self.test_util.controller.mock_terminate_session.call_count, len(subs), - ) - - -if __name__ == "__main__": - unittest.main() diff --git a/lte/gateway/python/integ_tests/gxgy_tests/test_failure_scenarios.py b/lte/gateway/python/integ_tests/gxgy_tests/test_failure_scenarios.py deleted file mode 100644 index 145b2e0f49de..000000000000 --- a/lte/gateway/python/integ_tests/gxgy_tests/test_failure_scenarios.py +++ /dev/null @@ -1,211 +0,0 @@ -""" -Copyright 2020 The Magma Authors. - -This source code is licensed under the BSD-style license found in the -LICENSE file in the root directory of this source tree. - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-""" -import unittest -from unittest.mock import Mock - -from lte.protos import session_manager_pb2 -from lte.protos.subscriberdb_pb2 import SubscriberID -from magma.pipelined.tests.app.subscriber import ( - SubContextConfig, - default_ambr_config, -) -from ryu.lib import hub - -from .policies import create_uplink_rule, get_packets_for_flows -from .session_manager import ( - create_update_response, - get_from_queue, - get_standard_update_response, -) -from .utils import GxGyTestUtil as TestUtil - - -class FailureScenarioTest(unittest.TestCase): - - @classmethod - def setUpClass(cls): - super(FailureScenarioTest, cls).setUpClass() - # Static policies - cls.test_util = TestUtil() - policy = create_uplink_rule("simple_match", 1, '45.10.0.1') - cls.test_util.static_rules[policy.id] = policy - - @classmethod - def tearDownClass(cls): - cls.test_util.cleanup() - - def test_rule_with_no_credit(self): - """ - Test that when a rule is returned that requires OCS tracking but has - no credit, data is not allowed to pass - """ - sub1 = SubContextConfig('IMSI001010000088888', '192.168.128.74', default_ambr_config, 4) - - self.test_util.controller.mock_create_session = Mock( - return_value=session_manager_pb2.CreateSessionResponse( - static_rules=[ - session_manager_pb2.StaticRuleInstall( - rule_id="simple_match", - ), - ], # no credit for RG 1 - ), - ) - - self.test_util.controller.mock_terminate_session = Mock( - return_value=session_manager_pb2.SessionTerminateResponse(), - ) - - self.test_util.sessiond.CreateSession( - session_manager_pb2.LocalCreateSessionRequest( - sid=SubscriberID(id=sub1.imsi), - ue_ipv4=sub1.ip, - ), - ) - self.assertEqual(self.test_util.controller.mock_create_session.call_count, 1) - - packets = get_packets_for_flows( - sub1, self.test_util.static_rules["simple_match"].flow_list, - ) - - pkt_diff = self.test_util.thread.run_in_greenthread( - self.test_util.get_packet_sender([sub1], packets, 1), - ) - self.assertEqual(pkt_diff, 0) - - 
self.test_util.sessiond.EndSession(SubscriberID(id=sub1.imsi)) - self.assertEqual(self.test_util.controller.mock_terminate_session.call_count, 1) - - def test_rules_with_failed_credit(self): - """ - Test that when a session is initialized but the OCS either errored out or - returned 0 GSUs, data is not allowed to flow - """ - sub1 = SubContextConfig('IMSI001010000088888', '192.168.128.74', default_ambr_config, 4) - - rule2 = create_uplink_rule("rule2", 2, '46.10.0.1') - rule3 = create_uplink_rule("rule3", 3, '47.10.0.1') - self.test_util.controller.mock_create_session = Mock( - return_value=session_manager_pb2.CreateSessionResponse( - credits=[ - # failed update - create_update_response(sub1.imsi, 1, 0, success=False), - # successful update, no credit - create_update_response(sub1.imsi, 1, 0, success=True), - ], - static_rules=[ - session_manager_pb2.StaticRuleInstall( - rule_id="simple_match", - ), - ], # no credit for RG 1 - dynamic_rules=[ - session_manager_pb2.DynamicRuleInstall( - policy_rule=rule2, - ), - session_manager_pb2.DynamicRuleInstall( - policy_rule=rule3, - ), - ], - ), - ) - - self.test_util.controller.mock_terminate_session = Mock( - return_value=session_manager_pb2.SessionTerminateResponse(), - ) - - self.test_util.sessiond.CreateSession( - session_manager_pb2.LocalCreateSessionRequest( - sid=SubscriberID(id=sub1.imsi), - ue_ipv4=sub1.ip, - ), - ) - self.assertEqual(self.test_util.controller.mock_create_session.call_count, 1) - - flows = [rule.flow_list[0] for rule in [rule2, rule3]] - packets = get_packets_for_flows(sub1, flows) - pkt_diff = self.test_util.thread.run_in_greenthread( - self.test_util.get_packet_sender([sub1], packets, 1), - ) - self.assertEqual(pkt_diff, 0) - - self.test_util.sessiond.EndSession(SubscriberID(id=sub1.imsi)) - self.assertEqual(self.test_util.controller.mock_terminate_session.call_count, 1) - - def test_ocs_failure(self): - """ - Test that when the OCS fails to respond to an update request, the service - is cut off 
until the update can be completed - """ - sub1 = SubContextConfig('IMSI001010000088888', '192.168.128.74', default_ambr_config, 4) - quota = 1024 - - self.test_util.controller.mock_create_session = Mock( - return_value=session_manager_pb2.CreateSessionResponse( - credits=[create_update_response(sub1.imsi, 1, quota)], - static_rules=[ - session_manager_pb2.StaticRuleInstall( - rule_id="simple_match", - ), - ], - ), - ) - - update_complete = hub.Queue() - self.test_util.controller.mock_update_session = Mock( - side_effect=get_standard_update_response( - update_complete, None, quota, success=False, - ), - ) - - self.test_util.controller.mock_terminate_session = Mock( - return_value=session_manager_pb2.SessionTerminateResponse(), - ) - - self.test_util.sessiond.CreateSession( - session_manager_pb2.LocalCreateSessionRequest( - sid=SubscriberID(id=sub1.imsi), - ue_ipv4=sub1.ip, - ), - ) - self.assertEqual(self.test_util.controller.mock_create_session.call_count, 1) - - packets = get_packets_for_flows( - sub1, self.test_util.static_rules["simple_match"].flow_list, - ) - packet_count = int(quota / len(packets[0])) + 1 - sender = self.test_util.get_packet_sender([sub1], packets, packet_count) - - # assert after session init, data can flow - self.assertGreater(self.test_util.thread.run_in_greenthread(sender), 0) - - # wait for failed update - self.assertIsNotNone(get_from_queue(update_complete)) - hub.sleep(2) - - # assert that no data can be sent anymore - self.assertEqual(self.test_util.thread.run_in_greenthread(sender), 0) - - self.test_util.controller.mock_update_session = Mock( - side_effect=get_standard_update_response( - update_complete, None, quota, success=True, - ), - ) - # wait for second update cycle to reactivate - hub.sleep(4) - self.assertGreater(self.test_util.thread.run_in_greenthread(sender), 0) - - self.test_util.sessiond.EndSession(SubscriberID(id=sub1.imsi)) - self.assertEqual(self.test_util.controller.mock_terminate_session.call_count, 1) - - -if 
__name__ == "__main__": - unittest.main() diff --git a/lte/gateway/python/integ_tests/gxgy_tests/test_gx_reauth.py b/lte/gateway/python/integ_tests/gxgy_tests/test_gx_reauth.py deleted file mode 100644 index c6e85ce0eec7..000000000000 --- a/lte/gateway/python/integ_tests/gxgy_tests/test_gx_reauth.py +++ /dev/null @@ -1,184 +0,0 @@ -""" -Copyright 2020 The Magma Authors. - -This source code is licensed under the BSD-style license found in the -LICENSE file in the root directory of this source tree. - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -""" -import itertools -import unittest -from unittest.mock import Mock - -from integ_tests.gxgy_tests.policies import ( - create_uplink_rule, - get_packets_for_flows, -) -from integ_tests.gxgy_tests.session_manager import create_update_response -from lte.protos import session_manager_pb2 -from lte.protos.policydb_pb2 import PolicyRule -from lte.protos.session_manager_pb2 import ( - CreateSessionResponse, - LocalCreateSessionRequest, - PolicyReAuthRequest, - SessionTerminateResponse, -) -from lte.protos.subscriberdb_pb2 import SubscriberID -from magma.pipelined.tests.app.subscriber import ( - SubContextConfig, - default_ambr_config, -) -from ryu.lib import hub - -from .utils import GxGyTestUtil as TestUtil - - -class GxReauthTest(unittest.TestCase): - - @classmethod - def setUpClass(cls): - super(GxReauthTest, cls).setUpClass() - - # Static policies - cls.test_util = TestUtil() - policy1 = create_uplink_rule( - 'policy1', 1, '45.10.0.1', - tracking=PolicyRule.NO_TRACKING, - ) - cls.test_util.static_rules[policy1.id] = policy1 - policy2 = create_uplink_rule( - 'policy2', 1, '45.10.10.2', - tracking=PolicyRule.NO_TRACKING, - ) - cls.test_util.static_rules[policy2.id] 
= policy2 - hub.sleep(2) # wait for static rule to sync - - @classmethod - def tearDownClass(cls): - cls.test_util.cleanup() - - def test_reauth(self): - """ - Send a Gx reauth request which installs one new static rule, one new - dynamic rule, and removes one static and one dynamic rule. - """ - dynamic_rule1 = create_uplink_rule( - 'dynamic1', 1, '46.10.10.1', - tracking=PolicyRule.NO_TRACKING, - ) - dynamic_rule2 = create_uplink_rule( - 'dynamic2', 1, '46.10.10.2', - tracking=PolicyRule.NO_TRACKING, - ) - - # Initialize sub with 1 static and 1 dynamic rule - sub = SubContextConfig('IMSI001010000088888', '192.168.128.74', default_ambr_config, 4) - self.test_util.controller.mock_create_session = Mock( - return_value=CreateSessionResponse( - credits=[create_update_response(sub.imsi, 1, 1024)], - static_rules=[ - session_manager_pb2.StaticRuleInstall( - rule_id='policy1', - ), - ], - dynamic_rules=[ - session_manager_pb2.DynamicRuleInstall( - policy_rule=dynamic_rule1, - ), - ], - usage_monitors=[], - ), - ) - self.test_util.controller.mock_terminate_session = Mock( - return_value=SessionTerminateResponse(), - ) - self.test_util.sessiond.CreateSession( - LocalCreateSessionRequest( - sid=SubscriberID(id=sub.imsi), - ue_ipv4=sub.ip, - ), - ) - self.assertEqual( - self.test_util.controller.mock_create_session.call_count, - 1, - ) - - # first, send some packets so we know that the uplink rules are - # accepting traffic - self._assert_rules( - sub, - [ - session_manager_pb2.DynamicRuleInstall( - policy_rule=self.test_util.static_rules['policy1'], - ), - session_manager_pb2.DynamicRuleInstall( - policy_rule=dynamic_rule1, - ), - ], - ) - - # Now via reauth, remove the old rules and install new uplink rules - # Verify the new uplink rules allow traffic - reauth_result = self.test_util.proxy_responder.PolicyReAuth( - PolicyReAuthRequest( - imsi=sub.imsi, - rules_to_remove=['dynamic1', 'policy1'], - rules_to_install=[ - session_manager_pb2.StaticRuleInstall( - 
rule_id='policy2', - ), - ], - dynamic_rules_to_install=[ - session_manager_pb2.DynamicRuleInstall( - policy_rule=dynamic_rule2, - ), - ], - ), - ) - self.assertEqual( - reauth_result.result, - session_manager_pb2.UPDATE_INITIATED, - ) - self.assertEqual(len(reauth_result.failed_rules), 0) - self._assert_rules( - sub, - [ - session_manager_pb2.DynamicRuleInstall( - policy_rule=self.test_util.static_rules['policy2'], - ), - session_manager_pb2.DynamicRuleInstall( - policy_rule=dynamic_rule2, - ), - ], - ) - - # Verify the old rules no longer allow traffic (uninstalled) - self._assert_rules( - sub, - [ - session_manager_pb2.DynamicRuleInstall( - policy_rule=self.test_util.static_rules['policy1'], - ), - session_manager_pb2.DynamicRuleInstall( - policy_rule=dynamic_rule1, - ), - ], - expected=0, - ) - - def _assert_rules(self, sub, rules, expected=-1): - flows = list( - itertools.chain(*[rule.policy_rule.flow_list for rule in rules]), - ) - packets = get_packets_for_flows(sub, flows) - packet_sender = self.test_util.get_packet_sender([sub], packets, 1) - - num_packets = self.test_util.thread.run_in_greenthread(packet_sender) - if expected == -1: - self.assertEqual(num_packets, len(flows)) - else: - self.assertEqual(num_packets, expected) diff --git a/lte/gateway/python/integ_tests/gxgy_tests/test_usage_monitors.py b/lte/gateway/python/integ_tests/gxgy_tests/test_usage_monitors.py deleted file mode 100644 index 057264dc6c25..000000000000 --- a/lte/gateway/python/integ_tests/gxgy_tests/test_usage_monitors.py +++ /dev/null @@ -1,225 +0,0 @@ -""" -Copyright 2020 The Magma Authors. - -This source code is licensed under the BSD-style license found in the -LICENSE file in the root directory of this source tree. - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -""" -import unittest -from unittest.mock import Mock - -from lte.protos import session_manager_pb2 -from lte.protos.policydb_pb2 import PolicyRule -from lte.protos.subscriberdb_pb2 import SubscriberID -from magma.pipelined.tests.app.subscriber import ( - SubContextConfig, - default_ambr_config, -) -from ryu.lib import hub - -from .policies import create_uplink_rule, get_packets_for_flows -from .session_manager import ( - create_monitor_response, - create_update_response, - get_from_queue, - get_standard_update_response, -) -from .utils import GxGyTestUtil as TestUtil - - -class UsageMonitorTest(unittest.TestCase): - - @classmethod - def setUpClass(cls): - super(UsageMonitorTest, cls).setUpClass() - - cls.test_util = TestUtil() - # default rule - policy = create_uplink_rule( - "monitor_rule", 0, '45.10.0.1', - m_key="mkey1", - tracking=PolicyRule.ONLY_PCRF, - ) - cls.test_util.static_rules[policy.id] = policy - hub.sleep(2) # wait for static rule to sync - - @classmethod - def tearDownClass(cls): - cls.test_util.cleanup() - - def test_basic_init(self): - """ - Initiate subscriber, return 1 static policy with monitoring key, send - traffic to match the policy, verify monitoring update is sent, terminate - subscriber - """ - sub1 = SubContextConfig('IMSI001010000088888', '192.168.128.74', default_ambr_config, 4) - quota = 1024 # bytes - - self.test_util.controller.mock_create_session = Mock( - return_value=session_manager_pb2.CreateSessionResponse( - credits=[], - static_rules=[ - session_manager_pb2.StaticRuleInstall( - rule_id="monitor_rule", - ), - ], - dynamic_rules=[], - usage_monitors=[ - create_monitor_response( - sub1.imsi, "mkey1", quota, session_manager_pb2.PCC_RULE_LEVEL, - ), - ], - ), - ) - - self.test_util.controller.mock_terminate_session = Mock( - return_value=session_manager_pb2.SessionTerminateResponse(), - ) - - monitor_complete = hub.Queue() - 
self.test_util.controller.mock_update_session = Mock( - side_effect=get_standard_update_response( - None, monitor_complete, quota, - ), - ) - - self.test_util.sessiond.CreateSession( - session_manager_pb2.LocalCreateSessionRequest( - sid=SubscriberID(id=sub1.imsi), - ue_ipv4=sub1.ip, - ), - ) - - self.assertEqual(self.test_util.controller.mock_create_session.call_count, 1) - - packets = get_packets_for_flows( - sub1, self.test_util.static_rules["monitor_rule"].flow_list, - ) - packet_count = int(quota / len(packets[0])) + 1 - - self.test_util.thread.run_in_greenthread( - self.test_util.get_packet_sender([sub1], packets, packet_count), - ) - self.assertIsNotNone(get_from_queue(monitor_complete)) - self.assertEqual(self.test_util.controller.mock_update_session.call_count, 1) - - self.test_util.sessiond.EndSession(SubscriberID(id=sub1.imsi)) - self.assertEqual(self.test_util.controller.mock_terminate_session.call_count, 1) - - def test_mixed_monitors_and_updates(self): - """ - Test a mix of usage monitors, session monitors, and charging credits to - PCRF and OCS. 
- """ - sub1 = SubContextConfig('IMSI001010000088888', '192.168.128.74', default_ambr_config, 4) - quota = 1024 # bytes - - pcrf_rule = create_uplink_rule( - "pcrf_rule", 0, '46.10.0.1', - m_key="key1", - tracking=PolicyRule.ONLY_PCRF, - ) - ocs_rule = create_uplink_rule( - "ocs_rule", 1, '47.10.0.1', - tracking=PolicyRule.ONLY_OCS, - ) - both_rule = create_uplink_rule( - "both_rule", 2, '48.10.0.1', - m_key="key2", - tracking=PolicyRule.OCS_AND_PCRF, - ) - - self.test_util.controller.mock_create_session = Mock( - return_value=session_manager_pb2.CreateSessionResponse( - credits=[ - create_update_response("", 1, quota), - create_update_response("", 2, quota), - ], - dynamic_rules=[ - session_manager_pb2.DynamicRuleInstall( - policy_rule=pcrf_rule, - ), - session_manager_pb2.DynamicRuleInstall( - policy_rule=ocs_rule, - ), - session_manager_pb2.DynamicRuleInstall( - policy_rule=both_rule, - ), - ], - usage_monitors=[ - create_monitor_response( - sub1.imsi, - "key1", - quota, - session_manager_pb2.PCC_RULE_LEVEL, - ), - create_monitor_response( - sub1.imsi, - "key2", - quota, - session_manager_pb2.PCC_RULE_LEVEL, - ), - create_monitor_response( - sub1.imsi, - "key3", - quota, - session_manager_pb2.SESSION_LEVEL, - ), - ], - ), - ) - - self.test_util.controller.mock_terminate_session = Mock( - return_value=session_manager_pb2.SessionTerminateResponse(), - ) - - charging_complete = hub.Queue() - monitor_complete = hub.Queue() - self.test_util.controller.mock_update_session = Mock( - side_effect=get_standard_update_response( - charging_complete, monitor_complete, quota, - ), - ) - - self.test_util.sessiond.CreateSession( - session_manager_pb2.LocalCreateSessionRequest( - sid=SubscriberID(id=sub1.imsi), - ue_ipv4=sub1.ip, - ), - ) - - self.assertEqual(self.test_util.controller.mock_create_session.call_count, 1) - flows = [rule.flow_list[0] for rule in [pcrf_rule, ocs_rule, both_rule]] - packets = get_packets_for_flows(sub1, flows) - packet_count = int(quota / 
len(packets[0])) + 1 - self.test_util.thread.run_in_greenthread( - self.test_util.get_packet_sender([sub1], packets, packet_count), - ) - - # Wait for responses for keys 1 and 2 (ocs_rule and both_rule) - charging_keys = {1, 2} - for _ in range(len(charging_keys)): - update = get_from_queue(charging_complete) - self.assertTrue(update.usage.charging_key in charging_keys) - charging_keys.remove(update.usage.charging_key) - - # Wait for responses for mkeys key1 (pcrf_rule), key2 (both_rule), - # key3 (session rule) - monitoring_keys = ["key1", "key2", "key3"] - for _ in range(len(monitoring_keys)): - monitor = get_from_queue(monitor_complete) - self.assertTrue(monitor.update.monitoring_key in monitoring_keys) - monitoring_keys.remove(monitor.update.monitoring_key) - - self.test_util.sessiond.EndSession(SubscriberID(id=sub1.imsi)) - self.assertEqual(self.test_util.controller.mock_terminate_session.call_count, 1) - - -if __name__ == "__main__": - unittest.main() diff --git a/lte/gateway/python/integ_tests/gxgy_tests/utils.py b/lte/gateway/python/integ_tests/gxgy_tests/utils.py deleted file mode 100644 index 3a8d72b85ff3..000000000000 --- a/lte/gateway/python/integ_tests/gxgy_tests/utils.py +++ /dev/null @@ -1,132 +0,0 @@ -""" -Copyright 2020 The Magma Authors. - -This source code is licensed under the BSD-style license found in the -LICENSE file in the root directory of this source tree. - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-""" -from concurrent.futures import Future, ThreadPoolExecutor -from contextlib import ExitStack - -import grpc -from lte.protos import session_manager_pb2_grpc -from magma.common.service_registry import ServiceRegistry -from magma.configuration.service_configs import load_service_config -from magma.pipelined.bridge_util import BridgeTools -from magma.pipelined.tests.app.flow_query import RyuDirectFlowQuery as FlowQuery -from magma.pipelined.tests.app.packet_injector import ScapyPacketInjector -from magma.pipelined.tests.app.start_pipelined import ( - PipelinedController, - TestSetup, -) -from magma.pipelined.tests.app.table_isolation import ( - RyuDirectTableIsolator, - RyuForwardFlowArgsBuilder, -) -from magma.pipelined.tests.pipelined_test_util import ( - start_ryu_app_thread, - stop_ryu_app_thread, - wait_after_send, -) -from magma.policydb.rule_store import PolicyRuleDict - -from .session_manager import MockSessionManager - - -class GxGyTestUtil(object): - BRIDGE = 'gtp_br0' - IFACE = 'gtp_br0' - CONTROLLER_PORT = 6644 - - def __init__(self): - self.static_rules = PolicyRuleDict() - - # Local sessiond - self.sessiond = session_manager_pb2_grpc.LocalSessionManagerStub( - ServiceRegistry.get_rpc_channel("sessiond", ServiceRegistry.LOCAL), - ) - - self.proxy_responder = session_manager_pb2_grpc.SessionProxyResponderStub( - ServiceRegistry.get_rpc_channel("sessiond", ServiceRegistry.LOCAL), - ) - - # Mock session controller server - cloud_port = load_service_config("sessiond")["local_controller_port"] - self.controller = MockSessionManager() - self.server = grpc.server(ThreadPoolExecutor(max_workers=10)) - session_manager_pb2_grpc.add_CentralSessionControllerServicer_to_server( - self.controller, self.server, - ) - self.server.add_insecure_port('127.0.0.1:{}'.format(cloud_port)) - self.server.start() - - # Add new controller to bridge - BridgeTools.add_controller_to_bridge(self.BRIDGE, self.CONTROLLER_PORT) - - # Start ryu test controller for adding flows - 
testing_controller_reference = Future() - test_setup = TestSetup( - apps=[PipelinedController.Testing], - references={ - PipelinedController.Testing: testing_controller_reference, - }, - config={ - 'bridge_name': self.BRIDGE, - 'bridge_ip_address': '192.168.128.1', - 'controller_port': self.CONTROLLER_PORT, - }, - mconfig=None, - loop=None, - integ_test=True, - ) - self.thread = start_ryu_app_thread(test_setup) - self.testing_controller = testing_controller_reference.result() - - def cleanup(self): - # Stop ryu controller - stop_ryu_app_thread(self.thread) - # Remove bridge - BridgeTools.remove_controller_from_bridge( - self.BRIDGE, - self.CONTROLLER_PORT, - ) - # Stop gRPC server - self.server.stop(0) - - def get_packet_sender(self, subs, packets, count): - """ - Return a function to call within a greenthread to send packets and - return the number of packets that went through table 20 (i.e. didn't - get dropped) - Args: - subs ([SubscriberContext]): list of subscribers that may receive - packets - packets ([ScapyPacket]): list of packets to send - count (int): how many of each packet to send - """ - pkt_sender = ScapyPacketInjector(self.IFACE) - - def packet_sender(): - isolators = [ - RyuDirectTableIsolator( - RyuForwardFlowArgsBuilder.from_subscriber(sub) - .build_requests(), - self.testing_controller, - ) for sub in subs - ] - flow_query = FlowQuery(20, self.testing_controller) - pkt_start = sum(flow.packets for flow in flow_query.lookup()) - with ExitStack() as es: - for iso in isolators: - es.enter_context(iso) - for packet in packets: - pkt_sender.send(packet, count=count) - wait_after_send(self.testing_controller) - pkt_final = sum(flow.packets for flow in flow_query.lookup()) - return pkt_final - pkt_start - return packet_sender diff --git a/lte/gateway/python/integ_tests/s1aptests/s1ap_utils.py b/lte/gateway/python/integ_tests/s1aptests/s1ap_utils.py index 20efc64e1ef3..1bc276523a96 100644 --- a/lte/gateway/python/integ_tests/s1aptests/s1ap_utils.py +++ 
b/lte/gateway/python/integ_tests/s1aptests/s1ap_utils.py @@ -894,13 +894,17 @@ def detect_init_system(self) -> InitMode: logging.info("systemd is not installed") if docker_running and systemd_running: - raise RuntimeError("Magmad is running with both Docker and systemd") + return InitMode.SYSTEMD # default to systemd if both are running - needed by feg integ tests elif docker_running: return InitMode.DOCKER elif systemd_running: return InitMode.SYSTEMD else: - raise RuntimeError("Magmad is not running with either Docker or systemd") + raise RuntimeError("Magmad is not running, you have to start magmad either in Docker or systemd") + + @property + def init_system(self): + return self._init_system def exec_command_output(self, command): """Run a command remotely on magma_dev VM. @@ -976,22 +980,33 @@ def restart_all_services(self): elif self._init_system == InitMode.DOCKER: self.exec_command("cd /home/vagrant/magma/lte/gateway/docker && docker-compose restart") print("Waiting for all services to restart. Sleeping for 60 seconds..") - time_slept = 0 - while time_slept < 60: - time.sleep(5) - time_slept += 5 - print("*********** Slept for " + str(time_slept) + " seconds") + self.wait_for_restart_to_finish(60) - def restart_services(self, services): + def restart_services(self, services, wait_time=0): """ Restart a list of magmad services. Blocking command. 
Args: services: List of (str) services names - + wait_time: (int) Time to wait for restart of the services """ for s in services: - self.exec_command("sudo systemctl restart magma@{0}".format(s)) + if s == "mme": + self.restart_mme(0) + elif s == "sctpd": + self.restart_sctpd(0) + else: + if self._init_system == InitMode.SYSTEMD: + self.exec_command(f"sudo systemctl restart magma@{s}") + elif self._init_system == InitMode.DOCKER: + self.exec_command(f"docker restart {s}") + self.wait_for_restart_to_finish(wait_time) + + @staticmethod + def wait_for_restart_to_finish(wait_time): + for j in range(wait_time): + print(f"Waiting for {wait_time - j} seconds for restart to complete") + time.sleep(1) def enable_service(self, service): """Enable a magma service on magma_dev VM and starts it @@ -999,8 +1014,11 @@ def enable_service(self, service): Args: service: (str) service to enable """ - self.exec_command("sudo systemctl unmask magma@{0}".format(service)) - self.exec_command("sudo systemctl start magma@{0}".format(service)) + if self._init_system == InitMode.SYSTEMD: + self.exec_command(f"sudo systemctl unmask magma@{service}") + self.exec_command(f"sudo systemctl start magma@{service}") + elif self._init_system == InitMode.DOCKER: + self.exec_command(f"docker start {service}") def disable_service(self, service): """Disables a magma service on magma_dev VM, preventing from @@ -1009,8 +1027,11 @@ def disable_service(self, service): Args: service: (str) service to disable """ - self.exec_command("sudo systemctl mask magma@{0}".format(service)) - self.exec_command("sudo systemctl stop magma@{0}".format(service)) + if self._init_system == InitMode.SYSTEMD: + self.exec_command(f"sudo systemctl mask magma@{service}") + self.exec_command(f"sudo systemctl stop magma@{service}") + elif self._init_system == InitMode.DOCKER: + self.exec_command(f"docker stop {service}") def is_service_active(self, service) -> bool: """Check if a magma service on magma_dev VM is active @@ -1021,14 
+1042,22 @@ def is_service_active(self, service) -> bool: Returns: service active status """ - is_active_service_cmd = "systemctl is-active magma@" + service + if self._init_system == InitMode.SYSTEMD: + is_active_service_cmd = "systemctl is-active magma@" + service + return self.check_service_activity(is_active_service_cmd).strip() == "active" + elif self._init_system == InitMode.DOCKER: + is_active_service_cmd = "docker ps --filter 'name=" + service + "' --format '{{.Status}}'" + return self.check_service_activity(is_active_service_cmd).strip()[:2] == "up" + return False + + def check_service_activity(self, is_active_service_cmd): try: result_str = self.exec_command_output(is_active_service_cmd) except subprocess.CalledProcessError as e: # if service is disabled / masked, is-enabled will return # non-zero exit status result_str = e.output - return result_str.strip() == "active" + return result_str def update_mme_config_for_sanity(self, cmd): """Update MME configuration for all sanity test cases""" @@ -1222,29 +1251,72 @@ def config_ha_service(self, cmd): print("Ha service configuration failed") return -1 - def restart_mme_and_wait(self): - """Restart MME service and wait for the service to come up properly""" + def restart_mme(self, wait_time=20): + """ + Restart MME service and wait for the service to come up properly + """ print("Restarting mme service on gateway") - self.restart_services(["mme"]) - print("Waiting for mme to restart. 
20 sec") - time.sleep(20) + if self._init_system == InitMode.SYSTEMD: + self.exec_command("sudo systemctl restart magma@mme") + elif self._init_system == InitMode.DOCKER: + self.exec_command("docker restart mobilityd pipelined sessiond oai_mme") + self.wait_for_restart_to_finish(wait_time) - def restart_sctpd(self): + def restart_sctpd(self, wait_time=30): """ Restart sctpd service explicitly because it is not managed by magmad """ + print("Restarting sctpd service on gateway") if self._init_system == InitMode.SYSTEMD: self.exec_command("sudo service sctpd restart") elif self._init_system == InitMode.DOCKER: - self.exec_command("docker restart sctpd") - for j in range(30): - print("Waiting for", 30 - j, "seconds for restart to complete") - time.sleep(1) + self.exec_command_output( + "docker stop sctpd mobilityd pipelined sessiond oai_mme;" + "sudo su -c '/usr/bin/env python3 /usr/local/bin/config_stateless_agw.py sctpd_pre';" + "docker start sctpd mobilityd pipelined sessiond oai_mme", + ) + self.wait_for_restart_to_finish(wait_time) def print_redis_state(self): """ Print the per-IMSI state in Redis data store on AGW """ + keys_to_be_cleaned, mme_ueip_imsi_map_entries, \ + num_htbl_entries, s1ap_imsi_map_entries = self.get_redis_state() + print( + "Keys left in Redis (list should be empty)[\n", + "\n".join(keys_to_be_cleaned), + "\n]", + ) + print( + "Entries in s1ap_imsi_map (should be zero):", + s1ap_imsi_map_entries, + ) + print( + "Entries left in hashtables (should be zero):", + num_htbl_entries, + ) + print( + "Entries in mme_ueip_imsi_map (should be zero):", + mme_ueip_imsi_map_entries, + ) + + def is_redis_empty(self): + """ + Check that the per-IMSI state in Redis data store on AGW is empty + """ + keys_to_be_cleaned, mme_ueip_imsi_map_entries, \ + num_htbl_entries, s1ap_imsi_map_entries = self.get_redis_state() + return \ + len(keys_to_be_cleaned) == 0 and \ + mme_ueip_imsi_map_entries == 0 and \ + num_htbl_entries == 0 and \ + s1ap_imsi_map_entries == 0 
+ + def get_redis_state(self): + """ + Get the per-IMSI state in Redis data store on AGW + """ magtivate_cmd = "source /home/vagrant/build/python/bin/activate" imsi_state_cmd = "state_cli.py keys IMSI*" redis_imsi_keys = self.exec_command_output( @@ -1254,7 +1326,7 @@ def print_redis_state(self): for key in redis_imsi_keys.split("\n"): # Ignore directoryd per-IMSI keys in this analysis as they will # persist after each test - if "directory" not in key: + if "directory" not in key and key != "": keys_to_be_cleaned.append(key) mme_nas_state_cmd = "state_cli.py parse mme_nas_state" @@ -1283,20 +1355,7 @@ def print_redis_state(self): for state in mme_ueip_imsi_map_state.split("\n"): if "key" in state: mme_ueip_imsi_map_entries += 1 - print( - "Keys left in Redis (list should be empty)[\n", - "\n".join(keys_to_be_cleaned), - "\n]", - ) - print( - "Entries in s1ap_imsi_map (should be zero):", - s1ap_imsi_map_entries, - ) - print("Entries left in hashtables (should be zero):", num_htbl_entries) - print( - "Entries in mme_ueip_imsi_map (should be zero):", - mme_ueip_imsi_map_entries, - ) + return keys_to_be_cleaned, mme_ueip_imsi_map_entries, num_htbl_entries, s1ap_imsi_map_entries def enable_nat(self, ip_version=4): """Enable Nat""" @@ -1352,7 +1411,7 @@ def _set_agw_nat(self, enable: bool): with open(mconfig_conf, "w") as json_file: json.dump(data, json_file, sort_keys=True, indent=2) - self.restart_sctpd() + self.restart_sctpd(0) self.restart_all_services() def _validate_non_nat_datapath(self, ip_version=4): @@ -2089,9 +2148,14 @@ def __init__(self): def restart_envoy_service(self): """Restart the Envoy service""" print("restarting envoy") - self.magma_utils.exec_command_output( - "sudo service magma@envoy_controller restart", - ) + if self.magma_utils.init_system == InitMode.SYSTEMD: + self.magma_utils.exec_command_output( + "sudo service magma@envoy_controller restart", + ) + elif self.magma_utils.init_system == InitMode.DOCKER: + self.magma_utils.exec_command_output( 
+ "docker restart envoy_controller", + ) time.sleep(5) self.magma_utils.exec_command_output( "sudo service magma_dp@envoy restart", diff --git a/lte/gateway/python/integ_tests/s1aptests/s1ap_wrapper.py b/lte/gateway/python/integ_tests/s1aptests/s1ap_wrapper.py index 8fa4ca7bb671..2083d1282270 100644 --- a/lte/gateway/python/integ_tests/s1aptests/s1ap_wrapper.py +++ b/lte/gateway/python/integ_tests/s1aptests/s1ap_wrapper.py @@ -541,6 +541,11 @@ def cleanup(self, test=None): elif TestWrapper.TEST_CASE_EXECUTION_COUNT > 1: self.generate_flaky_summary() + if not self.magmad_util.is_redis_empty(): + print("************************* Redis not empty, initiating cleanup") + self.magmad_util.restart_sctpd() + self.magmad_util.print_redis_state() + def multiEnbConfig(self, num_of_enbs, enb_list=None): """Configure multiple eNB in S1APTester""" if enb_list is None: diff --git a/lte/gateway/python/integ_tests/s1aptests/test_3485_timer_for_dedicated_bearer_with_mme_restart.py b/lte/gateway/python/integ_tests/s1aptests/test_3485_timer_for_dedicated_bearer_with_mme_restart.py index 56529174030b..13328ea38feb 100644 --- a/lte/gateway/python/integ_tests/s1aptests/test_3485_timer_for_dedicated_bearer_with_mme_restart.py +++ b/lte/gateway/python/integ_tests/s1aptests/test_3485_timer_for_dedicated_bearer_with_mme_restart.py @@ -101,12 +101,10 @@ def test_3485_timer_for_dedicated_bearer_with_mme_restart(self): ) print('***** Restarting MME service on gateway') - self._s1ap_wrapper.magmad_util.restart_services(['mme']) - wait_for_restart = 20 - for j in range(wait_for_restart): - print('Waiting for', j, 'seconds') - time.sleep(1) + self._s1ap_wrapper.magmad_util.restart_services( + ['mme'], wait_for_restart, + ) response = self._s1ap_wrapper.s1_util.get_response() act_ded_ber_ctxt_req = response.cast( @@ -133,12 +131,8 @@ def test_3485_timer_for_dedicated_bearer_with_mme_restart(self): response = self._s1ap_wrapper.s1_util.get_response() msg_type = 
s1ap_types.tfwCmd.UE_DEACTIVATE_BER_REQ.value - while (response.msg_type != msg_type): + while response.msg_type != msg_type: response = self._s1ap_wrapper.s1_util.get_response() - self.assertEqual( - response.msg_type, - s1ap_types.tfwCmd.UE_DEACTIVATE_BER_REQ.value, - ) print('******************* Received deactivate eps bearer context') deactv_bearer_req = response.cast(s1ap_types.UeDeActvBearCtxtReq_t) diff --git a/lte/gateway/python/integ_tests/s1aptests/test_3485_timer_for_default_bearer_with_mme_restart.py b/lte/gateway/python/integ_tests/s1aptests/test_3485_timer_for_default_bearer_with_mme_restart.py index b2f330066b41..cc63225fe758 100755 --- a/lte/gateway/python/integ_tests/s1aptests/test_3485_timer_for_default_bearer_with_mme_restart.py +++ b/lte/gateway/python/integ_tests/s1aptests/test_3485_timer_for_default_bearer_with_mme_restart.py @@ -108,12 +108,10 @@ def test_3485_timer_for_default_bearer_with_mme_restart(self): # Receive PDN CONN RSP/Activate default EPS bearer context request print('************************* Restarting MME service on gateway') - self._s1ap_wrapper.magmad_util.restart_services(['mme']) - wait_for_restart = 20 - for j in range(wait_for_restart): - print('Waiting for', j, 'seconds') - time.sleep(1) + self._s1ap_wrapper.magmad_util.restart_services( + ['mme'], wait_for_restart, + ) print( '*** Sending indication to drop Activate Default EPS bearer Ctxt' @@ -151,11 +149,8 @@ def test_3485_timer_for_default_bearer_with_mme_restart(self): # Receive UE_DEACTIVATE_BER_REQ response = self._s1ap_wrapper.s1_util.get_response() msg_type = s1ap_types.tfwCmd.UE_DEACTIVATE_BER_REQ.value - while (response.msg_type != msg_type): + while response.msg_type != msg_type: response = self._s1ap_wrapper.s1_util.get_response() - self.assertEqual( - response.msg_type, s1ap_types.tfwCmd.UE_DEACTIVATE_BER_REQ.value, - ) print( '******************* Received deactivate eps bearer context' diff --git 
a/lte/gateway/python/integ_tests/s1aptests/test_3495_timer_for_dedicated_bearer_with_mme_restart.py b/lte/gateway/python/integ_tests/s1aptests/test_3495_timer_for_dedicated_bearer_with_mme_restart.py index ab900e1c8f3c..d9017052cc23 100755 --- a/lte/gateway/python/integ_tests/s1aptests/test_3495_timer_for_dedicated_bearer_with_mme_restart.py +++ b/lte/gateway/python/integ_tests/s1aptests/test_3495_timer_for_dedicated_bearer_with_mme_restart.py @@ -133,11 +133,10 @@ def test_3495_timer_for_dedicated_bearer_with_mme_restart(self): "************************* Restarting MME service on", "gateway", ) - self._s1ap_wrapper.magmad_util.restart_services(["mme"]) - - for j in range(30): - print("Waiting for", j, "seconds") - time.sleep(1) + wait_for_restart = 30 + self._s1ap_wrapper.magmad_util.restart_services( + ["mme"], wait_for_restart, + ) response = self._s1ap_wrapper.s1_util.get_response() self.assertEqual( diff --git a/lte/gateway/python/integ_tests/s1aptests/test_3495_timer_for_default_bearer_with_mme_restart.py b/lte/gateway/python/integ_tests/s1aptests/test_3495_timer_for_default_bearer_with_mme_restart.py index 6e0d146c7456..154d6385aef4 100755 --- a/lte/gateway/python/integ_tests/s1aptests/test_3495_timer_for_default_bearer_with_mme_restart.py +++ b/lte/gateway/python/integ_tests/s1aptests/test_3495_timer_for_default_bearer_with_mme_restart.py @@ -149,11 +149,10 @@ def test_3495_timer_for_default_bearer_with_mme_restart(self): # Do not send deactivate eps bearer context accept print("************************* Restarting MME service on", "gateway") - self._s1ap_wrapper.magmad_util.restart_services(["mme"]) - - for j in range(30): - print("Waiting for", j, "seconds") - time.sleep(1) + wait_for_restart = 30 + self._s1ap_wrapper.magmad_util.restart_services( + ["mme"], wait_for_restart, + ) response = self._s1ap_wrapper.s1_util.get_response() self.assertEqual( diff --git 
a/lte/gateway/python/integ_tests/s1aptests/test_attach_and_mme_restart_loop_detach_and_mme_restart_loop_multi_ue.py b/lte/gateway/python/integ_tests/s1aptests/test_attach_and_mme_restart_loop_detach_and_mme_restart_loop_multi_ue.py index 62a99da05ed1..704e4dad5e46 100644 --- a/lte/gateway/python/integ_tests/s1aptests/test_attach_and_mme_restart_loop_detach_and_mme_restart_loop_multi_ue.py +++ b/lte/gateway/python/integ_tests/s1aptests/test_attach_and_mme_restart_loop_detach_and_mme_restart_loop_multi_ue.py @@ -93,11 +93,10 @@ def test_attach_and_mme_restart_loop_detach_and_mme_restart_loop_multi_ue( print( "************************* Restarting MME service on gateway", ) - self._s1ap_wrapper.magmad_util.restart_services(["mme"]) - - for j in range(30): - print("Waiting for", j, "seconds") - time.sleep(1) + wait_for_restart = 30 + self._s1ap_wrapper.magmad_util.restart_services( + ["mme"], wait_for_restart, + ) for ue in ue_ids: # Now detach the UE @@ -113,11 +112,10 @@ def test_attach_and_mme_restart_loop_detach_and_mme_restart_loop_multi_ue( print( "************************* Restarting MME service on gateway", ) - self._s1ap_wrapper.magmad_util.restart_services(["mme"]) - - for j in range(30): - print("Waiting for", j, "seconds") - time.sleep(1) + wait_for_restart = 30 + self._s1ap_wrapper.magmad_util.restart_services( + ["mme"], wait_for_restart, + ) if __name__ == "__main__": diff --git a/lte/gateway/python/integ_tests/s1aptests/test_attach_detach_multiple_ip_blocks_mobilityd_restart.py b/lte/gateway/python/integ_tests/s1aptests/test_attach_detach_multiple_ip_blocks_mobilityd_restart.py index 4018a13582da..876479a7d50b 100644 --- a/lte/gateway/python/integ_tests/s1aptests/test_attach_detach_multiple_ip_blocks_mobilityd_restart.py +++ b/lte/gateway/python/integ_tests/s1aptests/test_attach_detach_multiple_ip_blocks_mobilityd_restart.py @@ -45,10 +45,10 @@ def test_attach_detach_multiple_ip_blocks_mobilityd_restart(self): "mobilityd " 
print("************************* Restarting mobilityd") - self._s1ap_wrapper.magmad_util.restart_services(["mobilityd"]) - for j in range(30): - print("Waiting for", j, "seconds") - sleep(1) + wait_for_restart = 30 + self._s1ap_wrapper.magmad_util.restart_services( + ["mobilityd"], wait_for_restart, + ) curr_blocks = self._s1ap_wrapper.mobility_util.list_ip_blocks() # Check if old_blocks and curr_blocks contain same ip blocks after diff --git a/lte/gateway/python/integ_tests/s1aptests/test_attach_detach_with_corrupt_stateless_mme.py b/lte/gateway/python/integ_tests/s1aptests/test_attach_detach_with_corrupt_stateless_mme.py index 01e90f749ca0..d995a9bf50bd 100644 --- a/lte/gateway/python/integ_tests/s1aptests/test_attach_detach_with_corrupt_stateless_mme.py +++ b/lte/gateway/python/integ_tests/s1aptests/test_attach_detach_with_corrupt_stateless_mme.py @@ -66,11 +66,10 @@ def test_attach_detach_with_corrupt_stateless_mme(self): ) print("************************* Restarting %s service" % s) - self._s1ap_wrapper.magmad_util.restart_services([s]) - - for j in range(100): - print("Waiting for", j, "seconds") - time.sleep(1) + wait_for_restart = 100 + self._s1ap_wrapper.magmad_util.restart_services( + [s], wait_for_restart, + ) # Re-establish S1 connection between eNB and MME self._s1ap_wrapper._s1setup() diff --git a/lte/gateway/python/integ_tests/s1aptests/test_attach_detach_with_mme_restart.py b/lte/gateway/python/integ_tests/s1aptests/test_attach_detach_with_mme_restart.py index 8a9905494481..ec37d672bd48 100644 --- a/lte/gateway/python/integ_tests/s1aptests/test_attach_detach_with_mme_restart.py +++ b/lte/gateway/python/integ_tests/s1aptests/test_attach_detach_with_mme_restart.py @@ -62,11 +62,10 @@ def test_attach_detach_with_mme_restart(self): "************************* Restarting MME service on", "gateway", ) - self._s1ap_wrapper.magmad_util.restart_services(["mme"]) - - for j in range(30): - print("Waiting for", j, "seconds") - time.sleep(1) + wait_for_restart = 
30 + self._s1ap_wrapper.magmad_util.restart_services( + ["mme"], wait_for_restart, + ) # Now detach the UE print( diff --git a/lte/gateway/python/integ_tests/s1aptests/test_attach_detach_with_mobilityd_restart.py b/lte/gateway/python/integ_tests/s1aptests/test_attach_detach_with_mobilityd_restart.py index 33cad0dd9410..2559a95a66ae 100644 --- a/lte/gateway/python/integ_tests/s1aptests/test_attach_detach_with_mobilityd_restart.py +++ b/lte/gateway/python/integ_tests/s1aptests/test_attach_detach_with_mobilityd_restart.py @@ -56,11 +56,10 @@ def test_attach_detach_with_mobilityd_restart(self): self._s1ap_wrapper._s1_util.receive_emm_info() print('************************* Restarting mobilityd') - self._s1ap_wrapper.magmad_util.restart_services(['mobilityd']) - # Timeout for mobilityd restart - for j in range(30): - print("Waiting for", j, "seconds") - sleep(1) + wait_for_restart = 30 + self._s1ap_wrapper.magmad_util.restart_services( + ["mobilityd"], wait_for_restart, + ) print( "************************* Running UE detach for UE id ", diff --git a/lte/gateway/python/integ_tests/s1aptests/test_attach_esm_info_with_apn_correction.py b/lte/gateway/python/integ_tests/s1aptests/test_attach_esm_info_with_apn_correction.py index c2923d763fe6..2a868de95256 100644 --- a/lte/gateway/python/integ_tests/s1aptests/test_attach_esm_info_with_apn_correction.py +++ b/lte/gateway/python/integ_tests/s1aptests/test_attach_esm_info_with_apn_correction.py @@ -39,10 +39,10 @@ def test_attach_esm_info_with_apn_correction(self): num_ues = 1 print("************************* restarting mme") - self._s1ap_wrapper.magmad_util.restart_services(["mme"]) - for j in range(30): - print("Waiting mme restart for", j, "seconds") - time.sleep(1) + wait_for_restart = 30 + self._s1ap_wrapper.magmad_util.restart_services( + ["mme"], wait_for_restart, + ) self._s1ap_wrapper.configUEDevice_ues_same_imsi(num_ues) print("************************* sending Attach Request for ue-id : 1") @@ -173,10 +173,10 @@ def 
test_attach_esm_info_with_apn_correction(self): self._s1ap_wrapper.magmad_util.config_apn_correction( MagmadUtil.apn_correction_cmds.DISABLE, ) - self._s1ap_wrapper.magmad_util.restart_services(["mme"]) - for j in range(30): - print("Waiting mme restart for", j, "seconds") - time.sleep(1) + wait_for_restart = 30 + self._s1ap_wrapper.magmad_util.restart_services( + ["mme"], wait_for_restart, + ) if __name__ == "__main__": diff --git a/lte/gateway/python/integ_tests/s1aptests/test_attach_mme_restart_detach_multi_ue.py b/lte/gateway/python/integ_tests/s1aptests/test_attach_mme_restart_detach_multi_ue.py index 9070b3b692bf..4bcfc64990cf 100644 --- a/lte/gateway/python/integ_tests/s1aptests/test_attach_mme_restart_detach_multi_ue.py +++ b/lte/gateway/python/integ_tests/s1aptests/test_attach_mme_restart_detach_multi_ue.py @@ -67,11 +67,10 @@ def test_attach_mme_restart_detach_multi_ue(self): ue_ids.append(req.ue_id) print("************************* Restarting MME service on gateway") - self._s1ap_wrapper.magmad_util.restart_services(["mme"]) - - for j in range(30): - print("Waiting for", j, "seconds") - time.sleep(1) + wait_for_restart = 30 + self._s1ap_wrapper.magmad_util.restart_services( + ["mme"], wait_for_restart, + ) for ue in ue_ids: # Now detach the UE diff --git a/lte/gateway/python/integ_tests/s1aptests/test_attach_nw_initiated_detach_with_mme_restart.py b/lte/gateway/python/integ_tests/s1aptests/test_attach_nw_initiated_detach_with_mme_restart.py index 8c579367905b..46f56afb8447 100644 --- a/lte/gateway/python/integ_tests/s1aptests/test_attach_nw_initiated_detach_with_mme_restart.py +++ b/lte/gateway/python/integ_tests/s1aptests/test_attach_nw_initiated_detach_with_mme_restart.py @@ -84,11 +84,10 @@ def test_attach_nw_initiated_detach_with_mme_restart(self): "************************* Restarting MME service on", "gateway", ) - self._s1ap_wrapper.magmad_util.restart_services(["mme"]) - - for j in range(30): - print("Waiting for", j, "seconds") - time.sleep(1) + 
wait_for_restart = 30 + self._s1ap_wrapper.magmad_util.restart_services( + ["mme"], wait_for_restart, + ) # Receive NW initiated detach request response = self._s1ap_wrapper.s1_util.get_response() diff --git a/lte/gateway/python/integ_tests/s1aptests/test_attach_ul_udp_data_with_mme_restart.py b/lte/gateway/python/integ_tests/s1aptests/test_attach_ul_udp_data_with_mme_restart.py index b90f8c87b871..c8ef3910d2d3 100644 --- a/lte/gateway/python/integ_tests/s1aptests/test_attach_ul_udp_data_with_mme_restart.py +++ b/lte/gateway/python/integ_tests/s1aptests/test_attach_ul_udp_data_with_mme_restart.py @@ -57,11 +57,10 @@ def test_attach_ul_udp_data_with_mme_restart(self): test.verify() print("************************* Restarting MME service on gateway") - self._s1ap_wrapper.magmad_util.restart_services(["mme"]) - - for j in range(30): - print("Waiting for", j, "seconds") - time.sleep(1) + wait_for_restart = 30 + self._s1ap_wrapper.magmad_util.restart_services( + ["mme"], wait_for_restart, + ) print( "************************* Running UE uplink (UDP) for UE id ", diff --git a/lte/gateway/python/integ_tests/s1aptests/test_attach_ul_udp_data_with_mobilityd_restart.py b/lte/gateway/python/integ_tests/s1aptests/test_attach_ul_udp_data_with_mobilityd_restart.py index ff0b920d3b4f..e3dec84c5628 100644 --- a/lte/gateway/python/integ_tests/s1aptests/test_attach_ul_udp_data_with_mobilityd_restart.py +++ b/lte/gateway/python/integ_tests/s1aptests/test_attach_ul_udp_data_with_mobilityd_restart.py @@ -63,11 +63,10 @@ def test_attach_ul_udp_data_with_mobilityd_restart(self): "************************* Restarting Mobilityd service", "on gateway", ) - self._s1ap_wrapper.magmad_util.restart_services(["mobilityd"]) - - for j in range(30): - print("Waiting for", j, "seconds") - time.sleep(1) + wait_for_restart = 30 + self._s1ap_wrapper.magmad_util.restart_services( + ["mobilityd"], wait_for_restart, + ) print( "************************* Running UE uplink (UDP) for UE id ", diff --git 
a/lte/gateway/python/integ_tests/s1aptests/test_attach_ul_udp_data_with_multiple_service_restart.py b/lte/gateway/python/integ_tests/s1aptests/test_attach_ul_udp_data_with_multiple_service_restart.py index 079cfcdad1a0..9b397e2054ef 100644 --- a/lte/gateway/python/integ_tests/s1aptests/test_attach_ul_udp_data_with_multiple_service_restart.py +++ b/lte/gateway/python/integ_tests/s1aptests/test_attach_ul_udp_data_with_multiple_service_restart.py @@ -63,14 +63,10 @@ def test_attach_ul_udp_data_with_multiple_service_restart(self): "************************* Restarting Mobilityd, MME and", "Pipelined services on gateway", ) - self._s1ap_wrapper.magmad_util.restart_services([ - "mobilityd", "mme", - "pipelined", - ]) - - for j in range(30): - print("Waiting for", j, "seconds") - time.sleep(1) + wait_for_restart = 30 + self._s1ap_wrapper.magmad_util.restart_services( + ["mobilityd", "mme", "pipelined"], wait_for_restart, + ) print( "************************* Running UE uplink (UDP) for UE id ", diff --git a/lte/gateway/python/integ_tests/s1aptests/test_attach_ul_udp_data_with_pipelined_restart.py b/lte/gateway/python/integ_tests/s1aptests/test_attach_ul_udp_data_with_pipelined_restart.py index e2831fd1898e..852036ee9bd1 100644 --- a/lte/gateway/python/integ_tests/s1aptests/test_attach_ul_udp_data_with_pipelined_restart.py +++ b/lte/gateway/python/integ_tests/s1aptests/test_attach_ul_udp_data_with_pipelined_restart.py @@ -63,11 +63,10 @@ def test_attach_ul_udp_data_with_pipelined_restart(self): "************************* Restarting Pipelined service", "on gateway", ) - self._s1ap_wrapper.magmad_util.restart_services(["pipelined"]) - - for j in range(30): - print("Waiting for", j, "seconds") - time.sleep(1) + wait_for_restart = 30 + self._s1ap_wrapper.magmad_util.restart_services( + ["pipelined"], wait_for_restart, + ) print( "************************* Running UE uplink (UDP) for UE id ", diff --git 
a/lte/gateway/python/integ_tests/s1aptests/test_attach_ul_udp_data_with_sessiond_restart.py b/lte/gateway/python/integ_tests/s1aptests/test_attach_ul_udp_data_with_sessiond_restart.py index b36441b482e5..7576915c77b9 100644 --- a/lte/gateway/python/integ_tests/s1aptests/test_attach_ul_udp_data_with_sessiond_restart.py +++ b/lte/gateway/python/integ_tests/s1aptests/test_attach_ul_udp_data_with_sessiond_restart.py @@ -63,11 +63,10 @@ def test_attach_ul_udp_data_with_sessiond_restart(self): "************************* Restarting Sessiond service", "on gateway", ) - self._s1ap_wrapper.magmad_util.restart_services(["sessiond"]) - - for j in range(30): - print("Waiting for", j, "seconds") - time.sleep(1) + wait_for_restart = 30 + self._s1ap_wrapper.magmad_util.restart_services( + ["sessiond"], wait_for_restart, + ) print( "************************* Running UE uplink (UDP) for UE id ", diff --git a/lte/gateway/python/integ_tests/s1aptests/test_attach_with_multiple_mme_restarts.py b/lte/gateway/python/integ_tests/s1aptests/test_attach_with_multiple_mme_restarts.py index 2e8bddb2ee83..4d17e653da69 100644 --- a/lte/gateway/python/integ_tests/s1aptests/test_attach_with_multiple_mme_restarts.py +++ b/lte/gateway/python/integ_tests/s1aptests/test_attach_with_multiple_mme_restarts.py @@ -58,8 +58,8 @@ def test_attach_with_multiple_mme_restarts(self): print("************************* Received UE_AUTH_REQ_IND") # Try consecutive mme restarts - self._s1ap_wrapper.magmad_util.restart_mme_and_wait() - self._s1ap_wrapper.magmad_util.restart_mme_and_wait() + self._s1ap_wrapper.magmad_util.restart_services(['mme']) + self._s1ap_wrapper.magmad_util.restart_services(['mme']) auth_res = s1ap_types.ueAuthResp_t() auth_res.ue_Id = 1 @@ -82,7 +82,7 @@ def test_attach_with_multiple_mme_restarts(self): ) print("************************* Received UE_SEC_MOD_CMD_IND") - self._s1ap_wrapper.magmad_util.restart_mme_and_wait() + self._s1ap_wrapper.magmad_util.restart_services(['mme']) 
print("************************* Sending UE_SEC_MOD_COMPLETE") sec_mode_complete = s1ap_types.ueSecModeComplete_t() @@ -103,7 +103,7 @@ def test_attach_with_multiple_mme_restarts(self): attach_acc.ue_Id, ) - self._s1ap_wrapper.magmad_util.restart_mme_and_wait() + self._s1ap_wrapper.magmad_util.restart_services(['mme']) # Trigger Attach Complete print("************************* Sending UE_ATTACH_COMPLETE") diff --git a/lte/gateway/python/integ_tests/s1aptests/test_enb_partial_reset_multi_ue_with_mme_restart.py b/lte/gateway/python/integ_tests/s1aptests/test_enb_partial_reset_multi_ue_with_mme_restart.py index b67e6de04005..c75daeb3b6eb 100644 --- a/lte/gateway/python/integ_tests/s1aptests/test_enb_partial_reset_multi_ue_with_mme_restart.py +++ b/lte/gateway/python/integ_tests/s1aptests/test_enb_partial_reset_multi_ue_with_mme_restart.py @@ -103,11 +103,10 @@ def test_enb_partial_reset_multi_ue_with_mme_restart(self): ) print("************************* Restarting MME service on gateway") - self._s1ap_wrapper.magmad_util.restart_services(["mme"]) - - for j in range(30): - print("Waiting for", j, "seconds") - time.sleep(1) + wait_for_restart = 30 + self._s1ap_wrapper.magmad_util.restart_services( + ["mme"], wait_for_restart, + ) response = self._s1ap_wrapper.s1_util.get_response() self.assertEqual(response.msg_type, s1ap_types.tfwCmd.RESET_ACK.value) diff --git a/lte/gateway/python/integ_tests/s1aptests/test_ics_timer_expiry_with_mme_restart.py b/lte/gateway/python/integ_tests/s1aptests/test_ics_timer_expiry_with_mme_restart.py index 7bf7a10aac33..099ff77516b1 100644 --- a/lte/gateway/python/integ_tests/s1aptests/test_ics_timer_expiry_with_mme_restart.py +++ b/lte/gateway/python/integ_tests/s1aptests/test_ics_timer_expiry_with_mme_restart.py @@ -109,11 +109,10 @@ def test_ics_timer_expiry_with_mme_restart(self): response.msg_type, s1ap_types.tfwCmd.UE_ICS_DROPD_IND.value, ) print("************************* Restarting MME service on gateway") - 
self._s1ap_wrapper.magmad_util.restart_services(["mme"]) - - for j in range(30): - print("Waiting for", j, "seconds") - time.sleep(1) + wait_for_restart = 30 + self._s1ap_wrapper.magmad_util.restart_services( + ["mme"], wait_for_restart, + ) print("************************* Waiting for response from MME") response = self._s1ap_wrapper.s1_util.get_response() diff --git a/lte/gateway/python/integ_tests/s1aptests/test_idle_mode_with_mme_restart.py b/lte/gateway/python/integ_tests/s1aptests/test_idle_mode_with_mme_restart.py index 5f63d0b10d4e..8dfa3434c616 100644 --- a/lte/gateway/python/integ_tests/s1aptests/test_idle_mode_with_mme_restart.py +++ b/lte/gateway/python/integ_tests/s1aptests/test_idle_mode_with_mme_restart.py @@ -80,11 +80,10 @@ def test_idle_mode_with_mme_restart(self): ) print("************************* Restarting MME service on gateway") - self._s1ap_wrapper.magmad_util.restart_services(["mme"]) - - for j in range(30): - print("Waiting for", j, "seconds") - time.sleep(1) + wait_for_restart = 30 + self._s1ap_wrapper.magmad_util.restart_services( + ["mme"], wait_for_restart, + ) print( "************************* Sending Service request for UE id ", diff --git a/lte/gateway/python/integ_tests/s1aptests/test_implicit_detach_timer_with_mme_restart.py b/lte/gateway/python/integ_tests/s1aptests/test_implicit_detach_timer_with_mme_restart.py index 25e6bf93a44c..9e2357f5a428 100644 --- a/lte/gateway/python/integ_tests/s1aptests/test_implicit_detach_timer_with_mme_restart.py +++ b/lte/gateway/python/integ_tests/s1aptests/test_implicit_detach_timer_with_mme_restart.py @@ -107,11 +107,10 @@ def test_implicit_detach_timer_with_mme_restart(self): print("*********** Slept for", time_slept, "seconds") print("************************* Restarting MME service on gateway") - self._s1ap_wrapper.magmad_util.restart_services(["mme"]) - - for j in range(30): - print("Waiting for", j, "seconds") - time.sleep(1) + wait_for_restart = 30 + 
self._s1ap_wrapper.magmad_util.restart_services( + ["mme"], wait_for_restart, + ) # Wait for Implicit detach timer to expire, on expiry of which MME deletes # UE contexts locally, S1ap tester shall send Service Request expecting diff --git a/lte/gateway/python/integ_tests/s1aptests/test_mobile_reachability_tmr_with_mme_restart.py b/lte/gateway/python/integ_tests/s1aptests/test_mobile_reachability_tmr_with_mme_restart.py index 502137d14ebb..993fd7d2702e 100644 --- a/lte/gateway/python/integ_tests/s1aptests/test_mobile_reachability_tmr_with_mme_restart.py +++ b/lte/gateway/python/integ_tests/s1aptests/test_mobile_reachability_tmr_with_mme_restart.py @@ -93,11 +93,10 @@ def test_mobile_reachability_tmr_with_mme_restart(self): ) print("************************* Restarting MME service on gateway") - self._s1ap_wrapper.magmad_util.restart_services(["mme"]) - - for j in range(30): - print("Waiting for", j, "seconds") - time.sleep(1) + wait_for_restart = 30 + self._s1ap_wrapper.magmad_util.restart_services( + ["mme"], wait_for_restart, + ) # Delay by 11 minutes to ensure Mobile reachability timer and Implicit # detach timer expires diff --git a/lte/gateway/python/integ_tests/s1aptests/test_no_attach_complete_with_mme_restart.py b/lte/gateway/python/integ_tests/s1aptests/test_no_attach_complete_with_mme_restart.py index 5a541ff8bb50..a85011cdd74b 100644 --- a/lte/gateway/python/integ_tests/s1aptests/test_no_attach_complete_with_mme_restart.py +++ b/lte/gateway/python/integ_tests/s1aptests/test_no_attach_complete_with_mme_restart.py @@ -110,11 +110,10 @@ def test_no_attach_complete_with_mme_restart(self): "************************* Restarting MME service on", "gateway", ) - self._s1ap_wrapper.magmad_util.restart_services(["mme"]) - - for j in range(20): - print("Waiting for", j, "seconds") - time.sleep(1) + wait_for_restart = 20 + self._s1ap_wrapper.magmad_util.restart_services( + ["mme"], wait_for_restart, + ) # Receive NW initiated detach request response = 
self._s1ap_wrapper.s1_util.get_response() diff --git a/lte/gateway/python/integ_tests/s1aptests/test_no_auth_resp_with_mme_restart_reattach.py b/lte/gateway/python/integ_tests/s1aptests/test_no_auth_resp_with_mme_restart_reattach.py index ac2258976df9..0a935dfad35d 100644 --- a/lte/gateway/python/integ_tests/s1aptests/test_no_auth_resp_with_mme_restart_reattach.py +++ b/lte/gateway/python/integ_tests/s1aptests/test_no_auth_resp_with_mme_restart_reattach.py @@ -65,11 +65,10 @@ def test_no_auth_resp_with_mme_restart_reattach(self): print("************************* Received Auth Req for ue", req.ue_id) print("************************* Restarting MME service on gateway") - self._s1ap_wrapper.magmad_util.restart_services(["mme"]) - - for j in range(30): - print("Waiting for", j, "seconds") - time.sleep(1) + wait_for_restart = 30 + self._s1ap_wrapper.magmad_util.restart_services( + ["mme"], wait_for_restart, + ) # Wait for UE context release command response = self._s1ap_wrapper.s1_util.get_response() diff --git a/lte/gateway/python/integ_tests/s1aptests/test_no_auth_response_with_mme_restart.py b/lte/gateway/python/integ_tests/s1aptests/test_no_auth_response_with_mme_restart.py index 8ef67e3499b1..637ef1309ce6 100644 --- a/lte/gateway/python/integ_tests/s1aptests/test_no_auth_response_with_mme_restart.py +++ b/lte/gateway/python/integ_tests/s1aptests/test_no_auth_response_with_mme_restart.py @@ -78,11 +78,10 @@ def test_no_auth_response_with_mme_restart(self): print("********* Received UE_CTX_REL_IND for ue", req.ue_id) print("************************* Restarting MME service on", "gateway") - self._s1ap_wrapper.magmad_util.restart_services(["mme"]) - - for j in range(30): - print("Waiting for", j, "seconds") - time.sleep(1) + wait_for_restart = 30 + self._s1ap_wrapper.magmad_util.restart_services( + ["mme"], wait_for_restart, + ) print("****** Triggering attach after mme restart *********") diff --git 
a/lte/gateway/python/integ_tests/s1aptests/test_no_esm_information_rsp_with_mme_restart.py b/lte/gateway/python/integ_tests/s1aptests/test_no_esm_information_rsp_with_mme_restart.py index de8f8a188b1b..74484cf123d0 100644 --- a/lte/gateway/python/integ_tests/s1aptests/test_no_esm_information_rsp_with_mme_restart.py +++ b/lte/gateway/python/integ_tests/s1aptests/test_no_esm_information_rsp_with_mme_restart.py @@ -93,11 +93,10 @@ def test_no_esm_information_rsp_with_mme_restart(self): print("Received Esm Information Request ue-id", ue_id) print("************************* Restarting MME service on", "gateway") - self._s1ap_wrapper.magmad_util.restart_services(["mme"]) - - for j in range(30): - print("Waiting for", j, "seconds") - time.sleep(1) + wait_for_restart = 30 + self._s1ap_wrapper.magmad_util.restart_services( + ["mme"], wait_for_restart, + ) # Receive UE_ESM_INFORMATION_REQ, as sometimes MME retransmits # UE_ESM_INFORMATION_REQ message before it restarts diff --git a/lte/gateway/python/integ_tests/s1aptests/test_no_identity_rsp_with_mme_restart.py b/lte/gateway/python/integ_tests/s1aptests/test_no_identity_rsp_with_mme_restart.py index 47cb975aed21..b6f6b1dad74b 100644 --- a/lte/gateway/python/integ_tests/s1aptests/test_no_identity_rsp_with_mme_restart.py +++ b/lte/gateway/python/integ_tests/s1aptests/test_no_identity_rsp_with_mme_restart.py @@ -75,11 +75,10 @@ def test_no_identity_rsp_with_mme_restart(self): ) print("********************** Restarting MME service on gateway ***") - self._s1ap_wrapper.magmad_util.restart_services(["mme"]) - - for j in range(30): - print("Waiting for", j, "seconds") - time.sleep(1) + wait_for_restart = 30 + self._s1ap_wrapper.magmad_util.restart_services( + ["mme"], wait_for_restart, + ) # Since UE has neither received Attach Reject nor Attach Accept, # assuming that both T3410 and T3411 timer expires at UE diff --git a/lte/gateway/python/integ_tests/s1aptests/test_no_smc_with_mme_restart_reattach.py 
b/lte/gateway/python/integ_tests/s1aptests/test_no_smc_with_mme_restart_reattach.py index 22468608fb4c..dd2748e90db1 100644 --- a/lte/gateway/python/integ_tests/s1aptests/test_no_smc_with_mme_restart_reattach.py +++ b/lte/gateway/python/integ_tests/s1aptests/test_no_smc_with_mme_restart_reattach.py @@ -73,11 +73,10 @@ def test_no_smc_with_mme_restart_reattach(self): print("************* Received SMC for ue", req.ue_id) print("************************* Restarting MME service on", "gateway") - self._s1ap_wrapper.magmad_util.restart_services(["mme"]) - - for j in range(30): - print("Waiting for", j, "seconds") - time.sleep(1) + wait_for_restart = 30 + self._s1ap_wrapper.magmad_util.restart_services( + ["mme"], wait_for_restart, + ) # Wait for UE context release command response = self._s1ap_wrapper.s1_util.get_response() diff --git a/lte/gateway/python/integ_tests/s1aptests/test_paging_after_mme_restart.py b/lte/gateway/python/integ_tests/s1aptests/test_paging_after_mme_restart.py index b9a3a5478e2b..d49120e1bbeb 100644 --- a/lte/gateway/python/integ_tests/s1aptests/test_paging_after_mme_restart.py +++ b/lte/gateway/python/integ_tests/s1aptests/test_paging_after_mme_restart.py @@ -86,12 +86,10 @@ def test_paging_after_mme_restart(self): wait_time = 0.3 time.sleep(wait_time) print('************************* Restarting MME service on', 'gateway') - self._s1ap_wrapper.magmad_util.restart_services(['mme']) - - wait_time = 20 - for j in range(wait_time, 0, -1): - print('Waiting for', j, 'seconds') - time.sleep(1) + wait_for_restart = 20 + self._s1ap_wrapper.magmad_util.restart_services( + ["mme"], wait_for_restart, + ) print( '************************* Running UE downlink (UDP) for UE id ', diff --git a/lte/gateway/python/integ_tests/s1aptests/test_paging_with_mme_restart.py b/lte/gateway/python/integ_tests/s1aptests/test_paging_with_mme_restart.py index d3396ec1062d..dc4d6b413e60 100644 --- a/lte/gateway/python/integ_tests/s1aptests/test_paging_with_mme_restart.py +++ 
b/lte/gateway/python/integ_tests/s1aptests/test_paging_with_mme_restart.py @@ -109,11 +109,10 @@ def test_paging_with_mme_restart(self): "************************* Restarting MME service on", "gateway", ) - self._s1ap_wrapper.magmad_util.restart_services(["mme"]) - - for j in range(30): - print("Waiting for", j, "seconds") - time.sleep(1) + wait_for_restart = 30 + self._s1ap_wrapper.magmad_util.restart_services( + ["mme"], wait_for_restart, + ) # Send service request to reconnect UE ser_req = s1ap_types.ueserviceReq_t() diff --git a/lte/gateway/python/integ_tests/s1aptests/test_secondary_pdn_with_dedicated_bearer_multiple_services_restart.py b/lte/gateway/python/integ_tests/s1aptests/test_secondary_pdn_with_dedicated_bearer_multiple_services_restart.py index 751edb33269a..ff2d2d37b2e1 100644 --- a/lte/gateway/python/integ_tests/s1aptests/test_secondary_pdn_with_dedicated_bearer_multiple_services_restart.py +++ b/lte/gateway/python/integ_tests/s1aptests/test_secondary_pdn_with_dedicated_bearer_multiple_services_restart.py @@ -286,14 +286,11 @@ def test_secondary_pdn_with_dedicated_bearer_multiple_services_restart( "************************* Restarting Sessiond, MME and", "Pipelined services on gateway", ) + wait_for_restart = 30 self._s1ap_wrapper.magmad_util.restart_services( - ["sessiond", "mme", "pipelined"], + ["sessiond", "mme", "pipelined"], wait_for_restart, ) - for j in range(30): - print("Waiting for", j, "seconds") - time.sleep(1) - print("Sleeping for 5 seconds") time.sleep(5) diff --git a/lte/gateway/python/integ_tests/s1aptests/test_service_req_ul_udp_data_with_mme_restart.py b/lte/gateway/python/integ_tests/s1aptests/test_service_req_ul_udp_data_with_mme_restart.py index 808dc33966d0..4caf88b702df 100644 --- a/lte/gateway/python/integ_tests/s1aptests/test_service_req_ul_udp_data_with_mme_restart.py +++ b/lte/gateway/python/integ_tests/s1aptests/test_service_req_ul_udp_data_with_mme_restart.py @@ -85,11 +85,10 @@ def 
test_service_req_ul_udp_data_with_mme_restart(self): ) print("************************* Restarting MME service on gateway") - self._s1ap_wrapper.magmad_util.restart_services(["mme"]) - - for j in range(30): - print("Waiting for", j, "seconds") - time.sleep(1) + wait_for_restart = 30 + self._s1ap_wrapper.magmad_util.restart_services( + ["mme"], wait_for_restart, + ) print( "************************* Sending Service request for UE id ", diff --git a/lte/gateway/python/integ_tests/s1aptests/test_standalone_pdn_conn_req_with_apn_correction.py b/lte/gateway/python/integ_tests/s1aptests/test_standalone_pdn_conn_req_with_apn_correction.py index 83422fb77981..70988d7e459a 100644 --- a/lte/gateway/python/integ_tests/s1aptests/test_standalone_pdn_conn_req_with_apn_correction.py +++ b/lte/gateway/python/integ_tests/s1aptests/test_standalone_pdn_conn_req_with_apn_correction.py @@ -40,10 +40,10 @@ def test_standalone_pdn_conn_req_with_apn_correction(self): num_ues = 1 print("************************* restarting mme") - self._s1ap_wrapper.magmad_util.restart_services(["mme"]) - for j in range(30): - print("Waiting mme restart for", j, "seconds") - time.sleep(1) + wait_for_restart = 30 + self._s1ap_wrapper.magmad_util.restart_services( + ["mme"], wait_for_restart, + ) self._s1ap_wrapper.configUEDevice(num_ues) req = self._s1ap_wrapper.ue_req @@ -109,10 +109,10 @@ def test_standalone_pdn_conn_req_with_apn_correction(self): self._s1ap_wrapper.magmad_util.config_apn_correction( MagmadUtil.apn_correction_cmds.DISABLE, ) - self._s1ap_wrapper.magmad_util.restart_services(["mme"]) - for j in range(30): - print("Waiting mme restart for", j, "seconds") - time.sleep(1) + wait_for_restart = 30 + self._s1ap_wrapper.magmad_util.restart_services( + ["mme"], wait_for_restart, + ) if __name__ == "__main__": diff --git a/lte/gateway/python/integ_tests/s1aptests/test_stateless_multi_ue_mixedstate_mme_restart.py 
b/lte/gateway/python/integ_tests/s1aptests/test_stateless_multi_ue_mixedstate_mme_restart.py index d689a672dd9c..501ed95ac196 100644 --- a/lte/gateway/python/integ_tests/s1aptests/test_stateless_multi_ue_mixedstate_mme_restart.py +++ b/lte/gateway/python/integ_tests/s1aptests/test_stateless_multi_ue_mixedstate_mme_restart.py @@ -235,7 +235,7 @@ def test_stateless_multi_ue_mixedstate_mme_restart(self): attach_steps[step](req.ue_id) # Restart mme - self._s1ap_wrapper.magmad_util.restart_mme_and_wait() + self._s1ap_wrapper.magmad_util.restart_services(['mme']) # Post restart, complete the attach procedures that were cut in between for i in range(num_ues_attaching): diff --git a/lte/gateway/python/integ_tests/s1aptests/util/traffic_util.py b/lte/gateway/python/integ_tests/s1aptests/util/traffic_util.py index 3c5e267099fd..5dad6a67c79f 100644 --- a/lte/gateway/python/integ_tests/s1aptests/util/traffic_util.py +++ b/lte/gateway/python/integ_tests/s1aptests/util/traffic_util.py @@ -409,7 +409,7 @@ def _iface_up_ipv6(ip): ifname=TrafficTest._net_iface_ipv6, )[0] TrafficTest._iproute.addr( - 'add', index=net_iface_index, address=ip.exploded, + 'add', index=net_iface_index, address=ip.exploded, prefixlen=128, ) os.system( 'sudo route -A inet6 add 3001::10/64 dev eth3', diff --git a/lte/gateway/python/load_tests/BUILD.bazel b/lte/gateway/python/load_tests/BUILD.bazel index 5d9594d31988..12afb36f3c84 100644 --- a/lte/gateway/python/load_tests/BUILD.bazel +++ b/lte/gateway/python/load_tests/BUILD.bazel @@ -10,7 +10,42 @@ # limitations under the License. 
load("@python_deps//:requirements.bzl", "requirement") +load("@rules_pkg//pkg:mappings.bzl", "pkg_filegroup", "pkg_mklink") load("@rules_python//python:defs.bzl", "py_binary", "py_library") +load("//bazel:deb_build.bzl", "PY_DEST") +load("//bazel:runfiles.bzl", "expand_runfiles") + +SCRIPTS = [ + "loadtest_mobilityd", + "loadtest_pipelined", + "loadtest_sessiond", + "loadtest_subscriberdb", +] + +expand_runfiles( + name = "scripts_expanded", + targets = [":{script}".format(script = script) for script in SCRIPTS], +) + +[ + pkg_mklink( + name = "{script}_symlink".format(script = script), + link_name = "/usr/local/bin/{script}.py".format(script = script), + target = "{dest}/load_tests/{script}.py".format( + dest = PY_DEST, + script = script, + ), + ) + for script in SCRIPTS +] + +pkg_filegroup( + name = "magma_lte_loadtest_scripts", + srcs = [":scripts_expanded"] + + ["{script}_symlink".format(script = script) for script in SCRIPTS], + prefix = PY_DEST, + visibility = ["//lte/gateway/release:__pkg__"], +) MAGMA_ROOT = "../../../../" diff --git a/lte/gateway/python/magma/health/BUILD.bazel b/lte/gateway/python/magma/health/BUILD.bazel index 3951b06c91c9..2a41ed24f71d 100644 --- a/lte/gateway/python/magma/health/BUILD.bazel +++ b/lte/gateway/python/magma/health/BUILD.bazel @@ -29,7 +29,7 @@ py_binary( legacy_create_init = False, main = "main.py", python_version = "PY3", - visibility = ["//visibility:private"], + visibility = ["//lte/gateway/python:__pkg__"], deps = [ ":health_lib", "//orc8r/gateway/python/magma/common:sentry", diff --git a/lte/gateway/python/magma/kernsnoopd/BUILD.bazel b/lte/gateway/python/magma/kernsnoopd/BUILD.bazel index 9a25d316accf..c7435da38526 100644 --- a/lte/gateway/python/magma/kernsnoopd/BUILD.bazel +++ b/lte/gateway/python/magma/kernsnoopd/BUILD.bazel @@ -21,10 +21,6 @@ LTE_ROOT = "{}lte/gateway/python".format(MAGMA_ROOT) py_binary( name = "kernsnoopd", srcs = ["main.py"], - data = [ - "ebpf/byte_count.bpf.c", - "ebpf/common.bpf.h", - ], 
imports = [ LTE_ROOT, ORC8R_ROOT, @@ -33,7 +29,7 @@ py_binary( legacy_create_init = False, main = "main.py", python_version = "PY3", - visibility = ["//visibility:private"], + visibility = ["//lte/gateway/python:__pkg__"], deps = [ ":kernsnoopd_lib", "//orc8r/gateway/python/magma/common:sentry", diff --git a/example/gateway/deploy/roles/example_services/files/magma_hello.service b/lte/gateway/python/magma/kernsnoopd/ebpf/BUILD.bazel similarity index 55% rename from example/gateway/deploy/roles/example_services/files/magma_hello.service rename to lte/gateway/python/magma/kernsnoopd/ebpf/BUILD.bazel index 45715a52a0e5..06ec3cffa61c 100644 --- a/example/gateway/deploy/roles/example_services/files/magma_hello.service +++ b/lte/gateway/python/magma/kernsnoopd/ebpf/BUILD.bazel @@ -1,4 +1,4 @@ -# Copyright 2020 The Magma Authors. +# Copyright 2022 The Magma Authors. # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. @@ -8,21 +8,20 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-# -[Unit] -Description=Example hello service -[Service] -Type=simple -EnvironmentFile=/etc/environment -ExecStart=/usr/bin/env python3 -m magmaexample.%i.main -StandardOutput=syslog -StandardError=syslog -SyslogIdentifier=hello -User=root -Restart=always -RestartSec=5s -StartLimitInterval=0 +load("@rules_pkg//pkg:mappings.bzl", "pkg_files") -[Install] -WantedBy=multi-user.target +filegroup( + name = "ebpf_data_files", + srcs = [ + "byte_count.bpf.c", + "common.bpf.h", + ], +) + +pkg_files( + name = "magma_ebpf_kernsnoopd", + srcs = [":ebpf_data_files"], + prefix = "kernsnoopd", + visibility = ["//lte/gateway/release:__pkg__"], +) diff --git a/lte/gateway/python/magma/mobilityd/BUILD.bazel b/lte/gateway/python/magma/mobilityd/BUILD.bazel index bb2c2a19ea25..f5bc597c9a32 100644 --- a/lte/gateway/python/magma/mobilityd/BUILD.bazel +++ b/lte/gateway/python/magma/mobilityd/BUILD.bazel @@ -29,7 +29,7 @@ py_binary( legacy_create_init = False, main = "main.py", python_version = "PY3", - visibility = ["//visibility:private"], + visibility = ["//lte/gateway/python:__pkg__"], deps = [ ":mobilityd_lib", "//lte/protos:mconfigs_python_proto", diff --git a/lte/gateway/python/magma/mobilityd/uplink_gw.py b/lte/gateway/python/magma/mobilityd/uplink_gw.py index 10ba3857ecae..b469a002b0e5 100644 --- a/lte/gateway/python/magma/mobilityd/uplink_gw.py +++ b/lte/gateway/python/magma/mobilityd/uplink_gw.py @@ -93,6 +93,7 @@ def _do_read_default_gw(self): self._read_default_gw_interval_seconds, self._do_read_default_gw, ) + self._read_default_gw_timer.setDaemon(True) self._read_default_gw_timer.start() logging.info("GW probe: timer started") @@ -117,6 +118,7 @@ def _do_read_default_gw_v6(self): self._read_default_gw_interval_seconds, self._do_read_default_gw_v6, ) + self._read_default_gw_timer6.setDaemon(True) self._read_default_gw_timer6.start() logging.info("GW-v6 probe: timer started") diff --git a/lte/gateway/python/magma/monitord/BUILD.bazel 
b/lte/gateway/python/magma/monitord/BUILD.bazel index ec0ce6aafae0..f2ead68429a8 100644 --- a/lte/gateway/python/magma/monitord/BUILD.bazel +++ b/lte/gateway/python/magma/monitord/BUILD.bazel @@ -28,7 +28,7 @@ py_binary( # legacy_create_init = False is required to fix issues in module import, see https://github.com/rules-proto-grpc/rules_proto_grpc/issues/145 legacy_create_init = False, main = "main.py", - visibility = ["//visibility:private"], + visibility = ["//lte/gateway/python:__pkg__"], deps = [ ":monitord_lib", "//lte/protos:mconfigs_python_proto", diff --git a/lte/gateway/python/magma/pipelined/app/uplink_bridge.py b/lte/gateway/python/magma/pipelined/app/uplink_bridge.py index 139de534741a..24cf155cab7d 100644 --- a/lte/gateway/python/magma/pipelined/app/uplink_bridge.py +++ b/lte/gateway/python/magma/pipelined/app/uplink_bridge.py @@ -256,6 +256,13 @@ def _set_sgi_ipv4_and_v6_ingress_flows(self): actions = "output:LOCAL" self._install_flow(flows.MEDIUM_PRIORITY + 1, match, actions) + # forward the solicited-node multicast msg to the local machine + match = "in_port=%s,ipv6,ipv6_dst=%s" % ( + self.config.uplink_eth_port_name, + SOLICITED_NODE_MULTICAST, + ) + self._install_flow(flows.MEDIUM_PRIORITY + 1, match, "output:LOCAL") + def _delete_all_flows(self): if self.config.uplink_bridge is None: return diff --git a/lte/gateway/python/magma/pipelined/ebpf/BUILD.bazel b/lte/gateway/python/magma/pipelined/ebpf/BUILD.bazel index b485bf22ec40..4f7d7181cebb 100644 --- a/lte/gateway/python/magma/pipelined/ebpf/BUILD.bazel +++ b/lte/gateway/python/magma/pipelined/ebpf/BUILD.bazel @@ -10,6 +10,7 @@ # limitations under the License. 
load("@python_deps//:requirements.bzl", "requirement") +load("@rules_pkg//pkg:mappings.bzl", "pkg_files") load("@rules_python//python:defs.bzl", "py_library") package(default_visibility = ["//visibility:public"]) @@ -26,3 +27,13 @@ py_library( requirement("scapy"), ], ) + +pkg_files( + name = "magma_ebpf_pipelined", + srcs = [ + ":ebpf_dl_handler.c", + ":ebpf_manager.py", + ":ebpf_ul_handler.c", + ], + visibility = ["//lte/gateway/release:__pkg__"], +) diff --git a/lte/gateway/python/magma/pipelined/tests/snapshots/test_qfi_gtp.QfigtpTest.test_qfi_delete_tunnel_flows.snapshot b/lte/gateway/python/magma/pipelined/tests/snapshots/test_qfi_gtp.QfigtpTest.test_qfi_delete_tunnel_flows.snapshot deleted file mode 100644 index 6daf061ce7c0..000000000000 --- a/lte/gateway/python/magma/pipelined/tests/snapshots/test_qfi_gtp.QfigtpTest.test_qfi_delete_tunnel_flows.snapshot +++ /dev/null @@ -1,3 +0,0 @@ - priority=0 actions=resubmit(,ingress(main_table)) - priority=0,in_port=15578 actions=resubmit(,201) - priority=0,in_port=15579 actions=resubmit(,202) diff --git a/lte/gateway/python/magma/pipelined/tests/snapshots/test_qfi_gtp.QfigtpTest.test_qfi_set_tunnel_flows.snapshot b/lte/gateway/python/magma/pipelined/tests/snapshots/test_qfi_gtp.QfigtpTest.test_qfi_set_tunnel_flows.snapshot deleted file mode 100644 index 73a1cb467f3f..000000000000 --- a/lte/gateway/python/magma/pipelined/tests/snapshots/test_qfi_gtp.QfigtpTest.test_qfi_set_tunnel_flows.snapshot +++ /dev/null @@ -1,5 +0,0 @@ - cookie=0x0, table=classifier(main_table), n_packets=0, n_bytes=0, priority=10,tun_id=0x1,qfi=9,in_port=32768 actions=mod_dl_src:02:00:00:00:00:01,mod_dl_dst:ff:ff:ff:ff:ff:ff,set_field:0x186a0->reg9,load:0x1388->OXM_OF_METADATA[],resubmit(,ingress(main_table)) - cookie=0x0, table=classifier(main_table), n_packets=0, n_bytes=0, priority=10,ip,in_port=LOCAL,nw_dst=192.168.128.30 
actions=load:0x186a0->NXM_NX_TUN_ID[],load:0xc0a83cb2->NXM_NX_TUN_IPV4_DST[],set_field:0x8000->reg8,load:0x1->NXM_NX_TUN_FLAGS[],load:0x9->NXM_NX_QFI[],set_field:0x1->reg9,load:0x1388->OXM_OF_METADATA[],resubmit(,ingress(main_table)) - cookie=0x0, table=classifier(main_table), n_packets=0, n_bytes=0, priority=10,ip,in_port=15577,nw_dst=192.168.128.30 actions=load:0x186a0->NXM_NX_TUN_ID[],load:0xc0a83cb2->NXM_NX_TUN_IPV4_DST[],set_field:0x8000->reg8,load:0x1->NXM_NX_TUN_FLAGS[],load:0x9->NXM_NX_QFI[],set_field:0x1->reg9,load:0x1388->OXM_OF_METADATA[],resubmit(,ingress(main_table)) - cookie=0x0, table=classifier(main_table), n_packets=0, n_bytes=0, priority=10,arp,in_port=LOCAL,arp_tpa=192.168.128.30 actions=load:0x1388->OXM_OF_METADATA[],resubmit(,ingress(main_table)) - cookie=0x0, table=classifier(main_table), n_packets=0, n_bytes=0, priority=10,arp,in_port=15577,arp_tpa=192.168.128.30 actions=load:0x1388->OXM_OF_METADATA[],resubmit(,ingress(main_table)) diff --git a/lte/gateway/python/.cache/feg/snowflake b/lte/gateway/python/magma/pipelined/tests/snapshots/test_qfi_gtp.QfigtpTest.test_qfi_tunnel_flows.empty.snapshot similarity index 100% rename from lte/gateway/python/.cache/feg/snowflake rename to lte/gateway/python/magma/pipelined/tests/snapshots/test_qfi_gtp.QfigtpTest.test_qfi_tunnel_flows.empty.snapshot diff --git a/lte/gateway/python/magma/pipelined/tests/snapshots/test_qfi_gtp.QfigtpTest.test_qfi_tunnel_flows.with_qfi_flows.snapshot b/lte/gateway/python/magma/pipelined/tests/snapshots/test_qfi_gtp.QfigtpTest.test_qfi_tunnel_flows.with_qfi_flows.snapshot new file mode 100644 index 000000000000..63794afed6bd --- /dev/null +++ b/lte/gateway/python/magma/pipelined/tests/snapshots/test_qfi_gtp.QfigtpTest.test_qfi_tunnel_flows.with_qfi_flows.snapshot @@ -0,0 +1,5 @@ + priority=10,tun_id=0x1,qfi=9,in_port=32768 actions=mod_dl_src:02:00:00:00:00:01,mod_dl_dst:ff:ff:ff:ff:ff:ff,set_field:0x186a0->reg9,load:0x1388->OXM_OF_METADATA[],resubmit(,ingress(main_table)) + 
priority=10,ip,in_port=LOCAL,nw_dst=192.168.128.30 actions=load:0x186a0->NXM_NX_TUN_ID[],load:0xc0a83cb2->NXM_NX_TUN_IPV4_DST[],set_field:0x8000->reg8,load:0x1->NXM_NX_TUN_FLAGS[],load:0x9->NXM_NX_QFI[],set_field:0x1->reg9,load:0x1388->OXM_OF_METADATA[],resubmit(,ingress(main_table)) + priority=10,ip,in_port=15577,nw_dst=192.168.128.30 actions=load:0x186a0->NXM_NX_TUN_ID[],load:0xc0a83cb2->NXM_NX_TUN_IPV4_DST[],set_field:0x8000->reg8,load:0x1->NXM_NX_TUN_FLAGS[],load:0x9->NXM_NX_QFI[],set_field:0x1->reg9,load:0x1388->OXM_OF_METADATA[],resubmit(,ingress(main_table)) + priority=10,arp,in_port=LOCAL,arp_tpa=192.168.128.30 actions=load:0x1388->OXM_OF_METADATA[],resubmit(,ingress(main_table)) + priority=10,arp,in_port=15577,arp_tpa=192.168.128.30 actions=load:0x1388->OXM_OF_METADATA[],resubmit(,ingress(main_table)) diff --git a/lte/gateway/python/magma/pipelined/tests/snapshots/test_uplink_bridge.UplinkBridgeWithNonNATTest.testFlowSnapshotMatch.snapshot b/lte/gateway/python/magma/pipelined/tests/snapshots/test_uplink_bridge.UplinkBridgeWithNonNATTest.testFlowSnapshotMatch.snapshot index 9a997470d4a5..832cb6a0ebe2 100644 --- a/lte/gateway/python/magma/pipelined/tests/snapshots/test_uplink_bridge.UplinkBridgeWithNonNATTest.testFlowSnapshotMatch.snapshot +++ b/lte/gateway/python/magma/pipelined/tests/snapshots/test_uplink_bridge.UplinkBridgeWithNonNATTest.testFlowSnapshotMatch.snapshot @@ -4,6 +4,7 @@ priority=100,in_port=2 actions=mod_dl_src:02:bb:5e:36:06:4b,output:3 priority=101,ip,in_port=3,nw_dst=1.1.11.1 actions=LOCAL priority=101,ipv6,in_port=3,ipv6_dst=fe80::48a3:2cff:fe1a:abcd actions=LOCAL + priority=101,ipv6,in_port=3,ipv6_dst=ff02::1:ff00:0/104 actions=LOCAL priority=100,ip,in_port=3,dl_dst=02:bb:5e:36:06:4b actions=output:2 priority=100,ipv6,in_port=3,dl_dst=02:bb:5e:36:06:4b actions=output:2 priority=0,arp,in_port=3 actions=output:1,output:2,LOCAL diff --git 
a/lte/gateway/python/magma/pipelined/tests/snapshots/test_uplink_bridge.UplinkBridgeWithNonNATTestVlan.testFlowSnapshotMatch.snapshot b/lte/gateway/python/magma/pipelined/tests/snapshots/test_uplink_bridge.UplinkBridgeWithNonNATTestVlan.testFlowSnapshotMatch.snapshot index 5680c1122e1e..20cd7f7f42a4 100644 --- a/lte/gateway/python/magma/pipelined/tests/snapshots/test_uplink_bridge.UplinkBridgeWithNonNATTestVlan.testFlowSnapshotMatch.snapshot +++ b/lte/gateway/python/magma/pipelined/tests/snapshots/test_uplink_bridge.UplinkBridgeWithNonNATTestVlan.testFlowSnapshotMatch.snapshot @@ -5,6 +5,7 @@ priority=65534,udp,in_port=3,tp_dst=68 actions=output:1,LOCAL priority=101,ip,in_port=3,nw_dst=1.1.11.1 actions=LOCAL priority=101,ipv6,in_port=3,ipv6_dst=fe80::48a3:2cff:fe1a:dd47 actions=LOCAL + priority=101,ipv6,in_port=3,ipv6_dst=ff02::1:ff00:0/104 actions=LOCAL priority=100,ip,in_port=3,vlan_tci=0x0000/0x1000,dl_dst=02:bb:5e:36:06:4b actions=output:2 priority=100,ipv6,in_port=3,vlan_tci=0x0000/0x1000,dl_dst=02:bb:5e:36:06:4b actions=output:2 priority=100,ip,in_port=3,vlan_tci=0x1000/0x1000,dl_dst=02:bb:5e:36:06:4b actions=strip_vlan,output:70 diff --git a/lte/gateway/python/magma/pipelined/tests/snapshots/test_uplink_bridge.UplinkBridgeWithNonNATTest_IP_VLAN.testFlowSnapshotMatch.snapshot b/lte/gateway/python/magma/pipelined/tests/snapshots/test_uplink_bridge.UplinkBridgeWithNonNATTest_IP_VLAN.testFlowSnapshotMatch.snapshot index 1cd5c4ab5f17..65375ae489cb 100644 --- a/lte/gateway/python/magma/pipelined/tests/snapshots/test_uplink_bridge.UplinkBridgeWithNonNATTest_IP_VLAN.testFlowSnapshotMatch.snapshot +++ b/lte/gateway/python/magma/pipelined/tests/snapshots/test_uplink_bridge.UplinkBridgeWithNonNATTest_IP_VLAN.testFlowSnapshotMatch.snapshot @@ -5,6 +5,7 @@ priority=65534,udp,in_port=3,tp_dst=68 actions=output:1,LOCAL priority=101,ip,in_port=3,nw_dst=1.6.5.7 actions=LOCAL priority=101,ipv6,in_port=3,ipv6_dst=fe80::48a3:2cff:fe1a:cccc actions=LOCAL + 
priority=101,ipv6,in_port=3,ipv6_dst=ff02::1:ff00:0/104 actions=LOCAL priority=100,ip,in_port=3,vlan_tci=0x0000/0x1000,dl_dst=02:bb:5e:36:06:4b actions=output:2 priority=100,ipv6,in_port=3,vlan_tci=0x0000/0x1000,dl_dst=02:bb:5e:36:06:4b actions=output:2 priority=100,ip,in_port=3,vlan_tci=0x1000/0x1000,dl_dst=02:bb:5e:36:06:4b actions=strip_vlan,output:70 diff --git a/lte/gateway/python/magma/pipelined/tests/snapshots/test_uplink_bridge.UplinkBridgeWithNonNATTest_IP_VLAN_GW.testFlowSnapshotMatch.snapshot b/lte/gateway/python/magma/pipelined/tests/snapshots/test_uplink_bridge.UplinkBridgeWithNonNATTest_IP_VLAN_GW.testFlowSnapshotMatch.snapshot index fdd027bb98a4..246ab4299ef8 100644 --- a/lte/gateway/python/magma/pipelined/tests/snapshots/test_uplink_bridge.UplinkBridgeWithNonNATTest_IP_VLAN_GW.testFlowSnapshotMatch.snapshot +++ b/lte/gateway/python/magma/pipelined/tests/snapshots/test_uplink_bridge.UplinkBridgeWithNonNATTest_IP_VLAN_GW.testFlowSnapshotMatch.snapshot @@ -5,6 +5,7 @@ priority=65534,udp,in_port=3,tp_dst=68 actions=output:1,LOCAL priority=101,ip,in_port=3,nw_dst=1.6.5.7 actions=LOCAL priority=101,ipv6,in_port=3,ipv6_dst=fe80::48a3:2cff:aaaa:dd47 actions=LOCAL + priority=101,ipv6,in_port=3,ipv6_dst=ff02::1:ff00:0/104 actions=LOCAL priority=100,ip,in_port=3,vlan_tci=0x0000/0x1000,dl_dst=02:bb:5e:36:06:4b actions=output:2 priority=100,ipv6,in_port=3,vlan_tci=0x0000/0x1000,dl_dst=02:bb:5e:36:06:4b actions=output:2 priority=100,ip,in_port=3,vlan_tci=0x1000/0x1000,dl_dst=02:bb:5e:36:06:4b actions=strip_vlan,output:70 diff --git a/lte/gateway/python/magma/pipelined/tests/snapshots/test_uplink_bridge.UplinkBridgeWithNonNatUplinkConnect_Static_IP_Test.testFlowSnapshotMatch.snapshot b/lte/gateway/python/magma/pipelined/tests/snapshots/test_uplink_bridge.UplinkBridgeWithNonNatUplinkConnect_Static_IP_Test.testFlowSnapshotMatch.snapshot index 825b9941c3ec..a8d60cf43e61 100644 --- 
a/lte/gateway/python/magma/pipelined/tests/snapshots/test_uplink_bridge.UplinkBridgeWithNonNatUplinkConnect_Static_IP_Test.testFlowSnapshotMatch.snapshot +++ b/lte/gateway/python/magma/pipelined/tests/snapshots/test_uplink_bridge.UplinkBridgeWithNonNatUplinkConnect_Static_IP_Test.testFlowSnapshotMatch.snapshot @@ -4,6 +4,7 @@ priority=100,in_port=2 actions=mod_dl_src:02:bb:5e:36:06:4b,output:200 priority=101,ip,in_port=200,nw_dst=10.55.0.41 actions=LOCAL priority=101,ipv6,in_port=200,ipv6_dst=fc00::55:0:111 actions=LOCAL + priority=101,ipv6,in_port=200,ipv6_dst=ff02::1:ff00:0/104 actions=LOCAL priority=100,ip,in_port=200,vlan_tci=0x0000/0x1000,dl_dst=02:bb:5e:36:06:4b actions=output:2 priority=100,ipv6,in_port=200,vlan_tci=0x0000/0x1000,dl_dst=02:bb:5e:36:06:4b actions=output:2 priority=100,ip,in_port=200,vlan_tci=0x1000/0x1000,dl_dst=02:bb:5e:36:06:4b actions=strip_vlan,output:70 diff --git a/lte/gateway/python/magma/pipelined/tests/snapshots/test_uplink_bridge.UplinkBridgeWithNonNatUplinkConnect_Test.testFlowSnapshotMatch.snapshot b/lte/gateway/python/magma/pipelined/tests/snapshots/test_uplink_bridge.UplinkBridgeWithNonNatUplinkConnect_Test.testFlowSnapshotMatch.snapshot index 35c1c9db38eb..53fa7ad18e57 100644 --- a/lte/gateway/python/magma/pipelined/tests/snapshots/test_uplink_bridge.UplinkBridgeWithNonNatUplinkConnect_Test.testFlowSnapshotMatch.snapshot +++ b/lte/gateway/python/magma/pipelined/tests/snapshots/test_uplink_bridge.UplinkBridgeWithNonNatUplinkConnect_Test.testFlowSnapshotMatch.snapshot @@ -4,6 +4,7 @@ priority=100,in_port=2 actions=mod_dl_src:02:bb:5e:36:06:4b,output:200 priority=101,ip,in_port=200,nw_dst=10.55.0.20 actions=LOCAL priority=101,ipv6,in_port=200,ipv6_dst=fe80::b0a6:34ff:fee0:b640 actions=LOCAL + priority=101,ipv6,in_port=200,ipv6_dst=ff02::1:ff00:0/104 actions=LOCAL priority=100,ip,in_port=200,vlan_tci=0x0000/0x1000,dl_dst=02:bb:5e:36:06:4b actions=output:2 priority=100,ipv6,in_port=200,vlan_tci=0x0000/0x1000,dl_dst=02:bb:5e:36:06:4b 
actions=output:2 priority=100,ip,in_port=200,vlan_tci=0x1000/0x1000,dl_dst=02:bb:5e:36:06:4b actions=strip_vlan,output:70 diff --git a/lte/gateway/python/magma/pipelined/tests/test_qfi_gtp.py b/lte/gateway/python/magma/pipelined/tests/test_qfi_gtp.py index dbe2eb7c2549..f8cd704d1d24 100644 --- a/lte/gateway/python/magma/pipelined/tests/test_qfi_gtp.py +++ b/lte/gateway/python/magma/pipelined/tests/test_qfi_gtp.py @@ -103,7 +103,7 @@ def tearDownClass(cls): stop_ryu_app_thread(cls.thread) BridgeTools.destroy_bridge(cls.BRIDGE) - def test_qfi_set_tunnel_flows(self): + def test_qfi_tunnel_flows(self): # Need to delete all default flows in table 0 before # install the specific flows test case. @@ -122,23 +122,22 @@ def test_qfi_set_tunnel_flows(self): snapshot_verifier = SnapshotVerifier( self, self.BRIDGE, self.service_manager, + snapshot_name='with_qfi_flows', + include_stats=False, ) with snapshot_verifier: pass - def test_qfi_delete_tunnel_flows(self): - - ue_ip_addr = "192.168.128.30" - ip_flow_dl = IPFlowDL(set_params=0) self.classifier_controller.delete_tunnel_flows( 1, IPAddress(version=IPAddress.IPV4, address=ue_ip_addr.encode('utf-8')), - ip_flow_dl=ip_flow_dl, + ip_flow_dl=ip_flow_dl, session_qfi=9, ) snapshot_verifier = SnapshotVerifier( self, self.BRIDGE, self.service_manager, include_stats=False, + snapshot_name='empty', ) with snapshot_verifier: pass diff --git a/lte/gateway/python/magma/policydb/BUILD.bazel b/lte/gateway/python/magma/policydb/BUILD.bazel index 41c12c3b35d7..a266990c713b 100644 --- a/lte/gateway/python/magma/policydb/BUILD.bazel +++ b/lte/gateway/python/magma/policydb/BUILD.bazel @@ -28,7 +28,7 @@ py_binary( legacy_create_init = False, main = "main.py", python_version = "PY3", - visibility = ["//visibility:private"], + visibility = ["//lte/gateway/python:__pkg__"], deps = [ ":policydb_lib", "//lte/gateway/python/magma/policydb/servicers:policy_servicer", diff --git a/lte/gateway/python/magma/redirectd/BUILD.bazel 
b/lte/gateway/python/magma/redirectd/BUILD.bazel index 21175936c0e9..ff84de561aea 100644 --- a/lte/gateway/python/magma/redirectd/BUILD.bazel +++ b/lte/gateway/python/magma/redirectd/BUILD.bazel @@ -29,7 +29,7 @@ py_binary( legacy_create_init = False, main = "main.py", python_version = "PY3", - visibility = ["//visibility:private"], + visibility = ["//lte/gateway/python:__pkg__"], deps = [ ":redirect_server", "//lte/protos:mconfigs_python_proto", diff --git a/lte/gateway/python/magma/smsd/BUILD.bazel b/lte/gateway/python/magma/smsd/BUILD.bazel index 541f621ff83b..07b29028e938 100644 --- a/lte/gateway/python/magma/smsd/BUILD.bazel +++ b/lte/gateway/python/magma/smsd/BUILD.bazel @@ -28,7 +28,7 @@ py_binary( legacy_create_init = False, main = "main.py", python_version = "PY3", - visibility = ["//visibility:private"], + visibility = ["//lte/gateway/python:__pkg__"], deps = [ ":smsd_lib", "//lte/protos:sms_orc8r_python_grpc", diff --git a/lte/gateway/python/scripts/BUILD.bazel b/lte/gateway/python/scripts/BUILD.bazel index 0b00931e6c4e..fdb6fdbf6841 100644 --- a/lte/gateway/python/scripts/BUILD.bazel +++ b/lte/gateway/python/scripts/BUILD.bazel @@ -10,7 +10,68 @@ # limitations under the License. 
load("@python_deps//:requirements.bzl", "requirement") +load("@rules_pkg//pkg:mappings.bzl", "pkg_filegroup", "pkg_mklink") load("@rules_python//python:defs.bzl", "py_binary") +load("//bazel:deb_build.bzl", "PY_DEST") +load("//bazel:runfiles.bzl", "expand_runfiles") + +SCRIPTS = [ + "agw_health_cli", + "config_stateless_agw", + "cpe_monitoring_cli", + "create_oai_certs", + "dp_probe_cli", + "enodebd_cli", + "fake_user", + "feg_hello_cli", + "generate_dnsd_config", + "generate_oai_config", + "ha_cli", + "hello_cli", + "icmpv6", + "mobility_cli", + "mobility_dhcp_cli", + "ocs_cli", + "packet_ryu_cli", + "packet_tracer_cli", + "pcrf_cli", + "pipelined_cli", + "policydb_cli", + "s6a_proxy_cli", + "s6a_service_cli", + "session_manager_cli", + "sgs_cli", + "sms_cli", + "spgw_service_cli", + "state_cli", + "subscriber_cli", + "user_trace_cli", +] + +expand_runfiles( + name = "scripts_expanded", + targets = [":{script}".format(script = script) for script in SCRIPTS], +) + +[ + pkg_mklink( + name = "{script}_symlink".format(script = script), + link_name = "/usr/local/bin/{script}.py".format(script = script), + target = "{dest}/scripts/{script}.py".format( + dest = PY_DEST, + script = script, + ), + ) + for script in SCRIPTS +] + +pkg_filegroup( + name = "magma_lte_scripts", + srcs = [":scripts_expanded"] + + ["{script}_symlink".format(script = script) for script in SCRIPTS], + prefix = PY_DEST, + visibility = ["//lte/gateway/release:__pkg__"], +) MAGMA_ROOT = "../../../../" diff --git a/lte/gateway/python/setup.py b/lte/gateway/python/setup.py index 3b1ad4e01cd3..8bbe9789f5c2 100644 --- a/lte/gateway/python/setup.py +++ b/lte/gateway/python/setup.py @@ -134,7 +134,8 @@ ], extras_require={ 'dev': [ - 'grpcio-tools>=1.46.3', + # Should be kept in sync with the version in python.mk + 'grpcio-tools>=1.46.3,<1.49.0', 'coverage==6.4.2', 'iperf3>=0.1.11', 'parameterized==0.8.1', diff --git a/lte/gateway/release/BUILD.bazel b/lte/gateway/release/BUILD.bazel index 
32b3d1fe2545..437014125d7f 100644 --- a/lte/gateway/release/BUILD.bazel +++ b/lte/gateway/release/BUILD.bazel @@ -16,8 +16,10 @@ bazel build lte/gateway/release:sctpd_deb_pkg --config=production """ load("@rules_pkg//pkg:deb.bzl", "pkg_deb") -load("@rules_pkg//pkg:mappings.bzl", "pkg_attributes", "pkg_files") +load("@rules_pkg//pkg:mappings.bzl", "pkg_attributes", "pkg_filegroup", "pkg_files") load("@rules_pkg//pkg:tar.bzl", "pkg_tar") +load("//bazel:deb_build.bzl", "PY_DEST") +load(":deb_dependencies.bzl", "MAGMA_DEPS", "OAI_DEPS", "OVS_DEPS") SCTPD_PKGNAME = "magma-sctpd" @@ -25,12 +27,22 @@ VERSION = "1.8.0" ARCH = "amd64" -FILE_NAME = "{name}_{ver}_{arch}".format( +SCTPD_FILE_NAME = "{name}_{ver}_{arch}".format( name = SCTPD_PKGNAME, arch = ARCH, ver = VERSION, ) +MAGMA_PKGNAME = "magma" + +MAGMA_FILE_NAME = "{name}_{ver}_{arch}".format( + name = MAGMA_PKGNAME, + arch = ARCH, + ver = VERSION, +) + +### SCTPD BUILD + genrule( name = "gen_sctpd_version", outs = ["version"], @@ -43,12 +55,6 @@ pkg_files( prefix = "/usr/local/share/sctpd/", ) -pkg_files( - name = "sctpd_service_definition", - srcs = ["//lte/gateway/deploy/roles/magma/files/systemd:sctpd.service"], - prefix = "/etc/systemd/system/", -) - pkg_files( name = "sctpd_binary", srcs = ["//lte/gateway/c/sctpd/src:sctpd"], @@ -60,11 +66,10 @@ pkg_tar( name = "sctpd_content", srcs = [ ":sctpd_binary", - ":sctpd_service_definition", ":sctpd_version", + "//lte/gateway/deploy/roles/magma/files/systemd:sctpd_service_definition", ], - package_dir = "./", - package_file_name = "{fname}.tar".format(fname = FILE_NAME), + package_file_name = "{fname}.tar".format(fname = SCTPD_FILE_NAME), ) pkg_deb( @@ -73,6 +78,112 @@ pkg_deb( description = "Magma SCTPD", maintainer = "Copyright (c) 2022 The Magma Authors", package = SCTPD_PKGNAME, - package_file_name = "{fname}.deb".format(fname = FILE_NAME), + package_file_name = "{fname}.deb".format(fname = SCTPD_FILE_NAME), + version = VERSION, +) + +### MAGMA BUILD + 
+pkg_filegroup( + name = "magma_service_definitions", + srcs = [ + "//lte/gateway/deploy/roles/magma/files/systemd:magma_lte_service_definitions", + "//orc8r/tools/ansible/roles/fluent_bit/files:magma_fluent_bit_service_definition", + "//orc8r/tools/ansible/roles/gateway_services/files:magma_orc8r_service_definitions", + ], + prefix = "/etc/systemd/system", +) + +pkg_filegroup( + name = "magma_ebpf", + srcs = [ + "//lte/gateway/python/magma/kernsnoopd/ebpf:magma_ebpf_kernsnoopd", + "//lte/gateway/python/magma/pipelined/ebpf:magma_ebpf_pipelined", + ], + prefix = "/var/opt/magma/ebpf", +) + +pkg_filegroup( + name = "magma_python_scripts", + srcs = [ + "//lte/gateway/python/load_tests:magma_lte_loadtest_scripts", + "//lte/gateway/python/scripts:magma_lte_scripts", + "//orc8r/gateway/python/scripts:magma_orc8r_scripts", + ], +) + +pkg_files( + name = "magma_go_binaries", + srcs = ["//feg/gateway/services/envoy_controller"], + attributes = pkg_attributes(mode = "0755"), + prefix = "/usr/local/bin", +) + +pkg_files( + name = "magma_c_binaries", + srcs = [ + "//lte/gateway/c/connection_tracker/src:connectiond", + "//lte/gateway/c/core:agw_of", + "//lte/gateway/c/li_agent/src:liagentd", + "//lte/gateway/c/session_manager:sessiond", + ], + attributes = pkg_attributes(mode = "0755"), + prefix = "/usr/local/bin", + renames = {"//lte/gateway/c/core:agw_of": "mme"}, +) + +pkg_filegroup( + name = "magma_python_services", + srcs = [ + "//lte/gateway/python:magma_python_lte_services", + "//orc8r/gateway/python:magma_python_orc8r_services", + ], + prefix = PY_DEST, +) + +pkg_filegroup( + name = "magma_configs", + srcs = [ + "//lte/gateway/configs:magma_config_files", + "//lte/gateway/configs/templates:magma_lte_config_template_files", + "//orc8r/gateway/configs/templates:magma_orc8r_config_template_files", + ], + prefix = "/etc/magma", +) + +pkg_files( + name = "magma_config_stretch_snapshot", + srcs = [":stretch_snapshot"], + prefix = "/usr/local/share/magma", +) + +pkg_tar( + 
name = "magma_content", + srcs = [ + ":magma_c_binaries", + ":magma_config_stretch_snapshot", + ":magma_configs", + ":magma_ebpf", + ":magma_go_binaries", + ":magma_python_scripts", + ":magma_python_services", + ":magma_service_definitions", + "//lte/gateway/deploy/roles/magma/files:ansible_configs", + "//orc8r/tools/ansible/roles/fluent_bit/files:magma_config_fluent_bit", + ], + package_file_name = "{fname}.tar".format(fname = MAGMA_FILE_NAME), +) + +pkg_deb( + name = "magma_deb_pkg", + data = ":magma_content", + depends = MAGMA_DEPS + OAI_DEPS + OVS_DEPS, + description = "Magma Access Gateway", + maintainer = "Copyright (c) 2022 The Magma Authors", + package = MAGMA_PKGNAME, + package_file_name = "{fname}.deb".format(fname = MAGMA_FILE_NAME), + postinst = ":magma-postinst-bazel", + provides = [MAGMA_PKGNAME], + replaces = [MAGMA_PKGNAME], version = VERSION, ) diff --git a/lte/gateway/release/build-magma.sh b/lte/gateway/release/build-magma.sh index 9675c4565b43..0d44fb81e660 100755 --- a/lte/gateway/release/build-magma.sh +++ b/lte/gateway/release/build-magma.sh @@ -397,8 +397,6 @@ ${ANSIBLE_FILES}/99-magma.conf=/etc/sysctl.d/ \ ${ANSIBLE_FILES}/magma_ifaces_gtp=/etc/network/interfaces.d/gtp \ ${ANSIBLE_FILES}/20auto-upgrades=/etc/apt/apt.conf.d/20auto-upgrades \ ${ANSIBLE_FILES}/coredump=/usr/local/bin/ \ -${ANSIBLE_FILES}/nx_actions_3.5.py=/usr/local/lib/python3.8/dist-packages/ryu/ofproto/nx_actions.py.magma \ -${ANSIBLE_FILES}/service.py=/usr/local/lib/python3.8/dist-packages/ryu/app/ofctl/service.py.magma \ ${MAGMA_ROOT}/lte/gateway/release/stretch_snapshot=/usr/local/share/magma/ \ ${MAGMA_ROOT}/orc8r/tools/ansible/roles/fluent_bit/files/60-fluent-bit.conf=/etc/rsyslog.d/60-fluent-bit.conf \ ${ANSIBLE_FILES}/set_irq_affinity=/usr/local/bin/ \ diff --git a/lte/gateway/release/deb_dependencies.bzl b/lte/gateway/release/deb_dependencies.bzl new file mode 100644 index 000000000000..f3d2965c2734 --- /dev/null +++ b/lte/gateway/release/deb_dependencies.bzl @@ 
-0,0 +1,77 @@ +# Copyright 2022 The Magma Authors. + +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +External dependencies of the magma debian build. +""" + +SCTPD_MIN_VERSION = "1.8.0" # earliest version of sctpd with which this magma version is compatible + +# Magma system dependencies: anything that we depend on at the top level, add +# here. +MAGMA_DEPS = [ + "grpc-dev (>= 1.15.0)", + "lighttpd (>= 1.4.45)", + "libxslt1.1", + "nghttp2-proxy (>= 1.18.1)", + "redis-server (>= 3.2.0)", + "sudo", + "dnsmasq (>= 2.7)", + "net-tools", # for ifconfig + "python3-pip", + "python3-apt", # The version in pypi is abandoned and broken on stretch + "libsystemd-dev", + "libyaml-cpp-dev", # install yaml parser + "libgoogle-glog-dev", + "python-redis", + "magma-cpp-redis", + "libfolly-dev", # required for C++ services + "libdouble-conversion-dev", # required for folly + "libboost-chrono-dev", # required for folly + "ntpdate", # required for eventd time synchronization + "tshark", # required for call tracing + "libtins-dev", # required for Connection tracker + "libmnl-dev", # required for Connection tracker + "getenvoy-envoy", # for envoy dep + "uuid-dev", # for liagentd + "libprotobuf17 (>= 3.0.0)", + "nlohmann-json3-dev", + "sentry-native", # sessiond + "td-agent-bit (>= 1.7.8)", + # eBPF compile and load tools for kernsnoopd and AGW datapath + # Ubuntu bcc lib (bpfcc-tools) is pretty old, use magma repo package + "bcc-tools", + "wireguard", +] + +# OAI runtime dependencies +OAI_DEPS = [ + "libconfig9", + "oai-asn1c", + "oai-gnutls (>= 3.1.23)", + "oai-nettle (>= 
1.0.1)", + "prometheus-cpp-dev (>= 1.0.2)", + "liblfds710", + "libsctp-dev", + "magma-sctpd (>= {min_version})".format(min_version = SCTPD_MIN_VERSION), + "libczmq-dev (>= 4.0.2-7)", + "libasan5", + "oai-freediameter (>= 0.0.2)", +] + +# OVS runtime dependencies +OVS_DEPS = [ + "magma-libfluid (>= 0.1.0.7)", + "libopenvswitch (>= 2.15.4-8)", + "openvswitch-switch (>= 2.15.4-8)", + "openvswitch-common (>= 2.15.4-8)", + "openvswitch-datapath-dkms (>= 2.15.4-8)", +] diff --git a/lte/gateway/release/magma-postinst b/lte/gateway/release/magma-postinst index 14079d5dfedb..b18e1e648656 100644 --- a/lte/gateway/release/magma-postinst +++ b/lte/gateway/release/magma-postinst @@ -42,14 +42,6 @@ systemctl disable dnsmasq systemctl stop lighttpd systemctl disable lighttpd -# Copy over ipfix patch -cp /usr/local/lib/python3.8/dist-packages/ryu/ofproto/nx_actions.py.magma /usr/local/lib/python3.8/dist-packages/ryu/ofproto/nx_actions.py -cp /usr/local/lib/python3.8/dist-packages/ryu/app/ofctl/service.py.magma /usr/local/lib/python3.8/dist-packages/ryu/app/ofctl/service.py -if [ -f /usr/lib/python3/dist-packages/ryu/ofproto/nx_actions.py ]; then - cp /usr/local/lib/python3.8/dist-packages/ryu/ofproto/nx_actions.py /usr/lib/python3/dist-packages/ryu/ofproto/ - cp /usr/local/lib/python3.8/dist-packages/ryu/app/ofctl/service.py /usr/lib/python3/dist-packages/ryu/app/ofctl/service.py -fi - # Restart rsyslog to pick up fluent-bit config, create fluent-bit DB directory cp /etc/logrotate.d/rsyslog /etc/logrotate.d/rsyslog.orig cp /etc/logrotate.d/rsyslog.magma /etc/logrotate.d/rsyslog diff --git a/lte/gateway/release/magma-postinst-bazel b/lte/gateway/release/magma-postinst-bazel new file mode 100644 index 000000000000..c5dcfa851b84 --- /dev/null +++ b/lte/gateway/release/magma-postinst-bazel @@ -0,0 +1,57 @@ +#!/bin/bash +# Copyright 2022 The Magma Authors. 
+ +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Delete OVS bridge on boot +sed -i "s/.*OVS_CTL_OPTS.*/OVS_CTL_OPTS='--delete-bridges'/" /etc/default/openvswitch-switch + +# Create /var/core directory +mkdir -p /var/core + +value=`cat /usr/local/share/magma/commit_hash` +if grep -q "COMMIT_HASH" /etc/environment +then + sudo sed -i -e "s/^COMMIT_HASH.*/$value/" /etc/environment +else + echo "$value" | sudo tee -a /etc/environment +fi + +# Set magmad service to start on boot +systemctl enable -f magma@magmad.service + +# Installation of the redis-server Debian package has a post-install +# script that starts a redis-server process. We kill and disable this +# process, allowing us to manage the process using magmad and our own +# systemd files. +systemctl stop redis-server +systemctl disable redis-server + +# Dnsmasq also starts up post-install. +systemctl stop dnsmasq +systemctl disable dnsmasq + +# Lighttpd also starts up post-install. 
+systemctl stop lighttpd +systemctl disable lighttpd + +# Restart rsyslog to pick up fluent-bit config, create fluent-bit DB directory +cp /etc/logrotate.d/rsyslog /etc/logrotate.d/rsyslog.orig +cp /etc/logrotate.d/rsyslog.magma /etc/logrotate.d/rsyslog +systemctl restart rsyslog +mkdir -p /var/opt/magma/fluent-bit + +# Restart all services on package install +if [ -f /var/run/sctpd.version ]; then + /usr/bin/env python3 -c 'from distutils.version import LooseVersion; import sys; ver = lambda n: LooseVersion(open(n).read()); sys.exit(1) if ver("/var/run/sctpd.version") < ver("/usr/local/share/magma/sctpd_min_version") else sys.exit(0)' || systemctl restart sctpd +else + killall -9 sctpd || true +fi +systemctl restart magma@* || true diff --git a/lte/gateway/release/magma.lockfile.ubuntu b/lte/gateway/release/magma.lockfile.ubuntu index 671ccd19f409..5f362d4153c4 100644 --- a/lte/gateway/release/magma.lockfile.ubuntu +++ b/lte/gateway/release/magma.lockfile.ubuntu @@ -1040,7 +1040,7 @@ "root": true, "source": "apt", "sysdep": "python3-spyne", - "version": "2.13.16" + "version": "2.13.15" }, "strict-rfc3339": { "root": true, @@ -1258,7 +1258,7 @@ "version": "0.0.3" }, "spyne": { - "version": "2.13.16" + "version": "2.13.15" }, "strict-rfc3339": { "version": "0.7" diff --git a/lte/protos/oai/s1ap_state.proto b/lte/protos/oai/s1ap_state.proto index 5ec5cb9b4ec9..5a5480ab1ae8 100644 --- a/lte/protos/oai/s1ap_state.proto +++ b/lte/protos/oai/s1ap_state.proto @@ -21,6 +21,17 @@ message SupportedTaList { repeated SupportedTaiItems supported_tai_items = 2; } +message ERabAdmittedItem { + uint32 e_rab_id = 1; + bytes transport_layer_address = 2; + uint32 gtp_teid = 3; +} + +message ERabAdmittedList { + uint32 no_of_items = 1; + repeated ERabAdmittedItem item = 2; +} + message EnbDescription { uint32 enb_id = 1; @@ -41,8 +52,17 @@ message EnbDescription { map ue_id_map = 14; // mme_ue_s1ap_id -> comp_s1ap_id } +enum S1apUeState { + S1AP_UE_INVALID_STATE = 0; + 
S1AP_UE_WAITING_ICSR = 1; ///< Waiting for Initial Context Setup Response + S1AP_UE_HANDOVER = 2; ///< Handover procedure triggered + S1AP_UE_CONNECTED = 3; ///< UE context ready + S1AP_UE_WAITING_CRC = 4; /// UE Context release Procedure initiated , waiting for + /// UE Context Release Complete +} + message UeDescription { - int32 s1_ue_state = 2; // enum s1_ue_state_s + int32 s1_ue_state = 2 [deprecated = true]; // enum s1_ue_state_s uint32 enb_ue_s1ap_id = 3; // enb_ue_s1ap_id_t uint32 mme_ue_s1ap_id = 4; // mme_ue_s1ap_id_t @@ -54,6 +74,8 @@ message UeDescription { uint32 sctp_assoc_id = 8; // sctp_assoc_id_t S1apHandoverState s1ap_handover_state = 9; // s1ap_handover_state_t + uint64 comp_s1ap_id = 10; // sctp_assoc_id & enb_ue_s1ap_id + S1apUeState s1ap_ue_state = 11; } message S1apState { @@ -74,4 +96,8 @@ message S1apHandoverState { uint32 target_enb_ue_s1ap_id = 4; // enb_ue_s1ap_id_t uint32 target_sctp_stream_recv = 5; // sctp_stream_id_t uint32 target_sctp_stream_send = 6; // sctp_stream_id_t + uint32 source_enb_ue_s1ap_id = 7; + uint32 source_sctp_stream_recv = 8; + uint32 source_sctp_stream_send = 9; + ERabAdmittedList e_rab_admitted_list = 10; } diff --git a/nms/Dockerfile b/nms/Dockerfile index 5d7818e0e945..8afa468de20b 100644 --- a/nms/Dockerfile +++ b/nms/Dockerfile @@ -9,7 +9,7 @@ COPY package.json yarn.lock babel.config.js ./ # Install node dependencies ENV PUPPETEER_SKIP_DOWNLOAD "true" -RUN yarn install --mutex network --frozen-lockfile && yarn cache clean +RUN yarn install --mutex network --frozen-lockfile --network-timeout 90000 && yarn cache clean # Build our static files COPY . . 
diff --git a/nms/app/components/AutorefreshCheckbox.tsx b/nms/app/components/AutorefreshCheckbox.tsx index 7eb51d19f17f..e8ce7c183dc8 100644 --- a/nms/app/components/AutorefreshCheckbox.tsx +++ b/nms/app/components/AutorefreshCheckbox.tsx @@ -16,8 +16,8 @@ import FormControlLabel from '@mui/material/FormControlLabel'; import React from 'react'; import Text from '../theme/design-system/Text'; import makeStyles from '@mui/styles/makeStyles'; -import moment from 'moment'; import {colors} from '../theme/default'; +import {subHours} from 'date-fns'; import {useCallback, useEffect, useState} from 'react'; export type UseRefreshingDateRangeHook = ( @@ -25,10 +25,10 @@ export type UseRefreshingDateRangeHook = ( updateInterval: number, onDateRangeChange: () => void, ) => { - startDate: moment.Moment; - endDate: moment.Moment; - setStartDate: (date: moment.Moment) => void; - setEndDate: (date: moment.Moment) => void; + startDate: Date; + endDate: Date; + setStartDate: (date: Date) => void; + setEndDate: (date: Date) => void; }; export const useRefreshingDateRange: UseRefreshingDateRangeHook = ( @@ -36,13 +36,13 @@ export const useRefreshingDateRange: UseRefreshingDateRangeHook = ( updateInterval, onDateRangeChange, ) => { - const [startDate, setStartDate] = useState(moment().subtract(3, 'hours')); - const [endDate, setEndDate] = useState(moment()); + const [startDate, setStartDate] = useState(subHours(new Date(), 3)); + const [endDate, setEndDate] = useState(new Date()); useEffect(() => { if (isAutoRefreshing) { const interval = setInterval(() => { - setEndDate(moment()); + setEndDate(new Date()); onDateRangeChange(); }, updateInterval); @@ -51,7 +51,7 @@ export const useRefreshingDateRange: UseRefreshingDateRangeHook = ( }, [endDate, startDate, onDateRangeChange, isAutoRefreshing, updateInterval]); const modifiedSetStartDate = useCallback( - (date: moment.Moment) => { + (date: Date) => { setStartDate(date); onDateRangeChange(); }, @@ -59,7 +59,7 @@ export const 
useRefreshingDateRange: UseRefreshingDateRangeHook = ( ); const modifiedSetEndDate = useCallback( - (date: moment.Moment) => { + (date: Date) => { setEndDate(date); onDateRangeChange(); }, diff --git a/nms/app/components/CustomMetrics.tsx b/nms/app/components/CustomMetrics.tsx index fa5b123da492..7cef24f34352 100644 --- a/nms/app/components/CustomMetrics.tsx +++ b/nms/app/components/CustomMetrics.tsx @@ -10,66 +10,74 @@ * See the License for the specific language governing permissions and * limitations under the License. */ +import 'chartjs-adapter-date-fns'; import React from 'react'; -import moment from 'moment'; - import {Bar, Line} from 'react-chartjs-2'; -import {ChartData, ChartTooltipItem, TimeUnit} from 'chart.js'; +import {ScatterDataPoint, TimeUnit, TooltipItem} from 'chart.js'; +import { + add, + differenceInDays, + differenceInHours, + isBefore, + toDate, +} from 'date-fns'; export function getStepString(delta: number, unit: string) { return delta.toString() + unit[0]; } -export function getStep( - start: moment.Moment, - end: moment.Moment, -): [number, TimeUnit, string] { - const d = moment.duration(end.diff(start)); - if (d.asMinutes() <= 60.5) { - return [5, 'minute', 'HH:mm']; - } else if (d.asHours() <= 3.5) { - return [15, 'minute', 'HH:mm']; - } else if (d.asHours() <= 6.5) { - return [15, 'minute', 'HH:mm']; - } else if (d.asHours() <= 12.5) { - return [1, 'hour', 'HH:mm']; - } else if (d.asHours() <= 24.5) { - return [2, 'hour', 'HH:mm']; - } else if (d.asDays() <= 1.5) { - return [3, 'hour', 'DD-MM-YY HH:mm']; - } else if (d.asDays() <= 3.5) { - return [6, 'hour', 'DD-MM-YY HH:mm']; - } else if (d.asDays() <= 7.5) { +export function getStep(start: Date, end: Date): [number, TimeUnit, string] { + const durationInHours = differenceInHours(end, start); + const durationInDays = differenceInDays(end, start); + + if (durationInDays > 7.5) { + return [24, 'hour', 'DD-MM-YYYY']; + } else if (durationInDays > 3.5) { return [12, 'hour', 'DD-MM-YY 
HH:mm']; + } else if (durationInDays > 1.5) { + return [6, 'hour', 'DD-MM-YY HH:mm']; + } else if (durationInHours > 24.5) { + return [3, 'hour', 'DD-MM-YY HH:mm']; + } else if (durationInHours > 12.5) { + return [2, 'hour', 'HH:mm']; + } else if (durationInHours > 6.5) { + return [1, 'hour', 'HH:mm']; + } else if (durationInHours > 3.5) { + return [15, 'minute', 'HH:mm']; + } else { + return [5, 'minute', 'HH:mm']; } - return [24, 'hour', 'DD-MM-YYYY']; } // for querying event and log count, the api doesn't have a step attribute // hence we have to split the start and end window into several sets of // [start, end] queries which can then be queried in parallel export function getQueryRanges( - start: moment.Moment, - end: moment.Moment, + start: Date, + end: Date, delta: number, unit: TimeUnit, -): Array<[moment.Moment, moment.Moment]> { - const queries: Array<[moment.Moment, moment.Moment]> = []; - let s = start.clone(); - // go back delta time so that we get the total number of events - // or logs at that 's' point of time - s = s.subtract(delta, unit); - while (end.diff(s, unit) >= delta) { - const e = s.clone(); - e.add(delta, unit); - queries.push([s, e]); - s = e; +): Array<[Date, Date]> { + const queries: Array<[Date, Date]> = []; + let intervalStart = toDate(start); + while (isBefore(intervalStart, end)) { + const intervalEnd = add(intervalStart, {[timeUnitToDuration(unit)]: delta}); + queries.push([intervalStart, intervalEnd]); + intervalStart = intervalEnd; } return queries; } +function timeUnitToDuration(unit: TimeUnit): keyof Duration { + if (unit === 'millisecond' || unit === 'quarter') { + throw new Error(`${unit} cannot be converted to Duration!`); + } else { + return `${unit}s`; + } +} + export type DatasetType = { - t: number; + x: number; y: number; }; @@ -85,22 +93,24 @@ export type Dataset = { }; type Props = { - start?: moment.Moment; - end?: moment.Moment; + start?: Date; + end?: Date; delta?: number; dataset: Array; unit?: TimeUnit; yLabel?: 
string; - tooltipHandler?: (tooltipItem: ChartTooltipItem, data: ChartData) => string; + tooltipHandler?: ( + tooltipItem: TooltipItem<'bar'> | TooltipItem<'line'>, + ) => string; }; -function defaultTooltip( - tooltipItem: ChartTooltipItem, - data: ChartData, - props: Props, +export function defaultTooltip( + tooltipItem: TooltipItem<'bar'> | TooltipItem<'line'>, + props: {unit?: string | TimeUnit}, ) { - const dataSet = data.datasets![tooltipItem.datasetIndex!]; - return `${dataSet.label!}: ${tooltipItem.yLabel!} ${props.unit ?? ''}`; + const {dataset, dataIndex} = tooltipItem; + const value = (dataset.data[dataIndex] as ScatterDataPoint).y; + return `${dataset.label!}: ${value} ${props.unit ?? ''}`; } export default function CustomHistogram(props: Props) { @@ -112,52 +122,51 @@ export default function CustomHistogram(props: Props) { options={{ maintainAspectRatio: false, scales: { - xAxes: [ - { - stacked: true, - gridLines: { - display: false, - }, - type: 'time', - ticks: { - source: 'data', - }, - time: { - unit: props?.unit, - round: 'second', - tooltipFormat: 'YYYY/MM/DD h:mm:ss a', - }, - scaleLabel: { - display: true, - labelString: 'Date', - }, + x: { + stacked: true, + grid: { + display: false, }, - ], - yAxes: [ - { - stacked: true, - gridLines: { - drawBorder: true, - }, - ticks: { - maxTicksLimit: 3, - }, - scaleLabel: { - display: true, - labelString: props?.yLabel ?? '', - }, + type: 'time', + ticks: { + source: 'data', + }, + time: { + unit: props?.unit, + round: 'second', + tooltipFormat: 'yyyy/MM/dd h:mm:ss a', + }, + title: { + display: true, + text: 'Date', }, - ], + }, + + y: { + stacked: true, + grid: { + drawBorder: true, + }, + ticks: { + maxTicksLimit: 3, + }, + title: { + display: true, + text: props?.yLabel ?? '', + }, + }, }, - tooltips: { - enabled: true, - mode: 'nearest', - callbacks: { - label: (tooltipItem: ChartTooltipItem, data: ChartData) => { - return ( - props.tooltipHandler?.(tooltipItem, data) ?? 
- defaultTooltip(tooltipItem, data, props) - ); + plugins: { + tooltip: { + enabled: true, + mode: 'nearest', + callbacks: { + label(tooltipItem) { + return ( + props.tooltipHandler?.(tooltipItem) ?? + defaultTooltip(tooltipItem, props) + ); + }, }, }, }, @@ -175,62 +184,60 @@ export function CustomLineChart(props: Props) { data={{ datasets: props.dataset, }} - legend={{ - display: true, - position: 'top', - align: 'end', - labels: { - boxWidth: 12, - }, - }} options={{ maintainAspectRatio: false, scales: { - xAxes: [ - { - gridLines: { - display: false, - }, - ticks: { - maxTicksLimit: 10, - }, - type: 'time', - time: { - unit: props?.unit, - round: 'second', - tooltipFormat: 'YYYY/MM/DD h:mm:ss a', - }, - scaleLabel: { - display: true, - labelString: 'Date', - }, + x: { + grid: { + display: false, }, - ], - yAxes: [ - { - gridLines: { - drawBorder: true, - }, - ticks: { - maxTicksLimit: 5, - }, - scaleLabel: { - display: true, - labelString: props?.yLabel ?? '', - }, - position: 'left', + ticks: { + maxTicksLimit: 10, + }, + type: 'time', + time: { + unit: props?.unit, + round: 'second', + tooltipFormat: 'yyyy/MM/dd h:mm:ss a', + }, + title: { + display: true, + text: 'Date', + }, + }, + y: { + grid: { + drawBorder: true, }, - ], + ticks: { + maxTicksLimit: 5, + }, + title: { + display: true, + text: props?.yLabel ?? '', + }, + position: 'left', + }, }, - tooltips: { - enabled: true, - mode: 'nearest', - callbacks: { - label: (tooltipItem: ChartTooltipItem, data: ChartData) => { - return ( - props.tooltipHandler?.(tooltipItem, data) ?? - defaultTooltip(tooltipItem, data, props) - ); + plugins: { + legend: { + display: true, + position: 'top', + align: 'end', + labels: { + boxWidth: 12, + }, + }, + tooltip: { + enabled: true, + mode: 'nearest', + callbacks: { + label(tooltipItem) { + return ( + props.tooltipHandler?.(tooltipItem) ?? 
+ defaultTooltip(tooltipItem, props) + ); + }, }, }, }, diff --git a/nms/app/components/DateTimeMetricChart.tsx b/nms/app/components/DateTimeMetricChart.tsx index 824280e3d506..e963b0b9f116 100644 --- a/nms/app/components/DateTimeMetricChart.tsx +++ b/nms/app/components/DateTimeMetricChart.tsx @@ -19,12 +19,12 @@ import DataUsageIcon from '@mui/icons-material/DataUsage'; import Grid from '@mui/material/Grid'; import React from 'react'; import Text from '../theme/design-system/Text'; -import moment from 'moment'; import TextField from '@mui/material/TextField'; import {DateTimePicker} from '@mui/x-date-pickers/DateTimePicker'; import {colors} from '../theme/default'; import {makeStyles} from '@mui/styles'; +import {subHours} from 'date-fns'; import {useState} from 'react'; export type DateTimeMetricChartProps = { @@ -32,8 +32,8 @@ export type DateTimeMetricChartProps = { queries: Array; legendLabels: Array; unit?: string; - startDate?: moment.Moment; - endDate?: moment.Moment; + startDate?: Date; + endDate?: Date; }; const useStyles = makeStyles({ @@ -46,8 +46,8 @@ const CHART_COLORS = [colors.secondary.dodgerBlue, colors.data.flamePea]; export default function DateTimeMetricChart(props: DateTimeMetricChartProps) { const classes = useStyles(); - const [startDate, setStartDate] = useState(moment().subtract(3, 'hours')); - const [endDate, setEndDate] = useState(moment()); + const [startDate, setStartDate] = useState(subHours(new Date(), 3)); + const [endDate, setEndDate] = useState(new Date()); function Filter() { return ( @@ -63,7 +63,7 @@ export default function DateTimeMetricChart(props: DateTimeMetricChartProps) { maxDate={endDate} disableFuture value={startDate} - onChange={date => setStartDate(date as moment.Moment)} + onChange={date => setStartDate(date!)} /> @@ -76,7 +76,7 @@ export default function DateTimeMetricChart(props: DateTimeMetricChartProps) { renderInput={props => } disableFuture value={endDate} - onChange={date => setEndDate(date as moment.Moment)} 
+ onChange={date => setEndDate(date!)} /> diff --git a/nms/app/components/EventAlertChart.tsx b/nms/app/components/EventAlertChart.tsx index 4ed464b14667..63484746d97a 100644 --- a/nms/app/components/EventAlertChart.tsx +++ b/nms/app/components/EventAlertChart.tsx @@ -18,7 +18,6 @@ import DataUsageIcon from '@mui/icons-material/DataUsage'; import LoadingFiller from './LoadingFiller'; import MagmaAPI from '../api/MagmaAPI'; import React from 'react'; -import moment from 'moment'; import nullthrows from '../../shared/util/nullthrows'; import { CustomLineChart, @@ -28,6 +27,7 @@ import { } from './CustomMetrics'; import {TimeUnit} from 'chart.js'; import {colors} from '../theme/default'; +import {getUnixTime} from 'date-fns'; import {useEffect, useState} from 'react'; import {useEnqueueSnackbar} from '../hooks/useSnackbar'; import {useParams} from 'react-router-dom'; @@ -36,13 +36,13 @@ import type {NetworkId} from '../../shared/types/network'; import type {OptionsObject} from 'notistack'; type Props = { - startEnd: [moment.Moment, moment.Moment]; + startEnd: [Date, Date]; }; type DatasetFetchProps = { networkId: NetworkId; - start: moment.Moment; - end: moment.Moment; + start: Date; + end: Date; delta: number; unit: TimeUnit; enqueueSnackbar: ( @@ -82,13 +82,13 @@ async function getEventAlertDataset(props: DatasetFetchProps) { if (r === null || r === undefined) { return { - t: e.unix() * 1000, + x: getUnixTime(e) * 1000, y: 0, }; } return { - t: e.unix() * 1000, + x: getUnixTime(e) * 1000, y: r, }; }); @@ -98,7 +98,7 @@ async function getEventAlertDataset(props: DatasetFetchProps) { return []; }); - const alertsData: Array<{t: number; y: number}> = []; + const alertsData: Array<{x: number; y: number}> = []; try { const alertPromResp = ( @@ -113,7 +113,7 @@ async function getEventAlertDataset(props: DatasetFetchProps) { alertPromResp.data?.result.forEach(it => it['values']?.map(i => { alertsData.push({ - t: parseInt(i[0]) * 1000, + x: parseInt(i[0]) * 1000, y: 
parseFloat(i[1]), }); }), @@ -185,6 +185,7 @@ export default function EventAlertChart(props: Props) { }); const [delta, unit] = getStep(start, end); + useEffect(() => { // fetch queries const fetchAllData = async () => { diff --git a/nms/app/components/__tests__/EventAlertChartTest.tsx b/nms/app/components/__tests__/EventAlertChartTest.tsx index 14ed5681d562..e0e9eb37e09b 100644 --- a/nms/app/components/__tests__/EventAlertChartTest.tsx +++ b/nms/app/components/__tests__/EventAlertChartTest.tsx @@ -15,11 +15,11 @@ import EventAlertChart from '../EventAlertChart'; import MagmaAPI from '../../api/MagmaAPI'; import React from 'react'; import defaultTheme from '../../theme/default'; -import moment from 'moment'; import {MemoryRouter, Route, Routes} from 'react-router-dom'; import {StyledEngineProvider, ThemeProvider} from '@mui/material/styles'; import {mockAPI} from '../../util/TestUtils'; import {render, waitFor} from '@testing-library/react'; +import {subDays, subHours} from 'date-fns'; import type {PromqlReturnObject} from '../../../generated'; const mockMetricSt: PromqlReturnObject = { @@ -35,12 +35,48 @@ const mockMetricSt: PromqlReturnObject = { }, }; +const testCases = [ + { + startDate: subHours(new Date(), 4), + endDate: new Date(), + step: '15m', + }, + { + startDate: subDays(new Date(), 10), + endDate: new Date(), + step: '24h', + }, + { + startDate: new Date(), + endDate: subDays(new Date(), 10), + step: '5m', + }, +]; + +const Wrapper = (props: {startDate: Date; endDate: Date}) => ( + + + + + + + } + /> + + + + + +); + jest.mock('axios'); jest.mock('../../../app/hooks/useSnackbar'); // chart component was failing here so mocking this out -// this shouldn't affect the prop verification part in the react -// chart component +// this shouldn't affect the prop verification part in the React chart component // @ts-ignore window.HTMLCanvasElement.prototype.getContext = () => {}; @@ -54,51 +90,8 @@ describe('', () => { mockAPI(MagmaAPI.events, 
'eventsNetworkIdAboutCountGet'); }); - const testCases = [ - { - startDate: moment().subtract(2, 'hours'), - endDate: moment(), - step: '15m', - valid: true, - }, - { - startDate: moment().subtract(10, 'day'), - endDate: moment(), - step: '24h', - valid: true, - }, - { - startDate: moment(), - endDate: moment().subtract(10, 'day'), - step: '24h', - valid: false, - }, - ]; - - it.each(testCases)('renders', async tc => { - // const endDate = moment(); - // const startDate = moment().subtract(3, 'hours'); - const Wrapper = () => ( - - - - - - - } - /> - - - - - - ); - - render(); - const currentStep = tc.valid ? tc.step : '5m'; + it.each(testCases)('renders', async ({startDate, endDate, step}) => { + render(); await waitFor(() => expect( MagmaAPI.metrics.networksNetworkIdPrometheusQueryRangeGet, @@ -108,9 +101,9 @@ describe('', () => { expect( MagmaAPI.metrics.networksNetworkIdPrometheusQueryRangeGet, ).toBeCalledWith({ - start: tc.startDate.toISOString(), - end: tc.endDate.toISOString(), - step: currentStep, + start: startDate.toISOString(), + end: endDate.toISOString(), + step: step, networkId: 'mynetwork', query: 'sum(ALERTS)', }); diff --git a/nms/app/components/insights/AsyncMetric.tsx b/nms/app/components/insights/AsyncMetric.tsx index c478202e909f..7572491b64dd 100644 --- a/nms/app/components/insights/AsyncMetric.tsx +++ b/nms/app/components/insights/AsyncMetric.tsx @@ -12,14 +12,14 @@ */ import CircularProgress from '@mui/material/CircularProgress'; +import MagmaAPI from '../../api/MagmaAPI'; import React from 'react'; import Text from '../../theme/design-system/Text'; -import moment from 'moment'; +import {LayoutPosition, TimeUnit} from 'chart.js'; import {Line} from 'react-chartjs-2'; - -import MagmaAPI from '../../api/MagmaAPI'; -import {PositionType, TimeUnit} from 'chart.js'; import {PromqlMetric, PromqlMetricValue} from '../../../generated'; +import {defaultTooltip} from '../CustomMetrics'; +import {differenceInDays, differenceInHours, getUnixTime, sub} 
from 'date-fns'; import {makeStyles} from '@mui/styles'; import {useEffect, useMemo, useState} from 'react'; import {useEnqueueSnackbar} from '../../hooks/useSnackbar'; @@ -45,7 +45,7 @@ export type ChartStyle = { pointRadius: number; }; legend: { - position: PositionType; + position: LayoutPosition; align: 'center' | 'end' | 'start'; }; }; @@ -56,7 +56,7 @@ type Props = { queries: Array; legendLabels?: Array; timeRange: TimeRange; - startEnd?: [moment.Moment, moment.Moment]; + startEnd?: [Date, Date]; networkId?: string; style?: ChartStyle; height?: number; @@ -96,7 +96,7 @@ type Dataset = { borderWidth: number; backgroundColor: string; borderColor: string; - data: Array<{t: number; y: number | string}>; + data: Array<{x: number; y: number | string}>; }; const RANGE_VALUES: Record = { @@ -187,10 +187,10 @@ function Progress() { function getStartEnd(timeRange: TimeRange) { const {days, hours, step} = RANGE_VALUES[timeRange]; - const end = moment(); - const endUnix = end.unix() * 1000; - const start = end.clone().subtract({days, hours}); - const startUnix = start.unix() * 1000; + const end = new Date(); + const endUnix = getUnixTime(end) * 1000; + const start = sub(end, {days, hours}); + const startUnix = getUnixTime(start) * 1000; return { start: start.toISOString(), startUnix: startUnix, @@ -204,34 +204,33 @@ function getUnit(timeRange: TimeRange) { return RANGE_VALUES[timeRange].unit; } -function getStepUnit( - startEnd: [moment.Moment, moment.Moment], -): [string, TimeUnit] { +function getStepUnit(startEnd: [Date, Date]): [string, TimeUnit] { const [start, end] = startEnd; - const d = moment.duration(end.diff(start)); - const hrs = d.asHours(); - const days = d.asDays(); - let r: RangeValue; - if (hrs <= 24) { - if (hrs <= 3) { - r = RANGE_VALUES['3_hours']; - } else if (hrs <= 6) { - r = RANGE_VALUES['6_hours']; - } else if (hrs <= 12) { - r = RANGE_VALUES['12_hours']; + const durationInHours = differenceInHours(end, start); + const durationInDays = 
differenceInDays(end, start); + + let range: RangeValue; + if (durationInHours <= 24) { + if (durationInHours <= 3) { + range = RANGE_VALUES['3_hours']; + } else if (durationInHours <= 6) { + range = RANGE_VALUES['6_hours']; + } else if (durationInHours <= 12) { + range = RANGE_VALUES['12_hours']; } else { - r = RANGE_VALUES['24_hours']; + range = RANGE_VALUES['24_hours']; } } else { - if (days <= 7) { - r = RANGE_VALUES['7_days']; - } else if (days <= 14) { - r = RANGE_VALUES['14_days']; + if (durationInDays <= 7) { + range = RANGE_VALUES['7_days']; + } else if (durationInDays <= 14) { + range = RANGE_VALUES['14_days']; } else { - r = RANGE_VALUES['30_days']; + range = RANGE_VALUES['30_days']; } } - return [r.step, r.unit]; + + return [range.step, range.unit]; } function getColorForIndex(index: number, customChartColors?: Array) { @@ -249,9 +248,9 @@ function useDatasetsFetcher(props: Props) { const [step] = getStepUnit(props.startEnd); return { start: start.toISOString(), - startUnix: start.unix() * 1000, + startUnix: getUnixTime(start) * 1000, end: end.toISOString(), - endUnix: end.unix() * 1000, + endUnix: getUnixTime(end) * 1000, step, }; } else { @@ -312,7 +311,7 @@ function useDatasetsFetcher(props: Props) { backgroundColor: getColorForIndex(index, props.chartColors), borderColor: getColorForIndex(index++, props.chartColors), data: it[dbHelper.datapointFieldName]!.map(i => ({ - t: parseInt(i[0]) * 1000, + x: parseInt(i[0]) * 1000, y: parseFloat(i[1]), })), }), @@ -322,11 +321,11 @@ function useDatasetsFetcher(props: Props) { // Add "NaN" to the beginning/end of each dataset to force the chart to // display the whole time frame requested datasets.forEach(dataset => { - if (dataset.data[0].t > startEnd.startUnix) { - dataset.data.unshift({t: startEnd.startUnix, y: 'NaN'}); + if (dataset.data[0].x > startEnd.startUnix) { + dataset.data.unshift({x: startEnd.startUnix, y: 'NaN'}); } - if (dataset.data[dataset.data.length - 1].t < startEnd.endUnix) { - 
dataset.data.push({t: startEnd.endUnix, y: 'NaN'}); + if (dataset.data[dataset.data.length - 1].x < startEnd.endUnix) { + dataset.data.push({x: startEnd.endUnix, y: 'NaN'}); } }); setAllDatasets(datasets); @@ -366,56 +365,55 @@ export default function AsyncMetric(props: Props) { } else { unit = getUnit(props.timeRange); } + return ( - `${data.datasets![tooltipItem.datasetIndex!] - .label!}: ${tooltipItem.yLabel!} ${props.unit}`, }, }, - }} - legend={{ - display: allDatasets.length < 5, - position: style ? style.legend.position : 'bottom', - align: style ? style.legend.align : 'center', - labels: { - boxWidth: 12, + plugins: { + tooltip: { + enabled: true, + mode: 'nearest', + callbacks: { + label(tooltipItem) { + return defaultTooltip(tooltipItem, props); + }, + }, + }, + legend: { + display: allDatasets.length < 5, + position: style ? style.legend.position : 'bottom', + align: style ? style.legend.align : 'center', + labels: { + boxWidth: 12, + }, + }, }, }} data={{datasets: allDatasets}} diff --git a/nms/app/host.tsx b/nms/app/host.tsx index cd18698eb285..0d74d09875cd 100644 --- a/nms/app/host.tsx +++ b/nms/app/host.tsx @@ -12,6 +12,7 @@ */ import './util/axiosConfig'; +import './util/chartjsSetup'; import './util/polyfill'; import Index from './components/host/Index'; diff --git a/nms/app/login.tsx b/nms/app/login.tsx index 69eaf30bc272..d96b40438c34 100644 --- a/nms/app/login.tsx +++ b/nms/app/login.tsx @@ -24,7 +24,11 @@ import {AppContextProvider} from './context/AppContext'; import {BrowserRouter} from 'react-router-dom'; import {StyledEngineProvider, ThemeProvider} from '@mui/material/styles'; +const LOGIN_ERROR_MESSAGE = 'Invalid email or password'; + function LoginWrapper() { + const params = new URLSearchParams(window.location.search); + const loginInvalid = params.get('invalid'); return ( ); } diff --git a/nms/app/main.tsx b/nms/app/main.tsx index fbfc0a282826..93264605a7fd 100644 --- a/nms/app/main.tsx +++ b/nms/app/main.tsx @@ -12,6 +12,7 @@ */ import 
'./util/axiosConfig'; +import './util/chartjsSetup'; import './util/polyfill'; import ApplicationMain from './components/ApplicationMain'; @@ -19,13 +20,13 @@ import Main from './components/Main'; import React from 'react'; import ReactDOM from 'react-dom'; import nullthrows from '../shared/util/nullthrows'; -import {AdapterMoment} from '@mui/x-date-pickers/AdapterMoment'; +import {AdapterDateFns} from '@mui/x-date-pickers/AdapterDateFns'; import {BrowserRouter} from 'react-router-dom'; import {LocalizationProvider} from '@mui/x-date-pickers'; ReactDOM.render( - +
diff --git a/nms/app/util/chartjsSetup.ts b/nms/app/util/chartjsSetup.ts new file mode 100644 index 000000000000..f609ee041b4c --- /dev/null +++ b/nms/app/util/chartjsSetup.ts @@ -0,0 +1,68 @@ +/** + * Copyright 2022 The Magma Authors. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import 'chartjs-adapter-date-fns'; + +import { + ArcElement, + BarController, + BarElement, + BubbleController, + CategoryScale, + Chart, + Decimation, + DoughnutController, + Filler, + Legend, + LineController, + LineElement, + LinearScale, + LogarithmicScale, + PieController, + PointElement, + PolarAreaController, + RadarController, + RadialLinearScale, + ScatterController, + SubTitle, + TimeScale, + TimeSeriesScale, + Title, + Tooltip, +} from 'chart.js'; + +Chart.register( + ArcElement, + LineElement, + BarElement, + PointElement, + BarController, + BubbleController, + DoughnutController, + LineController, + PieController, + PolarAreaController, + RadarController, + ScatterController, + CategoryScale, + LinearScale, + LogarithmicScale, + RadialLinearScale, + TimeScale, + TimeSeriesScale, + Decimation, + Filler, + Legend, + Title, + Tooltip, + SubTitle, +); diff --git a/nms/app/views/alarms/components/alertmanager/AlertDetails/AlertDetailsPane.tsx b/nms/app/views/alarms/components/alertmanager/AlertDetails/AlertDetailsPane.tsx index 7498d5932e0d..d29edd5cd322 100644 --- a/nms/app/views/alarms/components/alertmanager/AlertDetails/AlertDetailsPane.tsx +++ b/nms/app/views/alarms/components/alertmanager/AlertDetails/AlertDetailsPane.tsx @@ -30,9 +30,9 @@ import Link 
from '@mui/material/Link'; import Paper from '@mui/material/Paper'; import SeverityIndicator from '../../severity/SeverityIndicator'; import Typography from '@mui/material/Typography'; -import moment from 'moment'; import {PromFiringAlert} from '../../../../../../generated'; import {Theme} from '@mui/material/styles'; +import {format, formatDistanceToNow, parseISO} from 'date-fns'; import {getErrorMessage} from '../../../../../util/ErrorUtils'; import {makeStyles} from '@mui/styles'; import {useAlarmContext} from '../../AlarmContext'; @@ -158,10 +158,19 @@ function MetricAlertViewer({alert}: AlertViewerProps) { function AlertDate({date}: {date: string}) { const classes = useStyles(); - const fromNow = React.useMemo(() => moment(date).local().fromNow(), [date]); + const realDate = React.useMemo(() => { + if (date) { + return parseISO(date); + } else { + return new Date(); + } + }, [date]); + const fromNow = React.useMemo(() => formatDistanceToNow(realDate), [ + realDate, + ]); const startDate = React.useMemo( - () => moment(date).local().format('MMM Do YYYY, h:mm:ss a'), - [date], + () => format(realDate, 'MMM do yyyy, h:mm:ss a'), + [realDate], ); return ( diff --git a/nms/app/views/alarms/components/alertmanager/FiringAlerts.tsx b/nms/app/views/alarms/components/alertmanager/FiringAlerts.tsx index 753fdaa92cdc..0a57bc1efe28 100644 --- a/nms/app/views/alarms/components/alertmanager/FiringAlerts.tsx +++ b/nms/app/views/alarms/components/alertmanager/FiringAlerts.tsx @@ -10,6 +10,7 @@ * See the License for the specific language governing permissions and * limitations under the License.
*/ + import AddAlertTwoToneIcon from '@mui/icons-material/AddAlertTwoTone'; import AlertDetailsPane from './AlertDetails/AlertDetailsPane'; import Button from '@mui/material/Button'; @@ -20,17 +21,16 @@ import SeverityIndicator from '../severity/SeverityIndicator'; import SimpleTable from '../table/SimpleTable'; import Slide from '@mui/material/Slide'; import Typography from '@mui/material/Typography'; -import moment from 'moment'; import {Link, useResolvedPath} from 'react-router-dom'; +import {Theme} from '@mui/material/styles'; import {colors} from '../../../../theme/default'; +import {formatDistanceToNow, formatISO} from 'date-fns'; +import {getErrorMessage} from '../../../../util/ErrorUtils'; import {makeStyles} from '@mui/styles'; import {useAlarmContext} from '../AlarmContext'; import {useEffect, useState} from 'react'; import {useNetworkId} from '../hooks'; import {useSnackbars} from '../../../../hooks/useSnackbar'; - -import {Theme} from '@mui/material/styles'; -import {getErrorMessage} from '../../../../util/ErrorUtils'; import type {PromFiringAlert} from '../../../../../generated'; const useStyles = makeStyles(theme => ({ @@ -182,11 +182,13 @@ export default function FiringAlerts(props: Props) { title: 'Date', field: 'startsAt', render: currRow => { - const date = moment(new Date(currRow.startsAt)); + const date = new Date(currRow.startsAt ?? null); return ( <> - {date.fromNow()} -
{date.format('dddd, MMMM Do YYYY')}
+ + {formatDistanceToNow(date)} + +
{formatISO(date, {representation: 'date'})}
); }, diff --git a/nms/app/views/alarms/components/alertmanager/Receivers/SelectReceiver.tsx b/nms/app/views/alarms/components/alertmanager/Receivers/SelectReceiver.tsx index 6368afffb29f..3011c4125945 100644 --- a/nms/app/views/alarms/components/alertmanager/Receivers/SelectReceiver.tsx +++ b/nms/app/views/alarms/components/alertmanager/Receivers/SelectReceiver.tsx @@ -14,9 +14,10 @@ import * as React from 'react'; import Chip from '@mui/material/Chip'; import CircularProgress from '@mui/material/CircularProgress'; -import Input from '@mui/material/Input'; import MenuItem from '@mui/material/MenuItem'; import Select, {SelectChangeEvent} from '@mui/material/Select'; +import {AltFormField} from '../../../../../components/FormField'; +import {FormControl, OutlinedInput} from '@mui/material'; import {useAlarmContext} from '../../AlarmContext'; import {useParams} from 'react-router-dom'; @@ -51,32 +52,40 @@ export default function SelectReceiver({ } return ( - + + + + + ); } diff --git a/nms/app/views/alarms/components/rules/LabelsEditor.tsx b/nms/app/views/alarms/components/rules/LabelsEditor.tsx index bbeef7931446..419c513c6d40 100644 --- a/nms/app/views/alarms/components/rules/LabelsEditor.tsx +++ b/nms/app/views/alarms/components/rules/LabelsEditor.tsx @@ -21,9 +21,9 @@ import CardHeader from '@mui/material/CardHeader'; import DeleteIcon from '@mui/icons-material/Delete'; import Grid from '@mui/material/Grid'; import IconButton from '@mui/material/IconButton'; -import InputLabel from '@mui/material/InputLabel'; -import TextField from '@mui/material/TextField'; import Typography from '@mui/material/Typography'; +import {AltFormField} from '../../../../components/FormField'; +import {OutlinedInput} from '@mui/material'; import type {Labels} from '../AlarmAPIType'; const filteredLabels = new Set(['networkID', 'severity']); @@ -117,30 +117,28 @@ export default function LabelsEditor({labels, onChange}: Props) { labelsState.map(([key, value], index) => ( - - Label Name - 
- handleKeyChange(index, e.target.value)} - /> + + handleKeyChange(index, e.target.value)} + /> + - - Value - - handleValueChange(index, e.target.value)} - /> + + handleValueChange(index, e.target.value)} + /> + {showCopyButton && ( diff --git a/nms/app/views/dashboard/feg/FEGDashboard.tsx b/nms/app/views/dashboard/feg/FEGDashboard.tsx index 7687d92d75df..2cebd09f826c 100644 --- a/nms/app/views/dashboard/feg/FEGDashboard.tsx +++ b/nms/app/views/dashboard/feg/FEGDashboard.tsx @@ -20,7 +20,6 @@ import React, {useState} from 'react'; import Text from '../../../theme/design-system/Text'; import TextField from '@mui/material/TextField'; import TopBar from '../../../components/TopBar'; -import moment from 'moment'; import {DateTimePicker} from '@mui/x-date-pickers/DateTimePicker'; import {EVENT_STREAM} from '../../events/EventsTable'; import {Navigate, Route, Routes} from 'react-router-dom'; @@ -28,6 +27,7 @@ import {NetworkCheck} from '@mui/icons-material'; import {Theme} from '@mui/material/styles'; import {colors} from '../../../theme/default'; import {makeStyles} from '@mui/styles'; +import {subDays} from 'date-fns'; const useStyles = makeStyles(theme => ({ dashboardRoot: { @@ -45,8 +45,8 @@ const useStyles = makeStyles(theme => ({ */ function FEGDashboard() { // datetime picker - const [startDate, setStartDate] = useState(moment().subtract(3, 'days')); - const [endDate, setEndDate] = useState(moment()); + const [startDate, setStartDate] = useState(subDays(new Date(), 3)); + const [endDate, setEndDate] = useState(new Date()); return ( <> @@ -86,14 +86,10 @@ function FEGDashboard() { * It consists of an event alert chart, an alert table, a kpi for the * federation network and events table which helps in describing the * current network state. - * @param {Array} startEnd: An array of two elements holding the + * @param {Array} startEnd: An array of two elements holding the * start and end date. 
*/ -function FEGNetworkDashboard({ - startEnd, -}: { - startEnd: [moment.Moment, moment.Moment]; -}) { +function FEGNetworkDashboard({startEnd}: {startEnd: [Date, Date]}) { const classes = useStyles(); return ( @@ -131,10 +127,10 @@ function FEGNetworkDashboard({ */ type Props = { - startDate: moment.Moment; - endDate: moment.Moment; - setStartDate: (startDate: moment.Moment) => void; - setEndDate: (endDate: moment.Moment) => void; + startDate: Date; + endDate: Date; + setStartDate: (startDate: Date) => void; + setEndDate: (endDate: Date) => void; }; function FEGNetworkTab(props: Props) { @@ -153,7 +149,7 @@ function FEGNetworkTab(props: Props) { maxDate={endDate} disableFuture value={startDate} - onChange={date => setStartDate(date as moment.Moment)} + onChange={date => setStartDate(date!)} /> @@ -166,7 +162,7 @@ function FEGNetworkTab(props: Props) { renderInput={props => } disableFuture value={endDate} - onChange={date => setEndDate(date as moment.Moment)} + onChange={date => setEndDate(date!)} /> diff --git a/nms/app/views/dashboard/lte/LteDashboard.tsx b/nms/app/views/dashboard/lte/LteDashboard.tsx index 89018e50fd4e..04dc9e3517b3 100644 --- a/nms/app/views/dashboard/lte/LteDashboard.tsx +++ b/nms/app/views/dashboard/lte/LteDashboard.tsx @@ -10,6 +10,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ + import DashboardAlertTable from '../../../components/DashboardAlertTable'; import DashboardKPIs from '../../../components/DashboardKPIs'; import EventAlertChart from '../../../components/EventAlertChart'; @@ -17,16 +18,15 @@ import EventsTable from '../../events/EventsTable'; import Grid from '@mui/material/Grid'; import React, {useState} from 'react'; import Text from '../../../theme/design-system/Text'; -import TopBar from '../../../components/TopBar'; -import moment from 'moment'; - import TextField from '@mui/material/TextField'; +import TopBar from '../../../components/TopBar'; import {DateTimePicker} from '@mui/x-date-pickers/DateTimePicker'; import {Navigate, Route, Routes} from 'react-router-dom'; import {NetworkCheck} from '@mui/icons-material'; import {Theme} from '@mui/material/styles'; import {colors} from '../../../theme/default'; import {makeStyles} from '@mui/styles'; +import {subDays} from 'date-fns'; const useStyles = makeStyles(theme => ({ dashboardRoot: { @@ -41,8 +41,8 @@ function LteDashboard() { const classes = useStyles(); // datetime picker - const [startDate, setStartDate] = useState(moment().subtract(3, 'days')); - const [endDate, setEndDate] = useState(moment()); + const [startDate, setStartDate] = useState(subDays(new Date(), 3)); + const [endDate, setEndDate] = useState(new Date()); return ( <> @@ -71,7 +71,7 @@ function LteDashboard() { maxDate={endDate} disableFuture value={startDate} - onChange={date => setStartDate(date as moment.Moment)} + onChange={date => setStartDate(date!)} /> @@ -84,7 +84,7 @@ function LteDashboard() { renderInput={props => } disableFuture value={endDate} - onChange={date => setEndDate(date as moment.Moment)} + onChange={date => setEndDate(date!)} /> @@ -104,11 +104,7 @@ function LteDashboard() { ); } -function LteNetworkDashboard({ - startEnd, -}: { - startEnd: [moment.Moment, moment.Moment]; -}) { +function LteNetworkDashboard({startEnd}: {startEnd: [Date, Date]}) { const classes = useStyles(); return ( 
diff --git a/nms/app/views/domain-proxy/LogsList.tsx b/nms/app/views/domain-proxy/LogsList.tsx index 24f8d89d37dd..05e7731b42df 100644 --- a/nms/app/views/domain-proxy/LogsList.tsx +++ b/nms/app/views/domain-proxy/LogsList.tsx @@ -10,32 +10,31 @@ * See the License for the specific language governing permissions and * limitations under the License. */ + +import ActionTable, {TableRef} from '../../components/ActionTable'; +import AutorefreshCheckbox, { + useRefreshingDateRange, +} from '../../components/AutorefreshCheckbox'; import Button from '@mui/material/Button'; +import CardTitleRow from '../../components/layout/CardTitleRow'; import FormControl from '@mui/material/FormControl'; import Grid from '@mui/material/Grid'; import ListIcon from '@mui/icons-material/ListAlt'; +import MagmaAPI from '../../api/MagmaAPI'; import MenuItem from '@mui/material/MenuItem'; import OutlinedInput from '@mui/material/OutlinedInput'; import React, {useCallback, useRef, useState} from 'react'; import Select from '@mui/material/Select'; -import moment from 'moment'; -import nullthrows from '../../../shared/util/nullthrows'; -import {DateTimePicker} from '@mui/x-date-pickers/DateTimePicker'; -import {isFinite} from 'lodash'; -import {makeStyles} from '@mui/styles'; -import {useParams} from 'react-router-dom'; - -import ActionTable, {TableRef} from '../../components/ActionTable'; -import AutorefreshCheckbox, { - useRefreshingDateRange, -} from '../../components/AutorefreshCheckbox'; -import CardTitleRow from '../../components/layout/CardTitleRow'; -import MagmaAPI from '../../api/MagmaAPI'; import Text from '../../theme/design-system/Text'; import TextField from '@mui/material/TextField'; +import nullthrows from '../../../shared/util/nullthrows'; +import {DateTimePicker} from '@mui/x-date-pickers/DateTimePicker'; import {REFRESH_INTERVAL} from '../../context/AppContext'; import {Theme} from '@mui/material/styles'; import {colors} from '../../theme/default'; +import {isFinite} from 
'lodash'; +import {makeStyles} from '@mui/styles'; +import {useParams} from 'react-router-dom'; const useStyles = makeStyles(theme => ({ root: { @@ -177,7 +176,7 @@ function LogsList() { from: item.from, to: item.to, serialNumber: item.serial_number, - time: moment(item.time)?.toLocaleString(), + time: item.time, type: item.type, }; }) @@ -320,10 +319,10 @@ function LogsList() { maxDate={endDate} disableFuture value={startDate} - onChange={newValue => { - setStartDate(newValue as moment.Moment); + onChange={date => { + setStartDate(date!); }} - inputFormat="yyyy/MM/DD HH:mm" + inputFormat="yyyy/MM/dd HH:mm" /> @@ -424,8 +423,8 @@ function LogsList() { )} disableFuture value={endDate} - onChange={newValue => setEndDate(newValue as moment.Moment)} - inputFormat="yyyy/MM/DD HH:mm" + onChange={date => setEndDate(date!)} + inputFormat="yyyy/MM/dd HH:mm" /> diff --git a/nms/app/views/domain-proxy/__tests__/LogsList.tsx b/nms/app/views/domain-proxy/__tests__/LogsList.tsx index 49e9a1b2ad20..70a731f1727e 100644 --- a/nms/app/views/domain-proxy/__tests__/LogsList.tsx +++ b/nms/app/views/domain-proxy/__tests__/LogsList.tsx @@ -12,14 +12,12 @@ */ import LogsList from '../LogsList'; +import MagmaAPI from '../../../api/MagmaAPI'; import React from 'react'; import defaultTheme from '../../../theme/default'; -import {AdapterMoment} from '@mui/x-date-pickers/AdapterMoment'; +import {AdapterDateFns} from '@mui/x-date-pickers/AdapterDateFns'; import {LocalizationProvider} from '@mui/x-date-pickers'; import {MemoryRouter, Route, Routes} from 'react-router-dom'; - -import MagmaAPI from '../../../api/MagmaAPI'; -import moment from 'moment'; import {StyledEngineProvider, ThemeProvider} from '@mui/material/styles'; import { fireEvent, @@ -29,6 +27,7 @@ import { within, } from '@testing-library/react'; import {mockAPI} from '../../../util/TestUtils'; +import {parse} from 'date-fns'; const mockEnqueueSnackbar = jest.fn(); jest.mock('../../../hooks/useSnackbar', () => ({ @@ -49,7 +48,7 @@ 
const renderWithProviders = (jsx: React.ReactNode) => { - + @@ -136,10 +135,12 @@ describe('', () => { it('Sends start date', async () => { fillInput('start-date-input', filterValues.startDate); clickSearchButton(); - await expectApiCallParam( - 'begin', - moment(filterValues.startDate).toISOString(), + const date = parse( + filterValues.startDate, + 'yyyy/MM/dd HH:mm', + new Date(), ); + await expectApiCallParam('begin', date.toISOString()); }); it('Sends responseCode', async () => { @@ -163,10 +164,8 @@ describe('', () => { it('Sends end date', async () => { fillInput('end-date-input', filterValues.endDate); clickSearchButton(); - await expectApiCallParam( - 'end', - moment(filterValues.endDate).toISOString(), - ); + const date = parse(filterValues.endDate, 'yyyy/MM/dd HH:mm', new Date()); + await expectApiCallParam('end', date.toISOString()); }); }); }); diff --git a/nms/app/views/equipment/EnodebDetailMain.tsx b/nms/app/views/equipment/EnodebDetailMain.tsx index fbcae74fc194..75e18f44efbd 100644 --- a/nms/app/views/equipment/EnodebDetailMain.tsx +++ b/nms/app/views/equipment/EnodebDetailMain.tsx @@ -10,6 +10,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ + import AutorefreshCheckbox from '../../components/AutorefreshCheckbox'; import Button from '@mui/material/Button'; import CardTitleRow from '../../components/layout/CardTitleRow'; @@ -26,7 +27,6 @@ import SettingsIcon from '@mui/icons-material/Settings'; import Text from '../../theme/design-system/Text'; import TextField from '@mui/material/TextField'; import TopBar from '../../components/TopBar'; -import moment from 'moment'; import nullthrows from '../../../shared/util/nullthrows'; import withAlert from '../../components/Alert/withAlert'; import {DateTimePicker} from '@mui/x-date-pickers/DateTimePicker'; @@ -38,6 +38,7 @@ import {Theme} from '@mui/material/styles'; import {colors, typography} from '../../theme/default'; import {getErrorMessage} from '../../util/ErrorUtils'; import {makeStyles} from '@mui/styles'; +import {subHours} from 'date-fns'; import {useContext, useState} from 'react'; import {useEnqueueSnackbar} from '../../hooks/useSnackbar'; import type {WithAlert} from '../../components/Alert/withAlert'; @@ -160,8 +161,8 @@ const EnodebRebootButton = withAlert(EnodebRebootButtonInternal); function Overview() { const classes = useStyles(); - const [startDate, setStartDate] = useState(moment().subtract(3, 'hours')); - const [endDate, setEndDate] = useState(moment()); + const [startDate, setStartDate] = useState(subHours(new Date(), 3)); + const [endDate, setEndDate] = useState(new Date()); const [refresh, setRefresh] = useState(true); function MetricChartFilter() { @@ -178,7 +179,7 @@ function Overview() { maxDate={endDate} disableFuture value={startDate} - onChange={date => setStartDate(date as moment.Moment)} + onChange={date => setStartDate(date!)} /> @@ -191,7 +192,7 @@ function Overview() { renderInput={props => } disableFuture value={endDate} - onChange={date => setEndDate(date as moment.Moment)} + onChange={date => setEndDate(date!)} /> @@ -239,8 +240,8 @@ function Overview() { } type Props = { - startDate: moment.Moment; - endDate: 
moment.Moment; + startDate: Date; + endDate: Date; }; function EnodebMetricChart(props: Props) { diff --git a/nms/app/views/equipment/FEGClusterStatus.tsx b/nms/app/views/equipment/FEGClusterStatus.tsx index 12791b06a7cc..ec978fbe993f 100644 --- a/nms/app/views/equipment/FEGClusterStatus.tsx +++ b/nms/app/views/equipment/FEGClusterStatus.tsx @@ -21,7 +21,6 @@ import MagmaAPI from '../../api/MagmaAPI'; import Paper from '@mui/material/Paper'; import React from 'react'; import Typography from '@mui/material/Typography'; -import moment from 'moment'; import nullthrows from '../../../shared/util/nullthrows'; import useMagmaAPI from '../../api/useMagmaAPI'; import {FederationGateway, PromqlReturnObject} from '../../../generated'; @@ -31,6 +30,7 @@ import { HEALTHY_STATUS, } from '../../components/GatewayUtils'; import {GatewayId} from '../../../shared/types/network'; +import {formatRelative, fromUnixTime} from 'date-fns'; import {makeStyles} from '@mui/styles'; import {useContext} from 'react'; import {useParams} from 'react-router-dom'; @@ -90,7 +90,10 @@ export default function FEGClusterStatus() { lastFalloverTime = Math.max(lastFalloverTime, curUpdate); }); lastFalloverTime && - (lastFalloverStatus = moment.unix(lastFalloverTime).calendar()); + (lastFalloverStatus = formatRelative( + fromUnixTime(lastFalloverTime), + new Date(), + )); return lastFalloverStatus; }; const getSecondaryFegGatewayId = ( diff --git a/nms/app/views/equipment/GatewayCheckinChart.tsx b/nms/app/views/equipment/GatewayCheckinChart.tsx index bb9133eaee0b..e4df48ab3832 100644 --- a/nms/app/views/equipment/GatewayCheckinChart.tsx +++ b/nms/app/views/equipment/GatewayCheckinChart.tsx @@ -63,7 +63,7 @@ export default function () { drawBorder: true, }, ticks: { - maxTicksLimit: 1, + maxTicksLimit: 3, }, }, }, diff --git a/nms/app/views/equipment/GatewayLogChart.tsx b/nms/app/views/equipment/GatewayLogChart.tsx index 2ec9aaa9689b..1b3fb1b90a12 100644 --- a/nms/app/views/equipment/GatewayLogChart.tsx +++ 
b/nms/app/views/equipment/GatewayLogChart.tsx @@ -17,19 +17,19 @@ import CustomHistogram from '../../components/CustomMetrics'; import LoadingFiller from '../../components/LoadingFiller'; import MagmaAPI from '../../api/MagmaAPI'; import React from 'react'; -import moment from 'moment'; import nullthrows from '../../../shared/util/nullthrows'; import {TimeUnit} from 'chart.js'; import {colors} from '../../theme/default'; import {getQueryRanges} from '../../components/CustomMetrics'; +import {getUnixTime} from 'date-fns'; import {useEffect, useState} from 'react'; import {useEnqueueSnackbar} from '../../hooks/useSnackbar'; import {useParams} from 'react-router-dom'; import type {Dataset, DatasetType} from '../../components/CustomMetrics'; type Props = { - start: moment.Moment; - end: moment.Moment; + start: Date; + end: Date; delta: number; unit: TimeUnit; format: string; @@ -83,13 +83,13 @@ export default function LogChart(props: Props) { if (r === null || r === undefined) { return { - t: e.unix() * 1000, + x: getUnixTime(e) * 1000, y: 0, }; } return { - t: e.unix() * 1000, + x: getUnixTime(e) * 1000, y: r, }; }); diff --git a/nms/app/views/equipment/GatewayLogs.tsx b/nms/app/views/equipment/GatewayLogs.tsx index a0da2a4ab096..b07647f95639 100644 --- a/nms/app/views/equipment/GatewayLogs.tsx +++ b/nms/app/views/equipment/GatewayLogs.tsx @@ -25,7 +25,6 @@ import MagmaAPI from '../../api/MagmaAPI'; import React, {useMemo, useRef, useState} from 'react'; import Text from '../../theme/design-system/Text'; import TextField from '@mui/material/TextField'; -import moment from 'moment'; import nullthrows from '../../../shared/util/nullthrows'; import {CsvBuilder} from 'filefy'; import {DateTimePicker} from '@mui/x-date-pickers/DateTimePicker'; @@ -117,8 +116,8 @@ async function searchLogs( gatewayId: string, from: number, size: number, - start: moment.Moment, - end: moment.Moment, + start: Date, + end: Date, query: ActionQuery, ) { const logs = ( @@ -151,8 +150,8 @@ async 
function exportLogs( gatewayId: string, from: number, size: number, - start: moment.Moment, - end: moment.Moment, + start: Date, + end: Date, query: ActionQuery, enqueueSnackbar: (message: string, config: OptionsObject) => string | number, ) { @@ -188,8 +187,8 @@ async function handleLogQuery( gatewayId: string, from: number, size: number, - start: moment.Moment, - end: moment.Moment, + start: Date, + end: Date, query: ActionQuery, ) { try { @@ -284,8 +283,8 @@ export default function GatewayLogs() { maxDate={endDate} disableFuture value={startDate} - onChange={val => { - setStartDate(val as moment.Moment); + onChange={date => { + setStartDate(date as Date); setIsAutoRefreshing(false); }} /> @@ -300,8 +299,8 @@ export default function GatewayLogs() { renderInput={props => } disableFuture value={endDate} - onChange={val => { - setEndDate(val as moment.Moment); + onChange={date => { + setEndDate(date as Date); setIsAutoRefreshing(false); }} /> diff --git a/nms/app/views/equipment/__tests__/EnodebDetailMainTest.tsx b/nms/app/views/equipment/__tests__/EnodebDetailMainTest.tsx index 05c4b1a12b01..1cf89dfab8a9 100644 --- a/nms/app/views/equipment/__tests__/EnodebDetailMainTest.tsx +++ b/nms/app/views/equipment/__tests__/EnodebDetailMainTest.tsx @@ -17,7 +17,7 @@ import EnodebDetail from '../EnodebDetailMain'; import MagmaAPI from '../../../api/MagmaAPI'; import React from 'react'; import defaultTheme from '../../../theme/default'; -import {AdapterMoment} from '@mui/x-date-pickers/AdapterMoment'; +import {AdapterDateFns} from '@mui/x-date-pickers/AdapterDateFns'; import {EnodebInfo} from '../../../components/lte/EnodebUtils'; import {LocalizationProvider} from '@mui/x-date-pickers'; import {MemoryRouter, Route, Routes} from 'react-router-dom'; @@ -142,7 +142,7 @@ describe('', () => { - + @@ -181,7 +181,7 @@ describe('', () => { - + diff --git a/nms/app/views/equipment/__tests__/EquipmentEnodebTest.tsx b/nms/app/views/equipment/__tests__/EquipmentEnodebTest.tsx index 
fa0b6c91cd8b..9a96d10c69b8 100644 --- a/nms/app/views/equipment/__tests__/EquipmentEnodebTest.tsx +++ b/nms/app/views/equipment/__tests__/EquipmentEnodebTest.tsx @@ -17,7 +17,7 @@ import EnodebContext from '../../../context/EnodebContext'; import MagmaAPI from '../../../api/MagmaAPI'; import React from 'react'; import defaultTheme from '../../../theme/default'; -import {AdapterMoment} from '@mui/x-date-pickers/AdapterMoment'; +import {AdapterDateFns} from '@mui/x-date-pickers/AdapterDateFns'; import {EnodebInfo} from '../../../components/lte/EnodebUtils'; import {LocalizationProvider} from '@mui/x-date-pickers'; import {MemoryRouter, Route, Routes} from 'react-router-dom'; @@ -122,7 +122,7 @@ describe('', () => { const Wrapper = () => ( - + diff --git a/nms/app/views/equipment/__tests__/FEGEquipmentGatewayTest.tsx b/nms/app/views/equipment/__tests__/FEGEquipmentGatewayTest.tsx index 55efd18d3475..1018e7179a89 100644 --- a/nms/app/views/equipment/__tests__/FEGEquipmentGatewayTest.tsx +++ b/nms/app/views/equipment/__tests__/FEGEquipmentGatewayTest.tsx @@ -15,7 +15,6 @@ import FEGEquipmentDashboard from '../FEGEquipmentDashboard'; import MagmaAPI from '../../../api/MagmaAPI'; import React from 'react'; import defaultTheme from '../../../theme/default'; -import moment from 'moment'; import {AxiosResponse} from 'axios'; import {FEGGatewayContextProvider} from '../../../context/FEGGatewayContext'; import { @@ -25,6 +24,7 @@ import { import {MemoryRouter, Route, Routes} from 'react-router-dom'; import {StyledEngineProvider, ThemeProvider} from '@mui/material/styles'; import {fireEvent, render, waitFor} from '@testing-library/react'; +import {formatRelative, fromUnixTime, getUnixTime} from 'date-fns'; import {mockAPI, mockAPIOnce} from '../../../util/TestUtils'; import type { Csfb, @@ -167,11 +167,14 @@ const mockKPIMetric: PromqlReturnObject = { }, }; -const lastFalloverTimeResponse1 = moment().unix(); +const lastFalloverTimeResponse1 = getUnixTime(new Date()); -const 
lastFalloverTimeResponse2 = moment().unix(); +const lastFalloverTimeResponse2 = getUnixTime(new Date()); -const lastFalloverTime = `${moment.unix(lastFalloverTimeResponse2).calendar()}`; +const lastFalloverTime = `${formatRelative( + fromUnixTime(lastFalloverTimeResponse2), + new Date(), +)}`; const mockFalloverStatus: PromqlReturnObject = { status: 'success', diff --git a/nms/app/views/equipment/__tests__/GatewayLogsTest.tsx b/nms/app/views/equipment/__tests__/GatewayLogsTest.tsx index ccfddcfdd53e..589be69fc0fc 100644 --- a/nms/app/views/equipment/__tests__/GatewayLogsTest.tsx +++ b/nms/app/views/equipment/__tests__/GatewayLogsTest.tsx @@ -16,7 +16,7 @@ import GatewayLogs from '../GatewayLogs'; import MagmaAPI from '../../../api/MagmaAPI'; import React from 'react'; import defaultTheme from '../../../theme/default'; -import {AdapterMoment} from '@mui/x-date-pickers/AdapterMoment'; +import {AdapterDateFns} from '@mui/x-date-pickers/AdapterDateFns'; import {LocalizationProvider} from '@mui/x-date-pickers'; import {MemoryRouter, Route, Routes} from 'react-router-dom'; import {StyledEngineProvider, ThemeProvider} from '@mui/material/styles'; @@ -30,7 +30,7 @@ const LogTableWrapper = () => ( - + @@ -123,7 +123,6 @@ describe('', () => { ]; beforeEach(() => { mockAPI(MagmaAPI.logs, 'networksNetworkIdLogsCountGet', mockLogCount); - mockAPI(MagmaAPI.logs, 'networksNetworkIdLogsSearchGet', mockLogs); }); diff --git a/nms/app/views/events/EventChart.tsx b/nms/app/views/events/EventChart.tsx index e28964706b51..4f085c62531e 100644 --- a/nms/app/views/events/EventChart.tsx +++ b/nms/app/views/events/EventChart.tsx @@ -10,27 +10,26 @@ * See the License for the specific language governing permissions and * limitations under the License.
*/ -import type {Dataset, DatasetType} from '../../components/CustomMetrics'; import Card from '@mui/material/Card'; import CardHeader from '@mui/material/CardHeader'; import CustomHistogram from '../../components/CustomMetrics'; import LoadingFiller from '../../components/LoadingFiller'; +import MagmaAPI from '../../api/MagmaAPI'; import React from 'react'; -import moment from 'moment'; import nullthrows from '../../../shared/util/nullthrows'; - -import MagmaAPI from '../../api/MagmaAPI'; import {TimeUnit} from 'chart.js'; import {colors} from '../../theme/default'; import {getQueryRanges} from '../../components/CustomMetrics'; +import {getUnixTime} from 'date-fns'; import {useEffect, useState} from 'react'; import {useEnqueueSnackbar} from '../../hooks/useSnackbar'; import {useParams} from 'react-router-dom'; +import type {Dataset, DatasetType} from '../../components/CustomMetrics'; type Props = { - start: moment.Moment; - end: moment.Moment; + start: Date; + end: Date; delta: number; unit: TimeUnit; format: string; @@ -84,12 +83,12 @@ export default function EventChart(props: Props) { const [, e] = queries[index]; if (r === null || r === undefined) { return { - t: e.unix() * 1000, + x: getUnixTime(e) * 1000, y: 0, }; } return { - t: e.unix() * 1000, + x: getUnixTime(e) * 1000, y: r, }; }); diff --git a/nms/app/views/events/EventsTable.tsx b/nms/app/views/events/EventsTable.tsx index ae8b038d89ed..ae26da6351c7 100644 --- a/nms/app/views/events/EventsTable.tsx +++ b/nms/app/views/events/EventsTable.tsx @@ -23,7 +23,6 @@ import MyLocationIcon from '@mui/icons-material/MyLocation'; import React from 'react'; import Text from '../../theme/design-system/Text'; import TextField from '@mui/material/TextField'; -import moment from 'moment'; import nullthrows from '../../../shared/util/nullthrows'; import {DateTimePicker} from '@mui/x-date-pickers/DateTimePicker'; import {MaterialTableProps} from '@material-table/core'; @@ -162,8 +161,8 @@ async function handleEventQuery( 
tags: string, query: ActionQuery, from: number, - start: moment.Moment, - end: moment.Moment, + start: Date, + end: Date, ): Promise<{data: Array; page: number; totalCount: number}> { const filters = buildEventQueryFromFilters(query); try { @@ -225,8 +224,8 @@ type EventTableProps = { tags?: string; hardwareId?: string; sz: 'sm' | 'md' | 'lg'; - inStartDate?: moment.Moment; - inEndDate?: moment.Moment; + inStartDate?: Date; + inEndDate?: Date; isAutoRefreshing?: boolean; }; @@ -387,7 +386,7 @@ export default function EventsTable(props: EventTableProps) { disableFuture value={startDate} onChange={date => { - setStartDate(date as moment.Moment); + setStartDate(date!); setIsAutoRefreshing(false); }} /> @@ -403,7 +402,7 @@ export default function EventsTable(props: EventTableProps) { disableFuture value={endDate} onChange={date => { - setEndDate(date as moment.Moment); + setEndDate(date!); setIsAutoRefreshing(false); }} /> diff --git a/nms/app/views/metrics/Explorer.tsx b/nms/app/views/metrics/Explorer.tsx index 7aecf4a526e3..2622be8c6c10 100644 --- a/nms/app/views/metrics/Explorer.tsx +++ b/nms/app/views/metrics/Explorer.tsx @@ -10,6 +10,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ + import ActionTable from '../../components/ActionTable'; import CardTitleRow from '../../components/layout/CardTitleRow'; import ExpandLess from '@mui/icons-material/ExpandLess'; @@ -17,17 +18,16 @@ import ExpandMore from '@mui/icons-material/ExpandMore'; import ExploreIcon from '@mui/icons-material/Explore'; import Grid from '@mui/material/Grid'; import LoadingFiller from '../../components/LoadingFiller'; +import MagmaAPI from '../../api/MagmaAPI'; import React from 'react'; -import moment from 'moment'; import nullthrows from '../../../shared/util/nullthrows'; import useMagmaAPI from '../../api/useMagmaAPI'; - -import MagmaAPI from '../../api/MagmaAPI'; import {PrometheusLabelSet} from '../alarms/components/AlarmAPIType'; import {Theme} from '@mui/material/styles'; import {colors, typography} from '../../theme/default'; import {getErrorMessage} from '../../util/ErrorUtils'; import {makeStyles} from '@mui/styles'; +import {subHours} from 'date-fns'; import {useEffect, useMemo, useState} from 'react'; import {useEnqueueSnackbar} from '../../hooks/useSnackbar'; import {useParams} from 'react-router-dom'; @@ -76,8 +76,8 @@ export default function MetricsExplorer() { const networkId = nullthrows(params.networkId); const startEnd = useMemo(() => { return { - start: moment().subtract(3, 'hours'), - end: moment(), + start: subHours(new Date(), 3), + end: new Date(), }; }, []); diff --git a/nms/app/views/organizations/OrganizationInfoDialog.tsx b/nms/app/views/organizations/OrganizationInfoDialog.tsx index 2ca1cc9327e4..d79115f95421 100644 --- a/nms/app/views/organizations/OrganizationInfoDialog.tsx +++ b/nms/app/views/organizations/OrganizationInfoDialog.tsx @@ -32,7 +32,7 @@ import {SSOSelectedType} from '../../../shared/types/auth'; import {useState} from 'react'; const ENABLE_ALL_NETWORKS_HELPER = - 'By checking this, the organization will have access to all existing and future networks.'; + 'Checking this gives access to all networks that exist at the moment.'; 
/** * Create Organization Tab @@ -83,7 +83,7 @@ export default function (props: DialogProps) { {!shouldEnableAllNetworks && ( + subLabel={'The networks that the organization has access to'}>