diff --git a/.github/actions/keras_application_test/action.yml b/.github/actions/keras_application_test/action.yml index e22d0ad8e..f03745e6d 100644 --- a/.github/actions/keras_application_test/action.yml +++ b/.github/actions/keras_application_test/action.yml @@ -24,13 +24,10 @@ runs: python -m pip install --upgrade pip pip install onnxconverter-common pip install onnx==${{ inputs.onnx_version }} - pip uninstall -y protobuf - pip install "protobuf~=3.20" - pip install h5py==3.7.0 + pip install h5py pip install parameterized pip install timeout-decorator pip install coloredlogs flatbuffers - pip install tensorflow==${{ inputs.tf_version }} pip install onnxruntime==${{ inputs.ort_version }} pip install pillow pip install opencv-python @@ -49,16 +46,20 @@ runs: pip install git+https://github.com/qubvel/efficientnet pip install keras-self-attention pip install pytest pytest-cov pytest-runner + pip uninstall -y protobuf h5py if [[ ${{ inputs.tf_version }} == 1.* ]]; then - pip install keras==2.3.1 - pip install transformers==4.2.0 - pip uninstall -y h5py - pip install h5py==2.9.0 - pip install numpy==1.19.0 + echo "-- install-1 TF1-KERAS ${{ inputs.tf_version }}" + pip install numpy==1.19.0 tensorflow==${{ inputs.tf_version }} protobuf h5py==2.9.0 transformers==4.2.0 keras else pip install transformers - pip install "numpy<2" + if [[ "${{ inputs.tf_version }}" != "2.13.0" && "${{ inputs.tf_version }}" != "2.9.0" ]]; then + echo "-- install-1 TF-KERAS ${{ inputs.tf_version }}" + pip install tf_keras==${{ inputs.tf_version }} tensorflow==${{ inputs.tf_version }} protobuf + else + echo "-- install-1 TF ${{ inputs.tf_version }}" + pip install protobuf tensorflow==${{ inputs.tf_version }} + fi fi pip install -e . 
@@ -71,5 +72,7 @@ runs: run: | python -c "import onnxruntime" pytest tests/keras2onnx_unit_tests --doctest-modules --junitxml=junit/test-results.xml + + export TF_USE_LEGACY_KERAS=True cd tests/keras2onnx_applications/nightly_build python run_all_v2.py diff --git a/.github/actions/keras_unit_test/action.yml b/.github/actions/keras_unit_test/action.yml index 4c78945f1..d5d5171bd 100644 --- a/.github/actions/keras_unit_test/action.yml +++ b/.github/actions/keras_unit_test/action.yml @@ -24,19 +24,23 @@ runs: python -m pip install --upgrade pip pip install onnxconverter-common pip install onnx==${{ inputs.onnx_version }} - pip install h5py==3.7.0 pip install parameterized pip install timeout-decorator pip install coloredlogs flatbuffers - pip install tensorflow==${{ inputs.tf_version }} pip install pytest pytest-cov pytest-runner pip install onnxruntime==${{ inputs.ort_version }} - pip uninstall -y protobuf - pip install "protobuf~=3.20" + pip uninstall -y protobuf h5py tensorflow if [[ ${{ inputs.tf_version }} == 1.* ]]; then - pip install numpy==1.19.0 + echo "-- install-2 TF1-KERAS ${{ inputs.tf_version }}" + pip install numpy==1.19.0 tensorflow==${{ inputs.tf_version }} protobuf keras h5py else - pip install "numpy<2" + if [[ "${{ inputs.tf_version }}" != "2.13.0" && "${{ inputs.tf_version }}" != "2.9.0" ]]; then + echo "-- install-2 TF-KERAS ${{ inputs.tf_version }}" + pip install tf_keras==${{ inputs.tf_version }} tensorflow==${{ inputs.tf_version }} h5py protobuf + else + echo "-- install-2 TF ${{ inputs.tf_version }}" + pip install protobuf tensorflow==${{ inputs.tf_version }} h5py + fi fi pip install -e . 
@@ -48,6 +52,7 @@ runs: shell: bash if: runner.os == 'Linux' run: | + export TF_USE_LEGACY_KERAS=True python -c "import onnxruntime" python -c "import onnxconverter_common" pytest tests/keras2onnx_unit_tests --doctest-modules --junitxml=junit/test-results.xml diff --git a/.github/actions/unit_test/action.yml b/.github/actions/unit_test/action.yml index 00cb28386..ebcad6c80 100644 --- a/.github/actions/unit_test/action.yml +++ b/.github/actions/unit_test/action.yml @@ -46,6 +46,7 @@ runs: export TF2ONNX_SKIP_TFLITE_TESTS=${{ inputs.skip_tflite }} export TF2ONNX_SKIP_TFJS_TESTS=True export TF2ONNX_SKIP_TF_TESTS=False + export TF_USE_LEGACY_KERAS=True python -m pytest --cov=tf2onnx --cov-report=term --disable-pytest-warnings -r s tests --cov-append --junitxml=junit/test-results.xml ls @@ -58,5 +59,6 @@ runs: set TF2ONNX_SKIP_TFLITE_TESTS=${{ inputs.skip_tflite }} set TF2ONNX_SKIP_TFJS_TESTS=True set TF2ONNX_SKIP_TF_TESTS=False + set TF_USE_LEGACY_KERAS=True python -m pytest --cov=tf2onnx --cov-report=term --disable-pytest-warnings -r s tests --cov-append --junitxml=junit/test-results.xml ls diff --git a/.github/workflows/keras_application_test_ci.yml b/.github/workflows/keras_application_test_ci.yml index 75d88fb07..d10e23a06 100644 --- a/.github/workflows/keras_application_test_ci.yml +++ b/.github/workflows/keras_application_test_ci.yml @@ -15,7 +15,7 @@ concurrency: jobs: - Test_min_py_with_min_tf: # Do not change this name because it is used in Ruleset of this repo. + Test1_py38_tf2_9: # Do not change this name because it is used in Ruleset of this repo. strategy: fail-fast: false runs-on: ubuntu-latest @@ -39,7 +39,7 @@ jobs: name: Test Results (Py38-TF2.9-ubuntu) path: ./**/test-results-*.xml - Test_max_py_with_latest_tf: # Do not change this name because it is used in Ruleset of this repo. + Test1_py310_tf2_18: # Do not change this name because it is used in Ruleset of this repo. 
strategy: fail-fast: false runs-on: ubuntu-latest @@ -48,22 +48,22 @@ jobs: - name: Checkout code uses: actions/checkout@v4 - - name: Run Tests (Py310-TF2.15) + - name: Run Tests (Py310-TF2.18) uses: ./.github/actions/keras_application_test with: - tf_version: '2.15.0' + tf_version: '2.18.0' python_version: '3.10' - ort_version: '1.16.3' - onnx_version: '1.16.1' + ort_version: '1.20.1' + onnx_version: '1.17.0' - name: Upload Test Results if: always() uses: actions/upload-artifact@v4 with: - name: Test Results (Py310-TF2.15-ubuntu) + name: Test Results (Py310-TF2.18-ubuntu) path: ./**/test-results-*.xml - Test_py37_with_tf1_15: # Do not change this name because it is used in Ruleset of this repo. + Test1_py37_with_tf1_15: # Do not change this name because it is used in Ruleset of this repo. strategy: fail-fast: false runs-on: ubuntu-22.04 @@ -87,23 +87,37 @@ jobs: name: Test Results (Py37-TF1.15-ubuntu) path: ./**/test-results-*.xml - Extra_tests: + Extra_tests1: strategy: fail-fast: false matrix: name: - 'py38-tf2.13' - 'py39-tf2.15' + - 'py310-tf2.18' + - 'py311-tf2.18' os: ['ubuntu-latest', 'windows-2022'] - ort_version: ['1.16.3'] - onnx_version: ['1.16.1'] include: - name: 'py38-tf2.13' tf_version: '2.13.0' python_version: '3.8' + ort_version: '1.16.3' + onnx_version: '1.16.1' - name: 'py39-tf2.15' tf_version: '2.15.0' python_version: '3.9' + ort_version: '1.16.3' + onnx_version: '1.16.1' + - name: 'py310-tf2.18' + tf_version: '2.18.0' + python_version: '3.10' + ort_version: '1.20.1' + onnx_version: '1.17.0' + - name: 'py311-tf2.18' + tf_version: '2.18.0' + python_version: '3.11' + ort_version: '1.20.1' + onnx_version: '1.17.0' runs-on: ${{ matrix.os }} steps: @@ -126,7 +140,7 @@ jobs: publish-test-results: name: "Publish Tests Results to Github" - needs: [Test_min_py_with_min_tf, Test_max_py_with_latest_tf, Test_py37_with_tf1_15, Extra_tests] + needs: [Test1_py38_tf2_9, Test1_py310_tf2_18, Test1_py37_with_tf1_15, Extra_tests1] runs-on: ubuntu-latest 
permissions: checks: write diff --git a/.github/workflows/keras_unit_test_ci.yml b/.github/workflows/keras_unit_test_ci.yml index a8b35f67b..b62f46c0a 100644 --- a/.github/workflows/keras_unit_test_ci.yml +++ b/.github/workflows/keras_unit_test_ci.yml @@ -14,7 +14,7 @@ concurrency: cancel-in-progress: true jobs: - Test_min_py_with_min_tf: # Do not change this name because it is used in Ruleset of this repo. + Test2_py38_tf2_9: # Do not change this name because it is used in Ruleset of this repo. strategy: fail-fast: false runs-on: ubuntu-latest @@ -38,7 +38,7 @@ jobs: name: Test Results (Py38-TF2.9-ubuntu) path: ./**/test-results-*.xml - Test_max_py_with_latest_tf: # Do not change this name because it is used in Ruleset of this repo. + Test2_py310_tf2_18: # Do not change this name because it is used in Ruleset of this repo. strategy: fail-fast: false runs-on: ubuntu-latest @@ -47,22 +47,46 @@ jobs: - name: Checkout code uses: actions/checkout@v4 - - name: Run Tests (Py310-TF2.15) + - name: Run Tests (Py310-TF2.18) uses: ./.github/actions/keras_unit_test with: - tf_version: '2.15.0' + tf_version: '2.18.0' python_version: '3.10' - ort_version: '1.16.3' - onnx_version: '1.16.1' + ort_version: '1.20.1' + onnx_version: '1.17.0' + + - name: Upload Test Results + if: always() + uses: actions/upload-artifact@v4 + with: + name: Test Results (Py310-TF2.18-ubuntu) + path: ./**/test-results-*.xml + + Test2_py311_tf2_18: # Do not change this name because it is used in Ruleset of this repo. 
+ strategy: + fail-fast: false + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Run Tests (Py311-TF2.18) + uses: ./.github/actions/keras_unit_test + with: + tf_version: '2.18.0' + python_version: '3.11' + ort_version: '1.20.1' + onnx_version: '1.17.0' - name: Upload Test Results if: always() uses: actions/upload-artifact@v4 with: - name: Test Results (Py310-TF2.15-ubuntu) + name: Test Results (Py311-TF2.18-ubuntu) path: ./**/test-results-*.xml - Test_py37_with_tf1_15: # Do not change this name because it is used in Ruleset of this repo. + Test2_py37_with_tf1_15: # Do not change this name because it is used in Ruleset of this repo. strategy: fail-fast: false runs-on: ubuntu-22.04 @@ -86,23 +110,37 @@ jobs: name: Test Results (Py37-TF1.15-ubuntu) path: ./**/test-results-*.xml - Extra_tests: + Extra_tests2: strategy: fail-fast: false matrix: name: - - 'py39-tf2.10' + - 'py38-tf2.13' - 'py39-tf2.15' + - 'py310-tf2.18' + - 'py311-tf2.18' os: ['ubuntu-latest', 'windows-2022'] - ort_version: ['1.16.3'] - onnx_version: ['1.16.1'] include: - - name: 'py39-tf2.10' - tf_version: '2.10.0' - python_version: '3.9' + - name: 'py38-tf2.13' + tf_version: '2.13.0' + python_version: '3.8' + ort_version: '1.16.3' + onnx_version: '1.16.1' - name: 'py39-tf2.15' tf_version: '2.15.0' python_version: '3.9' + ort_version: '1.16.3' + onnx_version: '1.16.1' + - name: 'py310-tf2.18' + tf_version: '2.18.0' + python_version: '3.10' + ort_version: '1.20.1' + onnx_version: '1.17.0' + - name: 'py311-tf2.18' + tf_version: '2.18.0' + python_version: '3.11' + ort_version: '1.20.1' + onnx_version: '1.17.0' runs-on: ${{ matrix.os }} steps: @@ -125,7 +163,7 @@ jobs: publish-test-results: name: "Publish Tests Results to Github" - needs: [Test_min_py_with_min_tf, Test_max_py_with_latest_tf, Test_py37_with_tf1_15, Extra_tests] + needs: [Test2_py38_tf2_9, Test2_py310_tf2_18, Test2_py311_tf2_18, Test2_py37_with_tf1_15, Extra_tests2] runs-on: ubuntu-latest 
permissions: checks: write diff --git a/.github/workflows/pretrained_model_test_ci.yml b/.github/workflows/pretrained_model_test_ci.yml index 57e4409be..e7cde4b71 100644 --- a/.github/workflows/pretrained_model_test_ci.yml +++ b/.github/workflows/pretrained_model_test_ci.yml @@ -15,7 +15,7 @@ concurrency: jobs: - Test_min_py_with_min_tf: # Do not change this name because it is used in 'publish-test-results' section below. + Test3_py38_tf2_9: # Do not change this name because it is used in 'publish-test-results' section below. strategy: fail-fast: false runs-on: ubuntu-latest @@ -42,7 +42,7 @@ jobs: name: Test Results (Py38-TF2.9-18-ubuntu) path: ./**/test-results-*.xml - Test_max_py_with_latest_tf: # Do not change this name because it is used in 'publish-test-results' section below. + Test3_py310_tf2_18: # Do not change this name because it is used in 'publish-test-results' section below. strategy: fail-fast: false runs-on: ubuntu-latest @@ -51,14 +51,14 @@ jobs: - name: Checkout code uses: actions/checkout@v4 - - name: Run Tests (Py310-TF2.15-18) + - name: Run Tests (Py310-TF2.18-18) uses: ./.github/actions/pretrained_model_test with: os: 'ubuntu-latest' - tf_version: '2.15.0' + tf_version: '2.18.0' python_version: '3.10' - ort_version: '1.16.3' - onnx_version: '1.16.1' + ort_version: '1.20.1' + onnx_version: '1.17.0' opset_version: '18' skip_tflite: 'False' @@ -66,10 +66,10 @@ jobs: if: always() uses: actions/upload-artifact@v4 with: - name: Test Results (Py310-TF2.15-18-ubuntu) + name: Test Results (Py310-TF2.18-18-ubuntu) path: ./**/test-results-*.xml - Test_py37_with_tf1_15: # Do not change this name because it is used in 'publish-test-results' section below. + Test3_py37_with_tf1_15: # Do not change this name because it is used in 'publish-test-results' section below. 
strategy: fail-fast: false runs-on: ubuntu-22.04 @@ -95,25 +95,39 @@ jobs: name: Test Results (Py37-TF1.15-15-ubuntu) path: ./**/test-results-*.xml - Extra_tests: # Do not change this name because it is used in 'publish-test-results' section below. + Extra_tests3: # Do not change this name because it is used in 'publish-test-results' section below. strategy: fail-fast: false matrix: name: - 'py38-tf2.13' - 'py39-tf2.15' + - 'py310-tf2.18' + - 'py311-tf2.18' os: ['ubuntu-latest', 'windows-2022'] opset_version: ['18', '15'] - ort_version: ['1.16.3'] - onnx_version: ['1.16.1'] skip_tflite: ['False'] include: - name: 'py38-tf2.13' tf_version: '2.13.0' python_version: '3.8' + ort_version: '1.16.3' + onnx_version: '1.16.1' - name: 'py39-tf2.15' tf_version: '2.15.0' python_version: '3.9' + ort_version: '1.16.3' + onnx_version: '1.16.1' + - name: 'py310-tf2.18' + tf_version: '2.18.0' + python_version: '3.10' + ort_version: '1.20.1' + onnx_version: '1.17.0' + - name: 'py311-tf2.18' + tf_version: '2.18.0' + python_version: '3.11' + ort_version: '1.20.1' + onnx_version: '1.17.0' runs-on: ${{ matrix.os }} steps: @@ -139,7 +153,7 @@ jobs: publish-test-results: name: "Publish Tests Results to Github" - needs: [Test_min_py_with_min_tf, Test_max_py_with_latest_tf, Test_py37_with_tf1_15, Extra_tests] + needs: [Test3_py38_tf2_9, Test3_py310_tf2_18, Test3_py37_with_tf1_15, Extra_tests3] runs-on: ubuntu-latest permissions: checks: write diff --git a/.github/workflows/pylint.yml b/.github/workflows/pylint.yml index ac5631aa3..5e9377d2e 100644 --- a/.github/workflows/pylint.yml +++ b/.github/workflows/pylint.yml @@ -18,10 +18,10 @@ jobs: - name: Set up Python uses: actions/setup-python@v5 with: - python-version: 3.9 # Specify the desired Python version (e.g., 3.8, 3.9) + python-version: 3.11 - name: Install dependencies - run: pip install pylint==2.4.4 + run: pip install pylint - name: Run pylint run: | diff --git a/.github/workflows/unit_test_ci.yml b/.github/workflows/unit_test_ci.yml 
index 1143963dc..018fa0262 100644 --- a/.github/workflows/unit_test_ci.yml +++ b/.github/workflows/unit_test_ci.yml @@ -15,7 +15,7 @@ concurrency: jobs: - Test_min_py_with_min_tf: # Do not change this name because it is used in Ruleset of this repo. + Test4_py38_tf2_9: # Do not change this name because it is used in Ruleset of this repo. strategy: fail-fast: false runs-on: ubuntu-latest @@ -42,7 +42,7 @@ jobs: name: Test Results (Py38-TF2.9-18-ubuntu) path: ./**/test-results-*.xml - Test_max_py_with_latest_tf: # Do not change this name because it is used in Ruleset of this repo. + Test4_py310_tf2_18: # Do not change this name because it is used in Ruleset of this repo. strategy: fail-fast: false runs-on: ubuntu-latest @@ -51,14 +51,41 @@ jobs: - name: Checkout code uses: actions/checkout@v4 - - name: Run Tests (Py310-TF2.15-18) + - name: Run Tests (Py310-TF2.18-18) uses: ./.github/actions/unit_test with: os: 'ubuntu-latest' - tf_version: '2.15.0' + tf_version: '2.18.0' python_version: '3.10' - ort_version: '1.16.3' - onnx_version: '1.16.1' + ort_version: '1.20.1' + onnx_version: '1.17.0' + opset_version: '18' + skip_tflite: 'False' + + - name: Upload Test Results + if: always() + uses: actions/upload-artifact@v4 + with: + name: Test Results (Py310-TF2.18-18-ubuntu) + path: ./**/test-results-*.xml + + Test4_py311_tf2_18: # Do not change this name because it is used in Ruleset of this repo. 
+ strategy: + fail-fast: false + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Run Tests (Py311-TF2.18-18) + uses: ./.github/actions/unit_test + with: + os: 'ubuntu-latest' + tf_version: '2.18.0' + python_version: '3.11' + ort_version: '1.20.1' + onnx_version: '1.17.0' opset_version: '18' skip_tflite: 'False' @@ -66,28 +93,42 @@ jobs: if: always() uses: actions/upload-artifact@v4 with: - name: Test Results (Py310-TF2.15-18-ubuntu) + name: Test Results (Py311-TF2.18-18-ubuntu) path: ./**/test-results-*.xml - Extra_tests: + Extra_tests4: strategy: fail-fast: false matrix: name: - 'py38-tf2.13' - 'py39-tf2.15' + - 'py310-tf2.18' + - 'py311-tf2.18' os: ['ubuntu-latest', 'windows-2022'] opset_version: ['18', '15'] - ort_version: ['1.16.3'] - onnx_version: ['1.16.1'] skip_tflite: ['False'] include: + - name: 'py311-tf2.18' + tf_version: '2.18.0' + python_version: '3.11' + ort_version: '1.20.1' + onnx_version: '1.17.0' + - name: 'py310-tf2.18' + tf_version: '2.18.0' + python_version: '3.10' + ort_version: '1.20.1' + onnx_version: '1.17.0' - name: 'py38-tf2.13' tf_version: '2.13.0' python_version: '3.8' + ort_version: '1.16.3' + onnx_version: '1.16.1' - name: 'py39-tf2.15' tf_version: '2.15.0' python_version: '3.9' + ort_version: '1.16.3' + onnx_version: '1.16.1' - name: 'py37-tf1.15' tf_version: '1.15.5' python_version: '3.7' @@ -122,7 +163,7 @@ jobs: publish-test-results: name: "Publish Tests Results to Github" - needs: [Test_min_py_with_min_tf, Test_max_py_with_latest_tf, Extra_tests] + needs: [Test4_py38_tf2_9, Test4_py310_tf2_18, Test4_py311_tf2_18, Extra_tests4] runs-on: ubuntu-latest permissions: checks: write diff --git a/README.md b/README.md index 719b99e1b..cad1cd591 100644 --- a/README.md +++ b/README.md @@ -17,8 +17,8 @@ The common issues we run into we try to document here [Troubleshooting Guide](Tr | Build Type | OS | Python | TensorFlow | ONNX opset | Status | | --- | --- | --- | --- | --- | --- | -| Unit 
Test - Basic | Linux, Windows | 3.7-3.10 | 1.15, 2.9-2.15 | 14-18 | [![Build Status](https://dev.azure.com/tensorflow-onnx/tensorflow-onnx/_apis/build/status/unit_test?branchName=main)](https://dev.azure.com/tensorflow-onnx/tensorflow-onnx/_build/latest?definitionId=16&branchName=main) | -| Unit Test - Full | Linux, Windows | 3.7-3.10 | 1.15, 2.9-2.15 | 14-18 | [![Build Status](https://dev.azure.com/tensorflow-onnx/tensorflow-onnx/_apis/build/status/unit_test-matrix?branchName=main)](https://dev.azure.com/tensorflow-onnx/tensorflow-onnx/_build/latest?definitionId=18&branchName=main) | | +| Unit Test - Basic | Linux, Windows | 3.7-3.11 | 1.15, 2.9-2.15 | 14-18 | [![Build Status](https://dev.azure.com/tensorflow-onnx/tensorflow-onnx/_apis/build/status/unit_test?branchName=main)](https://dev.azure.com/tensorflow-onnx/tensorflow-onnx/_build/latest?definitionId=16&branchName=main) | +| Unit Test - Full | Linux, Windows | 3.7-3.11 | 1.15, 2.9-2.15 | 14-18 | [![Build Status](https://dev.azure.com/tensorflow-onnx/tensorflow-onnx/_apis/build/status/unit_test-matrix?branchName=main)](https://dev.azure.com/tensorflow-onnx/tensorflow-onnx/_build/latest?definitionId=18&branchName=main) | |
## Supported Versions @@ -42,7 +42,7 @@ You can install tf2onnx on top of tf-1.x or tf-2.x. ### Python -We support Python ```3.7-3.10```. +We support Python ```3.7-3.11```. ## Prerequisites diff --git a/Troubleshooting.md b/Troubleshooting.md index fd9ef4a6a..5aff57cc5 100644 --- a/Troubleshooting.md +++ b/Troubleshooting.md @@ -36,3 +36,7 @@ An example of this is the [ONNX Slice operator before opset-10](https://github.c You can pass the options ```--fold_const```(removed after tf2onnx-1.9.3) in the tf2onnx command line that allows tf2onnx to apply more aggressive constant folding which will increase chances to find a constant. If this doesn't work the model is most likely not to be able to convert to ONNX. We used to see this a lot of issue with the ONNX Slice op and in opset-10 was updated for exactly this reason. + +## cudaSetDevice() on GPU:0 failed. Status: CUDA-capable device(s) is/are busy or unavailable + +See [Regression: TF 2.18 crashes with cudaSetDevice failing due to GPU being busy](https://github.com/tensorflow/tensorflow/issues/78784). diff --git a/examples/end2end_tfkeras.py b/examples/end2end_tfkeras.py index 21ffd2d76..24a95f5dc 100644 --- a/examples/end2end_tfkeras.py +++ b/examples/end2end_tfkeras.py @@ -8,11 +8,9 @@ *onnxruntime*, *tensorflow* and *tensorflow.lite*. 
""" from onnxruntime import InferenceSession -import os import subprocess import timeit import numpy as np -import tensorflow as tf from tensorflow import keras from tensorflow.keras import layers, Input diff --git a/examples/tf_custom_op/double_and_add_one_custom_op.py b/examples/tf_custom_op/double_and_add_one_custom_op.py index 770324ceb..e8510c414 100644 --- a/examples/tf_custom_op/double_and_add_one_custom_op.py +++ b/examples/tf_custom_op/double_and_add_one_custom_op.py @@ -7,7 +7,6 @@ import os from tf2onnx import utils from tf2onnx.handler import tf_op -from tf2onnx.tf_loader import tf_placeholder DIR_PATH = os.path.realpath(os.path.dirname(__file__)) diff --git a/setup.py b/setup.py index 85fece3a2..7be0c795a 100644 --- a/setup.py +++ b/setup.py @@ -82,7 +82,7 @@ def run(self): author='ONNX', author_email='onnx-technical-discuss@lists.lfaidata.foundation', url='https://github.com/onnx/tensorflow-onnx', - install_requires=['numpy>=1.14.1', 'onnx>=1.4.1', 'requests', 'six', 'flatbuffers>=1.12', 'protobuf~=3.20'], + install_requires=['numpy>=1.14.1', 'onnx>=1.4.1', 'requests', 'six', 'flatbuffers>=1.12'], classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', @@ -98,5 +98,7 @@ def run(self): 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', - 'Programming Language :: Python :: 3.10'] + 'Programming Language :: Python :: 3.10', + 'Programming Language :: Python :: 3.11', + ] ) diff --git a/tests/keras2onnx_unit_tests/conftest.py b/tests/keras2onnx_unit_tests/conftest.py index f3b518d44..ea6ab4441 100644 --- a/tests/keras2onnx_unit_tests/conftest.py +++ b/tests/keras2onnx_unit_tests/conftest.py @@ -13,6 +13,12 @@ K = keras.backend +def is_keras_3(): + if hasattr(keras, '__version__'): + return keras.__version__.startswith("3.") + + return False + @pytest.fixture(scope='function') def runner(): np.random.seed(42) @@ -25,12 +31,19 @@ def runner(): def 
runner_func(*args, **kwargs): return run_onnx_runtime(*args, model_files, **kwargs) - # Ensure Keras layer naming is reset for each function - if hasattr(K, "reset_uids"): - # see https://github.com/onnx/tensorflow-onnx/issues/2370 - K.reset_uids() - # Reset the TensorFlow session to avoid resource leaking between tests - K.clear_session() + if is_keras_3(): + import tf_keras + if hasattr(K, "reset_uids"): + # see https://github.com/onnx/tensorflow-onnx/issues/2370 + K.reset_uids() + tf_keras.backend.clear_session() + else: + # Ensure Keras layer naming is reset for each function + if hasattr(K, "reset_uids"): + # see https://github.com/onnx/tensorflow-onnx/issues/2370 + K.reset_uids() + # Reset the TensorFlow session to avoid resource leaking between tests + K.clear_session() # Provide wrapped run_onnx_runtime function yield runner_func diff --git a/tests/keras2onnx_unit_tests/test_subclassing.py b/tests/keras2onnx_unit_tests/test_subclassing.py index f4b8ea9d0..cd7de66b6 100644 --- a/tests/keras2onnx_unit_tests/test_subclassing.py +++ b/tests/keras2onnx_unit_tests/test_subclassing.py @@ -49,6 +49,43 @@ def call(self, inputs, **kwargs): return output +def get_save_spec(model, dynamic_batch=False): + """Returns the save spec of the subclassing keras model.""" + from tensorflow.python.framework import tensor_spec + shapes_dict = getattr(model, '_build_shapes_dict', None) + # TODO: restore dynamic_batch + # assert not dynamic_batch, f"get_save_spec: dynamic_batch={dynamic_batch}, shapes_dict={shapes_dict}" + if not shapes_dict: + return None + + if 'input_shape' not in shapes_dict: + raise ValueError( + 'Model {} cannot be saved because the input shapes have not been set.' 
+ ) + + input_shape = shapes_dict['input_shape'] + if isinstance(input_shape, tuple): + shape = input_shape + shape = (None,) + shape[1:] + return tensor_spec.TensorSpec( + shape=shape, dtype=model.input_dtype + ) + elif isinstance(input_shape, dict): + specs = {} + for key, shape in input_shape.items(): + shape = (None,) + shape[1:] + specs[key] = tensor_spec.TensorSpec( + shape=shape, dtype=model.input_dtype, name=key + ) + return specs + elif isinstance(input_shape, list): + specs = [] + for shape in input_shape: + shape = (None,) + shape[1:] + specs.append(tensor_spec.TensorSpec(shape=shape, dtype=model.input_dtype)) + return specs + + class SimpleWrapperModel(tf.keras.Model): def __init__(self, func): super(SimpleWrapperModel, self).__init__() @@ -57,6 +94,9 @@ def __init__(self, func): def call(self, inputs, **kwargs): return self.func(inputs) + def _get_save_spec(self, dynamic_batch=False): + return get_save_spec(self, dynamic_batch=dynamic_batch) + def test_lenet(runner): tf.keras.backend.clear_session() @@ -198,7 +238,10 @@ def _tf_where(input_0): swm = SimpleWrapperModel(_tf_where) const_in = [np.array([2, 4, 6, 8, 10]).astype(np.int32)] expected = swm(const_in) - swm._set_inputs(const_in) + if hasattr(swm, "_set_inputs"): + swm._set_inputs(const_in) + else: + swm.inputs_spec = const_in oxml = convert_keras(swm) assert runner('where_test', oxml, const_in, expected) diff --git a/tests/run_pretrained_models.py b/tests/run_pretrained_models.py index a6a952af3..7b74bbc61 100644 --- a/tests/run_pretrained_models.py +++ b/tests/run_pretrained_models.py @@ -474,7 +474,7 @@ def run_tflite(): for k in input_names: v = self.input_names[k] inputs[to_rename.get(k, k)] = tf.constant(self.make_input(v)) - tf_func = tf.function(concrete_func) + tf_func = tf.function(self.concrete_function) logger.info("Running TF") tf_results_d = tf_func(**inputs) # If there is only a single output a dict might not be returned @@ -492,7 +492,7 @@ def run_tflite(): 
tf.profiler.experimental.start(self.tf_profile) while time.time() < stop: for _ in range(PERF_STEP): - _ = concrete_func(**inputs) + _ = self.concrete_function(**inputs) n += PERF_STEP if self.tf_profile is not None: tf.profiler.experimental.stop() diff --git a/tests/test_cudnn_compatible_gru.py b/tests/test_cudnn_compatible_gru.py index ceab5a5ee..e01f08e6d 100644 --- a/tests/test_cudnn_compatible_gru.py +++ b/tests/test_cudnn_compatible_gru.py @@ -16,9 +16,7 @@ # pylint: disable=missing-docstring,invalid-name,unused-argument,using-constant-test,cell-var-from-loop if is_tf2(): - MultiRNNCell = tf.compat.v1.nn.rnn_cell.MultiRNNCell - dynamic_rnn = tf.compat.v1.nn.dynamic_rnn - bidirectional_dynamic_rnn = tf.compat.v1.nn.bidirectional_dynamic_rnn + pass else: GRUBlockCell = tf.contrib.rnn.GRUBlockCell MultiRNNCell = tf.contrib.rnn.MultiRNNCell diff --git a/tests/test_custom_rnncell.py b/tests/test_custom_rnncell.py index b5286c348..8cff2d81e 100644 --- a/tests/test_custom_rnncell.py +++ b/tests/test_custom_rnncell.py @@ -16,13 +16,8 @@ # pylint: disable=abstract-method,arguments-differ if is_tf2(): - BasicLSTMCell = tf.compat.v1.nn.rnn_cell.BasicLSTMCell - LSTMCell = tf.compat.v1.nn.rnn_cell.LSTMCell - GRUCell = tf.compat.v1.nn.rnn_cell.GRUCell - RNNCell = tf.compat.v1.nn.rnn_cell.RNNCell - MultiRNNCell = tf.compat.v1.nn.rnn_cell.MultiRNNCell - dynamic_rnn = tf.compat.v1.nn.dynamic_rnn - bidirectional_dynamic_rnn = tf.compat.v1.nn.bidirectional_dynamic_rnn + # no test for tf2 in this file + pass else: LSTMBlockCell = tf.contrib.rnn.LSTMBlockCell LSTMCell = tf.nn.rnn_cell.LSTMCell @@ -32,6 +27,45 @@ dynamic_rnn = tf.nn.dynamic_rnn bidirectional_dynamic_rnn = tf.nn.bidirectional_dynamic_rnn + class GatedGRUCell(RNNCell): + def __init__(self, hidden_dim, reuse=None): + super().__init__(self, _reuse=reuse) + self._num_units = hidden_dim + self._activation = tf.tanh + + @property + def state_size(self): + return self._num_units + + @property + def output_size(self): + 
return self._num_units + + def call(self, inputs, state): + # inputs shape: [batch size, time step, input size] = [1, 3, 2] + # num_units: 5 + # W shape: [2, 3 * 5] = [2, 15] + # U shape: [5, 3 * 5] = [5, 15] + # b shape: [1, 3 * 5] = [1, 15] + # state shape: [batch size, state size] = [1, 5] + + input_dim = inputs.get_shape()[-1] + assert input_dim is not None, "input dimension must be defined" + # W = tf.get_variable(name="W", shape=[input_dim, 3 * self._num_units], dtype=tf.float32) + W = np.arange(30.0, dtype=np.float32).reshape((2, 15)) + # U = tf.get_variable(name='U', shape=[self._num_units, 3 * self._num_units], dtype=tf.float32) + U = np.arange(75.0, dtype=np.float32).reshape((5, 15)) + # b = tf.get_variable(name='b', shape=[1, 3 * self._num_units], dtype=tf.float32) + b = np.arange(15.0, dtype=np.float32).reshape((1, 15)) + + xw = tf.split(tf.matmul(inputs, W) + b, 3, 1) + hu = tf.split(tf.matmul(state, U), 3, 1) + r = tf.sigmoid(xw[0] + hu[0]) + z = tf.sigmoid(xw[1] + hu[1]) + h1 = self._activation(xw[2] + r * hu[2]) + next_h = h1 * (1 - z) + state * z + return next_h, next_h + class CustomRnnCellTests(Tf2OnnxBackendTestBase): @check_opset_min_version(8, "Scan") @@ -376,45 +410,5 @@ def func(encoder_x, decoder_x, seq_length): self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, 0.1) -class GatedGRUCell(RNNCell): - def __init__(self, hidden_dim, reuse=None): - super().__init__(self, _reuse=reuse) - self._num_units = hidden_dim - self._activation = tf.tanh - - @property - def state_size(self): - return self._num_units - - @property - def output_size(self): - return self._num_units - - def call(self, inputs, state): - # inputs shape: [batch size, time step, input size] = [1, 3, 2] - # num_units: 5 - # W shape: [2, 3 * 5] = [2, 15] - # U shape: [5, 3 * 5] = [5, 15] - # b shape: [1, 3 * 5] = [1, 15] - # state shape: [batch size, state size] = [1, 5] - - input_dim = inputs.get_shape()[-1] - assert input_dim is not None, "input 
dimension must be defined" - # W = tf.get_variable(name="W", shape=[input_dim, 3 * self._num_units], dtype=tf.float32) - W = np.arange(30.0, dtype=np.float32).reshape((2, 15)) - # U = tf.get_variable(name='U', shape=[self._num_units, 3 * self._num_units], dtype=tf.float32) - U = np.arange(75.0, dtype=np.float32).reshape((5, 15)) - # b = tf.get_variable(name='b', shape=[1, 3 * self._num_units], dtype=tf.float32) - b = np.arange(15.0, dtype=np.float32).reshape((1, 15)) - - xw = tf.split(tf.matmul(inputs, W) + b, 3, 1) - hu = tf.split(tf.matmul(state, U), 3, 1) - r = tf.sigmoid(xw[0] + hu[0]) - z = tf.sigmoid(xw[1] + hu[1]) - h1 = self._activation(xw[2] + r * hu[2]) - next_h = h1 * (1 - z) + state * z - return next_h, next_h - - if __name__ == '__main__': unittest_main() diff --git a/tests/test_gru.py b/tests/test_gru.py index 88d1f7f7c..c127c57dc 100644 --- a/tests/test_gru.py +++ b/tests/test_gru.py @@ -34,10 +34,13 @@ if is_tf2(): # There is no LSTMBlockCell in tf-2.x - BasicLSTMCell = tf.compat.v1.nn.rnn_cell.BasicLSTMCell - LSTMCell = tf.compat.v1.nn.rnn_cell.LSTMCell - GRUCell = tf.compat.v1.nn.rnn_cell.GRUCell - MultiRNNCell = tf.compat.v1.nn.rnn_cell.MultiRNNCell + try: + BasicLSTMCell = getattr(tf.compat.v1.nn.rnn_cell, "BasicLSTMCell", None) + LSTMCell = getattr(tf.compat.v1.nn.rnn_cell, "LSTMCell", None) + GRUCell = getattr(tf.compat.v1.nn.rnn_cell, "GRUCell", None) + MultiRNNCell = getattr(tf.compat.v1.nn.rnn_cell, "MultiRNNCell", None) + except ImportError: + pass dynamic_rnn = tf.compat.v1.nn.dynamic_rnn bidirectional_dynamic_rnn = tf.compat.v1.nn.bidirectional_dynamic_rnn else: diff --git a/tests/test_grublock.py b/tests/test_grublock.py index ebd878680..7418fc128 100644 --- a/tests/test_grublock.py +++ b/tests/test_grublock.py @@ -17,7 +17,10 @@ # pylint: disable=invalid-name if is_tf2(): - MultiRNNCell = tf.compat.v1.nn.rnn_cell.MultiRNNCell + try: + MultiRNNCell = getattr(tf.compat.v1.nn.rnn_cell, "MultiRNNCell", None) + except ImportError: + pass 
dynamic_rnn = tf.compat.v1.nn.dynamic_rnn bidirectional_dynamic_rnn = tf.compat.v1.nn.bidirectional_dynamic_rnn else: diff --git a/tests/test_lstm.py b/tests/test_lstm.py index 3f9d41e5b..675298c44 100644 --- a/tests/test_lstm.py +++ b/tests/test_lstm.py @@ -21,9 +21,12 @@ if is_tf2(): # There is no LSTMBlockCell in tf-2.x - BasicLSTMCell = tf.compat.v1.nn.rnn_cell.BasicLSTMCell - LSTMCell = tf.compat.v1.nn.rnn_cell.LSTMCell - MultiRNNCell = tf.compat.v1.nn.rnn_cell.MultiRNNCell + try: + BasicLSTMCell = getattr(tf.compat.v1.nn.rnn_cell, "BasicLSTMCell", None) + LSTMCell = getattr(tf.compat.v1.nn.rnn_cell, "LSTMCell", None) + MultiRNNCell = getattr(tf.compat.v1.nn.rnn_cell, "MultiRNNCell", None) + except ImportError: + pass dynamic_rnn = tf.compat.v1.nn.dynamic_rnn bidirectional_dynamic_rnn = tf.compat.v1.nn.bidirectional_dynamic_rnn else: diff --git a/tests/test_lstmblock.py b/tests/test_lstmblock.py index d44e16ebd..0b8c9f8e1 100644 --- a/tests/test_lstmblock.py +++ b/tests/test_lstmblock.py @@ -16,9 +16,7 @@ if is_tf2(): # There is no LSTMBlockCell in tf-2.x - MultiRNNCell = tf.compat.v1.nn.rnn_cell.MultiRNNCell - dynamic_rnn = tf.compat.v1.nn.dynamic_rnn - bidirectional_dynamic_rnn = tf.compat.v1.nn.bidirectional_dynamic_rnn + pass else: LSTMBlockCell = tf.contrib.rnn.LSTMBlockCell MultiRNNCell = tf.contrib.rnn.MultiRNNCell diff --git a/tests/test_seq2seq.py b/tests/test_seq2seq.py index 4a6963467..51e1255be 100644 --- a/tests/test_seq2seq.py +++ b/tests/test_seq2seq.py @@ -13,13 +13,7 @@ # pylint: disable=invalid-name if is_tf2(): - BasicLSTMCell = tf.compat.v1.nn.rnn_cell.BasicLSTMCell - LSTMCell = tf.compat.v1.nn.rnn_cell.LSTMCell - RNNCell = tf.compat.v1.nn.rnn_cell.RNNCell - MultiRNNCell = tf.compat.v1.nn.rnn_cell.MultiRNNCell - dynamic_rnn = tf.compat.v1.nn.dynamic_rnn - bidirectional_dynamic_rnn = tf.compat.v1.nn.bidirectional_dynamic_rnn - LSTMStateTuple = tf.compat.v1.nn.rnn_cell.LSTMStateTuple + pass else: LSTMCell = tf.contrib.rnn.LSTMCell 
LSTMBlockCell = tf.contrib.rnn.LSTMBlockCell diff --git a/tests/test_stacked_lstm.py b/tests/test_stacked_lstm.py index 2cfef9bb1..2b3f877bc 100644 --- a/tests/test_stacked_lstm.py +++ b/tests/test_stacked_lstm.py @@ -16,8 +16,11 @@ # pylint: disable=invalid-name if is_tf2(): - LSTMCell = tf.compat.v1.nn.rnn_cell.LSTMCell - MultiRNNCell = tf.compat.v1.nn.rnn_cell.MultiRNNCell + try: + LSTMCell = getattr(tf.compat.v1.nn.rnn_cell, "LSTMCell", None) + MultiRNNCell = getattr(tf.compat.v1.nn.rnn_cell, "MultiRNNCell", None) + except ImportError: + pass dynamic_rnn = tf.compat.v1.nn.dynamic_rnn else: LSTMCell = tf.contrib.rnn.LSTMCell diff --git a/tests/utils/setup_test_env.sh b/tests/utils/setup_test_env.sh index a14828d05..0c643d854 100755 --- a/tests/utils/setup_test_env.sh +++ b/tests/utils/setup_test_env.sh @@ -16,17 +16,22 @@ echo "==== ONNXRuntime version: $ORT_VERSION" echo "==== ONNX version: $ONNX_VERSION" pip install pytest pytest-cov pytest-runner coverage graphviz requests pyyaml pillow pandas parameterized sympy coloredlogs flatbuffers timeout-decorator -pip install onnx==$ONNX_VERSION -pip install onnxruntime==$ORT_VERSION -pip install "numpy<2" - -pip install onnxruntime-extensions -pip install "tensorflow-text<=$TF_VERSION" - -pip uninstall -y tensorflow -pip install tensorflow==$TF_VERSION -pip uninstall -y protobuf -pip install "protobuf~=3.20" +pip uninstall -y tensorflow protobuf h5py +pip install onnx==$ONNX_VERSION onnxruntime==$ORT_VERSION onnxruntime-extensions + +if [[ $TF_VERSION == 1.* ]]; then + echo "-- install-3 TF1-KERAS $TF_VERSION" + pip install numpy==1.19.0 tensorflow==$TF_VERSION protobuf keras h5py +else + pip uninstall -y protobuf + if [[ "$TF_VERSION" != "2.13.0" && "$TF_VERSION" != "2.9.0" ]]; then + echo "-- install-3 TF-KERAS $TF_VERSION" + pip install tensorflow==$TF_VERSION tf_keras==$TF_VERSION tensorflow-text + else + echo "-- install-3 TF $TF_VERSION" + pip install tensorflow-text tensorflow==$TF_VERSION protobuf + fi +fi 
python setup.py install diff --git a/tf2onnx/convert.py b/tf2onnx/convert.py index 6ee66c096..3ee9d4640 100644 --- a/tf2onnx/convert.py +++ b/tf2onnx/convert.py @@ -328,7 +328,8 @@ def _rename_duplicate_keras_model_names(model): IMPORTANT: model may be edited. Assign model.output_names to old_out_names to restore. """ old_out_names = None - if model.output_names and len(set(model.output_names)) != len(model.output_names): + if hasattr(model, "output_names") and model.output_names \ + and len(set(model.output_names)) != len(model.output_names): # In very rare cases, keras has a bug where it will give multiple outputs the same name # We must edit the model or the TF trace will fail old_out_names = model.output_names @@ -446,7 +447,7 @@ def from_keras(model, input_signature=None, opset=None, custom_ops=None, custom_ function = _saving_utils.trace_model_call(model, input_signature) try: concrete_func = function.get_concrete_function() - except TypeError as e: + except (TypeError, AttributeError) as e: # Legacy keras models don't accept the training arg tf provides so we hack around it if "got an unexpected keyword argument 'training'" not in str(e): raise e diff --git a/tf2onnx/onnx_opset/nn.py b/tf2onnx/onnx_opset/nn.py index a06f02cec..1d10bbd66 100644 --- a/tf2onnx/onnx_opset/nn.py +++ b/tf2onnx/onnx_opset/nn.py @@ -1793,6 +1793,7 @@ def version_11(cls, ctx, node, **kwargs): node.type = "Identity" ctx.replace_inputs(node, [data]) return + cond = None if len(conditions) == 1: cond = conditions[0] if len(conditions) == 2: diff --git a/tf2onnx/tf_utils.py b/tf2onnx/tf_utils.py index 16cb76344..d23957fbd 100644 --- a/tf2onnx/tf_utils.py +++ b/tf2onnx/tf_utils.py @@ -351,9 +351,9 @@ def read_tf_node_def_attrs(node_def, input_dtypes, input_shapes): # ignore the following attributes TF_IGNORED_NODE_ATTRS = { "T", "unknown_rank", "_class", "Tshape", "use_cudnn_on_gpu", "Index", "Tpaddings", - "TI", "Tparams", "Tindices", "Tlen", "Tdim", "Tin", "dynamic_size", "Tmultiples", 
+ "TI", "Tparams", "Tindices", "Tlen", "Tdim", "dynamic_size", "Tmultiples", "Tblock_shape", "Tcrops", "index_type", "Taxis", "U", "maxval", - "Tout", "Tlabels", "Tindex", "element_shape", "Targmax", "Tperm", "Tcond", + "Tlabels", "Tindex", "element_shape", "Targmax", "Tperm", "Tcond", "T_threshold", "shape_type", "_lower_using_switch_merge", "parallel_iterations", "_num_original_outputs", "output_types", "output_shapes", "key_dtype", "value_dtype", "Tin", "Tout", "capacity", "component_types", "shapes", diff --git a/tf2onnx/tfonnx.py b/tf2onnx/tfonnx.py index c2c881e77..a5e5c6927 100644 --- a/tf2onnx/tfonnx.py +++ b/tf2onnx/tfonnx.py @@ -74,7 +74,7 @@ def rewrite_constant_fold(g, ops): func_map = { "Add": np.add, "GreaterEqual": np.greater_equal, - "Cast": np.cast, + "Cast": lambda x, dtype: x.astype(dtype), "ConcatV2": np.concatenate, "Less": np.less, "ListDiff": np.setdiff1d, diff --git a/tf2onnx/utils.py b/tf2onnx/utils.py index 7f2f53daa..f6749eb7a 100644 --- a/tf2onnx/utils.py +++ b/tf2onnx/utils.py @@ -38,6 +38,7 @@ onnx_pb.TensorProto.FLOAT: np.float32, onnx_pb.TensorProto.FLOAT16: np.float16, onnx_pb.TensorProto.DOUBLE: np.float64, + onnx_pb.TensorProto.INT64: np.int64, onnx_pb.TensorProto.INT32: np.int32, onnx_pb.TensorProto.INT16: np.int16, onnx_pb.TensorProto.INT8: np.int8, @@ -45,8 +46,6 @@ onnx_pb.TensorProto.UINT16: np.uint16, onnx_pb.TensorProto.UINT32: np.uint32, onnx_pb.TensorProto.UINT64: np.uint64, - onnx_pb.TensorProto.INT64: np.int64, - onnx_pb.TensorProto.UINT64: np.uint64, onnx_pb.TensorProto.BOOL: bool, onnx_pb.TensorProto.COMPLEX64: np.complex64, onnx_pb.TensorProto.COMPLEX128: np.complex128, diff --git a/tools/pylintrc b/tools/pylintrc index 955e35304..645cf5c29 100644 --- a/tools/pylintrc +++ b/tools/pylintrc @@ -8,9 +8,6 @@ # pygtk.require(). #init-hook= -# Profiled execution. -profile=no - # Add files or directories to the blacklist. They should be base names, not # paths. 
ignore=CVS @@ -39,11 +36,7 @@ enable=indexing-exception,old-raise-syntax # --enable=similarities". If you want to run only the classes checker, but have # no Warning level messages displayed, use"--disable=all --enable=classes # --disable=W" -disable=design,similarities,no-self-use,attribute-defined-outside-init,locally-disabled,star-args,pointless-except,bad-option-value,global-statement,fixme,suppressed-message,useless-suppression,locally-enabled,no-member,no-name-in-module,import-error,unsubscriptable-object,unbalanced-tuple-unpacking,undefined-variable,not-context-manager,useless-object-inheritance - - -# Set the cache size for astng objects. -cache-size=500 +disable=design,similarities,no-self-use,attribute-defined-outside-init,locally-disabled,star-args,pointless-except,bad-option-value,global-statement,fixme,suppressed-message,useless-suppression,locally-enabled,no-member,no-name-in-module,import-error,unsubscriptable-object,unbalanced-tuple-unpacking,undefined-variable,not-context-manager,useless-object-inheritance,consider-using-f-string,unspecified-encoding,unnecessary-lambda-assignment,use-dict-literal,consider-using-with,wrong-import-order,consider-iterating-dictionary,use-maxsplit-arg,possibly-used-before-assignment,super-with-arguments,redundant-u-string-prefix,use-list-literal,f-string-without-interpolation,condition-evals-to-constant,global-variable-not-assigned,consider-using-enumerate,unused-argument,superfluous-parens,use-sequence-for-iteration,useless-return,superfluous-parens,use-a-generator,raise-missing-from,overgeneral-exceptions,condition-evals-to-constant,consider-using-generator,use-yield-from,consider-using-min-builtin,overgeneral-exceptions [REPORTS] @@ -53,11 +46,6 @@ cache-size=500 # mypackage.mymodule.MyReporterClass. output-format=text -# Put messages in a separate file for each module / package specified on the -# command line instead of printing them on stdout. 
Reports (if any) will be -# written in a file name "pylint_global.[txt|html]". -files-output=no - # Tells whether to display a full report or only the messages reports=no @@ -68,10 +56,6 @@ reports=no # (RP0004). evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) -# Add a comment according to your evaluation note. This is used by the global -# evaluation report (RP0004). -comment=no - # Template used to display messages. This is a python new-style format string # used to format the message information. See doc for all details #msg-template= @@ -87,10 +71,6 @@ ignore-mixin-members=yes # (useful for classes with attributes dynamically set). ignored-classes=SQLObject -# When zope mode is activated, add a predefined set of Zope acquired attributes -# to generated-members. -zope=no - # List of members which are set dynamically and missed by pylint inference # system, and so shouldn't trigger E0201 when accessed. Python regular # expressions are accepted. @@ -117,17 +97,6 @@ additional-builtins= [BASIC] -# Required attributes for module, separated by a comma -required-attributes= - -# List of builtins function names that should not be used, separated by a comma -bad-functions=apply,input,reduce - - -# Disable the report(s) with the given id(s). -# All non-Google reports are disabled by default. -disable-report=R0001,R0002,R0003,R0004,R0101,R0102,R0201,R0202,R0220,R0401,R0402,R0701,R0801,R0901,R0902,R0903,R0904,R0911,R0912,R0913,R0914,R0915,R0921,R0922,R0923 - # Regular expression which should only match correct module names module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ @@ -187,9 +156,6 @@ ignore-long-lines=^\s*(# )??$ # else. single-line-if-stmt=y -# List of optional constructs for which whitespace checking is disabled -no-space-check= - # Maximum number of lines in a module max-module-lines=99999 @@ -239,10 +205,6 @@ int-import-graph= [CLASSES] -# List of interface methods to ignore, separated by a comma. 
This is used for -# instance to not check methods defines in Zope's Interface base class. -ignore-iface-methods=isImplementedBy,deferred,extends,names,namesAndDescriptions,queryDescriptionFor,getBases,getDescriptionFor,getDoc,getName,getTaggedValue,getTaggedValueTags,isEqualOrExtendedBy,setTaggedValue,isImplementedByInstancesOf,adaptWith,is_implemented_by - # List of method names used to declare (i.e. assign) instance attributes. defining-attr-methods=__init__,__new__,setUp @@ -286,33 +248,6 @@ min-public-methods=2 # Maximum number of public methods for a class (see R0904). max-public-methods=20 - -[EXCEPTIONS] - -# Exceptions that will emit a warning when being caught. Defaults to -# "Exception" -overgeneral-exceptions=Exception,StandardError,BaseException - - -[AST] - -# Maximum line length for lambdas -short-func-length=1 - -# List of module members that should be marked as deprecated. -# All of the string functions are listed in 4.1.4 Deprecated string functions -# in the Python 2.4 docs. -deprecated-members=string.atof,string.atoi,string.atol,string.capitalize,string.expandtabs,string.find,string.rfind,string.index,string.rindex,string.count,string.lower,string.split,string.rsplit,string.splitfields,string.join,string.joinfields,string.lstrip,string.rstrip,string.strip,string.swapcase,string.translate,string.upper,string.ljust,string.rjust,string.center,string.zfill,string.replace,sys.exitfunc - - -[DOCSTRING] - -# List of exceptions that do not need to be mentioned in the Raises section of -# a docstring. -ignore-exceptions=AssertionError,NotImplementedError,StopIteration,TypeError - - - [TOKENS] # Number of spaces of indent required when the last token on the preceding line