diff --git a/.github/scripts/hostsetup.sh b/.github/scripts/hostsetup.sh index a136f61a43e..835c3e95c81 100755 --- a/.github/scripts/hostsetup.sh +++ b/.github/scripts/hostsetup.sh @@ -70,6 +70,8 @@ apt install -y \ g++-9 \ gcc-9 \ wget \ + openssl \ + libssl-dev \ libtbb-dev # installing the latest version of cmake diff --git a/.github/scripts/install_dependencies.sh b/.github/scripts/install_dependencies.sh index 383b237a89e..0d05ecdc7d1 100755 --- a/.github/scripts/install_dependencies.sh +++ b/.github/scripts/install_dependencies.sh @@ -2,40 +2,32 @@ sudo apt update +# Required packages specifically for the CI and not VTR in general. sudo apt install -y \ autoconf \ automake \ bash \ - bison \ binutils \ binutils-gold \ - build-essential \ capnproto \ exuberant-ctags \ curl \ doxygen \ - flex \ fontconfig \ gdb \ - git \ gperf \ libcairo2-dev \ libcapnp-dev \ - libgtk-3-dev \ libevent-dev \ libfontconfig1-dev \ liblist-moreutils-perl \ libncurses5-dev \ - libx11-dev \ libxft-dev \ libxml2-utils \ libxml++2.6-dev \ - libreadline-dev \ tcllib \ tcl8.6-dev \ - libffi-dev \ perl \ - pkg-config \ texinfo \ time \ valgrind \ @@ -54,9 +46,10 @@ sudo apt install -y \ clang-15 \ clang-16 \ clang-17 \ - clang-18 \ - clang-format-18 \ - libtbb-dev + clang-18 + +# Standard packages install script. +./install_apt_packages.sh pip install -r requirements.txt diff --git a/.github/scripts/install_jammy_dependencies.sh b/.github/scripts/install_jammy_dependencies.sh index aa6631f8a04..82fc6d587bd 100755 --- a/.github/scripts/install_jammy_dependencies.sh +++ b/.github/scripts/install_jammy_dependencies.sh @@ -2,40 +2,32 @@ sudo apt update +# Required packages specifically for the CI and not VTR in general. sudo apt install -y \ autoconf \ automake \ bash \ - bison \ binutils \ binutils-gold \ - build-essential \ capnproto \ exuberant-ctags \ curl \ doxygen \ - flex \ fontconfig \ gdb \ - git \ gperf \ libcairo2-dev \ libcapnp-dev \ - libgtk-3-dev \ libevent-dev \ libfontconfig1-dev \ liblist-moreutils-perl \ libncurses5-dev \ - libx11-dev \ libxft-dev \ libxml2-utils \ libxml++2.6-dev \ - libreadline-dev \ tcllib \ tcl8.6-dev \ - libffi-dev \ perl \ - pkg-config \ texinfo \ time \ valgrind \ @@ -50,9 +42,10 @@ sudo apt install -y \ g++-11 \ gcc-11 \ g++-12 \ - gcc-12 \ - clang-format-14 \ - libtbb-dev + gcc-12 + +# Standard packages install script. +./install_apt_packages.sh pip install -r requirements.txt diff --git a/.github/workflows/nightly_test_manual.yml b/.github/workflows/nightly_test_manual.yml index f98f412eb8c..0ad4ce31e52 100644 --- a/.github/workflows/nightly_test_manual.yml +++ b/.github/workflows/nightly_test_manual.yml @@ -104,3 +104,13 @@ jobs: run: | source .venv/bin/activate ./run_reg_test.py -j12 vtr_reg_nightly_test7 + + - name: Upload regression results + if: success() || failure() + uses: actions/upload-artifact@v4 + with: + name: nightly_test_results + path: | + vtr_flow/**/*.log + vtr_flow/**/vpr.out + vtr_flow/**/parse_results*.txt diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index 5f6e9fd9088..fb4a487b2e8 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -10,6 +10,11 @@ jobs: steps: - uses: actions/stale@v9 with: + # Set default number of days before being marked stale to 100 years + # This will be overriden by "days-before-issue-stale" and "days-before-pr-stale" + # This is done to avoid marking PRs as stale, as it is not something + # we want to do. 
+ days-before-stale: 36500 # The message to be shown for stale issues stale-issue-message: 'This issue has been inactive for a year and has been marked as stale. It will be closed in 15 days if it continues to be stale. If you believe this is still an issue, please add a comment.' close-issue-message: 'This issue has been marked stale for 15 days and has been automatically closed.' @@ -20,6 +25,11 @@ jobs: # Start from the oldest issues ascending: true + # Upper limit for number of API calls per day + # This worklfow does 2-3 API calls per issue + # including issues that have been marked stale + operations-per-run: 300 + # The configuration below can be used to allow the same behaviour with PRs. # Since we currently don't want to close old PRs, it is commented out but # left here in case we change our mind. diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 5977e1221ba..fc80c9d523e 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -99,7 +99,26 @@ jobs: run: ./dev/${{ matrix.script }} - UniTests: + VerifyTestSuites: + runs-on: ubuntu-24.04 + name: 'Verify Test Suites' + steps: + + - uses: actions/setup-python@v5 + with: + python-version: 3.12.3 + + - uses: actions/checkout@v4 + # NOTE: We do not need sub-modules. This only verifies the tests, does not run them. + + - name: 'Run test suite verification' + run: | + ./dev/vtr_test_suite_verifier/verify_test_suites.py \ + -vtr_regression_tests_dir vtr_flow/tasks/regression_tests \ + -test_suite_info dev/vtr_test_suite_verifier/test_suites_info.json + + + UnitTests: name: 'U: C++ Unit Tests' runs-on: ubuntu-24.04 steps: @@ -125,36 +144,90 @@ jobs: run: ./.github/scripts/unittest.sh - Warnings: - name: 'W: Check Compilation Warnings' + # This test builds different variations of VTR (with different CMake Params) + # and ensures that they can run the basic regression tests. This also ensures + # that these build variations are warning clean. + BuildVariations: runs-on: ubuntu-24.04 + name: 'B: Build Variations' + env: + # For the CI, we want all build variations to be warning clean. + # NOTE: Need to turn IPO off due to false warnings being produced. 
+ COMMON_CMAKE_PARAMS: '-DCMAKE_COMPILE_WARNING_AS_ERROR=on -DVTR_IPO_BUILD=off' steps: - uses: actions/setup-python@v5 with: python-version: 3.12.3 + - uses: actions/checkout@v4 with: submodules: 'true' - - name: Get number of CPU cores + - name: 'Get number of CPU cores' uses: SimenB/github-actions-cpu-cores@v2 id: cpu-cores - - name: Install dependencies + - name: 'Install dependencies' run: ./.github/scripts/install_dependencies.sh - - uses: hendrikmuhs/ccache-action@v1.2 + - name: 'ccache' + uses: hendrikmuhs/ccache-action@v1.2 - - name: Test + - name: 'Test with VTR_ASSERT_LEVEL 4' + if: success() || failure() env: - #In order to get compilation warnings produced per source file, we must do a non-IPO build - #We also turn warnings into errors for this target by doing a strict compile - CMAKE_PARAMS: "-DVTR_ASSERT_LEVEL=3 -DWITH_BLIFEXPLORER=on -DVTR_ENABLE_STRICT_COMPILE=on -DVTR_IPO_BUILD=off" + CMAKE_PARAMS: "${{ env.COMMON_CMAKE_PARAMS }} -DVTR_ASSERT_LEVEL=4" NUM_PROC: ${{ steps.cpu-cores.outputs.count }} run: | + rm -f build/CMakeCache.txt export PATH="/usr/lib/ccache:/usr/local/opt/ccache/libexec:$PATH" - ./.github/scripts/build.sh + make -j${{ steps.cpu-cores.outputs.count}} + ./run_reg_test.py vtr_reg_basic -show_failures -j${{ steps.cpu-cores.outputs.count}} + + - name: 'Test with NO_GRAPHICS' + if: success() || failure() + env: + CMAKE_PARAMS: "${{ env.COMMON_CMAKE_PARAMS }} -DVPR_USE_EZGL=off" + NUM_PROC: ${{ steps.cpu-cores.outputs.count }} + run: | + rm -f build/CMakeCache.txt + export PATH="/usr/lib/ccache:/usr/local/opt/ccache/libexec:$PATH" + make -j${{ steps.cpu-cores.outputs.count}} + ./run_reg_test.py vtr_reg_basic -show_failures -j${{ steps.cpu-cores.outputs.count}} + + - name: 'Test with NO_SERVER' + if: success() || failure() + env: + CMAKE_PARAMS: "${{ env.COMMON_CMAKE_PARAMS }} -DVPR_USE_SERVER=off" + NUM_PROC: ${{ steps.cpu-cores.outputs.count }} + run: | + rm -f build/CMakeCache.txt + export PATH="/usr/lib/ccache:/usr/local/opt/ccache/libexec:$PATH" + make -j${{ steps.cpu-cores.outputs.count}} + ./run_reg_test.py vtr_reg_basic -show_failures -j${{ steps.cpu-cores.outputs.count}} + + - name: 'Test with CAPNPROTO disabled' + if: success() || failure() + env: + CMAKE_PARAMS: "${{ env.COMMON_CMAKE_PARAMS }} -DVTR_ENABLE_CAPNPROTO=off" + NUM_PROC: ${{ steps.cpu-cores.outputs.count }} + run: | + rm -f build/CMakeCache.txt + export PATH="/usr/lib/ccache:/usr/local/opt/ccache/libexec:$PATH" + make -j${{ steps.cpu-cores.outputs.count}} + ./run_reg_test.py vtr_reg_basic -show_failures -j${{ steps.cpu-cores.outputs.count}} + + - name: 'Test with serial VPR_EXECUTION_ENGINE' + if: success() || failure() + env: + CMAKE_PARAMS: "${{ env.COMMON_CMAKE_PARAMS }} -DVPR_EXECUTION_ENGINE=serial -DTATUM_EXECUTION_ENGINE=serial" + NUM_PROC: ${{ steps.cpu-cores.outputs.count }} + run: | + rm -f build/CMakeCache.txt + export PATH="/usr/lib/ccache:/usr/local/opt/ccache/libexec:$PATH" + make -j${{ steps.cpu-cores.outputs.count}} + ./run_reg_test.py vtr_reg_basic -show_failures -j${{ steps.cpu-cores.outputs.count}} Regression: @@ -169,36 +242,12 @@ jobs: suite: 'vtr_reg_basic', extra_pkgs: "" }, - { - name: 'Basic with highest assertion level', - params: '-DCMAKE_COMPILE_WARNING_AS_ERROR=on -DVTR_IPO_BUILD=off -DVTR_ASSERT_LEVEL=4 -DWITH_BLIFEXPLORER=on', - suite: 'vtr_reg_basic', - extra_pkgs: "" - }, { name: 'Basic_odin', params: '-DCMAKE_COMPILE_WARNING_AS_ERROR=on -DVTR_IPO_BUILD=off -DVTR_ASSERT_LEVEL=3 -DWITH_BLIFEXPLORER=on -DWITH_PARMYS=OFF -DWITH_ODIN=on', suite: 
'vtr_reg_basic_odin', extra_pkgs: "" }, - { - name: 'Basic with NO_GRAPHICS', - params: '-DCMAKE_COMPILE_WARNING_AS_ERROR=on -DVTR_IPO_BUILD=off -DVTR_ASSERT_LEVEL=3 -DWITH_BLIFEXPLORER=on -DVPR_USE_EZGL=off', - suite: 'vtr_reg_basic', - extra_pkgs: "" - }, - { - name: 'Basic with NO_SERVER', - params: '-DVTR_ASSERT_LEVEL=3 -DWITH_BLIFEXPLORER=on -DVPR_USE_EZGL=on -DVPR_USE_SERVER=off', - suite: 'vtr_reg_basic', - extra_pkgs: "" - }, - { - name: 'Basic with CAPNPROTO disabled', - params: '-DCMAKE_COMPILE_WARNING_AS_ERROR=on -DVTR_IPO_BUILD=off -DVTR_ASSERT_LEVEL=3 -DWITH_BLIFEXPLORER=on -DVTR_ENABLE_CAPNPROTO=off', - suite: 'vtr_reg_basic', - extra_pkgs: "" - }, { name: 'Basic with VTR_ENABLE_DEBUG_LOGGING', params: '-DCMAKE_COMPILE_WARNING_AS_ERROR=on -DVTR_IPO_BUILD=off -DVTR_ASSERT_LEVEL=3 -DWITH_BLIFEXPLORER=on -DVTR_ENABLE_DEBUG_LOGGING=on', @@ -510,8 +559,9 @@ jobs: needs: - Build - Format - - UniTests - - Warnings + - VerifyTestSuites + - UnitTests + - BuildVariations - Regression - Sanitized - Parmys diff --git a/.gitmodules b/.gitmodules index 8a35f2bff8c..90a89248c2a 100644 --- a/.gitmodules +++ b/.gitmodules @@ -6,3 +6,7 @@ [submodule "libs/EXTERNAL/sockpp"] path = libs/EXTERNAL/sockpp url = https://github.com/w0lek/sockpp.git + +[submodule "libs/EXTERNAL/libezgl"] + path = libs/EXTERNAL/libezgl + url = https://github.com/verilog-to-routing/ezgl.git diff --git a/.readthedocs.yaml b/.readthedocs.yaml index 884a834f088..a0652f749e0 100644 --- a/.readthedocs.yaml +++ b/.readthedocs.yaml @@ -19,6 +19,10 @@ build: tools: python: "3.11" +submodules: + include: all + python: install: - requirements: doc/requirements.txt + - requirements: requirements.txt diff --git a/CMakeLists.txt b/CMakeLists.txt index 5a1a2150b84..34504cd898f 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -26,11 +26,11 @@ set_property(CACHE VTR_IPO_BUILD PROPERTY STRINGS auto on off) set(VTR_ASSERT_LEVEL "2" CACHE STRING "VTR assertion checking level. 0: no assertions, 1: fast assertions, 2: regular assertions, 3: additional assertions with noticeable run-time overhead, 4: all assertions (including those with significant run-time cost)") set_property(CACHE VTR_ASSERT_LEVEL PROPERTY STRINGS 0 1 2 3 4) -option(VTR_ENABLE_STRICT_COMPILE "Specifies whether compiler warnings should be treated as errors (e.g. -Werror)" OFF) option(VTR_ENABLE_SANITIZE "Enable address/leak/undefined-behaviour sanitizers (i.e. run-time error checking)" OFF) option(VTR_ENABLE_PROFILING "Enable performance profiler (gprof)" OFF) option(VTR_ENABLE_COVERAGE "Enable code coverage tracking (gcov)" OFF) option(VTR_ENABLE_DEBUG_LOGGING "Enable debug logging" OFF) +option(VTR_ENABLE_VERSION "Enable version number up-to-date during compilation" ON) option(VTR_ENABLE_VERBOSE "Enable increased debug verbosity" OFF) option(SPEC_CPU "Enable SPEC CPU v8 support" OFF) @@ -42,9 +42,6 @@ option(VTR_ENABLE_CAPNPROTO "Enable capnproto binary serialization support in VP #Allow the user to decide whether to compile the server module option(VPR_USE_SERVER "Specify whether vpr enables the server mode" ON) -#Allow the user to enable/disable VPR analytic placement -#VPR option --enable_analytic_placer is also required for Analytic Placement -option(VPR_ANALYTIC_PLACE "Enable analytic placement in VPR." ON) option(VPR_ENABLE_INTERCHANGE "Enable FPGA interchange." ON) option(VPR_ENABLE_NOC_SAT_ROUTING "Enable NoC SAT routing." 
OFF) diff --git a/Dockerfile b/Dockerfile index 2b36ac5c5e5..1d25efe0304 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,25 +1,25 @@ -FROM ubuntu:22.04 +FROM ubuntu:24.04 ARG DEBIAN_FRONTEND=noninteractive # set out workspace ENV WORKSPACE=/workspace RUN mkdir -p ${WORKSPACE} WORKDIR ${WORKSPACE} COPY . ${WORKSPACE} +# Required to bypass Python's protection on system-wide package installations in Ubuntu 23.04+. +# This allows pip to install packages globally without using a virtual environment. +ENV PIP_BREAK_SYSTEM_PACKAGES=1 # Install and cleanup is done in one command to minimize the build cache size RUN apt-get update -qq \ # Extract package names from install_apt_packages.sh - && sed '/sudo/d' install_apt_packages.sh | sed '/#/d' | sed 's/ \\//g' | sed '/^$/d' | sed '/^[[:space:]]*$/d' \ + && sed '/sudo/d' install_apt_packages.sh | sed '/#/d' | sed '/if\s.*then$/d' | sed '/else$/d' | sed '/fi$/d' | sed '/echo\s/d' | sed 's/ \\//g' | sed '/^$/d' | sed '/^[[:space:]]*$/d' | sed 's/\s//g' \ # Install packages | xargs apt-get -y install --no-install-recommends \ # Additional packages not listed in install_apt_packages.sh && apt-get -y install --no-install-recommends \ wget \ ninja-build \ - default-jre \ libeigen3-dev \ - libtbb-dev \ python3-pip \ - git \ time \ # Install python packages && pip install -r requirements.txt \ @@ -29,4 +29,4 @@ RUN apt-get update -qq \ # Build VTR RUN rm -rf build && make -j$(nproc) && make install # Container's default launch command -SHELL ["/bin/bash", "-c"] \ No newline at end of file +SHELL ["/bin/bash", "-c"] diff --git a/Makefile b/Makefile index 109288f0344..78b5257dc0f 100644 --- a/Makefile +++ b/Makefile @@ -1,4 +1,4 @@ -#This is a simple wrapper which hides cmake (for convenience, and from non-expert end users). +# This is a simple wrapper which hides cmake (for convenience, and from non-expert end users). # # It supports the targets: # 'make' - builds everything (all libaries/executables) @@ -15,12 +15,14 @@ # # 'make BUILD_TYPE=debug VERBOSE=1' -#Default build type -# Possible values: -# release_pgo #Perform a 2-stage build with profile-guided compiler optimization -# release #Build with compiler optimization -# debug #Build with debug info and no compiler optimization -# strict #Build VPR with warnings treated as errors +# Build type +# Possible values (not case sensitive): +# release #Build with compiler optimization (Default) +# RelWithDebInfo #Build with debug info and compiler optimizations +# debug #Build with debug info and no compiler optimization +# Possible suffixes: +# _pgo #Perform a 2-stage build with profile-guided compiler optimization +# _strict #Build VPR with warnings treated as errors BUILD_TYPE ?= release #Debugging verbosity enable @@ -40,7 +42,7 @@ override CMAKE_PARAMS := -DCMAKE_BUILD_TYPE=$(CMAKE_BUILD_TYPE) -G 'Unix Makefil #Are we doing a strict (i.e. warnings as errors) build? 
ifneq (,$(findstring strict,$(BUILD_TYPE))) #Configure for strict build with VPR warning treated as errors -override CMAKE_PARAMS := -DVTR_ENABLE_STRICT_COMPILE=on ${CMAKE_PARAMS} +override CMAKE_PARAMS := -DCMAKE_COMPILE_WARNING_AS_ERROR=on ${CMAKE_PARAMS} endif #Strict build type #Enable verbosity diff --git a/README.developers.md b/README.developers.md index 866f8ca1dac..d2d0d37af39 100644 --- a/README.developers.md +++ b/README.developers.md @@ -179,6 +179,29 @@ For large scale reformatting (should only be performed by VTR maintainers) the s Python files are automatically checked using `pylint` to ensure they follow established Python conventions. You can run `pylint` on the entire repository by running `./dev/pylint_check.py`. Certain files which were created before we adopted Python lint checking are grandfathered and are not checked. To check *all* files, provide the `--check_grandfathered` argument. You can also manually check individual files using `./dev/pylint_check.py ...`. +# Sanitizing Includes + +You can use include-what-you-use or the clangd language server to make sure includes are correct and you don't have missing or unused includes. + +## include-what-you-use + +First, install include-what-you-use. Ubuntu/Debian users can run `sudo apt install iwyu` and Fedora/RHEL users can run `sudo dnf install iwyu`. You can then compile VTR with include-what-you-use enabled to get diagnostic messages about includes in all files with the following command: + +``` +make CMAKE_PARAMS="-DCMAKE_CXX_INCLUDE_WHAT_YOU_USE=include-what-you-use" +``` + +Note that this method checks all source files and the diagnostic messages can be very long. + +## clangd language server + +Alternatively, if your editor supports clangd, you can use it to get diagnostic messages for the specific file you are working with. Visual Studio Code users can use the clangd extension to use clangd instead of Microsoft's C/C++ extension. To enable include diagnostics, create a file named `.clangd` in VTR root directory and add the following lines to it: +``` +Diagnostics: + UnusedIncludes: Strict + MissingIncludes: Strict +``` + # Running Tests VTR has a variety of tests which are used to check for correctness, performance and Quality of Result (QoR). @@ -1108,11 +1131,17 @@ All tests passed (1 assertion in 1 test case) VTR has support for several additional tools/features to aid debugging. ## Basic -To build vpr with make in debug mode, simply add `BUILD_TYPE=debug` at the end of your make command. +To build a tool with make in debug mode, simply add `BUILD_TYPE=debug` at the end of your make command. For example, to build all tools in debug mode use: ```shell -$ make vpr BUILD_TYPE=debug +$ make BUILD_TYPE=debug ``` +You can also enable additional (verbose) output from some tools. To build vpr with both debug information and additional output, use: +```shell +$ make vpr BUILD_TYPE=debug VERBOSE=1 +``` + + ## Sanitizers VTR can be compiled using *sanitizers* which will detect invalid memory accesses, memory leaks and undefined behaviour (supported by both GCC and LLVM): ```shell diff --git a/README.md b/README.md index 75ad2239398..da545289d0a 100644 --- a/README.md +++ b/README.md @@ -36,15 +36,15 @@ See the [full license](LICENSE.md) for details. ## How to Cite The following paper may be used as a general citation for VTR: -K. E. Murray, O. Petelin, S. Zhong, J. M. Wang, M. ElDafrawy, J.-P. Legault, E. Sha, A. G. Graham, J. Wu, M. J. P. Walker, H. Zeng, P. Patros, J. Luu, K. B. Kent and V. 
Betz "VTR 8: High Performance CAD and Customizable FPGA Architecture Modelling", ACM TRETS, 2020. +M. A. Elgammal, A. Mohaghegh, S. G. Shahrouz, F. Mahmoudi, F. Kosar, K. Talaei, J. Fife, D. Khadivi, K. Murray, A. Boutros, K. B. Kent, J. Goeders, and V. Betz "VTR 9: Open-Source CAD for Fabric and Beyond FPGA Architecture Exploration", ACM TRETS, 2025. [PDF](https://dl.acm.org/doi/epdf/10.1145/3734798) Bibtex: ``` -@article{vtr8, - title={VTR 8: High Performance CAD and Customizable FPGA Architecture Modelling}, - author={Murray, Kevin E. and Petelin, Oleg and Zhong, Sheng and Wang, Jai Min and ElDafrawy, Mohamed and Legault, Jean-Philippe and Sha, Eugene and Graham, Aaron G. and Wu, Jean and Walker, Matthew J. P. and Zeng, Hanqing and Patros, Panagiotis and Luu, Jason and Kent, Kenneth B. and Betz, Vaughn}, +@article{vtr9, + title={VTR 9: Open-Source CAD for Fabric and Beyond FPGA Architecture Exploration}, + author={Elgammal, Mohamed A. and Mohaghegh, Amin and Shahrouz, Soheil G. and Mahmoudi, Fatemehsadat and Kosar, Fahrican and Talaei, Kimia and Fife, Joshua and Khadivi, Daniel and Murray, Kevin and Boutros, Andrew and Kent, Kenneth B. and Goeders, Jeff and Betz, Vaughn}, journal={ACM Trans. Reconfigurable Technol. Syst.}, - year={2020} + year={2025} } ``` diff --git a/dev/subtree_config.xml b/dev/subtree_config.xml index d2e97bcbd9e..07357619081 100644 --- a/dev/subtree_config.xml +++ b/dev/subtree_config.xml @@ -24,11 +24,6 @@ internal_path="libs/EXTERNAL/libtatum" external_url="https://github.com/verilog-to-routing/tatum.git" default_external_ref="master"/> - List[TestSuite]: + """ + Parses the given test_suite_info file. The test suite info file is expected + to be a JSON file which contains information on which test suites in the + regression tests to verify and if any of the tasks should be ignored. + + The JSON should have the following form: + {"test_suites": [ + { + "name": "", + "ignored_tasks": [ + "", + ... + ] + }, + { + ... + } + ]} + """ + with open(test_suite_info_file, "r") as file: + data = json.load(file) + + assert isinstance(data, dict), "Test suite info should be a dictionary" + assert "test_suites" in data, "A list of test suites must be provided" + + test_suites = [] + for test_suite in data["test_suites"]: + assert isinstance(test_suite, dict), "Test suite should be a dictionary" + assert "name" in test_suite, "All test suites must have names" + assert "ignored_tasks" in test_suite, "All test suite must have an ignored task list" + + test_suites.append( + TestSuite( + name=test_suite["name"], + ignored_tasks=test_suite["ignored_tasks"], + ) + ) + + return test_suites + + +def parse_task_list(task_list_file: str) -> Set[str]: + """ + Parses the given task_list file and returns a list of the tasks within + the task list. + """ + tasks = set() + with open(task_list_file, "r") as file: + for line in file: + # Strip the whitespace from the line. + line.strip() + # If this is a comment line, skip it. + if line[0] == "#": + continue + # Split the line. This is used in case there is a comment on the line. + split_line = line.split() + if split_line: + # If the line can be split (i.e. the line is not empty), add + # the first part of the line to the tasks list, stripping any + # trailing "/" characters. + tasks.add(split_line[0].rstrip("/")) + + return tasks + + +def get_expected_task_list(test_suite_dir: str, reg_tests_parent_dir: str) -> Set[str]: + """ + Get the expected task list by parsing the test suite directory and finding + all files that look like config files. 
+ """ + # Get all config files in the test suite. These will indicated where all + # the tasks are in the suite. + base_path = Path(test_suite_dir) + assert base_path.is_dir() + config_files = list(base_path.rglob("config.txt")) + + # Get a list of all the expected tasks in the task list + expected_task_list = set() + for config_file in config_files: + config_dir = os.path.dirname(config_file) + task_dir = os.path.dirname(config_dir) + # All tasks in the task list are relative to the parent of the regression + # tests directory. + expected_task_list.add(os.path.relpath(task_dir, reg_tests_parent_dir)) + + return expected_task_list + + +def verify_test_suite(test_suite: TestSuite, regression_tests_dir: str): + """ + Verifies the given test suite by looking into the regression tests directory + for the suite and ensures that all expected tasks are present in the suite's + task list. + + Returns the number of failures found in the test suite. + """ + # Check that the test suite exists in the regression tests directory + test_suite_dir = os.path.join(regression_tests_dir, test_suite.name) + if not os.path.exists(test_suite_dir): + print("\tError: Test suite not found in regression tests directory") + return 1 + + # Get the expected tasks list from the test suite directory. + reg_tests_parent_dir = os.path.dirname(regression_tests_dir.rstrip("/")) + expected_task_list = get_expected_task_list(test_suite_dir, reg_tests_parent_dir) + + # Get the task list file from the test suite and parse it to get the actual + # task list. + task_list_file = os.path.join(test_suite_dir, "task_list.txt") + if not os.path.exists(task_list_file): + print("\tError: Test suite does not have a root-level task list") + return 1 + actual_task_list = parse_task_list(task_list_file) + + # Keep track of the number of failures + num_failures = 0 + + # Process the ignored tests + ignored_tasks = set() + for ignored_task in test_suite.ignored_tasks: + # Ignored tasks are relative to the test directory, get their full path. + ignored_task_path = os.path.join(test_suite_dir, ignored_task) + # Check that the task exists. + if not os.path.exists(ignored_task_path): + print(f"\tError: Ignored task '{ignored_task}' not found in test suite") + num_failures += 1 + continue + # If the task exists, add it to the ignored tasks list relative to the + # reg test's parent directory so it can be compared properly. + ignored_tasks.add(os.path.relpath(ignored_task_path, reg_tests_parent_dir)) + + if len(ignored_tasks) > 0: + print(f"\tWarning: {len(ignored_tasks)} tasks were ignored") + + # Check for any missing tasks in the task list + for task in expected_task_list: + # If this task is ignored, it is expected to be missing. + if task in ignored_tasks: + continue + # If the task is not in the actual task list, this is an error. + if task not in actual_task_list: + print(f"\tError: Failed to find task '{task}' in task list!") + num_failures += 1 + + # Check for any tasks in the task list which should not be there + for task in actual_task_list: + # If a task is in the task list, but is not in the test directory, this + # is a failure. + if task not in expected_task_list: + print(f"\tError: Task '{task}' found in task list but not in test directory") + num_failures += 1 + # If a task is in the task list, but is marked as ignored, this must be + # a mistake. 
+ if task in ignored_tasks: + print(f"\tError: Task '{task}' found in task list but was marked as ignored") + + return num_failures + + +def verify_test_suites(): + """ + Verify the VTR test suites. + + Test suites are verified by checking the tasks within their test directory + and the tasks within the task list at the root of that directory and ensuring + that they match. If there are any tasks which appear in one but not the other, + an error is produced and this script will return an error code. + """ + # Set up the argument parser object. + parser = argparse.ArgumentParser(description="Verifies the test suites used in VTR.") + parser.add_argument( + "-vtr_regression_tests_dir", + type=str, + required=True, + help="The path to the vtr_flow/tasks/regression_tests directory in VTR.", + ) + parser.add_argument( + "-test_suite_info", + type=str, + required=True, + help="Information on the test suite (must be a JSON file).", + ) + + # Parse the arguments from the command line. + args = parser.parse_args() + + # Verify each of the test suites. + num_failures = 0 + test_suites = parse_test_suite_info(args.test_suite_info) + for test_suite in test_suites: + print(f"Verifying test suite: {test_suite.name}") + test_suite_failures = verify_test_suite(test_suite, args.vtr_regression_tests_dir) + print(f"\tTest suite had {test_suite_failures} failures\n") + num_failures += test_suite_failures + + # If any failures were found in any suite, return exit code 1. + if num_failures != 0: + print(f"Failure: Test suite verifcation failed with {num_failures} failures") + print(f"Please fix the failing test suites found in {args.vtr_regression_tests_dir}") + print(f"If necessary, update the test suites info found here: {args.test_suite_info}") + sys.exit(1) + + print(f"Success: All test suites in {args.test_suite_info} passed") + + +if __name__ == "__main__": + verify_test_suites() diff --git a/doc/_doxygen/ezgl.dox b/doc/_doxygen/ezgl.dox new file mode 100644 index 00000000000..b0d39c4bce9 --- /dev/null +++ b/doc/_doxygen/ezgl.dox @@ -0,0 +1,13 @@ +PROJECT_NAME = "Verilog to Routing - EZGL" +OUTPUT_DIRECTORY = ../_build/doxygen/ezgl +FULL_PATH_NAMES = NO +OPTIMIZE_OUTPUT_FOR_C = YES +EXTRACT_ALL = YES +EXTRACT_PRIVATE = YES +EXTRACT_STATIC = YES +WARN_IF_UNDOCUMENTED = NO +INPUT = ../../libs/EXTERNAL/libezgl +RECURSIVE = YES +GENERATE_HTML = NO +GENERATE_LATEX = NO +GENERATE_XML = YES diff --git a/doc/src/Images/VIB.png b/doc/src/Images/VIB.png new file mode 100644 index 00000000000..4d84eee75e6 Binary files /dev/null and b/doc/src/Images/VIB.png differ diff --git a/doc/src/Images/bent_wires.png b/doc/src/Images/bent_wires.png new file mode 100644 index 00000000000..99e236a29c2 Binary files /dev/null and b/doc/src/Images/bent_wires.png differ diff --git a/doc/src/Images/double-level.png b/doc/src/Images/double-level.png new file mode 100644 index 00000000000..3f26df480d9 Binary files /dev/null and b/doc/src/Images/double-level.png differ diff --git a/doc/src/Images/vib_example.png b/doc/src/Images/vib_example.png new file mode 100644 index 00000000000..20a5b7177bf Binary files /dev/null and b/doc/src/Images/vib_example.png differ diff --git a/doc/src/api/ezgl/application.rst b/doc/src/api/ezgl/application.rst new file mode 100644 index 00000000000..cd1ef7a223a --- /dev/null +++ b/doc/src/api/ezgl/application.rst @@ -0,0 +1,6 @@ +=========== +Application +=========== + +.. 
doxygenfile:: application.hpp + :project: ezgl diff --git a/doc/src/api/ezgl/callback.rst b/doc/src/api/ezgl/callback.rst new file mode 100644 index 00000000000..73b9b53692d --- /dev/null +++ b/doc/src/api/ezgl/callback.rst @@ -0,0 +1,56 @@ +========= +Callbacks +========= + +This module provides callback functions for handling keyboard and mouse input, as well as predefined button actions in EZGL applications. + +Input Event Callbacks +-------------------- + +.. doxygenfunction:: press_key + :project: ezgl + +.. doxygenfunction:: press_mouse + :project: ezgl + +.. doxygenfunction:: release_mouse + :project: ezgl + +.. doxygenfunction:: move_mouse + :project: ezgl + +.. doxygenfunction:: scroll_mouse + :project: ezgl + +Button Action Callbacks +---------------------- + +.. doxygenfunction:: press_zoom_fit + :project: ezgl + +.. doxygenfunction:: press_zoom_in + :project: ezgl + +.. doxygenfunction:: press_zoom_out + :project: ezgl + +Navigation Callbacks +------------------ + +.. doxygenfunction:: press_up + :project: ezgl + +.. doxygenfunction:: press_down + :project: ezgl + +.. doxygenfunction:: press_left + :project: ezgl + +.. doxygenfunction:: press_right + :project: ezgl + +Other Callbacks +------------- + +.. doxygenfunction:: press_proceed + :project: ezgl \ No newline at end of file diff --git a/doc/src/api/ezgl/camera.rst b/doc/src/api/ezgl/camera.rst new file mode 100644 index 00000000000..9a7b7b9108f --- /dev/null +++ b/doc/src/api/ezgl/camera.rst @@ -0,0 +1,5 @@ +====== +Camera +====== +.. doxygenfile:: camera.hpp + :project: ezgl diff --git a/doc/src/api/ezgl/canvas.rst b/doc/src/api/ezgl/canvas.rst new file mode 100644 index 00000000000..a19065e7497 --- /dev/null +++ b/doc/src/api/ezgl/canvas.rst @@ -0,0 +1,5 @@ +====== +Canvas +====== +.. doxygenfile:: canvas.hpp + :project: ezgl diff --git a/doc/src/api/ezgl/color.rst b/doc/src/api/ezgl/color.rst new file mode 100644 index 00000000000..ebb245e8639 --- /dev/null +++ b/doc/src/api/ezgl/color.rst @@ -0,0 +1,5 @@ +===== +Color +===== +.. doxygenfile:: color.hpp + :project: ezgl diff --git a/doc/src/api/ezgl/control.rst b/doc/src/api/ezgl/control.rst new file mode 100644 index 00000000000..5c8d6cce10e --- /dev/null +++ b/doc/src/api/ezgl/control.rst @@ -0,0 +1,41 @@ +======= +Control +======= + +Functions to manipulate what is visible on the EZGL canvas. These functions are used by EZGL's predefined buttons, but application code can also call them directly when needed. + +Zoom Functions +------------- + +.. doxygenfunction:: ezgl::zoom_in(canvas*, double) + :project: ezgl + +.. doxygenfunction:: ezgl::zoom_out(canvas*, double) + :project: ezgl + +.. doxygenfunction:: ezgl::zoom_in(canvas*, point2d, double) + :project: ezgl + +.. doxygenfunction:: ezgl::zoom_out(canvas*, point2d, double) + :project: ezgl + +.. doxygenfunction:: ezgl::zoom_fit + :project: ezgl + +Translation Functions +-------------------- + +.. doxygenfunction:: ezgl::translate + :project: ezgl + +.. doxygenfunction:: ezgl::translate_up + :project: ezgl + +.. doxygenfunction:: ezgl::translate_down + :project: ezgl + +.. doxygenfunction:: ezgl::translate_left + :project: ezgl + +.. doxygenfunction:: ezgl::translate_right + :project: ezgl \ No newline at end of file diff --git a/doc/src/api/ezgl/graphics.rst b/doc/src/api/ezgl/graphics.rst new file mode 100644 index 00000000000..d6425154c40 --- /dev/null +++ b/doc/src/api/ezgl/graphics.rst @@ -0,0 +1,5 @@ +======== +Graphics +======== +.. 
doxygenfile:: graphics.hpp + :project: ezgl diff --git a/doc/src/api/ezgl/index.rst b/doc/src/api/ezgl/index.rst new file mode 100644 index 00000000000..0429b42af1d --- /dev/null +++ b/doc/src/api/ezgl/index.rst @@ -0,0 +1,20 @@ +.. _ezgl: + +==== +EZGL +==== + +EZGL is a graphics layer on top of version 3.x of the GTK graphics library. It allows drawing in an arbitrary 2D world coordinate space (instead of in pixel coordinates), handles panning and zooming automatically, and provides easy-to-use functions for common tasks like setting up a window, setting graphics attributes (like colour and line style) and drawing primitives (like lines and polygons). Most of VPR's drawing is performed in ezgl, and GTK functionality not exposed by ezgl can still be accessed by directly calling the relevant gtk functions. + +.. toctree:: + :maxdepth: 1 + + application + callback + camera + canvas + color + control + graphics + point + rectangle \ No newline at end of file diff --git a/doc/src/api/ezgl/point.rst b/doc/src/api/ezgl/point.rst new file mode 100644 index 00000000000..d335dc3873b --- /dev/null +++ b/doc/src/api/ezgl/point.rst @@ -0,0 +1,6 @@ +===== +Point +===== +.. doxygenfile:: point.hpp + :project: ezgl + diff --git a/doc/src/api/ezgl/rectangle.rst b/doc/src/api/ezgl/rectangle.rst new file mode 100644 index 00000000000..0a62cdb542e --- /dev/null +++ b/doc/src/api/ezgl/rectangle.rst @@ -0,0 +1,5 @@ +========= +Rectangle +========= +.. doxygenfile:: rectangle.hpp + :project: ezgl diff --git a/doc/src/api/vprinternals/router_connection_router.rst b/doc/src/api/vprinternals/router_connection_router.rst new file mode 100644 index 00000000000..32a7c7dc673 --- /dev/null +++ b/doc/src/api/vprinternals/router_connection_router.rst @@ -0,0 +1,18 @@ +========== +Connection Router +========== + +ConnectionRouter +--------- +.. doxygenfile:: connection_router.h + :project: vpr + +SerialConnectionRouter +---------- +.. doxygenclass:: SerialConnectionRouter + :project: vpr + +ParallelConnectionRouter +---------- +.. doxygenclass:: ParallelConnectionRouter + :project: vpr diff --git a/doc/src/api/vprinternals/vpr_router.rst b/doc/src/api/vprinternals/vpr_router.rst index 63624cd8b39..5e72894aba7 100644 --- a/doc/src/api/vprinternals/vpr_router.rst +++ b/doc/src/api/vprinternals/vpr_router.rst @@ -9,3 +9,4 @@ VPR Router router_heap router_lookahead + router_connection_router diff --git a/doc/src/api/vtrutil/containers.rst b/doc/src/api/vtrutil/containers.rst index 879e01a4ca2..b0eb95ec2cc 100644 --- a/doc/src/api/vtrutil/containers.rst +++ b/doc/src/api/vtrutil/containers.rst @@ -8,6 +8,12 @@ vtr_vector :project: vtr :sections: briefdescription detaileddescription innernamespace innerclass public-func typedef func +vtr_array +---------- +.. doxygenfile:: vtr_array.h + :project: vtr + :sections: briefdescription detaileddescription innernamespace innerclass public-func typedef func + vtr_small_vector ---------------- .. 
doxygenclass:: vtr::small_vector diff --git a/doc/src/conf.py b/doc/src/conf.py index fe2fc79f5b1..7e3a421f7f7 100644 --- a/doc/src/conf.py +++ b/doc/src/conf.py @@ -155,6 +155,7 @@ "odin_ii": "../_build/doxygen/odin_ii/xml", "blifexplorer": "../_build/doxygen/blifexplorer/xml", "librrgraph": "../_build/doxygen/librrgraph/xml", + "ezgl": "../_build/doxygen/ezgl/xml", } breathe_default_project = "vpr" diff --git a/doc/src/index.rst b/doc/src/index.rst index a7881f79a8d..378e46af087 100644 --- a/doc/src/index.rst +++ b/doc/src/index.rst @@ -62,6 +62,7 @@ For more specific documentation about VPR see :ref:`vpr`. api/vpr/index api/vtrutil/index + api/ezgl/index api/vprinternals/index Indices and tables diff --git a/doc/src/quickstart/index.rst b/doc/src/quickstart/index.rst index 242079bef99..f69eb39b077 100644 --- a/doc/src/quickstart/index.rst +++ b/doc/src/quickstart/index.rst @@ -23,8 +23,23 @@ If you cloned the repository, you will need to set up the git submodules (if you > git submodule init > git submodule update - -VTR requires several system packages and Python packages to build and run the flow. Ubuntu users can install the required system packages using the following command (this works on Ubuntu 18.04, 20.04, 22.04 and 24.04, but you may require different packages on other Linux distributions). Our CI testing is on Ubuntu 24.04, so that is the best tested platform and recommended for development. + +VTR requires several system and Python packages to build and run the flow. Ubuntu users can install the required system packages using the provided script or the command below. This setup works on Ubuntu 18.04, 20.04, 22.04, and 24.04, but note that some packages (such as ``clang-format-18``) are only available by default on Ubuntu 24.04. On older versions, this package will not be installed unless you manually add the appropriate LLVM APT repository. + +To install ``clang-format-18`` on older Ubuntu versions (e.g., 20.04 or 22.04), you must add the LLVM repository manually. Note that this tool is only required if you want to run ``make format`` to automatically fix formatting issues in the code. It is not necessary for building or running VPR. + +.. code-block:: bash + + sudo apt install wget gnupg lsb-release + wget https://apt.llvm.org/llvm.sh + chmod +x llvm.sh + sudo ./llvm.sh 18 + +After that, you can install ``clang-format-18`` using: + +.. code-block:: bash + + sudo apt install clang-format-18 .. code-block:: bash diff --git a/doc/src/tutorials/index.rst b/doc/src/tutorials/index.rst index 1c25145ef32..b9c661fb374 100644 --- a/doc/src/tutorials/index.rst +++ b/doc/src/tutorials/index.rst @@ -10,3 +10,4 @@ Tutorials arch/index titan_benchmarks/index timing_simulation/index + timing_analysis/index diff --git a/doc/src/tutorials/timing_analysis/index.rst b/doc/src/tutorials/timing_analysis/index.rst new file mode 100644 index 00000000000..20c6e2aef67 --- /dev/null +++ b/doc/src/tutorials/timing_analysis/index.rst @@ -0,0 +1,172 @@ +.. _timing_analysis_tutorial: + +Post-Implementation Timing Analysis +----------------------------------- + +This tutorial describes how to perform static timing analysis (STA) on a circuit which has +been implemented by :ref:`VPR` using OpenSTA, an external timing analysis tool. 
+ +A video of this tutorial can be found here: https://youtu.be/yihFJc7WOfE + +External timing analysis can be useful since VPR's timing analyzer (Tatum) does +not support all timing constraints and does not provide a TCL interface to allow +you to directly interrogate the timing graph. VPR also has limited support for +timing exceptions such as multi-cycles and false paths, which tools like OpenSTA +have better support for. + +Some external tools can also ingest more complex timing models (e.g. four +transition rr, rf, fr, ff delays vs. VTR's modeling of all transitions having +the same min,max range). + +.. _fig_timing_analysis_design_cycle: + +.. figure:: timing_analysis_design_cycle.png + + Post-implementation timing analysis design cycle. + +A user design cycle which would use post-implementation timing analysis could perform the following: + 1. Run VPR with the timing commands it can support (simplified constraints). + 2. Perform timing analysis on the resulting netlist using OpenSTA with + more complex timing commands. + 3. The user can then modify the design to meet the complex timing constraints based on the timing report produced by OpenSTA. + 4. The design can then be fed back into VPR and the process can repeat until all constraints are met. + +Generating the Post-Implementation Netlist for STA +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +For this tutorial, we will be using the ``clma`` :ref:`benchmark ` +targetting the ``k6_frac_N10_frac_chain_mem32K_40nm.xml`` architecture. + +We will first create a working directory to hold all the timing analysis files: + +.. code-block:: console + + $ mkdir timing_analysis_tut + $ cd timing_analysis_tut + +Next we will copy over the benchmark and FPGA architecture into the working +directory for convenience: + +.. code-block:: console + + $ cp $VTR_ROOT/vtr_flow/benchmarks/blif/clma.blif . + $ cp $VTR_ROOT/vtr_flow/arch/timing/k6_frac_N10_frac_chain_mem32K_40nm.xml . + +.. note:: Replace :term:`$VTR_ROOT` with the root directory of the VTR source tree + +To perform timing analysis externally to VTR, we need to provide an SDC file +which will contain the timing constraints on the clocks and I/Os in the circuit. +For this tutorial, we will use the following ``clma.sdc`` file: + +.. code-block:: tcl + :linenos: + :caption: SDC file ``clma.sdc`` used for timing analysis. + + # Set pclk to be a clock with a 16ns period. + create_clock -period 16 pclk + + # Set the input delays of all input ports in the clma design to be 0 relative to pclk. + set_input_delay -clock pclk -max 0 [get_ports {pi*}] + + # Set the output delays of all output ports in the clma design to be 0 relative to pclk. + set_output_delay -clock pclk -max 0 [get_ports {p__*}] + +Next, we can generate the post-implementation netlist and other necessary files +for timing analysis using VPR. + +.. code-block:: console + + $ vpr \ + $ k6_frac_N10_frac_chain_mem32K_40nm.xml \ + $ clma.blif \ + $ --route_chan_width 100 \ + $ --sdc_file clma.sdc \ + $ --gen_post_synthesis_netlist on \ + $ --gen_post_implementation_sdc on \ + $ --post_synth_netlist_unconn_inputs gnd \ + $ --post_synth_netlist_module_parameters off + +In this command, we provide the architecture, circuit, the channel width, and +the SDC file. The other four commands are what generate the necessary netlist +files for timing analysis: + * ``--gen_post_synthesis_netlist on``: This will generate the post-implementation netlist as a Verilog file. 
+ * ``--gen_post_implementation_sdc on``: This will have VPR generate a new SDC file which contains extra timing information (e.g. clock delays) based on how VPR implemented the design. + * ``--post_synth_netlist_unconn_inputs gnd``: For timing analysis with OpenSTA, we should be explicit about how we handle unconnected signal ports. Here we just ground them for simplicity. + * ``--post_synth_netlist_module_parameters off``: OpenSTA does not allow parameters to be used in the netlist. This command tells VPR to generate a netlist without using parameters. + +Once VPR has completed, we should see the generated Verilog netlist, SDF file, and SDC file: + +.. code-block:: console + + $ ls *.v *.sdf *.sdc + top_post_synthesis.sdc top_post_synthesis.sdf top_post_synthesis.v + + +Performing Timing Analysis using OpenSTA +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +To perform static timing analysis for this tutorial, we will be using OpenSTA (https://github.com/parallaxsw/OpenSTA ). +Other STA tools can be used, however they may use slightly different commands. + +First, install OpenSTA onto your system. Building from source is a good option, +which can be done using the following instructions: +https://github.com/parallaxsw/OpenSTA?tab=readme-ov-file#build-from-source + +After OpenSTA is installed, we can perfrom static timing analysis on the post-implementation +netlist generated by VPR. + +It is easiest to write a ``sdf_delays.tcl`` file to setup and configure the timing analysis: + +.. code-block:: tcl + :linenos: + :caption: OpenSTA TCL file ``sdf_delays.tcl``. Note that :term:`$VTR_ROOT` should be replaced with the relevant path. + + # Read a skeleton of a liberty file which contains just enough information to + # allow OpenSTA to perform timing analysis on the post-synthesized netlist using + # an SDF file. This contains descriptions of the timing arcs of the primitives + # in the circuit. + read_liberty $VTR_ROOT/vtr_flow/primitives.lib + + # Read the post-implementation netlist generated by VPR. + read_verilog top_post_synthesis.v + + # Link the top-level design. + link_design top + + # Read the post-synthesis SDF file. + read_sdf top_post_synthesis.sdf + + # Read the SDC commands generated by VPR. + read_sdc top_post_synthesis.sdc + + # Report the setup and hold timing checks using OpenSTA and write them to files. + report_checks -group_path_count 100 -digits 3 -path_delay max > open_sta_report_timing.setup.rpt + report_checks -group_path_count 100 -digits 3 -path_delay min > open_sta_report_timing.hold.rpt + + # Report the minimum period of the clocks and their fmax. + report_clock_min_period + + # Exit OpenSTA's TCL terminal. + # This can be removed if you want terminal access to write TCL commands after + # executing the prior commands. + exit + +Now that we have a ``.tcl`` file, we can launch OpenSTA from the terminal and run it: + +.. code-block:: console + + $ sta sdf_delays.tcl + +Running this command will open a TCL terminal which will execute all of the commands +in ``sdf_delays.tcl``. The TCL file above will write setup and hold timing reports (similar to +the reports written by VPR), report the minimum period of all clocks, and then exit the OpenSTA TCL terminal. + +You can compare the timing reports generated by OpenSTA (``open_sta_report_timing.{setup/hold}.rpt``) +to the timing reports generated by VPR (``report_timing.{setup/hold}.rpt``). +You can also compare the minimum period reported by OpenSTA with the final +period reported by VTR at the bottom of ``vpr_stdout.log``. 
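+
+If you find yourself repeating this comparison, a small helper script can pull the
+two numbers side by side. The sketch below is only an illustration and is not part
+of VTR: it assumes you captured OpenSTA's terminal output to a file, for example
+with ``sta sdf_delays.tcl | tee sta.log``, and the file names and report keywords it
+searches for are assumptions that may need adjusting for your tool versions.
+
+.. code-block:: python
+
+    import re
+    import sys
+
+    def first_float_on_line_containing(path, keyword):
+        """Return the first number on the first line of 'path' containing 'keyword'."""
+        with open(path) as log:
+            for line in log:
+                if keyword.lower() in line.lower():
+                    match = re.search(r"[-+]?\d+\.?\d*(?:[eE][-+]?\d+)?", line)
+                    if match:
+                        return float(match.group(0))
+        return None
+
+    # Assumed log names and keywords; adjust them to match your tool versions.
+    vpr_period = first_float_on_line_containing("vpr_stdout.log", "Final critical path")
+    sta_period = first_float_on_line_containing("sta.log", "period_min")
+
+    if vpr_period is None or sta_period is None:
+        sys.exit("Could not find a period value in one of the logs; check the keywords.")
+
+    print(f"VPR final critical path delay : {vpr_period:.3f} ns")
+    print(f"OpenSTA minimum clock period  : {sta_period:.3f} ns")
+    print(f"Difference                    : {abs(vpr_period - sta_period):.3f} ns")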
+ +The TCL file above is just an example of what OpenSTA can do. For full documentation +of the different commands available in OpenSTA, see: +https://github.com/parallaxsw/OpenSTA/blob/master/doc/OpenSTA.pdf + diff --git a/doc/src/tutorials/timing_analysis/timing_analysis_design_cycle.png b/doc/src/tutorials/timing_analysis/timing_analysis_design_cycle.png new file mode 100644 index 00000000000..98b5e766297 Binary files /dev/null and b/doc/src/tutorials/timing_analysis/timing_analysis_design_cycle.png differ diff --git a/doc/src/vpr/VIB.rst b/doc/src/vpr/VIB.rst new file mode 100644 index 00000000000..b48675ab174 --- /dev/null +++ b/doc/src/vpr/VIB.rst @@ -0,0 +1,254 @@ +.. _VIB: + +VIB Architecture +============ +The VIB architecture adds modeling support for double-level MUX topology and bent wires. In past, switch blocks have only one level of routing MUXes, whose inputs are driven by outputs of programmable blocks and routing tracks. Now outputs of programmable blocks can shape the first level of routing MUXes, while the inputs of second level involves the outputs of first level and other routing tracks. This can reduce the number and input sizes of routing MUXes. + +Figure 1 shows the proposed VIB architecture which is tile-based. Each tile is composed of a CLB and a VIB. Each CLB can interact with the corresponding VIB which contains all the routing programmable switches in one tile. Figure 2 shows an example of the detailed interconnect architecture in VIB. The CLB input muxes and the driving muxes of wire segments can share the same fanins. A routing path of a net with two sinks is presented red in the Figure. + +.. figure:: ../Images/VIB.png + :align: center + :height: 300 + + Figure 1. VIB architecture. The connections between the inputs and outputs of the LB and the routing wires are all implemented within the VIB. + +.. figure:: ../Images/double-level.png + :align: center + + Figure 2. Double-level MUX topology. + +Figure 3 shows the modeling for bent wires. A bent L-length wire is modeled as two segments in CHANX and CHANY respectively connected by a delayless switch. The orange and red arrows represent conterclockwise and clockwise bent wires respectively. The bent wires can connect to both bent and straight wire segments. + +.. figure:: ../Images/bent_wires.png + :align: center + + Figure 3. Presentation for bent wires. + +FPGA Architecture File Modification (.xml) +-------------------------- +For original tags of FPGA architecture file see :ref:`fpga_architecture_description`. + +Modification for ```` Tag +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +The content within the ```` tag consists of a group of ```` tags. +The ```` tag and its contents are described below. + +.. arch:tag:: content + + :req_param content: + The switch names and the depopulation pattern as described below. + +.. arch:tag:: int list + +.. arch:tag:: int list + +.. arch:tag:: + +For bent wires, a new content ```` is added in the ```` tag. + +.. arch:tag:: bent pattern list + + This tag describes the bent pattern for this particular wire segment. + For example, a length 4 wire has a bent pattern of ``- - U``. + A ``-`` indicates no bent at this position and a ``U`` indicates a conterclockwise bent at the position. (``D`` indicates a clockwise bent.) + + .. note:: A bent wire should remain consistent in both the x and y axes. + +New Added Top Level Tag ```` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +The content within the ```` tag consists of a group of ```` tags. 
Different ```` tags describe the paradigms of VIB, which apply to different positions. + +.. arch:tag:: content + + :req_param name: + A unique alphanumeric name to identify this VIB type. + + :req_param pbtype_name: + The name of the block type (e.g. clb, memory) that this VIB connects to. + + .. note:: A block (e.g. clb, dsp) is connected to the VIB on its top-right side, so the input and output pins of the block should be on the top or right side. + + :req_param vib_seg_group: + The number of the segment types in this VIB. + + :req_param arch_vib_switch: + Name of the mux switch type used to drive wires in the VIB by default, and a custom switch can override this switch type for specific connections if desired. + + :req_param content: + The segment groups and the multistage MUX topology as described below. + +The ``content`` of ```` tag consists of several ```` tags and a ```` tag. +For example: + +.. code-block:: xml + + + + + + + + + + ... + + + ... + + + + + ... + + + +.. arch:tag:: + + :req_param name: + The name of the segment in this VIB described in ````. + + :req_param track_nums: + The track number of the segment in this VIB. + + .. note:: When using unidirectional segments, the track number of the segment represents the number for one direction. For example, the ``track_nums`` is ``10``, which means total ``20`` tracks of the segment in the channel for both (INC & DEC) directions. + +.. arch:tag:: content + + :req_param content: + The detaild information for first and second MUXes. + +The ``content`` of ```` tag consists of a ```` tag and a ```` tag. + +.. arch:tag:: content + + :req_param switch_name: + Name of the mux switch type used to drive first stage MUXes in the VIB. + + :req_param content: + The details of each MUX. + +The ``content`` of ```` tag consists of many ```` tags. + +.. arch:tag:: content + + :req_param name: + Name of the MUX. + + :req_param content: + A ```` tag to describe what pins and wires connect to this MUX. + +For example: + +.. code-block:: xml + + + + clb.O[0] clb.O[1:3] clb.O[4] + + + L1.E1 L1.S1 L2.E0 + + ... + + +The ```` tag in ```` describes nodes that connects to the MUX. ``clb.O[*]`` means output pin(s); ``L1.E1`` means the track ``1`` in the ``East`` direction of ``L1`` segment. + +.. arch:tag:: content + + :req_param content: + The details of each MUX. + +The ``content`` of ```` tag consists of many ```` tags. + +.. arch:tag:: content + + :req_param name: + Name of the MUX. + + :req_param content: + A ```` tag to describe where this MUX connect to and a ```` tag to describe what pins and wires connect to this MUX. + +For example: + +.. code-block:: xml + + + + clb.I[0] + clb.O[4] f_mux_0 f_mux_1 + + + L1.E1 + L1.S2 f_mux_0 f_mux_1 + + ... + + +The ```` tag describes the node this MUX connects to. ``clb.I[*]`` means input pin(s); ``L1.E1`` means the track ``1`` in the ``East`` direction of ``L1`` segment. The ```` tag in ```` describes nodes that connects to the MUX. ``clb.O[*]`` means output pin(s); ``L1.S2`` means the track ``2`` in the ``South`` direction of ``L1`` segment. ``f_mux_0`` means the name of the specific first stage MUX. + +Here is a complete example of the ```` tag: + +.. code-block:: xml + + + + + + + + clb.O[0] clb.O[1:3] clb.O[4] + + + L1.E1 L1.S1 L2.E0 + + + + + clb.I[0] + clb.O[4] f_mux_0 f_mux_1 + + + L1.E1 + L1.S2 f_mux_0 f_mux_1 + + + + + +Its corresponding detailed architecture is shown in Figure 4. + +.. figure:: ../Images/vib_example.png + :align: center + :height: 600 + + Figure 4. 
The corresponding detaied architecture of the example. + +New Added Top Level Tag ```` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Content inside this tag specifies VIB grid layout to describe different VIBs applied on different locations. + +.. arch:tag:: content + + :req_param name: + The name identifying this VIB grid layout. It should be the same as the corresponding layout name inside the ```` tag. + + :req_param content: + The content should contain a set of grid location tags. For grid location tags of vib_layout see :ref:`fpga_architecture_description`; ref:`grid_expressions` + +For example: + +.. code-block:: xml + + + + + + + ... + + + +In this VIB grid layout, ``perimeter``, ``fill``, ``col`` and so on are tags in original ```` tag to describe positions of each type of VIB block. The attibute ``type`` should correspond to the ``name`` of a ```` tag in ````. +Besides, the ``pbtype_name`` of corresponding ```` must be the same as the physical block type at this position. + +In this example, IO blocks are located on the perimeter of the layout. Memory blocks are on column 5 and CLBs are on the rest positions. The ``vib_io``, ``vib_clb`` and ``vib_memory`` are different types of vib blocks corresponding to IO, CLB and memory blocks respectively. diff --git a/doc/src/vpr/command_line_usage.rst b/doc/src/vpr/command_line_usage.rst index f21ee85f1eb..916a7986cd2 100644 --- a/doc/src/vpr/command_line_usage.rst +++ b/doc/src/vpr/command_line_usage.rst @@ -47,12 +47,12 @@ By default VPR will perform a binary search routing to find the minimum channel Detailed Command-line Options ----------------------------- -VPR has a lot of options. Running :option:`vpr --help` will display all the available options and their usage information. +VPR has a lot of options. Running :option:`vpr --help` will display all the available options and their usage information. .. option:: -h, --help Display help message then exit. - + The options most people will be interested in are: * :option:`--route_chan_width` (route at a fixed channel width), and @@ -208,7 +208,7 @@ General Options * Any string matching ``name`` attribute of a device layout defined with a ```` tag in the :ref:`arch_grid_layout` section of the architecture file. If the value specified is neither ``auto`` nor matches the ``name`` attribute value of a ```` tag, VPR issues an error. - + .. note:: If the only layout in the architecture file is a single device specified using ````, it is recommended to always specify the ``--device`` option; this prevents the value ``--device auto`` from interfering with operations supported only for ```` grids. **Default:** ``auto`` @@ -223,6 +223,12 @@ General Options If this option is not specified it may be set from the ``VPR_NUM_WORKERS`` environment variable; otherwise the default is used. + If this option is set to something other than 1, the following algorithms can be run in parallel: + + * Timing Analysis + * Routing (If routing algorithm is set to parallel or parallel_decomp; See :option:`--router_algorithm`) + * Portions of analytical placement (If using the analytical placement flow and compiled VPR with Eigen enabled; See :option:`--analytical_place`) + .. note:: To compile VPR to allow the usage of parallel workers, ``libtbb-dev`` must be installed in the system. **Default:** ``1`` @@ -410,9 +416,14 @@ Use the options below to override this default naming behaviour. .. option:: --write_placement_delay_lookup Writes the placement delay lookup to the specified file. 
Expects a file extension of either ``.capnp`` or ``.bin``. + +.. option:: --read_initial_place_file + + Reads in the initial cluster-level placement (in :ref:`.place file format `) from the specified file and uses it as the starting point for annealing improvement, instead of generating an initial placement internally. + .. option:: --write_initial_place_file - Writes out the the placement chosen by the initial placement algorithm to the specified file. + Writes out the clustered netlist placement chosen by the initial placement algorithm to the specified file, in :ref:`.place file format `. .. option:: --outfile_prefix @@ -569,7 +580,7 @@ For people not working on CAD, you can probably leave all the options to their d **Default**: ``auto`` -.. option:: --alpha_clustering +.. option:: --timing_gain_weight A parameter that weights the optimization of timing vs area. @@ -577,7 +588,7 @@ For people not working on CAD, you can probably leave all the options to their d **Default**: ``0.75`` -.. option:: --beta_clustering +.. option:: --connection_gain_weight A tradeoff parameter that controls the optimization of smaller net absorption vs. the optimization of signal sharing. @@ -832,9 +843,9 @@ If any of init_t, exit_t or alpha_t is specified, the user schedule, with a fixe Controls how the placer handles blocks (of any type) during placement. - * ````: A path to a file listing the desired location of blocks in the netlist. + * ````: A path to a file listing the desired location of clustered blocks in the netlist. - This place location file is in the same format as a :ref:`normal placement file `, but does not require the first two lines which are normally at the top of a placement file that specify the netlist file, netlist ID, and array size. + This place location file is in the same format as a :ref:`.place file `, but does not require the first two lines which are normally at the top of a placement file that specify the netlist file, netlist ID, and array size. **Default:** ````. @@ -900,7 +911,7 @@ If any of init_t, exit_t or alpha_t is specified, the user schedule, with a fixe .. option:: --place_agent_algorithm {e_greedy | softmax} - Controls which placement RL agent is used. + Controls which placement RL agent is used. **Default:** ``softmax`` @@ -922,10 +933,10 @@ If any of init_t, exit_t or alpha_t is specified, the user schedule, with a fixe .. option:: --place_reward_fun {basic | nonPenalizing_basic | runtime_aware | WLbiased_runtime_aware} - The reward function used by the placement RL agent to learn the best action at each anneal stage. + The reward function used by the placement RL agent to learn the best action at each anneal stage. + + .. note:: The latter two are only available for timing-driven placement. - .. note:: The latter two are only available for timing-driven placement. - **Default:** ``WLbiased_runtime_aware`` .. option:: --place_agent_space {move_type | move_block_type} @@ -935,20 +946,20 @@ If any of init_t, exit_t or alpha_t is specified, the user schedule, with a fixe **Default:** ``move_block_type`` .. option:: --place_quench_only {on | off} - + If this option is set to ``on``, the placement will skip the annealing phase and only perform the placement quench. - This option is useful when the the quality of initial placement is good enough and there is no need to perform the + This option is useful when the the quality of initial placement is good enough and there is no need to perform the annealing phase. **Default:** ``off`` .. 
option:: --placer_debug_block - + .. note:: This option is likely only of interest to developers debugging the placement algorithm - Controls which block the placer produces detailed debug information for. - + Controls which block the placer produces detailed debug information for. + If the block being moved has the same ID as the number assigned to this parameter, the placer will print debugging information about it. * For values >= 0, the value is the block ID for which detailed placer debug information should be produced. @@ -960,7 +971,7 @@ If any of init_t, exit_t or alpha_t is specified, the user schedule, with a fixe **Default:** ``-2`` .. option:: --placer_debug_net - + .. note:: This option is likely only of interest to developers debugging the placement algorithm Controls which net the placer produces detailed debug information for. @@ -1004,7 +1015,7 @@ The following options are only valid when the placement engine is in timing-driv .. option:: --quench_recompute_divider - Controls how many times the placer performs a timing analysis to update its criticality estimates during a quench. + Controls how many times the placer performs a timing analysis to update its criticality estimates during a quench. If unspecified, uses the value from --inner_loop_recompute_divider. **Default:** ``0`` @@ -1088,7 +1099,7 @@ The following options are only valid when the placement engine is in timing-driv NoC Options ^^^^^^^^^^^^^^ -The following options are only used when FPGA device and netlist contain a NoC router. +The following options are only used when FPGA device and netlist contain a NoC router. .. option:: --noc {on | off} @@ -1098,7 +1109,7 @@ The following options are only used when FPGA device and netlist contain a NoC r **Default:** ``off`` .. option:: --noc_flows_file - + XML file containing the list of traffic flows within the NoC (communication between routers). .. note:: noc_flows_file are required to specify if NoC optimization is turned on (--noc on). @@ -1106,7 +1117,7 @@ The following options are only used when FPGA device and netlist contain a NoC r .. option:: --noc_routing_algorithm {xy_routing | bfs_routing | west_first_routing | north_last_routing | negative_first_routing | odd_even_routing} Controls the algorithm used by the NoC to route packets. - + * ``xy_routing`` Uses the direction oriented routing algorithm. This is recommended to be used with mesh NoC topologies. * ``bfs_routing`` Uses the breadth first search algorithm. The objective is to find a route that uses a minimum number of links. This algorithm is not guaranteed to generate deadlock-free traffic flow routes, but can be used with any NoC topology. * ``west_first_routing`` Uses the west-first routing algorithm. This is recommended to be used with mesh NoC topologies. @@ -1119,11 +1130,11 @@ The following options are only used when FPGA device and netlist contain a NoC r .. option:: --noc_placement_weighting Controls the importance of the NoC placement parameters relative to timing and wirelength of the design. - + * ``noc_placement_weighting = 0`` means the placement is based solely on timing and wirelength. * ``noc_placement_weighting = 1`` means noc placement is considered equal to timing and wirelength. * ``noc_placement_weighting > 1`` means the placement is increasingly dominated by NoC parameters. - + **Default:** ``5.0`` .. 
option:: --noc_aggregate_bandwidth_weighting @@ -1141,7 +1152,7 @@ The following options are only used when FPGA device and netlist contain a NoC r Other positive numbers specify the importance of meeting latency constraints compared to other NoC-related cost terms. Weighting factors for NoC-related cost terms are normalized internally. Therefore, their absolute values are not important, and only their relative ratios determine the importance of each cost term. - + **Default:** ``0.6`` .. option:: --noc_latency_weighting @@ -1151,7 +1162,7 @@ The following options are only used when FPGA device and netlist contain a NoC r Other positive numbers specify the importance of minimizing aggregate latency compared to other NoC-related cost terms. Weighting factors for NoC-related cost terms are normalized internally. Therefore, their absolute values are not important, and only their relative ratios determine the importance of each cost term. - + **Default:** ``0.02`` .. option:: --noc_congestion_weighting @@ -1167,11 +1178,11 @@ The following options are only used when FPGA device and netlist contain a NoC r .. option:: --noc_swap_percentage Sets the minimum fraction of swaps attempted by the placer that are NoC blocks. - This value is an integer ranging from [0-100]. - - * ``0`` means NoC blocks will be moved at the same rate as other blocks. + This value is an integer ranging from [0-100]. + + * ``0`` means NoC blocks will be moved at the same rate as other blocks. * ``100`` means all swaps attempted by the placer are NoC router blocks. - + **Default:** ``0`` .. option:: --noc_placement_file_name @@ -1257,7 +1268,7 @@ Analytical Placement is generally split into three stages: * ``none`` Do not use any Detailed Placer. - * ``annealer`` Use the Annealer from the Placement stage as a Detailed Placer. This will use the same Placer Options from the Place stage to configure the annealer. + * ``annealer`` Use the Annealer from the Placement stage as a Detailed Placer. This will use the same Placer Options from the Place stage to configure the annealer. **Default:** ``annealer`` @@ -1270,6 +1281,49 @@ Analytical Placement is generally split into three stages: **Default:** ``0.5`` +.. option:: --appack_max_dist_th { auto | :, } + + Sets the maximum candidate distance thresholds for the logical block types + used by APPack. APPack uses the primitive-level placement produced by the + global placer to cluster primitives together. APPack uses the thresholds + here to ignore primitives which are too far away from the cluster being formed. + + When this option is set to "auto", VPR will select good values for these + thresholds based on the primitives contained within each logical block type. + + Using this option, the user can set the maximum candidate distance threshold + of logical block types to something else. The strings passed in by the user + should be of the form ``:,`` where the regex string is + used to match the name of the logical block type to set, the first float + is a scaling term, and the second float is an offset. The threshold will + be set to max(scale * (W + H), offset), where W and H are the width and height + of the device. This allows the user to specify a threshold based on the + size of the device, while also preventing the number from going below "offset". + When multiple strings are provided, the thresholds are set from left to right, + and any logical block types which have been unset will be set to their "auto" + values. + + For example: + + .. 
code-block:: none + + --appack_max_dist_th .*:0.1,0 "clb|memory:0,5" + + Would set all logical block types to be 0.1 * (W + H), except for the clb and + memory block, which will be set to a fixed value of 5. + + Another example: + + .. code-block:: none + + --appack_max_dist_th "clb|LAB:0.2,5" + + This will set all of the logical block types to their "auto" thresholds, except + for logical blocks with the name clb/LAB which will be set to 0.2 * (W + H) or + 5 (whichever is larger). + + **Default:** ``auto`` + .. option:: --ap_verbosity Controls the verbosity of the AP flow output. @@ -1287,6 +1341,15 @@ Analytical Placement is generally split into three stages: **Default:** ``1`` +.. option:: --ap_generate_mass_report {on | off} + + Controls whether to generate a report on how the partial legalizer + within the AP flow calculates the mass of primitives and the + capacity of tiles on the device. This report is useful when + debugging the partial legalizer. + + **Default:** ``off`` + .. _router_options: @@ -1343,8 +1406,8 @@ VPR uses a negotiated congestion algorithm (based on Pathfinder) to perform rout .. option:: --max_pres_fac - Sets the maximum present overuse penalty factor that can ever result during routing. Should always be less than 1e25 or so to prevent overflow. - Smaller values may help prevent circuitous routing in difficult routing problems, but may increase + Sets the maximum present overuse penalty factor that can ever result during routing. Should always be less than 1e25 or so to prevent overflow. + Smaller values may help prevent circuitous routing in difficult routing problems, but may increase the number of routing iterations needed and hence runtime. **Default:** ``1000.0`` @@ -1423,7 +1486,7 @@ VPR uses a negotiated congestion algorithm (based on Pathfinder) to perform rout .. option:: --router_algorithm {timing_driven | parallel | parallel_decomp} - Selects which router algorithm to use. + Selects which router algorithm to use. * ``timing_driven`` is the default single-threaded PathFinder algorithm. @@ -1474,6 +1537,35 @@ VPR uses a negotiated congestion algorithm (based on Pathfinder) to perform rout * `swns` - setup Worst Negative Slack (sWNS) [ns] * `stns` - Setup Total Negative Slack (sTNS) [ns] + +.. option:: --generate_net_timing_report {on | off} + + Generates a report that lists the bounding box, slack, and delay of every routed connection in a design in CSV format (``report_net_timing.csv``). Each row in the CSV corresponds to a single net. + + The report can later be used by other tools to enable further optimizations. For example, the Synopsys synthesis tool (Synplify) can use this information to re-synthesize the design and improve the Quality of Results (QoR). + + Fields in the report are: + + .. 
code-block:: none + + netname : The name assigned to the net in the atom netlist + Fanout : Net's fanout (number of sinks) + bb_xmin : X coordinate of the net's bounding box's bottom-left corner + bb_ymin : Y coordinate of the net's bounding box's bottom-left corner + bb_layer_min : Lowest layer number of the net's bounding box + bb_xmax : X coordinate of the net's bounding box's top-right corner + bb_ymax : Y coordinate of the net's bounding box's top-right corner + bb_layer_max : Highest layer number of the net's bounding box + src_pin_name : Name of the net's source pin + src_pin_slack : Setup slack of the net's source pin + sinks : A semicolon-separated list of sink pin entries, each in the format: + ,, + + Example value for the ``sinks`` field: + ``"U2.B,0.12,0.5;U3.C,0.10,0.6;U4.D,0.08,0.7"`` + + **Default:** ``off`` + .. option:: --route_verbosity Controls the verbosity of routing output. @@ -1505,13 +1597,90 @@ The following options are only valid when the router is in timing-driven mode (t **Default:** ``0.0`` .. option:: --router_profiler_astar_fac - + Controls the directedness of the timing-driven router's exploration when doing router delay profiling of an architecture. The router delay profiling step is currently used to calculate the place delay matrix lookup. Values between 1 and 2 are resonable; higher values trade some quality for reduced run-time. **Default:** ``1.2`` +.. option:: --enable_parallel_connection_router {on | off} + + Controls whether the MultiQueue-based parallel connection router is used during a single connection routing. + + When enabled, the parallel connection router accelerates the path search for individual source-sink connections using + multi-threading without altering the net routing order. + + **Default:** ``off`` + +.. option:: --post_target_prune_fac + + Controls the post-target pruning heuristic calculation in the parallel connection router. + + This parameter is used as a multiplicative factor applied to the VPR heuristic (not guaranteed to be admissible, i.e., + might over-predict the cost to the sink) to calculate the 'stopping heuristic' when pruning nodes after the target has + been reached. The 'stopping heuristic' must be admissible for the path search algorithm to guarantee optimal paths and + be deterministic. + + Values of this parameter are architecture-specific and have to be empirically found. + + This parameter has no effect if :option:`--enable_parallel_connection_router` is not set. + + **Default:** ``1.2`` + +.. option:: --post_target_prune_offset + + Controls the post-target pruning heuristic calculation in the parallel connection router. + + This parameter is used as a subtractive offset together with :option:`--post_target_prune_fac` to apply an affine + transformation on the VPR heuristic to calculate the 'stopping heuristic'. The 'stopping heuristic' must be admissible + for the path search algorithm to guarantee optimal paths and be deterministic. + + Values of this parameter are architecture-specific and have to be empirically found. + + This parameter has no effect if :option:`--enable_parallel_connection_router` is not set. + + **Default:** ``0.0`` + +.. option:: --multi_queue_num_threads + + Controls the number of threads used by MultiQueue-based parallel connection router. + + If not explicitly specified, defaults to 1, implying the parallel connection router works in 'serial' mode using only + one main thread to route. + + This parameter has no effect if :option:`--enable_parallel_connection_router` is not set. 
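As a rough illustration (not part of the original documentation), the parallel connection router options in this section might be combined on the command line as sketched below; the architecture and circuit file names are placeholders, and the queue count follows the queues-per-thread guidance given for :option:`--multi_queue_num_queues`.

.. code-block:: bash

   # Hypothetical invocation sketch: route using the MultiQueue-based parallel
   # connection router with 4 threads and 4 queues per thread (16 queues total).
   # "my_arch.xml" and "my_circuit.blif" are placeholder file names.
   vpr my_arch.xml my_circuit.blif \
       --route \
       --enable_parallel_connection_router on \
       --multi_queue_num_threads 4 \
       --multi_queue_num_queues 16 \
       --multi_queue_direct_draining on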
+ + **Default:** ``1`` + +.. option:: --multi_queue_num_queues + + Controls the number of queues used by MultiQueue in the parallel connection router. + + Must be set >= 2. A common configuration for this parameter is the number of threads used by MultiQueue * 4 (the number + of queues per thread). + + This parameter has no effect if :option:`--enable_parallel_connection_router` is not set. + + **Default:** ``2`` + +.. option:: --multi_queue_direct_draining {on | off} + + Controls whether to enable queue draining optimization for MultiQueue-based parallel connection router. + + When enabled, queues can be emptied quickly by draining all elements if no further solutions need to be explored after + the target is reached in the path search. + + Note: For this optimization to maintain optimality and deterministic results, the 'ordering heuristic' (calculated by + :option:`--astar_fac` and :option:`--astar_offset`) must be admissible to ensure emptying queues of entries with higher + costs does not prune possibly superior solutions. However, you can still enable this optimization regardless of whether + optimality and determinism are required for your specific use case (in such cases, the 'ordering heuristic' can be + inadmissible). + + This parameter has no effect if :option:`--enable_parallel_connection_router` is not set. + + **Default:** ``off`` + .. option:: --max_criticality Sets the maximum fraction of routing cost that can come from delay (vs. coming from routability) for any net. @@ -1710,6 +1879,16 @@ Analysis Options **Default:** ``off`` +.. option:: --gen_post_implementation_sdc { on | off } + + Generates an SDC file including a list of constraints that would + replicate the timing constraints that the timing analysis within + VPR followed during the flow. This can be helpful for flows that + use external timing analysis tools that have additional capabilities + or more detailed delay models than what VPR uses. + + **Default:** ``off`` + .. option:: --post_synth_netlist_unconn_inputs { unconnected | nets | gnd | vcc } Controls how unconnected input cell ports are handled in the post-synthesis netlist @@ -1730,6 +1909,16 @@ Analysis Options **Default:** ``unconnected`` +.. option:: --post_synth_netlist_module_parameters { on | off } + + Controls whether the post-synthesis netlist output by VTR can use Verilog parameters + or not. When using the post-synthesis netlist for external timing analysis, + some tools cannot accept the netlist if it contains parameters. By setting + this option to ``off``, VPR will try to represent the netlist using non-parameterized + modules. + + **Default:** ``on`` + .. option:: --timing_report_npaths Controls how many timing paths are reported. @@ -2047,6 +2236,16 @@ The following options are used to enable server mode in VPR. .. seealso:: :ref:`interactive_path_analysis_client` + +Show Architecture Resources +^^^^^^^^^^^^^^^^^^^^^^^^ +.. option:: --show_arch_resources + + Print the architecture resource report for each device layout and exit normally. 
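As an illustrative sketch (not from the original documentation), a run that only prints the architecture resource report might look like the following; the file names are placeholders, and the argument form is assumed to follow the usual ``{on | off}`` convention used by other VPR options.

.. code-block:: bash

   # Hypothetical invocation sketch: print the per-layout architecture resource
   # report and exit without running the rest of the flow.
   # "my_arch.xml" and "my_circuit.blif" are placeholder file names.
   vpr my_arch.xml my_circuit.blif --show_arch_resources on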
+ + **Default:** ``off`` + + Command-line Auto Completion ---------------------------- diff --git a/doc/src/vpr/index.rst b/doc/src/vpr/index.rst index 61d60030bdb..ceefa3debaa 100644 --- a/doc/src/vpr/index.rst +++ b/doc/src/vpr/index.rst @@ -60,3 +60,5 @@ The purpose of VPR is to make the packing, placement, and routing stages of the file_formats debug_aids + + VIB diff --git a/doc/src/vpr/route_constraints.rst b/doc/src/vpr/route_constraints.rst new file mode 100644 index 00000000000..fe921cd5a23 --- /dev/null +++ b/doc/src/vpr/route_constraints.rst @@ -0,0 +1,44 @@ + +VPR Route Constraints +========================= +.. _vpr_constraints_file: +VPR supports running flows with route constraints. Route constraints are set on global signals to specify whether they should be routed or not. For example, a user may want to route a specific internal clock even if the clock modeling option is set to not route it. + +.. note:: The constraints specified in this file override the setting of the "--clock_modeling" option if it is specified. A message will be issued in such a case: "Route constraint(s) detected and will override clock modeling setting". + +The route constraints should be specified by the user using an XML constraints file format, as described in the section below. + +A Constraints File Example +-------------------------- + +.. code-block:: xml + :caption: An example of a route constraints file in XML format. + :linenos: + + + + + + + + + + +.. _end: + +.. note:: The "route_model" specified in this file supports only "ideal" and "route". + +Constraints File Format +----------------------- + +VPR has a specific XML format which must be used when creating a route constraints file. The purpose of this constraints file is to specify + +#. The signals that should be constrained for routing +#. The route model for such signals + +The file is passed as an input to VPR when running with route constraints. When the file is read in, its information is used to guide VPR to route or not route such signals. + +.. note:: Use the VPR option :vpr:option:`--read_vpr_constraints` to specify the VPR route constraints file that is to be loaded. + +.. note:: Wildcard names of signals are supported to specify a list of signals. The wildcard expression should follow C/C++ regular expression rules. + diff --git a/doc/src/zreferences.rst b/doc/src/zreferences.rst index 76df654a14a..af178fe070a 100644 --- a/doc/src/zreferences.rst +++ b/doc/src/zreferences.rst @@ -1,5 +1,18 @@ Publications & References ========================= +How to cite +----------- +`M. A. Elgammal`, `A. Mohaghegh`, `S. G. Shahrouz`, `F. Mahmoudi`, `F. Kosar`, `K. Talaei`, `J. Fife`, `D. Khadivi`, `K. Murray`, `A. Boutros`, `K. B. Kent`, `J. Goeders`, and `V. Betz`, "VTR 9: Open-Source CAD for Fabric and Beyond FPGA Architecture Exploration," *ACM TRETS*, 2025. [`PDF `__] + +Previous Publications +--------------------- +* `K. E. Murray`, `O. Petelin`, `S. Zhong`, `J. M. Wang`, `M. ElDafrawy`, `J.-P. Legault`, `E. Sha`, `A. G. Graham`, `J. Wu`, `M. J. P. Walker`, `H. Zeng`, `P. Patros`, `J. Luu`, `K. B. Kent` and `V. Betz`, "VTR 8: High Performance CAD and Customizable FPGA Architecture Modelling", *ACM TRETS*, 2020. + +* `J. Luu`, `J. Goeders`, `M. Wainberg`, `A. Somerville`, `T. Yu`, `K. Nasartschuk`, `M. Nasr`, `S. Wang`, `T. L`, `N. Ahmed`, `K. B. Kent`, `J. Anderson`, `J. Rose`, `V. Betz`, "VTR 7.0: Next Generation Architecture and CAD System for FPGAs", *ACM TRETS*, 2014. + +References +---------- + ..
bibliography:: z_references.bib :all: diff --git a/install_apt_packages.sh b/install_apt_packages.sh index 76c705a2c34..c3cba57f777 100755 --- a/install_apt_packages.sh +++ b/install_apt_packages.sh @@ -9,8 +9,14 @@ sudo apt-get install -y \ bison \ flex \ python3-dev \ - python3-venv + python3-venv \ + openssl \ + libssl-dev +# Packages for more complex features of VTR that most people will use. +sudo apt-get install -y \ + libtbb-dev + # Required for graphics sudo apt-get install -y \ libgtk-3-dev \ @@ -40,3 +46,13 @@ sudo apt-get install -y \ # Required to build the documentation sudo apt-get install -y \ sphinx-common + +# Required for code formatting +# NOTE: clang-format-18 may only be found on specific distributions. Only +# install it if the distribution has this version of clang format. +if apt-cache search '^clang-format-18$' | grep -q 'clang-format-18'; then + sudo apt-get install -y \ + clang-format-18 +else + echo "clang-format-18 not found in apt-cache. Skipping installation." +fi diff --git a/libs/CMakeLists.txt b/libs/CMakeLists.txt index 094f7694cad..a02d6ae1cc7 100644 --- a/libs/CMakeLists.txt +++ b/libs/CMakeLists.txt @@ -9,6 +9,8 @@ link_libraries(${ADDITIONAL_FLAGS}) add_subdirectory(libarchfpga) add_subdirectory(libvtrutil) add_subdirectory(liblog) +add_subdirectory(libencrypt) +add_subdirectory(libdecrypt) add_subdirectory(libpugiutil) add_subdirectory(libvqm) add_subdirectory(librtlnumber) diff --git a/libs/EXTERNAL/libblifparse/CMakeLists.txt b/libs/EXTERNAL/libblifparse/CMakeLists.txt index 57f33eb2e57..f38652308c8 100644 --- a/libs/EXTERNAL/libblifparse/CMakeLists.txt +++ b/libs/EXTERNAL/libblifparse/CMakeLists.txt @@ -13,7 +13,7 @@ if(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_CURRENT_SOURCE_DIR}) endif() #Flex and Bison are used to generate the parser -find_package(BISON REQUIRED 3.0) +find_package(BISON REQUIRED 3.3) find_package(FLEX REQUIRED) file(GLOB_RECURSE LIB_SOURCES src/blif*.cpp) diff --git a/libs/EXTERNAL/libblifparse/src/blif_parser.y b/libs/EXTERNAL/libblifparse/src/blif_parser.y index 2b51dd7cc7e..495d495b6d8 100644 --- a/libs/EXTERNAL/libblifparse/src/blif_parser.y +++ b/libs/EXTERNAL/libblifparse/src/blif_parser.y @@ -1,5 +1,5 @@ -/* C++ parsers require Bison 3 */ -%require "3.0" +/* C++ parsers require Bison 3.3 */ +%require "3.3" %language "C++" /* Write-out tokens header file */ @@ -34,7 +34,7 @@ %define api.namespace {blifparse} /* Name the parser class */ -%define parser_class_name {Parser} +%define api.parser.class {Parser} /* Match the flex prefix */ %define api.prefix {blifparse_} diff --git a/libs/EXTERNAL/libcatch2 b/libs/EXTERNAL/libcatch2 index 76f70b1403d..74fcff6e5b1 160000 --- a/libs/EXTERNAL/libcatch2 +++ b/libs/EXTERNAL/libcatch2 @@ -1 +1 @@ -Subproject commit 76f70b1403dbc0781216f49e20e45b71f7eccdd8 +Subproject commit 74fcff6e5b190fb833a231b7f7c1829e3c3ac54d diff --git a/libs/EXTERNAL/libezgl b/libs/EXTERNAL/libezgl new file mode 160000 index 00000000000..b6beef98a3e --- /dev/null +++ b/libs/EXTERNAL/libezgl @@ -0,0 +1 @@ +Subproject commit b6beef98a3e51907c66fa6c7cc74933fb91faa6c diff --git a/libs/EXTERNAL/libezgl/.clang-format b/libs/EXTERNAL/libezgl/.clang-format deleted file mode 100644 index 86a33c1358e..00000000000 --- a/libs/EXTERNAL/libezgl/.clang-format +++ /dev/null @@ -1,38 +0,0 @@ ---- -AccessModifierOffset: '-2' -AlignAfterOpenBracket: DontAlign -AlignConsecutiveAssignments: 'false' -AlignConsecutiveDeclarations: 'false' -AlignEscapedNewlinesLeft: 'true' -AlignOperands: 'true' -AlignTrailingComments: 'true' 
-AllowAllParametersOfDeclarationOnNextLine: 'false' -AllowShortBlocksOnASingleLine: 'false' -AllowShortFunctionsOnASingleLine: None -AllowShortIfStatementsOnASingleLine: 'false' -AllowShortLoopsOnASingleLine: 'false' -AlwaysBreakAfterReturnType: None -AlwaysBreakTemplateDeclarations: 'true' -BinPackArguments: 'true' -BinPackParameters: 'false' -BreakBeforeBraces: WebKit -BreakConstructorInitializersBeforeComma: 'true' -ColumnLimit: '100' -ConstructorInitializerAllOnOneLineOrOnePerLine: 'true' -Cpp11BracedListStyle: 'true' -ExperimentalAutoDetectBinPacking: 'false' -IndentWidth: '2' -Language: Cpp -NamespaceIndentation: None -ReflowComments: 'false' -SortIncludes: 'false' -SpaceBeforeParens: Never -SpaceInEmptyParentheses: 'false' -SpacesBeforeTrailingComments: '1' -SpacesInAngles: 'false' -SpacesInSquareBrackets: 'false' -Standard: Cpp11 -TabWidth: '2' -UseTab: Never - -... diff --git a/libs/EXTERNAL/libezgl/.gitignore b/libs/EXTERNAL/libezgl/.gitignore deleted file mode 100644 index f3c566480eb..00000000000 --- a/libs/EXTERNAL/libezgl/.gitignore +++ /dev/null @@ -1,41 +0,0 @@ -# Prerequisites -*.d - -# Compiled Object files -*.slo -*.lo -*.o -*.obj - -# Precompiled Headers -*.gch -*.pch - -# Compiled Dynamic libraries -*.so -*.dylib -*.dll - -# Fortran module files -*.mod -*.smod - -# Compiled Static libraries -*.lai -*.la -*.a -*.lib - -# Executables -*.exe -*.out -*.app - -# Build Directories -cmake-build* - -# Vim -.swp - -# IDE Files and Folders -.idea diff --git a/libs/EXTERNAL/libezgl/CMakeLists.txt b/libs/EXTERNAL/libezgl/CMakeLists.txt deleted file mode 100644 index d63d2cca64c..00000000000 --- a/libs/EXTERNAL/libezgl/CMakeLists.txt +++ /dev/null @@ -1,82 +0,0 @@ -cmake_minimum_required(VERSION 3.10 FATAL_ERROR) - -# create the project -project( - ezgl - VERSION 1.0.1 - LANGUAGES CXX -) - -# we rely on GTK3 for the GUI, so make sure the system has it -find_package(PkgConfig REQUIRED) -pkg_check_modules(GTK3 QUIET gtk+-3.0) -pkg_check_modules(X11 QUIET x11) - -if(NOT GTK3_FOUND) - message(WARNING "EZGL: Failed to find required GTK3 library (on debian/ubuntu try 'sudo apt-get install libgtk-3-dev' to install)") -endif() - -# we also rely on glib to compile the GTK resource files -# a set of macros has been developed by Makman2 on GitHub to help with this -list(APPEND CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/gcr-cmake/macros) - -#Is ezgl the root cmake project? 
-set(IS_ROOT_PROJECT TRUE) -if (${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_CURRENT_SOURCE_DIR}) - set(IS_ROOT_PROJECT FALSE) -endif() - -# include the configuration/compile time options for this library -include(options.cmake) - -# create a library that can be linked by executables -add_library( - ${PROJECT_NAME} - include/ezgl/application.hpp - include/ezgl/camera.hpp - include/ezgl/canvas.hpp - include/ezgl/color.hpp - include/ezgl/control.hpp - include/ezgl/callback.hpp - include/ezgl/graphics.hpp - include/ezgl/point.hpp - include/ezgl/rectangle.hpp - src/application.cpp - src/camera.cpp - src/canvas.cpp - src/control.cpp - src/callback.cpp - src/graphics.cpp -) - -target_include_directories( - ${PROJECT_NAME} - PUBLIC include -) - -#Treat GTK/X11 headers as system headers so they -#do not generate compilation warnings -target_include_directories( - ${PROJECT_NAME} - SYSTEM - PUBLIC ${GTK3_INCLUDE_DIRS} - PUBLIC ${X11_INCLUDE_DIRS} -) - -target_link_libraries( - ${PROJECT_NAME} - PUBLIC ${GTK3_LIBRARIES} - PUBLIC ${X11_LIBRARIES} -) - -# add_compile_options does not seem to be working on the UG machines, -# and we cannot set target properties in version 3.0.2 -set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++14") - -if(EZGL_BUILD_EXAMPLES) - add_subdirectory(examples) -endif() - -if(EZGL_BUILD_DOCS) - add_subdirectory(doc) -endif() diff --git a/libs/EXTERNAL/libezgl/LICENSE b/libs/EXTERNAL/libezgl/LICENSE deleted file mode 100644 index 8dada3edaf5..00000000000 --- a/libs/EXTERNAL/libezgl/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. 
For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/libs/EXTERNAL/libezgl/README.adoc b/libs/EXTERNAL/libezgl/README.adoc deleted file mode 100644 index 836033efe83..00000000000 --- a/libs/EXTERNAL/libezgl/README.adoc +++ /dev/null @@ -1,38 +0,0 @@ -= EZGL - An Easy Graphics Library - -image:https://codedocs.xyz/mariobadr/ezgl.svg[link="https://codedocs.xyz/mariobadr/ezgl"] - -EZGL is a library for use in ece297 as a simple way to create a GUI application. -The library provides a thin wrapper around GTK and drawing functionality. - -== Dependencies - -The library currently depends on GTK 3 and cairo. - -== Compilation - -This project uses CMake for compiling and works with CMake version 3.0.2 (the version available on the UG machines). -CMake can configure the project for different build systems and IDEs (type `cmake --help` for a list of generators available for your platform). -We recommend you create a build directory before invoking CMake to configure the project (`cmake -B`). 
-For example, we can perform the configuration step from the project root directory: - - cmake -H. -Bcmake-build-release -DCMAKE_BUILD_TYPE=Release - cmake -H. -Bcmake-build-debug -DCMAKE_BUILD_TYPE=Debug - -After the configuration step, you can ask CMake to build the project. - - cmake --build cmake-build-release/ --target all - cmake --build cmake-build-debug/ --target all - -=== Build Options - -Build options can be found in `options.cmake`. -Simply specify the build option during the configuration step in CMake. -Using the already generated `cmake-build-release` directory from the previous section, we can: - - cmake -H. -Bcmake-build-release -DEZGL_BUILD_EXAMPLES=ON - -Your IDE or Makefile should now include additional targets when you turn these options on. -For example, enabling `EZGL_BUILD_EXAMPLES` should provide access to the `basic-application` target, which you can build: - - cmake --build cmake-build-release/ --target basic-application diff --git a/libs/EXTERNAL/libezgl/doc/CMakeLists.txt b/libs/EXTERNAL/libezgl/doc/CMakeLists.txt deleted file mode 100644 index d56545e6c90..00000000000 --- a/libs/EXTERNAL/libezgl/doc/CMakeLists.txt +++ /dev/null @@ -1,19 +0,0 @@ -find_package(Doxygen) - -if(NOT DOXYGEN_FOUND) - message(FATAL_ERROR "Doxygen is needed to build the documentation.") -endif() - -set(DOXYFILE_IN Doxyfile.in) -set(DOXYFILE ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile) - -configure_file(${DOXYFILE_IN} ${DOXYFILE} @ONLY) - -# add a target to generate API documentation with Doxygen -add_custom_target( - doc - COMMAND ${DOXYGEN_EXECUTABLE} ${DOXYFILE} - WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} - COMMENT "Generating API documentation with Doxygen" - VERBATIM -) diff --git a/libs/EXTERNAL/libezgl/doc/Doxyfile.in b/libs/EXTERNAL/libezgl/doc/Doxyfile.in deleted file mode 100644 index 8365d2ba95a..00000000000 --- a/libs/EXTERNAL/libezgl/doc/Doxyfile.in +++ /dev/null @@ -1,2291 +0,0 @@ -# Doxyfile 1.8.6 - -# This file describes the settings to be used by the documentation system -# doxygen (www.doxygen.org) for a project. -# -# All text after a double hash (##) is considered a comment and is placed in -# front of the TAG it is preceding. -# -# All text after a single hash (#) is considered a comment and will be ignored. -# The format is: -# TAG = value [value, ...] -# For lists, items can also be appended using: -# TAG += value [value, ...] -# Values that contain spaces should be placed between quotes (\" \"). - -#--------------------------------------------------------------------------- -# Project related configuration options -#--------------------------------------------------------------------------- - -# This tag specifies the encoding used for all characters in the config file -# that follow. The default is UTF-8 which is also the encoding used for all text -# before the first occurrence of this tag. Doxygen uses libiconv (or the iconv -# built into libc) for the transcoding. See http://www.gnu.org/software/libiconv -# for the list of possible encodings. -# The default value is: UTF-8. - -DOXYFILE_ENCODING = UTF-8 - -# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by -# double-quotes, unless you are using Doxywizard) that should identify the -# project for which the documentation is generated. This name is used in the -# title of most generated pages and in a few other places. -# The default value is: My Project. - -PROJECT_NAME = "@PROJECT_NAME@" - -# The PROJECT_NUMBER tag can be used to enter a project or revision number. 
This -# could be handy for archiving the generated documentation or if some version -# control system is used. - -PROJECT_NUMBER = "@ezgl_VERSION@" - -# Using the PROJECT_BRIEF tag one can provide an optional one line description -# for a project that appears at the top of each page and should give viewer a -# quick idea about the purpose of the project. Keep the description short. - -PROJECT_BRIEF = "An Easy Graphics & GUI Library" - -# With the PROJECT_LOGO tag one can specify an logo or icon that is included in -# the documentation. The maximum height of the logo should not exceed 55 pixels -# and the maximum width should not exceed 200 pixels. Doxygen will copy the logo -# to the output directory. - -PROJECT_LOGO = - -# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path -# into which the generated documentation will be written. If a relative path is -# entered, it will be relative to the location where doxygen was started. If -# left blank the current directory will be used. - -OUTPUT_DIRECTORY = - -# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create 4096 sub- -# directories (in 2 levels) under the output directory of each output format and -# will distribute the generated files over these directories. Enabling this -# option can be useful when feeding doxygen a huge amount of source files, where -# putting all generated files in the same directory would otherwise causes -# performance problems for the file system. -# The default value is: NO. - -CREATE_SUBDIRS = NO - -# The OUTPUT_LANGUAGE tag is used to specify the language in which all -# documentation generated by doxygen is written. Doxygen will use this -# information to generate all constant output in the proper language. -# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese, -# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States), -# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian, -# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages), -# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian, -# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian, -# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish, -# Ukrainian and Vietnamese. -# The default value is: English. - -OUTPUT_LANGUAGE = English - -# If the BRIEF_MEMBER_DESC tag is set to YES doxygen will include brief member -# descriptions after the members that are listed in the file and class -# documentation (similar to Javadoc). Set to NO to disable this. -# The default value is: YES. - -BRIEF_MEMBER_DESC = YES - -# If the REPEAT_BRIEF tag is set to YES doxygen will prepend the brief -# description of a member or function before the detailed description -# -# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the -# brief descriptions will be completely suppressed. -# The default value is: YES. - -REPEAT_BRIEF = YES - -# This tag implements a quasi-intelligent brief description abbreviator that is -# used to form the text in various listings. Each string in this list, if found -# as the leading text of the brief description, will be stripped from the text -# and the result, after processing the whole list, is used as the annotated -# text. Otherwise, the brief description is used as-is. 
If left blank, the -# following values are used ($name is automatically replaced with the name of -# the entity):The $name class, The $name widget, The $name file, is, provides, -# specifies, contains, represents, a, an and the. - -ABBREVIATE_BRIEF = - -# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then -# doxygen will generate a detailed section even if there is only a brief -# description. -# The default value is: NO. - -ALWAYS_DETAILED_SEC = NO - -# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all -# inherited members of a class in the documentation of that class as if those -# members were ordinary class members. Constructors, destructors and assignment -# operators of the base classes will not be shown. -# The default value is: NO. - -INLINE_INHERITED_MEMB = NO - -# If the FULL_PATH_NAMES tag is set to YES doxygen will prepend the full path -# before files name in the file list and in the header files. If set to NO the -# shortest path that makes the file name unique will be used -# The default value is: YES. - -FULL_PATH_NAMES = YES - -# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path. -# Stripping is only done if one of the specified strings matches the left-hand -# part of the path. The tag can be used to show relative paths in the file list. -# If left blank the directory from which doxygen is run is used as the path to -# strip. -# -# Note that you can specify absolute paths here, but also relative paths, which -# will be relative from the directory where doxygen is started. -# This tag requires that the tag FULL_PATH_NAMES is set to YES. - -STRIP_FROM_PATH = - -# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the -# path mentioned in the documentation of a class, which tells the reader which -# header file to include in order to use a class. If left blank only the name of -# the header file containing the class definition is used. Otherwise one should -# specify the list of include paths that are normally passed to the compiler -# using the -I flag. - -STRIP_FROM_INC_PATH = - -# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but -# less readable) file names. This can be useful is your file systems doesn't -# support long names like on DOS, Mac, or CD-ROM. -# The default value is: NO. - -SHORT_NAMES = NO - -# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the -# first line (until the first dot) of a Javadoc-style comment as the brief -# description. If set to NO, the Javadoc-style will behave just like regular Qt- -# style comments (thus requiring an explicit @brief command for a brief -# description.) -# The default value is: NO. - -JAVADOC_AUTOBRIEF = YES - -# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first -# line (until the first dot) of a Qt-style comment as the brief description. If -# set to NO, the Qt-style will behave just like regular Qt-style comments (thus -# requiring an explicit \brief command for a brief description.) -# The default value is: NO. - -QT_AUTOBRIEF = NO - -# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a -# multi-line C++ special comment block (i.e. a block of //! or /// comments) as -# a brief description. This used to be the default behavior. The new default is -# to treat a multi-line C++ comment block as a detailed description. Set this -# tag to YES if you prefer the old behavior instead. 
-# -# Note that setting this tag to YES also means that rational rose comments are -# not recognized any more. -# The default value is: NO. - -MULTILINE_CPP_IS_BRIEF = NO - -# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the -# documentation from any documented member that it re-implements. -# The default value is: YES. - -INHERIT_DOCS = YES - -# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce a -# new page for each member. If set to NO, the documentation of a member will be -# part of the file/class/namespace that contains it. -# The default value is: NO. - -SEPARATE_MEMBER_PAGES = NO - -# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen -# uses this value to replace tabs by spaces in code fragments. -# Minimum value: 1, maximum value: 16, default value: 4. - -TAB_SIZE = 4 - -# This tag can be used to specify a number of aliases that act as commands in -# the documentation. An alias has the form: -# name=value -# For example adding -# "sideeffect=@par Side Effects:\n" -# will allow you to put the command \sideeffect (or @sideeffect) in the -# documentation, which will result in a user-defined paragraph with heading -# "Side Effects:". You can put \n's in the value part of an alias to insert -# newlines. - -ALIASES = - -# This tag can be used to specify a number of word-keyword mappings (TCL only). -# A mapping has the form "name=value". For example adding "class=itcl::class" -# will allow you to use the command class in the itcl::class meaning. - -TCL_SUBST = - -# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources -# only. Doxygen will then generate output that is more tailored for C. For -# instance, some of the names that are used will be different. The list of all -# members will be omitted, etc. -# The default value is: NO. - -OPTIMIZE_OUTPUT_FOR_C = NO - -# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or -# Python sources only. Doxygen will then generate output that is more tailored -# for that language. For instance, namespaces will be presented as packages, -# qualified scopes will look different, etc. -# The default value is: NO. - -OPTIMIZE_OUTPUT_JAVA = NO - -# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran -# sources. Doxygen will then generate output that is tailored for Fortran. -# The default value is: NO. - -OPTIMIZE_FOR_FORTRAN = NO - -# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL -# sources. Doxygen will then generate output that is tailored for VHDL. -# The default value is: NO. - -OPTIMIZE_OUTPUT_VHDL = NO - -# Doxygen selects the parser to use depending on the extension of the files it -# parses. With this tag you can assign which parser to use for a given -# extension. Doxygen has a built-in mapping, but you can override or extend it -# using this tag. The format is ext=language, where ext is a file extension, and -# language is one of the parsers supported by doxygen: IDL, Java, Javascript, -# C#, C, C++, D, PHP, Objective-C, Python, Fortran, VHDL. For instance to make -# doxygen treat .inc files as Fortran files (default is PHP), and .f files as C -# (default is Fortran), use: inc=Fortran f=C. -# -# Note For files without extension you can use no_extension as a placeholder. -# -# Note that for custom extensions you also need to set FILE_PATTERNS otherwise -# the files are not read by doxygen. 
- -EXTENSION_MAPPING = - -# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments -# according to the Markdown format, which allows for more readable -# documentation. See http://daringfireball.net/projects/markdown/ for details. -# The output of markdown processing is further processed by doxygen, so you can -# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in -# case of backward compatibilities issues. -# The default value is: YES. - -MARKDOWN_SUPPORT = YES - -# When enabled doxygen tries to link words that correspond to documented -# classes, or namespaces to their corresponding documentation. Such a link can -# be prevented in individual cases by by putting a % sign in front of the word -# or globally by setting AUTOLINK_SUPPORT to NO. -# The default value is: YES. - -AUTOLINK_SUPPORT = YES - -# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want -# to include (a tag file for) the STL sources as input, then you should set this -# tag to YES in order to let doxygen match functions declarations and -# definitions whose arguments contain STL classes (e.g. func(std::string); -# versus func(std::string) {}). This also make the inheritance and collaboration -# diagrams that involve STL classes more complete and accurate. -# The default value is: NO. - -BUILTIN_STL_SUPPORT = NO - -# If you use Microsoft's C++/CLI language, you should set this option to YES to -# enable parsing support. -# The default value is: NO. - -CPP_CLI_SUPPORT = NO - -# Set the SIP_SUPPORT tag to YES if your project consists of sip (see: -# http://www.riverbankcomputing.co.uk/software/sip/intro) sources only. Doxygen -# will parse them like normal C++ but will assume all classes use public instead -# of private inheritance when no explicit protection keyword is present. -# The default value is: NO. - -SIP_SUPPORT = NO - -# For Microsoft's IDL there are propget and propput attributes to indicate -# getter and setter methods for a property. Setting this option to YES will make -# doxygen to replace the get and set methods by a property in the documentation. -# This will only work if the methods are indeed getting or setting a simple -# type. If this is not the case, or you want to show the methods anyway, you -# should set this option to NO. -# The default value is: YES. - -IDL_PROPERTY_SUPPORT = YES - -# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC -# tag is set to YES, then doxygen will reuse the documentation of the first -# member in the group (if any) for the other members of the group. By default -# all members of a group must be documented explicitly. -# The default value is: NO. - -DISTRIBUTE_GROUP_DOC = NO - -# Set the SUBGROUPING tag to YES to allow class member groups of the same type -# (for instance a group of public functions) to be put as a subgroup of that -# type (e.g. under the Public Functions section). Set it to NO to prevent -# subgrouping. Alternatively, this can be done per class using the -# \nosubgrouping command. -# The default value is: YES. - -SUBGROUPING = YES - -# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions -# are shown inside the group in which they are included (e.g. using \ingroup) -# instead of on a separate page (for HTML and Man pages) or section (for LaTeX -# and RTF). -# -# Note that this feature does not work in combination with -# SEPARATE_MEMBER_PAGES. -# The default value is: NO. 
- -INLINE_GROUPED_CLASSES = NO - -# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions -# with only public data fields or simple typedef fields will be shown inline in -# the documentation of the scope in which they are defined (i.e. file, -# namespace, or group documentation), provided this scope is documented. If set -# to NO, structs, classes, and unions are shown on a separate page (for HTML and -# Man pages) or section (for LaTeX and RTF). -# The default value is: NO. - -INLINE_SIMPLE_STRUCTS = NO - -# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or -# enum is documented as struct, union, or enum with the name of the typedef. So -# typedef struct TypeS {} TypeT, will appear in the documentation as a struct -# with name TypeT. When disabled the typedef will appear as a member of a file, -# namespace, or class. And the struct will be named TypeS. This can typically be -# useful for C code in case the coding convention dictates that all compound -# types are typedef'ed and only the typedef is referenced, never the tag name. -# The default value is: NO. - -TYPEDEF_HIDES_STRUCT = NO - -# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This -# cache is used to resolve symbols given their name and scope. Since this can be -# an expensive process and often the same symbol appears multiple times in the -# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small -# doxygen will become slower. If the cache is too large, memory is wasted. The -# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range -# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536 -# symbols. At the end of a run doxygen will report the cache usage and suggest -# the optimal cache size from a speed point of view. -# Minimum value: 0, maximum value: 9, default value: 0. - -LOOKUP_CACHE_SIZE = 0 - -#--------------------------------------------------------------------------- -# Build related configuration options -#--------------------------------------------------------------------------- - -# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in -# documentation are documented, even if no documentation was available. Private -# class members and static file members will be hidden unless the -# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES. -# Note: This will also disable the warnings about undocumented members that are -# normally produced when WARNINGS is set to YES. -# The default value is: NO. - -EXTRACT_ALL = NO - -# If the EXTRACT_PRIVATE tag is set to YES all private members of a class will -# be included in the documentation. -# The default value is: NO. - -EXTRACT_PRIVATE = NO - -# If the EXTRACT_PACKAGE tag is set to YES all members with package or internal -# scope will be included in the documentation. -# The default value is: NO. - -EXTRACT_PACKAGE = NO - -# If the EXTRACT_STATIC tag is set to YES all static members of a file will be -# included in the documentation. -# The default value is: NO. - -EXTRACT_STATIC = NO - -# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) defined -# locally in source files will be included in the documentation. If set to NO -# only classes defined in header files are included. Does not have any effect -# for Java sources. -# The default value is: YES. - -EXTRACT_LOCAL_CLASSES = YES - -# This flag is only useful for Objective-C code. 
When set to YES local methods, -# which are defined in the implementation section but not in the interface are -# included in the documentation. If set to NO only methods in the interface are -# included. -# The default value is: NO. - -EXTRACT_LOCAL_METHODS = NO - -# If this flag is set to YES, the members of anonymous namespaces will be -# extracted and appear in the documentation as a namespace called -# 'anonymous_namespace{file}', where file will be replaced with the base name of -# the file that contains the anonymous namespace. By default anonymous namespace -# are hidden. -# The default value is: NO. - -EXTRACT_ANON_NSPACES = NO - -# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all -# undocumented members inside documented classes or files. If set to NO these -# members will be included in the various overviews, but no documentation -# section is generated. This option has no effect if EXTRACT_ALL is enabled. -# The default value is: NO. - -HIDE_UNDOC_MEMBERS = NO - -# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all -# undocumented classes that are normally visible in the class hierarchy. If set -# to NO these classes will be included in the various overviews. This option has -# no effect if EXTRACT_ALL is enabled. -# The default value is: NO. - -HIDE_UNDOC_CLASSES = NO - -# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend -# (class|struct|union) declarations. If set to NO these declarations will be -# included in the documentation. -# The default value is: NO. - -HIDE_FRIEND_COMPOUNDS = NO - -# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any -# documentation blocks found inside the body of a function. If set to NO these -# blocks will be appended to the function's detailed documentation block. -# The default value is: NO. - -HIDE_IN_BODY_DOCS = NO - -# The INTERNAL_DOCS tag determines if documentation that is typed after a -# \internal command is included. If the tag is set to NO then the documentation -# will be excluded. Set it to YES to include the internal documentation. -# The default value is: NO. - -INTERNAL_DOCS = NO - -# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file -# names in lower-case letters. If set to YES upper-case letters are also -# allowed. This is useful if you have classes or files whose names only differ -# in case and if your file system supports case sensitive file names. Windows -# and Mac users are advised to set this option to NO. -# The default value is: system dependent. - -CASE_SENSE_NAMES = YES - -# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with -# their full class and namespace scopes in the documentation. If set to YES the -# scope will be hidden. -# The default value is: NO. - -HIDE_SCOPE_NAMES = NO - -# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of -# the files that are included by a file in the documentation of that file. -# The default value is: YES. - -SHOW_INCLUDE_FILES = YES - -# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each -# grouped member an include statement to the documentation, telling the reader -# which file to include in order to use the member. -# The default value is: NO. - -SHOW_GROUPED_MEMB_INC = NO - -# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include -# files with double quotes in the documentation rather than with sharp brackets. -# The default value is: NO. 
- -FORCE_LOCAL_INCLUDES = NO - -# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the -# documentation for inline members. -# The default value is: YES. - -INLINE_INFO = YES - -# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the -# (detailed) documentation of file and class members alphabetically by member -# name. If set to NO the members will appear in declaration order. -# The default value is: YES. - -SORT_MEMBER_DOCS = YES - -# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief -# descriptions of file, namespace and class members alphabetically by member -# name. If set to NO the members will appear in declaration order. Note that -# this will also influence the order of the classes in the class list. -# The default value is: NO. - -SORT_BRIEF_DOCS = NO - -# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the -# (brief and detailed) documentation of class members so that constructors and -# destructors are listed first. If set to NO the constructors will appear in the -# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS. -# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief -# member documentation. -# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting -# detailed member documentation. -# The default value is: NO. - -SORT_MEMBERS_CTORS_1ST = NO - -# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy -# of group names into alphabetical order. If set to NO the group names will -# appear in their defined order. -# The default value is: NO. - -SORT_GROUP_NAMES = NO - -# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by -# fully-qualified names, including namespaces. If set to NO, the class list will -# be sorted only by class name, not including the namespace part. -# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. -# Note: This option applies only to the class list, not to the alphabetical -# list. -# The default value is: NO. - -SORT_BY_SCOPE_NAME = NO - -# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper -# type resolution of all parameters of a function it will reject a match between -# the prototype and the implementation of a member function even if there is -# only one candidate or it is obvious which candidate to choose by doing a -# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still -# accept a match between prototype and implementation in such cases. -# The default value is: NO. - -STRICT_PROTO_MATCHING = NO - -# The GENERATE_TODOLIST tag can be used to enable ( YES) or disable ( NO) the -# todo list. This list is created by putting \todo commands in the -# documentation. -# The default value is: YES. - -GENERATE_TODOLIST = YES - -# The GENERATE_TESTLIST tag can be used to enable ( YES) or disable ( NO) the -# test list. This list is created by putting \test commands in the -# documentation. -# The default value is: YES. - -GENERATE_TESTLIST = YES - -# The GENERATE_BUGLIST tag can be used to enable ( YES) or disable ( NO) the bug -# list. This list is created by putting \bug commands in the documentation. -# The default value is: YES. - -GENERATE_BUGLIST = YES - -# The GENERATE_DEPRECATEDLIST tag can be used to enable ( YES) or disable ( NO) -# the deprecated list. This list is created by putting \deprecated commands in -# the documentation. -# The default value is: YES. 
- -GENERATE_DEPRECATEDLIST= YES - -# The ENABLED_SECTIONS tag can be used to enable conditional documentation -# sections, marked by \if ... \endif and \cond -# ... \endcond blocks. - -ENABLED_SECTIONS = - -# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the -# initial value of a variable or macro / define can have for it to appear in the -# documentation. If the initializer consists of more lines than specified here -# it will be hidden. Use a value of 0 to hide initializers completely. The -# appearance of the value of individual variables and macros / defines can be -# controlled using \showinitializer or \hideinitializer command in the -# documentation regardless of this setting. -# Minimum value: 0, maximum value: 10000, default value: 30. - -MAX_INITIALIZER_LINES = 30 - -# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at -# the bottom of the documentation of classes and structs. If set to YES the list -# will mention the files that were used to generate the documentation. -# The default value is: YES. - -SHOW_USED_FILES = YES - -# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This -# will remove the Files entry from the Quick Index and from the Folder Tree View -# (if specified). -# The default value is: YES. - -SHOW_FILES = YES - -# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces -# page. This will remove the Namespaces entry from the Quick Index and from the -# Folder Tree View (if specified). -# The default value is: YES. - -SHOW_NAMESPACES = YES - -# The FILE_VERSION_FILTER tag can be used to specify a program or script that -# doxygen should invoke to get the current version for each file (typically from -# the version control system). Doxygen will invoke the program by executing (via -# popen()) the command command input-file, where command is the value of the -# FILE_VERSION_FILTER tag, and input-file is the name of an input file provided -# by doxygen. Whatever the program writes to standard output is used as the file -# version. For an example see the documentation. - -FILE_VERSION_FILTER = - -# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed -# by doxygen. The layout file controls the global structure of the generated -# output files in an output format independent way. To create the layout file -# that represents doxygen's defaults, run doxygen with the -l option. You can -# optionally specify a file name after the option, if omitted DoxygenLayout.xml -# will be used as the name of the layout file. -# -# Note that if you run doxygen from a directory containing a file called -# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE -# tag is left empty. - -LAYOUT_FILE = - -# The CITE_BIB_FILES tag can be used to specify one or more bib files containing -# the reference definitions. This must be a list of .bib files. The .bib -# extension is automatically appended if omitted. This requires the bibtex tool -# to be installed. See also http://en.wikipedia.org/wiki/BibTeX for more info. -# For LaTeX the style of the bibliography can be controlled using -# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the -# search path. Do not use file names with spaces, bibtex cannot handle them. See -# also \cite for info how to create references. 
- -CITE_BIB_FILES = - -#--------------------------------------------------------------------------- -# Configuration options related to warning and progress messages -#--------------------------------------------------------------------------- - -# The QUIET tag can be used to turn on/off the messages that are generated to -# standard output by doxygen. If QUIET is set to YES this implies that the -# messages are off. -# The default value is: NO. - -QUIET = NO - -# The WARNINGS tag can be used to turn on/off the warning messages that are -# generated to standard error ( stderr) by doxygen. If WARNINGS is set to YES -# this implies that the warnings are on. -# -# Tip: Turn warnings on while writing the documentation. -# The default value is: YES. - -WARNINGS = YES - -# If the WARN_IF_UNDOCUMENTED tag is set to YES, then doxygen will generate -# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag -# will automatically be disabled. -# The default value is: YES. - -WARN_IF_UNDOCUMENTED = YES - -# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for -# potential errors in the documentation, such as not documenting some parameters -# in a documented function, or documenting parameters that don't exist or using -# markup commands wrongly. -# The default value is: YES. - -WARN_IF_DOC_ERROR = YES - -# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that -# are documented, but have no documentation for their parameters or return -# value. If set to NO doxygen will only warn about wrong or incomplete parameter -# documentation, but not about the absence of documentation. -# The default value is: NO. - -WARN_NO_PARAMDOC = NO - -# The WARN_FORMAT tag determines the format of the warning messages that doxygen -# can produce. The string should contain the $file, $line, and $text tags, which -# will be replaced by the file and line number from which the warning originated -# and the warning text. Optionally the format may contain $version, which will -# be replaced by the version of the file (if it could be obtained via -# FILE_VERSION_FILTER) -# The default value is: $file:$line: $text. - -WARN_FORMAT = "$file:$line: $text" - -# The WARN_LOGFILE tag can be used to specify a file to which warning and error -# messages should be written. If left blank the output is written to standard -# error (stderr). - -WARN_LOGFILE = - -#--------------------------------------------------------------------------- -# Configuration options related to the input files -#--------------------------------------------------------------------------- - -# The INPUT tag is used to specify the files and/or directories that contain -# documented source files. You may enter file names like myfile.cpp or -# directories like /usr/src/myproject. Separate the files or directories with -# spaces. -# Note: If this tag is empty the current directory is searched. - -INPUT = "@PROJECT_SOURCE_DIR@/include" "@PROJECT_SOURCE_DIR@/examples" - -# This tag can be used to specify the character encoding of the source files -# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses -# libiconv (or the iconv built into libc) for the transcoding. See the libiconv -# documentation (see: http://www.gnu.org/software/libiconv) for the list of -# possible encodings. -# The default value is: UTF-8. 
- -INPUT_ENCODING = UTF-8 - -# If the value of the INPUT tag contains directories, you can use the -# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and -# *.h) to filter out the source-files in the directories. If left blank the -# following patterns are tested:*.c, *.cc, *.cxx, *.cpp, *.c++, *.java, *.ii, -# *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, *.hh, *.hxx, *.hpp, -# *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, *.m, *.markdown, -# *.md, *.mm, *.dox, *.py, *.f90, *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf, -# *.qsf, *.as and *.js. - -FILE_PATTERNS = - -# The RECURSIVE tag can be used to specify whether or not subdirectories should -# be searched for input files as well. -# The default value is: NO. - -RECURSIVE = YES - -# The EXCLUDE tag can be used to specify files and/or directories that should be -# excluded from the INPUT source files. This way you can easily exclude a -# subdirectory from a directory tree whose root is specified with the INPUT tag. -# -# Note that relative paths are relative to the directory from which doxygen is -# run. - -EXCLUDE = - -# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or -# directories that are symbolic links (a Unix file system feature) are excluded -# from the input. -# The default value is: NO. - -EXCLUDE_SYMLINKS = NO - -# If the value of the INPUT tag contains directories, you can use the -# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude -# certain files from those directories. -# -# Note that the wildcards are matched against the file with absolute path, so to -# exclude all test directories for example use the pattern */test/* - -EXCLUDE_PATTERNS = - -# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names -# (namespaces, classes, functions, etc.) that should be excluded from the -# output. The symbol name can be a fully qualified name, a word, or if the -# wildcard * is used, a substring. Examples: ANamespace, AClass, -# AClass::ANamespace, ANamespace::*Test -# -# Note that the wildcards are matched against the file with absolute path, so to -# exclude all test directories use the pattern */test/* - -EXCLUDE_SYMBOLS = - -# The EXAMPLE_PATH tag can be used to specify one or more files or directories -# that contain example code fragments that are included (see the \include -# command). - -EXAMPLE_PATH = - -# If the value of the EXAMPLE_PATH tag contains directories, you can use the -# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and -# *.h) to filter out the source-files in the directories. If left blank all -# files are included. - -EXAMPLE_PATTERNS = - -# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be -# searched for input files to be used with the \include or \dontinclude commands -# irrespective of the value of the RECURSIVE tag. -# The default value is: NO. - -EXAMPLE_RECURSIVE = NO - -# The IMAGE_PATH tag can be used to specify one or more files or directories -# that contain images that are to be included in the documentation (see the -# \image command). - -IMAGE_PATH = - -# The INPUT_FILTER tag can be used to specify a program that doxygen should -# invoke to filter for each input file. Doxygen will invoke the filter program -# by executing (via popen()) the command: -# -# <filter> <input-file> -# -# where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the -# name of an input file. Doxygen will then use the output that the filter -# program writes to standard output.
If FILTER_PATTERNS is specified, this tag -# will be ignored. -# -# Note that the filter must not add or remove lines; it is applied before the -# code is scanned, but not when the output code is generated. If lines are added -# or removed, the anchors will not be placed correctly. - -INPUT_FILTER = - -# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern -# basis. Doxygen will compare the file name with each pattern and apply the -# filter if there is a match. The filters are a list of the form: pattern=filter -# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how -# filters are used. If the FILTER_PATTERNS tag is empty or if none of the -# patterns match the file name, INPUT_FILTER is applied. - -FILTER_PATTERNS = - -# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using -# INPUT_FILTER ) will also be used to filter the input files that are used for -# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES). -# The default value is: NO. - -FILTER_SOURCE_FILES = NO - -# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file -# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and -# it is also possible to disable source filtering for a specific pattern using -# *.ext= (so without naming a filter). -# This tag requires that the tag FILTER_SOURCE_FILES is set to YES. - -FILTER_SOURCE_PATTERNS = - -# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that -# is part of the input, its contents will be placed on the main page -# (index.html). This can be useful if you have a project on for instance GitHub -# and want to reuse the introduction page also for the doxygen output. - -USE_MDFILE_AS_MAINPAGE = - -#--------------------------------------------------------------------------- -# Configuration options related to source browsing -#--------------------------------------------------------------------------- - -# If the SOURCE_BROWSER tag is set to YES then a list of source files will be -# generated. Documented entities will be cross-referenced with these sources. -# -# Note: To get rid of all source code in the generated output, make sure that -# also VERBATIM_HEADERS is set to NO. -# The default value is: NO. - -SOURCE_BROWSER = NO - -# Setting the INLINE_SOURCES tag to YES will include the body of functions, -# classes and enums directly into the documentation. -# The default value is: NO. - -INLINE_SOURCES = NO - -# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any -# special comment blocks from generated source code fragments. Normal C, C++ and -# Fortran comments will always remain visible. -# The default value is: YES. - -STRIP_CODE_COMMENTS = YES - -# If the REFERENCED_BY_RELATION tag is set to YES then for each documented -# function all documented functions referencing it will be listed. -# The default value is: NO. - -REFERENCED_BY_RELATION = NO - -# If the REFERENCES_RELATION tag is set to YES then for each documented function -# all documented entities called/used by that function will be listed. -# The default value is: NO. - -REFERENCES_RELATION = NO - -# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set -# to YES, then the hyperlinks from functions in REFERENCES_RELATION and -# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will -# link to the documentation. -# The default value is: YES. 
- -REFERENCES_LINK_SOURCE = YES - -# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the -# source code will show a tooltip with additional information such as prototype, -# brief description and links to the definition and documentation. Since this -# will make the HTML file larger and loading of large files a bit slower, you -# can opt to disable this feature. -# The default value is: YES. -# This tag requires that the tag SOURCE_BROWSER is set to YES. - -SOURCE_TOOLTIPS = YES - -# If the USE_HTAGS tag is set to YES then the references to source code will -# point to the HTML generated by the htags(1) tool instead of doxygen built-in -# source browser. The htags tool is part of GNU's global source tagging system -# (see http://www.gnu.org/software/global/global.html). You will need version -# 4.8.6 or higher. -# -# To use it do the following: -# - Install the latest version of global -# - Enable SOURCE_BROWSER and USE_HTAGS in the config file -# - Make sure the INPUT points to the root of the source tree -# - Run doxygen as normal -# -# Doxygen will invoke htags (and that will in turn invoke gtags), so these -# tools must be available from the command line (i.e. in the search path). -# -# The result: instead of the source browser generated by doxygen, the links to -# source code will now point to the output of htags. -# The default value is: NO. -# This tag requires that the tag SOURCE_BROWSER is set to YES. - -USE_HTAGS = NO - -# If the VERBATIM_HEADERS tag is set the YES then doxygen will generate a -# verbatim copy of the header file for each class for which an include is -# specified. Set to NO to disable this. -# See also: Section \class. -# The default value is: YES. - -VERBATIM_HEADERS = YES - -#--------------------------------------------------------------------------- -# Configuration options related to the alphabetical class index -#--------------------------------------------------------------------------- - -# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all -# compounds will be generated. Enable this if the project contains a lot of -# classes, structs, unions or interfaces. -# The default value is: YES. - -ALPHABETICAL_INDEX = YES - -# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in -# which the alphabetical index list will be split. -# Minimum value: 1, maximum value: 20, default value: 5. -# This tag requires that the tag ALPHABETICAL_INDEX is set to YES. - -COLS_IN_ALPHA_INDEX = 5 - -# In case all classes in a project start with a common prefix, all classes will -# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag -# can be used to specify a prefix (or a list of prefixes) that should be ignored -# while generating the index headers. -# This tag requires that the tag ALPHABETICAL_INDEX is set to YES. - -IGNORE_PREFIX = - -#--------------------------------------------------------------------------- -# Configuration options related to the HTML output -#--------------------------------------------------------------------------- - -# If the GENERATE_HTML tag is set to YES doxygen will generate HTML output -# The default value is: YES. - -GENERATE_HTML = YES - -# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a -# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of -# it. -# The default directory is: html. -# This tag requires that the tag GENERATE_HTML is set to YES. 
- -HTML_OUTPUT = html - -# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each -# generated HTML page (for example: .htm, .php, .asp). -# The default value is: .html. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_FILE_EXTENSION = .html - -# The HTML_HEADER tag can be used to specify a user-defined HTML header file for -# each generated HTML page. If the tag is left blank doxygen will generate a -# standard header. -# -# To get valid HTML the header file that includes any scripts and style sheets -# that doxygen needs, which is dependent on the configuration options used (e.g. -# the setting GENERATE_TREEVIEW). It is highly recommended to start with a -# default header using -# doxygen -w html new_header.html new_footer.html new_stylesheet.css -# YourConfigFile -# and then modify the file new_header.html. See also section "Doxygen usage" -# for information on how to generate the default header that doxygen normally -# uses. -# Note: The header is subject to change so you typically have to regenerate the -# default header when upgrading to a newer version of doxygen. For a description -# of the possible markers and block names see the documentation. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_HEADER = - -# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each -# generated HTML page. If the tag is left blank doxygen will generate a standard -# footer. See HTML_HEADER for more information on how to generate a default -# footer and what special commands can be used inside the footer. See also -# section "Doxygen usage" for information on how to generate the default footer -# that doxygen normally uses. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_FOOTER = - -# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style -# sheet that is used by each HTML page. It can be used to fine-tune the look of -# the HTML output. If left blank doxygen will generate a default style sheet. -# See also section "Doxygen usage" for information on how to generate the style -# sheet that doxygen normally uses. -# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as -# it is more robust and this tag (HTML_STYLESHEET) will in the future become -# obsolete. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_STYLESHEET = - -# The HTML_EXTRA_STYLESHEET tag can be used to specify an additional user- -# defined cascading style sheet that is included after the standard style sheets -# created by doxygen. Using this option one can overrule certain style aspects. -# This is preferred over using HTML_STYLESHEET since it does not replace the -# standard style sheet and is therefor more robust against future updates. -# Doxygen will copy the style sheet file to the output directory. For an example -# see the documentation. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_EXTRA_STYLESHEET = - -# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or -# other source files which should be copied to the HTML output directory. Note -# that these files will be copied to the base HTML output directory. Use the -# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these -# files. In the HTML_STYLESHEET file, use the file name only. Also note that the -# files will be copied as-is; there are no commands or markers available. -# This tag requires that the tag GENERATE_HTML is set to YES. 
- -HTML_EXTRA_FILES = - -# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen -# will adjust the colors in the stylesheet and background images according to -# this color. Hue is specified as an angle on a colorwheel, see -# http://en.wikipedia.org/wiki/Hue for more information. For instance the value -# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300 -# purple, and 360 is red again. -# Minimum value: 0, maximum value: 359, default value: 220. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_COLORSTYLE_HUE = 220 - -# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors -# in the HTML output. For a value of 0 the output will use grayscales only. A -# value of 255 will produce the most vivid colors. -# Minimum value: 0, maximum value: 255, default value: 100. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_COLORSTYLE_SAT = 100 - -# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the -# luminance component of the colors in the HTML output. Values below 100 -# gradually make the output lighter, whereas values above 100 make the output -# darker. The value divided by 100 is the actual gamma applied, so 80 represents -# a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not -# change the gamma. -# Minimum value: 40, maximum value: 240, default value: 80. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_COLORSTYLE_GAMMA = 80 - -# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML -# page will contain the date and time when the page was generated. Setting this -# to NO can help when comparing the output of multiple runs. -# The default value is: YES. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_TIMESTAMP = YES - -# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML -# documentation will contain sections that can be hidden and shown after the -# page has loaded. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_DYNAMIC_SECTIONS = NO - -# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries -# shown in the various tree structured indices initially; the user can expand -# and collapse entries dynamically later on. Doxygen will expand the tree to -# such a level that at most the specified number of entries are visible (unless -# a fully collapsed tree already exceeds this amount). So setting the number of -# entries 1 will produce a full collapsed tree by default. 0 is a special value -# representing an infinite number of entries and will result in a full expanded -# tree by default. -# Minimum value: 0, maximum value: 9999, default value: 100. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_INDEX_NUM_ENTRIES = 100 - -# If the GENERATE_DOCSET tag is set to YES, additional index files will be -# generated that can be used as input for Apple's Xcode 3 integrated development -# environment (see: http://developer.apple.com/tools/xcode/), introduced with -# OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a -# Makefile in the HTML output directory. Running make will produce the docset in -# that directory and running make install will install the docset in -# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at -# startup. 
See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html -# for more information. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -GENERATE_DOCSET = NO - -# This tag determines the name of the docset feed. A documentation feed provides -# an umbrella under which multiple documentation sets from a single provider -# (such as a company or product suite) can be grouped. -# The default value is: Doxygen generated docs. -# This tag requires that the tag GENERATE_DOCSET is set to YES. - -DOCSET_FEEDNAME = "Doxygen generated docs" - -# This tag specifies a string that should uniquely identify the documentation -# set bundle. This should be a reverse domain-name style string, e.g. -# com.mycompany.MyDocSet. Doxygen will append .docset to the name. -# The default value is: org.doxygen.Project. -# This tag requires that the tag GENERATE_DOCSET is set to YES. - -DOCSET_BUNDLE_ID = org.doxygen.Project - -# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify -# the documentation publisher. This should be a reverse domain-name style -# string, e.g. com.mycompany.MyDocSet.documentation. -# The default value is: org.doxygen.Publisher. -# This tag requires that the tag GENERATE_DOCSET is set to YES. - -DOCSET_PUBLISHER_ID = org.doxygen.Publisher - -# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher. -# The default value is: Publisher. -# This tag requires that the tag GENERATE_DOCSET is set to YES. - -DOCSET_PUBLISHER_NAME = Publisher - -# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three -# additional HTML index files: index.hhp, index.hhc, and index.hhk. The -# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop -# (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on -# Windows. -# -# The HTML Help Workshop contains a compiler that can convert all HTML output -# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML -# files are now used as the Windows 98 help format, and will replace the old -# Windows help format (.hlp) on all Windows platforms in the future. Compressed -# HTML files also contain an index, a table of contents, and you can search for -# words in the documentation. The HTML workshop also contains a viewer for -# compressed HTML files. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -GENERATE_HTMLHELP = NO - -# The CHM_FILE tag can be used to specify the file name of the resulting .chm -# file. You can add a path in front of the file if the result should not be -# written to the html output directory. -# This tag requires that the tag GENERATE_HTMLHELP is set to YES. - -CHM_FILE = - -# The HHC_LOCATION tag can be used to specify the location (absolute path -# including file name) of the HTML help compiler ( hhc.exe). If non-empty -# doxygen will try to run the HTML help compiler on the generated index.hhp. -# The file has to be specified with full path. -# This tag requires that the tag GENERATE_HTMLHELP is set to YES. - -HHC_LOCATION = - -# The GENERATE_CHI flag controls if a separate .chi index file is generated ( -# YES) or that it should be included in the master .chm file ( NO). -# The default value is: NO. -# This tag requires that the tag GENERATE_HTMLHELP is set to YES. - -GENERATE_CHI = NO - -# The CHM_INDEX_ENCODING is used to encode HtmlHelp index ( hhk), content ( hhc) -# and project file content. 
-# This tag requires that the tag GENERATE_HTMLHELP is set to YES. - -CHM_INDEX_ENCODING = - -# The BINARY_TOC flag controls whether a binary table of contents is generated ( -# YES) or a normal table of contents ( NO) in the .chm file. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTMLHELP is set to YES. - -BINARY_TOC = NO - -# The TOC_EXPAND flag can be set to YES to add extra items for group members to -# the table of contents of the HTML help documentation and to the tree view. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTMLHELP is set to YES. - -TOC_EXPAND = NO - -# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and -# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that -# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help -# (.qch) of the generated HTML documentation. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -GENERATE_QHP = NO - -# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify -# the file name of the resulting .qch file. The path specified is relative to -# the HTML output folder. -# This tag requires that the tag GENERATE_QHP is set to YES. - -QCH_FILE = - -# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help -# Project output. For more information please see Qt Help Project / Namespace -# (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#namespace). -# The default value is: org.doxygen.Project. -# This tag requires that the tag GENERATE_QHP is set to YES. - -QHP_NAMESPACE = org.doxygen.Project - -# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt -# Help Project output. For more information please see Qt Help Project / Virtual -# Folders (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#virtual- -# folders). -# The default value is: doc. -# This tag requires that the tag GENERATE_QHP is set to YES. - -QHP_VIRTUAL_FOLDER = doc - -# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom -# filter to add. For more information please see Qt Help Project / Custom -# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom- -# filters). -# This tag requires that the tag GENERATE_QHP is set to YES. - -QHP_CUST_FILTER_NAME = - -# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the -# custom filter to add. For more information please see Qt Help Project / Custom -# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom- -# filters). -# This tag requires that the tag GENERATE_QHP is set to YES. - -QHP_CUST_FILTER_ATTRS = - -# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this -# project's filter section matches. Qt Help Project / Filter Attributes (see: -# http://qt-project.org/doc/qt-4.8/qthelpproject.html#filter-attributes). -# This tag requires that the tag GENERATE_QHP is set to YES. - -QHP_SECT_FILTER_ATTRS = - -# The QHG_LOCATION tag can be used to specify the location of Qt's -# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the -# generated .qhp file. -# This tag requires that the tag GENERATE_QHP is set to YES. - -QHG_LOCATION = - -# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be -# generated, together with the HTML files, they form an Eclipse help plugin. 
To -# install this plugin and make it available under the help contents menu in -# Eclipse, the contents of the directory containing the HTML and XML files needs -# to be copied into the plugins directory of eclipse. The name of the directory -# within the plugins directory should be the same as the ECLIPSE_DOC_ID value. -# After copying Eclipse needs to be restarted before the help appears. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -GENERATE_ECLIPSEHELP = NO - -# A unique identifier for the Eclipse help plugin. When installing the plugin -# the directory name containing the HTML and XML files should also have this -# name. Each documentation set should have its own identifier. -# The default value is: org.doxygen.Project. -# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES. - -ECLIPSE_DOC_ID = org.doxygen.Project - -# If you want full control over the layout of the generated HTML pages it might -# be necessary to disable the index and replace it with your own. The -# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top -# of each HTML page. A value of NO enables the index and the value YES disables -# it. Since the tabs in the index contain the same information as the navigation -# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -DISABLE_INDEX = NO - -# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index -# structure should be generated to display hierarchical information. If the tag -# value is set to YES, a side panel will be generated containing a tree-like -# index structure (just like the one that is generated for HTML Help). For this -# to work a browser that supports JavaScript, DHTML, CSS and frames is required -# (i.e. any modern browser). Windows users are probably better off using the -# HTML help feature. Via custom stylesheets (see HTML_EXTRA_STYLESHEET) one can -# further fine-tune the look of the index. As an example, the default style -# sheet generated by doxygen has an example that shows how to put an image at -# the root of the tree instead of the PROJECT_NAME. Since the tree basically has -# the same information as the tab index, you could consider setting -# DISABLE_INDEX to YES when enabling this option. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -GENERATE_TREEVIEW = NO - -# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that -# doxygen will group on one line in the generated HTML documentation. -# -# Note that a value of 0 will completely suppress the enum values from appearing -# in the overview section. -# Minimum value: 0, maximum value: 20, default value: 4. -# This tag requires that the tag GENERATE_HTML is set to YES. - -ENUM_VALUES_PER_LINE = 4 - -# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used -# to set the initial width (in pixels) of the frame in which the tree is shown. -# Minimum value: 0, maximum value: 1500, default value: 250. -# This tag requires that the tag GENERATE_HTML is set to YES. - -TREEVIEW_WIDTH = 250 - -# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open links to -# external symbols imported via tag files in a separate window. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. 
- -EXT_LINKS_IN_WINDOW = NO - -# Use this tag to change the font size of LaTeX formulas included as images in -# the HTML documentation. When you change the font size after a successful -# doxygen run you need to manually remove any form_*.png images from the HTML -# output directory to force them to be regenerated. -# Minimum value: 8, maximum value: 50, default value: 10. -# This tag requires that the tag GENERATE_HTML is set to YES. - -FORMULA_FONTSIZE = 10 - -# Use the FORMULA_TRANPARENT tag to determine whether or not the images -# generated for formulas are transparent PNGs. Transparent PNGs are not -# supported properly for IE 6.0, but are supported on all modern browsers. -# -# Note that when changing this option you need to delete any form_*.png files in -# the HTML output directory before the changes have effect. -# The default value is: YES. -# This tag requires that the tag GENERATE_HTML is set to YES. - -FORMULA_TRANSPARENT = YES - -# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see -# http://www.mathjax.org) which uses client side Javascript for the rendering -# instead of using prerendered bitmaps. Use this if you do not have LaTeX -# installed or if you want to formulas look prettier in the HTML output. When -# enabled you may also need to install MathJax separately and configure the path -# to it using the MATHJAX_RELPATH option. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -USE_MATHJAX = NO - -# When MathJax is enabled you can set the default output format to be used for -# the MathJax output. See the MathJax site (see: -# http://docs.mathjax.org/en/latest/output.html) for more details. -# Possible values are: HTML-CSS (which is slower, but has the best -# compatibility), NativeMML (i.e. MathML) and SVG. -# The default value is: HTML-CSS. -# This tag requires that the tag USE_MATHJAX is set to YES. - -MATHJAX_FORMAT = HTML-CSS - -# When MathJax is enabled you need to specify the location relative to the HTML -# output directory using the MATHJAX_RELPATH option. The destination directory -# should contain the MathJax.js script. For instance, if the mathjax directory -# is located at the same level as the HTML output directory, then -# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax -# Content Delivery Network so you can quickly see the result without installing -# MathJax. However, it is strongly recommended to install a local copy of -# MathJax from http://www.mathjax.org before deployment. -# The default value is: http://cdn.mathjax.org/mathjax/latest. -# This tag requires that the tag USE_MATHJAX is set to YES. - -MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest - -# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax -# extension names that should be enabled during MathJax rendering. For example -# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols -# This tag requires that the tag USE_MATHJAX is set to YES. - -MATHJAX_EXTENSIONS = - -# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces -# of code that will be used on startup of the MathJax code. See the MathJax site -# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an -# example see the documentation. -# This tag requires that the tag USE_MATHJAX is set to YES. - -MATHJAX_CODEFILE = - -# When the SEARCHENGINE tag is enabled doxygen will generate a search box for -# the HTML output. 
The underlying search engine uses javascript and DHTML and -# should work on any modern browser. Note that when using HTML help -# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET) -# there is already a search function so this one should typically be disabled. -# For large projects the javascript based search engine can be slow, then -# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to -# search using the keyboard; to jump to the search box use <access key> + S -# (what the <access key> is depends on the OS and browser, but it is typically -# <CTRL>, <ALT>/