diff --git a/.circleci/config.yml b/.circleci/config.yml deleted file mode 100644 index 5a01544e01..0000000000 --- a/.circleci/config.yml +++ /dev/null @@ -1,64 +0,0 @@ -version: 2.1 - -jobs: - test_tool_and_runtime_java: - docker: - - image: cimg/openjdk:11.0 - steps: - - checkout - - run: - name: build tool - command: mvn -B -V -DskipTests=true -Dmaven.javadoc.skip=true install - - run: - name: test runtime - command: | - cd runtime-testsuite - mvn -Dparallel=classes -DthreadCount=4 -Dtest=java.** test - cd .. - - run: - name: test tool - command: | - cd tool-testsuite - mvn -Dparallel=classes -DthreadCount=4 test - cd .. - test_runtime: - parameters: - test-group: - description: The section - type: string - default: ALL - target: - description: The target - type: string - default: java - docker: - - image: cimg/openjdk:11.0 - environment: - MAVEN_OPTS: -Xmx512m - parallelism: 4 - resource_class: large - environment: - TARGET: << parameters.target >> - GROUP: << parameters.test-group >> - steps: - - checkout - - run: - name: Install << parameters.target >> pre-requisites - command: | - f=".circleci/scripts/install-linux-<< parameters.target >>.sh"; ! [ -x "$f" ] || "$f" - - run: - name: Build ANTLR4 tool - command: mvn -B -V -DskipTests=true -Dmaven.javadoc.skip=true install - - run: - name: Test << parameters.target >> runtime - command: | - .circleci/scripts/run-tests-<< parameters.target >>.sh - -workflows: - build: - jobs: - - test_tool_and_runtime_java - - test_runtime: - matrix: - parameters: - target: [ dart, go, python2, python3, javascript, php, cpp, dotnet ] diff --git a/.circleci/scripts/install-linux-cpp.sh b/.circleci/scripts/install-linux-cpp.sh deleted file mode 100755 index 8e3b003a0d..0000000000 --- a/.circleci/scripts/install-linux-cpp.sh +++ /dev/null @@ -1,35 +0,0 @@ -#!/bin/bash - -set -euo pipefail - -echo "installing cpp SDK..." 
- -sudo apt-get update -y -sudo apt-get install -y clang -sudo apt-get install -y cmake -sudo apt-get install -y pkg-config -sudo apt-get install -y uuid-dev - -echo "done installing cpp SDK" - -clang++ --version -cmake --version - -echo "building cpp runtime..." - -pushd "runtime/Cpp/" - echo $PWD - rc=0 - if [ $rc == 0 ]; then - cmake . -DCMAKE_BUILD_TYPE=release - rc=$? - fi - if [ $rc == 0 ]; then - make -j 8 - rc=$? - fi -popd - - -echo "done building cpp runtime" - diff --git a/.circleci/scripts/install-linux-dart.sh b/.circleci/scripts/install-linux-dart.sh deleted file mode 100755 index 25e857606b..0000000000 --- a/.circleci/scripts/install-linux-dart.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/bash - -set -euo pipefail - -echo "installing dart SDK..." -sudo apt-get update -sudo apt-get install apt-transport-https -sudo sh -c 'wget -qO- https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add -' -sudo sh -c 'wget -qO- https://storage.googleapis.com/download.dartlang.org/linux/debian/dart_stable.list > /etc/apt/sources.list.d/dart_stable.list' -sudo apt-get update -sudo apt-get install dart=2.12.1-1 -export PATH="$PATH:/usr/lib/dart/bin" -echo "done installing dart SDK" -sudo apt-get install -f diff --git a/.circleci/scripts/install-linux-dotnet.sh b/.circleci/scripts/install-linux-dotnet.sh deleted file mode 100755 index 1e23257e42..0000000000 --- a/.circleci/scripts/install-linux-dotnet.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash - -set -euo pipefail - -echo "installing .Net SDK..." 
-wget https://packages.microsoft.com/config/ubuntu/16.04/packages-microsoft-prod.deb -O packages-microsoft-prod.deb -sudo dpkg -i packages-microsoft-prod.deb -sudo apt-get update; \ - sudo apt-get install -y apt-transport-https && \ - sudo apt-get update && \ - sudo apt-get install -y dotnet-sdk-3.1 -export PATH=$PATH:~/.dotnet -echo "done installing .Net SDK" - -# we need to build the runtime before test run, since we used "--no-dependencies" -# when we call dotnet cli for restore and build, in order to speed up -echo "building runtime..." -dotnet build -c Release -f netstandard2.0 runtime/CSharp/src/Antlr4.csproj -echo "done building runtime" diff --git a/.circleci/scripts/install-linux-go.sh b/.circleci/scripts/install-linux-go.sh deleted file mode 100755 index a3d0937eca..0000000000 --- a/.circleci/scripts/install-linux-go.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash - -set -euo pipefail - -echo "installing go SDK..." -sudo apt update -curl -OL https://go.dev/dl/go1.19.linux-amd64.tar.gz -sudo tar -C /usr/local -xf go1.19.linux-amd64.tar.gz -export PATH=$PATH:/usr/local/go/bin -echo -n "go bin: "; ls -l /usr/local/go/bin -go version -echo "done installing go SDK" diff --git a/.circleci/scripts/install-linux-javascript.sh b/.circleci/scripts/install-linux-javascript.sh deleted file mode 100755 index 2b8c8912a1..0000000000 --- a/.circleci/scripts/install-linux-javascript.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash - -set -euo pipefail - -# use v14 and check -echo "installing nodejs..." -curl -sL https://deb.nodesource.com/setup_14.x | sudo -E bash - -sudo apt-get install -y nodejs -echo node version: $(node --version) -echo "done installing nodejs" - -echo "packaging javascript runtime..." 
-pushd runtime/JavaScript - sudo npm install - sudo npm link -popd -echo "done packaging javascript runtime" diff --git a/.circleci/scripts/install-linux-libcurl3.sh b/.circleci/scripts/install-linux-libcurl3.sh deleted file mode 100755 index 6fe59bafac..0000000000 --- a/.circleci/scripts/install-linux-libcurl3.sh +++ /dev/null @@ -1,24 +0,0 @@ -#!/bin/bash - -echo "before patching" -ls -all /lib/x86_64-linux-gnu/ | grep libcurl - -# This would fix missing CURL_OPENSSL_3 -# use a dedicated temp dir in the user space -mkdir ~/libcurl3 -cd ~/libcurl3 -# fetch latest libcurl3 -wget http://archive.ubuntu.com/ubuntu/pool/main/c/curl/libcurl3_7.47.0-1ubuntu2_amd64.deb -# extract data.tar.xz -ar x libcurl3* data.tar.xz -# extract all from data.tar.xz -tar xf data.tar.xz -# copy libcurl.so.3 where required -sudo cp -L ~/libcurl3/usr/lib/x86_64-linux-gnu/libcurl.so.4.4.0 /lib/x86_64-linux-gnu/libcurl.so.4.4.0 -sudo ln -sf libcurl.so.4.4.0 /lib/x86_64-linux-gnu/libcurl.so.4 -cd .. -# drop dedicated temp dir -sudo rm -rf ~/libcurl3 - -echo "after patching" -ls -all /lib/x86_64-linux-gnu/ | grep libcurl diff --git a/.circleci/scripts/install-linux-php.sh b/.circleci/scripts/install-linux-php.sh deleted file mode 100755 index 4127d1ce61..0000000000 --- a/.circleci/scripts/install-linux-php.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/bash - -set -euo pipefail - -sudo apt install software-properties-common -sudo add-apt-repository ppa:ondrej/php -sudo apt update - -sudo apt install wget php8.0-cli php8.0-zip unzip -wget -O composer-setup.php https://getcomposer.org/installer -sudo php composer-setup.php --install-dir=/usr/local/bin --filename=composer - -sudo apt install php8.0 -sudo apt install php8.0-mbstring -sudo apt install php8.0-xml -php -v - - -git clone https://github.com/antlr/antlr-php-runtime.git runtime/PHP -composer install -d runtime/PHP - -mvn install -DskipTests=true -Dmaven.javadoc.skip=true -B -V \ No newline at end of file diff --git 
a/.circleci/scripts/install-linux-python2.sh b/.circleci/scripts/install-linux-python2.sh deleted file mode 100755 index 5549c1b9c3..0000000000 --- a/.circleci/scripts/install-linux-python2.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash - -set -euo pipefail - -echo "installing python 2..." -sudo apt-get update -y -sudo apt-get install python2 -echo "done installing python 2" diff --git a/.circleci/scripts/install-linux-python3.sh b/.circleci/scripts/install-linux-python3.sh deleted file mode 100755 index 4c90f519ca..0000000000 --- a/.circleci/scripts/install-linux-python3.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash - -set -euo pipefail - -echo "installing python 3..." -sudo apt-get update -y -sudo apt-get install python3 -echo "done installing python 3" diff --git a/.circleci/scripts/install-linux-swift.sh b/.circleci/scripts/install-linux-swift.sh deleted file mode 100755 index 7390175657..0000000000 --- a/.circleci/scripts/install-linux-swift.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/bash - -set -euo pipefail - -echo "installing swift SDK..." - -.circleci/scripts/install-linux-libcurl3.sh - -# see https://tecadmin.net/install-swift-ubuntu-1604-xenial/ -sudo apt-get update -y -sudo apt-get install clang libicu-dev -sudo apt-get install libpython2.7 libpython2.7-dev - -export SWIFT_VERSION=swift-5.3.2 -echo "installing gpg key..." -wget -q -O - https://swift.org/keys/all-keys.asc | sudo gpg --import - -echo "downloading SDK gpg key..." 
-SWIFT_SDK=https://swift.org/builds/$SWIFT_VERSION-release/ubuntu1604/$SWIFT_VERSION-RELEASE/$SWIFT_VERSION-RELEASE-ubuntu16.04.tar.gz -echo $SWIFT_SDK -wget -q $SWIFT_SDK -sudo tar xzf $SWIFT_VERSION-RELEASE-ubuntu16.04.tar.gz -mv $SWIFT_VERSION-RELEASE-ubuntu16.04 $PWD/swift - -export SWIFT_HOME=$PWD/swift/$SWIFT_VERSION-RELEASE-ubuntu16.04/usr/bin/ -export PATH=$PWD/swift/usr/bin:$PATH - -# This would fix a know linker issue mentioned in: # https://bugs.swift.org/browse/SR-2299 -sudo ln -sf ld.gold /usr/bin/ld -# This would fix missing libtinfo.so.5 -sudo apt install libncurses5 - -echo "done installing swift SDK..." - -# check swift -swift --version -swift build --version diff --git a/.circleci/scripts/run-tests-cpp.sh b/.circleci/scripts/run-tests-cpp.sh deleted file mode 100755 index 58b59ff283..0000000000 --- a/.circleci/scripts/run-tests-cpp.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash - -set -euo pipefail - -pushd runtime-testsuite -export MAVEN_OPTS="-Xmx8g" -mvn -Dparallel=classes -DthreadCount=4 -Dtest=cpp.** test -popd diff --git a/.circleci/scripts/run-tests-dart.sh b/.circleci/scripts/run-tests-dart.sh deleted file mode 100755 index 2f9015df6c..0000000000 --- a/.circleci/scripts/run-tests-dart.sh +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash - -set -euo pipefail - -dart --version - -pushd runtime-testsuite - echo "running maven tests..." 
- export MAVEN_OPTS="-Xmx8g" - mvn -Dparallel=classes -DthreadCount=4 -Dtest=dart.** test -popd diff --git a/.circleci/scripts/run-tests-dotnet.sh b/.circleci/scripts/run-tests-dotnet.sh deleted file mode 100755 index c0e6eeef6f..0000000000 --- a/.circleci/scripts/run-tests-dotnet.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash - -set -euo pipefail - -pushd runtime-testsuite -export MAVEN_OPTS="-Xmx8g" -mvn -Dparallel=classes -DthreadCount=4 -Dtest=csharp.** test -popd diff --git a/.circleci/scripts/run-tests-go.sh b/.circleci/scripts/run-tests-go.sh deleted file mode 100755 index 4bf8ef67bd..0000000000 --- a/.circleci/scripts/run-tests-go.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash - -set -euo pipefail - -export PATH=$PATH:/usr/local/go/bin # for use on linux -go version - -pushd runtime-testsuite - echo "running maven tests..." - export MAVEN_OPTS="-Xmx8g" - mvn -Dparallel=classes -DthreadCount=4 -Dtest=go.** test -popd diff --git a/.circleci/scripts/run-tests-javascript.sh b/.circleci/scripts/run-tests-javascript.sh deleted file mode 100755 index b0b461fce0..0000000000 --- a/.circleci/scripts/run-tests-javascript.sh +++ /dev/null @@ -1,24 +0,0 @@ -#!/bin/bash - -set -euo pipefail - -declare -i RESULT=0 - -pushd runtime/JavaScript - - echo "running jasmine tests..." - npm test - RESULT+=$? - -popd - -pushd runtime-testsuite - - echo "running maven tests..." - export MAVEN_OPTS="-Xmx8g" - mvn -Dtest=javascript.** test - RESULT+=$? - -popd - -exit $RESULT diff --git a/.circleci/scripts/run-tests-php.sh b/.circleci/scripts/run-tests-php.sh deleted file mode 100755 index e46ebb3d66..0000000000 --- a/.circleci/scripts/run-tests-php.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash - -set -euo pipefail - -php -v - -php_path=$(which php) -pushd runtime-testsuite - echo "running maven tests..." 
- export MAVEN_OPTS="-Xmx8g" - mvn -DPHP_PATH="${php_path}" -Dparallel=classes -DthreadCount=4 -Dtest=php.** test -popd diff --git a/.circleci/scripts/run-tests-python2.sh b/.circleci/scripts/run-tests-python2.sh deleted file mode 100755 index 772009ac3d..0000000000 --- a/.circleci/scripts/run-tests-python2.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/bash - -set -euo pipefail - -python2 --version - -# TODO: https://github.com/antlr/antlr4/issues/3521 -# -# pushd runtime/Python2/tests -# echo "running native tests..." -# python2 run.py -# rc=$? -# if [ $rc != 0 ]; then -# echo "failed running native tests" -# fi -# popd - -pushd runtime-testsuite - echo "running maven tests..." - export MAVEN_OPTS="-Xmx8g" - mvn -Dparallel=classes -DthreadCount=4 -Dtest=python2.** test -popd \ No newline at end of file diff --git a/.circleci/scripts/run-tests-python3.sh b/.circleci/scripts/run-tests-python3.sh deleted file mode 100755 index 2be5773a1c..0000000000 --- a/.circleci/scripts/run-tests-python3.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/bash - -set -euo pipefail - -python3 --version - -# TODO: https://github.com/antlr/antlr4/issues/3521 -# -# pushd runtime/Python3/tests -# echo "running native tests..." -# python3 run.py -# rc=$? -# if [ $rc != 0 ]; then -# echo "failed running native tests" -# fi -# popd - -pushd runtime-testsuite - echo "running maven tests..." - export MAVEN_OPTS="-Xmx8g" - mvn -Dparallel=classes -DthreadCount=4 -Dtest=python3.** test -popd diff --git a/.circleci/scripts/run-tests-swift.sh b/.circleci/scripts/run-tests-swift.sh deleted file mode 100755 index bc68f84735..0000000000 --- a/.circleci/scripts/run-tests-swift.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/bash - -# Appears to be unused - -set -euo pipefail - -# TODO: https://github.com/antlr/antlr4/issues/3521 -# -# pushd runtime/Swift -# echo "running native tests..." -# ./boot.py --test -# rc=$? 
-# if [ $rc != 0 ]; then -# echo "failed running native tests" -# fi -# popd - -pushd runtime-testsuite - echo "running maven tests..." - export MAVEN_OPTS="-Xmx8g" - mvn -Dparallel=classes -DthreadCount=4 -Dtest=swift.** test -popd diff --git a/.github/workflows/hosted.yml b/.github/workflows/hosted.yml index af41ed511e..fd8570b958 100644 --- a/.github/workflows/hosted.yml +++ b/.github/workflows/hosted.yml @@ -22,8 +22,6 @@ jobs: exclude: - os: windows-latest compiler: gcc - - os: windows-latest - compiler: clang include: - os: windows-latest compiler: cl @@ -158,6 +156,7 @@ jobs: windows-latest ] target: [ + tool, cpp, csharp, dart, @@ -170,30 +169,6 @@ jobs: swift, ] exclude: - - os: macos-latest - target: dart - - os: macos-latest - target: python3 - - os: macos-latest - target: swift - - - os: ubuntu-latest - target: csharp - - os: ubuntu-latest - target: dart - - - os: windows-latest - target: cpp - - os: windows-latest - target: csharp - - os: windows-latest - target: dart - - os: windows-latest - target: php - - os: windows-latest - target: python2 - - os: windows-latest - target: python3 - os: windows-latest target: swift @@ -234,7 +209,7 @@ jobs: if: steps.setup-java.outputs.cache-hit != 'true' uses: stCarolas/setup-maven@v4.4 with: - maven-version: 3.5.4 + maven-version: 3.8.5 - name: Add msbuild to PATH if: startswith(matrix.os, 'windows') && (matrix.target == 'cpp') @@ -306,17 +281,17 @@ jobs: if: startswith(matrix.os, 'macos') && (matrix.target == 'cpp') run: echo "PATH=$(brew --prefix)/opt/ccache/libexec:$PATH" >> $GITHUB_ENV - - name: Build tool with Maven + - name: Build ANTLR with Maven run: mvn install -DskipTests=true -Darguments="-Dmaven.javadoc.skip=true" -B -V - name: Test tool - if: matrix.target == 'java' + if: matrix.target == 'tool' run: | cd tool-testsuite mvn test - name: Test runtime (Windows) - if: startsWith(matrix.os, 'windows') + if: startsWith(matrix.os, 'windows') && (matrix.target != 'tool') run: | gci env:* | sort-object name 
@@ -332,7 +307,7 @@ jobs: CMAKE_GENERATOR: Ninja - name: Test runtime (non-Windows) - if: startsWith(matrix.os, 'ubuntu') || startsWith(matrix.os, 'macos') + if: (startsWith(matrix.os, 'ubuntu') || startsWith(matrix.os, 'macos')) && (matrix.target != 'tool') run: | env diff --git a/.gitignore b/.gitignore index 5c99ef58d6..f517fa3f88 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,6 @@ +# Nuget packages +*.nupkg + # Maven build folders target/ # ... but not code generation targets @@ -29,9 +32,8 @@ __pycache__/ *.userosscache *.sln.docstates -# User-specific files (MonoDevelop/Xamarin Studio) +# User-specific files (MonoDevelop/Xamarin Studio/Visual Studio) *.userprefs -*.user .vs/ project.lock.json @@ -47,9 +49,6 @@ bld/ [Oo]bj/ [Ll]og/ -# Visual Studio 2015 cache/options directory -.vs/ - # NetBeans user configuration files nbactions*.xml /nbproject/private/ @@ -102,3 +101,26 @@ javac-services.0.log.lck !runtime/Python3/test/ Antlr4.sln runtime/PHP + +# Swift binaries +.build/ + +# Cpp generated build files +runtime/Cpp/CMakeCache.txt +runtime/Cpp/CMakeFiles/ +runtime/Cpp/CPackConfig.cmake +runtime/Cpp/CPackSourceConfig.cmake +runtime/Cpp/CTestTestfile.cmake +runtime/Cpp/Makefile +runtime/Cpp/_deps/ +runtime/Cpp/cmake_install.cmake +runtime/Cpp/runtime/CMakeFiles/ +runtime/Cpp/runtime/CTestTestfile.cmake +runtime/Cpp/runtime/Makefile +runtime/Cpp/runtime/antlr4_tests +runtime/Cpp/runtime/antlr4_tests\[1]_include.cmake +runtime/Cpp/runtime/antlr4_tests\[1]_tests.cmake +runtime/Cpp/runtime/cmake_install.cmake +runtime/Cpp/runtime/libantlr4-runtime.4.10.1.dylib +runtime/Cpp/runtime/libantlr4-runtime.a +runtime/Cpp/runtime/libantlr4-runtime.dylib diff --git a/.gitmodules b/.gitmodules deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 33db1610db..0000000000 --- a/.travis.yml +++ /dev/null @@ -1,38 +0,0 @@ -sudo: true - -language: java - -before_cache: - - rm -rf 
$HOME/.m2/repository/org/antlr -cache: - timeout: 600 - directories: - - $HOME/.m2 - - $HOME/Library/Caches/Antlr4 - - $HOME/Library/Caches/Homebrew - -stages: -# - smoke-test -# - main-test - - extended-test - -matrix: - include: - - os: linux - dist: xenial - compiler: clang - env: - - TARGET=swift - - GROUP=ALL - stage: extended-test - -before_install: - - f="./.travis/before-install-$TRAVIS_OS_NAME-$TARGET.sh"; ! [ -x "$f" ] || "$f" - -script: - - | - cd runtime-testsuite; - travis_wait 40 ../.travis/run-tests-$TARGET.sh - rc=$? - cat target/surefire-reports/*.dumpstream || true - exit $rc diff --git a/LICENSE.txt b/LICENSE.txt index 2042d1bda6..5d27694155 100644 --- a/LICENSE.txt +++ b/LICENSE.txt @@ -1,52 +1,28 @@ -[The "BSD 3-clause license"] -Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - 3. Neither the name of the copyright holder nor the names of its contributors - may be used to endorse or promote products derived from this software - without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR -IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES -OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
-IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, -INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT -NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF -THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -===== - -MIT License for codepointat.js from https://git.io/codepointat -MIT License for fromcodepoint.js from https://git.io/vDW1m - -Copyright Mathias Bynens - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + +2. 
Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + +3. Neither name of copyright holders nor the names of its contributors +may be used to endorse or promote products derived from this software +without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR +CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/THIRD-PARTY-NOTICES.txt b/THIRD-PARTY-NOTICES.txt new file mode 100644 index 0000000000..defb29eba5 --- /dev/null +++ b/THIRD-PARTY-NOTICES.txt @@ -0,0 +1,26 @@ +"antlr4" uses third-party libraries or other resources that may be distributed under licenses different than "antlr4". + +1. 
String.prototype.codePointAt (https://github.com/mathiasbynens/String.prototype.codePointAt) + +%% License notice for String.prototype.codePointAt +================================================== +Copyright Mathias Bynens + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/antlr4-maven-plugin/pom.xml b/antlr4-maven-plugin/pom.xml index 57768a8a4b..9ddf63ccd7 100644 --- a/antlr4-maven-plugin/pom.xml +++ b/antlr4-maven-plugin/pom.xml @@ -8,18 +8,18 @@ org.antlr antlr4-master - 4.10.2-SNAPSHOT + 4.11.0-SNAPSHOT antlr4-maven-plugin maven-plugin ANTLR 4 Maven plugin Maven plugin for ANTLR 4 grammars - + 2009 - 3.8.4 + 3.8.5 @@ -34,13 +34,13 @@ org.apache.maven maven-plugin-api - 3.8.4 + ${mavenVersion} provided org.codehaus.plexus plexus-compiler-api - 2.9.0 + 2.12.1 org.sonatype.plexus @@ -65,7 +65,7 @@ org.apache.maven.plugin-tools maven-plugin-annotations - 3.6.2 + 3.6.4 provided @@ -89,18 +89,18 @@ org.codehaus.plexus plexus-utils - 3.4.1 + 3.4.2 provided org.slf4j slf4j-api - 1.7.32 + 2.0.0 org.slf4j slf4j-simple - 1.7.32 + 2.0.0 @@ -166,11 +166,6 @@ - - org.apache.maven.plugins - maven-plugin-plugin - 3.3 - org.apache.maven.plugins maven-javadoc-plugin @@ -179,11 +174,6 @@ true - - org.apache.maven.plugins - maven-jxr-plugin - 3.1.1 - - \ No newline at end of file + diff --git a/doc/actions.md b/doc/actions.md index 35455cb2f2..cb0e0be8d0 100644 --- a/doc/actions.md +++ b/doc/actions.md @@ -98,6 +98,7 @@ returnStat : 'return' expr {System.out.println("first token "+$start.getText()); |start|Token|The first token to be potentially matched by the rule that is on the main token channel; in other words, this attribute is never a hidden token. For rules that end up matching no tokens, this attribute points at the first token that could have been matched by this rule. When referring to the current rule, this attribute is available to any action within the rule.| |stop|Token|The last nonhidden channel token to be matched by the rule. When referring to the current rule, this attribute is available only to the after and finally actions.| |ctx|ParserRuleContext|The rule context object associated with a rule invocation. All of the other attributes are available through this attribute. 
For example, `$ctx.start` accesses the start field within the current rules context object. It’s the same as `$start`.| +|parser|Parser|The parser itself. This attribute can be used, for example, to invoke a method defined in the parser's `@members` section from a semantic predicate.| ## Dynamically-Scoped Attributes diff --git a/doc/antlr-project-testing.md b/doc/antlr-project-testing.md index 2cb11938ca..840921ebe4 100644 --- a/doc/antlr-project-testing.md +++ b/doc/antlr-project-testing.md @@ -2,11 +2,21 @@ ## Introduction -Because ANTLR supports multiple target languages, the unit tests are broken into two groups: the unit tests that test the tool itself (in `tool-testsuite`) and the unit tests that test the parser runtimes (in `antlr4/runtime-testsuite`). The tool tests are straightforward because they are Java code testing Java code; see the section at the bottom of this file. +Because ANTLR supports multiple target languages, the unit tests are broken into two groups: +the unit tests that test the tool itself (in `tool-testsuite`) and the unit tests that test the parser runtimes (in `antlr4/runtime-testsuite`). +The tool tests are straightforward because they are Java code testing Java code; see the section at the bottom of this file. -The runtime tests must be specified in a generic fashion to work across language targets. Furthermore, we must test the various targets from Java. This usually means Java launching processes to compile, say, C++ and run parsers. +The runtime tests must be specified in a generic fashion to work across language targets. +Furthermore, the various targets from Java must be tested. -As of 4.10, we use a Java descriptor file held as an [UniversalRuntimeTestDescriptor.java object](https://github.com/antlr/antlr4/blob/master/runtime-testsuite/test/org/antlr/v4/test/runtime/UniversalRuntimeTestDescriptor.java) to represent each runtime test. 
Each test is described with a text file with various sections and resides in a group directory; see [directories under descriptors dir](https://github.com/antlr/antlr4/blob/master/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors). Here is a sample test descriptor: +This usually means Java launching processes to compile, say, C++ and run parsers. + +As of 4.10, a Java descriptor file held as an [RuntimeTestDescriptor.java](../runtime-testsuite/test/org/antlr/v4/test/runtime/RuntimeTestDescriptor.java) +is used to represent each runtime test. + +Each test is described with a text file with various sections and resides in a group directory; +see [directories under descriptors' dir](../runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors). +Here is a sample test descriptor: ``` [notes] @@ -34,22 +44,25 @@ a b c """ ``` -The grammars are strings representing StringTemplates (`ST` objects) so `` will get replace when the unit test file is generated (`Test.java`, `Test.cs`, ...). The `writeln` template must be defined per target. Here are all of the -[Target templates for runtime tests](https://github.com/antlr/antlr4/tree/master/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates). Use triple-quotes `"""` when whitespace matters (usually input/output sections). +The grammars are strings representing StringTemplates (`ST` objects) so `` will get replace when the unit test file is generated (`Test.java`, `Test.cs`, ...). +The `writeln` template must be defined per target. +Here are all the +[Target templates for runtime tests](../runtime-testsuite/resources/org/antlr/v4/test/runtime/templates). +Use triple-quotes `"""` when whitespace matters (usually input/output sections). 
## Requirements -*out of date, at least for mono* - -In order to perform the tests on all target languages, you need to have the following languages installed: +In order to perform the tests on all target languages, the following tools should be installed: -* `mono` (e.g., `brew install mono`) on non-Windows boxes (on Windows it uses the Microsoft .net stack). Also must [`xbuild` the runtime](https://github.com/antlr/antlr4/blob/master/doc/releasing-antlr.md) before tests will run; see below -* `nodejs` -* Python 2.7 -* Python 3.6 +* dotnet +* Node.js +* Python 2 +* Python 3 * Go -* Swift (via XCode) tested currently only osx -* clang (for C++ target) +* Swift +* Clang (Linux, Mac) or MSBuild (Windows) for C++ +* Dart +* PHP To **install into local repository** `~/.m2/repository/org/antlr`, do this: @@ -58,115 +71,53 @@ $ export MAVEN_OPTS="-Xmx1G" # don't forget this on linux $ mvn install -DskipTests # make sure all artifacts are visible on this machine ``` -Now, make sure C# runtime is built and installed locally. - -```bash -cd ~/antlr/code/antlr4/runtime/CSharp/src -rm -rf `find . -name '{obj,bin}'` -dotnet build -c Release runtime/CSharp/src/Antlr4.csproj -``` - -C++ test rig automatically builds C++ runtime during tests. Others don't need a prebuilt lib. - - ## Running the runtime tests -A single test rig is sufficient to test all targets against all descriptors using the [junit parameterized tests](https://github.com/junit-team/junit4/wiki/parameterized-tests) mechanism. But, that is inconvenient because we often want to test just a single target or perhaps even just a single test within a single group of a single target. I have automatically generated a bunch of -[Target runtime test rigs](https://github.com/antlr/antlr4/tree/master/runtime-testsuite/test/org/antlr/v4/test/runtime) that allow developers such flexibility. 
For example, here are the Python3 test rigs in intellij: +A single test rig is sufficient to test all targets against all descriptors using the [junit dynamic tests](https://junit.org/junit5/docs/current/user-guide/#writing-tests-dynamic-tests) mechanism. +But it's often convenient to test just a single target or perhaps even just a single test within a single group of a single target. +IntelliJ automatically generates a bunch of +[Target runtime test rigs](../runtime-testsuite/test/org/antlr/v4/test/runtime) that allows developers such flexibility. +For example, here are the Python3 test rigs in IntelliJ: - +![testrigs](images/testrigs.png) And the result of testing the entire subdirectory: - - -## Running test subsets - -*From the `runtime-testsuite` dir* +![python3-tests](images/python3-tests.png) -### Run one test group across targets +All test are run in parallel both via maven and via IDE. -```bash -$ cd runtime-testsuite -$ export MAVEN_OPTS="-Xmx1G" # don't forget this on linux -$ mvn -Dtest=TestParserExec test -------------------------------------------------------- - T E S T S -------------------------------------------------------- -Running org.antlr.v4.test.runtime.cpp.TestParserExec -... -Tests run: 32, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 114.283 sec -Running org.antlr.v4.test.runtime.csharp.TestParserExec -... -``` +In IntelliJ, it's very easy to go to source by right-clicking on any test and pressing `Jump to source` (F4). -Or run all lexer related tests: +## Running test subsets -``` -$ cd runtime-testsuite -$ mvn -Dtest=Test*Lexer* test -------------------------------------------------------- - T E S T S -------------------------------------------------------- -Running org.antlr.v4.test.runtime.cpp.TestCompositeLexers -... -``` +From the `runtime-testsuite` dir ### Run all tests for a single target ```bash $ cd runtime-testsuite +$ export MAVEN_OPTS="-Xmx1G" # don't forget this on linux $ mvn -Dtest=java.** test -... 
-``` - -Or run all lexer related tests in Java target only: - -```bash -$ cd runtime-testsuite -$ mvn -Dtest=java.*Lexer* test -... ------------------------------------------------------- T E S T S ------------------------------------------------------- -Running org.antlr.v4.test.runtime.java.TestCompositeLexers -Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.277 sec -Running org.antlr.v4.test.runtime.java.TestLexerErrors -Tests run: 12, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.376 sec -Running org.antlr.v4.test.runtime.java.TestLexerExec -Tests run: 38, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 10.07 sec -Running org.antlr.v4.test.runtime.java.TestSemPredEvalLexer -Tests run: 7, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.255 sec - -Results : - -Tests run: 59, Failures: 0, Errors: 0, Skipped: 0 -``` - -## Testing in parallel - -Use this to run tests in parallel: - -```bash -$ export MAVEN_OPTS="-Xmx1G" -$ mvn -Dparallel=classes -DthreadCount=4 test +[INFO] Running org.antlr.v4.test.runtime.java.TestIntegerList +[INFO] Running org.antlr.v4.test.runtime.java.JavaRuntimeTests ... -------------------------------------------------------- - T E S T S -------------------------------------------------------- -Concurrency config is parallel='classes', perCoreThreadCount=true, threadCount=4, useUnlimitedThreads=false +[INFO] Tests run: 6, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.023 s - in org.antlr.v4.test.runtime.java.TestIntegerList +[INFO] Tests run: 348, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 19.269 s - in org.antlr.v4.test.runtime.java.JavaRuntimeTests ... ``` -This can be combined with other `-D` above. - ## Adding a runtime test -To add a new runtime test, first determine which [group (dir) of tests](https://github.com/antlr/antlr4/blob/master/runtime-testsuite/descriptors/org/antlr/v4/test/runtime/descriptors) it belongs to. 
Then, add a new descriptor file implementation by filling in one of these (omitting unused sections): +To add a new runtime test, first determine which [group (dir) of tests](../runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors) it belongs to. +Then, add a new descriptor file implementation by filling in one of these (omitting unused sections): ``` [notes] - + [type] [grammar] @@ -186,12 +137,13 @@ To add a new runtime test, first determine which [group (dir) of tests](https:// [skip] ``` - -Your best bet is to find a similar test in the appropriate group and then copy and paste the descriptor file, creating a new file within the test group dir. Modify the sections to suit your new problem. +Your best bet is to find a similar test in the appropriate group and then copy and paste the descriptor file, creating a new file within the test group dir. +Modify the sections to suit your new problem. ### Ignoring tests -In order to turn off a test for a particular target, we need to use the `skip` section in the descriptor file. For example, the following skips PHP and Dart targets: +In order to turn off a test for a particular target, the `skip` section in the descriptor file should be used. +For example, the following skips PHP and Dart targets: ``` [skip] @@ -201,11 +153,12 @@ Dart ### Target API/library testing -Some parts of the runtime API need to be tested with code written specifically in the target language. For example, you can see all of the Java runtime API tests here: +Some parts of the runtime API need to be tested with code written specifically in the target language. 
+For example, all the Java runtime API tests are placed here: -[https://github.com/antlr/antlr4/tree/master/runtime-testsuite/test/org/antlr/v4/test/runtime/java/api](https://github.com/antlr/antlr4/tree/master/runtime-testsuite/test/org/antlr/v4/test/runtime/java/api) +[runtime-testsuite/test/org/antlr/v4/test/runtime/java/api](../runtime-testsuite/test/org/antlr/v4/test/runtime/java/api) -Notice that it is under an `api` dir. The directory above is where all of the `Test*` files go. +Notice that it is under an `api` dir. The directory above is where all of the `*Test*` files go. ### Cross-language actions embedded within grammars @@ -221,7 +174,7 @@ Use instead the language-neutral: ``` -Template file [runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Java.test.stg](https://github.com/antlr/antlr4/tree/master/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Java.test.stg) has templates like: +Template file [Java.test.stg](../runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Java.test.stg) has templates like: ``` writeln(s) ::= <);>> @@ -231,6 +184,6 @@ that translate generic operations to target-specific language statements or expr ## Adding an ANTLR tool unit test -Just go into the appropriate Java test class in dir [antlr4/tool-testsuite/test/org/antlr/v4/test/tool](https://github.com/antlr/antlr4/tree/master/tool-testsuite/test/org/antlr/v4/test/tool) and add your unit test. +Just go into the appropriate Java test class in dir [antlr4/tool-testsuite/test/org/antlr/v4/test/tool](../tool-testsuite/test/org/antlr/v4/test/tool) and add your unit test. diff --git a/doc/building-antlr.md b/doc/building-antlr.md index 17091f9d76..76bdbcd0b6 100644 --- a/doc/building-antlr.md +++ b/doc/building-antlr.md @@ -46,7 +46,7 @@ $ if [[ "$?" != "0" ]]; then sudo apt install -y maven; fi The current maven build seems complicated to me because there is a dependency of the project on itself. 
The runtime tests naturally depend on the current version being available but it won't compile without the current version. Once you have the generated/installed jar, mvn builds but otherwise there's a dependency on what you are going to build. You will get this error when you try to clean but you can ignore it: ``` -[INFO] ANTLR 4 Runtime Tests (3rd generation) ............. FAILURE [ 0.073 s] +[INFO] ANTLR 4 Runtime Tests (4th generation) ............. FAILURE [ 0.073 s] ... [ERROR] Plugin org.antlr:antlr4-maven-plugin:4.10-SNAPSHOT or one of its dependencies could not be resolved: Could not find artifact org.antlr:antlr4-maven-plugin:jar:4.10-SNAPSHOT -> [Help 1] ``` diff --git a/doc/creating-a-language-target.md b/doc/creating-a-language-target.md index dd06208ea6..d658cf4437 100644 --- a/doc/creating-a-language-target.md +++ b/doc/creating-a-language-target.md @@ -6,19 +6,42 @@ This document describes how to make ANTLR generate parsers in a new language, *X Creating a new target involves the following key elements: -1. For the tool, create class *X*Target as a subclass of class `Target` in package `org.antlr.v4.codegen.target`. This class describes language specific details about escape characters and strings and so on. There is very little to do here typically. -1. Create *X*.stg in directory tool/resources/org/antlr/v4/tool/templates/codegen/*X*/*X*.stg. This is a [StringTemplate](http://www.stringtemplate.org/) group file (`.stg`) that tells ANTLR how to express all of the parsing elements needed to generate code. You will see templates called `ParserFile`, `Parser`, `Lexer`, `CodeBlockForAlt`, `AltBlock`, etc... Each of these must be described how to build the indicated chunk of code. Your best bet is to find the closest existing target, copy that template file, and tweak to suit. -1. Create a runtime library to support the parsers generated by ANTLR. 
Under directory runtime/*X*, you are in complete control of the directory structure as dictated by common usage of that target language. For example, Java has: `runtime/Java/lib` and `runtime/Java/src` directories. Under `src`, you will find a directory structure for package `org.antlr.v4.runtime` and below. -1. Create a template file for runtime tests. All you have to do is provide a few templates that indicate how to print values and declare variables. Our runtime test mechanism in dir `runtime-testsuite` will automatically generate code using these templates for each target and check the test results. It needs to know how to define various class fields, compare members and so on. You must create a *X*.test.stg file underneath [runtime-testsuite/resources/org/antlr/v4/test/runtime](https://github.com/antlr/antlr4/tree/master/runtime-testsuite/resources/org/antlr/v4/test/runtime). Again, your best bet is to copy the templates from the closest language to your target and tweak it to suit. -1. Create test files under [/runtime-testsuite/test/org/antlr/v4/test/runtime](https://github.com/antlr/antlr4/tree/master/runtime-testsuite/test/org/antlr/v4/test/runtime). They will load defined test cases in each test descriptor. Also add the `/runtime-testsuite/test/org/antlr/v4/test/runtime/X/BaseXTest.java` which defines how test cases will execute and output. -1. Create/edit shell scripts in [/.travis](https://github.com/antlr/antlr4/blob/master/.travis) and [/appveyor.yml](https://github.com/antlr/antlr4/blob/master/appveyor.yml) to run tests in CI pipelines. +1. For the tool, create class *X*Target as a subclass of class `Target` in package `org.antlr.v4.codegen.target`. + This class describes language specific details about escape characters and strings and so on. + There is very little to do here typically. +2. Create `*X*.stg` in directory `tool/resources/org/antlr/v4/tool/templates/codegen/*X*/*X*.stg`. 
+ This is a [StringTemplate](http://www.stringtemplate.org/) group file (`.stg`) that tells ANTLR how to express + all the parsing elements needed to generate code. + You will see templates called `ParserFile`, `Parser`, `Lexer`, `CodeBlockForAlt`, `AltBlock`, etc... + Each of these must be described how to build the indicated chunk of code. + Your best bet is to find the closest existing target, copy that template file, and tweak to suit. +3. Create a runtime library to support the parsers generated by ANTLR. + Under directory `runtime/*X*`, you are in complete control of the directory structure as dictated by common usage of that target language. + For example, Java has: `runtime/Java/lib` and `runtime/Java/src` directories. + Under `src`, you will find a directory structure for package `org.antlr.v4.runtime` and below. +4. Create a template file for runtime tests. + All you have to do is provide a few templates that indicate how to print values and declare variables. + Our runtime test mechanism in dir `runtime-testsuite` will automatically generate code using these templates for each target and check the test results. + It needs to know how to define various class fields, compare members and so on. + You must create a `*X*.test.stg` file underneath [runtime-testsuite/resources/org/antlr/v4/test/runtime](../runtime-testsuite/resources/org/antlr/v4/test/runtime) + and `Test.*x*.stg` underneath [runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers](../runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers). + Again, your best bet is to copy the templates from the closest language to your target and tweak it to suit. +6. Create test files under [/runtime-testsuite/test/org/antlr/v4/test/runtime](../runtime-testsuite/test/org/antlr/v4/test/runtime). + They will load defined test cases in each test descriptor. + Also add the `/runtime-testsuite/test/org/antlr/v4/test/runtime/X/BaseXTest.java` which defines how test cases will execute and output. +7. 
Create/edit shell scripts in [/.github](../.github) to run tests in CI pipelines. ## Getting started -1. Fork the `antlr/antlr4` repository at github to your own user so that you have repository `username/antlr4`. -2. Clone `username/antlr4`, the forked repository, to your local disk. Your remote `origin` will be the forked repository on GitHub. Add a remote `upstream` to the original `antlr/antlr4` repository (URL `https://github.com/antlr/antlr4.git`). Changes that you would like to contribute back to the project are done with [pull requests](https://help.github.com/articles/using-pull-requests/). +1. Fork the `antlr/antlr4` repository at GitHub to your own user so that you have repository `username/antlr4`. +2. Clone `username/antlr4`, the forked repository, to your local disk. + Your remote `origin` will be the forked repository on GitHub. + Add a remote `upstream` to the original `antlr/antlr4` repository (URL `https://github.com/antlr/antlr4.git`). + Changes that you would like to contribute back to the project are done with [pull requests](https://help.github.com/articles/using-pull-requests/). 3. Try to build it before doing anything + ```bash $ mvn compile ``` + That should proceed with success. See [Building ANTLR](building-antlr.md) for more details. diff --git a/doc/getting-started.md b/doc/getting-started.md index 2a98019450..a6d415a0ed 100644 --- a/doc/getting-started.md +++ b/doc/getting-started.md @@ -6,7 +6,7 @@ Hi and welcome to the version 4 release of ANTLR! It's named after the fearless ANTLR is really two things: a tool that translates your grammar to a parser/lexer in Java (or other target language) and the runtime needed by the generated parsers/lexers. Even if you are using the ANTLR Intellij plug-in or ANTLRWorks to run the ANTLR tool, the generated code will still need the runtime library. -The first thing you should do is probably download and install a development tool plug-in. 
Even if you only use such tools for editing, they are great. Then, follow the instructions below to get the runtime environment available to your system to run generated parsers/lexers. In what follows, I talk about antlr-4.10.1-complete.jar, which has the tool and the runtime and any other support libraries (e.g., ANTLR v4 is written in v3). +The first thing you should do is probably download and install a development tool plug-in. Even if you only use such tools for editing, they are great. Then, follow the instructions below to get the runtime environment available to your system to run generated parsers/lexers. In what follows, I talk about antlr-4.11.0-complete.jar, which has the tool and the runtime and any other support libraries (e.g., ANTLR v4 is written in v3). If you are going to integrate ANTLR into your existing build system using mvn, ant, or want to get ANTLR into your IDE such as eclipse or intellij, see [Integrating ANTLR into Development Systems](https://github.com/antlr/antlr4/blob/master/doc/IDEs.md). @@ -16,24 +16,24 @@ If you are going to integrate ANTLR into your existing build system using mvn, a 1. Download ``` $ cd /usr/local/lib -$ curl -O https://www.antlr.org/download/antlr-4.10.1-complete.jar +$ curl -O https://www.antlr.org/download/antlr-4.11.0-complete.jar ``` Or just download in browser from website: [https://www.antlr.org/download.html](https://www.antlr.org/download.html) and put it somewhere rational like `/usr/local/lib`. -if you are using lower version jdk, just download from [website download](https://github.com/antlr/website-antlr4/tree/gh-pages/download) for previous version, and antlr version before 4.10.1 support jdk 1.8 +if you are using lower version jdk, just download from [website download](https://github.com/antlr/website-antlr4/tree/gh-pages/download) for previous version, and antlr version before 4.11.0 support jdk 1.8 -2. Add `antlr-4.10.1-complete.jar` to your `CLASSPATH`: +2. 
Add `antlr-4.11.0-complete.jar` to your `CLASSPATH`: ``` -$ export CLASSPATH=".:/usr/local/lib/antlr-4.10.1-complete.jar:$CLASSPATH" +$ export CLASSPATH=".:/usr/local/lib/antlr-4.11.0-complete.jar:$CLASSPATH" ``` It's also a good idea to put this in your `.bash_profile` or whatever your startup script is. 3. Create aliases for the ANTLR Tool, and `TestRig`. ``` -$ alias antlr4='java -Xmx500M -cp "/usr/local/lib/antlr-4.10.1-complete.jar:$CLASSPATH" org.antlr.v4.Tool' -$ alias grun='java -Xmx500M -cp "/usr/local/lib/antlr-4.10.1-complete.jar:$CLASSPATH" org.antlr.v4.gui.TestRig' +$ alias antlr4='java -Xmx500M -cp "/usr/local/lib/antlr-4.11.0-complete.jar:$CLASSPATH" org.antlr.v4.Tool' +$ alias grun='java -Xmx500M -cp "/usr/local/lib/antlr-4.11.0-complete.jar:$CLASSPATH" org.antlr.v4.gui.TestRig' ``` ### WINDOWS @@ -41,13 +41,13 @@ $ alias grun='java -Xmx500M -cp "/usr/local/lib/antlr-4.10.1-complete.jar:$CLASS (*Thanks to Graham Wideman*) 0. Install Java (version 1.7 or higher) -1. Download antlr-4.10.1-complete.jar (or whatever version) from [https://www.antlr.org/download.html](https://www.antlr.org/download.html) +1. Download antlr-4.11.0-complete.jar (or whatever version) from [https://www.antlr.org/download.html](https://www.antlr.org/download.html) Save to your directory for 3rd party Java libraries, say `C:\Javalib` -2. Add `antlr-4.10.1-complete.jar` to CLASSPATH, either: +2. Add `antlr-4.11.0-complete.jar` to CLASSPATH, either: * Permanently: Using System Properties dialog > Environment variables > Create or append to `CLASSPATH` variable * Temporarily, at command line: ``` -SET CLASSPATH=.;C:\Javalib\antlr-4.10.1-complete.jar;%CLASSPATH% +SET CLASSPATH=.;C:\Javalib\antlr-4.11.0-complete.jar;%CLASSPATH% ``` 3. 
Create short convenient commands for the ANTLR Tool, and TestRig, using batch files or doskey commands: * Batch files (in directory in system PATH) antlr4.bat and grun.bat @@ -73,7 +73,7 @@ Either launch org.antlr.v4.Tool directly: ``` $ java org.antlr.v4.Tool -ANTLR Parser Generator Version 4.10.1 +ANTLR Parser Generator Version 4.11.0 -o ___ specify output directory where all output is generated -lib ___ specify location of .tokens files ... @@ -82,8 +82,8 @@ ANTLR Parser Generator Version 4.10.1 or use -jar option on java: ``` -$ java -jar /usr/local/lib/antlr-4.10.1-complete.jar -ANTLR Parser Generator Version 4.10.1 +$ java -jar /usr/local/lib/antlr-4.11.0-complete.jar +ANTLR Parser Generator Version 4.11.0 -o ___ specify output directory where all output is generated -lib ___ specify location of .tokens files ... diff --git a/doc/go-target.md b/doc/go-target.md index 6bdac133b3..b85c39cc65 100644 --- a/doc/go-target.md +++ b/doc/go-target.md @@ -8,32 +8,102 @@ #### 2. Get the Go ANTLR runtime -Each target language for ANTLR has a runtime package for running parser generated by ANTLR4. The runtime provides a common set of tools for using your parser. +Each target language for ANTLR has a runtime package for running parser generated by ANTLR4. +The runtime provides a common set of tools for using your parser. Note that if you have existing projects and have +yet to replace the `v1.x.x` modules with the `v4` modules, then you can skip ahead to the section *Upgrading to v4 +from earlier versions* -Get the runtime and install it on your GOPATH: +The Go runtime uses modules and has a version path of `/v4` to stay in sync with the runtime versions of all the other runtimes. +Setup is the same as any other module based project: ```bash -go get github.com/antlr/antlr4/runtime/Go/antlr +$ cd mymodproject +$ go mod init mymodproject ``` -#### 3. 
Set the release tag (optional) +After which, you can use go get, to get the latest release version of the ANTLR v4 runtime using: -`go get` has no native way to specify a branch or commit. So, when you run it, you'll download the latest commits. This may or may not be your preference. +```bash +go get github.com/antlr/antlr4/runtime/Go/antlr/v4 +``` -You'll need to use git to set the release. For example, to set the release tag for release 4.9.3: +If your project is already using the v4 runtime, then you can upgrade to the latest release using the usual: ```bash -cd $GOPATH/src/github.com/antlr/antlr4 # enter the antlr4 source directory -git checkout tags/4.9.3 # the go runtime was added in release 4.9.3 +go get -u github.com/antlr/antlr4/runtime/Go/antlr/v4 +``` +If you have not yet upgraded existing projects to the `/v4` module path, consult the section *Upgrading to v4 +from earlier versions* + +The ANTLR runtime has only one external dependency, and that is part of the go system itself: + +``` +golang.org/x/exp +``` + +A complete list of releases can be found on [the release page](https://github.com/antlr/antlr4/releases). The Go +runtime will be tagged using standard Go tags, so release 4.11.0 will be tagged with `v4.11.0` and go get will pick +that up from the ANTLR repo. + +#### 3. Configuring `go generate` in your project + +In order to promote the use of repeatable builds, it is often useful to add the latest tool jar to your project's +repo and configure a `generate.sh` and `generate.go` file. You can of course globally alias the java command required to run the +tool. Your own CI and dev environment will guide you. + +Here is how you can configure `go generate` for your project, assuming that you follow the general recommendation to +place the ANTLR grammar files in their own package in your project structure. Here is a general template as a starting point: + +``` + . 
+ ├── myproject + ├── parser + │ ├── mygrammar.g4 + │ ├── antlr-4.11.0-complete.jar + │ ├── error_listeners.go + │ ├── generate.go + │ ├── generate.sh + ├── go.mod + ├── go.sum + ├── main.go + └── main_test.go +``` + +Make sure that the package statement in your grammar file(s) reflects the go package they exist in. +The `generate.go` file then looks like this: + +```golang + package parser + + //go:generate ./generate.sh +``` + +And the `generate.sh` file will look similar to this: + +```shell + + #!/bin/sh + + alias antlr4='java -Xmx500M -cp "./antlr4-4.11.0-complete.jar:$CLASSPATH" org.antlr.v4.Tool' + antlr4 -Dlanguage=Go -no-visitor -package parser *.g4 +``` + +From the command line at the root of your package “myproject” you can then simply issue the command: + +```shell + go generate ./... ``` -A complete list of releases can be found on [the release page](https://github.com/antlr/antlr4/releases). +If you have not yet run a `go get`, you can now run `go mod tidy` and update your `go.mod` and `go.sum` files. -#### 4. Generate your parser +#### 4. Generate your parser manually You use the ANTLR4 "tool" to generate a parser. These will reference the ANTLR runtime, installed above. -Suppose you're using a UNIX system and have set up an alias for the ANTLR4 tool as described in [the getting started guide](getting-started.md). To generate your go parser, you'll need to invoke: +Suppose you're using a UNIX system and have set up an alias for the ANTLR4 tool as described in +[the getting started guide](getting-started.md). + +To generate your go parser, you'll need to invoke: ```bash antlr4 -Dlanguage=Go MyGrammar.g4 @@ -41,17 +111,59 @@ antlr4 -Dlanguage=Go MyGrammar.g4 For a full list of antlr4 tool options, please visit the [tool documentation page](tool-options.md). 
+### Upgrading to `/v4` from the default path + +*NB: While switching to a new module path would normally imply that the public interface for the runtime has changed, this is +not actually the case - you will not need to change your existing code to upgrade. The main point of the path change is so +that git tagging works with the ANTLR Go runtime.* + +Prior to release v4.11.0 the Go runtime shipped with a module but the module had no version path. This meant that +the tags in the ANTLR repo did not work, as any tag above `v1` must refer to a matching module path. +So the command `go get github.com/antlr/antlr4/runtime/Go/antlr` would just bring in +whatever was the `HEAD` of the master branch. While this *kind of* worked, it is obviously subject to problems and does +not fit properly with the idiomatic ways of Go. + +As of v4.11.0 the module path for the Go runtime is properly in sync with the repo tags. However, this means you need to +perform a few simple actions in order to upgrade to the `/v4` path. + + - Firstly, make sure that you are using an ANTLR tool jar with a version number of 4.11.0 or greater. + - Next you replace any mention of the old (default) path to ANTLR in your go source files. Don't worry that this will +modify your generated files as... + - Now regenerate your grammar files either manually or using `go generate ./...` (see above) + +A quick way to replace original module path references is to use this script from your module's base directory: + +```shell +find . 
-type f \ + -name '*.go' \ + -exec sed -i -e 's,github.com/antlr/antlr4/runtime/Go/antlr,github.com/antlr/antlr4/runtime/Go/antlr/v4,g' {} \; +``` +After performing the steps above, issuing: + +```shell +go mod tidy +``` +Should fix up your `go.mod` file to reference only the `v4` version of the ANTLR Go runtime: + +```shell +require github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.11.0-xxxxxx-xxxxxxxxx +``` + +From this point on, your go mod commands will work correctly with the ANTLR repo and upgrades and downgrades will work +as you expect. As will branch version such as @dev + ### Referencing the Go ANTLR runtime You can reference the go ANTLR runtime package like this: -```go -import "github.com/antlr/antlr4/runtime/Go/antlr" +```golang +import "github.com/antlr/antlr4/runtime/Go/antlr/v4" ``` ### Complete example -Suppose you're using the JSON grammar from https://github.com/antlr/grammars-v4/tree/master/json. +Suppose you're using the JSON grammar from https://github.com/antlr/grammars-v4/tree/master/json placed in the parser +directory and have initialized your `go mod` file. Then, invoke `antlr4 -Dlanguage=Go JSON.g4`. The result of this is a collection of .go files in the `parser` directory including: ``` @@ -61,15 +173,17 @@ json_lexer.go json_listener.go ``` -Another common option to the ANTLR tool is `-visitor`, which generates a parse tree visitor, but we won't be doing that here. For a full list of antlr4 tool options, please visit the [tool documentation page](tool-options.md). +Another common option to the ANTLR tool is `-visitor`, which generates a parse tree visitor, but we won't be doing that here. +For a full list of antlr4 tool options, please visit the [tool documentation page](tool-options.md). -We'll write a small main func to call the generated parser/lexer (assuming they are separate). This one writes out the encountered `ParseTreeContext`'s. 
Suppose the gen'ed parser code is in the `parser` directory relative to this code: +We'll write a small main func to call the generated parser/lexer (assuming they are separate). This one writes out the +encountered `ParseTreeContext`'s. Assuming the generated parser code is in the `parser` directory relative to this code: ```golang package main import ( - "github.com/antlr/antlr4/runtime/Go/antlr" + "github.com/antlr/antlr4/runtime/Go/antlr/v4" "./parser" "os" "fmt" diff --git a/doc/grammars.md b/doc/grammars.md index 5f88bf20ac..97ad2659ec 100644 --- a/doc/grammars.md +++ b/doc/grammars.md @@ -98,7 +98,7 @@ Not every kind of grammar can import every other kind of grammar: * Parsers can import parsers. * Combined grammars can import parsers or lexers without modes. -ANTLR adds imported rules to the end of the rule list in a main lexer grammar. That means lexer rules in the main grammar get precedence over imported rules. For example, if a main grammar defines rule `IF : ’if’ ;` and an imported grammar defines rule `ID : [a-z]+ ;` (which also recognizes `if`), the imported `ID` won’t hide the main grammar’s `IF` token definition. +ANTLR adds imported rules to the end of the rule list in a main lexer grammar. That means lexer rules in the main grammar get precedence over imported rules. For example, if a main grammar defines rule `IF : 'if' ;` and an imported grammar defines rule `ID : [a-z]+ ;` (which also recognizes `if`), the imported `ID` won’t hide the main grammar’s `IF` token definition. 
## Tokens Section diff --git a/doc/images/python3-tests.png b/doc/images/python3-tests.png index 3f278e30e4..9d8a9c079e 100644 Binary files a/doc/images/python3-tests.png and b/doc/images/python3-tests.png differ diff --git a/doc/images/testrigs.png b/doc/images/testrigs.png index 00e05b7cef..a9fc04a822 100644 Binary files a/doc/images/testrigs.png and b/doc/images/testrigs.png differ diff --git a/doc/lexer-rules.md b/doc/lexer-rules.md index e446bfbf33..08fe7e5d1e 100644 --- a/doc/lexer-rules.md +++ b/doc/lexer-rules.md @@ -52,8 +52,8 @@ Match token T at the current input position. Tokens always begin with a capital -’literal’ -Match that character or sequence of characters. E.g., ’while’ or ’=’. +'literal' +Match that character or sequence of characters. E.g., 'while' or '='. @@ -97,8 +97,8 @@ DASH : [---] ; // match a single -, i.e., "any character" between - and - (note -’x’..’y’ -Match any single character between range x and y, inclusively. E.g., ’a’..’z’. ’a’..’z’ is identical to [a-z]. +'x'..'y' +Match any single character between range x and y, inclusively. E.g., 'a'..'z'. 'a'..'z' is identical to [a-z]. @@ -143,7 +143,7 @@ Evaluate semantic predicate «p». If «p» evaluates to false at runtime, the s ~x -Match any single character not in the set described by x. Set x can be a single character literal, a range, or a subrule set like ~(’x’|’y’|’z’) or ~[xyz]. Here is a rule that uses ~ to match any character other than characters using ~[\r\n]*: +Match any single character not in the set described by x. Set x can be a single character literal, a range, or a subrule set like ~('x'|'y'|'z') or ~[xyz]. Here is a rule that uses ~ to match any character other than characters using ~[\r\n]*:
 	
 COMMENT : '#' ~[\r\n]* '\r'? '\n' -> skip ;
 
@@ -176,7 +176,7 @@ mode STR; MASK : '&' ; ``` -A parser grammar cannot reference literal ’&’, but it can reference the name of the tokens: +A parser grammar cannot reference literal '&', but it can reference the name of the tokens: ``` parser grammar P; @@ -320,4 +320,4 @@ The option rewrites `caseInsensitive` grammar option value if it's defined. ```g4 options { caseInsensitive=true; } STRING options { caseInsensitive=false; } : 'N'? '\'' (~'\'' | '\'\'')* '\''; // lower n is not allowed -``` \ No newline at end of file +``` diff --git a/doc/lexicon.md b/doc/lexicon.md index b99940b1f2..a8f0427606 100644 --- a/doc/lexicon.md +++ b/doc/lexicon.md @@ -79,13 +79,13 @@ These more or less correspond to `isJavaIdentifierPart` and `isJavaIdentifierSta ## Literals -ANTLR does not distinguish between character and string literals as most languages do. All literal strings one or more characters in length are enclosed in single quotes such as `’;’`, `’if’`, `’>=’`, and `’\’’` (refers to the one-character string containing the single quote character). Literals never contain regular expressions. +ANTLR does not distinguish between character and string literals as most languages do. All literal strings one or more characters in length are enclosed in single quotes such as `';'`, `'if'`, `'>='`, and `'\''` (refers to the one-character string containing the single quote character). Literals never contain regular expressions. -Literals can contain Unicode escape sequences of the form `’\uXXXX’` (for Unicode code points up to `’U+FFFF’`) or `’\u{XXXXXX}’` (for all Unicode code points), where `’XXXX’` is the hexadecimal Unicode code point value. +Literals can contain Unicode escape sequences of the form `'\uXXXX'` (for Unicode code points up to `'U+FFFF'`) or `'\u{XXXXXX}'` (for all Unicode code points), where `'XXXX'` is the hexadecimal Unicode code point value. -For example, `’\u00E8’` is the French letter with a grave accent: `’è’`, and `’\u{1F4A9}’` is the famous emoji: `’💩’`. 
+For example, `'\u00E8'` is the French letter with a grave accent: `'è'`, and `'\u{1F4A9}'` is the famous emoji: `'💩'`. -ANTLR also understands the usual special escape sequences: `’\n’` (newline), `’\r’` (carriage return), `’\t’` (tab), `’\b’` (backspace), and `’\f’` (form feed). You can use Unicode code points directly within literals or use the Unicode escape sequences: +ANTLR also understands the usual special escape sequences: `'\n'` (newline), `'\r'` (carriage return), `'\t'` (tab), `'\b'` (backspace), and `'\f'` (form feed). You can use Unicode code points directly within literals or use the Unicode escape sequences: ``` grammar Foreign; diff --git a/doc/parser-rules.md b/doc/parser-rules.md index 73c363b990..e1a32fe736 100644 --- a/doc/parser-rules.md +++ b/doc/parser-rules.md @@ -196,7 +196,7 @@ ANTLR generates a field holding the list of context objects: ## Rule Elements -Rule elements specify what the parser should do at a given moment just like statements in a programming language. The elements can be rule, token, string literal like expression, ID, and ’return’. Here’s a complete list of the rule elements (we’ll look at actions and predicates in more detail later): +Rule elements specify what the parser should do at a given moment just like statements in a programming language. The elements can be rule, token, string literal like expression, ID, and 'return'. Here’s a complete list of the rule elements (we’ll look at actions and predicates in more detail later): @@ -207,7 +207,7 @@ Rule elements specify what the parser should do at a given moment just like stat Match token T at the current input position. Tokens always begin with a capital letter. - @@ -232,7 +232,7 @@ Match any single token except for the end of file token. The “dot” operator
’literal’ +'literal' Match the string literal at the current input position. A string literal is simply a token with a fixed string.
-When you want to match everything but a particular token or set of tokens, use the `~` “not” operator. This operator is rarely used in the parser but is available. `~INT` matches any token except the `INT` token. `~’,’` matches any token except the comma. `~(INT|ID)` matches any token except an INT or an ID. +When you want to match everything but a particular token or set of tokens, use the `~` “not” operator. This operator is rarely used in the parser but is available. `~INT` matches any token except the `INT` token. `~','` matches any token except the comma. `~(INT|ID)` matches any token except an INT or an ID. Token, string literal, and semantic predicate rule elements can take options. See Rule Element Options. @@ -486,4 +486,4 @@ Invalid input would cause `config` to return immediately without matching any in ``` file : element* EOF; // don't stop early. must match all input -``` \ No newline at end of file +``` diff --git a/doc/python-target.md b/doc/python-target.md index 7ed73c281e..6f7f6a84a9 100644 --- a/doc/python-target.md +++ b/doc/python-target.md @@ -116,14 +116,39 @@ If your grammar is targeted to Python only, you may ignore the following. But if ID {$text.equals("test")}? ``` -Unfortunately, this is not portable, but you can work around it. The trick involves: +Unfortunately, this is not portable, as Java and Python (and other target languages) have different syntaxes for all but the simplest language elements. But you can work around it. 
The trick involves: * deriving your parser from a parser you provide, such as BaseParser -* implementing utility methods in this BaseParser, such as "isEqualText" -* adding a "self" field to the Java/C# BaseParser, and initialize it with "this" +* implementing utility methods, such as "isEqualText", in this BaseParser, in different files for each target language +* invoking your utility methods in the semantic predicate from the `$parser` object Thanks to the above, you should be able to rewrite the above semantic predicate as follows: +File `MyGrammarParser.g4`: ``` -ID {$self.isEqualText($text,"test")}? +options { superClass = MyGrammarBaseParser; } +... +ID {$parser.isEqualText($text,"test")}? +``` + +File `MyGrammarBaseParser.py`: +```python +from antlr4 import * + +class MyGrammarBaseParser(Parser): + + def isEqualText(a, b): + return a is b +``` + +File `MyGrammarBaseParser.java`: +```java +import org.antlr.v4.runtime.*; + +public abstract class MyGrammarBaseParser extends Parser { + + public static boolean isEqualText(a, b) { + return a.equals(b); + } +} ``` diff --git a/doc/releasing-antlr.md b/doc/releasing-antlr.md index 0946880cec..1a677aa14f 100644 --- a/doc/releasing-antlr.md +++ b/doc/releasing-antlr.md @@ -25,14 +25,20 @@ Make sure this feature is turned on for the `antlr4` repo upon release. Wack any existing tag as mvn will create one and it fails if already there. ``` -$ git tag -d 4.10.1 -$ git push origin :refs/tags/4.10.1 -$ git push upstream :refs/tags/4.10.1 +$ git tag -d 4.11.0 +$ git push origin :refs/tags/4.11.0 +$ git push upstream :refs/tags/4.11.0 ``` ### Go release tags -It seems that [Go needs a `v` in the release git tag](https://go.dev/ref/mod#glos-version) so make sure that we double up with 4.10.1 and v4.10.1. +It seems that [Go needs a `v` in the release git tag](https://go.dev/ref/mod#glos-version) so make sure that we double up with 4.11.0 and v4.11.0. 
+ +``` +$ git tag -a runtime/Go/antlr/v4/v4.11.0 -m "Go runtime module only" +$ git push upstream runtime/Go/antlr/v4/v4.11.0 +$ git push origin runtime/Go/antlr/v4/v4.11.0 +``` ## Bump version in code and other files @@ -40,11 +46,11 @@ It seems that [Go needs a `v` in the release git tag](https://go.dev/ref/mod#glo There are a number of files that require inversion number be updated. -(In a `pred-4.10.1` branch) Here is a simple script to display any line from the critical files with, say, `4.10.1` in it. Here's an example run of the script: +Here is a simple script to display any line from the critical files with, say, `4.11.0` in it. Here's an example run of the script: ```bash -~/antlr/code/antlr4 $ python scripts/update_antlr_version.py 4.10 4.10.1 -Updating ANTLR version from 4.10 to 4.10.1 +~/antlr/code/antlr4 $ python scripts/update_antlr_version.py 4.10 4.11.0 +Updating ANTLR version from 4.10 to 4.11.0 Set ANTLR repo root (default ~/antlr/code/antlr4): Perform antlr4 `mvn clean` and wipe build dirs Y/N? (default no): Ok, not cleaning antlr4 dir @@ -56,14 +62,14 @@ It's also worth doing a quick check to see if you find any other references to a ```bash mvn clean -find . -type f -exec grep -l '4\.9' {} \; | grep -v -E '\.o|\.a|\.jar|\.dylib|node_modules/|\.class|tests/|CHANGELOG|\.zip|\.gz|.iml|.svg' +find . -type f -exec grep -l '4\.10.1' {} \; | grep -v -E '\.o|\.a|\.jar|\.dylib|node_modules/|\.class|tests/|CHANGELOG|\.zip|\.gz|.iml|.svg' ``` Commit to repository. ### PHP runtime -We only have to copy the PHP runtime into the ANTLR repository to run the unittests. But, we still need to bump the version to 4.10.1 in `~/antlr/code/antlr-php-runtime/src/RuntimeMetaData.php` in the separate repository, commit, and push. +We only have to copy the PHP runtime into the ANTLR repository to run the unittests. But, we still need to bump the version to 4.11.0 in `~/antlr/code/antlr-php-runtime/src/RuntimeMetaData.php` in the separate repository, commit, and push. 
``` cd ~/antlr/code/antlr-php-runtime/src @@ -74,19 +80,19 @@ git commit -a -m "Update PHP Runtime to latest version" ## Build XPath parsers -This section addresses a [circular dependency regarding XPath](https://github.com/antlr/antlr4/issues/3600). In the java target I avoided a circular dependency (gen 4.10.1 parser for XPath using 4.10.1 which needs it to build) by hand building the parser: runtime/Java/src/org/antlr/v4/runtime/tree/xpath/XPath.java. Probably we won't have to rerun this for the patch releases, just major ones that alter the ATN serialization. +This section addresses a [circular dependency regarding XPath](https://github.com/antlr/antlr4/issues/3600). In the java target I avoided a circular dependency (gen 4.11.0 parser for XPath using 4.11.0 which needs it to build) by hand building the parser: runtime/Java/src/org/antlr/v4/runtime/tree/xpath/XPath.java. Probably we won't have to rerun this for the patch releases, just major ones that alter the ATN serialization. ``` cd ~/antlr/code/antlr4/runtime/CSharp/src/Tree/Xpath -java -cp ":/Users/parrt/.m2/repository/org/antlr/antlr4/4.10.1-SNAPSHOT/antlr4-4.10.1-SNAPSHOT-complete.jar:$CLASSPATH" org.antlr.v4.Tool -Dlanguage=CSharp XPathLexer.g4 +java -cp ":/Users/parrt/.m2/repository/org/antlr/antlr4/4.11.0-SNAPSHOT/antlr4-4.11.0-SNAPSHOT-complete.jar:$CLASSPATH" org.antlr.v4.Tool -Dlanguage=CSharp XPathLexer.g4 cd ~/antlr/code/antlr4/runtime/Python3/tests/expr -java -cp ":/Users/parrt/.m2/repository/org/antlr/antlr4/4.10.1-SNAPSHOT/antlr4-4.10.1-SNAPSHOT-complete.jar:$CLASSPATH" org.antlr.v4.Tool -Dlanguage=Python2 Expr.g4 -java -cp ":/Users/parrt/.m2/repository/org/antlr/antlr4/4.10.1-SNAPSHOT/antlr4-4.10.1-SNAPSHOT-complete.jar:$CLASSPATH" org.antlr.v4.Tool -Dlanguage=Python2 XPathLexer.g4 +java -cp ":/Users/parrt/.m2/repository/org/antlr/antlr4/4.11.0-SNAPSHOT/antlr4-4.11.0-SNAPSHOT-complete.jar:$CLASSPATH" org.antlr.v4.Tool -Dlanguage=Python2 Expr.g4 +java -cp 
":/Users/parrt/.m2/repository/org/antlr/antlr4/4.11.0-SNAPSHOT/antlr4-4.11.0-SNAPSHOT-complete.jar:$CLASSPATH" org.antlr.v4.Tool -Dlanguage=Python2 XPathLexer.g4 cd ~/antlr/code/antlr4/runtime/Python3/tests/expr -java -cp ":/Users/parrt/.m2/repository/org/antlr/antlr4/4.10.1-SNAPSHOT/antlr4-4.10.1-SNAPSHOT-complete.jar:$CLASSPATH" org.antlr.v4.Tool -Dlanguage=Python3 Expr.g4 -java -cp ":/Users/parrt/.m2/repository/org/antlr/antlr4/4.10.1-SNAPSHOT/antlr4-4.10.1-SNAPSHOT-complete.jar:$CLASSPATH" org.antlr.v4.Tool -Dlanguage=Python3 XPathLexer.g4 +java -cp ":/Users/parrt/.m2/repository/org/antlr/antlr4/4.11.0-SNAPSHOT/antlr4-4.11.0-SNAPSHOT-complete.jar:$CLASSPATH" org.antlr.v4.Tool -Dlanguage=Python3 Expr.g4 +java -cp ":/Users/parrt/.m2/repository/org/antlr/antlr4/4.11.0-SNAPSHOT/antlr4-4.11.0-SNAPSHOT-complete.jar:$CLASSPATH" org.antlr.v4.Tool -Dlanguage=Python3 XPathLexer.g4 ``` ## Maven Repository Settings @@ -136,7 +142,7 @@ Here is the file template ## Maven deploy snapshot -The goal is to get a snapshot, such as `4.10.1-SNAPSHOT`, to the staging server: [antlr4 tool](https://oss.sonatype.org/content/repositories/snapshots/org/antlr/antlr4/4.10.1-SNAPSHOT/) and [antlr4 java runtime](https://oss.sonatype.org/content/repositories/snapshots/org/antlr/antlr4-runtime/4.10.1-SNAPSHOT/). +The goal is to get a snapshot, such as `4.11.0-SNAPSHOT`, to the staging server: [antlr4 tool](https://oss.sonatype.org/content/repositories/snapshots/org/antlr/antlr4/4.11.0-SNAPSHOT/) and [antlr4 java runtime](https://oss.sonatype.org/content/repositories/snapshots/org/antlr/antlr4-runtime/4.11.0-SNAPSHOT/). Do this: @@ -144,16 +150,6 @@ Do this: $ mvn install -DskipTests # seems required to get the jar files visible to maven $ mvn deploy -DskipTests ... 
-[INFO] --- maven-deploy-plugin:2.7:deploy (default-deploy) @ antlr4-tool-testsuite --- -Downloading: https://oss.sonatype.org/content/repositories/snapshots/org/antlr/antlr4-tool-testsuite/4.10.1-SNAPSHOT/maven-metadata.xml -Uploading: https://oss.sonatype.org/content/repositories/snapshots/org/antlr/antlr4-tool-testsuite/4.10.1-SNAPSHOT/antlr4-tool-testsuite-4.10.1-20161211.173752-1.jar -Uploaded: https://oss.sonatype.org/content/repositories/snapshots/org/antlr/antlr4-tool-testsuite/4.10.1-SNAPSHOT/antlr4-tool-testsuite-4.10.1-20161211.173752-1.jar (3 KB at 3.4 KB/sec) -Uploading: https://oss.sonatype.org/content/repositories/snapshots/org/antlr/antlr4-tool-testsuite/4.10.1-SNAPSHOT/antlr4-tool-testsuite-4.10.1-20161211.173752-1.pom -Uploaded: https://oss.sonatype.org/content/repositories/snapshots/org/antlr/antlr4-tool-testsuite/4.10.1-SNAPSHOT/antlr4-tool-testsuite-4.10.1-20161211.173752-1.pom (3 KB at 6.5 KB/sec) -Downloading: https://oss.sonatype.org/content/repositories/snapshots/org/antlr/antlr4-tool-testsuite/maven-metadata.xml -Downloaded: https://oss.sonatype.org/content/repositories/snapshots/org/antlr/antlr4-tool-testsuite/maven-metadata.xml (371 B at 1.4 KB/sec) -Uploading: https://oss.sonatype.org/content/repositories/snapshots/org/antlr/antlr4-tool-testsuite/4.10.1-SNAPSHOT/maven-metadata.xml -Uploaded: https://oss.sonatype.org/content/repositories/snapshots/org/antlr/antlr4-tool-testsuite/4.10.1-SNAPSHOT/maven-metadata.xml (774 B at 1.8 KB/sec) Uploading: https://oss.sonatype.org/content/repositories/snapshots/org/antlr/antlr4-tool-testsuite/maven-metadata.xml Uploaded: https://oss.sonatype.org/content/repositories/snapshots/org/antlr/antlr4-tool-testsuite/maven-metadata.xml (388 B at 0.9 KB/sec) [INFO] ------------------------------------------------------------------------ @@ -165,7 +161,7 @@ Uploaded: https://oss.sonatype.org/content/repositories/snapshots/org/antlr/antl [INFO] ANTLR 4 Maven plugin ............................... 
SUCCESS [ 6.547 s] [INFO] ANTLR 4 Runtime Test Annotations ................... SUCCESS [ 2.519 s] [INFO] ANTLR 4 Runtime Test Processors .................... SUCCESS [ 2.385 s] -[INFO] ANTLR 4 Runtime Tests (3rd generation) ............. SUCCESS [ 15.276 s] +[INFO] ANTLR 4 Runtime Tests (4th generation) ............. SUCCESS [ 15.276 s] [INFO] ANTLR 4 Tool Tests ................................. SUCCESS [ 2.233 s] [INFO] ------------------------------------------------------------------------ [INFO] BUILD SUCCESS @@ -217,18 +213,18 @@ It will start out by asking you the version number: ``` ... -What is the release version for "ANTLR 4"? (org.antlr:antlr4-master) 4.10.1: : 4.10.1 -What is the release version for "ANTLR 4 Runtime"? (org.antlr:antlr4-runtime) 4.10.1: : -What is the release version for "ANTLR 4 Tool"? (org.antlr:antlr4) 4.10.1: : -What is the release version for "ANTLR 4 Maven plugin"? (org.antlr:antlr4-maven-plugin) 4.10.1: : -What is the release version for "ANTLR 4 Runtime Test Generator"? (org.antlr:antlr4-runtime-testsuite) 4.10.1: : -What is the release version for "ANTLR 4 Tool Tests"? (org.antlr:antlr4-tool-testsuite) 4.10.1: : -What is SCM release tag or label for "ANTLR 4"? (org.antlr:antlr4-master) antlr4-master-4.10.1: : 4.10.1 -What is the new development version for "ANTLR 4"? (org.antlr:antlr4-master) 4.10.2-SNAPSHOT: +What is the release version for "ANTLR 4"? (org.antlr:antlr4-master) 4.11.0: : 4.11.0 +What is the release version for "ANTLR 4 Runtime"? (org.antlr:antlr4-runtime) 4.11.0: : +What is the release version for "ANTLR 4 Tool"? (org.antlr:antlr4) 4.11.0: : +What is the release version for "ANTLR 4 Maven plugin"? (org.antlr:antlr4-maven-plugin) 4.11.0: : +What is the release version for "ANTLR 4 Runtime Test Generator"? (org.antlr:antlr4-runtime-testsuite) 4.11.0: : +What is the release version for "ANTLR 4 Tool Tests"? (org.antlr:antlr4-tool-testsuite) 4.11.0: : +What is SCM release tag or label for "ANTLR 4"? 
(org.antlr:antlr4-master) antlr4-master-4.11.0: : 4.11.0 +What is the new development version for "ANTLR 4"? (org.antlr:antlr4-master) 4.11.1-SNAPSHOT: ... ``` -Maven will go through your pom.xml files to update versions from 4.10.1-SNAPSHOT to 4.10.1 for release and then to 4.10.2-SNAPSHOT after release, which is done with: +Maven will go through your pom.xml files to update versions from 4.11.0-SNAPSHOT to 4.11.0 for release and then to 4.11.1-SNAPSHOT after release, which is done with: ```bash mvn release:perform -Darguments="-DskipTests" @@ -242,7 +238,7 @@ Now, go here: and on the left click "Staging Repositories". You click the staging repo and close it, then you refresh, click it and release it. It's done when you see it here: -    [https://oss.sonatype.org/service/local/repositories/releases/content/org/antlr/antlr4-runtime/4.10.1/antlr4-runtime-4.10.1.jar](https://oss.sonatype.org/service/local/repositories/releases/content/org/antlr/antlr4-runtime/4.10.1/antlr4-runtime-4.10.1.jar) +    [https://oss.sonatype.org/service/local/repositories/releases/content/org/antlr/antlr4-runtime/4.11.0/antlr4-runtime-4.11.0.jar](https://oss.sonatype.org/service/local/repositories/releases/content/org/antlr/antlr4-runtime/4.11.0/antlr4-runtime-4.11.0.jar) All releases should be here: [https://repo1.maven.org/maven2/org/antlr/antlr4-runtime](https://repo1.maven.org/maven2/org/antlr/antlr4-runtime). @@ -265,7 +261,7 @@ Move (and zip) target to website: ```bash cd src -zip -r ~/antlr/sites/website-antlr4/download/antlr-javascript-runtime-4.10.1.zip . +zip -r ~/antlr/sites/website-antlr4/download/antlr-javascript-runtime-4.11.0.zip . ``` ### CSharp @@ -295,7 +291,7 @@ Copyright (C) Microsoft Corporation. All rights reserved. Determining projects to restore... Restored /Users/parrt/antlr/code/antlr4/runtime/CSharp/src/Antlr4.csproj (in 340 ms). 
Antlr4 -> /Users/parrt/antlr/code/antlr4/runtime/CSharp/src/bin/Release/netstandard2.0/Antlr4.Runtime.Standard.dll - Successfully created package '/Users/parrt/antlr/code/antlr4/runtime/CSharp/src/bin/Release/Antlr4.Runtime.Standard.4.10.1.0.nupkg'. + Successfully created package '/Users/parrt/antlr/code/antlr4/runtime/CSharp/src/bin/Release/Antlr4.Runtime.Standard.4.11.0.0.nupkg'. Build succeeded. 0 Warning(s) @@ -316,7 +312,7 @@ Copyright 2002, 2003 Motus Technologies. Copyright 2004-2008 Novell. BSD license Assembly bin/Release/netstandard2.0/Antlr4.Runtime.Standard.dll is strongnamed. $ tree /Users/parrt/antlr/code/antlr4/runtime/CSharp/src/bin/Release/ /Users/parrt/antlr/code/antlr4/runtime/CSharp/src/bin/Release/ -├── Antlr4.Runtime.Standard.4.10.1.0.nupkg +├── Antlr4.Runtime.Standard.4.11.0.0.nupkg └── netstandard2.0 ├── Antlr4.Runtime.Standard.deps.json ├── Antlr4.Runtime.Standard.dll @@ -400,7 +396,7 @@ On a Mac (with XCode 7+ installed): ```bash cd ~/antlr/code/antlr4/runtime/Cpp ./deploy-macos.sh -cp antlr4-cpp-runtime-macos.zip ~/antlr/sites/website-antlr4/download/antlr4-cpp-runtime-4.10.1-macos.zip +cp antlr4-cpp-runtime-macos.zip ~/antlr/sites/website-antlr4/download/antlr4-cpp-runtime-4.11.0-macos.zip ``` On any Mac or Linux machine: @@ -408,7 +404,7 @@ On any Mac or Linux machine: ```bash cd ~/antlr/code/antlr4/runtime/Cpp ./deploy-source.sh -cp antlr4-cpp-runtime-source.zip ~/antlr/sites/website-antlr4/download/antlr4-cpp-runtime-4.10.1-source.zip +cp antlr4-cpp-runtime-source.zip ~/antlr/sites/website-antlr4/download/antlr4-cpp-runtime-4.11.0-source.zip ``` On a Windows machine the build scripts checks if VS 2017 and/or VS 2019 are installed and builds binaries for each, if found. This script requires 7z to be installed (http://7-zip.org then do `set PATH=%PATH%;C:\Program Files\7-Zip\` from DOS not powershell). 
@@ -416,16 +412,16 @@ On a Windows machine the build scripts checks if VS 2017 and/or VS 2019 are inst ```bash cd ~/antlr/code/antlr4/runtime/Cpp deploy-windows.cmd Community -cp antlr4-cpp-runtime-vs2019.zip ~/antlr/sites/website-antlr4/download/antlr4-cpp-runtime-4.10.1-vs2019.zip +cp antlr4-cpp-runtime-vs2019.zip ~/antlr/sites/website-antlr4/download/antlr4-cpp-runtime-4.11.0-vs2019.zip ``` Move target to website (**_rename to a specific ANTLR version first if needed_**): ```bash pushd ~/antlr/sites/website-antlr4/download -git add antlr4-cpp-runtime-4.10.1-macos.zip -git add antlr4-cpp-runtime-4.10.1-windows.zip -git add antlr4-cpp-runtime-4.10.1-source.zip +git add antlr4-cpp-runtime-4.11.0-macos.zip +git add antlr4-cpp-runtime-4.11.0-windows.zip +git add antlr4-cpp-runtime-4.11.0-source.zip git commit -a -m 'update C++ runtime' git push origin gh-pages popd @@ -457,7 +453,7 @@ mvn -DskipTests javadoc:jar -q install # get lots of errors but works Jars are in: ``` -~/.m2/repository/org/antlr/antlr4-runtime/4.10.1/antlr4-runtime-4.10.1 +~/.m2/repository/org/antlr/antlr4-runtime/4.11.0/antlr4-runtime-4.11.0 ``` ### Update version and copy jars / api @@ -466,36 +462,36 @@ Copy javadoc and java jars to website using this script: ```bash cd ~/antlr/code/antlr4 -python scripts/deploy_to_website.py 4.10 4.10.1 +python scripts/deploy_to_website.py 4.10.1 4.11.0 ``` Output: ```bash -Updating ANTLR version from 4.10 to 4.10.1 +Updating ANTLR version from 4.10.1 to 4.11.0 Set ANTLR website root (default /Users/parrt/antlr/sites/website-antlr4): Version string updated. 
Please commit/push: Javadoc copied: - api/Java updated from antlr4-runtime-4.10.1-javadoc.jar - api/JavaTool updated from antlr4-4.10.1-javadoc.jar + api/Java updated from antlr4-runtime-4.11.0-javadoc.jar + api/JavaTool updated from antlr4-4.11.0-javadoc.jar Jars copied: - antlr-4.10.1-complete.jar - antlr-runtime-4.10.1.jar + antlr-4.11.0-complete.jar + antlr-runtime-4.11.0.jar Please look for and add new api files!! Then MANUALLY commit/push: -git commit -a -m 'Update website, javadoc, jars to 4.10.1' +git commit -a -m 'Update website, javadoc, jars to 4.11.0' git push origin gh-pages ``` @@ -503,7 +499,7 @@ Once it's done, you must do the following manually: ``` cd ~/antlr/sites/website-antlr4 -git commit -a -m 'Update website, javadoc, jars to 4.10.1' +git commit -a -m 'Update website, javadoc, jars to 4.11.0' git push origin gh-pages ``` @@ -515,9 +511,9 @@ cd ~/antlr/sites/website-antlr4/api git checkout gh-pages git pull origin gh-pages cd Java -jar xvf ~/.m2/repository/org/antlr/antlr4-runtime/4.10.1/antlr4-runtime-4.10.1-javadoc.jar +jar xvf ~/.m2/repository/org/antlr/antlr4-runtime/4.11.0/antlr4-runtime-4.11.0-javadoc.jar cd ../JavaTool -jar xvf ~/.m2/repository/org/antlr/antlr4/4.10.1/antlr4-4.10.1-javadoc.jar +jar xvf ~/.m2/repository/org/antlr/antlr4/4.11.0/antlr4-4.11.0-javadoc.jar git commit -a -m 'freshen api doc' git push origin gh-pages ``` diff --git a/doc/swift-target.md b/doc/swift-target.md index 4c11faad1d..2514fc7c9d 100644 --- a/doc/swift-target.md +++ b/doc/swift-target.md @@ -131,7 +131,7 @@ Add Antlr4 as a dependency to your `Package.swift` file. For more information, p ```swift -.package(name: "Antlr4", url: "https://github.com/antlr/antlr4", from: "4.10.1" +.package(name: "Antlr4", url: "https://github.com/antlr/antlr4", from: "4.11.0" ``` ## Swift access levels diff --git a/doc/wildcard.md b/doc/wildcard.md index f3d1c3a0b5..a597c649c9 100644 --- a/doc/wildcard.md +++ b/doc/wildcard.md @@ -61,7 +61,7 @@ END : '>>' ;

After crossing through a nongreedy subrule within a lexical rule, all decision-making from then on is "first match wins."

-For example, literal `ab` in rule right-hand side (grammar fragment) `.*? (’a’|’ab’)` is dead code and can never be matched. If the input is ab, the first alternative, ’a’, matches the first character and therefore succeeds. (’a’|’ab’) by itself on the right-hand side of a rule properly matches the second alternative for input ab. This quirk arises from a nongreedy design decision that’s too complicated to go into here.

+For example, literal `ab` in rule right-hand side (grammar fragment) `.*? ('a'|'ab')` is dead code and can never be matched. If the input is ab, the first alternative, 'a', matches the first character and therefore succeeds. ('a'|'ab') by itself on the right-hand side of a rule properly matches the second alternative for input ab. This quirk arises from a nongreedy design decision that’s too complicated to go into here.

  • @@ -74,7 +74,7 @@ ACTION3 : '<' ( STRING | ~[">] )* '>' ; // Doesn't allow <"foo>; greedy * STRING : '"' ( '\\"' | . )*? '"' ; ``` -Rule `ACTION1` allows unterminated strings, such as `"foo`, because input `"foo` matches to the wildcard part of the loop. It doesn’t have to go into rule `STRING` to match a quote. To fix that, rule `ACTION2` uses `~’"’` to match any character but the quote. Expression `~’"’` is still ambiguous with the `’]’` that ends the rule, but the fact that the subrule is nongreedy means that the lexer will exit the loop upon a right square bracket. To avoid a nongreedy subrule, make the alternatives explicit. Expression `~[">]` matches anything but the quote and right angle bracket. Here’s a sample run: +Rule `ACTION1` allows unterminated strings, such as `"foo`, because input `"foo` matches to the wildcard part of the loop. It doesn’t have to go into rule `STRING` to match a quote. To fix that, rule `ACTION2` uses `~'"'` to match any character but the quote. Expression `~'"'` is still ambiguous with the `']'` that ends the rule, but the fact that the subrule is nongreedy means that the lexer will exit the loop upon a right square bracket. To avoid a nongreedy subrule, make the alternatives explicit. Expression `~[">]` matches anything but the quote and right angle bracket. Here’s a sample run: ```bash $ antlr4 Actions.g4 @@ -225,4 +225,4 @@ ANY : . ; You get: - \ No newline at end of file + diff --git a/docker/Dockerfile b/docker/Dockerfile index 9bbd338f93..919dd1df52 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -2,7 +2,7 @@ FROM adoptopenjdk/openjdk11:alpine AS builder WORKDIR /opt/antlr4 -ARG ANTLR_VERSION="4.10.1" +ARG ANTLR_VERSION="4.11.0" ARG MAVEN_OPTS="-Xmx1G" diff --git a/docker/README.md b/docker/README.md index 53743347b3..05a19a7aa5 100644 --- a/docker/README.md +++ b/docker/README.md @@ -13,7 +13,7 @@ You can build docker image from source code locally. 
git clone https://github.com/antlr/antlr4.git cd antlr4/docker - docker build -t antlr/antlr4 . + docker build -t antlr/antlr4 --platform linux/amd64 . ## Run diff --git a/pom.xml b/pom.xml index 2b5939695f..b0989b628a 100644 --- a/pom.xml +++ b/pom.xml @@ -13,16 +13,16 @@ org.antlr antlr4-master - 4.10.2-SNAPSHOT + 4.11.0-SNAPSHOT pom ANTLR 4 ANTLR 4 Master Build POM - http://www.antlr.org + https://www.antlr.org/ 1992 ANTLR - http://www.antlr.org + https://www.antlr.org/ @@ -31,8 +31,8 @@ - The BSD License - http://www.antlr.org/license.html + BSD-3-Clause + https://www.antlr.org/license.html repo @@ -40,7 +40,7 @@ Terence Parr - http://parrt.cs.usfca.edu + https://github.com/parrt Project lead - ANTLR @@ -67,9 +67,10 @@ Jim Idle jimi@idle.ws - http://www.linkedin.com/in/jimidle + https://www.linkedin.com/in/jimidle/ Developer - Maven Plugin + Developer - Go runtime diff --git a/runtime-testsuite/pom.xml b/runtime-testsuite/pom.xml index fbd9ec3958..1ad8e1686e 100644 --- a/runtime-testsuite/pom.xml +++ b/runtime-testsuite/pom.xml @@ -10,10 +10,10 @@ org.antlr antlr4-master - 4.10.2-SNAPSHOT + 4.11.0-SNAPSHOT antlr4-runtime-testsuite - ANTLR 4 Runtime Tests (3rd generation) + ANTLR 4 Runtime Tests (4th generation) A collection of tests for ANTLR 4 Runtime libraries. 
@@ -22,11 +22,15 @@ 2009 + + 5.9.0 + + org.antlr ST4 - 4.3.1 + 4.3.4 test @@ -42,9 +46,15 @@ test - junit - junit - 4.13.2 + org.junit.jupiter + junit-jupiter-api + ${jUnitVersion} + test + + + org.junit.jupiter + junit-jupiter-engine + ${jUnitVersion} test @@ -62,19 +72,6 @@ test - - - resources - - - ../runtime - - **/.build/** - **/target/** - Swift/*.xcodeproj/** - - - org.apache.maven.plugins @@ -83,18 +80,6 @@ -Dfile.encoding=UTF-8 - - **/csharp/Test*.java - **/java/Test*.java - **/java/api/Test*.java - **/go/Test*.java - **/javascript/Test*.java - **/python2/Test*.java - **/python3/Test*.java - **/php/Test*.java - **/dart/Test*.java - ${antlr.tests.swift} - @@ -138,18 +123,4 @@ - - - - includeSwiftTests - - - mac - - - - **/swift/Test*.java - - - diff --git a/runtime-testsuite/resources/junit-platform.properties b/runtime-testsuite/resources/junit-platform.properties new file mode 100644 index 0000000000..ad19ea833b --- /dev/null +++ b/runtime-testsuite/resources/junit-platform.properties @@ -0,0 +1,3 @@ +junit.jupiter.execution.parallel.enabled = true +junit.jupiter.execution.parallel.mode.default = concurrent +junit.jupiter.execution.parallel.mode.classes.default = concurrent \ No newline at end of file diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/CompositeParsers/BringInLiteralsFromDelegate.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/CompositeParsers/BringInLiteralsFromDelegate.txt index 43b6e62243..fa198f3065 100644 --- a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/CompositeParsers/BringInLiteralsFromDelegate.txt +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/CompositeParsers/BringInLiteralsFromDelegate.txt @@ -18,6 +18,4 @@ s =a [output] -"""S.a -""" - +"""S.a""" \ No newline at end of file diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/CompositeParsers/DelegatorInvokesDelegateRuleWithReturnStruct.txt 
b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/CompositeParsers/DelegatorInvokesDelegateRuleWithReturnStruct.txt index 82ba1aec11..339a3f4740 100644 --- a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/CompositeParsers/DelegatorInvokesDelegateRuleWithReturnStruct.txt +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/CompositeParsers/DelegatorInvokesDelegateRuleWithReturnStruct.txt @@ -19,6 +19,5 @@ s b [output] -"""S.ab -""" +"""S.ab""" diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/CompositeParsers/DelegatorRuleOverridesDelegate.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/CompositeParsers/DelegatorRuleOverridesDelegate.txt index 74d90f55bc..d25532f56e 100644 --- a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/CompositeParsers/DelegatorRuleOverridesDelegate.txt +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/CompositeParsers/DelegatorRuleOverridesDelegate.txt @@ -19,6 +19,4 @@ a c [output] -"""S.a -""" - +"""S.a""" \ No newline at end of file diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/ContextListGetters.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/ContextListGetters.txt index ce9f4ab7fb..1db5baad56 100644 --- a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/ContextListGetters.txt +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/ContextListGetters.txt @@ -21,6 +21,4 @@ s abab [output] -"""abab -""" - +"""abab""" \ No newline at end of file diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/ExtraneousInput.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/ExtraneousInput.txt index 7db6198596..d1cc7624a4 100644 --- 
a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/ExtraneousInput.txt +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserErrors/ExtraneousInput.txt @@ -25,7 +25,7 @@ baa Cpp CSharp Go -Node +JavaScript PHP Python2 Python3 diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/ListLabelForClosureContext.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/ListLabelForClosureContext.txt index 6b964f0a9c..e6c01d50a2 100644 --- a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/ListLabelForClosureContext.txt +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/ListLabelForClosureContext.txt @@ -33,3 +33,5 @@ expression [input] a +[skip] +Go \ No newline at end of file diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/ListLabelsOnRuleRefStartOfAlt.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/ListLabelsOnRuleRefStartOfAlt.txt new file mode 100644 index 0000000000..ac6963e588 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/ListLabelsOnRuleRefStartOfAlt.txt @@ -0,0 +1,33 @@ +[notes] +Checks that this compiles; see https://github.com/antlr/antlr4/issues/2016 + +[type] +Parser + +[grammar] +grammar Test; + +expression +@after { + +} + : op=NOT args+=expression + | args+=expression (op=AND args+=expression)+ + | args+=expression (op=OR args+=expression)+ + | IDENTIFIER + ; + +AND : 'and' ; +OR : 'or' ; +NOT : 'not' ; +IDENTIFIER : [a-zA-Z_][a-zA-Z0-9_]* ; +WS : [ \t\r\n]+ -> skip ; + +[start] +expression + +[input] +a and b + +[skip] +Cpp \ No newline at end of file diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/ReservedWordsEscaping.txt 
b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/ReservedWordsEscaping.txt index 7ae27a5233..15e8d6c4aa 100644 --- a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/ReservedWordsEscaping.txt +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/ParserExec/ReservedWordsEscaping.txt @@ -35,5 +35,4 @@ root for for break break for if if for continue [output] -"""for for break break for if if for continue -""" \ No newline at end of file +"""for for break break for if if for continue""" \ No newline at end of file diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Performance/DropLoopEntryBranchInLRRule_4.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Performance/DropLoopEntryBranchInLRRule_4.txt index 7736c7a467..25775e96e6 100644 --- a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Performance/DropLoopEntryBranchInLRRule_4.txt +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/Performance/DropLoopEntryBranchInLRRule_4.txt @@ -51,6 +51,6 @@ between X1 and X2 or between X3 and X4 Go Python2 Python3 -Node +JavaScript PHP diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/PredFromAltTestedInLoopBack_1.txt b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/PredFromAltTestedInLoopBack_1.txt index 70ee58e5cd..7c13488fdf 100644 --- a/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/PredFromAltTestedInLoopBack_1.txt +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/descriptors/SemPredEvalParser/PredFromAltTestedInLoopBack_1.txt @@ -38,7 +38,7 @@ Cpp CSharp Dart Go -Node +JavaScript PHP Python2 Python3 diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/Antlr4.Test.csproj.stg b/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/Antlr4.Test.csproj.stg new 
file mode 100644 index 0000000000..aa1073c574 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/Antlr4.Test.csproj.stg @@ -0,0 +1,25 @@ +\ + + \ + \net6.0\ + \$(NoWarn);CS3021\ + \Test\ + \Exe\ + \.\ + \Antlr4.Test\ + \false\ + \false\ + \false\ + \false\ + \false\ + \false\ + \false\ + \ + + \ + \ + \\ + \ + \ + +\ diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/Package.swift.stg b/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/Package.swift.stg new file mode 100644 index 0000000000..43ad150307 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/Package.swift.stg @@ -0,0 +1,11 @@ +// swift-tools-version: 5.6 + +import PackageDescription + +let package = Package( + name: "Test", + targets: [ + .executableTarget(name: "Test", path: ".", + exclude:[ "}; separator = ", ", wrap> ]), + ] +) diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/RuntimeTestLexer.java b/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/RuntimeTestLexer.java new file mode 100644 index 0000000000..d1cb77866f --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/RuntimeTestLexer.java @@ -0,0 +1,10 @@ +import org.antlr.v4.runtime.CharStream; +import org.antlr.v4.runtime.Lexer; + +public abstract class RuntimeTestLexer extends Lexer { + protected java.io.PrintStream outStream = System.out; + + public RuntimeTestLexer(CharStream input) { super(input); } + + public void setOutStream(java.io.PrintStream outStream) { this.outStream = outStream; } +} diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/RuntimeTestParser.java b/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/RuntimeTestParser.java new file mode 100644 index 0000000000..1ed38f4959 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/RuntimeTestParser.java @@ -0,0 +1,14 @@ +import org.antlr.v4.runtime.Parser; 
+import org.antlr.v4.runtime.TokenStream; + +public abstract class RuntimeTestParser extends Parser { + protected java.io.PrintStream outStream = System.out; + + public RuntimeTestParser(TokenStream input) { + super(input); + } + + public void setOutStream(java.io.PrintStream outStream) { + this.outStream = outStream; + } +} diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/Test.cpp.stg b/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/Test.cpp.stg new file mode 100644 index 0000000000..7afd8c8750 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/Test.cpp.stg @@ -0,0 +1,52 @@ +#include \ + +#include "antlr4-runtime.h" +#include ".h" + +#include ".h" + + +using namespace antlr4; + + +class TreeShapeListener : public tree::ParseTreeListener { +public: + void visitTerminal(tree::TerminalNode *) override {} + void visitErrorNode(tree::ErrorNode *) override {} + void exitEveryRule(ParserRuleContext *) override {} + void enterEveryRule(ParserRuleContext *ctx) override { + for (auto child : ctx->children) { + tree::ParseTree *parent = child->parent; + ParserRuleContext *rule = dynamic_cast\(parent); + if (rule != ctx) { + throw "Invalid parse tree shape detected."; + } + } + } +}; + + +int main(int argc, const char* argv[]) { + ANTLRFileStream input; + input.loadFromFile(argv[1]); + lexer(&input); + CommonTokenStream tokens(&lexer); + + parser(&tokens); + + DiagnosticErrorListener errorListener; + parser.addErrorListener(&errorListener); + + tree::ParseTree *tree = parser.(); + TreeShapeListener listener; + tree::ParseTreeWalker::DEFAULT.walk(&listener, tree); + + tokens.fill(); + for (auto token : tokens.getTokens()) + std::cout \<\< token->toString() \<\< std::endl; + + std::cout \<\< lexer.getInterpreter\()->getDFA(Lexer::DEFAULT_MODE).toLexerString(); + + + return 0; +} diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/Test.cs.stg 
b/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/Test.cs.stg new file mode 100644 index 0000000000..d9ec86b6c5 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/Test.cs.stg @@ -0,0 +1,47 @@ +using System; +using Antlr4.Runtime; +using Antlr4.Runtime.Tree; +using System.Text; + +public class Test { + public static void Main(string[] args) { + Console.OutputEncoding = Encoding.UTF8; + Console.InputEncoding = Encoding.UTF8; + var input = CharStreams.fromPath(args[0]); + var lex = new (input); + var tokens = new CommonTokenStream(lex); + + var parser = new (tokens); + + parser.AddErrorListener(new DiagnosticErrorListener()); + + parser.BuildParseTree = true; + var tree = parser.(); + ParseTreeWalker.Default.Walk(new TreeShapeListener(), tree); + + tokens.Fill(); + foreach (object t in tokens.GetTokens()) + Console.Out.WriteLine(t); + + Console.Out.Write(lex.Interpreter.GetDFA(Lexer.DEFAULT_MODE).ToLexerString()); + + + } +} + + +class TreeShapeListener : IParseTreeListener { + public void VisitTerminal(ITerminalNode node) { } + public void VisitErrorNode(IErrorNode node) { } + public void ExitEveryRule(ParserRuleContext ctx) { } + + public void EnterEveryRule(ParserRuleContext ctx) { + for (int i = 0; i \< ctx.ChildCount; i++) { + IParseTree parent = ctx.GetChild(i).Parent; + if (!(parent is IRuleNode) || ((IRuleNode)parent).RuleContext != ctx) { + throw new Exception("Invalid parse tree shape detected."); + } + } + } +} + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/Test.dart.stg b/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/Test.dart.stg new file mode 100644 index 0000000000..e645baa4d5 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/Test.dart.stg @@ -0,0 +1,60 @@ +import 'dart:io'; +import 'package:antlr4/antlr4.dart'; + +import '.dart'; + +import '.dart'; + + +void main(List\ args) async { + CharStream input = await 
InputStream.fromPath(args[0]); + final lex = (input); + final tokens = CommonTokenStream(lex); + + final parser = (tokens); + + parser.addErrorListener(new DiagnosticErrorListener()); + + + ProfilingATNSimulator profiler = ProfilingATNSimulator(parser); + parser.setInterpreter(profiler); + + parser.buildParseTree = true; + + ProfilingATNSimulator profiler = ProfilingATNSimulator(parser); + parser.setInterpreter(profiler); + + ParserRuleContext tree = parser.(); + + print('[${profiler.getDecisionInfo().join(', ')}]'); + + ParseTreeWalker.DEFAULT.walk(TreeShapeListener(), tree); + + tokens.fill(); + for (Object t in tokens.getTokens()!) + print(t); + + stdout.write(lex.interpreter!.getDFA(Lexer.DEFAULT_MODE).toLexerString()); + + +} + + +class TreeShapeListener implements ParseTreeListener { + @override void visitTerminal(TerminalNode node) {} + + @override void visitErrorNode(ErrorNode node) {} + + @override void exitEveryRule(ParserRuleContext ctx) {} + + @override + void enterEveryRule(ParserRuleContext ctx) { + for (var i = 0; i \< ctx.childCount; i++) { + final parent = ctx.getChild(i)?.parent; + if (!(parent is RuleNode) || (parent as RuleNode).ruleContext != ctx) { + throw StateError('Invalid parse tree shape detected.'); + } + } + } +} + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/Test.go.stg b/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/Test.go.stg new file mode 100644 index 0000000000..d8b479b977 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/Test.go.stg @@ -0,0 +1,54 @@ +package main +import ( + "test/parser" + "github.com/antlr/antlr4/runtime/Go/antlr/v4" + "fmt" + "os" +) + + +type TreeShapeListener struct { + *parser.BaseListener +} + +func NewTreeShapeListener() *TreeShapeListener { + return new(TreeShapeListener) +} + +func (this *TreeShapeListener) EnterEveryRule(ctx antlr.ParserRuleContext) { + for i := 0; i\ + +func main() { + input, err := 
antlr.NewFileStream(os.Args[1]) + if err != nil { + fmt.Printf("Failed to find file: %v", err) + return + } + lexer := parser.New(input) + stream := antlr.NewCommonTokenStream(lexer,0) + + p := parser.New(stream) + + p.AddErrorListener(antlr.NewDiagnosticErrorListener(true)) + + p.BuildParseTrees = true + tree := p.() + antlr.ParseTreeWalkerDefault.Walk(NewTreeShapeListener(), tree) + + stream.Fill() + for _, t := range stream.GetAllTokens() { + fmt.Println(t) + } + + fmt.Print(lexer.GetInterpreter().DecisionToDFA()[antlr.LexerDefaultMode].ToLexerString()) + + +} diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/Test.java.stg b/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/Test.java.stg new file mode 100644 index 0000000000..76eae4e316 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/Test.java.stg @@ -0,0 +1,89 @@ +import org.antlr.v4.runtime.*; +import org.antlr.v4.runtime.tree.*; +import org.antlr.v4.runtime.atn.*; + +import java.io.IOException; +import java.io.PrintStream; +import java.nio.file.Paths; +import java.util.Arrays; + +public class Test { + public static void main(String[] args) throws Exception { + recognize(args[0], System.out, System.err); + } + + public static void recognize(String inputFile, PrintStream outStream, PrintStream errorStream) throws IOException { + CustomStreamErrorListener errorListener = new CustomStreamErrorListener(errorStream); + + CharStream input = CharStreams.fromPath(Paths.get(inputFile)); + lexer = new (input); + lexer.setOutStream(outStream); + lexer.removeErrorListeners(); + lexer.addErrorListener(errorListener); + CommonTokenStream tokens = new CommonTokenStream(lexer); + + CommonTokenStream tokens = null; // It's required for compilation + + + parser = new (tokens); + parser.setOutStream(outStream); + parser.removeErrorListeners(); + parser.addErrorListener(errorListener); + + parser.addErrorListener(new DiagnosticErrorListener()); + + 
parser.setBuildParseTree(true); + + ProfilingATNSimulator profiler = new ProfilingATNSimulator(parser); + parser.setInterpreter(profiler); + + ParserRuleContext tree = parser.(); + + outStream.println(Arrays.toString(profiler.getDecisionInfo())); + + ParseTreeWalker.DEFAULT.walk(new TreeShapeListener(), tree); + + tokens.fill(); + for (Object t : tokens.getTokens()) outStream.println(t); + + outStream.print(lexer.getInterpreter().getDFA(Lexer.DEFAULT_MODE).toLexerString()); + + + } + + static class CustomStreamErrorListener extends BaseErrorListener { + private final PrintStream printStream; + + public CustomStreamErrorListener(PrintStream printStream){ + this.printStream = printStream; + } + + @Override + public void syntaxError(Recognizer\ recognizer, + Object offendingSymbol, + int line, + int charPositionInLine, + String msg, + RecognitionException e) { + printStream.println("line " + line + ":" + charPositionInLine + " " + msg); + } + } + + + static class TreeShapeListener implements ParseTreeListener { + @Override public void visitTerminal(TerminalNode node) { } + @Override public void visitErrorNode(ErrorNode node) { } + @Override public void exitEveryRule(ParserRuleContext ctx) { } + + @Override + public void enterEveryRule(ParserRuleContext ctx) { + for (int i = 0; i \< ctx.getChildCount(); i++) { + ParseTree parent = ctx.getChild(i).getParent(); + if (!(parent instanceof RuleNode) || ((RuleNode)parent).getRuleContext() != ctx) { + throw new IllegalStateException("Invalid parse tree shape detected."); + } + } + } + } + +} diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/Test.js.stg b/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/Test.js.stg new file mode 100644 index 0000000000..f82782bd28 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/Test.js.stg @@ -0,0 +1,55 @@ +import antlr4 from 'file:///src/antlr4/index.js' +import from './.js'; + +import from './.js'; + +import Listener 
from './Listener.js'; + + +import Visitor from './Visitor.js'; + + +class TreeShapeListener extends antlr4.tree.ParseTreeListener { + enterEveryRule(ctx) { + for (let i = 0; i \< ctx.getChildCount; i++) { + const child = ctx.getChild(i) + const parent = child.parentCtx + if (parent.getRuleContext() !== ctx || !(parent instanceof antlr4.tree.RuleNode)) { + throw `Invalid parse tree shape detected.` + } + } + } +} + + +function main(argv) { + var input = new antlr4.FileStream(argv[2], true); + var lexer = new (input); + var stream = new antlr4.CommonTokenStream(lexer); + + var parser = new (stream); + + parser.addErrorListener(new antlr4.error.DiagnosticErrorListener()); + + parser.buildParseTrees = true; + const printer = function() { + this.println = function(s) { console.log(s); } + this.print = function(s) { process.stdout.write(s); } + return this; + }; + parser.printer = new printer(); + var tree = parser.(); + antlr4.tree.ParseTreeWalker.DEFAULT.walk(new TreeShapeListener(), tree); + + stream.fill(); + for(var i=0; i\ + process.stdout.write(lexer._interp.decisionToDFA[antlr4.Lexer.DEFAULT_MODE].toLexerString()); + + +} + +main(process.argv); + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/Test.php.stg b/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/Test.php.stg new file mode 100644 index 0000000000..a806e591c0 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/Test.php.stg @@ -0,0 +1,68 @@ +\ +final class TreeShapeListener implements ParseTreeListener { + public function visitTerminal(TerminalNode $node) : void {} + public function visitErrorNode(ErrorNode $node) : void {} + public function exitEveryRule(ParserRuleContext $ctx) : void {} + + public function enterEveryRule(ParserRuleContext $ctx) : void { + for ($i = 0, $count = $ctx->getChildCount(); $i \< $count; $i++) { + $parent = $ctx->getChild($i)->getParent(); + + if (!($parent instanceof RuleNode) || $parent->getRuleContext() 
!== $ctx) { + throw new RuntimeException('Invalid parse tree shape detected.'); + } + } + } +} + + +$input = InputStream::fromPath($argv[1]); +$lexer = new ($input); +$lexer->addErrorListener(new ConsoleErrorListener()); +$tokens = new CommonTokenStream($lexer); + +$parser = new ($tokens); + +$parser->addErrorListener(new DiagnosticErrorListener()); + +$parser->addErrorListener(new ConsoleErrorListener()); +$parser->setBuildParseTree(true); +$tree = $parser->(); + +ParseTreeWalker::default()->walk(new TreeShapeListener(), $tree); + +$tokens->fill(); + +foreach ($tokens->getAllTokens() as $token) { + echo $token . \PHP_EOL; +} + +echo $lexer->getInterpreter()->getDFA(Lexer::DEFAULT_MODE)->toLexerString(); + + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/Test.py.stg b/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/Test.py.stg new file mode 100644 index 0000000000..e09e2a1169 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/Test.py.stg @@ -0,0 +1,51 @@ +from __future__ import print_function +import sys +import codecs +from antlr4 import * +from import + +from import +from Listener import Listener +from Visitor import Visitor + +class TreeShapeListener(ParseTreeListener): + + def visitTerminal(self, node:TerminalNode): + pass + + def visitErrorNode(self, node:ErrorNode): + pass + + def exitEveryRule(self, ctx:ParserRuleContext): + pass + + def enterEveryRule(self, ctx:ParserRuleContext): + for child in ctx.getChildren(): + parent = child.parentCtx + if not isinstance(parent, RuleNode) or parent.getRuleContext() != ctx: + raise IllegalStateException("Invalid parse tree shape detected.") + + +def main(argv): + input = FileStream(argv[1], encoding='utf-8', errors='replace') + lexer = (input) + stream = CommonTokenStream(lexer) + + parser = (stream) + + parser.addErrorListener(DiagnosticErrorListener()) + + parser.buildParseTrees = True + tree = parser.() + 
ParseTreeWalker.DEFAULT.walk(TreeShapeListener(), tree) + + stream.fill() + [ print(tunicode(t)) for t in stream.tokens ] + + print(lexer._interp.decisionToDFA[Lexer.DEFAULT_MODE].toLexerString(), end='') + + + +if __name__ == '__main__': + main(sys.argv) + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/Test.vcxproj.stg b/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/Test.vcxproj.stg new file mode 100644 index 0000000000..aefd731b6d --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/Test.vcxproj.stg @@ -0,0 +1,85 @@ +\ +\ + \ + \ + \Release\ + \x64\ + \ + \ + \ + \16.0\ + \Win32Proj\ + \{f3708606-c8fb-45ca-ae36-b729f91e972b}\ + \Test\ + \10.0\ + \ + \ + + \ + \Application\ + \false\ + \v143\ + \false\ \ + \Unicode\ + \ + + \ + \ + \ + \ + + \ + \false\ + \;$(IncludePath)\ + \$(VC_ReferencesPath_x64);\ + \$(SolutionDir)\ + \ + + \ + \ + \TurnOffAllWarnings\ + \false\ + \false\ + \false\ + \NDEBUG;_CONSOLE;%(PreprocessorDefinitions)\ + \true\ + \stdcpp17\ + \None\ + \ + \ + \Console\ + \true\ + \false\ + \false\ + \"\\antlr4-runtime.lib";%(AdditionalDependencies)\ + \ + \ + + \ + \ + \ + + + \ + \ + + + + \ + \ + \ + \ + + + + \ + \ + \ + \ + + + \ + \ + + \ +\ diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/main.swift.stg b/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/main.swift.stg new file mode 100644 index 0000000000..3d43f33640 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/main.swift.stg @@ -0,0 +1,47 @@ +import Antlr4 +import Foundation + + +class TreeShapeListener: ParseTreeListener{ + func visitTerminal(_ node: TerminalNode){ } + func visitErrorNode(_ node: ErrorNode){ } + func enterEveryRule(_ ctx: ParserRuleContext) throws { } + func exitEveryRule(_ ctx: ParserRuleContext) throws { + for i in 0..\ + +let args = CommandLine.arguments +let input = try ANTLRFileStream(args[1]) +let lex = (input) +let tokens 
= CommonTokenStream(lex) + +let parser = try (tokens) + +parser.addErrorListener(DiagnosticErrorListener()) + +parser.setBuildParseTree(true) + +let profiler = ProfilingATNSimulator(parser) +parser.setInterpreter(profiler) + +let tree = try parser.() + +print(profiler.getDecisionInfo().description) + +try ParseTreeWalker.DEFAULT.walk(TreeShapeListener(), tree) + +try tokens.fill() +for t in tokens.getTokens() { + print(t) +} + +print(lex.getInterpreter().getDFA(Lexer.DEFAULT_MODE).toLexerString(), terminator: "") + + diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/package.json b/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/package.json new file mode 100644 index 0000000000..1632c2c4df --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/package.json @@ -0,0 +1 @@ +{"type": "module"} \ No newline at end of file diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/pubspec.yaml.stg b/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/pubspec.yaml.stg new file mode 100644 index 0000000000..16e3c769e5 --- /dev/null +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/helpers/pubspec.yaml.stg @@ -0,0 +1,6 @@ +name: "test" +dependencies: + antlr4: + path: +environment: + sdk: ">=2.12.0 \<3.0.0" diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/CSharp.test.stg b/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/CSharp.test.stg index cd9270286c..5d9cb986fc 100644 --- a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/CSharp.test.stg +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/CSharp.test.stg @@ -18,7 +18,7 @@ AppendStr(a,b) ::= <%%> Concat(a,b) ::= "" -AssertIsList(v) ::= "System.Collections.IList __ttt__ = ;" // just use static type system +AssertIsList(v) ::= "System.Collections.IList __ttt__ = (System.Collections.IList);" // just use static type system AssignLocal(s,v) ::= " = ;" 
@@ -232,7 +232,7 @@ public class LeafListener : TBaseListener { } sb.Length = sb.Length - 2; sb.Append ("]"); - Output.Write ("{0} {1} {2}", ctx.INT (0).Symbol.Text, + Output.Write ("{0} {1} {2}\n", ctx.INT (0).Symbol.Text, ctx.INT (1).Symbol.Text, sb.ToString()); } else @@ -253,7 +253,7 @@ public class LeafListener : TBaseListener { public override void ExitA(TParser.AContext ctx) { if (ctx.ChildCount==2) { - Output.Write("{0} {1} {2}",ctx.b(0).Start.Text, + Output.Write("{0} {1} {2}\n",ctx.b(0).Start.Text, ctx.b(1).Start.Text,ctx.b()[0].Start.Text); } else Output.WriteLine(ctx.b(0).Start.Text); @@ -293,7 +293,7 @@ public class LeafListener : TBaseListener { } public override void ExitCall(TParser.CallContext ctx) { - Output.Write("{0} {1}",ctx.e().Start.Text,ctx.eList()); + Output.Write("{0} {1}\n",ctx.e().Start.Text,ctx.eList()); } public override void ExitInt(TParser.IntContext ctx) { Output.WriteLine(ctx.INT().Symbol.Text); diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Chrome.test.stg b/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Chrome.test.stg deleted file mode 100644 index 7b4729eb5c..0000000000 --- a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Chrome.test.stg +++ /dev/null @@ -1,290 +0,0 @@ -writeln(s) ::= < + '\\n';>> - -write(s) ::= <;>> - -False() ::= "false" - -True() ::= "true" - -Not(v) ::= "!" - -Assert(s) ::= "" - -Cast(t,v) ::= "" - -Append(a,b) ::= " + " - -AppendStr(a,b) ::= <%%> - -Concat(a,b) ::= "" - -AssertIsList(v) ::= <> - -AssignLocal(s,v) ::= " = ;" - -InitIntMember(n,v) ::= <%this. = ;%> - -InitBooleanMember(n,v) ::= <%this. = ;%> - -InitIntVar(n,v) ::= <%%> - -IntArg(n) ::= "" - -VarRef(n) ::= "" - -GetMember(n) ::= <%this.%> - -SetMember(n,v) ::= <%this. = ;%> - -AddMember(n,v) ::= <%this. += ;%> - -MemberEquals(n,v) ::= <%this. === %> - -ModMemberEquals(n,m,v) ::= <%this. % === %> - -ModMemberNotEquals(n,m,v) ::= <%this. 
% != %> - -DumpDFA() ::= "this.dumpDFA();" - -Pass() ::= "" - -StringList() ::= "list" - -BuildParseTrees() ::= "this.buildParseTrees = true;" - -BailErrorStrategy() ::= <%this._errHandler = new antlr4.error.BailErrorStrategy();%> - -ToStringTree(s) ::= <%.toStringTree(null, this)%> - -Column() ::= "this.column" - -Text() ::= "this.text" - -ValEquals(a,b) ::= <%===%> - -TextEquals(a) ::= <%this.text===""%> - -PlusText(a) ::= <%"" + this.text%> - -InputText() ::= "this._input.getText()" - -LTEquals(i, v) ::= <%this._input.LT().text===%> - -LANotEquals(i, v) ::= <%this._input.LA()!=%> - -TokenStartColumnEquals(i) ::= <%this._tokenStartColumn===%> - -ImportListener(X) ::= "" - -GetExpectedTokenNames() ::= "this.getExpectedTokens().toString(this.literalNames)" - -RuleInvocationStack() ::= "antlr4.Utils.arrayToString(this.getRuleInvocationStack())" - -LL_EXACT_AMBIG_DETECTION() ::= <> - -ParserToken(parser, token) ::= <%.%> - -Production(p) ::= <%

    %> - -Result(r) ::= <%%> - -ParserPropertyMember() ::= << -@members { -this.Property = function() { - return true; -} -} ->> - -PositionAdjustingLexerDef() ::= "" - -PositionAdjustingLexer() ::= << - -PositionAdjustingLexer.prototype.resetAcceptPosition = function(index, line, column) { - this._input.seek(index); - this.line = line; - this.column = column; - this._interp.consume(this._input); -}; - -PositionAdjustingLexer.prototype.nextToken = function() { - if (!("resetAcceptPosition" in this._interp)) { - var lexer = this; - this._interp.resetAcceptPosition = function(index, line, column) { lexer.resetAcceptPosition(index, line, column); }; - } - return antlr4.Lexer.prototype.nextToken.call(this); -}; - -PositionAdjustingLexer.prototype.emit = function() { - switch(this._type) { - case PositionAdjustingLexer.TOKENS: - this.handleAcceptPositionForKeyword("tokens"); - break; - case PositionAdjustingLexer.LABEL: - this.handleAcceptPositionForIdentifier(); - break; - } - return antlr4.Lexer.prototype.emit.call(this); -}; - -PositionAdjustingLexer.prototype.handleAcceptPositionForIdentifier = function() { - var tokenText = this.text; - var identifierLength = 0; - while (identifierLength \< tokenText.length && - PositionAdjustingLexer.isIdentifierChar(tokenText[identifierLength]) - ) { - identifierLength += 1; - } - if (this._input.index > this._tokenStartCharIndex + identifierLength) { - var offset = identifierLength - 1; - this._interp.resetAcceptPosition(this._tokenStartCharIndex + offset, - this._tokenStartLine, this._tokenStartColumn + offset); - return true; - } else { - return false; - } -}; - -PositionAdjustingLexer.prototype.handleAcceptPositionForKeyword = function(keyword) { - if (this._input.index > this._tokenStartCharIndex + keyword.length) { - var offset = keyword.length - 1; - this._interp.resetAcceptPosition(this._tokenStartCharIndex + offset, - this._tokenStartLine, this._tokenStartColumn + offset); - return true; - } else { - return false; - } 
-}; - -PositionAdjustingLexer.isIdentifierChar = function(c) { - return c.match(/^[0-9a-zA-Z_]+$/); -} - ->> - -BasicListener(X) ::= << -this.LeafListener = function() { - this.visitTerminal = function(node) { - document.getElementById('output').value += node.symbol.text + '\\n'; - }; - return this; -}; -this.LeafListener.prototype = Object.create(Listener.prototype); -this.LeafListener.prototype.constructor = this.LeafListener; - ->> - -WalkListener(s) ::= << -var walker = new antlr4.tree.ParseTreeWalker(); -walker.walk(new this.LeafListener(), ); ->> - -TreeNodeWithAltNumField(X) ::= << - -@parser::header { -MyRuleNode = function(parent, invokingState) { - antlr4.ParserRuleContext.call(this, parent, invokingState); - this.altNum = 0; - return this; -}; - -MyRuleNode.prototype = Object.create(antlr4.ParserRuleContext.prototype); -MyRuleNode.prototype.constructor = MyRuleNode; -} ->> - -TokenGetterListener(X) ::= << -this.LeafListener = function() { - this.exitA = function(ctx) { - var str; - if(ctx.getChildCount()===2) { - str = ctx.INT(0).symbol.text + ' ' + ctx.INT(1).symbol.text + ' ' + antlr4.Utils.arrayToString(ctx.INT()); - } else { - str = ctx.ID().symbol.toString(); - } - document.getElementById('output').value += str + '\\n'; - }; - return this; -}; -this.LeafListener.prototype = Object.create(Listener.prototype); -this.LeafListener.prototype.constructor = this.LeafListener; - ->> - -RuleGetterListener(X) ::= << -this.LeafListener = function() { - this.exitA = function(ctx) { - var str; - if(ctx.getChildCount()===2) { - str = ctx.b(0).start.text + ' ' + ctx.b(1).start.text + ' ' + ctx.b()[0].start.text; - } else { - str = ctx.b(0).start.text; - } - document.getElementById('output').value += str + '\\n'; - }; - return this; -}; -this.LeafListener.prototype = Object.create(Listener.prototype); -this.LeafListener.prototype.constructor = this.LeafListener; - ->> - - -LRListener(X) ::= << -this.LeafListener = function() { - this.exitE = function(ctx) { - var 
str; - if(ctx.getChildCount()===3) { - str = ctx.e(0).start.text + ' ' + ctx.e(1).start.text + ' ' + ctx.e()[0].start.text; - } else { - str = ctx.INT().symbol.text; - } - document.getElementById('output').value += str + '\\n'; - }; - return this; -}; -this.LeafListener.prototype = Object.create(Listener.prototype); -this.LeafListener.prototype.constructor = this.LeafListener; - ->> - -LRWithLabelsListener(X) ::= << -this.LeafListener = function() { - this.exitCall = function(ctx) { - var str = ctx.e().start.text + ' ' + ctx.eList(); - document.getElementById('output').value += str + '\\n'; - }; - this.exitInt = function(ctx) { - var str = ctx.INT().symbol.text; - document.getElementById('output').value += str + '\\n'; - }; - return this; -}; -this.LeafListener.prototype = Object.create(Listener.prototype); -this.LeafListener.prototype.constructor = this.LeafListener; - ->> - -DeclareContextListGettersFunction() ::= << - function foo() { - var s = new SContext(); - var a = s.a(); - var b = s.b(); - }; ->> - -Declare_foo() ::= "this.foo = function() {document.getElementById('output').value += 'foo' + '\\n';};" - -Invoke_foo() ::= "this.foo();" - -Declare_pred() ::= <> - -Invoke_pred(v) ::= <)>> -ContextRuleFunction(ctx, rule) ::= "." -StringType() ::= "String" -ContextMember(ctx, subctx, member) ::= ".." -ParserPropertyCall(p, call) ::= "

    %> - -Result(r) ::= <%%> - -ParserPropertyMember() ::= << -@members { -this.Property = function() { - return true; -} -} ->> - -ParserPropertyCall(p, call) ::= "

    ." - -PositionAdjustingLexerDef() ::= "" - -PositionAdjustingLexer() ::= << - -PositionAdjustingLexer.prototype.resetAcceptPosition = function(index, line, column) { - this._input.seek(index); - this.line = line; - this.column = column; - this._interp.consume(this._input); -}; - -PositionAdjustingLexer.prototype.nextToken = function() { - if (!("resetAcceptPosition" in this._interp)) { - var lexer = this; - this._interp.resetAcceptPosition = function(index, line, column) { lexer.resetAcceptPosition(index, line, column); }; - } - return antlr4.Lexer.prototype.nextToken.call(this); -}; - -PositionAdjustingLexer.prototype.emit = function() { - switch(this._type) { - case PositionAdjustingLexer.TOKENS: - this.handleAcceptPositionForKeyword("tokens"); - break; - case PositionAdjustingLexer.LABEL: - this.handleAcceptPositionForIdentifier(); - break; - } - return antlr4.Lexer.prototype.emit.call(this); -}; - -PositionAdjustingLexer.prototype.handleAcceptPositionForIdentifier = function() { - var tokenText = this.text; - var identifierLength = 0; - while (identifierLength \< tokenText.length && - PositionAdjustingLexer.isIdentifierChar(tokenText[identifierLength]) - ) { - identifierLength += 1; - } - if (this._input.index > this._tokenStartCharIndex + identifierLength) { - var offset = identifierLength - 1; - this._interp.resetAcceptPosition(this._tokenStartCharIndex + offset, - this._tokenStartLine, this._tokenStartColumn + offset); - return true; - } else { - return false; - } -}; - -PositionAdjustingLexer.prototype.handleAcceptPositionForKeyword = function(keyword) { - if (this._input.index > this._tokenStartCharIndex + keyword.length) { - var offset = keyword.length - 1; - this._interp.resetAcceptPosition(this._tokenStartCharIndex + offset, - this._tokenStartLine, this._tokenStartColumn + offset); - return true; - } else { - return false; - } -}; - -PositionAdjustingLexer.isIdentifierChar = function(c) { - return c.match(/^[0-9a-zA-Z_]+$/); -} - ->> - 
-BasicListener(X) ::= << -@parser::members { -this.LeafListener = function() { - this.visitTerminal = function(node) { - document.getElementById('output').value += node.symbol.text + '\\n'; - }; - return this; -}; -this.LeafListener.prototype = Object.create(Listener.prototype); -this.LeafListener.prototype.constructor = this.LeafListener; -} ->> - -WalkListener(s) ::= << -var walker = new antlr4.tree.ParseTreeWalker(); -walker.walk(new this.LeafListener(), ); ->> - -TreeNodeWithAltNumField(X) ::= << -@parser::header { -MyRuleNode = function(parent, invokingState) { - antlr4.ParserRuleContext.call(this, parent, invokingState); - this.altNum = 0; - return this; -}; - -MyRuleNode.prototype = Object.create(antlr4.ParserRuleContext.prototype); -MyRuleNode.prototype.constructor = MyRuleNode; -} ->> - -TokenGetterListener(X) ::= << -@parser::members { -this.LeafListener = function() { - this.exitA = function(ctx) { - var str; - if(ctx.getChildCount()===2) { - str = ctx.INT(0).symbol.text + ' ' + ctx.INT(1).symbol.text + ' ' + antlr4.Utils.arrayToString(ctx.INT()); - } else { - str = ctx.ID().symbol.toString(); - } - document.getElementById('output').value += str + '\\n'; - }; - return this; -}; -this.LeafListener.prototype = Object.create(Listener.prototype); -this.LeafListener.prototype.constructor = this.LeafListener; -} ->> - -RuleGetterListener(X) ::= << -@parser::members { -this.LeafListener = function() { - this.exitA = function(ctx) { - var str; - if(ctx.getChildCount()===2) { - str = ctx.b(0).start.text + ' ' + ctx.b(1).start.text + ' ' + ctx.b()[0].start.text; - } else { - str = ctx.b(0).start.text; - } - document.getElementById('output').value += str + '\\n'; - }; - return this; -}; -this.LeafListener.prototype = Object.create(Listener.prototype); -this.LeafListener.prototype.constructor = this.LeafListener; -} ->> - - -LRListener(X) ::= << -@parser::members { -this.LeafListener = function() { - this.exitE = function(ctx) { - var str; - 
if(ctx.getChildCount()===3) { - str = ctx.e(0).start.text + ' ' + ctx.e(1).start.text + ' ' + ctx.e()[0].start.text; - } else { - str = ctx.INT().symbol.text; - } - document.getElementById('output').value += str + '\\n'; - }; - return this; -}; -this.LeafListener.prototype = Object.create(Listener.prototype); -this.LeafListener.prototype.constructor = this.LeafListener; -} ->> - -LRWithLabelsListener(X) ::= << -@parser::members { -this.LeafListener = function() { - this.exitCall = function(ctx) { - var str = ctx.e().start.text + ' ' + ctx.eList(); - document.getElementById('output').value += str + '\\n'; - }; - this.exitInt = function(ctx) { - var str = ctx.INT().symbol.text; - document.getElementById('output').value += str + '\\n'; - }; - return this; -}; -this.LeafListener.prototype = Object.create(Listener.prototype); -this.LeafListener.prototype.constructor = this.LeafListener; -} ->> - -DeclareContextListGettersFunction() ::= << - function foo() { - var s = new SContext(); - var a = s.a(); - var b = s.b(); - }; ->> - -Declare_foo() ::= "this.foo = function() {document.getElementById('output').value += 'foo' + '\\n';};" - -Invoke_foo() ::= "this.foo();" - -Declare_pred() ::= <> - -Invoke_pred(v) ::= <)>> -ParserTokenType(t) ::= "Parser." -ContextRuleFunction(ctx, rule) ::= "." -StringType() ::= "String" -ContextMember(ctx, subctx, member) ::= ".." diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Firefox.test.stg b/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Firefox.test.stg deleted file mode 100644 index 3882eb0590..0000000000 --- a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Firefox.test.stg +++ /dev/null @@ -1,298 +0,0 @@ -writeln(s) ::= < + '\\n';>> -write(s) ::= <;>> -writeList(s) ::= <;>> - -False() ::= "false" - -True() ::= "true" - -Not(v) ::= "!" 
- -Assert(s) ::= "" - -Cast(t,v) ::= "" - -Append(a,b) ::= " + " - -AppendStr(a,b) ::= <%%> - -Concat(a,b) ::= "" - -AssertIsList(v) ::= <> - -AssignLocal(s,v) ::= " = ;" - -InitIntMember(n,v) ::= <%this. = ;%> - -InitBooleanMember(n,v) ::= <%this. = ;%> - -InitIntVar(n,v) ::= <%%> - -IntArg(n) ::= "" - -VarRef(n) ::= "" - -GetMember(n) ::= <%this.%> - -SetMember(n,v) ::= <%this. = ;%> - -AddMember(n,v) ::= <%this. += ;%> - -MemberEquals(n,v) ::= <%this. === %> - -ModMemberEquals(n,m,v) ::= <%this. % === %> - -ModMemberNotEquals(n,m,v) ::= <%this. % != %> - -CheckVectorContext(s,v) ::= " = [].concat();" - -DumpDFA() ::= "this.dumpDFA();" - -Pass() ::= "" - -StringList() ::= "list" - -BuildParseTrees() ::= "this.buildParseTrees = true;" - -BailErrorStrategy() ::= <%this._errHandler = new antlr4.error.BailErrorStrategy();%> - -ToStringTree(s) ::= <%.toStringTree(null, this)%> - -Column() ::= "this.column" - -Text() ::= "this.text" - -ValEquals(a,b) ::= <%===%> - -TextEquals(a) ::= <%this.text===""%> - -PlusText(a) ::= <%"" + this.text%> - -InputText() ::= "this._input.getText()" - -LTEquals(i, v) ::= <%this._input.LT().text===%> - -LANotEquals(i, v) ::= <%this._input.LA()!=%> - -TokenStartColumnEquals(i) ::= <%this._tokenStartColumn===%> - -ImportListener(X) ::= "" - -GetExpectedTokenNames() ::= "this.getExpectedTokens().toString(this.literalNames)" - -RuleInvocationStack() ::= "antlr4.Utils.arrayToString(this.getRuleInvocationStack())" - -LL_EXACT_AMBIG_DETECTION() ::= <> - -ParserToken(parser, token) ::= <%.%> - -Production(p) ::= <%

    %> - -Result(r) ::= <%%> - -ParserPropertyMember() ::= << -@members { -this.Property = function() { - return true; -} -} ->> - -ParserPropertyCall(p, call) ::= "

    ." - -PositionAdjustingLexerDef() ::= "" - -PositionAdjustingLexer() ::= << - -PositionAdjustingLexer.prototype.resetAcceptPosition = function(index, line, column) { - this._input.seek(index); - this.line = line; - this.column = column; - this._interp.consume(this._input); -}; - -PositionAdjustingLexer.prototype.nextToken = function() { - if (!("resetAcceptPosition" in this._interp)) { - var lexer = this; - this._interp.resetAcceptPosition = function(index, line, column) { lexer.resetAcceptPosition(index, line, column); }; - } - return antlr4.Lexer.prototype.nextToken.call(this); -}; - -PositionAdjustingLexer.prototype.emit = function() { - switch(this._type) { - case PositionAdjustingLexer.TOKENS: - this.handleAcceptPositionForKeyword("tokens"); - break; - case PositionAdjustingLexer.LABEL: - this.handleAcceptPositionForIdentifier(); - break; - } - return antlr4.Lexer.prototype.emit.call(this); -}; - -PositionAdjustingLexer.prototype.handleAcceptPositionForIdentifier = function() { - var tokenText = this.text; - var identifierLength = 0; - while (identifierLength \< tokenText.length && - PositionAdjustingLexer.isIdentifierChar(tokenText[identifierLength]) - ) { - identifierLength += 1; - } - if (this._input.index > this._tokenStartCharIndex + identifierLength) { - var offset = identifierLength - 1; - this._interp.resetAcceptPosition(this._tokenStartCharIndex + offset, - this._tokenStartLine, this._tokenStartColumn + offset); - return true; - } else { - return false; - } -}; - -PositionAdjustingLexer.prototype.handleAcceptPositionForKeyword = function(keyword) { - if (this._input.index > this._tokenStartCharIndex + keyword.length) { - var offset = keyword.length - 1; - this._interp.resetAcceptPosition(this._tokenStartCharIndex + offset, - this._tokenStartLine, this._tokenStartColumn + offset); - return true; - } else { - return false; - } -}; - -PositionAdjustingLexer.isIdentifierChar = function(c) { - return c.match(/^[0-9a-zA-Z_]+$/); -} - ->> - 
-BasicListener(X) ::= << -@parser::members { -this.LeafListener = function() { - this.visitTerminal = function(node) { - document.getElementById('output').value += node.symbol.text + '\\n'; - }; - return this; -}; -this.LeafListener.prototype = Object.create(Listener.prototype); -this.LeafListener.prototype.constructor = this.LeafListener; -} ->> - -WalkListener(s) ::= << -var walker = new antlr4.tree.ParseTreeWalker(); -walker.walk(new this.LeafListener(), ); ->> - -TreeNodeWithAltNumField(X) ::= << -@parser::header { -MyRuleNode = function(parent, invokingState) { - antlr4.ParserRuleContext.call(this, parent, invokingState); - this.altNum = 0; - return this; -}; - -MyRuleNode.prototype = Object.create(antlr4.ParserRuleContext.prototype); -MyRuleNode.prototype.constructor = MyRuleNode; -} ->> - -TokenGetterListener(X) ::= << -@parser::members { -this.LeafListener = function() { - this.exitA = function(ctx) { - var str; - if(ctx.getChildCount()===2) { - str = ctx.INT(0).symbol.text + ' ' + ctx.INT(1).symbol.text + ' ' + antlr4.Utils.arrayToString(ctx.INT()); - } else { - str = ctx.ID().symbol.toString(); - } - document.getElementById('output').value += str + '\\n'; - }; - return this; -}; -this.LeafListener.prototype = Object.create(Listener.prototype); -this.LeafListener.prototype.constructor = this.LeafListener; -} ->> - -RuleGetterListener(X) ::= << -@parser::members { -this.LeafListener = function() { - this.exitA = function(ctx) { - var str; - if(ctx.getChildCount()===2) { - str = ctx.b(0).start.text + ' ' + ctx.b(1).start.text + ' ' + ctx.b()[0].start.text; - } else { - str = ctx.b(0).start.text; - } - document.getElementById('output').value += str + '\\n'; - }; - return this; -}; -this.LeafListener.prototype = Object.create(Listener.prototype); -this.LeafListener.prototype.constructor = this.LeafListener; -} ->> - - -LRListener(X) ::= << -@parser::members { -this.LeafListener = function() { - this.exitE = function(ctx) { - var str; - 
if(ctx.getChildCount()===3) { - str = ctx.e(0).start.text + ' ' + ctx.e(1).start.text + ' ' + ctx.e()[0].start.text; - } else { - str = ctx.INT().symbol.text; - } - document.getElementById('output').value += str + '\\n'; - }; - return this; -}; -this.LeafListener.prototype = Object.create(Listener.prototype); -this.LeafListener.prototype.constructor = this.LeafListener; -} ->> - -LRWithLabelsListener(X) ::= << -@parser::members { -this.LeafListener = function() { - this.exitCall = function(ctx) { - var str = ctx.e().start.text + ' ' + ctx.eList(); - document.getElementById('output').value += str + '\\n'; - }; - this.exitInt = function(ctx) { - var str = ctx.INT().symbol.text; - document.getElementById('output').value += str + '\\n'; - }; - return this; -}; -this.LeafListener.prototype = Object.create(Listener.prototype); -this.LeafListener.prototype.constructor = this.LeafListener; -} ->> - -DeclareContextListGettersFunction() ::= << - function foo() { - var s = new SContext(); - var a = s.a(); - var b = s.b(); - }; ->> - -Declare_foo() ::= "this.foo = function() {document.getElementById('output').value += 'foo' + '\\n';};" - -Invoke_foo() ::= "this.foo();" - -Declare_pred() ::= <> - -Invoke_pred(v) ::= <)>> -ParserTokenType(t) ::= "Parser." -ContextRuleFunction(ctx, rule) ::= "." -StringType() ::= "String" -ContextMember(ctx, subctx, member) ::= ".." 
diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Go.test.stg b/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Go.test.stg index 8ab158d8c5..f7c1240d7d 100644 --- a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Go.test.stg +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Go.test.stg @@ -18,7 +18,16 @@ AppendStr(a,b) ::= " + " Concat(a,b) ::= "" -AssertIsList(v) ::= "" +AssertIsList(v) ::= << +// A noddy range over the list will not compile if it is not getting a slice +// however, Go will not compile the generated code if the slice vs single value is wrong. +// Makes the Java based tests suite work though. +j1__ := make([]interface{}, len()) +j2__ := +for j3__ := range j2__ { + j1__[j3__] = j2__[j3__] +} +>> AssignLocal(s, v) ::= " = ;" @@ -236,7 +245,7 @@ func NewLeafListener() *LeafListener { func (*LeafListener) ExitA(ctx *AContext) { if ctx.GetChildCount() == 2 { - fmt.Printf("%s %s %s", ctx.INT(0).GetSymbol().GetText(), ctx.INT(1).GetSymbol().GetText(), antlr.PrintArrayJavaStyle(antlr.TerminalNodeToStringArray(ctx.AllINT()))) + fmt.Printf("%s %s %s\n", ctx.INT(0).GetSymbol().GetText(), ctx.INT(1).GetSymbol().GetText(), antlr.PrintArrayJavaStyle(antlr.TerminalNodeToStringArray(ctx.AllINT()))) } else { fmt.Println(ctx.ID().GetSymbol()) } @@ -256,7 +265,7 @@ func NewLeafListener() *LeafListener { func (*LeafListener) ExitA(ctx *AContext) { if ctx.GetChildCount() == 2 { - fmt.Printf("%s %s %s", ctx.B(0).GetStart().GetText(), ctx.B(1).GetStart().GetText(), ctx.AllB()[0].GetStart().GetText()) + fmt.Printf("%s %s %s\n", ctx.B(0).GetStart().GetText(), ctx.B(1).GetStart().GetText(), ctx.AllB()[0].GetStart().GetText()) } else { fmt.Println(ctx.B(0).GetStart().GetText()) } @@ -295,7 +304,7 @@ func NewLeafListener() *LeafListener { } func (*LeafListener) ExitCall(ctx *CallContext) { - fmt.Printf("%s %s", ctx.E().GetStart().GetText(), ctx.EList().String(nil, nil)) + fmt.Printf("%s %s\n", 
ctx.E().GetStart().GetText(), ctx.EList().String(nil, nil)) } func (*LeafListener) ExitInt(ctx *IntContext) { diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Java.test.stg b/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Java.test.stg index 16607933f4..b1aa46db10 100644 --- a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Java.test.stg +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Java.test.stg @@ -1,6 +1,6 @@ -writeln(s) ::= <);>> -write(s) ::= <);>> -writeList(s) ::= <);>> +writeln(s) ::= <);>> +write(s) ::= <);>> +writeList(s) ::= <);>> False() ::= "false" @@ -44,7 +44,7 @@ ModMemberEquals(n,m,v) ::= <%this. % == %> ModMemberNotEquals(n,m,v) ::= <%this. % != %> -DumpDFA() ::= "this.dumpDFA();" +DumpDFA() ::= "this.dumpDFA(outStream);" Pass() ::= "" @@ -186,9 +186,9 @@ protected static class PositionAdjustingLexerATNSimulator extends LexerATNSimula BasicListener(X) ::= << @parser::members { -public static class LeafListener extends TBaseListener { +public class LeafListener extends TBaseListener { public void visitTerminal(TerminalNode node) { - System.out.println(node.getSymbol().getText()); + outStream.println(node.getSymbol().getText()); } } } @@ -214,13 +214,13 @@ public static class MyRuleNode extends ParserRuleContext { TokenGetterListener(X) ::= << @parser::members { -public static class LeafListener extends TBaseListener { +public class LeafListener extends TBaseListener { public void exitA(TParser.AContext ctx) { if (ctx.getChildCount()==2) - System.out.printf("%s %s %s",ctx.INT(0).getSymbol().getText(), + outStream.printf("%s %s %s\n",ctx.INT(0).getSymbol().getText(), ctx.INT(1).getSymbol().getText(),ctx.INT()); else - System.out.println(ctx.ID().getSymbol()); + outStream.println(ctx.ID().getSymbol()); } } } @@ -228,13 +228,13 @@ public static class LeafListener extends TBaseListener { RuleGetterListener(X) ::= << @parser::members { -public static class LeafListener 
extends TBaseListener { +public class LeafListener extends TBaseListener { public void exitA(TParser.AContext ctx) { if (ctx.getChildCount()==2) { - System.out.printf("%s %s %s",ctx.b(0).start.getText(), + outStream.printf("%s %s %s\n",ctx.b(0).start.getText(), ctx.b(1).start.getText(),ctx.b().get(0).start.getText()); } else - System.out.println(ctx.b(0).start.getText()); + outStream.println(ctx.b(0).start.getText()); } } } @@ -243,13 +243,13 @@ public static class LeafListener extends TBaseListener { LRListener(X) ::= << @parser::members { -public static class LeafListener extends TBaseListener { +public class LeafListener extends TBaseListener { public void exitE(TParser.EContext ctx) { if (ctx.getChildCount()==3) { - System.out.printf("%s %s %s\n",ctx.e(0).start.getText(), + outStream.printf("%s %s %s\n",ctx.e(0).start.getText(), ctx.e(1).start.getText(), ctx.e().get(0).start.getText()); } else - System.out.println(ctx.INT().getSymbol().getText()); + outStream.println(ctx.INT().getSymbol().getText()); } } } @@ -257,12 +257,12 @@ public static class LeafListener extends TBaseListener { LRWithLabelsListener(X) ::= << @parser::members { -public static class LeafListener extends TBaseListener { +public class LeafListener extends TBaseListener { public void exitCall(TParser.CallContext ctx) { - System.out.printf("%s %s",ctx.e().start.getText(),ctx.eList()); + outStream.printf("%s %s\n",ctx.e().start.getText(),ctx.eList()); } public void exitInt(TParser.IntContext ctx) { - System.out.println(ctx.INT().getSymbol().getText()); + outStream.println(ctx.INT().getSymbol().getText()); } } } @@ -277,13 +277,13 @@ void foo() { >> Declare_foo() ::= << - public void foo() {System.out.println("foo");} + public void foo() {outStream.println("foo");} >> Invoke_foo() ::= "foo();" Declare_pred() ::= <" -AssertIsList(v) ::= <> +AssertIsList(v) ::= < instanceof Array) ) {throw "value is not an array";}>> AssignLocal(s,v) ::= " = ;" diff --git 
a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Python2.test.stg b/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Python2.test.stg index 2292cafe7b..67e93f814f 100644 --- a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Python2.test.stg +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Python2.test.stg @@ -18,7 +18,7 @@ AppendStr(a,b) ::= " + " Concat(a,b) ::= "" -AssertIsList(v) ::= "assert isinstance(v, (list, tuple))" +AssertIsList(v) ::= "assert isinstance(, (list, tuple))" AssignLocal(s,v) ::= " = " diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Python3.test.stg b/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Python3.test.stg index 65dcdcd83a..8ea28c57cf 100644 --- a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Python3.test.stg +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Python3.test.stg @@ -18,7 +18,7 @@ AppendStr(a,b) ::= " + " Concat(a,b) ::= "" -AssertIsList(v) ::= "assert isinstance(v, (list, tuple))" +AssertIsList(v) ::= "assert isinstance(, (list, tuple))" AssignLocal(s,v) ::= " = " diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Safari.test.stg b/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Safari.test.stg deleted file mode 100644 index a33f612d35..0000000000 --- a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Safari.test.stg +++ /dev/null @@ -1,297 +0,0 @@ -writeln(s) ::= < + '\\n';>> -write(s) ::= <;>> -writeList(s) ::= <;>> - -False() ::= "false" - -True() ::= "true" - -Not(v) ::= "!" - -Assert(s) ::= "" - -Cast(t,v) ::= "" - -Append(a,b) ::= " + " - -AppendStr(a,b) ::= <%%> - -Concat(a,b) ::= "" - -AssertIsList(v) ::= <> - -AssignLocal(s,v) ::= " = ;" - -InitIntMember(n,v) ::= <%this. = ;%> - -InitBooleanMember(n,v) ::= <%this. 
= ;%> - -InitIntVar(n,v) ::= <%%> - -IntArg(n) ::= "" - -VarRef(n) ::= "" - -GetMember(n) ::= <%this.%> - -SetMember(n,v) ::= <%this. = ;%> - -AddMember(n,v) ::= <%this. += ;%> - -MemberEquals(n,v) ::= <%this. === %> - -ModMemberEquals(n,m,v) ::= <%this. % === %> - -ModMemberNotEquals(n,m,v) ::= <%this. % != %> - -DumpDFA() ::= "this.dumpDFA();" - -Pass() ::= "" - -StringList() ::= "list" - -BuildParseTrees() ::= "this.buildParseTrees = true;" - -BailErrorStrategy() ::= <%this._errHandler = new antlr4.error.BailErrorStrategy();%> - -ToStringTree(s) ::= <%.toStringTree(null, this)%> - -Column() ::= "this.column" - -Text() ::= "this.text" - -ValEquals(a,b) ::= <%===%> - -TextEquals(a) ::= <%this.text===""%> - -PlusText(a) ::= <%"" + this.text%> - -InputText() ::= "this._input.getText()" - -LTEquals(i, v) ::= <%this._input.LT().text===%> - -LANotEquals(i, v) ::= <%this._input.LA()!=%> - -TokenStartColumnEquals(i) ::= <%this._tokenStartColumn===%> - -ImportListener(X) ::= "" - -GetExpectedTokenNames() ::= "this.getExpectedTokens().toString(this.literalNames)" - -RuleInvocationStack() ::= "antlr4.Utils.arrayToString(this.getRuleInvocationStack())" - -LL_EXACT_AMBIG_DETECTION() ::= <> - -ParserToken(parser, token) ::= <%.%> - -Production(p) ::= <%

    %> - -Result(r) ::= <%%> - -ParserPropertyMember() ::= << -@members { -this.Property = function() { - return true; -} -} ->> - -ParserPropertyCall(p, call) ::= "

    ." - -PositionAdjustingLexerDef() ::= "" - -PositionAdjustingLexer() ::= << - -PositionAdjustingLexer.prototype.resetAcceptPosition = function(index, line, column) { - this._input.seek(index); - this.line = line; - this.column = column; - this._interp.consume(this._input); -}; - -PositionAdjustingLexer.prototype.nextToken = function() { - if (!("resetAcceptPosition" in this._interp)) { - var lexer = this; - this._interp.resetAcceptPosition = function(index, line, column) { lexer.resetAcceptPosition(index, line, column); }; - } - return antlr4.Lexer.prototype.nextToken.call(this); -}; - -PositionAdjustingLexer.prototype.emit = function() { - switch(this._type) { - case PositionAdjustingLexer.TOKENS: - this.handleAcceptPositionForKeyword("tokens"); - break; - case PositionAdjustingLexer.LABEL: - this.handleAcceptPositionForIdentifier(); - break; - } - return antlr4.Lexer.prototype.emit.call(this); -}; - -PositionAdjustingLexer.prototype.handleAcceptPositionForIdentifier = function() { - var tokenText = this.text; - var identifierLength = 0; - while (identifierLength \< tokenText.length && - PositionAdjustingLexer.isIdentifierChar(tokenText[identifierLength]) - ) { - identifierLength += 1; - } - if (this._input.index > this._tokenStartCharIndex + identifierLength) { - var offset = identifierLength - 1; - this._interp.resetAcceptPosition(this._tokenStartCharIndex + offset, - this._tokenStartLine, this._tokenStartColumn + offset); - return true; - } else { - return false; - } -}; - -PositionAdjustingLexer.prototype.handleAcceptPositionForKeyword = function(keyword) { - if (this._input.index > this._tokenStartCharIndex + keyword.length) { - var offset = keyword.length - 1; - this._interp.resetAcceptPosition(this._tokenStartCharIndex + offset, - this._tokenStartLine, this._tokenStartColumn + offset); - return true; - } else { - return false; - } -}; - -PositionAdjustingLexer.isIdentifierChar = function(c) { - return c.match(/^[0-9a-zA-Z_]+$/); -} - ->> - 
-BasicListener(X) ::= << -@parser::members { -this.LeafListener = function() { - this.visitTerminal = function(node) { - document.getElementById('output').value += node.symbol.text + '\\n'; - }; - return this; -}; -this.LeafListener.prototype = Object.create(Listener.prototype); -this.LeafListener.prototype.constructor = this.LeafListener; -} ->> - -WalkListener(s) ::= << -var walker = new antlr4.tree.ParseTreeWalker(); -walker.walk(new this.LeafListener(), ); ->> - -TreeNodeWithAltNumField(X) ::= << -@parser::header { -MyRuleNode = function(parent, invokingState) { - antlr4.ParserRuleContext.call(this, parent, invokingState); - this.altNum = 0; - return this; -}; - -MyRuleNode.prototype = Object.create(antlr4.ParserRuleContext.prototype); -MyRuleNode.prototype.constructor = MyRuleNode; -} ->> - - -TokenGetterListener(X) ::= << -@parser::members { -this.LeafListener = function() { - this.exitA = function(ctx) { - var str; - if(ctx.getChildCount()===2) { - str = ctx.INT(0).symbol.text + ' ' + ctx.INT(1).symbol.text + ' ' + antlr4.Utils.arrayToString(ctx.INT()); - } else { - str = ctx.ID().symbol.toString(); - } - document.getElementById('output').value += str + '\\n'; - }; - return this; -}; -this.LeafListener.prototype = Object.create(Listener.prototype); -this.LeafListener.prototype.constructor = this.LeafListener; -} ->> - -RuleGetterListener(X) ::= << -@parser::members { -this.LeafListener = function() { - this.exitA = function(ctx) { - var str; - if(ctx.getChildCount()===2) { - str = ctx.b(0).start.text + ' ' + ctx.b(1).start.text + ' ' + ctx.b()[0].start.text; - } else { - str = ctx.b(0).start.text; - } - document.getElementById('output').value += str + '\\n'; - }; - return this; -}; -this.LeafListener.prototype = Object.create(Listener.prototype); -this.LeafListener.prototype.constructor = this.LeafListener; -} ->> - - -LRListener(X) ::= << -@parser::members { -this.LeafListener = function() { - this.exitE = function(ctx) { - var str; - 
if(ctx.getChildCount()===3) { - str = ctx.e(0).start.text + ' ' + ctx.e(1).start.text + ' ' + ctx.e()[0].start.text; - } else { - str = ctx.INT().symbol.text; - } - document.getElementById('output').value += str + '\\n'; - }; - return this; -}; -this.LeafListener.prototype = Object.create(Listener.prototype); -this.LeafListener.prototype.constructor = this.LeafListener; -} ->> - -LRWithLabelsListener(X) ::= << -@parser::members { -this.LeafListener = function() { - this.exitCall = function(ctx) { - var str = ctx.e().start.text + ' ' + ctx.eList(); - document.getElementById('output').value += str + '\\n'; - }; - this.exitInt = function(ctx) { - var str = ctx.INT().symbol.text; - document.getElementById('output').value += str + '\\n'; - }; - return this; -}; -this.LeafListener.prototype = Object.create(Listener.prototype); -this.LeafListener.prototype.constructor = this.LeafListener; -} ->> - -DeclareContextListGettersFunction() ::= << - function foo() { - var s = new SContext(); - var a = s.a(); - var b = s.b(); - }; ->> - -Declare_foo() ::= "this.foo = function() {document.getElementById('output').value += 'foo' + '\\n';};" - -Invoke_foo() ::= "this.foo();" - -Declare_pred() ::= <> - -Invoke_pred(v) ::= <)>> -ParserTokenType(t) ::= "Parser." -ContextRuleFunction(ctx, rule) ::= "." -StringType() ::= "String" -ContextMember(ctx, subctx, member) ::= ".." 
diff --git a/runtime-testsuite/test/org/antlr/v4/runtime/TestCodePointCharStream.java b/runtime-testsuite/test/org/antlr/v4/runtime/TestCodePointCharStream.java index c40c4048ca..7c1792374e 100644 --- a/runtime-testsuite/test/org/antlr/v4/runtime/TestCodePointCharStream.java +++ b/runtime-testsuite/test/org/antlr/v4/runtime/TestCodePointCharStream.java @@ -7,17 +7,11 @@ package org.antlr.v4.runtime; import org.antlr.v4.runtime.misc.Interval; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; +import org.junit.jupiter.api.Test; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.*; public class TestCodePointCharStream { - @Rule - public ExpectedException thrown = ExpectedException.none(); - @Test public void emptyBytesHasSize0() { CodePointCharStream s = CharStreams.fromString(""); @@ -36,9 +30,11 @@ public void emptyBytesLookAheadReturnsEOF() { @Test public void consumingEmptyStreamShouldThrow() { CodePointCharStream s = CharStreams.fromString(""); - thrown.expect(IllegalStateException.class); - thrown.expectMessage("cannot consume EOF"); - s.consume(); + IllegalStateException illegalStateException = assertThrows( + IllegalStateException.class, + s::consume + ); + assertEquals("cannot consume EOF", illegalStateException.getMessage()); } @Test @@ -59,9 +55,8 @@ public void consumingSingleLatinCodePointShouldMoveIndex() { public void consumingPastSingleLatinCodePointShouldThrow() { CodePointCharStream s = CharStreams.fromString("X"); s.consume(); - thrown.expect(IllegalStateException.class); - thrown.expectMessage("cannot consume EOF"); - s.consume(); + IllegalStateException illegalStateException = assertThrows(IllegalStateException.class, s::consume); + assertEquals("cannot consume EOF", illegalStateException.getMessage()); } @Test @@ -107,9 +102,8 @@ public void consumingSingleCJKCodePointShouldMoveIndex() { public void 
consumingPastSingleCJKCodePointShouldThrow() { CodePointCharStream s = CharStreams.fromString("\u611B"); s.consume(); - thrown.expect(IllegalStateException.class); - thrown.expectMessage("cannot consume EOF"); - s.consume(); + IllegalStateException illegalStateException = assertThrows(IllegalStateException.class, s::consume); + assertEquals("cannot consume EOF", illegalStateException.getMessage()); } @Test @@ -150,9 +144,8 @@ public void consumingPastEndOfEmojiCodePointWithShouldThrow() { assertEquals(0, s.index()); s.consume(); assertEquals(1, s.index()); - thrown.expect(IllegalStateException.class); - thrown.expectMessage("cannot consume EOF"); - s.consume(); + IllegalStateException illegalStateException = assertThrows(IllegalStateException.class, s::consume); + assertEquals("cannot consume EOF", illegalStateException.getMessage()); } @Test diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/BaseRuntimeTest.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/BaseRuntimeTest.java deleted file mode 100644 index fdd476a6f8..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/BaseRuntimeTest.java +++ /dev/null @@ -1,782 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime; - -import org.antlr.v4.Tool; -import org.antlr.v4.runtime.misc.Pair; -import org.antlr.v4.runtime.misc.Utils; -import org.antlr.v4.tool.ANTLRMessage; -import org.antlr.v4.tool.DefaultToolListener; -import org.junit.*; -import org.junit.rules.TestRule; -import org.junit.rules.TestWatcher; -import org.junit.runner.Description; -import org.stringtemplate.v4.ST; -import org.stringtemplate.v4.STGroup; -import org.stringtemplate.v4.STGroupFile; -import org.stringtemplate.v4.StringRenderer; - -import java.io.File; -import java.io.IOException; -import java.net.URI; -import java.net.URISyntaxException; -import java.net.URL; -import java.nio.file.Files; -import java.nio.file.Paths; -import java.util.*; - -import static junit.framework.TestCase.fail; -import static junit.framework.TestCase.failNotEquals; -import static org.junit.Assume.assumeFalse; - -/** This class represents a single runtime test. It pulls data from - * a {@link RuntimeTestDescriptor} and uses junit to trigger a test. - * The only functionality needed to execute a test is defined in - * {@link RuntimeTestSupport}. All of the various test rig classes - * derived from this one. E.g., see {@link org.antlr.v4.test.runtime.java.TestParserExec}. - * - * @since 4.6. 
- */ -public abstract class BaseRuntimeTest { - final static Set sections = new HashSet<>(Arrays.asList( - "notes", "type", "grammar", "slaveGrammar", "start", "input", "output", "errors", "flags", "skip" - )); - - @BeforeClass - public static void startHeartbeatToAvoidTimeout() { - if(requiresHeartbeat()) { - startHeartbeat(); - } - } - - private static boolean requiresHeartbeat() { - return isTravisCI() - || isAppVeyorCI() - || (isCPP() && isRecursion()) - || (isCircleCI() && isGo()) - || (isCircleCI() && isDotNet() && isRecursion()); - } - - @AfterClass - public static void stopHeartbeat() { - heartbeat = false; - } - - private static boolean isRecursion() { - String s = System.getenv("GROUP"); - return "recursion".equalsIgnoreCase(s); - } - - private static boolean isGo() { - String s = System.getenv("TARGET"); - return "go".equalsIgnoreCase(s); - } - - private static boolean isCPP() { - String s = System.getenv("TARGET"); - return "cpp".equalsIgnoreCase(s); - } - - private static boolean isDotNet() { - String s = System.getenv("TARGET"); - return "dotnet".equalsIgnoreCase(s); - } - - private static boolean isCircleCI() { - // see https://circleci.com/docs/2.0/env-vars/#built-in-environment-variables - String s = System.getenv("CIRCLECI"); - return "true".equalsIgnoreCase(s); - } - - private static boolean isAppVeyorCI() { - // see https://www.appveyor.com/docs/environment-variables/ - String s = System.getenv("APPVEYOR"); - return "true".equalsIgnoreCase(s); - } - - private static boolean isTravisCI() { - // see https://docs.travis-ci.com/user/environment-variables/#default-environment-variables - String s = System.getenv("TRAVIS"); - return "true".equalsIgnoreCase(s); - } - - static boolean heartbeat = false; - - private static void startHeartbeat() { - // Add heartbeat thread to gen minimal output for travis, appveyor to avoid timeout. 
- Thread t = new Thread("heartbeat") { - @Override - public void run() { - heartbeat = true; - while (heartbeat) { - try { - //noinspection BusyWait - Thread.sleep(10000); - } catch (Exception e) { - e.printStackTrace(); - } - System.out.print('.'); - } - } - }; - t.start(); - } - - /** ANTLR isn't thread-safe to process grammars so we use a global lock for testing */ - public static final Object antlrLock = new Object(); - - protected RuntimeTestSupport delegate; - protected RuntimeTestDescriptor descriptor; - - public BaseRuntimeTest(RuntimeTestDescriptor descriptor, RuntimeTestSupport delegate) { - this.descriptor = descriptor; - this.delegate = delegate; - } - - @Before - public void setUp() throws Exception { - // From http://junit.sourceforge.net/javadoc/org/junit/Assume.html - // "The default JUnit runner treats tests with failing assumptions as ignored" - assumeFalse(checkIgnored()); - delegate.testSetUp(); - } - - public boolean checkIgnored() { - boolean ignored = !TestContext.isSupportedTarget(descriptor.getTarget()) || descriptor.ignore(descriptor.getTarget()); - if (ignored) { - System.out.println("Ignore " + descriptor); - } - return ignored; - } - - @Rule - public final TestRule testWatcher = new TestWatcher() { - @Override - protected void succeeded(Description description) { - // remove tmpdir if no error. 
- delegate.eraseTempDir(); - } - }; - - @Test - public void testOne() throws Exception { - // System.out.println(descriptor.getTestName()); - // System.out.println(delegate.getTmpDir()); - if (descriptor.ignore(descriptor.getTarget()) ) { - System.out.println("Ignore " + descriptor); - return; - } - delegate.beforeTest(descriptor); - if (descriptor.getTestType().contains("Parser") ) { - testParser(descriptor); - } - else { - testLexer(descriptor); - } - delegate.afterTest(descriptor); - } - - public void testParser(RuntimeTestDescriptor descriptor) { - RuntimeTestUtils.mkdir(delegate.getTempParserDirPath()); - - Pair pair = descriptor.getGrammar(); - - ClassLoader cloader = getClass().getClassLoader(); - URL templates = cloader.getResource("org/antlr/v4/test/runtime/templates/"+descriptor.getTarget()+".test.stg"); - STGroupFile targetTemplates = new STGroupFile(templates, "UTF-8", '<', '>'); - targetTemplates.registerRenderer(String.class, new StringRenderer()); - - // write out any slave grammars - List> slaveGrammars = descriptor.getSlaveGrammars(); - if ( slaveGrammars!=null ) { - for (Pair spair : slaveGrammars) { - STGroup g = new STGroup('<', '>'); - g.registerRenderer(String.class, new StringRenderer()); - g.importTemplates(targetTemplates); - ST grammarST = new ST(g, spair.b); - writeFile(delegate.getTempParserDirPath(), spair.a+".g4", grammarST.render()); - } - } - - String grammarName = pair.a; - String grammar = pair.b; - STGroup g = new STGroup('<', '>'); - g.importTemplates(targetTemplates); - g.registerRenderer(String.class, new StringRenderer()); - ST grammarST = new ST(g, grammar); - grammar = grammarST.render(); - - String found = delegate.execParser(grammarName+".g4", grammar, - grammarName+"Parser", - grammarName+"Lexer", - grammarName+"Listener", - grammarName+"Visitor", - descriptor.getStartRule(), - descriptor.getInput(), - descriptor.showDiagnosticErrors() - ); - assertCorrectOutput(descriptor, delegate, found); - } - - public void 
testLexer(RuntimeTestDescriptor descriptor) throws Exception { - RuntimeTestUtils.mkdir(delegate.getTempParserDirPath()); - - Pair pair = descriptor.getGrammar(); - - ClassLoader cloader = getClass().getClassLoader(); - URL templates = cloader.getResource("org/antlr/v4/test/runtime/templates/"+descriptor.getTarget()+".test.stg"); - STGroupFile targetTemplates = new STGroupFile(templates, "UTF-8", '<', '>'); - targetTemplates.registerRenderer(String.class, new StringRenderer()); - - // write out any slave grammars - List> slaveGrammars = descriptor.getSlaveGrammars(); - if ( slaveGrammars!=null ) { - for (Pair spair : slaveGrammars) { - STGroup g = new STGroup('<', '>'); - g.registerRenderer(String.class, new StringRenderer()); - g.importTemplates(targetTemplates); - ST grammarST = new ST(g, spair.b); - writeFile(delegate.getTempParserDirPath(), spair.a+".g4", grammarST.render()); - } - } - - String grammarName = pair.a; - String grammar = pair.b; - STGroup g = new STGroup('<', '>'); - g.registerRenderer(String.class, new StringRenderer()); - g.importTemplates(targetTemplates); - ST grammarST = new ST(g, grammar); - grammar = grammarST.render(); - - String found = delegate.execLexer(grammarName+".g4", grammar, grammarName, descriptor.getInput(), descriptor.showDFA()); - assertCorrectOutput(descriptor, delegate, found); - } - - /** Write a grammar to tmpdir and run antlr */ - public static ErrorQueue antlrOnString(String workdir, - String targetName, - String grammarFileName, - String grammarStr, - boolean defaultListener, - String... extraOptions) - { - RuntimeTestUtils.mkdir(workdir); - writeFile(workdir, grammarFileName, grammarStr); - return antlrOnString(workdir, targetName, grammarFileName, defaultListener, extraOptions); - } - - /** Run ANTLR on stuff in workdir and error queue back */ - public static ErrorQueue antlrOnString(String workdir, - String targetName, - String grammarFileName, - boolean defaultListener, - String... 
extraOptions) - { - final List options = new ArrayList<>(); - Collections.addAll(options, extraOptions); - if ( targetName!=null ) { - options.add("-Dlanguage="+targetName); - } - if ( !options.contains("-o") ) { - options.add("-o"); - options.add(workdir); - } - if ( !options.contains("-lib") ) { - options.add("-lib"); - options.add(workdir); - } - if ( !options.contains("-encoding") ) { - options.add("-encoding"); - options.add("UTF-8"); - } - options.add(new File(workdir,grammarFileName).toString()); - - final String[] optionsA = new String[options.size()]; - options.toArray(optionsA); - Tool antlr = new Tool(optionsA); - ErrorQueue equeue = new ErrorQueue(antlr); - antlr.addListener(equeue); - if (defaultListener) { - antlr.addListener(new DefaultToolListener(antlr)); - } - synchronized (antlrLock) { - antlr.processGrammarsOnCommandLine(); - } - - List errors = new ArrayList<>(); - - if ( !defaultListener && !equeue.errors.isEmpty() ) { - for (int i = 0; i < equeue.errors.size(); i++) { - ANTLRMessage msg = equeue.errors.get(i); - ST msgST = antlr.errMgr.getMessageTemplate(msg); - errors.add(msgST.render()); - } - } - if ( !defaultListener && !equeue.warnings.isEmpty() ) { - for (int i = 0; i < equeue.warnings.size(); i++) { - ANTLRMessage msg = equeue.warnings.get(i); - // antlrToolErrors.append(msg); warnings are hushed - } - } - - return equeue; - } - - // ---- support ---- - - public static RuntimeTestDescriptor[] getRuntimeTestDescriptors(String group, String targetName) { - final ClassLoader loader = Thread.currentThread().getContextClassLoader(); - final URL descrURL = loader.getResource("org/antlr/v4/test/runtime/descriptors/" +group); - String[] descriptorFilenames = null; - try { - descriptorFilenames = new File(descrURL.toURI()).list(); - } - catch (URISyntaxException e) { - System.err.println("Bad URL:"+descrURL); - } - -// String[] descriptorFilenames = new File("/tmp/descriptors/"+group).list(); - List descriptors = new ArrayList<>(); - for 
(String fname : descriptorFilenames) { - try { -// String dtext = Files.readString(Path.of("/tmp/descriptors",group,fname)); - final URL dURL = loader.getResource("org/antlr/v4/test/runtime/descriptors/" +group+"/"+fname); - String dtext = null; - try { - URI uri = dURL.toURI(); - dtext = new String(Files.readAllBytes(Paths.get(uri))); - } - catch (URISyntaxException e) { - System.err.println("Bad URL:"+dURL); - } - UniversalRuntimeTestDescriptor d = readDescriptor(dtext); - if ( !d.ignore(targetName) ) { - d.name = fname.replace(".txt", ""); - d.targetName = targetName; - descriptors.add(d); - } - } - catch (IOException ioe) { - System.err.println("Can't read descriptor file "+fname); - } - } - - if (group.equals("LexerExec")) { - descriptors.add(GeneratedLexerDescriptors.getLineSeparatorLfDescriptor(targetName)); - descriptors.add(GeneratedLexerDescriptors.getLineSeparatorCrLfDescriptor(targetName)); - descriptors.add(GeneratedLexerDescriptors.getLargeLexerDescriptor(targetName)); - descriptors.add(GeneratedLexerDescriptors.getAtnStatesSizeMoreThan65535Descriptor(targetName)); - } - - return descriptors.toArray(new RuntimeTestDescriptor[0]); - } - - /** Read stuff like: - [grammar] - grammar T; - s @after {} - : ID | ID {} ; - ID : 'a'..'z'+; - WS : (' '|'\t'|'\n')+ -> skip ; - - [grammarName] - T - - [start] - s - - [input] - abc - - [output] - Decision 0: - s0-ID->:s1^=>1 - - [errors] - """line 1:0 reportAttemptingFullContext d=0 (s), input='abc' - """ - - Some can be missing like [errors]. 
- - Get gr names automatically "lexer grammar Unicode;" "grammar T;" "parser grammar S;" - - Also handle slave grammars: - - [grammar] - grammar M; - import S,T; - s : a ; - B : 'b' ; // defines B from inherited token space - WS : (' '|'\n') -> skip ; - - [slaveGrammar] - parser grammar T; - a : B {}; - - [slaveGrammar] - parser grammar S; - a : b {}; - b : B; - */ - public static UniversalRuntimeTestDescriptor readDescriptor(String dtext) - throws RuntimeException - { - String currentField = null; - StringBuilder currentValue = new StringBuilder(); - - List> pairs = new ArrayList<>(); - String[] lines = dtext.split("\r?\n"); - - for (String line : lines) { - boolean newSection = false; - String sectionName = null; - if (line.startsWith("[") && line.length() > 2) { - sectionName = line.substring(1, line.length() - 1); - newSection = sections.contains(sectionName); - } - - if (newSection) { - if (currentField != null) { - pairs.add(new Pair<>(currentField, currentValue.toString())); - } - currentField = sectionName; - currentValue.setLength(0); - } - else { - currentValue.append(line); - currentValue.append("\n"); - } - } - pairs.add(new Pair<>(currentField, currentValue.toString())); - - UniversalRuntimeTestDescriptor d = new UniversalRuntimeTestDescriptor(); - for (Pair p : pairs) { - String section = p.a; - String value = ""; - if ( p.b!=null ) { - value = p.b.trim(); - } - if ( value.startsWith("\"\"\"") ) { - value = value.replace("\"\"\"", ""); - } - else if ( value.indexOf('\n')>=0 ) { - value = value + "\n"; // if multi line and not quoted, leave \n on end. 
- } - switch (section) { - case "notes": - d.notes = value; - break; - case "type": - d.testType = value; - break; - case "grammar": - d.grammarName = getGrammarName(value.split("\n")[0]); - d.grammar = value; - break; - case "slaveGrammar": - String gname = getGrammarName(value.split("\n")[0]); - d.slaveGrammars.add(new Pair<>(gname, value)); - case "start": - d.startRule = value; - break; - case "input": - d.input = value; - break; - case "output": - d.output = value; - break; - case "errors": - d.errors = value; - break; - case "flags": - String[] flags = value.split("\n"); - for (String f : flags) { - switch (f) { - case "showDFA": - d.showDFA = true; - break; - case "showDiagnosticErrors": - d.showDiagnosticErrors = true; - break; - } - } - break; - case "skip": - d.skipTargets = Arrays.asList(value.split("\n")); - break; - default: - throw new RuntimeException("Unknown descriptor section ignored: "+section); - } - } - return d; - } - - /** Get A, B, or C from: - * "lexer grammar A;" "grammar B;" "parser grammar C;" - */ - public static String getGrammarName(String grammarDeclLine) { - int gi = grammarDeclLine.indexOf("grammar "); - if ( gi<0 ) { - return ""; - } - gi += "grammar ".length(); - int gsemi = grammarDeclLine.indexOf(';'); - return grammarDeclLine.substring(gi, gsemi); - } - - public static void writeFile(String dir, String fileName, String content) { - try { - Utils.writeFile(dir+"/"+fileName, content, "UTF-8"); - } - catch (IOException ioe) { - System.err.println("can't write file"); - ioe.printStackTrace(System.err); - } - } - - public static String readFile(String dir, String fileName) { - try { - return String.copyValueOf(Utils.readFile(dir+"/"+fileName, "UTF-8")); - } - catch (IOException ioe) { - System.err.println("can't read file"); - ioe.printStackTrace(System.err); - } - return null; - } - - protected static void assertCorrectOutput(RuntimeTestDescriptor descriptor, RuntimeTestSupport delegate, String actualOutput) { - String 
actualParseErrors = delegate.getParseErrors(); - String actualToolErrors = delegate.getANTLRToolErrors(); - String expectedOutput = descriptor.getOutput(); - String expectedParseErrors = descriptor.getErrors(); - String expectedToolErrors = descriptor.getANTLRToolErrors(); - - if (actualOutput == null) { - actualOutput = ""; - } - if (actualParseErrors == null) { - actualParseErrors = ""; - } - if (actualToolErrors == null) { - actualToolErrors = ""; - } - if (expectedOutput == null) { - expectedOutput = ""; - } - if (expectedParseErrors == null) { - expectedParseErrors = ""; - } - if (expectedToolErrors == null) { - expectedToolErrors = ""; - } - - if (actualOutput.equals(expectedOutput) && - actualParseErrors.equals(expectedParseErrors) && - actualToolErrors.equals(expectedToolErrors)) { - return; - } - - if (actualOutput.equals(expectedOutput)) { - if (actualParseErrors.equals(expectedParseErrors)) { - failNotEquals("[" + descriptor.getTarget() + ":" + descriptor.getTestName() + "] " + - "Parse output and parse errors are as expected, but tool errors are incorrect", - expectedToolErrors, actualToolErrors); - } - else { - fail("[" + descriptor.getTarget() + ":" + descriptor.getTestName() + "] " + - "Parse output is as expected, but errors are not: " + - "expectedParseErrors:<" + expectedParseErrors + - ">; actualParseErrors:<" + actualParseErrors + - ">; expectedToolErrors:<" + expectedToolErrors + - ">; actualToolErrors:<" + actualToolErrors + - ">."); - } - } - else { - fail("[" + descriptor.getTarget() + ":" + descriptor.getTestName() + "] " + - "Parse output is incorrect: " + - "expectedOutput:<" + expectedOutput + - ">; actualOutput:<" + actualOutput + - ">; expectedParseErrors:<" + expectedParseErrors + - ">; actualParseErrors:<" + actualParseErrors + - ">; expectedToolErrors:<" + expectedToolErrors + - ">; actualToolErrors:<" + actualToolErrors + - ">."); - } - } - - // ---------------------------------------------------------------------------- - // stuff 
used during conversion that I don't want to throw away yet and we might lose if - // I squash this branch unless I keep it around in a comment or something - // ---------------------------------------------------------------------------- - -// public static RuntimeTestDescriptor[] OLD_getRuntimeTestDescriptors(Class clazz, String targetName) { -// if(!TestContext.isSupportedTarget(targetName)) -// return new RuntimeTestDescriptor[0]; -// Class[] nestedClasses = clazz.getClasses(); -// List descriptors = new ArrayList(); -// for (Class nestedClass : nestedClasses) { -// int modifiers = nestedClass.getModifiers(); -// if ( RuntimeTestDescriptor.class.isAssignableFrom(nestedClass) && !Modifier.isAbstract(modifiers) ) { -// try { -// RuntimeTestDescriptor d = (RuntimeTestDescriptor) nestedClass.newInstance(); -// if(!d.ignore(targetName)) { -// d.setTarget(targetName); -// descriptors.add(d); -// } -// } catch (Exception e) { -// e.printStackTrace(System.err); -// } -// } -// } -// writeDescriptors(clazz, descriptors); -// return descriptors.toArray(new RuntimeTestDescriptor[0]); -// } - - - /** Write descriptor files. 
*/ -// private static void writeDescriptors(Class clazz, List descriptors) { -// String descrRootDir = "/Users/parrt/antlr/code/antlr4/runtime-testsuite/resources/org/antlr/v4/test/runtime/new_descriptors"; -// new File(descrRootDir).mkdir(); -// String groupName = clazz.getSimpleName(); -// groupName = groupName.replace("Descriptors", ""); -// String groupDir = descrRootDir + "/" + groupName; -// new File(groupDir).mkdir(); -// -// for (RuntimeTestDescriptor d : descriptors) { -// try { -// Pair g = d.getGrammar(); -// String gname = g.a; -// String grammar = g.b; -// String filename = d.getTestName()+".txt"; -// String content = ""; -// String input = quoteForDescriptorFile(d.getInput()); -// String output = quoteForDescriptorFile(d.getOutput()); -// String errors = quoteForDescriptorFile(d.getErrors()); -// content += "[type]\n"; -// content += d.getTestType(); -// content += "\n\n"; -// content += "[grammar]\n"; -// content += grammar; -// if ( !content.endsWith("\n\n") ) content += "\n"; -// if ( d.getSlaveGrammars()!=null ) { -// for (Pair slaveG : d.getSlaveGrammars()) { -// String sg = quoteForDescriptorFile(slaveG.b); -// content += "[slaveGrammar]\n"; -// content += sg; -// content += "\n"; -// } -// } -// if ( d.getStartRule()!=null && d.getStartRule().length()>0 ) { -// content += "[start]\n"; -// content += d.getStartRule(); -// content += "\n\n"; -// } -// if ( input!=null ) { -// content += "[input]\n"; -// content += input; -// content += "\n"; -// } -// if ( output!=null ) { -// content += "[output]\n"; -// content += output; -// content += "\n"; -// } -// if ( errors!=null ) { -// content += "[errors]\n"; -// content += errors; -// content += "\n"; -// } -// if ( d.showDFA() || d.showDiagnosticErrors() ) { -// content += "[flags]\n"; -// if (d.showDFA()) { -// content += "showDFA\n"; -// } -// if (d.showDiagnosticErrors()) { -// content += "showDiagnosticErrors\n"; -// } -// content += '\n'; -// } -// List skip = new ArrayList<>(); -// for (String 
target : Targets) { -// if ( d.ignore(target) ) { -// skip.add(target); -// } -// } -// if ( skip.size()>0 ) { -// content += "[skip]\n"; -// for (String sk : skip) { -// content += sk+"\n"; -// } -// content += '\n'; -// } -// Files.write(Paths.get(groupDir + "/" + filename), content.getBytes()); -// } -// catch (IOException e) { -// //exception handling left as an exercise for the reader -// System.err.println(e.getMessage()); -// } -// } -// } -// -// /** Rules for strings look like this: -// * -// * [input] if one line, remove all WS before/after -// * a b -// * -// * [input] need whitespace -// * """34 -// * 34""" -// * -// * [input] single quote char, remove all WS before/after -// * " -// * -// * [input] same as "b = 6\n" in java -// * """b = 6 -// * """ -// * -// * [input] -// * """a """ space and no newline inside -// * -// * [input] same as java string "\"aaa" -// * "aaa -// * -// * [input] ignore front/back \n except leave last \n -// * a -// * b -// * c -// * d -// */ -// private static String quoteForDescriptorFile(String s) { -// if ( s==null ) { -// return null; -// } -// long nnl = s.chars().filter(ch -> ch == '\n').count(); -// -// if ( s.endsWith(" ") || // whitespace matters -// (nnl==1&&s.endsWith("\n")) || // "b = 6\n" -// s.startsWith("\n") ) { // whitespace matters -// return "\"\"\"" + s + "\"\"\"\n"; -// } -// if ( s.endsWith(" \n") || s.endsWith("\n\n") ) { -// return "\"\"\"" + s + "\"\"\"\n"; -// } -// if ( nnl==0 ) { // one line input -// return s + "\n"; -// } -// if ( nnl>1 && s.endsWith("\n") ) { -// return s; -// } -// if ( !s.endsWith("\n") ) { // "a\n b" -// return "\"\"\"" + s + "\"\"\"\n"; -// } -// -// return s; -// } - -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/BaseRuntimeTestSupport.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/BaseRuntimeTestSupport.java deleted file mode 100644 index 578e0cf3e1..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/BaseRuntimeTestSupport.java +++ 
/dev/null @@ -1,245 +0,0 @@ -package org.antlr.v4.test.runtime; - -import org.antlr.v4.Tool; -import org.antlr.v4.automata.LexerATNFactory; -import org.antlr.v4.automata.ParserATNFactory; -import org.antlr.v4.runtime.atn.ATN; -import org.antlr.v4.runtime.atn.ATNDeserializer; -import org.antlr.v4.runtime.atn.ATNSerializer; -import org.antlr.v4.runtime.misc.IntegerList; -import org.antlr.v4.semantics.SemanticPipeline; -import org.antlr.v4.tool.Grammar; -import org.antlr.v4.tool.LexerGrammar; -import org.junit.rules.TestRule; -import org.junit.rules.TestWatcher; -import org.junit.runner.Description; - -import java.io.File; -import java.io.IOException; -import java.nio.file.Files; -import java.util.*; -import java.util.logging.Logger; - -import static org.junit.Assert.assertEquals; - -@SuppressWarnings("ResultOfMethodCallIgnored") -public abstract class BaseRuntimeTestSupport implements RuntimeTestSupport { - - // -J-Dorg.antlr.v4.test.BaseTest.level=FINE - protected static final Logger logger = Logger.getLogger(BaseRuntimeTestSupport.class.getName()); - - public static final String NEW_LINE = System.getProperty("line.separator"); - public static final String PATH_SEP = System.getProperty("path.separator"); - - private File tempTestDir = null; - - /** If error during parser execution, store stderr here; can't return - * stdout and stderr. This doesn't trap errors from running antlr. - */ - private String parseErrors; - - /** Errors found while running antlr */ - private StringBuilder antlrToolErrors; - - public static String cachingDirectory; - - static { - cachingDirectory = new File(System.getProperty("java.io.tmpdir"), "ANTLR-runtime-testsuite-cache").getAbsolutePath(); - } - - @org.junit.Rule - public final TestRule testWatcher = new TestWatcher() { - - @Override - protected void succeeded(Description description) { - testSucceeded(description); - } - - }; - - protected void testSucceeded(Description description) { - // remove tmpdir if no error. 
- eraseTempDir(); - } - - @Override - public File getTempParserDir() { - return getTempTestDir(); - } - - @Override - public String getTempParserDirPath() { - return getTempParserDir() == null ? null : getTempParserDir().getAbsolutePath(); - } - - @Override - public final File getTempTestDir() { - return tempTestDir; - } - - @Override - public final String getTempDirPath() { - return tempTestDir ==null ? null : tempTestDir.getAbsolutePath(); - } - - - public void setParseErrors(String errors) { - this.parseErrors = errors; - } - - public String getParseErrors() { - return parseErrors; - } - - public String getANTLRToolErrors() { - if ( antlrToolErrors.length()==0 ) { - return null; - } - return antlrToolErrors.toString(); - } - - protected abstract String getPropertyPrefix(); - - @Override - public void testSetUp() throws Exception { - createTempDir(); - antlrToolErrors = new StringBuilder(); - } - - private void createTempDir() { - // new output dir for each test - String propName = getPropertyPrefix() + "-test-dir"; - String prop = System.getProperty(propName); - if(prop!=null && prop.length()>0) { - tempTestDir = new File(prop); - } else { - String dirName = getClass().getSimpleName() + "-" + Thread.currentThread().getName() + "-" + System.currentTimeMillis(); - tempTestDir = new File(System.getProperty("java.io.tmpdir"), dirName); - } - } - - @Override - public void testTearDown() throws Exception { - } - - @Override - public void beforeTest(RuntimeTestDescriptor descriptor) { - } - - @Override - public void afterTest(RuntimeTestDescriptor descriptor) { - } - - public void eraseTempDir() { - if(shouldEraseTempDir()) { - eraseDirectory(getTempTestDir()); - } - } - - protected boolean shouldEraseTempDir() { - if(tempTestDir == null) - return false; - String propName = getPropertyPrefix() + "-erase-test-dir"; - String prop = System.getProperty(propName); - if (prop != null && prop.length() > 0) - return Boolean.getBoolean(prop); - else - return true; - } - - 
public static void eraseDirectory(File dir) { - if ( dir.exists() ) { - eraseFilesInDir(dir); - dir.delete(); - } - } - - - public static void eraseFilesInDir(File dir) { - String[] files = dir.list(); - for(int i = 0; files!=null && i < files.length; i++) { - try { - eraseFile(dir, files[i]); - } catch(IOException e) { - logger.info(e.getMessage()); - } - } - } - - private static void eraseFile(File dir, String name) throws IOException { - File file = new File(dir,name); - if(Files.isSymbolicLink((file.toPath()))) - Files.delete(file.toPath()); - else if(file.isDirectory()) { - // work around issue where Files.isSymbolicLink returns false on Windows for node/antlr4 linked package - if("antlr4".equals(name)) - ; // logger.warning("antlr4 not seen as a symlink"); - else - eraseDirectory(file); - } else - file.delete(); - } - - - private static String detectedOS; - - public static String getOS() { - if (detectedOS == null) { - String os = System.getProperty("os.name", "generic").toLowerCase(Locale.ENGLISH); - if (os.contains("mac") || os.contains("darwin")) { - detectedOS = "mac"; - } - else if (os.contains("win")) { - detectedOS = "windows"; - } - else if (os.contains("nux")) { - detectedOS = "linux"; - } - else { - detectedOS = "unknown"; - } - } - return detectedOS; - } - - - public static boolean isWindows() { - return getOS().equalsIgnoreCase("windows"); - } - - protected ATN createATN(Grammar g, boolean useSerializer) { - if ( g.atn==null ) { - semanticProcess(g); - assertEquals(0, g.tool.getNumErrors()); - - ParserATNFactory f = g.isLexer() ? 
new LexerATNFactory((LexerGrammar) g) : new ParserATNFactory(g); - - g.atn = f.createATN(); - assertEquals(0, g.tool.getNumErrors()); - } - - ATN atn = g.atn; - if ( useSerializer ) { - // sets some flags in ATN - IntegerList serialized = ATNSerializer.getSerialized(atn); - return new ATNDeserializer().deserialize(serialized.toArray()); - } - - return atn; - } - - protected void semanticProcess(Grammar g) { - if ( g.ast!=null && !g.ast.hasErrors ) { -// System.out.println(g.ast.toStringTree()); - Tool antlr = new Tool(); - SemanticPipeline sem = new SemanticPipeline(g); - sem.process(); - if ( g.getImportedGrammars()!=null ) { // process imported grammars (if any) - for (Grammar imp : g.getImportedGrammars()) { - antlr.processNonCombinedGrammar(imp, false); - } - } - } - } - -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/CustomDescriptors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/CustomDescriptors.java new file mode 100644 index 0000000000..721d217763 --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/CustomDescriptors.java @@ -0,0 +1,197 @@ +/* + * Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */ + +package org.antlr.v4.test.runtime; + +import java.net.URI; +import java.nio.file.Paths; +import java.util.*; + +public class CustomDescriptors { + public final static HashMap descriptors; + private final static URI uri; + + static { + uri = Paths.get(RuntimeTestUtils.runtimeTestsuitePath.toString(), + "test", "org", "antlr", "v4", "test", "runtime", "CustomDescriptors.java").toUri(); + + descriptors = new HashMap<>(); + descriptors.put("LexerExec", + new RuntimeTestDescriptor[]{ + getLineSeparatorLfDescriptor(), + getLineSeparatorCrLfDescriptor(), + getLargeLexerDescriptor(), + getAtnStatesSizeMoreThan65535Descriptor() + }); + descriptors.put("ParserExec", + new RuntimeTestDescriptor[] { + getMultiTokenAlternativeDescriptor() + }); + } + + private static RuntimeTestDescriptor getLineSeparatorLfDescriptor() { + return new RuntimeTestDescriptor( + GrammarType.Lexer, + "LineSeparatorLf", + "", + "1\n2\n3", + "[@0,0:0='1',<1>,1:0]\n" + + "[@1,1:1='\\n',<2>,1:1]\n" + + "[@2,2:2='2',<1>,2:0]\n" + + "[@3,3:3='\\n',<2>,2:1]\n" + + "[@4,4:4='3',<1>,3:0]\n" + + "[@5,5:4='',<-1>,3:1]\n", + "", + null, + "L", + "lexer grammar L;\n" + + "T: ~'\\n'+;\n" + + "SEPARATOR: '\\n';", + null, false, false, null, uri); + } + + private static RuntimeTestDescriptor getLineSeparatorCrLfDescriptor() { + return new RuntimeTestDescriptor( + GrammarType.Lexer, + "LineSeparatorCrLf", + "", + "1\r\n2\r\n3", + "[@0,0:0='1',<1>,1:0]\n" + + "[@1,1:2='\\r\\n',<2>,1:1]\n" + + "[@2,3:3='2',<1>,2:0]\n" + + "[@3,4:5='\\r\\n',<2>,2:1]\n" + + "[@4,6:6='3',<1>,3:0]\n" + + "[@5,7:6='',<-1>,3:1]\n", + "", + "", + "L", + "lexer grammar L;\n" + + "T: ~'\\r'+;\n" + + "SEPARATOR: '\\r\\n';", + null, false, false, null, uri); + } + + private static RuntimeTestDescriptor getLargeLexerDescriptor() { + final int tokensCount = 4000; + final String grammarName = "L"; + + StringBuilder grammar = new StringBuilder(); + grammar.append("lexer grammar ").append(grammarName).append(";\n"); + grammar.append("WS: [ 
\\t\\r\\n]+ -> skip;\n"); + for (int i = 0; i < tokensCount; i++) { + grammar.append("KW").append(i).append(" : 'KW' '").append(i).append("';\n"); + } + + return new RuntimeTestDescriptor( + GrammarType.Lexer, + "LargeLexer", + "This is a regression test for antlr/antlr4#76 \"Serialized ATN strings\n" + + "should be split when longer than 2^16 bytes (class file limitation)\"\n" + + "https://github.com/antlr/antlr4/issues/76", + "KW400", + "[@0,0:4='KW400',<402>,1:0]\n" + + "[@1,5:4='',<-1>,1:5]\n", + "", + "", + grammarName, + grammar.toString(), + null, false, false, null, uri); + } + + private static RuntimeTestDescriptor getAtnStatesSizeMoreThan65535Descriptor() { + // I tried playing around with different sizes, and I think 1002 works for Go but 1003 does not; + // the executing lexer gets a token syntax error for T208 or something like that + final int tokensCount = 1024; + final String suffix = String.join("", Collections.nCopies(70, "_")); + + final String grammarName = "L"; + StringBuilder grammar = new StringBuilder(); + grammar.append("lexer grammar ").append(grammarName).append(";\n"); + grammar.append('\n'); + StringBuilder input = new StringBuilder(); + StringBuilder output = new StringBuilder(); + int startOffset; + int stopOffset = -2; + for (int i = 0; i < tokensCount; i++) { + String ruleName = String.format("T_%06d", i); + String value = ruleName+suffix; + grammar.append(ruleName).append(": '").append(value).append("';\n"); + input.append(value).append('\n'); + + startOffset = stopOffset + 2; + stopOffset += value.length() + 1; + + output.append("[@").append(i).append(',').append(startOffset).append(':').append(stopOffset) + .append("='").append(value).append("',<").append(i + 1).append(">,").append(i + 1) + .append(":0]\n"); + } + + grammar.append("\n"); + grammar.append("WS: [ \\t\\r\\n]+ -> skip;\n"); + + startOffset = stopOffset + 2; + stopOffset = startOffset - 1; + 
output.append("[@").append(tokensCount).append(',').append(startOffset).append(':').append(stopOffset) + .append("='',<-1>,").append(tokensCount + 1).append(":0]\n"); + + return new RuntimeTestDescriptor( + GrammarType.Lexer, + "AtnStatesSizeMoreThan65535", + "Regression for https://github.com/antlr/antlr4/issues/1863", + input.toString(), + output.toString(), + "", + "", + grammarName, + grammar.toString(), + null, false, false, + new String[] {"CSharp", "Python2", "Python3", "Go", "PHP", "Swift", "JavaScript", "Dart"}, uri); + } + + private static RuntimeTestDescriptor getMultiTokenAlternativeDescriptor() { + final int tokensCount = 64; + + StringBuilder rule = new StringBuilder("r1: "); + StringBuilder tokens = new StringBuilder(); + StringBuilder input = new StringBuilder(); + StringBuilder output = new StringBuilder(); + + for (int i = 0; i < tokensCount; i++) { + String currentToken = "T" + i; + rule.append(currentToken); + if (i < tokensCount - 1) { + rule.append(" | "); + } else { + rule.append(";"); + } + tokens.append(currentToken).append(": '").append(currentToken).append("';\n"); + input.append(currentToken).append(" "); + output.append(currentToken); + } + String currentToken = "T" + tokensCount; + tokens.append(currentToken).append(": '").append(currentToken).append("';\n"); + input.append(currentToken).append(" "); + output.append(currentToken); + + String grammar = "grammar P;\n" + + "r: (r1 | T" + tokensCount + ")+ EOF {};\n" + + rule + "\n" + + tokens + "\n" + + "WS: [ ]+ -> skip;"; + + return new RuntimeTestDescriptor( + GrammarType.Parser, + "MultiTokenAlternative", + "https://github.com/antlr/antlr4/issues/3698, https://github.com/antlr/antlr4/issues/3703", + input.toString(), + output + "\n", + "", + "r", + "P", + grammar, + null, false, false, null, uri); + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/FileUtils.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/FileUtils.java new file mode 100644 index 
0000000000..9a0418471a --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/FileUtils.java @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +package org.antlr.v4.test.runtime; + +import org.antlr.v4.runtime.misc.Utils; + +import java.io.*; +import java.nio.charset.StandardCharsets; +import java.nio.file.*; + +import static org.antlr.v4.test.runtime.RuntimeTestUtils.FileSeparator; + +public class FileUtils { + public static void writeFile(String dir, String fileName, String content) { + try { + Utils.writeFile(dir + FileSeparator + fileName, content, "UTF-8"); + } + catch (IOException ioe) { + System.err.println("can't write file"); + ioe.printStackTrace(System.err); + } + } + + public static String readFile(String dir, String fileName) { + try { + return String.copyValueOf(Utils.readFile(dir+"/"+fileName, "UTF-8")); + } + catch (IOException ioe) { + System.err.println("can't read file"); + ioe.printStackTrace(System.err); + } + return null; + } + + public static void replaceInFile(Path sourcePath, String target, String replacement) throws IOException { + replaceInFile(sourcePath, sourcePath, target, replacement); + } + + public static void replaceInFile(Path sourcePath, Path destPath, String target, String replacement) throws IOException { + String content = new String(Files.readAllBytes(sourcePath), StandardCharsets.UTF_8); + String newContent = content.replace(target, replacement); + try (PrintWriter out = new PrintWriter(destPath.toString())) { + out.println(newContent); + } + } + + public static void mkdir(String dir) { + File f = new File(dir); + //noinspection ResultOfMethodCallIgnored + f.mkdirs(); + } + + public static void deleteDirectory(File f) throws IOException { + if (f.isDirectory()) { + File[] files = f.listFiles(); + if (files != null) { + for (File c : files) 
+ deleteDirectory(c); + } + } + if (!f.delete()) + throw new IOException("Failed to delete file: " + f); + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/GeneratedFile.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/GeneratedFile.java new file mode 100644 index 0000000000..6db8a47fbc --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/GeneratedFile.java @@ -0,0 +1,16 @@ +package org.antlr.v4.test.runtime; + +public class GeneratedFile { + public final String name; + public final boolean isParser; + + public GeneratedFile(String name, boolean isParser) { + this.name = name; + this.isParser = isParser; + } + + @Override + public String toString() { + return name + "; isParser:" + isParser; + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/GeneratedLexerDescriptors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/GeneratedLexerDescriptors.java deleted file mode 100644 index b489e36142..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/GeneratedLexerDescriptors.java +++ /dev/null @@ -1,130 +0,0 @@ -package org.antlr.v4.test.runtime; - -import java.util.*; - -public class GeneratedLexerDescriptors { - static RuntimeTestDescriptor getLineSeparatorLfDescriptor(String targetName) { - UniversalRuntimeTestDescriptor result = new UniversalRuntimeTestDescriptor(); - result.name = "LineSeparatorLf"; - result.targetName = targetName; - result.testType = "Lexer"; - result.grammar = "lexer grammar L;\n" + - "T: ~'\\n'+;\n" + - "SEPARATOR: '\\n';"; - result.grammarName = "L"; - result.input = "1\n2\n3"; - result.output = "[@0,0:0='1',<1>,1:0]\n" + - "[@1,1:1='\\n',<2>,1:1]\n" + - "[@2,2:2='2',<1>,2:0]\n" + - "[@3,3:3='\\n',<2>,2:1]\n" + - "[@4,4:4='3',<1>,3:0]\n" + - "[@5,5:4='',<-1>,3:1]\n"; - return result; - } - - static RuntimeTestDescriptor getLineSeparatorCrLfDescriptor(String targetName) { - UniversalRuntimeTestDescriptor result = new UniversalRuntimeTestDescriptor(); - result.name = 
"LineSeparatorCrLf"; - result.targetName = targetName; - result.testType = "Lexer"; - result.grammar = "lexer grammar L;\n" + - "T: ~'\\r'+;\n" + - "SEPARATOR: '\\r\\n';"; - result.grammarName = "L"; - result.input = "1\r\n2\r\n3"; - result.output = "[@0,0:0='1',<1>,1:0]\n" + - "[@1,1:2='\\r\\n',<2>,1:1]\n" + - "[@2,3:3='2',<1>,2:0]\n" + - "[@3,4:5='\\r\\n',<2>,2:1]\n" + - "[@4,6:6='3',<1>,3:0]\n" + - "[@5,7:6='',<-1>,3:1]\n"; - return result; - } - - static RuntimeTestDescriptor getLargeLexerDescriptor(String targetName) { - UniversalRuntimeTestDescriptor result = new UniversalRuntimeTestDescriptor(); - result.name = "LargeLexer"; - result.notes = "This is a regression test for antlr/antlr4#76 \"Serialized ATN strings\n" + - "should be split when longer than 2^16 bytes (class file limitation)\"\n" + - "https://github.com/antlr/antlr4/issues/76"; - result.targetName = targetName; - result.testType = "Lexer"; - - final int tokensCount = 4000; - - String grammarName = "L"; - StringBuilder grammar = new StringBuilder(); - grammar.append("lexer grammar ").append(grammarName).append(";\n"); - grammar.append("WS: [ \\t\\r\\n]+ -> skip;\n"); - for (int i = 0; i < tokensCount; i++) { - grammar.append("KW").append(i).append(" : 'KW' '").append(i).append("';\n"); - } - - result.grammar = grammar.toString(); - result.grammarName = grammarName; - result.input = "KW400"; - result.output = "[@0,0:4='KW400',<402>,1:0]\n" + - "[@1,5:4='',<-1>,1:5]\n"; - return result; - } - - static RuntimeTestDescriptor getAtnStatesSizeMoreThan65535Descriptor(String targetName) { - UniversalRuntimeTestDescriptor result = new UniversalRuntimeTestDescriptor(); - result.name = "AtnStatesSizeMoreThan65535"; - result.notes = "Regression for https://github.com/antlr/antlr4/issues/1863"; - result.targetName = targetName; - result.testType = "Lexer"; - - // I tried playing around with different sizes and I think 1002 works for Go but 1003 does not; - // the executing lexer gets a token syntax error for 
T208 or something like that - final int tokensCount = 1024; - final String suffix = String.join("", Collections.nCopies(70, "_")); - - String grammarName = "L"; - StringBuilder grammar = new StringBuilder(); - grammar.append("lexer grammar ").append(grammarName).append(";\n"); - grammar.append('\n'); - StringBuilder input = new StringBuilder(); - StringBuilder output = new StringBuilder(); - int startOffset; - int stopOffset = -2; - for (int i = 0; i < tokensCount; i++) { - String ruleName = String.format("T_%06d", i); - String value = ruleName+suffix; - grammar.append(ruleName).append(": '").append(value).append("';\n"); - input.append(value).append('\n'); - - startOffset = stopOffset + 2; - stopOffset += value.length() + 1; - - output.append("[@").append(i).append(',').append(startOffset).append(':').append(stopOffset) - .append("='").append(value).append("',<").append(i + 1).append(">,").append(i + 1) - .append(":0]\n"); - } - - grammar.append("\n"); - grammar.append("WS: [ \\t\\r\\n]+ -> skip;\n"); - - startOffset = stopOffset + 2; - stopOffset = startOffset - 1; - output.append("[@").append(tokensCount).append(',').append(startOffset).append(':').append(stopOffset) - .append("='',<-1>,").append(tokensCount + 1).append(":0]\n"); - - result.grammar = grammar.toString(); - result.grammarName = grammarName; - result.input = input.toString(); - result.output = output.toString(); - - // We seem to get memory errors and so I am turning this off during CI - List all = Arrays.asList( -// "CSharp", "Python2", "Python3", "Cpp", "Go", "PHP", "Swift", "Java", "JavaScript", "Node", "Dart" - "CSharp", "Python2", "Python3", "Go", "PHP", "Swift", "JavaScript", "Node", "Dart" - ); - result.skipTargets.addAll(all); - -// result.skipTargets.add("Node"); // doesn't terminate -// result.skipTargets.add("PHP"); // "Allowed memory size of 134217728 bytes exhausted (tried to allocate 16384 bytes)..." 
-// result.skipTargets.add("Go"); // syntax error - return result; - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/Generator.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/Generator.java new file mode 100644 index 0000000000..18bd776494 --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/Generator.java @@ -0,0 +1,89 @@ +/* + * Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +package org.antlr.v4.test.runtime; + +import org.antlr.v4.Tool; +import org.antlr.v4.tool.ANTLRMessage; +import org.antlr.v4.tool.DefaultToolListener; +import org.stringtemplate.v4.ST; + +import java.io.File; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +import static org.antlr.v4.test.runtime.FileUtils.writeFile; + +public class Generator { + /** Write a grammar to tmpdir and run antlr */ + public static ErrorQueue antlrOnString(String workdir, + String targetName, + String grammarFileName, + String grammarStr, + boolean defaultListener, + String... extraOptions) + { + FileUtils.mkdir(workdir); + writeFile(workdir, grammarFileName, grammarStr); + return antlrOnString(workdir, targetName, grammarFileName, defaultListener, extraOptions); + } + + /** Run ANTLR on stuff in workdir and error queue back */ + public static ErrorQueue antlrOnString(String workdir, + String targetName, + String grammarFileName, + boolean defaultListener, + String... 
extraOptions) + { + final List options = new ArrayList<>(); + Collections.addAll(options, extraOptions); + if ( targetName!=null ) { + options.add("-Dlanguage="+targetName); + } + if ( !options.contains("-o") ) { + options.add("-o"); + options.add(workdir); + } + if ( !options.contains("-lib") ) { + options.add("-lib"); + options.add(workdir); + } + if ( !options.contains("-encoding") ) { + options.add("-encoding"); + options.add("UTF-8"); + } + options.add(new File(workdir,grammarFileName).toString()); + + final String[] optionsA = new String[options.size()]; + options.toArray(optionsA); + Tool antlr = new Tool(optionsA); + ErrorQueue equeue = new ErrorQueue(antlr); + antlr.addListener(equeue); + if (defaultListener) { + antlr.addListener(new DefaultToolListener(antlr)); + } + antlr.processGrammarsOnCommandLine(); + + List errors = new ArrayList<>(); + + if ( !defaultListener && !equeue.errors.isEmpty() ) { + for (int i = 0; i < equeue.errors.size(); i++) { + ANTLRMessage msg = equeue.errors.get(i); + ST msgST = antlr.errMgr.getMessageTemplate(msg); + errors.add(msgST.render()); + } + } + if ( !defaultListener && !equeue.warnings.isEmpty() ) { + for (int i = 0; i < equeue.warnings.size(); i++) { + ANTLRMessage msg = equeue.warnings.get(i); + // antlrToolErrors.append(msg); warnings are hushed + } + } + + return equeue; + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/GrammarType.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/GrammarType.java new file mode 100644 index 0000000000..92a1498b59 --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/GrammarType.java @@ -0,0 +1,14 @@ +/* + * Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */ + +package org.antlr.v4.test.runtime; + +public enum GrammarType { + Lexer, + Parser, + CompositeLexer, + CompositeParser +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/OSType.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/OSType.java new file mode 100644 index 0000000000..1dcad27a10 --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/OSType.java @@ -0,0 +1,14 @@ +/* + * Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +package org.antlr.v4.test.runtime; + +public enum OSType { + Windows, + Linux, + Mac, + Unknown +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/Processor.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/Processor.java new file mode 100644 index 0000000000..cd136642e6 --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/Processor.java @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */ + +package org.antlr.v4.test.runtime; + +import java.io.File; +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +import static org.antlr.v4.test.runtime.RuntimeTestUtils.joinLines; + +public class Processor { + public final String[] arguments; + public final String workingDirectory; + public final Map environmentVariables; + public final boolean throwOnNonZeroErrorCode; + + public static ProcessorResult run(String[] arguments, String workingDirectory, Map environmentVariables) + throws InterruptedException, IOException + { + return new Processor(arguments, workingDirectory, environmentVariables, true).start(); + } + + public static ProcessorResult run(String[] arguments, String workingDirectory) throws InterruptedException, IOException { + return new Processor(arguments, workingDirectory, new HashMap<>(), true).start(); + } + + public Processor(String[] arguments, String workingDirectory, Map environmentVariables, + boolean throwOnNonZeroErrorCode) { + this.arguments = arguments; + this.workingDirectory = workingDirectory; + this.environmentVariables = environmentVariables; + this.throwOnNonZeroErrorCode = throwOnNonZeroErrorCode; + } + + public ProcessorResult start() throws InterruptedException, IOException { + ProcessBuilder builder = new ProcessBuilder(arguments); + if (workingDirectory != null) { + builder.directory(new File(workingDirectory)); + } + if (environmentVariables != null && environmentVariables.size() > 0) { + Map environment = builder.environment(); + for (String key : environmentVariables.keySet()) { + environment.put(key, environmentVariables.get(key)); + } + } + + Process process = builder.start(); + StreamReader stdoutReader = new StreamReader(process.getInputStream()); + StreamReader stderrReader = new StreamReader(process.getErrorStream()); + stdoutReader.start(); + stderrReader.start(); + process.waitFor(); + stdoutReader.join(); + stderrReader.join(); + + String output = stdoutReader.toString(); + String 
errors = stderrReader.toString(); + if (throwOnNonZeroErrorCode && process.exitValue() != 0) { + throw new InterruptedException(joinLines(output, errors)); + } + return new ProcessorResult(process.exitValue(), output, errors); + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/ProcessorResult.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/ProcessorResult.java new file mode 100644 index 0000000000..ef88abda16 --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/ProcessorResult.java @@ -0,0 +1,23 @@ +/* + * Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +package org.antlr.v4.test.runtime; + +public class ProcessorResult { + public final int exitCode; + public final String output; + public final String errors; + + public ProcessorResult(int exitCode, String output, String errors) { + this.exitCode = exitCode; + this.output = output; + this.errors = errors; + } + + public boolean isSuccess() { + return exitCode == 0; + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/RunOptions.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/RunOptions.java new file mode 100644 index 0000000000..1c675b9578 --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/RunOptions.java @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */ + +package org.antlr.v4.test.runtime; + +public class RunOptions { + public final String grammarFileName; + public final String grammarStr; + public final String parserName; + public final String lexerName; + public final String grammarName; + public final boolean useListener; + public final boolean useVisitor; + public final String startRuleName; + public final String input; + public final boolean profile; + public final boolean showDiagnosticErrors; + public final boolean showDFA; + public final Stage endStage; + public final boolean returnObject; + public final String superClass; + + public RunOptions(String grammarFileName, String grammarStr, String parserName, String lexerName, + boolean useListener, boolean useVisitor, String startRuleName, + String input, boolean profile, boolean showDiagnosticErrors, + boolean showDFA, Stage endStage, boolean returnObject, + String language, String superClass) { + this.grammarFileName = grammarFileName; + this.grammarStr = grammarStr; + this.parserName = parserName; + this.lexerName = lexerName; + String grammarName = null; + boolean isCombinedGrammar = lexerName != null && parserName != null || language.equals("Go"); + if (isCombinedGrammar) { + if (parserName != null) { + grammarName = parserName.endsWith("Parser") + ? parserName.substring(0, parserName.length() - "Parser".length()) + : parserName; + } + else if (lexerName != null) { + grammarName = lexerName.endsWith("Lexer") + ? 
lexerName.substring(0, lexerName.length() - "Lexer".length()) + : lexerName; + } + } + else { + if (parserName != null) { + grammarName = parserName; + } + else { + grammarName = lexerName; + } + } + this.grammarName = grammarName; + this.useListener = useListener; + this.useVisitor = useVisitor; + this.startRuleName = startRuleName; + this.input = input; + this.profile = profile; + this.showDiagnosticErrors = showDiagnosticErrors; + this.showDFA = showDFA; + this.endStage = endStage; + this.returnObject = returnObject; + this.superClass = superClass; + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/RuntimeRunner.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/RuntimeRunner.java new file mode 100644 index 0000000000..7e48a355e5 --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/RuntimeRunner.java @@ -0,0 +1,331 @@ +/* + * Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */ + +package org.antlr.v4.test.runtime; + +import org.antlr.v4.test.runtime.states.CompiledState; +import org.antlr.v4.test.runtime.states.ExecutedState; +import org.antlr.v4.test.runtime.states.GeneratedState; +import org.antlr.v4.test.runtime.states.State; +import org.stringtemplate.v4.ST; + +import java.io.File; +import java.io.IOException; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.*; + +import static org.antlr.v4.test.runtime.FileUtils.*; +import static org.antlr.v4.test.runtime.RuntimeTestUtils.*; + +public abstract class RuntimeRunner implements AutoCloseable { + public abstract String getLanguage(); + + protected String getExtension() { return getLanguage().toLowerCase(); } + + protected String getTitleName() { return getLanguage(); } + + protected String getTestFileName() { return "Test"; } + + protected String getLexerSuffix() { return "Lexer"; } + + protected String getParserSuffix() { return "Parser"; } + + protected String getBaseListenerSuffix() { return "BaseListener"; } + + protected String getListenerSuffix() { return "Listener"; } + + protected String getBaseVisitorSuffix() { return "BaseVisitor"; } + + protected String getVisitorSuffix() { return "Visitor"; } + + protected String grammarNameToFileName(String grammarName) { return grammarName; } + + private static String runtimeToolPath; + private static String compilerPath; + + protected final String getCompilerPath() { + if (compilerPath == null) { + compilerPath = getCompilerName(); + if (compilerPath != null) { + String compilerPathFromProperty = System.getProperty(getPropertyPrefix() + "-compiler"); + if (compilerPathFromProperty != null && compilerPathFromProperty.length() > 0) { + compilerPath = compilerPathFromProperty; + } + } + } + + return compilerPath; + } + + protected final String getRuntimeToolPath() { + if (runtimeToolPath == null) { + runtimeToolPath = getRuntimeToolName(); + if (runtimeToolPath != null) { + String runtimeToolPathFromProperty = 
System.getProperty(getPropertyPrefix() + "-exec"); + if (runtimeToolPathFromProperty != null && runtimeToolPathFromProperty.length() > 0) { + runtimeToolPath = runtimeToolPathFromProperty; + } + } + } + + return runtimeToolPath; + } + + protected String getCompilerName() { return null; } + + protected String getRuntimeToolName() { return getLanguage().toLowerCase(); } + + protected String getTestFileWithExt() { return getTestFileName() + "." + getExtension(); } + + protected String getExecFileName() { return getTestFileWithExt(); } + + protected String[] getExtraRunArgs() { return null; } + + protected Map getExecEnvironment() { return null; } + + protected String getPropertyPrefix() { + return "antlr-" + getLanguage().toLowerCase(); + } + + public final String getTempDirPath() { + return tempTestDir.toString(); + } + + private boolean saveTestDir; + + protected final Path tempTestDir; + + protected RuntimeRunner() { + this(null, false); + } + + protected RuntimeRunner(Path tempDir, boolean saveTestDir) { + if (tempDir == null) { + String dirName = getClass().getSimpleName() + "-" + Thread.currentThread().getName() + "-" + System.currentTimeMillis(); + tempTestDir = Paths.get(TempDirectory, dirName); + } + else { + tempTestDir = tempDir; + } + this.saveTestDir = saveTestDir; + } + + public void setSaveTestDir(boolean saveTestDir) { + this.saveTestDir = saveTestDir; + } + + public void close() { + removeTempTestDirIfRequired(); + } + + public final static String cacheDirectory; + + private static class InitializationStatus { + public final Object lockObject = new Object(); + public volatile Boolean isInitialized; + public Exception exception; + } + + private final static HashMap runtimeInitializationStatuses = new HashMap<>(); + + static { + cacheDirectory = new File(System.getProperty("java.io.tmpdir"), "ANTLR-runtime-testsuite-cache").getAbsolutePath(); + } + + protected final String getCachePath() { + return getCachePath(getLanguage()); + } + + public static 
String getCachePath(String language) { + return cacheDirectory + FileSeparator + language; + } + + protected final String getRuntimePath() { + return getRuntimePath(getLanguage()); + } + + public static String getRuntimePath(String language) { + return runtimePath.toString() + FileSeparator + language; + } + + public State run(RunOptions runOptions) { + List options = new ArrayList<>(); + if (runOptions.useVisitor) { + options.add("-visitor"); + } + if (runOptions.superClass != null && runOptions.superClass.length() > 0) { + options.add("-DsuperClass=" + runOptions.superClass); + } + ErrorQueue errorQueue = Generator.antlrOnString(getTempDirPath(), getLanguage(), + runOptions.grammarFileName, runOptions.grammarStr, false, options.toArray(new String[0])); + + List generatedFiles = getGeneratedFiles(runOptions); + GeneratedState generatedState = new GeneratedState(errorQueue, generatedFiles, null); + + if (generatedState.containsErrors() || runOptions.endStage == Stage.Generate) { + return generatedState; + } + + if (!initAntlrRuntimeIfRequired()) { + // Do not repeat ANTLR runtime initialization error + return new CompiledState(generatedState, new Exception(getTitleName() + " ANTLR runtime is not initialized")); + } + + writeRecognizerFile(runOptions); + + CompiledState compiledState = compile(runOptions, generatedState); + + if (compiledState.containsErrors() || runOptions.endStage == Stage.Compile) { + return compiledState; + } + + writeFile(getTempDirPath(), "input", runOptions.input); + + return execute(runOptions, compiledState); + } + + protected List getGeneratedFiles(RunOptions runOptions) { + List files = new ArrayList<>(); + String extensionWithDot = "." 
+ getExtension(); + String fileGrammarName = grammarNameToFileName(runOptions.grammarName); + boolean isCombinedGrammarOrGo = runOptions.lexerName != null && runOptions.parserName != null || getLanguage().equals("Go"); + if (runOptions.lexerName != null) { + files.add(new GeneratedFile(fileGrammarName + (isCombinedGrammarOrGo ? getLexerSuffix() : "") + extensionWithDot, false)); + } + if (runOptions.parserName != null) { + files.add(new GeneratedFile(fileGrammarName + (isCombinedGrammarOrGo ? getParserSuffix() : "") + extensionWithDot, true)); + if (runOptions.useListener) { + files.add(new GeneratedFile(fileGrammarName + getListenerSuffix() + extensionWithDot, true)); + String baseListenerSuffix = getBaseListenerSuffix(); + if (baseListenerSuffix != null) { + files.add(new GeneratedFile(fileGrammarName + baseListenerSuffix + extensionWithDot, true)); + } + } + if (runOptions.useVisitor) { + files.add(new GeneratedFile(fileGrammarName + getVisitorSuffix() + extensionWithDot, true)); + String baseVisitorSuffix = getBaseVisitorSuffix(); + if (baseVisitorSuffix != null) { + files.add(new GeneratedFile(fileGrammarName + baseVisitorSuffix + extensionWithDot, true)); + } + } + } + return files; + } + + protected void writeRecognizerFile(RunOptions runOptions) { + String text = RuntimeTestUtils.getTextFromResource("org/antlr/v4/test/runtime/helpers/" + getTestFileWithExt() + ".stg"); + ST outputFileST = new ST(text); + outputFileST.add("grammarName", runOptions.grammarName); + outputFileST.add("lexerName", runOptions.lexerName); + outputFileST.add("parserName", runOptions.parserName); + outputFileST.add("parserStartRuleName", grammarParseRuleToRecognizerName(runOptions.startRuleName)); + outputFileST.add("debug", runOptions.showDiagnosticErrors); + outputFileST.add("profile", runOptions.profile); + outputFileST.add("showDFA", runOptions.showDFA); + outputFileST.add("useListener", runOptions.useListener); + outputFileST.add("useVisitor", runOptions.useVisitor); + 
addExtraRecognizerParameters(outputFileST); + writeFile(getTempDirPath(), getTestFileWithExt(), outputFileST.render()); + } + + protected String grammarParseRuleToRecognizerName(String startRuleName) { + return startRuleName; + } + + protected void addExtraRecognizerParameters(ST template) {} + + private boolean initAntlrRuntimeIfRequired() { + String language = getLanguage(); + InitializationStatus status; + + // Create initialization status for every runtime with lock object + synchronized (runtimeInitializationStatuses) { + status = runtimeInitializationStatuses.get(language); + if (status == null) { + status = new InitializationStatus(); + runtimeInitializationStatuses.put(language, status); + } + } + + if (status.isInitialized != null) { + return status.isInitialized; + } + + // Locking per runtime, several runtimes can be being initialized simultaneously + synchronized (status.lockObject) { + if (status.isInitialized == null) { + Exception exception = null; + try { + initRuntime(); + } catch (Exception e) { + exception = e; + e.printStackTrace(); + } + status.isInitialized = exception == null; + status.exception = exception; + } + } + return status.isInitialized; + } + + protected void initRuntime() throws Exception { + } + + protected CompiledState compile(RunOptions runOptions, GeneratedState generatedState) { + return new CompiledState(generatedState, null); + } + + protected ExecutedState execute(RunOptions runOptions, CompiledState compiledState) { + String output = null; + String errors = null; + Exception exception = null; + try { + List args = new ArrayList<>(); + String runtimeToolPath = getRuntimeToolPath(); + if (runtimeToolPath != null) { + args.add(runtimeToolPath); + } + String[] extraRunArgs = getExtraRunArgs(); + if (extraRunArgs != null) { + args.addAll(Arrays.asList(extraRunArgs)); + } + args.add(getExecFileName()); + args.add("input"); + ProcessorResult result = Processor.run(args.toArray(new String[0]), getTempDirPath(), 
getExecEnvironment()); + output = result.output; + errors = result.errors; + } catch (InterruptedException | IOException e) { + exception = e; + } + return new ExecutedState(compiledState, output, errors, exception); + } + + protected ProcessorResult runCommand(String[] command, String workPath) throws Exception { + return runCommand(command, workPath, null); + } + + protected ProcessorResult runCommand(String[] command, String workPath, String description) throws Exception { + try { + return Processor.run(command, workPath); + } catch (InterruptedException | IOException e) { + throw description != null ? new Exception("can't " + description, e) : e; + } + } + + private void removeTempTestDirIfRequired() { + if (!saveTestDir) { + File dirFile = tempTestDir.toFile(); + if (dirFile.exists()) { + try { + deleteDirectory(dirFile); + } catch (IOException e) { + e.printStackTrace(); + } + } + } + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/RuntimeTestDescriptor.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/RuntimeTestDescriptor.java index be4cf26e9f..a4f377b8c2 100644 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/RuntimeTestDescriptor.java +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/RuntimeTestDescriptor.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. * Use of this file is governed by the BSD 3-clause license that * can be found in the LICENSE.txt file in the project root. */ @@ -8,58 +8,82 @@ import org.antlr.v4.runtime.misc.Pair; +import java.net.URI; +import java.util.Arrays; import java.util.List; -/** This interface describes everything that a runtime test - * descriptor can specify. Most testing descriptors will - * subclass {@link UniversalRuntimeTestDescriptor} rather than - * implement this directly. The {@link BaseRuntimeTest} - * class pulls data from descriptors to execute tests. 
- * - * @since 4.6 +/** This object represents all the information we need about a single test and is the + * in-memory representation of a descriptor file */ -public interface RuntimeTestDescriptor { - String getTestName(); - +public class RuntimeTestDescriptor { /** A type in {"Lexer", "Parser", "CompositeLexer", "CompositeParser"} */ - String getTestType(); + public final GrammarType testType; + + /** Return a string representing the name of the target currently testing + * this descriptor. + * Multiple instances of the same descriptor class + * can be created to test different targets. + */ + public final String name; + + public final String notes; /** Parser input. Return "" if not input should be provided to the parser or lexer. */ - String getInput(); + public final String input; /** Output from executing the parser. Return null if no output is expected. */ - String getOutput(); + public final String output; /** Parse errors Return null if no errors are expected. */ - String getErrors(); - - /** Errors generated by ANTLR processing the grammar. Return null if no errors are expected. */ - String getANTLRToolErrors(); + public final String errors; /** The rule at which parsing should start */ - String getStartRule(); + public final String startRule; + public final String grammarName; + + public final String grammar; + /** List of grammars imported into the grammar */ + public final List> slaveGrammars; /** For lexical tests, dump the DFA of the default lexer mode to stdout */ - boolean showDFA(); + public final boolean showDFA; /** For parsing, engage the DiagnosticErrorListener, dumping results to stderr */ - boolean showDiagnosticErrors(); - - /** Associates name of grammar like M in M.g4 to string (template) of grammar */ - Pair getGrammar(); - - /** Return a list of grammars imported into the grammar specified in {#getGrammar}. */ - List> getSlaveGrammars(); - - /** Return a string representing the name of the target currently testing - * this descriptor. 
Multiple instances of the same descriptor class - * can be created to test different targets. - */ - String getTarget(); - - /** Set the target we are testing */ - void setTarget(String targetName); + public final boolean showDiagnosticErrors; + + public final String[] skipTargets; + + public final URI uri; + + public RuntimeTestDescriptor(GrammarType testType, String name, String notes, + String input, String output, String errors, + String startRule, + String grammarName, String grammar, List> slaveGrammars, + boolean showDFA, boolean showDiagnosticErrors, String[] skipTargets, + URI uri) { + this.testType = testType; + this.name = name; + this.notes = notes; + this.input = input; + this.output = output; + this.errors = errors; + this.startRule = startRule; + this.grammarName = grammarName; + this.grammar = grammar; + this.slaveGrammars = slaveGrammars; + this.showDFA = showDFA; + this.showDiagnosticErrors = showDiagnosticErrors; + this.skipTargets = skipTargets != null ? skipTargets : new String[0]; + this.uri = uri; + } /** Return true if this test should be ignored for the indicated target */ - boolean ignore(String targetName); + public boolean ignore(String targetName) { + return Arrays.asList(skipTargets).contains(targetName); + } + + @Override + public String toString() { + return name; + } } diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/RuntimeTestDescriptorParser.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/RuntimeTestDescriptorParser.java new file mode 100644 index 0000000000..16867cbd41 --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/RuntimeTestDescriptorParser.java @@ -0,0 +1,181 @@ +/* + * Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */ + +package org.antlr.v4.test.runtime; + +import org.antlr.v4.runtime.misc.Pair; + +import java.net.URI; +import java.util.*; + +public class RuntimeTestDescriptorParser { + private final static Set sections = new HashSet<>(Arrays.asList( + "notes", "type", "grammar", "slaveGrammar", "start", "input", "output", "errors", "flags", "skip" + )); + + /** Read stuff like: + [grammar] + grammar T; + s @after {} + : ID | ID {} ; + ID : 'a'..'z'+; + WS : (' '|'\t'|'\n')+ -> skip ; + + [grammarName] + T + + [start] + s + + [input] + abc + + [output] + Decision 0: + s0-ID->:s1^=>1 + + [errors] + """line 1:0 reportAttemptingFullContext d=0 (s), input='abc' + """ + + Some can be missing like [errors]. + + Get gr names automatically "lexer grammar Unicode;" "grammar T;" "parser grammar S;" + + Also handle slave grammars: + + [grammar] + grammar M; + import S,T; + s : a ; + B : 'b' ; // defines B from inherited token space + WS : (' '|'\n') -> skip ; + + [slaveGrammar] + parser grammar T; + a : B {}; + + [slaveGrammar] + parser grammar S; + a : b {}; + b : B; + */ + public static RuntimeTestDescriptor parse(String name, String text, URI uri) throws RuntimeException { + String currentField = null; + StringBuilder currentValue = new StringBuilder(); + + List> pairs = new ArrayList<>(); + String[] lines = text.split("\r?\n"); + + for (String line : lines) { + boolean newSection = false; + String sectionName = null; + if (line.startsWith("[") && line.length() > 2) { + sectionName = line.substring(1, line.length() - 1); + newSection = sections.contains(sectionName); + } + + if (newSection) { + if (currentField != null) { + pairs.add(new Pair<>(currentField, currentValue.toString())); + } + currentField = sectionName; + currentValue.setLength(0); + } + else { + currentValue.append(line); + currentValue.append("\n"); + } + } + pairs.add(new Pair<>(currentField, currentValue.toString())); + + String notes = ""; + GrammarType testType = GrammarType.Lexer; + String grammar = ""; + 
String grammarName = ""; + List> slaveGrammars = new ArrayList<>(); + String startRule = ""; + String input = ""; + String output = ""; + String errors = ""; + boolean showDFA = false; + boolean showDiagnosticErrors = false; + String[] skipTargets = new String[0]; + for (Pair p : pairs) { + String section = p.a; + String value = ""; + if ( p.b!=null ) { + value = p.b.trim(); + } + if ( value.startsWith("\"\"\"") ) { + value = value.replace("\"\"\"", ""); + } + else if ( value.indexOf('\n')>=0 ) { + value = value + "\n"; // if multi line and not quoted, leave \n on end. + } + switch (section) { + case "notes": + notes = value; + break; + case "type": + testType = Enum.valueOf(GrammarType.class, value); + break; + case "grammar": + grammarName = getGrammarName(value.split("\n")[0]); + grammar = value; + break; + case "slaveGrammar": + String gname = getGrammarName(value.split("\n")[0]); + slaveGrammars.add(new Pair<>(gname, value)); + case "start": + startRule = value; + break; + case "input": + input = value; + break; + case "output": + output = value; + break; + case "errors": + errors = value; + break; + case "flags": + String[] flags = value.split("\n"); + for (String f : flags) { + switch (f) { + case "showDFA": + showDFA = true; + break; + case "showDiagnosticErrors": + showDiagnosticErrors = true; + break; + } + } + break; + case "skip": + skipTargets = value.split("\n"); + break; + default: + throw new RuntimeException("Unknown descriptor section ignored: "+section); + } + } + return new RuntimeTestDescriptor(testType, name, notes, input, output, errors, startRule, grammarName, grammar, + slaveGrammars, showDFA, showDiagnosticErrors, skipTargets, uri); + } + + /** Get A, B, or C from: + * "lexer grammar A;" "grammar B;" "parser grammar C;" + */ + private static String getGrammarName(String grammarDeclLine) { + int gi = grammarDeclLine.indexOf("grammar "); + if ( gi<0 ) { + return ""; + } + gi += "grammar ".length(); + int gsemi = grammarDeclLine.indexOf(';'); 
+ return grammarDeclLine.substring(gi, gsemi); + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/RuntimeTestSupport.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/RuntimeTestSupport.java deleted file mode 100644 index 87fcc763e7..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/RuntimeTestSupport.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - -package org.antlr.v4.test.runtime; - -import java.io.File; - -/** This interface describes functionality needed to execute a runtime test. - * Unfortunately the Base*Test.java files are big junk drawers. This is - * an attempt to make it more obvious what new target implementers have to - * implement. - * - * @since 4.6 - */ -public interface RuntimeTestSupport { - - // dir containing grammar input and output - File getTempParserDir(); - String getTempParserDirPath(); - - // dir containing test input and output - File getTempTestDir(); - String getTempDirPath(); - void eraseTempDir(); - - void testSetUp() throws Exception; - void testTearDown() throws Exception; - - void beforeTest(RuntimeTestDescriptor descriptor); - void afterTest(RuntimeTestDescriptor descriptor); - - String getParseErrors(); - String getANTLRToolErrors(); - - String execLexer(String grammarFileName, - String grammarStr, - String lexerName, - String input, - boolean showDFA); - - String execParser(String grammarFileName, - String grammarStr, - String parserName, - String lexerName, - String listenerName, - String visitorName, - String startRuleName, - String input, - boolean showDiagnosticErrors); - -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/RuntimeTestUtils.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/RuntimeTestUtils.java index d2c30800cb..8411d8ce6c 100644 --- 
a/runtime-testsuite/test/org/antlr/v4/test/runtime/RuntimeTestUtils.java +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/RuntimeTestUtils.java @@ -1,89 +1,117 @@ +/* + * Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + package org.antlr.v4.test.runtime; -import org.antlr.v4.runtime.*; -import org.antlr.v4.runtime.atn.ATN; -import org.antlr.v4.runtime.atn.LexerATNSimulator; -import org.antlr.v4.runtime.dfa.DFA; -import org.antlr.v4.runtime.misc.IntegerList; -import org.antlr.v4.tool.LexerGrammar; +import org.antlr.v4.automata.ATNPrinter; +import org.antlr.v4.runtime.atn.ATNState; +import org.antlr.v4.tool.Grammar; +import org.antlr.v4.tool.Rule; + +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.HashMap; +import java.util.Locale; +import java.util.Map; -import java.io.*; -import java.util.*; +import static org.junit.jupiter.api.Assertions.assertEquals; public abstract class RuntimeTestUtils { + public static final String NewLine = System.getProperty("line.separator"); + public static final String PathSeparator = System.getProperty("path.separator"); + public static final String FileSeparator = System.getProperty("file.separator"); + public static final String TempDirectory = System.getProperty("java.io.tmpdir"); + + public final static Path runtimePath; + public final static Path runtimeTestsuitePath; + public final static Path resourcePath; + + private final static Map resourceCache = new HashMap<>(); + private static OSType detectedOS; + private static Boolean isWindows; + + static { + String locationPath = RuntimeTestUtils.class.getProtectionDomain().getCodeSource().getLocation().getPath(); + if (isWindows()) { + locationPath = locationPath.replaceFirst("/", ""); + } + Path potentialRuntimeTestsuitePath = Paths.get(locationPath, "..", "..").normalize(); + 
Path potentialResourcePath = Paths.get(potentialRuntimeTestsuitePath.toString(), "resources"); + + if (Files.exists(potentialResourcePath)) { + runtimeTestsuitePath = potentialRuntimeTestsuitePath; + } + else { + runtimeTestsuitePath = Paths.get("..", "runtime-testsuite").normalize(); + } - /** Sort a list */ - public static > List sort(List data) { - List dup = new ArrayList(data); - dup.addAll(data); - Collections.sort(dup); - return dup; + runtimePath = Paths.get(runtimeTestsuitePath.toString(), "..", "runtime").normalize(); + resourcePath = Paths.get(runtimeTestsuitePath.toString(), "resources"); } - /** Return map sorted by key */ - public static ,V> LinkedHashMap sort(Map data) { - LinkedHashMap dup = new LinkedHashMap(); - List keys = new ArrayList(data.keySet()); - Collections.sort(keys); - for (K k : keys) { - dup.put(k, data.get(k)); + public static boolean isWindows() { + if (isWindows == null) { + isWindows = getOS() == OSType.Windows; } - return dup; + + return isWindows; } - public static List getTokenTypes(LexerGrammar lg, - ATN atn, - CharStream input) { - LexerATNSimulator interp = new LexerATNSimulator(atn, new DFA[]{new DFA(atn.modeToStartState.get(Lexer.DEFAULT_MODE))}, null); - List tokenTypes = new ArrayList(); - int ttype; - boolean hitEOF = false; - do { - if ( hitEOF ) { - tokenTypes.add("EOF"); - break; + public static OSType getOS() { + if (detectedOS == null) { + String os = System.getProperty("os.name", "generic").toLowerCase(Locale.ENGLISH); + if (os.contains("mac") || os.contains("darwin")) { + detectedOS = OSType.Mac; + } + else if (os.contains("win")) { + detectedOS = OSType.Windows; } - int t = input.LA(1); - ttype = interp.match(input, Lexer.DEFAULT_MODE); - if ( ttype==Token.EOF ) { - tokenTypes.add("EOF"); + else if (os.contains("nux")) { + detectedOS = OSType.Linux; } else { - tokenTypes.add(lg.typeToTokenList.get(ttype)); + detectedOS = OSType.Unknown; } + } + return detectedOS; + } - if ( t== IntStream.EOF ) { - hitEOF = 
true; + public static synchronized String getTextFromResource(String name) { + try { + String text = resourceCache.get(name); + if (text == null) { + Path path = Paths.get(resourcePath.toString(), name); + text = new String(Files.readAllBytes(path)); + resourceCache.put(name, text); } - } while ( ttype!=Token.EOF ); - return tokenTypes; + return text; + } + catch (Exception ex) { + throw new RuntimeException(ex); + } } - public static IntegerList getTokenTypesViaATN(String input, LexerATNSimulator lexerATN) { - ANTLRInputStream in = new ANTLRInputStream(input); - IntegerList tokenTypes = new IntegerList(); - int ttype; - do { - ttype = lexerATN.match(in, Lexer.DEFAULT_MODE); - tokenTypes.add(ttype); - } while ( ttype!= Token.EOF ); - return tokenTypes; + public static void checkRuleATN(Grammar g, String ruleName, String expecting) { + Rule r = g.getRule(ruleName); + ATNState startState = g.getATN().ruleToStartState[r.index]; + ATNPrinter serializer = new ATNPrinter(g, startState); + String result = serializer.asString(); + + assertEquals(expecting, result); } - public static void copyFile(File source, File dest) throws IOException { - InputStream is = new FileInputStream(source); - OutputStream os = new FileOutputStream(dest); - byte[] buf = new byte[4 << 10]; - int l; - while ((l = is.read(buf)) > -1) { - os.write(buf, 0, l); + public static String joinLines(Object... 
args) { + StringBuilder result = new StringBuilder(); + for (Object arg : args) { + String str = arg.toString(); + result.append(str); + if (!str.endsWith("\n")) + result.append("\n"); } - is.close(); - os.close(); + return result.toString(); } - - public static void mkdir(String dir) { - File f = new File(dir); - f.mkdirs(); - } } diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/RuntimeTests.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/RuntimeTests.java new file mode 100644 index 0000000000..f3f367e8ff --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/RuntimeTests.java @@ -0,0 +1,246 @@ +/* + * Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +package org.antlr.v4.test.runtime; + +import org.antlr.v4.runtime.misc.Pair; +import org.antlr.v4.test.runtime.java.JavaRunner; +import org.antlr.v4.test.runtime.java.JavaRuntimeTests; +import org.antlr.v4.test.runtime.states.ExecutedState; +import org.antlr.v4.test.runtime.states.State; +import org.junit.jupiter.api.DynamicNode; +import org.junit.jupiter.api.TestFactory; +import org.junit.jupiter.api.parallel.Execution; +import org.junit.jupiter.api.parallel.ExecutionMode; +import org.stringtemplate.v4.*; + +import java.io.File; +import java.io.IOException; +import java.net.URL; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.*; +import java.util.stream.Stream; + +import static org.antlr.v4.test.runtime.FileUtils.writeFile; +import static org.antlr.v4.test.runtime.RuntimeTestUtils.joinLines; +import static org.junit.jupiter.api.Assertions.fail; +import static org.junit.jupiter.api.DynamicContainer.dynamicContainer; +import static org.junit.jupiter.api.DynamicTest.dynamicTest; + +/** This class represents runtime tests for specified runtime. 
+ * It pulls data from {@link RuntimeTestDescriptor} and uses junit to trigger tests. + * The only functionality needed to execute a test is defined in {@link RuntimeRunner}. + * All the various test rig classes derived from this one. + * E.g., see {@link JavaRuntimeTests}. + */ +public abstract class RuntimeTests { + protected abstract RuntimeRunner createRuntimeRunner(); + + private final static HashMap testDescriptors = new HashMap<>(); + private final static Map cachedTargetTemplates = new HashMap<>(); + private final static StringRenderer rendered = new StringRenderer(); + + static { + File descriptorsDir = new File(Paths.get(RuntimeTestUtils.resourcePath.toString(), "org/antlr/v4/test/runtime/descriptors").toString()); + File[] directoryListing = descriptorsDir.listFiles(); + assert directoryListing != null; + for (File directory : directoryListing) { + String groupName = directory.getName(); + if (groupName.startsWith(".")) { + continue; // Ignore service directories (like .DS_Store in Mac) + } + + List descriptors = new ArrayList<>(); + + File[] descriptorFiles = directory.listFiles(); + assert descriptorFiles != null; + for (File descriptorFile : descriptorFiles) { + String name = descriptorFile.getName().replace(".txt", ""); + if (name.startsWith(".")) { + continue; + } + + String text; + try { + text = new String(Files.readAllBytes(descriptorFile.toPath())); + } catch (IOException e) { + throw new RuntimeException(e); + } + descriptors.add(RuntimeTestDescriptorParser.parse(name, text, descriptorFile.toURI())); + } + + testDescriptors.put(groupName, descriptors.toArray(new RuntimeTestDescriptor[0])); + } + + for (String key : CustomDescriptors.descriptors.keySet()) { + RuntimeTestDescriptor[] descriptors = CustomDescriptors.descriptors.get(key); + RuntimeTestDescriptor[] existedDescriptors = testDescriptors.putIfAbsent(key, descriptors); + if (existedDescriptors != null) { + testDescriptors.put(key, Stream.concat(Arrays.stream(existedDescriptors), 
Arrays.stream(descriptors)) + .toArray(RuntimeTestDescriptor[]::new)); + } + } + } + + @TestFactory + @Execution(ExecutionMode.CONCURRENT) + public List runtimeTests() { + List result = new ArrayList<>(); + + for (String group : testDescriptors.keySet()) { + ArrayList descriptorTests = new ArrayList<>(); + RuntimeTestDescriptor[] descriptors = testDescriptors.get(group); + for (RuntimeTestDescriptor descriptor : descriptors) { + descriptorTests.add(dynamicTest(descriptor.name, descriptor.uri, () -> { + try (RuntimeRunner runner = createRuntimeRunner()) { + String errorMessage = test(descriptor, runner); + if (errorMessage != null) { + runner.setSaveTestDir(true); + fail(joinLines("Test: " + descriptor.name + "; " + errorMessage, "Test directory: " + runner.getTempDirPath())); + } + } + })); + } + + Path descriptorGroupPath = Paths.get(RuntimeTestUtils.resourcePath.toString(), "descriptors", group); + result.add(dynamicContainer(group, descriptorGroupPath.toUri(), Arrays.stream(descriptorTests.toArray(new DynamicNode[0])))); + } + + return result; + } + + private static String test(RuntimeTestDescriptor descriptor, RuntimeRunner runner) { + String targetName = runner.getLanguage(); + if (descriptor.ignore(targetName)) { + System.out.println("Ignore " + descriptor); + return null; + } + + FileUtils.mkdir(runner.getTempDirPath()); + + String grammarName = descriptor.grammarName; + String grammar = prepareGrammars(descriptor, runner); + + String lexerName, parserName; + boolean useListenerOrVisitor; + String superClass; + if (descriptor.testType == GrammarType.Parser || descriptor.testType == GrammarType.CompositeParser) { + lexerName = grammarName + "Lexer"; + parserName = grammarName + "Parser"; + useListenerOrVisitor = true; + if (targetName.equals("Java")) { + superClass = JavaRunner.runtimeTestParserName; + } + else { + superClass = null; + } + } + else { + lexerName = grammarName; + parserName = null; + useListenerOrVisitor = false; + if 
(targetName.equals("Java")) { + superClass = JavaRunner.runtimeTestLexerName; + } + else { + superClass = null; + } + } + + RunOptions runOptions = new RunOptions(grammarName + ".g4", + grammar, + parserName, + lexerName, + useListenerOrVisitor, + useListenerOrVisitor, + descriptor.startRule, + descriptor.input, + false, + descriptor.showDiagnosticErrors, + descriptor.showDFA, + Stage.Execute, + false, + targetName, + superClass + ); + + State result = runner.run(runOptions); + + return assertCorrectOutput(descriptor, targetName, result); + } + + private static String prepareGrammars(RuntimeTestDescriptor descriptor, RuntimeRunner runner) { + String targetName = runner.getLanguage(); + + STGroup targetTemplates; + synchronized (cachedTargetTemplates) { + targetTemplates = cachedTargetTemplates.get(targetName); + if (targetTemplates == null) { + ClassLoader classLoader = RuntimeTests.class.getClassLoader(); + URL templates = classLoader.getResource("org/antlr/v4/test/runtime/templates/" + targetName + ".test.stg"); + assert templates != null; + targetTemplates = new STGroupFile(templates, "UTF-8", '<', '>'); + targetTemplates.registerRenderer(String.class, rendered); + cachedTargetTemplates.put(targetName, targetTemplates); + } + } + + // write out any slave grammars + List> slaveGrammars = descriptor.slaveGrammars; + if (slaveGrammars != null) { + for (Pair spair : slaveGrammars) { + STGroup g = new STGroup('<', '>'); + g.registerRenderer(String.class, rendered); + g.importTemplates(targetTemplates); + ST grammarST = new ST(g, spair.b); + writeFile(runner.getTempDirPath(), spair.a + ".g4", grammarST.render()); + } + } + + STGroup g = new STGroup('<', '>'); + g.importTemplates(targetTemplates); + g.registerRenderer(String.class, rendered); + ST grammarST = new ST(g, descriptor.grammar); + return grammarST.render(); + } + + private static String assertCorrectOutput(RuntimeTestDescriptor descriptor, String targetName, State state) { + ExecutedState executedState; + if 
(state instanceof ExecutedState) { + executedState = (ExecutedState)state; + if (executedState.exception != null) { + return state.getErrorMessage(); + } + } + else { + return state.getErrorMessage(); + } + + String expectedOutput = descriptor.output; + String expectedParseErrors = descriptor.errors; + + boolean doesOutputEqualToExpected = executedState.output.equals(expectedOutput); + if (!doesOutputEqualToExpected || !executedState.errors.equals(expectedParseErrors)) { + String message; + if (doesOutputEqualToExpected) { + message = "Parse output is as expected, but errors are not: "; + } + else { + message = "Parse output is incorrect: " + + "expectedOutput:<" + expectedOutput + ">; actualOutput:<" + executedState.output + ">; "; + } + + return "[" + targetName + ":" + descriptor.name + "] " + + message + + "expectedParseErrors:<" + expectedParseErrors + ">;" + + "actualParseErrors:<" + executedState.errors + ">."; + } + + return null; + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/Stage.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/Stage.java new file mode 100644 index 0000000000..ed807ad6f8 --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/Stage.java @@ -0,0 +1,13 @@ +/* + * Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */ + +package org.antlr.v4.test.runtime; + +public enum Stage { + Generate, + Compile, + Execute +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/StreamVacuum.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/StreamReader.java similarity index 55% rename from runtime-testsuite/test/org/antlr/v4/test/runtime/StreamVacuum.java rename to runtime-testsuite/test/org/antlr/v4/test/runtime/StreamReader.java index d0daa19479..2c84ab9c70 100644 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/StreamVacuum.java +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/StreamReader.java @@ -12,32 +12,46 @@ import java.io.InputStreamReader; import java.nio.charset.StandardCharsets; -public final class StreamVacuum implements Runnable { - private StringBuilder buf = new StringBuilder(); - private BufferedReader in; - private Thread sucker; - public StreamVacuum(InputStream in) { - this.in = new BufferedReader( new InputStreamReader(in, StandardCharsets.UTF_8) ); +public final class StreamReader implements Runnable { + private final StringBuilder buffer = new StringBuilder(); + private final BufferedReader in; + private final Thread worker; + + public StreamReader(InputStream in) { + this.in = new BufferedReader(new InputStreamReader(in, StandardCharsets.UTF_8) ); + worker = new Thread(this); } + public void start() { - sucker = new Thread(this); - sucker.start(); + worker.start(); } + @Override public void run() { try { - TestOutputReading.append(in, buf); + while (true) { + int c = in.read(); + if (c == -1) { + break; + } + if (c == '\r') { + continue; + } + buffer.append((char) c); + } } catch (IOException ioe) { System.err.println("can't read output from process"); } } + /** wait for the thread to finish */ public void join() throws InterruptedException { - sucker.join(); + worker.join(); } + @Override public String toString() { - return buf.toString(); + return buffer.toString(); } } diff --git 
a/runtime-testsuite/test/org/antlr/v4/test/runtime/TestContext.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/TestContext.java deleted file mode 100644 index 9646c352cd..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/TestContext.java +++ /dev/null @@ -1,33 +0,0 @@ -package org.antlr.v4.test.runtime; - -public abstract class TestContext { - - public static boolean isTravisCI() { - return "true".equals(String.valueOf(System.getenv("TRAVIS")).toLowerCase()); - } - - public static boolean isGitHubCI() { - return "true".equals(String.valueOf(System.getenv("CI")).toLowerCase()); - } - - public static boolean isAppVeyorCI() { - return "true".equals(String.valueOf(System.getenv("APPVEYOR")).toLowerCase()); - } - - public static boolean isCircleCI() { - return "true".equals(String.valueOf(System.getenv("CIRCLECI")).toLowerCase()); - } - - public static boolean isCI() { - return isAppVeyorCI() || isCircleCI() || isGitHubCI() || isTravisCI(); - } - - @SuppressWarnings("BooleanMethodIsAlwaysInverted") - public static boolean isSupportedTarget(String target) { - if(isAppVeyorCI()) - return !target.matches("Swift"); - else - return true; - } - -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/TestOutputReading.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/TestOutputReading.java deleted file mode 100644 index 91bb1a61ed..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/TestOutputReading.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime; - -import java.io.BufferedReader; -import java.io.FileNotFoundException; -import java.io.IOException; -import java.nio.charset.StandardCharsets; -import java.nio.file.Files; -import java.nio.file.NoSuchFileException; -import java.nio.file.Path; - -public abstract class TestOutputReading { - public static void append(BufferedReader in, StringBuilder buf) throws IOException { - String line = in.readLine(); - while (line!=null) { - buf.append(line); - // NOTE: This appends a newline at EOF - // regardless of whether or not the - // input actually ended with a - // newline. - // - // We should revisit this and read a - // block at a time rather than a line - // at a time, and change all tests - // which rely on this behavior to - // remove the trailing newline at EOF. - // - // When we fix this, we can remove the - // TestOutputReading class entirely. - buf.append('\n'); - line = in.readLine(); - } - } - - /** - * Read in the UTF-8 bytes at {@code path}, convert all - * platform-specific line terminators to NL, and append NL - * if the file was non-empty and didn't already end with one. - * - * {@see StreamVacuum#run()} for why this method exists. - * - * Returns {@code null} if the file does not exist or the output - * was empty. - */ - public static String read(Path path) throws IOException { - // Mimic StreamVacuum.run()'s behavior of replacing all platform-specific - // EOL sequences with NL. 
- StringBuilder buf = new StringBuilder(); - try (BufferedReader in = Files.newBufferedReader(path, StandardCharsets.UTF_8)) { - append(in, buf); - } catch (FileNotFoundException | NoSuchFileException e) { - return null; - } - if (buf.length() > 0) { - return buf.toString(); - } else { - return null; - } - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/UniversalRuntimeTestDescriptor.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/UniversalRuntimeTestDescriptor.java deleted file mode 100644 index 617a0230bc..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/UniversalRuntimeTestDescriptor.java +++ /dev/null @@ -1,104 +0,0 @@ -package org.antlr.v4.test.runtime; - -import org.antlr.v4.runtime.misc.Pair; - -import java.util.ArrayList; -import java.util.List; - -/** This object represents all the information we need about a single test and is the - * in-memory representation of a descriptor file - */ -public class UniversalRuntimeTestDescriptor implements RuntimeTestDescriptor { - public String testType; - public String targetName; - public String name; - public String notes; - public String input = ""; - public String output; - public String errors; - public String startRule; - public String grammarName; - public String grammar; - public List> slaveGrammars = new ArrayList<>(); - public boolean showDFA = false; - public boolean showDiagnosticErrors = false; - - public List skipTargets = new ArrayList<>(); - - @Override - public String getTestName() { - return name; - } - - @Override - public String getTestType() { - return testType; - } - - @Override - public String getInput() { - return input; - } - - @Override - public String getOutput() { - return output; - } - - @Override - public String getErrors() { - return errors; - } - - @Override - public String getANTLRToolErrors() { - return null; - } - - @Override - public String getStartRule() { - return startRule; - } - - @Override - public List> getSlaveGrammars() { - if ( 
slaveGrammars.size()==0 ) return null; - return slaveGrammars; - } - - @Override - public String getTarget() { - return targetName; - } - - @Override - public void setTarget(String targetName) { - this.targetName = targetName; - } - - @Override - public boolean showDFA() { - return showDFA; - } - - @Override - public boolean showDiagnosticErrors() { - return showDiagnosticErrors; - } - - @Override - public Pair getGrammar() { - return new Pair<>(grammarName,grammar); - } - - @Override - public boolean ignore(String targetName) { - return skipTargets.contains(targetName); - } - - - @Override - public String toString() { - return getTarget()+":"+getTestName(); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/BaseCppTest.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/BaseCppTest.java deleted file mode 100644 index 9f0f93b522..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/BaseCppTest.java +++ /dev/null @@ -1,451 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ -package org.antlr.v4.test.runtime.cpp; - -import org.antlr.v4.test.runtime.*; -import org.stringtemplate.v4.ST; - -import java.io.File; -import java.net.URISyntaxException; -import java.net.URL; -import java.nio.file.Paths; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; - -import static org.antlr.v4.test.runtime.BaseRuntimeTest.antlrOnString; -import static org.antlr.v4.test.runtime.BaseRuntimeTest.writeFile; -import static org.junit.Assert.assertTrue; - -public class BaseCppTest extends BaseRuntimeTestSupport implements RuntimeTestSupport { - - protected String getPropertyPrefix() { - return "antlr-" + getLanguage().toLowerCase(); - } - - protected String getLanguage() { - return "Cpp"; - } - - protected String execLexer(String grammarFileName, - String grammarStr, - String lexerName, - String input) - { - return execLexer(grammarFileName, grammarStr, lexerName, input, false); - } - - @Override - public String execLexer(String grammarFileName, - String grammarStr, - String lexerName, - String input, - boolean showDFA) - { - boolean success = rawGenerateAndBuildRecognizer(grammarFileName, - grammarStr, - null, - lexerName,"-no-listener"); - assertTrue(success); - writeFile(getTempDirPath(), "input", input); - writeLexerTestFile(lexerName, showDFA); - String output = execModule("Test.cpp"); - return output; - } - - - @Override - public String execParser(String grammarFileName, - String grammarStr, - String parserName, - String lexerName, - String listenerName, - String visitorName, - String startRuleName, - String input, - boolean showDiagnosticErrors) - { - boolean success = rawGenerateAndBuildRecognizer(grammarFileName, - grammarStr, - parserName, - lexerName, - "-visitor"); - assertTrue(success); - writeFile(getTempDirPath(), "input", input); - rawBuildRecognizerTestFile(parserName, - lexerName, - listenerName, - visitorName, - startRuleName, - 
showDiagnosticErrors, - false); - return execRecognizer(); - } - - /** Return true if all is well */ - protected boolean rawGenerateAndBuildRecognizer(String grammarFileName, - String grammarStr, - String parserName, - String lexerName, - String... extraOptions) - { - return rawGenerateAndBuildRecognizer(grammarFileName, grammarStr, parserName, lexerName, false, extraOptions); - } - - /** Return true if all is well */ - protected boolean rawGenerateAndBuildRecognizer(String grammarFileName, - String grammarStr, - String parserName, - String lexerName, - boolean defaultListener, - String... extraOptions) - { - ErrorQueue equeue = - antlrOnString(getTempDirPath(), "Cpp", grammarFileName, grammarStr, defaultListener, extraOptions); - if (!equeue.errors.isEmpty()) { - return false; - } - - List files = new ArrayList(); - if ( lexerName!=null ) { - files.add(lexerName+".cpp"); - files.add(lexerName+".h"); - } - if ( parserName!=null ) { - files.add(parserName+".cpp"); - files.add(parserName+".h"); - Set optionsSet = new HashSet(Arrays.asList(extraOptions)); - if (!optionsSet.contains("-no-listener")) { - files.add(grammarFileName.substring(0, grammarFileName.lastIndexOf('.'))+"Listener.cpp"); - files.add(grammarFileName.substring(0, grammarFileName.lastIndexOf('.'))+"Listener.h"); - } - if (optionsSet.contains("-visitor")) { - files.add(grammarFileName.substring(0, grammarFileName.lastIndexOf('.'))+"Visitor.cpp"); - files.add(grammarFileName.substring(0, grammarFileName.lastIndexOf('.'))+"Visitor.h"); - } - } - return true; // allIsWell: no compile - } - - protected void rawBuildRecognizerTestFile(String parserName, - String lexerName, - String listenerName, - String visitorName, - String parserStartRuleName, - boolean debug, - boolean trace) - { - setParseErrors(null); - if ( parserName==null ) { - writeLexerTestFile(lexerName, false); - } - else { - writeParserTestFile(parserName, - lexerName, - listenerName, - visitorName, - parserStartRuleName, - debug, trace); - } 
- } - - public String execRecognizer() { - return execModule("Test.cpp"); - } - - - public List allCppFiles(String path) { - ArrayList files = new ArrayList(); - File folder = new File(path); - File[] listOfFiles = folder.listFiles(); - for (File listOfFile : listOfFiles) { - String file = listOfFile.getAbsolutePath(); - if (file.endsWith(".cpp")) { - files.add(file); - } - } - return files; - } - - private String runProcess(ProcessBuilder builder, String description, boolean showStderr) throws Exception { - // System.out.println("BUILDER: " + builder.command() + " @ " + builder.directory().toString()); - Process process = builder.start(); - StreamVacuum stdoutVacuum = new StreamVacuum(process.getInputStream()); - StreamVacuum stderrVacuum = new StreamVacuum(process.getErrorStream()); - stdoutVacuum.start(); - stderrVacuum.start(); - int errcode = process.waitFor(); - stdoutVacuum.join(); - stderrVacuum.join(); - String output = stdoutVacuum.toString(); - if ( stderrVacuum.toString().length()>0 ) { - setParseErrors(stderrVacuum.toString()); - if ( showStderr ) System.err.println(getParseErrors()); - } - if (errcode != 0) { - String err = "execution of '"+description+"' failed with error code: "+errcode; - if ( getParseErrors()!=null ) { - setParseErrors(getParseErrors() + err); - } - else { - setParseErrors(err); - } - } - - return output; - } - - private String runCommand(String[] command, String workPath, String description, boolean showStderr) throws Exception { - ProcessBuilder builder = new ProcessBuilder(command); - builder.directory(new File(workPath)); - - return runProcess(builder, description, showStderr); - } - - // TODO: add a buildRuntimeOnWindows variant. 
- private boolean buildRuntime() { - String runtimePath = locateRuntime(); - System.out.println("Building ANTLR4 C++ runtime (if necessary) at "+ runtimePath); - - try { - String[] command = { "cmake", ".", "-DCMAKE_BUILD_TYPE=Debug" }; - if (runCommand(command, runtimePath, "antlr runtime cmake", false) == null) { - return false; - } - } - catch (Exception e) { - System.err.println("can't configure antlr cpp runtime cmake file"); - } - - try { - String[] command = { "make", "-j", Integer.toString(Runtime.getRuntime().availableProcessors()) }; - if (runCommand(command, runtimePath, "building antlr runtime", true) == null) - return false; - } - catch (Exception e) { - System.err.println("can't compile antlr cpp runtime"); - e.printStackTrace(System.err); - try { - String[] command = { "ls", "-la" }; - String output = runCommand(command, runtimePath + "/dist/", "printing library folder content", true); - System.out.println(output); - } - catch (Exception e2) { - System.err.println("can't even list folder content"); - e2.printStackTrace(System.err); - } - } - -/* for debugging - try { - String command[] = { "ls", "-la" }; - String output = runCommand(command, runtimePath + "/dist/", "printing library folder content"); - System.out.println(output); - } - catch (Exception e) { - System.err.println("can't print folder content"); - } -*/ - - return true; - } - - static Boolean runtimeBuiltOnce = false; - - public String execModule(String fileName) { - String runtimePath = locateRuntime(); - String includePath = runtimePath + "/runtime/src"; - String binPath = new File(getTempTestDir(), "a.out").getAbsolutePath(); - String inputPath = new File(getTempTestDir(), "input").getAbsolutePath(); - - // Build runtime using cmake once per VM. 
- synchronized (BaseCppTest.class) { - if ( !runtimeBuiltOnce ) { - try { - String[] command = {"clang++", "--version"}; - String output = runCommand(command, getTempDirPath(), "printing compiler version", false); - System.out.println("Compiler version is: "+output); - } - catch (Exception e) { - System.err.println("Can't get compiler version"); - } - - runtimeBuiltOnce = true; - if ( !buildRuntime() ) { - System.out.println("C++ runtime build failed\n"); - return null; - } - System.out.println("C++ runtime build succeeded\n"); - } - } - - // Create symlink to the runtime. Currently only used on OSX. - String libExtension = (getOS().equals("mac")) ? "dylib" : "so"; - try { - String[] command = { "ln", "-s", runtimePath + "/dist/libantlr4-runtime." + libExtension }; - if (runCommand(command, getTempDirPath(), "sym linking C++ runtime", true) == null) - return null; - } - catch (Exception e) { - System.err.println("can't create link to " + runtimePath + "/dist/libantlr4-runtime." + libExtension); - e.printStackTrace(System.err); - return null; - } - - try { - List command2 = new ArrayList(Arrays.asList("clang++", "-std=c++17", "-I", includePath, "-L.", "-lantlr4-runtime", "-pthread", "-o", "a.out")); - command2.addAll(allCppFiles(getTempDirPath())); - if (runCommand(command2.toArray(new String[0]), getTempDirPath(), "building test binary", true) == null) { - return null; - } - } - catch (Exception e) { - System.err.println("can't compile test module: " + e.getMessage()); - e.printStackTrace(System.err); - return null; - } - - // Now run the newly minted binary. Reset the error output, as we could have got compiler warnings which are not relevant here. - setParseErrors(null); - try { - ProcessBuilder builder = new ProcessBuilder(binPath, inputPath); - builder.directory(getTempTestDir()); - Map env = builder.environment(); - env.put("LD_PRELOAD", runtimePath + "/dist/libantlr4-runtime." 
+ libExtension); - String output = runProcess(builder, "running test binary", false); - if ( output.length()==0 ) { - output = null; - } - - /* for debugging - System.out.println("========================================================="); - System.out.println(output); - System.out.println("========================================================="); - */ - return output; - } - catch (Exception e) { - System.err.println("can't exec module: " + fileName); - e.printStackTrace(System.err); - } - - return null; - } - - protected String locateRuntime() { - final ClassLoader loader = Thread.currentThread().getContextClassLoader(); - final URL runtimeURL = loader.getResource("Cpp"); - if (runtimeURL == null) { - throw new RuntimeException("Cannot find runtime"); - } - // Windows not getting runtime right. See: - // http://stackoverflow.com/questions/6164448/convert-url-to-normal-windows-filename-java - // it was coming back "/C:/projects/antlr4-l7imv/runtime-testsuite/target/classes/Cpp" - String p; - try { - p = Paths.get(runtimeURL.toURI()).toFile().toString(); - } - catch (URISyntaxException use) { - p = "Can't find runtime at " + runtimeURL; - } - return p; - } - - protected void writeParserTestFile(String parserName, String lexerName, - String listenerName, String visitorName, - String parserStartRuleName, boolean debug, boolean trace) { - if(!parserStartRuleName.endsWith(")")) - parserStartRuleName += "()"; - ST outputFileST = new ST( - "#include \\\n" - + "\n" - + "#include \"antlr4-runtime.h\"\n" - + "#include \".h\"\n" - + "#include \".h\"\n" - + "\n" - + "using namespace antlr4;\n" - + "\n" - + "class TreeShapeListener : public tree::ParseTreeListener {\n" - + "public:\n" - + " void visitTerminal(tree::TerminalNode *) override {}\n" - + " void visitErrorNode(tree::ErrorNode *) override {}\n" - + " void exitEveryRule(ParserRuleContext *) override {}\n" - + " void enterEveryRule(ParserRuleContext *ctx) override {\n" - + " for (auto child : ctx->children) {\n" - + 
" tree::ParseTree *parent = child->parent;\n" - + " ParserRuleContext *rule = dynamic_cast\\(parent);\n" - + " if (rule != ctx) {\n" - + " throw \"Invalid parse tree shape detected.\";\n" - + " }\n" - - + " }\n" - + " }\n" - + "};\n" - + "\n" - + "\n" - + "int main(int argc, const char* argv[]) {\n" - + " ANTLRFileStream input;\n" - + " input.loadFromFile(argv[1]);\n" - + " lexer(&input);\n" - + " CommonTokenStream tokens(&lexer);\n" - + "" - + "\n" - + " tree::ParseTree *tree = parser.;\n" - + " TreeShapeListener listener;\n" - + " tree::ParseTreeWalker::DEFAULT.walk(&listener, tree);\n" - + "\n" - + " return 0;\n" - + "}\n" - ); - - String stSource = " parser(&tokens);\n"; - if(debug) { - stSource += " DiagnosticErrorListener errorListener;\n"; - stSource += " parser.addErrorListener(&errorListener);\n"; - } - if(trace) - stSource += " parser.setTrace(true);\n"; - ST createParserST = new ST(stSource); - outputFileST.add("createParser", createParserST); - outputFileST.add("parserName", parserName); - outputFileST.add("lexerName", lexerName); - outputFileST.add("listenerName", listenerName); - outputFileST.add("visitorName", visitorName); - outputFileST.add("parserStartRuleName", parserStartRuleName); - writeFile(getTempDirPath(), "Test.cpp", outputFileST.render()); - } - - protected void writeLexerTestFile(String lexerName, boolean showDFA) { - ST outputFileST = new ST( - "#include \\\n" - + "\n" - + "#include \"antlr4-runtime.h\"\n" - + "#include \".h\"\n" - + "\n" - + "#include \"support/StringUtils.h\"\n" - + "\n" - + "using namespace antlr4;\n" - + "\n" - + "int main(int argc, const char* argv[]) {\n" - + " ANTLRFileStream input;\n" - + " input.loadFromFile(argv[1]);\n" - + " lexer(&input);\n" - + " CommonTokenStream tokens(&lexer);\n" - + " tokens.fill();\n" - + " for (auto token : tokens.getTokens())\n" - + " std::cout \\<\\< token->toString() \\<\\< std::endl;\n" - + (showDFA ? 
" std::cout \\<\\< lexer.getInterpreter\\()->getDFA(Lexer::DEFAULT_MODE).toLexerString();\n" : "\n") - + " return 0;\n" - + "}\n"); - outputFileST.add("lexerName", lexerName); - writeFile(getTempDirPath(), "Test.cpp", outputFileST.render()); - } - -} - diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/CppRunner.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/CppRunner.java new file mode 100644 index 0000000000..dec7336ec1 --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/CppRunner.java @@ -0,0 +1,188 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ +package org.antlr.v4.test.runtime.cpp; + +import org.antlr.v4.test.runtime.*; +import org.antlr.v4.test.runtime.states.CompiledState; +import org.antlr.v4.test.runtime.states.GeneratedState; +import org.stringtemplate.v4.ST; + +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +import static org.antlr.v4.test.runtime.FileUtils.writeFile; +import static org.antlr.v4.test.runtime.RuntimeTestUtils.getOS; +import static org.antlr.v4.test.runtime.RuntimeTestUtils.isWindows; + +/** + * For my own information on I'm recording what I needed to do to get a unit test to compile and run in C++ on the Mac. + * I got a segmentation violation and couldn't figure out how to get information about it, so I turned on debugging + * and then figured out lldb enough to create this issue: https://github.com/antlr/antlr4/issues/3845 on a bug. + * + * cd ~/antlr/code/antlr4/runtime/Cpp + * cmake . -D CMAKE_OSX_ARCHITECTURES="arm64; x86_64" -DCMAKE_BUILD_TYPE=Debug + * make -j 8 + * + * In test dir with generated test code: + * + * clang++ -g -std=c++17 -I /Users/parrt/antlr/code/antlr4/runtime/Cpp/runtime/src -L. 
-lantlr4-runtime *.cpp + * ./a.out input + * + * $ lldb ./a.out input + * (lldb) run + * ... crash ... + * (lldb) thread backtrace + */ +public class CppRunner extends RuntimeRunner { + @Override + public String getLanguage() { + return "Cpp"; + } + + @Override + public String getTitleName() { return "C++"; } + + private static final String runtimeSourcePath; + private static final String runtimeBinaryPath; + private static final String runtimeLibraryFileName; + private static String compilerName; + private static final String visualStudioProjectContent; + private static final Map environment; + + static { + String runtimePath = getRuntimePath("Cpp"); + runtimeSourcePath = Paths.get(runtimePath, "runtime", "src").toString(); + + environment = new HashMap<>(); + if (isWindows()) { + runtimeBinaryPath = Paths.get(runtimePath, "runtime", "bin", "vs-2022", "x64", "Release DLL").toString(); + runtimeLibraryFileName = Paths.get(runtimeBinaryPath, "antlr4-runtime.dll").toString(); + String path = System.getenv("PATH"); + environment.put("PATH", path == null ? runtimeBinaryPath : path + ";" + runtimeBinaryPath); + } + else { + runtimeBinaryPath = Paths.get(runtimePath, "dist").toString(); + runtimeLibraryFileName = Paths.get(runtimeBinaryPath, + "libantlr4-runtime." + (getOS() == OSType.Mac ? 
"dylib" : "so")).toString(); + environment.put("LD_PRELOAD", runtimeLibraryFileName); + } + + if (isWindows()) { + visualStudioProjectContent = RuntimeTestUtils.getTextFromResource("org/antlr/v4/test/runtime/helpers/Test.vcxproj.stg"); + } else { + visualStudioProjectContent = null; + } + } + + @Override + protected String getCompilerName() { + if (compilerName == null) { + if (isWindows()) { + compilerName = "MSBuild"; + } + else { + compilerName = "clang++"; + } + } + + return compilerName; + } + + @Override + protected void initRuntime() throws Exception { + String runtimePath = getRuntimePath(); + + if (isWindows()) { + String[] command = { + getCompilerPath(), "antlr4cpp-vs2022.vcxproj", "/p:configuration=Release DLL", "/p:platform=x64" + }; + + runCommand(command, runtimePath + "\\runtime","build c++ ANTLR runtime using MSBuild"); + } + else { + String[] command = {"cmake", ".", "-DCMAKE_BUILD_TYPE=Release"}; + runCommand(command, runtimePath, "run cmake on antlr c++ runtime"); + + command = new String[] {"make", "-j", Integer.toString(Runtime.getRuntime().availableProcessors())}; + runCommand(command, runtimePath, "run make on antlr c++ runtime"); + } + } + + @Override + protected CompiledState compile(RunOptions runOptions, GeneratedState generatedState) { + if (isWindows()) { + writeVisualStudioProjectFile(runOptions.grammarName, runOptions.lexerName, runOptions.parserName, + runOptions.useListener, runOptions.useVisitor); + } + + Exception exception = null; + try { + if (!isWindows()) { + String[] linkCommand = new String[]{"ln", "-s", runtimeLibraryFileName}; + runCommand(linkCommand, getTempDirPath(), "sym link C++ runtime"); + } + + List buildCommand = new ArrayList<>(); + buildCommand.add(getCompilerPath()); + if (isWindows()) { + buildCommand.add(getTestFileName() + ".vcxproj"); + buildCommand.add("/p:configuration=Release"); + buildCommand.add("/p:platform=x64"); + } + else { + buildCommand.add("-std=c++17"); + buildCommand.add("-I"); + 
buildCommand.add(runtimeSourcePath); + buildCommand.add("-L."); + buildCommand.add("-lantlr4-runtime"); + buildCommand.add("-pthread"); + buildCommand.add("-o"); + buildCommand.add(getTestFileName() + ".out"); + buildCommand.add(getTestFileWithExt()); + buildCommand.addAll(generatedState.generatedFiles.stream().map(file -> file.name).collect(Collectors.toList())); + } + + runCommand(buildCommand.toArray(new String[0]), getTempDirPath(), "build test c++ binary"); + } + catch (Exception ex) { + exception = ex; + } + return new CompiledState(generatedState, exception); + } + + private void writeVisualStudioProjectFile(String grammarName, String lexerName, String parserName, + boolean useListener, boolean useVisitor) { + ST projectFileST = new ST(visualStudioProjectContent); + projectFileST.add("runtimeSourcePath", runtimeSourcePath); + projectFileST.add("runtimeBinaryPath", runtimeBinaryPath); + projectFileST.add("grammarName", grammarName); + projectFileST.add("lexerName", lexerName); + projectFileST.add("parserName", parserName); + projectFileST.add("useListener", useListener); + projectFileST.add("useVisitor", useVisitor); + writeFile(getTempDirPath(), "Test.vcxproj", projectFileST.render()); + } + + @Override + public String getRuntimeToolName() { + return null; + } + + @Override + public String getExecFileName() { + return Paths.get(getTempDirPath(), getTestFileName() + "." + (isWindows() ? "exe" : "out")).toString(); + } + + @Override + public Map getExecEnvironment() { + return environment; + } +} + diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/CppRuntimeTests.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/CppRuntimeTests.java new file mode 100644 index 0000000000..a1728674f1 --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/CppRuntimeTests.java @@ -0,0 +1,17 @@ +/* + * Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. 
+ * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +package org.antlr.v4.test.runtime.cpp; + +import org.antlr.v4.test.runtime.RuntimeTests; +import org.antlr.v4.test.runtime.RuntimeRunner; + +public class CppRuntimeTests extends RuntimeTests { + @Override + protected RuntimeRunner createRuntimeRunner() { + return new CppRunner(); + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestCompositeLexers.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestCompositeLexers.java deleted file mode 100644 index 64800e84d9..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestCompositeLexers.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - -package org.antlr.v4.test.runtime.cpp; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestCompositeLexers extends BaseRuntimeTest { - public TestCompositeLexers(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseCppTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("CompositeLexers", "Cpp"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestCompositeParsers.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestCompositeParsers.java deleted file mode 100644 index ce08ba4b3d..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestCompositeParsers.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. 
All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - -package org.antlr.v4.test.runtime.cpp; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestCompositeParsers extends BaseRuntimeTest { - public TestCompositeParsers(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseCppTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("CompositeParsers", "Cpp"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestFullContextParsing.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestFullContextParsing.java deleted file mode 100644 index 41e26819e7..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestFullContextParsing.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.cpp; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestFullContextParsing extends BaseRuntimeTest { - public TestFullContextParsing(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseCppTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("FullContextParsing", "Cpp"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestLeftRecursion.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestLeftRecursion.java deleted file mode 100644 index 6e62076f12..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestLeftRecursion.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.cpp; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestLeftRecursion extends BaseRuntimeTest { - public TestLeftRecursion(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseCppTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("LeftRecursion", "Cpp"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestLexerErrors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestLexerErrors.java deleted file mode 100644 index 015f9d577c..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestLexerErrors.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.cpp; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestLexerErrors extends BaseRuntimeTest { - public TestLexerErrors(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseCppTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("LexerErrors", "Cpp"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestLexerExec.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestLexerExec.java deleted file mode 100644 index 64ea001509..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestLexerExec.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.cpp; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestLexerExec extends BaseRuntimeTest { - public TestLexerExec(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseCppTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("LexerExec", "Cpp"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestListeners.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestListeners.java deleted file mode 100644 index d575cd74b0..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestListeners.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.cpp; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestListeners extends BaseRuntimeTest { - public TestListeners(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseCppTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("Listeners", "Cpp"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestParseTrees.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestParseTrees.java deleted file mode 100644 index 962f6d1765..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestParseTrees.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.cpp; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestParseTrees extends BaseRuntimeTest { - public TestParseTrees(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseCppTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("ParseTrees", "Cpp"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestParserErrors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestParserErrors.java deleted file mode 100644 index 1f764f03c7..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestParserErrors.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.cpp; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestParserErrors extends BaseRuntimeTest { - public TestParserErrors(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseCppTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("ParserErrors", "Cpp"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestParserExec.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestParserExec.java deleted file mode 100644 index 769aea08ff..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestParserExec.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.cpp; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestParserExec extends BaseRuntimeTest { - public TestParserExec(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseCppTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("ParserExec", "Cpp"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestPerformance.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestPerformance.java deleted file mode 100644 index 81746438b9..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestPerformance.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.cpp; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestPerformance extends BaseRuntimeTest { - public TestPerformance(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseCppTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("Performance", "Cpp"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestSemPredEvalLexer.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestSemPredEvalLexer.java deleted file mode 100644 index cc8d3037f5..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestSemPredEvalLexer.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.cpp; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestSemPredEvalLexer extends BaseRuntimeTest { - public TestSemPredEvalLexer(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseCppTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("SemPredEvalLexer", "Cpp"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestSemPredEvalParser.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestSemPredEvalParser.java deleted file mode 100644 index 0909c68a82..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestSemPredEvalParser.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.cpp; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestSemPredEvalParser extends BaseRuntimeTest { - public TestSemPredEvalParser(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseCppTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("SemPredEvalParser", "Cpp"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestSets.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestSets.java deleted file mode 100644 index ee4e82c2a0..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/cpp/TestSets.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.cpp; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestSets extends BaseRuntimeTest { - public TestSets(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseCppTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("Sets", "Cpp"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/Antlr4.Test.csproj b/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/Antlr4.Test.csproj deleted file mode 100644 index 061e2ff69e..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/Antlr4.Test.csproj +++ /dev/null @@ -1,24 +0,0 @@ - - - - netcoreapp3.1 - $(NoWarn);CS3021 - Test - Exe - Antlr4.Test - false - false - false - false - false - false - false - - - - - Antlr4.Runtime.Standard.dll - - - - diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/BaseCSharpTest.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/BaseCSharpTest.java deleted file mode 100644 index 14ac3bf6f5..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/BaseCSharpTest.java +++ /dev/null @@ -1,436 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ -package org.antlr.v4.test.runtime.csharp; - -import org.antlr.v4.runtime.misc.Utils; -import org.antlr.v4.test.runtime.*; -import org.stringtemplate.v4.ST; - -import java.io.*; -import java.net.URL; -import java.nio.charset.StandardCharsets; -import java.nio.file.Paths; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashSet; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.Set; - -import static org.antlr.v4.test.runtime.BaseRuntimeTest.antlrOnString; -import static org.antlr.v4.test.runtime.BaseRuntimeTest.writeFile; -import static org.junit.Assert.assertTrue; - -public class BaseCSharpTest extends BaseRuntimeTestSupport implements RuntimeTestSupport { - private static Boolean isRuntimeInitialized = false; - private final static String cSharpAntlrRuntimeDllName = "Antlr4.Runtime.Standard.dll"; - private final static String testProjectFileName = "Antlr4.Test.csproj"; - private static String cSharpTestProjectContent; - private static final String cSharpCachingDirectory = Paths.get(cachingDirectory, "CSharp").toString(); - - @Override - protected String getPropertyPrefix() { - return "antlr4-csharp"; - } - - protected String execLexer(String grammarFileName, - String grammarStr, - String lexerName, - String input) { - return execLexer(grammarFileName, grammarStr, lexerName, input, false); - } - - @Override - public String execLexer(String grammarFileName, - String grammarStr, - String lexerName, - String input, - boolean showDFA) { - boolean success = rawGenerateRecognizer(grammarFileName, - grammarStr, - null, - lexerName); - assertTrue(success); - writeFile(getTempDirPath(), "input", input); - writeLexerTestFile(lexerName, showDFA); - addSourceFiles("Test.cs"); - if (!compile()) { - System.err.println("Failed to compile!"); - return getParseErrors(); - } - String output = execTest(); - if (output != null && output.length() == 0) { - output = null; - } - 
return output; - } - - Set sourceFiles = new HashSet<>(); - - private void addSourceFiles(String... files) { - Collections.addAll(sourceFiles, files); - } - - @Override - public String execParser(String grammarFileName, - String grammarStr, - String parserName, - String lexerName, - String listenerName, - String visitorName, - String startRuleName, - String input, - boolean showDiagnosticErrors) { - boolean success = rawGenerateRecognizer(grammarFileName, - grammarStr, - parserName, - lexerName, - "-visitor"); - assertTrue(success); - writeFile(getTempDirPath(), "input", input); - return rawExecRecognizer(parserName, - lexerName, - startRuleName, - showDiagnosticErrors); - } - - /** - * Return true if all is well - */ - protected boolean rawGenerateRecognizer(String grammarFileName, - String grammarStr, - String parserName, - String lexerName, - String... extraOptions) { - return rawGenerateRecognizer(grammarFileName, grammarStr, parserName, lexerName, false, extraOptions); - } - - /** - * Return true if all is well - */ - protected boolean rawGenerateRecognizer(String grammarFileName, - String grammarStr, - String parserName, - String lexerName, - boolean defaultListener, - String... 
extraOptions) { - ErrorQueue equeue = antlrOnString(getTempDirPath(), "CSharp", grammarFileName, grammarStr, defaultListener, extraOptions); - if (!equeue.errors.isEmpty()) { - return false; - } - - List files = new ArrayList(); - if (lexerName != null) { - files.add(lexerName + ".cs"); - } - if (parserName != null) { - files.add(parserName + ".cs"); - Set optionsSet = new HashSet(Arrays.asList(extraOptions)); - String grammarName = grammarFileName.substring(0, grammarFileName.lastIndexOf('.')); - if (!optionsSet.contains("-no-listener")) { - files.add(grammarName + "Listener.cs"); - files.add(grammarName + "BaseListener.cs"); - } - if (optionsSet.contains("-visitor")) { - files.add(grammarName + "Visitor.cs"); - files.add(grammarName + "BaseVisitor.cs"); - } - } - addSourceFiles(files.toArray(new String[0])); - return true; - } - - protected String rawExecRecognizer(String parserName, - String lexerName, - String parserStartRuleName, - boolean debug) { - setParseErrors(null); - if (parserName == null) { - writeLexerTestFile(lexerName, false); - } else { - writeParserTestFile(parserName, - lexerName, - parserStartRuleName, - debug); - } - - addSourceFiles("Test.cs"); - return execRecognizer(); - } - - public String execRecognizer() { - boolean success = compile(); - assertTrue(success); - - String output = execTest(); - if (output != null && output.length() == 0) { - output = null; - } - return output; - } - - public boolean compile() { - try { - return buildProject(); - } - catch (Exception e) { - e.printStackTrace(System.err); - return false; - } - } - - private String locateExec() { - return new File(getTempTestDir(), "bin/Release/netcoreapp3.1/Test.dll").getAbsolutePath(); - } - - public boolean buildProject() { - try { - assertTrue(initializeRuntime()); - // save auxiliary files - try (PrintWriter out = new PrintWriter(new File(getTempTestDir(), testProjectFileName))) { - out.print(cSharpTestProjectContent); - } - - // build test - String[] args = new String[] 
{ "dotnet", "build", testProjectFileName, "-c", "Release" }; - boolean success = runProcess(args, getTempDirPath()); - assertTrue(success); - } - catch (Exception e) { - e.printStackTrace(System.err); - return false; - } - - return true; - } - - private boolean initializeRuntime() { - // Compile runtime project once per overall maven test session (assuming forkCount=0) - synchronized (BaseCSharpTest.class) { - if ( isRuntimeInitialized) { -// System.out.println("C# runtime build REUSED\n"); - return true; - } - - System.out.println("Building C# runtime\n"); - - // find runtime package - final ClassLoader loader = Thread.currentThread().getContextClassLoader(); - final URL runtimeProj = loader.getResource("CSharp/src/Antlr4.csproj"); - if (runtimeProj == null) { - throw new RuntimeException("C# runtime project file not found!"); - } - File runtimeProjFile = new File(runtimeProj.getFile()); - String runtimeProjPath = runtimeProjFile.getPath(); - - RuntimeTestUtils.mkdir(cSharpCachingDirectory); - String[] args = new String[]{ - "dotnet", - "build", - runtimeProjPath, - "-c", - "Release", - "-o", - cSharpCachingDirectory - }; - - boolean success; - try { - String cSharpTestProjectResourceName = BaseCSharpTest.class.getPackage().getName().replace(".", "/") + "/"; - InputStream inputStream = Thread.currentThread().getContextClassLoader().getResourceAsStream(cSharpTestProjectResourceName + testProjectFileName); - int bufferSize = 1024; - char[] buffer = new char[bufferSize]; - StringBuilder out = new StringBuilder(); - Reader in = new InputStreamReader(inputStream, StandardCharsets.UTF_8); - for (int numRead; (numRead = in.read(buffer, 0, buffer.length)) > 0; ) { - out.append(buffer, 0, numRead); - } - cSharpTestProjectContent = out.toString().replace(cSharpAntlrRuntimeDllName, Paths.get(cSharpCachingDirectory, cSharpAntlrRuntimeDllName).toString()); - - success = runProcess(args, cSharpCachingDirectory); - } catch (Exception e) { - e.printStackTrace(System.err); - 
success = false; - } - - if (success) System.out.println("C# runtime build succeeded\n"); - else System.out.println("C# runtime build failed\n"); - - isRuntimeInitialized = true; // try only once - return success; - } - } - - private boolean runProcess(String[] args, String path) throws Exception { - return runProcess(args, path, 0); - } - - private boolean runProcess(String[] args, String path, int retries) throws Exception { - ProcessBuilder pb = new ProcessBuilder(args); - pb.directory(new File(path)); - Process process = pb.start(); - StreamVacuum stdoutVacuum = new StreamVacuum(process.getInputStream()); - StreamVacuum stderrVacuum = new StreamVacuum(process.getErrorStream()); - stdoutVacuum.start(); - stderrVacuum.start(); - process.waitFor(); - stdoutVacuum.join(); - stderrVacuum.join(); - int exitValue = process.exitValue(); - boolean success = (exitValue == 0); - if (!success) { - setParseErrors(stderrVacuum.toString()); - System.err.println("runProcess command: " + Utils.join(args, " ")); - System.err.println("runProcess exitValue: " + exitValue); - System.err.println("runProcess stdoutVacuum: " + stdoutVacuum); - System.err.println("runProcess stderrVacuum: " + getParseErrors()); - } - if (exitValue == 132) { - // Retry after SIGILL. We are seeing this intermittently on - // macOS (issue #2078). 
- if (retries < 3) { - System.err.println("runProcess retrying; " + retries + - " retries so far"); - return runProcess(args, path, retries + 1); - } else { - System.err.println("runProcess giving up after " + retries + - " retries"); - return false; - } - } - return success; - } - - public String execTest() { - String exec = locateExec(); - try { - File tmpdirFile = new File(getTempDirPath()); - String[] args = new String[] { "dotnet", exec, new File(getTempTestDir(), "input").getAbsolutePath() }; - ProcessBuilder pb = new ProcessBuilder(args); - pb.directory(tmpdirFile); - Process process = pb.start(); - StreamVacuum stdoutVacuum = new StreamVacuum(process.getInputStream()); - StreamVacuum stderrVacuum = new StreamVacuum(process.getErrorStream()); - stdoutVacuum.start(); - stderrVacuum.start(); - process.waitFor(); - stdoutVacuum.join(); - stderrVacuum.join(); - process.exitValue(); - String stdoutString = stdoutVacuum.toString(); - String stderrString = stderrVacuum.toString(); - setParseErrors(stderrString); - return stdoutString; - } - catch (Exception e) { - System.err.println("can't exec recognizer"); - e.printStackTrace(System.err); - } - return null; - } - - protected void writeParserTestFile(String parserName, - String lexerName, - String parserStartRuleName, - boolean debug) { - ST outputFileST = new ST( - "using System;\n" + - "using Antlr4.Runtime;\n" + - "using Antlr4.Runtime.Tree;\n" + - "using System.Text;\n" + - "\n" + - "public class Test {\n" + - " public static void Main(string[] args) {\n" + - " Console.OutputEncoding = Encoding.UTF8;\n" + - " Console.InputEncoding = Encoding.UTF8;\n" + - " var input = CharStreams.fromPath(args[0]);\n" + - " lex = new (input);\n" + - " CommonTokenStream tokens = new CommonTokenStream(lex);\n" + - " \n" + - " parser.BuildParseTree = true;\n" + - " ParserRuleContext tree = parser.();\n" + - " ParseTreeWalker.Default.Walk(new TreeShapeListener(), tree);\n" + - " }\n" + - "}\n" + - "\n" + - "class TreeShapeListener 
: IParseTreeListener {\n" + - " public void VisitTerminal(ITerminalNode node) { }\n" + - " public void VisitErrorNode(IErrorNode node) { }\n" + - " public void ExitEveryRule(ParserRuleContext ctx) { }\n" + - "\n" + - " public void EnterEveryRule(ParserRuleContext ctx) {\n" + - " for (int i = 0; i \\< ctx.ChildCount; i++) {\n" + - " IParseTree parent = ctx.GetChild(i).Parent;\n" + - " if (!(parent is IRuleNode) || ((IRuleNode)parent).RuleContext != ctx) {\n" + - " throw new Exception(\"Invalid parse tree shape detected.\");\n" + - " }\n" + - " }\n" + - " }\n" + - "}" - ); - ST createParserST = new ST(" parser = new (tokens);\n"); - if (debug) { - createParserST = - new ST( - " parser = new (tokens);\n" + - " parser.AddErrorListener(new DiagnosticErrorListener());\n"); - } - outputFileST.add("createParser", createParserST); - outputFileST.add("parserName", parserName); - outputFileST.add("lexerName", lexerName); - outputFileST.add("parserStartRuleName", parserStartRuleName); - writeFile(getTempDirPath(), "Test.cs", outputFileST.render()); - } - - protected void writeLexerTestFile(String lexerName, boolean showDFA) { - ST outputFileST = new ST( - "using System;\n" + - "using Antlr4.Runtime;\n" + - "using System.IO;\n" + - "using System.Text;\n" + - "\n" + - "public class Test {\n" + - " public static void Main(string[] args) {\n" + - " Console.OutputEncoding = Encoding.UTF8;\n" + - " Console.InputEncoding = Encoding.UTF8;\n" + - " var input = CharStreams.fromPath(args[0]);\n" + - " lex = new (input);\n" + - " CommonTokenStream tokens = new CommonTokenStream(lex);\n" + - " tokens.Fill();\n" + - " foreach (object t in tokens.GetTokens())\n" + - " Console.Out.WriteLine(t);\n" + - (showDFA ? 
" Console.Out.Write(lex.Interpreter.GetDFA(Lexer.DEFAULT_MODE).ToLexerString());\n" : "") + - " }\n" + - "}" - ); - - outputFileST.add("lexerName", lexerName); - writeFile(getTempDirPath(), "Test.cs", outputFileST.render()); - } - - /** - * Return map sorted by key - */ - public , V> LinkedHashMap sort(Map data) { - LinkedHashMap dup = new LinkedHashMap(); - List keys = new ArrayList(data.keySet()); - Collections.sort(keys); - for (K k : keys) { - dup.put(k, data.get(k)); - } - return dup; - } - - protected static void assertEquals(String msg, int a, int b) { - org.junit.Assert.assertEquals(msg, a, b); - } - - protected static void assertEquals(String a, String b) { - org.junit.Assert.assertEquals(a, b); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/CSharpRunner.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/CSharpRunner.java new file mode 100644 index 0000000000..22c77674fa --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/CSharpRunner.java @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */ +package org.antlr.v4.test.runtime.csharp; + +import org.antlr.v4.test.runtime.*; +import org.antlr.v4.test.runtime.states.CompiledState; +import org.antlr.v4.test.runtime.states.GeneratedState; +import org.stringtemplate.v4.ST; + +import java.nio.file.Paths; + +import static org.antlr.v4.test.runtime.FileUtils.mkdir; +import static org.antlr.v4.test.runtime.FileUtils.writeFile; + +public class CSharpRunner extends RuntimeRunner { + @Override + public String getLanguage() { return "CSharp"; } + + @Override + public String getTitleName() { return "C#"; } + + @Override + public String getExtension() { return "cs"; } + + @Override + public String getRuntimeToolName() { return "dotnet"; } + + @Override + public String getExecFileName() { return getTestFileName() + ".dll"; } + + private final static String testProjectFileName = "Antlr4.Test.csproj"; + private final static String cSharpAntlrRuntimeDllName = + Paths.get(getCachePath("CSharp"), "Antlr4.Runtime.Standard.dll").toString(); + + private final static String cSharpTestProjectContent; + + static { + ST projectTemplate = new ST(RuntimeTestUtils.getTextFromResource("org/antlr/v4/test/runtime/helpers/Antlr4.Test.csproj.stg")); + projectTemplate.add("runtimeLibraryPath", cSharpAntlrRuntimeDllName); + cSharpTestProjectContent = projectTemplate.render(); + } + + @Override + protected void initRuntime() throws Exception { + String cachePath = getCachePath(); + mkdir(cachePath); + String projectPath = Paths.get(getRuntimePath(), "src", "Antlr4.csproj").toString(); + String[] args = new String[]{getRuntimeToolPath(), "build", projectPath, "-c", "Release", "-o", cachePath}; + runCommand(args, cachePath, "build " + getTitleName() + " ANTLR runtime"); + } + + @Override + public CompiledState compile(RunOptions runOptions, GeneratedState generatedState) { + Exception exception = null; + try { + writeFile(getTempDirPath(), testProjectFileName, cSharpTestProjectContent); + runCommand(new String[]{getRuntimeToolPath(), 
"build", testProjectFileName, "-c", "Release"}, getTempDirPath(), + "build C# test binary"); + } catch (Exception e) { + exception = e; + } + return new CompiledState(generatedState, exception); + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/CSharpRuntimeTests.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/CSharpRuntimeTests.java new file mode 100644 index 0000000000..3d466ae259 --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/CSharpRuntimeTests.java @@ -0,0 +1,17 @@ +/* + * Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +package org.antlr.v4.test.runtime.csharp; + +import org.antlr.v4.test.runtime.RuntimeTests; +import org.antlr.v4.test.runtime.RuntimeRunner; + +public class CSharpRuntimeTests extends RuntimeTests { + @Override + protected RuntimeRunner createRuntimeRunner() { + return new CSharpRunner(); + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestCompositeLexers.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestCompositeLexers.java deleted file mode 100644 index 75ecb9d111..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestCompositeLexers.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.csharp; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestCompositeLexers extends BaseRuntimeTest { - public TestCompositeLexers(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseCSharpTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("CompositeLexers", "CSharp"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestCompositeParsers.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestCompositeParsers.java deleted file mode 100644 index f27b2aa0b2..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestCompositeParsers.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.csharp; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestCompositeParsers extends BaseRuntimeTest { - public TestCompositeParsers(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseCSharpTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("CompositeParsers", "CSharp"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestFullContextParsing.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestFullContextParsing.java deleted file mode 100644 index 01e9642616..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestFullContextParsing.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.csharp; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestFullContextParsing extends BaseRuntimeTest { - public TestFullContextParsing(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseCSharpTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("FullContextParsing", "CSharp"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestLeftRecursion.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestLeftRecursion.java deleted file mode 100644 index b9ad2b871a..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestLeftRecursion.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.csharp; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestLeftRecursion extends BaseRuntimeTest { - public TestLeftRecursion(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseCSharpTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("LeftRecursion", "CSharp"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestLexerErrors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestLexerErrors.java deleted file mode 100644 index b21011c40c..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestLexerErrors.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.csharp; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestLexerErrors extends BaseRuntimeTest { - public TestLexerErrors(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseCSharpTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("LexerErrors", "CSharp"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestLexerExec.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestLexerExec.java deleted file mode 100644 index a6ba3ef2a4..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestLexerExec.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.csharp; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestLexerExec extends BaseRuntimeTest { - public TestLexerExec(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseCSharpTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("LexerExec", "CSharp"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestListeners.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestListeners.java deleted file mode 100644 index 84058c0e86..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestListeners.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.csharp; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestListeners extends BaseRuntimeTest { - public TestListeners(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseCSharpTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("Listeners", "CSharp"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestParseTrees.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestParseTrees.java deleted file mode 100644 index 99941d4e2e..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestParseTrees.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.csharp; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestParseTrees extends BaseRuntimeTest { - public TestParseTrees(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseCSharpTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("ParseTrees", "CSharp"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestParserErrors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestParserErrors.java deleted file mode 100644 index f4dab02020..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestParserErrors.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.csharp; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestParserErrors extends BaseRuntimeTest { - public TestParserErrors(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseCSharpTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("ParserErrors", "CSharp"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestParserExec.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestParserExec.java deleted file mode 100644 index 30c63b97bf..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestParserExec.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.csharp; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestParserExec extends BaseRuntimeTest { - public TestParserExec(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseCSharpTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("ParserExec", "CSharp"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestPerformance.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestPerformance.java deleted file mode 100644 index b0d5f5f89a..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestPerformance.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.csharp; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestPerformance extends BaseRuntimeTest { - public TestPerformance(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseCSharpTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("Performance", "CSharp"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestSemPredEvalLexer.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestSemPredEvalLexer.java deleted file mode 100644 index 4c353990bd..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestSemPredEvalLexer.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.csharp; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestSemPredEvalLexer extends BaseRuntimeTest { - public TestSemPredEvalLexer(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseCSharpTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("SemPredEvalLexer", "CSharp"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestSemPredEvalParser.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestSemPredEvalParser.java deleted file mode 100644 index f1771833d5..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestSemPredEvalParser.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.csharp; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestSemPredEvalParser extends BaseRuntimeTest { - public TestSemPredEvalParser(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseCSharpTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("SemPredEvalParser", "CSharp"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestSets.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestSets.java deleted file mode 100644 index b45fadf9bb..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/csharp/TestSets.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.csharp; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestSets extends BaseRuntimeTest { - public TestSets(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseCSharpTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("Sets", "CSharp"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/BaseDartTest.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/BaseDartTest.java deleted file mode 100644 index dbfbb278f5..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/BaseDartTest.java +++ /dev/null @@ -1,427 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.dart; - -import org.antlr.v4.misc.Utils; -import org.antlr.v4.test.runtime.*; -import org.stringtemplate.v4.ST; - -import java.io.*; -import java.net.URL; -import java.util.*; - -import static junit.framework.TestCase.*; -import static org.antlr.v4.test.runtime.BaseRuntimeTest.readFile; -import static org.antlr.v4.test.runtime.BaseRuntimeTest.writeFile; - - -public class BaseDartTest extends BaseRuntimeTestSupport implements RuntimeTestSupport { - - private static String cacheDartPackages; - private static String cacheDartPackageConfig; - - public String getPropertyPrefix() { - return "antlr-dart"; - } - - @Override - public String execLexer(String grammarFileName, - String grammarStr, - String lexerName, - String input, - boolean showDFA) { - boolean success = rawGenerateAndBuildRecognizer(grammarFileName, - grammarStr, - null, - lexerName); - assertTrue(success); - writeFile(getTempDirPath(), "input", input); - writeLexerTestFile(lexerName, showDFA); - String output = execClass("Test", false); - return output; - } - - @Override - public String execParser(String grammarFileName, - String grammarStr, - String parserName, - String lexerName, - String listenerName, - String visitorName, - String startRuleName, - String input, - boolean showDiagnosticErrors) { - return execParser(grammarFileName, grammarStr, parserName, lexerName, - listenerName, visitorName, startRuleName, input, showDiagnosticErrors, false); - } - - public String execParser(String grammarFileName, - String grammarStr, - String parserName, - String lexerName, - String listenerName, - String visitorName, - String startRuleName, - String input, - boolean showDiagnosticErrors, - boolean profile) { - boolean success = rawGenerateAndBuildRecognizer(grammarFileName, - grammarStr, - parserName, - lexerName, - "-visitor"); - assertTrue(success); - writeFile(getTempDirPath(), "input", input); - return rawExecRecognizer(parserName, - lexerName, - startRuleName, - 
showDiagnosticErrors, - profile, - false); - } - - /** - * Return true if all is well - */ - protected boolean rawGenerateAndBuildRecognizer(String grammarFileName, - String grammarStr, - String parserName, - String lexerName, - String... extraOptions) { - return rawGenerateAndBuildRecognizer(grammarFileName, grammarStr, parserName, lexerName, false, extraOptions); - } - - /** - * Return true if all is well - */ - protected boolean rawGenerateAndBuildRecognizer(String grammarFileName, - String grammarStr, - String parserName, - String lexerName, - boolean defaultListener, - String... extraOptions) { - ErrorQueue equeue = - BaseRuntimeTest.antlrOnString(getTempDirPath(), "Dart", grammarFileName, grammarStr, defaultListener, extraOptions); - if (!equeue.errors.isEmpty()) { - return false; - } - - List files = new ArrayList(); - if (lexerName != null) { - files.add(lexerName + ".dart"); - } - if (parserName != null) { - files.add(parserName + ".dart"); - Set optionsSet = new HashSet(Arrays.asList(extraOptions)); - String grammarName = grammarFileName.substring(0, grammarFileName.lastIndexOf('.')); - if (!optionsSet.contains("-no-listener")) { - files.add(grammarName + "Listener.dart"); - files.add(grammarName + "BaseListener.dart"); - } - if (optionsSet.contains("-visitor")) { - files.add(grammarName + "Visitor.dart"); - files.add(grammarName + "BaseVisitor.dart"); - } - } - - String runtime = locateRuntime(); - writeFile(getTempDirPath(), "pubspec.yaml", - "name: \"test\"\n" + - "dependencies:\n" + - " antlr4:\n" + - " path: " + runtime + "\n" + - "environment:\n" + - " sdk: \">=2.12.0 <3.0.0\"\n"); - final File dartToolDir = new File(getTempDirPath(), ".dart_tool"); - if (cacheDartPackages == null) { - try { - final Process process = - Runtime.getRuntime().exec( - new String[]{locateDart(), "pub", "get"}, null, getTempTestDir()); - StreamVacuum stderrVacuum = new StreamVacuum(process.getErrorStream()); - stderrVacuum.start(); - Timer timer = new Timer(); - 
timer.schedule(new TimerTask() { - @Override - public void run() { - try { - process.destroy(); - } catch(Exception e) { - e.printStackTrace(System.err); - } - } - }, 30_000); - process.waitFor(); - timer.cancel(); - stderrVacuum.join(); - String stderrDuringPubGet = stderrVacuum.toString(); - if (!stderrDuringPubGet.isEmpty()) { - System.out.println("Pub Get error: " + stderrVacuum); - } - } catch (IOException | InterruptedException e) { - e.printStackTrace(); - return false; - } - cacheDartPackages = readFile(getTempDirPath(), ".packages"); - cacheDartPackageConfig = readFile(dartToolDir.getAbsolutePath(), "package_config.json"); - } else { - writeFile(getTempDirPath(), ".packages", cacheDartPackages); - //noinspection ResultOfMethodCallIgnored - dartToolDir.mkdir(); - writeFile(dartToolDir.getAbsolutePath(), "package_config.json", cacheDartPackageConfig); - } - return true; // allIsWell: no compile - } - - protected String rawExecRecognizer(String parserName, - String lexerName, - String parserStartRuleName, - boolean debug, - boolean profile, - boolean aotCompile) { - setParseErrors(null); - if (parserName == null) { - writeLexerTestFile(lexerName, false); - } - else { - writeTestFile(parserName, - lexerName, - parserStartRuleName, - debug, - profile); - } - - return execClass("Test", aotCompile); - } - - public String execClass(String className, boolean compile) { - try { - if (compile) { - String[] args = new String[]{ - locateDart(), - "compile", "exe", className + ".dart", "-o", className - }; - String cmdLine = Utils.join(args, " "); - System.err.println("Compile: " + cmdLine); - final Process process = - Runtime.getRuntime().exec(args, null, getTempTestDir()); - StreamVacuum stderrVacuum = new StreamVacuum(process.getErrorStream()); - stderrVacuum.start(); - Timer timer = new Timer(); - timer.schedule(new TimerTask() { - @Override - public void run() { - try { - process.destroy(); - } catch(Exception e) { - e.printStackTrace(System.err); - } - } - }, 
30_000); - int result = process.waitFor(); - timer.cancel(); - if (result != 0) { - stderrVacuum.join(); - System.err.print("Error compiling dart file: " + stderrVacuum); - } - } - - String[] args; - if (compile) { - args = new String[]{ - new File(getTempTestDir(), className).getAbsolutePath(), new File(getTempTestDir(), "input").getAbsolutePath() - }; - } else { - args = new String[]{ - locateDart(), - className + ".dart", new File(getTempTestDir(), "input").getAbsolutePath() - }; - } - //String cmdLine = Utils.join(args, " "); - //System.err.println("execParser: " + cmdLine); - final Process process = - Runtime.getRuntime().exec(args, null, getTempTestDir()); - StreamVacuum stdoutVacuum = new StreamVacuum(process.getInputStream()); - StreamVacuum stderrVacuum = new StreamVacuum(process.getErrorStream()); - stdoutVacuum.start(); - stderrVacuum.start(); - Timer timer = new Timer(); - timer.schedule(new TimerTask() { - @Override - public void run() { - try { - process.destroy(); - } catch(Exception e) { - e.printStackTrace(System.err); - } - } - }, 30_000); - process.waitFor(); - timer.cancel(); - stdoutVacuum.join(); - stderrVacuum.join(); - String output = stdoutVacuum.toString(); - if (output.length() == 0) { - output = null; - } - if (stderrVacuum.toString().length() > 0) { - setParseErrors(stderrVacuum.toString()); - } - return output; - } catch (Exception e) { - System.err.println("can't exec recognizer"); - e.printStackTrace(System.err); - } - return null; - } - - private String locateTool(String tool) { - final String dartPath = System.getProperty("DART_PATH"); - - final String[] tools = isWindows() - ? new String[]{tool + ".exe", tool + ".bat", tool} - : new String[]{tool}; - - if (dartPath != null) { - for (String t : tools) { - if (new File(dartPath + t).exists()) { - return dartPath + t; - } - } - } - - final String[] roots = isWindows() - ? 
new String[]{"C:\\tools\\dart-sdk\\bin\\"} - : new String[]{"/usr/local/bin/", "/opt/local/bin/", "/opt/homebrew/bin/", "/usr/bin/", "/usr/lib/dart/bin/", "/usr/local/opt/dart/libexec"}; - - for (String root : roots) { - for (String t : tools) { - if (new File(root + t).exists()) { - return root + t; - } - } - } - - throw new RuntimeException("Could not locate " + tool); - } - - protected String locateDart() { - String propName = getPropertyPrefix() + "-dart"; - String prop = System.getProperty(propName); - - if (prop == null || prop.length() == 0) { - prop = locateTool("dart"); - } - - File file = new File(prop); - - if (!file.exists()) { - throw new RuntimeException("Missing system property:" + propName); - } - - return file.getAbsolutePath(); - } - - private String locateRuntime() { - final ClassLoader loader = Thread.currentThread().getContextClassLoader(); - final URL runtimeSrc = loader.getResource("Dart"); - if (runtimeSrc == null) { - throw new RuntimeException("Cannot find Dart runtime"); - } - if (isWindows()) { - return runtimeSrc.getPath().replaceFirst("/", ""); - } - return runtimeSrc.getPath(); - } - - protected void writeTestFile(String parserName, - String lexerName, - String parserStartRuleName, - boolean debug, - boolean profile) { - ST outputFileST = new ST( - "import 'package:antlr4/antlr4.dart';\n" + - "\n" + - "import '.dart';\n" + - "import '.dart';\n" + - "\n" + - "void main(List\\ args) async {\n" + - " CharStream input = await InputStream.fromPath(args[0]);\n" + - " final lex = (input);\n" + - " final tokens = CommonTokenStream(lex);\n" + - " \n" + - " parser.buildParseTree = true;\n" + - " \n" + - " ParserRuleContext tree = parser.();\n" + - " print('[${profiler.getDecisionInfo().join(', ')}]');\n" + - " ParseTreeWalker.DEFAULT.walk(TreeShapeListener(), tree);\n" + - "}\n" + - "\n" + - "class TreeShapeListener implements ParseTreeListener {\n" + - " @override void visitTerminal(TerminalNode node) {}\n" + - "\n" + - " @override void 
visitErrorNode(ErrorNode node) {}\n" + - "\n" + - " @override void exitEveryRule(ParserRuleContext ctx) {}\n" + - "\n" + - " @override\n" + - " void enterEveryRule(ParserRuleContext ctx) {\n" + - " for (var i = 0; i \\< ctx.childCount; i++) {\n" + - " final parent = ctx.getChild(i)?.parent;\n" + - " if (!(parent is RuleNode) || (parent as RuleNode).ruleContext != ctx) {\n" + - " throw StateError('Invalid parse tree shape detected.');\n" + - " }\n" + - " }\n" + - " }\n" + - "}\n" - ); - ST createParserST = new ST("final parser = (tokens);\n"); - if (debug) { - createParserST = - new ST( - "final parser = (tokens);\n" + - " parser.addErrorListener(new DiagnosticErrorListener());\n"); - } - if (profile) { - outputFileST.add("profile", - "ProfilingATNSimulator profiler = ProfilingATNSimulator(parser);\n" + - "parser.setInterpreter(profiler);"); - } else { - outputFileST.add("profile", new ArrayList()); - } - outputFileST.add("createParser", createParserST); - outputFileST.add("parserName", parserName); - outputFileST.add("lexerName", lexerName); - outputFileST.add("parserStartRuleName", parserStartRuleName); - writeFile(getTempDirPath(), "Test.dart", outputFileST.render()); - } - - protected void writeLexerTestFile(String lexerName, boolean showDFA) { - ST outputFileST = new ST( - "import 'dart:io';\n" + - "\n" + - "import 'package:antlr4/antlr4.dart';\n" + - "\n" + - "import '.dart';\n" + - "\n" + - "void main(List\\ args) async {\n" + - " CharStream input = await InputStream.fromPath(args[0]);\n" + - " lex = (input);\n" + - " CommonTokenStream tokens = CommonTokenStream(lex);\n" + - " tokens.fill();\n" + - " for (Object t in tokens.getTokens()!)\n" + - " print(t);\n" + - "\n" + - (showDFA ? 
"stdout.write(lex.interpreter!.getDFA(Lexer.DEFAULT_MODE).toLexerString());\n" : "") + - "}\n" - ); - - outputFileST.add("lexerName", lexerName); - writeFile(getTempDirPath(), "Test.dart", outputFileST.render()); - } - -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/DartRunner.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/DartRunner.java new file mode 100644 index 0000000000..929c9fa7ef --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/DartRunner.java @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +package org.antlr.v4.test.runtime.dart; + +import org.antlr.v4.test.runtime.*; +import org.antlr.v4.test.runtime.states.CompiledState; +import org.antlr.v4.test.runtime.states.GeneratedState; +import org.stringtemplate.v4.ST; + +import java.io.*; + +import static org.antlr.v4.test.runtime.FileUtils.*; +import static org.antlr.v4.test.runtime.RuntimeTestUtils.FileSeparator; + +public class DartRunner extends RuntimeRunner { + @Override + public String getLanguage() { + return "Dart"; + } + + private static String cacheDartPackageConfig; + + @Override + protected void initRuntime() throws Exception { + String cachePath = getCachePath(); + mkdir(cachePath); + + ST projectTemplate = new ST(RuntimeTestUtils.getTextFromResource("org/antlr/v4/test/runtime/helpers/pubspec.yaml.stg")); + projectTemplate.add("runtimePath", getRuntimePath()); + + writeFile(cachePath, "pubspec.yaml", projectTemplate.render()); + + runCommand(new String[]{getRuntimeToolPath(), "pub", "get"}, cachePath); + + cacheDartPackageConfig = readFile(cachePath + FileSeparator + ".dart_tool", "package_config.json"); + } + + @Override + protected CompiledState compile(RunOptions runOptions, GeneratedState generatedState) { + String dartToolDirPath = new File(getTempDirPath(), 
".dart_tool").getAbsolutePath(); + mkdir(dartToolDirPath); + writeFile(dartToolDirPath, "package_config.json", cacheDartPackageConfig); + + return new CompiledState(generatedState, null); + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/DartRuntimeTests.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/DartRuntimeTests.java new file mode 100644 index 0000000000..b049f7d0c3 --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/DartRuntimeTests.java @@ -0,0 +1,17 @@ +/* + * Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +package org.antlr.v4.test.runtime.dart; + +import org.antlr.v4.test.runtime.RuntimeTests; +import org.antlr.v4.test.runtime.RuntimeRunner; + +public class DartRuntimeTests extends RuntimeTests { + @Override + protected RuntimeRunner createRuntimeRunner() { + return new DartRunner(); + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestCompositeLexers.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestCompositeLexers.java deleted file mode 100644 index 27585f139d..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestCompositeLexers.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.dart; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestCompositeLexers extends BaseRuntimeTest { - public TestCompositeLexers(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseDartTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("CompositeLexers", "Dart"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestCompositeParsers.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestCompositeParsers.java deleted file mode 100644 index 049f1d8616..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestCompositeParsers.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.dart; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestCompositeParsers extends BaseRuntimeTest { - public TestCompositeParsers(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseDartTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("CompositeParsers", "Dart"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestFullContextParsing.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestFullContextParsing.java deleted file mode 100644 index 9b3a45385e..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestFullContextParsing.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.dart; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestFullContextParsing extends BaseRuntimeTest { - public TestFullContextParsing(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseDartTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("FullContextParsing", "Dart"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestLeftRecursion.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestLeftRecursion.java deleted file mode 100644 index b1af01a1a6..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestLeftRecursion.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.dart; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestLeftRecursion extends BaseRuntimeTest { - public TestLeftRecursion(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseDartTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("LeftRecursion", "Dart"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestLexerErrors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestLexerErrors.java deleted file mode 100644 index 668c990d40..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestLexerErrors.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.dart; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestLexerErrors extends BaseRuntimeTest { - public TestLexerErrors(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseDartTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("LexerErrors", "Dart"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestLexerExec.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestLexerExec.java deleted file mode 100644 index a6d5736a67..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestLexerExec.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.dart; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestLexerExec extends BaseRuntimeTest { - public TestLexerExec(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseDartTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("LexerExec", "Dart"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestListeners.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestListeners.java deleted file mode 100644 index 314b96f804..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestListeners.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.dart; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestListeners extends BaseRuntimeTest { - public TestListeners(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseDartTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("Listeners", "Dart"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestParseTrees.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestParseTrees.java deleted file mode 100644 index 992c33cd08..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestParseTrees.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.dart; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestParseTrees extends BaseRuntimeTest { - public TestParseTrees(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseDartTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("ParseTrees", "Dart"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestParserErrors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestParserErrors.java deleted file mode 100644 index 2f9c423f67..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestParserErrors.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.dart; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestParserErrors extends BaseRuntimeTest { - public TestParserErrors(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseDartTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("ParserErrors", "Dart"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestParserExec.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestParserExec.java deleted file mode 100644 index a6f3812bf9..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestParserExec.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.dart; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestParserExec extends BaseRuntimeTest { - public TestParserExec(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseDartTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("ParserExec", "Dart"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestPerformance.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestPerformance.java deleted file mode 100644 index fe290bbaa1..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestPerformance.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.dart; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestPerformance extends BaseRuntimeTest { - public TestPerformance(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseDartTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("Performance", "Dart"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestSemPredEvalLexer.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestSemPredEvalLexer.java deleted file mode 100644 index f6d870427f..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestSemPredEvalLexer.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.dart; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestSemPredEvalLexer extends BaseRuntimeTest { - public TestSemPredEvalLexer(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseDartTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("SemPredEvalLexer", "Dart"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestSemPredEvalParser.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestSemPredEvalParser.java deleted file mode 100644 index 7bee5f541b..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestSemPredEvalParser.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.dart; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestSemPredEvalParser extends BaseRuntimeTest { - public TestSemPredEvalParser(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseDartTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("SemPredEvalParser", "Dart"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestSets.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestSets.java deleted file mode 100644 index aaf3c25632..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/dart/TestSets.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.dart; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestSets extends BaseRuntimeTest { - public TestSets(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseDartTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("Sets", "Dart"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/go/BaseGoTest.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/go/BaseGoTest.java deleted file mode 100644 index 69f02de2a5..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/go/BaseGoTest.java +++ /dev/null @@ -1,387 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ -package org.antlr.v4.test.runtime.go; - - -import org.antlr.v4.test.runtime.*; -import org.junit.Assert; -import org.stringtemplate.v4.ST; - -import java.io.*; -import java.net.URL; -import java.nio.charset.StandardCharsets; -import java.nio.file.*; -import java.nio.file.attribute.BasicFileAttributes; -import java.util.EnumSet; - -import static junit.framework.TestCase.*; -import static org.antlr.v4.test.runtime.BaseRuntimeTest.antlrOnString; -import static org.antlr.v4.test.runtime.BaseRuntimeTest.writeFile; -import static org.junit.Assert.fail; - -public class BaseGoTest extends BaseRuntimeTestSupport implements RuntimeTestSupport { - private final static String antlrTestPackageName = "antlr"; - private static final String goModFileName = "go.mod"; - private static final String GO_RUNTIME_IMPORT_PATH = "github.com/antlr/antlr4/runtime/Go/antlr"; // TODO: Change this before merging with upstream - private static boolean isRuntimeInitialized = false; - private static String newGoRootString; - private static String goModContent = null; - - private File parserTempDir; // "parser" with tempDir - - @Override - protected String getPropertyPrefix() { - return "antlr4-go"; - } - - public static void groupSetUp() throws Exception { } - public static void groupTearDown() throws Exception { } - - public void testSetUp() throws Exception { - eraseParserTempDir(); - super.testSetUp(); - parserTempDir = new File(getTempTestDir(), "parser"); - } - - @Override - public File getTempParserDir() { - return parserTempDir; - } - - private void eraseParserTempDir() { - if(parserTempDir != null) { - eraseDirectory(parserTempDir); - parserTempDir = null; - } - } - - protected String execLexer(String grammarFileName, String grammarStr, - String lexerName, String input) { - return execLexer(grammarFileName, grammarStr, lexerName, input, false); - } - - @Override - public String execLexer(String grammarFileName, String grammarStr, - String lexerName, String input, boolean showDFA) { - 
boolean success = rawGenerateAndBuildRecognizer(grammarFileName, - grammarStr, null, lexerName, "-no-listener"); - assertTrue(success); - replaceImportPath(); - writeFile(getTempDirPath(), "input", input); - writeLexerTestFile(lexerName, showDFA); - writeGoModFile(); - return execModule("Test.go"); - } - - @Override - public String execParser(String grammarFileName, String grammarStr, - String parserName, String lexerName, String listenerName, - String visitorName, String startRuleName, String input, - boolean showDiagnosticErrors) - { - boolean success = rawGenerateAndBuildRecognizer(grammarFileName, - grammarStr, parserName, lexerName, "-visitor"); - assertTrue(success); - replaceImportPath(); - writeFile(getTempDirPath(), "input", input); - writeGoModFile(); - rawBuildRecognizerTestFile(parserName, lexerName, listenerName, - visitorName, startRuleName, showDiagnosticErrors); - return execModule("Test.go"); - } - - private void writeGoModFile() { - if (goModContent == null) { - try { - ProcessBuilder pb = new ProcessBuilder("go", "mod", "init", "test"); - pb.directory(getTempTestDir()); - pb.redirectErrorStream(true); - Process process = pb.start(); - StreamVacuum sucker = new StreamVacuum(process.getInputStream()); - sucker.start(); - int exit = process.waitFor(); - sucker.join(); - if (exit != 0) { - throw new Exception("Non-zero exit while setting up go module: " + sucker); - } - goModContent = new String(Files.readAllBytes(Paths.get(getTempDirPath(), goModFileName)), StandardCharsets.UTF_8); - } catch (Exception e) { - e.printStackTrace(); - Assert.fail("Unable to execute go mod"); - } - } else { - try (PrintWriter out = new PrintWriter(Paths.get(getTempDirPath(), goModFileName).toString())) { - out.println(goModContent); - } catch (FileNotFoundException e) { - e.printStackTrace(); - Assert.fail("Unable to write " + goModFileName); - } - } - } - - private void replaceImportPath() { - File[] files = getTempParserDir().listFiles(new GoFileFilter()); - for (File 
file : files) { - String fileName = file.toString(); - try { - String content = new String(Files.readAllBytes(file.toPath()), StandardCharsets.UTF_8); - String newContent = content.replaceAll(GO_RUNTIME_IMPORT_PATH, antlrTestPackageName); - try (PrintWriter out = new PrintWriter(fileName)) { - out.println(newContent); - } - } catch (IOException e) { - fail("Error during processing " + fileName); - } - } - } - - /** Return true if all is well */ - protected boolean rawGenerateAndBuildRecognizer(String grammarFileName, - String grammarStr, String parserName, String lexerName, - String... extraOptions) { - return rawGenerateAndBuildRecognizer(grammarFileName, grammarStr, - parserName, lexerName, false, extraOptions); - } - - /** Return true if all is well */ - protected boolean rawGenerateAndBuildRecognizer(String grammarFileName, - String grammarStr, String parserName, String lexerName, - boolean defaultListener, String... extraOptions) { - ErrorQueue equeue = antlrOnString(getTempParserDirPath(), "Go", grammarFileName, grammarStr, - defaultListener, extraOptions); - return equeue.errors.isEmpty(); - } - - protected void rawBuildRecognizerTestFile(String parserName, - String lexerName, String listenerName, String visitorName, - String parserStartRuleName, boolean debug) { - setParseErrors(null); - if (parserName == null) { - writeLexerTestFile(lexerName, false); - } - else { - writeParserTestFile(parserName, lexerName, listenerName, - visitorName, parserStartRuleName, debug); - } - } - - private String execModule(String fileName) { - initializeRuntime(); - - String modulePath = new File(getTempTestDir(), fileName).getAbsolutePath(); - String inputPath = new File(getTempTestDir(), "input").getAbsolutePath(); - try { - ProcessBuilder builder = new ProcessBuilder("go", "run", modulePath, inputPath); - builder.directory(getTempTestDir()); - builder.environment().put("GOROOT", newGoRootString); - Process process = builder.start(); - StreamVacuum stdoutVacuum = new 
StreamVacuum(process.getInputStream()); - StreamVacuum stderrVacuum = new StreamVacuum(process.getErrorStream()); - stdoutVacuum.start(); - stderrVacuum.start(); - process.waitFor(); - stdoutVacuum.join(); - stderrVacuum.join(); - String output = stdoutVacuum.toString(); - if (output.length() == 0) { - output = null; - } - if (stderrVacuum.toString().length() > 0) { - setParseErrors(stderrVacuum.toString()); - } - return output; - } catch (Exception e) { - System.err.println("can't exec recognizer"); - e.printStackTrace(System.err); - } - return null; - } - - private static synchronized boolean initializeRuntime() { - if (isRuntimeInitialized) - return true; - - String goRoot = getGoRootValue(); - Path newGoRoot = Paths.get(cachingDirectory, "Go"); - newGoRootString = newGoRoot.toString(); - try { - File newGoRootDirectory = newGoRoot.toFile(); - if (newGoRootDirectory.exists()) - deleteDirectory(newGoRootDirectory); - copyDirectory(Paths.get(goRoot), newGoRoot); - } catch (IOException e) { - e.printStackTrace(); - Assert.fail("Unable to copy go system files"); - } - - String packageDir = Paths.get(newGoRootString, "src", antlrTestPackageName).toString(); - RuntimeTestUtils.mkdir(packageDir); - File[] runtimeFiles = locateRuntime().listFiles(new GoFileFilter()); - if (runtimeFiles == null) { - Assert.fail("Go runtime file list is empty."); - } - - for (File runtimeFile : runtimeFiles) { - File dest = new File(packageDir, runtimeFile.getName()); - try { - RuntimeTestUtils.copyFile(runtimeFile, dest); - } catch (IOException e) { - e.printStackTrace(); - Assert.fail("Unable to copy runtime file " + runtimeFile); - } - } - - isRuntimeInitialized = true; - return isRuntimeInitialized; - } - - private static void copyDirectory(final Path source, final Path target, final CopyOption... 
options) - throws IOException { - Files.walkFileTree(source, EnumSet.of(FileVisitOption.FOLLOW_LINKS), 2147483647, new SimpleFileVisitor() { - @Override - public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) - throws IOException { - Files.createDirectories(target.resolve(source.relativize(dir))); - return FileVisitResult.CONTINUE; - } - - @Override - public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) - throws IOException { - Files.copy(file, target.resolve(source.relativize(file)), options); - return FileVisitResult.CONTINUE; - } - }); - } - - private static void deleteDirectory(File f) throws IOException { - if (f.isDirectory()) { - for (File c : f.listFiles()) - deleteDirectory(c); - } - if (!f.delete()) - throw new FileNotFoundException("Failed to delete file: " + f); - } - - private static String getGoRootValue() { - try { - ProcessBuilder pb = new ProcessBuilder("go", "env", "GOROOT"); - Process process = pb.start(); - StreamVacuum stdoutVacuum = new StreamVacuum(process.getInputStream()); - stdoutVacuum.start(); - process.waitFor(); - stdoutVacuum.join(); - return stdoutVacuum.toString().trim(); - } catch (Exception e) { - e.printStackTrace(); - Assert.fail("Unable to execute go env"); - } - return null; - } - - private static File locateRuntime() { - final ClassLoader loader = Thread.currentThread().getContextClassLoader(); - final URL runtimeSrc = loader.getResource("Go"); - if ( runtimeSrc==null ) { - throw new RuntimeException("Cannot find Go ANTLR runtime"); - } - File runtimeDir = new File(runtimeSrc.getPath(), "antlr"); - if (!runtimeDir.exists()) { - throw new RuntimeException("Cannot find Go ANTLR runtime"); - } - return new File(runtimeDir.getPath()); - } - - protected void writeParserTestFile(String parserName, String lexerName, - String listenerName, String visitorName, - String parserStartRuleName, boolean debug) { - ST outputFileST = new ST( - "package main\n" + - "import (\n" - + " \"test/parser\"\n" 
- + " \"" + antlrTestPackageName + "\"\n" - + " \"fmt\"\n" - + " \"os\"\n" - + ")\n" - + "\n" - + "type TreeShapeListener struct {\n" - + " *parser.Base\n" - + "}\n" - + "\n" - + "func NewTreeShapeListener() *TreeShapeListener {\n" - + " return new(TreeShapeListener)\n" - + "}\n" - + "\n" - + "func (this *TreeShapeListener) EnterEveryRule(ctx antlr.ParserRuleContext) {\n" - + " for i := 0; i\\(input)\n" - + " stream := antlr.NewCommonTokenStream(lexer,0)\n" - + "" - + " p.BuildParseTrees = true\n" - + " tree := p.()\n" - + " antlr.ParseTreeWalkerDefault.Walk(NewTreeShapeListener(), tree)\n" - + "}\n"); - - ST createParserST = new ST( - " p := parser.New(stream)\n"); - if (debug) { - createParserST = new ST( - " p := parser.New(stream)\n" - + " p.AddErrorListener(antlr.NewDiagnosticErrorListener(true))\n"); - } - outputFileST.add("createParser", createParserST); - outputFileST.add("parserName", parserName); - outputFileST.add("lexerName", lexerName); - outputFileST.add("listenerName", listenerName); - outputFileST.add("visitorName", visitorName); - outputFileST.add("parserStartRuleName", parserStartRuleName.substring(0, 1).toUpperCase() + parserStartRuleName.substring(1) ); - writeFile(getTempDirPath(), "Test.go", outputFileST.render()); - } - - protected void writeLexerTestFile(String lexerName, boolean showDFA) { - ST outputFileST = new ST( - "package main\n" + - "import (\n" - + " \"test/parser\"\n" - + " \"" + antlrTestPackageName + "\"\n" - + " \"os\"\n" - + " \"fmt\"\n" - + ")\n" - + "\n" - + "func main() {\n" - + " input, err := antlr.NewFileStream(os.Args[1])\n" - + " if err != nil {\n" - + " fmt.Printf(\"Failed to find file: %v\", err)\n" - + " return\n" - + " }\n" - + " lexer := parser.New(input)\n" - + " stream := antlr.NewCommonTokenStream(lexer,0)\n" - + " stream.Fill()\n" - + " for _, t := range stream.GetAllTokens() {\n" - + " fmt.Println(t)\n" - + " }\n" - + (showDFA ? 
"fmt.Print(lexer.GetInterpreter().DecisionToDFA()[antlr.LexerDefaultMode].ToLexerString())\n" - : "") - + "}\n" - + "\n"); - outputFileST.add("lexerName", lexerName); - writeFile(getTempDirPath(), "Test.go", outputFileST.render()); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/go/GoFileFilter.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/go/GoFileFilter.java deleted file mode 100644 index 6b4606df0a..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/go/GoFileFilter.java +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ -package org.antlr.v4.test.runtime.go; - -import java.io.File; -import java.io.FilenameFilter; - -public class GoFileFilter implements FilenameFilter { - public boolean accept(File dir, String name) { - return name.endsWith(".go"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/go/GoRunner.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/go/GoRunner.java new file mode 100644 index 0000000000..d66df24ded --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/go/GoRunner.java @@ -0,0 +1,141 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */ +package org.antlr.v4.test.runtime.go; + +import org.antlr.v4.test.runtime.*; +import org.antlr.v4.test.runtime.states.CompiledState; +import org.antlr.v4.test.runtime.states.GeneratedState; + +import java.io.*; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.*; + +import static org.antlr.v4.test.runtime.FileUtils.*; +import static org.antlr.v4.test.runtime.RuntimeTestUtils.FileSeparator; + +public class GoRunner extends RuntimeRunner { + @Override + public String getLanguage() { + return "Go"; + } + + @Override + public String getLexerSuffix() { + return "_lexer"; + } + + @Override + public String getParserSuffix() { + return "_parser"; + } + + @Override + public String getBaseListenerSuffix() { + return "_base_listener"; + } + + @Override + public String getListenerSuffix() { + return "_listener"; + } + + @Override + public String getBaseVisitorSuffix() { + return "_base_visitor"; + } + + @Override + public String getVisitorSuffix() { + return "_visitor"; + } + + @Override + protected String grammarNameToFileName(String grammarName) { + return grammarName.toLowerCase(); + } + + @Override + public String[] getExtraRunArgs() { + return new String[]{"run"}; + } + + private static final String GoRuntimeImportPath = "github.com/antlr/antlr4/runtime/Go/antlr/v4"; + + private final static Map environment; + + private static String cachedGoMod; + + static { + environment = new HashMap<>(); + environment.put("GOWORK", "off"); + } + + @Override + protected void initRuntime() throws Exception { + String cachePath = getCachePath(); + mkdir(cachePath); + Path runtimeFilesPath = Paths.get(getRuntimePath("Go"), "antlr"); + String runtimeToolPath = getRuntimeToolPath(); + File goModFile = new File(cachePath, "go.mod"); + if (goModFile.exists()) + if (!goModFile.delete()) + throw new IOException("Can't delete " + goModFile); + Processor.run(new String[] {runtimeToolPath, "mod", "init", "test"}, cachePath, environment); 
+ Processor.run(new String[] {runtimeToolPath, "mod", "edit", + "-replace=" + GoRuntimeImportPath + "=" + runtimeFilesPath}, cachePath, environment); + cachedGoMod = readFile(cachePath + FileSeparator, "go.mod"); + } + + @Override + protected String grammarParseRuleToRecognizerName(String startRuleName) { + if (startRuleName == null || startRuleName.length() == 0) { + return null; + } + + return startRuleName.substring(0, 1).toUpperCase() + startRuleName.substring(1); + } + + @Override + protected CompiledState compile(RunOptions runOptions, GeneratedState generatedState) { + List generatedFiles = generatedState.generatedFiles; + String tempDirPath = getTempDirPath(); + File generatedParserDir = new File(tempDirPath, "parser"); + if (!generatedParserDir.mkdir()) { + return new CompiledState(generatedState, new Exception("can't make dir " + generatedParserDir)); + } + + // The generated files seem to need to be in the parser subdirectory. + // We have no need to change the import of the runtime because of go mod replace so, we could just generate them + // directly in to the parser subdir. 
But in case down the line, there is some reason to want to replace things in + // the generated code, then I will leave this here, and we can use replaceInFile() + // + for (GeneratedFile generatedFile : generatedFiles) { + try { + Path originalFile = Paths.get(tempDirPath, generatedFile.name); + Files.move(originalFile, Paths.get(tempDirPath, "parser", generatedFile.name)); + } catch (IOException e) { + return new CompiledState(generatedState, e); + } + } + + writeFile(tempDirPath, "go.mod", cachedGoMod); + Exception ex = null; + try { + Processor.run(new String[] {getRuntimeToolPath(), "mod", "tidy"}, tempDirPath, environment); + } catch (InterruptedException | IOException e) { + ex = e; + } + + return new CompiledState(generatedState, ex); + } + + @Override + public Map getExecEnvironment() { + return environment; + } +} \ No newline at end of file diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/go/GoRuntimeTests.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/go/GoRuntimeTests.java new file mode 100644 index 0000000000..d09dffbc21 --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/go/GoRuntimeTests.java @@ -0,0 +1,17 @@ +/* + * Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */ + +package org.antlr.v4.test.runtime.go; + +import org.antlr.v4.test.runtime.RuntimeTests; +import org.antlr.v4.test.runtime.RuntimeRunner; + +public class GoRuntimeTests extends RuntimeTests { + @Override + protected RuntimeRunner createRuntimeRunner() { + return new GoRunner(); + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestCompositeLexers.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestCompositeLexers.java deleted file mode 100644 index 2021be5be5..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestCompositeLexers.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - -package org.antlr.v4.test.runtime.go; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestCompositeLexers extends BaseRuntimeTest { - public TestCompositeLexers(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseGoTest()); - } - - @BeforeClass - public static void groupSetUp() throws Exception { BaseGoTest.groupSetUp(); } - - @AfterClass - public static void groupTearDown() throws Exception { BaseGoTest.groupTearDown(); } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("CompositeLexers", "Go"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestCompositeParsers.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestCompositeParsers.java deleted file mode 100644 index 52a69a274e..0000000000 --- 
a/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestCompositeParsers.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - -package org.antlr.v4.test.runtime.go; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestCompositeParsers extends BaseRuntimeTest { - public TestCompositeParsers(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseGoTest()); - } - - @BeforeClass - public static void groupSetUp() throws Exception { BaseGoTest.groupSetUp(); } - - @AfterClass - public static void groupTearDown() throws Exception { BaseGoTest.groupTearDown(); } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("CompositeParsers", "Go"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestFullContextParsing.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestFullContextParsing.java deleted file mode 100644 index e4307dccf1..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestFullContextParsing.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.go; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestFullContextParsing extends BaseRuntimeTest { - public TestFullContextParsing(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseGoTest()); - } - - @BeforeClass - public static void groupSetUp() throws Exception { BaseGoTest.groupSetUp(); } - - @AfterClass - public static void groupTearDown() throws Exception { BaseGoTest.groupTearDown(); } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("FullContextParsing", "Go"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestLeftRecursion.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestLeftRecursion.java deleted file mode 100644 index f13abc7b5c..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestLeftRecursion.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.go; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestLeftRecursion extends BaseRuntimeTest { - public TestLeftRecursion(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseGoTest()); - } - - @BeforeClass - public static void groupSetUp() throws Exception { BaseGoTest.groupSetUp(); } - - @AfterClass - public static void groupTearDown() throws Exception { BaseGoTest.groupTearDown(); } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("LeftRecursion", "Go"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestLexerErrors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestLexerErrors.java deleted file mode 100644 index 89cef89b3a..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestLexerErrors.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.go; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestLexerErrors extends BaseRuntimeTest { - public TestLexerErrors(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseGoTest()); - } - - @BeforeClass - public static void groupSetUp() throws Exception { BaseGoTest.groupSetUp(); } - - @AfterClass - public static void groupTearDown() throws Exception { BaseGoTest.groupTearDown(); } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("LexerErrors", "Go"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestLexerExec.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestLexerExec.java deleted file mode 100644 index 616b4c0913..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestLexerExec.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.go; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestLexerExec extends BaseRuntimeTest { - public TestLexerExec(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseGoTest()); - } - - @BeforeClass - public static void groupSetUp() throws Exception { BaseGoTest.groupSetUp(); } - - @AfterClass - public static void groupTearDown() throws Exception { BaseGoTest.groupTearDown(); } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("LexerExec", "Go"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestListeners.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestListeners.java deleted file mode 100644 index cf36811922..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestListeners.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.go; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestListeners extends BaseRuntimeTest { - public TestListeners(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseGoTest()); - } - - @BeforeClass - public static void groupSetUp() throws Exception { BaseGoTest.groupSetUp(); } - - @AfterClass - public static void groupTearDown() throws Exception { BaseGoTest.groupTearDown(); } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("Listeners", "Go"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestParseTrees.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestParseTrees.java deleted file mode 100644 index 61fd8d1980..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestParseTrees.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.go; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestParseTrees extends BaseRuntimeTest { - public TestParseTrees(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseGoTest()); - } - - @BeforeClass - public static void groupSetUp() throws Exception { BaseGoTest.groupSetUp(); } - - @AfterClass - public static void groupTearDown() throws Exception { BaseGoTest.groupTearDown(); } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("ParseTrees", "Go"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestParserErrors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestParserErrors.java deleted file mode 100644 index f21dae78e0..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestParserErrors.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.go; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestParserErrors extends BaseRuntimeTest { - public TestParserErrors(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseGoTest()); - } - - @BeforeClass - public static void groupSetUp() throws Exception { BaseGoTest.groupSetUp(); } - - @AfterClass - public static void groupTearDown() throws Exception { BaseGoTest.groupTearDown(); } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("ParserErrors", "Go"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestParserExec.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestParserExec.java deleted file mode 100644 index d7c360e3c4..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestParserExec.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.go; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestParserExec extends BaseRuntimeTest { - public TestParserExec(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseGoTest()); - } - - @BeforeClass - public static void groupSetUp() throws Exception { BaseGoTest.groupSetUp(); } - - @AfterClass - public static void groupTearDown() throws Exception { BaseGoTest.groupTearDown(); } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("ParserExec", "Go"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestPerformance.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestPerformance.java deleted file mode 100644 index 74af6646ea..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestPerformance.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.go; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestPerformance extends BaseRuntimeTest { - public TestPerformance(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseGoTest()); - } - - @BeforeClass - public static void groupSetUp() throws Exception { BaseGoTest.groupSetUp(); } - - @AfterClass - public static void groupTearDown() throws Exception { BaseGoTest.groupTearDown(); } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("Performance", "Go"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestSemPredEvalLexer.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestSemPredEvalLexer.java deleted file mode 100644 index 1af66d8995..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestSemPredEvalLexer.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.go; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestSemPredEvalLexer extends BaseRuntimeTest { - public TestSemPredEvalLexer(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseGoTest()); - } - - @BeforeClass - public static void groupSetUp() throws Exception { BaseGoTest.groupSetUp(); } - - @AfterClass - public static void groupTearDown() throws Exception { BaseGoTest.groupTearDown(); } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("SemPredEvalLexer", "Go"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestSemPredEvalParser.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestSemPredEvalParser.java deleted file mode 100644 index ad0d218d7d..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestSemPredEvalParser.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.go; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestSemPredEvalParser extends BaseRuntimeTest { - public TestSemPredEvalParser(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseGoTest()); - } - - @BeforeClass - public static void groupSetUp() throws Exception { BaseGoTest.groupSetUp(); } - - @AfterClass - public static void groupTearDown() throws Exception { BaseGoTest.groupTearDown(); } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("SemPredEvalParser", "Go"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestSets.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestSets.java deleted file mode 100644 index e1520626c7..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/go/TestSets.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.go; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestSets extends BaseRuntimeTest { - public TestSets(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseGoTest()); - } - - @BeforeClass - public static void groupSetUp() throws Exception { BaseGoTest.groupSetUp(); } - - @AfterClass - public static void groupTearDown() throws Exception { BaseGoTest.groupTearDown(); } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("Sets", "Go"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/BaseJavaTest.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/BaseJavaTest.java deleted file mode 100644 index de192d5a8e..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/BaseJavaTest.java +++ /dev/null @@ -1,594 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ -package org.antlr.v4.test.runtime.java; - -import org.antlr.v4.analysis.AnalysisPipeline; -import org.antlr.v4.automata.ATNFactory; -import org.antlr.v4.automata.ATNPrinter; -import org.antlr.v4.automata.LexerATNFactory; -import org.antlr.v4.automata.ParserATNFactory; -import org.antlr.v4.codegen.CodeGenerator; -import org.antlr.v4.runtime.ANTLRInputStream; -import org.antlr.v4.runtime.CharStream; -import org.antlr.v4.runtime.CommonTokenStream; -import org.antlr.v4.runtime.Lexer; -import org.antlr.v4.runtime.Parser; -import org.antlr.v4.runtime.Token; -import org.antlr.v4.runtime.TokenStream; -import org.antlr.v4.runtime.atn.ATNState; -import org.antlr.v4.runtime.misc.Pair; -import org.antlr.v4.runtime.tree.ParseTree; -import org.antlr.v4.semantics.SemanticPipeline; -import org.antlr.v4.test.runtime.*; -import org.antlr.v4.tool.ANTLRMessage; -import org.antlr.v4.tool.Grammar; -import org.antlr.v4.tool.GrammarSemanticsMessage; -import org.antlr.v4.tool.LexerGrammar; -import org.antlr.v4.tool.Rule; -import org.stringtemplate.v4.ST; -import org.stringtemplate.v4.STGroup; -import org.stringtemplate.v4.STGroupString; - -import javax.tools.JavaCompiler; -import javax.tools.JavaFileObject; -import javax.tools.StandardJavaFileManager; -import javax.tools.ToolProvider; -import java.io.File; -import java.io.IOException; -import java.io.InputStream; -import java.io.InputStreamReader; -import java.io.PipedInputStream; -import java.io.PipedOutputStream; -import java.io.PrintStream; -import java.io.StringReader; -import java.lang.reflect.Constructor; -import java.lang.reflect.InvocationTargetException; -import java.lang.reflect.Method; -import java.net.URL; -import java.net.URLClassLoader; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashSet; -import java.util.List; -import java.util.Set; - -import static junit.framework.TestCase.assertEquals; -import static junit.framework.TestCase.assertNotNull; -import static 
junit.framework.TestCase.assertTrue; -import static org.antlr.v4.test.runtime.BaseRuntimeTest.writeFile; - -public class BaseJavaTest extends BaseRuntimeTestSupport implements RuntimeTestSupport { - - /** - * When the {@code antlr.testinprocess} runtime property is set to - * {@code true}, the test suite will attempt to load generated classes into - * the test process for direct execution rather than invoking the JVM in a - * new process for testing. - *

    - *

    - * In-process testing results in a substantial performance improvement, but - * some test environments created by IDEs do not support the mechanisms - * currently used by the tests to dynamically load compiled code. Therefore, - * the default behavior (used in all other cases) favors reliable - * cross-system test execution by executing generated test code in a - * separate process.

    - */ - public static final boolean TEST_IN_SAME_PROCESS = Boolean.parseBoolean(System.getProperty("antlr.testinprocess")); - - /** - * Build up the full classpath we need, including the surefire path (if present) - */ - public static final String CLASSPATH = System.getProperty("java.class.path"); - - @Override - protected String getPropertyPrefix() { - return "antrl4-java"; - } - - protected String load(String fileName, String encoding) - throws IOException { - if ( fileName==null ) { - return null; - } - - String fullFileName = getClass().getPackage().getName().replace('.', '/')+'/'+fileName; - int size = 65000; - InputStreamReader isr; - InputStream fis = getClass().getClassLoader().getResourceAsStream(fullFileName); - if ( encoding!=null ) { - isr = new InputStreamReader(fis, encoding); - } - else { - isr = new InputStreamReader(fis); - } - try { - char[] data = new char[size]; - int n = isr.read(data); - return new String(data, 0, n); - } finally { - isr.close(); - } - } - - /** - * Wow! much faster than compiling outside of VM. Finicky though. - * Had rules called r and modulo. Wouldn't compile til I changed to 'a'. - */ - protected boolean compile(String... 
fileNames) { - List files = new ArrayList(); - for (String fileName : fileNames) { - File f = new File(getTempTestDir(), fileName); - files.add(f); - } - - JavaCompiler compiler = ToolProvider.getSystemJavaCompiler(); - - StandardJavaFileManager fileManager = - compiler.getStandardFileManager(null, null, null); - - Iterable compilationUnits = - fileManager.getJavaFileObjectsFromFiles(files); - - Iterable compileOptions = - Arrays.asList("-g", "-source", "1.8", "-target", "1.8", "-implicit:class", "-Xlint:-options", "-d", getTempDirPath(), "-cp", getTempDirPath() + PATH_SEP + CLASSPATH); - - JavaCompiler.CompilationTask task = - compiler.getTask(null, fileManager, null, compileOptions, null, - compilationUnits); - boolean ok = task.call(); - - try { - fileManager.close(); - } catch (IOException ioe) { - ioe.printStackTrace(System.err); - } - - return ok; - } - - protected String execLexer(String grammarFileName, - String grammarStr, - String lexerName, - String input) { - return execLexer(grammarFileName, grammarStr, lexerName, input, false); - } - - @Override - public String execLexer(String grammarFileName, - String grammarStr, - String lexerName, - String input, - boolean showDFA) { - boolean success = rawGenerateAndBuildRecognizer(grammarFileName, - grammarStr, - null, - lexerName); - assertTrue(success); - writeFile(getTempDirPath(), "input", input); - writeLexerTestFile(lexerName, showDFA); - compile("Test.java"); - return execClass("Test"); - } - - public ParseTree execParser(String startRuleName, String input, - String parserName, String lexerName) - throws Exception - { - Pair pl = getParserAndLexer(input, parserName, lexerName); - Parser parser = pl.a; - return execStartRule(startRuleName, parser); - } - - public ParseTree execStartRule(String startRuleName, Parser parser) - throws IllegalAccessException, InvocationTargetException, - NoSuchMethodException { - Method startRule; - Object[] args = null; - try { - startRule = 
parser.getClass().getMethod(startRuleName); - } catch (NoSuchMethodException nsme) { - // try with int _p arg for recursive func - startRule = parser.getClass().getMethod(startRuleName, int.class); - args = new Integer[]{0}; - } - ParseTree result = (ParseTree) startRule.invoke(parser, args); -// System.out.println("parse tree = "+result.toStringTree(parser)); - return result; - } - - public Pair getParserAndLexer(String input, - String parserName, String lexerName) - throws Exception { - final Class lexerClass = loadLexerClassFromTempDir(lexerName); - final Class parserClass = loadParserClassFromTempDir(parserName); - - ANTLRInputStream in = new ANTLRInputStream(new StringReader(input)); - - Class c = lexerClass.asSubclass(Lexer.class); - Constructor ctor = c.getConstructor(CharStream.class); - Lexer lexer = ctor.newInstance(in); - - Class pc = parserClass.asSubclass(Parser.class); - Constructor pctor = pc.getConstructor(TokenStream.class); - CommonTokenStream tokens = new CommonTokenStream(lexer); - Parser parser = pctor.newInstance(tokens); - return new Pair(parser, lexer); - } - - public Class loadClassFromTempDir(String name) throws Exception { - ClassLoader loader = - new URLClassLoader(new URL[]{getTempTestDir().toURI().toURL()}, - ClassLoader.getSystemClassLoader()); - return loader.loadClass(name); - } - - public Class loadLexerClassFromTempDir(String name) throws Exception { - return loadClassFromTempDir(name).asSubclass(Lexer.class); - } - - public Class loadParserClassFromTempDir(String name) throws Exception { - return loadClassFromTempDir(name).asSubclass(Parser.class); - } - - @Override - public String execParser(String grammarFileName, - String grammarStr, - String parserName, - String lexerName, - String listenerName, - String visitorName, - String startRuleName, - String input, - boolean showDiagnosticErrors) - { - return execParser(grammarFileName, grammarStr, parserName, lexerName, - listenerName, visitorName, startRuleName, input, 
showDiagnosticErrors, false); - } - - /** ANTLR isn't thread-safe to process grammars so we use a global lock for testing */ - public static final Object antlrLock = new Object(); - - public String execParser(String grammarFileName, - String grammarStr, - String parserName, - String lexerName, - String listenerName, - String visitorName, - String startRuleName, - String input, - boolean showDiagnosticErrors, - boolean profile) - { - boolean success = rawGenerateAndBuildRecognizer(grammarFileName, - grammarStr, - parserName, - lexerName, - "-visitor"); - assertTrue(success); - writeFile(getTempDirPath(), "input", input); - return rawExecRecognizer(parserName, - lexerName, - startRuleName, - showDiagnosticErrors, - profile); - } - - /** Return true if all is well */ - protected boolean rawGenerateAndBuildRecognizer(String grammarFileName, - String grammarStr, - String parserName, - String lexerName, - String... extraOptions) - { - return rawGenerateAndBuildRecognizer(grammarFileName, grammarStr, parserName, lexerName, false, extraOptions); - } - - /** Return true if all is well */ - protected boolean rawGenerateAndBuildRecognizer(String grammarFileName, - String grammarStr, - String parserName, - String lexerName, - boolean defaultListener, - String... 
extraOptions) - { - ErrorQueue equeue = - BaseRuntimeTest.antlrOnString(getTempDirPath(), "Java", grammarFileName, grammarStr, defaultListener, extraOptions); - if (!equeue.errors.isEmpty()) { - return false; - } - - List files = new ArrayList(); - if ( lexerName!=null ) { - files.add(lexerName+".java"); - } - if ( parserName!=null ) { - files.add(parserName+".java"); - Set optionsSet = new HashSet(Arrays.asList(extraOptions)); - String grammarName = grammarFileName.substring(0, grammarFileName.lastIndexOf('.')); - if (!optionsSet.contains("-no-listener")) { - files.add(grammarName+"Listener.java"); - files.add(grammarName+"BaseListener.java"); - } - if (optionsSet.contains("-visitor")) { - files.add(grammarName+"Visitor.java"); - files.add(grammarName+"BaseVisitor.java"); - } - } - return compile(files.toArray(new String[0])); - } - - protected String rawExecRecognizer(String parserName, - String lexerName, - String parserStartRuleName, - boolean debug, - boolean profile) - { - setParseErrors(null); - if ( parserName==null ) { - writeLexerTestFile(lexerName, false); - } - else { - writeTestFile(parserName, - lexerName, - parserStartRuleName, - debug, - profile); - } - - compile("Test.java"); - return execClass("Test"); - } - - public String execRecognizer() { - return execClass("Test"); - } - - public String execClass(String className) { - if (TEST_IN_SAME_PROCESS) { - try { - ClassLoader loader = new URLClassLoader(new URL[] { getTempTestDir().toURI().toURL() }, ClassLoader.getSystemClassLoader()); - final Class mainClass = (Class)loader.loadClass(className); - final Method mainMethod = mainClass.getDeclaredMethod("main", String[].class); - PipedInputStream stdoutIn = new PipedInputStream(); - PipedInputStream stderrIn = new PipedInputStream(); - PipedOutputStream stdoutOut = new PipedOutputStream(stdoutIn); - PipedOutputStream stderrOut = new PipedOutputStream(stderrIn); - StreamVacuum stdoutVacuum = new StreamVacuum(stdoutIn); - StreamVacuum stderrVacuum = new 
StreamVacuum(stderrIn); - - PrintStream originalOut = System.out; - System.setOut(new PrintStream(stdoutOut)); - try { - PrintStream originalErr = System.err; - try { - System.setErr(new PrintStream(stderrOut)); - stdoutVacuum.start(); - stderrVacuum.start(); - mainMethod.invoke(null, (Object)new String[] { new File(getTempTestDir(), "input").getAbsolutePath() }); - } - finally { - System.setErr(originalErr); - } - } - finally { - System.setOut(originalOut); - } - - stdoutOut.close(); - stderrOut.close(); - stdoutVacuum.join(); - stderrVacuum.join(); - String output = stdoutVacuum.toString(); - if ( output.length()==0 ) { - output = null; - } - if ( stderrVacuum.toString().length()>0 ) { - setParseErrors(stderrVacuum.toString()); - } - return output; - } - catch (Exception ex) { - throw new RuntimeException(ex); - } - } - - try { - String[] args = new String[] { - "java", "-classpath", getTempDirPath() + PATH_SEP + CLASSPATH, - "-Dfile.encoding=UTF-8", - className, new File(getTempTestDir(), "input").getAbsolutePath() - }; -// String cmdLine = Utils.join(args, " "); -// System.err.println("execParser: "+cmdLine); - Process process = - Runtime.getRuntime().exec(args, null, getTempTestDir()); - StreamVacuum stdoutVacuum = new StreamVacuum(process.getInputStream()); - StreamVacuum stderrVacuum = new StreamVacuum(process.getErrorStream()); - stdoutVacuum.start(); - stderrVacuum.start(); - process.waitFor(); - stdoutVacuum.join(); - stderrVacuum.join(); - String output = stdoutVacuum.toString(); - if ( output.length()==0 ) { - output = null; - } - if ( stderrVacuum.toString().length()>0 ) { - setParseErrors(stderrVacuum.toString()); - } - return output; - } - catch (Exception e) { - System.err.println("can't exec recognizer"); - e.printStackTrace(System.err); - } - return null; - } - - public void checkRuleATN(Grammar g, String ruleName, String expecting) { -// DOTGenerator dot = new DOTGenerator(g); -// 
System.out.println(dot.getDOT(g.atn.ruleToStartState[g.getRule(ruleName).index])); - - Rule r = g.getRule(ruleName); - ATNState startState = g.getATN().ruleToStartState[r.index]; - ATNPrinter serializer = new ATNPrinter(g, startState); - String result = serializer.asString(); - - //System.out.print(result); - assertEquals(expecting, result); - } - - public void testActions(String templates, String actionName, String action, String expected) throws org.antlr.runtime.RecognitionException { - int lp = templates.indexOf('('); - String name = templates.substring(0, lp); - STGroup group = new STGroupString(templates); - ST st = group.getInstanceOf(name); - st.add(actionName, action); - String grammar = st.render(); - ErrorQueue equeue = new ErrorQueue(); - Grammar g = new Grammar(grammar, equeue); - if ( g.ast!=null && !g.ast.hasErrors ) { - SemanticPipeline sem = new SemanticPipeline(g); - sem.process(); - - ATNFactory factory = new ParserATNFactory(g); - if ( g.isLexer() ) factory = new LexerATNFactory((LexerGrammar)g); - g.atn = factory.createATN(); - - AnalysisPipeline anal = new AnalysisPipeline(g); - anal.process(); - - CodeGenerator gen = CodeGenerator.create(g); - ST outputFileST = gen.generateParser(false); - String output = outputFileST.render(); - //System.out.println(output); - String b = "#" + actionName + "#"; - int start = output.indexOf(b); - String e = "#end-" + actionName + "#"; - int end = output.indexOf(e); - String snippet = output.substring(start+b.length(),end); - assertEquals(expected, snippet); - } - if ( equeue.size()>0 ) { -// System.err.println(equeue.toString()); - } - } - - - - protected void checkGrammarSemanticsWarning(ErrorQueue equeue, - GrammarSemanticsMessage expectedMessage) - throws Exception - { - ANTLRMessage foundMsg = null; - for (int i = 0; i < equeue.warnings.size(); i++) { - ANTLRMessage m = equeue.warnings.get(i); - if (m.getErrorType()==expectedMessage.getErrorType() ) { - foundMsg = m; - } - } - assertNotNull("no error; 
"+expectedMessage.getErrorType()+" expected", foundMsg); - assertTrue("error is not a GrammarSemanticsMessage", - foundMsg instanceof GrammarSemanticsMessage); - assertEquals(Arrays.toString(expectedMessage.getArgs()), Arrays.toString(foundMsg.getArgs())); - if ( equeue.size()!=1 ) { - System.err.println(equeue); - } - } - - protected void writeTestFile(String parserName, - String lexerName, - String parserStartRuleName, - boolean debug, - boolean profile) - { - ST outputFileST = new ST( - "import org.antlr.v4.runtime.*;\n" + - "import org.antlr.v4.runtime.tree.*;\n" + - "import org.antlr.v4.runtime.atn.*;\n" + - "import java.nio.file.Paths;\n"+ - "import java.util.Arrays;\n"+ - "\n" + - "public class Test {\n" + - " public static void main(String[] args) throws Exception {\n" + - " CharStream input = CharStreams.fromPath(Paths.get(args[0]));\n" + - " lex = new (input);\n" + - " CommonTokenStream tokens = new CommonTokenStream(lex);\n" + - " \n"+ - " parser.setBuildParseTree(true);\n" + - " \n"+ - " ParserRuleContext tree = parser.();\n" + - " System.out.println(Arrays.toString(profiler.getDecisionInfo()));\n" + - " ParseTreeWalker.DEFAULT.walk(new TreeShapeListener(), tree);\n" + - " }\n" + - "\n" + - " static class TreeShapeListener implements ParseTreeListener {\n" + - " @Override public void visitTerminal(TerminalNode node) { }\n" + - " @Override public void visitErrorNode(ErrorNode node) { }\n" + - " @Override public void exitEveryRule(ParserRuleContext ctx) { }\n" + - "\n" + - " @Override\n" + - " public void enterEveryRule(ParserRuleContext ctx) {\n" + - " for (int i = 0; i \\< ctx.getChildCount(); i++) {\n" + - " ParseTree parent = ctx.getChild(i).getParent();\n" + - " if (!(parent instanceof RuleNode) || ((RuleNode)parent).getRuleContext() != ctx) {\n" + - " throw new IllegalStateException(\"Invalid parse tree shape detected.\");\n" + - " }\n" + - " }\n" + - " }\n" + - " }\n" + - "}" - ); - ST createParserST = new ST(" parser = new (tokens);\n"); - if ( 
debug ) { - createParserST = - new ST( - " parser = new (tokens);\n" + - " parser.addErrorListener(new DiagnosticErrorListener());\n"); - } - if ( profile ) { - outputFileST.add("profile", - "ProfilingATNSimulator profiler = new ProfilingATNSimulator(parser);\n" + - "parser.setInterpreter(profiler);"); - } - else { - outputFileST.add("profile", new ArrayList()); - } - outputFileST.add("createParser", createParserST); - outputFileST.add("parserName", parserName); - outputFileST.add("lexerName", lexerName); - outputFileST.add("parserStartRuleName", parserStartRuleName); - writeFile(getTempDirPath(), "Test.java", outputFileST.render()); - } - - protected void writeLexerTestFile(String lexerName, boolean showDFA) { - ST outputFileST = new ST( - "import java.nio.file.Paths;\n" + - "import org.antlr.v4.runtime.*;\n" + - "\n" + - "public class Test {\n" + - " public static void main(String[] args) throws Exception {\n" + - " CharStream input = CharStreams.fromPath(Paths.get(args[0]));\n" + - " lex = new (input);\n" + - " CommonTokenStream tokens = new CommonTokenStream(lex);\n" + - " tokens.fill();\n" + - " for (Object t : tokens.getTokens()) System.out.println(t);\n" + - (showDFA?"System.out.print(lex.getInterpreter().getDFA(Lexer.DEFAULT_MODE).toLexerString());\n":"")+ - " }\n" + - "}" - ); - - outputFileST.add("lexerName", lexerName); - writeFile(getTempDirPath(), "Test.java", outputFileST.render()); - } - - - public List realElements(List elements) { - return elements.subList(Token.MIN_USER_TOKEN_TYPE, elements.size()); - } - - -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/JavaRunner.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/JavaRunner.java new file mode 100644 index 0000000000..b78b2bf1d8 --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/JavaRunner.java @@ -0,0 +1,202 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. 
+ * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ +package org.antlr.v4.test.runtime.java; + +import org.antlr.v4.runtime.Lexer; +import org.antlr.v4.runtime.Parser; +import org.antlr.v4.runtime.misc.Pair; +import org.antlr.v4.runtime.tree.ParseTree; +import org.antlr.v4.test.runtime.*; +import org.antlr.v4.test.runtime.states.*; + +import javax.tools.JavaCompiler; +import javax.tools.JavaFileObject; +import javax.tools.StandardJavaFileManager; +import javax.tools.ToolProvider; +import java.io.*; +import java.lang.reflect.Method; +import java.net.URL; +import java.net.URLClassLoader; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import static org.antlr.v4.test.runtime.FileUtils.replaceInFile; +import static org.antlr.v4.test.runtime.RuntimeTestUtils.PathSeparator; +import static org.antlr.v4.test.runtime.RuntimeTestUtils.getTextFromResource; + +public class JavaRunner extends RuntimeRunner { + @Override + public String getLanguage() { + return "Java"; + } + + public static final String classPath = System.getProperty("java.class.path"); + + public static final String runtimeTestLexerName = "RuntimeTestLexer"; + public static final String runtimeTestParserName = "RuntimeTestParser"; + + private final static String testLexerContent; + private final static String testParserContent; + private static JavaCompiler compiler; + + static { + testLexerContent = getTextFromResource("org/antlr/v4/test/runtime/helpers/" + runtimeTestLexerName + ".java"); + testParserContent = getTextFromResource("org/antlr/v4/test/runtime/helpers/" + runtimeTestParserName + ".java"); + } + + public JavaRunner(Path tempDir, boolean saveTestDir) { + super(tempDir, saveTestDir); + } + + public JavaRunner() { + super(); + } + + @Override + protected void initRuntime() { + compiler = ToolProvider.getSystemJavaCompiler(); + } + 
+ @Override + protected String getCompilerName() { + return "javac"; + } + + @Override + protected JavaCompiledState compile(RunOptions runOptions, GeneratedState generatedState) { + String tempTestDir = getTempDirPath(); + + List generatedFiles = generatedState.generatedFiles; + GeneratedFile firstFile = generatedFiles.get(0); + + if (!firstFile.isParser) { + FileUtils.writeFile(tempTestDir, runtimeTestLexerName + ".java", testLexerContent); + try { + // superClass for combined grammar generates the same extends base class for Lexer and Parser + // So, for lexer it should be replaced on correct base lexer class + replaceInFile(Paths.get(getTempDirPath(), firstFile.name), + "extends " + runtimeTestParserName + " {", + "extends " + runtimeTestLexerName + " {"); + } catch (IOException e) { + return new JavaCompiledState(generatedState, null, null, null, e); + } + } + if (generatedFiles.stream().anyMatch(file -> file.isParser)) { + FileUtils.writeFile(tempTestDir, runtimeTestParserName + ".java", testParserContent); + } + + ClassLoader loader = null; + Class lexer = null; + Class parser = null; + Exception exception = null; + + try { + StandardJavaFileManager fileManager = compiler.getStandardFileManager(null, null, null); + + ClassLoader systemClassLoader = ClassLoader.getSystemClassLoader(); + + List files = new ArrayList<>(); + File f = new File(tempTestDir, getTestFileWithExt()); + files.add(f); + + Iterable compilationUnits = fileManager.getJavaFileObjectsFromFiles(files); + + Iterable compileOptions = + Arrays.asList("-g", "-source", "1.8", "-target", "1.8", "-implicit:class", "-Xlint:-options", "-d", + tempTestDir, "-cp", tempTestDir + PathSeparator + classPath); + + JavaCompiler.CompilationTask task = + compiler.getTask(null, fileManager, null, compileOptions, null, + compilationUnits); + task.call(); + + loader = new URLClassLoader(new URL[]{new File(tempTestDir).toURI().toURL()}, systemClassLoader); + if (runOptions.lexerName != null) { + lexer = 
loader.loadClass(runOptions.lexerName).asSubclass(Lexer.class); + } + if (runOptions.parserName != null) { + parser = loader.loadClass(runOptions.parserName).asSubclass(Parser.class); + } + } catch (Exception ex) { + exception = ex; + } + + return new JavaCompiledState(generatedState, loader, lexer, parser, exception); + } + + @Override + protected ExecutedState execute(RunOptions runOptions, CompiledState compiledState) { + JavaCompiledState javaCompiledState = (JavaCompiledState) compiledState; + + ExecutedState result; + if (runOptions.returnObject) { + result = execWithObject(runOptions, javaCompiledState); + } else { + result = execCommon(javaCompiledState); + } + return result; + } + + private JavaExecutedState execWithObject(RunOptions runOptions, JavaCompiledState javaCompiledState) { + ParseTree parseTree = null; + Exception exception = null; + try { + Pair lexerParser = javaCompiledState.initializeLexerAndParser(runOptions.input); + + if (runOptions.parserName != null) { + Method startRule; + Object[] args = null; + try { + startRule = javaCompiledState.parser.getMethod(runOptions.startRuleName); + } catch (NoSuchMethodException noSuchMethodException) { + // try with int _p arg for recursive func + startRule = javaCompiledState.parser.getMethod(runOptions.startRuleName, int.class); + args = new Integer[]{0}; + } + parseTree = (ParseTree) startRule.invoke(lexerParser.b, args); + } + } catch (Exception ex) { + exception = ex; + } + return new JavaExecutedState(javaCompiledState, null, null, parseTree, exception); + } + + private ExecutedState execCommon(JavaCompiledState compiledState) { + Exception exception = null; + String output = null; + String errors = null; + try { + final Class mainClass = compiledState.loader.loadClass(getTestFileName()); + final Method recognizeMethod = mainClass.getDeclaredMethod("recognize", String.class, + PrintStream.class, PrintStream.class); + + PipedInputStream stdoutIn = new PipedInputStream(); + PipedInputStream stderrIn 
= new PipedInputStream(); + PipedOutputStream stdoutOut = new PipedOutputStream(stdoutIn); + PipedOutputStream stderrOut = new PipedOutputStream(stderrIn); + StreamReader stdoutReader = new StreamReader(stdoutIn); + StreamReader stderrReader = new StreamReader(stderrIn); + stdoutReader.start(); + stderrReader.start(); + + recognizeMethod.invoke(null, new File(getTempDirPath(), "input").getAbsolutePath(), + new PrintStream(stdoutOut), new PrintStream(stderrOut)); + + stdoutOut.close(); + stderrOut.close(); + stdoutReader.join(); + stderrReader.join(); + output = stdoutReader.toString(); + errors = stderrReader.toString(); + } catch (Exception ex) { + exception = ex; + } + return new JavaExecutedState(compiledState, output, errors, null, exception); + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/JavaRuntimeTests.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/JavaRuntimeTests.java new file mode 100644 index 0000000000..a6a83d8d2a --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/JavaRuntimeTests.java @@ -0,0 +1,17 @@ +/* + * Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */ + +package org.antlr.v4.test.runtime.java; + +import org.antlr.v4.test.runtime.RuntimeTests; +import org.antlr.v4.test.runtime.RuntimeRunner; + +public class JavaRuntimeTests extends RuntimeTests { + @Override + protected RuntimeRunner createRuntimeRunner() { + return new JavaRunner(); + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestCharStreams.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestCharStreams.java index f5bb891ea7..cbe1a71e0d 100644 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestCharStreams.java +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestCharStreams.java @@ -8,11 +8,10 @@ import org.antlr.v4.runtime.CharStream; import org.antlr.v4.runtime.CharStreams; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; -import org.junit.rules.TemporaryFolder; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; +import java.io.File; import java.io.InputStream; import java.io.Reader; import java.nio.channels.SeekableByteChannel; @@ -23,15 +22,10 @@ import java.nio.file.Files; import java.nio.file.Path; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; public class TestCharStreams { - @Rule - public TemporaryFolder folder = new TemporaryFolder(); - - @Rule - public ExpectedException thrown = ExpectedException.none(); - @Test public void fromBMPStringHasExpectedSize() { CharStream s = CharStreams.fromString("hello"); @@ -50,19 +44,19 @@ public void fromSMPStringHasExpectedSize() { } @Test - public void fromBMPUTF8PathHasExpectedSize() throws Exception { - Path p = folder.newFile().toPath(); - Files.write(p, "hello".getBytes(StandardCharsets.UTF_8)); - CharStream s = CharStreams.fromPath(p); + public void fromBMPUTF8PathHasExpectedSize(@TempDir Path tempDir) throws Exception { + Path test = new 
File(tempDir.toString(), "test").toPath(); + Files.write(test, "hello".getBytes(StandardCharsets.UTF_8)); + CharStream s = CharStreams.fromPath(test); assertEquals(5, s.size()); assertEquals(0, s.index()); assertEquals("hello", s.toString()); - assertEquals(p.toString(), s.getSourceName()); + assertEquals(test.toString(), s.getSourceName()); } @Test - public void fromSMPUTF8PathHasExpectedSize() throws Exception { - Path p = folder.newFile().toPath(); + public void fromSMPUTF8PathHasExpectedSize(@TempDir Path tempDir) throws Exception { + Path p = getTestFile(tempDir); Files.write(p, "hello \uD83C\uDF0E".getBytes(StandardCharsets.UTF_8)); CharStream s = CharStreams.fromPath(p); assertEquals(7, s.size()); @@ -72,8 +66,8 @@ public void fromSMPUTF8PathHasExpectedSize() throws Exception { } @Test - public void fromBMPUTF8InputStreamHasExpectedSize() throws Exception { - Path p = folder.newFile().toPath(); + public void fromBMPUTF8InputStreamHasExpectedSize(@TempDir Path tempDir) throws Exception { + Path p = getTestFile(tempDir); Files.write(p, "hello".getBytes(StandardCharsets.UTF_8)); try (InputStream is = Files.newInputStream(p)) { CharStream s = CharStreams.fromStream(is); @@ -84,8 +78,8 @@ public void fromBMPUTF8InputStreamHasExpectedSize() throws Exception { } @Test - public void fromSMPUTF8InputStreamHasExpectedSize() throws Exception { - Path p = folder.newFile().toPath(); + public void fromSMPUTF8InputStreamHasExpectedSize(@TempDir Path tempDir) throws Exception { + Path p = getTestFile(tempDir); Files.write(p, "hello \uD83C\uDF0E".getBytes(StandardCharsets.UTF_8)); try (InputStream is = Files.newInputStream(p)) { CharStream s = CharStreams.fromStream(is); @@ -96,8 +90,8 @@ public void fromSMPUTF8InputStreamHasExpectedSize() throws Exception { } @Test - public void fromBMPUTF8ChannelHasExpectedSize() throws Exception { - Path p = folder.newFile().toPath(); + public void fromBMPUTF8ChannelHasExpectedSize(@TempDir Path tempDir) throws Exception { + Path p = 
getTestFile(tempDir); Files.write(p, "hello".getBytes(StandardCharsets.UTF_8)); try (SeekableByteChannel c = Files.newByteChannel(p)) { CharStream s = CharStreams.fromChannel( @@ -110,8 +104,8 @@ public void fromBMPUTF8ChannelHasExpectedSize() throws Exception { } @Test - public void fromSMPUTF8ChannelHasExpectedSize() throws Exception { - Path p = folder.newFile().toPath(); + public void fromSMPUTF8ChannelHasExpectedSize(@TempDir Path tempDir) throws Exception { + Path p = getTestFile(tempDir); Files.write(p, "hello \uD83C\uDF0E".getBytes(StandardCharsets.UTF_8)); try (SeekableByteChannel c = Files.newByteChannel(p)) { CharStream s = CharStreams.fromChannel( @@ -124,9 +118,9 @@ public void fromSMPUTF8ChannelHasExpectedSize() throws Exception { } @Test - public void fromInvalidUTF8BytesChannelReplacesWithSubstCharInReplaceMode() + public void fromInvalidUTF8BytesChannelReplacesWithSubstCharInReplaceMode(@TempDir Path tempDir) throws Exception { - Path p = folder.newFile().toPath(); + Path p = getTestFile(tempDir); byte[] toWrite = new byte[] { (byte)0xCA, (byte)0xFE, (byte)0xFE, (byte)0xED }; Files.write(p, toWrite); try (SeekableByteChannel c = Files.newByteChannel(p)) { @@ -139,19 +133,21 @@ public void fromInvalidUTF8BytesChannelReplacesWithSubstCharInReplaceMode() } @Test - public void fromInvalidUTF8BytesThrowsInReportMode() throws Exception { - Path p = folder.newFile().toPath(); + public void fromInvalidUTF8BytesThrowsInReportMode(@TempDir Path tempDir) throws Exception { + Path p = getTestFile(tempDir); byte[] toWrite = new byte[] { (byte)0xCA, (byte)0xFE }; Files.write(p, toWrite); try (SeekableByteChannel c = Files.newByteChannel(p)) { - thrown.expect(CharacterCodingException.class); - CharStreams.fromChannel(c, 4096, CodingErrorAction.REPORT, "foo"); + assertThrows( + CharacterCodingException.class, + () -> CharStreams.fromChannel(c, 4096, CodingErrorAction.REPORT, "foo") + ); } } @Test - public void fromSMPUTF8SequenceStraddlingBufferBoundary() throws 
Exception { - Path p = folder.newFile().toPath(); + public void fromSMPUTF8SequenceStraddlingBufferBoundary(@TempDir Path tempDir) throws Exception { + Path p = getTestFile(tempDir); Files.write(p, "hello \uD83C\uDF0E".getBytes(StandardCharsets.UTF_8)); try (SeekableByteChannel c = Files.newByteChannel(p)) { CharStream s = CharStreams.fromChannel( @@ -168,8 +164,8 @@ public void fromSMPUTF8SequenceStraddlingBufferBoundary() throws Exception { } @Test - public void fromFileName() throws Exception { - Path p = folder.newFile().toPath(); + public void fromFileName(@TempDir Path tempDir) throws Exception { + Path p = getTestFile(tempDir); Files.write(p, "hello \uD83C\uDF0E".getBytes(StandardCharsets.UTF_8)); CharStream s = CharStreams.fromFileName(p.toString()); assertEquals(7, s.size()); @@ -180,20 +176,19 @@ public void fromFileName() throws Exception { } @Test - public void fromFileNameWithLatin1() throws Exception { - Path p = folder.newFile().toPath(); + public void fromFileNameWithLatin1(@TempDir Path tempDir) throws Exception { + Path p = getTestFile(tempDir); Files.write(p, "hello \u00CA\u00FE".getBytes(StandardCharsets.ISO_8859_1)); CharStream s = CharStreams.fromFileName(p.toString(), StandardCharsets.ISO_8859_1); assertEquals(8, s.size()); assertEquals(0, s.index()); assertEquals("hello \u00CA\u00FE", s.toString()); assertEquals(p.toString(), s.getSourceName()); - } @Test - public void fromReader() throws Exception { - Path p = folder.newFile().toPath(); + public void fromReader(@TempDir Path tempDir) throws Exception { + Path p = getTestFile(tempDir); Files.write(p, "hello \uD83C\uDF0E".getBytes(StandardCharsets.UTF_8)); try (Reader r = Files.newBufferedReader(p, StandardCharsets.UTF_8)) { CharStream s = CharStreams.fromReader(r); @@ -204,8 +199,8 @@ public void fromReader() throws Exception { } @Test - public void fromSMPUTF16LEPathSMPHasExpectedSize() throws Exception { - Path p = folder.newFile().toPath(); + public void 
fromSMPUTF16LEPathSMPHasExpectedSize(@TempDir Path tempDir) throws Exception { + Path p = getTestFile(tempDir); Files.write(p, "hello \uD83C\uDF0E".getBytes(StandardCharsets.UTF_16LE)); CharStream s = CharStreams.fromPath(p, StandardCharsets.UTF_16LE); assertEquals(7, s.size()); @@ -215,8 +210,8 @@ public void fromSMPUTF16LEPathSMPHasExpectedSize() throws Exception { } @Test - public void fromSMPUTF32LEPathSMPHasExpectedSize() throws Exception { - Path p = folder.newFile().toPath(); + public void fromSMPUTF32LEPathSMPHasExpectedSize(@TempDir Path tempDir) throws Exception { + Path p = getTestFile(tempDir); // UTF-32 isn't popular enough to have an entry in StandardCharsets. Charset c = Charset.forName("UTF-32LE"); Files.write(p, "hello \uD83C\uDF0E".getBytes(c)); @@ -226,4 +221,8 @@ public void fromSMPUTF32LEPathSMPHasExpectedSize() throws Exception { assertEquals("hello \uD83C\uDF0E", s.toString()); assertEquals(p.toString(), s.getSourceName()); } + + private Path getTestFile(Path dir) { + return new File(dir.toString(), "test").toPath(); + } } diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestCompositeLexers.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestCompositeLexers.java deleted file mode 100644 index b995f03754..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestCompositeLexers.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.java; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestCompositeLexers extends BaseRuntimeTest { - public TestCompositeLexers(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseJavaTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("CompositeLexers", "Java"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestCompositeParsers.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestCompositeParsers.java deleted file mode 100644 index 51f79ef432..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestCompositeParsers.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.java; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestCompositeParsers extends BaseRuntimeTest { - public TestCompositeParsers(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseJavaTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("CompositeParsers", "Java"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestFullContextParsing.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestFullContextParsing.java deleted file mode 100644 index c1fa1f3532..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestFullContextParsing.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.java; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestFullContextParsing extends BaseRuntimeTest { - public TestFullContextParsing(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseJavaTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("FullContextParsing", "Java"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestIntegerList.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestIntegerList.java index eee769f806..3d1492ce1a 100644 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestIntegerList.java +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestIntegerList.java @@ -7,16 +7,12 @@ package org.antlr.v4.test.runtime.java; import org.antlr.v4.runtime.misc.IntegerList; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; +import org.junit.jupiter.api.Test; -import static org.junit.Assert.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; public class TestIntegerList { - @Rule - public ExpectedException thrown = ExpectedException.none(); - @Test public void emptyListToEmptyCharArray() { IntegerList l = new IntegerList(); @@ -27,8 +23,10 @@ public void emptyListToEmptyCharArray() { public void negativeIntegerToCharArrayThrows() { IntegerList l = new IntegerList(); l.add(-42); - thrown.expect(IllegalArgumentException.class); - l.toCharArray(); + assertThrows( + IllegalArgumentException.class, + l::toCharArray + ); } @Test @@ -45,8 +43,10 @@ public void surrogateRangeIntegerToCharArray() { public void 
tooLargeIntegerToCharArrayThrows() { IntegerList l = new IntegerList(); l.add(0x110000); - thrown.expect(IllegalArgumentException.class); - l.toCharArray(); + assertThrows( + IllegalArgumentException.class, + l::toCharArray + ); } @Test diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestInterpreterDataReader.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestInterpreterDataReader.java index d11f6904a3..45a3fb1c1f 100644 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestInterpreterDataReader.java +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestInterpreterDataReader.java @@ -1,3 +1,9 @@ +/* + * Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + package org.antlr.v4.test.runtime.java; import org.antlr.v4.Tool; @@ -9,8 +15,7 @@ import org.antlr.v4.runtime.misc.IntegerList; import org.antlr.v4.runtime.misc.InterpreterDataReader; import org.antlr.v4.tool.Grammar; -import org.junit.Assert; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.io.IOException; import java.lang.reflect.Field; @@ -20,10 +25,12 @@ import java.util.ArrayList; import java.util.List; +import static org.junit.jupiter.api.Assertions.*; + /** This file represents a simple sanity checks on the parsing of the .interp file * available to the Java runtime for interpreting rather than compiling and executing parsers. 
*/ -public class TestInterpreterDataReader extends BaseJavaTest { +public class TestInterpreterDataReader { @Test public void testParseFile() throws IOException, NoSuchFieldException, IllegalAccessException, org.antlr.runtime.RecognitionException { Grammar g = new Grammar( @@ -67,15 +74,15 @@ public void testParseFile() throws IOException, NoSuchFieldException, IllegalAcc List channels = castList(channelsField.get(interpreterData), String.class); List modes = castList(modesField.get(interpreterData), String.class); - Assert.assertEquals(6, vocabulary.getMaxTokenType()); - Assert.assertArrayEquals(new String[]{"s","expr"}, ruleNames.toArray()); - Assert.assertArrayEquals(new String[]{"", "", "'*'", "'/'", "'+'", "'-'", ""}, literalNames); - Assert.assertArrayEquals(new String[]{"", "INT", "MUL", "DIV", "ADD", "SUB", "WS"}, symbolicNames); - Assert.assertNull(channels); - Assert.assertNull(modes); + assertEquals(6, vocabulary.getMaxTokenType()); + assertArrayEquals(new String[]{"s","expr"}, ruleNames.toArray()); + assertArrayEquals(new String[]{"", "", "'*'", "'/'", "'+'", "'-'", ""}, literalNames); + assertArrayEquals(new String[]{"", "INT", "MUL", "DIV", "ADD", "SUB", "WS"}, symbolicNames); + assertNull(channels); + assertNull(modes); IntegerList serialized = ATNSerializer.getSerialized(atn); - Assert.assertEquals(ATNDeserializer.SERIALIZED_VERSION, serialized.get(0)); + assertEquals(ATNDeserializer.SERIALIZED_VERSION, serialized.get(0)); } private List castList(Object obj, Class clazz) { diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestLeftRecursion.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestLeftRecursion.java deleted file mode 100644 index f408c588fb..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestLeftRecursion.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. 
- * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - -package org.antlr.v4.test.runtime.java; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestLeftRecursion extends BaseRuntimeTest { - public TestLeftRecursion(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseJavaTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("LeftRecursion", "Java"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestLexerErrors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestLexerErrors.java deleted file mode 100644 index aaccc0905d..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestLexerErrors.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.java; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestLexerErrors extends BaseRuntimeTest { - public TestLexerErrors(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseJavaTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("LexerErrors", "Java"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestLexerExec.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestLexerExec.java deleted file mode 100644 index 0637ceed80..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestLexerExec.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.java; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestLexerExec extends BaseRuntimeTest { - public TestLexerExec(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseJavaTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("LexerExec", "Java"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestListeners.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestListeners.java deleted file mode 100644 index 97244f0a75..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestListeners.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.java; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestListeners extends BaseRuntimeTest { - public TestListeners(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseJavaTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("Listeners", "Java"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestParseTrees.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestParseTrees.java deleted file mode 100644 index f4e8c656e7..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestParseTrees.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.java; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestParseTrees extends BaseRuntimeTest { - public TestParseTrees(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseJavaTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("ParseTrees", "Java"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestParserErrors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestParserErrors.java deleted file mode 100644 index ff590ccb07..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestParserErrors.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.java; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestParserErrors extends BaseRuntimeTest { - public TestParserErrors(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseJavaTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("ParserErrors", "Java"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestParserExec.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestParserExec.java deleted file mode 100644 index 839211f04f..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestParserExec.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.java; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestParserExec extends BaseRuntimeTest { - public TestParserExec(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseJavaTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("ParserExec", "Java"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestPerformance.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestPerformance.java deleted file mode 100644 index 348ab58852..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestPerformance.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.java; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestPerformance extends BaseRuntimeTest { - public TestPerformance(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseJavaTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("Performance", "Java"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestSemPredEvalLexer.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestSemPredEvalLexer.java deleted file mode 100644 index 81297f1a9c..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestSemPredEvalLexer.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.java; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestSemPredEvalLexer extends BaseRuntimeTest { - public TestSemPredEvalLexer(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseJavaTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("SemPredEvalLexer", "Java"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestSemPredEvalParser.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestSemPredEvalParser.java deleted file mode 100644 index d40066b449..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestSemPredEvalParser.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.java; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestSemPredEvalParser extends BaseRuntimeTest { - public TestSemPredEvalParser(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseJavaTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("SemPredEvalParser", "Java"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestSets.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestSets.java deleted file mode 100644 index 73be980b1b..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/TestSets.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.java; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestSets extends BaseRuntimeTest { - public TestSets(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseJavaTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("Sets", "Java"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/api/TestExpectedTokens.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/api/TestExpectedTokens.java index f1758813c4..9123d1d3b5 100644 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/api/TestExpectedTokens.java +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/api/TestExpectedTokens.java @@ -9,14 +9,16 @@ import org.antlr.v4.runtime.RuleContext; import org.antlr.v4.runtime.atn.ATN; import org.antlr.v4.runtime.misc.IntervalSet; -import org.antlr.v4.test.runtime.java.BaseJavaTest; +import org.antlr.v4.test.runtime.RuntimeTestUtils; +import org.antlr.v4.test.runtime.java.JavaRunner; import org.antlr.v4.tool.Grammar; -import org.junit.Test; +import org.junit.jupiter.api.Test; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; -public class TestExpectedTokens extends BaseJavaTest { - @Test public void testEpsilonAltSubrule() throws Exception { +public class TestExpectedTokens extends JavaRunner { + @Test + public void testEpsilonAltSubrule() throws Exception { String gtext = "parser grammar T;\n" + "a : A (B | ) C ;\n"; @@ -32,7 +34,7 @@ public class TestExpectedTokens extends BaseJavaTest { "s7-C->s8\n"+ "s8->RuleStop_a_1\n"+ "RuleStop_a_1-EOF->s9\n"; - checkRuleATN(g, "a", atnText); + RuntimeTestUtils.checkRuleATN(g, "a", 
atnText); ATN atn = g.getATN(); int blkStartStateNumber = 5; @@ -55,7 +57,7 @@ public class TestExpectedTokens extends BaseJavaTest { "s6-C->s7\n"+ "s7->RuleStop_a_1\n"+ "RuleStop_a_1-EOF->s8\n"; - checkRuleATN(g, "a", atnText); + RuntimeTestUtils.checkRuleATN(g, "a", atnText); ATN atn = g.getATN(); int blkStartStateNumber = 4; @@ -75,7 +77,7 @@ public class TestExpectedTokens extends BaseJavaTest { "s5-A->s6\n"+ "s6->RuleStop_a_1\n"+ "RuleStop_a_1-EOF->s11\n"; - checkRuleATN(g, "a", atnText); + RuntimeTestUtils.checkRuleATN(g, "a", atnText); atnText = "RuleStart_b_2->BlockStart_9\n"+ "BlockStart_9->s7\n"+ @@ -84,13 +86,13 @@ public class TestExpectedTokens extends BaseJavaTest { "s8->BlockEnd_10\n"+ "BlockEnd_10->RuleStop_b_3\n"+ "RuleStop_b_3->s5\n"; - checkRuleATN(g, "b", atnText); + RuntimeTestUtils.checkRuleATN(g, "b", atnText); ATN atn = g.getATN(); // From the start of 'b' with empty stack, can only see B and EOF int blkStartStateNumber = 9; - IntervalSet tokens = atn.getExpectedTokens(blkStartStateNumber, RuleContext.EMPTY); + IntervalSet tokens = atn.getExpectedTokens(blkStartStateNumber, ParserRuleContext.EMPTY); assertEquals("{, B}", tokens.toString(g.getTokenNames())); // Now call from 'a' @@ -132,7 +134,7 @@ public class TestExpectedTokens extends BaseJavaTest { "s17-expr->RuleStart_expr_2\n"+ "BlockEnd_19->StarLoopBack_22\n"+ "StarLoopBack_22->StarLoopEntry_20\n"; - checkRuleATN(g, "expr", atnText); + RuntimeTestUtils.checkRuleATN(g, "expr", atnText); ATN atn = g.getATN(); diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/api/TestTokenStream.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/api/TestTokenStream.java index 9f1f29b193..84951180ff 100644 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/api/TestTokenStream.java +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/api/TestTokenStream.java @@ -10,8 +10,9 @@ import org.antlr.v4.runtime.CharStream; import org.antlr.v4.runtime.Token; import 
org.antlr.v4.runtime.TokenStream; -import org.junit.Assert; -import org.junit.Test; +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.assertEquals; /** * This class contains tests for specific API functionality in {@link TokenStream} and derived types. @@ -26,17 +27,16 @@ public void testBufferedTokenStreamReuseAfterFill() { CharStream firstInput = new ANTLRInputStream("A"); BufferedTokenStream tokenStream = new BufferedTokenStream(new VisitorBasicLexer(firstInput)); tokenStream.fill(); - Assert.assertEquals(2, tokenStream.size()); - Assert.assertEquals(VisitorBasicLexer.A, tokenStream.get(0).getType()); - Assert.assertEquals(Token.EOF, tokenStream.get(1).getType()); + assertEquals(2, tokenStream.size()); + assertEquals(VisitorBasicLexer.A, tokenStream.get(0).getType()); + assertEquals(Token.EOF, tokenStream.get(1).getType()); CharStream secondInput = new ANTLRInputStream("AA"); tokenStream.setTokenSource(new VisitorBasicLexer(secondInput)); tokenStream.fill(); - Assert.assertEquals(3, tokenStream.size()); - Assert.assertEquals(VisitorBasicLexer.A, tokenStream.get(0).getType()); - Assert.assertEquals(VisitorBasicLexer.A, tokenStream.get(1).getType()); - Assert.assertEquals(Token.EOF, tokenStream.get(2).getType()); + assertEquals(3, tokenStream.size()); + assertEquals(VisitorBasicLexer.A, tokenStream.get(0).getType()); + assertEquals(VisitorBasicLexer.A, tokenStream.get(1).getType()); + assertEquals(Token.EOF, tokenStream.get(2).getType()); } - } diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/api/TestTokenStreamRewriter.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/api/TestTokenStreamRewriter.java index d8344de21a..dc6c4b1d4d 100644 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/api/TestTokenStreamRewriter.java +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/api/TestTokenStreamRewriter.java @@ -10,27 +10,20 @@ import org.antlr.v4.runtime.LexerInterpreter; import 
org.antlr.v4.runtime.TokenStreamRewriter; import org.antlr.v4.runtime.misc.Interval; -import org.antlr.v4.test.runtime.java.BaseJavaTest; import org.antlr.v4.tool.LexerGrammar; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.Test; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; -public class TestTokenStreamRewriter extends BaseJavaTest { +public class TestTokenStreamRewriter { /** Public default constructor used by TestRig */ public TestTokenStreamRewriter() { } - @Before - @Override - public void testSetUp() throws Exception { - super.testSetUp(); - } - - @Test public void testInsertBeforeIndex0() throws Exception { + @Test + public void testInsertBeforeIndex0() throws Exception { LexerGrammar g = new LexerGrammar( "lexer grammar T;\n"+ "A : 'a';\n" + diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/api/TestVisitors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/api/TestVisitors.java index 7a494d7873..4999e8074e 100644 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/java/api/TestVisitors.java +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/java/api/TestVisitors.java @@ -14,12 +14,13 @@ import org.antlr.v4.runtime.tree.ErrorNode; import org.antlr.v4.runtime.tree.RuleNode; import org.antlr.v4.runtime.tree.TerminalNode; -import org.junit.Assert; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.util.ArrayList; import java.util.List; +import static org.junit.jupiter.api.Assertions.assertEquals; + public class TestVisitors { /** @@ -33,7 +34,7 @@ public void testVisitTerminalNode() { VisitorBasicParser parser = new VisitorBasicParser(new CommonTokenStream(lexer)); VisitorBasicParser.SContext context = parser.s(); - Assert.assertEquals("(s A )", context.toStringTree(parser)); + assertEquals("(s A )", 
context.toStringTree(parser)); VisitorBasicVisitor listener = new VisitorBasicBaseVisitor() { @Override @@ -56,7 +57,7 @@ protected String aggregateResult(String aggregate, String nextResult) { String expected = "[@0,0:0='A',<1>,1:0]\n" + "[@1,1:0='',<-1>,1:1]\n"; - Assert.assertEquals(expected, result); + assertEquals(expected, result); } /** @@ -79,9 +80,9 @@ public void syntaxError(Recognizer recognizer, Object offendingSymbol, int }); VisitorBasicParser.SContext context = parser.s(); - Assert.assertEquals("(s )", context.toStringTree(parser)); - Assert.assertEquals(1, errors.size()); - Assert.assertEquals("line 1:0 missing 'A' at ''", errors.get(0)); + assertEquals("(s )", context.toStringTree(parser)); + assertEquals(1, errors.size()); + assertEquals("line 1:0 missing 'A' at ''", errors.get(0)); VisitorBasicVisitor listener = new VisitorBasicBaseVisitor() { @Override @@ -102,7 +103,7 @@ protected String aggregateResult(String aggregate, String nextResult) { String result = listener.visit(context); String expected = "Error encountered: [@-1,-1:-1='',<1>,1:0]"; - Assert.assertEquals(expected, result); + assertEquals(expected, result); } /** @@ -118,7 +119,7 @@ public void testShouldNotVisitEOF() { VisitorBasicParser parser = new VisitorBasicParser(new CommonTokenStream(lexer)); VisitorBasicParser.SContext context = parser.s(); - Assert.assertEquals("(s A )", context.toStringTree(parser)); + assertEquals("(s A )", context.toStringTree(parser)); VisitorBasicVisitor listener = new VisitorBasicBaseVisitor() { @Override @@ -134,7 +135,7 @@ protected boolean shouldVisitNextChild(RuleNode node, String currentResult) { String result = listener.visit(context); String expected = "[@0,0:0='A',<1>,1:0]\n"; - Assert.assertEquals(expected, result); + assertEquals(expected, result); } /** @@ -149,7 +150,7 @@ public void testShouldNotVisitTerminal() { VisitorBasicParser parser = new VisitorBasicParser(new CommonTokenStream(lexer)); VisitorBasicParser.SContext context = 
parser.s(); - Assert.assertEquals("(s A )", context.toStringTree(parser)); + assertEquals("(s A )", context.toStringTree(parser)); VisitorBasicVisitor listener = new VisitorBasicBaseVisitor() { @Override @@ -170,7 +171,7 @@ protected boolean shouldVisitNextChild(RuleNode node, String currentResult) { String result = listener.visit(context); String expected = "default result"; - Assert.assertEquals(expected, result); + assertEquals(expected, result); } /** @@ -183,7 +184,7 @@ public void testCalculatorVisitor() { VisitorCalcParser parser = new VisitorCalcParser(new CommonTokenStream(lexer)); VisitorCalcParser.SContext context = parser.s(); - Assert.assertEquals("(s (expr (expr 2) + (expr (expr 8) / (expr 2))) )", context.toStringTree(parser)); + assertEquals("(s (expr (expr 2) + (expr (expr 8) / (expr 2))) )", context.toStringTree(parser)); VisitorCalcVisitor listener = new VisitorCalcBaseVisitor() { @Override @@ -233,7 +234,7 @@ protected Integer aggregateResult(Integer aggregate, Integer nextResult) { int result = listener.visit(context); int expected = 6; - Assert.assertEquals(expected, result); + assertEquals(expected, result); } } diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/BaseNodeTest.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/BaseNodeTest.java deleted file mode 100644 index 97bf6388dd..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/BaseNodeTest.java +++ /dev/null @@ -1,298 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ -package org.antlr.v4.test.runtime.javascript; - -import org.antlr.v4.test.runtime.*; -import org.stringtemplate.v4.ST; - -import java.io.File; -import java.io.IOException; -import java.io.PrintWriter; -import java.net.URL; -import java.nio.charset.StandardCharsets; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.util.*; -import static org.antlr.v4.test.runtime.BaseRuntimeTest.antlrOnString; -import static org.antlr.v4.test.runtime.BaseRuntimeTest.writeFile; -import static org.junit.Assert.*; - -public class BaseNodeTest extends BaseRuntimeTestSupport implements RuntimeTestSupport { - private static String runtimeDir; - - static { - final ClassLoader loader = Thread.currentThread().getContextClassLoader(); - final URL runtimeSrc = loader.getResource("JavaScript"); - if ( runtimeSrc==null ) { - throw new RuntimeException("Cannot find JavaScript runtime"); - } - runtimeDir = runtimeSrc.getPath(); - if(isWindows()){ - runtimeDir = runtimeDir.replaceFirst("/", ""); - } - } - - @Override - protected String getPropertyPrefix() { - return "antlr4-javascript"; - } - - protected String execLexer(String grammarFileName, String grammarStr, - String lexerName, String input) { - return execLexer(grammarFileName, grammarStr, lexerName, input, false); - } - - @Override - public String execLexer(String grammarFileName, String grammarStr, - String lexerName, String input, boolean showDFA) { - boolean success = rawGenerateAndBuildRecognizer(grammarFileName, - grammarStr, null, lexerName, "-no-listener"); - assertTrue(success); - writeFile(getTempDirPath(), "input", input); - writeLexerTestFile(lexerName, showDFA); - writeFile(getTempDirPath(), "package.json", "{\"type\": \"module\"}"); - String output = execModule("Test.js"); - if ( output!=null && output.length()==0 ) { - output = null; - } - return output; - } - - @Override - public String execParser(String grammarFileName, String grammarStr, - String parserName, String 
lexerName, String listenerName, - String visitorName, String startRuleName, String input, - boolean showDiagnosticErrors) - { - boolean success = rawGenerateAndBuildRecognizer(grammarFileName, - grammarStr, parserName, lexerName, "-visitor"); - assertTrue(success); - writeFile(getTempDirPath(), "input", input); - rawBuildRecognizerTestFile(parserName, lexerName, listenerName, - visitorName, startRuleName, showDiagnosticErrors); - writeFile(getTempDirPath(), "package.json", "{\"type\": \"module\"}"); - return execRecognizer(); - } - - /** Return true if all is well */ - protected boolean rawGenerateAndBuildRecognizer(String grammarFileName, - String grammarStr, String parserName, String lexerName, - String... extraOptions) { - return rawGenerateAndBuildRecognizer(grammarFileName, grammarStr, - parserName, lexerName, false, extraOptions); - } - - /** Return true if all is well */ - protected boolean rawGenerateAndBuildRecognizer(String grammarFileName, - String grammarStr, String parserName, String lexerName, - boolean defaultListener, String... 
extraOptions) { - ErrorQueue equeue = antlrOnString(getTempDirPath(), "JavaScript", grammarFileName, grammarStr, - defaultListener, extraOptions); - if (!equeue.errors.isEmpty()) { - return false; - } - - List files = new ArrayList(); - if (lexerName != null) { - files.add(lexerName + ".js"); - } - if (parserName != null) { - files.add(parserName + ".js"); - Set optionsSet = new HashSet( - Arrays.asList(extraOptions)); - if (!optionsSet.contains("-no-listener")) { - files.add(grammarFileName.substring(0, - grammarFileName.lastIndexOf('.')) - + "Listener.js"); - } - if (optionsSet.contains("-visitor")) { - files.add(grammarFileName.substring(0, - grammarFileName.lastIndexOf('.')) - + "Visitor.js"); - } - } - - String newImportAntlrString = "import antlr4 from 'file://" + runtimeDir + "/src/antlr4/index.js'"; - for (String file : files) { - Path path = Paths.get(getTempDirPath(), file); - try { - String content = new String(Files.readAllBytes(path), StandardCharsets.UTF_8); - String newContent = content.replaceAll("import antlr4 from 'antlr4';", newImportAntlrString); - try (PrintWriter out = new PrintWriter(path.toString())) { - out.println(newContent); - } - } catch (IOException e) { - fail("File not found: " + path); - } - } - - return true; // allIsWell: no compile - } - - protected void rawBuildRecognizerTestFile(String parserName, - String lexerName, String listenerName, String visitorName, - String parserStartRuleName, boolean debug) { - setParseErrors(null); - if (parserName == null) { - writeLexerTestFile(lexerName, false); - } - else { - writeParserTestFile(parserName, lexerName, listenerName, - visitorName, parserStartRuleName, debug); - } - } - - public String execRecognizer() { - return execModule("Test.js"); - } - - public String execModule(String fileName) { - try { - String modulePath = new File(getTempTestDir(), fileName).getAbsolutePath(); - String nodejsPath = locateNodeJS(); - String inputPath = new File(getTempTestDir(), 
"input").getAbsolutePath(); - ProcessBuilder builder = new ProcessBuilder(nodejsPath, modulePath, - inputPath); - builder.environment().put("NODE_PATH", getTempDirPath()); - builder.directory(getTempTestDir()); - Process process = builder.start(); - StreamVacuum stdoutVacuum = new StreamVacuum( - process.getInputStream()); - StreamVacuum stderrVacuum = new StreamVacuum( - process.getErrorStream()); - stdoutVacuum.start(); - stderrVacuum.start(); - // TODO switch to jdk 8 - process.waitFor(); - // if(!process.waitFor(1L, TimeUnit.MINUTES)) - // process.destroyForcibly(); - stdoutVacuum.join(); - stderrVacuum.join(); - String output = stdoutVacuum.toString(); - if ( output.length()==0 ) { - output = null; - } - if (stderrVacuum.toString().length() > 0) { - setParseErrors(stderrVacuum.toString()); - } - return output; - } catch (Exception e) { - System.err.println("can't exec recognizer"); - e.printStackTrace(System.err); - System.err.println(); - return null; - } - } - - private boolean canExecute(String tool) { - try { - ProcessBuilder builder = new ProcessBuilder(tool, "--version"); - builder.redirectErrorStream(true); - Process process = builder.start(); - StreamVacuum vacuum = new StreamVacuum(process.getInputStream()); - vacuum.start(); - // TODO switch to jdk 8 - process.waitFor(); - // if(!process.waitFor(30L, TimeUnit.SECONDS)) - // process.destroyForcibly(); - vacuum.join(); - return process.exitValue() == 0; - } catch (Exception e) { - return false; - } - } - - private String locateNodeJS() { - // typically /usr/local/bin/node - String prop = System.getProperty("antlr-javascript-nodejs"); - if ( prop!=null && prop.length()!=0 ) { - if(prop.contains(" ")) - prop = "\"" + prop + "\""; - return prop; - } - if (canExecute("nodejs")) { - return "nodejs"; // nodejs on Debian without node-legacy package - } - return "node"; // everywhere else - } - - protected void writeParserTestFile(String parserName, String lexerName, - String listenerName, String visitorName, 
- String parserStartRuleName, boolean debug) { - ST outputFileST = new ST( - "import antlr4 from 'file:///src/antlr4/index.js'\n" - + "import from './.js';\n" - + "import from './.js';\n" - + "import from './.js';\n" - + "import from './.js';\n" - + "\n" - + "class TreeShapeListener extends antlr4.tree.ParseTreeListener {\n" + - " enterEveryRule(ctx) {\n" + - " for (let i = 0; i \\< ctx.getChildCount; i++) {\n" + - " const child = ctx.getChild(i)\n" + - " const parent = child.parentCtx\n" + - " if (parent.getRuleContext() !== ctx || !(parent instanceof antlr4.tree.RuleNode)) {\n" + - " throw `Invalid parse tree shape detected.`\n" + - " }\n" + - " }\n" + - " }\n" + - "}\n" - + "\n" - + "function main(argv) {\n" - + " var input = new antlr4.FileStream(argv[2], true);\n" - + " var lexer = new (input);\n" - + " var stream = new antlr4.CommonTokenStream(lexer);\n" - + "" - + " parser.buildParseTrees = true;\n" - + " const printer = function() {\n" - + " this.println = function(s) { console.log(s); }\n" - + " this.print = function(s) { process.stdout.write(s); }\n" - + " return this;\n" - + " };\n" - + " parser.printer = new printer();\n" - + " var tree = parser.();\n" - + " antlr4.tree.ParseTreeWalker.DEFAULT.walk(new TreeShapeListener(), tree);\n" - + "}\n" + "\n" + "main(process.argv);\n" + "\n"); - ST createParserST = new ST( - " var parser = new (stream);\n"); - if (debug) { - createParserST = new ST( - " var parser = new (stream);\n" - + " parser.addErrorListener(new antlr4.error.DiagnosticErrorListener());\n"); - } - outputFileST.add("createParser", createParserST); - outputFileST.add("parserName", parserName); - outputFileST.add("lexerName", lexerName); - outputFileST.add("listenerName", listenerName); - outputFileST.add("visitorName", visitorName); - outputFileST.add("parserStartRuleName", parserStartRuleName); - outputFileST.add("runtimeDir", runtimeDir); - writeFile(getTempDirPath(), "Test.js", outputFileST.render()); - } - - protected void 
writeLexerTestFile(String lexerName, boolean showDFA) { - ST outputFileST = new ST( - "import antlr4 from 'file:///src/antlr4/index.js'\n" - + "import from './.js';\n" - + "\n" - + "function main(argv) {\n" - + " var input = new antlr4.FileStream(argv[2], true);\n" - + " var lexer = new (input);\n" - + " var stream = new antlr4.CommonTokenStream(lexer);\n" - + " stream.fill();\n" - + " for(var i=0; i\\ generatedFiles = generatedState.generatedFiles; + for (GeneratedFile generatedFile : generatedFiles) { + try { + FileUtils.replaceInFile(Paths.get(getTempDirPath(), generatedFile.name), + "import antlr4 from 'antlr4';", + newImportAntlrString); + } catch (IOException e) { + return new CompiledState(generatedState, e); + } + } + + writeFile(getTempDirPath(), "package.json", + RuntimeTestUtils.getTextFromResource("org/antlr/v4/test/runtime/helpers/package.json")); + return new CompiledState(generatedState, null); + } + + @Override + protected void addExtraRecognizerParameters(ST template) { + template.add("runtimePath", normalizedRuntimePath); + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestCompositeLexers.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestCompositeLexers.java deleted file mode 100644 index 430292ba46..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestCompositeLexers.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestCompositeLexers extends BaseRuntimeTest { - public TestCompositeLexers(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseNodeTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("CompositeLexers", "Node"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestCompositeParsers.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestCompositeParsers.java deleted file mode 100644 index 508abb49fa..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestCompositeParsers.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestCompositeParsers extends BaseRuntimeTest { - public TestCompositeParsers(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseNodeTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("CompositeParsers", "Node"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestFullContextParsing.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestFullContextParsing.java deleted file mode 100644 index 1e537b99b0..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestFullContextParsing.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestFullContextParsing extends BaseRuntimeTest { - public TestFullContextParsing(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseNodeTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("FullContextParsing", "Node"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestLeftRecursion.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestLeftRecursion.java deleted file mode 100644 index 9042001757..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestLeftRecursion.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestLeftRecursion extends BaseRuntimeTest { - public TestLeftRecursion(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseNodeTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("LeftRecursion", "Node"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestLexerErrors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestLexerErrors.java deleted file mode 100644 index 79c19fd6c5..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestLexerErrors.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestLexerErrors extends BaseRuntimeTest { - public TestLexerErrors(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseNodeTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("LexerErrors", "Node"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestLexerExec.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestLexerExec.java deleted file mode 100644 index 84020d0679..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestLexerExec.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestLexerExec extends BaseRuntimeTest { - public TestLexerExec(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseNodeTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("LexerExec", "Node"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestListeners.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestListeners.java deleted file mode 100644 index 3af1925fc1..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestListeners.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestListeners extends BaseRuntimeTest { - public TestListeners(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseNodeTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("Listeners", "Node"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestParseTrees.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestParseTrees.java deleted file mode 100644 index 9cc7462037..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestParseTrees.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestParseTrees extends BaseRuntimeTest { - public TestParseTrees(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseNodeTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("ParseTrees", "Node"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestParserErrors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestParserErrors.java deleted file mode 100644 index 79cee3bec7..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestParserErrors.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestParserErrors extends BaseRuntimeTest { - public TestParserErrors(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseNodeTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("ParserErrors", "Node"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestParserExec.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestParserExec.java deleted file mode 100644 index 39d6495728..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestParserExec.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestParserExec extends BaseRuntimeTest { - public TestParserExec(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseNodeTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("ParserExec", "Node"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestPerformance.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestPerformance.java deleted file mode 100644 index e02046f8c7..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestPerformance.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestPerformance extends BaseRuntimeTest { - public TestPerformance(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseNodeTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("Performance", "Node"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestSemPredEvalLexer.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestSemPredEvalLexer.java deleted file mode 100644 index fb40627f13..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestSemPredEvalLexer.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestSemPredEvalLexer extends BaseRuntimeTest { - public TestSemPredEvalLexer(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseNodeTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("SemPredEvalLexer", "Node"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestSemPredEvalParser.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestSemPredEvalParser.java deleted file mode 100644 index 4fe6946e28..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestSemPredEvalParser.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestSemPredEvalParser extends BaseRuntimeTest { - public TestSemPredEvalParser(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseNodeTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("SemPredEvalParser", "Node"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestSets.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestSets.java deleted file mode 100644 index b333734599..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/javascript/TestSets.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.javascript; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestSets extends BaseRuntimeTest { - public TestSets(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseNodeTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("Sets", "Node"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/php/BasePHPTest.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/php/BasePHPTest.java deleted file mode 100644 index ec6d4fda8e..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/php/BasePHPTest.java +++ /dev/null @@ -1,423 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.php; - -import java.io.File; -import java.io.IOException; -import java.nio.file.Path; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashSet; -import java.util.List; -import java.util.Set; - -import org.antlr.v4.test.runtime.*; -import org.stringtemplate.v4.ST; - -import static org.antlr.v4.test.runtime.BaseRuntimeTest.antlrOnString; -import static org.antlr.v4.test.runtime.BaseRuntimeTest.writeFile; -import static org.junit.Assert.assertTrue; - -public class BasePHPTest extends BaseRuntimeTestSupport implements RuntimeTestSupport { - - public String getPropertyPrefix() { - return "antlr-php"; - } - - @Override - public String execLexer( - String grammarFileName, - String grammarStr, - String lexerName, - String input, - boolean showDFA - ) { - boolean success = rawGenerateAndBuildRecognizer( - grammarFileName, - grammarStr, - null, - lexerName, - "-no-listener" - ); - assertTrue(success); - writeFile(getTempDirPath(), "input", input); - writeLexerTestFile(lexerName, showDFA); - return execModule("Test.php"); - } - - public String execParser( - String grammarFileName, - String grammarStr, - String parserName, - String lexerName, - String listenerName, - String visitorName, - String startRuleName, - String input, - boolean showDiagnosticErrors - ) { - return execParser_( - grammarFileName, - grammarStr, - parserName, - lexerName, - listenerName, - visitorName, - startRuleName, - input, - showDiagnosticErrors, - false - ); - } - - public String execParser_( - String grammarFileName, - String grammarStr, - String parserName, - String lexerName, - String listenerName, - String visitorName, - String startRuleName, - String input, - boolean debug, - boolean trace - ) { - boolean success = rawGenerateAndBuildRecognizer( - grammarFileName, - grammarStr, - parserName, - lexerName, - "-visitor" - ); - - assertTrue(success); - - writeFile(getTempDirPath(), "input", input); - - rawBuildRecognizerTestFile( - 
parserName, - lexerName, - listenerName, - visitorName, - startRuleName, - debug, - trace - ); - - return execRecognizer(); - } - - /** - * Return true if all is well - */ - protected boolean rawGenerateAndBuildRecognizer( - String grammarFileName, - String grammarStr, - String parserName, - String lexerName, - String... extraOptions - ) { - return rawGenerateAndBuildRecognizer( - grammarFileName, - grammarStr, - parserName, - lexerName, - false, - extraOptions - ); - } - - /** - * Return true if all is well - */ - protected boolean rawGenerateAndBuildRecognizer( - String grammarFileName, - String grammarStr, - String parserName, - String lexerName, - boolean defaultListener, - String... extraOptions - ) { - ErrorQueue equeue = antlrOnString(getTempDirPath(), "PHP", grammarFileName, grammarStr, defaultListener, extraOptions); - - if (!equeue.errors.isEmpty()) { - return false; - } - - List files = new ArrayList(); - - if (lexerName != null) { - files.add(lexerName + ".php"); - } - - if (parserName != null) { - files.add(parserName + ".php"); - Set optionsSet = new HashSet(Arrays.asList(extraOptions)); - - if (!optionsSet.contains("-no-listener")) { - files.add(grammarFileName.substring(0, grammarFileName.lastIndexOf('.')) + "Listener.php"); - } - - if (optionsSet.contains("-visitor")) { - files.add(grammarFileName.substring(0, grammarFileName.lastIndexOf('.')) + "Visitor.php"); - } - } - - return true; - } - - protected void rawBuildRecognizerTestFile( - String parserName, - String lexerName, - String listenerName, - String visitorName, - String parserStartRuleName, - boolean debug, - boolean trace - ) { - setParseErrors(null); - if (parserName == null) { - writeLexerTestFile(lexerName, false); - } else { - writeParserTestFile( - parserName, - lexerName, - listenerName, - visitorName, - parserStartRuleName, - debug, - trace - ); - } - } - - public String execRecognizer() { - return execModule("Test.php"); - } - - public String execModule(String fileName) { - String 
phpPath = locatePhp(); - String runtimePath = locateRuntime(); - - String modulePath = new File(getTempTestDir(), fileName).getAbsolutePath(); - String inputPath = new File(getTempTestDir(), "input").getAbsolutePath(); - Path outputPath = getTempTestDir().toPath().resolve("output").toAbsolutePath(); - - try { - ProcessBuilder builder = new ProcessBuilder(phpPath, modulePath, inputPath, outputPath.toString()); - builder.environment().put("RUNTIME", runtimePath); - builder.directory(getTempTestDir()); - Process process = builder.start(); - StreamVacuum stdoutVacuum = new StreamVacuum(process.getInputStream()); - StreamVacuum stderrVacuum = new StreamVacuum(process.getErrorStream()); - stdoutVacuum.start(); - stderrVacuum.start(); - process.waitFor(); - stdoutVacuum.join(); - stderrVacuum.join(); - String output = stdoutVacuum.toString(); - - if (output.length() == 0) { - output = null; - } - - if (stderrVacuum.toString().length() > 0) { - setParseErrors(stderrVacuum.toString()); - } - - return output; - } catch (Exception e) { - System.err.println("can't exec recognizer"); - e.printStackTrace(System.err); - } - return null; - } - - private String locateTool(String tool) { - final String phpPath = System.getProperty("PHP_PATH"); - - if (phpPath != null && new File(phpPath).exists()) { - return phpPath; - } - - String[] roots = {"/usr/local/bin/", "/opt/local/bin", "/opt/homebrew/bin/", "/usr/bin/"}; - - for (String root: roots) { - if (new File(root + tool).exists()) { - return root + tool; - } - } - - throw new RuntimeException("Could not locate " + tool); - } - - protected String locatePhp() { - String propName = getPropertyPrefix() + "-php"; - String prop = System.getProperty(propName); - - if (prop == null || prop.length() == 0) { - prop = locateTool("php"); - } - - File file = new File(prop); - - if (!file.exists()) { - throw new RuntimeException("Missing system property:" + propName); - } - - return file.getAbsolutePath(); - } - - protected String 
locateRuntime() { - String propName = "antlr-php-runtime"; - String prop = System.getProperty(propName); - - if (prop == null || prop.length() == 0) { - prop = "../runtime/PHP"; - } - - File file = new File(prop); - - if (!file.exists()) { - throw new RuntimeException("Missing system property:" + propName); - } - - try { - return file.getCanonicalPath(); - } catch (IOException e) { - return file.getAbsolutePath(); - } - } - - protected void mkdir(String dir) { - File f = new File(dir); - f.mkdirs(); - } - - protected void writeLexerTestFile(String lexerName, boolean showDFA) { - ST outputFileST = new ST( - "\\($input);\n" - + "$lexer->addErrorListener(new ConsoleErrorListener());" - + "$tokens = new CommonTokenStream($lexer);\n" - + "$tokens->fill();\n" - + "\n" - + "foreach ($tokens->getAllTokens() as $token) {\n" - + " echo $token . \\PHP_EOL;\n" - + "}" - + (showDFA - ? "echo $lexer->getInterpreter()->getDFA(Lexer::DEFAULT_MODE)->toLexerString();\n" - : "") - ); - - outputFileST.add("lexerName", lexerName); - - writeFile(getTempDirPath(), "Test.php", outputFileST.render()); - } - - protected void writeParserTestFile( - String parserName, String lexerName, - String listenerName, String visitorName, - String parserStartRuleName, boolean debug, boolean trace - ) { - if (!parserStartRuleName.endsWith(")")) { - parserStartRuleName += "()"; - } - ST outputFileST = new ST( - "\\getChildCount(); $i \\< $count; $i++) {\n" - + " $parent = $ctx->getChild($i)->getParent();\n" - + "\n" - + " if (!($parent instanceof RuleNode) || $parent->getRuleContext() !== $ctx) {\n" - + " throw new RuntimeException('Invalid parse tree shape detected.');\n" - + " }\n" - + " }\n" - + " }\n" - + "}" - + "\n" - + "$input = InputStream::fromPath($argv[1]);\n" - + "$lexer = new ($input);\n" - + "$lexer->addErrorListener(new ConsoleErrorListener());" - + "$tokens = new CommonTokenStream($lexer);\n" - + "" - + "$parser->addErrorListener(new ConsoleErrorListener());" - + 
"$parser->setBuildParseTree(true);\n" - + "$tree = $parser->;\n\n" - + "ParseTreeWalker::default()->walk(new TreeShapeListener(), $tree);\n" - ); - - String stSource = "$parser = new ($tokens);\n"; - - if (debug) { - stSource += "$parser->addErrorListener(new DiagnosticErrorListener());\n"; - } - - if (trace) { - stSource += "$parser->setTrace(true);\n"; - } - - ST createParserST = new ST(stSource); - outputFileST.add("createParser", createParserST); - outputFileST.add("parserName", parserName); - outputFileST.add("lexerName", lexerName); - outputFileST.add("listenerName", listenerName); - outputFileST.add("visitorName", visitorName); - outputFileST.add("parserStartRuleName", parserStartRuleName); - - writeFile(getTempDirPath(), "Test.php", outputFileST.render()); - } - -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/php/PHPRunner.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/php/PHPRunner.java new file mode 100644 index 0000000000..94655dc052 --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/php/PHPRunner.java @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */ + +package org.antlr.v4.test.runtime.php; + +import java.util.HashMap; +import java.util.Map; + +import org.antlr.v4.test.runtime.*; + +public class PHPRunner extends RuntimeRunner { + private static final Map environment; + + static { + environment = new HashMap<>(); + environment.put("RUNTIME", getRuntimePath("PHP")); + } + + @Override + public String getLanguage() { + return "PHP"; + } + + @Override + public Map getExecEnvironment() { + return environment; + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/php/PhpRuntimeTests.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/php/PhpRuntimeTests.java new file mode 100644 index 0000000000..9de0273bb9 --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/php/PhpRuntimeTests.java @@ -0,0 +1,17 @@ +/* + * Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +package org.antlr.v4.test.runtime.php; + +import org.antlr.v4.test.runtime.RuntimeTests; +import org.antlr.v4.test.runtime.RuntimeRunner; + +public class PhpRuntimeTests extends RuntimeTests { + @Override + protected RuntimeRunner createRuntimeRunner() { + return new PHPRunner(); + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestCompositeLexers.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestCompositeLexers.java deleted file mode 100644 index 42f5541852..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestCompositeLexers.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.php; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestCompositeLexers extends BaseRuntimeTest { - public TestCompositeLexers(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePHPTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("CompositeLexers", "PHP"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestCompositeParsers.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestCompositeParsers.java deleted file mode 100644 index 5c6552c64d..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestCompositeParsers.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.php; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestCompositeParsers extends BaseRuntimeTest { - public TestCompositeParsers(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePHPTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("CompositeParsers", "PHP"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestFullContextParsing.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestFullContextParsing.java deleted file mode 100644 index 9a0ce2dc6a..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestFullContextParsing.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.php; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestFullContextParsing extends BaseRuntimeTest { - public TestFullContextParsing(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePHPTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("FullContextParsing", "PHP"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestLeftRecursion.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestLeftRecursion.java deleted file mode 100644 index 70e08dd162..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestLeftRecursion.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.php; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestLeftRecursion extends BaseRuntimeTest { - public TestLeftRecursion(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePHPTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("LeftRecursion", "PHP"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestLexerErrors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestLexerErrors.java deleted file mode 100644 index a4f22db5a7..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestLexerErrors.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.php; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestLexerErrors extends BaseRuntimeTest { - public TestLexerErrors(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePHPTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("LexerErrors", "PHP"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestLexerExec.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestLexerExec.java deleted file mode 100644 index 3742362070..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestLexerExec.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.php; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestLexerExec extends BaseRuntimeTest { - public TestLexerExec(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePHPTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("LexerExec", "PHP"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestListeners.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestListeners.java deleted file mode 100644 index aff19e65b7..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestListeners.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.php; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestListeners extends BaseRuntimeTest { - public TestListeners(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePHPTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("Listeners", "PHP"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestParseTrees.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestParseTrees.java deleted file mode 100644 index 6ce3573d4d..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestParseTrees.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.php; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestParseTrees extends BaseRuntimeTest { - public TestParseTrees(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePHPTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("ParseTrees", "PHP"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestParserErrors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestParserErrors.java deleted file mode 100644 index 0c3b39c433..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestParserErrors.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.php; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestParserErrors extends BaseRuntimeTest { - public TestParserErrors(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePHPTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("ParserErrors", "PHP"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestParserExec.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestParserExec.java deleted file mode 100644 index c282ffb71b..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestParserExec.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.php; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestParserExec extends BaseRuntimeTest { - public TestParserExec(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePHPTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("ParserExec", "PHP"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestPerformance.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestPerformance.java deleted file mode 100644 index 22b6bc3204..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestPerformance.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.php; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestPerformance extends BaseRuntimeTest { - public TestPerformance(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePHPTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("Performance", "PHP"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestSemPredEvalLexer.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestSemPredEvalLexer.java deleted file mode 100644 index f46b7167d8..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestSemPredEvalLexer.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.php; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestSemPredEvalLexer extends BaseRuntimeTest { - public TestSemPredEvalLexer(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePHPTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("SemPredEvalLexer", "PHP"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestSemPredEvalParser.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestSemPredEvalParser.java deleted file mode 100644 index 7bee122154..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestSemPredEvalParser.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.php; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestSemPredEvalParser extends BaseRuntimeTest { - public TestSemPredEvalParser(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePHPTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("SemPredEvalParser", "PHP"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestSets.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestSets.java deleted file mode 100644 index 3ee62ae034..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/php/TestSets.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.php; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestSets extends BaseRuntimeTest { - public TestSets(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePHPTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("Sets", "PHP"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/python/BasePythonTest.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/python/BasePythonTest.java deleted file mode 100644 index 126e2e0552..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/python/BasePythonTest.java +++ /dev/null @@ -1,259 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ -package org.antlr.v4.test.runtime.python; - -import org.antlr.v4.test.runtime.*; -import org.junit.runner.Description; - -import java.io.File; -import java.net.URL; -import java.nio.file.Path; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashSet; -import java.util.List; -import java.util.Set; - -import static org.antlr.v4.test.runtime.BaseRuntimeTest.antlrOnString; -import static org.antlr.v4.test.runtime.BaseRuntimeTest.writeFile; -import static org.junit.Assert.*; - -public abstract class BasePythonTest extends BaseRuntimeTestSupport implements RuntimeTestSupport { - - @Override - protected void testSucceeded(Description description) { - eraseTempPyCache(); - eraseTempDir(); - } - - @Override - protected String getPropertyPrefix() { - return "antlr-" + getLanguage().toLowerCase(); - } - - - protected abstract String getLanguage(); - - @Override - public String execLexer(String grammarFileName, - String grammarStr, - String lexerName, - String input, - boolean showDFA) - { - boolean success = rawGenerateAndBuildRecognizer(grammarFileName, - grammarStr, - null, - lexerName,"-no-listener"); - assertTrue(success); - writeFile(getTempDirPath(), "input", input); - writeLexerTestFile(lexerName, showDFA); - return execModule("Test.py"); - } - - @Override - public String execParser(String grammarFileName, - String grammarStr, - String parserName, - String lexerName, - String listenerName, - String visitorName, - String startRuleName, - String input, - boolean showDiagnosticErrors) { - return execParser_(grammarFileName, grammarStr, parserName, lexerName, - listenerName, visitorName, startRuleName, input, showDiagnosticErrors, false); - } - - public String execParser_(String grammarFileName, - String grammarStr, - String parserName, - String lexerName, - String listenerName, - String visitorName, - String startRuleName, - String input, - boolean debug, - boolean trace) - { - boolean success = rawGenerateAndBuildRecognizer(grammarFileName, 
- grammarStr, - parserName, - lexerName, - "-visitor"); - assertTrue(success); - writeFile(getTempDirPath(), "input", input); - rawBuildRecognizerTestFile(parserName, - lexerName, - listenerName, - visitorName, - startRuleName, - debug, - trace); - return execRecognizer(); - } - - /** Return true if all is well */ - protected boolean rawGenerateAndBuildRecognizer(String grammarFileName, - String grammarStr, - String parserName, - String lexerName, - String... extraOptions) - { - return rawGenerateAndBuildRecognizer(grammarFileName, grammarStr, parserName, lexerName, false, extraOptions); - } - - /** Return true if all is well */ - protected boolean rawGenerateAndBuildRecognizer(String grammarFileName, - String grammarStr, - String parserName, - String lexerName, - boolean defaultListener, - String... extraOptions) - { - ErrorQueue equeue = antlrOnString(getTempDirPath(), getLanguage(), grammarFileName, grammarStr, defaultListener, extraOptions); - if (!equeue.errors.isEmpty()) { - return false; - } - - List files = new ArrayList(); - if ( lexerName!=null ) { - files.add(lexerName+".py"); - } - if ( parserName!=null ) { - files.add(parserName+".py"); - Set optionsSet = new HashSet(Arrays.asList(extraOptions)); - if (!optionsSet.contains("-no-listener")) { - files.add(grammarFileName.substring(0, grammarFileName.lastIndexOf('.'))+"Listener.py"); - } - if (optionsSet.contains("-visitor")) { - files.add(grammarFileName.substring(0, grammarFileName.lastIndexOf('.'))+"Visitor.py"); - } - } - return true; // allIsWell: no compile - } - - protected void rawBuildRecognizerTestFile(String parserName, - String lexerName, - String listenerName, - String visitorName, - String parserStartRuleName, - boolean debug, - boolean trace) - { - setParseErrors(null); - if ( parserName==null ) { - writeLexerTestFile(lexerName, false); - } - else { - writeParserTestFile(parserName, - lexerName, - listenerName, - visitorName, - parserStartRuleName, - debug, trace); - } - } - - public String 
execRecognizer() { - return execModule("Test.py"); - } - - public String execModule(String fileName) { - String pythonPath = locatePython(); - String runtimePath = locateRuntime(); - File tmpdirFile = new File(getTempDirPath()); - String modulePath = new File(tmpdirFile, fileName).getAbsolutePath(); - String inputPath = new File(tmpdirFile, "input").getAbsolutePath(); - Path outputPath = tmpdirFile.toPath().resolve("output").toAbsolutePath(); - try { - ProcessBuilder builder = new ProcessBuilder( pythonPath, modulePath, inputPath, outputPath.toString() ); - builder.environment().put("PYTHONPATH",runtimePath); - builder.environment().put("PYTHONIOENCODING", "utf-8"); - builder.directory(tmpdirFile); - Process process = builder.start(); - StreamVacuum stderrVacuum = new StreamVacuum(process.getErrorStream()); - stderrVacuum.start(); - process.waitFor(); - stderrVacuum.join(); - String output = TestOutputReading.read(outputPath); - if ( stderrVacuum.toString().length()>0 ) { - setParseErrors(stderrVacuum.toString()); - } - return output; - } - catch (Exception e) { - System.err.println("can't exec recognizer"); - e.printStackTrace(System.err); - } - return null; - } - - private String locateTool(List tools) { - String[] roots = { - "/opt/local/bin", "/usr/bin/", "/usr/local/bin/", - "/Users/"+System.getProperty("user.name")+"/anaconda3/bin/", - "/Users/"+System.getProperty("user.name")+"/opt/anaconda3/bin/" - }; - for(String root : roots) { - for (String tool : tools) { - if ( new File(root+tool).exists() ) { - return root+tool; - } - } - } - throw new RuntimeException("Could not locate " + tools); - } - - protected String locatePython() { - String propName = getPropertyPrefix() + "-python"; - String prop = System.getProperty(propName); - if(prop==null || prop.length()==0) - prop = locateTool(getPythonExecutables()); - File file = new File(prop); - if(!file.exists()) - throw new RuntimeException("Missing system property:" + propName); - return file.getAbsolutePath(); 
- } - - protected abstract List getPythonExecutables(); - - protected String locateRuntime() { return locateRuntime(getLanguage()); } - - protected String locateRuntime(String targetName) { - final ClassLoader loader = Thread.currentThread().getContextClassLoader(); - final URL runtimeSrc = loader.getResource(targetName+"/src"); - if ( runtimeSrc==null ) { - throw new RuntimeException("Cannot find "+targetName+" runtime"); - } - if(isWindows()){ - return runtimeSrc.getPath().replaceFirst("/", ""); - } - return runtimeSrc.getPath(); - } - - protected abstract void writeParserTestFile(String parserName, - String lexerName, - String listenerName, - String visitorName, - String parserStartRuleName, - boolean debug, - boolean setTrace); - - - - protected abstract void writeLexerTestFile(String lexerName, boolean showDFA); - - - protected void eraseTempPyCache() { - File tmpdirF = new File(getTempTestDir() + "/__pycache__"); - if ( tmpdirF.exists() ) { - eraseFilesInDir(tmpdirF); - tmpdirF.delete(); - } - } - -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/python/PythonRunner.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/python/PythonRunner.java new file mode 100644 index 0000000000..90085af5a7 --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/python/PythonRunner.java @@ -0,0 +1,19 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */ +package org.antlr.v4.test.runtime.python; + +import org.antlr.v4.test.runtime.*; +import org.stringtemplate.v4.ST; + +public abstract class PythonRunner extends RuntimeRunner { + @Override + public String getExtension() { return "py"; } + + @Override + protected void addExtraRecognizerParameters(ST template) { + template.add("python3", getLanguage().equals("Python3")); + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/BasePython2Test.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/BasePython2Test.java deleted file mode 100644 index 5eb8ec8b43..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/BasePython2Test.java +++ /dev/null @@ -1,109 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - -package org.antlr.v4.test.runtime.python2; - -import org.antlr.v4.test.runtime.python.BasePythonTest; -import org.stringtemplate.v4.ST; - -import java.util.Collections; -import java.util.List; - -import static org.antlr.v4.test.runtime.BaseRuntimeTest.writeFile; - -public class BasePython2Test extends BasePythonTest { - - @Override - protected String getLanguage() { - return "Python2"; - } - - @Override - protected List getPythonExecutables() { - return Collections.singletonList("python2.7"); - } - - @Override - protected void writeLexerTestFile(String lexerName, boolean showDFA) { - ST outputFileST = new ST( - "from __future__ import print_function\n" - + "import sys\n" - + "import codecs\n" - + "from antlr4 import *\n" - + "from import \n" - + "\n" - + "def main(argv):\n" - + " input = FileStream(argv[1], encoding='utf-8', errors='replace')\n" - + " with codecs.open(argv[2], 'w', 'utf-8', 'replace') as output:\n" - + " lexer = (input, output)\n" - + " stream = CommonTokenStream(lexer)\n" - + " stream.fill()\n" - + " [ print(unicode(t), 
file=output) for t in stream.tokens ]\n" - + (showDFA ? " print(lexer._interp.decisionToDFA[Lexer.DEFAULT_MODE].toLexerString(), end='', file=output)\n" - : "") + "\n" + "if __name__ == '__main__':\n" - + " main(sys.argv)\n" + "\n"); - outputFileST.add("lexerName", lexerName); - writeFile(getTempDirPath(), "Test.py", outputFileST.render()); - } - - @Override - protected void writeParserTestFile(String parserName, String lexerName, - String listenerName, String visitorName, - String parserStartRuleName, boolean debug, boolean trace) { - if(!parserStartRuleName.endsWith(")")) - parserStartRuleName += "()"; - ST outputFileST = new ST( - "import sys\n" - + "import codecs\n" - + "from antlr4 import *\n" - + "from import \n" - + "from import \n" - + "from import \n" - + "from import \n" - + "\n" - + "class TreeShapeListener(ParseTreeListener):\n" - + "\n" - + " def visitTerminal(self, node):\n" - + " pass\n" - + "\n" - + " def visitErrorNode(self, node):\n" - + " pass\n" - + "\n" - + " def exitEveryRule(self, ctx):\n" - + " pass\n" - + "\n" - + " def enterEveryRule(self, ctx):\n" - + " for child in ctx.getChildren():\n" - + " parent = child.parentCtx\n" - + " if not isinstance(parent, RuleNode) or parent.getRuleContext() != ctx:\n" - + " raise IllegalStateException(\"Invalid parse tree shape detected.\")\n" - + "\n" - + "def main(argv):\n" - + " input = FileStream(argv[1], encoding='utf-8', errors='replace')\n" - + " with codecs.open(argv[2], 'w', 'utf-8', 'replace') as output:\n" - + " lexer = (input, output)\n" - + " stream = CommonTokenStream(lexer)\n" - + "" - + " parser.buildParseTrees = True\n" - + " tree = parser.\n" - + " ParseTreeWalker.DEFAULT.walk(TreeShapeListener(), tree)\n" - + "\n" + "if __name__ == '__main__':\n" - + " main(sys.argv)\n" + "\n"); - String stSource = " parser = (stream, output)\n"; - if(debug) - stSource += " parser.addErrorListener(DiagnosticErrorListener())\n"; - if(trace) - stSource += " parser.setTrace(True)\n"; - ST createParserST = 
new ST(stSource); - outputFileST.add("createParser", createParserST); - outputFileST.add("parserName", parserName); - outputFileST.add("lexerName", lexerName); - outputFileST.add("listenerName", listenerName); - outputFileST.add("visitorName", visitorName); - outputFileST.add("parserStartRuleName", parserStartRuleName); - writeFile(getTempDirPath(), "Test.py", outputFileST.render()); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/Python2Runner.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/Python2Runner.java new file mode 100644 index 0000000000..afd37580d2 --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/Python2Runner.java @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +package org.antlr.v4.test.runtime.python2; + +import org.antlr.v4.test.runtime.python.PythonRunner; + +import java.nio.file.Paths; +import java.util.HashMap; +import java.util.Map; + +public class Python2Runner extends PythonRunner { + public final static Map environment; + + static { + environment = new HashMap<>(); + environment.put("PYTHONPATH", Paths.get(getRuntimePath("Python2"), "src").toString()); + environment.put("PYTHONIOENCODING", "utf-8"); + } + + @Override + public String getLanguage() { + return "Python2"; + } + + @Override + public Map getExecEnvironment() { + return environment; + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/Python2RuntimeTests.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/Python2RuntimeTests.java new file mode 100644 index 0000000000..32b65c97e4 --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/Python2RuntimeTests.java @@ -0,0 +1,17 @@ +/* + * Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. 
+ * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +package org.antlr.v4.test.runtime.python2; + +import org.antlr.v4.test.runtime.RuntimeTests; +import org.antlr.v4.test.runtime.RuntimeRunner; + +public class Python2RuntimeTests extends RuntimeTests { + @Override + protected RuntimeRunner createRuntimeRunner() { + return new Python2Runner(); + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestCompositeLexers.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestCompositeLexers.java deleted file mode 100644 index 5838dc0daa..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestCompositeLexers.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - -package org.antlr.v4.test.runtime.python2; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestCompositeLexers extends BaseRuntimeTest { - public TestCompositeLexers(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePython2Test()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("CompositeLexers", "Python2"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestCompositeParsers.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestCompositeParsers.java deleted file mode 100644 index 66f4be9a42..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestCompositeParsers.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright 
(c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - -package org.antlr.v4.test.runtime.python2; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestCompositeParsers extends BaseRuntimeTest { - public TestCompositeParsers(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePython2Test()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("CompositeParsers", "Python2"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestFullContextParsing.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestFullContextParsing.java deleted file mode 100644 index b1917b38b6..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestFullContextParsing.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.python2; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestFullContextParsing extends BaseRuntimeTest { - public TestFullContextParsing(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePython2Test()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("FullContextParsing", "Python2"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestLeftRecursion.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestLeftRecursion.java deleted file mode 100644 index 66f94e419e..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestLeftRecursion.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.python2; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestLeftRecursion extends BaseRuntimeTest { - public TestLeftRecursion(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePython2Test()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("LeftRecursion", "Python2"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestLexerErrors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestLexerErrors.java deleted file mode 100644 index b1f576628a..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestLexerErrors.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.python2; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestLexerErrors extends BaseRuntimeTest { - public TestLexerErrors(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePython2Test()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("LexerErrors", "Python2"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestLexerExec.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestLexerExec.java deleted file mode 100644 index 10947ab034..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestLexerExec.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.python2; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestLexerExec extends BaseRuntimeTest { - public TestLexerExec(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePython2Test()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("LexerExec", "Python2"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestListeners.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestListeners.java deleted file mode 100644 index 118a16fb65..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestListeners.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.python2; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestListeners extends BaseRuntimeTest { - public TestListeners(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePython2Test()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("Listeners", "Python2"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestParseTrees.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestParseTrees.java deleted file mode 100644 index a72c93dfaf..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestParseTrees.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.python2; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestParseTrees extends BaseRuntimeTest { - public TestParseTrees(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePython2Test()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("ParseTrees", "Python2"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestParserErrors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestParserErrors.java deleted file mode 100644 index cd6f630421..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestParserErrors.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.python2; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestParserErrors extends BaseRuntimeTest { - public TestParserErrors(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePython2Test()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("ParserErrors", "Python2"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestParserExec.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestParserExec.java deleted file mode 100644 index 58c754bf47..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestParserExec.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.python2; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestParserExec extends BaseRuntimeTest { - public TestParserExec(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePython2Test()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("ParserExec", "Python2"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestPerformance.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestPerformance.java deleted file mode 100644 index 458a6df1df..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestPerformance.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.python2; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestPerformance extends BaseRuntimeTest { - public TestPerformance(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePython2Test()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("Performance", "Python2"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestSemPredEvalLexer.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestSemPredEvalLexer.java deleted file mode 100644 index c0e98b964e..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestSemPredEvalLexer.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.python2; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestSemPredEvalLexer extends BaseRuntimeTest { - public TestSemPredEvalLexer(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePython2Test()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("SemPredEvalLexer", "Python2"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestSemPredEvalParser.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestSemPredEvalParser.java deleted file mode 100644 index 51d8ba935e..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestSemPredEvalParser.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.python2; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestSemPredEvalParser extends BaseRuntimeTest { - public TestSemPredEvalParser(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePython2Test()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("SemPredEvalParser", "Python2"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestSets.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestSets.java deleted file mode 100644 index ca5caed02d..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/python2/TestSets.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.python2; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestSets extends BaseRuntimeTest { - public TestSets(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePython2Test()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("Sets", "Python2"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/BasePython3Test.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/BasePython3Test.java deleted file mode 100644 index 834d99174f..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/BasePython3Test.java +++ /dev/null @@ -1,107 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ -package org.antlr.v4.test.runtime.python3; - -import org.antlr.v4.test.runtime.python.BasePythonTest; -import org.stringtemplate.v4.ST; - -import java.util.Arrays; -import java.util.List; - -import static org.antlr.v4.test.runtime.BaseRuntimeTest.writeFile; - -public class BasePython3Test extends BasePythonTest { - - @Override - protected String getLanguage() { - return "Python3"; - } - - @Override - protected List getPythonExecutables() { - return Arrays.asList("python3.7", "python3.8"); - } // force 3.7 or 3.8 - - @Override - protected void writeLexerTestFile(String lexerName, boolean showDFA) { - ST outputFileST = new ST( - "import sys\n" - + "import codecs\n" - + "from antlr4 import *\n" - + "from import \n" - + "\n" - + "def main(argv):\n" - + " input = FileStream(argv[1], encoding='utf-8', errors='replace')\n" - + " with codecs.open(argv[2], 'w', 'utf-8', 'replace') as output:\n" - + " lexer = (input, output)\n" - + " stream = CommonTokenStream(lexer)\n" - + " stream.fill()\n" - + " [ print(t, file=output) for t in stream.tokens ]\n" - + (showDFA ? 
" print(lexer._interp.decisionToDFA[Lexer.DEFAULT_MODE].toLexerString(), end='', file=output)\n" - : "") + "\n" + "if __name__ == '__main__':\n" - + " main(sys.argv)\n" + "\n"); - outputFileST.add("lexerName", lexerName); - writeFile(getTempDirPath(), "Test.py", outputFileST.render()); - } - - @Override - protected void writeParserTestFile(String parserName, String lexerName, - String listenerName, String visitorName, - String parserStartRuleName, boolean debug, boolean trace) { - if(!parserStartRuleName.endsWith(")")) - parserStartRuleName += "()"; - ST outputFileST = new ST( - "import sys\n" - + "import codecs\n" - + "from antlr4 import *\n" - + "from import \n" - + "from import \n" - + "from import \n" - + "from import \n" - + "\n" - + "class TreeShapeListener(ParseTreeListener):\n" - + "\n" - + " def visitTerminal(self, node:TerminalNode):\n" - + " pass\n" - + "\n" - + " def visitErrorNode(self, node:ErrorNode):\n" - + " pass\n" - + "\n" - + " def exitEveryRule(self, ctx:ParserRuleContext):\n" - + " pass\n" - + "\n" - + " def enterEveryRule(self, ctx:ParserRuleContext):\n" - + " for child in ctx.getChildren():\n" - + " parent = child.parentCtx\n" - + " if not isinstance(parent, RuleNode) or parent.getRuleContext() != ctx:\n" - + " raise IllegalStateException(\"Invalid parse tree shape detected.\")\n" - + "\n" - + "def main(argv):\n" - + " input = FileStream(argv[1], encoding='utf-8', errors='replace')\n" - + " with codecs.open(argv[2], 'w', 'utf-8', 'replace') as output:\n" - + " lexer = (input, output)\n" - + " stream = CommonTokenStream(lexer)\n" - + "" - + " parser.buildParseTrees = True\n" - + " tree = parser.\n" - + " ParseTreeWalker.DEFAULT.walk(TreeShapeListener(), tree)\n" - + "\n" + "if __name__ == '__main__':\n" - + " main(sys.argv)\n" + "\n"); - String stSource = " parser = (stream, output)\n"; - if (debug) - stSource += " parser.addErrorListener(DiagnosticErrorListener())\n"; - if (trace) - stSource += " parser.setTrace(True)\n"; - ST createParserST 
= new ST(stSource); - outputFileST.add("createParser", createParserST); - outputFileST.add("parserName", parserName); - outputFileST.add("lexerName", lexerName); - outputFileST.add("listenerName", listenerName); - outputFileST.add("visitorName", visitorName); - outputFileST.add("parserStartRuleName", parserStartRuleName); - writeFile(getTempDirPath(), "Test.py", outputFileST.render()); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/Python3Runner.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/Python3Runner.java new file mode 100644 index 0000000000..6e11344a45 --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/Python3Runner.java @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ +package org.antlr.v4.test.runtime.python3; + +import org.antlr.v4.test.runtime.python.PythonRunner; + +import java.nio.file.Paths; +import java.util.HashMap; +import java.util.Map; + +public class Python3Runner extends PythonRunner { + public final static Map environment; + + static { + environment = new HashMap<>(); + environment.put("PYTHONPATH", Paths.get(getRuntimePath("Python3"), "src").toString()); + environment.put("PYTHONIOENCODING", "utf-8"); + } + + @Override + public String getLanguage() { + return "Python3"; + } + + @Override + public Map getExecEnvironment() { + return environment; + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/Python3RuntimeTests.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/Python3RuntimeTests.java new file mode 100644 index 0000000000..881b37fbcb --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/Python3RuntimeTests.java @@ -0,0 +1,17 @@ +/* + * Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. 
+ * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +package org.antlr.v4.test.runtime.python3; + +import org.antlr.v4.test.runtime.RuntimeTests; +import org.antlr.v4.test.runtime.RuntimeRunner; + +public class Python3RuntimeTests extends RuntimeTests { + @Override + protected RuntimeRunner createRuntimeRunner() { + return new Python3Runner(); + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestCompositeLexers.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestCompositeLexers.java deleted file mode 100644 index c10c139646..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestCompositeLexers.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - -package org.antlr.v4.test.runtime.python3; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestCompositeLexers extends BaseRuntimeTest { - public TestCompositeLexers(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePython3Test()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("CompositeLexers", "Python3"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestCompositeParsers.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestCompositeParsers.java deleted file mode 100644 index 73c9807b7e..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestCompositeParsers.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright 
(c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - -package org.antlr.v4.test.runtime.python3; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestCompositeParsers extends BaseRuntimeTest { - public TestCompositeParsers(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePython3Test()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("CompositeParsers", "Python3"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestFullContextParsing.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestFullContextParsing.java deleted file mode 100644 index 17ab6126d6..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestFullContextParsing.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.python3; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestFullContextParsing extends BaseRuntimeTest { - public TestFullContextParsing(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePython3Test()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("FullContextParsing", "Python3"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestLeftRecursion.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestLeftRecursion.java deleted file mode 100644 index 144e60d1b1..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestLeftRecursion.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.python3; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestLeftRecursion extends BaseRuntimeTest { - public TestLeftRecursion(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePython3Test()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("LeftRecursion", "Python3"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestLexerErrors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestLexerErrors.java deleted file mode 100644 index 6c1dde4263..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestLexerErrors.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.python3; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestLexerErrors extends BaseRuntimeTest { - public TestLexerErrors(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePython3Test()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("LexerErrors", "Python3"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestLexerExec.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestLexerExec.java deleted file mode 100644 index 6a1708e27b..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestLexerExec.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.python3; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestLexerExec extends BaseRuntimeTest { - public TestLexerExec(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePython3Test()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("LexerExec", "Python3"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestListeners.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestListeners.java deleted file mode 100644 index 9afff0917b..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestListeners.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.python3; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestListeners extends BaseRuntimeTest { - public TestListeners(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePython3Test()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("Listeners", "Python3"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestParseTrees.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestParseTrees.java deleted file mode 100644 index 4930f16b16..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestParseTrees.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.python3; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestParseTrees extends BaseRuntimeTest { - public TestParseTrees(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePython3Test()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("ParseTrees", "Python3"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestParserErrors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestParserErrors.java deleted file mode 100644 index a5d061afe3..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestParserErrors.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.python3; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestParserErrors extends BaseRuntimeTest { - public TestParserErrors(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePython3Test()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("ParserErrors", "Python3"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestParserExec.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestParserExec.java deleted file mode 100644 index 3addf15858..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestParserExec.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.python3; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestParserExec extends BaseRuntimeTest { - public TestParserExec(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePython3Test()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("ParserExec", "Python3"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestPerformance.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestPerformance.java deleted file mode 100644 index b436ae8393..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestPerformance.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.python3; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestPerformance extends BaseRuntimeTest { - public TestPerformance(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePython3Test()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("Performance", "Python3"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestSemPredEvalLexer.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestSemPredEvalLexer.java deleted file mode 100644 index 74f771cb4f..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestSemPredEvalLexer.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.python3; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestSemPredEvalLexer extends BaseRuntimeTest { - public TestSemPredEvalLexer(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePython3Test()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("SemPredEvalLexer", "Python3"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestSemPredEvalParser.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestSemPredEvalParser.java deleted file mode 100644 index 7a33b4ce14..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestSemPredEvalParser.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.python3; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestSemPredEvalParser extends BaseRuntimeTest { - public TestSemPredEvalParser(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePython3Test()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("SemPredEvalParser", "Python3"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestSets.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestSets.java deleted file mode 100644 index 03abf0237c..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/python3/TestSets.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.python3; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -@RunWith(Parameterized.class) -public class TestSets extends BaseRuntimeTest { - public TestSets(RuntimeTestDescriptor descriptor) { - super(descriptor,new BasePython3Test()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("Sets", "Python3"); - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/states/CompiledState.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/states/CompiledState.java new file mode 100644 index 0000000000..bea78b36c1 --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/states/CompiledState.java @@ -0,0 +1,20 @@ +/* + * Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +package org.antlr.v4.test.runtime.states; + +import org.antlr.v4.test.runtime.Stage; + +public class CompiledState extends State { + @Override + public Stage getStage() { + return Stage.Compile; + } + + public CompiledState(GeneratedState previousState, Exception exception) { + super(previousState, exception); + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/states/ExecutedState.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/states/ExecutedState.java new file mode 100644 index 0000000000..af693fed92 --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/states/ExecutedState.java @@ -0,0 +1,26 @@ +/* + * Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */ + +package org.antlr.v4.test.runtime.states; + +import org.antlr.v4.test.runtime.Stage; + +public class ExecutedState extends State { + @Override + public Stage getStage() { + return Stage.Execute; + } + + public final String output; + + public final String errors; + + public ExecutedState(CompiledState previousState, String output, String errors, Exception exception) { + super(previousState, exception); + this.output = output != null ? output : ""; + this.errors = errors != null ? errors : ""; + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/states/GeneratedState.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/states/GeneratedState.java new file mode 100644 index 0000000000..faecec5c62 --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/states/GeneratedState.java @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */ + +package org.antlr.v4.test.runtime.states; + +import org.antlr.v4.test.runtime.ErrorQueue; +import org.antlr.v4.test.runtime.GeneratedFile; +import org.antlr.v4.test.runtime.Stage; + +import java.util.List; + +import static org.antlr.v4.test.runtime.RuntimeTestUtils.joinLines; + +public class GeneratedState extends State { + @Override + public Stage getStage() { + return Stage.Generate; + } + + public final ErrorQueue errorQueue; + public final List generatedFiles; + + @Override + public boolean containsErrors() { + return errorQueue.errors.size() > 0 || super.containsErrors(); + } + + public String getErrorMessage() { + String result = super.getErrorMessage(); + + if (errorQueue.errors.size() > 0) { + result = joinLines(result, errorQueue.toString(true)); + } + + return result; + } + + public GeneratedState(ErrorQueue errorQueue, List generatedFiles, Exception exception) { + super(null, exception); + this.errorQueue = errorQueue; + this.generatedFiles = generatedFiles; + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/states/JavaCompiledState.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/states/JavaCompiledState.java new file mode 100644 index 0000000000..625f3effdb --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/states/JavaCompiledState.java @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */ + +package org.antlr.v4.test.runtime.states; + +import org.antlr.v4.runtime.*; +import org.antlr.v4.runtime.misc.Pair; + +import java.io.IOException; +import java.io.StringReader; +import java.lang.reflect.Constructor; +import java.lang.reflect.InvocationTargetException; + +public class JavaCompiledState extends CompiledState { + public final ClassLoader loader; + public final Class lexer; + public final Class parser; + + public JavaCompiledState(GeneratedState previousState, + ClassLoader loader, + Class lexer, + Class parser, + Exception exception + ) { + super(previousState, exception); + this.loader = loader; + this.lexer = lexer; + this.parser = parser; + } + + public Pair initializeLexerAndParser(String input) + throws IOException, NoSuchMethodException, InvocationTargetException, InstantiationException, IllegalAccessException { + ANTLRInputStream in = new ANTLRInputStream(new StringReader(input)); + + Constructor lexerConstructor = lexer.getConstructor(CharStream.class); + Lexer lexer = lexerConstructor.newInstance(in); + + CommonTokenStream tokens = new CommonTokenStream(lexer); + + Constructor parserConstructor = parser.getConstructor(TokenStream.class); + Parser parser = parserConstructor.newInstance(tokens); + return new Pair<>(lexer, parser); + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/states/JavaExecutedState.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/states/JavaExecutedState.java new file mode 100644 index 0000000000..57431ccf60 --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/states/JavaExecutedState.java @@ -0,0 +1,19 @@ +/* + * Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */ + +package org.antlr.v4.test.runtime.states; + +import org.antlr.v4.runtime.tree.ParseTree; + +public class JavaExecutedState extends ExecutedState { + public final ParseTree parseTree; + + public JavaExecutedState(JavaCompiledState previousState, String output, String errors, ParseTree parseTree, + Exception exception) { + super(previousState, output, errors, exception); + this.parseTree = parseTree; + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/states/State.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/states/State.java new file mode 100644 index 0000000000..eda832a690 --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/states/State.java @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +package org.antlr.v4.test.runtime.states; + +import org.antlr.v4.test.runtime.Stage; + +public abstract class State { + public final State previousState; + + public final Exception exception; + + public abstract Stage getStage(); + + public boolean containsErrors() { + return exception != null; + } + + public String getErrorMessage() { + String result = "State: " + getStage() + "; "; + if (exception != null) { + result += exception.toString(); + if ( exception.getCause()!=null ) { + result += "\nCause:\n"; + result += exception.getCause().toString(); + } + } + return result; + } + + public State(State previousState, Exception exception) { + this.previousState = previousState; + this.exception = exception; + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/BaseSwiftTest.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/BaseSwiftTest.java deleted file mode 100644 index 6751b33c16..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/BaseSwiftTest.java +++ /dev/null @@ -1,417 +0,0 @@ -/* - * Copyright 
(c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - -package org.antlr.v4.test.runtime.swift; - -import org.antlr.v4.runtime.misc.Pair; -import org.antlr.v4.test.runtime.*; -import org.stringtemplate.v4.ST; - -import java.io.BufferedReader; -import java.io.File; -import java.io.IOException; -import java.io.InputStreamReader; -import java.net.URL; -import java.util.*; - -import static org.antlr.v4.test.runtime.BaseRuntimeTest.antlrOnString; -import static org.antlr.v4.test.runtime.RuntimeTestUtils.mkdir; -import static org.antlr.v4.test.runtime.BaseRuntimeTest.writeFile; -import static org.junit.Assert.assertTrue; - -public class BaseSwiftTest extends BaseRuntimeTestSupport implements RuntimeTestSupport { - - private static final boolean USE_ARCH_ARM64 = false; - private static final boolean VERBOSE = false; - - /** - * Path of the ANTLR runtime. - */ - private static final String ANTLR_RUNTIME_PATH; - - /** - * Absolute path to swift command. - */ - private static final String SWIFT_CMD; - - /** - * Environment variable name for swift home. - */ - private static final String SWIFT_HOME_ENV_KEY = "SWIFT_HOME"; - - private static String getParent(String resourcePath, int count) { - String result = resourcePath; - while (count > 0) { - int index = result.lastIndexOf('/'); - if (index > 0) { - result = result.substring(0, index); - } - count -= 1; - } - return result; - } - - static { - Map env = System.getenv(); - String swiftHome = env.containsKey(SWIFT_HOME_ENV_KEY) ? 
env.get(SWIFT_HOME_ENV_KEY) : ""; - SWIFT_CMD = swiftHome + "swift"; - - ClassLoader loader = Thread.currentThread().getContextClassLoader(); - // build swift runtime - // path like: file:/Users/100mango/Desktop/antlr4/runtime-testsuite/target/classes/Swift - URL swiftRuntime = loader.getResource("Swift"); - if (swiftRuntime == null) { - throw new RuntimeException("Swift runtime file not found"); - } - - //enter project root - ANTLR_RUNTIME_PATH = getParent(swiftRuntime.getPath(),4); - try { - fastFailRunProcess(ANTLR_RUNTIME_PATH, SWIFT_CMD, "build", "-c", "release"); - } - catch (IOException | InterruptedException e) { - e.printStackTrace(); - throw new RuntimeException(e); - } - - // shutdown logic - Runtime.getRuntime().addShutdownHook(new Thread() { - public void run() { - try { - fastFailRunProcess(ANTLR_RUNTIME_PATH, SWIFT_CMD, "package", "clean"); - } - catch (IOException | InterruptedException e) { - e.printStackTrace(); - } - } - }); - } - - @Override - protected String getPropertyPrefix() { - return "antrl4-swift"; - } - - /** - * Source files used in each small swift project. 
- */ - private final Set sourceFiles = new HashSet<>(); - - - @Override - public String execLexer(String grammarFileName, String grammarStr, String lexerName, String input, boolean showDFA) { - generateParser(grammarFileName, - grammarStr, - null, - lexerName); - writeFile(getTempDirPath(), "input", input); - writeLexerTestFile(lexerName, showDFA); - addSourceFiles("main.swift"); - - String projectName = "testcase-" + System.currentTimeMillis(); - String projectDir = new File(getTempTestDir(), projectName).getAbsolutePath(); - try { - buildProject(projectDir, projectName); - return execTest(projectDir, projectName); - } - catch (IOException | InterruptedException e) { - e.printStackTrace(); - return null; - } - } - - @Override - public String execParser(String grammarFileName, String grammarStr, String parserName, String lexerName, String listenerName, String visitorName, String startRuleName, String input, boolean showDiagnosticErrors) { - generateParser(grammarFileName, - grammarStr, - parserName, - lexerName, - "-visitor"); - writeFile(getTempDirPath(), "input", input); - return execParser(parserName, - lexerName, - startRuleName, - showDiagnosticErrors,false); - } - - private String execTest(String projectDir, String projectName) { - try { - Pair output = runProcess(projectDir, "./.build/release/" + projectName, "input"); - if (output.b.length() > 0) { - setParseErrors(output.b); - } - String stdout = output.a; - return stdout.length() > 0 ? stdout : null; - } - catch (Exception e) { - System.err.println("Execution of testcase failed."); - e.printStackTrace(System.err); - } - return null; - } - - private void addSourceFiles(String... 
files) { - Collections.addAll(this.sourceFiles, files); - } - - private void buildProject(String projectDir, String projectName) throws IOException, InterruptedException { - mkdir(projectDir); - fastFailRunProcess(projectDir, SWIFT_CMD, "package", "init", "--type", "executable"); - for (String sourceFile: sourceFiles) { - String absPath = new File(getTempTestDir(), sourceFile).getAbsolutePath(); - fastFailRunProcess(getTempDirPath(), "mv", "-f", absPath, projectDir + "/Sources/" + projectName); - } - fastFailRunProcess(getTempDirPath(), "mv", "-f", "input", projectDir); - String dylibPath = ANTLR_RUNTIME_PATH + "/.build/release/"; -// System.err.println(dylibPath); - Pair buildResult = runProcess(projectDir, SWIFT_CMD, "build", - "-c", "release", - "-Xswiftc", "-I"+dylibPath, - "-Xlinker", "-L"+dylibPath, - "-Xlinker", "-lAntlr4", - "-Xlinker", "-rpath", - "-Xlinker", dylibPath); - if (buildResult.b.length() > 0) { - throw new IOException("unit test build failed: " + buildResult.a + "\n" + buildResult.b); - } - } - - static Boolean IS_MAC_ARM_64 = null; - - private static boolean isMacOSArm64() { - if (IS_MAC_ARM_64 == null) { - IS_MAC_ARM_64 = computeIsMacOSArm64(); - System.err.println("IS_MAC_ARM_64 = " + IS_MAC_ARM_64); - } - return IS_MAC_ARM_64; - } - - private static boolean computeIsMacOSArm64() { - String os = System.getenv("RUNNER_OS"); - if(os==null || !os.equalsIgnoreCase("macos")) - return false; - try { - Process p = Runtime.getRuntime().exec("uname -a"); - BufferedReader in = new BufferedReader(new InputStreamReader(p.getInputStream())); - String uname = in.readLine(); - return uname.contains("_ARM64_"); - } catch (IOException e) { - e.printStackTrace(); - return false; - } - } - - private static Pair runProcess(String execPath, String... 
args) throws IOException, InterruptedException { - List argsWithArch = new ArrayList<>(); - if(USE_ARCH_ARM64 && isMacOSArm64()) - argsWithArch.addAll(Arrays.asList("arch", "-arm64")); - argsWithArch.addAll(Arrays.asList(args)); - if(VERBOSE) - System.err.println("Executing " + argsWithArch + " " + execPath); - final Process process = Runtime.getRuntime().exec(argsWithArch.toArray(new String[0]), null, new File(execPath)); - StreamVacuum stdoutVacuum = new StreamVacuum(process.getInputStream()); - StreamVacuum stderrVacuum = new StreamVacuum(process.getErrorStream()); - stdoutVacuum.start(); - stderrVacuum.start(); - Timer timer = new Timer(); - timer.schedule(new TimerTask() { - @Override - public void run() { - try { - process.destroy(); - } catch(Exception e) { - e.printStackTrace(System.err); - } - } - }, 120_000); - int status = process.waitFor(); - timer.cancel(); - stdoutVacuum.join(); - stderrVacuum.join(); - if(VERBOSE) - System.err.println("Done executing " + argsWithArch + " " + execPath); - if (status != 0) { - System.err.println("Process exited with status " + status); - throw new IOException("Process exited with status " + status + ":\n" + stdoutVacuum + "\n" + stderrVacuum); - } - return new Pair<>(stdoutVacuum.toString(), stderrVacuum.toString()); - } - - private static void fastFailRunProcess(String workingDir, String... 
command) throws IOException, InterruptedException { - List argsWithArch = new ArrayList<>(); - if(USE_ARCH_ARM64 && isMacOSArm64()) - argsWithArch.addAll(Arrays.asList("arch", "-arm64")); - argsWithArch.addAll(Arrays.asList(command)); - if(VERBOSE) - System.err.println("Executing " + argsWithArch + " " + workingDir); - ProcessBuilder builder = new ProcessBuilder(argsWithArch.toArray(new String[0])); - builder.directory(new File(workingDir)); - final Process process = builder.start(); - Timer timer = new Timer(); - timer.schedule(new TimerTask() { - @Override - public void run() { - try { - process.destroy(); - } catch(Exception e) { - e.printStackTrace(System.err); - } - } - }, 120_000); - int status = process.waitFor(); - timer.cancel(); - if(VERBOSE) - System.err.println("Done executing " + argsWithArch + " " + workingDir); - if (status != 0) { - System.err.println("Process exited with status " + status); - throw new IOException("Process exited with status " + status); - } - } - - @SuppressWarnings("SameParameterValue") - private String execParser(String parserName, - String lexerName, - String parserStartRuleName, - boolean debug, - boolean profile) - { - if ( parserName==null ) { - writeLexerTestFile(lexerName, false); - } - else { - writeParserTestFile(parserName, - lexerName, - parserStartRuleName, - debug, - profile); - } - - addSourceFiles("main.swift"); - String projectName = "testcase-" + System.currentTimeMillis(); - String projectDir = new File(getTempTestDir(), projectName).getAbsolutePath(); - try { - buildProject(projectDir, projectName); - return execTest(projectDir, projectName); - } - catch (IOException | InterruptedException e) { - e.printStackTrace(); - return null; - } - } - - private void writeParserTestFile(String parserName, - String lexerName, - String parserStartRuleName, - boolean debug, - boolean profile) { - - ST outputFileST = new ST( - "import Antlr4\n" + - "import Foundation\n" + - "setbuf(stdout, nil)\n" + - "class 
TreeShapeListener: ParseTreeListener{\n" + - " func visitTerminal(_ node: TerminalNode){ }\n" + - " func visitErrorNode(_ node: ErrorNode){ }\n" + - " func enterEveryRule(_ ctx: ParserRuleContext) throws { }\n" + - " func exitEveryRule(_ ctx: ParserRuleContext) throws {\n" + - " for i in 0..\\(input)\n" + - "let tokens = CommonTokenStream(lex)\n" + - "\n" + - "parser.setBuildParseTree(true)\n" + - "\n" + - "let tree = try parser.()\n" + - "print(profiler.getDecisionInfo().description)\n" + - "try ParseTreeWalker.DEFAULT.walk(TreeShapeListener(), tree)\n" - ); - ST createParserST = new ST(" let parser = try (tokens)\n"); - if (debug) { - createParserST = - new ST( - " let parser = try (tokens)\n" + - " parser.addErrorListener(DiagnosticErrorListener())\n"); - } - if (profile) { - outputFileST.add("profile", - "let profiler = ProfilingATNSimulator(parser)\n" + - "parser.setInterpreter(profiler)"); - } - else { - outputFileST.add("profile", new ArrayList<>()); - } - outputFileST.add("createParser", createParserST); - outputFileST.add("parserName", parserName); - outputFileST.add("lexerName", lexerName); - outputFileST.add("parserStartRuleName", parserStartRuleName); - writeFile(getTempDirPath(), "main.swift", outputFileST.render()); - } - - private void writeLexerTestFile(String lexerName, boolean showDFA) { - ST outputFileST = new ST( - "import Antlr4\n" + - "import Foundation\n" + - - "setbuf(stdout, nil)\n" + - "let args = CommandLine.arguments\n" + - "let input = try ANTLRFileStream(args[1])\n" + - "let lex = (input)\n" + - "let tokens = CommonTokenStream(lex)\n" + - - "try tokens.fill()\n" + - - "for t in tokens.getTokens() {\n" + - " print(t)\n" + - "}\n" + - (showDFA ? "print(lex.getInterpreter().getDFA(Lexer.DEFAULT_MODE).toLexerString(), terminator: \"\" )\n" : "")); - - outputFileST.add("lexerName", lexerName); - writeFile(getTempDirPath(), "main.swift", outputFileST.render()); - } - - /** - * Generates the parser for one test case. 
- */ - private void generateParser(String grammarFileName, - String grammarStr, - String parserName, - String lexerName, - String... extraOptions) { - ErrorQueue equeue = antlrOnString(getTempDirPath(), "Swift", grammarFileName, grammarStr, false, extraOptions); - assertTrue(equeue.errors.isEmpty()); -// System.out.println(getTmpDir()); - - List files = new ArrayList<>(); - if (lexerName != null) { - files.add(lexerName + ".swift"); - } - - if (parserName != null) { - files.add(parserName + ".swift"); - Set optionsSet = new HashSet<>(Arrays.asList(extraOptions)); - String grammarName = grammarFileName.substring(0, grammarFileName.lastIndexOf('.')); - if (!optionsSet.contains("-no-listener")) { - files.add(grammarName + "Listener.swift"); - files.add(grammarName + "BaseListener.swift"); - } - if (optionsSet.contains("-visitor")) { - files.add(grammarName + "Visitor.swift"); - files.add(grammarName + "BaseVisitor.swift"); - } - } - addSourceFiles(files.toArray(new String[0])); - } - -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/SwiftRunner.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/SwiftRunner.java new file mode 100644 index 0000000000..1295227774 --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/SwiftRunner.java @@ -0,0 +1,136 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */ + +package org.antlr.v4.test.runtime.swift; + +import org.antlr.v4.test.runtime.*; +import org.antlr.v4.test.runtime.states.CompiledState; +import org.antlr.v4.test.runtime.states.GeneratedState; +import org.stringtemplate.v4.ST; + +import java.io.File; +import java.io.FilenameFilter; +import java.nio.file.Paths; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +import static org.antlr.v4.test.runtime.FileUtils.*; +import static org.antlr.v4.test.runtime.RuntimeTestUtils.getTextFromResource; +import static org.antlr.v4.test.runtime.RuntimeTestUtils.isWindows; + +public class SwiftRunner extends RuntimeRunner { + @Override + public String getLanguage() { + return "Swift"; + } + + @Override + public String getTestFileName() { + return "main"; + } + + private static final String swiftRuntimePath; + private static final String buildSuffix; + private static final Map environment; + + private static final String includePath; + private static final String libraryPath; + + static { + swiftRuntimePath = getRuntimePath("Swift"); + buildSuffix = isWindows() ? "x86_64-unknown-windows-msvc" : ""; + includePath = Paths.get(swiftRuntimePath, ".build", buildSuffix, "release").toString(); + environment = new HashMap<>(); + if (isWindows()) { + libraryPath = Paths.get(includePath, "Antlr4.lib").toString(); + String path = System.getenv("PATH"); + environment.put("PATH", path == null ? 
includePath : path + ";" + includePath); + } + else { + libraryPath = includePath; + } + } + + @Override + protected String getCompilerName() { + return "swift"; + } + + @Override + protected void initRuntime() throws Exception { + runCommand(new String[] {getCompilerPath(), "build", "-c", "release"}, swiftRuntimePath, "build Swift runtime"); + } + + @Override + protected CompiledState compile(RunOptions runOptions, GeneratedState generatedState) { + Exception exception = null; + try { + String tempDirPath = getTempDirPath(); + File tempDirFile = new File(tempDirPath); + + File[] ignoredFiles = tempDirFile.listFiles(NoSwiftFileFilter.Instance); + assert ignoredFiles != null; + List excludedFiles = Arrays.stream(ignoredFiles).map(File::getName).collect(Collectors.toList()); + + String text = getTextFromResource("org/antlr/v4/test/runtime/helpers/Package.swift.stg"); + ST outputFileST = new ST(text); + outputFileST.add("excludedFiles", excludedFiles); + writeFile(tempDirPath, "Package.swift", outputFileST.render()); + + String[] buildProjectArgs = new String[]{ + getCompilerPath(), + "build", + "-c", + "release", + "-Xswiftc", + "-I" + includePath, + "-Xlinker", + "-L" + includePath, + "-Xlinker", + "-lAntlr4", + "-Xlinker", + "-rpath", + "-Xlinker", + libraryPath + }; + runCommand(buildProjectArgs, tempDirPath); + } catch (Exception e) { + exception = e; + } + + return new CompiledState(generatedState, exception); + } + + static class NoSwiftFileFilter implements FilenameFilter { + public final static NoSwiftFileFilter Instance = new NoSwiftFileFilter(); + + public boolean accept(File dir, String name) { + return !name.endsWith(".swift"); + } + } + + @Override + public String getRuntimeToolName() { + return null; + } + + @Override + public String getExecFileName() { + return Paths.get(getTempDirPath(), + ".build", + buildSuffix, + "release", + "Test" + (isWindows() ? 
".exe" : "")).toString(); + } + + @Override + public Map getExecEnvironment() { + return environment; + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/SwiftRuntimeTests.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/SwiftRuntimeTests.java new file mode 100644 index 0000000000..11efa5c895 --- /dev/null +++ b/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/SwiftRuntimeTests.java @@ -0,0 +1,17 @@ +/* + * Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. + */ + +package org.antlr.v4.test.runtime.swift; + +import org.antlr.v4.test.runtime.RuntimeTests; +import org.antlr.v4.test.runtime.RuntimeRunner; + +public class SwiftRuntimeTests extends RuntimeTests { + @Override + protected RuntimeRunner createRuntimeRunner() { + return new SwiftRunner(); + } +} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestCompositeLexers.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestCompositeLexers.java deleted file mode 100644 index 7526fbcb23..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestCompositeLexers.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.swift; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestCompositeLexers extends BaseRuntimeTest { - public TestCompositeLexers(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseSwiftTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("CompositeLexers", "Swift"); - } -} - diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestCompositeParsers.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestCompositeParsers.java deleted file mode 100644 index d927d4667c..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestCompositeParsers.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.swift; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestCompositeParsers extends BaseRuntimeTest { - public TestCompositeParsers(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseSwiftTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("CompositeParsers", "Swift"); - } -} - diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestFullContextParsing.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestFullContextParsing.java deleted file mode 100644 index 2adb97bfde..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestFullContextParsing.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.swift; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestFullContextParsing extends BaseRuntimeTest { - public TestFullContextParsing(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseSwiftTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("FullContextParsing", "Swift"); - } -} - diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestLeftRecursion.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestLeftRecursion.java deleted file mode 100644 index a1deab7cf5..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestLeftRecursion.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.swift; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestLeftRecursion extends BaseRuntimeTest { - public TestLeftRecursion(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseSwiftTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("LeftRecursion", "Swift"); - } -} - diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestLexerErrors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestLexerErrors.java deleted file mode 100644 index 3484e7ac70..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestLexerErrors.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.swift; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestLexerErrors extends BaseRuntimeTest { - public TestLexerErrors(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseSwiftTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("LexerErrors", "Swift"); - } -} - diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestLexerExec.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestLexerExec.java deleted file mode 100644 index 4933c9ce0d..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestLexerExec.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.swift; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestLexerExec extends BaseRuntimeTest { - public TestLexerExec(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseSwiftTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("LexerExec", "Swift"); - } -} - diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestListeners.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestListeners.java deleted file mode 100644 index 06e1d41d9c..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestListeners.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.swift; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestListeners extends BaseRuntimeTest { - public TestListeners(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseSwiftTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("Listeners", "Swift"); - } -} - diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestParseTrees.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestParseTrees.java deleted file mode 100644 index 35e7cace68..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestParseTrees.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.swift; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestParseTrees extends BaseRuntimeTest { - public TestParseTrees(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseSwiftTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("ParseTrees", "Swift"); - } -} - diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestParserErrors.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestParserErrors.java deleted file mode 100644 index 737a5d21ae..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestParserErrors.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.swift; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestParserErrors extends BaseRuntimeTest { - public TestParserErrors(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseSwiftTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("ParserErrors", "Swift"); - } -} - diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestParserExec.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestParserExec.java deleted file mode 100644 index f01b867c7f..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestParserExec.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.swift; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestParserExec extends BaseRuntimeTest { - public TestParserExec(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseSwiftTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("ParserExec", "Swift"); - } -} - diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestPerformance.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestPerformance.java deleted file mode 100644 index b0e03bd58a..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestPerformance.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.swift; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestPerformance extends BaseRuntimeTest { - public TestPerformance(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseSwiftTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("Performance", "Swift"); - } -} - diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestSemPredEvalLexer.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestSemPredEvalLexer.java deleted file mode 100644 index 40b7134f14..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestSemPredEvalLexer.java +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - -/* This file is generated by TestGenerator, any edits will be overwritten by the next generation. 
*/ -package org.antlr.v4.test.runtime.swift; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestSemPredEvalLexer extends BaseRuntimeTest { - public TestSemPredEvalLexer(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseSwiftTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("SemPredEvalLexer", "Swift"); - } -} - diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestSemPredEvalParser.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestSemPredEvalParser.java deleted file mode 100644 index 1486f12c62..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestSemPredEvalParser.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.swift; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestSemPredEvalParser extends BaseRuntimeTest { - public TestSemPredEvalParser(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseSwiftTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("SemPredEvalParser", "Swift"); - } -} - diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestSets.java b/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestSets.java deleted file mode 100644 index 83f8cfb06e..0000000000 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/swift/TestSets.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. 
- */ - -package org.antlr.v4.test.runtime.swift; - -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.RuntimeTestDescriptor; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class TestSets extends BaseRuntimeTest { - public TestSets(RuntimeTestDescriptor descriptor) { - super(descriptor,new BaseSwiftTest()); - } - - @Parameterized.Parameters(name="{0}") - public static RuntimeTestDescriptor[] getAllTestDescriptors() { - return BaseRuntimeTest.getRuntimeTestDescriptors("Sets", "Swift"); - } -} - diff --git a/runtime/CSharp/src/Antlr4.csproj b/runtime/CSharp/src/Antlr4.csproj index ec4ddf2213..a966fe56df 100644 --- a/runtime/CSharp/src/Antlr4.csproj +++ b/runtime/CSharp/src/Antlr4.csproj @@ -1,7 +1,7 @@  The ANTLR Organization - 4.10.1 + 4.11.0 en-US netstandard2.0 net45;netstandard2.0 diff --git a/runtime/CSharp/src/Atn/ATN.cs b/runtime/CSharp/src/Atn/ATN.cs index 8f2a20f3be..a8044ca2cf 100644 --- a/runtime/CSharp/src/Atn/ATN.cs +++ b/runtime/CSharp/src/Atn/ATN.cs @@ -104,7 +104,7 @@ public virtual PredictionContext GetCachedContext(PredictionContext context) /// If /// /// is - /// + /// /// , the set of tokens will not include what can follow /// the rule surrounding /// diff --git a/runtime/CSharp/src/Atn/ATNConfig.cs b/runtime/CSharp/src/Atn/ATNConfig.cs index 11e3e6bfb3..7ca6e6235f 100644 --- a/runtime/CSharp/src/Atn/ATNConfig.cs +++ b/runtime/CSharp/src/Atn/ATNConfig.cs @@ -81,7 +81,7 @@ public ATNConfig(ATNConfig old) public ATNConfig(ATNState state, int alt, PredictionContext context) - : this(state, alt, context, SemanticContext.NONE) + : this(state, alt, context, SemanticContext.Empty.Instance) { } @@ -229,7 +229,7 @@ public String ToString(IRecognizer recog, bool showAlt) buf.Append(context.ToString()); buf.Append("]"); } - if (semanticContext != null && semanticContext != SemanticContext.NONE) + if (semanticContext != null && semanticContext != 
SemanticContext.Empty.Instance) { buf.Append(","); buf.Append(semanticContext); diff --git a/runtime/CSharp/src/Atn/ATNConfigSet.cs b/runtime/CSharp/src/Atn/ATNConfigSet.cs index 2ea9fe4b30..7c3d68d593 100644 --- a/runtime/CSharp/src/Atn/ATNConfigSet.cs +++ b/runtime/CSharp/src/Atn/ATNConfigSet.cs @@ -95,7 +95,7 @@ public bool Add(ATNConfig config, MergeCache mergeCache) { if (readOnly) throw new Exception("This set is readonly"); - if (config.semanticContext != SemanticContext.NONE) + if (config.semanticContext != SemanticContext.Empty.Instance) { hasSemanticContext = true; } @@ -171,7 +171,7 @@ public List GetPredicates() List preds = new List(); foreach (ATNConfig c in configs) { - if (c.semanticContext != SemanticContext.NONE) + if (c.semanticContext != SemanticContext.Empty.Instance) { preds.Add(c.semanticContext); } diff --git a/runtime/CSharp/src/Atn/EmptyPredictionContext.cs b/runtime/CSharp/src/Atn/EmptyPredictionContext.cs index 937092d88b..dfc971b751 100644 --- a/runtime/CSharp/src/Atn/EmptyPredictionContext.cs +++ b/runtime/CSharp/src/Atn/EmptyPredictionContext.cs @@ -12,14 +12,13 @@ namespace Antlr4.Runtime.Atn #pragma warning disable 0659 // 'class' overrides Object.Equals(object o) but does not override Object.GetHashCode() public sealed class EmptyPredictionContext : SingletonPredictionContext { + public static readonly EmptyPredictionContext Instance = new EmptyPredictionContext(); internal EmptyPredictionContext() : base(null, EMPTY_RETURN_STATE) { } - - public override PredictionContext GetParent(int index) { return null; diff --git a/runtime/CSharp/src/Atn/LL1Analyzer.cs b/runtime/CSharp/src/Atn/LL1Analyzer.cs index e679334128..87239c8d3c 100644 --- a/runtime/CSharp/src/Atn/LL1Analyzer.cs +++ b/runtime/CSharp/src/Atn/LL1Analyzer.cs @@ -48,7 +48,7 @@ public virtual IntervalSet[] GetDecisionLookahead(ATNState s) HashSet lookBusy = new HashSet(); bool seeThruPreds = false; // fail to get lookahead upon pred - Look_(s.Transition(alt).target, null, 
PredictionContext.EMPTY, look[alt], lookBusy, new BitSet(), seeThruPreds, false); + Look_(s.Transition(alt).target, null, EmptyPredictionContext.Instance, look[alt], lookBusy, new BitSet(), seeThruPreds, false); // Wipe out lookahead for this alternative if we found nothing // or we had a predicate when we !seeThruPreds if (look[alt].Count == 0 || look[alt].Contains(HitPred)) @@ -171,7 +171,7 @@ protected internal virtual void Look_(ATNState s, ATNState stopState, Prediction look.Add(TokenConstants.EOF); return; } - if (ctx != PredictionContext.EMPTY) + if (ctx != EmptyPredictionContext.Instance) { bool removed = calledRuleStack.Get(s.ruleIndex); try diff --git a/runtime/CSharp/src/Atn/LexerATNConfig.cs b/runtime/CSharp/src/Atn/LexerATNConfig.cs index 1004a49641..aa78cffe51 100644 --- a/runtime/CSharp/src/Atn/LexerATNConfig.cs +++ b/runtime/CSharp/src/Atn/LexerATNConfig.cs @@ -20,7 +20,7 @@ public class LexerATNConfig : ATNConfig public LexerATNConfig(ATNState state, int alt, PredictionContext context) - : base(state, alt, context/*, SemanticContext.NONE*/) // TODO + : base(state, alt, context/*, SemanticContext.Empty.Instance*/) // TODO { this.passedThroughNonGreedyDecision = false; this.lexerActionExecutor = null; @@ -30,7 +30,7 @@ public LexerATNConfig(ATNState state, int alt, PredictionContext context, LexerActionExecutor lexerActionExecutor) - : base(state, alt, context, SemanticContext.NONE) + : base(state, alt, context, SemanticContext.Empty.Instance) { this.lexerActionExecutor = lexerActionExecutor; this.passedThroughNonGreedyDecision = false; diff --git a/runtime/CSharp/src/Atn/LexerATNSimulator.cs b/runtime/CSharp/src/Atn/LexerATNSimulator.cs index 11effa8483..3c9479ca80 100644 --- a/runtime/CSharp/src/Atn/LexerATNSimulator.cs +++ b/runtime/CSharp/src/Atn/LexerATNSimulator.cs @@ -383,7 +383,7 @@ protected ATNState GetReachableTarget(Transition trans, int t) protected ATNConfigSet ComputeStartState(ICharStream input, ATNState p) { - PredictionContext 
initialContext = PredictionContext.EMPTY; + PredictionContext initialContext = EmptyPredictionContext.Instance; ATNConfigSet configs = new OrderedATNConfigSet(); for (int i = 0; i < p.NumberOfTransitions; i++) { @@ -432,7 +432,7 @@ protected bool Closure(ICharStream input, LexerATNConfig config, ATNConfigSet co return true; } else { - configs.Add(new LexerATNConfig(config, config.state, PredictionContext.EMPTY)); + configs.Add(new LexerATNConfig(config, config.state, EmptyPredictionContext.Instance)); currentAltReachedAcceptState = true; } } diff --git a/runtime/CSharp/src/Atn/ParserATNSimulator.cs b/runtime/CSharp/src/Atn/ParserATNSimulator.cs index 38ae76b9c1..eb6c2bfcb1 100644 --- a/runtime/CSharp/src/Atn/ParserATNSimulator.cs +++ b/runtime/CSharp/src/Atn/ParserATNSimulator.cs @@ -1238,11 +1238,11 @@ protected SemanticContext[] GetPredsForAmbigAlts(BitSet ambigAlts, /* altToPred starts as an array of all null contexts. The entry at index i * corresponds to alternative i. altToPred[i] may have one of three values: * 1. null: no ATNConfig c is found such that c.alt==i - * 2. SemanticContext.NONE: At least one ATNConfig c exists such that - * c.alt==i and c.semanticContext==SemanticContext.NONE. In other words, + * 2. SemanticContext.Empty.Instance: At least one ATNConfig c exists such that + * c.alt==i and c.semanticContext==SemanticContext.Empty.Instance. In other words, * alt i has at least one unpredicated config. * 3. Non-NONE Semantic Context: There exists at least one, and for all - * ATNConfig c such that c.alt==i, c.semanticContext!=SemanticContext.NONE. + * ATNConfig c such that c.alt==i, c.semanticContext!=SemanticContext.Empty.Instance. * * From this, it is clear that NONE||anything==NONE. 
*/ @@ -1260,9 +1260,9 @@ protected SemanticContext[] GetPredsForAmbigAlts(BitSet ambigAlts, { if (altToPred[i] == null) { - altToPred[i] = SemanticContext.NONE; + altToPred[i] = SemanticContext.Empty.Instance; } - else if (altToPred[i] != SemanticContext.NONE) + else if (altToPred[i] != SemanticContext.Empty.Instance) { nPredAlts++; } @@ -1288,13 +1288,13 @@ protected PredPrediction[] GetPredicatePredictions(BitSet ambigAlts, { SemanticContext pred = altToPred[i]; - // unpredicated is indicated by SemanticContext.NONE + // unpredicated is indicated by SemanticContext.Empty.Instance if (ambigAlts != null && ambigAlts[i]) { pairs.Add(new PredPrediction(pred, i)); } - if (pred != SemanticContext.NONE) containsPredicate = true; + if (pred != SemanticContext.Empty.Instance) containsPredicate = true; } if (!containsPredicate) @@ -1407,7 +1407,7 @@ protected Pair SplitAccordingToSemanticValidity( ATNConfigSet failed = new ATNConfigSet(configSet.fullCtx); foreach (ATNConfig c in configSet.configs) { - if (c.semanticContext != SemanticContext.NONE) + if (c.semanticContext != SemanticContext.Empty.Instance) { bool predicateEvaluationResult = EvalSemanticContext(c.semanticContext, outerContext, c.alt, configSet.fullCtx); if (predicateEvaluationResult) @@ -1438,7 +1438,7 @@ protected virtual BitSet EvalSemanticContext(PredPrediction[] predPredictions, BitSet predictions = new BitSet(); foreach (PredPrediction pair in predPredictions) { - if (pair.pred == SemanticContext.NONE) + if (pair.pred == SemanticContext.Empty.Instance) { predictions[pair.alt] = true; if (!complete) @@ -1547,7 +1547,7 @@ protected void ClosureCheckingStopState(ATNConfig config, { if (fullCtx) { - configSet.Add(new ATNConfig(config, config.state, PredictionContext.EMPTY), mergeCache); + configSet.Add(new ATNConfig(config, config.state, EmptyPredictionContext.Instance), mergeCache); continue; } else { diff --git a/runtime/CSharp/src/Atn/PredicateEvalInfo.cs b/runtime/CSharp/src/Atn/PredicateEvalInfo.cs 
index cbc3d084d3..93d3e3e980 100644 --- a/runtime/CSharp/src/Atn/PredicateEvalInfo.cs +++ b/runtime/CSharp/src/Atn/PredicateEvalInfo.cs @@ -31,7 +31,7 @@ public class PredicateEvalInfo : DecisionEventInfo /// . Note that other ATN /// configurations may predict the same alternative which are guarded by /// other semantic contexts and/or - /// + /// /// . /// public readonly int predictedAlt; diff --git a/runtime/CSharp/src/Atn/PredictionContext.cs b/runtime/CSharp/src/Atn/PredictionContext.cs index ac5dd9eb88..b8334c382d 100644 --- a/runtime/CSharp/src/Atn/PredictionContext.cs +++ b/runtime/CSharp/src/Atn/PredictionContext.cs @@ -13,8 +13,6 @@ public abstract class PredictionContext { public static readonly int EMPTY_RETURN_STATE = int.MaxValue; - public static readonly EmptyPredictionContext EMPTY = new EmptyPredictionContext(); - private static readonly int INITIAL_HASH = 1; protected internal static int CalculateEmptyHashCode() @@ -60,7 +58,7 @@ public static PredictionContext FromRuleContext(ATN atn, RuleContext outerContex if (outerContext == null) outerContext = ParserRuleContext.EMPTY; if (outerContext.Parent == null || outerContext == ParserRuleContext.EMPTY) - return PredictionContext.EMPTY; + return EmptyPredictionContext.Instance; PredictionContext parent = PredictionContext.FromRuleContext(atn, outerContext.Parent); ATNState state = atn.states[outerContext.invokingState]; RuleTransition transition = (RuleTransition)state.Transition(0); @@ -80,7 +78,7 @@ public virtual bool IsEmpty { get { - return this == EMPTY; + return this == EmptyPredictionContext.Instance; } } @@ -372,14 +370,14 @@ public static PredictionContext MergeRoot(SingletonPredictionContext a, { if (rootIsWildcard) { - if (a == PredictionContext.EMPTY) - return PredictionContext.EMPTY; // * + b = * - if (b == PredictionContext.EMPTY) - return PredictionContext.EMPTY; // a + * = * + if (a == EmptyPredictionContext.Instance) + return EmptyPredictionContext.Instance; // * + b = * + if (b == 
EmptyPredictionContext.Instance) + return EmptyPredictionContext.Instance; // a + * = * } else { - if (a == EMPTY && b == EMPTY) return EMPTY; // $ + $ = $ - if (a == EMPTY) + if (a == EmptyPredictionContext.Instance && b == EmptyPredictionContext.Instance) return EmptyPredictionContext.Instance; // $ + $ = $ + if (a == EmptyPredictionContext.Instance) { // $ + x = [$,x] int[] payloads = { b.returnState, EMPTY_RETURN_STATE }; PredictionContext[] parents = { b.parent, null }; @@ -387,7 +385,7 @@ public static PredictionContext MergeRoot(SingletonPredictionContext a, new ArrayPredictionContext(parents, payloads); return joined; } - if (b == EMPTY) + if (b == EmptyPredictionContext.Instance) { // x + $ = [$,x] ($ is always first if present) int[] payloads = { a.returnState, EMPTY_RETURN_STATE }; PredictionContext[] parents = { a.parent, null }; @@ -452,7 +450,7 @@ public static PredictionContext GetCachedContext(PredictionContext context, Pred PredictionContext updated; if (parents.Length == 0) { - updated = EMPTY; + updated = EmptyPredictionContext.Instance; } else if (parents.Length == 1) { @@ -478,7 +476,7 @@ public virtual PredictionContext GetChild(int returnState) public virtual string[] ToStrings(IRecognizer recognizer, int currentState) { - return ToStrings(recognizer, PredictionContext.EMPTY, currentState); + return ToStrings(recognizer, EmptyPredictionContext.Instance, currentState); } public virtual string[] ToStrings(IRecognizer recognizer, PredictionContext stop, int currentState) diff --git a/runtime/CSharp/src/Atn/PredictionContextCache.cs b/runtime/CSharp/src/Atn/PredictionContextCache.cs index c07ee7871b..8a36db59a0 100644 --- a/runtime/CSharp/src/Atn/PredictionContextCache.cs +++ b/runtime/CSharp/src/Atn/PredictionContextCache.cs @@ -19,8 +19,8 @@ public class PredictionContextCache */ public PredictionContext Add(PredictionContext ctx) { - if (ctx == PredictionContext.EMPTY) - return PredictionContext.EMPTY; + if (ctx == 
EmptyPredictionContext.Instance) + return EmptyPredictionContext.Instance; PredictionContext existing = cache.Get(ctx); if (existing != null) { diff --git a/runtime/CSharp/src/Atn/PredictionMode.cs b/runtime/CSharp/src/Atn/PredictionMode.cs index 8cd7d6e76e..1d801c9560 100644 --- a/runtime/CSharp/src/Atn/PredictionMode.cs +++ b/runtime/CSharp/src/Atn/PredictionMode.cs @@ -258,7 +258,7 @@ public static bool HasSLLConflictTerminatingPrediction(PredictionMode mode, ATNC ATNConfigSet dup = new ATNConfigSet(); foreach (ATNConfig c in configSet.configs) { - dup.Add(new ATNConfig(c, SemanticContext.NONE)); + dup.Add(new ATNConfig(c, SemanticContext.Empty.Instance)); } configSet = dup; } diff --git a/runtime/CSharp/src/Atn/SemanticContext.cs b/runtime/CSharp/src/Atn/SemanticContext.cs index 4d8f357b0e..33db8649d9 100644 --- a/runtime/CSharp/src/Atn/SemanticContext.cs +++ b/runtime/CSharp/src/Atn/SemanticContext.cs @@ -12,8 +12,6 @@ namespace Antlr4.Runtime.Atn { public abstract class SemanticContext { - public static readonly SemanticContext NONE = new SemanticContext.Predicate(); - public abstract bool Eval(Recognizer parser, RuleContext parserCallStack) where ATNInterpreter : ATNSimulator; @@ -23,6 +21,16 @@ public virtual SemanticContext EvalPrecedence(Recognizer return this; } + public class Empty : SemanticContext + { + public static readonly SemanticContext Instance = new Empty(); + + public override bool Eval(Recognizer parser, RuleContext parserCallStack) + { + return false; + } + } + public class Predicate : SemanticContext { public readonly int ruleIndex; @@ -105,7 +113,7 @@ public override SemanticContext EvalPrecedence(Recognize { if (parser.Precpred(parserCallStack, precedence)) { - return SemanticContext.NONE; + return SemanticContext.Empty.Instance; } else { @@ -243,7 +251,7 @@ public override SemanticContext EvalPrecedence(Recognize } else { - if (evaluated != NONE) + if (evaluated != Empty.Instance) { // Reduce the result by skipping true elements 
operands.Add(evaluated); @@ -257,7 +265,7 @@ public override SemanticContext EvalPrecedence(Recognize if (operands.Count == 0) { // all elements were true, so the AND context is true - return NONE; + return Empty.Instance; } SemanticContext result = operands[0]; for (int i = 1; i < operands.Count; i++) @@ -354,10 +362,10 @@ public override SemanticContext EvalPrecedence(Recognize { SemanticContext evaluated = context.EvalPrecedence(parser, parserCallStack); differs |= (evaluated != context); - if (evaluated == NONE) + if (evaluated == Empty.Instance) { // The OR context is true if any element is true - return NONE; + return Empty.Instance; } else { @@ -393,11 +401,11 @@ public override string ToString() public static SemanticContext AndOp(SemanticContext a, SemanticContext b) { - if (a == null || a == NONE) + if (a == null || a == Empty.Instance) { return b; } - if (b == null || b == NONE) + if (b == null || b == Empty.Instance) { return a; } @@ -419,9 +427,9 @@ public static SemanticContext OrOp(SemanticContext a, SemanticContext b) { return a; } - if (a == NONE || b == NONE) + if (a == Empty.Instance || b == Empty.Instance) { - return NONE; + return Empty.Instance; } SemanticContext.OR result = new SemanticContext.OR(a, b); if (result.opnds.Length == 1) diff --git a/runtime/CSharp/src/Atn/SingletonPredictionContext.cs b/runtime/CSharp/src/Atn/SingletonPredictionContext.cs index e162dcaca7..c378554a2d 100644 --- a/runtime/CSharp/src/Atn/SingletonPredictionContext.cs +++ b/runtime/CSharp/src/Atn/SingletonPredictionContext.cs @@ -15,7 +15,7 @@ public static PredictionContext Create(PredictionContext parent, int returnState if (returnState == EMPTY_RETURN_STATE && parent == null) { // someone can pass in the bits of an array ctx that mean $ - return PredictionContext.EMPTY; + return EmptyPredictionContext.Instance; } return new SingletonPredictionContext(parent, returnState); } diff --git a/runtime/CSharp/src/Dfa/DFAState.cs b/runtime/CSharp/src/Dfa/DFAState.cs index 
0986ac6c03..4e2df69cfd 100644 --- a/runtime/CSharp/src/Dfa/DFAState.cs +++ b/runtime/CSharp/src/Dfa/DFAState.cs @@ -168,7 +168,7 @@ public override String ToString() public class PredPrediction { - public SemanticContext pred; // never null; at least SemanticContext.NONE + public SemanticContext pred; // never null; at least SemanticContext.Empty.Instance public int alt; public PredPrediction(SemanticContext pred, int alt) { diff --git a/runtime/CSharp/src/Properties/AssemblyInfo.cs b/runtime/CSharp/src/Properties/AssemblyInfo.cs index 1bbfc3a5c2..164f6dee6a 100644 --- a/runtime/CSharp/src/Properties/AssemblyInfo.cs +++ b/runtime/CSharp/src/Properties/AssemblyInfo.cs @@ -6,4 +6,4 @@ using System.Reflection; [assembly: CLSCompliant(true)] -[assembly: AssemblyVersion("4.10.1")] +[assembly: AssemblyVersion("4.11.0")] diff --git a/runtime/CSharp/src/README.md b/runtime/CSharp/src/README.md index 2223c061b6..e2eaf48357 100644 --- a/runtime/CSharp/src/README.md +++ b/runtime/CSharp/src/README.md @@ -41,7 +41,7 @@ See the docs and the book to learn about writing lexer and parser grammars. ### Step 4: Generate the C# code This can be done either from the cmd line, or by adding a custom pre-build command in your project. -At minimal, the cmd line should look as follows: ``java -jar antlr4-4.10.1.jar -Dlanguage=CSharp grammar.g4`` +At minimal, the cmd line should look as follows: ``java -jar antlr4-4.11.0.jar -Dlanguage=CSharp grammar.g4`` This will generate the files, which you can then integrate in your project. This is just a quick start. The tool has many useful options to control generation, please refer to its documentation. 
diff --git a/runtime/Cpp/CMakeLists.txt b/runtime/Cpp/CMakeLists.txt index 302cd4a783..df621b11de 100644 --- a/runtime/Cpp/CMakeLists.txt +++ b/runtime/Cpp/CMakeLists.txt @@ -1,6 +1,7 @@ # -*- mode:cmake -*- -cmake_minimum_required (VERSION 3.14) +cmake_minimum_required (VERSION 3.15) # 3.14 needed because of FetchContent +# 3.15 needed to avid spew of warnings related to overriding cl command line flags enable_testing() @@ -24,6 +25,14 @@ endif(NOT WITH_DEMO) option(WITH_LIBCXX "Building with clang++ and libc++(in Linux). To enable with: -DWITH_LIBCXX=On" Off) option(WITH_STATIC_CRT "(Visual C++) Enable to statically link CRT, which avoids requiring users to install the redistribution package. To disable with: -DWITH_STATIC_CRT=Off" On) +option(DISABLE_WARNINGS "Suppress compiler warnings for all built ANTLR targets" OFF) + +cmake_policy(SET CMP0091 NEW) # Enable use of CMAKE_MSVC_RUNTIME_LIBRARY +if(WITH_STATIC_CRT) + set(CMAKE_MSVC_RUNTIME_LIBRARY "MultiThreaded$<$:Debug>") +else() + set(CMAKE_MSVC_RUNTIME_LIBRARY "MultiThreaded$<$:Debug>DLL") +endif(WITH_STATIC_CRT) project(LIBANTLR4) @@ -41,10 +50,6 @@ if(CMAKE_VERSION VERSION_EQUAL "3.3.0" OR CMAKE_POLICY(SET CMP0054 OLD) endif() -if(CMAKE_SYSTEM_NAME MATCHES "Linux") - find_package(PkgConfig REQUIRED) - pkg_check_modules(UUID REQUIRED uuid) -endif() if(APPLE) find_library(COREFOUNDATION_LIBRARY CoreFoundation) endif() @@ -67,17 +72,25 @@ if(WITH_DEMO) endif() endif(WITH_DEMO) -if(MSVC_VERSION) +if(CMAKE_CXX_COMPILER_ID MATCHES "MSVC") set(MY_CXX_WARNING_FLAGS " /W4") + + if(DISABLE_WARNINGS) + set(MY_CXX_WARNING_FLAGS " /w") + endif() else() set(MY_CXX_WARNING_FLAGS " -Wall -pedantic -W") + + if(DISABLE_WARNINGS) + set(MY_CXX_WARNING_FLAGS " -w") + endif() endif() # Define USE_UTF8_INSTEAD_OF_CODECVT macro. # set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DUSE_UTF8_INSTEAD_OF_CODECVT") # Initialize CXXFLAGS. 
-if("${CMAKE_VERSION}" VERSION_GREATER 3.1.0) +if(CMAKE_VERSION VERSION_GREATER 3.1.0) if(NOT DEFINED CMAKE_CXX_STANDARD) # only set CMAKE_CXX_STANDARD if not already set # this allows the standard to be set by the caller, for example with -DCMAKE_CXX_STANDARD:STRING=17 @@ -94,7 +107,8 @@ else() endif() set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${MY_CXX_WARNING_FLAGS}") -if(MSVC_VERSION) + +if(CMAKE_CXX_COMPILER_ID MATCHES "MSVC") set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /Od /Zi /MP ${MY_CXX_WARNING_FLAGS}") set(CMAKE_CXX_FLAGS_MINSIZEREL "${CMAKE_CXX_FLAGS_MINSIZEREL} /O1 /Oi /Ob2 /Gy /MP /DNDEBUG ${MY_CXX_WARNING_FLAGS}") set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /O2 /Oi /Ob2 /Gy /MP /DNDEBUG ${MY_CXX_WARNING_FLGAS}") @@ -107,20 +121,20 @@ else() endif() # Compiler-specific C++17 activation. -if("${CMAKE_CXX_COMPILER_ID}" MATCHES "GNU" OR "${CMAKE_CXX_COMPILER_ID}" MATCHES "Intel") +if(CMAKE_CXX_COMPILER_ID MATCHES "GNU" OR CMAKE_CXX_COMPILER_ID MATCHES "Intel") execute_process( COMMAND ${CMAKE_CXX_COMPILER} -dumpversion OUTPUT_VARIABLE GCC_VERSION) # Just g++-5.0 and greater contain header. (test in ubuntu) if(NOT (GCC_VERSION VERSION_GREATER 5.0 OR GCC_VERSION VERSION_EQUAL 5.0)) message(FATAL_ERROR "${PROJECT_NAME} requires g++ 5.0 or greater.") endif () -elseif ("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang" AND ANDROID) +elseif (CMAKE_CXX_COMPILER_ID MATCHES "Clang" AND ANDROID) # Need -Os cflag and cxxflags here to work with exception handling on armeabi. 
# see https://github.com/android-ndk/ndk/issues/573 # and without -stdlib=libc++ cxxflags -elseif ("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang" AND APPLE) +elseif (CMAKE_CXX_COMPILER_ID MATCHES "Clang" AND APPLE) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++17 -stdlib=libc++") -elseif ("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang" AND ( CMAKE_SYSTEM_NAME MATCHES "Linux" OR CMAKE_SYSTEM_NAME MATCHES "FreeBSD") ) +elseif (CMAKE_CXX_COMPILER_ID MATCHES "Clang" AND ( CMAKE_SYSTEM_NAME MATCHES "Linux" OR CMAKE_SYSTEM_NAME MATCHES "FreeBSD") ) execute_process( COMMAND ${CMAKE_CXX_COMPILER} -dumpversion OUTPUT_VARIABLE CLANG_VERSION) if(NOT (CLANG_VERSION VERSION_GREATER 4.2.1 OR CLANG_VERSION VERSION_EQUAL 4.2.1)) diff --git a/runtime/Cpp/CMakeSettings.json b/runtime/Cpp/CMakeSettings.json index 9eec934673..b17e0da38e 100644 --- a/runtime/Cpp/CMakeSettings.json +++ b/runtime/Cpp/CMakeSettings.json @@ -6,13 +6,21 @@ "generator": "Ninja", "configurationType": "Debug", "inheritEnvironments": [ "msvc_x86" ], - "buildRoot": "${env.USERPROFILE}\\CMakeBuilds\\${workspaceHash}\\build\\${name}", - "installRoot": "${env.USERPROFILE}\\CMakeBuilds\\${workspaceHash}\\install\\${name}", + "buildRoot": "${projectDir}\\out\\build\\${name}", + "installRoot": "${projectDir}\\out\\install\\${name}", "cmakeCommandArgs": "", "variables": [ { "name": "ANTLR4_INSTALL", "value": "1" + }, + { + "name": "WITH_STATIC_CRT", + "value": "OFF" + }, + { + "name": "WITH_DEMO", + "value": "OFF" } ], "buildCommandArgs": "-v", @@ -23,13 +31,21 @@ "generator": "Ninja", "configurationType": "RelWithDebInfo", "inheritEnvironments": [ "msvc_x86" ], - "buildRoot": "${env.USERPROFILE}\\CMakeBuilds\\${workspaceHash}\\build\\${name}", - "installRoot": "${env.USERPROFILE}\\CMakeBuilds\\${workspaceHash}\\install\\${name}", + "buildRoot": "${projectDir}\\out\\build\\${name}", + "installRoot": "${projectDir}\\out\\install\\${name}", "cmakeCommandArgs": "", "variables": [ { "name": "ANTLR4_INSTALL", "value": "1" + }, + { 
+ "name": "WITH_STATIC_CRT", + "value": "OFF" + }, + { + "name": "WITH_DEMO", + "value": "OFF" } ], "buildCommandArgs": "-v", @@ -40,13 +56,21 @@ "generator": "Ninja", "configurationType": "Debug", "inheritEnvironments": [ "msvc_x64_x64" ], - "buildRoot": "${env.USERPROFILE}\\CMakeBuilds\\${workspaceHash}\\build\\${name}", - "installRoot": "${env.USERPROFILE}\\CMakeBuilds\\${workspaceHash}\\install\\${name}", + "buildRoot": "${projectDir}\\out\\build\\${name}", + "installRoot": "${projectDir}\\out\\install\\${name}", "cmakeCommandArgs": "", "variables": [ { "name": "ANTLR4_INSTALL", "value": "1" + }, + { + "name": "WITH_STATIC_CRT", + "value": "OFF" + }, + { + "name": "WITH_DEMO", + "value": "OFF" } ], "buildCommandArgs": "-v", @@ -57,13 +81,21 @@ "generator": "Ninja", "configurationType": "RelWithDebInfo", "inheritEnvironments": [ "msvc_x64_x64" ], - "buildRoot": "${env.USERPROFILE}\\CMakeBuilds\\${workspaceHash}\\build\\${name}", - "installRoot": "${env.USERPROFILE}\\CMakeBuilds\\${workspaceHash}\\install\\${name}", + "buildRoot": "${projectDir}\\out\\build\\${name}", + "installRoot": "${projectDir}\\out\\install\\${name}", "cmakeCommandArgs": "", "variables": [ { "name": "ANTLR4_INSTALL", "value": "1" + }, + { + "name": "WITH_STATIC_CRT", + "value": "OFF" + }, + { + "name": "WITH_DEMO", + "value": "OFF" } ], "buildCommandArgs": "-v", diff --git a/runtime/Cpp/README.md b/runtime/Cpp/README.md index 7ce591ca50..622289ba77 100644 --- a/runtime/Cpp/README.md +++ b/runtime/Cpp/README.md @@ -43,9 +43,9 @@ If you are compiling with cmake, the minimum version required is cmake 2.8. By default, the libraries produced by the CMake build target C++11. If you want to target a different C++ standard, you can explicitly pass the standard - e.g. `-DCMAKE_CXX_STANDARD=17`. #### Compiling on Windows with Visual Studio using he Visual Studio projects -Simply open the VS project from the runtime folder (VS 2013+) and build it. 
+Simply open the VS project from the runtime folder (VS 2019+) and build it. -#### Compiling on Windows using cmake with Visual Studio VS2017 and later +#### Compiling on Windows using cmake with Visual Studio VS2019 and later Use the "Open Folder" Feature from the File->Open->Folder menu to open the runtime/Cpp directory. It will automatically use the CMake description to open up a Visual Studio Solution. diff --git a/runtime/Cpp/VERSION b/runtime/Cpp/VERSION index ad96464c4b..a162ea75a9 100644 --- a/runtime/Cpp/VERSION +++ b/runtime/Cpp/VERSION @@ -1 +1 @@ -4.10.1 +4.11.0 diff --git a/runtime/Cpp/cmake/Antlr4Package.md b/runtime/Cpp/cmake/Antlr4Package.md index 653cf5ec1d..530aea1c10 100644 --- a/runtime/Cpp/cmake/Antlr4Package.md +++ b/runtime/Cpp/cmake/Antlr4Package.md @@ -96,7 +96,7 @@ target_link_libraries( Parsertest PRIVATE find_package(antlr4-generator REQUIRED) # Set path to generator - set(ANTLR4_JAR_LOCATION ${PROJECT_SOURCE_DIR}/thirdparty/antlr/antlr-4.10.1-complete.jar) + set(ANTLR4_JAR_LOCATION ${PROJECT_SOURCE_DIR}/thirdparty/antlr/antlr-4.11.0-complete.jar) # generate lexer antlr4_generate( diff --git a/runtime/Cpp/cmake/ExternalAntlr4Cpp.cmake b/runtime/Cpp/cmake/ExternalAntlr4Cpp.cmake index a64233ae04..acb52da16c 100644 --- a/runtime/Cpp/cmake/ExternalAntlr4Cpp.cmake +++ b/runtime/Cpp/cmake/ExternalAntlr4Cpp.cmake @@ -1,5 +1,9 @@ cmake_minimum_required(VERSION 3.7) +if(POLICY CMP0114) + cmake_policy(SET CMP0114 NEW) +endif() + include(ExternalProject) set(ANTLR4_ROOT ${CMAKE_CURRENT_BINARY_DIR}/antlr4_runtime/src/antlr4_runtime) @@ -11,6 +15,10 @@ if(NOT DEFINED ANTLR4_TAG) set(ANTLR4_TAG master) endif() +# Ensure that the include dir already exists at configure time (to avoid cmake erroring +# on non-existent include dirs) +file(MAKE_DIRECTORY "${ANTLR4_INCLUDE_DIRS}") + if(${CMAKE_GENERATOR} MATCHES "Visual Studio.*") set(ANTLR4_OUTPUT_DIR ${ANTLR4_ROOT}/runtime/Cpp/dist/$(Configuration)) elseif(${CMAKE_GENERATOR} MATCHES "Xcode.*") @@ -38,7 
+46,7 @@ else() set(ANTLR4_SHARED_LIBRARIES ${ANTLR4_OUTPUT_DIR}/libantlr4-runtime.dll.a) set(ANTLR4_RUNTIME_LIBRARIES - ${ANTLR4_OUTPUT_DIR}/cygantlr4-runtime-4.10.1.dll) + ${ANTLR4_OUTPUT_DIR}/cygantlr4-runtime-4.11.0.dll) elseif(APPLE) set(ANTLR4_RUNTIME_LIBRARIES ${ANTLR4_OUTPUT_DIR}/libantlr4-runtime.dylib) @@ -88,6 +96,7 @@ if(ANTLR4_ZIP_REPOSITORY) CMAKE_CACHE_ARGS -DCMAKE_BUILD_TYPE:STRING=${CMAKE_BUILD_TYPE} -DWITH_STATIC_CRT:BOOL=${ANTLR4_WITH_STATIC_CRT} + -DDISABLE_WARNINGS:BOOL=ON # -DCMAKE_CXX_STANDARD:STRING=17 # if desired, compile the runtime with a different C++ standard # -DCMAKE_CXX_STANDARD:STRING=${CMAKE_CXX_STANDARD} # alternatively, compile the runtime with the same C++ standard as the outer project INSTALL_COMMAND "" @@ -106,6 +115,7 @@ else() CMAKE_CACHE_ARGS -DCMAKE_BUILD_TYPE:STRING=${CMAKE_BUILD_TYPE} -DWITH_STATIC_CRT:BOOL=${ANTLR4_WITH_STATIC_CRT} + -DDISABLE_WARNINGS:BOOL=ON # -DCMAKE_CXX_STANDARD:STRING=17 # if desired, compile the runtime with a different C++ standard # -DCMAKE_CXX_STANDARD:STRING=${CMAKE_CXX_STANDARD} # alternatively, compile the runtime with the same C++ standard as the outer project INSTALL_COMMAND "" @@ -135,6 +145,10 @@ add_library(antlr4_static STATIC IMPORTED) add_dependencies(antlr4_static antlr4_runtime-build_static) set_target_properties(antlr4_static PROPERTIES IMPORTED_LOCATION ${ANTLR4_STATIC_LIBRARIES}) +target_include_directories(antlr4_static + INTERFACE + ${ANTLR4_INCLUDE_DIRS} +) ExternalProject_Add_Step( antlr4_runtime @@ -152,6 +166,11 @@ add_library(antlr4_shared SHARED IMPORTED) add_dependencies(antlr4_shared antlr4_runtime-build_shared) set_target_properties(antlr4_shared PROPERTIES IMPORTED_LOCATION ${ANTLR4_RUNTIME_LIBRARIES}) +target_include_directories(antlr4_shared + INTERFACE + ${ANTLR4_INCLUDE_DIRS} +) + if(ANTLR4_SHARED_LIBRARIES) set_target_properties(antlr4_shared PROPERTIES IMPORTED_IMPLIB ${ANTLR4_SHARED_LIBRARIES}) diff --git a/runtime/Cpp/cmake/FindANTLR.cmake 
b/runtime/Cpp/cmake/FindANTLR.cmake index 418d9dee03..3916338e84 100644 --- a/runtime/Cpp/cmake/FindANTLR.cmake +++ b/runtime/Cpp/cmake/FindANTLR.cmake @@ -2,7 +2,7 @@ find_package(Java QUIET COMPONENTS Runtime) if(NOT ANTLR_EXECUTABLE) find_program(ANTLR_EXECUTABLE - NAMES antlr.jar antlr4.jar antlr-4.jar antlr-4.10.1-complete.jar) + NAMES antlr.jar antlr4.jar antlr-4.jar antlr-4.11.0-complete.jar) endif() if(ANTLR_EXECUTABLE AND Java_JAVA_EXECUTABLE) @@ -14,7 +14,7 @@ if(ANTLR_EXECUTABLE AND Java_JAVA_EXECUTABLE) OUTPUT_STRIP_TRAILING_WHITESPACE) if(ANTLR_COMMAND_RESULT EQUAL 0) - string(REGEX MATCH "Version [0-9]+(\\.[0-9])*" ANTLR_VERSION ${ANTLR_COMMAND_OUTPUT}) + string(REGEX MATCH "Version [0-9]+(\\.[0-9]+)*" ANTLR_VERSION ${ANTLR_COMMAND_OUTPUT}) string(REPLACE "Version " "" ANTLR_VERSION ${ANTLR_VERSION}) else() message( diff --git a/runtime/Cpp/cmake/README.md b/runtime/Cpp/cmake/README.md index 08186b120f..0cd0b1f39e 100644 --- a/runtime/Cpp/cmake/README.md +++ b/runtime/Cpp/cmake/README.md @@ -29,8 +29,8 @@ set(ANTLR4_WITH_STATIC_CRT OFF) # By default the latest version of antlr4 will be used. You can specify a # specific, stable version by setting a repository tag value or a link # to a zip file containing the libary source. 
-# set(ANTLR4_TAG 4.10.1) -# set(ANTLR4_ZIP_REPOSITORY https://github.com/antlr/antlr4/archive/refs/tags/4.10.1.zip) +# set(ANTLR4_TAG 4.11.0) +# set(ANTLR4_ZIP_REPOSITORY https://github.com/antlr/antlr4/archive/refs/tags/4.11.0.zip) # add external build for antlrcpp include(ExternalAntlr4Cpp) @@ -39,7 +39,7 @@ include_directories(${ANTLR4_INCLUDE_DIRS}) # set variable pointing to the antlr tool that supports C++ # this is not required if the jar file can be found under PATH environment -set(ANTLR_EXECUTABLE /home/user/antlr-4.10.1-complete.jar) +set(ANTLR_EXECUTABLE /home/user/antlr-4.11.0-complete.jar) # add macros to generate ANTLR Cpp code from grammar find_package(ANTLR REQUIRED) diff --git a/runtime/Cpp/cmake/antlr4-runtime.cmake.in b/runtime/Cpp/cmake/antlr4-runtime.cmake.in index 860aeb6012..697b36c628 100644 --- a/runtime/Cpp/cmake/antlr4-runtime.cmake.in +++ b/runtime/Cpp/cmake/antlr4-runtime.cmake.in @@ -5,6 +5,9 @@ set(ANTLR_VERSION @ANTLR_VERSION@) set_and_check(ANTLR4_INCLUDE_DIR "@PACKAGE_ANTLR4_INCLUDE_DIR@") set_and_check(ANTLR4_LIB_DIR "@PACKAGE_ANTLR4_LIB_DIR@") +include(CMakeFindDependencyMacro) +find_dependency(Threads) + include(${CMAKE_CURRENT_LIST_DIR}/@targets_export_name@.cmake) check_required_components(antlr) diff --git a/runtime/Cpp/demo/Windows/antlr4-cpp-demo/antlr4-cpp-demo-vs2015.vcxproj b/runtime/Cpp/demo/Windows/antlr4-cpp-demo/antlr4-cpp-demo-vs2022.vcxproj similarity index 94% rename from runtime/Cpp/demo/Windows/antlr4-cpp-demo/antlr4-cpp-demo-vs2015.vcxproj rename to runtime/Cpp/demo/Windows/antlr4-cpp-demo/antlr4-cpp-demo-vs2022.vcxproj index f004fb06ce..ef29a4ecce 100644 --- a/runtime/Cpp/demo/Windows/antlr4-cpp-demo/antlr4-cpp-demo-vs2015.vcxproj +++ b/runtime/Cpp/demo/Windows/antlr4-cpp-demo/antlr4-cpp-demo-vs2022.vcxproj @@ -39,58 +39,58 @@ Win32Proj antlr4cppdemo antlr4cpp-demo - 8.1 + 10.0 Application true - v140 + v143 Unicode Application true - v140 + v143 Unicode Application true - v140 + v143 Unicode Application 
true - v140 + v143 Unicode Application false - v140 + v143 true Unicode Application false - v140 + v143 true Unicode Application false - v140 + v143 true Unicode Application false - v140 + v143 true Unicode @@ -124,42 +124,42 @@ true - $(SolutionDir)bin\vs-2015\$(PlatformTarget)\$(Configuration)\ + $(SolutionDir)bin\vs-2022\$(PlatformTarget)\$(Configuration)\ $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ true - $(SolutionDir)bin\vs-2015\$(PlatformTarget)\$(Configuration)\ + $(SolutionDir)bin\vs-2022\$(PlatformTarget)\$(Configuration)\ $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ true - $(SolutionDir)bin\vs-2015\$(PlatformTarget)\$(Configuration)\ + $(SolutionDir)bin\vs-2022\$(PlatformTarget)\$(Configuration)\ $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ true - $(SolutionDir)bin\vs-2015\$(PlatformTarget)\$(Configuration)\ + $(SolutionDir)bin\vs-2022\$(PlatformTarget)\$(Configuration)\ $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ false - $(SolutionDir)bin\vs-2015\$(PlatformTarget)\$(Configuration)\ + $(SolutionDir)bin\vs-2022\$(PlatformTarget)\$(Configuration)\ $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ false - $(SolutionDir)bin\vs-2015\$(PlatformTarget)\$(Configuration)\ + $(SolutionDir)bin\vs-2022\$(PlatformTarget)\$(Configuration)\ $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ false - $(SolutionDir)bin\vs-2015\$(PlatformTarget)\$(Configuration)\ + $(SolutionDir)bin\vs-2022\$(PlatformTarget)\$(Configuration)\ $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ false - $(SolutionDir)bin\vs-2015\$(PlatformTarget)\$(Configuration)\ + $(SolutionDir)bin\vs-2022\$(PlatformTarget)\$(Configuration)\ $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ @@ -216,6 +216,7 @@ 4251 true false + stdcpp17 Console @@ -236,6 +237,7 @@ 4251 true false + stdcpp17 Console @@ -350,9 +352,10 @@ + - + 
{a9762991-1b57-4dce-90c0-ee42b96947be} diff --git a/runtime/Cpp/demo/Windows/antlr4-cpp-demo/antlr4-cpp-demo-vs2015.vcxproj.filters b/runtime/Cpp/demo/Windows/antlr4-cpp-demo/antlr4-cpp-demo-vs2022.vcxproj.filters similarity index 96% rename from runtime/Cpp/demo/Windows/antlr4-cpp-demo/antlr4-cpp-demo-vs2015.vcxproj.filters rename to runtime/Cpp/demo/Windows/antlr4-cpp-demo/antlr4-cpp-demo-vs2022.vcxproj.filters index ed56184124..e191b9354b 100644 --- a/runtime/Cpp/demo/Windows/antlr4-cpp-demo/antlr4-cpp-demo-vs2015.vcxproj.filters +++ b/runtime/Cpp/demo/Windows/antlr4-cpp-demo/antlr4-cpp-demo-vs2022.vcxproj.filters @@ -59,5 +59,8 @@ generated + + Header Files + \ No newline at end of file diff --git a/runtime/Cpp/demo/Windows/antlr4-cpp-demo/antlr4-cpp-demo.vcxproj b/runtime/Cpp/demo/Windows/antlr4-cpp-demo/antlr4-cpp-demo.vcxproj deleted file mode 100644 index ec6240de0e..0000000000 --- a/runtime/Cpp/demo/Windows/antlr4-cpp-demo/antlr4-cpp-demo.vcxproj +++ /dev/null @@ -1,349 +0,0 @@ - - - - - Debug DLL - Win32 - - - Debug DLL - x64 - - - Debug Static - Win32 - - - Debug Static - x64 - - - Release DLL - Win32 - - - Release DLL - x64 - - - Release Static - Win32 - - - Release Static - x64 - - - - {24EC5104-7402-4C76-B66B-27ADBE062D68} - Win32Proj - antlr4cppdemo - antlr4cpp-demo - - - - Application - true - v120 - Unicode - - - Application - true - v120 - Unicode - - - Application - true - v120 - Unicode - - - Application - true - v120 - Unicode - - - Application - false - v120 - true - Unicode - - - Application - false - v120 - true - Unicode - - - Application - false - v120 - true - Unicode - - - Application - false - v120 - true - Unicode - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - true - $(SolutionDir)bin\vs-2013\$(PlatformTarget)\$(Configuration)\ - $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ - - - true - $(SolutionDir)bin\vs-2013\$(PlatformTarget)\$(Configuration)\ - 
$(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ - - - true - $(SolutionDir)bin\vs-2013\$(PlatformTarget)\$(Configuration)\ - $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ - - - true - $(SolutionDir)bin\vs-2013\$(PlatformTarget)\$(Configuration)\ - $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ - - - false - $(SolutionDir)bin\vs-2013\$(PlatformTarget)\$(Configuration)\ - $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ - - - false - $(SolutionDir)bin\vs-2013\$(PlatformTarget)\$(Configuration)\ - $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ - - - false - $(SolutionDir)bin\vs-2013\$(PlatformTarget)\$(Configuration)\ - $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ - - - false - $(SolutionDir)bin\vs-2013\$(PlatformTarget)\$(Configuration)\ - $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ - - - - - - Level3 - Disabled - WIN32;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions) - true - $(SolutionDir)..\generated;$(SolutionDir)..\..\runtime\src;$(SolutionDir)..\..\runtime\src\atn;$(SolutionDir)..\..\runtime\src\dfa;$(SolutionDir)..\..\runtime\src\misc;$(SolutionDir)..\..\runtime\src\support;$(SolutionDir)..\..\runtime\src\tree;$(SolutionDir)..\..\runtime\src\tree\xpath;$(SolutionDir)..\..\runtime\src\tree\pattern;%(AdditionalIncludeDirectories) - - - 4251 - - - Console - true - - - - - - - Level3 - Disabled - WIN32;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions) - true - $(SolutionDir)..\generated;$(SolutionDir)..\..\runtime\src;$(SolutionDir)..\..\runtime\src\atn;$(SolutionDir)..\..\runtime\src\dfa;$(SolutionDir)..\..\runtime\src\misc;$(SolutionDir)..\..\runtime\src\support;$(SolutionDir)..\..\runtime\src\tree;$(SolutionDir)..\..\runtime\src\tree\xpath;$(SolutionDir)..\..\runtime\src\tree\pattern;%(AdditionalIncludeDirectories) - - - 4251 - - - Console - true - - - - - - - Level3 - Disabled - 
WIN32;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions) - true - $(SolutionDir)..\generated;$(SolutionDir)..\..\runtime\src;$(SolutionDir)..\..\runtime\src\atn;$(SolutionDir)..\..\runtime\src\dfa;$(SolutionDir)..\..\runtime\src\misc;$(SolutionDir)..\..\runtime\src\support;$(SolutionDir)..\..\runtime\src\tree;$(SolutionDir)..\..\runtime\src\tree\xpath;$(SolutionDir)..\..\runtime\src\tree\pattern;%(AdditionalIncludeDirectories) - - - 4251 - - - Console - true - - - - - - - Level3 - Disabled - WIN32;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions) - true - $(SolutionDir)..\generated;$(SolutionDir)..\..\runtime\src;$(SolutionDir)..\..\runtime\src\atn;$(SolutionDir)..\..\runtime\src\dfa;$(SolutionDir)..\..\runtime\src\misc;$(SolutionDir)..\..\runtime\src\support;$(SolutionDir)..\..\runtime\src\tree;$(SolutionDir)..\..\runtime\src\tree\xpath;$(SolutionDir)..\..\runtime\src\tree\pattern;%(AdditionalIncludeDirectories) - - - 4251 - - - Console - true - - - - - Level3 - - - MaxSpeed - true - true - WIN32;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions) - true - $(SolutionDir)..\generated;$(SolutionDir)..\..\runtime\src;$(SolutionDir)..\..\runtime\src\atn;$(SolutionDir)..\..\runtime\src\dfa;$(SolutionDir)..\..\runtime\src\misc;$(SolutionDir)..\..\runtime\src\support;$(SolutionDir)..\..\runtime\src\tree;$(SolutionDir)..\..\runtime\src\tree\xpath;$(SolutionDir)..\..\runtime\src\tree\pattern;%(AdditionalIncludeDirectories) - - - 4251 - - - Console - true - true - true - - - - - Level3 - - - MaxSpeed - true - true - WIN32;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions) - true - $(SolutionDir)..\generated;$(SolutionDir)..\..\runtime\src;$(SolutionDir)..\..\runtime\src\atn;$(SolutionDir)..\..\runtime\src\dfa;$(SolutionDir)..\..\runtime\src\misc;$(SolutionDir)..\..\runtime\src\support;$(SolutionDir)..\..\runtime\src\tree;$(SolutionDir)..\..\runtime\src\tree\xpath;$(SolutionDir)..\..\runtime\src\tree\pattern;%(AdditionalIncludeDirectories) - - - 4251 - - - Console - true - true - true - 
- - - - Level3 - - - MaxSpeed - true - true - WIN32;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions) - true - $(SolutionDir)..\generated;$(SolutionDir)..\..\runtime\src;$(SolutionDir)..\..\runtime\src\atn;$(SolutionDir)..\..\runtime\src\dfa;$(SolutionDir)..\..\runtime\src\misc;$(SolutionDir)..\..\runtime\src\support;$(SolutionDir)..\..\runtime\src\tree;$(SolutionDir)..\..\runtime\src\tree\xpath;$(SolutionDir)..\..\runtime\src\tree\pattern;%(AdditionalIncludeDirectories) - - - 4251 - - - Console - true - true - true - - - - - Level3 - - - MaxSpeed - true - true - WIN32;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions) - true - $(SolutionDir)..\generated;$(SolutionDir)..\..\runtime\src;$(SolutionDir)..\..\runtime\src\atn;$(SolutionDir)..\..\runtime\src\dfa;$(SolutionDir)..\..\runtime\src\misc;$(SolutionDir)..\..\runtime\src\support;$(SolutionDir)..\..\runtime\src\tree;$(SolutionDir)..\..\runtime\src\tree\xpath;$(SolutionDir)..\..\runtime\src\tree\pattern;%(AdditionalIncludeDirectories) - - - 4251 - - - Console - true - true - true - - - - - - - - - - - - - - - - - - - - - - {a9762991-1b57-4dce-90c0-ee42b96947be} - - - - - - \ No newline at end of file diff --git a/runtime/Cpp/demo/Windows/antlr4-cpp-demo/antlr4-cpp-demo.vcxproj.filters b/runtime/Cpp/demo/Windows/antlr4-cpp-demo/antlr4-cpp-demo.vcxproj.filters deleted file mode 100644 index ed56184124..0000000000 --- a/runtime/Cpp/demo/Windows/antlr4-cpp-demo/antlr4-cpp-demo.vcxproj.filters +++ /dev/null @@ -1,63 +0,0 @@ - - - - - {4FC737F1-C7A5-4376-A066-2A32D752A2FF} - cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx - - - {93995380-89BD-4b04-88EB-625FBE52EBFB} - h;hh;hpp;hxx;hm;inl;inc;xsd - - - {67DA6AB6-F800-4c08-8B7A-83BB121AAD01} - rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms - - - {ef397b7b-1192-4d44-93ed-fadaec7622e8} - - - - - Source Files - - - generated - - - generated - - - generated - - - generated - - - generated - - - generated - - - - - generated - - - generated - - - 
generated - - - generated - - - generated - - - generated - - - \ No newline at end of file diff --git a/runtime/Cpp/demo/Windows/antlr4-cpp-demo/main.cpp b/runtime/Cpp/demo/Windows/antlr4-cpp-demo/main.cpp index fa470e5ed6..b620ad018b 100644 --- a/runtime/Cpp/demo/Windows/antlr4-cpp-demo/main.cpp +++ b/runtime/Cpp/demo/Windows/antlr4-cpp-demo/main.cpp @@ -25,17 +25,15 @@ using namespace antlr4; int main(int argc, const char * argv[]) { - ANTLRInputStream input("🍴 = 🍐 + \"😎\";(((x * π))) * µ + ∰; a + (x * (y ? 0 : 1) + z);"); + ANTLRInputStream input("a = b + \"c\";(((x * d))) * e + f; a + (x * (y ? 0 : 1) + z);"); TLexer lexer(&input); CommonTokenStream tokens(&lexer); TParser parser(&tokens); tree::ParseTree *tree = parser.main(); - std::wstring s = antlrcpp::s2ws(tree->toStringTree(&parser)) + L"\n"; - - OutputDebugString(s.data()); // Only works properly since VS 2015. - //std::wcout << "Parse Tree: " << s << std::endl; Unicode output in the console is very limited. + auto s = tree->toStringTree(&parser); + std::cout << "Parse Tree: " << s << std::endl; return 0; } diff --git a/runtime/Cpp/demo/Windows/antlr4cpp-vs2013.sln b/runtime/Cpp/demo/Windows/antlr4cpp-vs2013.sln deleted file mode 100644 index 931aeb3eb2..0000000000 --- a/runtime/Cpp/demo/Windows/antlr4cpp-vs2013.sln +++ /dev/null @@ -1,58 +0,0 @@ - -Microsoft Visual Studio Solution File, Format Version 12.00 -# Visual Studio 2013 -VisualStudioVersion = 12.0.40629.0 -MinimumVisualStudioVersion = 10.0.40219.1 -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "antlr4cpp-demo", "antlr4-cpp-demo\antlr4-cpp-demo.vcxproj", "{24EC5104-7402-4C76-B66B-27ADBE062D68}" -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "antlr4cpp-vs2013", "..\..\runtime\antlr4cpp-vs2013.vcxproj", "{A9762991-1B57-4DCE-90C0-EE42B96947BE}" -EndProject -Global - GlobalSection(SolutionConfigurationPlatforms) = preSolution - Debug DLL|Win32 = Debug DLL|Win32 - Debug DLL|x64 = Debug DLL|x64 - Debug Static|Win32 = Debug 
Static|Win32 - Debug Static|x64 = Debug Static|x64 - Release DLL|Win32 = Release DLL|Win32 - Release DLL|x64 = Release DLL|x64 - Release Static|Win32 = Release Static|Win32 - Release Static|x64 = Release Static|x64 - EndGlobalSection - GlobalSection(ProjectConfigurationPlatforms) = postSolution - {24EC5104-7402-4C76-B66B-27ADBE062D68}.Debug DLL|Win32.ActiveCfg = Debug DLL|Win32 - {24EC5104-7402-4C76-B66B-27ADBE062D68}.Debug DLL|Win32.Build.0 = Debug DLL|Win32 - {24EC5104-7402-4C76-B66B-27ADBE062D68}.Debug DLL|x64.ActiveCfg = Debug DLL|x64 - {24EC5104-7402-4C76-B66B-27ADBE062D68}.Debug DLL|x64.Build.0 = Debug DLL|x64 - {24EC5104-7402-4C76-B66B-27ADBE062D68}.Debug Static|Win32.ActiveCfg = Debug Static|Win32 - {24EC5104-7402-4C76-B66B-27ADBE062D68}.Debug Static|Win32.Build.0 = Debug Static|Win32 - {24EC5104-7402-4C76-B66B-27ADBE062D68}.Debug Static|x64.ActiveCfg = Debug Static|x64 - {24EC5104-7402-4C76-B66B-27ADBE062D68}.Debug Static|x64.Build.0 = Debug Static|x64 - {24EC5104-7402-4C76-B66B-27ADBE062D68}.Release DLL|Win32.ActiveCfg = Release DLL|Win32 - {24EC5104-7402-4C76-B66B-27ADBE062D68}.Release DLL|Win32.Build.0 = Release DLL|Win32 - {24EC5104-7402-4C76-B66B-27ADBE062D68}.Release DLL|x64.ActiveCfg = Release DLL|x64 - {24EC5104-7402-4C76-B66B-27ADBE062D68}.Release DLL|x64.Build.0 = Release DLL|x64 - {24EC5104-7402-4C76-B66B-27ADBE062D68}.Release Static|Win32.ActiveCfg = Release Static|Win32 - {24EC5104-7402-4C76-B66B-27ADBE062D68}.Release Static|Win32.Build.0 = Release Static|Win32 - {24EC5104-7402-4C76-B66B-27ADBE062D68}.Release Static|x64.ActiveCfg = Release Static|x64 - {24EC5104-7402-4C76-B66B-27ADBE062D68}.Release Static|x64.Build.0 = Release Static|x64 - {A9762991-1B57-4DCE-90C0-EE42B96947BE}.Debug DLL|Win32.ActiveCfg = Debug DLL|Win32 - {A9762991-1B57-4DCE-90C0-EE42B96947BE}.Debug DLL|Win32.Build.0 = Debug DLL|Win32 - {A9762991-1B57-4DCE-90C0-EE42B96947BE}.Debug DLL|x64.ActiveCfg = Debug DLL|x64 - {A9762991-1B57-4DCE-90C0-EE42B96947BE}.Debug DLL|x64.Build.0 
= Debug DLL|x64 - {A9762991-1B57-4DCE-90C0-EE42B96947BE}.Debug Static|Win32.ActiveCfg = Debug Static|Win32 - {A9762991-1B57-4DCE-90C0-EE42B96947BE}.Debug Static|Win32.Build.0 = Debug Static|Win32 - {A9762991-1B57-4DCE-90C0-EE42B96947BE}.Debug Static|x64.ActiveCfg = Debug Static|x64 - {A9762991-1B57-4DCE-90C0-EE42B96947BE}.Debug Static|x64.Build.0 = Debug Static|x64 - {A9762991-1B57-4DCE-90C0-EE42B96947BE}.Release DLL|Win32.ActiveCfg = Release DLL|Win32 - {A9762991-1B57-4DCE-90C0-EE42B96947BE}.Release DLL|Win32.Build.0 = Release DLL|Win32 - {A9762991-1B57-4DCE-90C0-EE42B96947BE}.Release DLL|x64.ActiveCfg = Release DLL|x64 - {A9762991-1B57-4DCE-90C0-EE42B96947BE}.Release DLL|x64.Build.0 = Release DLL|x64 - {A9762991-1B57-4DCE-90C0-EE42B96947BE}.Release Static|Win32.ActiveCfg = Release Static|Win32 - {A9762991-1B57-4DCE-90C0-EE42B96947BE}.Release Static|Win32.Build.0 = Release Static|Win32 - {A9762991-1B57-4DCE-90C0-EE42B96947BE}.Release Static|x64.ActiveCfg = Release Static|x64 - {A9762991-1B57-4DCE-90C0-EE42B96947BE}.Release Static|x64.Build.0 = Release Static|x64 - EndGlobalSection - GlobalSection(SolutionProperties) = preSolution - HideSolutionNode = FALSE - EndGlobalSection -EndGlobal diff --git a/runtime/Cpp/demo/Windows/antlr4cpp-vs2015.sln b/runtime/Cpp/demo/Windows/antlr4cpp-vs2022.sln similarity index 65% rename from runtime/Cpp/demo/Windows/antlr4cpp-vs2015.sln rename to runtime/Cpp/demo/Windows/antlr4cpp-vs2022.sln index 6bf253d08d..bcda88e1e6 100644 --- a/runtime/Cpp/demo/Windows/antlr4cpp-vs2015.sln +++ b/runtime/Cpp/demo/Windows/antlr4cpp-vs2022.sln @@ -1,11 +1,11 @@  Microsoft Visual Studio Solution File, Format Version 12.00 -# Visual Studio 14 -VisualStudioVersion = 14.0.25420.1 +# Visual Studio Version 17 +VisualStudioVersion = 17.0.32014.148 MinimumVisualStudioVersion = 10.0.40219.1 -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "antlr4cpp-vs2015", "..\..\runtime\antlr4cpp-vs2015.vcxproj", "{A9762991-1B57-4DCE-90C0-EE42B96947BE}" 
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "antlr4cpp-vs2022", "..\..\runtime\antlr4cpp-vs2022.vcxproj", "{52618D4B-6EC4-49AD-8B83-52686244E8F3}" EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "antlr4cpp-demo", "antlr4-cpp-demo\antlr4-cpp-demo-vs2015.vcxproj", "{24EC5104-7402-4C76-B66B-27ADBE062D68}" +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "antlr4cpp-demo", "antlr4-cpp-demo\antlr4-cpp-demo-vs2022.vcxproj", "{24EC5104-7402-4C76-B66B-27ADBE062D68}" EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution @@ -19,22 +19,22 @@ Global Release Static|x86 = Release Static|x86 EndGlobalSection GlobalSection(ProjectConfigurationPlatforms) = postSolution - {A9762991-1B57-4DCE-90C0-EE42B96947BE}.Debug DLL|x64.ActiveCfg = Debug DLL|x64 - {A9762991-1B57-4DCE-90C0-EE42B96947BE}.Debug DLL|x64.Build.0 = Debug DLL|x64 - {A9762991-1B57-4DCE-90C0-EE42B96947BE}.Debug DLL|x86.ActiveCfg = Debug DLL|Win32 - {A9762991-1B57-4DCE-90C0-EE42B96947BE}.Debug DLL|x86.Build.0 = Debug DLL|Win32 - {A9762991-1B57-4DCE-90C0-EE42B96947BE}.Debug Static|x64.ActiveCfg = Debug Static|x64 - {A9762991-1B57-4DCE-90C0-EE42B96947BE}.Debug Static|x64.Build.0 = Debug Static|x64 - {A9762991-1B57-4DCE-90C0-EE42B96947BE}.Debug Static|x86.ActiveCfg = Debug Static|Win32 - {A9762991-1B57-4DCE-90C0-EE42B96947BE}.Debug Static|x86.Build.0 = Debug Static|Win32 - {A9762991-1B57-4DCE-90C0-EE42B96947BE}.Release DLL|x64.ActiveCfg = Release DLL|x64 - {A9762991-1B57-4DCE-90C0-EE42B96947BE}.Release DLL|x64.Build.0 = Release DLL|x64 - {A9762991-1B57-4DCE-90C0-EE42B96947BE}.Release DLL|x86.ActiveCfg = Release DLL|Win32 - {A9762991-1B57-4DCE-90C0-EE42B96947BE}.Release DLL|x86.Build.0 = Release DLL|Win32 - {A9762991-1B57-4DCE-90C0-EE42B96947BE}.Release Static|x64.ActiveCfg = Release Static|x64 - {A9762991-1B57-4DCE-90C0-EE42B96947BE}.Release Static|x64.Build.0 = Release Static|x64 - {A9762991-1B57-4DCE-90C0-EE42B96947BE}.Release Static|x86.ActiveCfg = Release Static|Win32 - 
{A9762991-1B57-4DCE-90C0-EE42B96947BE}.Release Static|x86.Build.0 = Release Static|Win32 + {52618D4B-6EC4-49AD-8B83-52686244E8F3}.Debug DLL|x64.ActiveCfg = Debug DLL|x64 + {52618D4B-6EC4-49AD-8B83-52686244E8F3}.Debug DLL|x64.Build.0 = Debug DLL|x64 + {52618D4B-6EC4-49AD-8B83-52686244E8F3}.Debug DLL|x86.ActiveCfg = Debug DLL|Win32 + {52618D4B-6EC4-49AD-8B83-52686244E8F3}.Debug DLL|x86.Build.0 = Debug DLL|Win32 + {52618D4B-6EC4-49AD-8B83-52686244E8F3}.Debug Static|x64.ActiveCfg = Debug Static|x64 + {52618D4B-6EC4-49AD-8B83-52686244E8F3}.Debug Static|x64.Build.0 = Debug Static|x64 + {52618D4B-6EC4-49AD-8B83-52686244E8F3}.Debug Static|x86.ActiveCfg = Debug Static|Win32 + {52618D4B-6EC4-49AD-8B83-52686244E8F3}.Debug Static|x86.Build.0 = Debug Static|Win32 + {52618D4B-6EC4-49AD-8B83-52686244E8F3}.Release DLL|x64.ActiveCfg = Release DLL|x64 + {52618D4B-6EC4-49AD-8B83-52686244E8F3}.Release DLL|x64.Build.0 = Release DLL|x64 + {52618D4B-6EC4-49AD-8B83-52686244E8F3}.Release DLL|x86.ActiveCfg = Release DLL|Win32 + {52618D4B-6EC4-49AD-8B83-52686244E8F3}.Release DLL|x86.Build.0 = Release DLL|Win32 + {52618D4B-6EC4-49AD-8B83-52686244E8F3}.Release Static|x64.ActiveCfg = Release Static|x64 + {52618D4B-6EC4-49AD-8B83-52686244E8F3}.Release Static|x64.Build.0 = Release Static|x64 + {52618D4B-6EC4-49AD-8B83-52686244E8F3}.Release Static|x86.ActiveCfg = Release Static|Win32 + {52618D4B-6EC4-49AD-8B83-52686244E8F3}.Release Static|x86.Build.0 = Release Static|Win32 {24EC5104-7402-4C76-B66B-27ADBE062D68}.Debug DLL|x64.ActiveCfg = Debug DLL|x64 {24EC5104-7402-4C76-B66B-27ADBE062D68}.Debug DLL|x64.Build.0 = Debug DLL|x64 {24EC5104-7402-4C76-B66B-27ADBE062D68}.Debug DLL|x86.ActiveCfg = Debug DLL|Win32 @@ -55,4 +55,7 @@ Global GlobalSection(SolutionProperties) = preSolution HideSolutionNode = FALSE EndGlobalSection + GlobalSection(ExtensibilityGlobals) = postSolution + SolutionGuid = {93CE9298-807C-4EAD-B1E6-7109DD1A78FA} + EndGlobalSection EndGlobal diff --git a/runtime/Cpp/demo/generate.cmd 
b/runtime/Cpp/demo/generate.cmd index dfb75838ea..d4a7f0c929 100644 --- a/runtime/Cpp/demo/generate.cmd +++ b/runtime/Cpp/demo/generate.cmd @@ -6,7 +6,7 @@ :: Download the ANLTR jar and place it in the same folder as this script (or adjust the LOCATION var accordingly). -set LOCATION=antlr-4.10.1-complete.jar +set LOCATION=antlr-4.11.0-complete.jar java -jar %LOCATION% -Dlanguage=Cpp -listener -visitor -o generated/ -package antlrcpptest TLexer.g4 TParser.g4 ::java -jar %LOCATION% -Dlanguage=Cpp -listener -visitor -o generated/ -package antlrcpptest -XdbgST TLexer.g4 TParser.g4 ::java -jar %LOCATION% -Dlanguage=Java -listener -visitor -o generated/ -package antlrcpptest TLexer.g4 TParser.g4 diff --git a/runtime/Cpp/deploy-windows.cmd b/runtime/Cpp/deploy-windows.cmd index 8fc22ab5b1..0a7b3564c0 100644 --- a/runtime/Cpp/deploy-windows.cmd +++ b/runtime/Cpp/deploy-windows.cmd @@ -8,58 +8,58 @@ if exist bin rmdir /S /Q runtime\bin if exist obj rmdir /S /Q runtime\obj if exist lib rmdir /S /Q lib if exist antlr4-runtime rmdir /S /Q antlr4-runtime -if exist antlr4-cpp-runtime-vs2017.zip erase antlr4-cpp-runtime-vs2017.zip if exist antlr4-cpp-runtime-vs2019.zip erase antlr4-cpp-runtime-vs2019.zip +if exist antlr4-cpp-runtime-vs2022.zip erase antlr4-cpp-runtime-vs2022.zip rem Headers echo Copying header files ... xcopy runtime\src\*.h antlr4-runtime\ /s /q rem Binaries -rem VS 2017 disabled by default. Change the X to a C to enable it. -if exist "X:\Program Files (x86)\Microsoft Visual Studio\2017\%1\Common7\Tools\VsDevCmd.bat" ( +rem VS 2019 disabled by default. Change the X to a C to enable it. +if exist "X:\Program Files (x86)\Microsoft Visual Studio\2019\%1\Common7\Tools\VsDevCmd.bat" ( echo. 
- - call "C:\Program Files (x86)\Microsoft Visual Studio\2017\%1\Common7\Tools\VsDevCmd.bat" + + call "C:\Program Files (x86)\Microsoft Visual Studio\2019\%1\Common7\Tools\VsDevCmd.bat" pushd runtime - msbuild antlr4cpp-vs2017.vcxproj /p:configuration="Release DLL" /p:platform=Win32 - msbuild antlr4cpp-vs2017.vcxproj /p:configuration="Release DLL" /p:platform=x64 + msbuild antlr4cpp-vs2019.vcxproj /p:configuration="Release DLL" /p:platform=Win32 + msbuild antlr4cpp-vs2019.vcxproj /p:configuration="Release DLL" /p:platform=x64 popd - - 7z a antlr4-cpp-runtime-vs2017.zip antlr4-runtime + + 7z a antlr4-cpp-runtime-vs2019.zip antlr4-runtime xcopy runtime\bin\*.dll lib\ /s xcopy runtime\bin\*.lib lib\ /s - 7z a antlr4-cpp-runtime-vs2017.zip lib - + 7z a antlr4-cpp-runtime-vs2019.zip lib + rmdir /S /Q lib rmdir /S /Q runtime\bin rmdir /S /Q runtime\obj - - rem if exist antlr4-cpp-runtime-vs2017.zip copy antlr4-cpp-runtime-vs2017.zip ~/antlr/sites/website-antlr4/download + + rem if exist antlr4-cpp-runtime-vs2019.zip copy antlr4-cpp-runtime-vs2019.zip ~/antlr/sites/website-antlr4/download ) -set VCTargetsPath=C:\Program Files (x86)\Microsoft Visual Studio\2019\%1\MSBuild\Microsoft\VC\v160\ -if exist "C:\Program Files (x86)\Microsoft Visual Studio\2019\%1\Common7\Tools\VsDevCmd.bat" ( +set VCTargetsPath=C:\Program Files\Microsoft Visual Studio\2022\%1\MSBuild\Microsoft\VC\v170\ +if exist "C:\Program Files\Microsoft Visual Studio\2022\%1\Common7\Tools\VsDevCmd.bat" ( echo. 
- call "C:\Program Files (x86)\Microsoft Visual Studio\2019\%1\Common7\Tools\VsDevCmd.bat" + call "C:\Program Files\Microsoft Visual Studio\2022\%1\Common7\Tools\VsDevCmd.bat" pushd runtime - msbuild antlr4cpp-vs2019.vcxproj /p:configuration="Release DLL" /p:platform=Win32 - msbuild antlr4cpp-vs2019.vcxproj /p:configuration="Release DLL" /p:platform=x64 + msbuild antlr4cpp-vs2022.vcxproj /p:configuration="Release DLL" /p:platform=Win32 + msbuild antlr4cpp-vs2022.vcxproj /p:configuration="Release DLL" /p:platform=x64 popd - - 7z a antlr4-cpp-runtime-vs2019.zip antlr4-runtime + + 7z a antlr4-cpp-runtime-vs2022.zip antlr4-runtime xcopy runtime\bin\*.dll lib\ /s xcopy runtime\bin\*.lib lib\ /s - 7z a antlr4-cpp-runtime-vs2019.zip lib - + 7z a antlr4-cpp-runtime-vs2022.zip lib + rmdir /S /Q lib rmdir /S /Q runtime\bin rmdir /S /Q runtime\obj - - rem if exist antlr4-cpp-runtime-vs2019.zip copy antlr4-cpp-runtime-vs2019.zip ~/antlr/sites/website-antlr4/download + + rem if exist antlr4-cpp-runtime-vs2022.zip copy antlr4-cpp-runtime-vs2022.zip ~/antlr/sites/website-antlr4/download ) rmdir /S /Q antlr4-runtime @@ -70,7 +70,7 @@ goto end :Usage -echo This script builds Visual Studio 2017 and/or 2019 libraries of the ANTLR4 runtime. +echo This script builds Visual Studio 2019 and/or 2022 libraries of the ANTLR4 runtime. echo You have to specify the type of your VS installation (Community, Professional etc.) to construct echo the correct build tools path. echo. 
diff --git a/runtime/Cpp/runtime/CMakeLists.txt b/runtime/Cpp/runtime/CMakeLists.txt index baf46cac9b..a4e4d1c6b0 100644 --- a/runtime/Cpp/runtime/CMakeLists.txt +++ b/runtime/Cpp/runtime/CMakeLists.txt @@ -4,6 +4,7 @@ include_directories( ${PROJECT_SOURCE_DIR}/runtime/src ${PROJECT_SOURCE_DIR}/runtime/src/atn ${PROJECT_SOURCE_DIR}/runtime/src/dfa + ${PROJECT_SOURCE_DIR}/runtime/src/internal ${PROJECT_SOURCE_DIR}/runtime/src/misc ${PROJECT_SOURCE_DIR}/runtime/src/support ${PROJECT_SOURCE_DIR}/runtime/src/tree @@ -16,6 +17,7 @@ file(GLOB libantlrcpp_SRC "${PROJECT_SOURCE_DIR}/runtime/src/*.cpp" "${PROJECT_SOURCE_DIR}/runtime/src/atn/*.cpp" "${PROJECT_SOURCE_DIR}/runtime/src/dfa/*.cpp" + "${PROJECT_SOURCE_DIR}/runtime/src/internal/*.cpp" "${PROJECT_SOURCE_DIR}/runtime/src/misc/*.cpp" "${PROJECT_SOURCE_DIR}/runtime/src/support/*.cpp" "${PROJECT_SOURCE_DIR}/runtime/src/tree/*.cpp" @@ -26,16 +28,12 @@ file(GLOB libantlrcpp_SRC add_library(antlr4_shared SHARED ${libantlrcpp_SRC}) add_library(antlr4_static STATIC ${libantlrcpp_SRC}) -set(LIB_OUTPUT_DIR "${CMAKE_HOME_DIRECTORY}/dist") # put generated libraries here. -message(STATUS "Output libraries to ${LIB_OUTPUT_DIR}") - -# make sure 'make' works fine even if ${LIB_OUTPUT_DIR} is deleted. -add_custom_target(make_lib_output_dir ALL - COMMAND ${CMAKE_COMMAND} -E make_directory ${LIB_OUTPUT_DIR} - ) - -add_dependencies(antlr4_shared make_lib_output_dir) -add_dependencies(antlr4_static make_lib_output_dir) +# Make sure to link against threads (pthreads) library in order to be able to +# make use of std::call_once in the code without producing runtime errors +# (see also https://github.com/antlr/antlr4/issues/3708 and/or https://stackoverflow.com/q/51584960). 
+find_package(Threads REQUIRED) +target_link_libraries(antlr4_shared Threads::Threads) +target_link_libraries(antlr4_static Threads::Threads) if (ANTLR_BUILD_CPP_TESTS) include(FetchContent) @@ -45,7 +43,9 @@ if (ANTLR_BUILD_CPP_TESTS) URL https://github.com/google/googletest/archive/e2239ee6043f73722e7aa812a459f54a28552929.zip ) - set(gtest_force_shared_crt ON CACHE BOOL "" FORCE) + if(WITH_STATIC_CRT) + set(gtest_force_shared_crt ON CACHE BOOL "" FORCE) + endif() FetchContent_MakeAvailable(googletest) @@ -69,10 +69,7 @@ if (ANTLR_BUILD_CPP_TESTS) gtest_discover_tests(antlr4_tests) endif() -if(CMAKE_SYSTEM_NAME MATCHES "Linux") - target_link_libraries(antlr4_shared ${UUID_LIBRARIES}) - target_link_libraries(antlr4_static ${UUID_LIBRARIES}) -elseif(APPLE) +if(APPLE) target_link_libraries(antlr4_shared ${COREFOUNDATION_LIBRARY}) target_link_libraries(antlr4_static ${COREFOUNDATION_LIBRARY}) endif() @@ -84,56 +81,55 @@ else() endif() -if("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang") +if(CMAKE_CXX_COMPILER_ID MATCHES "Clang") set(disabled_compile_warnings "${disabled_compile_warnings} -Wno-dollar-in-identifier-extension -Wno-four-char-constants") -elseif("${CMAKE_CXX_COMPILER_ID}" MATCHES "GNU" OR "${CMAKE_CXX_COMPILER_ID}" MATCHES "Intel") +elseif(CMAKE_CXX_COMPILER_ID MATCHES "GNU" OR CMAKE_CXX_COMPILER_ID MATCHES "Intel") set(disabled_compile_warnings "${disabled_compile_warnings} -Wno-multichar") endif() set(extra_share_compile_flags "") set(extra_static_compile_flags "") -if(WIN32) - set(extra_share_compile_flags "-DANTLR4CPP_EXPORTS") - set(extra_static_compile_flags "-DANTLR4CPP_STATIC") -endif(WIN32) -if(CMAKE_CXX_COMPILER_ID MATCHES "MSVC") - if(WITH_STATIC_CRT) - target_compile_options(antlr4_shared PRIVATE "/MT$<$:d>") - target_compile_options(antlr4_static PRIVATE "/MT$<$:d>") - else() - target_compile_options(antlr4_shared PRIVATE "/MD$<$:d>") - target_compile_options(antlr4_static PRIVATE "/MD$<$:d>") - endif() -endif() - set(static_lib_suffix "") 
-if(CMAKE_CXX_COMPILER_ID MATCHES "MSVC") - set(static_lib_suffix "-static") -endif() -if(CMAKE_CXX_COMPILER_ID MATCHES "MSVC") - set(extra_share_compile_flags "-DANTLR4CPP_EXPORTS -MP /wd4251") - set(extra_static_compile_flags "-DANTLR4CPP_STATIC -MP") +if (WIN32) + set(static_lib_suffix "-static") + target_compile_definitions(antlr4_shared PUBLIC ANTLR4CPP_EXPORTS) + target_compile_definitions(antlr4_static PUBLIC ANTLR4CPP_STATIC) + if(CMAKE_CXX_COMPILER_ID MATCHES "MSVC") + set(extra_share_compile_flags "-MP /wd4251") + set(extra_static_compile_flags "-MP") + endif() endif() set_target_properties(antlr4_shared PROPERTIES VERSION ${ANTLR_VERSION} SOVERSION ${ANTLR_VERSION} OUTPUT_NAME antlr4-runtime - LIBRARY_OUTPUT_DIRECTORY ${LIB_OUTPUT_DIR} - # TODO: test in windows. DLL is treated as runtime. - # see https://cmake.org/cmake/help/v3.0/prop_tgt/LIBRARY_OUTPUT_DIRECTORY.html - RUNTIME_OUTPUT_DIRECTORY ${LIB_OUTPUT_DIR} - ARCHIVE_OUTPUT_DIRECTORY ${LIB_OUTPUT_DIR} COMPILE_FLAGS "${disabled_compile_warnings} ${extra_share_compile_flags}") set_target_properties(antlr4_static PROPERTIES VERSION ${ANTLR_VERSION} SOVERSION ${ANTLR_VERSION} OUTPUT_NAME "antlr4-runtime${static_lib_suffix}" - ARCHIVE_OUTPUT_DIRECTORY ${LIB_OUTPUT_DIR} + COMPILE_PDB_NAME "antlr4-runtime${static_lib_suffix}" COMPILE_FLAGS "${disabled_compile_warnings} ${extra_static_compile_flags}") +if (ANTLR_BUILD_CPP_TESTS) + # Copy the generated binaries to dist folder (required by test suite) + add_custom_command( + TARGET antlr4_shared + POST_BUILD + COMMAND ${CMAKE_COMMAND} -E make_directory ${CMAKE_HOME_DIRECTORY}/dist + COMMAND ${CMAKE_COMMAND} -E copy_if_different $ ${CMAKE_HOME_DIRECTORY}/dist + COMMAND ${CMAKE_COMMAND} -E copy_if_different $ ${CMAKE_HOME_DIRECTORY}/dist) + + add_custom_command( + TARGET antlr4_static + POST_BUILD + COMMAND ${CMAKE_COMMAND} -E make_directory ${CMAKE_HOME_DIRECTORY}/dist + COMMAND ${CMAKE_COMMAND} -E copy_if_different $ ${CMAKE_HOME_DIRECTORY}/dist) +endif() + 
install(TARGETS antlr4_shared EXPORT antlr4-targets ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} diff --git a/runtime/Cpp/runtime/antlr4cpp-vs2013.vcxproj b/runtime/Cpp/runtime/antlr4cpp-vs2013.vcxproj deleted file mode 100644 index 83f76113ef..0000000000 --- a/runtime/Cpp/runtime/antlr4cpp-vs2013.vcxproj +++ /dev/null @@ -1,643 +0,0 @@ - - - - - Debug Static - Win32 - - - Debug Static - x64 - - - Debug DLL - Win32 - - - Debug DLL - x64 - - - Release Static - Win32 - - - Release Static - x64 - - - Release DLL - Win32 - - - Release DLL - x64 - - - - {229A61DC-1207-4E4E-88B0-F4CB7205672D} - Win32Proj - antlr4cpp - - - - DynamicLibrary - true - Unicode - v120 - - - StaticLibrary - true - Unicode - v120 - - - DynamicLibrary - true - Unicode - v120 - - - StaticLibrary - true - Unicode - v120 - - - DynamicLibrary - false - true - Unicode - v120 - - - StaticLibrary - false - true - Unicode - v120 - - - DynamicLibrary - false - true - Unicode - v120 - - - StaticLibrary - false - true - Unicode - v120 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - true - $(SolutionDir)bin\vs-2013\$(PlatformTarget)\$(Configuration)\ - $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ - antlr4-runtime - - - true - $(SolutionDir)bin\vs-2013\$(PlatformTarget)\$(Configuration)\ - $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ - antlr4-runtime - - - true - $(SolutionDir)bin\vs-2013\$(PlatformTarget)\$(Configuration)\ - $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ - antlr4-runtime - - - true - $(SolutionDir)bin\vs-2013\$(PlatformTarget)\$(Configuration)\ - $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ - antlr4-runtime - - - false - $(SolutionDir)bin\vs-2013\$(PlatformTarget)\$(Configuration)\ - $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ - antlr4-runtime - - - false - $(SolutionDir)bin\vs-2013\$(PlatformTarget)\$(Configuration)\ - 
$(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ - antlr4-runtime - - - false - $(SolutionDir)bin\vs-2013\$(PlatformTarget)\$(Configuration)\ - $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ - antlr4-runtime - - - false - $(SolutionDir)bin\vs-2013\$(PlatformTarget)\$(Configuration)\ - $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ - antlr4-runtime - - - - Level4 - Disabled - ANTLR4CPP_DLL;ANTLR4CPP_EXPORTS;%(PreprocessorDefinitions) - src/tree;src;%(AdditionalIncludeDirectories) - - - - - 4251 - stdcpp17 - - - Windows - true - - - - - Level4 - Disabled - ANTLR4CPP_EXPORTS;%(PreprocessorDefinitions) - src/tree;src;%(AdditionalIncludeDirectories) - - - - - 4251 - stdcpp17 - - - Windows - true - - - - - Level4 - Disabled - ANTLR4CPP_DLL;ANTLR4CPP_EXPORTS;%(PreprocessorDefinitions) - src/tree;src;%(AdditionalIncludeDirectories) - - - - - 4251 - stdcpp17 - - - Windows - true - - - - - Level4 - Disabled - ANTLR4CPP_STATIC;%(PreprocessorDefinitions) - src/tree;src;%(AdditionalIncludeDirectories) - - - - - 4251 - stdcpp17 - - - Windows - true - - - - - Level4 - MaxSpeed - true - true - ANTLR4CPP_DLL;ANTLR4CPP_EXPORTS;%(PreprocessorDefinitions) - src/tree;src;%(AdditionalIncludeDirectories) - - - - - 4251 - stdcpp17 - - - Windows - true - true - true - - - - - Level4 - MaxSpeed - true - true - ANTLR4CPP_EXPORTS;%(PreprocessorDefinitions) - src/tree;src;%(AdditionalIncludeDirectories) - - - - - 4251 - stdcpp17 - - - Windows - true - true - true - - - - - Level4 - MaxSpeed - true - true - ANTLR4CPP_DLL;ANTLR4CPP_EXPORTS;%(PreprocessorDefinitions) - src/tree;src;%(AdditionalIncludeDirectories) - - - - - 4251 - stdcpp17 - - - Windows - true - true - true - - - - - Level4 - MaxSpeed - true - true - ANTLR4CPP_STATIC;%(PreprocessorDefinitions) - src/tree;src;%(AdditionalIncludeDirectories) - - - - - 4251 - stdcpp17 - - - Windows - true - true - true - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/runtime/Cpp/runtime/antlr4cpp-vs2015.vcxproj.filters b/runtime/Cpp/runtime/antlr4cpp-vs2015.vcxproj.filters deleted file mode 100644 index 8573ee8373..0000000000 --- a/runtime/Cpp/runtime/antlr4cpp-vs2015.vcxproj.filters +++ /dev/null @@ -1,987 +0,0 @@ - - - - - {4FC737F1-C7A5-4376-A066-2A32D752A2FF} - cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx - - - {93995380-89BD-4b04-88EB-625FBE52EBFB} - h;hpp;hxx;hm;inl;inc;xsd - - - {67DA6AB6-F800-4c08-8B7A-83BB121AAD01} - rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms - - - {587a2726-4856-4d21-937a-fbaebaa90232} - - - {2662156f-1508-4dad-b991-a8298a6db9bf} - - - {5b1e59b1-7fa5-46a5-8d92-965bd709cca0} - - - {9de9fe74-5d67-441d-a972-3cebe6dfbfcc} - - - {89fd3896-0ab1-476d-8d64-a57f10a5e73b} - - - {23939d7b-8e11-421e-80eb-b2cfdfdd64e9} - - - {05f2bacb-b5b2-4ca3-abe1-ca9a7239ecaa} - - - {d3b2ae2d-836b-4c73-8180-aca4ebb7d658} - - - {6674a0f0-c65d-4a00-a9e5-1f243b89d0a2} - - - {1893fffe-7a2b-4708-8ce5-003aa9b749f7} - - - {053a0632-27bc-4043-b5e8-760951b3b5b9} - - - {048c180d-44cf-49ca-a7aa-d0053fea07f5} - - - {3181cae5-cc15-4050-8c45-22af44a823de} - - - {290632d2-c56e-4005-a417-eb83b9531e1a} - - - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header 
Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\dfa - - - Header Files\dfa - - - Header Files\dfa - - - Header Files\dfa - - - Header Files\misc - - - Header Files\misc - - - Header Files\misc - - - Header Files\misc - - - Header Files\support - - - Header Files\support - - - Header Files\support - - - Header Files\support - - - Header Files\support - - - Header Files\tree - - - Header Files\tree - - - Header Files\tree - - - Header Files\tree - - - Header 
Files\tree - - - Header Files\tree - - - Header Files\tree - - - Header Files\tree - - - Header Files\tree - - - Header Files\tree - - - Header Files\tree - - - Header Files\tree - - - Header Files\tree - - - Header Files\tree\pattern - - - Header Files\tree\pattern - - - Header Files\tree\pattern - - - Header Files\tree\pattern - - - Header Files\tree\pattern - - - Header Files\tree\pattern - - - Header Files\tree\pattern - - - Header Files\tree\pattern - - - Header Files\tree\xpath - - - Header Files - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\misc - - - Header Files - - - Header Files - - - Header Files\support - - - Header Files\tree\xpath - - - Header Files\tree\xpath - - - Header Files\tree\xpath - - - Header Files\tree\xpath - - - Header Files\tree\xpath - - - Header Files\tree\xpath - - - Header Files\tree\xpath - - - Header Files\tree\xpath - - - Header Files\tree\xpath - - - Header Files - - - Header Files - - - Source Files\support - - - Header Files\tree - - - Header Files - - - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - 
Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\dfa - - - Source Files\dfa - - - Source Files\dfa - - - Source Files\dfa - - - Source Files\misc - - - Source Files\misc - - - Source Files\misc - - - Source Files\support - - - Source Files\support - - - Source Files\support - - - Source Files\tree - - - Source Files\tree - - - Source Files\tree - - - Source Files\tree - - - Source Files\tree\pattern - - - Source Files\tree\pattern - - - Source Files\tree\pattern - - - Source Files\tree\pattern - - - Source Files\tree\pattern - - - Source Files\tree\pattern - - - Source Files\tree\pattern - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - 
Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files - - - Source Files - - - Source Files\support - - - Source Files\tree\xpath - - - Source Files\tree\xpath - - - Source Files\tree\xpath - - - Source Files\tree\xpath - - - Source Files\tree\xpath - - - Source Files\tree\xpath - - - Source Files\tree\xpath - - - Source Files\tree\xpath - - - Source Files\tree\xpath - - - Source Files\tree\xpath - - - Source Files - - - Source Files\tree - - - Source Files\tree - - - Source Files - - - Source Files - - - Source Files - - - Source Files\atn - - - Source Files\atn - - - Source Files\misc - - - Source Files - - - Source Files - - - Source Files - - - Source Files\support - - - Source Files\tree - - - Source Files\tree - - - Source Files\tree - - - Source Files\tree - - - Source Files\tree\pattern - - - \ No newline at end of file diff --git a/runtime/Cpp/runtime/antlr4cpp-vs2017.vcxproj b/runtime/Cpp/runtime/antlr4cpp-vs2017.vcxproj deleted file mode 100644 index 8ad1d01b6f..0000000000 --- a/runtime/Cpp/runtime/antlr4cpp-vs2017.vcxproj +++ /dev/null @@ -1,659 +0,0 @@ - - - - - Debug Static - Win32 - - - Debug Static - x64 - - - Debug DLL - Win32 - - - Debug DLL - x64 - - - Release Static - Win32 - - - Release Static - x64 - - - Release DLL - Win32 - - - Release DLL - x64 - - - - {83BE66CD-9C4F-4F84-B72A-DD1855C8FC8A} - Win32Proj - antlr4cpp - 10.0.16299.0 - - - - DynamicLibrary - true - Unicode - v141 - - - StaticLibrary - true - Unicode - v141 - - - DynamicLibrary - true - Unicode - v141 - - - StaticLibrary - true - Unicode - v141 - - - DynamicLibrary - false - true - Unicode - v141 - - - StaticLibrary - false - true - Unicode - v141 - - - DynamicLibrary - false - true - Unicode - v141 - - - StaticLibrary - false - true - Unicode - v141 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - true - $(SolutionDir)bin\vs-2017\$(PlatformTarget)\$(Configuration)\ - 
$(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ - antlr4-runtime - - - true - $(SolutionDir)bin\vs-2017\$(PlatformTarget)\$(Configuration)\ - $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ - antlr4-runtime - - - true - $(SolutionDir)bin\vs-2017\$(PlatformTarget)\$(Configuration)\ - $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ - antlr4-runtime - - - true - $(SolutionDir)bin\vs-2017\$(PlatformTarget)\$(Configuration)\ - $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ - antlr4-runtime - - - false - $(SolutionDir)bin\vs-2017\$(PlatformTarget)\$(Configuration)\ - $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ - antlr4-runtime - - - false - $(SolutionDir)bin\vs-2017\$(PlatformTarget)\$(Configuration)\ - $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ - antlr4-runtime - - - false - $(SolutionDir)bin\vs-2017\$(PlatformTarget)\$(Configuration)\ - $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ - antlr4-runtime - - - false - $(SolutionDir)bin\vs-2017\$(PlatformTarget)\$(Configuration)\ - $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ - antlr4-runtime - - - - Level4 - Disabled - ANTLR4CPP_EXPORTS;%(PreprocessorDefinitions) - src;%(AdditionalIncludeDirectories) - - - - - 4251 - true - false - stdcpp17 - - - Windows - true - - - - - Level4 - Disabled - ANTLR4CPP_EXPORTS;%(PreprocessorDefinitions) - src;%(AdditionalIncludeDirectories) - - - - - 4251 - true - false - stdcpp17 - - - Windows - true - - - - - Level4 - Disabled - ANTLR4CPP_EXPORTS;%(PreprocessorDefinitions) - src;%(AdditionalIncludeDirectories) - - - - - 4251 - true - false - stdcpp17 - - - Windows - true - - - - - Level4 - Disabled - ANTLR4CPP_STATIC;%(PreprocessorDefinitions) - src;%(AdditionalIncludeDirectories) - - - - - 4251 - true - false - stdcpp17 - - - Windows - true - - - - - Level4 - MaxSpeed - true - true - 
ANTLR4CPP_DLL;ANTLR4CPP_EXPORTS;%(PreprocessorDefinitions) - src;%(AdditionalIncludeDirectories) - - - - - 4251 - true - stdcpp17 - - - Windows - true - true - true - - - - - Level4 - MaxSpeed - true - true - ANTLR4CPP_EXPORTS;%(PreprocessorDefinitions) - src;%(AdditionalIncludeDirectories) - - - - - 4251 - true - stdcpp17 - - - Windows - true - true - true - - - - - Level4 - MaxSpeed - true - true - ANTLR4CPP_DLL;ANTLR4CPP_EXPORTS;%(PreprocessorDefinitions) - src;%(AdditionalIncludeDirectories) - - - - - 4251 - true - stdcpp17 - - - Windows - true - true - true - - - - - Level4 - MaxSpeed - true - true - ANTLR4CPP_STATIC;%(PreprocessorDefinitions) - src;%(AdditionalIncludeDirectories) - - - - - 4251 - true - stdcpp17 - - - Windows - true - true - true - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/runtime/Cpp/runtime/antlr4cpp-vs2017.vcxproj.filters b/runtime/Cpp/runtime/antlr4cpp-vs2017.vcxproj.filters deleted file mode 100644 index 8573ee8373..0000000000 --- a/runtime/Cpp/runtime/antlr4cpp-vs2017.vcxproj.filters +++ /dev/null @@ -1,987 +0,0 @@ - - - - - {4FC737F1-C7A5-4376-A066-2A32D752A2FF} - cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx - - - {93995380-89BD-4b04-88EB-625FBE52EBFB} - h;hpp;hxx;hm;inl;inc;xsd - - - {67DA6AB6-F800-4c08-8B7A-83BB121AAD01} - rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms - - - 
{587a2726-4856-4d21-937a-fbaebaa90232} - - - {2662156f-1508-4dad-b991-a8298a6db9bf} - - - {5b1e59b1-7fa5-46a5-8d92-965bd709cca0} - - - {9de9fe74-5d67-441d-a972-3cebe6dfbfcc} - - - {89fd3896-0ab1-476d-8d64-a57f10a5e73b} - - - {23939d7b-8e11-421e-80eb-b2cfdfdd64e9} - - - {05f2bacb-b5b2-4ca3-abe1-ca9a7239ecaa} - - - {d3b2ae2d-836b-4c73-8180-aca4ebb7d658} - - - {6674a0f0-c65d-4a00-a9e5-1f243b89d0a2} - - - {1893fffe-7a2b-4708-8ce5-003aa9b749f7} - - - {053a0632-27bc-4043-b5e8-760951b3b5b9} - - - {048c180d-44cf-49ca-a7aa-d0053fea07f5} - - - {3181cae5-cc15-4050-8c45-22af44a823de} - - - {290632d2-c56e-4005-a417-eb83b9531e1a} - - - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - 
Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\dfa - - - Header Files\dfa - - - Header Files\dfa - - - Header Files\dfa - - - Header Files\misc - - - Header Files\misc - - - Header Files\misc - - - Header Files\misc - - - Header Files\support - - - Header Files\support - - - Header Files\support - - - Header Files\support - - - Header Files\support - - - Header Files\tree - - - Header Files\tree - - - Header Files\tree - - - Header Files\tree - - - Header Files\tree - - - Header Files\tree - - - Header Files\tree - - - Header Files\tree - - - Header Files\tree - - - Header Files\tree - - - Header Files\tree - - - Header Files\tree - - - Header Files\tree - - - Header Files\tree\pattern - - - Header Files\tree\pattern - - - Header Files\tree\pattern - - - Header Files\tree\pattern - - - Header Files\tree\pattern - - - Header Files\tree\pattern - - - Header Files\tree\pattern - - - Header Files\tree\pattern - - - Header Files\tree\xpath - - - Header Files - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\atn - - - Header Files\misc - - - Header Files - - - Header Files - - - Header Files\support - - - Header Files\tree\xpath - - - Header 
Files\tree\xpath - - - Header Files\tree\xpath - - - Header Files\tree\xpath - - - Header Files\tree\xpath - - - Header Files\tree\xpath - - - Header Files\tree\xpath - - - Header Files\tree\xpath - - - Header Files\tree\xpath - - - Header Files - - - Header Files - - - Source Files\support - - - Header Files\tree - - - Header Files - - - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source 
Files\dfa - - - Source Files\dfa - - - Source Files\dfa - - - Source Files\dfa - - - Source Files\misc - - - Source Files\misc - - - Source Files\misc - - - Source Files\support - - - Source Files\support - - - Source Files\support - - - Source Files\tree - - - Source Files\tree - - - Source Files\tree - - - Source Files\tree - - - Source Files\tree\pattern - - - Source Files\tree\pattern - - - Source Files\tree\pattern - - - Source Files\tree\pattern - - - Source Files\tree\pattern - - - Source Files\tree\pattern - - - Source Files\tree\pattern - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - - - Source Files - - - Source Files - - - Source Files\support - - - Source Files\tree\xpath - - - Source Files\tree\xpath - - - Source Files\tree\xpath - - - Source Files\tree\xpath - - - Source Files\tree\xpath - - - Source Files\tree\xpath - - - Source Files\tree\xpath - - - Source Files\tree\xpath - - - Source Files\tree\xpath - - - Source Files\tree\xpath - - - Source Files - - - Source Files\tree - - - Source Files\tree - - - Source Files - - - Source Files - - - Source Files - - - Source Files\atn - - - Source Files\atn - - - Source Files\misc - - - Source Files - - - Source Files - - - Source Files - - - Source Files\support - - - Source Files\tree - - - Source Files\tree - - - Source Files\tree - - - Source Files\tree - - - Source Files\tree\pattern - - - \ No newline at end of file diff --git a/runtime/Cpp/runtime/antlr4cpp-vs2019.vcxproj b/runtime/Cpp/runtime/antlr4cpp-vs2019.vcxproj index d5df910b8a..468c33a666 100644 --- a/runtime/Cpp/runtime/antlr4cpp-vs2019.vcxproj +++ 
b/runtime/Cpp/runtime/antlr4cpp-vs2019.vcxproj @@ -354,7 +354,6 @@ - @@ -363,19 +362,13 @@ - - - - - - @@ -393,30 +386,24 @@ - - - + + - - - - - @@ -436,6 +423,7 @@ + @@ -460,13 +448,12 @@ - + - @@ -481,7 +468,6 @@ - @@ -501,12 +487,10 @@ - - @@ -523,12 +507,10 @@ - - @@ -558,6 +540,9 @@ + + + @@ -590,6 +575,7 @@ + @@ -601,7 +587,6 @@ - @@ -615,10 +600,12 @@ + - + + @@ -641,8 +628,6 @@ - - @@ -664,4 +649,4 @@ - + \ No newline at end of file diff --git a/runtime/Cpp/runtime/antlr4cpp-vs2019.vcxproj.filters b/runtime/Cpp/runtime/antlr4cpp-vs2019.vcxproj.filters index 8573ee8373..f2cd0aa9d4 100644 --- a/runtime/Cpp/runtime/antlr4cpp-vs2019.vcxproj.filters +++ b/runtime/Cpp/runtime/antlr4cpp-vs2019.vcxproj.filters @@ -55,6 +55,12 @@ {290632d2-c56e-4005-a417-eb83b9531e1a} + + {b4b32b3f-e97a-448a-98e6-cbf901862bd4} + + + {f8c2bdf9-7e81-4f31-ba17-06b16ba2f081} + @@ -177,9 +183,6 @@ Header Files\atn - - Header Files\atn - Header Files\atn @@ -261,9 +264,6 @@ Header Files\atn - - Header Files\atn - Header Files\atn @@ -309,9 +309,6 @@ Header Files\atn - - Header Files\atn - Header Files\dfa @@ -333,9 +330,6 @@ Header Files\misc - - Header Files\misc - Header Files\support @@ -348,9 +342,6 @@ Header Files\support - - Header Files\support - Header Files\tree @@ -375,12 +366,6 @@ Header Files\tree - - Header Files\tree - - - Header Files\tree - Header Files\tree @@ -525,18 +510,36 @@ Header Files - - Header Files - - - Source Files\support - Header Files\tree Header Files + + Header Files\internal + + + Header Files\support + + + Header Files\support + + + Header Files\support + + + Header Files\support + + + Header Files\atn + + + Header Files\atn + + + Header Files\atn + @@ -638,9 +641,6 @@ Source Files - - Source Files\atn - Source Files\atn @@ -662,9 +662,6 @@ Source Files\atn - - Source Files\atn - Source Files\atn @@ -674,21 +671,9 @@ Source Files\atn - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - Source Files\atn - - Source Files\atn - Source 
Files\atn @@ -701,9 +686,6 @@ Source Files\atn - - Source Files\atn - Source Files\atn @@ -713,12 +695,6 @@ Source Files\atn - - Source Files\atn - - - Source Files\atn - Source Files\atn @@ -734,12 +710,6 @@ Source Files\atn - - Source Files\atn - - - Source Files\atn - Source Files\atn @@ -752,18 +722,9 @@ Source Files\atn - - Source Files\atn - Source Files\atn - - Source Files\atn - - - Source Files\atn - Source Files\atn @@ -797,9 +758,6 @@ Source Files\support - - Source Files\support - Source Files\tree @@ -947,9 +905,6 @@ Source Files - - Source Files\atn - Source Files\atn @@ -968,20 +923,26 @@ Source Files\support - - Source Files\tree - Source Files\tree Source Files\tree - - Source Files\tree - Source Files\tree\pattern + + Source Files\internal + + + Source Files\support + + + Source Files\atn + + + Source Files\atn + \ No newline at end of file diff --git a/runtime/Cpp/runtime/antlr4cpp-vs2015.vcxproj b/runtime/Cpp/runtime/antlr4cpp-vs2022.vcxproj similarity index 92% rename from runtime/Cpp/runtime/antlr4cpp-vs2015.vcxproj rename to runtime/Cpp/runtime/antlr4cpp-vs2022.vcxproj index 8fb5cf9806..9813fd61ef 100644 --- a/runtime/Cpp/runtime/antlr4cpp-vs2015.vcxproj +++ b/runtime/Cpp/runtime/antlr4cpp-vs2022.vcxproj @@ -1,5 +1,5 @@  - + Debug Static @@ -35,63 +35,63 @@ - {A9762991-1B57-4DCE-90C0-EE42B96947BE} + {52618D4B-6EC4-49AD-8B83-52686244E8F3} Win32Proj antlr4cpp - 8.1 + 10.0 DynamicLibrary true Unicode - v140 + v143 StaticLibrary true Unicode - v140 + v143 DynamicLibrary true Unicode - v140 + v143 StaticLibrary true Unicode - v140 + v143 DynamicLibrary false true Unicode - v140 + v143 StaticLibrary false true Unicode - v140 + v143 DynamicLibrary false true Unicode - v140 + v143 StaticLibrary false true Unicode - v140 + v143 @@ -123,49 +123,49 @@ true - $(SolutionDir)bin\vs-2015\$(PlatformTarget)\$(Configuration)\ + $(SolutionDir)bin\vs-2022\$(PlatformTarget)\$(Configuration)\ $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ 
antlr4-runtime true - $(SolutionDir)bin\vs-2015\$(PlatformTarget)\$(Configuration)\ + $(SolutionDir)bin\vs-2022\$(PlatformTarget)\$(Configuration)\ $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ antlr4-runtime true - $(SolutionDir)bin\vs-2015\$(PlatformTarget)\$(Configuration)\ + $(SolutionDir)bin\vs-2022\$(PlatformTarget)\$(Configuration)\ $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ antlr4-runtime true - $(SolutionDir)bin\vs-2015\$(PlatformTarget)\$(Configuration)\ + $(SolutionDir)bin\vs-2022\$(PlatformTarget)\$(Configuration)\ $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ antlr4-runtime false - $(SolutionDir)bin\vs-2015\$(PlatformTarget)\$(Configuration)\ + $(SolutionDir)bin\vs-2022\$(PlatformTarget)\$(Configuration)\ $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ antlr4-runtime false - $(SolutionDir)bin\vs-2015\$(PlatformTarget)\$(Configuration)\ + $(SolutionDir)bin\vs-2022\$(PlatformTarget)\$(Configuration)\ $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ antlr4-runtime false - $(SolutionDir)bin\vs-2015\$(PlatformTarget)\$(Configuration)\ + $(SolutionDir)bin\vs-2022\$(PlatformTarget)\$(Configuration)\ $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ antlr4-runtime false - $(SolutionDir)bin\vs-2015\$(PlatformTarget)\$(Configuration)\ + $(SolutionDir)bin\vs-2022\$(PlatformTarget)\$(Configuration)\ $(SolutionDir)obj\$(PlatformTarget)\$(Configuration)\$(ProjectName)\ antlr4-runtime @@ -182,6 +182,7 @@ 4251 true false + /Zc:__cplusplus %(AdditionalOptions) stdcpp17 @@ -202,6 +203,7 @@ 4251 true false + /Zc:__cplusplus %(AdditionalOptions) stdcpp17 @@ -222,6 +224,7 @@ 4251 true false + /Zc:__cplusplus %(AdditionalOptions) stdcpp17 @@ -242,6 +245,7 @@ 4251 true false + /Zc:__cplusplus %(AdditionalOptions) stdcpp17 @@ -263,6 +267,7 @@ 4251 true + /Zc:__cplusplus %(AdditionalOptions) stdcpp17 @@ -286,6 +291,7 @@ 4251 true + /Zc:__cplusplus 
%(AdditionalOptions) stdcpp17 @@ -309,6 +315,7 @@ 4251 true + /Zc:__cplusplus %(AdditionalOptions) stdcpp17 @@ -332,6 +339,7 @@ 4251 true + /Zc:__cplusplus %(AdditionalOptions) stdcpp17 @@ -346,7 +354,6 @@ - @@ -355,19 +362,13 @@ - - - - - - @@ -385,30 +386,24 @@ - - - + + - - - - - @@ -428,6 +423,7 @@ + @@ -452,13 +448,12 @@ - + - @@ -473,7 +468,6 @@ - @@ -493,12 +487,10 @@ - - @@ -515,12 +507,10 @@ - - @@ -550,6 +540,9 @@ + + + @@ -582,6 +575,7 @@ + @@ -593,7 +587,6 @@ - @@ -607,10 +600,12 @@ + - + + @@ -633,8 +628,6 @@ - - @@ -656,4 +649,4 @@ - + \ No newline at end of file diff --git a/runtime/Cpp/runtime/antlr4cpp-vs2013.vcxproj.filters b/runtime/Cpp/runtime/antlr4cpp-vs2022.vcxproj.filters similarity index 93% rename from runtime/Cpp/runtime/antlr4cpp-vs2013.vcxproj.filters rename to runtime/Cpp/runtime/antlr4cpp-vs2022.vcxproj.filters index 0105b80e74..f2cd0aa9d4 100644 --- a/runtime/Cpp/runtime/antlr4cpp-vs2013.vcxproj.filters +++ b/runtime/Cpp/runtime/antlr4cpp-vs2022.vcxproj.filters @@ -55,6 +55,12 @@ {290632d2-c56e-4005-a417-eb83b9531e1a} + + {b4b32b3f-e97a-448a-98e6-cbf901862bd4} + + + {f8c2bdf9-7e81-4f31-ba17-06b16ba2f081} + @@ -177,9 +183,6 @@ Header Files\atn - - Header Files\atn - Header Files\atn @@ -261,9 +264,6 @@ Header Files\atn - - Header Files\atn - Header Files\atn @@ -309,9 +309,6 @@ Header Files\atn - - Header Files\atn - Header Files\dfa @@ -333,9 +330,6 @@ Header Files\misc - - Header Files\misc - Header Files\support @@ -348,9 +342,6 @@ Header Files\support - - Header Files\support - Header Files\tree @@ -375,21 +366,12 @@ Header Files\tree - - Header Files\tree - - - Header Files\tree - Header Files\tree Header Files\tree - - Header Files\tree - Header Files\tree @@ -528,12 +510,36 @@ Header Files - - Header Files - Header Files\tree + + Header Files + + + Header Files\internal + + + Header Files\support + + + Header Files\support + + + Header Files\support + + + Header Files\support + + + Header Files\atn + + + Header Files\atn + + + 
Header Files\atn + @@ -635,9 +641,6 @@ Source Files - - Source Files\atn - Source Files\atn @@ -659,9 +662,6 @@ Source Files\atn - - Source Files\atn - Source Files\atn @@ -671,21 +671,9 @@ Source Files\atn - - Source Files\atn - - - Source Files\atn - - - Source Files\atn - Source Files\atn - - Source Files\atn - Source Files\atn @@ -698,9 +686,6 @@ Source Files\atn - - Source Files\atn - Source Files\atn @@ -710,12 +695,6 @@ Source Files\atn - - Source Files\atn - - - Source Files\atn - Source Files\atn @@ -731,12 +710,6 @@ Source Files\atn - - Source Files\atn - - - Source Files\atn - Source Files\atn @@ -749,18 +722,9 @@ Source Files\atn - - Source Files\atn - Source Files\atn - - Source Files\atn - - - Source Files\atn - Source Files\atn @@ -794,9 +758,6 @@ Source Files\support - - Source Files\support - Source Files\tree @@ -935,12 +896,21 @@ Source Files\tree + + Source Files + Source Files Source Files + + Source Files\atn + + + Source Files\misc + Source Files @@ -950,8 +920,8 @@ Source Files - - Source Files\tree + + Source Files\support Source Files\tree @@ -959,23 +929,20 @@ Source Files\tree - - Source Files\tree + + Source Files\tree\pattern - + + Source Files\internal + + Source Files\support - + Source Files\atn - + Source Files\atn - - Source Files\tree\pattern - - - Source Files\misc - \ No newline at end of file diff --git a/runtime/Cpp/runtime/nuget/ANTLR4.Runtime.cpp.noarch.nuspec b/runtime/Cpp/runtime/nuget/ANTLR4.Runtime.cpp.noarch.nuspec new file mode 100644 index 0000000000..7df6947c86 --- /dev/null +++ b/runtime/Cpp/runtime/nuget/ANTLR4.Runtime.cpp.noarch.nuspec @@ -0,0 +1,23 @@ + + + + ANTLR4.Runtime.cpp.vs$vs$.noarch + $version$$pre$ + ANTLR4 Runtime c++ vs$vs$ $link$ + Terence Parr & Contributors + true + BSL-1.0 + image\antlr4.jpg + https://www.antlr.org/ + ANTLR (ANother Tool for Language Recognition) is a powerful parser generator for reading, processing, executing, or translating structured text or binary files. 
It's widely used to build languages, tools, and frameworks. From a grammar, ANTLR generates a parser that can build and walk parse trees.. + See project URL + Copyright 2014-2022 + + + + + + + + + \ No newline at end of file diff --git a/runtime/Cpp/runtime/nuget/ANTLR4.Runtime.cpp.noarch.targets b/runtime/Cpp/runtime/nuget/ANTLR4.Runtime.cpp.noarch.targets new file mode 100644 index 0000000000..d74dcbe5fa --- /dev/null +++ b/runtime/Cpp/runtime/nuget/ANTLR4.Runtime.cpp.noarch.targets @@ -0,0 +1,8 @@ + + + + + $(MSBuildThisFileDirectory)inc;%(AdditionalIncludeDirectories) + + + \ No newline at end of file diff --git a/runtime/Cpp/runtime/nuget/ANTLR4.Runtime.cpp.shared.nuspec b/runtime/Cpp/runtime/nuget/ANTLR4.Runtime.cpp.shared.nuspec new file mode 100644 index 0000000000..3481b2a743 --- /dev/null +++ b/runtime/Cpp/runtime/nuget/ANTLR4.Runtime.cpp.shared.nuspec @@ -0,0 +1,30 @@ + + + + ANTLR4.Runtime.cpp.vs$vs$.shared + $version$$pre$ + ANTLR4 Runtime c++ vs$vs$ shared + Terence Parr & Contributors + true + BSL-1.0 + image\antlr4.jpg + https://www.antlr.org/ + ANTLR (ANother Tool for Language Recognition) is a powerful parser generator for reading, processing, executing, or translating structured text or binary files. It's widely used to build languages, tools, and frameworks. From a grammar, ANTLR generates a parser that can build and walk parse trees.. 
+ releaseNotes + Copyright 2006-2022 + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/runtime/Cpp/runtime/nuget/ANTLR4.Runtime.cpp.shared.props b/runtime/Cpp/runtime/nuget/ANTLR4.Runtime.cpp.shared.props new file mode 100644 index 0000000000..9d6b576541 --- /dev/null +++ b/runtime/Cpp/runtime/nuget/ANTLR4.Runtime.cpp.shared.props @@ -0,0 +1,21 @@ + + + + $(MSBuildThisFileDirectory)x86\dbg + + + $(MSBuildThisFileDirectory)x86\dbg + + + $(MSBuildThisFileDirectory)x64\dbg + + + $(MSBuildThisFileDirectory)x86\rel + + + $(MSBuildThisFileDirectory)x86\rel + + + $(MSBuildThisFileDirectory)x64\rel + + \ No newline at end of file diff --git a/runtime/Cpp/runtime/nuget/ANTLR4.Runtime.cpp.shared.targets b/runtime/Cpp/runtime/nuget/ANTLR4.Runtime.cpp.shared.targets new file mode 100644 index 0000000000..c3733357f5 --- /dev/null +++ b/runtime/Cpp/runtime/nuget/ANTLR4.Runtime.cpp.shared.targets @@ -0,0 +1,44 @@ + + + + + antlr4-runtime.lib;%(AdditionalDependencies) + + + $(MSBuildThisFileDirectory)x86\dbg;%(AdditionalLibraryDirectories) + + + $(MSBuildThisFileDirectory)x86\dbg;%(AdditionalLibraryDirectories) + + + $(MSBuildThisFileDirectory)x64\dbg;%(AdditionalLibraryDirectories) + + + $(MSBuildThisFileDirectory)x86\rel;%(AdditionalLibraryDirectories) + + + $(MSBuildThisFileDirectory)x86\rel;%(AdditionalLibraryDirectories) + + + $(MSBuildThisFileDirectory)x64\rel;%(AdditionalLibraryDirectories) + + + $(MSBuildThisFileDirectory)x86\dbg;%(AdditionalLibraryDirectories) + + + $(MSBuildThisFileDirectory)x86\dbg;%(AdditionalLibraryDirectories) + + + $(MSBuildThisFileDirectory)x64\dbg;%(AdditionalLibraryDirectories) + + + $(MSBuildThisFileDirectory)x86\rel;%(AdditionalLibraryDirectories) + + + $(MSBuildThisFileDirectory)x86\rel;%(AdditionalLibraryDirectories) + + + $(MSBuildThisFileDirectory)x64\rel;%(AdditionalLibraryDirectories) + + + diff --git a/runtime/Cpp/runtime/nuget/ANTLR4.Runtime.cpp.static.nuspec 
b/runtime/Cpp/runtime/nuget/ANTLR4.Runtime.cpp.static.nuspec new file mode 100644 index 0000000000..d5c7b7ae7d --- /dev/null +++ b/runtime/Cpp/runtime/nuget/ANTLR4.Runtime.cpp.static.nuspec @@ -0,0 +1,29 @@ + + + + ANTLR4.Runtime.cpp.vs$vs$.static + $version$$pre$ + ANTLR4 Runtime c++ vs$vs$ static + Terence Parr & Contributors + true + BSL-1.0 + image\antlr4.jpg + https://www.antlr.org/ + ANTLR (ANother Tool for Language Recognition) is a powerful parser generator for reading, processing, executing, or translating structured text or binary files. It's widely used to build languages, tools, and frameworks. From a grammar, ANTLR generates a parser that can build and walk parse trees.. + releaseNotes + Copyright 2006-2022 + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/runtime/Cpp/runtime/nuget/ANTLR4.Runtime.cpp.static.targets b/runtime/Cpp/runtime/nuget/ANTLR4.Runtime.cpp.static.targets new file mode 100644 index 0000000000..05a51c7b3c --- /dev/null +++ b/runtime/Cpp/runtime/nuget/ANTLR4.Runtime.cpp.static.targets @@ -0,0 +1,44 @@ + + + + + antlr4-runtime.lib;%(AdditionalDependencies) + + + $(MSBuildThisFileDirectory)x86\dbg;%(AdditionalLibraryDirectories) + + + $(MSBuildThisFileDirectory)x86\dbg;%(AdditionalLibraryDirectories) + + + $(MSBuildThisFileDirectory)x64\dbg;%(AdditionalLibraryDirectories) + + + $(MSBuildThisFileDirectory)x86\rel;%(AdditionalLibraryDirectories) + + + $(MSBuildThisFileDirectory)x86\rel;%(AdditionalLibraryDirectories) + + + $(MSBuildThisFileDirectory)x64\rel;%(AdditionalLibraryDirectories) + + + $(MSBuildThisFileDirectory)x86\dbg;%(AdditionalLibraryDirectories) + + + $(MSBuildThisFileDirectory)x86\dbg;%(AdditionalLibraryDirectories) + + + $(MSBuildThisFileDirectory)x64\dbg;%(AdditionalLibraryDirectories) + + + $(MSBuildThisFileDirectory)x86\rel;%(AdditionalLibraryDirectories) + + + $(MSBuildThisFileDirectory)x86\rel;%(AdditionalLibraryDirectories) + + + 
$(MSBuildThisFileDirectory)x64\rel;%(AdditionalLibraryDirectories) + + + diff --git a/runtime/Cpp/runtime/nuget/antlr4.jpg b/runtime/Cpp/runtime/nuget/antlr4.jpg new file mode 100644 index 0000000000..04be6f240d Binary files /dev/null and b/runtime/Cpp/runtime/nuget/antlr4.jpg differ diff --git a/runtime/Cpp/runtime/nuget/pack.cmd b/runtime/Cpp/runtime/nuget/pack.cmd new file mode 100644 index 0000000000..8eb70375c7 --- /dev/null +++ b/runtime/Cpp/runtime/nuget/pack.cmd @@ -0,0 +1,93 @@ +echo off +rem echo Usage: +rem echo ------ +rem echo pack vsvers version [pre] // pack 2019 4.9.1 -beta +rem echo ------ +setlocal enableextensions enabledelayedexpansion + +if "%1"=="" goto usage +if "%2"=="" goto usage +set PRE=%3 +set PLATFORM=Win32 + +rem -version ^^[16.0^^,17.0^^) +set VS_VERSION=vs%1 +rem should be set "VSWHERE='%ProgramFiles(x86)%\Microsoft Visual Studio\Installer\vswhere.exe -property installationPath -version ^[16.0^,17.0^)'" +if %VS_VERSION%==vs2019 ( + set "VSWHERE='C:\PROGRA~2\"Microsoft Visual Studio"\Installer\vswhere.exe -latest -property installationPath -version ^[16.0^,17.0^)'" +) else ( +if %VS_VERSION%==vs2022 ( + set "VSWHERE='C:\PROGRA~2\"Microsoft Visual Studio"\Installer\vswhere.exe -latest -property installationPath -version ^[17.0^,18.0^)'" +) +) +for /f " delims=" %%a in (%VSWHERE%) do @set "VSCOMNTOOLS=%%a" + +echo ============= %VSCOMNTOOLS% ============= + +if %VS_VERSION%==vs2019 ( + set VS_VARSALL=..\..\VC\Auxiliary\Build\vcvarsall.bat + set "VS160COMNTOOLS=%VSCOMNTOOLS%\Common7\Tools\" +) else ( + if %VS_VERSION%==vs2022 ( + set VS_VARSALL=..\..\VC\Auxiliary\Build\vcvarsall.bat + set "VS170COMNTOOLS=%VSCOMNTOOLS%\Common7\Tools\" + ) else ( + set VS_VARSALL=..\..\VC\vcvarsall.bat + ) +) + +if not defined VCINSTALLDIR ( + if %VS_VERSION%==vs2019 ( + if %PLATFORM%==x64 ( + call "%VS160COMNTOOLS%%VS_VARSALL%" x86_amd64 8.1 + ) else ( + call "%VS160COMNTOOLS%%VS_VARSALL%" x86 8.1 + ) + ) else ( + if %VS_VERSION%==vs2022 ( + if 
%PLATFORM%==x64 ( + call "%VS170COMNTOOLS%%VS_VARSALL%" x86_amd64 8.1 + ) else ( + call "%VS170COMNTOOLS%%VS_VARSALL%" x86 8.1 + ) + ) + ) +) + +if not defined VSINSTALLDIR ( + echo Error: No Visual cpp environment found. + echo Please run this script from a Visual Studio Command Prompt + echo or run "%%VSnnCOMNTOOLS%%\vsvars32.bat" first. + goto :buildfailed +) + + +pushd ..\ +call msbuild antlr4cpp-vs%1.vcxproj -t:rebuild -p:Platform=Win32 -p:Configuration="Debug DLL" +call msbuild antlr4cpp-vs%1.vcxproj -t:rebuild -p:Platform=Win32 -p:Configuration="Release DLL" +call msbuild antlr4cpp-vs%1.vcxproj -t:rebuild -p:Platform=Win32 -p:Configuration="Debug Static" +call msbuild antlr4cpp-vs%1.vcxproj -t:rebuild -p:Platform=Win32 -p:Configuration="Release Static" +call msbuild antlr4cpp-vs%1.vcxproj -t:rebuild -p:Platform=x64 -p:Configuration="Debug DLL" +call msbuild antlr4cpp-vs%1.vcxproj -t:rebuild -p:Platform=x64 -p:Configuration="Release DLL" +call msbuild antlr4cpp-vs%1.vcxproj -t:rebuild -p:Platform=x64 -p:Configuration="Debug Static" +call msbuild antlr4cpp-vs%1.vcxproj -t:rebuild -p:Platform=x64 -p:Configuration="Release Static" +popd + +del *nupkg +echo nuget pack ANTLR4.Runtime.cpp.noarch.nuspec -p vs=%1 -p version=%2 -p pre=%pre% +call nuget pack ANTLR4.Runtime.cpp.noarch.nuspec -p vs=%1 -p version=%2 -p pre=%pre% +echo nuget pack ANTLR4.Runtime.cpp.shared.nuspec -symbols -p vs=%1 -p version=%2 -p pre=%pre% +call nuget pack ANTLR4.Runtime.cpp.shared.nuspec -symbols -p vs=%1 -p version=%2 -p pre=%pre% +echo nuget pack ANTLR4.Runtime.cpp.static.nuspec -symbols -p vs=%1 -p version=%2 -p pre=%pre% +call nuget pack ANTLR4.Runtime.cpp.static.nuspec -symbols -p vs=%1 -p version=%2 -p pre=%pre% + +goto exit +:usage +echo Usage: +echo ------ +echo "pack vsvers version [pre]" // pack 2019 4.9.1 -beta +echo ------ +:exit +:buildfailed +endlocal +rem echo on \ No newline at end of file diff --git a/runtime/Cpp/runtime/src/FlatHashMap.h 
b/runtime/Cpp/runtime/src/FlatHashMap.h new file mode 100644 index 0000000000..b9b9fd5f53 --- /dev/null +++ b/runtime/Cpp/runtime/src/FlatHashMap.h @@ -0,0 +1,57 @@ +// Copyright 2012-2022 The ANTLR Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted +// provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions +// and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of +// conditions and the following disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to +// endorse or promote products derived from this software without specific prior written +// permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR +// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND +// FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR +// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY +// WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#pragma once + +#include "antlr4-common.h" + +#if ANTLR4CPP_USING_ABSEIL +#include "absl/container/flat_hash_map.h" +#else +#include +#endif + +// By default ANTLRv4 uses containers provided by the C++ standard library. 
In most deployments this +// is fine, however in some using custom containers may be preferred. This header allows that by +// optionally supporting some alternative implementations and allowing for more easier patching of +// other alternatives. + +namespace antlr4 { + +#if ANTLR4CPP_USING_ABSEIL + template ::hasher, + typename Equal = typename absl::flat_hash_map::key_equal, + typename Allocator = typename absl::flat_hash_map::allocator_type> + using FlatHashMap = absl::flat_hash_map; +#else + template ::hasher, + typename Equal = typename std::unordered_map::key_equal, + typename Allocator = typename std::unordered_map::allocator_type> + using FlatHashMap = std::unordered_map; +#endif + +} // namespace antlr4 diff --git a/runtime/Cpp/runtime/src/FlatHashSet.h b/runtime/Cpp/runtime/src/FlatHashSet.h new file mode 100644 index 0000000000..651612ab79 --- /dev/null +++ b/runtime/Cpp/runtime/src/FlatHashSet.h @@ -0,0 +1,57 @@ +// Copyright 2012-2022 The ANTLR Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted +// provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions +// and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of +// conditions and the following disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to +// endorse or promote products derived from this software without specific prior written +// permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR +// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND +// FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR +// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY +// WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#pragma once + +#include "antlr4-common.h" + +#if ANTLR4CPP_USING_ABSEIL +#include "absl/container/flat_hash_set.h" +#else +#include +#endif + +// By default ANTLRv4 uses containers provided by the C++ standard library. In most deployments this +// is fine, however in some using custom containers may be preferred. This header allows that by +// optionally supporting some alternative implementations and allowing for more easier patching of +// other alternatives. 
+ +namespace antlr4 { + +#if ANTLR4CPP_USING_ABSEIL + template ::hasher, + typename Equal = typename absl::flat_hash_set::key_equal, + typename Allocator = typename absl::flat_hash_set::allocator_type> + using FlatHashSet = absl::flat_hash_set; +#else + template ::hasher, + typename Equal = typename std::unordered_set::key_equal, + typename Allocator = typename std::unordered_set::allocator_type> + using FlatHashSet = std::unordered_set; +#endif + +} // namespace antlr4 diff --git a/runtime/Cpp/runtime/src/Parser.cpp b/runtime/Cpp/runtime/src/Parser.cpp index e01569cb2f..337bcba17a 100755 --- a/runtime/Cpp/runtime/src/Parser.cpp +++ b/runtime/Cpp/runtime/src/Parser.cpp @@ -20,6 +20,7 @@ #include "Exceptions.h" #include "ANTLRErrorListener.h" #include "tree/pattern/ParseTreePattern.h" +#include "internal/Synchronization.h" #include "atn/ProfilingATNSimulator.h" #include "atn/ParseInfo.h" @@ -28,6 +29,7 @@ using namespace antlr4; using namespace antlr4::atn; +using namespace antlr4::internal; using namespace antlrcpp; namespace { @@ -573,7 +575,7 @@ std::vector Parser::getRuleInvocationStack(RuleContext *p) { std::vector Parser::getDFAStrings() { atn::ParserATNSimulator *simulator = getInterpreter(); if (!simulator->decisionToDFA.empty()) { - std::lock_guard lck(_mutex); + UniqueLock lck(_mutex); std::vector s; for (size_t d = 0; d < simulator->decisionToDFA.size(); d++) { @@ -588,7 +590,7 @@ std::vector Parser::getDFAStrings() { void Parser::dumpDFA() { atn::ParserATNSimulator *simulator = getInterpreter(); if (!simulator->decisionToDFA.empty()) { - std::lock_guard lck(_mutex); + UniqueLock lck(_mutex); bool seenOne = false; for (size_t d = 0; d < simulator->decisionToDFA.size(); d++) { dfa::DFA &dfa = simulator->decisionToDFA[d]; diff --git a/runtime/Cpp/runtime/src/Recognizer.cpp b/runtime/Cpp/runtime/src/Recognizer.cpp index 8a944619d5..c8a183324c 100755 --- a/runtime/Cpp/runtime/src/Recognizer.cpp +++ b/runtime/Cpp/runtime/src/Recognizer.cpp @@ -18,6 +18,7 @@ 
using namespace antlr4; using namespace antlr4::atn; +using namespace antlr4::internal; std::map> Recognizer::_tokenTypeMapCache; std::map, std::map> Recognizer::_ruleIndexMapCache; @@ -33,7 +34,7 @@ Recognizer::~Recognizer() { std::map Recognizer::getTokenTypeMap() { const dfa::Vocabulary& vocabulary = getVocabulary(); - std::lock_guard lck(_mutex); + UniqueLock lck(_mutex); std::map result; auto iterator = _tokenTypeMapCache.find(&vocabulary); if (iterator != _tokenTypeMapCache.end()) { @@ -63,7 +64,7 @@ std::map Recognizer::getRuleIndexMap() { throw "The current recognizer does not provide a list of rule names."; } - std::lock_guard lck(_mutex); + UniqueLock lck(_mutex); std::map result; auto iterator = _ruleIndexMapCache.find(ruleNames); if (iterator != _ruleIndexMapCache.end()) { diff --git a/runtime/Cpp/runtime/src/Recognizer.h b/runtime/Cpp/runtime/src/Recognizer.h index 849075b360..bc62aea239 100755 --- a/runtime/Cpp/runtime/src/Recognizer.h +++ b/runtime/Cpp/runtime/src/Recognizer.h @@ -8,6 +8,7 @@ #include "ProxyErrorListener.h" #include "support/Casts.h" #include "atn/SerializedATNView.h" +#include "internal/Synchronization.h" namespace antlr4 { @@ -142,7 +143,7 @@ namespace antlr4 { atn::ATNSimulator *_interpreter; // Set and deleted in descendants (or the profiler). // Mutex to manage synchronized access for multithreading. 
- std::mutex _mutex; + internal::Mutex _mutex; private: static std::map> _tokenTypeMapCache; diff --git a/runtime/Cpp/runtime/src/Version.h b/runtime/Cpp/runtime/src/Version.h index cf8d3ca29d..2e4661883b 100644 --- a/runtime/Cpp/runtime/src/Version.h +++ b/runtime/Cpp/runtime/src/Version.h @@ -28,8 +28,8 @@ #include "antlr4-common.h" #define ANTLRCPP_VERSION_MAJOR 4 -#define ANTLRCPP_VERSION_MINOR 10 -#define ANTLRCPP_VERSION_PATCH 1 +#define ANTLRCPP_VERSION_MINOR 11 +#define ANTLRCPP_VERSION_PATCH 0 #define ANTLRCPP_MAKE_VERSION(major, minor, patch) ((major) * 100000 + (minor) * 1000 + (patch)) diff --git a/runtime/Cpp/runtime/src/antlr4-common.h b/runtime/Cpp/runtime/src/antlr4-common.h index 4af009ed13..d7f9a65fa1 100644 --- a/runtime/Cpp/runtime/src/antlr4-common.h +++ b/runtime/Cpp/runtime/src/antlr4-common.h @@ -21,9 +21,7 @@ #include #include #include -#include #include -#include #include #include #include @@ -86,6 +84,11 @@ #define ANTLR4CPP_INTERNAL_STRINGIFY(x) #x #define ANTLR4CPP_STRINGIFY(x) ANTLR4CPP_INTERNAL_STRINGIFY(x) +// We use everything from the C++ standard library by default. 
+#ifndef ANTLR4CPP_USING_ABSEIL +#define ANTLR4CPP_USING_ABSEIL 0 +#endif + #include "support/Declarations.h" // We have to undefine this symbol as ANTLR will use this name for own members and even diff --git a/runtime/Cpp/runtime/src/antlr4-runtime.h b/runtime/Cpp/runtime/src/antlr4-runtime.h index 516bfc0970..50b85aa4fc 100644 --- a/runtime/Cpp/runtime/src/antlr4-runtime.h +++ b/runtime/Cpp/runtime/src/antlr4-runtime.h @@ -165,5 +165,4 @@ #include "tree/xpath/XPathTokenElement.h" #include "tree/xpath/XPathWildcardAnywhereElement.h" #include "tree/xpath/XPathWildcardElement.h" - - +#include "internal/Synchronization.h" diff --git a/runtime/Cpp/runtime/src/atn/ATN.cpp b/runtime/Cpp/runtime/src/atn/ATN.cpp index 20a320d8e2..c434c933dd 100755 --- a/runtime/Cpp/runtime/src/atn/ATN.cpp +++ b/runtime/Cpp/runtime/src/atn/ATN.cpp @@ -18,6 +18,7 @@ using namespace antlr4; using namespace antlr4::atn; +using namespace antlr4::internal; using namespace antlrcpp; ATN::ATN() : ATN(ATNType::LEXER, 0) {} @@ -38,7 +39,7 @@ misc::IntervalSet ATN::nextTokens(ATNState *s, RuleContext *ctx) const { misc::IntervalSet const& ATN::nextTokens(ATNState *s) const { if (!s->_nextTokenUpdated) { - std::unique_lock lock(_mutex); + UniqueLock lock(_mutex); if (!s->_nextTokenUpdated) { s->_nextTokenWithinRule = nextTokens(s, nullptr); s->_nextTokenUpdated = true; diff --git a/runtime/Cpp/runtime/src/atn/ATN.h b/runtime/Cpp/runtime/src/atn/ATN.h index b8a93e0e47..1fc3fa32c6 100755 --- a/runtime/Cpp/runtime/src/atn/ATN.h +++ b/runtime/Cpp/runtime/src/atn/ATN.h @@ -6,6 +6,7 @@ #pragma once #include "RuleContext.h" +#include "internal/Synchronization.h" // GCC generates a warning when forward-declaring ATN if ATN has already been // declared due to the attributes added by ANTLR4CPP_PUBLIC. 
@@ -123,9 +124,9 @@ namespace atn { friend class LexerATNSimulator; friend class ParserATNSimulator; - mutable std::mutex _mutex; - mutable std::shared_mutex _stateMutex; - mutable std::shared_mutex _edgeMutex; + mutable internal::Mutex _mutex; + mutable internal::SharedMutex _stateMutex; + mutable internal::SharedMutex _edgeMutex; }; } // namespace atn diff --git a/runtime/Cpp/runtime/src/atn/ATNConfig.cpp b/runtime/Cpp/runtime/src/atn/ATNConfig.cpp index a9f83a61cd..be4d5bfa8c 100755 --- a/runtime/Cpp/runtime/src/atn/ATNConfig.cpp +++ b/runtime/Cpp/runtime/src/atn/ATNConfig.cpp @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. * Use of this file is governed by the BSD 3-clause license that * can be found in the LICENSE.txt file in the project root. */ @@ -23,7 +23,7 @@ inline constexpr size_t SUPPRESS_PRECEDENCE_FILTER = 0x40000000; } ATNConfig::ATNConfig(ATNState *state, size_t alt, Ref context) - : ATNConfig(state, alt, std::move(context), 0, SemanticContext::NONE) {} + : ATNConfig(state, alt, std::move(context), 0, SemanticContext::Empty::Instance) {} ATNConfig::ATNConfig(ATNState *state, size_t alt, Ref context, Ref semanticContext) : ATNConfig(state, alt, std::move(context), 0, std::move(semanticContext)) {} @@ -94,7 +94,7 @@ std::string ATNConfig::toString(bool showAlt) const { if (context) { ss << ",[" << context->toString() << "]"; } - if (semanticContext != nullptr && semanticContext != SemanticContext::NONE) { + if (semanticContext != nullptr && semanticContext != SemanticContext::Empty::Instance) { ss << ",[" << semanticContext->toString() << "]"; } if (getOuterContextDepth() > 0) { diff --git a/runtime/Cpp/runtime/src/atn/ATNConfigSet.cpp b/runtime/Cpp/runtime/src/atn/ATNConfigSet.cpp index d44e46d5ee..4ebdf8882b 100755 --- a/runtime/Cpp/runtime/src/atn/ATNConfigSet.cpp +++ b/runtime/Cpp/runtime/src/atn/ATNConfigSet.cpp @@ -1,4 +1,4 @@ -/* Copyright 
(c) 2012-2017 The ANTLR Project. All rights reserved. +/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. * Use of this file is governed by the BSD 3-clause license that * can be found in the LICENSE.txt file in the project root. */ @@ -43,7 +43,7 @@ bool ATNConfigSet::add(const Ref &config, PredictionContextMergeCache if (_readonly) { throw IllegalStateException("This set is readonly"); } - if (config->semanticContext != SemanticContext::NONE) { + if (config->semanticContext != SemanticContext::Empty::Instance) { hasSemanticContext = true; } if (config->getOuterContextDepth() > 0) { @@ -114,7 +114,7 @@ std::vector> ATNConfigSet::getPredicates() const { std::vector> preds; preds.reserve(configs.size()); for (const auto &c : configs) { - if (c->semanticContext != SemanticContext::NONE) { + if (c->semanticContext != SemanticContext::Empty::Instance) { preds.push_back(c->semanticContext); } } diff --git a/runtime/Cpp/runtime/src/atn/ATNConfigSet.h b/runtime/Cpp/runtime/src/atn/ATNConfigSet.h index 301a84a83e..d147f183a0 100755 --- a/runtime/Cpp/runtime/src/atn/ATNConfigSet.h +++ b/runtime/Cpp/runtime/src/atn/ATNConfigSet.h @@ -10,6 +10,7 @@ #include "support/BitSet.h" #include "atn/PredictionContext.h" #include "atn/ATNConfig.h" +#include "FlatHashSet.h" namespace antlr4 { namespace atn { @@ -130,7 +131,7 @@ namespace atn { virtual bool equals(const ATNConfig &lhs, const ATNConfig &rhs) const; - using LookupContainer = std::unordered_set; + using LookupContainer = FlatHashSet; /// All configs but hashed by (s, i, _, pi) not including context. Wiped out /// when we go readonly as this set becomes a DFA state. 
diff --git a/runtime/Cpp/runtime/src/atn/ATNDeserializationOptions.cpp b/runtime/Cpp/runtime/src/atn/ATNDeserializationOptions.cpp index 7743d89d89..c1e1499f85 100755 --- a/runtime/Cpp/runtime/src/atn/ATNDeserializationOptions.cpp +++ b/runtime/Cpp/runtime/src/atn/ATNDeserializationOptions.cpp @@ -6,31 +6,16 @@ #include "atn/ATNDeserializationOptions.h" #include "Exceptions.h" -#include -#include - using namespace antlr4; using namespace antlr4::atn; -namespace { - -std::once_flag defaultATNDeserializationOptionsOnceFlag; -std::unique_ptr defaultATNDeserializationOptions; - -void initializeDefaultATNDeserializationOptions() { - defaultATNDeserializationOptions.reset(new ATNDeserializationOptions()); -} - -} - ATNDeserializationOptions::ATNDeserializationOptions(ATNDeserializationOptions *options) : _readOnly(false), _verifyATN(options->_verifyATN), _generateRuleBypassTransitions(options->_generateRuleBypassTransitions) {} const ATNDeserializationOptions& ATNDeserializationOptions::getDefaultOptions() { - std::call_once(defaultATNDeserializationOptionsOnceFlag, - initializeDefaultATNDeserializationOptions); - return *defaultATNDeserializationOptions; + static const ATNDeserializationOptions* const defaultOptions = new ATNDeserializationOptions(); + return *defaultOptions; } void ATNDeserializationOptions::makeReadOnly() { diff --git a/runtime/Cpp/runtime/src/atn/ATNDeserializer.cpp b/runtime/Cpp/runtime/src/atn/ATNDeserializer.cpp index fb35dfec0e..47ad15d628 100755 --- a/runtime/Cpp/runtime/src/atn/ATNDeserializer.cpp +++ b/runtime/Cpp/runtime/src/atn/ATNDeserializer.cpp @@ -53,7 +53,6 @@ #include "atn/ATNDeserializer.h" #include -#include #include #include diff --git a/runtime/Cpp/runtime/src/atn/LexerATNSimulator.cpp b/runtime/Cpp/runtime/src/atn/LexerATNSimulator.cpp index d973343426..ef1b1cf2f1 100755 --- a/runtime/Cpp/runtime/src/atn/LexerATNSimulator.cpp +++ b/runtime/Cpp/runtime/src/atn/LexerATNSimulator.cpp @@ -16,6 +16,7 @@ #include "misc/Interval.h" 
#include "dfa/DFA.h" #include "Lexer.h" +#include "internal/Synchronization.h" #include "dfa/DFAState.h" #include "atn/LexerATNConfig.h" @@ -28,6 +29,7 @@ using namespace antlr4; using namespace antlr4::atn; +using namespace antlr4::internal; using namespace antlrcpp; void LexerATNSimulator::SimState::reset() { @@ -65,7 +67,7 @@ size_t LexerATNSimulator::match(CharStream *input, size_t mode) { const dfa::DFA &dfa = _decisionToDFA[mode]; dfa::DFAState* s0; { - std::shared_lock stateLock(atn._stateMutex); + SharedLock stateLock(atn._stateMutex); s0 = dfa.s0; } if (s0 == nullptr) { @@ -167,7 +169,7 @@ size_t LexerATNSimulator::execATN(CharStream *input, dfa::DFAState *ds0) { dfa::DFAState *LexerATNSimulator::getExistingTargetState(dfa::DFAState *s, size_t t) { dfa::DFAState* retval = nullptr; - std::shared_lock edgeLock(atn._edgeMutex); + SharedLock edgeLock(atn._edgeMutex); if (t <= MAX_DFA_EDGE) { auto iterator = s->edges.find(t - MIN_DFA_EDGE); #if DEBUG_ATN == 1 @@ -513,7 +515,7 @@ void LexerATNSimulator::addDFAEdge(dfa::DFAState *p, size_t t, dfa::DFAState *q) return; } - std::unique_lock edgeLock(atn._edgeMutex); + UniqueLock edgeLock(atn._edgeMutex); p->edges[t - MIN_DFA_EDGE] = q; // connect } @@ -545,7 +547,7 @@ dfa::DFAState *LexerATNSimulator::addDFAState(ATNConfigSet *configs, bool suppre dfa::DFA &dfa = _decisionToDFA[_mode]; { - std::unique_lock stateLock(atn._stateMutex); + UniqueLock stateLock(atn._stateMutex); auto [existing, inserted] = dfa.states.insert(proposed); if (!inserted) { delete proposed; diff --git a/runtime/Cpp/runtime/src/atn/ParserATNSimulator.cpp b/runtime/Cpp/runtime/src/atn/ParserATNSimulator.cpp index c799150e68..0111e9e792 100755 --- a/runtime/Cpp/runtime/src/atn/ParserATNSimulator.cpp +++ b/runtime/Cpp/runtime/src/atn/ParserATNSimulator.cpp @@ -21,6 +21,7 @@ #include "atn/RuleStopState.h" #include "atn/ATNConfigSet.h" #include "atn/ATNConfig.h" +#include "internal/Synchronization.h" #include "atn/StarLoopEntryState.h" #include 
"atn/BlockStartState.h" @@ -42,7 +43,7 @@ using namespace antlr4; using namespace antlr4::atn; - +using namespace antlr4::internal; using namespace antlrcpp; const bool ParserATNSimulator::TURN_OFF_LR_LOOP_ENTRY_BRANCH_OPT = ParserATNSimulator::getLrLoopSetting(); @@ -107,11 +108,11 @@ size_t ParserATNSimulator::adaptivePredict(TokenStream *input, size_t decision, dfa::DFAState *s0; { - std::shared_lock stateLock(atn._stateMutex); + SharedLock stateLock(atn._stateMutex); if (dfa.isPrecedenceDfa()) { // the start state for a precedence DFA depends on the current // parser precedence, and is provided by a DFA method. - std::shared_lock edgeLock(atn._edgeMutex); + SharedLock edgeLock(atn._edgeMutex); s0 = dfa.getPrecedenceStartState(parser->getPrecedence()); } else { // the start state for a "regular" DFA is just s0 @@ -123,7 +124,7 @@ size_t ParserATNSimulator::adaptivePredict(TokenStream *input, size_t decision, auto s0_closure = computeStartState(dfa.atnStartState, &ParserRuleContext::EMPTY, false); std::unique_ptr newState; std::unique_ptr oldState; - std::unique_lock stateLock(atn._stateMutex); + UniqueLock stateLock(atn._stateMutex); dfa::DFAState* ds0 = dfa.s0; if (dfa.isPrecedenceDfa()) { /* If this is a precedence DFA, we use applyPrecedenceFilter @@ -135,7 +136,7 @@ size_t ParserATNSimulator::adaptivePredict(TokenStream *input, size_t decision, ds0->configs = std::move(s0_closure); // not used for prediction but useful to know start configs anyway newState = std::make_unique(applyPrecedenceFilter(ds0->configs.get())); s0 = addDFAState(dfa, newState.get()); - std::unique_lock edgeLock(atn._edgeMutex); + UniqueLock edgeLock(atn._edgeMutex); dfa.setPrecedenceStartState(parser->getPrecedence(), s0); } else { newState = std::make_unique(std::move(s0_closure)); @@ -272,7 +273,7 @@ size_t ParserATNSimulator::execATN(dfa::DFA &dfa, dfa::DFAState *s0, TokenStream dfa::DFAState *ParserATNSimulator::getExistingTargetState(dfa::DFAState *previousD, size_t t) { 
dfa::DFAState* retval; - std::shared_lock edgeLock(atn._edgeMutex); + SharedLock edgeLock(atn._edgeMutex); auto iterator = previousD->edges.find(t); retval = (iterator == previousD->edges.end()) ? nullptr : iterator->second; return retval; @@ -691,8 +692,8 @@ std::vector> ParserATNSimulator::getPredsForAmbigAlts size_t nPredAlts = 0; for (size_t i = 1; i <= nalts; i++) { if (altToPred[i] == nullptr) { - altToPred[i] = SemanticContext::NONE; - } else if (altToPred[i] != SemanticContext::NONE) { + altToPred[i] = SemanticContext::Empty::Instance; + } else if (altToPred[i] != SemanticContext::Empty::Instance) { nPredAlts++; } } @@ -711,7 +712,7 @@ std::vector> ParserATNSimulator::getPredsForAmbigAlts std::vector ParserATNSimulator::getPredicatePredictions(const antlrcpp::BitSet &ambigAlts, const std::vector> &altToPred) { bool containsPredicate = std::find_if(altToPred.begin(), altToPred.end(), [](const Ref &context) { - return context != SemanticContext::NONE; + return context != SemanticContext::Empty::Instance; }) != altToPred.end(); std::vector pairs; if (containsPredicate) { @@ -766,7 +767,7 @@ std::pair ParserATNSimulator::splitAccordingToSe ATNConfigSet *succeeded(new ATNConfigSet(configs->fullCtx)); ATNConfigSet *failed(new ATNConfigSet(configs->fullCtx)); for (const auto &c : configs->configs) { - if (c->semanticContext != SemanticContext::NONE) { + if (c->semanticContext != SemanticContext::Empty::Instance) { bool predicateEvaluationResult = evalSemanticContext(c->semanticContext, outerContext, c->alt, configs->fullCtx); if (predicateEvaluationResult) { succeeded->add(c); @@ -784,7 +785,7 @@ BitSet ParserATNSimulator::evalSemanticContext(const std::vector stateLock(atn._stateMutex); + UniqueLock stateLock(atn._stateMutex); to = addDFAState(dfa, to); // used existing if possible not incoming } if (from == nullptr || t > (int)atn.maxTokenType) { @@ -1269,7 +1270,7 @@ dfa::DFAState *ParserATNSimulator::addDFAEdge(dfa::DFA &dfa, dfa::DFAState *from } { - 
std::unique_lock edgeLock(atn._edgeMutex); + UniqueLock edgeLock(atn._edgeMutex); from->edges[t] = to; // connect } diff --git a/runtime/Cpp/runtime/src/atn/PredictionContextCache.h b/runtime/Cpp/runtime/src/atn/PredictionContextCache.h index 909cae1887..78c8210d97 100644 --- a/runtime/Cpp/runtime/src/atn/PredictionContextCache.h +++ b/runtime/Cpp/runtime/src/atn/PredictionContextCache.h @@ -25,9 +25,8 @@ #pragma once -#include - #include "atn/PredictionContext.h" +#include "FlatHashSet.h" namespace antlr4 { namespace atn { @@ -56,8 +55,8 @@ namespace atn { const Ref &rhs) const; }; - std::unordered_set, - PredictionContextHasher, PredictionContextComparer> _data; + FlatHashSet, + PredictionContextHasher, PredictionContextComparer> _data; }; } // namespace atn diff --git a/runtime/Cpp/runtime/src/atn/PredictionContextMergeCache.h b/runtime/Cpp/runtime/src/atn/PredictionContextMergeCache.h index efbeb10994..efaeaef578 100644 --- a/runtime/Cpp/runtime/src/atn/PredictionContextMergeCache.h +++ b/runtime/Cpp/runtime/src/atn/PredictionContextMergeCache.h @@ -25,11 +25,11 @@ #pragma once -#include #include #include "atn/PredictionContext.h" #include "atn/PredictionContextMergeCacheOptions.h" +#include "FlatHashMap.h" namespace antlr4 { namespace atn { @@ -84,8 +84,8 @@ namespace atn { void compact(const Entry *preserve); - using Container = std::unordered_map, - PredictionContextHasher, PredictionContextComparer>; + using Container = FlatHashMap, + PredictionContextHasher, PredictionContextComparer>; const PredictionContextMergeCacheOptions _options; diff --git a/runtime/Cpp/runtime/src/atn/PredictionMode.cpp b/runtime/Cpp/runtime/src/atn/PredictionMode.cpp index 8537747813..9db0b8bdb9 100755 --- a/runtime/Cpp/runtime/src/atn/PredictionMode.cpp +++ b/runtime/Cpp/runtime/src/atn/PredictionMode.cpp @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. 
* Use of this file is governed by the BSD 3-clause license that * can be found in the LICENSE.txt file in the project root. */ @@ -62,7 +62,7 @@ bool PredictionModeClass::hasSLLConflictTerminatingPrediction(PredictionMode mod // dup configs, tossing out semantic predicates ATNConfigSet dup(true); for (auto &config : configs->configs) { - Ref c = std::make_shared(*config, SemanticContext::NONE); + Ref c = std::make_shared(*config, SemanticContext::Empty::Instance); dup.add(c); } std::vector altsets = getConflictingAltSubsets(&dup); diff --git a/runtime/Cpp/runtime/src/atn/SemanticContext.cpp b/runtime/Cpp/runtime/src/atn/SemanticContext.cpp index 7ac88cceeb..7d7fe068df 100755 --- a/runtime/Cpp/runtime/src/atn/SemanticContext.cpp +++ b/runtime/Cpp/runtime/src/atn/SemanticContext.cpp @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. * Use of this file is governed by the BSD 3-clause license that * can be found in the LICENSE.txt file in the project root. */ @@ -137,7 +137,7 @@ bool SemanticContext::PrecedencePredicate::eval(Recognizer *parser, RuleContext Ref SemanticContext::PrecedencePredicate::evalPrecedence(Recognizer *parser, RuleContext *parserCallStack) const { if (parser->precpred(parserCallStack, precedence)) { - return SemanticContext::NONE; + return SemanticContext::Empty::Instance; } return nullptr; } @@ -237,7 +237,7 @@ Ref SemanticContext::AND::evalPrecedence(Recognizer *pars // The AND context is false if any element is false. return nullptr; } - if (evaluated != NONE) { + if (evaluated != Empty::Instance) { // Reduce the result by skipping true elements. operands.push_back(std::move(evaluated)); } @@ -249,7 +249,7 @@ Ref SemanticContext::AND::evalPrecedence(Recognizer *pars if (operands.empty()) { // All elements were true, so the AND context is true. 
- return NONE; + return Empty::Instance; } Ref result = std::move(operands[0]); @@ -337,9 +337,9 @@ Ref SemanticContext::OR::evalPrecedence(Recognizer *parse for (const auto &context : getOperands()) { auto evaluated = context->evalPrecedence(parser, parserCallStack); differs |= (evaluated != context); - if (evaluated == NONE) { + if (evaluated == Empty::Instance) { // The OR context is true if any element is true. - return NONE; + return Empty::Instance; } if (evaluated != nullptr) { // Reduce the result by skipping false elements. @@ -374,18 +374,18 @@ std::string SemanticContext::OR::toString() const { //------------------ SemanticContext ----------------------------------------------------------------------------------- -const Ref SemanticContext::NONE = std::make_shared(INVALID_INDEX, INVALID_INDEX, false); +const Ref SemanticContext::Empty::Instance = std::make_shared(INVALID_INDEX, INVALID_INDEX, false); Ref SemanticContext::evalPrecedence(Recognizer * /*parser*/, RuleContext * /*parserCallStack*/) const { return shared_from_this(); } Ref SemanticContext::And(Ref a, Ref b) { - if (!a || a == NONE) { + if (!a || a == Empty::Instance) { return b; } - if (!b || b == NONE) { + if (!b || b == Empty::Instance) { return a; } @@ -405,8 +405,8 @@ Ref SemanticContext::Or(Ref a, Ref return a; } - if (a == NONE || b == NONE) { - return NONE; + if (a == Empty::Instance || b == Empty::Instance) { + return Empty::Instance; } Ref result = std::make_shared(std::move(a), std::move(b)); diff --git a/runtime/Cpp/runtime/src/atn/SemanticContext.h b/runtime/Cpp/runtime/src/atn/SemanticContext.h index 367b726a4a..8116fc0b56 100755 --- a/runtime/Cpp/runtime/src/atn/SemanticContext.h +++ b/runtime/Cpp/runtime/src/atn/SemanticContext.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. 
* Use of this file is governed by the BSD 3-clause license that * can be found in the LICENSE.txt file in the project root. */ @@ -20,12 +20,6 @@ namespace atn { /// SemanticContext within the scope of this outer class. class ANTLR4CPP_PUBLIC SemanticContext : public std::enable_shared_from_this { public: - /** - * The default {@link SemanticContext}, which is semantically equivalent to - * a predicate of the form {@code {true}?}. - */ - static const Ref NONE; - virtual ~SemanticContext() = default; SemanticContextType getContextType() const { return _contextType; } @@ -76,6 +70,7 @@ namespace atn { /// See also: ParserATNSimulator::getPredsForAmbigAlts. static Ref Or(Ref a, Ref b); + class Empty; class Predicate; class PrecedencePredicate; class Operator; @@ -97,6 +92,15 @@ namespace atn { return !operator==(lhs, rhs); } + class ANTLR4CPP_PUBLIC SemanticContext::Empty : public SemanticContext{ + public: + /** + * The default {@link SemanticContext}, which is semantically equivalent to + * a predicate of the form {@code {true}?}. + */ + static const Ref Instance; + }; + class ANTLR4CPP_PUBLIC SemanticContext::Predicate final : public SemanticContext { public: static bool is(const SemanticContext &semanticContext) { return semanticContext.getContextType() == SemanticContextType::PREDICATE; } diff --git a/runtime/Cpp/runtime/src/dfa/DFAState.h b/runtime/Cpp/runtime/src/dfa/DFAState.h index 6d94176648..5b33a88512 100755 --- a/runtime/Cpp/runtime/src/dfa/DFAState.h +++ b/runtime/Cpp/runtime/src/dfa/DFAState.h @@ -8,6 +8,7 @@ #include "antlr4-common.h" #include "atn/ATNConfigSet.h" +#include "FlatHashMap.h" namespace antlr4 { namespace dfa { @@ -63,7 +64,7 @@ namespace dfa { /// maps to {@code edges[0]}. // ml: this is a sparse list, so we use a map instead of a vector. // Watch out: we no longer have the -1 offset, as it isn't needed anymore. - std::unordered_map edges; + FlatHashMap edges; /// if accept state, what ttype do we match or alt do we predict? 
/// This is set to when {@code !=null} or diff --git a/runtime/Cpp/runtime/src/internal/Synchronization.cpp b/runtime/Cpp/runtime/src/internal/Synchronization.cpp new file mode 100644 index 0000000000..dd30ef971b --- /dev/null +++ b/runtime/Cpp/runtime/src/internal/Synchronization.cpp @@ -0,0 +1,100 @@ +// Copyright 2012-2022 The ANTLR Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted +// provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions +// and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of +// conditions and the following disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to +// endorse or promote products derived from this software without specific prior written +// permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR +// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND +// FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR +// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY +// WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +#include "internal/Synchronization.h" + +using namespace antlr4::internal; + +void Mutex::lock() { +#if ANTLR4CPP_USING_ABSEIL + _impl.Lock(); +#else + _impl.lock(); +#endif +} + +bool Mutex::try_lock() { +#if ANTLR4CPP_USING_ABSEIL + return _impl.TryLock(); +#else + return _impl.try_lock(); +#endif +} + +void Mutex::unlock() { +#if ANTLR4CPP_USING_ABSEIL + _impl.Unlock(); +#else + _impl.unlock(); +#endif +} + +void SharedMutex::lock() { +#if ANTLR4CPP_USING_ABSEIL + _impl.WriterLock(); +#else + _impl.lock(); +#endif +} + +bool SharedMutex::try_lock() { +#if ANTLR4CPP_USING_ABSEIL + return _impl.WriterTryLock(); +#else + return _impl.try_lock(); +#endif +} + +void SharedMutex::unlock() { +#if ANTLR4CPP_USING_ABSEIL + _impl.WriterUnlock(); +#else + _impl.unlock(); +#endif +} + +void SharedMutex::lock_shared() { +#if ANTLR4CPP_USING_ABSEIL + _impl.ReaderLock(); +#else + _impl.lock_shared(); +#endif +} + +bool SharedMutex::try_lock_shared() { +#if ANTLR4CPP_USING_ABSEIL + return _impl.ReaderTryLock(); +#else + return _impl.try_lock_shared(); +#endif +} + +void SharedMutex::unlock_shared() { +#if ANTLR4CPP_USING_ABSEIL + _impl.ReaderUnlock(); +#else + _impl.unlock_shared(); +#endif +} diff --git a/runtime/Cpp/runtime/src/internal/Synchronization.h b/runtime/Cpp/runtime/src/internal/Synchronization.h new file mode 100644 index 0000000000..4f969a8ab6 --- /dev/null +++ b/runtime/Cpp/runtime/src/internal/Synchronization.h @@ -0,0 +1,154 @@ +// Copyright 2012-2022 The ANTLR Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted +// provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions +// and the following disclaimer. +// +// 2. 
Redistributions in binary form must reproduce the above copyright notice, this list of +// conditions and the following disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to +// endorse or promote products derived from this software without specific prior written +// permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR +// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND +// FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR +// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY +// WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#pragma once + +#include "antlr4-common.h" + +#include +#include +#include + +#if ANTLR4CPP_USING_ABSEIL +#include "absl/base/call_once.h" +#include "absl/base/thread_annotations.h" +#include "absl/synchronization/mutex.h" +#define ANTLR4CPP_NO_THREAD_SAFTEY_ANALYSIS ABSL_NO_THREAD_SAFETY_ANALYSIS +#else +#define ANTLR4CPP_NO_THREAD_SAFTEY_ANALYSIS +#endif + +// By default ANTLRv4 uses synchronization primitives provided by the C++ standard library. In most +// deployments this is fine, however in some using custom synchronization primitives may be +// preferred. This header allows that by optionally supporting some alternative implementations and +// allowing for more easier patching of other alternatives. 
+ +namespace antlr4::internal { + + // Must be compatible with C++ standard library Mutex requirement. + class ANTLR4CPP_PUBLIC Mutex final { + public: + Mutex() = default; + + // No copying or moving, we are as strict as possible to support other implementations. + Mutex(const Mutex&) = delete; + Mutex(Mutex&&) = delete; + + // No copying or moving, we are as strict as possible to support other implementations. + Mutex& operator=(const Mutex&) = delete; + Mutex& operator=(Mutex&&) = delete; + + void lock() ANTLR4CPP_NO_THREAD_SAFTEY_ANALYSIS; + + bool try_lock() ANTLR4CPP_NO_THREAD_SAFTEY_ANALYSIS; + + void unlock() ANTLR4CPP_NO_THREAD_SAFTEY_ANALYSIS; + + private: +#if ANTLR4CPP_USING_ABSEIL + absl::Mutex _impl; +#else + std::mutex _impl; +#endif + }; + + template + using UniqueLock = std::unique_lock; + + // Must be compatible with C++ standard library SharedMutex requirement. + class ANTLR4CPP_PUBLIC SharedMutex final { + public: + SharedMutex() = default; + + // No copying or moving, we are as strict as possible to support other implementations. + SharedMutex(const SharedMutex&) = delete; + SharedMutex(SharedMutex&&) = delete; + + // No copying or moving, we are as strict as possible to support other implementations. + SharedMutex& operator=(const SharedMutex&) = delete; + SharedMutex& operator=(SharedMutex&&) = delete; + + void lock() ANTLR4CPP_NO_THREAD_SAFTEY_ANALYSIS; + + bool try_lock() ANTLR4CPP_NO_THREAD_SAFTEY_ANALYSIS; + + void unlock() ANTLR4CPP_NO_THREAD_SAFTEY_ANALYSIS; + + void lock_shared() ANTLR4CPP_NO_THREAD_SAFTEY_ANALYSIS; + + bool try_lock_shared() ANTLR4CPP_NO_THREAD_SAFTEY_ANALYSIS; + + void unlock_shared() ANTLR4CPP_NO_THREAD_SAFTEY_ANALYSIS; + + private: +#if ANTLR4CPP_USING_ABSEIL + absl::Mutex _impl; +#else + std::shared_mutex _impl; +#endif + }; + + template + using SharedLock = std::shared_lock; + + class OnceFlag; + + template + void call_once(OnceFlag &onceFlag, Callable &&callable, Args&&... 
args); + + // Must be compatible with std::once_flag. + class ANTLR4CPP_PUBLIC OnceFlag final { + public: + constexpr OnceFlag() = default; + + // No copying or moving, we are as strict as possible to support other implementations. + OnceFlag(const OnceFlag&) = delete; + OnceFlag(OnceFlag&&) = delete; + + // No copying or moving, we are as strict as possible to support other implementations. + OnceFlag& operator=(const OnceFlag&) = delete; + OnceFlag& operator=(OnceFlag&&) = delete; + + private: + template + friend void call_once(OnceFlag &onceFlag, Callable &&callable, Args&&... args); + +#if ANTLR4CPP_USING_ABSEIL + absl::once_flag _impl; +#else + std::once_flag _impl; +#endif + }; + + template + void call_once(OnceFlag &onceFlag, Callable &&callable, Args&&... args) { +#if ANTLR4CPP_USING_ABSEIL + absl::call_once(onceFlag._impl, std::forward(callable), std::forward(args)...); +#else + std::call_once(onceFlag._impl, std::forward(callable), std::forward(args)...); +#endif + } + +} // namespace antlr4::internal diff --git a/runtime/Cpp/runtime/src/misc/MurmurHash.cpp b/runtime/Cpp/runtime/src/misc/MurmurHash.cpp index 73562cd9bd..09072c9f7e 100755 --- a/runtime/Cpp/runtime/src/misc/MurmurHash.cpp +++ b/runtime/Cpp/runtime/src/misc/MurmurHash.cpp @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. * Use of this file is governed by the BSD 3-clause license that * can be found in the LICENSE.txt file in the project root. 
*/ @@ -63,23 +63,6 @@ size_t MurmurHash::update(size_t hash, size_t value) { return hash; } -size_t MurmurHash::update(size_t hash, const void *data, size_t size) { - size_t value; - const uint8_t *bytes = static_cast(data); - while (size >= sizeof(size_t)) { - std::memcpy(&value, bytes, sizeof(size_t)); - hash = update(hash, value); - bytes += sizeof(size_t); - size -= sizeof(size_t); - } - if (size != 0) { - value = 0; - std::memcpy(&value, bytes, size); - hash = update(hash, value); - } - return hash; -} - size_t MurmurHash::finish(size_t hash, size_t entryCount) { hash ^= entryCount * 8; hash ^= hash >> 33; @@ -118,3 +101,20 @@ size_t MurmurHash::finish(size_t hash, size_t entryCount) { #else #error "Expected sizeof(size_t) to be 4 or 8." #endif + +size_t MurmurHash::update(size_t hash, const void *data, size_t size) { + size_t value; + const uint8_t *bytes = static_cast(data); + while (size >= sizeof(size_t)) { + std::memcpy(&value, bytes, sizeof(size_t)); + hash = update(hash, value); + bytes += sizeof(size_t); + size -= sizeof(size_t); + } + if (size != 0) { + value = 0; + std::memcpy(&value, bytes, size); + hash = update(hash, value); + } + return hash; +} diff --git a/runtime/Cpp/runtime/src/tree/xpath/XPathLexer.cpp b/runtime/Cpp/runtime/src/tree/xpath/XPathLexer.cpp index 48318f9a28..506d2e1179 100644 --- a/runtime/Cpp/runtime/src/tree/xpath/XPathLexer.cpp +++ b/runtime/Cpp/runtime/src/tree/xpath/XPathLexer.cpp @@ -37,7 +37,7 @@ struct XPathLexerStaticData final { std::unique_ptr atn; }; -std::once_flag xpathLexerOnceFlag; +::antlr4::internal::OnceFlag xpathLexerOnceFlag; XPathLexerStaticData *xpathLexerStaticData = nullptr; void xpathLexerInitialize() { @@ -178,5 +178,5 @@ void XPathLexer::IDAction(antlr4::RuleContext *context, size_t actionIndex) { } void XPathLexer::initialize() { - std::call_once(xpathLexerOnceFlag, xpathLexerInitialize); + ::antlr4::internal::call_once(xpathLexerOnceFlag, xpathLexerInitialize); } diff --git 
a/runtime/Dart/lib/src/atn/src/atn_config.dart b/runtime/Dart/lib/src/atn/src/atn_config.dart index 2b414fa9a1..82acda6aa1 100644 --- a/runtime/Dart/lib/src/atn/src/atn_config.dart +++ b/runtime/Dart/lib/src/atn/src/atn_config.dart @@ -89,7 +89,7 @@ class ATNConfig { this.state, this.alt, this.context, [ - this.semanticContext = SemanticContext.NONE, + this.semanticContext = EmptySemanticContext.Instance, ]) : reachesIntoOuterContext = 0; ATNConfig.dup( @@ -169,7 +169,7 @@ class ATNConfig { buf.write(context.toString()); buf.write(']'); } - if (semanticContext != SemanticContext.NONE) { + if (semanticContext != EmptySemanticContext.Instance) { buf.write(','); buf.write(semanticContext); } @@ -194,7 +194,7 @@ class LexerATNConfig extends ATNConfig { int alt, PredictionContext context, [ this.lexerActionExecutor, - ]) : super(state, alt, context, SemanticContext.NONE) { + ]) : super(state, alt, context, EmptySemanticContext.Instance) { passedThroughNonGreedyDecision = false; } diff --git a/runtime/Dart/lib/src/atn/src/atn_config_set.dart b/runtime/Dart/lib/src/atn/src/atn_config_set.dart index 78f3d1593d..874eef6534 100644 --- a/runtime/Dart/lib/src/atn/src/atn_config_set.dart +++ b/runtime/Dart/lib/src/atn/src/atn_config_set.dart @@ -109,7 +109,7 @@ class ATNConfigSet extends Iterable { mergeCache, ]) { if (readOnly) throw StateError('This set is readonly'); - if (config.semanticContext != SemanticContext.NONE) { + if (config.semanticContext != EmptySemanticContext.Instance) { hasSemanticContext = true; } if (config.outerContextDepth > 0) { @@ -176,7 +176,7 @@ class ATNConfigSet extends Iterable { List get predicates { final preds = []; for (var c in configs) { - if (c.semanticContext != SemanticContext.NONE) { + if (c.semanticContext != EmptySemanticContext.Instance) { preds.add(c.semanticContext); } } diff --git a/runtime/Dart/lib/src/atn/src/atn_simulator.dart b/runtime/Dart/lib/src/atn/src/atn_simulator.dart index 4c3eb3e607..be598c4973 100644 --- 
a/runtime/Dart/lib/src/atn/src/atn_simulator.dart +++ b/runtime/Dart/lib/src/atn/src/atn_simulator.dart @@ -77,7 +77,7 @@ class PredictionContextCache { /// return that one instead and do not add a new context to the cache. /// Protect shared cache from unsafe thread access. PredictionContext add(PredictionContext ctx) { - if (ctx == PredictionContext.EMPTY) return PredictionContext.EMPTY; + if (ctx == EmptyPredictionContext.Instance) return EmptyPredictionContext.Instance; final existing = cache[ctx]; if (existing != null) { // System.out.println(name+" reuses "+existing); diff --git a/runtime/Dart/lib/src/atn/src/lexer_atn_simulator.dart b/runtime/Dart/lib/src/atn/src/lexer_atn_simulator.dart index 0f983c02ef..3801f727f3 100644 --- a/runtime/Dart/lib/src/atn/src/lexer_atn_simulator.dart +++ b/runtime/Dart/lib/src/atn/src/lexer_atn_simulator.dart @@ -401,7 +401,7 @@ class LexerATNSimulator extends ATNSimulator { } ATNConfigSet computeStartState(CharStream input, ATNState p) { - PredictionContext initialContext = PredictionContext.EMPTY; + PredictionContext initialContext = EmptyPredictionContext.Instance; ATNConfigSet configs = OrderedATNConfigSet(); for (var i = 0; i < p.numberOfTransitions; i++) { final target = p.transition(i).target; @@ -445,7 +445,7 @@ class LexerATNSimulator extends ATNSimulator { configs.add(LexerATNConfig.dup( config, config.state, - context: PredictionContext.EMPTY, + context: EmptyPredictionContext.Instance, )); currentAltReachedAcceptState = true; } diff --git a/runtime/Dart/lib/src/atn/src/parser_atn_simulator.dart b/runtime/Dart/lib/src/atn/src/parser_atn_simulator.dart index 6f14f1cb36..cd814692c1 100644 --- a/runtime/Dart/lib/src/atn/src/parser_atn_simulator.dart +++ b/runtime/Dart/lib/src/atn/src/parser_atn_simulator.dart @@ -1234,8 +1234,8 @@ class ParserATNSimulator extends ATNSimulator { var nPredAlts = 0; for (var i = 1; i <= nalts; i++) { if (altToPred[i] == null) { - altToPred[i] = SemanticContext.NONE; - } else if 
(altToPred[i] != SemanticContext.NONE) { + altToPred[i] = EmptySemanticContext.Instance; + } else if (altToPred[i] != EmptySemanticContext.Instance) { nPredAlts++; } } @@ -1266,7 +1266,7 @@ class ParserATNSimulator extends ATNSimulator { if (ambigAlts != null && ambigAlts[i]) { pairs.add(PredPrediction(pred!, i)); } - if (pred != SemanticContext.NONE) containsPredicate = true; + if (pred != EmptySemanticContext.Instance) containsPredicate = true; } if (!containsPredicate) { @@ -1370,7 +1370,7 @@ class ParserATNSimulator extends ATNSimulator { final succeeded = ATNConfigSet(configs.fullCtx); final failed = ATNConfigSet(configs.fullCtx); for (var c in configs) { - if (c.semanticContext != SemanticContext.NONE) { + if (c.semanticContext != EmptySemanticContext.Instance) { final predicateEvaluationResult = evalSemanticContextOne( c.semanticContext, outerContext, @@ -1390,7 +1390,7 @@ class ParserATNSimulator extends ATNSimulator { } /// Look through a list of predicate/alt pairs, returning alts for the - /// pairs that win. A [NONE] predicate indicates an alt containing an + /// pairs that win. A [Instance] predicate indicates an alt containing an /// unpredicated config which behaves as "always true." If !complete /// then we stop at the first predicate that evaluates to true. This /// includes pairs with null predicates. 
@@ -1401,7 +1401,7 @@ class ParserATNSimulator extends ATNSimulator { ) { final predictions = BitSet(); for (var pair in predPredictions) { - if (pair.pred == SemanticContext.NONE) { + if (pair.pred == EmptySemanticContext.Instance) { predictions.set(pair.alt); if (!complete) { break; @@ -1511,7 +1511,7 @@ class ParserATNSimulator extends ATNSimulator { ATNConfig.dup( config, state: config.state, - context: PredictionContext.EMPTY, + context: EmptyPredictionContext.Instance, ), mergeCache); continue; @@ -2431,7 +2431,7 @@ extension PredictionModeExtension on PredictionMode { // dup configs, tossing out semantic predicates final dup = ATNConfigSet(); for (var c in configs) { - c = ATNConfig.dup(c, semanticContext: SemanticContext.NONE); + c = ATNConfig.dup(c, semanticContext: EmptySemanticContext.Instance); dup.add(c); } configs = dup; diff --git a/runtime/Dart/lib/src/atn/src/semantic_context.dart b/runtime/Dart/lib/src/atn/src/semantic_context.dart index a78e1f6573..6661e8fef6 100644 --- a/runtime/Dart/lib/src/atn/src/semantic_context.dart +++ b/runtime/Dart/lib/src/atn/src/semantic_context.dart @@ -4,6 +4,7 @@ * can be found in the LICENSE.txt file in the project root. */ +import 'package:antlr4/src/atn/src/atn_simulator.dart'; import 'package:collection/collection.dart'; import '../../recognizer.dart'; @@ -17,10 +18,6 @@ import '../../util/murmur_hash.dart'; ///

    I have scoped the [AND], [OR], and [Predicate] subclasses of /// [SemanticContext] within the scope of this outer class.

    abstract class SemanticContext { - /// The default [SemanticContext], which is semantically equivalent to - /// a predicate of the form {@code {true}?}. - static const SemanticContext NONE = Predicate(); - const SemanticContext(); /// For context independent predicates, we evaluate them without a local @@ -60,8 +57,8 @@ abstract class SemanticContext { } static SemanticContext? and(SemanticContext? a, SemanticContext? b) { - if (a == null || a == NONE) return b; - if (b == null || b == NONE) return a; + if (a == null || a == EmptySemanticContext.Instance) return b; + if (b == null || b == EmptySemanticContext.Instance) return a; final result = AND(a, b); if (result.opnds.length == 1) { return result.opnds[0]; @@ -75,7 +72,7 @@ abstract class SemanticContext { static SemanticContext? or(SemanticContext? a, SemanticContext? b) { if (a == null) return b; if (b == null) return a; - if (a == NONE || b == NONE) return NONE; + if (a == EmptySemanticContext.Instance || b == EmptySemanticContext.Instance) return EmptySemanticContext.Instance; final result = OR(a, b); if (result.opnds.length == 1) { return result.opnds[0]; @@ -95,6 +92,17 @@ abstract class SemanticContext { } } +class EmptySemanticContext extends SemanticContext { + /// The default [SemanticContext], which is semantically equivalent to + /// a predicate of the form {@code {true}?}. + static const SemanticContext Instance = Predicate(); + + @override + bool eval(Recognizer parser, RuleContext? parserCallStack) { + return false; + } +} + class Predicate extends SemanticContext { final int ruleIndex; final int predIndex; @@ -150,7 +158,7 @@ class PrecedencePredicate extends SemanticContext RuleContext? 
parserCallStack, ) { if (parser.precpred(parserCallStack, precedence)) { - return SemanticContext.NONE; + return EmptySemanticContext.Instance; } else { return null; } @@ -273,7 +281,7 @@ class AND extends Operator { if (evaluated == null) { // The AND context is false if any element is false return null; - } else if (evaluated != SemanticContext.NONE) { + } else if (evaluated != EmptySemanticContext.Instance) { // Reduce the result by skipping true elements operands.add(evaluated); } @@ -285,7 +293,7 @@ class AND extends Operator { if (operands.isEmpty) { // all elements were true, so the AND context is true - return SemanticContext.NONE; + return EmptySemanticContext.Instance; } SemanticContext? result = operands[0]; @@ -374,9 +382,9 @@ class OR extends Operator { for (var context in opnds) { final evaluated = context.evalPrecedence(parser, parserCallStack); differs |= (evaluated != context); - if (evaluated == SemanticContext.NONE) { + if (evaluated == EmptySemanticContext.Instance) { // The OR context is true if any element is true - return SemanticContext.NONE; + return EmptySemanticContext.Instance; } else if (evaluated != null) { // Reduce the result by skipping false elements operands.add(evaluated); diff --git a/runtime/Dart/lib/src/ll1_analyzer.dart b/runtime/Dart/lib/src/ll1_analyzer.dart index 5ae5d2673b..b2821a7be6 100644 --- a/runtime/Dart/lib/src/ll1_analyzer.dart +++ b/runtime/Dart/lib/src/ll1_analyzer.dart @@ -38,7 +38,7 @@ class LL1Analyzer { _LOOK( s.transition(n).target, null, - PredictionContext.EMPTY, + EmptyPredictionContext.Instance, lookAlt, lookBusy, BitSet(), @@ -154,7 +154,7 @@ class LL1Analyzer { return; } - if (ctx != PredictionContext.EMPTY) { + if (ctx != EmptyPredictionContext.Instance) { // run thru all possible stack tops in ctx final removed = calledRuleStack[s.ruleIndex]; try { diff --git a/runtime/Dart/lib/src/prediction_context.dart b/runtime/Dart/lib/src/prediction_context.dart index 2e0843a91c..11b601ea74 100644 --- 
a/runtime/Dart/lib/src/prediction_context.dart +++ b/runtime/Dart/lib/src/prediction_context.dart @@ -13,10 +13,6 @@ import 'rule_context.dart'; import 'util/murmur_hash.dart'; abstract class PredictionContext { - /// Represents {@code $} in local context prediction, which means wildcard. - /// {@code *+x = *}. - static final EmptyPredictionContext EMPTY = EmptyPredictionContext(); - /// Represents {@code $} in an array in full context mode, when {@code $} /// doesn't mean wildcard: {@code $ + x = [$,x]}. Here, /// {@code $} = {@link #EMPTY_RETURN_STATE}. @@ -58,11 +54,11 @@ abstract class PredictionContext { // if we are in RuleContext of start rule, s, then PredictionContext // is EMPTY. Nobody called us. (if we are empty, return empty) if (outerContext.parent == null || outerContext == RuleContext.EMPTY) { - return PredictionContext.EMPTY; + return EmptyPredictionContext.Instance; } // If we have a parent, convert it to a PredictionContext graph - PredictionContext parent = EMPTY; + PredictionContext parent = EmptyPredictionContext.Instance; parent = PredictionContext.fromRuleContext(atn, outerContext.parent); final state = atn.states[outerContext.invokingState]!; @@ -81,7 +77,7 @@ abstract class PredictionContext { /// This means only the {@link #EMPTY} (wildcard? not sure) context is in set. 
*/ bool get isEmpty { - return this == EMPTY; + return this == EmptyPredictionContext.Instance; } bool hasEmptyPath() { @@ -298,18 +294,18 @@ abstract class PredictionContext { bool rootIsWildcard, ) { if (rootIsWildcard) { - if (a == EMPTY) return EMPTY; // * + b = * - if (b == EMPTY) return EMPTY; // a + * = * + if (a == EmptyPredictionContext.Instance) return EmptyPredictionContext.Instance; // * + b = * + if (b == EmptyPredictionContext.Instance) return EmptyPredictionContext.Instance; // a + * = * } else { - if (a == EMPTY && b == EMPTY) return EMPTY; // $ + $ = $ - if (a == EMPTY) { + if (a == EmptyPredictionContext.Instance && b == EmptyPredictionContext.Instance) return EmptyPredictionContext.Instance; // $ + $ = $ + if (a == EmptyPredictionContext.Instance) { // $ + x = [x,$] final payloads = [b.returnState, EMPTY_RETURN_STATE]; final parents = [b.parent, null]; PredictionContext joined = ArrayPredictionContext(parents, payloads); return joined; } - if (b == EMPTY) { + if (b == EmptyPredictionContext.Instance) { // x + $ = [x,$] ($ is always last if present) final payloads = [a.returnState, EMPTY_RETURN_STATE]; final parents = [a.parent, null]; @@ -431,7 +427,7 @@ abstract class PredictionContext { return a_; } - mergedParents = List.generate(k, (n) => mergedParents[n]!); + mergedParents = List.generate(k, (n) => mergedParents[n]); mergedReturnStates = List.generate(k, (n) => mergedReturnStates[n]); } @@ -518,7 +514,7 @@ abstract class PredictionContext { } for (var current in nodes) { - if (current == EMPTY) continue; + if (current == EmptyPredictionContext.Instance) continue; for (var i = 0; i < current.length; i++) { if (current.getParent(i) == null) continue; final s = current.id.toString(); @@ -590,7 +586,7 @@ abstract class PredictionContext { PredictionContext updated; if (parents.isEmpty) { - updated = EMPTY; + updated = EmptyPredictionContext.Instance; } else if (parents.length == 1) { updated = SingletonPredictionContext.create( parents[0], 
context.getReturnState(0)); @@ -706,7 +702,7 @@ abstract class PredictionContext { } } stateNumber = p.getReturnState(index); - p = p.getParent(index) ?? EMPTY; + p = p.getParent(index) ?? EmptyPredictionContext.Instance; } localBuffer.write(']'); result.add(localBuffer.toString()); @@ -737,7 +733,7 @@ class SingletonPredictionContext extends PredictionContext { ) { if (returnState == PredictionContext.EMPTY_RETURN_STATE && parent == null) { // someone can pass in the bits of an array ctx that mean $ - return PredictionContext.EMPTY; + return EmptyPredictionContext.Instance; } return SingletonPredictionContext(parent, returnState); } @@ -789,6 +785,10 @@ class SingletonPredictionContext extends PredictionContext { } class EmptyPredictionContext extends SingletonPredictionContext { + /// Represents {@code $} in local context prediction, which means wildcard. + /// {@code *+x = *}. + static final EmptyPredictionContext Instance = EmptyPredictionContext(); + EmptyPredictionContext() : super(null, PredictionContext.EMPTY_RETURN_STATE); @override diff --git a/runtime/Dart/lib/src/runtime_meta_data.dart b/runtime/Dart/lib/src/runtime_meta_data.dart index b6d6c73cfe..5c36ff64b9 100644 --- a/runtime/Dart/lib/src/runtime_meta_data.dart +++ b/runtime/Dart/lib/src/runtime_meta_data.dart @@ -66,7 +66,7 @@ class RuntimeMetaData { /// omitted, the {@code -} (hyphen-minus) appearing before it is also /// omitted. /// - static final String VERSION = '4.10.1'; + static final String VERSION = '4.11.0'; /// Gets the currently executing version of the ANTLR 4 runtime library. /// diff --git a/runtime/Dart/pubspec.yaml b/runtime/Dart/pubspec.yaml index 388fd3f2cb..bed3fb95b8 100644 --- a/runtime/Dart/pubspec.yaml +++ b/runtime/Dart/pubspec.yaml @@ -1,5 +1,5 @@ name: "antlr4" -version: "4.10.1" +version: "4.11.0" description: "New Dart runtime for ANTLR4." 
homepage: "https://github.com/antlr/antlr4" license: "BSD-3-Clause" diff --git a/runtime/Go/antlr/antlrdoc.go b/runtime/Go/antlr/antlrdoc.go new file mode 100644 index 0000000000..4d7825f965 --- /dev/null +++ b/runtime/Go/antlr/antlrdoc.go @@ -0,0 +1,68 @@ +/* +Package antlr implements the Go version of the ANTLR 4 runtime. + +# The ANTLR Tool + +ANTLR (ANother Tool for Language Recognition) is a powerful parser generator for reading, processing, executing, +or translating structured text or binary files. It's widely used to build languages, tools, and frameworks. +From a grammar, ANTLR generates a parser that can build parse trees and also generates a listener interface +(or visitor) that makes it easy to respond to the recognition of phrases of interest. + +# Code Generation + +ANTLR supports the generation of code in a number of [target languages], and the generated code is supported by a +runtime library, written specifically to support the generated code in the target language. This library is the +runtime for the Go target. + +To generate code for the go target, it is generally recommended to place the source grammar files in a package of +their own, and use the `.sh` script method of generating code, using the go generate directive. In that same directory +it is usual, though not required, to place the antlr tool that should be used to generate the code. That does mean +that the antlr tool JAR file will be checked in to your source code control though, so you are free to use any other +way of specifying the version of the ANTLR tool to use, such as aliasing in `.zshrc` or equivalent, or a profile in +your IDE, or configuration in your CI system. + +Here is a general template for an ANTLR based recognizer in Go: + + . 
+ ├── myproject + ├── parser + │ ├── mygrammar.g4 + │ ├── antlr-4.11.0-complete.jar + │ ├── error_listeners.go + │ ├── generate.go + │ ├── generate.sh + ├── go.mod + ├── go.sum + ├── main.go + └── main_test.go + +Make sure that the package statement in your grammar file(s) reflects the go package they exist in. +The generate.go file then looks like this: + + package parser + + //go:generate ./generate.sh + +And the generate.sh file will look similar to this: + + #!/bin/sh + + alias antlr4='java -Xmx500M -cp "./antlr4-4.11.0-complete.jar:$CLASSPATH" org.antlr.v4.Tool' + antlr4 -Dlanguage=Go -no-visitor -package tgram *.g4 + +depending on whether you want visitors or listeners or any other ANTLR options. + +From the command line at the root of your package “myproject” you can then simply issue the command: + + go generate ./... + +# Copyright Notice + +Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. + +Use of this file is governed by the BSD 3-clause license, which can be found in the [LICENSE.txt] file in the project root. + +[target languages]: https://github.com/antlr/antlr4/tree/master/runtime +[LICENSE.txt]: https://github.com/antlr/antlr4/blob/master/LICENSE.txt +*/ +package antlr diff --git a/runtime/Go/antlr/atn.go b/runtime/Go/antlr/atn.go index a4e2079e65..98010d2e6e 100644 --- a/runtime/Go/antlr/atn.go +++ b/runtime/Go/antlr/atn.go @@ -1,4 +1,4 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. // Use of this file is governed by the BSD 3-clause license that // can be found in the LICENSE.txt file in the project root. 
@@ -6,11 +6,24 @@ package antlr import "sync" +// ATNInvalidAltNumber is used to represent an ALT number that has yet to be calculated or +// which is invalid for a particular struct such as [*antlr.BaseRuleContext] var ATNInvalidAltNumber int +// ATN represents an “[Augmented Transition Network]”, though general in ANTLR the term +// “Augmented Recursive Transition Network” though there are some descriptions of “[Recursive Transition Network]” +// in existence. +// +// ATNs represent the main networks in the system and are serialized by the code generator and support [ALL(*)]. +// +// [Augmented Transition Network]: https://en.wikipedia.org/wiki/Augmented_transition_network +// [ALL(*)]: https://www.antlr.org/papers/allstar-techreport.pdf +// [Recursive Transition Network]: https://en.wikipedia.org/wiki/Recursive_transition_network type ATN struct { // DecisionToState is the decision points for all rules, subrules, optional - // blocks, ()+, ()*, etc. Used to build DFA predictors for them. + // blocks, ()+, ()*, etc. Each subrule/rule is a decision point, and we must track them so we + // can go back later and build DFA predictors for them. This includes + // all the rules, subrules, optional blocks, ()+, ()* etc... DecisionToState []DecisionState // grammarType is the ATN type and is used for deserializing ATNs from strings. @@ -45,6 +58,8 @@ type ATN struct { edgeMu sync.RWMutex } +// NewATN returns a new ATN struct representing the given grammarType and is used +// for runtime deserialization of ATNs from the code generated by the ANTLR tool func NewATN(grammarType int, maxTokenType int) *ATN { return &ATN{ grammarType: grammarType, @@ -53,7 +68,7 @@ func NewATN(grammarType int, maxTokenType int) *ATN { } } -// NextTokensInContext computes the set of valid tokens that can occur starting +// NextTokensInContext computes and returns the set of valid tokens that can occur starting // in state s. 
If ctx is nil, the set of tokens will not include what can follow // the rule surrounding s. In other words, the set will be restricted to tokens // reachable staying within the rule of s. @@ -61,8 +76,8 @@ func (a *ATN) NextTokensInContext(s ATNState, ctx RuleContext) *IntervalSet { return NewLL1Analyzer(a).Look(s, nil, ctx) } -// NextTokensNoContext computes the set of valid tokens that can occur starting -// in s and staying in same rule. Token.EPSILON is in set if we reach end of +// NextTokensNoContext computes and returns the set of valid tokens that can occur starting +// in state s and staying in same rule. [antlr.Token.EPSILON] is in set if we reach end of // rule. func (a *ATN) NextTokensNoContext(s ATNState) *IntervalSet { a.mu.Lock() @@ -76,6 +91,8 @@ func (a *ATN) NextTokensNoContext(s ATNState) *IntervalSet { return iset } +// NextTokens computes and returns the set of valid tokens starting in state s, by +// calling either [NextTokensNoContext] (ctx == nil) or [NextTokensInContext] (ctx != nil). func (a *ATN) NextTokens(s ATNState, ctx RuleContext) *IntervalSet { if ctx == nil { return a.NextTokensNoContext(s) diff --git a/runtime/Go/antlr/atn_config.go b/runtime/Go/antlr/atn_config.go index 97ba417f74..7619fa172e 100644 --- a/runtime/Go/antlr/atn_config.go +++ b/runtime/Go/antlr/atn_config.go @@ -1,4 +1,4 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. // Use of this file is governed by the BSD 3-clause license that // can be found in the LICENSE.txt file in the project root. @@ -8,19 +8,14 @@ import ( "fmt" ) -type comparable interface { - equals(other interface{}) bool -} - // ATNConfig is a tuple: (ATN state, predicted alt, syntactic, semantic // context). The syntactic context is a graph-structured stack node whose // path(s) to the root is the rule invocation(s) chain used to arrive at the // state. 
The semantic context is the tree of semantic predicates encountered // before reaching an ATN state. type ATNConfig interface { - comparable - - hash() int + Equals(o Collectable[ATNConfig]) bool + Hash() int GetState() ATNState GetAlt() int @@ -47,7 +42,7 @@ type BaseATNConfig struct { reachesIntoOuterContext int } -func NewBaseATNConfig7(old *BaseATNConfig) *BaseATNConfig { // TODO: Dup +func NewBaseATNConfig7(old *BaseATNConfig) ATNConfig { // TODO: Dup return &BaseATNConfig{ state: old.state, alt: old.alt, @@ -135,11 +130,16 @@ func (b *BaseATNConfig) SetReachesIntoOuterContext(v int) { b.reachesIntoOuterContext = v } +// Equals is the default comparison function for an ATNConfig when no specialist implementation is required +// for a collection. +// // An ATN configuration is equal to another if both have the same state, they // predict the same alternative, and syntactic/semantic contexts are the same. -func (b *BaseATNConfig) equals(o interface{}) bool { +func (b *BaseATNConfig) Equals(o Collectable[ATNConfig]) bool { if b == o { return true + } else if o == nil { + return false } var other, ok = o.(*BaseATNConfig) @@ -153,30 +153,32 @@ func (b *BaseATNConfig) equals(o interface{}) bool { if b.context == nil { equal = other.context == nil } else { - equal = b.context.equals(other.context) + equal = b.context.Equals(other.context) } var ( nums = b.state.GetStateNumber() == other.state.GetStateNumber() alts = b.alt == other.alt - cons = b.semanticContext.equals(other.semanticContext) + cons = b.semanticContext.Equals(other.semanticContext) sups = b.precedenceFilterSuppressed == other.precedenceFilterSuppressed ) return nums && alts && cons && sups && equal } -func (b *BaseATNConfig) hash() int { +// Hash is the default hash function for BaseATNConfig, when no specialist hash function +// is required for a collection +func (b *BaseATNConfig) Hash() int { var c int if b.context != nil { - c = b.context.hash() + c = b.context.Hash() } h := murmurInit(7) h = 
murmurUpdate(h, b.state.GetStateNumber()) h = murmurUpdate(h, b.alt) h = murmurUpdate(h, c) - h = murmurUpdate(h, b.semanticContext.hash()) + h = murmurUpdate(h, b.semanticContext.Hash()) return murmurFinish(h, 4) } @@ -243,7 +245,9 @@ func NewLexerATNConfig1(state ATNState, alt int, context PredictionContext) *Lex return &LexerATNConfig{BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone)} } -func (l *LexerATNConfig) hash() int { +// Hash is the default hash function for LexerATNConfig objects, it can be used directly or via +// the default comparator [ObjEqComparator]. +func (l *LexerATNConfig) Hash() int { var f int if l.passedThroughNonGreedyDecision { f = 1 @@ -253,15 +257,20 @@ func (l *LexerATNConfig) hash() int { h := murmurInit(7) h = murmurUpdate(h, l.state.GetStateNumber()) h = murmurUpdate(h, l.alt) - h = murmurUpdate(h, l.context.hash()) - h = murmurUpdate(h, l.semanticContext.hash()) + h = murmurUpdate(h, l.context.Hash()) + h = murmurUpdate(h, l.semanticContext.Hash()) h = murmurUpdate(h, f) - h = murmurUpdate(h, l.lexerActionExecutor.hash()) + h = murmurUpdate(h, l.lexerActionExecutor.Hash()) h = murmurFinish(h, 6) return h } -func (l *LexerATNConfig) equals(other interface{}) bool { +// Equals is the default comparison function for LexerATNConfig objects, it can be used directly or via +// the default comparator [ObjEqComparator]. 
+func (l *LexerATNConfig) Equals(other Collectable[ATNConfig]) bool { + if l == other { + return true + } var othert, ok = other.(*LexerATNConfig) if l == other { @@ -275,7 +284,7 @@ func (l *LexerATNConfig) equals(other interface{}) bool { var b bool if l.lexerActionExecutor != nil { - b = !l.lexerActionExecutor.equals(othert.lexerActionExecutor) + b = !l.lexerActionExecutor.Equals(othert.lexerActionExecutor) } else { b = othert.lexerActionExecutor != nil } @@ -284,10 +293,9 @@ func (l *LexerATNConfig) equals(other interface{}) bool { return false } - return l.BaseATNConfig.equals(othert.BaseATNConfig) + return l.BaseATNConfig.Equals(othert.BaseATNConfig) } - func checkNonGreedyDecision(source *LexerATNConfig, target ATNState) bool { var ds, ok = target.(DecisionState) diff --git a/runtime/Go/antlr/atn_config_set.go b/runtime/Go/antlr/atn_config_set.go index 49ad4a7197..582f5fa4b5 100644 --- a/runtime/Go/antlr/atn_config_set.go +++ b/runtime/Go/antlr/atn_config_set.go @@ -1,4 +1,4 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. // Use of this file is governed by the BSD 3-clause license that // can be found in the LICENSE.txt file in the project root. @@ -7,18 +7,17 @@ package antlr import "fmt" type ATNConfigSet interface { - hash() int + Hash() int + Equals(o Collectable[ATNConfig]) bool Add(ATNConfig, *DoubleDict) bool AddAll([]ATNConfig) bool - GetStates() Set + GetStates() *JStore[ATNState, Comparator[ATNState]] GetPredicates() []SemanticContext GetItems() []ATNConfig OptimizeConfigs(interpreter *BaseATNSimulator) - Equals(other interface{}) bool - Length() int IsEmpty() bool Contains(ATNConfig) bool @@ -57,7 +56,7 @@ type BaseATNConfigSet struct { // effectively doubles the number of objects associated with ATNConfigs. All // keys are hashed by (s, i, _, pi), not including the context. Wiped out when // read-only because a set becomes a DFA state. 
- configLookup Set + configLookup *JStore[ATNConfig, Comparator[ATNConfig]] // configs is the added elements. configs []ATNConfig @@ -83,7 +82,7 @@ type BaseATNConfigSet struct { // readOnly is whether it is read-only. Do not // allow any code to manipulate the set if true because DFA states will point at - // sets and those must not change. It not protect other fields; conflictingAlts + // sets and those must not change. It not, protect other fields; conflictingAlts // in particular, which is assigned after readOnly. readOnly bool @@ -104,7 +103,7 @@ func (b *BaseATNConfigSet) Alts() *BitSet { func NewBaseATNConfigSet(fullCtx bool) *BaseATNConfigSet { return &BaseATNConfigSet{ cachedHash: -1, - configLookup: newArray2DHashSetWithCap(hashATNConfig, equalATNConfigs, 16, 2), + configLookup: NewJStore[ATNConfig, Comparator[ATNConfig]](&ATNConfigComparator[ATNConfig]{}), fullCtx: fullCtx, } } @@ -126,9 +125,11 @@ func (b *BaseATNConfigSet) Add(config ATNConfig, mergeCache *DoubleDict) bool { b.dipsIntoOuterContext = true } - existing := b.configLookup.Add(config).(ATNConfig) + existing, present := b.configLookup.Put(config) - if existing == config { + // The config was not already in the set + // + if !present { b.cachedHash = -1 b.configs = append(b.configs, config) // Track order here return true @@ -154,11 +155,14 @@ func (b *BaseATNConfigSet) Add(config ATNConfig, mergeCache *DoubleDict) bool { return true } -func (b *BaseATNConfigSet) GetStates() Set { - states := newArray2DHashSet(nil, nil) +func (b *BaseATNConfigSet) GetStates() *JStore[ATNState, Comparator[ATNState]] { + + // states uses the standard comparator provided by the ATNState instance + // + states := NewJStore[ATNState, Comparator[ATNState]](&ObjEqComparator[ATNState]{}) for i := 0; i < len(b.configs); i++ { - states.Add(b.configs[i].GetState()) + states.Put(b.configs[i].GetState()) } return states @@ -214,7 +218,34 @@ func (b *BaseATNConfigSet) AddAll(coll []ATNConfig) bool { return false } -func (b 
*BaseATNConfigSet) Equals(other interface{}) bool { +// Compare is a hack function just to verify that adding DFAstares to the known +// set works, so long as comparison of ATNConfigSet s works. For that to work, we +// need to make sure that the set of ATNConfigs in two sets are equivalent. We can't +// know the order, so we do this inefficient hack. If this proves the point, then +// we can change the config set to a better structure. +func (b *BaseATNConfigSet) Compare(bs *BaseATNConfigSet) bool { + if len(b.configs) != len(bs.configs) { + return false + } + + for _, c := range b.configs { + found := false + for _, c2 := range bs.configs { + if c.Equals(c2) { + found = true + break + } + } + + if !found { + return false + } + + } + return true +} + +func (b *BaseATNConfigSet) Equals(other Collectable[ATNConfig]) bool { if b == other { return true } else if _, ok := other.(*BaseATNConfigSet); !ok { @@ -224,15 +255,15 @@ func (b *BaseATNConfigSet) Equals(other interface{}) bool { other2 := other.(*BaseATNConfigSet) return b.configs != nil && - // TODO: b.configs.equals(other2.configs) && // TODO: Is b necessary? 
b.fullCtx == other2.fullCtx && b.uniqueAlt == other2.uniqueAlt && b.conflictingAlts == other2.conflictingAlts && b.hasSemanticContext == other2.hasSemanticContext && - b.dipsIntoOuterContext == other2.dipsIntoOuterContext + b.dipsIntoOuterContext == other2.dipsIntoOuterContext && + b.Compare(other2) } -func (b *BaseATNConfigSet) hash() int { +func (b *BaseATNConfigSet) Hash() int { if b.readOnly { if b.cachedHash == -1 { b.cachedHash = b.hashCodeConfigs() @@ -247,7 +278,7 @@ func (b *BaseATNConfigSet) hash() int { func (b *BaseATNConfigSet) hashCodeConfigs() int { h := 1 for _, config := range b.configs { - h = 31*h + config.hash() + h = 31*h + config.Hash() } return h } @@ -283,7 +314,7 @@ func (b *BaseATNConfigSet) Clear() { b.configs = make([]ATNConfig, 0) b.cachedHash = -1 - b.configLookup = newArray2DHashSet(nil, equalATNConfigs) + b.configLookup = NewJStore[ATNConfig, Comparator[ATNConfig]](&BaseATNConfigComparator[ATNConfig]{}) } func (b *BaseATNConfigSet) FullContext() bool { @@ -365,7 +396,8 @@ type OrderedATNConfigSet struct { func NewOrderedATNConfigSet() *OrderedATNConfigSet { b := NewBaseATNConfigSet(false) - b.configLookup = newArray2DHashSet(nil, nil) + // This set uses the standard Hash() and Equals() from ATNConfig + b.configLookup = NewJStore[ATNConfig, Comparator[ATNConfig]](&ObjEqComparator[ATNConfig]{}) return &OrderedATNConfigSet{BaseATNConfigSet: b} } @@ -375,7 +407,7 @@ func hashATNConfig(i interface{}) int { hash := 7 hash = 31*hash + o.GetState().GetStateNumber() hash = 31*hash + o.GetAlt() - hash = 31*hash + o.GetSemanticContext().hash() + hash = 31*hash + o.GetSemanticContext().Hash() return hash } @@ -403,5 +435,5 @@ func equalATNConfigs(a, b interface{}) bool { return false } - return ai.GetSemanticContext().equals(bi.GetSemanticContext()) + return ai.GetSemanticContext().Equals(bi.GetSemanticContext()) } diff --git a/runtime/Go/antlr/atn_deserialization_options.go b/runtime/Go/antlr/atn_deserialization_options.go index 
cb8eafb0b2..3c975ec7bf 100644 --- a/runtime/Go/antlr/atn_deserialization_options.go +++ b/runtime/Go/antlr/atn_deserialization_options.go @@ -1,4 +1,4 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. // Use of this file is governed by the BSD 3-clause license that // can be found in the LICENSE.txt file in the project root. diff --git a/runtime/Go/antlr/atn_deserializer.go b/runtime/Go/antlr/atn_deserializer.go index aea9bbfa93..3888856b4b 100644 --- a/runtime/Go/antlr/atn_deserializer.go +++ b/runtime/Go/antlr/atn_deserializer.go @@ -1,4 +1,4 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. // Use of this file is governed by the BSD 3-clause license that // can be found in the LICENSE.txt file in the project root. diff --git a/runtime/Go/antlr/atn_simulator.go b/runtime/Go/antlr/atn_simulator.go index d5454d6d5d..41529115fa 100644 --- a/runtime/Go/antlr/atn_simulator.go +++ b/runtime/Go/antlr/atn_simulator.go @@ -1,4 +1,4 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. // Use of this file is governed by the BSD 3-clause license that // can be found in the LICENSE.txt file in the project root. diff --git a/runtime/Go/antlr/atn_state.go b/runtime/Go/antlr/atn_state.go index 3835bb2e93..1f2a56bc31 100644 --- a/runtime/Go/antlr/atn_state.go +++ b/runtime/Go/antlr/atn_state.go @@ -1,4 +1,4 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. // Use of this file is governed by the BSD 3-clause license that // can be found in the LICENSE.txt file in the project root. 
@@ -49,7 +49,8 @@ type ATNState interface { AddTransition(Transition, int) String() string - hash() int + Hash() int + Equals(Collectable[ATNState]) bool } type BaseATNState struct { @@ -123,7 +124,7 @@ func (as *BaseATNState) SetNextTokenWithinRule(v *IntervalSet) { as.NextTokenWithinRule = v } -func (as *BaseATNState) hash() int { +func (as *BaseATNState) Hash() int { return as.stateNumber } @@ -131,7 +132,7 @@ func (as *BaseATNState) String() string { return strconv.Itoa(as.stateNumber) } -func (as *BaseATNState) equals(other interface{}) bool { +func (as *BaseATNState) Equals(other Collectable[ATNState]) bool { if ot, ok := other.(ATNState); ok { return as.stateNumber == ot.GetStateNumber() } diff --git a/runtime/Go/antlr/atn_type.go b/runtime/Go/antlr/atn_type.go index a7b48976b3..3a515a145f 100644 --- a/runtime/Go/antlr/atn_type.go +++ b/runtime/Go/antlr/atn_type.go @@ -1,4 +1,4 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. // Use of this file is governed by the BSD 3-clause license that // can be found in the LICENSE.txt file in the project root. diff --git a/runtime/Go/antlr/char_stream.go b/runtime/Go/antlr/char_stream.go index 70c1207f7f..c33f0adb5e 100644 --- a/runtime/Go/antlr/char_stream.go +++ b/runtime/Go/antlr/char_stream.go @@ -1,4 +1,4 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. // Use of this file is governed by the BSD 3-clause license that // can be found in the LICENSE.txt file in the project root. diff --git a/runtime/Go/antlr/common_token_factory.go b/runtime/Go/antlr/common_token_factory.go index 330ff8f31f..1bb0314ea0 100644 --- a/runtime/Go/antlr/common_token_factory.go +++ b/runtime/Go/antlr/common_token_factory.go @@ -1,4 +1,4 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Copyright (c) 2012-2022 The ANTLR Project. 
All rights reserved. // Use of this file is governed by the BSD 3-clause license that // can be found in the LICENSE.txt file in the project root. diff --git a/runtime/Go/antlr/common_token_stream.go b/runtime/Go/antlr/common_token_stream.go index c90e9b8904..c6c9485a20 100644 --- a/runtime/Go/antlr/common_token_stream.go +++ b/runtime/Go/antlr/common_token_stream.go @@ -1,4 +1,4 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. // Use of this file is governed by the BSD 3-clause license that // can be found in the LICENSE.txt file in the project root. @@ -331,10 +331,12 @@ func (c *CommonTokenStream) GetTextFromRuleContext(interval RuleContext) string func (c *CommonTokenStream) GetTextFromInterval(interval *Interval) string { c.lazyInit() - c.Fill() if interval == nil { + c.Fill() interval = NewInterval(0, len(c.tokens)-1) + } else { + c.Sync(interval.Stop) } start := interval.Start diff --git a/runtime/Go/antlr/common_token_stream_test.go b/runtime/Go/antlr/common_token_stream_test.go index c98f29bd42..e7c75d49b1 100644 --- a/runtime/Go/antlr/common_token_stream_test.go +++ b/runtime/Go/antlr/common_token_stream_test.go @@ -1,4 +1,4 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. // Use of this file is governed by the BSD 3-clause license that // can be found in the LICENSE.txt file in the project root. 
@@ -152,3 +152,27 @@ func TestCommonTokenStreamCannotConsumeEOF(t *testing.T) { assert.Equal(1, tokens.Size()) assert.Panics(tokens.Consume) } + +func TestCommonTokenStreamGetTextFromInterval(t *testing.T) { + assert := assertNew(t) + lexEngine := &commonTokenStreamTestLexer{ + tokens: []Token{ + newTestCommonToken(1, " ", LexerHidden), // 0 + newTestCommonToken(1, "x", LexerDefaultTokenChannel), // 1 + newTestCommonToken(1, " ", LexerHidden), // 2 + newTestCommonToken(1, "=", LexerDefaultTokenChannel), // 3 + newTestCommonToken(1, "34", LexerDefaultTokenChannel), // 4 + newTestCommonToken(1, " ", LexerHidden), // 5 + newTestCommonToken(1, " ", LexerHidden), // 6 + newTestCommonToken(1, ";", LexerDefaultTokenChannel), // 7 + newTestCommonToken(1, " ", LexerHidden), // 8 + newTestCommonToken(1, "\n", LexerHidden), // 9 + newTestCommonToken(TokenEOF, "", LexerDefaultTokenChannel), // 10 + }, + } + tokens := NewCommonTokenStream(lexEngine, TokenDefaultChannel) + assert.Equal("x", tokens.GetTextFromInterval(&Interval{Start: 1, Stop: 1})) + assert.Equal(len(tokens.tokens), 2) + assert.Equal(" x =34 ; \n", tokens.GetTextFromInterval(nil)) + assert.Equal(len(tokens.tokens), 11) +} diff --git a/runtime/Go/antlr/comparators.go b/runtime/Go/antlr/comparators.go new file mode 100644 index 0000000000..fbe76c33e0 --- /dev/null +++ b/runtime/Go/antlr/comparators.go @@ -0,0 +1,137 @@ +package antlr + +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +// This file contains all the implementations of custom comparators used for generic collections when the +// Hash() and Equals() funcs supplied by the struct objects themselves need to be overridden. 
Normally, we would +// put the comparators in the source file for the struct themselves, but given the organization of this code is +// sorta kinda based upon the Java code, I found it confusing trying to find out which comparator was where and used by +// which instantiation of a collection. For instance, an Array2DHashSet in the Java source, when used with ATNConfig +// collections requires three different comparators depending on what the collection is being used for. Collecting - pun intended - +// all the comparators here, makes it much easier to see which implementation of hash and equals is used by which collection. +// It also makes it easy to verify that the Hash() and Equals() functions marry up with the Java implementations. + +// ObjEqComparator is the equivalent of the Java ObjectEqualityComparator, which is the default instance of +// Equality comparator. We do not have inheritance in Go, only interfaces, so we use generics to enforce some +// type safety and avoid having to implement this for every type that we want to perform comparison on. +// +// This comparator works by using the standard Hash() and Equals() methods of the type T that is being compared. Which +// allows us to use it in any collection instance that does nto require a special hash or equals implementation. +type ObjEqComparator[T Collectable[T]] struct{} + +// Equals2 delegates to the Equals() method of type T +func (c *ObjEqComparator[T]) Equals2(o1, o2 T) bool { + return o1.Equals(o2) +} + +// Hash1 delegates to the Hash() method of type T +func (c *ObjEqComparator[T]) Hash1(o T) int { + + return o.Hash() +} + +type SemCComparator[T Collectable[T]] struct{} + +// ATNConfigComparator is used as the compartor for the configLookup field of an ATNConfigSet +// and has a custom Equals() and Hash() implementation, because equality is not based on the +// standard Hash() and Equals() methods of the ATNConfig type. 
+type ATNConfigComparator[T Collectable[T]] struct { +} + +// Equals2 is a custom comparator for ATNConfigs specifically for configLookup +func (c *ATNConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool { + + // Same pointer, must be equal, even if both nil + // + if o1 == o2 { + return true + + } + + // If either are nil, but not both, then the result is false + // + if o1 == nil || o2 == nil { + return false + } + + return o1.GetState().GetStateNumber() == o2.GetState().GetStateNumber() && + o1.GetAlt() == o2.GetAlt() && + o1.GetSemanticContext().Equals(o2.GetSemanticContext()) +} + +// Hash1 is custom hash implementation for ATNConfigs specifically for configLookup +func (c *ATNConfigComparator[T]) Hash1(o ATNConfig) int { + hash := 7 + hash = 31*hash + o.GetState().GetStateNumber() + hash = 31*hash + o.GetAlt() + hash = 31*hash + o.GetSemanticContext().Hash() + return hash +} + +// ATNAltConfigComparator is used as the comparator for mapping configs to Alt Bitsets +type ATNAltConfigComparator[T Collectable[T]] struct { +} + +// Equals2 is a custom comparator for ATNConfigs specifically for configLookup +func (c *ATNAltConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool { + + // Same pointer, must be equal, even if both nil + // + if o1 == o2 { + return true + + } + + // If either are nil, but not both, then the result is false + // + if o1 == nil || o2 == nil { + return false + } + + return o1.GetState().GetStateNumber() == o2.GetState().GetStateNumber() && + o1.GetContext().Equals(o2.GetContext()) +} + +// Hash1 is custom hash implementation for ATNConfigs specifically for configLookup +func (c *ATNAltConfigComparator[T]) Hash1(o ATNConfig) int { + h := murmurInit(7) + h = murmurUpdate(h, o.GetState().GetStateNumber()) + h = murmurUpdate(h, o.GetContext().Hash()) + return murmurFinish(h, 2) +} + +// BaseATNConfigComparator is used as the comparator for the configLookup field of a BaseATNConfigSet +// and has a custom Equals() and Hash() implementation, because 
equality is not based on the +// standard Hash() and Equals() methods of the ATNConfig type. +type BaseATNConfigComparator[T Collectable[T]] struct { +} + +// Equals2 is a custom comparator for ATNConfigs specifically for baseATNConfigSet +func (c *BaseATNConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool { + + // Same pointer, must be equal, even if both nil + // + if o1 == o2 { + return true + + } + + // If either are nil, but not both, then the result is false + // + if o1 == nil || o2 == nil { + return false + } + + return o1.GetState().GetStateNumber() == o2.GetState().GetStateNumber() && + o1.GetAlt() == o2.GetAlt() && + o1.GetSemanticContext().Equals(o2.GetSemanticContext()) +} + +// Hash1 is custom hash implementation for ATNConfigs specifically for configLookup, but in fact just +// delegates to the standard Hash() method of the ATNConfig type. +func (c *BaseATNConfigComparator[T]) Hash1(o ATNConfig) int { + + return o.Hash() +} diff --git a/runtime/Go/antlr/dfa.go b/runtime/Go/antlr/dfa.go index d55a2a87d5..5326baff95 100644 --- a/runtime/Go/antlr/dfa.go +++ b/runtime/Go/antlr/dfa.go @@ -1,13 +1,9 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. // Use of this file is governed by the BSD 3-clause license that // can be found in the LICENSE.txt file in the project root. package antlr -import ( - "sort" -) - type DFA struct { // atnStartState is the ATN state in which this was created atnStartState DecisionState @@ -15,8 +11,15 @@ type DFA struct { decision int // states is all the DFA states. Use Map to get the old state back; Set can only - // indicate whether it is there. - states map[int]*DFAState + // indicate whether it is there. 
Go maps implement key hash collisions and so on and are very + // good, but the DFAState is an object and can't be used directly as the key as it can in say JAva + // amd C#, whereby if the hashcode is the same for two objects, then Equals() is called against them + // to see if they really are the same object. + // + // + states *JStore[*DFAState, *ObjEqComparator[*DFAState]] + + numstates int s0 *DFAState @@ -29,7 +32,7 @@ func NewDFA(atnStartState DecisionState, decision int) *DFA { dfa := &DFA{ atnStartState: atnStartState, decision: decision, - states: make(map[int]*DFAState), + states: NewJStore[*DFAState, *ObjEqComparator[*DFAState]](&ObjEqComparator[*DFAState]{}), } if s, ok := atnStartState.(*StarLoopEntryState); ok && s.precedenceRuleDecision { dfa.precedenceDfa = true @@ -92,7 +95,8 @@ func (d *DFA) getPrecedenceDfa() bool { // true or nil otherwise, and d.precedenceDfa is updated. func (d *DFA) setPrecedenceDfa(precedenceDfa bool) { if d.getPrecedenceDfa() != precedenceDfa { - d.setStates(make(map[int]*DFAState)) + d.states = NewJStore[*DFAState, *ObjEqComparator[*DFAState]](&ObjEqComparator[*DFAState]{}) + d.numstates = 0 if precedenceDfa { precedenceState := NewDFAState(-1, NewBaseATNConfigSet(false)) @@ -117,38 +121,12 @@ func (d *DFA) setS0(s *DFAState) { d.s0 = s } -func (d *DFA) getState(hash int) (*DFAState, bool) { - s, ok := d.states[hash] - return s, ok -} - -func (d *DFA) setStates(states map[int]*DFAState) { - d.states = states -} - -func (d *DFA) setState(hash int, state *DFAState) { - d.states[hash] = state -} - -func (d *DFA) numStates() int { - return len(d.states) -} - -type dfaStateList []*DFAState - -func (d dfaStateList) Len() int { return len(d) } -func (d dfaStateList) Less(i, j int) bool { return d[i].stateNumber < d[j].stateNumber } -func (d dfaStateList) Swap(i, j int) { d[i], d[j] = d[j], d[i] } - // sortedStates returns the states in d sorted by their state number. 
func (d *DFA) sortedStates() []*DFAState { - vs := make([]*DFAState, 0, len(d.states)) - - for _, v := range d.states { - vs = append(vs, v) - } - sort.Sort(dfaStateList(vs)) + vs := d.states.SortedSlice(func(i, j *DFAState) bool { + return i.stateNumber < j.stateNumber + }) return vs } diff --git a/runtime/Go/antlr/dfa_serializer.go b/runtime/Go/antlr/dfa_serializer.go index bf2ccc06cd..84d0a31e53 100644 --- a/runtime/Go/antlr/dfa_serializer.go +++ b/runtime/Go/antlr/dfa_serializer.go @@ -1,4 +1,4 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. // Use of this file is governed by the BSD 3-clause license that // can be found in the LICENSE.txt file in the project root. diff --git a/runtime/Go/antlr/dfa_state.go b/runtime/Go/antlr/dfa_state.go index 970ed19865..c90dec55c8 100644 --- a/runtime/Go/antlr/dfa_state.go +++ b/runtime/Go/antlr/dfa_state.go @@ -1,4 +1,4 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. // Use of this file is governed by the BSD 3-clause license that // can be found in the LICENSE.txt file in the project root. @@ -90,16 +90,16 @@ func NewDFAState(stateNumber int, configs ATNConfigSet) *DFAState { } // GetAltSet gets the set of all alts mentioned by all ATN configurations in d. -func (d *DFAState) GetAltSet() Set { - alts := newArray2DHashSet(nil, nil) +func (d *DFAState) GetAltSet() []int { + var alts []int if d.configs != nil { for _, c := range d.configs.GetItems() { - alts.Add(c.GetAlt()) + alts = append(alts, c.GetAlt()) } } - if alts.Len() == 0 { + if len(alts) == 0 { return nil } @@ -130,27 +130,6 @@ func (d *DFAState) setPrediction(v int) { d.prediction = v } -// equals returns whether d equals other. Two DFAStates are equal if their ATN -// configuration sets are the same. This method is used to see if a state -// already exists. 
-// -// Because the number of alternatives and number of ATN configurations are -// finite, there is a finite number of DFA states that can be processed. This is -// necessary to show that the algorithm terminates. -// -// Cannot test the DFA state numbers here because in -// ParserATNSimulator.addDFAState we need to know if any other state exists that -// has d exact set of ATN configurations. The stateNumber is irrelevant. -func (d *DFAState) equals(other interface{}) bool { - if d == other { - return true - } else if _, ok := other.(*DFAState); !ok { - return false - } - - return d.configs.Equals(other.(*DFAState).configs) -} - func (d *DFAState) String() string { var s string if d.isAcceptState { @@ -164,8 +143,27 @@ func (d *DFAState) String() string { return fmt.Sprintf("%d:%s%s", d.stateNumber, fmt.Sprint(d.configs), s) } -func (d *DFAState) hash() int { +func (d *DFAState) Hash() int { h := murmurInit(7) - h = murmurUpdate(h, d.configs.hash()) + h = murmurUpdate(h, d.configs.Hash()) return murmurFinish(h, 1) } + +// Equals returns whether d equals other. Two DFAStates are equal if their ATN +// configuration sets are the same. This method is used to see if a state +// already exists. +// +// Because the number of alternatives and number of ATN configurations are +// finite, there is a finite number of DFA states that can be processed. This is +// necessary to show that the algorithm terminates. +// +// Cannot test the DFA state numbers here because in +// ParserATNSimulator.addDFAState we need to know if any other state exists that +// has d exact set of ATN configurations. The stateNumber is irrelevant. 
+func (d *DFAState) Equals(o Collectable[*DFAState]) bool { + if d == o { + return true + } + + return d.configs.Equals(o.(*DFAState).configs) +} diff --git a/runtime/Go/antlr/diagnostic_error_listener.go b/runtime/Go/antlr/diagnostic_error_listener.go index 1fec43d9dc..c55bcc19b2 100644 --- a/runtime/Go/antlr/diagnostic_error_listener.go +++ b/runtime/Go/antlr/diagnostic_error_listener.go @@ -1,4 +1,4 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. // Use of this file is governed by the BSD 3-clause license that // can be found in the LICENSE.txt file in the project root. @@ -87,7 +87,6 @@ func (d *DiagnosticErrorListener) getDecisionDescription(recognizer Parser, dfa return strconv.Itoa(decision) + " (" + ruleName + ")" } -// // Computes the set of conflicting or ambiguous alternatives from a // configuration set, if that information was not already provided by the // parser. @@ -97,7 +96,6 @@ func (d *DiagnosticErrorListener) getDecisionDescription(recognizer Parser, dfa // @param configs The conflicting or ambiguous configuration set. // @return Returns {@code ReportedAlts} if it is not {@code nil}, otherwise // returns the set of alternatives represented in {@code configs}. -// func (d *DiagnosticErrorListener) getConflictingAlts(ReportedAlts *BitSet, set ATNConfigSet) *BitSet { if ReportedAlts != nil { return ReportedAlts diff --git a/runtime/Go/antlr/error_listener.go b/runtime/Go/antlr/error_listener.go index 028e1a9d7f..f679f0dcd5 100644 --- a/runtime/Go/antlr/error_listener.go +++ b/runtime/Go/antlr/error_listener.go @@ -1,4 +1,4 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. // Use of this file is governed by the BSD 3-clause license that // can be found in the LICENSE.txt file in the project root. 
@@ -48,12 +48,9 @@ func NewConsoleErrorListener() *ConsoleErrorListener { return new(ConsoleErrorListener) } -// // Provides a default instance of {@link ConsoleErrorListener}. -// var ConsoleErrorListenerINSTANCE = NewConsoleErrorListener() -// // {@inheritDoc} // //

    @@ -64,7 +61,6 @@ var ConsoleErrorListenerINSTANCE = NewConsoleErrorListener() //

     // line line:charPositionInLine msg
     // 
    -// func (c *ConsoleErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) { fmt.Fprintln(os.Stderr, "line "+strconv.Itoa(line)+":"+strconv.Itoa(column)+" "+msg) } diff --git a/runtime/Go/antlr/error_strategy.go b/runtime/Go/antlr/error_strategy.go index c4080dbfd1..5c0a637ba4 100644 --- a/runtime/Go/antlr/error_strategy.go +++ b/runtime/Go/antlr/error_strategy.go @@ -1,4 +1,4 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. // Use of this file is governed by the BSD 3-clause license that // can be found in the LICENSE.txt file in the project root. @@ -23,7 +23,6 @@ type ErrorStrategy interface { // This is the default implementation of {@link ANTLRErrorStrategy} used for // error Reporting and recovery in ANTLR parsers. -// type DefaultErrorStrategy struct { errorRecoveryMode bool lastErrorIndex int @@ -61,12 +60,10 @@ func (d *DefaultErrorStrategy) reset(recognizer Parser) { d.endErrorCondition(recognizer) } -// // This method is called to enter error recovery mode when a recognition // exception is Reported. // // @param recognizer the parser instance -// func (d *DefaultErrorStrategy) beginErrorCondition(recognizer Parser) { d.errorRecoveryMode = true } @@ -75,28 +72,23 @@ func (d *DefaultErrorStrategy) InErrorRecoveryMode(recognizer Parser) bool { return d.errorRecoveryMode } -// // This method is called to leave error recovery mode after recovering from // a recognition exception. // // @param recognizer -// func (d *DefaultErrorStrategy) endErrorCondition(recognizer Parser) { d.errorRecoveryMode = false d.lastErrorStates = nil d.lastErrorIndex = -1 } -// // {@inheritDoc} // //

    The default implementation simply calls {@link //endErrorCondition}.

    -// func (d *DefaultErrorStrategy) ReportMatch(recognizer Parser) { d.endErrorCondition(recognizer) } -// // {@inheritDoc} // //

    The default implementation returns immediately if the handler is already @@ -114,7 +106,6 @@ func (d *DefaultErrorStrategy) ReportMatch(recognizer Parser) { //

  • All other types: calls {@link Parser//NotifyErrorListeners} to Report // the exception
  • // -// func (d *DefaultErrorStrategy) ReportError(recognizer Parser, e RecognitionException) { // if we've already Reported an error and have not Matched a token // yet successfully, don't Report any errors. @@ -142,7 +133,6 @@ func (d *DefaultErrorStrategy) ReportError(recognizer Parser, e RecognitionExcep //

    The default implementation reSynchronizes the parser by consuming tokens // until we find one in the reSynchronization set--loosely the set of tokens // that can follow the current rule.

    -// func (d *DefaultErrorStrategy) Recover(recognizer Parser, e RecognitionException) { if d.lastErrorIndex == recognizer.GetInputStream().Index() && @@ -206,7 +196,6 @@ func (d *DefaultErrorStrategy) Recover(recognizer Parser, e RecognitionException // compare token set at the start of the loop and at each iteration. If for // some reason speed is suffering for you, you can turn off d // functionality by simply overriding d method as a blank { }.

    -// func (d *DefaultErrorStrategy) Sync(recognizer Parser) { // If already recovering, don't try to Sync if d.InErrorRecoveryMode(recognizer) { @@ -247,7 +236,6 @@ func (d *DefaultErrorStrategy) Sync(recognizer Parser) { // // @param recognizer the parser instance // @param e the recognition exception -// func (d *DefaultErrorStrategy) ReportNoViableAlternative(recognizer Parser, e *NoViableAltException) { tokens := recognizer.GetTokenStream() var input string @@ -264,7 +252,6 @@ func (d *DefaultErrorStrategy) ReportNoViableAlternative(recognizer Parser, e *N recognizer.NotifyErrorListeners(msg, e.offendingToken, e) } -// // This is called by {@link //ReportError} when the exception is an // {@link InputMisMatchException}. // @@ -272,14 +259,12 @@ func (d *DefaultErrorStrategy) ReportNoViableAlternative(recognizer Parser, e *N // // @param recognizer the parser instance // @param e the recognition exception -// func (this *DefaultErrorStrategy) ReportInputMisMatch(recognizer Parser, e *InputMisMatchException) { msg := "mismatched input " + this.GetTokenErrorDisplay(e.offendingToken) + " expecting " + e.getExpectedTokens().StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false) recognizer.NotifyErrorListeners(msg, e.offendingToken, e) } -// // This is called by {@link //ReportError} when the exception is a // {@link FailedPredicateException}. // @@ -287,7 +272,6 @@ func (this *DefaultErrorStrategy) ReportInputMisMatch(recognizer Parser, e *Inpu // // @param recognizer the parser instance // @param e the recognition exception -// func (d *DefaultErrorStrategy) ReportFailedPredicate(recognizer Parser, e *FailedPredicateException) { ruleName := recognizer.GetRuleNames()[recognizer.GetParserRuleContext().GetRuleIndex()] msg := "rule " + ruleName + " " + e.message @@ -310,7 +294,6 @@ func (d *DefaultErrorStrategy) ReportFailedPredicate(recognizer Parser, e *Faile // {@link Parser//NotifyErrorListeners}.

    // // @param recognizer the parser instance -// func (d *DefaultErrorStrategy) ReportUnwantedToken(recognizer Parser) { if d.InErrorRecoveryMode(recognizer) { return @@ -339,7 +322,6 @@ func (d *DefaultErrorStrategy) ReportUnwantedToken(recognizer Parser) { // {@link Parser//NotifyErrorListeners}.

    // // @param recognizer the parser instance -// func (d *DefaultErrorStrategy) ReportMissingToken(recognizer Parser) { if d.InErrorRecoveryMode(recognizer) { return @@ -392,15 +374,14 @@ func (d *DefaultErrorStrategy) ReportMissingToken(recognizer Parser) { // derivation: // //
    -// => ID '=' '(' INT ')' ('+' atom)* ''
    +// => ID '=' '(' INT ')' ('+' atom)* ”
     // ^
     // 
    // -// The attempt to Match {@code ')'} will fail when it sees {@code ''} and -// call {@link //recoverInline}. To recover, it sees that {@code LA(1)==''} +// The attempt to Match {@code ')'} will fail when it sees {@code ”} and +// call {@link //recoverInline}. To recover, it sees that {@code LA(1)==”} // is in the set of tokens that can follow the {@code ')'} token reference // in rule {@code atom}. It can assume that you forgot the {@code ')'}. -// func (d *DefaultErrorStrategy) RecoverInline(recognizer Parser) Token { // SINGLE TOKEN DELETION MatchedSymbol := d.SingleTokenDeletion(recognizer) @@ -418,7 +399,6 @@ func (d *DefaultErrorStrategy) RecoverInline(recognizer Parser) Token { panic(NewInputMisMatchException(recognizer)) } -// // This method implements the single-token insertion inline error recovery // strategy. It is called by {@link //recoverInline} if the single-token // deletion strategy fails to recover from the mismatched input. If this @@ -434,7 +414,6 @@ func (d *DefaultErrorStrategy) RecoverInline(recognizer Parser) Token { // @param recognizer the parser instance // @return {@code true} if single-token insertion is a viable recovery // strategy for the current mismatched input, otherwise {@code false} -// func (d *DefaultErrorStrategy) SingleTokenInsertion(recognizer Parser) bool { currentSymbolType := recognizer.GetTokenStream().LA(1) // if current token is consistent with what could come after current @@ -469,7 +448,6 @@ func (d *DefaultErrorStrategy) SingleTokenInsertion(recognizer Parser) bool { // @return the successfully Matched {@link Token} instance if single-token // deletion successfully recovers from the mismatched input, otherwise // {@code nil} -// func (d *DefaultErrorStrategy) SingleTokenDeletion(recognizer Parser) Token { NextTokenType := recognizer.GetTokenStream().LA(2) expecting := d.GetExpectedTokens(recognizer) @@ -507,7 +485,6 @@ func (d *DefaultErrorStrategy) SingleTokenDeletion(recognizer Parser) Token { // a 
CommonToken of the appropriate type. The text will be the token. // If you change what tokens must be created by the lexer, // override d method to create the appropriate tokens. -// func (d *DefaultErrorStrategy) GetMissingSymbol(recognizer Parser) Token { currentSymbol := recognizer.GetCurrentToken() expecting := d.GetExpectedTokens(recognizer) @@ -546,7 +523,6 @@ func (d *DefaultErrorStrategy) GetExpectedTokens(recognizer Parser) *IntervalSet // the token). This is better than forcing you to override a method in // your token objects because you don't have to go modify your lexer // so that it creates a NewJava type. -// func (d *DefaultErrorStrategy) GetTokenErrorDisplay(t Token) string { if t == nil { return "" @@ -578,7 +554,7 @@ func (d *DefaultErrorStrategy) escapeWSAndQuote(s string) string { // from within the rule i.e., the FIRST computation done by // ANTLR stops at the end of a rule. // -// EXAMPLE +// # EXAMPLE // // When you find a "no viable alt exception", the input is not // consistent with any of the alternatives for rule r. The best @@ -597,7 +573,6 @@ func (d *DefaultErrorStrategy) escapeWSAndQuote(s string) string { // c : ID // | INT // -// // At each rule invocation, the set of tokens that could follow // that rule is pushed on a stack. Here are the various // context-sensitive follow sets: @@ -660,7 +635,6 @@ func (d *DefaultErrorStrategy) escapeWSAndQuote(s string) string { // // Like Grosch I implement context-sensitive FOLLOW sets that are combined // at run-time upon error to avoid overhead during parsing. -// func (d *DefaultErrorStrategy) getErrorRecoverySet(recognizer Parser) *IntervalSet { atn := recognizer.GetInterpreter().atn ctx := recognizer.GetParserRuleContext() @@ -733,7 +707,6 @@ func NewBailErrorStrategy() *BailErrorStrategy { // in a {@link ParseCancellationException} so it is not caught by the // rule func catches. Use {@link Exception//getCause()} to get the // original {@link RecognitionException}. 
-// func (b *BailErrorStrategy) Recover(recognizer Parser, e RecognitionException) { context := recognizer.GetParserRuleContext() for context != nil { @@ -749,7 +722,6 @@ func (b *BailErrorStrategy) Recover(recognizer Parser, e RecognitionException) { // Make sure we don't attempt to recover inline if the parser // successfully recovers, it won't panic an exception. -// func (b *BailErrorStrategy) RecoverInline(recognizer Parser) Token { b.Recover(recognizer, NewInputMisMatchException(recognizer)) diff --git a/runtime/Go/antlr/errors.go b/runtime/Go/antlr/errors.go index 2ef74926ec..3954c13782 100644 --- a/runtime/Go/antlr/errors.go +++ b/runtime/Go/antlr/errors.go @@ -1,4 +1,4 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. // Use of this file is governed by the BSD 3-clause license that // can be found in the LICENSE.txt file in the project root. @@ -74,7 +74,6 @@ func (b *BaseRecognitionException) GetInputStream() IntStream { //

    If the state number is not known, b method returns -1.

    -// // Gets the set of input symbols which could potentially follow the // previously Matched symbol at the time b exception was panicn. // @@ -136,7 +135,6 @@ type NoViableAltException struct { // to take based upon the remaining input. It tracks the starting token // of the offending input and also knows where the parser was // in the various paths when the error. Reported by ReportNoViableAlternative() -// func NewNoViableAltException(recognizer Parser, input TokenStream, startToken Token, offendingToken Token, deadEndConfigs ATNConfigSet, ctx ParserRuleContext) *NoViableAltException { if ctx == nil { @@ -177,7 +175,6 @@ type InputMisMatchException struct { // This signifies any kind of mismatched input exceptions such as // when the current input does not Match the expected token. -// func NewInputMisMatchException(recognizer Parser) *InputMisMatchException { i := new(InputMisMatchException) diff --git a/runtime/Go/antlr/file_stream.go b/runtime/Go/antlr/file_stream.go index 842170c086..bd6ad5efe3 100644 --- a/runtime/Go/antlr/file_stream.go +++ b/runtime/Go/antlr/file_stream.go @@ -1,4 +1,4 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. // Use of this file is governed by the BSD 3-clause license that // can be found in the LICENSE.txt file in the project root. 
diff --git a/runtime/Go/antlr/go.mod b/runtime/Go/antlr/go.mod index 004cf34655..f1d253cc0a 100644 --- a/runtime/Go/antlr/go.mod +++ b/runtime/Go/antlr/go.mod @@ -1,3 +1,6 @@ +// Deprecated: Please switch to the new v4 module path: github.com/antlr/antlr4/runtime/Go/antlr/v4 - see https://github.com/antlr/antlr4/blob/master/doc/go-target.md module github.com/antlr/antlr4/runtime/Go/antlr -go 1.16 +go 1.18 + +require golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e diff --git a/runtime/Go/antlr/go.sum b/runtime/Go/antlr/go.sum new file mode 100644 index 0000000000..2b05f22a47 --- /dev/null +++ b/runtime/Go/antlr/go.sum @@ -0,0 +1,2 @@ +golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA= +golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA= diff --git a/runtime/Go/antlr/input_stream.go b/runtime/Go/antlr/input_stream.go index 5ff270f536..a8b889cedb 100644 --- a/runtime/Go/antlr/input_stream.go +++ b/runtime/Go/antlr/input_stream.go @@ -1,4 +1,4 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. // Use of this file is governed by the BSD 3-clause license that // can be found in the LICENSE.txt file in the project root. diff --git a/runtime/Go/antlr/int_stream.go b/runtime/Go/antlr/int_stream.go index 438e0ea6e7..4778878bd0 100644 --- a/runtime/Go/antlr/int_stream.go +++ b/runtime/Go/antlr/int_stream.go @@ -1,4 +1,4 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. // Use of this file is governed by the BSD 3-clause license that // can be found in the LICENSE.txt file in the project root. 
diff --git a/runtime/Go/antlr/interval_set.go b/runtime/Go/antlr/interval_set.go index 1e9393adb6..c1e155e818 100644 --- a/runtime/Go/antlr/interval_set.go +++ b/runtime/Go/antlr/interval_set.go @@ -1,4 +1,4 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. // Use of this file is governed by the BSD 3-clause license that // can be found in the LICENSE.txt file in the project root. @@ -223,6 +223,10 @@ func (i *IntervalSet) StringVerbose(literalNames []string, symbolicNames []strin return i.toIndexString() } +func (i *IntervalSet) GetIntervals() []*Interval { + return i.intervals +} + func (i *IntervalSet) toCharString() string { names := make([]string, len(i.intervals)) diff --git a/runtime/Go/antlr/jcollect.go b/runtime/Go/antlr/jcollect.go new file mode 100644 index 0000000000..8fb01c5bd9 --- /dev/null +++ b/runtime/Go/antlr/jcollect.go @@ -0,0 +1,195 @@ +package antlr + +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +import "sort" + +// Collectable is an interface that a struct should implement if it is to be +// usable as a key in these collections. +type Collectable[T any] interface { + Hash() int + Equals(other Collectable[T]) bool +} + +type Comparator[T any] interface { + Hash1(o T) int + Equals2(T, T) bool +} + +// JStore implements a container that allows the use of a struct to calculate the key +// for a collection of values akin to map. This is not meant to be a full-blown HashMap but just +// serve the needs of the ANTLR Go runtime. +// +// For ease of porting the logic of the runtime from the master target (Java), this collection +// operates in a similar way to Java, in that it can use any struct that supplies a Hash() and Equals() +// function as the key. 
The values are stored in a standard go map which internally is a form of hashmap +// itself, the key for the go map is the hash supplied by the key object. The collection is able to deal with +// hash conflicts by using a simple slice of values associated with the hash code indexed bucket. That isn't +// particularly efficient, but it is simple, and it works. As this is specifically for the ANTLR runtime, and +// we understand the requirements, then this is fine - this is not a general purpose collection. +type JStore[T any, C Comparator[T]] struct { + store map[int][]T + len int + comparator Comparator[T] +} + +func NewJStore[T any, C Comparator[T]](comparator Comparator[T]) *JStore[T, C] { + + if comparator == nil { + panic("comparator cannot be nil") + } + + s := &JStore[T, C]{ + store: make(map[int][]T, 1), + comparator: comparator, + } + return s +} + +// Put will store given value in the collection. Note that the key for storage is generated from +// the value itself - this is specifically because that is what ANTLR needs - this would not be useful +// as any kind of general collection. +// +// If the key has a hash conflict, then the value will be added to the slice of values associated with the +// hash, unless the value is already in the slice, in which case the existing value is returned. Value equivalence is +// tested by calling the equals() method on the key. +// +// # If the given value is already present in the store, then the existing value is returned as v and exists is set to true +// +// If the given value is not present in the store, then the value is added to the store and returned as v and exists is set to false. 
+func (s *JStore[T, C]) Put(value T) (v T, exists bool) { //nolint:ireturn + + kh := s.comparator.Hash1(value) + + for _, v1 := range s.store[kh] { + if s.comparator.Equals2(value, v1) { + return v1, true + } + } + s.store[kh] = append(s.store[kh], value) + s.len++ + return value, false +} + +// Get will return the value associated with the key - the type of the key is the same type as the value +// which would not generally be useful, but this is a specific thing for ANTLR where the key is +// generated using the object we are going to store. +func (s *JStore[T, C]) Get(key T) (T, bool) { //nolint:ireturn + + kh := s.comparator.Hash1(key) + + for _, v := range s.store[kh] { + if s.comparator.Equals2(key, v) { + return v, true + } + } + return key, false +} + +// Contains returns true if the given key is present in the store +func (s *JStore[T, C]) Contains(key T) bool { //nolint:ireturn + + _, present := s.Get(key) + return present +} + +func (s *JStore[T, C]) SortedSlice(less func(i, j T) bool) []T { + vs := make([]T, 0, len(s.store)) + for _, v := range s.store { + vs = append(vs, v...) 
+ } + sort.Slice(vs, func(i, j int) bool { + return less(vs[i], vs[j]) + }) + + return vs +} + +func (s *JStore[T, C]) Each(f func(T) bool) { + for _, e := range s.store { + for _, v := range e { + f(v) + } + } +} + +func (s *JStore[T, C]) Len() int { + return s.len +} + +func (s *JStore[T, C]) Values() []T { + vs := make([]T, 0, len(s.store)) + for _, e := range s.store { + for _, v := range e { + vs = append(vs, v) + } + } + return vs +} + +type entry[K, V any] struct { + key K + val V +} + +type JMap[K, V any, C Comparator[K]] struct { + store map[int][]*entry[K, V] + len int + comparator Comparator[K] +} + +func NewJMap[K, V any, C Comparator[K]](comparator Comparator[K]) *JMap[K, V, C] { + return &JMap[K, V, C]{ + store: make(map[int][]*entry[K, V], 1), + comparator: comparator, + } +} + +func (m *JMap[K, V, C]) Put(key K, val V) { + kh := m.comparator.Hash1(key) + m.store[kh] = append(m.store[kh], &entry[K, V]{key, val}) + m.len++ +} + +func (m *JMap[K, V, C]) Values() []V { + vs := make([]V, 0, len(m.store)) + for _, e := range m.store { + for _, v := range e { + vs = append(vs, v.val) + } + } + return vs +} + +func (m *JMap[K, V, C]) Get(key K) (V, bool) { + + var none V + kh := m.comparator.Hash1(key) + for _, e := range m.store[kh] { + if m.comparator.Equals2(e.key, key) { + return e.val, true + } + } + return none, false +} + +func (m *JMap[K, V, C]) Len() int { + return len(m.store) +} + +func (m *JMap[K, V, C]) Delete(key K) { + kh := m.comparator.Hash1(key) + for i, e := range m.store[kh] { + if m.comparator.Equals2(e.key, key) { + m.store[kh] = append(m.store[kh][:i], m.store[kh][i+1:]...) 
+ m.len-- + return + } + } +} + +func (m *JMap[K, V, C]) Clear() { + m.store = make(map[int][]*entry[K, V]) +} diff --git a/runtime/Go/antlr/jcollect_test.go b/runtime/Go/antlr/jcollect_test.go new file mode 100644 index 0000000000..816307a02c --- /dev/null +++ b/runtime/Go/antlr/jcollect_test.go @@ -0,0 +1,15 @@ +package antlr + +import "testing" + +func Test_try(t *testing.T) { + tests := []struct { + name string + }{ + {"Test_try"}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + }) + } +} diff --git a/runtime/Go/antlr/lexer.go b/runtime/Go/antlr/lexer.go index b04f04572f..6533f05164 100644 --- a/runtime/Go/antlr/lexer.go +++ b/runtime/Go/antlr/lexer.go @@ -1,4 +1,4 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. // Use of this file is governed by the BSD 3-clause license that // can be found in the LICENSE.txt file in the project root. @@ -232,8 +232,6 @@ func (b *BaseLexer) NextToken() Token { } return b.token } - - return nil } // Instruct the lexer to Skip creating a token for current lexer rule @@ -342,7 +340,7 @@ func (b *BaseLexer) GetCharIndex() int { } // Return the text Matched so far for the current token or any text override. -//Set the complete text of l token it wipes any previous changes to the text. +// Set the complete text of l token it wipes any previous changes to the text. func (b *BaseLexer) GetText() string { if b.text != "" { return b.text diff --git a/runtime/Go/antlr/lexer_action.go b/runtime/Go/antlr/lexer_action.go index 5a325be137..111656c295 100644 --- a/runtime/Go/antlr/lexer_action.go +++ b/runtime/Go/antlr/lexer_action.go @@ -1,4 +1,4 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. // Use of this file is governed by the BSD 3-clause license that // can be found in the LICENSE.txt file in the project root. 
@@ -21,8 +21,8 @@ type LexerAction interface { getActionType() int getIsPositionDependent() bool execute(lexer Lexer) - hash() int - equals(other LexerAction) bool + Hash() int + Equals(other LexerAction) bool } type BaseLexerAction struct { @@ -51,15 +51,14 @@ func (b *BaseLexerAction) getIsPositionDependent() bool { return b.isPositionDependent } -func (b *BaseLexerAction) hash() int { +func (b *BaseLexerAction) Hash() int { return b.actionType } -func (b *BaseLexerAction) equals(other LexerAction) bool { +func (b *BaseLexerAction) Equals(other LexerAction) bool { return b == other } -// // Implements the {@code Skip} lexer action by calling {@link Lexer//Skip}. // //

    The {@code Skip} command does not have any parameters, so l action is @@ -85,7 +84,8 @@ func (l *LexerSkipAction) String() string { return "skip" } -// Implements the {@code type} lexer action by calling {@link Lexer//setType} +// Implements the {@code type} lexer action by calling {@link Lexer//setType} +// // with the assigned type. type LexerTypeAction struct { *BaseLexerAction @@ -104,14 +104,14 @@ func (l *LexerTypeAction) execute(lexer Lexer) { lexer.SetType(l.thetype) } -func (l *LexerTypeAction) hash() int { +func (l *LexerTypeAction) Hash() int { h := murmurInit(0) h = murmurUpdate(h, l.actionType) h = murmurUpdate(h, l.thetype) return murmurFinish(h, 2) } -func (l *LexerTypeAction) equals(other LexerAction) bool { +func (l *LexerTypeAction) Equals(other LexerAction) bool { if l == other { return true } else if _, ok := other.(*LexerTypeAction); !ok { @@ -148,14 +148,14 @@ func (l *LexerPushModeAction) execute(lexer Lexer) { lexer.PushMode(l.mode) } -func (l *LexerPushModeAction) hash() int { +func (l *LexerPushModeAction) Hash() int { h := murmurInit(0) h = murmurUpdate(h, l.actionType) h = murmurUpdate(h, l.mode) return murmurFinish(h, 2) } -func (l *LexerPushModeAction) equals(other LexerAction) bool { +func (l *LexerPushModeAction) Equals(other LexerAction) bool { if l == other { return true } else if _, ok := other.(*LexerPushModeAction); !ok { @@ -245,14 +245,14 @@ func (l *LexerModeAction) execute(lexer Lexer) { lexer.SetMode(l.mode) } -func (l *LexerModeAction) hash() int { +func (l *LexerModeAction) Hash() int { h := murmurInit(0) h = murmurUpdate(h, l.actionType) h = murmurUpdate(h, l.mode) return murmurFinish(h, 2) } -func (l *LexerModeAction) equals(other LexerAction) bool { +func (l *LexerModeAction) Equals(other LexerAction) bool { if l == other { return true } else if _, ok := other.(*LexerModeAction); !ok { @@ -303,7 +303,7 @@ func (l *LexerCustomAction) execute(lexer Lexer) { lexer.Action(nil, l.ruleIndex, l.actionIndex) } -func (l 
*LexerCustomAction) hash() int { +func (l *LexerCustomAction) Hash() int { h := murmurInit(0) h = murmurUpdate(h, l.actionType) h = murmurUpdate(h, l.ruleIndex) @@ -311,13 +311,14 @@ func (l *LexerCustomAction) hash() int { return murmurFinish(h, 3) } -func (l *LexerCustomAction) equals(other LexerAction) bool { +func (l *LexerCustomAction) Equals(other LexerAction) bool { if l == other { return true } else if _, ok := other.(*LexerCustomAction); !ok { return false } else { - return l.ruleIndex == other.(*LexerCustomAction).ruleIndex && l.actionIndex == other.(*LexerCustomAction).actionIndex + return l.ruleIndex == other.(*LexerCustomAction).ruleIndex && + l.actionIndex == other.(*LexerCustomAction).actionIndex } } @@ -344,14 +345,14 @@ func (l *LexerChannelAction) execute(lexer Lexer) { lexer.SetChannel(l.channel) } -func (l *LexerChannelAction) hash() int { +func (l *LexerChannelAction) Hash() int { h := murmurInit(0) h = murmurUpdate(h, l.actionType) h = murmurUpdate(h, l.channel) return murmurFinish(h, 2) } -func (l *LexerChannelAction) equals(other LexerAction) bool { +func (l *LexerChannelAction) Equals(other LexerAction) bool { if l == other { return true } else if _, ok := other.(*LexerChannelAction); !ok { @@ -412,10 +413,10 @@ func (l *LexerIndexedCustomAction) execute(lexer Lexer) { l.lexerAction.execute(lexer) } -func (l *LexerIndexedCustomAction) hash() int { +func (l *LexerIndexedCustomAction) Hash() int { h := murmurInit(0) h = murmurUpdate(h, l.offset) - h = murmurUpdate(h, l.lexerAction.hash()) + h = murmurUpdate(h, l.lexerAction.Hash()) return murmurFinish(h, 2) } @@ -425,6 +426,7 @@ func (l *LexerIndexedCustomAction) equals(other LexerAction) bool { } else if _, ok := other.(*LexerIndexedCustomAction); !ok { return false } else { - return l.offset == other.(*LexerIndexedCustomAction).offset && l.lexerAction == other.(*LexerIndexedCustomAction).lexerAction + return l.offset == other.(*LexerIndexedCustomAction).offset && + 
l.lexerAction.Equals(other.(*LexerIndexedCustomAction).lexerAction) } } diff --git a/runtime/Go/antlr/lexer_action_executor.go b/runtime/Go/antlr/lexer_action_executor.go index 056941dd6e..be1ba7a7e3 100644 --- a/runtime/Go/antlr/lexer_action_executor.go +++ b/runtime/Go/antlr/lexer_action_executor.go @@ -1,9 +1,11 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. // Use of this file is governed by the BSD 3-clause license that // can be found in the LICENSE.txt file in the project root. package antlr +import "golang.org/x/exp/slices" + // Represents an executor for a sequence of lexer actions which traversed during // the Matching operation of a lexer rule (token). // @@ -12,8 +14,8 @@ package antlr // not cause bloating of the {@link DFA} created for the lexer.

    type LexerActionExecutor struct { - lexerActions []LexerAction - cachedHash int + lexerActions []LexerAction + cachedHash int } func NewLexerActionExecutor(lexerActions []LexerAction) *LexerActionExecutor { @@ -30,7 +32,7 @@ func NewLexerActionExecutor(lexerActions []LexerAction) *LexerActionExecutor { // of the performance-critical {@link LexerATNConfig//hashCode} operation. l.cachedHash = murmurInit(57) for _, a := range lexerActions { - l.cachedHash = murmurUpdate(l.cachedHash, a.hash()) + l.cachedHash = murmurUpdate(l.cachedHash, a.Hash()) } return l @@ -151,14 +153,17 @@ func (l *LexerActionExecutor) execute(lexer Lexer, input CharStream, startIndex } } -func (l *LexerActionExecutor) hash() int { +func (l *LexerActionExecutor) Hash() int { if l == nil { + // TODO: Why is this here? l should not be nil return 61 } + + // TODO: This is created from the action itself when the struct is created - will this be an issue at some point? Java uses the runtime assign hashcode return l.cachedHash } -func (l *LexerActionExecutor) equals(other interface{}) bool { +func (l *LexerActionExecutor) Equals(other interface{}) bool { if l == other { return true } @@ -169,5 +174,13 @@ func (l *LexerActionExecutor) equals(other interface{}) bool { if othert == nil { return false } - return l.cachedHash == othert.cachedHash && &l.lexerActions == &othert.lexerActions + if l.cachedHash != othert.cachedHash { + return false + } + if len(l.lexerActions) != len(othert.lexerActions) { + return false + } + return slices.EqualFunc(l.lexerActions, othert.lexerActions, func(i, j LexerAction) bool { + return i.Equals(j) + }) } diff --git a/runtime/Go/antlr/lexer_atn_simulator.go b/runtime/Go/antlr/lexer_atn_simulator.go index dc05153ea4..c573b75210 100644 --- a/runtime/Go/antlr/lexer_atn_simulator.go +++ b/runtime/Go/antlr/lexer_atn_simulator.go @@ -1,4 +1,4 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Copyright (c) 2012-2022 The ANTLR Project. 
All rights reserved. // Use of this file is governed by the BSD 3-clause license that // can be found in the LICENSE.txt file in the project root. @@ -591,19 +591,24 @@ func (l *LexerATNSimulator) addDFAState(configs ATNConfigSet, suppressEdge bool) proposed.lexerActionExecutor = firstConfigWithRuleStopState.(*LexerATNConfig).lexerActionExecutor proposed.setPrediction(l.atn.ruleToTokenType[firstConfigWithRuleStopState.GetState().GetRuleIndex()]) } - hash := proposed.hash() dfa := l.decisionToDFA[l.mode] l.atn.stateMu.Lock() defer l.atn.stateMu.Unlock() - existing, ok := dfa.getState(hash) - if ok { + existing, present := dfa.states.Get(proposed) + if present { + + // This state was already present, so just return it. + // proposed = existing } else { - proposed.stateNumber = dfa.numStates() + + // We need to add the new state + // + proposed.stateNumber = dfa.states.Len() configs.SetReadOnly(true) proposed.configs = configs - dfa.setState(hash, proposed) + dfa.states.Put(proposed) } if !suppressEdge { dfa.setS0(proposed) diff --git a/runtime/Go/antlr/ll1_analyzer.go b/runtime/Go/antlr/ll1_analyzer.go index 6ffb37de69..a9e202d041 100644 --- a/runtime/Go/antlr/ll1_analyzer.go +++ b/runtime/Go/antlr/ll1_analyzer.go @@ -1,4 +1,4 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. // Use of this file is governed by the BSD 3-clause license that // can be found in the LICENSE.txt file in the project root. @@ -14,14 +14,15 @@ func NewLL1Analyzer(atn *ATN) *LL1Analyzer { return la } -//* Special value added to the lookahead sets to indicate that we hit -// a predicate during analysis if {@code seeThruPreds==false}. -/// +// - Special value added to the lookahead sets to indicate that we hit +// a predicate during analysis if {@code seeThruPreds==false}. 
+// +// / const ( LL1AnalyzerHitPred = TokenInvalidType ) -//* +// * // Calculates the SLL(1) expected lookahead set for each outgoing transition // of an {@link ATNState}. The returned array has one element for each // outgoing transition in {@code s}. If the closure from transition @@ -38,7 +39,7 @@ func (la *LL1Analyzer) getDecisionLookahead(s ATNState) []*IntervalSet { look := make([]*IntervalSet, count) for alt := 0; alt < count; alt++ { look[alt] = NewIntervalSet() - lookBusy := newArray2DHashSet(nil, nil) + lookBusy := NewJStore[ATNConfig, Comparator[ATNConfig]](&ObjEqComparator[ATNConfig]{}) seeThruPreds := false // fail to get lookahead upon pred la.look1(s.GetTransitions()[alt].getTarget(), nil, BasePredictionContextEMPTY, look[alt], lookBusy, NewBitSet(), seeThruPreds, false) // Wipe out lookahead for la alternative if we found nothing @@ -50,7 +51,7 @@ func (la *LL1Analyzer) getDecisionLookahead(s ATNState) []*IntervalSet { return look } -//* +// * // Compute set of tokens that can follow {@code s} in the ATN in the // specified {@code ctx}. // @@ -67,7 +68,7 @@ func (la *LL1Analyzer) getDecisionLookahead(s ATNState) []*IntervalSet { // // @return The set of tokens that can follow {@code s} in the ATN in the // specified {@code ctx}. 
-/// +// / func (la *LL1Analyzer) Look(s, stopState ATNState, ctx RuleContext) *IntervalSet { r := NewIntervalSet() seeThruPreds := true // ignore preds get all lookahead @@ -75,7 +76,7 @@ func (la *LL1Analyzer) Look(s, stopState ATNState, ctx RuleContext) *IntervalSet if ctx != nil { lookContext = predictionContextFromRuleContext(s.GetATN(), ctx) } - la.look1(s, stopState, lookContext, r, newArray2DHashSet(nil, nil), NewBitSet(), seeThruPreds, true) + la.look1(s, stopState, lookContext, r, NewJStore[ATNConfig, Comparator[ATNConfig]](&ObjEqComparator[ATNConfig]{}), NewBitSet(), seeThruPreds, true) return r } @@ -109,14 +110,14 @@ func (la *LL1Analyzer) Look(s, stopState ATNState, ctx RuleContext) *IntervalSet // outermost context is reached. This parameter has no effect if {@code ctx} // is {@code nil}. -func (la *LL1Analyzer) look2(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy Set, calledRuleStack *BitSet, seeThruPreds, addEOF bool, i int) { +func (la *LL1Analyzer) look2(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *JStore[ATNConfig, Comparator[ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool, i int) { returnState := la.atn.states[ctx.getReturnState(i)] la.look1(returnState, stopState, ctx.GetParent(i), look, lookBusy, calledRuleStack, seeThruPreds, addEOF) } -func (la *LL1Analyzer) look1(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy Set, calledRuleStack *BitSet, seeThruPreds, addEOF bool) { +func (la *LL1Analyzer) look1(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *JStore[ATNConfig, Comparator[ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool) { c := NewBaseATNConfig6(s, 0, ctx) @@ -124,8 +125,11 @@ func (la *LL1Analyzer) look1(s, stopState ATNState, ctx PredictionContext, look return } - lookBusy.Add(c) + _, present := lookBusy.Put(c) + if present { + return + } if s == stopState { if ctx == nil { 
look.addOne(TokenEpsilon) @@ -198,7 +202,7 @@ func (la *LL1Analyzer) look1(s, stopState ATNState, ctx PredictionContext, look } } -func (la *LL1Analyzer) look3(stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy Set, calledRuleStack *BitSet, seeThruPreds, addEOF bool, t1 *RuleTransition) { +func (la *LL1Analyzer) look3(stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *JStore[ATNConfig, Comparator[ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool, t1 *RuleTransition) { newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber()) diff --git a/runtime/Go/antlr/parser.go b/runtime/Go/antlr/parser.go index 2ab2f56052..d26bf06392 100644 --- a/runtime/Go/antlr/parser.go +++ b/runtime/Go/antlr/parser.go @@ -1,4 +1,4 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. // Use of this file is governed by the BSD 3-clause license that // can be found in the LICENSE.txt file in the project root. @@ -91,7 +91,6 @@ func NewBaseParser(input TokenStream) *BaseParser { // bypass alternatives. // // @see ATNDeserializationOptions//isGenerateRuleBypassTransitions() -// var bypassAltsAtnCache = make(map[string]int) // reset the parser's state// @@ -230,7 +229,6 @@ func (p *BaseParser) GetParseListeners() []ParseTreeListener { // @param listener the listener to add // // @panics nilPointerException if {@code} listener is {@code nil} -// func (p *BaseParser) AddParseListener(listener ParseTreeListener) { if listener == nil { panic("listener") @@ -241,13 +239,11 @@ func (p *BaseParser) AddParseListener(listener ParseTreeListener) { p.parseListeners = append(p.parseListeners, listener) } -// // Remove {@code listener} from the list of parse listeners. // //

    If {@code listener} is {@code nil} or has not been added as a parse // listener, p.method does nothing.

    // @param listener the listener to remove -// func (p *BaseParser) RemoveParseListener(listener ParseTreeListener) { if p.parseListeners != nil { @@ -289,11 +285,9 @@ func (p *BaseParser) TriggerEnterRuleEvent() { } } -// // Notify any parse listeners of an exit rule event. // // @see //addParseListener -// func (p *BaseParser) TriggerExitRuleEvent() { if p.parseListeners != nil { // reverse order walk of listeners @@ -330,7 +324,6 @@ func (p *BaseParser) setTokenFactory(factory TokenFactory) { // // @panics UnsupportedOperationException if the current parser does not // implement the {@link //getSerializedATN()} method. -// func (p *BaseParser) GetATNWithBypassAlts() { // TODO @@ -402,7 +395,6 @@ func (p *BaseParser) SetTokenStream(input TokenStream) { // Match needs to return the current input symbol, which gets put // into the label for the associated token ref e.g., x=ID. -// func (p *BaseParser) GetCurrentToken() Token { return p.input.LT(1) } @@ -624,7 +616,6 @@ func (p *BaseParser) IsExpectedToken(symbol int) bool { // respectively. // // @see ATN//getExpectedTokens(int, RuleContext) -// func (p *BaseParser) GetExpectedTokens() *IntervalSet { return p.Interpreter.atn.getExpectedTokens(p.state, p.ctx) } @@ -686,7 +677,7 @@ func (p *BaseParser) GetDFAStrings() string { func (p *BaseParser) DumpDFA() { seenOne := false for _, dfa := range p.Interpreter.decisionToDFA { - if dfa.numStates() > 0 { + if dfa.states.Len() > 0 { if seenOne { fmt.Println() } @@ -703,7 +694,6 @@ func (p *BaseParser) GetSourceName() string { // During a parse is sometimes useful to listen in on the rule entry and exit // events as well as token Matches. p.is for quick and dirty debugging. 
-// func (p *BaseParser) SetTrace(trace *TraceListener) { if trace == nil { p.RemoveParseListener(p.tracer) diff --git a/runtime/Go/antlr/parser_atn_simulator.go b/runtime/Go/antlr/parser_atn_simulator.go index 888d512975..c780e3c5f2 100644 --- a/runtime/Go/antlr/parser_atn_simulator.go +++ b/runtime/Go/antlr/parser_atn_simulator.go @@ -1,4 +1,4 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. // Use of this file is governed by the BSD 3-clause license that // can be found in the LICENSE.txt file in the project root. @@ -111,7 +111,7 @@ func (p *ParserATNSimulator) AdaptivePredict(input TokenStream, decision int, ou if s0 == nil { if outerContext == nil { - outerContext = RuleContextEmpty + outerContext = ParserRuleContextEmpty } if ParserATNSimulatorDebug || ParserATNSimulatorListATNDecisions { fmt.Println("predictATN decision " + strconv.Itoa(dfa.decision) + @@ -119,7 +119,7 @@ func (p *ParserATNSimulator) AdaptivePredict(input TokenStream, decision int, ou ", outerContext=" + outerContext.String(p.parser.GetRuleNames(), nil)) } fullCtx := false - s0Closure := p.computeStartState(dfa.atnStartState, RuleContextEmpty, fullCtx) + s0Closure := p.computeStartState(dfa.atnStartState, ParserRuleContextEmpty, fullCtx) p.atn.stateMu.Lock() if dfa.getPrecedenceDfa() { @@ -174,12 +174,12 @@ func (p *ParserATNSimulator) AdaptivePredict(input TokenStream, decision int, ou // Reporting insufficient predicates // cover these cases: -// dead end -// single alt -// single alt + preds -// conflict -// conflict + preds // +// dead end +// single alt +// single alt + preds +// conflict +// conflict + preds func (p *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream, startIndex int, outerContext ParserRuleContext) int { if ParserATNSimulatorDebug || ParserATNSimulatorListATNDecisions { @@ -277,8 +277,6 @@ func (p *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input 
TokenStream, t = input.LA(1) } } - - panic("Should not have reached p state") } // Get an existing target state for an edge in the DFA. If the target state @@ -570,7 +568,7 @@ func (p *ParserATNSimulator) computeReachSet(closure ATNConfigSet, t int, fullCt // if reach == nil { reach = NewBaseATNConfigSet(fullCtx) - closureBusy := newArray2DHashSet(nil, nil) + closureBusy := NewJStore[ATNConfig, Comparator[ATNConfig]](&ObjEqComparator[ATNConfig]{}) treatEOFAsEpsilon := t == TokenEOF amount := len(intermediate.configs) for k := 0; k < amount; k++ { @@ -617,7 +615,6 @@ func (p *ParserATNSimulator) computeReachSet(closure ATNConfigSet, t int, fullCt return reach } -// // Return a configuration set containing only the configurations from // {@code configs} which are in a {@link RuleStopState}. If all // configurations in {@code configs} are already in a rule stop state, p @@ -636,7 +633,6 @@ func (p *ParserATNSimulator) computeReachSet(closure ATNConfigSet, t int, fullCt // @return {@code configs} if all configurations in {@code configs} are in a // rule stop state, otherwise return a Newconfiguration set containing only // the configurations from {@code configs} which are in a rule stop state -// func (p *ParserATNSimulator) removeAllConfigsNotInRuleStopState(configs ATNConfigSet, lookToEndOfRule bool) ATNConfigSet { if PredictionModeallConfigsInRuleStopStates(configs) { return configs @@ -665,13 +661,12 @@ func (p *ParserATNSimulator) computeStartState(a ATNState, ctx RuleContext, full for i := 0; i < len(a.GetTransitions()); i++ { target := a.GetTransitions()[i].getTarget() c := NewBaseATNConfig6(target, i+1, initialContext) - closureBusy := newArray2DHashSet(nil, nil) + closureBusy := NewJStore[ATNConfig, Comparator[ATNConfig]](&BaseATNConfigComparator[ATNConfig]{}) p.closure(c, configs, closureBusy, true, fullCtx, false) } return configs } -// // This method transforms the start state computed by // {@link //computeStartState} to the special start state used by a 
// precedence DFA for a particular precedence value. The transformation @@ -726,7 +721,6 @@ func (p *ParserATNSimulator) computeStartState(a ATNState, ctx RuleContext, full // @return The transformed configuration set representing the start state // for a precedence DFA at a particular precedence level (determined by // calling {@link Parser//getPrecedence}). -// func (p *ParserATNSimulator) applyPrecedenceFilter(configs ATNConfigSet) ATNConfigSet { statesFromAlt1 := make(map[int]PredictionContext) @@ -760,7 +754,7 @@ func (p *ParserATNSimulator) applyPrecedenceFilter(configs ATNConfigSet) ATNConf // (basically a graph subtraction algorithm). if !config.getPrecedenceFilterSuppressed() { context := statesFromAlt1[config.GetState().GetStateNumber()] - if context != nil && context.equals(config.GetContext()) { + if context != nil && context.Equals(config.GetContext()) { // eliminated continue } @@ -824,7 +818,6 @@ func (p *ParserATNSimulator) getPredicatePredictions(ambigAlts *BitSet, altToPre return pairs } -// // This method is used to improve the localization of error messages by // choosing an alternative rather than panicing a // {@link NoViableAltException} in particular prediction scenarios where the @@ -869,7 +862,6 @@ func (p *ParserATNSimulator) getPredicatePredictions(ambigAlts *BitSet, altToPre // @return The value to return from {@link //AdaptivePredict}, or // {@link ATN//INVALID_ALT_NUMBER} if a suitable alternative was not // identified and {@link //AdaptivePredict} should Report an error instead. -// func (p *ParserATNSimulator) getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(configs ATNConfigSet, outerContext ParserRuleContext) int { cfgs := p.splitAccordingToSemanticValidity(configs, outerContext) semValidConfigs := cfgs[0] @@ -938,11 +930,11 @@ func (p *ParserATNSimulator) splitAccordingToSemanticValidity(configs ATNConfigS } // Look through a list of predicate/alt pairs, returning alts for the -// pairs that win. 
A {@code NONE} predicate indicates an alt containing an -// unpredicated config which behaves as "always true." If !complete -// then we stop at the first predicate that evaluates to true. This -// includes pairs with nil predicates. // +// pairs that win. A {@code NONE} predicate indicates an alt containing an +// unpredicated config which behaves as "always true." If !complete +// then we stop at the first predicate that evaluates to true. This +// includes pairs with nil predicates. func (p *ParserATNSimulator) evalSemanticContext(predPredictions []*PredPrediction, outerContext ParserRuleContext, complete bool) *BitSet { predictions := NewBitSet() for i := 0; i < len(predPredictions); i++ { @@ -972,13 +964,13 @@ func (p *ParserATNSimulator) evalSemanticContext(predPredictions []*PredPredicti return predictions } -func (p *ParserATNSimulator) closure(config ATNConfig, configs ATNConfigSet, closureBusy Set, collectPredicates, fullCtx, treatEOFAsEpsilon bool) { +func (p *ParserATNSimulator) closure(config ATNConfig, configs ATNConfigSet, closureBusy *JStore[ATNConfig, Comparator[ATNConfig]], collectPredicates, fullCtx, treatEOFAsEpsilon bool) { initialDepth := 0 p.closureCheckingStopState(config, configs, closureBusy, collectPredicates, fullCtx, initialDepth, treatEOFAsEpsilon) } -func (p *ParserATNSimulator) closureCheckingStopState(config ATNConfig, configs ATNConfigSet, closureBusy Set, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) { +func (p *ParserATNSimulator) closureCheckingStopState(config ATNConfig, configs ATNConfigSet, closureBusy *JStore[ATNConfig, Comparator[ATNConfig]], collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) { if ParserATNSimulatorDebug { fmt.Println("closure(" + config.String() + ")") fmt.Println("configs(" + configs.String() + ")") @@ -1031,7 +1023,7 @@ func (p *ParserATNSimulator) closureCheckingStopState(config ATNConfig, configs } // Do the actual work of walking epsilon edges// -func (p 
*ParserATNSimulator) closureWork(config ATNConfig, configs ATNConfigSet, closureBusy Set, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) { +func (p *ParserATNSimulator) closureWork(config ATNConfig, configs ATNConfigSet, closureBusy *JStore[ATNConfig, Comparator[ATNConfig]], collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) { state := config.GetState() // optimization if !state.GetEpsilonOnlyTransitions() { @@ -1066,7 +1058,8 @@ func (p *ParserATNSimulator) closureWork(config ATNConfig, configs ATNConfigSet, c.SetReachesIntoOuterContext(c.GetReachesIntoOuterContext() + 1) - if closureBusy.Add(c) != c { + _, present := closureBusy.Put(c) + if present { // avoid infinite recursion for right-recursive rules continue } @@ -1077,9 +1070,13 @@ func (p *ParserATNSimulator) closureWork(config ATNConfig, configs ATNConfigSet, fmt.Println("dips into outer ctx: " + c.String()) } } else { - if !t.getIsEpsilon() && closureBusy.Add(c) != c { - // avoid infinite recursion for EOF* and EOF+ - continue + + if !t.getIsEpsilon() { + _, present := closureBusy.Put(c) + if present { + // avoid infinite recursion for EOF* and EOF+ + continue + } } if _, ok := t.(*RuleTransition); ok { // latch when newDepth goes negative - once we step out of the entry context we can't return @@ -1104,7 +1101,16 @@ func (p *ParserATNSimulator) canDropLoopEntryEdgeInLeftRecursiveRule(config ATNC // left-recursion elimination. For efficiency, also check if // the context has an empty stack case. 
If so, it would mean // global FOLLOW so we can't perform optimization - if startLoop, ok := _p.(StarLoopEntryState); !ok || !startLoop.precedenceRuleDecision || config.GetContext().isEmpty() || config.GetContext().hasEmptyPath() { + if _p.GetStateType() != ATNStateStarLoopEntry { + return false + } + startLoop, ok := _p.(*StarLoopEntryState) + if !ok { + return false + } + if !startLoop.precedenceRuleDecision || + config.GetContext().isEmpty() || + config.GetContext().hasEmptyPath() { return false } @@ -1117,8 +1123,8 @@ func (p *ParserATNSimulator) canDropLoopEntryEdgeInLeftRecursiveRule(config ATNC return false } } - - decisionStartState := _p.(BlockStartState).GetTransitions()[0].getTarget().(BlockStartState) + x := _p.GetTransitions()[0].getTarget() + decisionStartState := x.(BlockStartState) blockEndStateNum := decisionStartState.getEndState().stateNumber blockEndState := p.atn.states[blockEndStateNum].(*BlockEndState) @@ -1372,9 +1378,9 @@ func (p *ParserATNSimulator) getLookaheadName(input TokenStream) string { } // Used for debugging in AdaptivePredict around execATN but I cut -// it out for clarity now that alg. works well. We can leave p -// "dead" code for a bit. // +// it out for clarity now that alg. works well. We can leave p +// "dead" code for a bit. func (p *ParserATNSimulator) dumpDeadEndConfigs(nvae *NoViableAltException) { panic("Not implemented") @@ -1421,7 +1427,6 @@ func (p *ParserATNSimulator) getUniqueAlt(configs ATNConfigSet) int { return alt } -// // Add an edge to the DFA, if possible. This method calls // {@link //addDFAState} to ensure the {@code to} state is present in the // DFA. 
If {@code from} is {@code nil}, or if {@code t} is outside the @@ -1440,7 +1445,6 @@ func (p *ParserATNSimulator) getUniqueAlt(configs ATNConfigSet) int { // @return If {@code to} is {@code nil}, p method returns {@code nil} // otherwise p method returns the result of calling {@link //addDFAState} // on {@code to} -// func (p *ParserATNSimulator) addDFAEdge(dfa *DFA, from *DFAState, t int, to *DFAState) *DFAState { if ParserATNSimulatorDebug { fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + p.GetTokenName(t)) @@ -1472,7 +1476,6 @@ func (p *ParserATNSimulator) addDFAEdge(dfa *DFA, from *DFAState, t int, to *DFA return to } -// // Add state {@code D} to the DFA if it is not already present, and return // the actual instance stored in the DFA. If a state equivalent to {@code D} // is already in the DFA, the existing state is returned. Otherwise p @@ -1486,25 +1489,27 @@ func (p *ParserATNSimulator) addDFAEdge(dfa *DFA, from *DFAState, t int, to *DFA // @return The state stored in the DFA. This will be either the existing // state if {@code D} is already in the DFA, or {@code D} itself if the // state was not already present. 
-// func (p *ParserATNSimulator) addDFAState(dfa *DFA, d *DFAState) *DFAState { if d == ATNSimulatorError { return d } - hash := d.hash() - existing, ok := dfa.getState(hash) - if ok { + existing, present := dfa.states.Get(d) + if present { return existing } - d.stateNumber = dfa.numStates() + + // The state was not present, so update it with configs + // + d.stateNumber = dfa.states.Len() if !d.configs.ReadOnly() { d.configs.OptimizeConfigs(p.BaseATNSimulator) d.configs.SetReadOnly(true) } - dfa.setState(hash, d) + dfa.states.Put(d) if ParserATNSimulatorDebug { fmt.Println("adding NewDFA state: " + d.String()) } + return d } diff --git a/runtime/Go/antlr/parser_rule_context.go b/runtime/Go/antlr/parser_rule_context.go index 49cd10c5ff..1c8cee7479 100644 --- a/runtime/Go/antlr/parser_rule_context.go +++ b/runtime/Go/antlr/parser_rule_context.go @@ -1,4 +1,4 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. // Use of this file is governed by the BSD 3-clause license that // can be found in the LICENSE.txt file in the project root. @@ -340,7 +340,7 @@ func (prc *BaseParserRuleContext) String(ruleNames []string, stop RuleContext) s return s } -var RuleContextEmpty = NewBaseParserRuleContext(nil, -1) +var ParserRuleContextEmpty = NewBaseParserRuleContext(nil, -1) type InterpreterRuleContext interface { ParserRuleContext diff --git a/runtime/Go/antlr/prediction_context.go b/runtime/Go/antlr/prediction_context.go index 9fdfd52b26..4fcad69a9c 100644 --- a/runtime/Go/antlr/prediction_context.go +++ b/runtime/Go/antlr/prediction_context.go @@ -1,10 +1,11 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. // Use of this file is governed by the BSD 3-clause license that // can be found in the LICENSE.txt file in the project root. 
package antlr import ( + "golang.org/x/exp/slices" "strconv" ) @@ -26,10 +27,10 @@ var ( ) type PredictionContext interface { - hash() int + Hash() int + Equals(interface{}) bool GetParent(int) PredictionContext getReturnState(int) int - equals(PredictionContext) bool length() int isEmpty() bool hasEmptyPath() bool @@ -53,7 +54,7 @@ func (b *BasePredictionContext) isEmpty() bool { func calculateHash(parent PredictionContext, returnState int) int { h := murmurInit(1) - h = murmurUpdate(h, parent.hash()) + h = murmurUpdate(h, parent.Hash()) h = murmurUpdate(h, returnState) return murmurFinish(h, 2) } @@ -86,7 +87,6 @@ func NewPredictionContextCache() *PredictionContextCache { // Add a context to the cache and return it. If the context already exists, // return that one instead and do not add a Newcontext to the cache. // Protect shared cache from unsafe thread access. -// func (p *PredictionContextCache) add(ctx PredictionContext) PredictionContext { if ctx == BasePredictionContextEMPTY { return BasePredictionContextEMPTY @@ -160,28 +160,28 @@ func (b *BaseSingletonPredictionContext) hasEmptyPath() bool { return b.returnState == BasePredictionContextEmptyReturnState } -func (b *BaseSingletonPredictionContext) equals(other PredictionContext) bool { +func (b *BaseSingletonPredictionContext) Hash() int { + return b.cachedHash +} + +func (b *BaseSingletonPredictionContext) Equals(other interface{}) bool { if b == other { return true - } else if _, ok := other.(*BaseSingletonPredictionContext); !ok { + } + if _, ok := other.(*BaseSingletonPredictionContext); !ok { return false - } else if b.hash() != other.hash() { - return false // can't be same if hash is different } otherP := other.(*BaseSingletonPredictionContext) - if b.returnState != other.getReturnState(0) { + if b.returnState != otherP.getReturnState(0) { return false - } else if b.parentCtx == nil { + } + if b.parentCtx == nil { return otherP.parentCtx == nil } - return b.parentCtx.equals(otherP.parentCtx) -} - 
-func (b *BaseSingletonPredictionContext) hash() int { - return b.cachedHash + return b.parentCtx.Equals(otherP.parentCtx) } func (b *BaseSingletonPredictionContext) String() string { @@ -215,7 +215,7 @@ func NewEmptyPredictionContext() *EmptyPredictionContext { p := new(EmptyPredictionContext) p.BaseSingletonPredictionContext = NewBaseSingletonPredictionContext(nil, BasePredictionContextEmptyReturnState) - + p.cachedHash = calculateEmptyHash() return p } @@ -231,7 +231,11 @@ func (e *EmptyPredictionContext) getReturnState(index int) int { return e.returnState } -func (e *EmptyPredictionContext) equals(other PredictionContext) bool { +func (e *EmptyPredictionContext) Hash() int { + return e.cachedHash +} + +func (e *EmptyPredictionContext) Equals(other interface{}) bool { return e == other } @@ -254,7 +258,7 @@ func NewArrayPredictionContext(parents []PredictionContext, returnStates []int) hash := murmurInit(1) for _, parent := range parents { - hash = murmurUpdate(hash, parent.hash()) + hash = murmurUpdate(hash, parent.Hash()) } for _, returnState := range returnStates { @@ -298,18 +302,31 @@ func (a *ArrayPredictionContext) getReturnState(index int) int { return a.returnStates[index] } -func (a *ArrayPredictionContext) equals(other PredictionContext) bool { - if _, ok := other.(*ArrayPredictionContext); !ok { +// Equals is the default comparison function for ArrayPredictionContext when no specialized +// implementation is needed for a collection +func (a *ArrayPredictionContext) Equals(o interface{}) bool { + if a == o { + return true + } + other, ok := o.(*ArrayPredictionContext) + if !ok { return false - } else if a.cachedHash != other.hash() { + } + if a.cachedHash != other.Hash() { return false // can't be same if hash is different - } else { - otherP := other.(*ArrayPredictionContext) - return &a.returnStates == &otherP.returnStates && &a.parents == &otherP.parents } + + // Must compare the actual array elements and not just the array address + // + return 
slices.Equal(a.returnStates, other.returnStates) && + slices.EqualFunc(a.parents, other.parents, func(x, y PredictionContext) bool { + return x.Equals(y) + }) } -func (a *ArrayPredictionContext) hash() int { +// Hash is the default hash function for ArrayPredictionContext when no specialized +// implementation is needed for a collection +func (a *ArrayPredictionContext) Hash() int { return a.BasePredictionContext.cachedHash } @@ -343,11 +360,11 @@ func (a *ArrayPredictionContext) String() string { // / func predictionContextFromRuleContext(a *ATN, outerContext RuleContext) PredictionContext { if outerContext == nil { - outerContext = RuleContextEmpty + outerContext = ParserRuleContextEmpty } // if we are in RuleContext of start rule, s, then BasePredictionContext // is EMPTY. Nobody called us. (if we are empty, return empty) - if outerContext.GetParent() == nil || outerContext == RuleContextEmpty { + if outerContext.GetParent() == nil || outerContext == ParserRuleContextEmpty { return BasePredictionContextEMPTY } // If we have a parent, convert it to a BasePredictionContext graph @@ -359,11 +376,20 @@ func predictionContextFromRuleContext(a *ATN, outerContext RuleContext) Predicti } func merge(a, b PredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext { - // share same graph if both same - if a == b { + + // Share same graph if both same + // + if a == b || a.Equals(b) { return a } + // In Java, EmptyPredictionContext inherits from SingletonPredictionContext, and so the test + // in java for SingletonPredictionContext will succeed and a new ArrayPredictionContext will be created + // from it. + // In go, EmptyPredictionContext does not equate to SingletonPredictionContext and so that conversion + // will fail. We need to test for both Empty and Singleton and create an ArrayPredictionContext from + // either of them. 
+ ac, ok1 := a.(*BaseSingletonPredictionContext) bc, ok2 := b.(*BaseSingletonPredictionContext) @@ -380,17 +406,32 @@ func merge(a, b PredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) return b } } - // convert singleton so both are arrays to normalize - if _, ok := a.(*BaseSingletonPredictionContext); ok { - a = NewArrayPredictionContext([]PredictionContext{a.GetParent(0)}, []int{a.getReturnState(0)}) + + // Convert Singleton or Empty so both are arrays to normalize - We should not use the existing parameters + // here. + // + // TODO: I think that maybe the Prediction Context structs should be redone as there is a chance we will see this mess again - maybe redo the logic here + + var arp, arb *ArrayPredictionContext + var ok bool + if arp, ok = a.(*ArrayPredictionContext); ok { + } else if _, ok = a.(*BaseSingletonPredictionContext); ok { + arp = NewArrayPredictionContext([]PredictionContext{a.GetParent(0)}, []int{a.getReturnState(0)}) + } else if _, ok = a.(*EmptyPredictionContext); ok { + arp = NewArrayPredictionContext([]PredictionContext{}, []int{}) } - if _, ok := b.(*BaseSingletonPredictionContext); ok { - b = NewArrayPredictionContext([]PredictionContext{b.GetParent(0)}, []int{b.getReturnState(0)}) + + if arb, ok = b.(*ArrayPredictionContext); ok { + } else if _, ok = b.(*BaseSingletonPredictionContext); ok { + arb = NewArrayPredictionContext([]PredictionContext{b.GetParent(0)}, []int{b.getReturnState(0)}) + } else if _, ok = b.(*EmptyPredictionContext); ok { + arb = NewArrayPredictionContext([]PredictionContext{}, []int{}) } - return mergeArrays(a.(*ArrayPredictionContext), b.(*ArrayPredictionContext), rootIsWildcard, mergeCache) + + // Both arp and arb + return mergeArrays(arp, arb, rootIsWildcard, mergeCache) } -// // Merge two {@link SingletonBasePredictionContext} instances. // //

    Stack tops equal, parents merge is same return left graph.
    @@ -423,11 +464,11 @@ func merge(a, b PredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) // / func mergeSingletons(a, b *BaseSingletonPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext { if mergeCache != nil { - previous := mergeCache.Get(a.hash(), b.hash()) + previous := mergeCache.Get(a.Hash(), b.Hash()) if previous != nil { return previous.(PredictionContext) } - previous = mergeCache.Get(b.hash(), a.hash()) + previous = mergeCache.Get(b.Hash(), a.Hash()) if previous != nil { return previous.(PredictionContext) } @@ -436,7 +477,7 @@ func mergeSingletons(a, b *BaseSingletonPredictionContext, rootIsWildcard bool, rootMerge := mergeRoot(a, b, rootIsWildcard) if rootMerge != nil { if mergeCache != nil { - mergeCache.set(a.hash(), b.hash(), rootMerge) + mergeCache.set(a.Hash(), b.Hash(), rootMerge) } return rootMerge } @@ -456,7 +497,7 @@ func mergeSingletons(a, b *BaseSingletonPredictionContext, rootIsWildcard bool, // Newjoined parent so create Newsingleton pointing to it, a' spc := SingletonBasePredictionContextCreate(parent, a.returnState) if mergeCache != nil { - mergeCache.set(a.hash(), b.hash(), spc) + mergeCache.set(a.Hash(), b.Hash(), spc) } return spc } @@ -478,7 +519,7 @@ func mergeSingletons(a, b *BaseSingletonPredictionContext, rootIsWildcard bool, parents := []PredictionContext{singleParent, singleParent} apc := NewArrayPredictionContext(parents, payloads) if mergeCache != nil { - mergeCache.set(a.hash(), b.hash(), apc) + mergeCache.set(a.Hash(), b.Hash(), apc) } return apc } @@ -494,12 +535,11 @@ func mergeSingletons(a, b *BaseSingletonPredictionContext, rootIsWildcard bool, } apc := NewArrayPredictionContext(parents, payloads) if mergeCache != nil { - mergeCache.set(a.hash(), b.hash(), apc) + mergeCache.set(a.Hash(), b.Hash(), apc) } return apc } -// // Handle case where at least one of {@code a} or {@code b} is // {@link //EMPTY}. 
In the following diagrams, the symbol {@code $} is used // to represent {@link //EMPTY}. @@ -561,7 +601,6 @@ func mergeRoot(a, b SingletonPredictionContext, rootIsWildcard bool) PredictionC return nil } -// // Merge two {@link ArrayBasePredictionContext} instances. // //

    Different tops, different parents.
    @@ -583,11 +622,11 @@ func mergeRoot(a, b SingletonPredictionContext, rootIsWildcard bool) PredictionC // / func mergeArrays(a, b *ArrayPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext { if mergeCache != nil { - previous := mergeCache.Get(a.hash(), b.hash()) + previous := mergeCache.Get(a.Hash(), b.Hash()) if previous != nil { return previous.(PredictionContext) } - previous = mergeCache.Get(b.hash(), a.hash()) + previous = mergeCache.Get(b.Hash(), a.Hash()) if previous != nil { return previous.(PredictionContext) } @@ -608,7 +647,7 @@ func mergeArrays(a, b *ArrayPredictionContext, rootIsWildcard bool, mergeCache * payload := a.returnStates[i] // $+$ = $ bothDollars := payload == BasePredictionContextEmptyReturnState && aParent == nil && bParent == nil - axAX := (aParent != nil && bParent != nil && aParent == bParent) // ax+ax + axAX := aParent != nil && bParent != nil && aParent == bParent // ax+ax // -> // ax if bothDollars || axAX { @@ -651,7 +690,7 @@ func mergeArrays(a, b *ArrayPredictionContext, rootIsWildcard bool, mergeCache * if k == 1 { // for just one merged element, return singleton top pc := SingletonBasePredictionContextCreate(mergedParents[0], mergedReturnStates[0]) if mergeCache != nil { - mergeCache.set(a.hash(), b.hash(), pc) + mergeCache.set(a.Hash(), b.Hash(), pc) } return pc } @@ -663,27 +702,27 @@ func mergeArrays(a, b *ArrayPredictionContext, rootIsWildcard bool, mergeCache * // if we created same array as a or b, return that instead // TODO: track whether this is possible above during merge sort for speed + // TODO: In go, I do not think we can just do M == xx as M is a brand new allocation. 
This could be causing allocation problems if M == a { if mergeCache != nil { - mergeCache.set(a.hash(), b.hash(), a) + mergeCache.set(a.Hash(), b.Hash(), a) } return a } if M == b { if mergeCache != nil { - mergeCache.set(a.hash(), b.hash(), b) + mergeCache.set(a.Hash(), b.Hash(), b) } return b } combineCommonParents(mergedParents) if mergeCache != nil { - mergeCache.set(a.hash(), b.hash(), M) + mergeCache.set(a.Hash(), b.Hash(), M) } return M } -// // Make pass over all M {@code parents} merge any {@code equals()} // ones. // / diff --git a/runtime/Go/antlr/prediction_mode.go b/runtime/Go/antlr/prediction_mode.go index 15718f912b..270a89d393 100644 --- a/runtime/Go/antlr/prediction_mode.go +++ b/runtime/Go/antlr/prediction_mode.go @@ -1,4 +1,4 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. // Use of this file is governed by the BSD 3-clause license that // can be found in the LICENSE.txt file in the project root. @@ -70,7 +70,6 @@ const ( PredictionModeLLExactAmbigDetection = 2 ) -// // Computes the SLL prediction termination condition. // //

    @@ -108,9 +107,9 @@ const ( // The single-alt-state thing lets prediction continue upon rules like // (otherwise, it would admit defeat too soon):

    // -//

    {@code [12|1|[], 6|2|[], 12|2|[]]. s : (ID | ID ID?) '' }

    +//

    {@code [12|1|[], 6|2|[], 12|2|[]]. s : (ID | ID ID?) ” }

    // -//

    When the ATN simulation reaches the state before {@code ''}, it has a +//

    When the ATN simulation reaches the state before {@code ”}, it has a // DFA state that looks like: {@code [12|1|[], 6|2|[], 12|2|[]]}. Naturally // {@code 12|1|[]} and {@code 12|2|[]} conflict, but we cannot stop // processing this node because alternative to has another way to continue, @@ -152,16 +151,15 @@ const ( // //

    Before testing these configurations against others, we have to merge // {@code x} and {@code x'} (without modifying the existing configurations). -// For example, we test {@code (x+x')==x''} when looking for conflicts in +// For example, we test {@code (x+x')==x”} when looking for conflicts in // the following configurations.

    // -//

    {@code (s, 1, x, {}), (s, 1, x', {p}), (s, 2, x'', {})}

    +//

    {@code (s, 1, x, {}), (s, 1, x', {p}), (s, 2, x”, {})}

    // //

    If the configuration set has predicates (as indicated by // {@link ATNConfigSet//hasSemanticContext}), this algorithm makes a copy of // the configurations to strip out all of the predicates so that a standard // {@link ATNConfigSet} will merge everything ignoring predicates.

    -// func PredictionModehasSLLConflictTerminatingPrediction(mode int, configs ATNConfigSet) bool { // Configs in rule stop states indicate reaching the end of the decision // rule (local context) or end of start rule (full context). If all @@ -229,7 +227,6 @@ func PredictionModeallConfigsInRuleStopStates(configs ATNConfigSet) bool { return true } -// // Full LL prediction termination. // //

    Can we stop looking ahead during ATN simulation or is there some @@ -334,7 +331,7 @@ func PredictionModeallConfigsInRuleStopStates(configs ATNConfigSet) bool { // // //

  • {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 1, y)}, -// {@code (s', 2, y)}, {@code (s'', 1, z)} yields non-conflicting set +// {@code (s', 2, y)}, {@code (s”, 1, z)} yields non-conflicting set // {@code {1}} U conflicting sets {@code min({1,2})} U {@code min({1,2})} = // {@code {1}} => stop and predict 1
  • // @@ -369,31 +366,26 @@ func PredictionModeallConfigsInRuleStopStates(configs ATNConfigSet) bool { // two or one and three so we keep going. We can only stop prediction when // we need exact ambiguity detection when the sets look like // {@code A={{1,2}}} or {@code {{1,2},{1,2}}}, etc...

    -// func PredictionModeresolvesToJustOneViableAlt(altsets []*BitSet) int { return PredictionModegetSingleViableAlt(altsets) } -// // Determines if every alternative subset in {@code altsets} contains more // than one alternative. // // @param altsets a collection of alternative subsets // @return {@code true} if every {@link BitSet} in {@code altsets} has // {@link BitSet//cardinality cardinality} > 1, otherwise {@code false} -// func PredictionModeallSubsetsConflict(altsets []*BitSet) bool { return !PredictionModehasNonConflictingAltSet(altsets) } -// // Determines if any single alternative subset in {@code altsets} contains // exactly one alternative. // // @param altsets a collection of alternative subsets // @return {@code true} if {@code altsets} contains a {@link BitSet} with // {@link BitSet//cardinality cardinality} 1, otherwise {@code false} -// func PredictionModehasNonConflictingAltSet(altsets []*BitSet) bool { for i := 0; i < len(altsets); i++ { alts := altsets[i] @@ -404,14 +396,12 @@ func PredictionModehasNonConflictingAltSet(altsets []*BitSet) bool { return false } -// // Determines if any single alternative subset in {@code altsets} contains // more than one alternative. // // @param altsets a collection of alternative subsets // @return {@code true} if {@code altsets} contains a {@link BitSet} with // {@link BitSet//cardinality cardinality} > 1, otherwise {@code false} -// func PredictionModehasConflictingAltSet(altsets []*BitSet) bool { for i := 0; i < len(altsets); i++ { alts := altsets[i] @@ -422,13 +412,11 @@ func PredictionModehasConflictingAltSet(altsets []*BitSet) bool { return false } -// // Determines if every alternative subset in {@code altsets} is equivalent. 
// // @param altsets a collection of alternative subsets // @return {@code true} if every member of {@code altsets} is equal to the // others, otherwise {@code false} -// func PredictionModeallSubsetsEqual(altsets []*BitSet) bool { var first *BitSet @@ -444,13 +432,11 @@ func PredictionModeallSubsetsEqual(altsets []*BitSet) bool { return true } -// // Returns the unique alternative predicted by all alternative subsets in // {@code altsets}. If no such alternative exists, this method returns // {@link ATN//INVALID_ALT_NUMBER}. // // @param altsets a collection of alternative subsets -// func PredictionModegetUniqueAlt(altsets []*BitSet) int { all := PredictionModeGetAlts(altsets) if all.length() == 1 { @@ -466,7 +452,6 @@ func PredictionModegetUniqueAlt(altsets []*BitSet) int { // // @param altsets a collection of alternative subsets // @return the set of represented alternatives in {@code altsets} -// func PredictionModeGetAlts(altsets []*BitSet) *BitSet { all := NewBitSet() for _, alts := range altsets { @@ -475,44 +460,35 @@ func PredictionModeGetAlts(altsets []*BitSet) *BitSet { return all } -// -// This func gets the conflicting alt subsets from a configuration set. +// PredictionModegetConflictingAltSubsets gets the conflicting alt subsets from a configuration set. // For each configuration {@code c} in {@code configs}: // //
     // map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not
     // alt and not pred
     // 
    -// func PredictionModegetConflictingAltSubsets(configs ATNConfigSet) []*BitSet { - configToAlts := make(map[int]*BitSet) + configToAlts := NewJMap[ATNConfig, *BitSet, *ATNAltConfigComparator[ATNConfig]](&ATNAltConfigComparator[ATNConfig]{}) for _, c := range configs.GetItems() { - key := 31 * c.GetState().GetStateNumber() + c.GetContext().hash() - alts, ok := configToAlts[key] + alts, ok := configToAlts.Get(c) if !ok { alts = NewBitSet() - configToAlts[key] = alts + configToAlts.Put(c, alts) } alts.add(c.GetAlt()) } - values := make([]*BitSet, 0, 10) - for _, v := range configToAlts { - values = append(values, v) - } - return values + return configToAlts.Values() } -// -// Get a map from state to alt subset from a configuration set. For each +// PredictionModeGetStateToAltMap gets a map from state to alt subset from a configuration set. For each // configuration {@code c} in {@code configs}: // //
     // map[c.{@link ATNConfig//state state}] U= c.{@link ATNConfig//alt alt}
     // 
    -// func PredictionModeGetStateToAltMap(configs ATNConfigSet) *AltDict { m := NewAltDict() diff --git a/runtime/Go/antlr/recognizer.go b/runtime/Go/antlr/recognizer.go index 93efcf355d..2cd05443b0 100644 --- a/runtime/Go/antlr/recognizer.go +++ b/runtime/Go/antlr/recognizer.go @@ -1,4 +1,4 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. // Use of this file is governed by the BSD 3-clause license that // can be found in the LICENSE.txt file in the project root. @@ -49,7 +49,7 @@ var tokenTypeMapCache = make(map[string]int) var ruleIndexMapCache = make(map[string]int) func (b *BaseRecognizer) checkVersion(toolVersion string) { - runtimeVersion := "4.10.1" + runtimeVersion := "4.11.0" if runtimeVersion != toolVersion { fmt.Println("ANTLR runtime and generated code versions disagree: " + runtimeVersion + "!=" + toolVersion) } @@ -108,7 +108,6 @@ func (b *BaseRecognizer) SetState(v int) { // Get a map from rule names to rule indexes. // //

    Used for XPath and tree pattern compilation.

    -// func (b *BaseRecognizer) GetRuleIndexMap() map[string]int { panic("Method not defined!") @@ -171,18 +170,18 @@ func (b *BaseRecognizer) GetErrorHeader(e RecognitionException) string { } // How should a token be displayed in an error message? The default -// is to display just the text, but during development you might -// want to have a lot of information spit out. Override in that case -// to use t.String() (which, for CommonToken, dumps everything about -// the token). This is better than forcing you to override a method in -// your token objects because you don't have to go modify your lexer -// so that it creates a NewJava type. +// +// is to display just the text, but during development you might +// want to have a lot of information spit out. Override in that case +// to use t.String() (which, for CommonToken, dumps everything about +// the token). This is better than forcing you to override a method in +// your token objects because you don't have to go modify your lexer +// so that it creates a NewJava type. // // @deprecated This method is not called by the ANTLR 4 Runtime. Specific // implementations of {@link ANTLRErrorStrategy} may provide a similar // feature when necessary. For example, see // {@link DefaultErrorStrategy//GetTokenErrorDisplay}. -// func (b *BaseRecognizer) GetTokenErrorDisplay(t Token) string { if t == nil { return "" diff --git a/runtime/Go/antlr/rule_context.go b/runtime/Go/antlr/rule_context.go index 600cf8c062..210699ba23 100644 --- a/runtime/Go/antlr/rule_context.go +++ b/runtime/Go/antlr/rule_context.go @@ -1,4 +1,4 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. // Use of this file is governed by the BSD 3-clause license that // can be found in the LICENSE.txt file in the project root. 
diff --git a/runtime/Go/antlr/semantic_context.go b/runtime/Go/antlr/semantic_context.go index 9ada430779..f54926e760 100644 --- a/runtime/Go/antlr/semantic_context.go +++ b/runtime/Go/antlr/semantic_context.go @@ -1,4 +1,4 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. // Use of this file is governed by the BSD 3-clause license that // can be found in the LICENSE.txt file in the project root. @@ -18,12 +18,12 @@ import ( // type SemanticContext interface { - comparable + Equals(other Collectable[SemanticContext]) bool + Hash() int evaluate(parser Recognizer, outerContext RuleContext) bool evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext - hash() int String() string } @@ -78,7 +78,7 @@ func NewPredicate(ruleIndex, predIndex int, isCtxDependent bool) *Predicate { //The default {@link SemanticContext}, which is semantically equivalent to //a predicate of the form {@code {true}?}. 
-var SemanticContextNone SemanticContext = NewPredicate(-1, -1, false) +var SemanticContextNone = NewPredicate(-1, -1, false) func (p *Predicate) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext { return p @@ -95,7 +95,7 @@ func (p *Predicate) evaluate(parser Recognizer, outerContext RuleContext) bool { return parser.Sempred(localctx, p.ruleIndex, p.predIndex) } -func (p *Predicate) equals(other interface{}) bool { +func (p *Predicate) Equals(other Collectable[SemanticContext]) bool { if p == other { return true } else if _, ok := other.(*Predicate); !ok { @@ -107,7 +107,7 @@ func (p *Predicate) equals(other interface{}) bool { } } -func (p *Predicate) hash() int { +func (p *Predicate) Hash() int { h := murmurInit(0) h = murmurUpdate(h, p.ruleIndex) h = murmurUpdate(h, p.predIndex) @@ -151,17 +151,22 @@ func (p *PrecedencePredicate) compareTo(other *PrecedencePredicate) int { return p.precedence - other.precedence } -func (p *PrecedencePredicate) equals(other interface{}) bool { - if p == other { - return true - } else if _, ok := other.(*PrecedencePredicate); !ok { +func (p *PrecedencePredicate) Equals(other Collectable[SemanticContext]) bool { + + var op *PrecedencePredicate + var ok bool + if op, ok = other.(*PrecedencePredicate); !ok { return false - } else { - return p.precedence == other.(*PrecedencePredicate).precedence } + + if p == op { + return true + } + + return p.precedence == other.(*PrecedencePredicate).precedence } -func (p *PrecedencePredicate) hash() int { +func (p *PrecedencePredicate) Hash() int { h := uint32(1) h = 31*h + uint32(p.precedence) return int(h) @@ -171,10 +176,10 @@ func (p *PrecedencePredicate) String() string { return "{" + strconv.Itoa(p.precedence) + ">=prec}?" 
} -func PrecedencePredicatefilterPrecedencePredicates(set Set) []*PrecedencePredicate { +func PrecedencePredicatefilterPrecedencePredicates(set *JStore[SemanticContext, Comparator[SemanticContext]]) []*PrecedencePredicate { result := make([]*PrecedencePredicate, 0) - set.Each(func(v interface{}) bool { + set.Each(func(v SemanticContext) bool { if c2, ok := v.(*PrecedencePredicate); ok { result = append(result, c2) } @@ -193,21 +198,21 @@ type AND struct { func NewAND(a, b SemanticContext) *AND { - operands := newArray2DHashSet(nil, nil) + operands := NewJStore[SemanticContext, Comparator[SemanticContext]](&ObjEqComparator[SemanticContext]{}) if aa, ok := a.(*AND); ok { for _, o := range aa.opnds { - operands.Add(o) + operands.Put(o) } } else { - operands.Add(a) + operands.Put(a) } if ba, ok := b.(*AND); ok { for _, o := range ba.opnds { - operands.Add(o) + operands.Put(o) } } else { - operands.Add(b) + operands.Put(b) } precedencePredicates := PrecedencePredicatefilterPrecedencePredicates(operands) if len(precedencePredicates) > 0 { @@ -220,7 +225,7 @@ func NewAND(a, b SemanticContext) *AND { } } - operands.Add(reduced) + operands.Put(reduced) } vs := operands.Values() @@ -235,14 +240,15 @@ func NewAND(a, b SemanticContext) *AND { return and } -func (a *AND) equals(other interface{}) bool { +func (a *AND) Equals(other Collectable[SemanticContext]) bool { if a == other { return true - } else if _, ok := other.(*AND); !ok { + } + if _, ok := other.(*AND); !ok { return false } else { for i, v := range other.(*AND).opnds { - if !a.opnds[i].equals(v) { + if !a.opnds[i].Equals(v) { return false } } @@ -250,13 +256,11 @@ func (a *AND) equals(other interface{}) bool { } } -// // {@inheritDoc} // //

    // The evaluation of predicates by a context is short-circuiting, but // unordered.

    -// func (a *AND) evaluate(parser Recognizer, outerContext RuleContext) bool { for i := 0; i < len(a.opnds); i++ { if !a.opnds[i].evaluate(parser, outerContext) { @@ -304,18 +308,18 @@ func (a *AND) evalPrecedence(parser Recognizer, outerContext RuleContext) Semant return result } -func (a *AND) hash() int { +func (a *AND) Hash() int { h := murmurInit(37) // Init with a value different from OR for _, op := range a.opnds { - h = murmurUpdate(h, op.hash()) + h = murmurUpdate(h, op.Hash()) } return murmurFinish(h, len(a.opnds)) } -func (a *OR) hash() int { +func (a *OR) Hash() int { h := murmurInit(41) // Init with a value different from AND for _, op := range a.opnds { - h = murmurUpdate(h, op.hash()) + h = murmurUpdate(h, op.Hash()) } return murmurFinish(h, len(a.opnds)) } @@ -345,21 +349,21 @@ type OR struct { func NewOR(a, b SemanticContext) *OR { - operands := newArray2DHashSet(nil, nil) + operands := NewJStore[SemanticContext, Comparator[SemanticContext]](&ObjEqComparator[SemanticContext]{}) if aa, ok := a.(*OR); ok { for _, o := range aa.opnds { - operands.Add(o) + operands.Put(o) } } else { - operands.Add(a) + operands.Put(a) } if ba, ok := b.(*OR); ok { for _, o := range ba.opnds { - operands.Add(o) + operands.Put(o) } } else { - operands.Add(b) + operands.Put(b) } precedencePredicates := PrecedencePredicatefilterPrecedencePredicates(operands) if len(precedencePredicates) > 0 { @@ -372,7 +376,7 @@ func NewOR(a, b SemanticContext) *OR { } } - operands.Add(reduced) + operands.Put(reduced) } vs := operands.Values() @@ -388,14 +392,14 @@ func NewOR(a, b SemanticContext) *OR { return o } -func (o *OR) equals(other interface{}) bool { +func (o *OR) Equals(other Collectable[SemanticContext]) bool { if o == other { return true } else if _, ok := other.(*OR); !ok { return false } else { for i, v := range other.(*OR).opnds { - if !o.opnds[i].equals(v) { + if !o.opnds[i].Equals(v) { return false } } @@ -406,7 +410,6 @@ func (o *OR) equals(other interface{}) bool { //

    // The evaluation of predicates by o context is short-circuiting, but // unordered.

    -// func (o *OR) evaluate(parser Recognizer, outerContext RuleContext) bool { for i := 0; i < len(o.opnds); i++ { if o.opnds[i].evaluate(parser, outerContext) { diff --git a/runtime/Go/antlr/testing_assert_test.go b/runtime/Go/antlr/testing_assert_test.go index b9a1f03352..4a402a34f3 100644 --- a/runtime/Go/antlr/testing_assert_test.go +++ b/runtime/Go/antlr/testing_assert_test.go @@ -1,4 +1,4 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. // Use of this file is governed by the BSD 3-clause license that // can be found in the LICENSE.txt file in the project root. diff --git a/runtime/Go/antlr/testing_lexer_b_test.go b/runtime/Go/antlr/testing_lexer_b_test.go index d07782b17f..2485abf780 100644 --- a/runtime/Go/antlr/testing_lexer_b_test.go +++ b/runtime/Go/antlr/testing_lexer_b_test.go @@ -1,4 +1,4 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. // Use of this file is governed by the BSD 3-clause license that // can be found in the LICENSE.txt file in the project root. 
@@ -20,47 +20,16 @@ MULT : '*'; WS : ' '+; */ -var lexerB_serializedLexerAtn = []int32{ - 4, 0, 7, 38, 6, 65535, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, - 2, 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 1, 0, 4, 0, 17, 8, 0, 11, 0, 12, 0, - 18, 1, 1, 4, 1, 22, 8, 1, 11, 1, 12, 1, 23, 1, 2, 1, 2, 1, 3, 1, 3, 1, - 4, 1, 4, 1, 5, 1, 5, 1, 6, 4, 6, 35, 8, 6, 11, 6, 12, 6, 36, 0, 0, 7, 1, - 1, 3, 2, 5, 3, 7, 4, 9, 5, 11, 6, 13, 7, 1, 0, 0, 0, 40, 0, 1, 1, 0, 0, - 0, 0, 3, 1, 0, 0, 0, 0, 5, 1, 0, 0, 0, 0, 7, 1, 0, 0, 0, 0, 9, 1, 0, 0, - 0, 0, 11, 1, 0, 0, 0, 0, 13, 1, 0, 0, 0, 1, 16, 1, 0, 0, 0, 3, 21, 1, 0, - 0, 0, 5, 25, 1, 0, 0, 0, 7, 27, 1, 0, 0, 0, 9, 29, 1, 0, 0, 0, 11, 31, - 1, 0, 0, 0, 13, 34, 1, 0, 0, 0, 15, 17, 2, 97, 122, 0, 16, 15, 1, 0, 0, - 0, 17, 18, 1, 0, 0, 0, 18, 16, 1, 0, 0, 0, 18, 19, 1, 0, 0, 0, 19, 2, 1, - 0, 0, 0, 20, 22, 2, 48, 57, 0, 21, 20, 1, 0, 0, 0, 22, 23, 1, 0, 0, 0, - 23, 21, 1, 0, 0, 0, 23, 24, 1, 0, 0, 0, 24, 4, 1, 0, 0, 0, 25, 26, 5, 59, - 0, 0, 26, 6, 1, 0, 0, 0, 27, 28, 5, 61, 0, 0, 28, 8, 1, 0, 0, 0, 29, 30, - 5, 43, 0, 0, 30, 10, 1, 0, 0, 0, 31, 32, 5, 42, 0, 0, 32, 12, 1, 0, 0, - 0, 33, 35, 5, 32, 0, 0, 34, 33, 1, 0, 0, 0, 35, 36, 1, 0, 0, 0, 36, 34, - 1, 0, 0, 0, 36, 37, 1, 0, 0, 0, 37, 14, 1, 0, 0, 0, 4, 0, 18, 23, 36, 0, -} - -var lexerB_lexerDeserializer = NewATNDeserializer(nil) -var lexerB_lexerAtn = lexerB_lexerDeserializer.Deserialize(lexerB_serializedLexerAtn) - -var lexerB_lexerChannelNames = []string{ - "DEFAULT_TOKEN_CHANNEL", "HIDDEN", -} - -var lexerB_lexerModeNames = []string{ - "DEFAULT_MODE", -} - -var lexerB_lexerLiteralNames = []string{ - "", "", "", "';'", "'='", "'+'", "'*'", -} - -var lexerB_lexerSymbolicNames = []string{ - "", "ID", "INT", "SEMI", "ASSIGN", "PLUS", "MULT", "WS", -} +import ( + "fmt" + "sync" + "unicode" +) -var lexerB_lexerRuleNames = []string{ - "ID", "INT", "SEMI", "ASSIGN", "PLUS", "MULT", "WS", -} +// Suppress unused import error +var _ = fmt.Printf +var _ = sync.Once{} +var _ = 
unicode.IsLetter type LexerB struct { *BaseLexer @@ -69,27 +38,89 @@ type LexerB struct { // TODO: EOF string } -var lexerB_lexerDecisionToDFA = make([]*DFA, len(lexerB_lexerAtn.DecisionToState)) +var lexerbLexerStaticData struct { + once sync.Once + serializedATN []int32 + channelNames []string + modeNames []string + literalNames []string + symbolicNames []string + ruleNames []string + predictionContextCache *PredictionContextCache + atn *ATN + decisionToDFA []*DFA +} -func init() { - for index, ds := range lexerB_lexerAtn.DecisionToState { - lexerB_lexerDecisionToDFA[index] = NewDFA(ds, index) +func lexerbLexerInit() { + staticData := &lexerbLexerStaticData + staticData.channelNames = []string{ + "DEFAULT_TOKEN_CHANNEL", "HIDDEN", + } + staticData.modeNames = []string{ + "DEFAULT_MODE", + } + staticData.literalNames = []string{ + "", "", "", "';'", "'='", "'+'", "'*'", + } + staticData.symbolicNames = []string{ + "", "ID", "INT", "SEMI", "ASSIGN", "PLUS", "MULT", "WS", + } + staticData.ruleNames = []string{ + "ID", "INT", "SEMI", "ASSIGN", "PLUS", "MULT", "WS", + } + staticData.predictionContextCache = NewPredictionContextCache() + staticData.serializedATN = []int32{ + 4, 0, 7, 38, 6, -1, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, + 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 1, 0, 4, 0, 17, 8, 0, 11, 0, 12, 0, 18, + 1, 1, 4, 1, 22, 8, 1, 11, 1, 12, 1, 23, 1, 2, 1, 2, 1, 3, 1, 3, 1, 4, 1, + 4, 1, 5, 1, 5, 1, 6, 4, 6, 35, 8, 6, 11, 6, 12, 6, 36, 0, 0, 7, 1, 1, 3, + 2, 5, 3, 7, 4, 9, 5, 11, 6, 13, 7, 1, 0, 0, 40, 0, 1, 1, 0, 0, 0, 0, 3, + 1, 0, 0, 0, 0, 5, 1, 0, 0, 0, 0, 7, 1, 0, 0, 0, 0, 9, 1, 0, 0, 0, 0, 11, + 1, 0, 0, 0, 0, 13, 1, 0, 0, 0, 1, 16, 1, 0, 0, 0, 3, 21, 1, 0, 0, 0, 5, + 25, 1, 0, 0, 0, 7, 27, 1, 0, 0, 0, 9, 29, 1, 0, 0, 0, 11, 31, 1, 0, 0, + 0, 13, 34, 1, 0, 0, 0, 15, 17, 2, 97, 122, 0, 16, 15, 1, 0, 0, 0, 17, 18, + 1, 0, 0, 0, 18, 16, 1, 0, 0, 0, 18, 19, 1, 0, 0, 0, 19, 2, 1, 0, 0, 0, + 20, 22, 2, 48, 57, 0, 21, 20, 1, 0, 0, 0, 22, 23, 1, 0, 0, 0, 23, 
21, 1, + 0, 0, 0, 23, 24, 1, 0, 0, 0, 24, 4, 1, 0, 0, 0, 25, 26, 5, 59, 0, 0, 26, + 6, 1, 0, 0, 0, 27, 28, 5, 61, 0, 0, 28, 8, 1, 0, 0, 0, 29, 30, 5, 43, 0, + 0, 30, 10, 1, 0, 0, 0, 31, 32, 5, 42, 0, 0, 32, 12, 1, 0, 0, 0, 33, 35, + 5, 32, 0, 0, 34, 33, 1, 0, 0, 0, 35, 36, 1, 0, 0, 0, 36, 34, 1, 0, 0, 0, + 36, 37, 1, 0, 0, 0, 37, 14, 1, 0, 0, 0, 4, 0, 18, 23, 36, 0, + } + deserializer := NewATNDeserializer(nil) + staticData.atn = deserializer.Deserialize(staticData.serializedATN) + atn := staticData.atn + staticData.decisionToDFA = make([]*DFA, len(atn.DecisionToState)) + decisionToDFA := staticData.decisionToDFA + for index, state := range atn.DecisionToState { + decisionToDFA[index] = NewDFA(state, index) } } +// LexerBInit initializes any static state used to implement LexerB. By default the +// static state used to implement the lexer is lazily initialized during the first call to +// NewLexerB(). You can call this function if you wish to initialize the static state ahead +// of time. +func LexerBInit() { + staticData := &lexerbLexerStaticData + staticData.once.Do(lexerbLexerInit) +} + +// NewLexerB produces a new lexer instance for the optional input antlr.CharStream. 
func NewLexerB(input CharStream) *LexerB { + LexerBInit() l := new(LexerB) l.BaseLexer = NewBaseLexer(input) - l.Interpreter = NewLexerATNSimulator(l, lexerB_lexerAtn, lexerB_lexerDecisionToDFA, NewPredictionContextCache()) - - l.channelNames = lexerB_lexerChannelNames - l.modeNames = lexerB_lexerModeNames - l.RuleNames = lexerB_lexerRuleNames - l.LiteralNames = lexerB_lexerLiteralNames - l.SymbolicNames = lexerB_lexerSymbolicNames + staticData := &lexerbLexerStaticData + l.Interpreter = NewLexerATNSimulator(l, staticData.atn, staticData.decisionToDFA, staticData.predictionContextCache) + l.channelNames = staticData.channelNames + l.modeNames = staticData.modeNames + l.RuleNames = staticData.ruleNames + l.LiteralNames = staticData.literalNames + l.SymbolicNames = staticData.symbolicNames l.GrammarFileName = "LexerB.g4" - // TODO: l.EOF = TokenEOF + // TODO: l.EOF = antlr.TokenEOF return l } diff --git a/runtime/Go/antlr/token.go b/runtime/Go/antlr/token.go index 2d8e99095d..f73b06bc6a 100644 --- a/runtime/Go/antlr/token.go +++ b/runtime/Go/antlr/token.go @@ -1,4 +1,4 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. // Use of this file is governed by the BSD 3-clause license that // can be found in the LICENSE.txt file in the project root. @@ -158,7 +158,6 @@ func NewCommonToken(source *TokenSourceCharStreamPair, tokenType, channel, start // {@link Token//GetInputStream}.

    // // @param oldToken The token to copy. -// func (c *CommonToken) clone() *CommonToken { t := NewCommonToken(c.source, c.tokenType, c.channel, c.start, c.stop) t.tokenIndex = c.GetTokenIndex() diff --git a/runtime/Go/antlr/token_source.go b/runtime/Go/antlr/token_source.go index e023978fef..a3f36eaa67 100644 --- a/runtime/Go/antlr/token_source.go +++ b/runtime/Go/antlr/token_source.go @@ -1,4 +1,4 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. // Use of this file is governed by the BSD 3-clause license that // can be found in the LICENSE.txt file in the project root. diff --git a/runtime/Go/antlr/token_stream.go b/runtime/Go/antlr/token_stream.go index df92c81478..1527d43f60 100644 --- a/runtime/Go/antlr/token_stream.go +++ b/runtime/Go/antlr/token_stream.go @@ -1,4 +1,4 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. // Use of this file is governed by the BSD 3-clause license that // can be found in the LICENSE.txt file in the project root. diff --git a/runtime/Go/antlr/tokenstream_rewriter.go b/runtime/Go/antlr/tokenstream_rewriter.go index 96a03f02aa..b3e38af344 100644 --- a/runtime/Go/antlr/tokenstream_rewriter.go +++ b/runtime/Go/antlr/tokenstream_rewriter.go @@ -1,15 +1,15 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. // Use of this file is governed by the BSD 3-clause license that // can be found in the LICENSE.txt file in the project root. + package antlr import ( -"bytes" -"fmt" + "bytes" + "fmt" ) - -// +// // Useful for rewriting out a buffered input token stream after doing some // augmentation or other manipulations on it. @@ -85,12 +85,10 @@ import ( // If you don't use named rewrite streams, a "default" stream is used as the // first example shows.

    - - -const( +const ( Default_Program_Name = "default" - Program_Init_Size = 100 - Min_Token_Index = 0 + Program_Init_Size = 100 + Min_Token_Index = 0 ) // Define the rewrite operation hierarchy @@ -98,13 +96,13 @@ const( type RewriteOperation interface { // Execute the rewrite operation by possibly adding to the buffer. // Return the index of the next token to operate on. - Execute(buffer *bytes.Buffer) int - String() string - GetInstructionIndex() int - GetIndex() int - GetText() string - GetOpName() string - GetTokens() TokenStream + Execute(buffer *bytes.Buffer) int + String() string + GetInstructionIndex() int + GetIndex() int + GetText() string + GetOpName() string + GetTokens() TokenStream SetInstructionIndex(val int) SetIndex(int) SetText(string) @@ -114,63 +112,62 @@ type RewriteOperation interface { type BaseRewriteOperation struct { //Current index of rewrites list - instruction_index int + instruction_index int //Token buffer index - index int + index int //Substitution text - text string + text string //Actual operation name - op_name string + op_name string //Pointer to token steam - tokens TokenStream + tokens TokenStream } -func (op *BaseRewriteOperation)GetInstructionIndex() int{ +func (op *BaseRewriteOperation) GetInstructionIndex() int { return op.instruction_index } -func (op *BaseRewriteOperation)GetIndex() int{ +func (op *BaseRewriteOperation) GetIndex() int { return op.index } -func (op *BaseRewriteOperation)GetText() string{ +func (op *BaseRewriteOperation) GetText() string { return op.text } -func (op *BaseRewriteOperation)GetOpName() string{ +func (op *BaseRewriteOperation) GetOpName() string { return op.op_name } -func (op *BaseRewriteOperation)GetTokens() TokenStream{ +func (op *BaseRewriteOperation) GetTokens() TokenStream { return op.tokens } -func (op *BaseRewriteOperation)SetInstructionIndex(val int){ +func (op *BaseRewriteOperation) SetInstructionIndex(val int) { op.instruction_index = val } -func (op 
*BaseRewriteOperation)SetIndex(val int) { +func (op *BaseRewriteOperation) SetIndex(val int) { op.index = val } -func (op *BaseRewriteOperation)SetText(val string){ +func (op *BaseRewriteOperation) SetText(val string) { op.text = val } -func (op *BaseRewriteOperation)SetOpName(val string){ +func (op *BaseRewriteOperation) SetOpName(val string) { op.op_name = val } -func (op *BaseRewriteOperation)SetTokens(val TokenStream) { +func (op *BaseRewriteOperation) SetTokens(val TokenStream) { op.tokens = val } - -func (op *BaseRewriteOperation) Execute(buffer *bytes.Buffer) int{ +func (op *BaseRewriteOperation) Execute(buffer *bytes.Buffer) int { return op.index } -func (op *BaseRewriteOperation) String() string { +func (op *BaseRewriteOperation) String() string { return fmt.Sprintf("<%s@%d:\"%s\">", op.op_name, op.tokens.Get(op.GetIndex()), @@ -179,26 +176,25 @@ func (op *BaseRewriteOperation) String() string { } - type InsertBeforeOp struct { BaseRewriteOperation } -func NewInsertBeforeOp(index int, text string, stream TokenStream) *InsertBeforeOp{ - return &InsertBeforeOp{BaseRewriteOperation:BaseRewriteOperation{ - index:index, - text:text, - op_name:"InsertBeforeOp", - tokens:stream, +func NewInsertBeforeOp(index int, text string, stream TokenStream) *InsertBeforeOp { + return &InsertBeforeOp{BaseRewriteOperation: BaseRewriteOperation{ + index: index, + text: text, + op_name: "InsertBeforeOp", + tokens: stream, }} } -func (op *InsertBeforeOp) Execute(buffer *bytes.Buffer) int{ +func (op *InsertBeforeOp) Execute(buffer *bytes.Buffer) int { buffer.WriteString(op.text) - if op.tokens.Get(op.index).GetTokenType() != TokenEOF{ + if op.tokens.Get(op.index).GetTokenType() != TokenEOF { buffer.WriteString(op.tokens.Get(op.index).GetText()) } - return op.index+1 + return op.index + 1 } func (op *InsertBeforeOp) String() string { @@ -213,20 +209,20 @@ type InsertAfterOp struct { BaseRewriteOperation } -func NewInsertAfterOp(index int, text string, stream TokenStream) 
*InsertAfterOp{ - return &InsertAfterOp{BaseRewriteOperation:BaseRewriteOperation{ - index:index+1, - text:text, - tokens:stream, +func NewInsertAfterOp(index int, text string, stream TokenStream) *InsertAfterOp { + return &InsertAfterOp{BaseRewriteOperation: BaseRewriteOperation{ + index: index + 1, + text: text, + tokens: stream, }} } func (op *InsertAfterOp) Execute(buffer *bytes.Buffer) int { buffer.WriteString(op.text) - if op.tokens.Get(op.index).GetTokenType() != TokenEOF{ + if op.tokens.Get(op.index).GetTokenType() != TokenEOF { buffer.WriteString(op.tokens.Get(op.index).GetText()) } - return op.index+1 + return op.index + 1 } func (op *InsertAfterOp) String() string { @@ -235,28 +231,28 @@ func (op *InsertAfterOp) String() string { // I'm going to try replacing range from x..y with (y-x)+1 ReplaceOp // instructions. -type ReplaceOp struct{ +type ReplaceOp struct { BaseRewriteOperation LastIndex int } -func NewReplaceOp(from, to int, text string, stream TokenStream)*ReplaceOp { +func NewReplaceOp(from, to int, text string, stream TokenStream) *ReplaceOp { return &ReplaceOp{ - BaseRewriteOperation:BaseRewriteOperation{ - index:from, - text:text, - op_name:"ReplaceOp", - tokens:stream, + BaseRewriteOperation: BaseRewriteOperation{ + index: from, + text: text, + op_name: "ReplaceOp", + tokens: stream, }, - LastIndex:to, + LastIndex: to, } } -func (op *ReplaceOp)Execute(buffer *bytes.Buffer) int{ - if op.text != ""{ +func (op *ReplaceOp) Execute(buffer *bytes.Buffer) int { + if op.text != "" { buffer.WriteString(op.text) } - return op.LastIndex +1 + return op.LastIndex + 1 } func (op *ReplaceOp) String() string { @@ -268,54 +264,54 @@ func (op *ReplaceOp) String() string { op.tokens.Get(op.index), op.tokens.Get(op.LastIndex), op.text) } - type TokenStreamRewriter struct { //Our source stream - tokens TokenStream + tokens TokenStream // You may have multiple, named streams of rewrite operations. // I'm calling these things "programs." 
// Maps String (name) → rewrite (List) - programs map[string][]RewriteOperation - last_rewrite_token_indexes map[string]int + programs map[string][]RewriteOperation + last_rewrite_token_indexes map[string]int } -func NewTokenStreamRewriter(tokens TokenStream) *TokenStreamRewriter{ +func NewTokenStreamRewriter(tokens TokenStream) *TokenStreamRewriter { return &TokenStreamRewriter{ - tokens: tokens, - programs: map[string][]RewriteOperation{ - Default_Program_Name:make([]RewriteOperation,0, Program_Init_Size), + tokens: tokens, + programs: map[string][]RewriteOperation{ + Default_Program_Name: make([]RewriteOperation, 0, Program_Init_Size), }, - last_rewrite_token_indexes: map[string]int{}, + last_rewrite_token_indexes: map[string]int{}, } } -func (tsr *TokenStreamRewriter) GetTokenStream() TokenStream{ +func (tsr *TokenStreamRewriter) GetTokenStream() TokenStream { return tsr.tokens } -// Rollback the instruction stream for a program so that -// the indicated instruction (via instructionIndex) is no -// longer in the stream. UNTESTED! -func (tsr *TokenStreamRewriter) Rollback(program_name string, instruction_index int){ - is, ok := tsr.programs[program_name] - if ok{ +// Rollback the instruction stream for a program so that +// the indicated instruction (via instructionIndex) is no +// longer in the stream. UNTESTED! 
+func (tsr *TokenStreamRewriter) Rollback(program_name string, instruction_index int) { + is, ok := tsr.programs[program_name] + if ok { tsr.programs[program_name] = is[Min_Token_Index:instruction_index] } } -func (tsr *TokenStreamRewriter) RollbackDefault(instruction_index int){ +func (tsr *TokenStreamRewriter) RollbackDefault(instruction_index int) { tsr.Rollback(Default_Program_Name, instruction_index) } -//Reset the program so that no instructions exist -func (tsr *TokenStreamRewriter) DeleteProgram(program_name string){ + +// Reset the program so that no instructions exist +func (tsr *TokenStreamRewriter) DeleteProgram(program_name string) { tsr.Rollback(program_name, Min_Token_Index) //TODO: double test on that cause lower bound is not included } -func (tsr *TokenStreamRewriter) DeleteProgramDefault(){ +func (tsr *TokenStreamRewriter) DeleteProgramDefault() { tsr.DeleteProgram(Default_Program_Name) } -func (tsr *TokenStreamRewriter) InsertAfter(program_name string, index int, text string){ +func (tsr *TokenStreamRewriter) InsertAfter(program_name string, index int, text string) { // to insert after, just insert before next index (even if past end) var op RewriteOperation = NewInsertAfterOp(index, text, tsr.tokens) rewrites := tsr.GetProgram(program_name) @@ -323,31 +319,31 @@ func (tsr *TokenStreamRewriter) InsertAfter(program_name string, index int, text tsr.AddToProgram(program_name, op) } -func (tsr *TokenStreamRewriter) InsertAfterDefault(index int, text string){ +func (tsr *TokenStreamRewriter) InsertAfterDefault(index int, text string) { tsr.InsertAfter(Default_Program_Name, index, text) } -func (tsr *TokenStreamRewriter) InsertAfterToken(program_name string, token Token, text string){ +func (tsr *TokenStreamRewriter) InsertAfterToken(program_name string, token Token, text string) { tsr.InsertAfter(program_name, token.GetTokenIndex(), text) } -func (tsr* TokenStreamRewriter) InsertBefore(program_name string, index int, text string){ +func (tsr 
*TokenStreamRewriter) InsertBefore(program_name string, index int, text string) { var op RewriteOperation = NewInsertBeforeOp(index, text, tsr.tokens) rewrites := tsr.GetProgram(program_name) op.SetInstructionIndex(len(rewrites)) tsr.AddToProgram(program_name, op) } -func (tsr *TokenStreamRewriter) InsertBeforeDefault(index int, text string){ +func (tsr *TokenStreamRewriter) InsertBeforeDefault(index int, text string) { tsr.InsertBefore(Default_Program_Name, index, text) } -func (tsr *TokenStreamRewriter) InsertBeforeToken(program_name string,token Token, text string){ +func (tsr *TokenStreamRewriter) InsertBeforeToken(program_name string, token Token, text string) { tsr.InsertBefore(program_name, token.GetTokenIndex(), text) } -func (tsr *TokenStreamRewriter) Replace(program_name string, from, to int, text string){ - if from > to || from < 0 || to < 0 || to >= tsr.tokens.Size(){ +func (tsr *TokenStreamRewriter) Replace(program_name string, from, to int, text string) { + if from > to || from < 0 || to < 0 || to >= tsr.tokens.Size() { panic(fmt.Sprintf("replace: range invalid: %d..%d(size=%d)", from, to, tsr.tokens.Size())) } @@ -357,207 +353,216 @@ func (tsr *TokenStreamRewriter) Replace(program_name string, from, to int, text tsr.AddToProgram(program_name, op) } -func (tsr *TokenStreamRewriter)ReplaceDefault(from, to int, text string) { +func (tsr *TokenStreamRewriter) ReplaceDefault(from, to int, text string) { tsr.Replace(Default_Program_Name, from, to, text) } -func (tsr *TokenStreamRewriter)ReplaceDefaultPos(index int, text string){ +func (tsr *TokenStreamRewriter) ReplaceDefaultPos(index int, text string) { tsr.ReplaceDefault(index, index, text) } -func (tsr *TokenStreamRewriter)ReplaceToken(program_name string, from, to Token, text string){ +func (tsr *TokenStreamRewriter) ReplaceToken(program_name string, from, to Token, text string) { tsr.Replace(program_name, from.GetTokenIndex(), to.GetTokenIndex(), text) } -func (tsr 
*TokenStreamRewriter)ReplaceTokenDefault(from, to Token, text string){ +func (tsr *TokenStreamRewriter) ReplaceTokenDefault(from, to Token, text string) { tsr.ReplaceToken(Default_Program_Name, from, to, text) } -func (tsr *TokenStreamRewriter)ReplaceTokenDefaultPos(index Token, text string){ +func (tsr *TokenStreamRewriter) ReplaceTokenDefaultPos(index Token, text string) { tsr.ReplaceTokenDefault(index, index, text) } -func (tsr *TokenStreamRewriter)Delete(program_name string, from, to int){ - tsr.Replace(program_name, from, to, "" ) +func (tsr *TokenStreamRewriter) Delete(program_name string, from, to int) { + tsr.Replace(program_name, from, to, "") } -func (tsr *TokenStreamRewriter)DeleteDefault(from, to int){ +func (tsr *TokenStreamRewriter) DeleteDefault(from, to int) { tsr.Delete(Default_Program_Name, from, to) } -func (tsr *TokenStreamRewriter)DeleteDefaultPos(index int){ - tsr.DeleteDefault(index,index) +func (tsr *TokenStreamRewriter) DeleteDefaultPos(index int) { + tsr.DeleteDefault(index, index) } -func (tsr *TokenStreamRewriter)DeleteToken(program_name string, from, to Token) { +func (tsr *TokenStreamRewriter) DeleteToken(program_name string, from, to Token) { tsr.ReplaceToken(program_name, from, to, "") } -func (tsr *TokenStreamRewriter)DeleteTokenDefault(from,to Token){ +func (tsr *TokenStreamRewriter) DeleteTokenDefault(from, to Token) { tsr.DeleteToken(Default_Program_Name, from, to) } -func (tsr *TokenStreamRewriter)GetLastRewriteTokenIndex(program_name string)int { +func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndex(program_name string) int { i, ok := tsr.last_rewrite_token_indexes[program_name] - if !ok{ + if !ok { return -1 } return i } -func (tsr *TokenStreamRewriter)GetLastRewriteTokenIndexDefault()int{ +func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndexDefault() int { return tsr.GetLastRewriteTokenIndex(Default_Program_Name) } -func (tsr *TokenStreamRewriter)SetLastRewriteTokenIndex(program_name string, i int){ +func (tsr 
*TokenStreamRewriter) SetLastRewriteTokenIndex(program_name string, i int) { tsr.last_rewrite_token_indexes[program_name] = i } -func (tsr *TokenStreamRewriter)InitializeProgram(name string)[]RewriteOperation{ +func (tsr *TokenStreamRewriter) InitializeProgram(name string) []RewriteOperation { is := make([]RewriteOperation, 0, Program_Init_Size) tsr.programs[name] = is return is } -func (tsr *TokenStreamRewriter)AddToProgram(name string, op RewriteOperation){ +func (tsr *TokenStreamRewriter) AddToProgram(name string, op RewriteOperation) { is := tsr.GetProgram(name) is = append(is, op) tsr.programs[name] = is } -func (tsr *TokenStreamRewriter)GetProgram(name string) []RewriteOperation { +func (tsr *TokenStreamRewriter) GetProgram(name string) []RewriteOperation { is, ok := tsr.programs[name] - if !ok{ + if !ok { is = tsr.InitializeProgram(name) } return is } -// Return the text from the original tokens altered per the -// instructions given to this rewriter. -func (tsr *TokenStreamRewriter)GetTextDefault() string{ + +// Return the text from the original tokens altered per the +// instructions given to this rewriter. +func (tsr *TokenStreamRewriter) GetTextDefault() string { return tsr.GetText( Default_Program_Name, NewInterval(0, tsr.tokens.Size()-1)) } -// Return the text from the original tokens altered per the -// instructions given to this rewriter. -func (tsr *TokenStreamRewriter)GetText(program_name string, interval *Interval) string { + +// Return the text from the original tokens altered per the +// instructions given to this rewriter. 
+func (tsr *TokenStreamRewriter) GetText(program_name string, interval *Interval) string { rewrites := tsr.programs[program_name] start := interval.Start - stop := interval.Stop + stop := interval.Stop // ensure start/end are in range stop = min(stop, tsr.tokens.Size()-1) - start = max(start,0) - if rewrites == nil || len(rewrites) == 0{ + start = max(start, 0) + if rewrites == nil || len(rewrites) == 0 { return tsr.tokens.GetTextFromInterval(interval) // no instructions to execute } buf := bytes.Buffer{} // First, optimize instruction stream indexToOp := reduceToSingleOperationPerIndex(rewrites) // Walk buffer, executing instructions and emitting tokens - for i:=start; i<=stop && i= tsr.tokens.Size()-1 {buf.WriteString(op.GetText())} + for _, op := range indexToOp { + if op.GetIndex() >= tsr.tokens.Size()-1 { + buf.WriteString(op.GetText()) + } } } return buf.String() } -// We need to combine operations and report invalid operations (like -// overlapping replaces that are not completed nested). Inserts to -// same index need to be combined etc... Here are the cases: +// We need to combine operations and report invalid operations (like +// overlapping replaces that are not completed nested). Inserts to +// same index need to be combined etc... 
Here are the cases: // -// I.i.u I.j.v leave alone, nonoverlapping -// I.i.u I.i.v combine: Iivu +// I.i.u I.j.v leave alone, nonoverlapping +// I.i.u I.i.v combine: Iivu // -// R.i-j.u R.x-y.v | i-j in x-y delete first R -// R.i-j.u R.i-j.v delete first R -// R.i-j.u R.x-y.v | x-y in i-j ERROR -// R.i-j.u R.x-y.v | boundaries overlap ERROR +// R.i-j.u R.x-y.v | i-j in x-y delete first R +// R.i-j.u R.i-j.v delete first R +// R.i-j.u R.x-y.v | x-y in i-j ERROR +// R.i-j.u R.x-y.v | boundaries overlap ERROR // -// Delete special case of replace (text==null): -// D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right) +// Delete special case of replace (text==null): +// D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right) // -// I.i.u R.x-y.v | i in (x+1)-y delete I (since insert before -// we're not deleting i) -// I.i.u R.x-y.v | i not in (x+1)-y leave alone, nonoverlapping -// R.x-y.v I.i.u | i in x-y ERROR -// R.x-y.v I.x.u R.x-y.uv (combine, delete I) -// R.x-y.v I.i.u | i not in x-y leave alone, nonoverlapping +// I.i.u R.x-y.v | i in (x+1)-y delete I (since insert before +// we're not deleting i) +// I.i.u R.x-y.v | i not in (x+1)-y leave alone, nonoverlapping +// R.x-y.v I.i.u | i in x-y ERROR +// R.x-y.v I.x.u R.x-y.uv (combine, delete I) +// R.x-y.v I.i.u | i not in x-y leave alone, nonoverlapping // -// I.i.u = insert u before op @ index i -// R.x-y.u = replace x-y indexed tokens with u +// I.i.u = insert u before op @ index i +// R.x-y.u = replace x-y indexed tokens with u // -// First we need to examine replaces. For any replace op: +// First we need to examine replaces. For any replace op: // -// 1. wipe out any insertions before op within that range. -// 2. Drop any replace op before that is contained completely within -// that range. -// 3. Throw exception upon boundary overlap with any previous replace. +// 1. wipe out any insertions before op within that range. +// 2. 
Drop any replace op before that is contained completely within +// that range. +// 3. Throw exception upon boundary overlap with any previous replace. // -// Then we can deal with inserts: +// Then we can deal with inserts: // -// 1. for any inserts to same index, combine even if not adjacent. -// 2. for any prior replace with same left boundary, combine this -// insert with replace and delete this replace. -// 3. throw exception if index in same range as previous replace +// 1. for any inserts to same index, combine even if not adjacent. +// 2. for any prior replace with same left boundary, combine this +// insert with replace and delete this replace. +// 3. throw exception if index in same range as previous replace // -// Don't actually delete; make op null in list. Easier to walk list. -// Later we can throw as we add to index → op map. +// Don't actually delete; make op null in list. Easier to walk list. +// Later we can throw as we add to index → op map. // -// Note that I.2 R.2-2 will wipe out I.2 even though, technically, the -// inserted stuff would be before the replace range. But, if you -// add tokens in front of a method body '{' and then delete the method -// body, I think the stuff before the '{' you added should disappear too. +// Note that I.2 R.2-2 will wipe out I.2 even though, technically, the +// inserted stuff would be before the replace range. But, if you +// add tokens in front of a method body '{' and then delete the method +// body, I think the stuff before the '{' you added should disappear too. // -// Return a map from token index to operation. -// -func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]RewriteOperation{ +// Return a map from token index to operation. 
+func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]RewriteOperation { // WALK REPLACES - for i:=0; i < len(rewrites); i++{ + for i := 0; i < len(rewrites); i++ { op := rewrites[i] - if op == nil{continue} + if op == nil { + continue + } rop, ok := op.(*ReplaceOp) - if !ok{continue} + if !ok { + continue + } // Wipe prior inserts within range - for j:=0; j rop.index && iop.index <=rop.LastIndex{ + } else if iop.index > rop.index && iop.index <= rop.LastIndex { // delete insert as it's a no-op. rewrites[iop.instruction_index] = nil } } } // Drop any prior replaces contained within - for j:=0; j=rop.index && prevop.LastIndex <= rop.LastIndex{ + for j := 0; j < i && j < len(rewrites); j++ { + if prevop, ok := rewrites[j].(*ReplaceOp); ok { + if prevop.index >= rop.index && prevop.LastIndex <= rop.LastIndex { // delete replace as it's a no-op. rewrites[prevop.instruction_index] = nil continue @@ -566,61 +571,67 @@ func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]Rewrit disjoint := prevop.LastIndex < rop.index || prevop.index > rop.LastIndex // Delete special case of replace (text==null): // D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right) - if prevop.text == "" && rop.text == "" && !disjoint{ + if prevop.text == "" && rop.text == "" && !disjoint { rewrites[prevop.instruction_index] = nil rop.index = min(prevop.index, rop.index) rop.LastIndex = max(prevop.LastIndex, rop.LastIndex) println("new rop" + rop.String()) //TODO: remove console write, taken from Java version - }else if !disjoint{ + } else if !disjoint { panic("replace op boundaries of " + rop.String() + " overlap with previous " + prevop.String()) } } } } // WALK INSERTS - for i:=0; i < len(rewrites); i++ { + for i := 0; i < len(rewrites); i++ { op := rewrites[i] - if op == nil{continue} + if op == nil { + continue + } //hack to replicate inheritance in composition _, iok := rewrites[i].(*InsertBeforeOp) _, aok := rewrites[i].(*InsertAfterOp) - 
if !iok && !aok{continue} + if !iok && !aok { + continue + } iop := rewrites[i] // combine current insert with prior if any at same index // deviating a bit from TokenStreamRewriter.java - hard to incorporate inheritance logic - for j:=0; j= rop.index && iop.GetIndex() <= rop.LastIndex{ - panic("insert op "+iop.String()+" within boundaries of previous "+rop.String()) + if iop.GetIndex() >= rop.index && iop.GetIndex() <= rop.LastIndex { + panic("insert op " + iop.String() + " within boundaries of previous " + rop.String()) } } } } m := map[int]RewriteOperation{} - for i:=0; i < len(rewrites); i++{ + for i := 0; i < len(rewrites); i++ { op := rewrites[i] - if op == nil {continue} - if _, ok := m[op.GetIndex()]; ok{ + if op == nil { + continue + } + if _, ok := m[op.GetIndex()]; ok { panic("should only be one op per index") } m[op.GetIndex()] = op @@ -628,22 +639,21 @@ func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]Rewrit return m } - /* Quick fixing Go lack of overloads - */ +*/ -func max(a,b int)int{ - if a>b{ +func max(a, b int) int { + if a > b { return a - }else { + } else { return b } } -func min(a,b int)int{ - if aaa", "DistinguishBetweenInsertAfterAndInsertBeforeToPreserverOrder", - func(r *TokenStreamRewriter){ - r.InsertBeforeDefault(0, "") - r.InsertAfterDefault(0, "") - r.InsertBeforeDefault(1, "") - r.InsertAfterDefault(1,"") - }), + func(r *TokenStreamRewriter) { + r.InsertBeforeDefault(0, "") + r.InsertAfterDefault(0, "") + r.InsertBeforeDefault(1, "") + r.InsertAfterDefault(1, "") + }), NewLexerTest("aa", "

    a

    a", "DistinguishBetweenInsertAfterAndInsertBeforeToPreserverOrder2", - func(r *TokenStreamRewriter){ - r.InsertBeforeDefault(0, "

    ") - r.InsertBeforeDefault(0, "") - r.InsertAfterDefault(0, "

    ") - r.InsertAfterDefault(0, "") - r.InsertBeforeDefault(1, "") - r.InsertAfterDefault(1,"") - }), + func(r *TokenStreamRewriter) { + r.InsertBeforeDefault(0, "

    ") + r.InsertBeforeDefault(0, "") + r.InsertAfterDefault(0, "

    ") + r.InsertAfterDefault(0, "") + r.InsertBeforeDefault(1, "") + r.InsertAfterDefault(1, "") + }), NewLexerTest("ab", "

    a

    !b", "DistinguishBetweenInsertAfterAndInsertBeforeToPreserverOrder2", - func(r *TokenStreamRewriter){ - r.InsertBeforeDefault(0, "

    ") - r.InsertBeforeDefault(0, "") - r.InsertBeforeDefault(0, "

    ") - r.InsertAfterDefault(0, "

    ") - r.InsertAfterDefault(0, "
    ") - r.InsertAfterDefault(0, "
    ") - r.InsertBeforeDefault(1, "!") - }), + func(r *TokenStreamRewriter) { + r.InsertBeforeDefault(0, "

    ") + r.InsertBeforeDefault(0, "") + r.InsertBeforeDefault(0, "

    ") + r.InsertAfterDefault(0, "

    ") + r.InsertAfterDefault(0, "
    ") + r.InsertAfterDefault(0, "
    ") + r.InsertBeforeDefault(1, "!") + }), } - - for _,c := range tests{ - t.Run(c.description,func(t *testing.T) { + for _, c := range tests { + t.Run(c.description, func(t *testing.T) { rewriter := prepare_rewriter(c.input) c.ops(rewriter) - if len(c.expected_exception)>0{ + if len(c.expected_exception) > 0 { panic_tester(t, c.expected_exception, rewriter) - }else{ + } else { result := rewriter.GetTextDefault() - if result!=c.expected{ + if result != c.expected { t.Errorf("Expected:%s | Result: %s", c.expected, result) } } - } ) + }) } } - // Suppress unused import error var _ = fmt.Printf +var _ = sync.Once{} var _ = unicode.IsLetter -var serializedLexerAtn = []int32{ - 4, 0, 3, 13, 6, 65535, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 1, 0, 1, 0, - 1, 1, 1, 1, 1, 2, 1, 2, 0, 0, 3, 1, 1, 3, 2, 5, 3, 1, 0, 0, 0, 12, 0, 1, - 1, 0, 0, 0, 0, 3, 1, 0, 0, 0, 0, 5, 1, 0, 0, 0, 1, 7, 1, 0, 0, 0, 3, 9, - 1, 0, 0, 0, 5, 11, 1, 0, 0, 0, 7, 8, 5, 97, 0, 0, 8, 2, 1, 0, 0, 0, 9, - 10, 5, 98, 0, 0, 10, 4, 1, 0, 0, 0, 11, 12, 5, 99, 0, 0, 12, 6, 1, 0, 0, - 0, 1, 0, 0, -} - -var lexerDeserializer = NewATNDeserializer(nil) -var lexerAtn = lexerDeserializer.Deserialize(serializedLexerAtn) - -var lexerChannelNames = []string{ - "DEFAULT_TOKEN_CHANNEL", "HIDDEN", -} - -var lexerModeNames = []string{ - "DEFAULT_MODE", -} - -var lexerLiteralNames = []string{ - "", "'a'", "'b'", "'c'", -} - -var lexerSymbolicNames = []string{ - "", "A", "B", "C", -} - -var lexerRuleNames = []string{ - "A", "B", "C", -} - type LexerA struct { *BaseLexer channelNames []string @@ -367,26 +333,76 @@ type LexerA struct { // TODO: EOF string } -var lexerDecisionToDFA = make([]*DFA, len(lexerAtn.DecisionToState)) +var lexeraLexerStaticData struct { + once sync.Once + serializedATN []int32 + channelNames []string + modeNames []string + literalNames []string + symbolicNames []string + ruleNames []string + predictionContextCache *PredictionContextCache + atn *ATN + decisionToDFA []*DFA +} -func init() { - for index, 
ds := range lexerAtn.DecisionToState { - lexerDecisionToDFA[index] = NewDFA(ds, index) +func lexeraLexerInit() { + staticData := &lexeraLexerStaticData + staticData.channelNames = []string{ + "DEFAULT_TOKEN_CHANNEL", "HIDDEN", + } + staticData.modeNames = []string{ + "DEFAULT_MODE", + } + staticData.literalNames = []string{ + "", "'a'", "'b'", "'c'", + } + staticData.symbolicNames = []string{ + "", "A", "B", "C", + } + staticData.ruleNames = []string{ + "A", "B", "C", + } + staticData.predictionContextCache = NewPredictionContextCache() + staticData.serializedATN = []int32{ + 4, 0, 3, 13, 6, -1, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 1, 0, 1, 0, 1, + 1, 1, 1, 1, 2, 1, 2, 0, 0, 3, 1, 1, 3, 2, 5, 3, 1, 0, 0, 12, 0, 1, 1, 0, + 0, 0, 0, 3, 1, 0, 0, 0, 0, 5, 1, 0, 0, 0, 1, 7, 1, 0, 0, 0, 3, 9, 1, 0, + 0, 0, 5, 11, 1, 0, 0, 0, 7, 8, 5, 97, 0, 0, 8, 2, 1, 0, 0, 0, 9, 10, 5, + 98, 0, 0, 10, 4, 1, 0, 0, 0, 11, 12, 5, 99, 0, 0, 12, 6, 1, 0, 0, 0, 1, + 0, 0, + } + deserializer := NewATNDeserializer(nil) + staticData.atn = deserializer.Deserialize(staticData.serializedATN) + atn := staticData.atn + staticData.decisionToDFA = make([]*DFA, len(atn.DecisionToState)) + decisionToDFA := staticData.decisionToDFA + for index, state := range atn.DecisionToState { + decisionToDFA[index] = NewDFA(state, index) } } -func NewLexerA(input CharStream) *LexerA { +// LexerAInit initializes any static state used to implement LexerA. By default the +// static state used to implement the lexer is lazily initialized during the first call to +// NewLexerA(). You can call this function if you wish to initialize the static state ahead +// of time. +func LexerAInit() { + staticData := &lexeraLexerStaticData + staticData.once.Do(lexeraLexerInit) +} +// NewLexerA produces a new lexer instance for the optional input antlr.CharStream. 
+func NewLexerA(input CharStream) *LexerA { + LexerAInit() l := new(LexerA) - l.BaseLexer = NewBaseLexer(input) - l.Interpreter = NewLexerATNSimulator(l, lexerAtn, lexerDecisionToDFA, NewPredictionContextCache()) - - l.channelNames = lexerChannelNames - l.modeNames = lexerModeNames - l.RuleNames = lexerRuleNames - l.LiteralNames = lexerLiteralNames - l.SymbolicNames = lexerSymbolicNames + staticData := &lexeraLexerStaticData + l.Interpreter = NewLexerATNSimulator(l, staticData.atn, staticData.decisionToDFA, staticData.predictionContextCache) + l.channelNames = staticData.channelNames + l.modeNames = staticData.modeNames + l.RuleNames = staticData.ruleNames + l.LiteralNames = staticData.literalNames + l.SymbolicNames = staticData.symbolicNames l.GrammarFileName = "LexerA.g4" // TODO: l.EOF = antlr.TokenEOF @@ -399,4 +415,3 @@ const ( LexerAB = 2 LexerAC = 3 ) - diff --git a/runtime/Go/antlr/trace_listener.go b/runtime/Go/antlr/trace_listener.go index e6fff992ff..7b663bf849 100644 --- a/runtime/Go/antlr/trace_listener.go +++ b/runtime/Go/antlr/trace_listener.go @@ -1,4 +1,4 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. // Use of this file is governed by the BSD 3-clause license that // can be found in the LICENSE.txt file in the project root. diff --git a/runtime/Go/antlr/transition.go b/runtime/Go/antlr/transition.go index 53056bd74f..36be4f7331 100644 --- a/runtime/Go/antlr/transition.go +++ b/runtime/Go/antlr/transition.go @@ -1,4 +1,4 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. // Use of this file is governed by the BSD 3-clause license that // can be found in the LICENSE.txt file in the project root. 
diff --git a/runtime/Go/antlr/tree.go b/runtime/Go/antlr/tree.go index 08ce22bba3..85b4f137b5 100644 --- a/runtime/Go/antlr/tree.go +++ b/runtime/Go/antlr/tree.go @@ -1,4 +1,4 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. // Use of this file is governed by the BSD 3-clause license that // can be found in the LICENSE.txt file in the project root. @@ -234,10 +234,8 @@ func (p *ParseTreeWalker) Walk(listener ParseTreeListener, t Tree) { } } -// // Enters a grammar rule by first triggering the generic event {@link ParseTreeListener//EnterEveryRule} // then by triggering the event specific to the given parse tree node -// func (p *ParseTreeWalker) EnterRule(listener ParseTreeListener, r RuleNode) { ctx := r.GetRuleContext().(ParserRuleContext) listener.EnterEveryRule(ctx) @@ -246,7 +244,6 @@ func (p *ParseTreeWalker) EnterRule(listener ParseTreeListener, r RuleNode) { // Exits a grammar rule by first triggering the event specific to the given parse tree node // then by triggering the generic event {@link ParseTreeListener//ExitEveryRule} -// func (p *ParseTreeWalker) ExitRule(listener ParseTreeListener, r RuleNode) { ctx := r.GetRuleContext().(ParserRuleContext) ctx.ExitRule(listener) diff --git a/runtime/Go/antlr/trees.go b/runtime/Go/antlr/trees.go index 80144ecade..d7dbb03228 100644 --- a/runtime/Go/antlr/trees.go +++ b/runtime/Go/antlr/trees.go @@ -1,4 +1,4 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. // Use of this file is governed by the BSD 3-clause license that // can be found in the LICENSE.txt file in the project root. @@ -9,8 +9,9 @@ import "fmt" /** A set of utility routines useful for all kinds of ANTLR trees. */ // Print out a whole tree in LISP form. {@link //getNodeText} is used on the -// node payloads to get the text for the nodes. 
Detect -// parse trees and extract data appropriately. +// +// node payloads to get the text for the nodes. Detect +// parse trees and extract data appropriately. func TreesStringTree(tree Tree, ruleNames []string, recog Recognizer) string { if recog != nil { @@ -80,8 +81,8 @@ func TreesGetChildren(t Tree) []Tree { } // Return a list of all ancestors of this node. The first node of -// list is the root and the last is the parent of this node. // +// list is the root and the last is the parent of this node. func TreesgetAncestors(t Tree) []Tree { ancestors := make([]Tree, 0) t = t.GetParent() diff --git a/runtime/Go/antlr/utils.go b/runtime/Go/antlr/utils.go index ec219df983..9fad5d916b 100644 --- a/runtime/Go/antlr/utils.go +++ b/runtime/Go/antlr/utils.go @@ -1,4 +1,4 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. // Use of this file is governed by the BSD 3-clause license that // can be found in the LICENSE.txt file in the project root. @@ -47,28 +47,25 @@ func (s *IntStack) Push(e int) { *s = append(*s, e) } -func standardEqualsFunction(a interface{}, b interface{}) bool { - - ac, oka := a.(comparable) - bc, okb := b.(comparable) +type comparable interface { + Equals(other Collectable[any]) bool +} - if !oka || !okb { - panic("Not Comparable") - } +func standardEqualsFunction(a Collectable[any], b Collectable[any]) bool { - return ac.equals(bc) + return a.Equals(b) } func standardHashFunction(a interface{}) int { if h, ok := a.(hasher); ok { - return h.hash() + return h.Hash() } panic("Not Hasher") } type hasher interface { - hash() int + Hash() int } const bitsPerWord = 64 @@ -171,7 +168,7 @@ func (b *BitSet) equals(other interface{}) bool { // We only compare set bits, so we cannot rely on the two slices having the same size. Its // possible for two BitSets to have different slice lengths but the same set bits. 
So we only - // compare the relavent words and ignore the trailing zeros. + // compare the relevant words and ignore the trailing zeros. bLen := b.minLen() otherLen := otherBitSet.minLen() diff --git a/runtime/Go/antlr/utils_set.go b/runtime/Go/antlr/utils_set.go index 0d4eac698d..c9bd6751e3 100644 --- a/runtime/Go/antlr/utils_set.go +++ b/runtime/Go/antlr/utils_set.go @@ -8,8 +8,6 @@ const ( _loadFactor = 0.75 ) -var _ Set = (*array2DHashSet)(nil) - type Set interface { Add(value interface{}) (added interface{}) Len() int @@ -20,9 +18,9 @@ type Set interface { } type array2DHashSet struct { - buckets [][]interface{} + buckets [][]Collectable[any] hashcodeFunction func(interface{}) int - equalsFunction func(interface{}, interface{}) bool + equalsFunction func(Collectable[any], Collectable[any]) bool n int // How many elements in set threshold int // when to expand @@ -61,11 +59,11 @@ func (as *array2DHashSet) Values() []interface{} { return values } -func (as *array2DHashSet) Contains(value interface{}) bool { +func (as *array2DHashSet) Contains(value Collectable[any]) bool { return as.Get(value) != nil } -func (as *array2DHashSet) Add(value interface{}) interface{} { +func (as *array2DHashSet) Add(value Collectable[any]) interface{} { if as.n > as.threshold { as.expand() } @@ -98,7 +96,7 @@ func (as *array2DHashSet) expand() { b := as.getBuckets(o) bucketLength := newBucketLengths[b] - var newBucket []interface{} + var newBucket []Collectable[any] if bucketLength == 0 { // new bucket newBucket = as.createBucket(as.initialBucketCapacity) @@ -107,7 +105,7 @@ func (as *array2DHashSet) expand() { newBucket = newTable[b] if bucketLength == len(newBucket) { // expand - newBucketCopy := make([]interface{}, len(newBucket)<<1) + newBucketCopy := make([]Collectable[any], len(newBucket)<<1) copy(newBucketCopy[:bucketLength], newBucket) newBucket = newBucketCopy newTable[b] = newBucket @@ -124,7 +122,7 @@ func (as *array2DHashSet) Len() int { return as.n } -func (as 
*array2DHashSet) Get(o interface{}) interface{} { +func (as *array2DHashSet) Get(o Collectable[any]) interface{} { if o == nil { return nil } @@ -147,7 +145,7 @@ func (as *array2DHashSet) Get(o interface{}) interface{} { return nil } -func (as *array2DHashSet) innerAdd(o interface{}) interface{} { +func (as *array2DHashSet) innerAdd(o Collectable[any]) interface{} { b := as.getBuckets(o) bucket := as.buckets[b] @@ -178,7 +176,7 @@ func (as *array2DHashSet) innerAdd(o interface{}) interface{} { // full bucket, expand and add to end oldLength := len(bucket) - bucketCopy := make([]interface{}, oldLength<<1) + bucketCopy := make([]Collectable[any], oldLength<<1) copy(bucketCopy[:oldLength], bucket) bucket = bucketCopy as.buckets[b] = bucket @@ -187,22 +185,22 @@ func (as *array2DHashSet) innerAdd(o interface{}) interface{} { return o } -func (as *array2DHashSet) getBuckets(value interface{}) int { +func (as *array2DHashSet) getBuckets(value Collectable[any]) int { hash := as.hashcodeFunction(value) return hash & (len(as.buckets) - 1) } -func (as *array2DHashSet) createBuckets(cap int) [][]interface{} { - return make([][]interface{}, cap) +func (as *array2DHashSet) createBuckets(cap int) [][]Collectable[any] { + return make([][]Collectable[any], cap) } -func (as *array2DHashSet) createBucket(cap int) []interface{} { - return make([]interface{}, cap) +func (as *array2DHashSet) createBucket(cap int) []Collectable[any] { + return make([]Collectable[any], cap) } func newArray2DHashSetWithCap( hashcodeFunction func(interface{}) int, - equalsFunction func(interface{}, interface{}) bool, + equalsFunction func(Collectable[any], Collectable[any]) bool, initCap int, initBucketCap int, ) *array2DHashSet { @@ -231,7 +229,7 @@ func newArray2DHashSetWithCap( func newArray2DHashSet( hashcodeFunction func(interface{}) int, - equalsFunction func(interface{}, interface{}) bool, + equalsFunction func(Collectable[any], Collectable[any]) bool, ) *array2DHashSet { return 
newArray2DHashSetWithCap(hashcodeFunction, equalsFunction, _initalCapacity, _initalBucketCapacity) } diff --git a/runtime/Go/antlr/v4/LICENSE b/runtime/Go/antlr/v4/LICENSE new file mode 100644 index 0000000000..52cf18e425 --- /dev/null +++ b/runtime/Go/antlr/v4/LICENSE @@ -0,0 +1,26 @@ +Copyright 2021 The ANTLR Project + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from this + software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/runtime/Go/antlr/v4/antlrdoc.go b/runtime/Go/antlr/v4/antlrdoc.go new file mode 100644 index 0000000000..aa3a5b363d --- /dev/null +++ b/runtime/Go/antlr/v4/antlrdoc.go @@ -0,0 +1,68 @@ +/* +Package antlr implements the Go version of the ANTLR 4 runtime. + +# The ANTLR Tool + +ANTLR (ANother Tool for Language Recognition) is a powerful parser generator for reading, processing, executing, +or translating structured text or binary files. It's widely used to build languages, tools, and frameworks. +From a grammar, ANTLR generates a parser that can build parse trees and also generates a listener interface +(or visitor) that makes it easy to respond to the recognition of phrases of interest. + +# Code Generation + +ANTLR supports the generation of code in a number of [target languages], and the generated code is supported by a +runtime library, written specifically to support the generated code in the target language. This library is the +runtime for the Go target. + +To generate code for the go target, it is generally recommended to place the source grammar files in a package of +their own, and use the `.sh` script method of generating code, using the go generate directive. In that same directory +it is usual, though not required, to place the antlr tool that should be used to generate the code. That does mean +that the antlr tool JAR file will be checked in to your source code control though, so you are free to use any other +way of specifying the version of the ANTLR tool to use, such as aliasing in `.zshrc` or equivalent, or a profile in +your IDE, or configuration in your CI system. + +Here is a general template for an ANTLR based recognizer in Go: + + . 
+ ├── myproject + ├── parser + │ ├── mygrammar.g4 + │ ├── antlr-4.11.0-complete.jar + │ ├── error_listeners.go + │ ├── generate.go + │ ├── generate.sh + ├── go.mod + ├── go.sum + ├── main.go + └── main_test.go + +Make sure that the package statement in your grammar file(s) reflects the go package they exist in. +The generate.go file then looks like this: + + package parser + + //go:generate ./generate.sh + +And the generate.sh file will look similar to this: + + #!/bin/sh + + alias antlr4='java -Xmx500M -cp "./antlr4-4.11.0-complete.jar:$CLASSPATH" org.antlr.v4.Tool' + antlr4 -Dlanguage=Go -no-visitor -package parser *.g4 + +depending on whether you want visitors or listeners or any other ANTLR options. + +From the command line at the root of your package “myproject” you can then simply issue the command: + + go generate ./... + +# Copyright Notice + +Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. + +Use of this file is governed by the BSD 3-clause license, which can be found in the [LICENSE.txt] file in the project root. + +[target languages]: https://github.com/antlr/antlr4/tree/master/runtime +[LICENSE.txt]: https://github.com/antlr/antlr4/blob/master/LICENSE.txt +*/ +package antlr diff --git a/runtime/Go/antlr/v4/atn.go b/runtime/Go/antlr/v4/atn.go new file mode 100644 index 0000000000..98010d2e6e --- /dev/null +++ b/runtime/Go/antlr/v4/atn.go @@ -0,0 +1,176 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. 
+ +package antlr + +import "sync" + +// ATNInvalidAltNumber is used to represent an ALT number that has yet to be calculated or +// which is invalid for a particular struct such as [*antlr.BaseRuleContext] +var ATNInvalidAltNumber int + +// ATN represents an “[Augmented Transition Network]”, though general in ANTLR the term +// “Augmented Recursive Transition Network” though there are some descriptions of “[Recursive Transition Network]” +// in existence. +// +// ATNs represent the main networks in the system and are serialized by the code generator and support [ALL(*)]. +// +// [Augmented Transition Network]: https://en.wikipedia.org/wiki/Augmented_transition_network +// [ALL(*)]: https://www.antlr.org/papers/allstar-techreport.pdf +// [Recursive Transition Network]: https://en.wikipedia.org/wiki/Recursive_transition_network +type ATN struct { + // DecisionToState is the decision points for all rules, subrules, optional + // blocks, ()+, ()*, etc. Each subrule/rule is a decision point, and we must track them so we + // can go back later and build DFA predictors for them. This includes + // all the rules, subrules, optional blocks, ()+, ()* etc... + DecisionToState []DecisionState + + // grammarType is the ATN type and is used for deserializing ATNs from strings. + grammarType int + + // lexerActions is referenced by action transitions in the ATN for lexer ATNs. + lexerActions []LexerAction + + // maxTokenType is the maximum value for any symbol recognized by a transition in the ATN. + maxTokenType int + + modeNameToStartState map[string]*TokensStartState + + modeToStartState []*TokensStartState + + // ruleToStartState maps from rule index to starting state number. + ruleToStartState []*RuleStartState + + // ruleToStopState maps from rule index to stop state number. + ruleToStopState []*RuleStopState + + // ruleToTokenType maps the rule index to the resulting token type for lexer + // ATNs. 
For parser ATNs, it maps the rule index to the generated bypass token + // type if ATNDeserializationOptions.isGenerateRuleBypassTransitions was + // specified, and otherwise is nil. + ruleToTokenType []int + + states []ATNState + + mu sync.Mutex + stateMu sync.RWMutex + edgeMu sync.RWMutex +} + +// NewATN returns a new ATN struct representing the given grammarType and is used +// for runtime deserialization of ATNs from the code generated by the ANTLR tool +func NewATN(grammarType int, maxTokenType int) *ATN { + return &ATN{ + grammarType: grammarType, + maxTokenType: maxTokenType, + modeNameToStartState: make(map[string]*TokensStartState), + } +} + +// NextTokensInContext computes and returns the set of valid tokens that can occur starting +// in state s. If ctx is nil, the set of tokens will not include what can follow +// the rule surrounding s. In other words, the set will be restricted to tokens +// reachable staying within the rule of s. +func (a *ATN) NextTokensInContext(s ATNState, ctx RuleContext) *IntervalSet { + return NewLL1Analyzer(a).Look(s, nil, ctx) +} + +// NextTokensNoContext computes and returns the set of valid tokens that can occur starting +// in state s and staying in same rule. [antlr.Token.EPSILON] is in set if we reach end of +// rule. +func (a *ATN) NextTokensNoContext(s ATNState) *IntervalSet { + a.mu.Lock() + defer a.mu.Unlock() + iset := s.GetNextTokenWithinRule() + if iset == nil { + iset = a.NextTokensInContext(s, nil) + iset.readOnly = true + s.SetNextTokenWithinRule(iset) + } + return iset +} + +// NextTokens computes and returns the set of valid tokens starting in state s, by +// calling either [NextTokensNoContext] (ctx == nil) or [NextTokensInContext] (ctx != nil). 
+func (a *ATN) NextTokens(s ATNState, ctx RuleContext) *IntervalSet { + if ctx == nil { + return a.NextTokensNoContext(s) + } + + return a.NextTokensInContext(s, ctx) +} + +func (a *ATN) addState(state ATNState) { + if state != nil { + state.SetATN(a) + state.SetStateNumber(len(a.states)) + } + + a.states = append(a.states, state) +} + +func (a *ATN) removeState(state ATNState) { + a.states[state.GetStateNumber()] = nil // Just free the memory; don't shift states in the slice +} + +func (a *ATN) defineDecisionState(s DecisionState) int { + a.DecisionToState = append(a.DecisionToState, s) + s.setDecision(len(a.DecisionToState) - 1) + + return s.getDecision() +} + +func (a *ATN) getDecisionState(decision int) DecisionState { + if len(a.DecisionToState) == 0 { + return nil + } + + return a.DecisionToState[decision] +} + +// getExpectedTokens computes the set of input symbols which could follow ATN +// state number stateNumber in the specified full parse context ctx and returns +// the set of potentially valid input symbols which could follow the specified +// state in the specified context. This method considers the complete parser +// context, but does not evaluate semantic predicates (i.e. all predicates +// encountered during the calculation are assumed true). If a path in the ATN +// exists from the starting state to the RuleStopState of the outermost context +// without Matching any symbols, Token.EOF is added to the returned set. +// +// A nil ctx defaults to ParserRuleContext.EMPTY. +// +// It panics if the ATN does not contain state stateNumber. 
+func (a *ATN) getExpectedTokens(stateNumber int, ctx RuleContext) *IntervalSet { + if stateNumber < 0 || stateNumber >= len(a.states) { + panic("Invalid state number.") + } + + s := a.states[stateNumber] + following := a.NextTokens(s, nil) + + if !following.contains(TokenEpsilon) { + return following + } + + expected := NewIntervalSet() + + expected.addSet(following) + expected.removeOne(TokenEpsilon) + + for ctx != nil && ctx.GetInvokingState() >= 0 && following.contains(TokenEpsilon) { + invokingState := a.states[ctx.GetInvokingState()] + rt := invokingState.GetTransitions()[0] + + following = a.NextTokens(rt.(*RuleTransition).followState, nil) + expected.addSet(following) + expected.removeOne(TokenEpsilon) + ctx = ctx.GetParent().(RuleContext) + } + + if following.contains(TokenEpsilon) { + expected.addOne(TokenEOF) + } + + return expected +} diff --git a/runtime/Go/antlr/v4/atn_config.go b/runtime/Go/antlr/v4/atn_config.go new file mode 100644 index 0000000000..7619fa172e --- /dev/null +++ b/runtime/Go/antlr/v4/atn_config.go @@ -0,0 +1,303 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" +) + +// ATNConfig is a tuple: (ATN state, predicted alt, syntactic, semantic +// context). The syntactic context is a graph-structured stack node whose +// path(s) to the root is the rule invocation(s) chain used to arrive at the +// state. The semantic context is the tree of semantic predicates encountered +// before reaching an ATN state. 
+type ATNConfig interface { + Equals(o Collectable[ATNConfig]) bool + Hash() int + + GetState() ATNState + GetAlt() int + GetSemanticContext() SemanticContext + + GetContext() PredictionContext + SetContext(PredictionContext) + + GetReachesIntoOuterContext() int + SetReachesIntoOuterContext(int) + + String() string + + getPrecedenceFilterSuppressed() bool + setPrecedenceFilterSuppressed(bool) +} + +type BaseATNConfig struct { + precedenceFilterSuppressed bool + state ATNState + alt int + context PredictionContext + semanticContext SemanticContext + reachesIntoOuterContext int +} + +func NewBaseATNConfig7(old *BaseATNConfig) ATNConfig { // TODO: Dup + return &BaseATNConfig{ + state: old.state, + alt: old.alt, + context: old.context, + semanticContext: old.semanticContext, + reachesIntoOuterContext: old.reachesIntoOuterContext, + } +} + +func NewBaseATNConfig6(state ATNState, alt int, context PredictionContext) *BaseATNConfig { + return NewBaseATNConfig5(state, alt, context, SemanticContextNone) +} + +func NewBaseATNConfig5(state ATNState, alt int, context PredictionContext, semanticContext SemanticContext) *BaseATNConfig { + if semanticContext == nil { + panic("semanticContext cannot be nil") // TODO: Necessary? 
+ } + + return &BaseATNConfig{state: state, alt: alt, context: context, semanticContext: semanticContext} +} + +func NewBaseATNConfig4(c ATNConfig, state ATNState) *BaseATNConfig { + return NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext()) +} + +func NewBaseATNConfig3(c ATNConfig, state ATNState, semanticContext SemanticContext) *BaseATNConfig { + return NewBaseATNConfig(c, state, c.GetContext(), semanticContext) +} + +func NewBaseATNConfig2(c ATNConfig, semanticContext SemanticContext) *BaseATNConfig { + return NewBaseATNConfig(c, c.GetState(), c.GetContext(), semanticContext) +} + +func NewBaseATNConfig1(c ATNConfig, state ATNState, context PredictionContext) *BaseATNConfig { + return NewBaseATNConfig(c, state, context, c.GetSemanticContext()) +} + +func NewBaseATNConfig(c ATNConfig, state ATNState, context PredictionContext, semanticContext SemanticContext) *BaseATNConfig { + if semanticContext == nil { + panic("semanticContext cannot be nil") + } + + return &BaseATNConfig{ + state: state, + alt: c.GetAlt(), + context: context, + semanticContext: semanticContext, + reachesIntoOuterContext: c.GetReachesIntoOuterContext(), + precedenceFilterSuppressed: c.getPrecedenceFilterSuppressed(), + } +} + +func (b *BaseATNConfig) getPrecedenceFilterSuppressed() bool { + return b.precedenceFilterSuppressed +} + +func (b *BaseATNConfig) setPrecedenceFilterSuppressed(v bool) { + b.precedenceFilterSuppressed = v +} + +func (b *BaseATNConfig) GetState() ATNState { + return b.state +} + +func (b *BaseATNConfig) GetAlt() int { + return b.alt +} + +func (b *BaseATNConfig) SetContext(v PredictionContext) { + b.context = v +} +func (b *BaseATNConfig) GetContext() PredictionContext { + return b.context +} + +func (b *BaseATNConfig) GetSemanticContext() SemanticContext { + return b.semanticContext +} + +func (b *BaseATNConfig) GetReachesIntoOuterContext() int { + return b.reachesIntoOuterContext +} + +func (b *BaseATNConfig) SetReachesIntoOuterContext(v int) { + 
b.reachesIntoOuterContext = v +} + +// Equals is the default comparison function for an ATNConfig when no specialist implementation is required +// for a collection. +// +// An ATN configuration is equal to another if both have the same state, they +// predict the same alternative, and syntactic/semantic contexts are the same. +func (b *BaseATNConfig) Equals(o Collectable[ATNConfig]) bool { + if b == o { + return true + } else if o == nil { + return false + } + + var other, ok = o.(*BaseATNConfig) + + if !ok { + return false + } + + var equal bool + + if b.context == nil { + equal = other.context == nil + } else { + equal = b.context.Equals(other.context) + } + + var ( + nums = b.state.GetStateNumber() == other.state.GetStateNumber() + alts = b.alt == other.alt + cons = b.semanticContext.Equals(other.semanticContext) + sups = b.precedenceFilterSuppressed == other.precedenceFilterSuppressed + ) + + return nums && alts && cons && sups && equal +} + +// Hash is the default hash function for BaseATNConfig, when no specialist hash function +// is required for a collection +func (b *BaseATNConfig) Hash() int { + var c int + if b.context != nil { + c = b.context.Hash() + } + + h := murmurInit(7) + h = murmurUpdate(h, b.state.GetStateNumber()) + h = murmurUpdate(h, b.alt) + h = murmurUpdate(h, c) + h = murmurUpdate(h, b.semanticContext.Hash()) + return murmurFinish(h, 4) +} + +func (b *BaseATNConfig) String() string { + var s1, s2, s3 string + + if b.context != nil { + s1 = ",[" + fmt.Sprint(b.context) + "]" + } + + if b.semanticContext != SemanticContextNone { + s2 = "," + fmt.Sprint(b.semanticContext) + } + + if b.reachesIntoOuterContext > 0 { + s3 = ",up=" + fmt.Sprint(b.reachesIntoOuterContext) + } + + return fmt.Sprintf("(%v,%v%v%v%v)", b.state, b.alt, s1, s2, s3) +} + +type LexerATNConfig struct { + *BaseATNConfig + lexerActionExecutor *LexerActionExecutor + passedThroughNonGreedyDecision bool +} + +func NewLexerATNConfig6(state ATNState, alt int, context 
PredictionContext) *LexerATNConfig { + return &LexerATNConfig{BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone)} +} + +func NewLexerATNConfig5(state ATNState, alt int, context PredictionContext, lexerActionExecutor *LexerActionExecutor) *LexerATNConfig { + return &LexerATNConfig{ + BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone), + lexerActionExecutor: lexerActionExecutor, + } +} + +func NewLexerATNConfig4(c *LexerATNConfig, state ATNState) *LexerATNConfig { + return &LexerATNConfig{ + BaseATNConfig: NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext()), + lexerActionExecutor: c.lexerActionExecutor, + passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state), + } +} + +func NewLexerATNConfig3(c *LexerATNConfig, state ATNState, lexerActionExecutor *LexerActionExecutor) *LexerATNConfig { + return &LexerATNConfig{ + BaseATNConfig: NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext()), + lexerActionExecutor: lexerActionExecutor, + passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state), + } +} + +func NewLexerATNConfig2(c *LexerATNConfig, state ATNState, context PredictionContext) *LexerATNConfig { + return &LexerATNConfig{ + BaseATNConfig: NewBaseATNConfig(c, state, context, c.GetSemanticContext()), + lexerActionExecutor: c.lexerActionExecutor, + passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state), + } +} + +func NewLexerATNConfig1(state ATNState, alt int, context PredictionContext) *LexerATNConfig { + return &LexerATNConfig{BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone)} +} + +// Hash is the default hash function for LexerATNConfig objects, it can be used directly or via +// the default comparator [ObjEqComparator]. 
+func (l *LexerATNConfig) Hash() int { + var f int + if l.passedThroughNonGreedyDecision { + f = 1 + } else { + f = 0 + } + h := murmurInit(7) + h = murmurUpdate(h, l.state.GetStateNumber()) + h = murmurUpdate(h, l.alt) + h = murmurUpdate(h, l.context.Hash()) + h = murmurUpdate(h, l.semanticContext.Hash()) + h = murmurUpdate(h, f) + h = murmurUpdate(h, l.lexerActionExecutor.Hash()) + h = murmurFinish(h, 6) + return h +} + +// Equals is the default comparison function for LexerATNConfig objects, it can be used directly or via +// the default comparator [ObjEqComparator]. +func (l *LexerATNConfig) Equals(other Collectable[ATNConfig]) bool { + if l == other { + return true + } + var othert, ok = other.(*LexerATNConfig) + + if l == other { + return true + } else if !ok { + return false + } else if l.passedThroughNonGreedyDecision != othert.passedThroughNonGreedyDecision { + return false + } + + var b bool + + if l.lexerActionExecutor != nil { + b = !l.lexerActionExecutor.Equals(othert.lexerActionExecutor) + } else { + b = othert.lexerActionExecutor != nil + } + + if b { + return false + } + + return l.BaseATNConfig.Equals(othert.BaseATNConfig) +} + +func checkNonGreedyDecision(source *LexerATNConfig, target ATNState) bool { + var ds, ok = target.(DecisionState) + + return source.passedThroughNonGreedyDecision || (ok && ds.getNonGreedy()) +} diff --git a/runtime/Go/antlr/v4/atn_config_set.go b/runtime/Go/antlr/v4/atn_config_set.go new file mode 100644 index 0000000000..582f5fa4b5 --- /dev/null +++ b/runtime/Go/antlr/v4/atn_config_set.go @@ -0,0 +1,439 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. 
+
+package antlr
+
+import "fmt"
+
+type ATNConfigSet interface {
+	Hash() int
+	Equals(o Collectable[ATNConfig]) bool
+	Add(ATNConfig, *DoubleDict) bool
+	AddAll([]ATNConfig) bool
+
+	GetStates() *JStore[ATNState, Comparator[ATNState]]
+	GetPredicates() []SemanticContext
+	GetItems() []ATNConfig
+
+	OptimizeConfigs(interpreter *BaseATNSimulator)
+
+	Length() int
+	IsEmpty() bool
+	Contains(ATNConfig) bool
+	ContainsFast(ATNConfig) bool
+	Clear()
+	String() string
+
+	HasSemanticContext() bool
+	SetHasSemanticContext(v bool)
+
+	ReadOnly() bool
+	SetReadOnly(bool)
+
+	GetConflictingAlts() *BitSet
+	SetConflictingAlts(*BitSet)
+
+	Alts() *BitSet
+
+	FullContext() bool
+
+	GetUniqueAlt() int
+	SetUniqueAlt(int)
+
+	GetDipsIntoOuterContext() bool
+	SetDipsIntoOuterContext(bool)
+}
+
+// BaseATNConfigSet is a specialized set of ATNConfig that tracks information
+// about its elements and can combine similar configurations using a
+// graph-structured stack.
+type BaseATNConfigSet struct {
+	cachedHash int
+
+	// configLookup is used to determine whether two BaseATNConfigSets are equal. We
+	// need all configurations with the same (s, i, _, semctx) to be equal. A key
+	// effectively doubles the number of objects associated with ATNConfigs. All
+	// keys are hashed by (s, i, _, pi), not including the context. Wiped out when
+	// read-only because a set becomes a DFA state.
+	configLookup *JStore[ATNConfig, Comparator[ATNConfig]]
+
+	// configs is the added elements, in insertion order.
+	configs []ATNConfig
+
+	// TODO: These fields make me pretty uncomfortable, but it is nice to pack up
+	// info together because it saves recomputation. Can we track conflicts as they
+	// are added to save scanning configs later?
+	conflictingAlts *BitSet
+
+	// dipsIntoOuterContext is used by parsers and lexers. In a lexer, it indicates
+	// we hit a pred while computing a closure operation. Do not make a DFA state
+	// from the BaseATNConfigSet in this case. TODO: How is this used by parsers?
+	dipsIntoOuterContext bool
+
+	// fullCtx is whether it is part of a full context LL prediction. Used to
+	// determine how to merge $. It is a wildcard with SLL, but not for an LL
+	// context merge.
+	fullCtx bool
+
+	// hasSemanticContext is used in parser and lexer. In lexer, it indicates we
+	// hit a pred while computing a closure operation. Don't make a DFA state from it.
+	hasSemanticContext bool
+
+	// readOnly is whether this set is read-only. When true, no code may
+	// manipulate the set, because DFA states will point at it and those must not
+	// change. Note that this flag does not protect the other fields;
+	// conflictingAlts in particular is assigned after readOnly is set.
+	readOnly bool
+
+	// uniqueAlt presumably caches the single alternative predicted by this set,
+	// with ATNInvalidAltNumber as the "not unique" sentinel (String() only
+	// prints it when it differs from that sentinel) — TODO confirm with callers.
+	uniqueAlt int
+}
+
+// Alts returns the set of alt numbers present across all configurations in this set.
+func (b *BaseATNConfigSet) Alts() *BitSet {
+	alts := NewBitSet()
+	for _, it := range b.configs {
+		alts.add(it.GetAlt())
+	}
+	return alts
+}
+
+func NewBaseATNConfigSet(fullCtx bool) *BaseATNConfigSet {
+	return &BaseATNConfigSet{
+		cachedHash:   -1,
+		configLookup: NewJStore[ATNConfig, Comparator[ATNConfig]](&ATNConfigComparator[ATNConfig]{}),
+		fullCtx:      fullCtx,
+	}
+}
+
+// Add merges contexts with existing configs for (s, i, pi, _), where s is the
+// ATNConfig.state, i is the ATNConfig.alt, and pi is the
+// ATNConfig.semanticContext. We use (s,i,pi) as the key. Updates
+// dipsIntoOuterContext and hasSemanticContext when necessary.
+func (b *BaseATNConfigSet) Add(config ATNConfig, mergeCache *DoubleDict) bool {
+	if b.readOnly {
+		panic("set is read-only")
+	}
+
+	if config.GetSemanticContext() != SemanticContextNone {
+		b.hasSemanticContext = true
+	}
+
+	if config.GetReachesIntoOuterContext() > 0 {
+		b.dipsIntoOuterContext = true
+	}
+
+	existing, present := b.configLookup.Put(config)
+
+	// The config was not already in the set
+	//
+	if !present {
+		b.cachedHash = -1
+		b.configs = append(b.configs, config) // Track order here
+		return true
+	}
+
+	// Merge a previous (s, i, pi, _) with it and save the result
+	rootIsWildcard := !b.fullCtx
+	merged := merge(existing.GetContext(), config.GetContext(), rootIsWildcard, mergeCache)
+
+	// No need to check for existing.context because config.context is in the cache,
+	// since the only way to create new graphs is the "call rule" and here. We cache
+	// at both places.
+	existing.SetReachesIntoOuterContext(intMax(existing.GetReachesIntoOuterContext(), config.GetReachesIntoOuterContext()))
+
+	// Preserve the precedence filter suppression during the merge
+	if config.getPrecedenceFilterSuppressed() {
+		existing.setPrecedenceFilterSuppressed(true)
+	}
+
+	// Replace the context because there is no need to do alt mapping
+	existing.SetContext(merged)
+
+	return true
+}
+
+// GetStates returns the set of distinct ATN states referenced by the configurations.
+func (b *BaseATNConfigSet) GetStates() *JStore[ATNState, Comparator[ATNState]] {
+
+	// states uses the standard comparator provided by the ATNState instance
+	//
+	states := NewJStore[ATNState, Comparator[ATNState]](&ObjEqComparator[ATNState]{})
+
+	for i := 0; i < len(b.configs); i++ {
+		states.Put(b.configs[i].GetState())
+	}
+
+	return states
+}
+
+func (b *BaseATNConfigSet) HasSemanticContext() bool {
+	return b.hasSemanticContext
+}
+
+func (b *BaseATNConfigSet) SetHasSemanticContext(v bool) {
+	b.hasSemanticContext = v
+}
+
+// GetPredicates collects the non-trivial semantic contexts of all configurations.
+func (b *BaseATNConfigSet) GetPredicates() []SemanticContext {
+	preds := make([]SemanticContext, 0)
+
+	for i := 0; i < len(b.configs); i++ {
+		c := b.configs[i].GetSemanticContext()
+
+		if c != SemanticContextNone {
+			preds = append(preds, c)
+		}
+	}
+
+	return preds
+}
+
+func (b *BaseATNConfigSet) GetItems() []ATNConfig {
+	return b.configs
+}
+
+// OptimizeConfigs replaces each configuration's context with its cached equivalent.
+func (b *BaseATNConfigSet) OptimizeConfigs(interpreter *BaseATNSimulator) {
+	if b.readOnly {
+		panic("set is read-only")
+	}
+
+	if b.configLookup.Len() == 0 {
+		return
+	}
+
+	for i := 0; i < len(b.configs); i++ {
+		config := b.configs[i]
+
+		config.SetContext(interpreter.getCachedContext(config.GetContext()))
+	}
+}
+
+// AddAll adds every configuration in coll; note it always returns false.
+func (b *BaseATNConfigSet) AddAll(coll []ATNConfig) bool {
+	for i := 0; i < len(coll); i++ {
+		b.Add(coll[i], nil)
+	}
+
+	return false
+}
+
+// Compare is a hack function just to verify that adding DFA states to the known
+// set works, so long as comparison of ATNConfigSets works. For that to work, we
+// need to make sure that the set of ATNConfigs in two sets are equivalent. We can't
+// know the order, so we do this inefficient hack. If this proves the point, then
+// we can change the config set to a better structure.
+func (b *BaseATNConfigSet) Compare(bs *BaseATNConfigSet) bool { + if len(b.configs) != len(bs.configs) { + return false + } + + for _, c := range b.configs { + found := false + for _, c2 := range bs.configs { + if c.Equals(c2) { + found = true + break + } + } + + if !found { + return false + } + + } + return true +} + +func (b *BaseATNConfigSet) Equals(other Collectable[ATNConfig]) bool { + if b == other { + return true + } else if _, ok := other.(*BaseATNConfigSet); !ok { + return false + } + + other2 := other.(*BaseATNConfigSet) + + return b.configs != nil && + b.fullCtx == other2.fullCtx && + b.uniqueAlt == other2.uniqueAlt && + b.conflictingAlts == other2.conflictingAlts && + b.hasSemanticContext == other2.hasSemanticContext && + b.dipsIntoOuterContext == other2.dipsIntoOuterContext && + b.Compare(other2) +} + +func (b *BaseATNConfigSet) Hash() int { + if b.readOnly { + if b.cachedHash == -1 { + b.cachedHash = b.hashCodeConfigs() + } + + return b.cachedHash + } + + return b.hashCodeConfigs() +} + +func (b *BaseATNConfigSet) hashCodeConfigs() int { + h := 1 + for _, config := range b.configs { + h = 31*h + config.Hash() + } + return h +} + +func (b *BaseATNConfigSet) Length() int { + return len(b.configs) +} + +func (b *BaseATNConfigSet) IsEmpty() bool { + return len(b.configs) == 0 +} + +func (b *BaseATNConfigSet) Contains(item ATNConfig) bool { + if b.configLookup == nil { + panic("not implemented for read-only sets") + } + + return b.configLookup.Contains(item) +} + +func (b *BaseATNConfigSet) ContainsFast(item ATNConfig) bool { + if b.configLookup == nil { + panic("not implemented for read-only sets") + } + + return b.configLookup.Contains(item) // TODO: containsFast is not implemented for Set +} + +func (b *BaseATNConfigSet) Clear() { + if b.readOnly { + panic("set is read-only") + } + + b.configs = make([]ATNConfig, 0) + b.cachedHash = -1 + b.configLookup = NewJStore[ATNConfig, Comparator[ATNConfig]](&BaseATNConfigComparator[ATNConfig]{}) +} + +func (b 
*BaseATNConfigSet) FullContext() bool { + return b.fullCtx +} + +func (b *BaseATNConfigSet) GetDipsIntoOuterContext() bool { + return b.dipsIntoOuterContext +} + +func (b *BaseATNConfigSet) SetDipsIntoOuterContext(v bool) { + b.dipsIntoOuterContext = v +} + +func (b *BaseATNConfigSet) GetUniqueAlt() int { + return b.uniqueAlt +} + +func (b *BaseATNConfigSet) SetUniqueAlt(v int) { + b.uniqueAlt = v +} + +func (b *BaseATNConfigSet) GetConflictingAlts() *BitSet { + return b.conflictingAlts +} + +func (b *BaseATNConfigSet) SetConflictingAlts(v *BitSet) { + b.conflictingAlts = v +} + +func (b *BaseATNConfigSet) ReadOnly() bool { + return b.readOnly +} + +func (b *BaseATNConfigSet) SetReadOnly(readOnly bool) { + b.readOnly = readOnly + + if readOnly { + b.configLookup = nil // Read only, so no need for the lookup cache + } +} + +func (b *BaseATNConfigSet) String() string { + s := "[" + + for i, c := range b.configs { + s += c.String() + + if i != len(b.configs)-1 { + s += ", " + } + } + + s += "]" + + if b.hasSemanticContext { + s += ",hasSemanticContext=" + fmt.Sprint(b.hasSemanticContext) + } + + if b.uniqueAlt != ATNInvalidAltNumber { + s += ",uniqueAlt=" + fmt.Sprint(b.uniqueAlt) + } + + if b.conflictingAlts != nil { + s += ",conflictingAlts=" + b.conflictingAlts.String() + } + + if b.dipsIntoOuterContext { + s += ",dipsIntoOuterContext" + } + + return s +} + +type OrderedATNConfigSet struct { + *BaseATNConfigSet +} + +func NewOrderedATNConfigSet() *OrderedATNConfigSet { + b := NewBaseATNConfigSet(false) + + // This set uses the standard Hash() and Equals() from ATNConfig + b.configLookup = NewJStore[ATNConfig, Comparator[ATNConfig]](&ObjEqComparator[ATNConfig]{}) + + return &OrderedATNConfigSet{BaseATNConfigSet: b} +} + +func hashATNConfig(i interface{}) int { + o := i.(ATNConfig) + hash := 7 + hash = 31*hash + o.GetState().GetStateNumber() + hash = 31*hash + o.GetAlt() + hash = 31*hash + o.GetSemanticContext().Hash() + return hash +} + +func equalATNConfigs(a, b 
interface{}) bool { + if a == nil || b == nil { + return false + } + + if a == b { + return true + } + + var ai, ok = a.(ATNConfig) + var bi, ok1 = b.(ATNConfig) + + if !ok || !ok1 { + return false + } + + if ai.GetState().GetStateNumber() != bi.GetState().GetStateNumber() { + return false + } + + if ai.GetAlt() != bi.GetAlt() { + return false + } + + return ai.GetSemanticContext().Equals(bi.GetSemanticContext()) +} diff --git a/runtime/Go/antlr/v4/atn_deserialization_options.go b/runtime/Go/antlr/v4/atn_deserialization_options.go new file mode 100644 index 0000000000..3c975ec7bf --- /dev/null +++ b/runtime/Go/antlr/v4/atn_deserialization_options.go @@ -0,0 +1,61 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import "errors" + +var defaultATNDeserializationOptions = ATNDeserializationOptions{true, true, false} + +type ATNDeserializationOptions struct { + readOnly bool + verifyATN bool + generateRuleBypassTransitions bool +} + +func (opts *ATNDeserializationOptions) ReadOnly() bool { + return opts.readOnly +} + +func (opts *ATNDeserializationOptions) SetReadOnly(readOnly bool) { + if opts.readOnly { + panic(errors.New("Cannot mutate read only ATNDeserializationOptions")) + } + opts.readOnly = readOnly +} + +func (opts *ATNDeserializationOptions) VerifyATN() bool { + return opts.verifyATN +} + +func (opts *ATNDeserializationOptions) SetVerifyATN(verifyATN bool) { + if opts.readOnly { + panic(errors.New("Cannot mutate read only ATNDeserializationOptions")) + } + opts.verifyATN = verifyATN +} + +func (opts *ATNDeserializationOptions) GenerateRuleBypassTransitions() bool { + return opts.generateRuleBypassTransitions +} + +func (opts *ATNDeserializationOptions) SetGenerateRuleBypassTransitions(generateRuleBypassTransitions bool) { + if opts.readOnly { + panic(errors.New("Cannot mutate read only 
ATNDeserializationOptions")) + } + opts.generateRuleBypassTransitions = generateRuleBypassTransitions +} + +func DefaultATNDeserializationOptions() *ATNDeserializationOptions { + return NewATNDeserializationOptions(&defaultATNDeserializationOptions) +} + +func NewATNDeserializationOptions(other *ATNDeserializationOptions) *ATNDeserializationOptions { + o := new(ATNDeserializationOptions) + if other != nil { + *o = *other + o.readOnly = false + } + return o +} diff --git a/runtime/Go/antlr/v4/atn_deserializer.go b/runtime/Go/antlr/v4/atn_deserializer.go new file mode 100644 index 0000000000..3888856b4b --- /dev/null +++ b/runtime/Go/antlr/v4/atn_deserializer.go @@ -0,0 +1,683 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" + "strconv" +) + +const serializedVersion = 4 + +type loopEndStateIntPair struct { + item0 *LoopEndState + item1 int +} + +type blockStartStateIntPair struct { + item0 BlockStartState + item1 int +} + +type ATNDeserializer struct { + options *ATNDeserializationOptions + data []int32 + pos int +} + +func NewATNDeserializer(options *ATNDeserializationOptions) *ATNDeserializer { + if options == nil { + options = &defaultATNDeserializationOptions + } + + return &ATNDeserializer{options: options} +} + +func stringInSlice(a string, list []string) int { + for i, b := range list { + if b == a { + return i + } + } + + return -1 +} + +func (a *ATNDeserializer) Deserialize(data []int32) *ATN { + a.data = data + a.pos = 0 + a.checkVersion() + + atn := a.readATN() + + a.readStates(atn) + a.readRules(atn) + a.readModes(atn) + + sets := a.readSets(atn, nil) + + a.readEdges(atn, sets) + a.readDecisions(atn) + a.readLexerActions(atn) + a.markPrecedenceDecisions(atn) + a.verifyATN(atn) + + if a.options.GenerateRuleBypassTransitions() && atn.grammarType == ATNTypeParser { + 
a.generateRuleBypassTransitions(atn) + // Re-verify after modification + a.verifyATN(atn) + } + + return atn + +} + +func (a *ATNDeserializer) checkVersion() { + version := a.readInt() + + if version != serializedVersion { + panic("Could not deserialize ATN with version " + strconv.Itoa(version) + " (expected " + strconv.Itoa(serializedVersion) + ").") + } +} + +func (a *ATNDeserializer) readATN() *ATN { + grammarType := a.readInt() + maxTokenType := a.readInt() + + return NewATN(grammarType, maxTokenType) +} + +func (a *ATNDeserializer) readStates(atn *ATN) { + nstates := a.readInt() + + // Allocate worst case size. + loopBackStateNumbers := make([]loopEndStateIntPair, 0, nstates) + endStateNumbers := make([]blockStartStateIntPair, 0, nstates) + + // Preallocate states slice. + atn.states = make([]ATNState, 0, nstates) + + for i := 0; i < nstates; i++ { + stype := a.readInt() + + // Ignore bad types of states + if stype == ATNStateInvalidType { + atn.addState(nil) + continue + } + + ruleIndex := a.readInt() + + s := a.stateFactory(stype, ruleIndex) + + if stype == ATNStateLoopEnd { + loopBackStateNumber := a.readInt() + + loopBackStateNumbers = append(loopBackStateNumbers, loopEndStateIntPair{s.(*LoopEndState), loopBackStateNumber}) + } else if s2, ok := s.(BlockStartState); ok { + endStateNumber := a.readInt() + + endStateNumbers = append(endStateNumbers, blockStartStateIntPair{s2, endStateNumber}) + } + + atn.addState(s) + } + + // Delay the assignment of loop back and end states until we know all the state + // instances have been initialized + for _, pair := range loopBackStateNumbers { + pair.item0.loopBackState = atn.states[pair.item1] + } + + for _, pair := range endStateNumbers { + pair.item0.setEndState(atn.states[pair.item1].(*BlockEndState)) + } + + numNonGreedyStates := a.readInt() + for j := 0; j < numNonGreedyStates; j++ { + stateNumber := a.readInt() + + atn.states[stateNumber].(DecisionState).setNonGreedy(true) + } + + numPrecedenceStates := 
a.readInt() + for j := 0; j < numPrecedenceStates; j++ { + stateNumber := a.readInt() + + atn.states[stateNumber].(*RuleStartState).isPrecedenceRule = true + } +} + +func (a *ATNDeserializer) readRules(atn *ATN) { + nrules := a.readInt() + + if atn.grammarType == ATNTypeLexer { + atn.ruleToTokenType = make([]int, nrules) + } + + atn.ruleToStartState = make([]*RuleStartState, nrules) + + for i := range atn.ruleToStartState { + s := a.readInt() + startState := atn.states[s].(*RuleStartState) + + atn.ruleToStartState[i] = startState + + if atn.grammarType == ATNTypeLexer { + tokenType := a.readInt() + + atn.ruleToTokenType[i] = tokenType + } + } + + atn.ruleToStopState = make([]*RuleStopState, nrules) + + for _, state := range atn.states { + if s2, ok := state.(*RuleStopState); ok { + atn.ruleToStopState[s2.ruleIndex] = s2 + atn.ruleToStartState[s2.ruleIndex].stopState = s2 + } + } +} + +func (a *ATNDeserializer) readModes(atn *ATN) { + nmodes := a.readInt() + atn.modeToStartState = make([]*TokensStartState, nmodes) + + for i := range atn.modeToStartState { + s := a.readInt() + + atn.modeToStartState[i] = atn.states[s].(*TokensStartState) + } +} + +func (a *ATNDeserializer) readSets(atn *ATN, sets []*IntervalSet) []*IntervalSet { + m := a.readInt() + + // Preallocate the needed capacity. 
+ if cap(sets)-len(sets) < m { + isets := make([]*IntervalSet, len(sets), len(sets)+m) + copy(isets, sets) + sets = isets + } + + for i := 0; i < m; i++ { + iset := NewIntervalSet() + + sets = append(sets, iset) + + n := a.readInt() + containsEOF := a.readInt() + + if containsEOF != 0 { + iset.addOne(-1) + } + + for j := 0; j < n; j++ { + i1 := a.readInt() + i2 := a.readInt() + + iset.addRange(i1, i2) + } + } + + return sets +} + +func (a *ATNDeserializer) readEdges(atn *ATN, sets []*IntervalSet) { + nedges := a.readInt() + + for i := 0; i < nedges; i++ { + var ( + src = a.readInt() + trg = a.readInt() + ttype = a.readInt() + arg1 = a.readInt() + arg2 = a.readInt() + arg3 = a.readInt() + trans = a.edgeFactory(atn, ttype, src, trg, arg1, arg2, arg3, sets) + srcState = atn.states[src] + ) + + srcState.AddTransition(trans, -1) + } + + // Edges for rule stop states can be derived, so they are not serialized + for _, state := range atn.states { + for _, t := range state.GetTransitions() { + var rt, ok = t.(*RuleTransition) + + if !ok { + continue + } + + outermostPrecedenceReturn := -1 + + if atn.ruleToStartState[rt.getTarget().GetRuleIndex()].isPrecedenceRule { + if rt.precedence == 0 { + outermostPrecedenceReturn = rt.getTarget().GetRuleIndex() + } + } + + trans := NewEpsilonTransition(rt.followState, outermostPrecedenceReturn) + + atn.ruleToStopState[rt.getTarget().GetRuleIndex()].AddTransition(trans, -1) + } + } + + for _, state := range atn.states { + if s2, ok := state.(BlockStartState); ok { + // We need to know the end state to set its start state + if s2.getEndState() == nil { + panic("IllegalState") + } + + // Block end states can only be associated to a single block start state + if s2.getEndState().startState != nil { + panic("IllegalState") + } + + s2.getEndState().startState = state + } + + if s2, ok := state.(*PlusLoopbackState); ok { + for _, t := range s2.GetTransitions() { + if t2, ok := t.getTarget().(*PlusBlockStartState); ok { + t2.loopBackState = 
state + } + } + } else if s2, ok := state.(*StarLoopbackState); ok { + for _, t := range s2.GetTransitions() { + if t2, ok := t.getTarget().(*StarLoopEntryState); ok { + t2.loopBackState = state + } + } + } + } +} + +func (a *ATNDeserializer) readDecisions(atn *ATN) { + ndecisions := a.readInt() + + for i := 0; i < ndecisions; i++ { + s := a.readInt() + decState := atn.states[s].(DecisionState) + + atn.DecisionToState = append(atn.DecisionToState, decState) + decState.setDecision(i) + } +} + +func (a *ATNDeserializer) readLexerActions(atn *ATN) { + if atn.grammarType == ATNTypeLexer { + count := a.readInt() + + atn.lexerActions = make([]LexerAction, count) + + for i := range atn.lexerActions { + actionType := a.readInt() + data1 := a.readInt() + data2 := a.readInt() + atn.lexerActions[i] = a.lexerActionFactory(actionType, data1, data2) + } + } +} + +func (a *ATNDeserializer) generateRuleBypassTransitions(atn *ATN) { + count := len(atn.ruleToStartState) + + for i := 0; i < count; i++ { + atn.ruleToTokenType[i] = atn.maxTokenType + i + 1 + } + + for i := 0; i < count; i++ { + a.generateRuleBypassTransition(atn, i) + } +} + +func (a *ATNDeserializer) generateRuleBypassTransition(atn *ATN, idx int) { + bypassStart := NewBasicBlockStartState() + + bypassStart.ruleIndex = idx + atn.addState(bypassStart) + + bypassStop := NewBlockEndState() + + bypassStop.ruleIndex = idx + atn.addState(bypassStop) + + bypassStart.endState = bypassStop + + atn.defineDecisionState(bypassStart.BaseDecisionState) + + bypassStop.startState = bypassStart + + var excludeTransition Transition + var endState ATNState + + if atn.ruleToStartState[idx].isPrecedenceRule { + // Wrap from the beginning of the rule to the StarLoopEntryState + endState = nil + + for i := 0; i < len(atn.states); i++ { + state := atn.states[i] + + if a.stateIsEndStateFor(state, idx) != nil { + endState = state + excludeTransition = state.(*StarLoopEntryState).loopBackState.GetTransitions()[0] + + break + } + } + + if 
excludeTransition == nil { + panic("Couldn't identify final state of the precedence rule prefix section.") + } + } else { + endState = atn.ruleToStopState[idx] + } + + // All non-excluded transitions that currently target end state need to target + // blockEnd instead + for i := 0; i < len(atn.states); i++ { + state := atn.states[i] + + for j := 0; j < len(state.GetTransitions()); j++ { + transition := state.GetTransitions()[j] + + if transition == excludeTransition { + continue + } + + if transition.getTarget() == endState { + transition.setTarget(bypassStop) + } + } + } + + // All transitions leaving the rule start state need to leave blockStart instead + ruleToStartState := atn.ruleToStartState[idx] + count := len(ruleToStartState.GetTransitions()) + + for count > 0 { + bypassStart.AddTransition(ruleToStartState.GetTransitions()[count-1], -1) + ruleToStartState.SetTransitions([]Transition{ruleToStartState.GetTransitions()[len(ruleToStartState.GetTransitions())-1]}) + } + + // Link the new states + atn.ruleToStartState[idx].AddTransition(NewEpsilonTransition(bypassStart, -1), -1) + bypassStop.AddTransition(NewEpsilonTransition(endState, -1), -1) + + MatchState := NewBasicState() + + atn.addState(MatchState) + MatchState.AddTransition(NewAtomTransition(bypassStop, atn.ruleToTokenType[idx]), -1) + bypassStart.AddTransition(NewEpsilonTransition(MatchState, -1), -1) +} + +func (a *ATNDeserializer) stateIsEndStateFor(state ATNState, idx int) ATNState { + if state.GetRuleIndex() != idx { + return nil + } + + if _, ok := state.(*StarLoopEntryState); !ok { + return nil + } + + maybeLoopEndState := state.GetTransitions()[len(state.GetTransitions())-1].getTarget() + + if _, ok := maybeLoopEndState.(*LoopEndState); !ok { + return nil + } + + var _, ok = maybeLoopEndState.GetTransitions()[0].getTarget().(*RuleStopState) + + if maybeLoopEndState.(*LoopEndState).epsilonOnlyTransitions && ok { + return state + } + + return nil +} + +// markPrecedenceDecisions analyzes the 
StarLoopEntryState states in the +// specified ATN to set the StarLoopEntryState.precedenceRuleDecision field to +// the correct value. +func (a *ATNDeserializer) markPrecedenceDecisions(atn *ATN) { + for _, state := range atn.states { + if _, ok := state.(*StarLoopEntryState); !ok { + continue + } + + // We analyze the ATN to determine if a ATN decision state is the + // decision for the closure block that determines whether a + // precedence rule should continue or complete. + if atn.ruleToStartState[state.GetRuleIndex()].isPrecedenceRule { + maybeLoopEndState := state.GetTransitions()[len(state.GetTransitions())-1].getTarget() + + if s3, ok := maybeLoopEndState.(*LoopEndState); ok { + var _, ok2 = maybeLoopEndState.GetTransitions()[0].getTarget().(*RuleStopState) + + if s3.epsilonOnlyTransitions && ok2 { + state.(*StarLoopEntryState).precedenceRuleDecision = true + } + } + } + } +} + +func (a *ATNDeserializer) verifyATN(atn *ATN) { + if !a.options.VerifyATN() { + return + } + + // Verify assumptions + for _, state := range atn.states { + if state == nil { + continue + } + + a.checkCondition(state.GetEpsilonOnlyTransitions() || len(state.GetTransitions()) <= 1, "") + + switch s2 := state.(type) { + case *PlusBlockStartState: + a.checkCondition(s2.loopBackState != nil, "") + + case *StarLoopEntryState: + a.checkCondition(s2.loopBackState != nil, "") + a.checkCondition(len(s2.GetTransitions()) == 2, "") + + switch s2.transitions[0].getTarget().(type) { + case *StarBlockStartState: + _, ok := s2.transitions[1].getTarget().(*LoopEndState) + + a.checkCondition(ok, "") + a.checkCondition(!s2.nonGreedy, "") + + case *LoopEndState: + var _, ok = s2.transitions[1].getTarget().(*StarBlockStartState) + + a.checkCondition(ok, "") + a.checkCondition(s2.nonGreedy, "") + + default: + panic("IllegalState") + } + + case *StarLoopbackState: + a.checkCondition(len(state.GetTransitions()) == 1, "") + + var _, ok = state.GetTransitions()[0].getTarget().(*StarLoopEntryState) + + 
a.checkCondition(ok, "") + + case *LoopEndState: + a.checkCondition(s2.loopBackState != nil, "") + + case *RuleStartState: + a.checkCondition(s2.stopState != nil, "") + + case BlockStartState: + a.checkCondition(s2.getEndState() != nil, "") + + case *BlockEndState: + a.checkCondition(s2.startState != nil, "") + + case DecisionState: + a.checkCondition(len(s2.GetTransitions()) <= 1 || s2.getDecision() >= 0, "") + + default: + var _, ok = s2.(*RuleStopState) + + a.checkCondition(len(s2.GetTransitions()) <= 1 || ok, "") + } + } +} + +func (a *ATNDeserializer) checkCondition(condition bool, message string) { + if !condition { + if message == "" { + message = "IllegalState" + } + + panic(message) + } +} + +func (a *ATNDeserializer) readInt() int { + v := a.data[a.pos] + + a.pos++ + + return int(v) // data is 32 bits but int is at least that big +} + +func (a *ATNDeserializer) edgeFactory(atn *ATN, typeIndex, src, trg, arg1, arg2, arg3 int, sets []*IntervalSet) Transition { + target := atn.states[trg] + + switch typeIndex { + case TransitionEPSILON: + return NewEpsilonTransition(target, -1) + + case TransitionRANGE: + if arg3 != 0 { + return NewRangeTransition(target, TokenEOF, arg2) + } + + return NewRangeTransition(target, arg1, arg2) + + case TransitionRULE: + return NewRuleTransition(atn.states[arg1], arg2, arg3, target) + + case TransitionPREDICATE: + return NewPredicateTransition(target, arg1, arg2, arg3 != 0) + + case TransitionPRECEDENCE: + return NewPrecedencePredicateTransition(target, arg1) + + case TransitionATOM: + if arg3 != 0 { + return NewAtomTransition(target, TokenEOF) + } + + return NewAtomTransition(target, arg1) + + case TransitionACTION: + return NewActionTransition(target, arg1, arg2, arg3 != 0) + + case TransitionSET: + return NewSetTransition(target, sets[arg1]) + + case TransitionNOTSET: + return NewNotSetTransition(target, sets[arg1]) + + case TransitionWILDCARD: + return NewWildcardTransition(target) + } + + panic("The specified transition 
type is not valid.") +} + +func (a *ATNDeserializer) stateFactory(typeIndex, ruleIndex int) ATNState { + var s ATNState + + switch typeIndex { + case ATNStateInvalidType: + return nil + + case ATNStateBasic: + s = NewBasicState() + + case ATNStateRuleStart: + s = NewRuleStartState() + + case ATNStateBlockStart: + s = NewBasicBlockStartState() + + case ATNStatePlusBlockStart: + s = NewPlusBlockStartState() + + case ATNStateStarBlockStart: + s = NewStarBlockStartState() + + case ATNStateTokenStart: + s = NewTokensStartState() + + case ATNStateRuleStop: + s = NewRuleStopState() + + case ATNStateBlockEnd: + s = NewBlockEndState() + + case ATNStateStarLoopBack: + s = NewStarLoopbackState() + + case ATNStateStarLoopEntry: + s = NewStarLoopEntryState() + + case ATNStatePlusLoopBack: + s = NewPlusLoopbackState() + + case ATNStateLoopEnd: + s = NewLoopEndState() + + default: + panic(fmt.Sprintf("state type %d is invalid", typeIndex)) + } + + s.SetRuleIndex(ruleIndex) + + return s +} + +func (a *ATNDeserializer) lexerActionFactory(typeIndex, data1, data2 int) LexerAction { + switch typeIndex { + case LexerActionTypeChannel: + return NewLexerChannelAction(data1) + + case LexerActionTypeCustom: + return NewLexerCustomAction(data1, data2) + + case LexerActionTypeMode: + return NewLexerModeAction(data1) + + case LexerActionTypeMore: + return LexerMoreActionINSTANCE + + case LexerActionTypePopMode: + return LexerPopModeActionINSTANCE + + case LexerActionTypePushMode: + return NewLexerPushModeAction(data1) + + case LexerActionTypeSkip: + return LexerSkipActionINSTANCE + + case LexerActionTypeType: + return NewLexerTypeAction(data1) + + default: + panic(fmt.Sprintf("lexer action %d is invalid", typeIndex)) + } +} diff --git a/runtime/Go/antlr/v4/atn_simulator.go b/runtime/Go/antlr/v4/atn_simulator.go new file mode 100644 index 0000000000..41529115fa --- /dev/null +++ b/runtime/Go/antlr/v4/atn_simulator.go @@ -0,0 +1,50 @@ +// Copyright (c) 2012-2022 The ANTLR Project. 
All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +var ATNSimulatorError = NewDFAState(0x7FFFFFFF, NewBaseATNConfigSet(false)) + +type IATNSimulator interface { + SharedContextCache() *PredictionContextCache + ATN() *ATN + DecisionToDFA() []*DFA +} + +type BaseATNSimulator struct { + atn *ATN + sharedContextCache *PredictionContextCache + decisionToDFA []*DFA +} + +func NewBaseATNSimulator(atn *ATN, sharedContextCache *PredictionContextCache) *BaseATNSimulator { + b := new(BaseATNSimulator) + + b.atn = atn + b.sharedContextCache = sharedContextCache + + return b +} + +func (b *BaseATNSimulator) getCachedContext(context PredictionContext) PredictionContext { + if b.sharedContextCache == nil { + return context + } + + visited := make(map[PredictionContext]PredictionContext) + + return getCachedBasePredictionContext(context, b.sharedContextCache, visited) +} + +func (b *BaseATNSimulator) SharedContextCache() *PredictionContextCache { + return b.sharedContextCache +} + +func (b *BaseATNSimulator) ATN() *ATN { + return b.atn +} + +func (b *BaseATNSimulator) DecisionToDFA() []*DFA { + return b.decisionToDFA +} diff --git a/runtime/Go/antlr/v4/atn_state.go b/runtime/Go/antlr/v4/atn_state.go new file mode 100644 index 0000000000..1f2a56bc31 --- /dev/null +++ b/runtime/Go/antlr/v4/atn_state.go @@ -0,0 +1,393 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import "strconv" + +// Constants for serialization. 
+const ( + ATNStateInvalidType = 0 + ATNStateBasic = 1 + ATNStateRuleStart = 2 + ATNStateBlockStart = 3 + ATNStatePlusBlockStart = 4 + ATNStateStarBlockStart = 5 + ATNStateTokenStart = 6 + ATNStateRuleStop = 7 + ATNStateBlockEnd = 8 + ATNStateStarLoopBack = 9 + ATNStateStarLoopEntry = 10 + ATNStatePlusLoopBack = 11 + ATNStateLoopEnd = 12 + + ATNStateInvalidStateNumber = -1 +) + +var ATNStateInitialNumTransitions = 4 + +type ATNState interface { + GetEpsilonOnlyTransitions() bool + + GetRuleIndex() int + SetRuleIndex(int) + + GetNextTokenWithinRule() *IntervalSet + SetNextTokenWithinRule(*IntervalSet) + + GetATN() *ATN + SetATN(*ATN) + + GetStateType() int + + GetStateNumber() int + SetStateNumber(int) + + GetTransitions() []Transition + SetTransitions([]Transition) + AddTransition(Transition, int) + + String() string + Hash() int + Equals(Collectable[ATNState]) bool +} + +type BaseATNState struct { + // NextTokenWithinRule caches lookahead during parsing. Not used during construction. + NextTokenWithinRule *IntervalSet + + // atn is the current ATN. + atn *ATN + + epsilonOnlyTransitions bool + + // ruleIndex tracks the Rule index because there are no Rule objects at runtime. + ruleIndex int + + stateNumber int + + stateType int + + // Track the transitions emanating from this ATN state. 
+ transitions []Transition +} + +func NewBaseATNState() *BaseATNState { + return &BaseATNState{stateNumber: ATNStateInvalidStateNumber, stateType: ATNStateInvalidType} +} + +func (as *BaseATNState) GetRuleIndex() int { + return as.ruleIndex +} + +func (as *BaseATNState) SetRuleIndex(v int) { + as.ruleIndex = v +} +func (as *BaseATNState) GetEpsilonOnlyTransitions() bool { + return as.epsilonOnlyTransitions +} + +func (as *BaseATNState) GetATN() *ATN { + return as.atn +} + +func (as *BaseATNState) SetATN(atn *ATN) { + as.atn = atn +} + +func (as *BaseATNState) GetTransitions() []Transition { + return as.transitions +} + +func (as *BaseATNState) SetTransitions(t []Transition) { + as.transitions = t +} + +func (as *BaseATNState) GetStateType() int { + return as.stateType +} + +func (as *BaseATNState) GetStateNumber() int { + return as.stateNumber +} + +func (as *BaseATNState) SetStateNumber(stateNumber int) { + as.stateNumber = stateNumber +} + +func (as *BaseATNState) GetNextTokenWithinRule() *IntervalSet { + return as.NextTokenWithinRule +} + +func (as *BaseATNState) SetNextTokenWithinRule(v *IntervalSet) { + as.NextTokenWithinRule = v +} + +func (as *BaseATNState) Hash() int { + return as.stateNumber +} + +func (as *BaseATNState) String() string { + return strconv.Itoa(as.stateNumber) +} + +func (as *BaseATNState) Equals(other Collectable[ATNState]) bool { + if ot, ok := other.(ATNState); ok { + return as.stateNumber == ot.GetStateNumber() + } + + return false +} + +func (as *BaseATNState) isNonGreedyExitState() bool { + return false +} + +func (as *BaseATNState) AddTransition(trans Transition, index int) { + if len(as.transitions) == 0 { + as.epsilonOnlyTransitions = trans.getIsEpsilon() + } else if as.epsilonOnlyTransitions != trans.getIsEpsilon() { + as.epsilonOnlyTransitions = false + } + + if index == -1 { + as.transitions = append(as.transitions, trans) + } else { + as.transitions = append(as.transitions[:index], append([]Transition{trans}, 
as.transitions[index:]...)...) + // TODO: as.transitions.splice(index, 1, trans) + } +} + +type BasicState struct { + *BaseATNState +} + +func NewBasicState() *BasicState { + b := NewBaseATNState() + + b.stateType = ATNStateBasic + + return &BasicState{BaseATNState: b} +} + +type DecisionState interface { + ATNState + + getDecision() int + setDecision(int) + + getNonGreedy() bool + setNonGreedy(bool) +} + +type BaseDecisionState struct { + *BaseATNState + decision int + nonGreedy bool +} + +func NewBaseDecisionState() *BaseDecisionState { + return &BaseDecisionState{BaseATNState: NewBaseATNState(), decision: -1} +} + +func (s *BaseDecisionState) getDecision() int { + return s.decision +} + +func (s *BaseDecisionState) setDecision(b int) { + s.decision = b +} + +func (s *BaseDecisionState) getNonGreedy() bool { + return s.nonGreedy +} + +func (s *BaseDecisionState) setNonGreedy(b bool) { + s.nonGreedy = b +} + +type BlockStartState interface { + DecisionState + + getEndState() *BlockEndState + setEndState(*BlockEndState) +} + +// BaseBlockStartState is the start of a regular (...) block. +type BaseBlockStartState struct { + *BaseDecisionState + endState *BlockEndState +} + +func NewBlockStartState() *BaseBlockStartState { + return &BaseBlockStartState{BaseDecisionState: NewBaseDecisionState()} +} + +func (s *BaseBlockStartState) getEndState() *BlockEndState { + return s.endState +} + +func (s *BaseBlockStartState) setEndState(b *BlockEndState) { + s.endState = b +} + +type BasicBlockStartState struct { + *BaseBlockStartState +} + +func NewBasicBlockStartState() *BasicBlockStartState { + b := NewBlockStartState() + + b.stateType = ATNStateBlockStart + + return &BasicBlockStartState{BaseBlockStartState: b} +} + +var _ BlockStartState = &BasicBlockStartState{} + +// BlockEndState is a terminal node of a simple (a|b|c) block. 
+type BlockEndState struct { + *BaseATNState + startState ATNState +} + +func NewBlockEndState() *BlockEndState { + b := NewBaseATNState() + + b.stateType = ATNStateBlockEnd + + return &BlockEndState{BaseATNState: b} +} + +// RuleStopState is the last node in the ATN for a rule, unless that rule is the +// start symbol. In that case, there is one transition to EOF. Later, we might +// encode references to all calls to this rule to compute FOLLOW sets for error +// handling. +type RuleStopState struct { + *BaseATNState +} + +func NewRuleStopState() *RuleStopState { + b := NewBaseATNState() + + b.stateType = ATNStateRuleStop + + return &RuleStopState{BaseATNState: b} +} + +type RuleStartState struct { + *BaseATNState + stopState ATNState + isPrecedenceRule bool +} + +func NewRuleStartState() *RuleStartState { + b := NewBaseATNState() + + b.stateType = ATNStateRuleStart + + return &RuleStartState{BaseATNState: b} +} + +// PlusLoopbackState is a decision state for A+ and (A|B)+. It has two +// transitions: one to the loop back to start of the block, and one to exit. +type PlusLoopbackState struct { + *BaseDecisionState +} + +func NewPlusLoopbackState() *PlusLoopbackState { + b := NewBaseDecisionState() + + b.stateType = ATNStatePlusLoopBack + + return &PlusLoopbackState{BaseDecisionState: b} +} + +// PlusBlockStartState is the start of a (A|B|...)+ loop. Technically it is a +// decision state; we don't use it for code generation. Somebody might need it, +// it is included for completeness. In reality, PlusLoopbackState is the real +// decision-making node for A+. +type PlusBlockStartState struct { + *BaseBlockStartState + loopBackState ATNState +} + +func NewPlusBlockStartState() *PlusBlockStartState { + b := NewBlockStartState() + + b.stateType = ATNStatePlusBlockStart + + return &PlusBlockStartState{BaseBlockStartState: b} +} + +var _ BlockStartState = &PlusBlockStartState{} + +// StarBlockStartState is the block that begins a closure loop. 
+type StarBlockStartState struct { + *BaseBlockStartState +} + +func NewStarBlockStartState() *StarBlockStartState { + b := NewBlockStartState() + + b.stateType = ATNStateStarBlockStart + + return &StarBlockStartState{BaseBlockStartState: b} +} + +var _ BlockStartState = &StarBlockStartState{} + +type StarLoopbackState struct { + *BaseATNState +} + +func NewStarLoopbackState() *StarLoopbackState { + b := NewBaseATNState() + + b.stateType = ATNStateStarLoopBack + + return &StarLoopbackState{BaseATNState: b} +} + +type StarLoopEntryState struct { + *BaseDecisionState + loopBackState ATNState + precedenceRuleDecision bool +} + +func NewStarLoopEntryState() *StarLoopEntryState { + b := NewBaseDecisionState() + + b.stateType = ATNStateStarLoopEntry + + // False precedenceRuleDecision indicates whether s state can benefit from a precedence DFA during SLL decision making. + return &StarLoopEntryState{BaseDecisionState: b} +} + +// LoopEndState marks the end of a * or + loop. +type LoopEndState struct { + *BaseATNState + loopBackState ATNState +} + +func NewLoopEndState() *LoopEndState { + b := NewBaseATNState() + + b.stateType = ATNStateLoopEnd + + return &LoopEndState{BaseATNState: b} +} + +// TokensStartState is the Tokens rule start state linking to each lexer rule start state. +type TokensStartState struct { + *BaseDecisionState +} + +func NewTokensStartState() *TokensStartState { + b := NewBaseDecisionState() + + b.stateType = ATNStateTokenStart + + return &TokensStartState{BaseDecisionState: b} +} diff --git a/runtime/Go/antlr/v4/atn_type.go b/runtime/Go/antlr/v4/atn_type.go new file mode 100644 index 0000000000..3a515a145f --- /dev/null +++ b/runtime/Go/antlr/v4/atn_type.go @@ -0,0 +1,11 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +// Represent the type of recognizer an ATN applies to. 
+const ( + ATNTypeLexer = 0 + ATNTypeParser = 1 +) diff --git a/runtime/Go/antlr/v4/atnconfigset_test.go b/runtime/Go/antlr/v4/atnconfigset_test.go new file mode 100644 index 0000000000..3f1e9cc6cb --- /dev/null +++ b/runtime/Go/antlr/v4/atnconfigset_test.go @@ -0,0 +1,73 @@ +package antlr + +import ( + "testing" +) + +// Test for Issue # 3319 +// To run, "cd antlr4/runtime/Go/antlr/", then "go test". +// In the old runtime code, the test would crash because it would try +// to compare a *LexerActionExecutor with nil, causing a nil pointer dereference. +// It only happens if there were different states that had equal stateNumber mod 16, +// and you created that ATNConfig with a nil LexerActionExecutor. (That's why this +// code has a hardwired constant of 16. + +func TestCompare(t *testing.T) { + var set = NewOrderedATNConfigSet() + var s0 = NewBaseATNState() + var s1 = NewBaseATNState() + var s2 = NewBaseATNState() + var s3 = NewBaseATNState() + var s16 = NewBaseATNState() + s16.SetStateNumber(16) + var s17 = NewBaseATNState() + s17.SetStateNumber(17) + var s18 = NewBaseATNState() + s18.SetStateNumber(18) + var s19 = NewBaseATNState() + s19.SetStateNumber(19) + var la0 = NewBaseLexerAction(1) + var la1 = NewBaseLexerAction(2) + var laa = make([]LexerAction, 2) + laa[0] = la0 + laa[1] = la1 + var ae = NewLexerActionExecutor(laa) + set.Add(NewLexerATNConfig5(s0, 0, BasePredictionContextEMPTY, ae), nil) + set.Add(NewLexerATNConfig5(s0, 1, BasePredictionContextEMPTY, ae), nil) + set.Add(NewLexerATNConfig5(s0, 2, BasePredictionContextEMPTY, ae), nil) + set.Add(NewLexerATNConfig5(s1, 0, BasePredictionContextEMPTY, ae), nil) + set.Add(NewLexerATNConfig5(s1, 1, BasePredictionContextEMPTY, ae), nil) + set.Add(NewLexerATNConfig5(s1, 2, BasePredictionContextEMPTY, ae), nil) + set.Add(NewLexerATNConfig5(s2, 0, BasePredictionContextEMPTY, ae), nil) + set.Add(NewLexerATNConfig5(s2, 1, BasePredictionContextEMPTY, ae), nil) + set.Add(NewLexerATNConfig5(s2, 2, 
BasePredictionContextEMPTY, ae), nil) + set.Add(NewLexerATNConfig5(s3, 0, BasePredictionContextEMPTY, ae), nil) + set.Add(NewLexerATNConfig5(s3, 1, BasePredictionContextEMPTY, ae), nil) + set.Add(NewLexerATNConfig5(s3, 2, BasePredictionContextEMPTY, ae), nil) + + set.Add(NewLexerATNConfig5(s0, 0, BasePredictionContextEMPTY, nil), nil) + set.Add(NewLexerATNConfig5(s0, 1, BasePredictionContextEMPTY, nil), nil) + set.Add(NewLexerATNConfig5(s0, 2, BasePredictionContextEMPTY, nil), nil) + set.Add(NewLexerATNConfig5(s1, 0, BasePredictionContextEMPTY, nil), nil) + set.Add(NewLexerATNConfig5(s1, 1, BasePredictionContextEMPTY, nil), nil) + set.Add(NewLexerATNConfig5(s1, 2, BasePredictionContextEMPTY, nil), nil) + set.Add(NewLexerATNConfig5(s2, 0, BasePredictionContextEMPTY, nil), nil) + set.Add(NewLexerATNConfig5(s2, 1, BasePredictionContextEMPTY, nil), nil) + set.Add(NewLexerATNConfig5(s2, 2, BasePredictionContextEMPTY, nil), nil) + set.Add(NewLexerATNConfig5(s3, 0, BasePredictionContextEMPTY, nil), nil) + set.Add(NewLexerATNConfig5(s3, 1, BasePredictionContextEMPTY, nil), nil) + set.Add(NewLexerATNConfig5(s3, 2, BasePredictionContextEMPTY, nil), nil) + + set.Add(NewLexerATNConfig5(s16, 0, BasePredictionContextEMPTY, nil), nil) + set.Add(NewLexerATNConfig5(s16, 1, BasePredictionContextEMPTY, nil), nil) + set.Add(NewLexerATNConfig5(s16, 2, BasePredictionContextEMPTY, nil), nil) + set.Add(NewLexerATNConfig5(s17, 0, BasePredictionContextEMPTY, nil), nil) + set.Add(NewLexerATNConfig5(s17, 1, BasePredictionContextEMPTY, nil), nil) + set.Add(NewLexerATNConfig5(s17, 2, BasePredictionContextEMPTY, nil), nil) + set.Add(NewLexerATNConfig5(s18, 0, BasePredictionContextEMPTY, nil), nil) + set.Add(NewLexerATNConfig5(s18, 1, BasePredictionContextEMPTY, nil), nil) + set.Add(NewLexerATNConfig5(s18, 2, BasePredictionContextEMPTY, nil), nil) + set.Add(NewLexerATNConfig5(s19, 0, BasePredictionContextEMPTY, nil), nil) + set.Add(NewLexerATNConfig5(s19, 1, BasePredictionContextEMPTY, nil), nil) 
+ set.Add(NewLexerATNConfig5(s19, 2, BasePredictionContextEMPTY, nil), nil) +} diff --git a/runtime/Go/antlr/v4/char_stream.go b/runtime/Go/antlr/v4/char_stream.go new file mode 100644 index 0000000000..c33f0adb5e --- /dev/null +++ b/runtime/Go/antlr/v4/char_stream.go @@ -0,0 +1,12 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +type CharStream interface { + IntStream + GetText(int, int) string + GetTextFromTokens(start, end Token) string + GetTextFromInterval(*Interval) string +} diff --git a/runtime/Go/antlr/v4/common_token_factory.go b/runtime/Go/antlr/v4/common_token_factory.go new file mode 100644 index 0000000000..1bb0314ea0 --- /dev/null +++ b/runtime/Go/antlr/v4/common_token_factory.go @@ -0,0 +1,56 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +// TokenFactory creates CommonToken objects. +type TokenFactory interface { + Create(source *TokenSourceCharStreamPair, ttype int, text string, channel, start, stop, line, column int) Token +} + +// CommonTokenFactory is the default TokenFactory implementation. +type CommonTokenFactory struct { + // copyText indicates whether CommonToken.setText should be called after + // constructing tokens to explicitly set the text. This is useful for cases + // where the input stream might not be able to provide arbitrary substrings of + // text from the input after the lexer creates a token (e.g. the + // implementation of CharStream.GetText in UnbufferedCharStream panics an + // UnsupportedOperationException). Explicitly setting the token text allows + // Token.GetText to be called at any time regardless of the input stream + // implementation. 
+ // + // The default value is false to avoid the performance and memory overhead of + // copying text for every token unless explicitly requested. + copyText bool +} + +func NewCommonTokenFactory(copyText bool) *CommonTokenFactory { + return &CommonTokenFactory{copyText: copyText} +} + +// CommonTokenFactoryDEFAULT is the default CommonTokenFactory. It does not +// explicitly copy token text when constructing tokens. +var CommonTokenFactoryDEFAULT = NewCommonTokenFactory(false) + +func (c *CommonTokenFactory) Create(source *TokenSourceCharStreamPair, ttype int, text string, channel, start, stop, line, column int) Token { + t := NewCommonToken(source, ttype, channel, start, stop) + + t.line = line + t.column = column + + if text != "" { + t.SetText(text) + } else if c.copyText && source.charStream != nil { + t.SetText(source.charStream.GetTextFromInterval(NewInterval(start, stop))) + } + + return t +} + +func (c *CommonTokenFactory) createThin(ttype int, text string) Token { + t := NewCommonToken(nil, ttype, TokenDefaultChannel, -1, -1) + t.SetText(text) + + return t +} diff --git a/runtime/Go/antlr/v4/common_token_stream.go b/runtime/Go/antlr/v4/common_token_stream.go new file mode 100644 index 0000000000..c6c9485a20 --- /dev/null +++ b/runtime/Go/antlr/v4/common_token_stream.go @@ -0,0 +1,449 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "strconv" +) + +// CommonTokenStream is an implementation of TokenStream that loads tokens from +// a TokenSource on-demand and places the tokens in a buffer to provide access +// to any previous token by index. This token stream ignores the value of +// Token.getChannel. 
If your parser requires the token stream filter tokens to +// only those on a particular channel, such as Token.DEFAULT_CHANNEL or +// Token.HIDDEN_CHANNEL, use a filtering token stream such a CommonTokenStream. +type CommonTokenStream struct { + channel int + + // fetchedEOF indicates whether the Token.EOF token has been fetched from + // tokenSource and added to tokens. This field improves performance for the + // following cases: + // + // consume: The lookahead check in consume to preven consuming the EOF symbol is + // optimized by checking the values of fetchedEOF and p instead of calling LA. + // + // fetch: The check to prevent adding multiple EOF symbols into tokens is + // trivial with bt field. + fetchedEOF bool + + // index indexs into tokens of the current token (next token to consume). + // tokens[p] should be LT(1). It is set to -1 when the stream is first + // constructed or when SetTokenSource is called, indicating that the first token + // has not yet been fetched from the token source. For additional information, + // see the documentation of IntStream for a description of initializing methods. + index int + + // tokenSource is the TokenSource from which tokens for the bt stream are + // fetched. + tokenSource TokenSource + + // tokens is all tokens fetched from the token source. The list is considered a + // complete view of the input once fetchedEOF is set to true. 
+ tokens []Token +} + +func NewCommonTokenStream(lexer Lexer, channel int) *CommonTokenStream { + return &CommonTokenStream{ + channel: channel, + index: -1, + tokenSource: lexer, + tokens: make([]Token, 0), + } +} + +func (c *CommonTokenStream) GetAllTokens() []Token { + return c.tokens +} + +func (c *CommonTokenStream) Mark() int { + return 0 +} + +func (c *CommonTokenStream) Release(marker int) {} + +func (c *CommonTokenStream) reset() { + c.Seek(0) +} + +func (c *CommonTokenStream) Seek(index int) { + c.lazyInit() + c.index = c.adjustSeekIndex(index) +} + +func (c *CommonTokenStream) Get(index int) Token { + c.lazyInit() + + return c.tokens[index] +} + +func (c *CommonTokenStream) Consume() { + SkipEOFCheck := false + + if c.index >= 0 { + if c.fetchedEOF { + // The last token in tokens is EOF. Skip the check if p indexes any fetched. + // token except the last. + SkipEOFCheck = c.index < len(c.tokens)-1 + } else { + // No EOF token in tokens. Skip the check if p indexes a fetched token. + SkipEOFCheck = c.index < len(c.tokens) + } + } else { + // Not yet initialized + SkipEOFCheck = false + } + + if !SkipEOFCheck && c.LA(1) == TokenEOF { + panic("cannot consume EOF") + } + + if c.Sync(c.index + 1) { + c.index = c.adjustSeekIndex(c.index + 1) + } +} + +// Sync makes sure index i in tokens has a token and returns true if a token is +// located at index i and otherwise false. +func (c *CommonTokenStream) Sync(i int) bool { + n := i - len(c.tokens) + 1 // TODO: How many more elements do we need? + + if n > 0 { + fetched := c.fetch(n) + return fetched >= n + } + + return true +} + +// fetch adds n elements to buffer and returns the actual number of elements +// added to the buffer. 
+func (c *CommonTokenStream) fetch(n int) int { + if c.fetchedEOF { + return 0 + } + + for i := 0; i < n; i++ { + t := c.tokenSource.NextToken() + + t.SetTokenIndex(len(c.tokens)) + c.tokens = append(c.tokens, t) + + if t.GetTokenType() == TokenEOF { + c.fetchedEOF = true + + return i + 1 + } + } + + return n +} + +// GetTokens gets all tokens from start to stop inclusive. +func (c *CommonTokenStream) GetTokens(start int, stop int, types *IntervalSet) []Token { + if start < 0 || stop < 0 { + return nil + } + + c.lazyInit() + + subset := make([]Token, 0) + + if stop >= len(c.tokens) { + stop = len(c.tokens) - 1 + } + + for i := start; i < stop; i++ { + t := c.tokens[i] + + if t.GetTokenType() == TokenEOF { + break + } + + if types == nil || types.contains(t.GetTokenType()) { + subset = append(subset, t) + } + } + + return subset +} + +func (c *CommonTokenStream) LA(i int) int { + return c.LT(i).GetTokenType() +} + +func (c *CommonTokenStream) lazyInit() { + if c.index == -1 { + c.setup() + } +} + +func (c *CommonTokenStream) setup() { + c.Sync(0) + c.index = c.adjustSeekIndex(0) +} + +func (c *CommonTokenStream) GetTokenSource() TokenSource { + return c.tokenSource +} + +// SetTokenSource resets the c token stream by setting its token source. +func (c *CommonTokenStream) SetTokenSource(tokenSource TokenSource) { + c.tokenSource = tokenSource + c.tokens = make([]Token, 0) + c.index = -1 +} + +// NextTokenOnChannel returns the index of the next token on channel given a +// starting index. Returns i if tokens[i] is on channel. Returns -1 if there are +// no tokens on channel between i and EOF. 
+func (c *CommonTokenStream) NextTokenOnChannel(i, channel int) int { + c.Sync(i) + + if i >= len(c.tokens) { + return -1 + } + + token := c.tokens[i] + + for token.GetChannel() != c.channel { + if token.GetTokenType() == TokenEOF { + return -1 + } + + i++ + c.Sync(i) + token = c.tokens[i] + } + + return i +} + +// previousTokenOnChannel returns the index of the previous token on channel +// given a starting index. Returns i if tokens[i] is on channel. Returns -1 if +// there are no tokens on channel between i and 0. +func (c *CommonTokenStream) previousTokenOnChannel(i, channel int) int { + for i >= 0 && c.tokens[i].GetChannel() != channel { + i-- + } + + return i +} + +// GetHiddenTokensToRight collects all tokens on a specified channel to the +// right of the current token up until we see a token on DEFAULT_TOKEN_CHANNEL +// or EOF. If channel is -1, it finds any non-default channel token. +func (c *CommonTokenStream) GetHiddenTokensToRight(tokenIndex, channel int) []Token { + c.lazyInit() + + if tokenIndex < 0 || tokenIndex >= len(c.tokens) { + panic(strconv.Itoa(tokenIndex) + " not in 0.." + strconv.Itoa(len(c.tokens)-1)) + } + + nextOnChannel := c.NextTokenOnChannel(tokenIndex+1, LexerDefaultTokenChannel) + from := tokenIndex + 1 + + // If no onchannel to the right, then nextOnChannel == -1, so set to to last token + var to int + + if nextOnChannel == -1 { + to = len(c.tokens) - 1 + } else { + to = nextOnChannel + } + + return c.filterForChannel(from, to, channel) +} + +// GetHiddenTokensToLeft collects all tokens on channel to the left of the +// current token until we see a token on DEFAULT_TOKEN_CHANNEL. If channel is +// -1, it finds any non default channel token. +func (c *CommonTokenStream) GetHiddenTokensToLeft(tokenIndex, channel int) []Token { + c.lazyInit() + + if tokenIndex < 0 || tokenIndex >= len(c.tokens) { + panic(strconv.Itoa(tokenIndex) + " not in 0.." 
+ strconv.Itoa(len(c.tokens)-1)) + } + + prevOnChannel := c.previousTokenOnChannel(tokenIndex-1, LexerDefaultTokenChannel) + + if prevOnChannel == tokenIndex-1 { + return nil + } + + // If there are none on channel to the left and prevOnChannel == -1 then from = 0 + from := prevOnChannel + 1 + to := tokenIndex - 1 + + return c.filterForChannel(from, to, channel) +} + +func (c *CommonTokenStream) filterForChannel(left, right, channel int) []Token { + hidden := make([]Token, 0) + + for i := left; i < right+1; i++ { + t := c.tokens[i] + + if channel == -1 { + if t.GetChannel() != LexerDefaultTokenChannel { + hidden = append(hidden, t) + } + } else if t.GetChannel() == channel { + hidden = append(hidden, t) + } + } + + if len(hidden) == 0 { + return nil + } + + return hidden +} + +func (c *CommonTokenStream) GetSourceName() string { + return c.tokenSource.GetSourceName() +} + +func (c *CommonTokenStream) Size() int { + return len(c.tokens) +} + +func (c *CommonTokenStream) Index() int { + return c.index +} + +func (c *CommonTokenStream) GetAllText() string { + return c.GetTextFromInterval(nil) +} + +func (c *CommonTokenStream) GetTextFromTokens(start, end Token) string { + if start == nil || end == nil { + return "" + } + + return c.GetTextFromInterval(NewInterval(start.GetTokenIndex(), end.GetTokenIndex())) +} + +func (c *CommonTokenStream) GetTextFromRuleContext(interval RuleContext) string { + return c.GetTextFromInterval(interval.GetSourceInterval()) +} + +func (c *CommonTokenStream) GetTextFromInterval(interval *Interval) string { + c.lazyInit() + + if interval == nil { + c.Fill() + interval = NewInterval(0, len(c.tokens)-1) + } else { + c.Sync(interval.Stop) + } + + start := interval.Start + stop := interval.Stop + + if start < 0 || stop < 0 { + return "" + } + + if stop >= len(c.tokens) { + stop = len(c.tokens) - 1 + } + + s := "" + + for i := start; i < stop+1; i++ { + t := c.tokens[i] + + if t.GetTokenType() == TokenEOF { + break + } + + s += t.GetText() + } + 
+ return s +} + +// Fill gets all tokens from the lexer until EOF. +func (c *CommonTokenStream) Fill() { + c.lazyInit() + + for c.fetch(1000) == 1000 { + continue + } +} + +func (c *CommonTokenStream) adjustSeekIndex(i int) int { + return c.NextTokenOnChannel(i, c.channel) +} + +func (c *CommonTokenStream) LB(k int) Token { + if k == 0 || c.index-k < 0 { + return nil + } + + i := c.index + n := 1 + + // Find k good tokens looking backward + for n <= k { + // Skip off-channel tokens + i = c.previousTokenOnChannel(i-1, c.channel) + n++ + } + + if i < 0 { + return nil + } + + return c.tokens[i] +} + +func (c *CommonTokenStream) LT(k int) Token { + c.lazyInit() + + if k == 0 { + return nil + } + + if k < 0 { + return c.LB(-k) + } + + i := c.index + n := 1 // We know tokens[n] is valid + + // Find k good tokens + for n < k { + // Skip off-channel tokens, but make sure to not look past EOF + if c.Sync(i + 1) { + i = c.NextTokenOnChannel(i+1, c.channel) + } + + n++ + } + + return c.tokens[i] +} + +// getNumberOfOnChannelTokens counts EOF once. +func (c *CommonTokenStream) getNumberOfOnChannelTokens() int { + var n int + + c.Fill() + + for i := 0; i < len(c.tokens); i++ { + t := c.tokens[i] + + if t.GetChannel() == c.channel { + n++ + } + + if t.GetTokenType() == TokenEOF { + break + } + } + + return n +} diff --git a/runtime/Go/antlr/v4/common_token_stream_test.go b/runtime/Go/antlr/v4/common_token_stream_test.go new file mode 100644 index 0000000000..e7c75d49b1 --- /dev/null +++ b/runtime/Go/antlr/v4/common_token_stream_test.go @@ -0,0 +1,178 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. 
+ +package antlr + +import ( + "testing" +) + +type commonTokenStreamTestLexer struct { + *BaseLexer + + tokens []Token + i int +} + +func (l *commonTokenStreamTestLexer) NextToken() Token { + tmp := l.tokens[l.i] + l.i++ + return tmp +} + +func TestCommonTokenStreamOffChannel(t *testing.T) { + assert := assertNew(t) + lexEngine := &commonTokenStreamTestLexer{ + tokens: []Token{ + newTestCommonToken(1, " ", LexerHidden), // 0 + newTestCommonToken(1, "x", LexerDefaultTokenChannel), // 1 + newTestCommonToken(1, " ", LexerHidden), // 2 + newTestCommonToken(1, "=", LexerDefaultTokenChannel), // 3 + newTestCommonToken(1, "34", LexerDefaultTokenChannel), // 4 + newTestCommonToken(1, " ", LexerHidden), // 5 + newTestCommonToken(1, " ", LexerHidden), // 6 + newTestCommonToken(1, ";", LexerDefaultTokenChannel), // 7 + newTestCommonToken(1, "\n", LexerHidden), // 9 + newTestCommonToken(TokenEOF, "", LexerDefaultTokenChannel), // 10 + }, + } + tokens := NewCommonTokenStream(lexEngine, TokenDefaultChannel) + + assert.Equal("x", tokens.LT(1).GetText()) // must skip first off channel token + tokens.Consume() + assert.Equal("=", tokens.LT(1).GetText()) + assert.Equal("x", tokens.LT(-1).GetText()) + + tokens.Consume() + assert.Equal("34", tokens.LT(1).GetText()) + assert.Equal("=", tokens.LT(-1).GetText()) + + tokens.Consume() + assert.Equal(";", tokens.LT(1).GetText()) + assert.Equal("34", tokens.LT(-1).GetText()) + + tokens.Consume() + assert.Equal(TokenEOF, tokens.LT(1).GetTokenType()) + assert.Equal(";", tokens.LT(-1).GetText()) + + assert.Equal("34", tokens.LT(-2).GetText()) + assert.Equal("=", tokens.LT(-3).GetText()) + assert.Equal("x", tokens.LT(-4).GetText()) +} + +func TestCommonTokenStreamFetchOffChannel(t *testing.T) { + assert := assertNew(t) + lexEngine := &commonTokenStreamTestLexer{ + tokens: []Token{ + newTestCommonToken(1, " ", LexerHidden), // 0 + newTestCommonToken(1, "x", LexerDefaultTokenChannel), // 1 + newTestCommonToken(1, " ", LexerHidden), // 2 + 
newTestCommonToken(1, "=", LexerDefaultTokenChannel), // 3 + newTestCommonToken(1, "34", LexerDefaultTokenChannel), // 4 + newTestCommonToken(1, " ", LexerHidden), // 5 + newTestCommonToken(1, " ", LexerHidden), // 6 + newTestCommonToken(1, ";", LexerDefaultTokenChannel), // 7 + newTestCommonToken(1, " ", LexerHidden), // 8 + newTestCommonToken(1, "\n", LexerHidden), // 9 + newTestCommonToken(TokenEOF, "", LexerDefaultTokenChannel), // 10 + }, + } + tokens := NewCommonTokenStream(lexEngine, TokenDefaultChannel) + tokens.Fill() + + assert.Nil(tokens.GetHiddenTokensToLeft(0, -1)) + assert.Nil(tokens.GetHiddenTokensToRight(0, -1)) + + assert.Equal("[[@0,0:0=' ',<1>,channel=1,0:-1]]", tokensToString(tokens.GetHiddenTokensToLeft(1, -1))) + assert.Equal("[[@2,0:0=' ',<1>,channel=1,0:-1]]", tokensToString(tokens.GetHiddenTokensToRight(1, -1))) + + assert.Nil(tokens.GetHiddenTokensToLeft(2, -1)) + assert.Nil(tokens.GetHiddenTokensToRight(2, -1)) + + assert.Equal("[[@2,0:0=' ',<1>,channel=1,0:-1]]", tokensToString(tokens.GetHiddenTokensToLeft(3, -1))) + assert.Nil(tokens.GetHiddenTokensToRight(3, -1)) + + assert.Nil(tokens.GetHiddenTokensToLeft(4, -1)) + assert.Equal("[[@5,0:0=' ',<1>,channel=1,0:-1], [@6,0:0=' ',<1>,channel=1,0:-1]]", + tokensToString(tokens.GetHiddenTokensToRight(4, -1))) + + assert.Nil(tokens.GetHiddenTokensToLeft(5, -1)) + assert.Equal("[[@6,0:0=' ',<1>,channel=1,0:-1]]", + tokensToString(tokens.GetHiddenTokensToRight(5, -1))) + + assert.Equal("[[@5,0:0=' ',<1>,channel=1,0:-1]]", + tokensToString(tokens.GetHiddenTokensToLeft(6, -1))) + assert.Nil(tokens.GetHiddenTokensToRight(6, -1)) + + assert.Equal("[[@5,0:0=' ',<1>,channel=1,0:-1], [@6,0:0=' ',<1>,channel=1,0:-1]]", + tokensToString(tokens.GetHiddenTokensToLeft(7, -1))) + assert.Equal("[[@8,0:0=' ',<1>,channel=1,0:-1], [@9,0:0='\\n',<1>,channel=1,0:-1]]", + tokensToString(tokens.GetHiddenTokensToRight(7, -1))) + + assert.Nil(tokens.GetHiddenTokensToLeft(8, -1)) + 
assert.Equal("[[@9,0:0='\\n',<1>,channel=1,0:-1]]", + tokensToString(tokens.GetHiddenTokensToRight(8, -1))) + + assert.Equal("[[@8,0:0=' ',<1>,channel=1,0:-1]]", + tokensToString(tokens.GetHiddenTokensToLeft(9, -1))) + assert.Nil(tokens.GetHiddenTokensToRight(9, -1)) + +} + +type commonTokenStreamTestLexerSingleEOF struct { + *BaseLexer + + tokens []Token + i int +} + +func (l *commonTokenStreamTestLexerSingleEOF) NextToken() Token { + return newTestCommonToken(TokenEOF, "", LexerDefaultTokenChannel) +} + +func TestCommonTokenStreamSingleEOF(t *testing.T) { + assert := assertNew(t) + lexEngine := &commonTokenStreamTestLexerSingleEOF{} + tokens := NewCommonTokenStream(lexEngine, TokenDefaultChannel) + tokens.Fill() + + assert.Equal(TokenEOF, tokens.LA(1)) + assert.Equal(0, tokens.index) + assert.Equal(1, tokens.Size()) +} + +func TestCommonTokenStreamCannotConsumeEOF(t *testing.T) { + assert := assertNew(t) + lexEngine := &commonTokenStreamTestLexerSingleEOF{} + tokens := NewCommonTokenStream(lexEngine, TokenDefaultChannel) + tokens.Fill() + assert.Equal(TokenEOF, tokens.LA(1)) + assert.Equal(0, tokens.index) + assert.Equal(1, tokens.Size()) + assert.Panics(tokens.Consume) +} + +func TestCommonTokenStreamGetTextFromInterval(t *testing.T) { + assert := assertNew(t) + lexEngine := &commonTokenStreamTestLexer{ + tokens: []Token{ + newTestCommonToken(1, " ", LexerHidden), // 0 + newTestCommonToken(1, "x", LexerDefaultTokenChannel), // 1 + newTestCommonToken(1, " ", LexerHidden), // 2 + newTestCommonToken(1, "=", LexerDefaultTokenChannel), // 3 + newTestCommonToken(1, "34", LexerDefaultTokenChannel), // 4 + newTestCommonToken(1, " ", LexerHidden), // 5 + newTestCommonToken(1, " ", LexerHidden), // 6 + newTestCommonToken(1, ";", LexerDefaultTokenChannel), // 7 + newTestCommonToken(1, " ", LexerHidden), // 8 + newTestCommonToken(1, "\n", LexerHidden), // 9 + newTestCommonToken(TokenEOF, "", LexerDefaultTokenChannel), // 10 + }, + } + tokens := 
NewCommonTokenStream(lexEngine, TokenDefaultChannel) + assert.Equal("x", tokens.GetTextFromInterval(&Interval{Start: 1, Stop: 1})) + assert.Equal(len(tokens.tokens), 2) + assert.Equal(" x =34 ; \n", tokens.GetTextFromInterval(nil)) + assert.Equal(len(tokens.tokens), 11) +} diff --git a/runtime/Go/antlr/v4/comparators.go b/runtime/Go/antlr/v4/comparators.go new file mode 100644 index 0000000000..fbe76c33e0 --- /dev/null +++ b/runtime/Go/antlr/v4/comparators.go @@ -0,0 +1,137 @@ +package antlr + +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +// This file contains all the implementations of custom comparators used for generic collections when the +// Hash() and Equals() funcs supplied by the struct objects themselves need to be overridden. Normally, we would +// put the comparators in the source file for the struct themselves, but given the organization of this code is +// sorta kinda based upon the Java code, I found it confusing trying to find out which comparator was where and used by +// which instantiation of a collection. For instance, an Array2DHashSet in the Java source, when used with ATNConfig +// collections requires three different comparators depending on what the collection is being used for. Collecting - pun intended - +// all the comparators here, makes it much easier to see which implementation of hash and equals is used by which collection. +// It also makes it easy to verify that the Hash() and Equals() functions marry up with the Java implementations. + +// ObjEqComparator is the equivalent of the Java ObjectEqualityComparator, which is the default instance of +// Equality comparator. We do not have inheritance in Go, only interfaces, so we use generics to enforce some +// type safety and avoid having to implement this for every type that we want to perform comparison on. 
+// +// This comparator works by using the standard Hash() and Equals() methods of the type T that is being compared. Which +// allows us to use it in any collection instance that does nto require a special hash or equals implementation. +type ObjEqComparator[T Collectable[T]] struct{} + +// Equals2 delegates to the Equals() method of type T +func (c *ObjEqComparator[T]) Equals2(o1, o2 T) bool { + return o1.Equals(o2) +} + +// Hash1 delegates to the Hash() method of type T +func (c *ObjEqComparator[T]) Hash1(o T) int { + + return o.Hash() +} + +type SemCComparator[T Collectable[T]] struct{} + +// ATNConfigComparator is used as the compartor for the configLookup field of an ATNConfigSet +// and has a custom Equals() and Hash() implementation, because equality is not based on the +// standard Hash() and Equals() methods of the ATNConfig type. +type ATNConfigComparator[T Collectable[T]] struct { +} + +// Equals2 is a custom comparator for ATNConfigs specifically for configLookup +func (c *ATNConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool { + + // Same pointer, must be equal, even if both nil + // + if o1 == o2 { + return true + + } + + // If either are nil, but not both, then the result is false + // + if o1 == nil || o2 == nil { + return false + } + + return o1.GetState().GetStateNumber() == o2.GetState().GetStateNumber() && + o1.GetAlt() == o2.GetAlt() && + o1.GetSemanticContext().Equals(o2.GetSemanticContext()) +} + +// Hash1 is custom hash implementation for ATNConfigs specifically for configLookup +func (c *ATNConfigComparator[T]) Hash1(o ATNConfig) int { + hash := 7 + hash = 31*hash + o.GetState().GetStateNumber() + hash = 31*hash + o.GetAlt() + hash = 31*hash + o.GetSemanticContext().Hash() + return hash +} + +// ATNAltConfigComparator is used as the comparator for mapping configs to Alt Bitsets +type ATNAltConfigComparator[T Collectable[T]] struct { +} + +// Equals2 is a custom comparator for ATNConfigs specifically for configLookup +func (c 
*ATNAltConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool { + + // Same pointer, must be equal, even if both nil + // + if o1 == o2 { + return true + + } + + // If either are nil, but not both, then the result is false + // + if o1 == nil || o2 == nil { + return false + } + + return o1.GetState().GetStateNumber() == o2.GetState().GetStateNumber() && + o1.GetContext().Equals(o2.GetContext()) +} + +// Hash1 is custom hash implementation for ATNConfigs specifically for configLookup +func (c *ATNAltConfigComparator[T]) Hash1(o ATNConfig) int { + h := murmurInit(7) + h = murmurUpdate(h, o.GetState().GetStateNumber()) + h = murmurUpdate(h, o.GetContext().Hash()) + return murmurFinish(h, 2) +} + +// BaseATNConfigComparator is used as the comparator for the configLookup field of a BaseATNConfigSet +// and has a custom Equals() and Hash() implementation, because equality is not based on the +// standard Hash() and Equals() methods of the ATNConfig type. +type BaseATNConfigComparator[T Collectable[T]] struct { +} + +// Equals2 is a custom comparator for ATNConfigs specifically for baseATNConfigSet +func (c *BaseATNConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool { + + // Same pointer, must be equal, even if both nil + // + if o1 == o2 { + return true + + } + + // If either are nil, but not both, then the result is false + // + if o1 == nil || o2 == nil { + return false + } + + return o1.GetState().GetStateNumber() == o2.GetState().GetStateNumber() && + o1.GetAlt() == o2.GetAlt() && + o1.GetSemanticContext().Equals(o2.GetSemanticContext()) +} + +// Hash1 is custom hash implementation for ATNConfigs specifically for configLookup, but in fact just +// delegates to the standard Hash() method of the ATNConfig type. 
+func (c *BaseATNConfigComparator[T]) Hash1(o ATNConfig) int { + + return o.Hash() +} diff --git a/runtime/Go/antlr/v4/dfa.go b/runtime/Go/antlr/v4/dfa.go new file mode 100644 index 0000000000..5326baff95 --- /dev/null +++ b/runtime/Go/antlr/v4/dfa.go @@ -0,0 +1,148 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +type DFA struct { + // atnStartState is the ATN state in which this was created + atnStartState DecisionState + + decision int + + // states is all the DFA states. Use Map to get the old state back; Set can only + // indicate whether it is there. Go maps implement key hash collisions and so on and are very + // good, but the DFAState is an object and can't be used directly as the key as it can in say JAva + // amd C#, whereby if the hashcode is the same for two objects, then Equals() is called against them + // to see if they really are the same object. + // + // + states *JStore[*DFAState, *ObjEqComparator[*DFAState]] + + numstates int + + s0 *DFAState + + // precedenceDfa is the backing field for isPrecedenceDfa and setPrecedenceDfa. + // True if the DFA is for a precedence decision and false otherwise. 
+ precedenceDfa bool +} + +func NewDFA(atnStartState DecisionState, decision int) *DFA { + dfa := &DFA{ + atnStartState: atnStartState, + decision: decision, + states: NewJStore[*DFAState, *ObjEqComparator[*DFAState]](&ObjEqComparator[*DFAState]{}), + } + if s, ok := atnStartState.(*StarLoopEntryState); ok && s.precedenceRuleDecision { + dfa.precedenceDfa = true + dfa.s0 = NewDFAState(-1, NewBaseATNConfigSet(false)) + dfa.s0.isAcceptState = false + dfa.s0.requiresFullContext = false + } + return dfa +} + +// getPrecedenceStartState gets the start state for the current precedence and +// returns the start state corresponding to the specified precedence if a start +// state exists for the specified precedence and nil otherwise. d must be a +// precedence DFA. See also isPrecedenceDfa. +func (d *DFA) getPrecedenceStartState(precedence int) *DFAState { + if !d.getPrecedenceDfa() { + panic("only precedence DFAs may contain a precedence start state") + } + + // s0.edges is never nil for a precedence DFA + if precedence < 0 || precedence >= len(d.getS0().getEdges()) { + return nil + } + + return d.getS0().getIthEdge(precedence) +} + +// setPrecedenceStartState sets the start state for the current precedence. d +// must be a precedence DFA. See also isPrecedenceDfa. +func (d *DFA) setPrecedenceStartState(precedence int, startState *DFAState) { + if !d.getPrecedenceDfa() { + panic("only precedence DFAs may contain a precedence start state") + } + + if precedence < 0 { + return + } + + // Synchronization on s0 here is ok. When the DFA is turned into a + // precedence DFA, s0 will be initialized once and not updated again. s0.edges + // is never nil for a precedence DFA. + s0 := d.getS0() + if precedence >= s0.numEdges() { + edges := append(s0.getEdges(), make([]*DFAState, precedence+1-s0.numEdges())...) 
+ s0.setEdges(edges) + d.setS0(s0) + } + + s0.setIthEdge(precedence, startState) +} + +func (d *DFA) getPrecedenceDfa() bool { + return d.precedenceDfa +} + +// setPrecedenceDfa sets whether d is a precedence DFA. If precedenceDfa differs +// from the current DFA configuration, then d.states is cleared, the initial +// state s0 is set to a new DFAState with an empty outgoing DFAState.edges to +// store the start states for individual precedence values if precedenceDfa is +// true or nil otherwise, and d.precedenceDfa is updated. +func (d *DFA) setPrecedenceDfa(precedenceDfa bool) { + if d.getPrecedenceDfa() != precedenceDfa { + d.states = NewJStore[*DFAState, *ObjEqComparator[*DFAState]](&ObjEqComparator[*DFAState]{}) + d.numstates = 0 + + if precedenceDfa { + precedenceState := NewDFAState(-1, NewBaseATNConfigSet(false)) + + precedenceState.setEdges(make([]*DFAState, 0)) + precedenceState.isAcceptState = false + precedenceState.requiresFullContext = false + d.setS0(precedenceState) + } else { + d.setS0(nil) + } + + d.precedenceDfa = precedenceDfa + } +} + +func (d *DFA) getS0() *DFAState { + return d.s0 +} + +func (d *DFA) setS0(s *DFAState) { + d.s0 = s +} + +// sortedStates returns the states in d sorted by their state number. +func (d *DFA) sortedStates() []*DFAState { + + vs := d.states.SortedSlice(func(i, j *DFAState) bool { + return i.stateNumber < j.stateNumber + }) + + return vs +} + +func (d *DFA) String(literalNames []string, symbolicNames []string) string { + if d.getS0() == nil { + return "" + } + + return NewDFASerializer(d, literalNames, symbolicNames).String() +} + +func (d *DFA) ToLexerString() string { + if d.getS0() == nil { + return "" + } + + return NewLexerDFASerializer(d).String() +} diff --git a/runtime/Go/antlr/v4/dfa_serializer.go b/runtime/Go/antlr/v4/dfa_serializer.go new file mode 100644 index 0000000000..84d0a31e53 --- /dev/null +++ b/runtime/Go/antlr/v4/dfa_serializer.go @@ -0,0 +1,158 @@ +// Copyright (c) 2012-2022 The ANTLR Project. 
All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" + "strconv" + "strings" +) + +// DFASerializer is a DFA walker that knows how to dump them to serialized +// strings. +type DFASerializer struct { + dfa *DFA + literalNames []string + symbolicNames []string +} + +func NewDFASerializer(dfa *DFA, literalNames, symbolicNames []string) *DFASerializer { + if literalNames == nil { + literalNames = make([]string, 0) + } + + if symbolicNames == nil { + symbolicNames = make([]string, 0) + } + + return &DFASerializer{ + dfa: dfa, + literalNames: literalNames, + symbolicNames: symbolicNames, + } +} + +func (d *DFASerializer) String() string { + if d.dfa.getS0() == nil { + return "" + } + + buf := "" + states := d.dfa.sortedStates() + + for _, s := range states { + if s.edges != nil { + n := len(s.edges) + + for j := 0; j < n; j++ { + t := s.edges[j] + + if t != nil && t.stateNumber != 0x7FFFFFFF { + buf += d.GetStateString(s) + buf += "-" + buf += d.getEdgeLabel(j) + buf += "->" + buf += d.GetStateString(t) + buf += "\n" + } + } + } + } + + if len(buf) == 0 { + return "" + } + + return buf +} + +func (d *DFASerializer) getEdgeLabel(i int) string { + if i == 0 { + return "EOF" + } else if d.literalNames != nil && i-1 < len(d.literalNames) { + return d.literalNames[i-1] + } else if d.symbolicNames != nil && i-1 < len(d.symbolicNames) { + return d.symbolicNames[i-1] + } + + return strconv.Itoa(i - 1) +} + +func (d *DFASerializer) GetStateString(s *DFAState) string { + var a, b string + + if s.isAcceptState { + a = ":" + } + + if s.requiresFullContext { + b = "^" + } + + baseStateStr := a + "s" + strconv.Itoa(s.stateNumber) + b + + if s.isAcceptState { + if s.predicates != nil { + return baseStateStr + "=>" + fmt.Sprint(s.predicates) + } + + return baseStateStr + "=>" + fmt.Sprint(s.prediction) + } + + return baseStateStr +} + +type 
LexerDFASerializer struct { + *DFASerializer +} + +func NewLexerDFASerializer(dfa *DFA) *LexerDFASerializer { + return &LexerDFASerializer{DFASerializer: NewDFASerializer(dfa, nil, nil)} +} + +func (l *LexerDFASerializer) getEdgeLabel(i int) string { + var sb strings.Builder + sb.Grow(6) + sb.WriteByte('\'') + sb.WriteRune(rune(i)) + sb.WriteByte('\'') + return sb.String() +} + +func (l *LexerDFASerializer) String() string { + if l.dfa.getS0() == nil { + return "" + } + + buf := "" + states := l.dfa.sortedStates() + + for i := 0; i < len(states); i++ { + s := states[i] + + if s.edges != nil { + n := len(s.edges) + + for j := 0; j < n; j++ { + t := s.edges[j] + + if t != nil && t.stateNumber != 0x7FFFFFFF { + buf += l.GetStateString(s) + buf += "-" + buf += l.getEdgeLabel(j) + buf += "->" + buf += l.GetStateString(t) + buf += "\n" + } + } + } + } + + if len(buf) == 0 { + return "" + } + + return buf +} diff --git a/runtime/Go/antlr/v4/dfa_state.go b/runtime/Go/antlr/v4/dfa_state.go new file mode 100644 index 0000000000..c90dec55c8 --- /dev/null +++ b/runtime/Go/antlr/v4/dfa_state.go @@ -0,0 +1,169 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" +) + +// PredPrediction maps a predicate to a predicted alternative. +type PredPrediction struct { + alt int + pred SemanticContext +} + +func NewPredPrediction(pred SemanticContext, alt int) *PredPrediction { + return &PredPrediction{alt: alt, pred: pred} +} + +func (p *PredPrediction) String() string { + return "(" + fmt.Sprint(p.pred) + ", " + fmt.Sprint(p.alt) + ")" +} + +// DFAState represents a set of possible ATN configurations. As Aho, Sethi, +// Ullman p. 117 says: "The DFA uses its state to keep track of all possible +// states the ATN can be in after reading each input symbol. 
That is to say, +// after reading input a1a2..an, the DFA is in a state that represents the +// subset T of the states of the ATN that are reachable from the ATN's start +// state along some path labeled a1a2..an." In conventional NFA-to-DFA +// conversion, therefore, the subset T would be a bitset representing the set of +// states the ATN could be in. We need to track the alt predicted by each state +// as well, however. More importantly, we need to maintain a stack of states, +// tracking the closure operations as they jump from rule to rule, emulating +// rule invocations (method calls). I have to add a stack to simulate the proper +// lookahead sequences for the underlying LL grammar from which the ATN was +// derived. +// +// I use a set of ATNConfig objects, not simple states. An ATNConfig is both a +// state (ala normal conversion) and a RuleContext describing the chain of rules +// (if any) followed to arrive at that state. +// +// A DFAState may have multiple references to a particular state, but with +// different ATN contexts (with same or different alts) meaning that state was +// reached via a different set of rule invocations. +type DFAState struct { + stateNumber int + configs ATNConfigSet + + // edges elements point to the target of the symbol. Shift up by 1 so (-1) + // Token.EOF maps to the first element. + edges []*DFAState + + isAcceptState bool + + // prediction is the ttype we match or alt we predict if the state is accept. + // Set to ATN.INVALID_ALT_NUMBER when predicates != nil or + // requiresFullContext. + prediction int + + lexerActionExecutor *LexerActionExecutor + + // requiresFullContext indicates it was created during an SLL prediction that + // discovered a conflict between the configurations in the state. Future + // ParserATNSimulator.execATN invocations immediately jump doing + // full context prediction if true. 
+ requiresFullContext bool + + // predicates is the predicates associated with the ATN configurations of the + // DFA state during SLL parsing. When we have predicates, requiresFullContext + // is false, since full context prediction evaluates predicates on-the-fly. If + // d is + // not nil, then prediction is ATN.INVALID_ALT_NUMBER. + // + // We only use these for non-requiresFullContext but conflicting states. That + // means we know from the context (it's $ or we don't dip into outer context) + // that it's an ambiguity not a conflict. + // + // This list is computed by + // ParserATNSimulator.predicateDFAState. + predicates []*PredPrediction +} + +func NewDFAState(stateNumber int, configs ATNConfigSet) *DFAState { + if configs == nil { + configs = NewBaseATNConfigSet(false) + } + + return &DFAState{configs: configs, stateNumber: stateNumber} +} + +// GetAltSet gets the set of all alts mentioned by all ATN configurations in d. +func (d *DFAState) GetAltSet() []int { + var alts []int + + if d.configs != nil { + for _, c := range d.configs.GetItems() { + alts = append(alts, c.GetAlt()) + } + } + + if len(alts) == 0 { + return nil + } + + return alts +} + +func (d *DFAState) getEdges() []*DFAState { + return d.edges +} + +func (d *DFAState) numEdges() int { + return len(d.edges) +} + +func (d *DFAState) getIthEdge(i int) *DFAState { + return d.edges[i] +} + +func (d *DFAState) setEdges(newEdges []*DFAState) { + d.edges = newEdges +} + +func (d *DFAState) setIthEdge(i int, edge *DFAState) { + d.edges[i] = edge +} + +func (d *DFAState) setPrediction(v int) { + d.prediction = v +} + +func (d *DFAState) String() string { + var s string + if d.isAcceptState { + if d.predicates != nil { + s = "=>" + fmt.Sprint(d.predicates) + } else { + s = "=>" + fmt.Sprint(d.prediction) + } + } + + return fmt.Sprintf("%d:%s%s", d.stateNumber, fmt.Sprint(d.configs), s) +} + +func (d *DFAState) Hash() int { + h := murmurInit(7) + h = murmurUpdate(h, d.configs.Hash()) + return 
murmurFinish(h, 1) +} + +// Equals returns whether d equals other. Two DFAStates are equal if their ATN +// configuration sets are the same. This method is used to see if a state +// already exists. +// +// Because the number of alternatives and number of ATN configurations are +// finite, there is a finite number of DFA states that can be processed. This is +// necessary to show that the algorithm terminates. +// +// Cannot test the DFA state numbers here because in +// ParserATNSimulator.addDFAState we need to know if any other state exists that +// has d exact set of ATN configurations. The stateNumber is irrelevant. +func (d *DFAState) Equals(o Collectable[*DFAState]) bool { + if d == o { + return true + } + + return d.configs.Equals(o.(*DFAState).configs) +} diff --git a/runtime/Go/antlr/v4/diagnostic_error_listener.go b/runtime/Go/antlr/v4/diagnostic_error_listener.go new file mode 100644 index 0000000000..c55bcc19b2 --- /dev/null +++ b/runtime/Go/antlr/v4/diagnostic_error_listener.go @@ -0,0 +1,109 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "strconv" +) + +// +// This implementation of {@link ANTLRErrorListener} can be used to identify +// certain potential correctness and performance problems in grammars. "reports" +// are made by calling {@link Parser//NotifyErrorListeners} with the appropriate +// message. +// +//
      +//
    • Ambiguities: These are cases where more than one path through the +// grammar can Match the input.
    • +//
    • Weak context sensitivity: These are cases where full-context +// prediction resolved an SLL conflict to a unique alternative which equaled the +// minimum alternative of the SLL conflict.
    • +//
    • Strong (forced) context sensitivity: These are cases where the +// full-context prediction resolved an SLL conflict to a unique alternative, +// and the minimum alternative of the SLL conflict was found to not be +// a truly viable alternative. Two-stage parsing cannot be used for inputs where +// d situation occurs.
    • +//
    + +type DiagnosticErrorListener struct { + *DefaultErrorListener + + exactOnly bool +} + +func NewDiagnosticErrorListener(exactOnly bool) *DiagnosticErrorListener { + + n := new(DiagnosticErrorListener) + + // whether all ambiguities or only exact ambiguities are Reported. + n.exactOnly = exactOnly + return n +} + +func (d *DiagnosticErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) { + if d.exactOnly && !exact { + return + } + msg := "reportAmbiguity d=" + + d.getDecisionDescription(recognizer, dfa) + + ": ambigAlts=" + + d.getConflictingAlts(ambigAlts, configs).String() + + ", input='" + + recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'" + recognizer.NotifyErrorListeners(msg, nil, nil) +} + +func (d *DiagnosticErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) { + + msg := "reportAttemptingFullContext d=" + + d.getDecisionDescription(recognizer, dfa) + + ", input='" + + recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'" + recognizer.NotifyErrorListeners(msg, nil, nil) +} + +func (d *DiagnosticErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) { + msg := "reportContextSensitivity d=" + + d.getDecisionDescription(recognizer, dfa) + + ", input='" + + recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'" + recognizer.NotifyErrorListeners(msg, nil, nil) +} + +func (d *DiagnosticErrorListener) getDecisionDescription(recognizer Parser, dfa *DFA) string { + decision := dfa.decision + ruleIndex := dfa.atnStartState.GetRuleIndex() + + ruleNames := recognizer.GetRuleNames() + if ruleIndex < 0 || ruleIndex >= len(ruleNames) { + return strconv.Itoa(decision) + } + ruleName := ruleNames[ruleIndex] + 
if ruleName == "" { + return strconv.Itoa(decision) + } + return strconv.Itoa(decision) + " (" + ruleName + ")" +} + +// Computes the set of conflicting or ambiguous alternatives from a +// configuration set, if that information was not already provided by the +// parser. +// +// @param ReportedAlts The set of conflicting or ambiguous alternatives, as +// Reported by the parser. +// @param configs The conflicting or ambiguous configuration set. +// @return Returns {@code ReportedAlts} if it is not {@code nil}, otherwise +// returns the set of alternatives represented in {@code configs}. +func (d *DiagnosticErrorListener) getConflictingAlts(ReportedAlts *BitSet, set ATNConfigSet) *BitSet { + if ReportedAlts != nil { + return ReportedAlts + } + result := NewBitSet() + for _, c := range set.GetItems() { + result.add(c.GetAlt()) + } + + return result +} diff --git a/runtime/Go/antlr/v4/error_listener.go b/runtime/Go/antlr/v4/error_listener.go new file mode 100644 index 0000000000..f679f0dcd5 --- /dev/null +++ b/runtime/Go/antlr/v4/error_listener.go @@ -0,0 +1,104 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" + "os" + "strconv" +) + +// Provides an empty default implementation of {@link ANTLRErrorListener}. The +// default implementation of each method does nothing, but can be overridden as +// necessary. 
+ +type ErrorListener interface { + SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) + ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) + ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) + ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) +} + +type DefaultErrorListener struct { +} + +func NewDefaultErrorListener() *DefaultErrorListener { + return new(DefaultErrorListener) +} + +func (d *DefaultErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) { +} + +func (d *DefaultErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) { +} + +func (d *DefaultErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) { +} + +func (d *DefaultErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) { +} + +type ConsoleErrorListener struct { + *DefaultErrorListener +} + +func NewConsoleErrorListener() *ConsoleErrorListener { + return new(ConsoleErrorListener) +} + +// Provides a default instance of {@link ConsoleErrorListener}. +var ConsoleErrorListenerINSTANCE = NewConsoleErrorListener() + +// {@inheritDoc} +// +//

    +// This implementation prints messages to {@link System//err} containing the +// values of {@code line}, {@code charPositionInLine}, and {@code msg} using +// the following format.

    +// +//
    +// line line:charPositionInLine msg
    +// 
    +func (c *ConsoleErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) { + fmt.Fprintln(os.Stderr, "line "+strconv.Itoa(line)+":"+strconv.Itoa(column)+" "+msg) +} + +type ProxyErrorListener struct { + *DefaultErrorListener + delegates []ErrorListener +} + +func NewProxyErrorListener(delegates []ErrorListener) *ProxyErrorListener { + if delegates == nil { + panic("delegates is not provided") + } + l := new(ProxyErrorListener) + l.delegates = delegates + return l +} + +func (p *ProxyErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) { + for _, d := range p.delegates { + d.SyntaxError(recognizer, offendingSymbol, line, column, msg, e) + } +} + +func (p *ProxyErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) { + for _, d := range p.delegates { + d.ReportAmbiguity(recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs) + } +} + +func (p *ProxyErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) { + for _, d := range p.delegates { + d.ReportAttemptingFullContext(recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs) + } +} + +func (p *ProxyErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) { + for _, d := range p.delegates { + d.ReportContextSensitivity(recognizer, dfa, startIndex, stopIndex, prediction, configs) + } +} diff --git a/runtime/Go/antlr/v4/error_strategy.go b/runtime/Go/antlr/v4/error_strategy.go new file mode 100644 index 0000000000..5c0a637ba4 --- /dev/null +++ b/runtime/Go/antlr/v4/error_strategy.go @@ -0,0 +1,734 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. 
+// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" + "reflect" + "strconv" + "strings" +) + +type ErrorStrategy interface { + reset(Parser) + RecoverInline(Parser) Token + Recover(Parser, RecognitionException) + Sync(Parser) + InErrorRecoveryMode(Parser) bool + ReportError(Parser, RecognitionException) + ReportMatch(Parser) +} + +// This is the default implementation of {@link ANTLRErrorStrategy} used for +// error Reporting and recovery in ANTLR parsers. +type DefaultErrorStrategy struct { + errorRecoveryMode bool + lastErrorIndex int + lastErrorStates *IntervalSet +} + +var _ ErrorStrategy = &DefaultErrorStrategy{} + +func NewDefaultErrorStrategy() *DefaultErrorStrategy { + + d := new(DefaultErrorStrategy) + + // Indicates whether the error strategy is currently "recovering from an + // error". This is used to suppress Reporting multiple error messages while + // attempting to recover from a detected syntax error. + // + // @see //InErrorRecoveryMode + // + d.errorRecoveryMode = false + + // The index into the input stream where the last error occurred. + // This is used to prevent infinite loops where an error is found + // but no token is consumed during recovery...another error is found, + // ad nauseum. This is a failsafe mechanism to guarantee that at least + // one token/tree node is consumed for two errors. + // + d.lastErrorIndex = -1 + d.lastErrorStates = nil + return d +} + +//

    The default implementation simply calls {@link //endErrorCondition} to +// ensure that the handler is not in error recovery mode.

    +func (d *DefaultErrorStrategy) reset(recognizer Parser) { + d.endErrorCondition(recognizer) +} + +// This method is called to enter error recovery mode when a recognition +// exception is Reported. +// +// @param recognizer the parser instance +func (d *DefaultErrorStrategy) beginErrorCondition(recognizer Parser) { + d.errorRecoveryMode = true +} + +func (d *DefaultErrorStrategy) InErrorRecoveryMode(recognizer Parser) bool { + return d.errorRecoveryMode +} + +// This method is called to leave error recovery mode after recovering from +// a recognition exception. +// +// @param recognizer +func (d *DefaultErrorStrategy) endErrorCondition(recognizer Parser) { + d.errorRecoveryMode = false + d.lastErrorStates = nil + d.lastErrorIndex = -1 +} + +// {@inheritDoc} +// +//

    The default implementation simply calls {@link //endErrorCondition}.

    +func (d *DefaultErrorStrategy) ReportMatch(recognizer Parser) { + d.endErrorCondition(recognizer) +} + +// {@inheritDoc} +// +//

    The default implementation returns immediately if the handler is already +// in error recovery mode. Otherwise, it calls {@link //beginErrorCondition} +// and dispatches the Reporting task based on the runtime type of {@code e} +// according to the following table.

    +// +//
      +//
    • {@link NoViableAltException}: Dispatches the call to +// {@link //ReportNoViableAlternative}
    • +//
    • {@link InputMisMatchException}: Dispatches the call to +// {@link //ReportInputMisMatch}
    • +//
    • {@link FailedPredicateException}: Dispatches the call to +// {@link //ReportFailedPredicate}
    • +//
    • All other types: calls {@link Parser//NotifyErrorListeners} to Report +// the exception
    • +//
    +func (d *DefaultErrorStrategy) ReportError(recognizer Parser, e RecognitionException) { + // if we've already Reported an error and have not Matched a token + // yet successfully, don't Report any errors. + if d.InErrorRecoveryMode(recognizer) { + return // don't Report spurious errors + } + d.beginErrorCondition(recognizer) + + switch t := e.(type) { + default: + fmt.Println("unknown recognition error type: " + reflect.TypeOf(e).Name()) + // fmt.Println(e.stack) + recognizer.NotifyErrorListeners(e.GetMessage(), e.GetOffendingToken(), e) + case *NoViableAltException: + d.ReportNoViableAlternative(recognizer, t) + case *InputMisMatchException: + d.ReportInputMisMatch(recognizer, t) + case *FailedPredicateException: + d.ReportFailedPredicate(recognizer, t) + } +} + +// {@inheritDoc} +// +//

    The default implementation reSynchronizes the parser by consuming tokens +// until we find one in the reSynchronization set--loosely the set of tokens +// that can follow the current rule.

    +func (d *DefaultErrorStrategy) Recover(recognizer Parser, e RecognitionException) { + + if d.lastErrorIndex == recognizer.GetInputStream().Index() && + d.lastErrorStates != nil && d.lastErrorStates.contains(recognizer.GetState()) { + // uh oh, another error at same token index and previously-Visited + // state in ATN must be a case where LT(1) is in the recovery + // token set so nothing got consumed. Consume a single token + // at least to prevent an infinite loop d is a failsafe. + recognizer.Consume() + } + d.lastErrorIndex = recognizer.GetInputStream().Index() + if d.lastErrorStates == nil { + d.lastErrorStates = NewIntervalSet() + } + d.lastErrorStates.addOne(recognizer.GetState()) + followSet := d.getErrorRecoverySet(recognizer) + d.consumeUntil(recognizer, followSet) +} + +// The default implementation of {@link ANTLRErrorStrategy//Sync} makes sure +// that the current lookahead symbol is consistent with what were expecting +// at d point in the ATN. You can call d anytime but ANTLR only +// generates code to check before subrules/loops and each iteration. +// +//

    Implements Jim Idle's magic Sync mechanism in closures and optional +// subrules. E.g.,

    +// +//
    +// a : Sync ( stuff Sync )*
    +// Sync : {consume to what can follow Sync}
    +// 
    +// +// At the start of a sub rule upon error, {@link //Sync} performs single +// token deletion, if possible. If it can't do that, it bails on the current +// rule and uses the default error recovery, which consumes until the +// reSynchronization set of the current rule. +// +//

    If the sub rule is optional ({@code (...)?}, {@code (...)*}, or block +// with an empty alternative), then the expected set includes what follows +// the subrule.

    +// +//

    During loop iteration, it consumes until it sees a token that can start a +// sub rule or what follows loop. Yes, that is pretty aggressive. We opt to +// stay in the loop as long as possible.

    +// +//

    ORIGINS

    +// +//

    Previous versions of ANTLR did a poor job of their recovery within loops. +// A single mismatch token or missing token would force the parser to bail +// out of the entire rules surrounding the loop. So, for rule

    +// +//
    +// classfunc : 'class' ID '{' member* '}'
    +// 
    +// +// input with an extra token between members would force the parser to +// consume until it found the next class definition rather than the next +// member definition of the current class. +// +//

    This functionality cost a little bit of effort because the parser has to +// compare token set at the start of the loop and at each iteration. If for +// some reason speed is suffering for you, you can turn off d +// functionality by simply overriding d method as a blank { }.

    +func (d *DefaultErrorStrategy) Sync(recognizer Parser) { + // If already recovering, don't try to Sync + if d.InErrorRecoveryMode(recognizer) { + return + } + + s := recognizer.GetInterpreter().atn.states[recognizer.GetState()] + la := recognizer.GetTokenStream().LA(1) + + // try cheaper subset first might get lucky. seems to shave a wee bit off + nextTokens := recognizer.GetATN().NextTokens(s, nil) + if nextTokens.contains(TokenEpsilon) || nextTokens.contains(la) { + return + } + + switch s.GetStateType() { + case ATNStateBlockStart, ATNStateStarBlockStart, ATNStatePlusBlockStart, ATNStateStarLoopEntry: + // Report error and recover if possible + if d.SingleTokenDeletion(recognizer) != nil { + return + } + panic(NewInputMisMatchException(recognizer)) + case ATNStatePlusLoopBack, ATNStateStarLoopBack: + d.ReportUnwantedToken(recognizer) + expecting := NewIntervalSet() + expecting.addSet(recognizer.GetExpectedTokens()) + whatFollowsLoopIterationOrRule := expecting.addSet(d.getErrorRecoverySet(recognizer)) + d.consumeUntil(recognizer, whatFollowsLoopIterationOrRule) + default: + // do nothing if we can't identify the exact kind of ATN state + } +} + +// This is called by {@link //ReportError} when the exception is a +// {@link NoViableAltException}. +// +// @see //ReportError +// +// @param recognizer the parser instance +// @param e the recognition exception +func (d *DefaultErrorStrategy) ReportNoViableAlternative(recognizer Parser, e *NoViableAltException) { + tokens := recognizer.GetTokenStream() + var input string + if tokens != nil { + if e.startToken.GetTokenType() == TokenEOF { + input = "" + } else { + input = tokens.GetTextFromTokens(e.startToken, e.offendingToken) + } + } else { + input = "" + } + msg := "no viable alternative at input " + d.escapeWSAndQuote(input) + recognizer.NotifyErrorListeners(msg, e.offendingToken, e) +} + +// This is called by {@link //ReportError} when the exception is an +// {@link InputMisMatchException}. 
+// +// @see //ReportError +// +// @param recognizer the parser instance +// @param e the recognition exception +func (this *DefaultErrorStrategy) ReportInputMisMatch(recognizer Parser, e *InputMisMatchException) { + msg := "mismatched input " + this.GetTokenErrorDisplay(e.offendingToken) + + " expecting " + e.getExpectedTokens().StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false) + recognizer.NotifyErrorListeners(msg, e.offendingToken, e) +} + +// This is called by {@link //ReportError} when the exception is a +// {@link FailedPredicateException}. +// +// @see //ReportError +// +// @param recognizer the parser instance +// @param e the recognition exception +func (d *DefaultErrorStrategy) ReportFailedPredicate(recognizer Parser, e *FailedPredicateException) { + ruleName := recognizer.GetRuleNames()[recognizer.GetParserRuleContext().GetRuleIndex()] + msg := "rule " + ruleName + " " + e.message + recognizer.NotifyErrorListeners(msg, e.offendingToken, e) +} + +// This method is called to Report a syntax error which requires the removal +// of a token from the input stream. At the time d method is called, the +// erroneous symbol is current {@code LT(1)} symbol and has not yet been +// removed from the input stream. When d method returns, +// {@code recognizer} is in error recovery mode. +// +//

    This method is called when {@link //singleTokenDeletion} identifies +// single-token deletion as a viable recovery strategy for a mismatched +// input error.

    +// +//

    The default implementation simply returns if the handler is already in +// error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to +// enter error recovery mode, followed by calling +// {@link Parser//NotifyErrorListeners}.

    +// +// @param recognizer the parser instance +func (d *DefaultErrorStrategy) ReportUnwantedToken(recognizer Parser) { + if d.InErrorRecoveryMode(recognizer) { + return + } + d.beginErrorCondition(recognizer) + t := recognizer.GetCurrentToken() + tokenName := d.GetTokenErrorDisplay(t) + expecting := d.GetExpectedTokens(recognizer) + msg := "extraneous input " + tokenName + " expecting " + + expecting.StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false) + recognizer.NotifyErrorListeners(msg, t, nil) +} + +// This method is called to Report a syntax error which requires the +// insertion of a missing token into the input stream. At the time d +// method is called, the missing token has not yet been inserted. When d +// method returns, {@code recognizer} is in error recovery mode. +// +//

    This method is called when {@link //singleTokenInsertion} identifies +// single-token insertion as a viable recovery strategy for a mismatched +// input error.

    +// +//

    The default implementation simply returns if the handler is already in +// error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to +// enter error recovery mode, followed by calling +// {@link Parser//NotifyErrorListeners}.

    +// +// @param recognizer the parser instance +func (d *DefaultErrorStrategy) ReportMissingToken(recognizer Parser) { + if d.InErrorRecoveryMode(recognizer) { + return + } + d.beginErrorCondition(recognizer) + t := recognizer.GetCurrentToken() + expecting := d.GetExpectedTokens(recognizer) + msg := "missing " + expecting.StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false) + + " at " + d.GetTokenErrorDisplay(t) + recognizer.NotifyErrorListeners(msg, t, nil) +} + +//

    The default implementation attempts to recover from the mismatched input +// by using single token insertion and deletion as described below. If the +// recovery attempt fails, d method panics an +// {@link InputMisMatchException}.

    +// +//

    EXTRA TOKEN (single token deletion)

    +// +//

    {@code LA(1)} is not what we are looking for. If {@code LA(2)} has the +// right token, however, then assume {@code LA(1)} is some extra spurious +// token and delete it. Then consume and return the next token (which was +// the {@code LA(2)} token) as the successful result of the Match operation.

    +// +//

    This recovery strategy is implemented by {@link +// //singleTokenDeletion}.

    +// +//

    MISSING TOKEN (single token insertion)

    +// +//

    If current token (at {@code LA(1)}) is consistent with what could come +// after the expected {@code LA(1)} token, then assume the token is missing +// and use the parser's {@link TokenFactory} to create it on the fly. The +// "insertion" is performed by returning the created token as the successful +// result of the Match operation.

    +// +//

    This recovery strategy is implemented by {@link +// //singleTokenInsertion}.

    +// +//

    EXAMPLE

    +// +//

    For example, Input {@code i=(3} is clearly missing the {@code ')'}. When +// the parser returns from the nested call to {@code expr}, it will have +// call chain:

    +// +//
    +// stat &rarr expr &rarr atom
    +// 
    +// +// and it will be trying to Match the {@code ')'} at d point in the +// derivation: +// +//
    +// => ID '=' '(' INT ')' ('+' atom)* ”
    +// ^
    +// 
    +// +// The attempt to Match {@code ')'} will fail when it sees {@code ”} and +// call {@link //recoverInline}. To recover, it sees that {@code LA(1)==”} +// is in the set of tokens that can follow the {@code ')'} token reference +// in rule {@code atom}. It can assume that you forgot the {@code ')'}. +func (d *DefaultErrorStrategy) RecoverInline(recognizer Parser) Token { + // SINGLE TOKEN DELETION + MatchedSymbol := d.SingleTokenDeletion(recognizer) + if MatchedSymbol != nil { + // we have deleted the extra token. + // now, move past ttype token as if all were ok + recognizer.Consume() + return MatchedSymbol + } + // SINGLE TOKEN INSERTION + if d.SingleTokenInsertion(recognizer) { + return d.GetMissingSymbol(recognizer) + } + // even that didn't work must panic the exception + panic(NewInputMisMatchException(recognizer)) +} + +// This method implements the single-token insertion inline error recovery +// strategy. It is called by {@link //recoverInline} if the single-token +// deletion strategy fails to recover from the mismatched input. If this +// method returns {@code true}, {@code recognizer} will be in error recovery +// mode. +// +//

    This method determines whether or not single-token insertion is viable by +// checking if the {@code LA(1)} input symbol could be successfully Matched +// if it were instead the {@code LA(2)} symbol. If d method returns +// {@code true}, the caller is responsible for creating and inserting a +// token with the correct type to produce d behavior.

    +// +// @param recognizer the parser instance +// @return {@code true} if single-token insertion is a viable recovery +// strategy for the current mismatched input, otherwise {@code false} +func (d *DefaultErrorStrategy) SingleTokenInsertion(recognizer Parser) bool { + currentSymbolType := recognizer.GetTokenStream().LA(1) + // if current token is consistent with what could come after current + // ATN state, then we know we're missing a token error recovery + // is free to conjure up and insert the missing token + atn := recognizer.GetInterpreter().atn + currentState := atn.states[recognizer.GetState()] + next := currentState.GetTransitions()[0].getTarget() + expectingAtLL2 := atn.NextTokens(next, recognizer.GetParserRuleContext()) + if expectingAtLL2.contains(currentSymbolType) { + d.ReportMissingToken(recognizer) + return true + } + + return false +} + +// This method implements the single-token deletion inline error recovery +// strategy. It is called by {@link //recoverInline} to attempt to recover +// from mismatched input. If this method returns nil, the parser and error +// handler state will not have changed. If this method returns non-nil, +// {@code recognizer} will not be in error recovery mode since the +// returned token was a successful Match. +// +//

    If the single-token deletion is successful, d method calls +// {@link //ReportUnwantedToken} to Report the error, followed by +// {@link Parser//consume} to actually "delete" the extraneous token. Then, +// before returning {@link //ReportMatch} is called to signal a successful +// Match.

    +// +// @param recognizer the parser instance +// @return the successfully Matched {@link Token} instance if single-token +// deletion successfully recovers from the mismatched input, otherwise +// {@code nil} +func (d *DefaultErrorStrategy) SingleTokenDeletion(recognizer Parser) Token { + NextTokenType := recognizer.GetTokenStream().LA(2) + expecting := d.GetExpectedTokens(recognizer) + if expecting.contains(NextTokenType) { + d.ReportUnwantedToken(recognizer) + // print("recoverFromMisMatchedToken deleting " \ + // + str(recognizer.GetTokenStream().LT(1)) \ + // + " since " + str(recognizer.GetTokenStream().LT(2)) \ + // + " is what we want", file=sys.stderr) + recognizer.Consume() // simply delete extra token + // we want to return the token we're actually Matching + MatchedSymbol := recognizer.GetCurrentToken() + d.ReportMatch(recognizer) // we know current token is correct + return MatchedSymbol + } + + return nil +} + +// Conjure up a missing token during error recovery. +// +// The recognizer attempts to recover from single missing +// symbols. But, actions might refer to that missing symbol. +// For example, x=ID {f($x)}. The action clearly assumes +// that there has been an identifier Matched previously and that +// $x points at that token. If that token is missing, but +// the next token in the stream is what we want we assume that +// d token is missing and we keep going. Because we +// have to return some token to replace the missing token, +// we have to conjure one up. This method gives the user control +// over the tokens returned for missing tokens. Mostly, +// you will want to create something special for identifier +// tokens. For literals such as '{' and ',', the default +// action in the parser or tree parser works. It simply creates +// a CommonToken of the appropriate type. The text will be the token. +// If you change what tokens must be created by the lexer, +// override d method to create the appropriate tokens. 
+func (d *DefaultErrorStrategy) GetMissingSymbol(recognizer Parser) Token { + currentSymbol := recognizer.GetCurrentToken() + expecting := d.GetExpectedTokens(recognizer) + expectedTokenType := expecting.first() + var tokenText string + + if expectedTokenType == TokenEOF { + tokenText = "" + } else { + ln := recognizer.GetLiteralNames() + if expectedTokenType > 0 && expectedTokenType < len(ln) { + tokenText = "" + } else { + tokenText = "" // TODO matches the JS impl + } + } + current := currentSymbol + lookback := recognizer.GetTokenStream().LT(-1) + if current.GetTokenType() == TokenEOF && lookback != nil { + current = lookback + } + + tf := recognizer.GetTokenFactory() + + return tf.Create(current.GetSource(), expectedTokenType, tokenText, TokenDefaultChannel, -1, -1, current.GetLine(), current.GetColumn()) +} + +func (d *DefaultErrorStrategy) GetExpectedTokens(recognizer Parser) *IntervalSet { + return recognizer.GetExpectedTokens() +} + +// How should a token be displayed in an error message? The default +// is to display just the text, but during development you might +// want to have a lot of information spit out. Override in that case +// to use t.String() (which, for CommonToken, dumps everything about +// the token). This is better than forcing you to override a method in +// your token objects because you don't have to go modify your lexer +// so that it creates a NewJava type. +func (d *DefaultErrorStrategy) GetTokenErrorDisplay(t Token) string { + if t == nil { + return "" + } + s := t.GetText() + if s == "" { + if t.GetTokenType() == TokenEOF { + s = "" + } else { + s = "<" + strconv.Itoa(t.GetTokenType()) + ">" + } + } + return d.escapeWSAndQuote(s) +} + +func (d *DefaultErrorStrategy) escapeWSAndQuote(s string) string { + s = strings.Replace(s, "\t", "\\t", -1) + s = strings.Replace(s, "\n", "\\n", -1) + s = strings.Replace(s, "\r", "\\r", -1) + return "'" + s + "'" +} + +// Compute the error recovery set for the current rule. 
During +// rule invocation, the parser pushes the set of tokens that can +// follow that rule reference on the stack d amounts to +// computing FIRST of what follows the rule reference in the +// enclosing rule. See LinearApproximator.FIRST(). +// This local follow set only includes tokens +// from within the rule i.e., the FIRST computation done by +// ANTLR stops at the end of a rule. +// +// # EXAMPLE +// +// When you find a "no viable alt exception", the input is not +// consistent with any of the alternatives for rule r. The best +// thing to do is to consume tokens until you see something that +// can legally follow a call to r//or* any rule that called r. +// You don't want the exact set of viable next tokens because the +// input might just be missing a token--you might consume the +// rest of the input looking for one of the missing tokens. +// +// Consider grammar: +// +// a : '[' b ']' +// | '(' b ')' +// +// b : c '^' INT +// c : ID +// | INT +// +// At each rule invocation, the set of tokens that could follow +// that rule is pushed on a stack. Here are the various +// context-sensitive follow sets: +// +// FOLLOW(b1_in_a) = FIRST(']') = ']' +// FOLLOW(b2_in_a) = FIRST(')') = ')' +// FOLLOW(c_in_b) = FIRST('^') = '^' +// +// Upon erroneous input "[]", the call chain is +// +// a -> b -> c +// +// and, hence, the follow context stack is: +// +// depth follow set start of rule execution +// 0 a (from main()) +// 1 ']' b +// 2 '^' c +// +// Notice that ')' is not included, because b would have to have +// been called from a different context in rule a for ')' to be +// included. +// +// For error recovery, we cannot consider FOLLOW(c) +// (context-sensitive or otherwise). We need the combined set of +// all context-sensitive FOLLOW sets--the set of all tokens that +// could follow any reference in the call chain. We need to +// reSync to one of those tokens. Note that FOLLOW(c)='^' and if +// we reSync'd to that token, we'd consume until EOF. 
We need to +// Sync to context-sensitive FOLLOWs for a, b, and c: {']','^'}. +// In this case, for input "[]", LA(1) is ']' and in the set, so we would +// not consume anything. After printing an error, rule c would +// return normally. Rule b would not find the required '^' though. +// At this point, it gets a mismatched token error and panics an +// exception (since LA(1) is not in the viable following token +// set). The rule exception handler tries to recover, but finds +// the same recovery set and doesn't consume anything. Rule b +// exits normally returning to rule a. Now it finds the ']' (and +// with the successful Match exits errorRecovery mode). +// +// So, you can see that the parser walks up the call chain looking +// for the token that was a member of the recovery set. +// +// Errors are not generated in errorRecovery mode. +// +// ANTLR's error recovery mechanism is based upon original ideas: +// +// "Algorithms + Data Structures = Programs" by Niklaus Wirth +// +// and +// +// "A note on error recovery in recursive descent parsers": +// http://portal.acm.org/citation.cfm?id=947902.947905 +// +// Later, Josef Grosch had some good ideas: +// +// "Efficient and Comfortable Error Recovery in Recursive Descent +// Parsers": +// ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip +// +// Like Grosch I implement context-sensitive FOLLOW sets that are combined +// at run-time upon error to avoid overhead during parsing. 
+func (d *DefaultErrorStrategy) getErrorRecoverySet(recognizer Parser) *IntervalSet { + atn := recognizer.GetInterpreter().atn + ctx := recognizer.GetParserRuleContext() + recoverSet := NewIntervalSet() + for ctx != nil && ctx.GetInvokingState() >= 0 { + // compute what follows who invoked us + invokingState := atn.states[ctx.GetInvokingState()] + rt := invokingState.GetTransitions()[0] + follow := atn.NextTokens(rt.(*RuleTransition).followState, nil) + recoverSet.addSet(follow) + ctx = ctx.GetParent().(ParserRuleContext) + } + recoverSet.removeOne(TokenEpsilon) + return recoverSet +} + +// Consume tokens until one Matches the given token set.// +func (d *DefaultErrorStrategy) consumeUntil(recognizer Parser, set *IntervalSet) { + ttype := recognizer.GetTokenStream().LA(1) + for ttype != TokenEOF && !set.contains(ttype) { + recognizer.Consume() + ttype = recognizer.GetTokenStream().LA(1) + } +} + +// +// This implementation of {@link ANTLRErrorStrategy} responds to syntax errors +// by immediately canceling the parse operation with a +// {@link ParseCancellationException}. The implementation ensures that the +// {@link ParserRuleContext//exception} field is set for all parse tree nodes +// that were not completed prior to encountering the error. +// +//

    +// This error strategy is useful in the following scenarios.

    +// +//
      +//
    • Two-stage parsing: This error strategy allows the first +// stage of two-stage parsing to immediately terminate if an error is +// encountered, and immediately fall back to the second stage. In addition to +// avoiding wasted work by attempting to recover from errors here, the empty +// implementation of {@link BailErrorStrategy//Sync} improves the performance of +// the first stage.
    • +//
    • Silent validation: When syntax errors are not being +// Reported or logged, and the parse result is simply ignored if errors occur, +// the {@link BailErrorStrategy} avoids wasting work on recovering from errors +// when the result will be ignored either way.
    • +//
    +// +//

    +// {@code myparser.setErrorHandler(NewBailErrorStrategy())}

    +// +// @see Parser//setErrorHandler(ANTLRErrorStrategy) + +type BailErrorStrategy struct { + *DefaultErrorStrategy +} + +var _ ErrorStrategy = &BailErrorStrategy{} + +func NewBailErrorStrategy() *BailErrorStrategy { + + b := new(BailErrorStrategy) + + b.DefaultErrorStrategy = NewDefaultErrorStrategy() + + return b +} + +// Instead of recovering from exception {@code e}, re-panic it wrapped +// in a {@link ParseCancellationException} so it is not caught by the +// rule func catches. Use {@link Exception//getCause()} to get the +// original {@link RecognitionException}. +func (b *BailErrorStrategy) Recover(recognizer Parser, e RecognitionException) { + context := recognizer.GetParserRuleContext() + for context != nil { + context.SetException(e) + if parent, ok := context.GetParent().(ParserRuleContext); ok { + context = parent + } else { + context = nil + } + } + panic(NewParseCancellationException()) // TODO we don't emit e properly +} + +// Make sure we don't attempt to recover inline if the parser +// successfully recovers, it won't panic an exception. +func (b *BailErrorStrategy) RecoverInline(recognizer Parser) Token { + b.Recover(recognizer, NewInputMisMatchException(recognizer)) + + return nil +} + +// Make sure we don't attempt to recover from problems in subrules.// +func (b *BailErrorStrategy) Sync(recognizer Parser) { + // pass +} diff --git a/runtime/Go/antlr/v4/errors.go b/runtime/Go/antlr/v4/errors.go new file mode 100644 index 0000000000..3954c13782 --- /dev/null +++ b/runtime/Go/antlr/v4/errors.go @@ -0,0 +1,238 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +// The root of the ANTLR exception hierarchy. In general, ANTLR tracks just +// 3 kinds of errors: prediction errors, failed predicate errors, and +// mismatched input errors. 
In each case, the parser knows where it is +// in the input, where it is in the ATN, the rule invocation stack, +// and what kind of problem occurred. + +type RecognitionException interface { + GetOffendingToken() Token + GetMessage() string + GetInputStream() IntStream +} + +type BaseRecognitionException struct { + message string + recognizer Recognizer + offendingToken Token + offendingState int + ctx RuleContext + input IntStream +} + +func NewBaseRecognitionException(message string, recognizer Recognizer, input IntStream, ctx RuleContext) *BaseRecognitionException { + + // todo + // Error.call(this) + // + // if (!!Error.captureStackTrace) { + // Error.captureStackTrace(this, RecognitionException) + // } else { + // stack := NewError().stack + // } + // TODO may be able to use - "runtime" func Stack(buf []byte, all bool) int + + t := new(BaseRecognitionException) + + t.message = message + t.recognizer = recognizer + t.input = input + t.ctx = ctx + // The current {@link Token} when an error occurred. Since not all streams + // support accessing symbols by index, we have to track the {@link Token} + // instance itself. + t.offendingToken = nil + // Get the ATN state number the parser was in at the time the error + // occurred. For {@link NoViableAltException} and + // {@link LexerNoViableAltException} exceptions, this is the + // {@link DecisionState} number. For others, it is the state whose outgoing + // edge we couldn't Match. + t.offendingState = -1 + if t.recognizer != nil { + t.offendingState = t.recognizer.GetState() + } + + return t +} + +func (b *BaseRecognitionException) GetMessage() string { + return b.message +} + +func (b *BaseRecognitionException) GetOffendingToken() Token { + return b.offendingToken +} + +func (b *BaseRecognitionException) GetInputStream() IntStream { + return b.input +} + +//

    If the state number is not known, b method returns -1.

    + +// Gets the set of input symbols which could potentially follow the +// previously Matched symbol at the time b exception was panicn. +// +//

    If the set of expected tokens is not known and could not be computed, +// b method returns {@code nil}.

    +// +// @return The set of token types that could potentially follow the current +// state in the ATN, or {@code nil} if the information is not available. +// / +func (b *BaseRecognitionException) getExpectedTokens() *IntervalSet { + if b.recognizer != nil { + return b.recognizer.GetATN().getExpectedTokens(b.offendingState, b.ctx) + } + + return nil +} + +func (b *BaseRecognitionException) String() string { + return b.message +} + +type LexerNoViableAltException struct { + *BaseRecognitionException + + startIndex int + deadEndConfigs ATNConfigSet +} + +func NewLexerNoViableAltException(lexer Lexer, input CharStream, startIndex int, deadEndConfigs ATNConfigSet) *LexerNoViableAltException { + + l := new(LexerNoViableAltException) + + l.BaseRecognitionException = NewBaseRecognitionException("", lexer, input, nil) + + l.startIndex = startIndex + l.deadEndConfigs = deadEndConfigs + + return l +} + +func (l *LexerNoViableAltException) String() string { + symbol := "" + if l.startIndex >= 0 && l.startIndex < l.input.Size() { + symbol = l.input.(CharStream).GetTextFromInterval(NewInterval(l.startIndex, l.startIndex)) + } + return "LexerNoViableAltException" + symbol +} + +type NoViableAltException struct { + *BaseRecognitionException + + startToken Token + offendingToken Token + ctx ParserRuleContext + deadEndConfigs ATNConfigSet +} + +// Indicates that the parser could not decide which of two or more paths +// to take based upon the remaining input. It tracks the starting token +// of the offending input and also knows where the parser was +// in the various paths when the error. 
Reported by ReportNoViableAlternative() +func NewNoViableAltException(recognizer Parser, input TokenStream, startToken Token, offendingToken Token, deadEndConfigs ATNConfigSet, ctx ParserRuleContext) *NoViableAltException { + + if ctx == nil { + ctx = recognizer.GetParserRuleContext() + } + + if offendingToken == nil { + offendingToken = recognizer.GetCurrentToken() + } + + if startToken == nil { + startToken = recognizer.GetCurrentToken() + } + + if input == nil { + input = recognizer.GetInputStream().(TokenStream) + } + + n := new(NoViableAltException) + n.BaseRecognitionException = NewBaseRecognitionException("", recognizer, input, ctx) + + // Which configurations did we try at input.Index() that couldn't Match + // input.LT(1)?// + n.deadEndConfigs = deadEndConfigs + // The token object at the start index the input stream might + // not be buffering tokens so get a reference to it. (At the + // time the error occurred, of course the stream needs to keep a + // buffer all of the tokens but later we might not have access to those.) + n.startToken = startToken + n.offendingToken = offendingToken + + return n +} + +type InputMisMatchException struct { + *BaseRecognitionException +} + +// This signifies any kind of mismatched input exceptions such as +// when the current input does not Match the expected token. +func NewInputMisMatchException(recognizer Parser) *InputMisMatchException { + + i := new(InputMisMatchException) + i.BaseRecognitionException = NewBaseRecognitionException("", recognizer, recognizer.GetInputStream(), recognizer.GetParserRuleContext()) + + i.offendingToken = recognizer.GetCurrentToken() + + return i + +} + +// A semantic predicate failed during validation. Validation of predicates +// occurs when normally parsing the alternative just like Matching a token. +// Disambiguating predicate evaluation occurs when we test a predicate during +// prediction. 
+ +type FailedPredicateException struct { + *BaseRecognitionException + + ruleIndex int + predicateIndex int + predicate string +} + +func NewFailedPredicateException(recognizer Parser, predicate string, message string) *FailedPredicateException { + + f := new(FailedPredicateException) + + f.BaseRecognitionException = NewBaseRecognitionException(f.formatMessage(predicate, message), recognizer, recognizer.GetInputStream(), recognizer.GetParserRuleContext()) + + s := recognizer.GetInterpreter().atn.states[recognizer.GetState()] + trans := s.GetTransitions()[0] + if trans2, ok := trans.(*PredicateTransition); ok { + f.ruleIndex = trans2.ruleIndex + f.predicateIndex = trans2.predIndex + } else { + f.ruleIndex = 0 + f.predicateIndex = 0 + } + f.predicate = predicate + f.offendingToken = recognizer.GetCurrentToken() + + return f +} + +func (f *FailedPredicateException) formatMessage(predicate, message string) string { + if message != "" { + return message + } + + return "failed predicate: {" + predicate + "}?" +} + +type ParseCancellationException struct { +} + +func NewParseCancellationException() *ParseCancellationException { + // Error.call(this) + // Error.captureStackTrace(this, ParseCancellationException) + return new(ParseCancellationException) +} diff --git a/runtime/Go/antlr/v4/file_stream.go b/runtime/Go/antlr/v4/file_stream.go new file mode 100644 index 0000000000..bd6ad5efe3 --- /dev/null +++ b/runtime/Go/antlr/v4/file_stream.go @@ -0,0 +1,49 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "bytes" + "io" + "os" +) + +// This is an InputStream that is loaded from a file all at once +// when you construct the object. 
+ +type FileStream struct { + *InputStream + + filename string +} + +func NewFileStream(fileName string) (*FileStream, error) { + + buf := bytes.NewBuffer(nil) + + f, err := os.Open(fileName) + if err != nil { + return nil, err + } + defer f.Close() + _, err = io.Copy(buf, f) + if err != nil { + return nil, err + } + + fs := new(FileStream) + + fs.filename = fileName + s := string(buf.Bytes()) + + fs.InputStream = NewInputStream(s) + + return fs, nil + +} + +func (f *FileStream) GetSourceName() string { + return f.filename +} diff --git a/runtime/Go/antlr/v4/go.mod b/runtime/Go/antlr/v4/go.mod new file mode 100644 index 0000000000..0d8feb6214 --- /dev/null +++ b/runtime/Go/antlr/v4/go.mod @@ -0,0 +1,5 @@ +module github.com/antlr/antlr4/runtime/Go/antlr/v4 + +go 1.18 + +require golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e diff --git a/runtime/Go/antlr/v4/go.sum b/runtime/Go/antlr/v4/go.sum new file mode 100644 index 0000000000..2b05f22a47 --- /dev/null +++ b/runtime/Go/antlr/v4/go.sum @@ -0,0 +1,2 @@ +golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA= +golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA= diff --git a/runtime/Go/antlr/v4/input_stream.go b/runtime/Go/antlr/v4/input_stream.go new file mode 100644 index 0000000000..a8b889cedb --- /dev/null +++ b/runtime/Go/antlr/v4/input_stream.go @@ -0,0 +1,113 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. 
+ +package antlr + +type InputStream struct { + name string + index int + data []rune + size int +} + +func NewInputStream(data string) *InputStream { + + is := new(InputStream) + + is.name = "" + is.index = 0 + is.data = []rune(data) + is.size = len(is.data) // number of runes + + return is +} + +func (is *InputStream) reset() { + is.index = 0 +} + +func (is *InputStream) Consume() { + if is.index >= is.size { + // assert is.LA(1) == TokenEOF + panic("cannot consume EOF") + } + is.index++ +} + +func (is *InputStream) LA(offset int) int { + + if offset == 0 { + return 0 // nil + } + if offset < 0 { + offset++ // e.g., translate LA(-1) to use offset=0 + } + pos := is.index + offset - 1 + + if pos < 0 || pos >= is.size { // invalid + return TokenEOF + } + + return int(is.data[pos]) +} + +func (is *InputStream) LT(offset int) int { + return is.LA(offset) +} + +func (is *InputStream) Index() int { + return is.index +} + +func (is *InputStream) Size() int { + return is.size +} + +// mark/release do nothing we have entire buffer +func (is *InputStream) Mark() int { + return -1 +} + +func (is *InputStream) Release(marker int) { +} + +func (is *InputStream) Seek(index int) { + if index <= is.index { + is.index = index // just jump don't update stream state (line,...) 
+ return + } + // seek forward + is.index = intMin(index, is.size) +} + +func (is *InputStream) GetText(start int, stop int) string { + if stop >= is.size { + stop = is.size - 1 + } + if start >= is.size { + return "" + } + + return string(is.data[start : stop+1]) +} + +func (is *InputStream) GetTextFromTokens(start, stop Token) string { + if start != nil && stop != nil { + return is.GetTextFromInterval(NewInterval(start.GetTokenIndex(), stop.GetTokenIndex())) + } + + return "" +} + +func (is *InputStream) GetTextFromInterval(i *Interval) string { + return is.GetText(i.Start, i.Stop) +} + +func (*InputStream) GetSourceName() string { + return "Obtained from string" +} + +func (is *InputStream) String() string { + return string(is.data) +} diff --git a/runtime/Go/antlr/v4/int_stream.go b/runtime/Go/antlr/v4/int_stream.go new file mode 100644 index 0000000000..4778878bd0 --- /dev/null +++ b/runtime/Go/antlr/v4/int_stream.go @@ -0,0 +1,16 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +type IntStream interface { + Consume() + LA(int) int + Mark() int + Release(marker int) + Index() int + Seek(index int) + Size() int + GetSourceName() string +} diff --git a/runtime/Go/antlr/v4/interval_set.go b/runtime/Go/antlr/v4/interval_set.go new file mode 100644 index 0000000000..c1e155e818 --- /dev/null +++ b/runtime/Go/antlr/v4/interval_set.go @@ -0,0 +1,312 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "strconv" + "strings" +) + +type Interval struct { + Start int + Stop int +} + +/* stop is not included! 
*/ +func NewInterval(start, stop int) *Interval { + i := new(Interval) + + i.Start = start + i.Stop = stop + return i +} + +func (i *Interval) Contains(item int) bool { + return item >= i.Start && item < i.Stop +} + +func (i *Interval) String() string { + if i.Start == i.Stop-1 { + return strconv.Itoa(i.Start) + } + + return strconv.Itoa(i.Start) + ".." + strconv.Itoa(i.Stop-1) +} + +func (i *Interval) length() int { + return i.Stop - i.Start +} + +type IntervalSet struct { + intervals []*Interval + readOnly bool +} + +func NewIntervalSet() *IntervalSet { + + i := new(IntervalSet) + + i.intervals = nil + i.readOnly = false + + return i +} + +func (i *IntervalSet) first() int { + if len(i.intervals) == 0 { + return TokenInvalidType + } + + return i.intervals[0].Start +} + +func (i *IntervalSet) addOne(v int) { + i.addInterval(NewInterval(v, v+1)) +} + +func (i *IntervalSet) addRange(l, h int) { + i.addInterval(NewInterval(l, h+1)) +} + +func (i *IntervalSet) addInterval(v *Interval) { + if i.intervals == nil { + i.intervals = make([]*Interval, 0) + i.intervals = append(i.intervals, v) + } else { + // find insert pos + for k, interval := range i.intervals { + // distinct range -> insert + if v.Stop < interval.Start { + i.intervals = append(i.intervals[0:k], append([]*Interval{v}, i.intervals[k:]...)...) + return + } else if v.Stop == interval.Start { + i.intervals[k].Start = v.Start + return + } else if v.Start <= interval.Stop { + i.intervals[k] = NewInterval(intMin(interval.Start, v.Start), intMax(interval.Stop, v.Stop)) + + // if not applying to end, merge potential overlaps + if k < len(i.intervals)-1 { + l := i.intervals[k] + r := i.intervals[k+1] + // if r contained in l + if l.Stop >= r.Stop { + i.intervals = append(i.intervals[0:k+1], i.intervals[k+2:]...) + } else if l.Stop >= r.Start { // partial overlap + i.intervals[k] = NewInterval(l.Start, r.Stop) + i.intervals = append(i.intervals[0:k+1], i.intervals[k+2:]...) 
+ } + } + return + } + } + // greater than any exiting + i.intervals = append(i.intervals, v) + } +} + +func (i *IntervalSet) addSet(other *IntervalSet) *IntervalSet { + if other.intervals != nil { + for k := 0; k < len(other.intervals); k++ { + i2 := other.intervals[k] + i.addInterval(NewInterval(i2.Start, i2.Stop)) + } + } + return i +} + +func (i *IntervalSet) complement(start int, stop int) *IntervalSet { + result := NewIntervalSet() + result.addInterval(NewInterval(start, stop+1)) + for j := 0; j < len(i.intervals); j++ { + result.removeRange(i.intervals[j]) + } + return result +} + +func (i *IntervalSet) contains(item int) bool { + if i.intervals == nil { + return false + } + for k := 0; k < len(i.intervals); k++ { + if i.intervals[k].Contains(item) { + return true + } + } + return false +} + +func (i *IntervalSet) length() int { + len := 0 + + for _, v := range i.intervals { + len += v.length() + } + + return len +} + +func (i *IntervalSet) removeRange(v *Interval) { + if v.Start == v.Stop-1 { + i.removeOne(v.Start) + } else if i.intervals != nil { + k := 0 + for n := 0; n < len(i.intervals); n++ { + ni := i.intervals[k] + // intervals are ordered + if v.Stop <= ni.Start { + return + } else if v.Start > ni.Start && v.Stop < ni.Stop { + i.intervals[k] = NewInterval(ni.Start, v.Start) + x := NewInterval(v.Stop, ni.Stop) + // i.intervals.splice(k, 0, x) + i.intervals = append(i.intervals[0:k], append([]*Interval{x}, i.intervals[k:]...)...) + return + } else if v.Start <= ni.Start && v.Stop >= ni.Stop { + // i.intervals.splice(k, 1) + i.intervals = append(i.intervals[0:k], i.intervals[k+1:]...) 
+ k = k - 1 // need another pass + } else if v.Start < ni.Stop { + i.intervals[k] = NewInterval(ni.Start, v.Start) + } else if v.Stop < ni.Stop { + i.intervals[k] = NewInterval(v.Stop, ni.Stop) + } + k++ + } + } +} + +func (i *IntervalSet) removeOne(v int) { + if i.intervals != nil { + for k := 0; k < len(i.intervals); k++ { + ki := i.intervals[k] + // intervals i ordered + if v < ki.Start { + return + } else if v == ki.Start && v == ki.Stop-1 { + // i.intervals.splice(k, 1) + i.intervals = append(i.intervals[0:k], i.intervals[k+1:]...) + return + } else if v == ki.Start { + i.intervals[k] = NewInterval(ki.Start+1, ki.Stop) + return + } else if v == ki.Stop-1 { + i.intervals[k] = NewInterval(ki.Start, ki.Stop-1) + return + } else if v < ki.Stop-1 { + x := NewInterval(ki.Start, v) + ki.Start = v + 1 + // i.intervals.splice(k, 0, x) + i.intervals = append(i.intervals[0:k], append([]*Interval{x}, i.intervals[k:]...)...) + return + } + } + } +} + +func (i *IntervalSet) String() string { + return i.StringVerbose(nil, nil, false) +} + +func (i *IntervalSet) StringVerbose(literalNames []string, symbolicNames []string, elemsAreChar bool) string { + + if i.intervals == nil { + return "{}" + } else if literalNames != nil || symbolicNames != nil { + return i.toTokenString(literalNames, symbolicNames) + } else if elemsAreChar { + return i.toCharString() + } + + return i.toIndexString() +} + +func (i *IntervalSet) GetIntervals() []*Interval { + return i.intervals +} + +func (i *IntervalSet) toCharString() string { + names := make([]string, len(i.intervals)) + + var sb strings.Builder + + for j := 0; j < len(i.intervals); j++ { + v := i.intervals[j] + if v.Stop == v.Start+1 { + if v.Start == TokenEOF { + names = append(names, "") + } else { + sb.WriteByte('\'') + sb.WriteRune(rune(v.Start)) + sb.WriteByte('\'') + names = append(names, sb.String()) + sb.Reset() + } + } else { + sb.WriteByte('\'') + sb.WriteRune(rune(v.Start)) + sb.WriteString("'..'") + sb.WriteRune(rune(v.Stop - 
1)) + sb.WriteByte('\'') + names = append(names, sb.String()) + sb.Reset() + } + } + if len(names) > 1 { + return "{" + strings.Join(names, ", ") + "}" + } + + return names[0] +} + +func (i *IntervalSet) toIndexString() string { + + names := make([]string, 0) + for j := 0; j < len(i.intervals); j++ { + v := i.intervals[j] + if v.Stop == v.Start+1 { + if v.Start == TokenEOF { + names = append(names, "") + } else { + names = append(names, strconv.Itoa(v.Start)) + } + } else { + names = append(names, strconv.Itoa(v.Start)+".."+strconv.Itoa(v.Stop-1)) + } + } + if len(names) > 1 { + return "{" + strings.Join(names, ", ") + "}" + } + + return names[0] +} + +func (i *IntervalSet) toTokenString(literalNames []string, symbolicNames []string) string { + names := make([]string, 0) + for _, v := range i.intervals { + for j := v.Start; j < v.Stop; j++ { + names = append(names, i.elementName(literalNames, symbolicNames, j)) + } + } + if len(names) > 1 { + return "{" + strings.Join(names, ", ") + "}" + } + + return names[0] +} + +func (i *IntervalSet) elementName(literalNames []string, symbolicNames []string, a int) string { + if a == TokenEOF { + return "" + } else if a == TokenEpsilon { + return "" + } else { + if a < len(literalNames) && literalNames[a] != "" { + return literalNames[a] + } + + return symbolicNames[a] + } +} diff --git a/runtime/Go/antlr/v4/jcollect.go b/runtime/Go/antlr/v4/jcollect.go new file mode 100644 index 0000000000..8fb01c5bd9 --- /dev/null +++ b/runtime/Go/antlr/v4/jcollect.go @@ -0,0 +1,195 @@ +package antlr + +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +import "sort" + +// Collectable is an interface that a struct should implement if it is to be +// usable as a key in these collections. 
+type Collectable[T any] interface { + Hash() int + Equals(other Collectable[T]) bool +} + +type Comparator[T any] interface { + Hash1(o T) int + Equals2(T, T) bool +} + +// JStore implements a container that allows the use of a struct to calculate the key +// for a collection of values akin to map. This is not meant to be a full-blown HashMap but just +// serve the needs of the ANTLR Go runtime. +// +// For ease of porting the logic of the runtime from the master target (Java), this collection +// operates in a similar way to Java, in that it can use any struct that supplies a Hash() and Equals() +// function as the key. The values are stored in a standard go map which internally is a form of hashmap +// itself, the key for the go map is the hash supplied by the key object. The collection is able to deal with +// hash conflicts by using a simple slice of values associated with the hash code indexed bucket. That isn't +// particularly efficient, but it is simple, and it works. As this is specifically for the ANTLR runtime, and +// we understand the requirements, then this is fine - this is not a general purpose collection. +type JStore[T any, C Comparator[T]] struct { + store map[int][]T + len int + comparator Comparator[T] +} + +func NewJStore[T any, C Comparator[T]](comparator Comparator[T]) *JStore[T, C] { + + if comparator == nil { + panic("comparator cannot be nil") + } + + s := &JStore[T, C]{ + store: make(map[int][]T, 1), + comparator: comparator, + } + return s +} + +// Put will store given value in the collection. Note that the key for storage is generated from +// the value itself - this is specifically because that is what ANTLR needs - this would not be useful +// as any kind of general collection. +// +// If the key has a hash conflict, then the value will be added to the slice of values associated with the +// hash, unless the value is already in the slice, in which case the existing value is returned. 
Value equivalence is +// tested by calling the equals() method on the key. +// +// # If the given value is already present in the store, then the existing value is returned as v and exists is set to true +// +// If the given value is not present in the store, then the value is added to the store and returned as v and exists is set to false. +func (s *JStore[T, C]) Put(value T) (v T, exists bool) { //nolint:ireturn + + kh := s.comparator.Hash1(value) + + for _, v1 := range s.store[kh] { + if s.comparator.Equals2(value, v1) { + return v1, true + } + } + s.store[kh] = append(s.store[kh], value) + s.len++ + return value, false +} + +// Get will return the value associated with the key - the type of the key is the same type as the value +// which would not generally be useful, but this is a specific thing for ANTLR where the key is +// generated using the object we are going to store. +func (s *JStore[T, C]) Get(key T) (T, bool) { //nolint:ireturn + + kh := s.comparator.Hash1(key) + + for _, v := range s.store[kh] { + if s.comparator.Equals2(key, v) { + return v, true + } + } + return key, false +} + +// Contains returns true if the given key is present in the store +func (s *JStore[T, C]) Contains(key T) bool { //nolint:ireturn + + _, present := s.Get(key) + return present +} + +func (s *JStore[T, C]) SortedSlice(less func(i, j T) bool) []T { + vs := make([]T, 0, len(s.store)) + for _, v := range s.store { + vs = append(vs, v...) 
+ } + sort.Slice(vs, func(i, j int) bool { + return less(vs[i], vs[j]) + }) + + return vs +} + +func (s *JStore[T, C]) Each(f func(T) bool) { + for _, e := range s.store { + for _, v := range e { + f(v) + } + } +} + +func (s *JStore[T, C]) Len() int { + return s.len +} + +func (s *JStore[T, C]) Values() []T { + vs := make([]T, 0, len(s.store)) + for _, e := range s.store { + for _, v := range e { + vs = append(vs, v) + } + } + return vs +} + +type entry[K, V any] struct { + key K + val V +} + +type JMap[K, V any, C Comparator[K]] struct { + store map[int][]*entry[K, V] + len int + comparator Comparator[K] +} + +func NewJMap[K, V any, C Comparator[K]](comparator Comparator[K]) *JMap[K, V, C] { + return &JMap[K, V, C]{ + store: make(map[int][]*entry[K, V], 1), + comparator: comparator, + } +} + +func (m *JMap[K, V, C]) Put(key K, val V) { + kh := m.comparator.Hash1(key) + m.store[kh] = append(m.store[kh], &entry[K, V]{key, val}) + m.len++ +} + +func (m *JMap[K, V, C]) Values() []V { + vs := make([]V, 0, len(m.store)) + for _, e := range m.store { + for _, v := range e { + vs = append(vs, v.val) + } + } + return vs +} + +func (m *JMap[K, V, C]) Get(key K) (V, bool) { + + var none V + kh := m.comparator.Hash1(key) + for _, e := range m.store[kh] { + if m.comparator.Equals2(e.key, key) { + return e.val, true + } + } + return none, false +} + +func (m *JMap[K, V, C]) Len() int { + return len(m.store) +} + +func (m *JMap[K, V, C]) Delete(key K) { + kh := m.comparator.Hash1(key) + for i, e := range m.store[kh] { + if m.comparator.Equals2(e.key, key) { + m.store[kh] = append(m.store[kh][:i], m.store[kh][i+1:]...) 
+ m.len-- + return + } + } +} + +func (m *JMap[K, V, C]) Clear() { + m.store = make(map[int][]*entry[K, V]) +} diff --git a/runtime/Go/antlr/v4/jcollect_test.go b/runtime/Go/antlr/v4/jcollect_test.go new file mode 100644 index 0000000000..816307a02c --- /dev/null +++ b/runtime/Go/antlr/v4/jcollect_test.go @@ -0,0 +1,15 @@ +package antlr + +import "testing" + +func Test_try(t *testing.T) { + tests := []struct { + name string + }{ + {"Test_try"}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + }) + } +} diff --git a/runtime/Go/antlr/v4/lexer.go b/runtime/Go/antlr/v4/lexer.go new file mode 100644 index 0000000000..6533f05164 --- /dev/null +++ b/runtime/Go/antlr/v4/lexer.go @@ -0,0 +1,416 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" + "strconv" +) + +// A lexer is recognizer that draws input symbols from a character stream. +// lexer grammars result in a subclass of this object. A Lexer object +// uses simplified Match() and error recovery mechanisms in the interest +// of speed. +/// + +type Lexer interface { + TokenSource + Recognizer + + Emit() Token + + SetChannel(int) + PushMode(int) + PopMode() int + SetType(int) + SetMode(int) +} + +type BaseLexer struct { + *BaseRecognizer + + Interpreter ILexerATNSimulator + TokenStartCharIndex int + TokenStartLine int + TokenStartColumn int + ActionType int + Virt Lexer // The most derived lexer implementation. Allows virtual method calls. 
+ + input CharStream + factory TokenFactory + tokenFactorySourcePair *TokenSourceCharStreamPair + token Token + hitEOF bool + channel int + thetype int + modeStack IntStack + mode int + text string +} + +func NewBaseLexer(input CharStream) *BaseLexer { + + lexer := new(BaseLexer) + + lexer.BaseRecognizer = NewBaseRecognizer() + + lexer.input = input + lexer.factory = CommonTokenFactoryDEFAULT + lexer.tokenFactorySourcePair = &TokenSourceCharStreamPair{lexer, input} + + lexer.Virt = lexer + + lexer.Interpreter = nil // child classes must populate it + + // The goal of all lexer rules/methods is to create a token object. + // l is an instance variable as multiple rules may collaborate to + // create a single token. NextToken will return l object after + // Matching lexer rule(s). If you subclass to allow multiple token + // emissions, then set l to the last token to be Matched or + // something nonnil so that the auto token emit mechanism will not + // emit another token. + lexer.token = nil + + // What character index in the stream did the current token start at? + // Needed, for example, to get the text for current token. Set at + // the start of NextToken. + lexer.TokenStartCharIndex = -1 + + // The line on which the first character of the token resides/// + lexer.TokenStartLine = -1 + + // The character position of first character within the line/// + lexer.TokenStartColumn = -1 + + // Once we see EOF on char stream, next token will be EOF. + // If you have DONE : EOF then you see DONE EOF. + lexer.hitEOF = false + + // The channel number for the current token/// + lexer.channel = TokenDefaultChannel + + // The token type for the current token/// + lexer.thetype = TokenInvalidType + + lexer.modeStack = make([]int, 0) + lexer.mode = LexerDefaultMode + + // You can set the text for the current token to override what is in + // the input char buffer. Use setText() or can set l instance var. 
+ // / + lexer.text = "" + + return lexer +} + +const ( + LexerDefaultMode = 0 + LexerMore = -2 + LexerSkip = -3 +) + +const ( + LexerDefaultTokenChannel = TokenDefaultChannel + LexerHidden = TokenHiddenChannel + LexerMinCharValue = 0x0000 + LexerMaxCharValue = 0x10FFFF +) + +func (b *BaseLexer) reset() { + // wack Lexer state variables + if b.input != nil { + b.input.Seek(0) // rewind the input + } + b.token = nil + b.thetype = TokenInvalidType + b.channel = TokenDefaultChannel + b.TokenStartCharIndex = -1 + b.TokenStartColumn = -1 + b.TokenStartLine = -1 + b.text = "" + + b.hitEOF = false + b.mode = LexerDefaultMode + b.modeStack = make([]int, 0) + + b.Interpreter.reset() +} + +func (b *BaseLexer) GetInterpreter() ILexerATNSimulator { + return b.Interpreter +} + +func (b *BaseLexer) GetInputStream() CharStream { + return b.input +} + +func (b *BaseLexer) GetSourceName() string { + return b.GrammarFileName +} + +func (b *BaseLexer) SetChannel(v int) { + b.channel = v +} + +func (b *BaseLexer) GetTokenFactory() TokenFactory { + return b.factory +} + +func (b *BaseLexer) setTokenFactory(f TokenFactory) { + b.factory = f +} + +func (b *BaseLexer) safeMatch() (ret int) { + defer func() { + if e := recover(); e != nil { + if re, ok := e.(RecognitionException); ok { + b.notifyListeners(re) // Report error + b.Recover(re) + ret = LexerSkip // default + } + } + }() + + return b.Interpreter.Match(b.input, b.mode) +} + +// Return a token from l source i.e., Match a token on the char stream. 
+func (b *BaseLexer) NextToken() Token { + if b.input == nil { + panic("NextToken requires a non-nil input stream.") + } + + tokenStartMarker := b.input.Mark() + + // previously in finally block + defer func() { + // make sure we release marker after Match or + // unbuffered char stream will keep buffering + b.input.Release(tokenStartMarker) + }() + + for { + if b.hitEOF { + b.EmitEOF() + return b.token + } + b.token = nil + b.channel = TokenDefaultChannel + b.TokenStartCharIndex = b.input.Index() + b.TokenStartColumn = b.Interpreter.GetCharPositionInLine() + b.TokenStartLine = b.Interpreter.GetLine() + b.text = "" + continueOuter := false + for { + b.thetype = TokenInvalidType + ttype := LexerSkip + + ttype = b.safeMatch() + + if b.input.LA(1) == TokenEOF { + b.hitEOF = true + } + if b.thetype == TokenInvalidType { + b.thetype = ttype + } + if b.thetype == LexerSkip { + continueOuter = true + break + } + if b.thetype != LexerMore { + break + } + } + + if continueOuter { + continue + } + if b.token == nil { + b.Virt.Emit() + } + return b.token + } +} + +// Instruct the lexer to Skip creating a token for current lexer rule +// and look for another token. NextToken() knows to keep looking when +// a lexer rule finishes with token set to SKIPTOKEN. Recall that +// if token==nil at end of any token rule, it creates one for you +// and emits it. 
+// / +func (b *BaseLexer) Skip() { + b.thetype = LexerSkip +} + +func (b *BaseLexer) More() { + b.thetype = LexerMore +} + +func (b *BaseLexer) SetMode(m int) { + b.mode = m +} + +func (b *BaseLexer) PushMode(m int) { + if LexerATNSimulatorDebug { + fmt.Println("pushMode " + strconv.Itoa(m)) + } + b.modeStack.Push(b.mode) + b.mode = m +} + +func (b *BaseLexer) PopMode() int { + if len(b.modeStack) == 0 { + panic("Empty Stack") + } + if LexerATNSimulatorDebug { + fmt.Println("popMode back to " + fmt.Sprint(b.modeStack[0:len(b.modeStack)-1])) + } + i, _ := b.modeStack.Pop() + b.mode = i + return b.mode +} + +func (b *BaseLexer) inputStream() CharStream { + return b.input +} + +// SetInputStream resets the lexer input stream and associated lexer state. +func (b *BaseLexer) SetInputStream(input CharStream) { + b.input = nil + b.tokenFactorySourcePair = &TokenSourceCharStreamPair{b, b.input} + b.reset() + b.input = input + b.tokenFactorySourcePair = &TokenSourceCharStreamPair{b, b.input} +} + +func (b *BaseLexer) GetTokenSourceCharStreamPair() *TokenSourceCharStreamPair { + return b.tokenFactorySourcePair +} + +// By default does not support multiple emits per NextToken invocation +// for efficiency reasons. Subclass and override l method, NextToken, +// and GetToken (to push tokens into a list and pull from that list +// rather than a single variable as l implementation does). +// / +func (b *BaseLexer) EmitToken(token Token) { + b.token = token +} + +// The standard method called to automatically emit a token at the +// outermost lexical rule. The token object should point into the +// char buffer start..stop. If there is a text override in 'text', +// use that to set the token's text. Override l method to emit +// custom Token objects or provide a Newfactory. 
+// / +func (b *BaseLexer) Emit() Token { + t := b.factory.Create(b.tokenFactorySourcePair, b.thetype, b.text, b.channel, b.TokenStartCharIndex, b.GetCharIndex()-1, b.TokenStartLine, b.TokenStartColumn) + b.EmitToken(t) + return t +} + +func (b *BaseLexer) EmitEOF() Token { + cpos := b.GetCharPositionInLine() + lpos := b.GetLine() + eof := b.factory.Create(b.tokenFactorySourcePair, TokenEOF, "", TokenDefaultChannel, b.input.Index(), b.input.Index()-1, lpos, cpos) + b.EmitToken(eof) + return eof +} + +func (b *BaseLexer) GetCharPositionInLine() int { + return b.Interpreter.GetCharPositionInLine() +} + +func (b *BaseLexer) GetLine() int { + return b.Interpreter.GetLine() +} + +func (b *BaseLexer) GetType() int { + return b.thetype +} + +func (b *BaseLexer) SetType(t int) { + b.thetype = t +} + +// What is the index of the current character of lookahead?/// +func (b *BaseLexer) GetCharIndex() int { + return b.input.Index() +} + +// Return the text Matched so far for the current token or any text override. +// Set the complete text of l token it wipes any previous changes to the text. +func (b *BaseLexer) GetText() string { + if b.text != "" { + return b.text + } + + return b.Interpreter.GetText(b.input) +} + +func (b *BaseLexer) SetText(text string) { + b.text = text +} + +func (b *BaseLexer) GetATN() *ATN { + return b.Interpreter.ATN() +} + +// Return a list of all Token objects in input char stream. +// Forces load of all tokens. Does not include EOF token. 
+// / +func (b *BaseLexer) GetAllTokens() []Token { + vl := b.Virt + tokens := make([]Token, 0) + t := vl.NextToken() + for t.GetTokenType() != TokenEOF { + tokens = append(tokens, t) + t = vl.NextToken() + } + return tokens +} + +func (b *BaseLexer) notifyListeners(e RecognitionException) { + start := b.TokenStartCharIndex + stop := b.input.Index() + text := b.input.GetTextFromInterval(NewInterval(start, stop)) + msg := "token recognition error at: '" + text + "'" + listener := b.GetErrorListenerDispatch() + listener.SyntaxError(b, nil, b.TokenStartLine, b.TokenStartColumn, msg, e) +} + +func (b *BaseLexer) getErrorDisplayForChar(c rune) string { + if c == TokenEOF { + return "" + } else if c == '\n' { + return "\\n" + } else if c == '\t' { + return "\\t" + } else if c == '\r' { + return "\\r" + } else { + return string(c) + } +} + +func (b *BaseLexer) getCharErrorDisplay(c rune) string { + return "'" + b.getErrorDisplayForChar(c) + "'" +} + +// Lexers can normally Match any char in it's vocabulary after Matching +// a token, so do the easy thing and just kill a character and hope +// it all works out. You can instead use the rule invocation stack +// to do sophisticated error recovery if you are in a fragment rule. +// / +func (b *BaseLexer) Recover(re RecognitionException) { + if b.input.LA(1) != TokenEOF { + if _, ok := re.(*LexerNoViableAltException); ok { + // Skip a char and try again + b.Interpreter.Consume(b.input) + } else { + // TODO: Do we lose character or line position information? + b.input.Consume() + } + } +} diff --git a/runtime/Go/antlr/v4/lexer_action.go b/runtime/Go/antlr/v4/lexer_action.go new file mode 100644 index 0000000000..111656c295 --- /dev/null +++ b/runtime/Go/antlr/v4/lexer_action.go @@ -0,0 +1,432 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. 
+ +package antlr + +import "strconv" + +const ( + LexerActionTypeChannel = 0 //The type of a {@link LexerChannelAction} action. + LexerActionTypeCustom = 1 //The type of a {@link LexerCustomAction} action. + LexerActionTypeMode = 2 //The type of a {@link LexerModeAction} action. + LexerActionTypeMore = 3 //The type of a {@link LexerMoreAction} action. + LexerActionTypePopMode = 4 //The type of a {@link LexerPopModeAction} action. + LexerActionTypePushMode = 5 //The type of a {@link LexerPushModeAction} action. + LexerActionTypeSkip = 6 //The type of a {@link LexerSkipAction} action. + LexerActionTypeType = 7 //The type of a {@link LexerTypeAction} action. +) + +type LexerAction interface { + getActionType() int + getIsPositionDependent() bool + execute(lexer Lexer) + Hash() int + Equals(other LexerAction) bool +} + +type BaseLexerAction struct { + actionType int + isPositionDependent bool +} + +func NewBaseLexerAction(action int) *BaseLexerAction { + la := new(BaseLexerAction) + + la.actionType = action + la.isPositionDependent = false + + return la +} + +func (b *BaseLexerAction) execute(lexer Lexer) { + panic("Not implemented") +} + +func (b *BaseLexerAction) getActionType() int { + return b.actionType +} + +func (b *BaseLexerAction) getIsPositionDependent() bool { + return b.isPositionDependent +} + +func (b *BaseLexerAction) Hash() int { + return b.actionType +} + +func (b *BaseLexerAction) Equals(other LexerAction) bool { + return b == other +} + +// Implements the {@code Skip} lexer action by calling {@link Lexer//Skip}. +// +//

    The {@code Skip} command does not have any parameters, so l action is +// implemented as a singleton instance exposed by {@link //INSTANCE}.

    +type LexerSkipAction struct { + *BaseLexerAction +} + +func NewLexerSkipAction() *LexerSkipAction { + la := new(LexerSkipAction) + la.BaseLexerAction = NewBaseLexerAction(LexerActionTypeSkip) + return la +} + +// Provides a singleton instance of l parameterless lexer action. +var LexerSkipActionINSTANCE = NewLexerSkipAction() + +func (l *LexerSkipAction) execute(lexer Lexer) { + lexer.Skip() +} + +func (l *LexerSkipAction) String() string { + return "skip" +} + +// Implements the {@code type} lexer action by calling {@link Lexer//setType} +// +// with the assigned type. +type LexerTypeAction struct { + *BaseLexerAction + + thetype int +} + +func NewLexerTypeAction(thetype int) *LexerTypeAction { + l := new(LexerTypeAction) + l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeType) + l.thetype = thetype + return l +} + +func (l *LexerTypeAction) execute(lexer Lexer) { + lexer.SetType(l.thetype) +} + +func (l *LexerTypeAction) Hash() int { + h := murmurInit(0) + h = murmurUpdate(h, l.actionType) + h = murmurUpdate(h, l.thetype) + return murmurFinish(h, 2) +} + +func (l *LexerTypeAction) Equals(other LexerAction) bool { + if l == other { + return true + } else if _, ok := other.(*LexerTypeAction); !ok { + return false + } else { + return l.thetype == other.(*LexerTypeAction).thetype + } +} + +func (l *LexerTypeAction) String() string { + return "actionType(" + strconv.Itoa(l.thetype) + ")" +} + +// Implements the {@code pushMode} lexer action by calling +// {@link Lexer//pushMode} with the assigned mode. +type LexerPushModeAction struct { + *BaseLexerAction + + mode int +} + +func NewLexerPushModeAction(mode int) *LexerPushModeAction { + + l := new(LexerPushModeAction) + l.BaseLexerAction = NewBaseLexerAction(LexerActionTypePushMode) + + l.mode = mode + return l +} + +//

    This action is implemented by calling {@link Lexer//pushMode} with the +// value provided by {@link //getMode}.

    +func (l *LexerPushModeAction) execute(lexer Lexer) { + lexer.PushMode(l.mode) +} + +func (l *LexerPushModeAction) Hash() int { + h := murmurInit(0) + h = murmurUpdate(h, l.actionType) + h = murmurUpdate(h, l.mode) + return murmurFinish(h, 2) +} + +func (l *LexerPushModeAction) Equals(other LexerAction) bool { + if l == other { + return true + } else if _, ok := other.(*LexerPushModeAction); !ok { + return false + } else { + return l.mode == other.(*LexerPushModeAction).mode + } +} + +func (l *LexerPushModeAction) String() string { + return "pushMode(" + strconv.Itoa(l.mode) + ")" +} + +// Implements the {@code popMode} lexer action by calling {@link Lexer//popMode}. +// +//

    The {@code popMode} command does not have any parameters, so l action is +// implemented as a singleton instance exposed by {@link //INSTANCE}.

    +type LexerPopModeAction struct { + *BaseLexerAction +} + +func NewLexerPopModeAction() *LexerPopModeAction { + + l := new(LexerPopModeAction) + + l.BaseLexerAction = NewBaseLexerAction(LexerActionTypePopMode) + + return l +} + +var LexerPopModeActionINSTANCE = NewLexerPopModeAction() + +//

    This action is implemented by calling {@link Lexer//popMode}.

    +func (l *LexerPopModeAction) execute(lexer Lexer) { + lexer.PopMode() +} + +func (l *LexerPopModeAction) String() string { + return "popMode" +} + +// Implements the {@code more} lexer action by calling {@link Lexer//more}. +// +//

    The {@code more} command does not have any parameters, so l action is +// implemented as a singleton instance exposed by {@link //INSTANCE}.

    + +type LexerMoreAction struct { + *BaseLexerAction +} + +func NewLexerMoreAction() *LexerMoreAction { + l := new(LexerMoreAction) + l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeMore) + + return l +} + +var LexerMoreActionINSTANCE = NewLexerMoreAction() + +//

    This action is implemented by calling {@link Lexer//popMode}.

    +func (l *LexerMoreAction) execute(lexer Lexer) { + lexer.More() +} + +func (l *LexerMoreAction) String() string { + return "more" +} + +// Implements the {@code mode} lexer action by calling {@link Lexer//mode} with +// the assigned mode. +type LexerModeAction struct { + *BaseLexerAction + + mode int +} + +func NewLexerModeAction(mode int) *LexerModeAction { + l := new(LexerModeAction) + l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeMode) + l.mode = mode + return l +} + +//

    This action is implemented by calling {@link Lexer//mode} with the +// value provided by {@link //getMode}.

    +func (l *LexerModeAction) execute(lexer Lexer) { + lexer.SetMode(l.mode) +} + +func (l *LexerModeAction) Hash() int { + h := murmurInit(0) + h = murmurUpdate(h, l.actionType) + h = murmurUpdate(h, l.mode) + return murmurFinish(h, 2) +} + +func (l *LexerModeAction) Equals(other LexerAction) bool { + if l == other { + return true + } else if _, ok := other.(*LexerModeAction); !ok { + return false + } else { + return l.mode == other.(*LexerModeAction).mode + } +} + +func (l *LexerModeAction) String() string { + return "mode(" + strconv.Itoa(l.mode) + ")" +} + +// Executes a custom lexer action by calling {@link Recognizer//action} with the +// rule and action indexes assigned to the custom action. The implementation of +// a custom action is added to the generated code for the lexer in an override +// of {@link Recognizer//action} when the grammar is compiled. +// +//

    This class may represent embedded actions created with the {...} +// syntax in ANTLR 4, as well as actions created for lexer commands where the +// command argument could not be evaluated when the grammar was compiled.

    + +// Constructs a custom lexer action with the specified rule and action +// indexes. +// +// @param ruleIndex The rule index to use for calls to +// {@link Recognizer//action}. +// @param actionIndex The action index to use for calls to +// {@link Recognizer//action}. + +type LexerCustomAction struct { + *BaseLexerAction + ruleIndex, actionIndex int +} + +func NewLexerCustomAction(ruleIndex, actionIndex int) *LexerCustomAction { + l := new(LexerCustomAction) + l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeCustom) + l.ruleIndex = ruleIndex + l.actionIndex = actionIndex + l.isPositionDependent = true + return l +} + +//

    Custom actions are implemented by calling {@link Lexer//action} with the +// appropriate rule and action indexes.

    +func (l *LexerCustomAction) execute(lexer Lexer) { + lexer.Action(nil, l.ruleIndex, l.actionIndex) +} + +func (l *LexerCustomAction) Hash() int { + h := murmurInit(0) + h = murmurUpdate(h, l.actionType) + h = murmurUpdate(h, l.ruleIndex) + h = murmurUpdate(h, l.actionIndex) + return murmurFinish(h, 3) +} + +func (l *LexerCustomAction) Equals(other LexerAction) bool { + if l == other { + return true + } else if _, ok := other.(*LexerCustomAction); !ok { + return false + } else { + return l.ruleIndex == other.(*LexerCustomAction).ruleIndex && + l.actionIndex == other.(*LexerCustomAction).actionIndex + } +} + +// Implements the {@code channel} lexer action by calling +// {@link Lexer//setChannel} with the assigned channel. +// Constructs a New{@code channel} action with the specified channel value. +// @param channel The channel value to pass to {@link Lexer//setChannel}. +type LexerChannelAction struct { + *BaseLexerAction + + channel int +} + +func NewLexerChannelAction(channel int) *LexerChannelAction { + l := new(LexerChannelAction) + l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeChannel) + l.channel = channel + return l +} + +//

    This action is implemented by calling {@link Lexer//setChannel} with the +// value provided by {@link //getChannel}.

    +func (l *LexerChannelAction) execute(lexer Lexer) { + lexer.SetChannel(l.channel) +} + +func (l *LexerChannelAction) Hash() int { + h := murmurInit(0) + h = murmurUpdate(h, l.actionType) + h = murmurUpdate(h, l.channel) + return murmurFinish(h, 2) +} + +func (l *LexerChannelAction) Equals(other LexerAction) bool { + if l == other { + return true + } else if _, ok := other.(*LexerChannelAction); !ok { + return false + } else { + return l.channel == other.(*LexerChannelAction).channel + } +} + +func (l *LexerChannelAction) String() string { + return "channel(" + strconv.Itoa(l.channel) + ")" +} + +// This implementation of {@link LexerAction} is used for tracking input offsets +// for position-dependent actions within a {@link LexerActionExecutor}. +// +//

    This action is not serialized as part of the ATN, and is only required for +// position-dependent lexer actions which appear at a location other than the +// end of a rule. For more information about DFA optimizations employed for +// lexer actions, see {@link LexerActionExecutor//append} and +// {@link LexerActionExecutor//fixOffsetBeforeMatch}.

    + +// Constructs a Newindexed custom action by associating a character offset +// with a {@link LexerAction}. +// +//

    Note: This class is only required for lexer actions for which +// {@link LexerAction//isPositionDependent} returns {@code true}.

    +// +// @param offset The offset into the input {@link CharStream}, relative to +// the token start index, at which the specified lexer action should be +// executed. +// @param action The lexer action to execute at a particular offset in the +// input {@link CharStream}. +type LexerIndexedCustomAction struct { + *BaseLexerAction + + offset int + lexerAction LexerAction + isPositionDependent bool +} + +func NewLexerIndexedCustomAction(offset int, lexerAction LexerAction) *LexerIndexedCustomAction { + + l := new(LexerIndexedCustomAction) + l.BaseLexerAction = NewBaseLexerAction(lexerAction.getActionType()) + + l.offset = offset + l.lexerAction = lexerAction + l.isPositionDependent = true + + return l +} + +//

    This method calls {@link //execute} on the result of {@link //getAction} +// using the provided {@code lexer}.

    +func (l *LexerIndexedCustomAction) execute(lexer Lexer) { + // assume the input stream position was properly set by the calling code + l.lexerAction.execute(lexer) +} + +func (l *LexerIndexedCustomAction) Hash() int { + h := murmurInit(0) + h = murmurUpdate(h, l.offset) + h = murmurUpdate(h, l.lexerAction.Hash()) + return murmurFinish(h, 2) +} + +func (l *LexerIndexedCustomAction) equals(other LexerAction) bool { + if l == other { + return true + } else if _, ok := other.(*LexerIndexedCustomAction); !ok { + return false + } else { + return l.offset == other.(*LexerIndexedCustomAction).offset && + l.lexerAction.Equals(other.(*LexerIndexedCustomAction).lexerAction) + } +} diff --git a/runtime/Go/antlr/v4/lexer_action_executor.go b/runtime/Go/antlr/v4/lexer_action_executor.go new file mode 100644 index 0000000000..be1ba7a7e3 --- /dev/null +++ b/runtime/Go/antlr/v4/lexer_action_executor.go @@ -0,0 +1,186 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import "golang.org/x/exp/slices" + +// Represents an executor for a sequence of lexer actions which traversed during +// the Matching operation of a lexer rule (token). +// +//

    The executor tracks position information for position-dependent lexer actions +// efficiently, ensuring that actions appearing only at the end of the rule do +// not cause bloating of the {@link DFA} created for the lexer.

    + +type LexerActionExecutor struct { + lexerActions []LexerAction + cachedHash int +} + +func NewLexerActionExecutor(lexerActions []LexerAction) *LexerActionExecutor { + + if lexerActions == nil { + lexerActions = make([]LexerAction, 0) + } + + l := new(LexerActionExecutor) + + l.lexerActions = lexerActions + + // Caches the result of {@link //hashCode} since the hash code is an element + // of the performance-critical {@link LexerATNConfig//hashCode} operation. + l.cachedHash = murmurInit(57) + for _, a := range lexerActions { + l.cachedHash = murmurUpdate(l.cachedHash, a.Hash()) + } + + return l +} + +// Creates a {@link LexerActionExecutor} which executes the actions for +// the input {@code lexerActionExecutor} followed by a specified +// {@code lexerAction}. +// +// @param lexerActionExecutor The executor for actions already traversed by +// the lexer while Matching a token within a particular +// {@link LexerATNConfig}. If this is {@code nil}, the method behaves as +// though it were an empty executor. +// @param lexerAction The lexer action to execute after the actions +// specified in {@code lexerActionExecutor}. +// +// @return A {@link LexerActionExecutor} for executing the combine actions +// of {@code lexerActionExecutor} and {@code lexerAction}. +func LexerActionExecutorappend(lexerActionExecutor *LexerActionExecutor, lexerAction LexerAction) *LexerActionExecutor { + if lexerActionExecutor == nil { + return NewLexerActionExecutor([]LexerAction{lexerAction}) + } + + return NewLexerActionExecutor(append(lexerActionExecutor.lexerActions, lexerAction)) +} + +// Creates a {@link LexerActionExecutor} which encodes the current offset +// for position-dependent lexer actions. +// +//

    Normally, when the executor encounters lexer actions where +// {@link LexerAction//isPositionDependent} returns {@code true}, it calls +// {@link IntStream//seek} on the input {@link CharStream} to set the input +// position to the end of the current token. This behavior provides +// for efficient DFA representation of lexer actions which appear at the end +// of a lexer rule, even when the lexer rule Matches a variable number of +// characters.

    +// +//

    Prior to traversing a Match transition in the ATN, the current offset +// from the token start index is assigned to all position-dependent lexer +// actions which have not already been assigned a fixed offset. By storing +// the offsets relative to the token start index, the DFA representation of +// lexer actions which appear in the middle of tokens remains efficient due +// to sharing among tokens of the same length, regardless of their absolute +// position in the input stream.

    +// +//

    If the current executor already has offsets assigned to all +// position-dependent lexer actions, the method returns {@code this}.

    +// +// @param offset The current offset to assign to all position-dependent +// lexer actions which do not already have offsets assigned. +// +// @return A {@link LexerActionExecutor} which stores input stream offsets +// for all position-dependent lexer actions. +// / +func (l *LexerActionExecutor) fixOffsetBeforeMatch(offset int) *LexerActionExecutor { + var updatedLexerActions []LexerAction + for i := 0; i < len(l.lexerActions); i++ { + _, ok := l.lexerActions[i].(*LexerIndexedCustomAction) + if l.lexerActions[i].getIsPositionDependent() && !ok { + if updatedLexerActions == nil { + updatedLexerActions = make([]LexerAction, 0) + + for _, a := range l.lexerActions { + updatedLexerActions = append(updatedLexerActions, a) + } + } + + updatedLexerActions[i] = NewLexerIndexedCustomAction(offset, l.lexerActions[i]) + } + } + if updatedLexerActions == nil { + return l + } + + return NewLexerActionExecutor(updatedLexerActions) +} + +// Execute the actions encapsulated by l executor within the context of a +// particular {@link Lexer}. +// +//

    This method calls {@link IntStream//seek} to set the position of the +// {@code input} {@link CharStream} prior to calling +// {@link LexerAction//execute} on a position-dependent action. Before the +// method returns, the input position will be restored to the same position +// it was in when the method was invoked.

    +// +// @param lexer The lexer instance. +// @param input The input stream which is the source for the current token. +// When l method is called, the current {@link IntStream//index} for +// {@code input} should be the start of the following token, i.e. 1 +// character past the end of the current token. +// @param startIndex The token start index. This value may be passed to +// {@link IntStream//seek} to set the {@code input} position to the beginning +// of the token. +// / +func (l *LexerActionExecutor) execute(lexer Lexer, input CharStream, startIndex int) { + requiresSeek := false + stopIndex := input.Index() + + defer func() { + if requiresSeek { + input.Seek(stopIndex) + } + }() + + for i := 0; i < len(l.lexerActions); i++ { + lexerAction := l.lexerActions[i] + if la, ok := lexerAction.(*LexerIndexedCustomAction); ok { + offset := la.offset + input.Seek(startIndex + offset) + lexerAction = la.lexerAction + requiresSeek = (startIndex + offset) != stopIndex + } else if lexerAction.getIsPositionDependent() { + input.Seek(stopIndex) + requiresSeek = false + } + lexerAction.execute(lexer) + } +} + +func (l *LexerActionExecutor) Hash() int { + if l == nil { + // TODO: Why is this here? l should not be nil + return 61 + } + + // TODO: This is created from the action itself when the struct is created - will this be an issue at some point? 
Java uses the runtime assign hashcode + return l.cachedHash +} + +func (l *LexerActionExecutor) Equals(other interface{}) bool { + if l == other { + return true + } + othert, ok := other.(*LexerActionExecutor) + if !ok { + return false + } + if othert == nil { + return false + } + if l.cachedHash != othert.cachedHash { + return false + } + if len(l.lexerActions) != len(othert.lexerActions) { + return false + } + return slices.EqualFunc(l.lexerActions, othert.lexerActions, func(i, j LexerAction) bool { + return i.Equals(j) + }) +} diff --git a/runtime/Go/antlr/v4/lexer_atn_simulator.go b/runtime/Go/antlr/v4/lexer_atn_simulator.go new file mode 100644 index 0000000000..c573b75210 --- /dev/null +++ b/runtime/Go/antlr/v4/lexer_atn_simulator.go @@ -0,0 +1,684 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" + "strconv" + "strings" +) + +var ( + LexerATNSimulatorDebug = false + LexerATNSimulatorDFADebug = false + + LexerATNSimulatorMinDFAEdge = 0 + LexerATNSimulatorMaxDFAEdge = 127 // forces unicode to stay in ATN + + LexerATNSimulatorMatchCalls = 0 +) + +type ILexerATNSimulator interface { + IATNSimulator + + reset() + Match(input CharStream, mode int) int + GetCharPositionInLine() int + GetLine() int + GetText(input CharStream) string + Consume(input CharStream) +} + +type LexerATNSimulator struct { + *BaseATNSimulator + + recog Lexer + predictionMode int + mergeCache DoubleDict + startIndex int + Line int + CharPositionInLine int + mode int + prevAccept *SimState + MatchCalls int +} + +func NewLexerATNSimulator(recog Lexer, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *LexerATNSimulator { + l := new(LexerATNSimulator) + + l.BaseATNSimulator = NewBaseATNSimulator(atn, sharedContextCache) + + l.decisionToDFA = decisionToDFA + l.recog = recog + // The 
current token's starting index into the character stream. + // Shared across DFA to ATN simulation in case the ATN fails and the + // DFA did not have a previous accept state. In l case, we use the + // ATN-generated exception object. + l.startIndex = -1 + // line number 1..n within the input/// + l.Line = 1 + // The index of the character relative to the beginning of the line + // 0..n-1/// + l.CharPositionInLine = 0 + l.mode = LexerDefaultMode + // Used during DFA/ATN exec to record the most recent accept configuration + // info + l.prevAccept = NewSimState() + // done + return l +} + +func (l *LexerATNSimulator) copyState(simulator *LexerATNSimulator) { + l.CharPositionInLine = simulator.CharPositionInLine + l.Line = simulator.Line + l.mode = simulator.mode + l.startIndex = simulator.startIndex +} + +func (l *LexerATNSimulator) Match(input CharStream, mode int) int { + l.MatchCalls++ + l.mode = mode + mark := input.Mark() + + defer func() { + input.Release(mark) + }() + + l.startIndex = input.Index() + l.prevAccept.reset() + + dfa := l.decisionToDFA[mode] + + var s0 *DFAState + l.atn.stateMu.RLock() + s0 = dfa.getS0() + l.atn.stateMu.RUnlock() + + if s0 == nil { + return l.MatchATN(input) + } + + return l.execATN(input, s0) +} + +func (l *LexerATNSimulator) reset() { + l.prevAccept.reset() + l.startIndex = -1 + l.Line = 1 + l.CharPositionInLine = 0 + l.mode = LexerDefaultMode +} + +func (l *LexerATNSimulator) MatchATN(input CharStream) int { + startState := l.atn.modeToStartState[l.mode] + + if LexerATNSimulatorDebug { + fmt.Println("MatchATN mode " + strconv.Itoa(l.mode) + " start: " + startState.String()) + } + oldMode := l.mode + s0Closure := l.computeStartState(input, startState) + suppressEdge := s0Closure.hasSemanticContext + s0Closure.hasSemanticContext = false + + next := l.addDFAState(s0Closure, suppressEdge) + + predict := l.execATN(input, next) + + if LexerATNSimulatorDebug { + fmt.Println("DFA after MatchATN: " + 
l.decisionToDFA[oldMode].ToLexerString()) + } + return predict +} + +func (l *LexerATNSimulator) execATN(input CharStream, ds0 *DFAState) int { + + if LexerATNSimulatorDebug { + fmt.Println("start state closure=" + ds0.configs.String()) + } + if ds0.isAcceptState { + // allow zero-length tokens + l.captureSimState(l.prevAccept, input, ds0) + } + t := input.LA(1) + s := ds0 // s is current/from DFA state + + for { // while more work + if LexerATNSimulatorDebug { + fmt.Println("execATN loop starting closure: " + s.configs.String()) + } + + // As we move src->trg, src->trg, we keep track of the previous trg to + // avoid looking up the DFA state again, which is expensive. + // If the previous target was already part of the DFA, we might + // be able to avoid doing a reach operation upon t. If s!=nil, + // it means that semantic predicates didn't prevent us from + // creating a DFA state. Once we know s!=nil, we check to see if + // the DFA state has an edge already for t. If so, we can just reuse + // it's configuration set there's no point in re-computing it. + // This is kind of like doing DFA simulation within the ATN + // simulation because DFA simulation is really just a way to avoid + // computing reach/closure sets. Technically, once we know that + // we have a previously added DFA state, we could jump over to + // the DFA simulator. But, that would mean popping back and forth + // a lot and making things more complicated algorithmically. + // This optimization makes a lot of sense for loops within DFA. + // A character will take us back to an existing DFA state + // that already has lots of edges out of it. e.g., .* in comments. 
+ target := l.getExistingTargetState(s, t) + if target == nil { + target = l.computeTargetState(input, s, t) + // print("Computed:" + str(target)) + } + if target == ATNSimulatorError { + break + } + // If l is a consumable input element, make sure to consume before + // capturing the accept state so the input index, line, and char + // position accurately reflect the state of the interpreter at the + // end of the token. + if t != TokenEOF { + l.Consume(input) + } + if target.isAcceptState { + l.captureSimState(l.prevAccept, input, target) + if t == TokenEOF { + break + } + } + t = input.LA(1) + s = target // flip current DFA target becomes Newsrc/from state + } + + return l.failOrAccept(l.prevAccept, input, s.configs, t) +} + +// Get an existing target state for an edge in the DFA. If the target state +// for the edge has not yet been computed or is otherwise not available, +// l method returns {@code nil}. +// +// @param s The current DFA state +// @param t The next input symbol +// @return The existing target DFA state for the given input symbol +// {@code t}, or {@code nil} if the target state for l edge is not +// already cached +func (l *LexerATNSimulator) getExistingTargetState(s *DFAState, t int) *DFAState { + if t < LexerATNSimulatorMinDFAEdge || t > LexerATNSimulatorMaxDFAEdge { + return nil + } + + l.atn.edgeMu.RLock() + defer l.atn.edgeMu.RUnlock() + if s.getEdges() == nil { + return nil + } + target := s.getIthEdge(t - LexerATNSimulatorMinDFAEdge) + if LexerATNSimulatorDebug && target != nil { + fmt.Println("reuse state " + strconv.Itoa(s.stateNumber) + " edge to " + strconv.Itoa(target.stateNumber)) + } + return target +} + +// Compute a target state for an edge in the DFA, and attempt to add the +// computed state and corresponding edge to the DFA. +// +// @param input The input stream +// @param s The current DFA state +// @param t The next input symbol +// +// @return The computed target DFA state for the given input symbol +// {@code t}. 
If {@code t} does not lead to a valid DFA state, l method +// returns {@link //ERROR}. +func (l *LexerATNSimulator) computeTargetState(input CharStream, s *DFAState, t int) *DFAState { + reach := NewOrderedATNConfigSet() + + // if we don't find an existing DFA state + // Fill reach starting from closure, following t transitions + l.getReachableConfigSet(input, s.configs, reach.BaseATNConfigSet, t) + + if len(reach.configs) == 0 { // we got nowhere on t from s + if !reach.hasSemanticContext { + // we got nowhere on t, don't panic out l knowledge it'd + // cause a failover from DFA later. + l.addDFAEdge(s, t, ATNSimulatorError, nil) + } + // stop when we can't Match any more char + return ATNSimulatorError + } + // Add an edge from s to target DFA found/created for reach + return l.addDFAEdge(s, t, nil, reach.BaseATNConfigSet) +} + +func (l *LexerATNSimulator) failOrAccept(prevAccept *SimState, input CharStream, reach ATNConfigSet, t int) int { + if l.prevAccept.dfaState != nil { + lexerActionExecutor := prevAccept.dfaState.lexerActionExecutor + l.accept(input, lexerActionExecutor, l.startIndex, prevAccept.index, prevAccept.line, prevAccept.column) + return prevAccept.dfaState.prediction + } + + // if no accept and EOF is first char, return EOF + if t == TokenEOF && input.Index() == l.startIndex { + return TokenEOF + } + + panic(NewLexerNoViableAltException(l.recog, input, l.startIndex, reach)) +} + +// Given a starting configuration set, figure out all ATN configurations +// we can reach upon input {@code t}. Parameter {@code reach} is a return +// parameter. 
+func (l *LexerATNSimulator) getReachableConfigSet(input CharStream, closure ATNConfigSet, reach ATNConfigSet, t int) { + // l is used to Skip processing for configs which have a lower priority + // than a config that already reached an accept state for the same rule + SkipAlt := ATNInvalidAltNumber + + for _, cfg := range closure.GetItems() { + currentAltReachedAcceptState := (cfg.GetAlt() == SkipAlt) + if currentAltReachedAcceptState && cfg.(*LexerATNConfig).passedThroughNonGreedyDecision { + continue + } + + if LexerATNSimulatorDebug { + + fmt.Printf("testing %s at %s\n", l.GetTokenName(t), cfg.String()) // l.recog, true)) + } + + for _, trans := range cfg.GetState().GetTransitions() { + target := l.getReachableTarget(trans, t) + if target != nil { + lexerActionExecutor := cfg.(*LexerATNConfig).lexerActionExecutor + if lexerActionExecutor != nil { + lexerActionExecutor = lexerActionExecutor.fixOffsetBeforeMatch(input.Index() - l.startIndex) + } + treatEOFAsEpsilon := (t == TokenEOF) + config := NewLexerATNConfig3(cfg.(*LexerATNConfig), target, lexerActionExecutor) + if l.closure(input, config, reach, + currentAltReachedAcceptState, true, treatEOFAsEpsilon) { + // any remaining configs for l alt have a lower priority + // than the one that just reached an accept state. 
+ SkipAlt = cfg.GetAlt() + } + } + } + } +} + +func (l *LexerATNSimulator) accept(input CharStream, lexerActionExecutor *LexerActionExecutor, startIndex, index, line, charPos int) { + if LexerATNSimulatorDebug { + fmt.Printf("ACTION %v\n", lexerActionExecutor) + } + // seek to after last char in token + input.Seek(index) + l.Line = line + l.CharPositionInLine = charPos + if lexerActionExecutor != nil && l.recog != nil { + lexerActionExecutor.execute(l.recog, input, startIndex) + } +} + +func (l *LexerATNSimulator) getReachableTarget(trans Transition, t int) ATNState { + if trans.Matches(t, 0, LexerMaxCharValue) { + return trans.getTarget() + } + + return nil +} + +func (l *LexerATNSimulator) computeStartState(input CharStream, p ATNState) *OrderedATNConfigSet { + configs := NewOrderedATNConfigSet() + for i := 0; i < len(p.GetTransitions()); i++ { + target := p.GetTransitions()[i].getTarget() + cfg := NewLexerATNConfig6(target, i+1, BasePredictionContextEMPTY) + l.closure(input, cfg, configs, false, false, false) + } + + return configs +} + +// Since the alternatives within any lexer decision are ordered by +// preference, l method stops pursuing the closure as soon as an accept +// state is reached. After the first accept state is reached by depth-first +// search from {@code config}, all other (potentially reachable) states for +// l rule would have a lower priority. +// +// @return {@code true} if an accept state is reached, otherwise +// {@code false}. 
+func (l *LexerATNSimulator) closure(input CharStream, config *LexerATNConfig, configs ATNConfigSet, + currentAltReachedAcceptState, speculative, treatEOFAsEpsilon bool) bool { + + if LexerATNSimulatorDebug { + fmt.Println("closure(" + config.String() + ")") // config.String(l.recog, true) + ")") + } + + _, ok := config.state.(*RuleStopState) + if ok { + + if LexerATNSimulatorDebug { + if l.recog != nil { + fmt.Printf("closure at %s rule stop %s\n", l.recog.GetRuleNames()[config.state.GetRuleIndex()], config) + } else { + fmt.Printf("closure at rule stop %s\n", config) + } + } + + if config.context == nil || config.context.hasEmptyPath() { + if config.context == nil || config.context.isEmpty() { + configs.Add(config, nil) + return true + } + + configs.Add(NewLexerATNConfig2(config, config.state, BasePredictionContextEMPTY), nil) + currentAltReachedAcceptState = true + } + if config.context != nil && !config.context.isEmpty() { + for i := 0; i < config.context.length(); i++ { + if config.context.getReturnState(i) != BasePredictionContextEmptyReturnState { + newContext := config.context.GetParent(i) // "pop" return state + returnState := l.atn.states[config.context.getReturnState(i)] + cfg := NewLexerATNConfig2(config, returnState, newContext) + currentAltReachedAcceptState = l.closure(input, cfg, configs, currentAltReachedAcceptState, speculative, treatEOFAsEpsilon) + } + } + } + return currentAltReachedAcceptState + } + // optimization + if !config.state.GetEpsilonOnlyTransitions() { + if !currentAltReachedAcceptState || !config.passedThroughNonGreedyDecision { + configs.Add(config, nil) + } + } + for j := 0; j < len(config.state.GetTransitions()); j++ { + trans := config.state.GetTransitions()[j] + cfg := l.getEpsilonTarget(input, config, trans, configs, speculative, treatEOFAsEpsilon) + if cfg != nil { + currentAltReachedAcceptState = l.closure(input, cfg, configs, + currentAltReachedAcceptState, speculative, treatEOFAsEpsilon) + } + } + return 
currentAltReachedAcceptState +} + +// side-effect: can alter configs.hasSemanticContext +func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *LexerATNConfig, trans Transition, + configs ATNConfigSet, speculative, treatEOFAsEpsilon bool) *LexerATNConfig { + + var cfg *LexerATNConfig + + if trans.getSerializationType() == TransitionRULE { + + rt := trans.(*RuleTransition) + newContext := SingletonBasePredictionContextCreate(config.context, rt.followState.GetStateNumber()) + cfg = NewLexerATNConfig2(config, trans.getTarget(), newContext) + + } else if trans.getSerializationType() == TransitionPRECEDENCE { + panic("Precedence predicates are not supported in lexers.") + } else if trans.getSerializationType() == TransitionPREDICATE { + // Track traversing semantic predicates. If we traverse, + // we cannot add a DFA state for l "reach" computation + // because the DFA would not test the predicate again in the + // future. Rather than creating collections of semantic predicates + // like v3 and testing them on prediction, v4 will test them on the + // fly all the time using the ATN not the DFA. This is slower but + // semantically it's not used that often. One of the key elements to + // l predicate mechanism is not adding DFA states that see + // predicates immediately afterwards in the ATN. For example, + + // a : ID {p1}? | ID {p2}? + + // should create the start state for rule 'a' (to save start state + // competition), but should not create target of ID state. The + // collection of ATN states the following ID references includes + // states reached by traversing predicates. Since l is when we + // test them, we cannot cash the DFA state target of ID. 
+ + pt := trans.(*PredicateTransition) + + if LexerATNSimulatorDebug { + fmt.Println("EVAL rule " + strconv.Itoa(trans.(*PredicateTransition).ruleIndex) + ":" + strconv.Itoa(pt.predIndex)) + } + configs.SetHasSemanticContext(true) + if l.evaluatePredicate(input, pt.ruleIndex, pt.predIndex, speculative) { + cfg = NewLexerATNConfig4(config, trans.getTarget()) + } + } else if trans.getSerializationType() == TransitionACTION { + if config.context == nil || config.context.hasEmptyPath() { + // execute actions anywhere in the start rule for a token. + // + // TODO: if the entry rule is invoked recursively, some + // actions may be executed during the recursive call. The + // problem can appear when hasEmptyPath() is true but + // isEmpty() is false. In l case, the config needs to be + // split into two contexts - one with just the empty path + // and another with everything but the empty path. + // Unfortunately, the current algorithm does not allow + // getEpsilonTarget to return two configurations, so + // additional modifications are needed before we can support + // the split operation. + lexerActionExecutor := LexerActionExecutorappend(config.lexerActionExecutor, l.atn.lexerActions[trans.(*ActionTransition).actionIndex]) + cfg = NewLexerATNConfig3(config, trans.getTarget(), lexerActionExecutor) + } else { + // ignore actions in referenced rules + cfg = NewLexerATNConfig4(config, trans.getTarget()) + } + } else if trans.getSerializationType() == TransitionEPSILON { + cfg = NewLexerATNConfig4(config, trans.getTarget()) + } else if trans.getSerializationType() == TransitionATOM || + trans.getSerializationType() == TransitionRANGE || + trans.getSerializationType() == TransitionSET { + if treatEOFAsEpsilon { + if trans.Matches(TokenEOF, 0, LexerMaxCharValue) { + cfg = NewLexerATNConfig4(config, trans.getTarget()) + } + } + } + return cfg +} + +// Evaluate a predicate specified in the lexer. +// +//

    If {@code speculative} is {@code true}, l method was called before +// {@link //consume} for the Matched character. This method should call +// {@link //consume} before evaluating the predicate to ensure position +// sensitive values, including {@link Lexer//GetText}, {@link Lexer//GetLine}, +// and {@link Lexer//getcolumn}, properly reflect the current +// lexer state. This method should restore {@code input} and the simulator +// to the original state before returning (i.e. undo the actions made by the +// call to {@link //consume}.

    +// +// @param input The input stream. +// @param ruleIndex The rule containing the predicate. +// @param predIndex The index of the predicate within the rule. +// @param speculative {@code true} if the current index in {@code input} is +// one character before the predicate's location. +// +// @return {@code true} if the specified predicate evaluates to +// {@code true}. +// / +func (l *LexerATNSimulator) evaluatePredicate(input CharStream, ruleIndex, predIndex int, speculative bool) bool { + // assume true if no recognizer was provided + if l.recog == nil { + return true + } + if !speculative { + return l.recog.Sempred(nil, ruleIndex, predIndex) + } + savedcolumn := l.CharPositionInLine + savedLine := l.Line + index := input.Index() + marker := input.Mark() + + defer func() { + l.CharPositionInLine = savedcolumn + l.Line = savedLine + input.Seek(index) + input.Release(marker) + }() + + l.Consume(input) + return l.recog.Sempred(nil, ruleIndex, predIndex) +} + +func (l *LexerATNSimulator) captureSimState(settings *SimState, input CharStream, dfaState *DFAState) { + settings.index = input.Index() + settings.line = l.Line + settings.column = l.CharPositionInLine + settings.dfaState = dfaState +} + +func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfgs ATNConfigSet) *DFAState { + if to == nil && cfgs != nil { + // leading to l call, ATNConfigSet.hasSemanticContext is used as a + // marker indicating dynamic predicate evaluation makes l edge + // dependent on the specific input sequence, so the static edge in the + // DFA should be omitted. The target DFAState is still created since + // execATN has the ability to reSynchronize with the DFA state cache + // following the predicate evaluation step. + // + // TJP notes: next time through the DFA, we see a pred again and eval. + // If that gets us to a previously created (but dangling) DFA + // state, we can continue in pure DFA mode from there. 
+ // / + suppressEdge := cfgs.HasSemanticContext() + cfgs.SetHasSemanticContext(false) + + to = l.addDFAState(cfgs, true) + + if suppressEdge { + return to + } + } + // add the edge + if tk < LexerATNSimulatorMinDFAEdge || tk > LexerATNSimulatorMaxDFAEdge { + // Only track edges within the DFA bounds + return to + } + if LexerATNSimulatorDebug { + fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + strconv.Itoa(tk)) + } + l.atn.edgeMu.Lock() + defer l.atn.edgeMu.Unlock() + if from.getEdges() == nil { + // make room for tokens 1..n and -1 masquerading as index 0 + from.setEdges(make([]*DFAState, LexerATNSimulatorMaxDFAEdge-LexerATNSimulatorMinDFAEdge+1)) + } + from.setIthEdge(tk-LexerATNSimulatorMinDFAEdge, to) // connect + + return to +} + +// Add a NewDFA state if there isn't one with l set of +// configurations already. This method also detects the first +// configuration containing an ATN rule stop state. Later, when +// traversing the DFA, we will know which rule to accept. +func (l *LexerATNSimulator) addDFAState(configs ATNConfigSet, suppressEdge bool) *DFAState { + + proposed := NewDFAState(-1, configs) + var firstConfigWithRuleStopState ATNConfig + + for _, cfg := range configs.GetItems() { + + _, ok := cfg.GetState().(*RuleStopState) + + if ok { + firstConfigWithRuleStopState = cfg + break + } + } + if firstConfigWithRuleStopState != nil { + proposed.isAcceptState = true + proposed.lexerActionExecutor = firstConfigWithRuleStopState.(*LexerATNConfig).lexerActionExecutor + proposed.setPrediction(l.atn.ruleToTokenType[firstConfigWithRuleStopState.GetState().GetRuleIndex()]) + } + dfa := l.decisionToDFA[l.mode] + + l.atn.stateMu.Lock() + defer l.atn.stateMu.Unlock() + existing, present := dfa.states.Get(proposed) + if present { + + // This state was already present, so just return it. 
+ // + proposed = existing + } else { + + // We need to add the new state + // + proposed.stateNumber = dfa.states.Len() + configs.SetReadOnly(true) + proposed.configs = configs + dfa.states.Put(proposed) + } + if !suppressEdge { + dfa.setS0(proposed) + } + return proposed +} + +func (l *LexerATNSimulator) getDFA(mode int) *DFA { + return l.decisionToDFA[mode] +} + +// Get the text Matched so far for the current token. +func (l *LexerATNSimulator) GetText(input CharStream) string { + // index is first lookahead char, don't include. + return input.GetTextFromInterval(NewInterval(l.startIndex, input.Index()-1)) +} + +func (l *LexerATNSimulator) Consume(input CharStream) { + curChar := input.LA(1) + if curChar == int('\n') { + l.Line++ + l.CharPositionInLine = 0 + } else { + l.CharPositionInLine++ + } + input.Consume() +} + +func (l *LexerATNSimulator) GetCharPositionInLine() int { + return l.CharPositionInLine +} + +func (l *LexerATNSimulator) GetLine() int { + return l.Line +} + +func (l *LexerATNSimulator) GetTokenName(tt int) string { + if tt == -1 { + return "EOF" + } + + var sb strings.Builder + sb.Grow(6) + sb.WriteByte('\'') + sb.WriteRune(rune(tt)) + sb.WriteByte('\'') + + return sb.String() +} + +func resetSimState(sim *SimState) { + sim.index = -1 + sim.line = 0 + sim.column = -1 + sim.dfaState = nil +} + +type SimState struct { + index int + line int + column int + dfaState *DFAState +} + +func NewSimState() *SimState { + s := new(SimState) + resetSimState(s) + return s +} + +func (s *SimState) reset() { + resetSimState(s) +} diff --git a/runtime/Go/antlr/v4/ll1_analyzer.go b/runtime/Go/antlr/v4/ll1_analyzer.go new file mode 100644 index 0000000000..a9e202d041 --- /dev/null +++ b/runtime/Go/antlr/v4/ll1_analyzer.go @@ -0,0 +1,216 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. 
+ +package antlr + +type LL1Analyzer struct { + atn *ATN +} + +func NewLL1Analyzer(atn *ATN) *LL1Analyzer { + la := new(LL1Analyzer) + la.atn = atn + return la +} + +// - Special value added to the lookahead sets to indicate that we hit +// a predicate during analysis if {@code seeThruPreds==false}. +// +// / +const ( + LL1AnalyzerHitPred = TokenInvalidType +) + +// * +// Calculates the SLL(1) expected lookahead set for each outgoing transition +// of an {@link ATNState}. The returned array has one element for each +// outgoing transition in {@code s}. If the closure from transition +// i leads to a semantic predicate before Matching a symbol, the +// element at index i of the result will be {@code nil}. +// +// @param s the ATN state +// @return the expected symbols for each outgoing transition of {@code s}. +func (la *LL1Analyzer) getDecisionLookahead(s ATNState) []*IntervalSet { + if s == nil { + return nil + } + count := len(s.GetTransitions()) + look := make([]*IntervalSet, count) + for alt := 0; alt < count; alt++ { + look[alt] = NewIntervalSet() + lookBusy := NewJStore[ATNConfig, Comparator[ATNConfig]](&ObjEqComparator[ATNConfig]{}) + seeThruPreds := false // fail to get lookahead upon pred + la.look1(s.GetTransitions()[alt].getTarget(), nil, BasePredictionContextEMPTY, look[alt], lookBusy, NewBitSet(), seeThruPreds, false) + // Wipe out lookahead for la alternative if we found nothing + // or we had a predicate when we !seeThruPreds + if look[alt].length() == 0 || look[alt].contains(LL1AnalyzerHitPred) { + look[alt] = nil + } + } + return look +} + +// * +// Compute set of tokens that can follow {@code s} in the ATN in the +// specified {@code ctx}. +// +//

    If {@code ctx} is {@code nil} and the end of the rule containing +// {@code s} is reached, {@link Token//EPSILON} is added to the result set. +// If {@code ctx} is not {@code nil} and the end of the outermost rule is +// reached, {@link Token//EOF} is added to the result set.

    +// +// @param s the ATN state +// @param stopState the ATN state to stop at. This can be a +// {@link BlockEndState} to detect epsilon paths through a closure. +// @param ctx the complete parser context, or {@code nil} if the context +// should be ignored +// +// @return The set of tokens that can follow {@code s} in the ATN in the +// specified {@code ctx}. +// / +func (la *LL1Analyzer) Look(s, stopState ATNState, ctx RuleContext) *IntervalSet { + r := NewIntervalSet() + seeThruPreds := true // ignore preds get all lookahead + var lookContext PredictionContext + if ctx != nil { + lookContext = predictionContextFromRuleContext(s.GetATN(), ctx) + } + la.look1(s, stopState, lookContext, r, NewJStore[ATNConfig, Comparator[ATNConfig]](&ObjEqComparator[ATNConfig]{}), NewBitSet(), seeThruPreds, true) + return r +} + +//* +// Compute set of tokens that can follow {@code s} in the ATN in the +// specified {@code ctx}. +// +//

    If {@code ctx} is {@code nil} and {@code stopState} or the end of the +// rule containing {@code s} is reached, {@link Token//EPSILON} is added to +// the result set. If {@code ctx} is not {@code nil} and {@code addEOF} is +// {@code true} and {@code stopState} or the end of the outermost rule is +// reached, {@link Token//EOF} is added to the result set.

    +// +// @param s the ATN state. +// @param stopState the ATN state to stop at. This can be a +// {@link BlockEndState} to detect epsilon paths through a closure. +// @param ctx The outer context, or {@code nil} if the outer context should +// not be used. +// @param look The result lookahead set. +// @param lookBusy A set used for preventing epsilon closures in the ATN +// from causing a stack overflow. Outside code should pass +// {@code NewSet} for la argument. +// @param calledRuleStack A set used for preventing left recursion in the +// ATN from causing a stack overflow. Outside code should pass +// {@code NewBitSet()} for la argument. +// @param seeThruPreds {@code true} to true semantic predicates as +// implicitly {@code true} and "see through them", otherwise {@code false} +// to treat semantic predicates as opaque and add {@link //HitPred} to the +// result if one is encountered. +// @param addEOF Add {@link Token//EOF} to the result if the end of the +// outermost context is reached. This parameter has no effect if {@code ctx} +// is {@code nil}. 
+ +func (la *LL1Analyzer) look2(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *JStore[ATNConfig, Comparator[ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool, i int) { + + returnState := la.atn.states[ctx.getReturnState(i)] + la.look1(returnState, stopState, ctx.GetParent(i), look, lookBusy, calledRuleStack, seeThruPreds, addEOF) + +} + +func (la *LL1Analyzer) look1(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *JStore[ATNConfig, Comparator[ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool) { + + c := NewBaseATNConfig6(s, 0, ctx) + + if lookBusy.Contains(c) { + return + } + + _, present := lookBusy.Put(c) + if present { + return + + } + if s == stopState { + if ctx == nil { + look.addOne(TokenEpsilon) + return + } else if ctx.isEmpty() && addEOF { + look.addOne(TokenEOF) + return + } + } + + _, ok := s.(*RuleStopState) + + if ok { + if ctx == nil { + look.addOne(TokenEpsilon) + return + } else if ctx.isEmpty() && addEOF { + look.addOne(TokenEOF) + return + } + + if ctx != BasePredictionContextEMPTY { + removed := calledRuleStack.contains(s.GetRuleIndex()) + defer func() { + if removed { + calledRuleStack.add(s.GetRuleIndex()) + } + }() + calledRuleStack.remove(s.GetRuleIndex()) + // run thru all possible stack tops in ctx + for i := 0; i < ctx.length(); i++ { + returnState := la.atn.states[ctx.getReturnState(i)] + la.look2(returnState, stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF, i) + } + return + } + } + + n := len(s.GetTransitions()) + + for i := 0; i < n; i++ { + t := s.GetTransitions()[i] + + if t1, ok := t.(*RuleTransition); ok { + if calledRuleStack.contains(t1.getTarget().GetRuleIndex()) { + continue + } + + newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber()) + la.look3(stopState, newContext, look, lookBusy, calledRuleStack, seeThruPreds, addEOF, t1) + } else if t2, ok := t.(AbstractPredicateTransition); ok { + if 
seeThruPreds { + la.look1(t2.getTarget(), stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF) + } else { + look.addOne(LL1AnalyzerHitPred) + } + } else if t.getIsEpsilon() { + la.look1(t.getTarget(), stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF) + } else if _, ok := t.(*WildcardTransition); ok { + look.addRange(TokenMinUserTokenType, la.atn.maxTokenType) + } else { + set := t.getLabel() + if set != nil { + if _, ok := t.(*NotSetTransition); ok { + set = set.complement(TokenMinUserTokenType, la.atn.maxTokenType) + } + look.addSet(set) + } + } + } +} + +func (la *LL1Analyzer) look3(stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *JStore[ATNConfig, Comparator[ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool, t1 *RuleTransition) { + + newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber()) + + defer func() { + calledRuleStack.remove(t1.getTarget().GetRuleIndex()) + }() + + calledRuleStack.add(t1.getTarget().GetRuleIndex()) + la.look1(t1.getTarget(), stopState, newContext, look, lookBusy, calledRuleStack, seeThruPreds, addEOF) + +} diff --git a/runtime/Go/antlr/v4/parser.go b/runtime/Go/antlr/v4/parser.go new file mode 100644 index 0000000000..d26bf06392 --- /dev/null +++ b/runtime/Go/antlr/v4/parser.go @@ -0,0 +1,708 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. 
+ +package antlr + +import ( + "fmt" + "strconv" +) + +type Parser interface { + Recognizer + + GetInterpreter() *ParserATNSimulator + + GetTokenStream() TokenStream + GetTokenFactory() TokenFactory + GetParserRuleContext() ParserRuleContext + SetParserRuleContext(ParserRuleContext) + Consume() Token + GetParseListeners() []ParseTreeListener + + GetErrorHandler() ErrorStrategy + SetErrorHandler(ErrorStrategy) + GetInputStream() IntStream + GetCurrentToken() Token + GetExpectedTokens() *IntervalSet + NotifyErrorListeners(string, Token, RecognitionException) + IsExpectedToken(int) bool + GetPrecedence() int + GetRuleInvocationStack(ParserRuleContext) []string +} + +type BaseParser struct { + *BaseRecognizer + + Interpreter *ParserATNSimulator + BuildParseTrees bool + + input TokenStream + errHandler ErrorStrategy + precedenceStack IntStack + ctx ParserRuleContext + + tracer *TraceListener + parseListeners []ParseTreeListener + _SyntaxErrors int +} + +// p.is all the parsing support code essentially most of it is error +// recovery stuff.// +func NewBaseParser(input TokenStream) *BaseParser { + + p := new(BaseParser) + + p.BaseRecognizer = NewBaseRecognizer() + + // The input stream. + p.input = nil + // The error handling strategy for the parser. The default value is a new + // instance of {@link DefaultErrorStrategy}. + p.errHandler = NewDefaultErrorStrategy() + p.precedenceStack = make([]int, 0) + p.precedenceStack.Push(0) + // The {@link ParserRuleContext} object for the currently executing rule. + // p.is always non-nil during the parsing process. + p.ctx = nil + // Specifies whether or not the parser should construct a parse tree during + // the parsing process. The default value is {@code true}. + p.BuildParseTrees = true + // When {@link //setTrace}{@code (true)} is called, a reference to the + // {@link TraceListener} is stored here so it can be easily removed in a + // later call to {@link //setTrace}{@code (false)}. 
The listener itself is + // implemented as a parser listener so p.field is not directly used by + // other parser methods. + p.tracer = nil + // The list of {@link ParseTreeListener} listeners registered to receive + // events during the parse. + p.parseListeners = nil + // The number of syntax errors Reported during parsing. p.value is + // incremented each time {@link //NotifyErrorListeners} is called. + p._SyntaxErrors = 0 + p.SetInputStream(input) + + return p +} + +// p.field maps from the serialized ATN string to the deserialized {@link +// ATN} with +// bypass alternatives. +// +// @see ATNDeserializationOptions//isGenerateRuleBypassTransitions() +var bypassAltsAtnCache = make(map[string]int) + +// reset the parser's state// +func (p *BaseParser) reset() { + if p.input != nil { + p.input.Seek(0) + } + p.errHandler.reset(p) + p.ctx = nil + p._SyntaxErrors = 0 + p.SetTrace(nil) + p.precedenceStack = make([]int, 0) + p.precedenceStack.Push(0) + if p.Interpreter != nil { + p.Interpreter.reset() + } +} + +func (p *BaseParser) GetErrorHandler() ErrorStrategy { + return p.errHandler +} + +func (p *BaseParser) SetErrorHandler(e ErrorStrategy) { + p.errHandler = e +} + +// Match current input symbol against {@code ttype}. If the symbol type +// Matches, {@link ANTLRErrorStrategy//ReportMatch} and {@link //consume} are +// called to complete the Match process. +// +//

    If the symbol type does not Match, +// {@link ANTLRErrorStrategy//recoverInline} is called on the current error +// strategy to attempt recovery. If {@link //getBuildParseTree} is +// {@code true} and the token index of the symbol returned by +// {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to +// the parse tree by calling {@link ParserRuleContext//addErrorNode}.

    +// +// @param ttype the token type to Match +// @return the Matched symbol +// @panics RecognitionException if the current input symbol did not Match +// {@code ttype} and the error strategy could not recover from the +// mismatched symbol + +func (p *BaseParser) Match(ttype int) Token { + + t := p.GetCurrentToken() + + if t.GetTokenType() == ttype { + p.errHandler.ReportMatch(p) + p.Consume() + } else { + t = p.errHandler.RecoverInline(p) + if p.BuildParseTrees && t.GetTokenIndex() == -1 { + // we must have conjured up a Newtoken during single token + // insertion + // if it's not the current symbol + p.ctx.AddErrorNode(t) + } + } + + return t +} + +// Match current input symbol as a wildcard. If the symbol type Matches +// (i.e. has a value greater than 0), {@link ANTLRErrorStrategy//ReportMatch} +// and {@link //consume} are called to complete the Match process. +// +//

    If the symbol type does not Match, +// {@link ANTLRErrorStrategy//recoverInline} is called on the current error +// strategy to attempt recovery. If {@link //getBuildParseTree} is +// {@code true} and the token index of the symbol returned by +// {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to +// the parse tree by calling {@link ParserRuleContext//addErrorNode}.

    +// +// @return the Matched symbol +// @panics RecognitionException if the current input symbol did not Match +// a wildcard and the error strategy could not recover from the mismatched +// symbol + +func (p *BaseParser) MatchWildcard() Token { + t := p.GetCurrentToken() + if t.GetTokenType() > 0 { + p.errHandler.ReportMatch(p) + p.Consume() + } else { + t = p.errHandler.RecoverInline(p) + if p.BuildParseTrees && t.GetTokenIndex() == -1 { + // we must have conjured up a Newtoken during single token + // insertion + // if it's not the current symbol + p.ctx.AddErrorNode(t) + } + } + return t +} + +func (p *BaseParser) GetParserRuleContext() ParserRuleContext { + return p.ctx +} + +func (p *BaseParser) SetParserRuleContext(v ParserRuleContext) { + p.ctx = v +} + +func (p *BaseParser) GetParseListeners() []ParseTreeListener { + if p.parseListeners == nil { + return make([]ParseTreeListener, 0) + } + return p.parseListeners +} + +// Registers {@code listener} to receive events during the parsing process. +// +//

    To support output-preserving grammar transformations (including but not +// limited to left-recursion removal, automated left-factoring, and +// optimized code generation), calls to listener methods during the parse +// may differ substantially from calls made by +// {@link ParseTreeWalker//DEFAULT} used after the parse is complete. In +// particular, rule entry and exit events may occur in a different order +// during the parse than after the parser. In addition, calls to certain +// rule entry methods may be omitted.

    +// +//

    With the following specific exceptions, calls to listener events are +// deterministic, i.e. for identical input the calls to listener +// methods will be the same.

    +// +//
      +//
    • Alterations to the grammar used to generate code may change the +// behavior of the listener calls.
    • +//
    • Alterations to the command line options passed to ANTLR 4 when +// generating the parser may change the behavior of the listener calls.
    • +//
    • Changing the version of the ANTLR Tool used to generate the parser +// may change the behavior of the listener calls.
    • +//
    +// +// @param listener the listener to add +// +// @panics nilPointerException if {@code} listener is {@code nil} +func (p *BaseParser) AddParseListener(listener ParseTreeListener) { + if listener == nil { + panic("listener") + } + if p.parseListeners == nil { + p.parseListeners = make([]ParseTreeListener, 0) + } + p.parseListeners = append(p.parseListeners, listener) +} + +// Remove {@code listener} from the list of parse listeners. +// +//

    If {@code listener} is {@code nil} or has not been added as a parse +// listener, p.method does nothing.

    +// @param listener the listener to remove +func (p *BaseParser) RemoveParseListener(listener ParseTreeListener) { + + if p.parseListeners != nil { + + idx := -1 + for i, v := range p.parseListeners { + if v == listener { + idx = i + break + } + } + + if idx == -1 { + return + } + + // remove the listener from the slice + p.parseListeners = append(p.parseListeners[0:idx], p.parseListeners[idx+1:]...) + + if len(p.parseListeners) == 0 { + p.parseListeners = nil + } + } +} + +// Remove all parse listeners. +func (p *BaseParser) removeParseListeners() { + p.parseListeners = nil +} + +// Notify any parse listeners of an enter rule event. +func (p *BaseParser) TriggerEnterRuleEvent() { + if p.parseListeners != nil { + ctx := p.ctx + for _, listener := range p.parseListeners { + listener.EnterEveryRule(ctx) + ctx.EnterRule(listener) + } + } +} + +// Notify any parse listeners of an exit rule event. +// +// @see //addParseListener +func (p *BaseParser) TriggerExitRuleEvent() { + if p.parseListeners != nil { + // reverse order walk of listeners + ctx := p.ctx + l := len(p.parseListeners) - 1 + + for i := range p.parseListeners { + listener := p.parseListeners[l-i] + ctx.ExitRule(listener) + listener.ExitEveryRule(ctx) + } + } +} + +func (p *BaseParser) GetInterpreter() *ParserATNSimulator { + return p.Interpreter +} + +func (p *BaseParser) GetATN() *ATN { + return p.Interpreter.atn +} + +func (p *BaseParser) GetTokenFactory() TokenFactory { + return p.input.GetTokenSource().GetTokenFactory() +} + +// Tell our token source and error strategy about a Newway to create tokens.// +func (p *BaseParser) setTokenFactory(factory TokenFactory) { + p.input.GetTokenSource().setTokenFactory(factory) +} + +// The ATN with bypass alternatives is expensive to create so we create it +// lazily. +// +// @panics UnsupportedOperationException if the current parser does not +// implement the {@link //getSerializedATN()} method. 
+func (p *BaseParser) GetATNWithBypassAlts() { + + // TODO + panic("Not implemented!") + + // serializedAtn := p.getSerializedATN() + // if (serializedAtn == nil) { + // panic("The current parser does not support an ATN with bypass alternatives.") + // } + // result := p.bypassAltsAtnCache[serializedAtn] + // if (result == nil) { + // deserializationOptions := NewATNDeserializationOptions(nil) + // deserializationOptions.generateRuleBypassTransitions = true + // result = NewATNDeserializer(deserializationOptions).deserialize(serializedAtn) + // p.bypassAltsAtnCache[serializedAtn] = result + // } + // return result +} + +// The preferred method of getting a tree pattern. For example, here's a +// sample use: +// +//
    +// ParseTree t = parser.expr()
    +// ParseTreePattern p = parser.compileParseTreePattern("<ID>+0",
    +// MyParser.RULE_expr)
    +// ParseTreeMatch m = p.Match(t)
    +// String id = m.Get("ID")
    +// 
    + +func (p *BaseParser) compileParseTreePattern(pattern, patternRuleIndex, lexer Lexer) { + + panic("NewParseTreePatternMatcher not implemented!") + // + // if (lexer == nil) { + // if (p.GetTokenStream() != nil) { + // tokenSource := p.GetTokenStream().GetTokenSource() + // if _, ok := tokenSource.(ILexer); ok { + // lexer = tokenSource + // } + // } + // } + // if (lexer == nil) { + // panic("Parser can't discover a lexer to use") + // } + + // m := NewParseTreePatternMatcher(lexer, p) + // return m.compile(pattern, patternRuleIndex) +} + +func (p *BaseParser) GetInputStream() IntStream { + return p.GetTokenStream() +} + +func (p *BaseParser) SetInputStream(input TokenStream) { + p.SetTokenStream(input) +} + +func (p *BaseParser) GetTokenStream() TokenStream { + return p.input +} + +// Set the token stream and reset the parser.// +func (p *BaseParser) SetTokenStream(input TokenStream) { + p.input = nil + p.reset() + p.input = input +} + +// Match needs to return the current input symbol, which gets put +// into the label for the associated token ref e.g., x=ID. 
+func (p *BaseParser) GetCurrentToken() Token { + return p.input.LT(1) +} + +func (p *BaseParser) NotifyErrorListeners(msg string, offendingToken Token, err RecognitionException) { + if offendingToken == nil { + offendingToken = p.GetCurrentToken() + } + p._SyntaxErrors++ + line := offendingToken.GetLine() + column := offendingToken.GetColumn() + listener := p.GetErrorListenerDispatch() + listener.SyntaxError(p, offendingToken, line, column, msg, err) +} + +func (p *BaseParser) Consume() Token { + o := p.GetCurrentToken() + if o.GetTokenType() != TokenEOF { + p.GetInputStream().Consume() + } + hasListener := p.parseListeners != nil && len(p.parseListeners) > 0 + if p.BuildParseTrees || hasListener { + if p.errHandler.InErrorRecoveryMode(p) { + node := p.ctx.AddErrorNode(o) + if p.parseListeners != nil { + for _, l := range p.parseListeners { + l.VisitErrorNode(node) + } + } + + } else { + node := p.ctx.AddTokenNode(o) + if p.parseListeners != nil { + for _, l := range p.parseListeners { + l.VisitTerminal(node) + } + } + } + // node.invokingState = p.state + } + + return o +} + +func (p *BaseParser) addContextToParseTree() { + // add current context to parent if we have a parent + if p.ctx.GetParent() != nil { + p.ctx.GetParent().(ParserRuleContext).AddChild(p.ctx) + } +} + +func (p *BaseParser) EnterRule(localctx ParserRuleContext, state, ruleIndex int) { + p.SetState(state) + p.ctx = localctx + p.ctx.SetStart(p.input.LT(1)) + if p.BuildParseTrees { + p.addContextToParseTree() + } + if p.parseListeners != nil { + p.TriggerEnterRuleEvent() + } +} + +func (p *BaseParser) ExitRule() { + p.ctx.SetStop(p.input.LT(-1)) + // trigger event on ctx, before it reverts to parent + if p.parseListeners != nil { + p.TriggerExitRuleEvent() + } + p.SetState(p.ctx.GetInvokingState()) + if p.ctx.GetParent() != nil { + p.ctx = p.ctx.GetParent().(ParserRuleContext) + } else { + p.ctx = nil + } +} + +func (p *BaseParser) EnterOuterAlt(localctx ParserRuleContext, altNum int) { + 
localctx.SetAltNumber(altNum) + // if we have Newlocalctx, make sure we replace existing ctx + // that is previous child of parse tree + if p.BuildParseTrees && p.ctx != localctx { + if p.ctx.GetParent() != nil { + p.ctx.GetParent().(ParserRuleContext).RemoveLastChild() + p.ctx.GetParent().(ParserRuleContext).AddChild(localctx) + } + } + p.ctx = localctx +} + +// Get the precedence level for the top-most precedence rule. +// +// @return The precedence level for the top-most precedence rule, or -1 if +// the parser context is not nested within a precedence rule. + +func (p *BaseParser) GetPrecedence() int { + if len(p.precedenceStack) == 0 { + return -1 + } + + return p.precedenceStack[len(p.precedenceStack)-1] +} + +func (p *BaseParser) EnterRecursionRule(localctx ParserRuleContext, state, ruleIndex, precedence int) { + p.SetState(state) + p.precedenceStack.Push(precedence) + p.ctx = localctx + p.ctx.SetStart(p.input.LT(1)) + if p.parseListeners != nil { + p.TriggerEnterRuleEvent() // simulates rule entry for + // left-recursive rules + } +} + +// +// Like {@link //EnterRule} but for recursive rules. 
+ +func (p *BaseParser) PushNewRecursionContext(localctx ParserRuleContext, state, ruleIndex int) { + previous := p.ctx + previous.SetParent(localctx) + previous.SetInvokingState(state) + previous.SetStop(p.input.LT(-1)) + + p.ctx = localctx + p.ctx.SetStart(previous.GetStart()) + if p.BuildParseTrees { + p.ctx.AddChild(previous) + } + if p.parseListeners != nil { + p.TriggerEnterRuleEvent() // simulates rule entry for + // left-recursive rules + } +} + +func (p *BaseParser) UnrollRecursionContexts(parentCtx ParserRuleContext) { + p.precedenceStack.Pop() + p.ctx.SetStop(p.input.LT(-1)) + retCtx := p.ctx // save current ctx (return value) + // unroll so ctx is as it was before call to recursive method + if p.parseListeners != nil { + for p.ctx != parentCtx { + p.TriggerExitRuleEvent() + p.ctx = p.ctx.GetParent().(ParserRuleContext) + } + } else { + p.ctx = parentCtx + } + // hook into tree + retCtx.SetParent(parentCtx) + if p.BuildParseTrees && parentCtx != nil { + // add return ctx into invoking rule's tree + parentCtx.AddChild(retCtx) + } +} + +func (p *BaseParser) GetInvokingContext(ruleIndex int) ParserRuleContext { + ctx := p.ctx + for ctx != nil { + if ctx.GetRuleIndex() == ruleIndex { + return ctx + } + ctx = ctx.GetParent().(ParserRuleContext) + } + return nil +} + +func (p *BaseParser) Precpred(localctx RuleContext, precedence int) bool { + return precedence >= p.precedenceStack[len(p.precedenceStack)-1] +} + +func (p *BaseParser) inContext(context ParserRuleContext) bool { + // TODO: useful in parser? + return false +} + +// +// Checks whether or not {@code symbol} can follow the current state in the +// ATN. The behavior of p.method is equivalent to the following, but is +// implemented such that the complete context-sensitive follow set does not +// need to be explicitly constructed. +// +//
    +// return getExpectedTokens().contains(symbol)
    +// 
    +// +// @param symbol the symbol type to check +// @return {@code true} if {@code symbol} can follow the current state in +// the ATN, otherwise {@code false}. + +func (p *BaseParser) IsExpectedToken(symbol int) bool { + atn := p.Interpreter.atn + ctx := p.ctx + s := atn.states[p.state] + following := atn.NextTokens(s, nil) + if following.contains(symbol) { + return true + } + if !following.contains(TokenEpsilon) { + return false + } + for ctx != nil && ctx.GetInvokingState() >= 0 && following.contains(TokenEpsilon) { + invokingState := atn.states[ctx.GetInvokingState()] + rt := invokingState.GetTransitions()[0] + following = atn.NextTokens(rt.(*RuleTransition).followState, nil) + if following.contains(symbol) { + return true + } + ctx = ctx.GetParent().(ParserRuleContext) + } + if following.contains(TokenEpsilon) && symbol == TokenEOF { + return true + } + + return false +} + +// Computes the set of input symbols which could follow the current parser +// state and context, as given by {@link //GetState} and {@link //GetContext}, +// respectively. +// +// @see ATN//getExpectedTokens(int, RuleContext) +func (p *BaseParser) GetExpectedTokens() *IntervalSet { + return p.Interpreter.atn.getExpectedTokens(p.state, p.ctx) +} + +func (p *BaseParser) GetExpectedTokensWithinCurrentRule() *IntervalSet { + atn := p.Interpreter.atn + s := atn.states[p.state] + return atn.NextTokens(s, nil) +} + +// Get a rule's index (i.e., {@code RULE_ruleName} field) or -1 if not found.// +func (p *BaseParser) GetRuleIndex(ruleName string) int { + var ruleIndex, ok = p.GetRuleIndexMap()[ruleName] + if ok { + return ruleIndex + } + + return -1 +} + +// Return List<String> of the rule names in your parser instance +// leading up to a call to the current rule. You could override if +// you want more details such as the file/line info of where +// in the ATN a rule is invoked. +// +// this very useful for error messages. 
+ +func (p *BaseParser) GetRuleInvocationStack(c ParserRuleContext) []string { + if c == nil { + c = p.ctx + } + stack := make([]string, 0) + for c != nil { + // compute what follows who invoked us + ruleIndex := c.GetRuleIndex() + if ruleIndex < 0 { + stack = append(stack, "n/a") + } else { + stack = append(stack, p.GetRuleNames()[ruleIndex]) + } + + vp := c.GetParent() + + if vp == nil { + break + } + + c = vp.(ParserRuleContext) + } + return stack +} + +// For debugging and other purposes.// +func (p *BaseParser) GetDFAStrings() string { + return fmt.Sprint(p.Interpreter.decisionToDFA) +} + +// For debugging and other purposes.// +func (p *BaseParser) DumpDFA() { + seenOne := false + for _, dfa := range p.Interpreter.decisionToDFA { + if dfa.states.Len() > 0 { + if seenOne { + fmt.Println() + } + fmt.Println("Decision " + strconv.Itoa(dfa.decision) + ":") + fmt.Print(dfa.String(p.LiteralNames, p.SymbolicNames)) + seenOne = true + } + } +} + +func (p *BaseParser) GetSourceName() string { + return p.GrammarFileName +} + +// During a parse is sometimes useful to listen in on the rule entry and exit +// events as well as token Matches. p.is for quick and dirty debugging. +func (p *BaseParser) SetTrace(trace *TraceListener) { + if trace == nil { + p.RemoveParseListener(p.tracer) + p.tracer = nil + } else { + if p.tracer != nil { + p.RemoveParseListener(p.tracer) + } + p.tracer = NewTraceListener(p) + p.AddParseListener(p.tracer) + } +} diff --git a/runtime/Go/antlr/v4/parser_atn_simulator.go b/runtime/Go/antlr/v4/parser_atn_simulator.go new file mode 100644 index 0000000000..c780e3c5f2 --- /dev/null +++ b/runtime/Go/antlr/v4/parser_atn_simulator.go @@ -0,0 +1,1549 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. 
+ +package antlr + +import ( + "fmt" + "strconv" + "strings" +) + +var ( + ParserATNSimulatorDebug = false + ParserATNSimulatorListATNDecisions = false + ParserATNSimulatorDFADebug = false + ParserATNSimulatorRetryDebug = false + TurnOffLRLoopEntryBranchOpt = false +) + +type ParserATNSimulator struct { + *BaseATNSimulator + + parser Parser + predictionMode int + input TokenStream + startIndex int + dfa *DFA + mergeCache *DoubleDict + outerContext ParserRuleContext +} + +func NewParserATNSimulator(parser Parser, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *ParserATNSimulator { + + p := new(ParserATNSimulator) + + p.BaseATNSimulator = NewBaseATNSimulator(atn, sharedContextCache) + + p.parser = parser + p.decisionToDFA = decisionToDFA + // SLL, LL, or LL + exact ambig detection?// + p.predictionMode = PredictionModeLL + // LAME globals to avoid parameters!!!!! I need these down deep in predTransition + p.input = nil + p.startIndex = 0 + p.outerContext = nil + p.dfa = nil + // Each prediction operation uses a cache for merge of prediction contexts. + // Don't keep around as it wastes huge amounts of memory. DoubleKeyMap + // isn't Synchronized but we're ok since two threads shouldn't reuse same + // parser/atnsim object because it can only handle one input at a time. + // This maps graphs a and b to merged result c. (a,b)&rarrc. We can avoid + // the merge if we ever see a and b again. Note that (b,a)&rarrc should + // also be examined during cache lookup. 
+ // + p.mergeCache = nil + + return p +} + +func (p *ParserATNSimulator) GetPredictionMode() int { + return p.predictionMode +} + +func (p *ParserATNSimulator) SetPredictionMode(v int) { + p.predictionMode = v +} + +func (p *ParserATNSimulator) reset() { +} + +func (p *ParserATNSimulator) AdaptivePredict(input TokenStream, decision int, outerContext ParserRuleContext) int { + if ParserATNSimulatorDebug || ParserATNSimulatorListATNDecisions { + fmt.Println("AdaptivePredict decision " + strconv.Itoa(decision) + + " exec LA(1)==" + p.getLookaheadName(input) + + " line " + strconv.Itoa(input.LT(1).GetLine()) + ":" + + strconv.Itoa(input.LT(1).GetColumn())) + } + + p.input = input + p.startIndex = input.Index() + p.outerContext = outerContext + + dfa := p.decisionToDFA[decision] + p.dfa = dfa + m := input.Mark() + index := input.Index() + + defer func() { + p.dfa = nil + p.mergeCache = nil // wack cache after each prediction + input.Seek(index) + input.Release(m) + }() + + // Now we are certain to have a specific decision's DFA + // But, do we still need an initial state? + var s0 *DFAState + p.atn.stateMu.RLock() + if dfa.getPrecedenceDfa() { + p.atn.edgeMu.RLock() + // the start state for a precedence DFA depends on the current + // parser precedence, and is provided by a DFA method. 
+ s0 = dfa.getPrecedenceStartState(p.parser.GetPrecedence()) + p.atn.edgeMu.RUnlock() + } else { + // the start state for a "regular" DFA is just s0 + s0 = dfa.getS0() + } + p.atn.stateMu.RUnlock() + + if s0 == nil { + if outerContext == nil { + outerContext = ParserRuleContextEmpty + } + if ParserATNSimulatorDebug || ParserATNSimulatorListATNDecisions { + fmt.Println("predictATN decision " + strconv.Itoa(dfa.decision) + + " exec LA(1)==" + p.getLookaheadName(input) + + ", outerContext=" + outerContext.String(p.parser.GetRuleNames(), nil)) + } + fullCtx := false + s0Closure := p.computeStartState(dfa.atnStartState, ParserRuleContextEmpty, fullCtx) + + p.atn.stateMu.Lock() + if dfa.getPrecedenceDfa() { + // If p is a precedence DFA, we use applyPrecedenceFilter + // to convert the computed start state to a precedence start + // state. We then use DFA.setPrecedenceStartState to set the + // appropriate start state for the precedence level rather + // than simply setting DFA.s0. + // + dfa.s0.configs = s0Closure + s0Closure = p.applyPrecedenceFilter(s0Closure) + s0 = p.addDFAState(dfa, NewDFAState(-1, s0Closure)) + p.atn.edgeMu.Lock() + dfa.setPrecedenceStartState(p.parser.GetPrecedence(), s0) + p.atn.edgeMu.Unlock() + } else { + s0 = p.addDFAState(dfa, NewDFAState(-1, s0Closure)) + dfa.setS0(s0) + } + p.atn.stateMu.Unlock() + } + + alt := p.execATN(dfa, s0, input, index, outerContext) + if ParserATNSimulatorDebug { + fmt.Println("DFA after predictATN: " + dfa.String(p.parser.GetLiteralNames(), nil)) + } + return alt + +} + +// Performs ATN simulation to compute a predicted alternative based +// upon the remaining input, but also updates the DFA cache to avoid +// having to traverse the ATN again for the same input sequence. + +// There are some key conditions we're looking for after computing a new +// set of ATN configs (proposed DFA state): +// if the set is empty, there is no viable alternative for current symbol +// does the state uniquely predict an alternative? 
+// does the state have a conflict that would prevent us from +// putting it on the work list? + +// We also have some key operations to do: +// add an edge from previous DFA state to potentially NewDFA state, D, +// upon current symbol but only if adding to work list, which means in all +// cases except no viable alternative (and possibly non-greedy decisions?) +// collecting predicates and adding semantic context to DFA accept states +// adding rule context to context-sensitive DFA accept states +// consuming an input symbol +// Reporting a conflict +// Reporting an ambiguity +// Reporting a context sensitivity +// Reporting insufficient predicates + +// cover these cases: +// +// dead end +// single alt +// single alt + preds +// conflict +// conflict + preds +func (p *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream, startIndex int, outerContext ParserRuleContext) int { + + if ParserATNSimulatorDebug || ParserATNSimulatorListATNDecisions { + fmt.Println("execATN decision " + strconv.Itoa(dfa.decision) + + " exec LA(1)==" + p.getLookaheadName(input) + + " line " + strconv.Itoa(input.LT(1).GetLine()) + ":" + strconv.Itoa(input.LT(1).GetColumn())) + } + + previousD := s0 + + if ParserATNSimulatorDebug { + fmt.Println("s0 = " + s0.String()) + } + t := input.LA(1) + for { // for more work + D := p.getExistingTargetState(previousD, t) + if D == nil { + D = p.computeTargetState(dfa, previousD, t) + } + if D == ATNSimulatorError { + // if any configs in previous dipped into outer context, that + // means that input up to t actually finished entry rule + // at least for SLL decision. Full LL doesn't dip into outer + // so don't need special case. + // We will get an error no matter what so delay until after + // decision better error message. Also, no reachable target + // ATN states in SLL implies LL will also get nowhere. + // If conflict in states that dip out, choose min since we + // will get error no matter what. 
+ e := p.noViableAlt(input, outerContext, previousD.configs, startIndex) + input.Seek(startIndex) + alt := p.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previousD.configs, outerContext) + if alt != ATNInvalidAltNumber { + return alt + } + + panic(e) + } + if D.requiresFullContext && p.predictionMode != PredictionModeSLL { + // IF PREDS, MIGHT RESOLVE TO SINGLE ALT => SLL (or syntax error) + conflictingAlts := D.configs.GetConflictingAlts() + if D.predicates != nil { + if ParserATNSimulatorDebug { + fmt.Println("DFA state has preds in DFA sim LL failover") + } + conflictIndex := input.Index() + if conflictIndex != startIndex { + input.Seek(startIndex) + } + conflictingAlts = p.evalSemanticContext(D.predicates, outerContext, true) + if conflictingAlts.length() == 1 { + if ParserATNSimulatorDebug { + fmt.Println("Full LL avoided") + } + return conflictingAlts.minValue() + } + if conflictIndex != startIndex { + // restore the index so Reporting the fallback to full + // context occurs with the index at the correct spot + input.Seek(conflictIndex) + } + } + if ParserATNSimulatorDFADebug { + fmt.Println("ctx sensitive state " + outerContext.String(nil, nil) + " in " + D.String()) + } + fullCtx := true + s0Closure := p.computeStartState(dfa.atnStartState, outerContext, fullCtx) + p.ReportAttemptingFullContext(dfa, conflictingAlts, D.configs, startIndex, input.Index()) + alt := p.execATNWithFullContext(dfa, D, s0Closure, input, startIndex, outerContext) + return alt + } + if D.isAcceptState { + if D.predicates == nil { + return D.prediction + } + stopIndex := input.Index() + input.Seek(startIndex) + alts := p.evalSemanticContext(D.predicates, outerContext, true) + + switch alts.length() { + case 0: + panic(p.noViableAlt(input, outerContext, D.configs, startIndex)) + case 1: + return alts.minValue() + default: + // Report ambiguity after predicate evaluation to make sure the correct set of ambig alts is Reported. 
+ p.ReportAmbiguity(dfa, D, startIndex, stopIndex, false, alts, D.configs) + return alts.minValue() + } + } + previousD = D + + if t != TokenEOF { + input.Consume() + t = input.LA(1) + } + } +} + +// Get an existing target state for an edge in the DFA. If the target state +// for the edge has not yet been computed or is otherwise not available, +// p method returns {@code nil}. +// +// @param previousD The current DFA state +// @param t The next input symbol +// @return The existing target DFA state for the given input symbol +// {@code t}, or {@code nil} if the target state for p edge is not +// already cached + +func (p *ParserATNSimulator) getExistingTargetState(previousD *DFAState, t int) *DFAState { + if t+1 < 0 { + return nil + } + + p.atn.edgeMu.RLock() + defer p.atn.edgeMu.RUnlock() + edges := previousD.getEdges() + if edges == nil || t+1 >= len(edges) { + return nil + } + return previousD.getIthEdge(t + 1) +} + +// Compute a target state for an edge in the DFA, and attempt to add the +// computed state and corresponding edge to the DFA. +// +// @param dfa The DFA +// @param previousD The current DFA state +// @param t The next input symbol +// +// @return The computed target DFA state for the given input symbol +// {@code t}. If {@code t} does not lead to a valid DFA state, p method +// returns {@link //ERROR}. 
+ +func (p *ParserATNSimulator) computeTargetState(dfa *DFA, previousD *DFAState, t int) *DFAState { + reach := p.computeReachSet(previousD.configs, t, false) + + if reach == nil { + p.addDFAEdge(dfa, previousD, t, ATNSimulatorError) + return ATNSimulatorError + } + // create Newtarget state we'll add to DFA after it's complete + D := NewDFAState(-1, reach) + + predictedAlt := p.getUniqueAlt(reach) + + if ParserATNSimulatorDebug { + altSubSets := PredictionModegetConflictingAltSubsets(reach) + fmt.Println("SLL altSubSets=" + fmt.Sprint(altSubSets) + + ", previous=" + previousD.configs.String() + + ", configs=" + reach.String() + + ", predict=" + strconv.Itoa(predictedAlt) + + ", allSubsetsConflict=" + + fmt.Sprint(PredictionModeallSubsetsConflict(altSubSets)) + + ", conflictingAlts=" + p.getConflictingAlts(reach).String()) + } + if predictedAlt != ATNInvalidAltNumber { + // NO CONFLICT, UNIQUELY PREDICTED ALT + D.isAcceptState = true + D.configs.SetUniqueAlt(predictedAlt) + D.setPrediction(predictedAlt) + } else if PredictionModehasSLLConflictTerminatingPrediction(p.predictionMode, reach) { + // MORE THAN ONE VIABLE ALTERNATIVE + D.configs.SetConflictingAlts(p.getConflictingAlts(reach)) + D.requiresFullContext = true + // in SLL-only mode, we will stop at p state and return the minimum alt + D.isAcceptState = true + D.setPrediction(D.configs.GetConflictingAlts().minValue()) + } + if D.isAcceptState && D.configs.HasSemanticContext() { + p.predicateDFAState(D, p.atn.getDecisionState(dfa.decision)) + if D.predicates != nil { + D.setPrediction(ATNInvalidAltNumber) + } + } + // all adds to dfa are done after we've created full D state + D = p.addDFAEdge(dfa, previousD, t, D) + return D +} + +func (p *ParserATNSimulator) predicateDFAState(dfaState *DFAState, decisionState DecisionState) { + // We need to test all predicates, even in DFA states that + // uniquely predict alternative. 
+ nalts := len(decisionState.GetTransitions()) + // Update DFA so reach becomes accept state with (predicate,alt) + // pairs if preds found for conflicting alts + altsToCollectPredsFrom := p.getConflictingAltsOrUniqueAlt(dfaState.configs) + altToPred := p.getPredsForAmbigAlts(altsToCollectPredsFrom, dfaState.configs, nalts) + if altToPred != nil { + dfaState.predicates = p.getPredicatePredictions(altsToCollectPredsFrom, altToPred) + dfaState.setPrediction(ATNInvalidAltNumber) // make sure we use preds + } else { + // There are preds in configs but they might go away + // when OR'd together like {p}? || NONE == NONE. If neither + // alt has preds, resolve to min alt + dfaState.setPrediction(altsToCollectPredsFrom.minValue()) + } +} + +// comes back with reach.uniqueAlt set to a valid alt +func (p *ParserATNSimulator) execATNWithFullContext(dfa *DFA, D *DFAState, s0 ATNConfigSet, input TokenStream, startIndex int, outerContext ParserRuleContext) int { + + if ParserATNSimulatorDebug || ParserATNSimulatorListATNDecisions { + fmt.Println("execATNWithFullContext " + s0.String()) + } + + fullCtx := true + foundExactAmbig := false + var reach ATNConfigSet + previous := s0 + input.Seek(startIndex) + t := input.LA(1) + predictedAlt := -1 + + for { // for more work + reach = p.computeReachSet(previous, t, fullCtx) + if reach == nil { + // if any configs in previous dipped into outer context, that + // means that input up to t actually finished entry rule + // at least for LL decision. Full LL doesn't dip into outer + // so don't need special case. + // We will get an error no matter what so delay until after + // decision better error message. Also, no reachable target + // ATN states in SLL implies LL will also get nowhere. + // If conflict in states that dip out, choose min since we + // will get error no matter what. 
+ e := p.noViableAlt(input, outerContext, previous, startIndex) + input.Seek(startIndex) + alt := p.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previous, outerContext) + if alt != ATNInvalidAltNumber { + return alt + } + + panic(e) + } + altSubSets := PredictionModegetConflictingAltSubsets(reach) + if ParserATNSimulatorDebug { + fmt.Println("LL altSubSets=" + fmt.Sprint(altSubSets) + ", predict=" + + strconv.Itoa(PredictionModegetUniqueAlt(altSubSets)) + ", resolvesToJustOneViableAlt=" + + fmt.Sprint(PredictionModeresolvesToJustOneViableAlt(altSubSets))) + } + reach.SetUniqueAlt(p.getUniqueAlt(reach)) + // unique prediction? + if reach.GetUniqueAlt() != ATNInvalidAltNumber { + predictedAlt = reach.GetUniqueAlt() + break + } + if p.predictionMode != PredictionModeLLExactAmbigDetection { + predictedAlt = PredictionModeresolvesToJustOneViableAlt(altSubSets) + if predictedAlt != ATNInvalidAltNumber { + break + } + } else { + // In exact ambiguity mode, we never try to terminate early. + // Just keeps scarfing until we know what the conflict is + if PredictionModeallSubsetsConflict(altSubSets) && PredictionModeallSubsetsEqual(altSubSets) { + foundExactAmbig = true + predictedAlt = PredictionModegetSingleViableAlt(altSubSets) + break + } + // else there are multiple non-conflicting subsets or + // we're not sure what the ambiguity is yet. + // So, keep going. + } + previous = reach + if t != TokenEOF { + input.Consume() + t = input.LA(1) + } + } + // If the configuration set uniquely predicts an alternative, + // without conflict, then we know that it's a full LL decision + // not SLL. + if reach.GetUniqueAlt() != ATNInvalidAltNumber { + p.ReportContextSensitivity(dfa, predictedAlt, reach, startIndex, input.Index()) + return predictedAlt + } + // We do not check predicates here because we have checked them + // on-the-fly when doing full context prediction. 
+ + // + // In non-exact ambiguity detection mode, we might actually be able to + // detect an exact ambiguity, but I'm not going to spend the cycles + // needed to check. We only emit ambiguity warnings in exact ambiguity + // mode. + // + // For example, we might know that we have conflicting configurations. + // But, that does not mean that there is no way forward without a + // conflict. It's possible to have nonconflicting alt subsets as in: + + // altSubSets=[{1, 2}, {1, 2}, {1}, {1, 2}] + + // from + // + // [(17,1,[5 $]), (13,1,[5 10 $]), (21,1,[5 10 $]), (11,1,[$]), + // (13,2,[5 10 $]), (21,2,[5 10 $]), (11,2,[$])] + // + // In p case, (17,1,[5 $]) indicates there is some next sequence that + // would resolve p without conflict to alternative 1. Any other viable + // next sequence, however, is associated with a conflict. We stop + // looking for input because no amount of further lookahead will alter + // the fact that we should predict alternative 1. We just can't say for + // sure that there is an ambiguity without looking further. + + p.ReportAmbiguity(dfa, D, startIndex, input.Index(), foundExactAmbig, reach.Alts(), reach) + + return predictedAlt +} + +func (p *ParserATNSimulator) computeReachSet(closure ATNConfigSet, t int, fullCtx bool) ATNConfigSet { + if ParserATNSimulatorDebug { + fmt.Println("in computeReachSet, starting closure: " + closure.String()) + } + if p.mergeCache == nil { + p.mergeCache = NewDoubleDict() + } + intermediate := NewBaseATNConfigSet(fullCtx) + + // Configurations already in a rule stop state indicate reaching the end + // of the decision rule (local context) or end of the start rule (full + // context). Once reached, these configurations are never updated by a + // closure operation, so they are handled separately for the performance + // advantage of having a smaller intermediate set when calling closure. 
+ // + // For full-context reach operations, separate handling is required to + // ensure that the alternative Matching the longest overall sequence is + // chosen when multiple such configurations can Match the input. + + var skippedStopStates []*BaseATNConfig + + // First figure out where we can reach on input t + for _, c := range closure.GetItems() { + if ParserATNSimulatorDebug { + fmt.Println("testing " + p.GetTokenName(t) + " at " + c.String()) + } + + if _, ok := c.GetState().(*RuleStopState); ok { + if fullCtx || t == TokenEOF { + skippedStopStates = append(skippedStopStates, c.(*BaseATNConfig)) + if ParserATNSimulatorDebug { + fmt.Println("added " + c.String() + " to SkippedStopStates") + } + } + continue + } + + for _, trans := range c.GetState().GetTransitions() { + target := p.getReachableTarget(trans, t) + if target != nil { + cfg := NewBaseATNConfig4(c, target) + intermediate.Add(cfg, p.mergeCache) + if ParserATNSimulatorDebug { + fmt.Println("added " + cfg.String() + " to intermediate") + } + } + } + } + + // Now figure out where the reach operation can take us... + var reach ATNConfigSet + + // This block optimizes the reach operation for intermediate sets which + // trivially indicate a termination state for the overall + // AdaptivePredict operation. + // + // The conditions assume that intermediate + // contains all configurations relevant to the reach set, but p + // condition is not true when one or more configurations have been + // withheld in SkippedStopStates, or when the current symbol is EOF. + // + if skippedStopStates == nil && t != TokenEOF { + if len(intermediate.configs) == 1 { + // Don't pursue the closure if there is just one state. + // It can only have one alternative just add to result + // Also don't pursue the closure if there is unique alternative + // among the configurations. 
+ reach = intermediate + } else if p.getUniqueAlt(intermediate) != ATNInvalidAltNumber { + // Also don't pursue the closure if there is unique alternative + // among the configurations. + reach = intermediate + } + } + // If the reach set could not be trivially determined, perform a closure + // operation on the intermediate set to compute its initial value. + // + if reach == nil { + reach = NewBaseATNConfigSet(fullCtx) + closureBusy := NewJStore[ATNConfig, Comparator[ATNConfig]](&ObjEqComparator[ATNConfig]{}) + treatEOFAsEpsilon := t == TokenEOF + amount := len(intermediate.configs) + for k := 0; k < amount; k++ { + p.closure(intermediate.configs[k], reach, closureBusy, false, fullCtx, treatEOFAsEpsilon) + } + } + if t == TokenEOF { + // After consuming EOF no additional input is possible, so we are + // only interested in configurations which reached the end of the + // decision rule (local context) or end of the start rule (full + // context). Update reach to contain only these configurations. This + // handles both explicit EOF transitions in the grammar and implicit + // EOF transitions following the end of the decision or start rule. + // + // When reach==intermediate, no closure operation was performed. In + // p case, removeAllConfigsNotInRuleStopState needs to check for + // reachable rule stop states as well as configurations already in + // a rule stop state. + // + // This is handled before the configurations in SkippedStopStates, + // because any configurations potentially added from that list are + // already guaranteed to meet p condition whether or not it's + // required. + // + reach = p.removeAllConfigsNotInRuleStopState(reach, reach == intermediate) + } + // If SkippedStopStates!=nil, then it contains at least one + // configuration. 
For full-context reach operations, these + // configurations reached the end of the start rule, in which case we + // only add them back to reach if no configuration during the current + // closure operation reached such a state. This ensures AdaptivePredict + // chooses an alternative Matching the longest overall sequence when + // multiple alternatives are viable. + // + if skippedStopStates != nil && ((!fullCtx) || (!PredictionModehasConfigInRuleStopState(reach))) { + for l := 0; l < len(skippedStopStates); l++ { + reach.Add(skippedStopStates[l], p.mergeCache) + } + } + if len(reach.GetItems()) == 0 { + return nil + } + + return reach +} + +// Return a configuration set containing only the configurations from +// {@code configs} which are in a {@link RuleStopState}. If all +// configurations in {@code configs} are already in a rule stop state, p +// method simply returns {@code configs}. +// +//

    When {@code lookToEndOfRule} is true, p method uses +// {@link ATN//NextTokens} for each configuration in {@code configs} which is +// not already in a rule stop state to see if a rule stop state is reachable +// from the configuration via epsilon-only transitions.

    +// +// @param configs the configuration set to update +// @param lookToEndOfRule when true, p method checks for rule stop states +// reachable by epsilon-only transitions from each configuration in +// {@code configs}. +// +// @return {@code configs} if all configurations in {@code configs} are in a +// rule stop state, otherwise return a Newconfiguration set containing only +// the configurations from {@code configs} which are in a rule stop state +func (p *ParserATNSimulator) removeAllConfigsNotInRuleStopState(configs ATNConfigSet, lookToEndOfRule bool) ATNConfigSet { + if PredictionModeallConfigsInRuleStopStates(configs) { + return configs + } + result := NewBaseATNConfigSet(configs.FullContext()) + for _, config := range configs.GetItems() { + if _, ok := config.GetState().(*RuleStopState); ok { + result.Add(config, p.mergeCache) + continue + } + if lookToEndOfRule && config.GetState().GetEpsilonOnlyTransitions() { + NextTokens := p.atn.NextTokens(config.GetState(), nil) + if NextTokens.contains(TokenEpsilon) { + endOfRuleState := p.atn.ruleToStopState[config.GetState().GetRuleIndex()] + result.Add(NewBaseATNConfig4(config, endOfRuleState), p.mergeCache) + } + } + } + return result +} + +func (p *ParserATNSimulator) computeStartState(a ATNState, ctx RuleContext, fullCtx bool) ATNConfigSet { + // always at least the implicit call to start rule + initialContext := predictionContextFromRuleContext(p.atn, ctx) + configs := NewBaseATNConfigSet(fullCtx) + for i := 0; i < len(a.GetTransitions()); i++ { + target := a.GetTransitions()[i].getTarget() + c := NewBaseATNConfig6(target, i+1, initialContext) + closureBusy := NewJStore[ATNConfig, Comparator[ATNConfig]](&BaseATNConfigComparator[ATNConfig]{}) + p.closure(c, configs, closureBusy, true, fullCtx, false) + } + return configs +} + +// This method transforms the start state computed by +// {@link //computeStartState} to the special start state used by a +// precedence DFA for a particular precedence value. 
// The transformation process applies the following changes to the start
// state's configuration set:
//
//  1. Evaluate the precedence predicates for each configuration using
//     SemanticContext.evalPrecedence.
//  2. Remove all configurations which predict an alternative greater than 1,
//     for which another configuration that predicts alternative 1 is in the
//     same ATN state with the same prediction context. This transformation is
//     valid for the following reasons:
//
//     - The closure block cannot contain any epsilon transitions which bypass
//       the body of the closure, so all states reachable via alternative 1 are
//       part of the precedence alternatives of the transformed left-recursive
//       rule.
//     - The "primary" portion of a left recursive rule cannot contain an
//       epsilon transition, so the only way an alternative other than 1 can exist
//       in a state that is also reachable via alternative 1 is by nesting calls
//       to the left-recursive rule, with the outer calls not being at the
//       preferred precedence level.
//
// The prediction context must be considered by this filter to address
// situations like the following:
//
//	grammar TA;
//	prog: statement* EOF;
//	statement: letterA | statement letterA 'b';
//	letterA: 'a';
//
// In the above grammar, the ATN state immediately before the token
// reference 'a' in letterA is reachable from the left edge
// of both the primary and closure blocks of the left-recursive rule
// statement. The prediction context associated with each of these
// configurations distinguishes between them, and prevents the alternative
// which stepped out to prog (and then back in to statement)
// from being eliminated by the filter.
//
// @param configs The configuration set computed by
// computeStartState as the start state for the DFA.
// @return The transformed configuration set representing the start state
// for a precedence DFA at a particular precedence level (determined by
// calling Parser.getPrecedence).
func (p *ParserATNSimulator) applyPrecedenceFilter(configs ATNConfigSet) ATNConfigSet {

	// Maps ATN state number -> prediction context for configurations predicting alt 1.
	statesFromAlt1 := make(map[int]PredictionContext)
	configSet := NewBaseATNConfigSet(configs.FullContext())

	for _, config := range configs.GetItems() {
		// handle alt 1 first
		if config.GetAlt() != 1 {
			continue
		}
		updatedContext := config.GetSemanticContext().evalPrecedence(p.parser, p.outerContext)
		if updatedContext == nil {
			// the configuration was eliminated
			continue
		}
		statesFromAlt1[config.GetState().GetStateNumber()] = config.GetContext()
		if updatedContext != config.GetSemanticContext() {
			configSet.Add(NewBaseATNConfig2(config, updatedContext), p.mergeCache)
		} else {
			configSet.Add(config, p.mergeCache)
		}
	}
	for _, config := range configs.GetItems() {

		if config.GetAlt() == 1 {
			// already handled
			continue
		}
		// In the future, this elimination step could be updated to also
		// filter the prediction context for alternatives predicting alt>1
		// (basically a graph subtraction algorithm).
		if !config.getPrecedenceFilterSuppressed() {
			context := statesFromAlt1[config.GetState().GetStateNumber()]
			if context != nil && context.Equals(config.GetContext()) {
				// eliminated: same state + same context already reachable via alt 1
				continue
			}
		}
		configSet.Add(config, p.mergeCache)
	}
	return configSet
}

// getReachableTarget returns the target state of trans if it matches the input
// symbol ttype, or nil when the transition cannot consume ttype.
func (p *ParserATNSimulator) getReachableTarget(trans Transition, ttype int) ATNState {
	if trans.Matches(ttype, 0, p.atn.maxTokenType) {
		return trans.getTarget()
	}

	return nil
}

// getPredsForAmbigAlts collects, for each ambiguous alternative, the OR of the
// semantic contexts of its configurations. Non-ambiguous alternatives get
// SemanticContextNone. Returns nil when no alternative carries a real predicate.
func (p *ParserATNSimulator) getPredsForAmbigAlts(ambigAlts *BitSet, configs ATNConfigSet, nalts int) []SemanticContext {

	// altToPred is 1-indexed by alternative number; slot 0 is unused.
	altToPred := make([]SemanticContext, nalts+1)
	for _, c := range configs.GetItems() {
		if ambigAlts.contains(c.GetAlt()) {
			altToPred[c.GetAlt()] = SemanticContextorContext(altToPred[c.GetAlt()], c.GetSemanticContext())
		}
	}
	nPredAlts := 0
	for i := 1; i <= nalts; i++ {
		pred := altToPred[i]
		if pred == nil {
			altToPred[i] = SemanticContextNone
		} else if pred != SemanticContextNone {
			nPredAlts++
		}
	}
	// nonambig alts are nil in altToPred
	if nPredAlts == 0 {
		altToPred = nil
	}
	if ParserATNSimulatorDebug {
		fmt.Println("getPredsForAmbigAlts result " + fmt.Sprint(altToPred))
	}
	return altToPred
}

// getPredicatePredictions converts the alt->predicate mapping into a list of
// (predicate, alt) pairs for the ambiguous alternatives. Returns nil when no
// pair carries a real predicate (all are SemanticContextNone).
func (p *ParserATNSimulator) getPredicatePredictions(ambigAlts *BitSet, altToPred []SemanticContext) []*PredPrediction {
	pairs := make([]*PredPrediction, 0)
	containsPredicate := false
	for i := 1; i < len(altToPred); i++ {
		pred := altToPred[i]
		// unpredicated is indicated by SemanticContextNONE
		if ambigAlts != nil && ambigAlts.contains(i) {
			pairs = append(pairs, NewPredPrediction(pred, i))
		}
		if pred != SemanticContextNone {
			containsPredicate = true
		}
	}
	if !containsPredicate {
		return nil
	}
	return pairs
}

// This method is used to improve the localization of error messages by
// choosing an alternative rather than panicking with a NoViableAltException
// in particular prediction scenarios where the ERROR state was reached
// during ATN simulation.
// The default implementation of this method uses the following
// algorithm to identify an ATN configuration which successfully parsed the
// decision entry rule. Choosing such an alternative ensures that the
// ParserRuleContext returned by the calling rule will be complete
// and valid, and the syntax error will be Reported later at a more
// localized location.
//
//   - If a syntactically valid path or paths reach the end of the decision rule and
//     they are semantically valid if predicated, return the min associated alt.
//   - Else, if a semantically invalid but syntactically valid path exists
//     or paths exist, return the minimum associated alt.
//   - Otherwise, return ATN.INVALID_ALT_NUMBER.
//
// In some scenarios, the algorithm described above could predict an
// alternative which will result in a FailedPredicateException in
// the parser. Specifically, this could occur if the only configuration
// capable of successfully parsing to the end of the decision rule is
// blocked by a semantic predicate. By choosing this alternative within
// AdaptivePredict instead of panicking with a NoViableAltException,
// the resulting FailedPredicateException in the parser will identify the
// specific predicate which is preventing the parser from successfully parsing
// the decision rule, which helps developers identify and correct logic errors
// in semantic predicates.
//
// @param configs The ATN configurations which were valid immediately before
// the ERROR state was reached
// @param outerContext The is the \gamma_0 initial parser context from the paper
// or the parser stack at the instant before prediction commences.
//
// @return The value to return from AdaptivePredict, or
// ATN.INVALID_ALT_NUMBER if a suitable alternative was not
// identified and AdaptivePredict should Report an error instead.
func (p *ParserATNSimulator) getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(configs ATNConfigSet, outerContext ParserRuleContext) int {
	cfgs := p.splitAccordingToSemanticValidity(configs, outerContext)
	semValidConfigs := cfgs[0]
	semInvalidConfigs := cfgs[1]
	alt := p.GetAltThatFinishedDecisionEntryRule(semValidConfigs)
	if alt != ATNInvalidAltNumber { // semantically/syntactically viable path exists
		return alt
	}
	// Is there a syntactically valid path with a failed pred?
	if len(semInvalidConfigs.GetItems()) > 0 {
		alt = p.GetAltThatFinishedDecisionEntryRule(semInvalidConfigs)
		if alt != ATNInvalidAltNumber { // syntactically viable path exists
			return alt
		}
	}
	return ATNInvalidAltNumber
}

// GetAltThatFinishedDecisionEntryRule returns the minimum alternative among
// configurations that either dipped into the outer context or reached a rule
// stop state with an empty-path context, or ATNInvalidAltNumber if none did.
func (p *ParserATNSimulator) GetAltThatFinishedDecisionEntryRule(configs ATNConfigSet) int {
	alts := NewIntervalSet()

	for _, c := range configs.GetItems() {
		_, ok := c.GetState().(*RuleStopState)

		if c.GetReachesIntoOuterContext() > 0 || (ok && c.GetContext().hasEmptyPath()) {
			alts.addOne(c.GetAlt())
		}
	}
	if alts.length() == 0 {
		return ATNInvalidAltNumber
	}

	return alts.first()
}

// Walk the list of configurations and split them according to
// those that have preds evaluating to true/false. If no pred, assume
// true pred and include in succeeded set. Returns a pair of sets.
//
// Creates new sets so as not to alter the incoming parameter.
//
// Assumption: the input stream has been restored to the starting point of
// prediction, which is where predicates need to evaluate.

// ATNConfigSetPair holds two related configuration sets (e.g. the
// succeeded/failed split produced by splitAccordingToSemanticValidity).
type ATNConfigSetPair struct {
	item0, item1 ATNConfigSet
}

// splitAccordingToSemanticValidity partitions configs into
// [0] = configurations whose predicate evaluates true (or have no predicate)
// and [1] = configurations whose predicate evaluates false.
func (p *ParserATNSimulator) splitAccordingToSemanticValidity(configs ATNConfigSet, outerContext ParserRuleContext) []ATNConfigSet {
	succeeded := NewBaseATNConfigSet(configs.FullContext())
	failed := NewBaseATNConfigSet(configs.FullContext())

	for _, c := range configs.GetItems() {
		if c.GetSemanticContext() != SemanticContextNone {
			predicateEvaluationResult := c.GetSemanticContext().evaluate(p.parser, outerContext)
			if predicateEvaluationResult {
				succeeded.Add(c, nil)
			} else {
				failed.Add(c, nil)
			}
		} else {
			// No predicate: treat as always-true and include in succeeded.
			succeeded.Add(c, nil)
		}
	}
	return []ATNConfigSet{succeeded, failed}
}

// Look through a list of predicate/alt pairs, returning alts for the
// pairs that win. A NONE predicate indicates an alt containing an
// unpredicated config which behaves as "always true." If !complete
// then we stop at the first predicate that evaluates to true. This
// includes pairs with nil predicates.
+func (p *ParserATNSimulator) evalSemanticContext(predPredictions []*PredPrediction, outerContext ParserRuleContext, complete bool) *BitSet { + predictions := NewBitSet() + for i := 0; i < len(predPredictions); i++ { + pair := predPredictions[i] + if pair.pred == SemanticContextNone { + predictions.add(pair.alt) + if !complete { + break + } + continue + } + + predicateEvaluationResult := pair.pred.evaluate(p.parser, outerContext) + if ParserATNSimulatorDebug || ParserATNSimulatorDFADebug { + fmt.Println("eval pred " + pair.String() + "=" + fmt.Sprint(predicateEvaluationResult)) + } + if predicateEvaluationResult { + if ParserATNSimulatorDebug || ParserATNSimulatorDFADebug { + fmt.Println("PREDICT " + fmt.Sprint(pair.alt)) + } + predictions.add(pair.alt) + if !complete { + break + } + } + } + return predictions +} + +func (p *ParserATNSimulator) closure(config ATNConfig, configs ATNConfigSet, closureBusy *JStore[ATNConfig, Comparator[ATNConfig]], collectPredicates, fullCtx, treatEOFAsEpsilon bool) { + initialDepth := 0 + p.closureCheckingStopState(config, configs, closureBusy, collectPredicates, + fullCtx, initialDepth, treatEOFAsEpsilon) +} + +func (p *ParserATNSimulator) closureCheckingStopState(config ATNConfig, configs ATNConfigSet, closureBusy *JStore[ATNConfig, Comparator[ATNConfig]], collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) { + if ParserATNSimulatorDebug { + fmt.Println("closure(" + config.String() + ")") + fmt.Println("configs(" + configs.String() + ")") + if config.GetReachesIntoOuterContext() > 50 { + panic("problem") + } + } + + if _, ok := config.GetState().(*RuleStopState); ok { + // We hit rule end. 
If we have context info, use it + // run thru all possible stack tops in ctx + if !config.GetContext().isEmpty() { + for i := 0; i < config.GetContext().length(); i++ { + if config.GetContext().getReturnState(i) == BasePredictionContextEmptyReturnState { + if fullCtx { + configs.Add(NewBaseATNConfig1(config, config.GetState(), BasePredictionContextEMPTY), p.mergeCache) + continue + } else { + // we have no context info, just chase follow links (if greedy) + if ParserATNSimulatorDebug { + fmt.Println("FALLING off rule " + p.getRuleName(config.GetState().GetRuleIndex())) + } + p.closureWork(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon) + } + continue + } + returnState := p.atn.states[config.GetContext().getReturnState(i)] + newContext := config.GetContext().GetParent(i) // "pop" return state + + c := NewBaseATNConfig5(returnState, config.GetAlt(), newContext, config.GetSemanticContext()) + // While we have context to pop back from, we may have + // gotten that context AFTER having falling off a rule. + // Make sure we track that we are now out of context. 
+ c.SetReachesIntoOuterContext(config.GetReachesIntoOuterContext()) + p.closureCheckingStopState(c, configs, closureBusy, collectPredicates, fullCtx, depth-1, treatEOFAsEpsilon) + } + return + } else if fullCtx { + // reached end of start rule + configs.Add(config, p.mergeCache) + return + } else { + // else if we have no context info, just chase follow links (if greedy) + if ParserATNSimulatorDebug { + fmt.Println("FALLING off rule " + p.getRuleName(config.GetState().GetRuleIndex())) + } + } + } + p.closureWork(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon) +} + +// Do the actual work of walking epsilon edges// +func (p *ParserATNSimulator) closureWork(config ATNConfig, configs ATNConfigSet, closureBusy *JStore[ATNConfig, Comparator[ATNConfig]], collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) { + state := config.GetState() + // optimization + if !state.GetEpsilonOnlyTransitions() { + configs.Add(config, p.mergeCache) + // make sure to not return here, because EOF transitions can act as + // both epsilon transitions and non-epsilon transitions. + } + for i := 0; i < len(state.GetTransitions()); i++ { + if i == 0 && p.canDropLoopEntryEdgeInLeftRecursiveRule(config) { + continue + } + + t := state.GetTransitions()[i] + _, ok := t.(*ActionTransition) + continueCollecting := collectPredicates && !ok + c := p.getEpsilonTarget(config, t, continueCollecting, depth == 0, fullCtx, treatEOFAsEpsilon) + if ci, ok := c.(*BaseATNConfig); ok && ci != nil { + newDepth := depth + + if _, ok := config.GetState().(*RuleStopState); ok { + // target fell off end of rule mark resulting c as having dipped into outer context + // We can't get here if incoming config was rule stop and we had context + // track how far we dip into outer context. Might + // come in handy and we avoid evaluating context dependent + // preds if p is > 0. 
+ + if p.dfa != nil && p.dfa.getPrecedenceDfa() { + if t.(*EpsilonTransition).outermostPrecedenceReturn == p.dfa.atnStartState.GetRuleIndex() { + c.setPrecedenceFilterSuppressed(true) + } + } + + c.SetReachesIntoOuterContext(c.GetReachesIntoOuterContext() + 1) + + _, present := closureBusy.Put(c) + if present { + // avoid infinite recursion for right-recursive rules + continue + } + + configs.SetDipsIntoOuterContext(true) // TODO: can remove? only care when we add to set per middle of p method + newDepth-- + if ParserATNSimulatorDebug { + fmt.Println("dips into outer ctx: " + c.String()) + } + } else { + + if !t.getIsEpsilon() { + _, present := closureBusy.Put(c) + if present { + // avoid infinite recursion for EOF* and EOF+ + continue + } + } + if _, ok := t.(*RuleTransition); ok { + // latch when newDepth goes negative - once we step out of the entry context we can't return + if newDepth >= 0 { + newDepth++ + } + } + } + p.closureCheckingStopState(c, configs, closureBusy, continueCollecting, fullCtx, newDepth, treatEOFAsEpsilon) + } + } +} + +func (p *ParserATNSimulator) canDropLoopEntryEdgeInLeftRecursiveRule(config ATNConfig) bool { + if TurnOffLRLoopEntryBranchOpt { + return false + } + + _p := config.GetState() + + // First check to see if we are in StarLoopEntryState generated during + // left-recursion elimination. For efficiency, also check if + // the context has an empty stack case. If so, it would mean + // global FOLLOW so we can't perform optimization + if _p.GetStateType() != ATNStateStarLoopEntry { + return false + } + startLoop, ok := _p.(*StarLoopEntryState) + if !ok { + return false + } + if !startLoop.precedenceRuleDecision || + config.GetContext().isEmpty() || + config.GetContext().hasEmptyPath() { + return false + } + + // Require all return states to return back to the same rule + // that p is in. 
+ numCtxs := config.GetContext().length() + for i := 0; i < numCtxs; i++ { + returnState := p.atn.states[config.GetContext().getReturnState(i)] + if returnState.GetRuleIndex() != _p.GetRuleIndex() { + return false + } + } + x := _p.GetTransitions()[0].getTarget() + decisionStartState := x.(BlockStartState) + blockEndStateNum := decisionStartState.getEndState().stateNumber + blockEndState := p.atn.states[blockEndStateNum].(*BlockEndState) + + // Verify that the top of each stack context leads to loop entry/exit + // state through epsilon edges and w/o leaving rule. + + for i := 0; i < numCtxs; i++ { // for each stack context + returnStateNumber := config.GetContext().getReturnState(i) + returnState := p.atn.states[returnStateNumber] + + // all states must have single outgoing epsilon edge + if len(returnState.GetTransitions()) != 1 || !returnState.GetTransitions()[0].getIsEpsilon() { + return false + } + + // Look for prefix op case like 'not expr', (' type ')' expr + returnStateTarget := returnState.GetTransitions()[0].getTarget() + if returnState.GetStateType() == ATNStateBlockEnd && returnStateTarget == _p { + continue + } + + // Look for 'expr op expr' or case where expr's return state is block end + // of (...)* internal block; the block end points to loop back + // which points to p but we don't need to check that + if returnState == blockEndState { + continue + } + + // Look for ternary expr ? expr : expr. 
The return state points at block end, + // which points at loop entry state + if returnStateTarget == blockEndState { + continue + } + + // Look for complex prefix 'between expr and expr' case where 2nd expr's + // return state points at block end state of (...)* internal block + if returnStateTarget.GetStateType() == ATNStateBlockEnd && + len(returnStateTarget.GetTransitions()) == 1 && + returnStateTarget.GetTransitions()[0].getIsEpsilon() && + returnStateTarget.GetTransitions()[0].getTarget() == _p { + continue + } + + // anything else ain't conforming + return false + } + + return true +} + +func (p *ParserATNSimulator) getRuleName(index int) string { + if p.parser != nil && index >= 0 { + return p.parser.GetRuleNames()[index] + } + var sb strings.Builder + sb.Grow(32) + + sb.WriteString("') + return sb.String() +} + +func (p *ParserATNSimulator) getEpsilonTarget(config ATNConfig, t Transition, collectPredicates, inContext, fullCtx, treatEOFAsEpsilon bool) ATNConfig { + + switch t.getSerializationType() { + case TransitionRULE: + return p.ruleTransition(config, t.(*RuleTransition)) + case TransitionPRECEDENCE: + return p.precedenceTransition(config, t.(*PrecedencePredicateTransition), collectPredicates, inContext, fullCtx) + case TransitionPREDICATE: + return p.predTransition(config, t.(*PredicateTransition), collectPredicates, inContext, fullCtx) + case TransitionACTION: + return p.actionTransition(config, t.(*ActionTransition)) + case TransitionEPSILON: + return NewBaseATNConfig4(config, t.getTarget()) + case TransitionATOM, TransitionRANGE, TransitionSET: + // EOF transitions act like epsilon transitions after the first EOF + // transition is traversed + if treatEOFAsEpsilon { + if t.Matches(TokenEOF, 0, 1) { + return NewBaseATNConfig4(config, t.getTarget()) + } + } + return nil + default: + return nil + } +} + +func (p *ParserATNSimulator) actionTransition(config ATNConfig, t *ActionTransition) *BaseATNConfig { + if ParserATNSimulatorDebug { + 
fmt.Println("ACTION edge " + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.actionIndex)) + } + return NewBaseATNConfig4(config, t.getTarget()) +} + +func (p *ParserATNSimulator) precedenceTransition(config ATNConfig, + pt *PrecedencePredicateTransition, collectPredicates, inContext, fullCtx bool) *BaseATNConfig { + + if ParserATNSimulatorDebug { + fmt.Println("PRED (collectPredicates=" + fmt.Sprint(collectPredicates) + ") " + + strconv.Itoa(pt.precedence) + ">=_p, ctx dependent=true") + if p.parser != nil { + fmt.Println("context surrounding pred is " + fmt.Sprint(p.parser.GetRuleInvocationStack(nil))) + } + } + var c *BaseATNConfig + if collectPredicates && inContext { + if fullCtx { + // In full context mode, we can evaluate predicates on-the-fly + // during closure, which dramatically reduces the size of + // the config sets. It also obviates the need to test predicates + // later during conflict resolution. + currentPosition := p.input.Index() + p.input.Seek(p.startIndex) + predSucceeds := pt.getPredicate().evaluate(p.parser, p.outerContext) + p.input.Seek(currentPosition) + if predSucceeds { + c = NewBaseATNConfig4(config, pt.getTarget()) // no pred context + } + } else { + newSemCtx := SemanticContextandContext(config.GetSemanticContext(), pt.getPredicate()) + c = NewBaseATNConfig3(config, pt.getTarget(), newSemCtx) + } + } else { + c = NewBaseATNConfig4(config, pt.getTarget()) + } + if ParserATNSimulatorDebug { + fmt.Println("config from pred transition=" + c.String()) + } + return c +} + +func (p *ParserATNSimulator) predTransition(config ATNConfig, pt *PredicateTransition, collectPredicates, inContext, fullCtx bool) *BaseATNConfig { + + if ParserATNSimulatorDebug { + fmt.Println("PRED (collectPredicates=" + fmt.Sprint(collectPredicates) + ") " + strconv.Itoa(pt.ruleIndex) + + ":" + strconv.Itoa(pt.predIndex) + ", ctx dependent=" + fmt.Sprint(pt.isCtxDependent)) + if p.parser != nil { + fmt.Println("context surrounding pred is " + 
fmt.Sprint(p.parser.GetRuleInvocationStack(nil))) + } + } + var c *BaseATNConfig + if collectPredicates && (!pt.isCtxDependent || inContext) { + if fullCtx { + // In full context mode, we can evaluate predicates on-the-fly + // during closure, which dramatically reduces the size of + // the config sets. It also obviates the need to test predicates + // later during conflict resolution. + currentPosition := p.input.Index() + p.input.Seek(p.startIndex) + predSucceeds := pt.getPredicate().evaluate(p.parser, p.outerContext) + p.input.Seek(currentPosition) + if predSucceeds { + c = NewBaseATNConfig4(config, pt.getTarget()) // no pred context + } + } else { + newSemCtx := SemanticContextandContext(config.GetSemanticContext(), pt.getPredicate()) + c = NewBaseATNConfig3(config, pt.getTarget(), newSemCtx) + } + } else { + c = NewBaseATNConfig4(config, pt.getTarget()) + } + if ParserATNSimulatorDebug { + fmt.Println("config from pred transition=" + c.String()) + } + return c +} + +func (p *ParserATNSimulator) ruleTransition(config ATNConfig, t *RuleTransition) *BaseATNConfig { + if ParserATNSimulatorDebug { + fmt.Println("CALL rule " + p.getRuleName(t.getTarget().GetRuleIndex()) + ", ctx=" + config.GetContext().String()) + } + returnState := t.followState + newContext := SingletonBasePredictionContextCreate(config.GetContext(), returnState.GetStateNumber()) + return NewBaseATNConfig1(config, t.getTarget(), newContext) +} + +func (p *ParserATNSimulator) getConflictingAlts(configs ATNConfigSet) *BitSet { + altsets := PredictionModegetConflictingAltSubsets(configs) + return PredictionModeGetAlts(altsets) +} + +// Sam pointed out a problem with the previous definition, v3, of +// ambiguous states. If we have another state associated with conflicting +// alternatives, we should keep going. For example, the following grammar +// +// s : (ID | ID ID?) '' +// +// When the ATN simulation reaches the state before '', it has a DFA +// state that looks like: [12|1|[], 6|2|[], 12|2|[]]. 
// Naturally 12|1|[] and 12|2|[] conflict, but we cannot stop processing this
// node because alternative two has another way to continue, via [6|2|[]].
// The key is that we have a single state that has config's only associated
// with a single alternative, 2, and crucially the state transitions
// among the configurations are all non-epsilon transitions. That means
// we don't consider any conflicts that include alternative 2. So, we
// ignore the conflict between alts 1 and 2. We ignore a set of
// conflicting alts when there is an intersection with an alternative
// associated with a single alt state in the state->config-list map.
//
// It's also the case that we might have two conflicting configurations but
// also a 3rd nonconflicting configuration for a different alternative:
// [1|1|[], 1|2|[], 8|3|[]]. This can come about from grammar:
//
//	a : A | A | A B
//
// After Matching input A, we reach the stop state for rule A, state 1.
// State 8 is the state right before B. Clearly alternatives 1 and 2
// conflict and no amount of further lookahead will separate the two.
// However, alternative 3 will be able to continue and so we do not
// stop working on this state. In the previous example, we're concerned
// with states associated with the conflicting alternatives. Here alt
// 3 is not associated with the conflicting configs, but since we can continue
// looking for input reasonably, I don't declare the state done. We
// ignore a set of conflicting alts when we have an alternative
// that we still need to pursue.

// getConflictingAltsOrUniqueAlt returns the unique alternative as a singleton
// set when prediction resolved to one alt, otherwise the previously computed
// set of conflicting alternatives.
func (p *ParserATNSimulator) getConflictingAltsOrUniqueAlt(configs ATNConfigSet) *BitSet {
	var conflictingAlts *BitSet
	if configs.GetUniqueAlt() != ATNInvalidAltNumber {
		conflictingAlts = NewBitSet()
		conflictingAlts.add(configs.GetUniqueAlt())
	} else {
		conflictingAlts = configs.GetConflictingAlts()
	}
	return conflictingAlts
}

// GetTokenName returns a human-readable name for token type t: "EOF" for the
// EOF sentinel, the parser's literal name plus "<t>" when available, otherwise
// the numeric type as a string. Out-of-range types are logged and fall through
// to the numeric form.
func (p *ParserATNSimulator) GetTokenName(t int) string {
	if t == TokenEOF {
		return "EOF"
	}

	if p.parser != nil && p.parser.GetLiteralNames() != nil {
		if t >= len(p.parser.GetLiteralNames()) {
			fmt.Println(strconv.Itoa(t) + " ttype out of range: " + strings.Join(p.parser.GetLiteralNames(), ","))
			// fmt.Println(p.parser.GetInputStream().(TokenStream).GetAllText()) // this seems incorrect
		} else {
			return p.parser.GetLiteralNames()[t] + "<" + strconv.Itoa(t) + ">"
		}
	}

	return strconv.Itoa(t)
}

// getLookaheadName returns the display name of the next token of lookahead.
func (p *ParserATNSimulator) getLookaheadName(input TokenStream) string {
	return p.GetTokenName(input.LA(1))
}

// Used for debugging in AdaptivePredict around execATN, but cut
// out for clarity now that the algorithm works well. We can leave this
// "dead" code for a bit.
// dumpDeadEndConfigs would print the dead-end configurations of a
// NoViableAltException for debugging. The port is unfinished and currently
// panics; the intended implementation is preserved below as a comment.
func (p *ParserATNSimulator) dumpDeadEndConfigs(nvae *NoViableAltException) {

	panic("Not implemented")

	// fmt.Println("dead end configs: ")
	// var decs = nvae.deadEndConfigs
	//
	// for i := 0; i < len(decs); i++ {
	//     c := decs[i]
	//     var trans = "no edges"
	//     if len(c.state.GetTransitions()) > 0 {
	//         var t = c.state.GetTransitions()[0]
	//         if t2, ok := t.(*AtomTransition); ok {
	//             trans = "Atom " + p.GetTokenName(t2.label)
	//         } else if t3, ok := t.(SetTransition); ok {
	//             _, ok := t.(*NotSetTransition)
	//
	//             var s string
	//             if ok {
	//                 s = "~"
	//             }
	//
	//             trans = s + "Set " + t3.set
	//         }
	//     }
	//     fmt.Errorf(c.String(p.parser, true) + ":" + trans)
	// }
}

// noViableAlt constructs a NoViableAltException spanning from the token at
// startIndex to the current lookahead token.
func (p *ParserATNSimulator) noViableAlt(input TokenStream, outerContext ParserRuleContext, configs ATNConfigSet, startIndex int) *NoViableAltException {
	return NewNoViableAltException(p.parser, input, input.Get(startIndex), input.LT(1), configs, outerContext)
}

// getUniqueAlt returns the single alternative predicted by every
// configuration in configs, or ATNInvalidAltNumber when the configurations
// predict more than one alternative.
func (p *ParserATNSimulator) getUniqueAlt(configs ATNConfigSet) int {
	alt := ATNInvalidAltNumber
	for _, c := range configs.GetItems() {
		if alt == ATNInvalidAltNumber {
			alt = c.GetAlt() // found first alt
		} else if c.GetAlt() != alt {
			return ATNInvalidAltNumber
		}
	}
	return alt
}

// addDFAEdge adds an edge to the DFA, if possible. This method calls
// addDFAState to ensure the "to" state is present in the
// DFA. If "from" is nil, or if t is outside the
// range of edges that can be represented in the DFA tables, this method
// returns without adding the edge to the DFA.
//
// If "to" is nil, this method returns nil.
// Otherwise, this method returns the DFAState returned by calling
// addDFAState for the "to" state.
//
// @param dfa The DFA
// @param from The source state for the edge
// @param t The input symbol
// @param to The target state for the edge
//
// @return If "to" is nil, this method returns nil;
// otherwise this method returns the result of calling addDFAState
// on "to"
func (p *ParserATNSimulator) addDFAEdge(dfa *DFA, from *DFAState, t int, to *DFAState) *DFAState {
	if ParserATNSimulatorDebug {
		fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + p.GetTokenName(t))
	}
	if to == nil {
		return nil
	}
	// stateMu guards the DFA state set; edgeMu guards per-state edge arrays.
	p.atn.stateMu.Lock()
	to = p.addDFAState(dfa, to) // used existing if possible not incoming
	p.atn.stateMu.Unlock()
	if from == nil || t < -1 || t > p.atn.maxTokenType {
		return to
	}
	p.atn.edgeMu.Lock()
	if from.getEdges() == nil {
		// edge array is indexed by t+1 so that t == -1 (EOF) maps to slot 0
		from.setEdges(make([]*DFAState, p.atn.maxTokenType+1+1))
	}
	from.setIthEdge(t+1, to) // connect
	p.atn.edgeMu.Unlock()

	if ParserATNSimulatorDebug {
		var names []string
		if p.parser != nil {
			names = p.parser.GetLiteralNames()
		}

		fmt.Println("DFA=\n" + dfa.String(names, nil))
	}
	return to
}

// addDFAState adds state d to the DFA if it is not already present, and
// returns the actual instance stored in the DFA. If a state equivalent to d
// is already in the DFA, the existing state is returned. Otherwise this
// method returns d after adding it to the DFA.
//
// If d is the ERROR state, this method returns ERROR and
// does not change the DFA.
//
// @param dfa The dfa
// @param d The DFA state to add
// @return The state stored in the DFA. This will be either the existing
// state if d is already in the DFA, or d itself if the
// state was not already present.
func (p *ParserATNSimulator) addDFAState(dfa *DFA, d *DFAState) *DFAState {
	if d == ATNSimulatorError {
		return d
	}
	existing, present := dfa.states.Get(d)
	if present {
		return existing
	}

	// The state was not present, so update it with configs
	//
	d.stateNumber = dfa.states.Len()
	if !d.configs.ReadOnly() {
		// Freeze the config set before publishing the state.
		d.configs.OptimizeConfigs(p.BaseATNSimulator)
		d.configs.SetReadOnly(true)
	}
	dfa.states.Put(d)
	if ParserATNSimulatorDebug {
		fmt.Println("adding NewDFA state: " + d.String())
	}

	return d
}

// ReportAttemptingFullContext notifies the error listeners that SLL prediction
// found a conflict and full-context (LL) prediction is being attempted.
func (p *ParserATNSimulator) ReportAttemptingFullContext(dfa *DFA, conflictingAlts *BitSet, configs ATNConfigSet, startIndex, stopIndex int) {
	if ParserATNSimulatorDebug || ParserATNSimulatorRetryDebug {
		interval := NewInterval(startIndex, stopIndex+1)
		fmt.Println("ReportAttemptingFullContext decision=" + strconv.Itoa(dfa.decision) + ":" + configs.String() +
			", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval))
	}
	if p.parser != nil {
		p.parser.GetErrorListenerDispatch().ReportAttemptingFullContext(p.parser, dfa, startIndex, stopIndex, conflictingAlts, configs)
	}
}

// ReportContextSensitivity notifies the error listeners that full-context
// prediction resolved to a different result than SLL would have.
func (p *ParserATNSimulator) ReportContextSensitivity(dfa *DFA, prediction int, configs ATNConfigSet, startIndex, stopIndex int) {
	if ParserATNSimulatorDebug || ParserATNSimulatorRetryDebug {
		interval := NewInterval(startIndex, stopIndex+1)
		fmt.Println("ReportContextSensitivity decision=" + strconv.Itoa(dfa.decision) + ":" + configs.String() +
			", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval))
	}
	if p.parser != nil {
		p.parser.GetErrorListenerDispatch().ReportContextSensitivity(p.parser, dfa, startIndex, stopIndex, prediction, configs)
	}
}
// ReportAmbiguity notifies the error listeners of an ambiguity.
// If we were doing context-sensitive (full-context) parsing, we know it's an
// ambiguity rather than a mere conflict.
func (p *ParserATNSimulator) ReportAmbiguity(dfa *DFA, D *DFAState, startIndex, stopIndex int,
	exact bool, ambigAlts *BitSet, configs ATNConfigSet) {
	if ParserATNSimulatorDebug || ParserATNSimulatorRetryDebug {
		interval := NewInterval(startIndex, stopIndex+1)
		fmt.Println("ReportAmbiguity " + ambigAlts.String() + ":" + configs.String() +
			", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval))
	}
	if p.parser != nil {
		p.parser.GetErrorListenerDispatch().ReportAmbiguity(p.parser, dfa, startIndex, stopIndex, exact, ambigAlts, configs)
	}
}
diff --git a/runtime/Go/antlr/v4/parser_rule_context.go b/runtime/Go/antlr/v4/parser_rule_context.go
new file mode 100644
index 0000000000..1c8cee7479
--- /dev/null
+++ b/runtime/Go/antlr/v4/parser_rule_context.go
@@ -0,0 +1,362 @@
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import (
	"reflect"
	"strconv"
)

// ParserRuleContext is a rule invocation record for parsing: it extends
// RuleContext with parse-tree construction (token/error-node children),
// listener dispatch, and start/stop token tracking.
type ParserRuleContext interface {
	RuleContext

	SetException(RecognitionException)

	AddTokenNode(token Token) *TerminalNodeImpl
	AddErrorNode(badToken Token) *ErrorNodeImpl

	EnterRule(listener ParseTreeListener)
	ExitRule(listener ParseTreeListener)

	SetStart(Token)
	GetStart() Token

	SetStop(Token)
	GetStop() Token

	AddChild(child RuleContext) RuleContext
	RemoveLastChild()
}

// BaseParserRuleContext is the default ParserRuleContext implementation.
type BaseParserRuleContext struct {
	*BaseRuleContext

	// start and stop are the first and last tokens matched by this rule.
	start, stop Token
	// exception is the RecognitionException that forced this rule to return,
	// or nil if the rule completed successfully.
	exception RecognitionException
	// children holds the sub-trees (terminal nodes and nested rule contexts).
	children []Tree
}

// NewBaseParserRuleContext creates a context with the given parent and
// invoking-state number. Children, start/stop tokens, and exception all begin
// empty/nil.
func NewBaseParserRuleContext(parent ParserRuleContext, invokingStateNumber int) *BaseParserRuleContext {
	prc := new(BaseParserRuleContext)

	prc.BaseRuleContext = NewBaseRuleContext(parent, invokingStateNumber)

	prc.RuleIndex = -1
	// If we are debugging or building a parse tree for a Visitor,
	// we need to track all of the tokens and rule invocations associated
	// with this rule's context. This is empty for parsing w/o tree
	// construction because we don't need to track the details about
	// how we parse this rule.
	prc.children = nil
	prc.start = nil
	prc.stop = nil
	// The exception that forced this rule to return. If the rule successfully
	// completed, this is nil.
	prc.exception = nil

	return prc
}

// SetException records the exception that forced this rule to return.
func (prc *BaseParserRuleContext) SetException(e RecognitionException) {
	prc.exception = e
}

// GetChildren returns the children of this context (may be nil).
func (prc *BaseParserRuleContext) GetChildren() []Tree {
	return prc.children
}

// CopyFrom copies the RuleContext fields and start/stop tokens from ctx;
// children are deliberately NOT copied.
func (prc *BaseParserRuleContext) CopyFrom(ctx *BaseParserRuleContext) {
	// from RuleContext
	prc.parentCtx = ctx.parentCtx
	prc.invokingState = ctx.invokingState
	prc.children = nil
	prc.start = ctx.start
	prc.stop = ctx.stop
}

// GetText returns the concatenated text of all leaf nodes under this context.
func (prc *BaseParserRuleContext) GetText() string {
	if prc.GetChildCount() == 0 {
		return ""
	}

	var s string
	for _, child := range prc.children {
		s += child.(ParseTree).GetText()
	}

	return s
}

// EnterRule is a double-dispatch hook for listeners; the base implementation
// is a no-op (generated contexts override it).
func (prc *BaseParserRuleContext) EnterRule(listener ParseTreeListener) {
}

// ExitRule is a double-dispatch hook for listeners; the base implementation
// is a no-op (generated contexts override it).
func (prc *BaseParserRuleContext) ExitRule(listener ParseTreeListener) {
}

// addTerminalNodeChild appends a terminal node child. It does NOT set the
// parent link; the Add* methods that call it do that.
func (prc *BaseParserRuleContext) addTerminalNodeChild(child TerminalNode) TerminalNode {
	if prc.children == nil {
		prc.children = make([]Tree, 0)
	}
	if child == nil {
		panic("Child may not be null")
	}
	prc.children = append(prc.children, child)
	return child
}

// AddChild appends a rule-context child and returns it. The parent link is
// not set here.
func (prc *BaseParserRuleContext) AddChild(child RuleContext) RuleContext {
	if prc.children == nil {
		prc.children = make([]Tree, 0)
	}
	if child == nil {
		panic("Child may not be null")
	}
	prc.children = append(prc.children, child)
	return child
}

// Used by EnterOuterAlt to toss out a RuleContext previously added as
// we entered a rule. If we have a label, we will need to remove the
// generic ruleContext object.
+// / +func (prc *BaseParserRuleContext) RemoveLastChild() { + if prc.children != nil && len(prc.children) > 0 { + prc.children = prc.children[0 : len(prc.children)-1] + } +} + +func (prc *BaseParserRuleContext) AddTokenNode(token Token) *TerminalNodeImpl { + + node := NewTerminalNodeImpl(token) + prc.addTerminalNodeChild(node) + node.parentCtx = prc + return node + +} + +func (prc *BaseParserRuleContext) AddErrorNode(badToken Token) *ErrorNodeImpl { + node := NewErrorNodeImpl(badToken) + prc.addTerminalNodeChild(node) + node.parentCtx = prc + return node +} + +func (prc *BaseParserRuleContext) GetChild(i int) Tree { + if prc.children != nil && len(prc.children) >= i { + return prc.children[i] + } + + return nil +} + +func (prc *BaseParserRuleContext) GetChildOfType(i int, childType reflect.Type) RuleContext { + if childType == nil { + return prc.GetChild(i).(RuleContext) + } + + for j := 0; j < len(prc.children); j++ { + child := prc.children[j] + if reflect.TypeOf(child) == childType { + if i == 0 { + return child.(RuleContext) + } + + i-- + } + } + + return nil +} + +func (prc *BaseParserRuleContext) ToStringTree(ruleNames []string, recog Recognizer) string { + return TreesStringTree(prc, ruleNames, recog) +} + +func (prc *BaseParserRuleContext) GetRuleContext() RuleContext { + return prc +} + +func (prc *BaseParserRuleContext) Accept(visitor ParseTreeVisitor) interface{} { + return visitor.VisitChildren(prc) +} + +func (prc *BaseParserRuleContext) SetStart(t Token) { + prc.start = t +} + +func (prc *BaseParserRuleContext) GetStart() Token { + return prc.start +} + +func (prc *BaseParserRuleContext) SetStop(t Token) { + prc.stop = t +} + +func (prc *BaseParserRuleContext) GetStop() Token { + return prc.stop +} + +func (prc *BaseParserRuleContext) GetToken(ttype int, i int) TerminalNode { + + for j := 0; j < len(prc.children); j++ { + child := prc.children[j] + if c2, ok := child.(TerminalNode); ok { + if c2.GetSymbol().GetTokenType() == ttype { + if i == 0 { + 
return c2 + } + + i-- + } + } + } + return nil +} + +func (prc *BaseParserRuleContext) GetTokens(ttype int) []TerminalNode { + if prc.children == nil { + return make([]TerminalNode, 0) + } + + tokens := make([]TerminalNode, 0) + + for j := 0; j < len(prc.children); j++ { + child := prc.children[j] + if tchild, ok := child.(TerminalNode); ok { + if tchild.GetSymbol().GetTokenType() == ttype { + tokens = append(tokens, tchild) + } + } + } + + return tokens +} + +func (prc *BaseParserRuleContext) GetPayload() interface{} { + return prc +} + +func (prc *BaseParserRuleContext) getChild(ctxType reflect.Type, i int) RuleContext { + if prc.children == nil || i < 0 || i >= len(prc.children) { + return nil + } + + j := -1 // what element have we found with ctxType? + for _, o := range prc.children { + + childType := reflect.TypeOf(o) + + if childType.Implements(ctxType) { + j++ + if j == i { + return o.(RuleContext) + } + } + } + return nil +} + +// Go lacks generics, so it's not possible for us to return the child with the correct type, but we do +// check for convertibility + +func (prc *BaseParserRuleContext) GetTypedRuleContext(ctxType reflect.Type, i int) RuleContext { + return prc.getChild(ctxType, i) +} + +func (prc *BaseParserRuleContext) GetTypedRuleContexts(ctxType reflect.Type) []RuleContext { + if prc.children == nil { + return make([]RuleContext, 0) + } + + contexts := make([]RuleContext, 0) + + for _, child := range prc.children { + childType := reflect.TypeOf(child) + + if childType.ConvertibleTo(ctxType) { + contexts = append(contexts, child.(RuleContext)) + } + } + return contexts +} + +func (prc *BaseParserRuleContext) GetChildCount() int { + if prc.children == nil { + return 0 + } + + return len(prc.children) +} + +func (prc *BaseParserRuleContext) GetSourceInterval() *Interval { + if prc.start == nil || prc.stop == nil { + return TreeInvalidInterval + } + + return NewInterval(prc.start.GetTokenIndex(), prc.stop.GetTokenIndex()) +} + +//need to manage 
circular dependencies, so export now + +// Print out a whole tree, not just a node, in LISP format +// (root child1 .. childN). Print just a node if b is a leaf. +// + +func (prc *BaseParserRuleContext) String(ruleNames []string, stop RuleContext) string { + + var p ParserRuleContext = prc + s := "[" + for p != nil && p != stop { + if ruleNames == nil { + if !p.IsEmpty() { + s += strconv.Itoa(p.GetInvokingState()) + } + } else { + ri := p.GetRuleIndex() + var ruleName string + if ri >= 0 && ri < len(ruleNames) { + ruleName = ruleNames[ri] + } else { + ruleName = strconv.Itoa(ri) + } + s += ruleName + } + if p.GetParent() != nil && (ruleNames != nil || !p.GetParent().(ParserRuleContext).IsEmpty()) { + s += " " + } + pi := p.GetParent() + if pi != nil { + p = pi.(ParserRuleContext) + } else { + p = nil + } + } + s += "]" + return s +} + +var ParserRuleContextEmpty = NewBaseParserRuleContext(nil, -1) + +type InterpreterRuleContext interface { + ParserRuleContext +} + +type BaseInterpreterRuleContext struct { + *BaseParserRuleContext +} + +func NewBaseInterpreterRuleContext(parent BaseInterpreterRuleContext, invokingStateNumber, ruleIndex int) *BaseInterpreterRuleContext { + + prc := new(BaseInterpreterRuleContext) + + prc.BaseParserRuleContext = NewBaseParserRuleContext(parent, invokingStateNumber) + + prc.RuleIndex = ruleIndex + + return prc +} diff --git a/runtime/Go/antlr/v4/prediction_context.go b/runtime/Go/antlr/v4/prediction_context.go new file mode 100644 index 0000000000..72d24c3260 --- /dev/null +++ b/runtime/Go/antlr/v4/prediction_context.go @@ -0,0 +1,764 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "golang.org/x/exp/slices" + "strconv" +) + +// Represents {@code $} in local context prediction, which means wildcard. +// {@code//+x =//}. 
+// / +const ( + BasePredictionContextEmptyReturnState = 0x7FFFFFFF +) + +// Represents {@code $} in an array in full context mode, when {@code $} +// doesn't mean wildcard: {@code $ + x = [$,x]}. Here, +// {@code $} = {@link //EmptyReturnState}. +// / + +var ( + BasePredictionContextglobalNodeCount = 1 + BasePredictionContextid = BasePredictionContextglobalNodeCount +) + +type PredictionContext interface { + Hash() int + Equals(interface{}) bool + GetParent(int) PredictionContext + getReturnState(int) int + length() int + isEmpty() bool + hasEmptyPath() bool + String() string +} + +type BasePredictionContext struct { + cachedHash int +} + +func NewBasePredictionContext(cachedHash int) *BasePredictionContext { + pc := new(BasePredictionContext) + pc.cachedHash = cachedHash + + return pc +} + +func (b *BasePredictionContext) isEmpty() bool { + return false +} + +func calculateHash(parent PredictionContext, returnState int) int { + h := murmurInit(1) + h = murmurUpdate(h, parent.Hash()) + h = murmurUpdate(h, returnState) + return murmurFinish(h, 2) +} + +var _emptyPredictionContextHash int + +func init() { + _emptyPredictionContextHash = murmurInit(1) + _emptyPredictionContextHash = murmurFinish(_emptyPredictionContextHash, 0) +} + +func calculateEmptyHash() int { + return _emptyPredictionContextHash +} + +// Used to cache {@link BasePredictionContext} objects. Its used for the shared +// context cash associated with contexts in DFA states. This cache +// can be used for both lexers and parsers. + +type PredictionContextCache struct { + cache map[PredictionContext]PredictionContext +} + +func NewPredictionContextCache() *PredictionContextCache { + t := new(PredictionContextCache) + t.cache = make(map[PredictionContext]PredictionContext) + return t +} + +// Add a context to the cache and return it. If the context already exists, +// return that one instead and do not add a Newcontext to the cache. +// Protect shared cache from unsafe thread access. 
+func (p *PredictionContextCache) add(ctx PredictionContext) PredictionContext { + if ctx == BasePredictionContextEMPTY { + return BasePredictionContextEMPTY + } + existing := p.cache[ctx] + if existing != nil { + return existing + } + p.cache[ctx] = ctx + return ctx +} + +func (p *PredictionContextCache) Get(ctx PredictionContext) PredictionContext { + return p.cache[ctx] +} + +func (p *PredictionContextCache) length() int { + return len(p.cache) +} + +type SingletonPredictionContext interface { + PredictionContext +} + +type BaseSingletonPredictionContext struct { + *BasePredictionContext + + parentCtx PredictionContext + returnState int +} + +func NewBaseSingletonPredictionContext(parent PredictionContext, returnState int) *BaseSingletonPredictionContext { + var cachedHash int + if parent != nil { + cachedHash = calculateHash(parent, returnState) + } else { + cachedHash = calculateEmptyHash() + } + + s := new(BaseSingletonPredictionContext) + s.BasePredictionContext = NewBasePredictionContext(cachedHash) + + s.parentCtx = parent + s.returnState = returnState + + return s +} + +func SingletonBasePredictionContextCreate(parent PredictionContext, returnState int) PredictionContext { + if returnState == BasePredictionContextEmptyReturnState && parent == nil { + // someone can pass in the bits of an array ctx that mean $ + return BasePredictionContextEMPTY + } + + return NewBaseSingletonPredictionContext(parent, returnState) +} + +func (b *BaseSingletonPredictionContext) length() int { + return 1 +} + +func (b *BaseSingletonPredictionContext) GetParent(index int) PredictionContext { + return b.parentCtx +} + +func (b *BaseSingletonPredictionContext) getReturnState(index int) int { + return b.returnState +} + +func (b *BaseSingletonPredictionContext) hasEmptyPath() bool { + return b.returnState == BasePredictionContextEmptyReturnState +} + +func (b *BaseSingletonPredictionContext) Hash() int { + return b.cachedHash +} + +func (b *BaseSingletonPredictionContext) 
Equals(other interface{}) bool { + if b == other { + return true + } + if _, ok := other.(*BaseSingletonPredictionContext); !ok { + return false + } + + otherP := other.(*BaseSingletonPredictionContext) + + if b.returnState != otherP.getReturnState(0) { + return false + } + if b.parentCtx == nil { + return otherP.parentCtx == nil + } + + return b.parentCtx.Equals(otherP.parentCtx) +} + +func (b *BaseSingletonPredictionContext) String() string { + var up string + + if b.parentCtx == nil { + up = "" + } else { + up = b.parentCtx.String() + } + + if len(up) == 0 { + if b.returnState == BasePredictionContextEmptyReturnState { + return "$" + } + + return strconv.Itoa(b.returnState) + } + + return strconv.Itoa(b.returnState) + " " + up +} + +var BasePredictionContextEMPTY = NewEmptyPredictionContext() + +type EmptyPredictionContext struct { + *BaseSingletonPredictionContext +} + +func NewEmptyPredictionContext() *EmptyPredictionContext { + + p := new(EmptyPredictionContext) + + p.BaseSingletonPredictionContext = NewBaseSingletonPredictionContext(nil, BasePredictionContextEmptyReturnState) + p.cachedHash = calculateEmptyHash() + return p +} + +func (e *EmptyPredictionContext) isEmpty() bool { + return true +} + +func (e *EmptyPredictionContext) GetParent(index int) PredictionContext { + return nil +} + +func (e *EmptyPredictionContext) getReturnState(index int) int { + return e.returnState +} + +func (e *EmptyPredictionContext) Hash() int { + return e.cachedHash +} + +func (e *EmptyPredictionContext) Equals(other interface{}) bool { + return e == other +} + +func (e *EmptyPredictionContext) String() string { + return "$" +} + +type ArrayPredictionContext struct { + *BasePredictionContext + + parents []PredictionContext + returnStates []int +} + +func NewArrayPredictionContext(parents []PredictionContext, returnStates []int) *ArrayPredictionContext { + // Parent can be nil only if full ctx mode and we make an array + // from {@link //EMPTY} and non-empty. 
We merge {@link //EMPTY} by using + // nil parent and + // returnState == {@link //EmptyReturnState}. + hash := murmurInit(1) + + for _, parent := range parents { + hash = murmurUpdate(hash, parent.Hash()) + } + + for _, returnState := range returnStates { + hash = murmurUpdate(hash, returnState) + } + + hash = murmurFinish(hash, len(parents)<<1) + + c := new(ArrayPredictionContext) + c.BasePredictionContext = NewBasePredictionContext(hash) + + c.parents = parents + c.returnStates = returnStates + + return c +} + +func (a *ArrayPredictionContext) GetReturnStates() []int { + return a.returnStates +} + +func (a *ArrayPredictionContext) hasEmptyPath() bool { + return a.getReturnState(a.length()-1) == BasePredictionContextEmptyReturnState +} + +func (a *ArrayPredictionContext) isEmpty() bool { + // since EmptyReturnState can only appear in the last position, we + // don't need to verify that size==1 + return a.returnStates[0] == BasePredictionContextEmptyReturnState +} + +func (a *ArrayPredictionContext) length() int { + return len(a.returnStates) +} + +func (a *ArrayPredictionContext) GetParent(index int) PredictionContext { + return a.parents[index] +} + +func (a *ArrayPredictionContext) getReturnState(index int) int { + return a.returnStates[index] +} + +// Equals is the default comparison function for ArrayPredictionContext when no specialized +// implementation is needed for a collection +func (a *ArrayPredictionContext) Equals(o interface{}) bool { + if a == o { + return true + } + other, ok := o.(*ArrayPredictionContext) + if !ok { + return false + } + if a.cachedHash != other.Hash() { + return false // can't be same if hash is different + } + + // Must compare the actual array elements and not just the array address + // + return slices.Equal(a.returnStates, other.returnStates) && + slices.EqualFunc(a.parents, other.parents, func(x, y PredictionContext) bool { + return x.Equals(y) + }) +} + +// Hash is the default hash function for ArrayPredictionContext when 
no specialized +// implementation is needed for a collection +func (a *ArrayPredictionContext) Hash() int { + return a.BasePredictionContext.cachedHash +} + +func (a *ArrayPredictionContext) String() string { + if a.isEmpty() { + return "[]" + } + + s := "[" + for i := 0; i < len(a.returnStates); i++ { + if i > 0 { + s = s + ", " + } + if a.returnStates[i] == BasePredictionContextEmptyReturnState { + s = s + "$" + continue + } + s = s + strconv.Itoa(a.returnStates[i]) + if a.parents[i] != nil { + s = s + " " + a.parents[i].String() + } else { + s = s + "nil" + } + } + + return s + "]" +} + +// Convert a {@link RuleContext} tree to a {@link BasePredictionContext} graph. +// Return {@link //EMPTY} if {@code outerContext} is empty or nil. +// / +func predictionContextFromRuleContext(a *ATN, outerContext RuleContext) PredictionContext { + if outerContext == nil { + outerContext = ParserRuleContextEmpty + } + // if we are in RuleContext of start rule, s, then BasePredictionContext + // is EMPTY. Nobody called us. 
(if we are empty, return empty) + if outerContext.GetParent() == nil || outerContext == ParserRuleContextEmpty { + return BasePredictionContextEMPTY + } + // If we have a parent, convert it to a BasePredictionContext graph + parent := predictionContextFromRuleContext(a, outerContext.GetParent().(RuleContext)) + state := a.states[outerContext.GetInvokingState()] + transition := state.GetTransitions()[0] + + return SingletonBasePredictionContextCreate(parent, transition.(*RuleTransition).followState.GetStateNumber()) +} + +func merge(a, b PredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext { + // share same graph if both same + if a == b { + return a + } + + ac, ok1 := a.(*BaseSingletonPredictionContext) + bc, ok2 := b.(*BaseSingletonPredictionContext) + + if ok1 && ok2 { + return mergeSingletons(ac, bc, rootIsWildcard, mergeCache) + } + // At least one of a or b is array + // If one is $ and rootIsWildcard, return $ as// wildcard + if rootIsWildcard { + if _, ok := a.(*EmptyPredictionContext); ok { + return a + } + if _, ok := b.(*EmptyPredictionContext); ok { + return b + } + } + // convert singleton so both are arrays to normalize + if _, ok := a.(*BaseSingletonPredictionContext); ok { + a = NewArrayPredictionContext([]PredictionContext{a.GetParent(0)}, []int{a.getReturnState(0)}) + } + if _, ok := b.(*BaseSingletonPredictionContext); ok { + b = NewArrayPredictionContext([]PredictionContext{b.GetParent(0)}, []int{b.getReturnState(0)}) + } + return mergeArrays(a.(*ArrayPredictionContext), b.(*ArrayPredictionContext), rootIsWildcard, mergeCache) +} + +// Merge two {@link SingletonBasePredictionContext} instances. +// +//

    Stack tops equal, parents merge is same return left graph.
    +//

    +// +//

    Same stack top, parents differ merge parents giving array node, then +// remainders of those graphs. A Newroot node is created to point to the +// merged parents.
    +//

    +// +//

    Different stack tops pointing to same parent. Make array node for the +// root where both element in the root point to the same (original) +// parent.
    +//

    +// +//

    Different stack tops pointing to different parents. Make array node for +// the root where each element points to the corresponding original +// parent.
    +//

    +// +// @param a the first {@link SingletonBasePredictionContext} +// @param b the second {@link SingletonBasePredictionContext} +// @param rootIsWildcard {@code true} if this is a local-context merge, +// otherwise false to indicate a full-context merge +// @param mergeCache +// / +func mergeSingletons(a, b *BaseSingletonPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext { + if mergeCache != nil { + previous := mergeCache.Get(a.Hash(), b.Hash()) + if previous != nil { + return previous.(PredictionContext) + } + previous = mergeCache.Get(b.Hash(), a.Hash()) + if previous != nil { + return previous.(PredictionContext) + } + } + + rootMerge := mergeRoot(a, b, rootIsWildcard) + if rootMerge != nil { + if mergeCache != nil { + mergeCache.set(a.Hash(), b.Hash(), rootMerge) + } + return rootMerge + } + if a.returnState == b.returnState { + parent := merge(a.parentCtx, b.parentCtx, rootIsWildcard, mergeCache) + // if parent is same as existing a or b parent or reduced to a parent, + // return it + if parent == a.parentCtx { + return a // ax + bx = ax, if a=b + } + if parent == b.parentCtx { + return b // ax + bx = bx, if a=b + } + // else: ax + ay = a'[x,y] + // merge parents x and y, giving array node with x,y then remainders + // of those graphs. 
dup a, a' points at merged array + // Newjoined parent so create Newsingleton pointing to it, a' + spc := SingletonBasePredictionContextCreate(parent, a.returnState) + if mergeCache != nil { + mergeCache.set(a.Hash(), b.Hash(), spc) + } + return spc + } + // a != b payloads differ + // see if we can collapse parents due to $+x parents if local ctx + var singleParent PredictionContext + if a == b || (a.parentCtx != nil && a.parentCtx == b.parentCtx) { // ax + + // bx = + // [a,b]x + singleParent = a.parentCtx + } + if singleParent != nil { // parents are same + // sort payloads and use same parent + payloads := []int{a.returnState, b.returnState} + if a.returnState > b.returnState { + payloads[0] = b.returnState + payloads[1] = a.returnState + } + parents := []PredictionContext{singleParent, singleParent} + apc := NewArrayPredictionContext(parents, payloads) + if mergeCache != nil { + mergeCache.set(a.Hash(), b.Hash(), apc) + } + return apc + } + // parents differ and can't merge them. Just pack together + // into array can't merge. + // ax + by = [ax,by] + payloads := []int{a.returnState, b.returnState} + parents := []PredictionContext{a.parentCtx, b.parentCtx} + if a.returnState > b.returnState { // sort by payload + payloads[0] = b.returnState + payloads[1] = a.returnState + parents = []PredictionContext{b.parentCtx, a.parentCtx} + } + apc := NewArrayPredictionContext(parents, payloads) + if mergeCache != nil { + mergeCache.set(a.Hash(), b.Hash(), apc) + } + return apc +} + +// Handle case where at least one of {@code a} or {@code b} is +// {@link //EMPTY}. In the following diagrams, the symbol {@code $} is used +// to represent {@link //EMPTY}. +// +//

    Local-Context Merges

    +// +//

    These local-context merge operations are used when {@code rootIsWildcard} +// is true.

    +// +//

    {@link //EMPTY} is superset of any graph return {@link //EMPTY}.
    +//

    +// +//

    {@link //EMPTY} and anything is {@code //EMPTY}, so merged parent is +// {@code //EMPTY} return left graph.
    +//

    +// +//

    Special case of last merge if local context.
    +//

    +// +//

    Full-Context Merges

    +// +//

    These full-context merge operations are used when {@code rootIsWildcard} +// is false.

    +// +//

    +// +//

    Must keep all contexts {@link //EMPTY} in array is a special value (and +// nil parent).
    +//

    +// +//

    +// +// @param a the first {@link SingletonBasePredictionContext} +// @param b the second {@link SingletonBasePredictionContext} +// @param rootIsWildcard {@code true} if this is a local-context merge, +// otherwise false to indicate a full-context merge +// / +func mergeRoot(a, b SingletonPredictionContext, rootIsWildcard bool) PredictionContext { + if rootIsWildcard { + if a == BasePredictionContextEMPTY { + return BasePredictionContextEMPTY // // + b =// + } + if b == BasePredictionContextEMPTY { + return BasePredictionContextEMPTY // a +// =// + } + } else { + if a == BasePredictionContextEMPTY && b == BasePredictionContextEMPTY { + return BasePredictionContextEMPTY // $ + $ = $ + } else if a == BasePredictionContextEMPTY { // $ + x = [$,x] + payloads := []int{b.getReturnState(-1), BasePredictionContextEmptyReturnState} + parents := []PredictionContext{b.GetParent(-1), nil} + return NewArrayPredictionContext(parents, payloads) + } else if b == BasePredictionContextEMPTY { // x + $ = [$,x] ($ is always first if present) + payloads := []int{a.getReturnState(-1), BasePredictionContextEmptyReturnState} + parents := []PredictionContext{a.GetParent(-1), nil} + return NewArrayPredictionContext(parents, payloads) + } + } + return nil +} + +// Merge two {@link ArrayBasePredictionContext} instances. +// +//

    Different tops, different parents.
    +//

    +// +//

    Shared top, same parents.
    +//

    +// +//

    Shared top, different parents.
    +//

    +// +//

    Shared top, all shared parents.
    +//

    +// +//

    Equal tops, merge parents and reduce top to +// {@link SingletonBasePredictionContext}.
    +//

    +// / +func mergeArrays(a, b *ArrayPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext { + if mergeCache != nil { + previous := mergeCache.Get(a.Hash(), b.Hash()) + if previous != nil { + return previous.(PredictionContext) + } + previous = mergeCache.Get(b.Hash(), a.Hash()) + if previous != nil { + return previous.(PredictionContext) + } + } + // merge sorted payloads a + b => M + i := 0 // walks a + j := 0 // walks b + k := 0 // walks target M array + + mergedReturnStates := make([]int, len(a.returnStates)+len(b.returnStates)) + mergedParents := make([]PredictionContext, len(a.returnStates)+len(b.returnStates)) + // walk and merge to yield mergedParents, mergedReturnStates + for i < len(a.returnStates) && j < len(b.returnStates) { + aParent := a.parents[i] + bParent := b.parents[j] + if a.returnStates[i] == b.returnStates[j] { + // same payload (stack tops are equal), must yield merged singleton + payload := a.returnStates[i] + // $+$ = $ + bothDollars := payload == BasePredictionContextEmptyReturnState && aParent == nil && bParent == nil + axAX := aParent != nil && bParent != nil && aParent == bParent // ax+ax + // -> + // ax + if bothDollars || axAX { + mergedParents[k] = aParent // choose left + mergedReturnStates[k] = payload + } else { // ax+ay -> a'[x,y] + mergedParent := merge(aParent, bParent, rootIsWildcard, mergeCache) + mergedParents[k] = mergedParent + mergedReturnStates[k] = payload + } + i++ // hop over left one as usual + j++ // but also Skip one in right side since we merge + } else if a.returnStates[i] < b.returnStates[j] { // copy a[i] to M + mergedParents[k] = aParent + mergedReturnStates[k] = a.returnStates[i] + i++ + } else { // b > a, copy b[j] to M + mergedParents[k] = bParent + mergedReturnStates[k] = b.returnStates[j] + j++ + } + k++ + } + // copy over any payloads remaining in either array + if i < len(a.returnStates) { + for p := i; p < len(a.returnStates); p++ { + mergedParents[k] = a.parents[p] + 
mergedReturnStates[k] = a.returnStates[p] + k++ + } + } else { + for p := j; p < len(b.returnStates); p++ { + mergedParents[k] = b.parents[p] + mergedReturnStates[k] = b.returnStates[p] + k++ + } + } + // trim merged if we combined a few that had same stack tops + if k < len(mergedParents) { // write index < last position trim + if k == 1 { // for just one merged element, return singleton top + pc := SingletonBasePredictionContextCreate(mergedParents[0], mergedReturnStates[0]) + if mergeCache != nil { + mergeCache.set(a.Hash(), b.Hash(), pc) + } + return pc + } + mergedParents = mergedParents[0:k] + mergedReturnStates = mergedReturnStates[0:k] + } + + M := NewArrayPredictionContext(mergedParents, mergedReturnStates) + + // if we created same array as a or b, return that instead + // TODO: track whether this is possible above during merge sort for speed + if M == a { + if mergeCache != nil { + mergeCache.set(a.Hash(), b.Hash(), a) + } + return a + } + if M == b { + if mergeCache != nil { + mergeCache.set(a.Hash(), b.Hash(), b) + } + return b + } + combineCommonParents(mergedParents) + + if mergeCache != nil { + mergeCache.set(a.Hash(), b.Hash(), M) + } + return M +} + +// Make pass over all M {@code parents} merge any {@code equals()} +// ones. 
+// / +func combineCommonParents(parents []PredictionContext) { + uniqueParents := make(map[PredictionContext]PredictionContext) + + for p := 0; p < len(parents); p++ { + parent := parents[p] + if uniqueParents[parent] == nil { + uniqueParents[parent] = parent + } + } + for q := 0; q < len(parents); q++ { + parents[q] = uniqueParents[parents[q]] + } +} + +func getCachedBasePredictionContext(context PredictionContext, contextCache *PredictionContextCache, visited map[PredictionContext]PredictionContext) PredictionContext { + + if context.isEmpty() { + return context + } + existing := visited[context] + if existing != nil { + return existing + } + existing = contextCache.Get(context) + if existing != nil { + visited[context] = existing + return existing + } + changed := false + parents := make([]PredictionContext, context.length()) + for i := 0; i < len(parents); i++ { + parent := getCachedBasePredictionContext(context.GetParent(i), contextCache, visited) + if changed || parent != context.GetParent(i) { + if !changed { + parents = make([]PredictionContext, context.length()) + for j := 0; j < context.length(); j++ { + parents[j] = context.GetParent(j) + } + changed = true + } + parents[i] = parent + } + } + if !changed { + contextCache.add(context) + visited[context] = context + return context + } + var updated PredictionContext + if len(parents) == 0 { + updated = BasePredictionContextEMPTY + } else if len(parents) == 1 { + updated = SingletonBasePredictionContextCreate(parents[0], context.getReturnState(0)) + } else { + updated = NewArrayPredictionContext(parents, context.(*ArrayPredictionContext).GetReturnStates()) + } + contextCache.add(updated) + visited[updated] = updated + visited[context] = updated + + return updated +} diff --git a/runtime/Go/antlr/v4/prediction_mode.go b/runtime/Go/antlr/v4/prediction_mode.go new file mode 100644 index 0000000000..270a89d393 --- /dev/null +++ b/runtime/Go/antlr/v4/prediction_mode.go @@ -0,0 +1,529 @@ +// Copyright (c) 
2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +// This enumeration defines the prediction modes available in ANTLR 4 along with +// utility methods for analyzing configuration sets for conflicts and/or +// ambiguities. + +const ( + // + // The SLL(*) prediction mode. This prediction mode ignores the current + // parser context when making predictions. This is the fastest prediction + // mode, and provides correct results for many grammars. This prediction + // mode is more powerful than the prediction mode provided by ANTLR 3, but + // may result in syntax errors for grammar and input combinations which are + // not SLL. + // + //

    + // When using this prediction mode, the parser will either return a correct + // parse tree (i.e. the same parse tree that would be returned with the + // {@link //LL} prediction mode), or it will Report a syntax error. If a + // syntax error is encountered when using the {@link //SLL} prediction mode, + // it may be due to either an actual syntax error in the input or indicate + // that the particular combination of grammar and input requires the more + // powerful {@link //LL} prediction abilities to complete successfully.

    + // + //

    + // This prediction mode does not provide any guarantees for prediction + // behavior for syntactically-incorrect inputs.

    + // + PredictionModeSLL = 0 + // + // The LL(*) prediction mode. This prediction mode allows the current parser + // context to be used for resolving SLL conflicts that occur during + // prediction. This is the fastest prediction mode that guarantees correct + // parse results for all combinations of grammars with syntactically correct + // inputs. + // + //

    + // When using this prediction mode, the parser will make correct decisions + // for all syntactically-correct grammar and input combinations. However, in + // cases where the grammar is truly ambiguous this prediction mode might not + // Report a precise answer for exactly which alternatives are + // ambiguous.

    + // + //

    + // This prediction mode does not provide any guarantees for prediction + // behavior for syntactically-incorrect inputs.

    + // + PredictionModeLL = 1 + // + // The LL(*) prediction mode with exact ambiguity detection. In addition to + // the correctness guarantees provided by the {@link //LL} prediction mode, + // this prediction mode instructs the prediction algorithm to determine the + // complete and exact set of ambiguous alternatives for every ambiguous + // decision encountered while parsing. + // + //

    + // This prediction mode may be used for diagnosing ambiguities during + // grammar development. Due to the performance overhead of calculating sets + // of ambiguous alternatives, this prediction mode should be avoided when + // the exact results are not necessary.

    + // + //

    + // This prediction mode does not provide any guarantees for prediction + // behavior for syntactically-incorrect inputs.

    + // + PredictionModeLLExactAmbigDetection = 2 +) + +// Computes the SLL prediction termination condition. +// +//

    +// This method computes the SLL prediction termination condition for both of +// the following cases.

    +// +//
      +//
    • The usual SLL+LL fallback upon SLL conflict
    • +//
    • Pure SLL without LL fallback
    • +//
    +// +//

    COMBINED SLL+LL PARSING

    +// +//

    When LL-fallback is enabled upon SLL conflict, correct predictions are +// ensured regardless of how the termination condition is computed by this +// method. Due to the substantially higher cost of LL prediction, the +// prediction should only fall back to LL when the additional lookahead +// cannot lead to a unique SLL prediction.

    +// +//

    Assuming combined SLL+LL parsing, an SLL configuration set with only +// conflicting subsets should fall back to full LL, even if the +// configuration sets don't resolve to the same alternative (e.g. +// {@code {1,2}} and {@code {3,4}}. If there is at least one non-conflicting +// configuration, SLL could continue with the hopes that more lookahead will +// resolve via one of those non-conflicting configurations.

    +// +//

    Here's the prediction termination rule them: SLL (for SLL+LL parsing) +// stops when it sees only conflicting configuration subsets. In contrast, +// full LL keeps going when there is uncertainty.

    +// +//

    HEURISTIC

    +// +//

    As a heuristic, we stop prediction when we see any conflicting subset +// unless we see a state that only has one alternative associated with it. +// The single-alt-state thing lets prediction continue upon rules like +// (otherwise, it would admit defeat too soon):

    +// +//

    {@code [12|1|[], 6|2|[], 12|2|[]]. s : (ID | ID ID?) ” }

    +// +//

    When the ATN simulation reaches the state before {@code ”}, it has a +// DFA state that looks like: {@code [12|1|[], 6|2|[], 12|2|[]]}. Naturally +// {@code 12|1|[]} and {@code 12|2|[]} conflict, but we cannot stop +// processing this node because alternative to has another way to continue, +// via {@code [6|2|[]]}.

    +// +//

    It also let's us continue for this rule:

    +// +//

    {@code [1|1|[], 1|2|[], 8|3|[]] a : A | A | A B }

    +// +//

    After Matching input A, we reach the stop state for rule A, state 1. +// State 8 is the state right before B. Clearly alternatives 1 and 2 +// conflict and no amount of further lookahead will separate the two. +// However, alternative 3 will be able to continue and so we do not stop +// working on this state. In the previous example, we're concerned with +// states associated with the conflicting alternatives. Here alt 3 is not +// associated with the conflicting configs, but since we can continue +// looking for input reasonably, don't declare the state done.

    +// +//

    PURE SLL PARSING

    +// +//

    To handle pure SLL parsing, all we have to do is make sure that we +// combine stack contexts for configurations that differ only by semantic +// predicate. From there, we can do the usual SLL termination heuristic.

    +// +//

    PREDICATES IN SLL+LL PARSING

    +// +//

    SLL decisions don't evaluate predicates until after they reach DFA stop +// states because they need to create the DFA cache that works in all +// semantic situations. In contrast, full LL evaluates predicates collected +// during start state computation so it can ignore predicates thereafter. +// This means that SLL termination detection can totally ignore semantic +// predicates.

    +// +//

    Implementation-wise, {@link ATNConfigSet} combines stack contexts but not +// semantic predicate contexts so we might see two configurations like the +// following.

    +// +//

    {@code (s, 1, x, {}), (s, 1, x', {p})}

    +// +//

    Before testing these configurations against others, we have to merge +// {@code x} and {@code x'} (without modifying the existing configurations). +// For example, we test {@code (x+x')==x”} when looking for conflicts in +// the following configurations.

    +// +//

    {@code (s, 1, x, {}), (s, 1, x', {p}), (s, 2, x”, {})}

    +// +//

    If the configuration set has predicates (as indicated by +// {@link ATNConfigSet//hasSemanticContext}), this algorithm makes a copy of +// the configurations to strip out all of the predicates so that a standard +// {@link ATNConfigSet} will merge everything ignoring predicates.

    +func PredictionModehasSLLConflictTerminatingPrediction(mode int, configs ATNConfigSet) bool { + // Configs in rule stop states indicate reaching the end of the decision + // rule (local context) or end of start rule (full context). If all + // configs meet this condition, then none of the configurations is able + // to Match additional input so we terminate prediction. + // + if PredictionModeallConfigsInRuleStopStates(configs) { + return true + } + // pure SLL mode parsing + if mode == PredictionModeSLL { + // Don't bother with combining configs from different semantic + // contexts if we can fail over to full LL costs more time + // since we'll often fail over anyway. + if configs.HasSemanticContext() { + // dup configs, tossing out semantic predicates + dup := NewBaseATNConfigSet(false) + for _, c := range configs.GetItems() { + + // NewBaseATNConfig({semanticContext:}, c) + c = NewBaseATNConfig2(c, SemanticContextNone) + dup.Add(c, nil) + } + configs = dup + } + // now we have combined contexts for configs with dissimilar preds + } + // pure SLL or combined SLL+LL mode parsing + altsets := PredictionModegetConflictingAltSubsets(configs) + return PredictionModehasConflictingAltSet(altsets) && !PredictionModehasStateAssociatedWithOneAlt(configs) +} + +// Checks if any configuration in {@code configs} is in a +// {@link RuleStopState}. Configurations meeting this condition have reached +// the end of the decision rule (local context) or end of start rule (full +// context). +// +// @param configs the configuration set to test +// @return {@code true} if any configuration in {@code configs} is in a +// {@link RuleStopState}, otherwise {@code false} +func PredictionModehasConfigInRuleStopState(configs ATNConfigSet) bool { + for _, c := range configs.GetItems() { + if _, ok := c.GetState().(*RuleStopState); ok { + return true + } + } + return false +} + +// Checks if all configurations in {@code configs} are in a +// {@link RuleStopState}. 
Configurations meeting this condition have reached +// the end of the decision rule (local context) or end of start rule (full +// context). +// +// @param configs the configuration set to test +// @return {@code true} if all configurations in {@code configs} are in a +// {@link RuleStopState}, otherwise {@code false} +func PredictionModeallConfigsInRuleStopStates(configs ATNConfigSet) bool { + + for _, c := range configs.GetItems() { + if _, ok := c.GetState().(*RuleStopState); !ok { + return false + } + } + return true +} + +// Full LL prediction termination. +// +//

    Can we stop looking ahead during ATN simulation or is there some +// uncertainty as to which alternative we will ultimately pick, after +// consuming more input? Even if there are partial conflicts, we might know +// that everything is going to resolve to the same minimum alternative. That +// means we can stop since no more lookahead will change that fact. On the +// other hand, there might be multiple conflicts that resolve to different +// minimums. That means we need more look ahead to decide which of those +// alternatives we should predict.

    +// +//

    The basic idea is to split the set of configurations {@code C}, into +// conflicting subsets {@code (s, _, ctx, _)} and singleton subsets with +// non-conflicting configurations. Two configurations conflict if they have +// identical {@link ATNConfig//state} and {@link ATNConfig//context} values +// but different {@link ATNConfig//alt} value, e.g. {@code (s, i, ctx, _)} +// and {@code (s, j, ctx, _)} for {@code i!=j}.

    +// +//

    Reduce these configuration subsets to the set of possible alternatives. +// You can compute the alternative subsets in one pass as follows:

    +// +//

    {@code A_s,ctx = {i | (s, i, ctx, _)}} for each configuration in +// {@code C} holding {@code s} and {@code ctx} fixed.

    +// +//

    Or in pseudo-code, for each configuration {@code c} in {@code C}:

    +// +//
    +// map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not
    +// alt and not pred
    +// 
    +// +//

    The values in {@code map} are the set of {@code A_s,ctx} sets.

    +// +//

    If {@code |A_s,ctx|=1} then there is no conflict associated with +// {@code s} and {@code ctx}.

    +// +//

    Reduce the subsets to singletons by choosing a minimum of each subset. If +// the union of these alternative subsets is a singleton, then no amount of +// more lookahead will help us. We will always pick that alternative. If, +// however, there is more than one alternative, then we are uncertain which +// alternative to predict and must continue looking for resolution. We may +// or may not discover an ambiguity in the future, even if there are no +// conflicting subsets this round.

    +// +//

    The biggest sin is to terminate early because it means we've made a +// decision but were uncertain as to the eventual outcome. We haven't used +// enough lookahead. On the other hand, announcing a conflict too late is no +// big deal you will still have the conflict. It's just inefficient. It +// might even look until the end of file.

    +// +//

    No special consideration for semantic predicates is required because +// predicates are evaluated on-the-fly for full LL prediction, ensuring that +// no configuration contains a semantic context during the termination +// check.

    +// +//

    CONFLICTING CONFIGS

    +// +//

    Two configurations {@code (s, i, x)} and {@code (s, j, x')}, conflict +// when {@code i!=j} but {@code x=x'}. Because we merge all +// {@code (s, i, _)} configurations together, that means that there are at +// most {@code n} configurations associated with state {@code s} for +// {@code n} possible alternatives in the decision. The merged stacks +// complicate the comparison of configuration contexts {@code x} and +// {@code x'}. Sam checks to see if one is a subset of the other by calling +// merge and checking to see if the merged result is either {@code x} or +// {@code x'}. If the {@code x} associated with lowest alternative {@code i} +// is the superset, then {@code i} is the only possible prediction since the +// others resolve to {@code min(i)} as well. However, if {@code x} is +// associated with {@code j>i} then at least one stack configuration for +// {@code j} is not in conflict with alternative {@code i}. The algorithm +// should keep going, looking for more lookahead due to the uncertainty.

    +// +//

    For simplicity, I'm doing a equality check between {@code x} and +// {@code x'} that lets the algorithm continue to consume lookahead longer +// than necessary. The reason I like the equality is of course the +// simplicity but also because that is the test you need to detect the +// alternatives that are actually in conflict.

    +// +//

    CONTINUE/STOP RULE

    +// +//

    Continue if union of resolved alternative sets from non-conflicting and +// conflicting alternative subsets has more than one alternative. We are +// uncertain about which alternative to predict.

    +// +//

    The complete set of alternatives, {@code [i for (_,i,_)]}, tells us which +// alternatives are still in the running for the amount of input we've +// consumed at this point. The conflicting sets let us to strip away +// configurations that won't lead to more states because we resolve +// conflicts to the configuration with a minimum alternate for the +// conflicting set.

    +// +//

    CASES

    +// +//
      +// +//
    • no conflicts and more than 1 alternative in set => continue
    • +// +//
    • {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s, 3, z)}, +// {@code (s', 1, y)}, {@code (s', 2, y)} yields non-conflicting set +// {@code {3}} U conflicting sets {@code min({1,2})} U {@code min({1,2})} = +// {@code {1,3}} => continue +//
    • +// +//
    • {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 1, y)}, +// {@code (s', 2, y)}, {@code (s”, 1, z)} yields non-conflicting set +// {@code {1}} U conflicting sets {@code min({1,2})} U {@code min({1,2})} = +// {@code {1}} => stop and predict 1
    • +// +//
    • {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 1, y)}, +// {@code (s', 2, y)} yields conflicting, reduced sets {@code {1}} U +// {@code {1}} = {@code {1}} => stop and predict 1, can announce +// ambiguity {@code {1,2}}
    • +// +//
    • {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 2, y)}, +// {@code (s', 3, y)} yields conflicting, reduced sets {@code {1}} U +// {@code {2}} = {@code {1,2}} => continue
    • +// +//
    • {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 3, y)}, +// {@code (s', 4, y)} yields conflicting, reduced sets {@code {1}} U +// {@code {3}} = {@code {1,3}} => continue
    • +// +//
    +// +//

    EXACT AMBIGUITY DETECTION

    +// +//

    If all states Report the same conflicting set of alternatives, then we +// know we have the exact ambiguity set.

    +// +//

    |A_i|>1 and +// A_i = A_j for all i, j.

    +// +//

    In other words, we continue examining lookahead until all {@code A_i} +// have more than one alternative and all {@code A_i} are the same. If +// {@code A={{1,2}, {1,3}}}, then regular LL prediction would terminate +// because the resolved set is {@code {1}}. To determine what the real +// ambiguity is, we have to know whether the ambiguity is between one and +// two or one and three so we keep going. We can only stop prediction when +// we need exact ambiguity detection when the sets look like +// {@code A={{1,2}}} or {@code {{1,2},{1,2}}}, etc...

    +func PredictionModeresolvesToJustOneViableAlt(altsets []*BitSet) int { + return PredictionModegetSingleViableAlt(altsets) +} + +// Determines if every alternative subset in {@code altsets} contains more +// than one alternative. +// +// @param altsets a collection of alternative subsets +// @return {@code true} if every {@link BitSet} in {@code altsets} has +// {@link BitSet//cardinality cardinality} > 1, otherwise {@code false} +func PredictionModeallSubsetsConflict(altsets []*BitSet) bool { + return !PredictionModehasNonConflictingAltSet(altsets) +} + +// Determines if any single alternative subset in {@code altsets} contains +// exactly one alternative. +// +// @param altsets a collection of alternative subsets +// @return {@code true} if {@code altsets} contains a {@link BitSet} with +// {@link BitSet//cardinality cardinality} 1, otherwise {@code false} +func PredictionModehasNonConflictingAltSet(altsets []*BitSet) bool { + for i := 0; i < len(altsets); i++ { + alts := altsets[i] + if alts.length() == 1 { + return true + } + } + return false +} + +// Determines if any single alternative subset in {@code altsets} contains +// more than one alternative. +// +// @param altsets a collection of alternative subsets +// @return {@code true} if {@code altsets} contains a {@link BitSet} with +// {@link BitSet//cardinality cardinality} > 1, otherwise {@code false} +func PredictionModehasConflictingAltSet(altsets []*BitSet) bool { + for i := 0; i < len(altsets); i++ { + alts := altsets[i] + if alts.length() > 1 { + return true + } + } + return false +} + +// Determines if every alternative subset in {@code altsets} is equivalent. 
+// +// @param altsets a collection of alternative subsets +// @return {@code true} if every member of {@code altsets} is equal to the +// others, otherwise {@code false} +func PredictionModeallSubsetsEqual(altsets []*BitSet) bool { + var first *BitSet + + for i := 0; i < len(altsets); i++ { + alts := altsets[i] + if first == nil { + first = alts + } else if alts != first { + return false + } + } + + return true +} + +// Returns the unique alternative predicted by all alternative subsets in +// {@code altsets}. If no such alternative exists, this method returns +// {@link ATN//INVALID_ALT_NUMBER}. +// +// @param altsets a collection of alternative subsets +func PredictionModegetUniqueAlt(altsets []*BitSet) int { + all := PredictionModeGetAlts(altsets) + if all.length() == 1 { + return all.minValue() + } + + return ATNInvalidAltNumber +} + +// Gets the complete set of represented alternatives for a collection of +// alternative subsets. This method returns the union of each {@link BitSet} +// in {@code altsets}. +// +// @param altsets a collection of alternative subsets +// @return the set of represented alternatives in {@code altsets} +func PredictionModeGetAlts(altsets []*BitSet) *BitSet { + all := NewBitSet() + for _, alts := range altsets { + all.or(alts) + } + return all +} + +// PredictionModegetConflictingAltSubsets gets the conflicting alt subsets from a configuration set. +// For each configuration {@code c} in {@code configs}: +// +//
    +// map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not
    +// alt and not pred
    +// 
    +func PredictionModegetConflictingAltSubsets(configs ATNConfigSet) []*BitSet { + configToAlts := NewJMap[ATNConfig, *BitSet, *ATNAltConfigComparator[ATNConfig]](&ATNAltConfigComparator[ATNConfig]{}) + + for _, c := range configs.GetItems() { + + alts, ok := configToAlts.Get(c) + if !ok { + alts = NewBitSet() + configToAlts.Put(c, alts) + } + alts.add(c.GetAlt()) + } + + return configToAlts.Values() +} + +// PredictionModeGetStateToAltMap gets a map from state to alt subset from a configuration set. For each +// configuration {@code c} in {@code configs}: +// +//
    +// map[c.{@link ATNConfig//state state}] U= c.{@link ATNConfig//alt alt}
    +// 
    +func PredictionModeGetStateToAltMap(configs ATNConfigSet) *AltDict { + m := NewAltDict() + + for _, c := range configs.GetItems() { + alts := m.Get(c.GetState().String()) + if alts == nil { + alts = NewBitSet() + m.put(c.GetState().String(), alts) + } + alts.(*BitSet).add(c.GetAlt()) + } + return m +} + +func PredictionModehasStateAssociatedWithOneAlt(configs ATNConfigSet) bool { + values := PredictionModeGetStateToAltMap(configs).values() + for i := 0; i < len(values); i++ { + if values[i].(*BitSet).length() == 1 { + return true + } + } + return false +} + +func PredictionModegetSingleViableAlt(altsets []*BitSet) int { + result := ATNInvalidAltNumber + + for i := 0; i < len(altsets); i++ { + alts := altsets[i] + minAlt := alts.minValue() + if result == ATNInvalidAltNumber { + result = minAlt + } else if result != minAlt { // more than 1 viable alt + return ATNInvalidAltNumber + } + } + return result +} diff --git a/runtime/Go/antlr/v4/recognizer.go b/runtime/Go/antlr/v4/recognizer.go new file mode 100644 index 0000000000..2cd05443b0 --- /dev/null +++ b/runtime/Go/antlr/v4/recognizer.go @@ -0,0 +1,216 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. 
+ +package antlr + +import ( + "fmt" + "strings" + + "strconv" +) + +type Recognizer interface { + GetLiteralNames() []string + GetSymbolicNames() []string + GetRuleNames() []string + + Sempred(RuleContext, int, int) bool + Precpred(RuleContext, int) bool + + GetState() int + SetState(int) + Action(RuleContext, int, int) + AddErrorListener(ErrorListener) + RemoveErrorListeners() + GetATN() *ATN + GetErrorListenerDispatch() ErrorListener +} + +type BaseRecognizer struct { + listeners []ErrorListener + state int + + RuleNames []string + LiteralNames []string + SymbolicNames []string + GrammarFileName string +} + +func NewBaseRecognizer() *BaseRecognizer { + rec := new(BaseRecognizer) + rec.listeners = []ErrorListener{ConsoleErrorListenerINSTANCE} + rec.state = -1 + return rec +} + +var tokenTypeMapCache = make(map[string]int) +var ruleIndexMapCache = make(map[string]int) + +func (b *BaseRecognizer) checkVersion(toolVersion string) { + runtimeVersion := "4.11.0" + if runtimeVersion != toolVersion { + fmt.Println("ANTLR runtime and generated code versions disagree: " + runtimeVersion + "!=" + toolVersion) + } +} + +func (b *BaseRecognizer) Action(context RuleContext, ruleIndex, actionIndex int) { + panic("action not implemented on Recognizer!") +} + +func (b *BaseRecognizer) AddErrorListener(listener ErrorListener) { + b.listeners = append(b.listeners, listener) +} + +func (b *BaseRecognizer) RemoveErrorListeners() { + b.listeners = make([]ErrorListener, 0) +} + +func (b *BaseRecognizer) GetRuleNames() []string { + return b.RuleNames +} + +func (b *BaseRecognizer) GetTokenNames() []string { + return b.LiteralNames +} + +func (b *BaseRecognizer) GetSymbolicNames() []string { + return b.SymbolicNames +} + +func (b *BaseRecognizer) GetLiteralNames() []string { + return b.LiteralNames +} + +func (b *BaseRecognizer) GetState() int { + return b.state +} + +func (b *BaseRecognizer) SetState(v int) { + b.state = v +} + +//func (b *Recognizer) GetTokenTypeMap() { +// var 
tokenNames = b.GetTokenNames() +// if (tokenNames==nil) { +// panic("The current recognizer does not provide a list of token names.") +// } +// var result = tokenTypeMapCache[tokenNames] +// if(result==nil) { +// result = tokenNames.reduce(function(o, k, i) { o[k] = i }) +// result.EOF = TokenEOF +// tokenTypeMapCache[tokenNames] = result +// } +// return result +//} + +// Get a map from rule names to rule indexes. +// +//

    Used for XPath and tree pattern compilation.

    +func (b *BaseRecognizer) GetRuleIndexMap() map[string]int { + + panic("Method not defined!") + // var ruleNames = b.GetRuleNames() + // if (ruleNames==nil) { + // panic("The current recognizer does not provide a list of rule names.") + // } + // + // var result = ruleIndexMapCache[ruleNames] + // if(result==nil) { + // result = ruleNames.reduce(function(o, k, i) { o[k] = i }) + // ruleIndexMapCache[ruleNames] = result + // } + // return result +} + +func (b *BaseRecognizer) GetTokenType(tokenName string) int { + panic("Method not defined!") + // var ttype = b.GetTokenTypeMap()[tokenName] + // if (ttype !=nil) { + // return ttype + // } else { + // return TokenInvalidType + // } +} + +//func (b *Recognizer) GetTokenTypeMap() map[string]int { +// Vocabulary vocabulary = getVocabulary() +// +// Synchronized (tokenTypeMapCache) { +// Map result = tokenTypeMapCache.Get(vocabulary) +// if (result == null) { +// result = new HashMap() +// for (int i = 0; i < GetATN().maxTokenType; i++) { +// String literalName = vocabulary.getLiteralName(i) +// if (literalName != null) { +// result.put(literalName, i) +// } +// +// String symbolicName = vocabulary.GetSymbolicName(i) +// if (symbolicName != null) { +// result.put(symbolicName, i) +// } +// } +// +// result.put("EOF", Token.EOF) +// result = Collections.unmodifiableMap(result) +// tokenTypeMapCache.put(vocabulary, result) +// } +// +// return result +// } +//} + +// What is the error header, normally line/character position information?// +func (b *BaseRecognizer) GetErrorHeader(e RecognitionException) string { + line := e.GetOffendingToken().GetLine() + column := e.GetOffendingToken().GetColumn() + return "line " + strconv.Itoa(line) + ":" + strconv.Itoa(column) +} + +// How should a token be displayed in an error message? The default +// +// is to display just the text, but during development you might +// want to have a lot of information spit out. 
Override in that case +// to use t.String() (which, for CommonToken, dumps everything about +// the token). This is better than forcing you to override a method in +// your token objects because you don't have to go modify your lexer +// so that it creates a NewJava type. +// +// @deprecated This method is not called by the ANTLR 4 Runtime. Specific +// implementations of {@link ANTLRErrorStrategy} may provide a similar +// feature when necessary. For example, see +// {@link DefaultErrorStrategy//GetTokenErrorDisplay}. +func (b *BaseRecognizer) GetTokenErrorDisplay(t Token) string { + if t == nil { + return "" + } + s := t.GetText() + if s == "" { + if t.GetTokenType() == TokenEOF { + s = "" + } else { + s = "<" + strconv.Itoa(t.GetTokenType()) + ">" + } + } + s = strings.Replace(s, "\t", "\\t", -1) + s = strings.Replace(s, "\n", "\\n", -1) + s = strings.Replace(s, "\r", "\\r", -1) + + return "'" + s + "'" +} + +func (b *BaseRecognizer) GetErrorListenerDispatch() ErrorListener { + return NewProxyErrorListener(b.listeners) +} + +// subclass needs to override these if there are sempreds or actions +// that the ATN interp needs to execute +func (b *BaseRecognizer) Sempred(localctx RuleContext, ruleIndex int, actionIndex int) bool { + return true +} + +func (b *BaseRecognizer) Precpred(localctx RuleContext, precedence int) bool { + return true +} diff --git a/runtime/Go/antlr/v4/rule_context.go b/runtime/Go/antlr/v4/rule_context.go new file mode 100644 index 0000000000..210699ba23 --- /dev/null +++ b/runtime/Go/antlr/v4/rule_context.go @@ -0,0 +1,114 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +// A rule context is a record of a single rule invocation. It knows +// which context invoked it, if any. If there is no parent context, then +// naturally the invoking state is not valid. 
The parent link +// provides a chain upwards from the current rule invocation to the root +// of the invocation tree, forming a stack. We actually carry no +// information about the rule associated with b context (except +// when parsing). We keep only the state number of the invoking state from +// the ATN submachine that invoked b. Contrast b with the s +// pointer inside ParserRuleContext that tracks the current state +// being "executed" for the current rule. +// +// The parent contexts are useful for computing lookahead sets and +// getting error information. +// +// These objects are used during parsing and prediction. +// For the special case of parsers, we use the subclass +// ParserRuleContext. +// +// @see ParserRuleContext +// + +type RuleContext interface { + RuleNode + + GetInvokingState() int + SetInvokingState(int) + + GetRuleIndex() int + IsEmpty() bool + + GetAltNumber() int + SetAltNumber(altNumber int) + + String([]string, RuleContext) string +} + +type BaseRuleContext struct { + parentCtx RuleContext + invokingState int + RuleIndex int +} + +func NewBaseRuleContext(parent RuleContext, invokingState int) *BaseRuleContext { + + rn := new(BaseRuleContext) + + // What context invoked b rule? + rn.parentCtx = parent + + // What state invoked the rule associated with b context? + // The "return address" is the followState of invokingState + // If parent is nil, b should be -1. 
+ if parent == nil { + rn.invokingState = -1 + } else { + rn.invokingState = invokingState + } + + return rn +} + +func (b *BaseRuleContext) GetBaseRuleContext() *BaseRuleContext { + return b +} + +func (b *BaseRuleContext) SetParent(v Tree) { + if v == nil { + b.parentCtx = nil + } else { + b.parentCtx = v.(RuleContext) + } +} + +func (b *BaseRuleContext) GetInvokingState() int { + return b.invokingState +} + +func (b *BaseRuleContext) SetInvokingState(t int) { + b.invokingState = t +} + +func (b *BaseRuleContext) GetRuleIndex() int { + return b.RuleIndex +} + +func (b *BaseRuleContext) GetAltNumber() int { + return ATNInvalidAltNumber +} + +func (b *BaseRuleContext) SetAltNumber(altNumber int) {} + +// A context is empty if there is no invoking state meaning nobody call +// current context. +func (b *BaseRuleContext) IsEmpty() bool { + return b.invokingState == -1 +} + +// Return the combined text of all child nodes. This method only considers +// tokens which have been added to the parse tree. +//

    +// Since tokens on hidden channels (e.g. whitespace or comments) are not +// added to the parse trees, they will not appear in the output of b +// method. +// + +func (b *BaseRuleContext) GetParent() Tree { + return b.parentCtx +} diff --git a/runtime/Go/antlr/v4/semantic_context.go b/runtime/Go/antlr/v4/semantic_context.go new file mode 100644 index 0000000000..f54926e760 --- /dev/null +++ b/runtime/Go/antlr/v4/semantic_context.go @@ -0,0 +1,469 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" + "strconv" +) + +// A tree structure used to record the semantic context in which +// an ATN configuration is valid. It's either a single predicate, +// a conjunction {@code p1&&p2}, or a sum of products {@code p1||p2}. +// +//

    I have scoped the {@link AND}, {@link OR}, and {@link Predicate} subclasses of +// {@link SemanticContext} within the scope of this outer class.

    +// + +type SemanticContext interface { + Equals(other Collectable[SemanticContext]) bool + Hash() int + + evaluate(parser Recognizer, outerContext RuleContext) bool + evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext + + String() string +} + +func SemanticContextandContext(a, b SemanticContext) SemanticContext { + if a == nil || a == SemanticContextNone { + return b + } + if b == nil || b == SemanticContextNone { + return a + } + result := NewAND(a, b) + if len(result.opnds) == 1 { + return result.opnds[0] + } + + return result +} + +func SemanticContextorContext(a, b SemanticContext) SemanticContext { + if a == nil { + return b + } + if b == nil { + return a + } + if a == SemanticContextNone || b == SemanticContextNone { + return SemanticContextNone + } + result := NewOR(a, b) + if len(result.opnds) == 1 { + return result.opnds[0] + } + + return result +} + +type Predicate struct { + ruleIndex int + predIndex int + isCtxDependent bool +} + +func NewPredicate(ruleIndex, predIndex int, isCtxDependent bool) *Predicate { + p := new(Predicate) + + p.ruleIndex = ruleIndex + p.predIndex = predIndex + p.isCtxDependent = isCtxDependent // e.g., $i ref in pred + return p +} + +//The default {@link SemanticContext}, which is semantically equivalent to +//a predicate of the form {@code {true}?}. 
+ +var SemanticContextNone = NewPredicate(-1, -1, false) + +func (p *Predicate) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext { + return p +} + +func (p *Predicate) evaluate(parser Recognizer, outerContext RuleContext) bool { + + var localctx RuleContext + + if p.isCtxDependent { + localctx = outerContext + } + + return parser.Sempred(localctx, p.ruleIndex, p.predIndex) +} + +func (p *Predicate) Equals(other Collectable[SemanticContext]) bool { + if p == other { + return true + } else if _, ok := other.(*Predicate); !ok { + return false + } else { + return p.ruleIndex == other.(*Predicate).ruleIndex && + p.predIndex == other.(*Predicate).predIndex && + p.isCtxDependent == other.(*Predicate).isCtxDependent + } +} + +func (p *Predicate) Hash() int { + h := murmurInit(0) + h = murmurUpdate(h, p.ruleIndex) + h = murmurUpdate(h, p.predIndex) + if p.isCtxDependent { + h = murmurUpdate(h, 1) + } else { + h = murmurUpdate(h, 0) + } + return murmurFinish(h, 3) +} + +func (p *Predicate) String() string { + return "{" + strconv.Itoa(p.ruleIndex) + ":" + strconv.Itoa(p.predIndex) + "}?" 
+} + +type PrecedencePredicate struct { + precedence int +} + +func NewPrecedencePredicate(precedence int) *PrecedencePredicate { + + p := new(PrecedencePredicate) + p.precedence = precedence + + return p +} + +func (p *PrecedencePredicate) evaluate(parser Recognizer, outerContext RuleContext) bool { + return parser.Precpred(outerContext, p.precedence) +} + +func (p *PrecedencePredicate) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext { + if parser.Precpred(outerContext, p.precedence) { + return SemanticContextNone + } + + return nil +} + +func (p *PrecedencePredicate) compareTo(other *PrecedencePredicate) int { + return p.precedence - other.precedence +} + +func (p *PrecedencePredicate) Equals(other Collectable[SemanticContext]) bool { + + var op *PrecedencePredicate + var ok bool + if op, ok = other.(*PrecedencePredicate); !ok { + return false + } + + if p == op { + return true + } + + return p.precedence == other.(*PrecedencePredicate).precedence +} + +func (p *PrecedencePredicate) Hash() int { + h := uint32(1) + h = 31*h + uint32(p.precedence) + return int(h) +} + +func (p *PrecedencePredicate) String() string { + return "{" + strconv.Itoa(p.precedence) + ">=prec}?" 
+} + +func PrecedencePredicatefilterPrecedencePredicates(set *JStore[SemanticContext, Comparator[SemanticContext]]) []*PrecedencePredicate { + result := make([]*PrecedencePredicate, 0) + + set.Each(func(v SemanticContext) bool { + if c2, ok := v.(*PrecedencePredicate); ok { + result = append(result, c2) + } + return true + }) + + return result +} + +// A semantic context which is true whenever none of the contained contexts +// is false.` + +type AND struct { + opnds []SemanticContext +} + +func NewAND(a, b SemanticContext) *AND { + + operands := NewJStore[SemanticContext, Comparator[SemanticContext]](&ObjEqComparator[SemanticContext]{}) + if aa, ok := a.(*AND); ok { + for _, o := range aa.opnds { + operands.Put(o) + } + } else { + operands.Put(a) + } + + if ba, ok := b.(*AND); ok { + for _, o := range ba.opnds { + operands.Put(o) + } + } else { + operands.Put(b) + } + precedencePredicates := PrecedencePredicatefilterPrecedencePredicates(operands) + if len(precedencePredicates) > 0 { + // interested in the transition with the lowest precedence + var reduced *PrecedencePredicate + + for _, p := range precedencePredicates { + if reduced == nil || p.precedence < reduced.precedence { + reduced = p + } + } + + operands.Put(reduced) + } + + vs := operands.Values() + opnds := make([]SemanticContext, len(vs)) + for i, v := range vs { + opnds[i] = v.(SemanticContext) + } + + and := new(AND) + and.opnds = opnds + + return and +} + +func (a *AND) Equals(other Collectable[SemanticContext]) bool { + if a == other { + return true + } + if _, ok := other.(*AND); !ok { + return false + } else { + for i, v := range other.(*AND).opnds { + if !a.opnds[i].Equals(v) { + return false + } + } + return true + } +} + +// {@inheritDoc} +// +//

    +// The evaluation of predicates by a context is short-circuiting, but +// unordered.

    +func (a *AND) evaluate(parser Recognizer, outerContext RuleContext) bool { + for i := 0; i < len(a.opnds); i++ { + if !a.opnds[i].evaluate(parser, outerContext) { + return false + } + } + return true +} + +func (a *AND) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext { + differs := false + operands := make([]SemanticContext, 0) + + for i := 0; i < len(a.opnds); i++ { + context := a.opnds[i] + evaluated := context.evalPrecedence(parser, outerContext) + differs = differs || (evaluated != context) + if evaluated == nil { + // The AND context is false if any element is false + return nil + } else if evaluated != SemanticContextNone { + // Reduce the result by Skipping true elements + operands = append(operands, evaluated) + } + } + if !differs { + return a + } + + if len(operands) == 0 { + // all elements were true, so the AND context is true + return SemanticContextNone + } + + var result SemanticContext + + for _, o := range operands { + if result == nil { + result = o + } else { + result = SemanticContextandContext(result, o) + } + } + + return result +} + +func (a *AND) Hash() int { + h := murmurInit(37) // Init with a value different from OR + for _, op := range a.opnds { + h = murmurUpdate(h, op.Hash()) + } + return murmurFinish(h, len(a.opnds)) +} + +func (a *OR) Hash() int { + h := murmurInit(41) // Init with a value different from AND + for _, op := range a.opnds { + h = murmurUpdate(h, op.Hash()) + } + return murmurFinish(h, len(a.opnds)) +} + +func (a *AND) String() string { + s := "" + + for _, o := range a.opnds { + s += "&& " + fmt.Sprint(o) + } + + if len(s) > 3 { + return s[0:3] + } + + return s +} + +// +// A semantic context which is true whenever at least one of the contained +// contexts is true. 
+// + +type OR struct { + opnds []SemanticContext +} + +func NewOR(a, b SemanticContext) *OR { + + operands := NewJStore[SemanticContext, Comparator[SemanticContext]](&ObjEqComparator[SemanticContext]{}) + if aa, ok := a.(*OR); ok { + for _, o := range aa.opnds { + operands.Put(o) + } + } else { + operands.Put(a) + } + + if ba, ok := b.(*OR); ok { + for _, o := range ba.opnds { + operands.Put(o) + } + } else { + operands.Put(b) + } + precedencePredicates := PrecedencePredicatefilterPrecedencePredicates(operands) + if len(precedencePredicates) > 0 { + // interested in the transition with the lowest precedence + var reduced *PrecedencePredicate + + for _, p := range precedencePredicates { + if reduced == nil || p.precedence > reduced.precedence { + reduced = p + } + } + + operands.Put(reduced) + } + + vs := operands.Values() + + opnds := make([]SemanticContext, len(vs)) + for i, v := range vs { + opnds[i] = v.(SemanticContext) + } + + o := new(OR) + o.opnds = opnds + + return o +} + +func (o *OR) Equals(other Collectable[SemanticContext]) bool { + if o == other { + return true + } else if _, ok := other.(*OR); !ok { + return false + } else { + for i, v := range other.(*OR).opnds { + if !o.opnds[i].Equals(v) { + return false + } + } + return true + } +} + +//

    +// The evaluation of predicates by o context is short-circuiting, but +// unordered.

    +func (o *OR) evaluate(parser Recognizer, outerContext RuleContext) bool { + for i := 0; i < len(o.opnds); i++ { + if o.opnds[i].evaluate(parser, outerContext) { + return true + } + } + return false +} + +func (o *OR) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext { + differs := false + operands := make([]SemanticContext, 0) + for i := 0; i < len(o.opnds); i++ { + context := o.opnds[i] + evaluated := context.evalPrecedence(parser, outerContext) + differs = differs || (evaluated != context) + if evaluated == SemanticContextNone { + // The OR context is true if any element is true + return SemanticContextNone + } else if evaluated != nil { + // Reduce the result by Skipping false elements + operands = append(operands, evaluated) + } + } + if !differs { + return o + } + if len(operands) == 0 { + // all elements were false, so the OR context is false + return nil + } + var result SemanticContext + + for _, o := range operands { + if result == nil { + result = o + } else { + result = SemanticContextorContext(result, o) + } + } + + return result +} + +func (o *OR) String() string { + s := "" + + for _, o := range o.opnds { + s += "|| " + fmt.Sprint(o) + } + + if len(s) > 3 { + return s[0:3] + } + + return s +} diff --git a/runtime/Go/antlr/v4/testing_assert_test.go b/runtime/Go/antlr/v4/testing_assert_test.go new file mode 100644 index 0000000000..4a402a34f3 --- /dev/null +++ b/runtime/Go/antlr/v4/testing_assert_test.go @@ -0,0 +1,98 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. 
+ +// These assert functions are borrowed from https://github.com/stretchr/testify/ (MIT License) + +package antlr + +import ( + "fmt" + "reflect" + "testing" +) + +type assert struct { + t *testing.T +} + +func assertNew(t *testing.T) *assert { + return &assert{ + t: t, + } +} + +func (a *assert) Equal(expected, actual interface{}) bool { + if !objectsAreEqual(expected, actual) { + return a.Fail(fmt.Sprintf("Not equal:\n"+ + "expected: %#v\n"+ + " actual: %#v\n", expected, actual)) + } + return true +} + +func objectsAreEqual(expected, actual interface{}) bool { + if expected == nil || actual == nil { + return expected == actual + } + return reflect.DeepEqual(expected, actual) +} + +func (a *assert) Nil(object interface{}) bool { + if isNil(object) { + return true + } + return a.Fail(fmt.Sprintf("Expected nil, but got: %#v", object)) +} + +func (a *assert) NotNil(object interface{}) bool { + if !isNil(object) { + return true + } + return a.Fail("Expected value not to be nil.") +} + +// isNil checks if a specified object is nil or not, without Failing. +func isNil(object interface{}) bool { + if object == nil { + return true + } + + value := reflect.ValueOf(object) + kind := value.Kind() + if kind >= reflect.Chan && kind <= reflect.Slice && value.IsNil() { + return true + } + + return false +} + +func (a *assert) Panics(f func()) bool { + if funcDidPanic, panicValue := didPanic(f); !funcDidPanic { + return a.Fail(fmt.Sprintf("func %p should panic\n\r\tPanic value:\t%v", f, panicValue)) + } + + return true +} + +// Fail reports a failure through +func (a *assert) Fail(failureMessage string) bool { + a.t.Errorf("%s", failureMessage) + return false +} + +// didPanic returns true if the function passed to it panics. Otherwise, it returns false. 
+func didPanic(f func()) (bool, interface{}) { + didPanic := false + var message interface{} + func() { + defer func() { + if message = recover(); message != nil { + didPanic = true + } + }() + // call the target function + f() + }() + return didPanic, message +} diff --git a/runtime/Go/antlr/v4/testing_lexer_b_test.go b/runtime/Go/antlr/v4/testing_lexer_b_test.go new file mode 100644 index 0000000000..2485abf780 --- /dev/null +++ b/runtime/Go/antlr/v4/testing_lexer_b_test.go @@ -0,0 +1,137 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +/* +LexerB is a lexer for testing purpose. + +This file is generated from this grammer. + +lexer grammar LexerB; + +ID : 'a'..'z'+; +INT : '0'..'9'+; +SEMI : ';'; +ASSIGN : '='; +PLUS : '+'; +MULT : '*'; +WS : ' '+; +*/ + +import ( + "fmt" + "sync" + "unicode" +) + +// Suppress unused import error +var _ = fmt.Printf +var _ = sync.Once{} +var _ = unicode.IsLetter + +type LexerB struct { + *BaseLexer + channelNames []string + modeNames []string + // TODO: EOF string +} + +var lexerbLexerStaticData struct { + once sync.Once + serializedATN []int32 + channelNames []string + modeNames []string + literalNames []string + symbolicNames []string + ruleNames []string + predictionContextCache *PredictionContextCache + atn *ATN + decisionToDFA []*DFA +} + +func lexerbLexerInit() { + staticData := &lexerbLexerStaticData + staticData.channelNames = []string{ + "DEFAULT_TOKEN_CHANNEL", "HIDDEN", + } + staticData.modeNames = []string{ + "DEFAULT_MODE", + } + staticData.literalNames = []string{ + "", "", "", "';'", "'='", "'+'", "'*'", + } + staticData.symbolicNames = []string{ + "", "ID", "INT", "SEMI", "ASSIGN", "PLUS", "MULT", "WS", + } + staticData.ruleNames = []string{ + "ID", "INT", "SEMI", "ASSIGN", "PLUS", "MULT", "WS", + } + staticData.predictionContextCache = 
NewPredictionContextCache() + staticData.serializedATN = []int32{ + 4, 0, 7, 38, 6, -1, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, + 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 1, 0, 4, 0, 17, 8, 0, 11, 0, 12, 0, 18, + 1, 1, 4, 1, 22, 8, 1, 11, 1, 12, 1, 23, 1, 2, 1, 2, 1, 3, 1, 3, 1, 4, 1, + 4, 1, 5, 1, 5, 1, 6, 4, 6, 35, 8, 6, 11, 6, 12, 6, 36, 0, 0, 7, 1, 1, 3, + 2, 5, 3, 7, 4, 9, 5, 11, 6, 13, 7, 1, 0, 0, 40, 0, 1, 1, 0, 0, 0, 0, 3, + 1, 0, 0, 0, 0, 5, 1, 0, 0, 0, 0, 7, 1, 0, 0, 0, 0, 9, 1, 0, 0, 0, 0, 11, + 1, 0, 0, 0, 0, 13, 1, 0, 0, 0, 1, 16, 1, 0, 0, 0, 3, 21, 1, 0, 0, 0, 5, + 25, 1, 0, 0, 0, 7, 27, 1, 0, 0, 0, 9, 29, 1, 0, 0, 0, 11, 31, 1, 0, 0, + 0, 13, 34, 1, 0, 0, 0, 15, 17, 2, 97, 122, 0, 16, 15, 1, 0, 0, 0, 17, 18, + 1, 0, 0, 0, 18, 16, 1, 0, 0, 0, 18, 19, 1, 0, 0, 0, 19, 2, 1, 0, 0, 0, + 20, 22, 2, 48, 57, 0, 21, 20, 1, 0, 0, 0, 22, 23, 1, 0, 0, 0, 23, 21, 1, + 0, 0, 0, 23, 24, 1, 0, 0, 0, 24, 4, 1, 0, 0, 0, 25, 26, 5, 59, 0, 0, 26, + 6, 1, 0, 0, 0, 27, 28, 5, 61, 0, 0, 28, 8, 1, 0, 0, 0, 29, 30, 5, 43, 0, + 0, 30, 10, 1, 0, 0, 0, 31, 32, 5, 42, 0, 0, 32, 12, 1, 0, 0, 0, 33, 35, + 5, 32, 0, 0, 34, 33, 1, 0, 0, 0, 35, 36, 1, 0, 0, 0, 36, 34, 1, 0, 0, 0, + 36, 37, 1, 0, 0, 0, 37, 14, 1, 0, 0, 0, 4, 0, 18, 23, 36, 0, + } + deserializer := NewATNDeserializer(nil) + staticData.atn = deserializer.Deserialize(staticData.serializedATN) + atn := staticData.atn + staticData.decisionToDFA = make([]*DFA, len(atn.DecisionToState)) + decisionToDFA := staticData.decisionToDFA + for index, state := range atn.DecisionToState { + decisionToDFA[index] = NewDFA(state, index) + } +} + +// LexerBInit initializes any static state used to implement LexerB. By default the +// static state used to implement the lexer is lazily initialized during the first call to +// NewLexerB(). You can call this function if you wish to initialize the static state ahead +// of time. 
+func LexerBInit() { + staticData := &lexerbLexerStaticData + staticData.once.Do(lexerbLexerInit) +} + +// NewLexerB produces a new lexer instance for the optional input antlr.CharStream. +func NewLexerB(input CharStream) *LexerB { + LexerBInit() + l := new(LexerB) + + l.BaseLexer = NewBaseLexer(input) + staticData := &lexerbLexerStaticData + l.Interpreter = NewLexerATNSimulator(l, staticData.atn, staticData.decisionToDFA, staticData.predictionContextCache) + l.channelNames = staticData.channelNames + l.modeNames = staticData.modeNames + l.RuleNames = staticData.ruleNames + l.LiteralNames = staticData.literalNames + l.SymbolicNames = staticData.symbolicNames + l.GrammarFileName = "LexerB.g4" + // TODO: l.EOF = antlr.TokenEOF + + return l +} + +// LexerB tokens. +const ( + LexerBID = 1 + LexerBINT = 2 + LexerBSEMI = 3 + LexerBASSIGN = 4 + LexerBPLUS = 5 + LexerBMULT = 6 + LexerBWS = 7 +) diff --git a/runtime/Go/antlr/v4/testing_util_test.go b/runtime/Go/antlr/v4/testing_util_test.go new file mode 100644 index 0000000000..20428831b3 --- /dev/null +++ b/runtime/Go/antlr/v4/testing_util_test.go @@ -0,0 +1,30 @@ +package antlr + +import ( + "fmt" + "strings" +) + +// newTestCommonToken create common token with tokentype, text and channel +// notice: test purpose only +func newTestCommonToken(tokenType int, text string, channel int) *CommonToken { + t := new(CommonToken) + t.BaseToken = new(BaseToken) + t.tokenType = tokenType + t.channel = channel + t.text = text + t.line = 0 + t.column = -1 + return t +} + +// tokensToString returnes []Tokens string +// notice: test purpose only +func tokensToString(tokens []Token) string { + buf := make([]string, len(tokens)) + for i, token := range tokens { + buf[i] = fmt.Sprintf("%v", token) + } + + return "[" + strings.Join(buf, ", ") + "]" +} diff --git a/runtime/Go/antlr/v4/token.go b/runtime/Go/antlr/v4/token.go new file mode 100644 index 0000000000..f73b06bc6a --- /dev/null +++ b/runtime/Go/antlr/v4/token.go @@ -0,0 +1,209 @@ 
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "strconv" + "strings" +) + +type TokenSourceCharStreamPair struct { + tokenSource TokenSource + charStream CharStream +} + +// A token has properties: text, type, line, character position in the line +// (so we can ignore tabs), token channel, index, and source from which +// we obtained this token. + +type Token interface { + GetSource() *TokenSourceCharStreamPair + GetTokenType() int + GetChannel() int + GetStart() int + GetStop() int + GetLine() int + GetColumn() int + + GetText() string + SetText(s string) + + GetTokenIndex() int + SetTokenIndex(v int) + + GetTokenSource() TokenSource + GetInputStream() CharStream +} + +type BaseToken struct { + source *TokenSourceCharStreamPair + tokenType int // token type of the token + channel int // The parser ignores everything not on DEFAULT_CHANNEL + start int // optional return -1 if not implemented. + stop int // optional return -1 if not implemented. + tokenIndex int // from 0..n-1 of the token object in the input stream + line int // line=1..n of the 1st character + column int // beginning of the line at which it occurs, 0..n-1 + text string // text of the token. + readOnly bool +} + +const ( + TokenInvalidType = 0 + + // During lookahead operations, this "token" signifies we hit rule end ATN state + // and did not follow it despite needing to. + TokenEpsilon = -2 + + TokenMinUserTokenType = 1 + + TokenEOF = -1 + + // All tokens go to the parser (unless Skip() is called in that rule) + // on a particular "channel". The parser tunes to a particular channel + // so that whitespace etc... can go to the parser on a "hidden" channel. + + TokenDefaultChannel = 0 + + // Anything on different channel than DEFAULT_CHANNEL is not parsed + // by parser. 
+ + TokenHiddenChannel = 1 +) + +func (b *BaseToken) GetChannel() int { + return b.channel +} + +func (b *BaseToken) GetStart() int { + return b.start +} + +func (b *BaseToken) GetStop() int { + return b.stop +} + +func (b *BaseToken) GetLine() int { + return b.line +} + +func (b *BaseToken) GetColumn() int { + return b.column +} + +func (b *BaseToken) GetTokenType() int { + return b.tokenType +} + +func (b *BaseToken) GetSource() *TokenSourceCharStreamPair { + return b.source +} + +func (b *BaseToken) GetTokenIndex() int { + return b.tokenIndex +} + +func (b *BaseToken) SetTokenIndex(v int) { + b.tokenIndex = v +} + +func (b *BaseToken) GetTokenSource() TokenSource { + return b.source.tokenSource +} + +func (b *BaseToken) GetInputStream() CharStream { + return b.source.charStream +} + +type CommonToken struct { + *BaseToken +} + +func NewCommonToken(source *TokenSourceCharStreamPair, tokenType, channel, start, stop int) *CommonToken { + + t := new(CommonToken) + + t.BaseToken = new(BaseToken) + + t.source = source + t.tokenType = tokenType + t.channel = channel + t.start = start + t.stop = stop + t.tokenIndex = -1 + if t.source.tokenSource != nil { + t.line = source.tokenSource.GetLine() + t.column = source.tokenSource.GetCharPositionInLine() + } else { + t.column = -1 + } + return t +} + +// An empty {@link Pair} which is used as the default value of +// {@link //source} for tokens that do not have a source. + +//CommonToken.EMPTY_SOURCE = [ nil, nil ] + +// Constructs a New{@link CommonToken} as a copy of another {@link Token}. +// +//

    +// If {@code oldToken} is also a {@link CommonToken} instance, the newly +// constructed token will share a reference to the {@link //text} field and +// the {@link Pair} stored in {@link //source}. Otherwise, {@link //text} will +// be assigned the result of calling {@link //GetText}, and {@link //source} +// will be constructed from the result of {@link Token//GetTokenSource} and +// {@link Token//GetInputStream}.

    +// +// @param oldToken The token to copy. +func (c *CommonToken) clone() *CommonToken { + t := NewCommonToken(c.source, c.tokenType, c.channel, c.start, c.stop) + t.tokenIndex = c.GetTokenIndex() + t.line = c.GetLine() + t.column = c.GetColumn() + t.text = c.GetText() + return t +} + +func (c *CommonToken) GetText() string { + if c.text != "" { + return c.text + } + input := c.GetInputStream() + if input == nil { + return "" + } + n := input.Size() + if c.start < n && c.stop < n { + return input.GetTextFromInterval(NewInterval(c.start, c.stop)) + } + return "" +} + +func (c *CommonToken) SetText(text string) { + c.text = text +} + +func (c *CommonToken) String() string { + txt := c.GetText() + if txt != "" { + txt = strings.Replace(txt, "\n", "\\n", -1) + txt = strings.Replace(txt, "\r", "\\r", -1) + txt = strings.Replace(txt, "\t", "\\t", -1) + } else { + txt = "" + } + + var ch string + if c.channel > 0 { + ch = ",channel=" + strconv.Itoa(c.channel) + } else { + ch = "" + } + + return "[@" + strconv.Itoa(c.tokenIndex) + "," + strconv.Itoa(c.start) + ":" + strconv.Itoa(c.stop) + "='" + + txt + "',<" + strconv.Itoa(c.tokenType) + ">" + + ch + "," + strconv.Itoa(c.line) + ":" + strconv.Itoa(c.column) + "]" +} diff --git a/runtime/Go/antlr/v4/token_source.go b/runtime/Go/antlr/v4/token_source.go new file mode 100644 index 0000000000..a3f36eaa67 --- /dev/null +++ b/runtime/Go/antlr/v4/token_source.go @@ -0,0 +1,17 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. 
+ +package antlr + +type TokenSource interface { + NextToken() Token + Skip() + More() + GetLine() int + GetCharPositionInLine() int + GetInputStream() CharStream + GetSourceName() string + setTokenFactory(factory TokenFactory) + GetTokenFactory() TokenFactory +} diff --git a/runtime/Go/antlr/v4/token_stream.go b/runtime/Go/antlr/v4/token_stream.go new file mode 100644 index 0000000000..1527d43f60 --- /dev/null +++ b/runtime/Go/antlr/v4/token_stream.go @@ -0,0 +1,20 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +type TokenStream interface { + IntStream + + LT(k int) Token + + Get(index int) Token + GetTokenSource() TokenSource + SetTokenSource(TokenSource) + + GetAllText() string + GetTextFromInterval(*Interval) string + GetTextFromRuleContext(RuleContext) string + GetTextFromTokens(Token, Token) string +} diff --git a/runtime/Go/antlr/v4/tokenstream_rewriter.go b/runtime/Go/antlr/v4/tokenstream_rewriter.go new file mode 100644 index 0000000000..b3e38af344 --- /dev/null +++ b/runtime/Go/antlr/v4/tokenstream_rewriter.go @@ -0,0 +1,659 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "bytes" + "fmt" +) + +// +// Useful for rewriting out a buffered input token stream after doing some +// augmentation or other manipulations on it. + +//

    +// You can insert stuff, replace, and delete chunks. Note that the operations +// are done lazily--only if you convert the buffer to a {@link String} with +// {@link TokenStream#getText()}. This is very efficient because you are not +// moving data around all the time. As the buffer of tokens is converted to +// strings, the {@link #getText()} method(s) scan the input token stream and +// check to see if there is an operation at the current index. If so, the +// operation is done and then normal {@link String} rendering continues on the +// buffer. This is like having multiple Turing machine instruction streams +// (programs) operating on a single input tape. :)

    +//

    + +// This rewriter makes no modifications to the token stream. It does not ask the +// stream to fill itself up nor does it advance the input cursor. The token +// stream {@link TokenStream#index()} will return the same value before and +// after any {@link #getText()} call.

    + +//

    +// The rewriter only works on tokens that you have in the buffer and ignores the +// current input cursor. If you are buffering tokens on-demand, calling +// {@link #getText()} halfway through the input will only do rewrites for those +// tokens in the first half of the file.

    + +//

    +// Since the operations are done lazily at {@link #getText}-time, operations do +// not screw up the token index values. That is, an insert operation at token +// index {@code i} does not change the index values for tokens +// {@code i}+1..n-1.

    + +//

    +// Because operations never actually alter the buffer, you may always get the +// original token stream back without undoing anything. Since the instructions +// are queued up, you can easily simulate transactions and roll back any changes +// if there is an error just by removing instructions. For example,

    + +//
    +// CharStream input = new ANTLRFileStream("input");
    +// TLexer lex = new TLexer(input);
    +// CommonTokenStream tokens = new CommonTokenStream(lex);
    +// T parser = new T(tokens);
    +// TokenStreamRewriter rewriter = new TokenStreamRewriter(tokens);
    +// parser.startRule();
    +// 
    + +//

    +// Then in the rules, you can execute (assuming rewriter is visible):

    + +//
    +// Token t,u;
    +// ...
    +// rewriter.insertAfter(t, "text to put after t");}
    +// rewriter.insertAfter(u, "text after u");}
    +// System.out.println(rewriter.getText());
    +// 
    + +//

    +// You can also have multiple "instruction streams" and get multiple rewrites +// from a single pass over the input. Just name the instruction streams and use +// that name again when printing the buffer. This could be useful for generating +// a C file and also its header file--all from the same buffer:

    + +//
    +// rewriter.insertAfter("pass1", t, "text to put after t");}
    +// rewriter.insertAfter("pass2", u, "text after u");}
    +// System.out.println(rewriter.getText("pass1"));
    +// System.out.println(rewriter.getText("pass2"));
    +// 
    + +//

    +// If you don't use named rewrite streams, a "default" stream is used as the +// first example shows.

    + +const ( + Default_Program_Name = "default" + Program_Init_Size = 100 + Min_Token_Index = 0 +) + +// Define the rewrite operation hierarchy + +type RewriteOperation interface { + // Execute the rewrite operation by possibly adding to the buffer. + // Return the index of the next token to operate on. + Execute(buffer *bytes.Buffer) int + String() string + GetInstructionIndex() int + GetIndex() int + GetText() string + GetOpName() string + GetTokens() TokenStream + SetInstructionIndex(val int) + SetIndex(int) + SetText(string) + SetOpName(string) + SetTokens(TokenStream) +} + +type BaseRewriteOperation struct { + //Current index of rewrites list + instruction_index int + //Token buffer index + index int + //Substitution text + text string + //Actual operation name + op_name string + //Pointer to token steam + tokens TokenStream +} + +func (op *BaseRewriteOperation) GetInstructionIndex() int { + return op.instruction_index +} + +func (op *BaseRewriteOperation) GetIndex() int { + return op.index +} + +func (op *BaseRewriteOperation) GetText() string { + return op.text +} + +func (op *BaseRewriteOperation) GetOpName() string { + return op.op_name +} + +func (op *BaseRewriteOperation) GetTokens() TokenStream { + return op.tokens +} + +func (op *BaseRewriteOperation) SetInstructionIndex(val int) { + op.instruction_index = val +} + +func (op *BaseRewriteOperation) SetIndex(val int) { + op.index = val +} + +func (op *BaseRewriteOperation) SetText(val string) { + op.text = val +} + +func (op *BaseRewriteOperation) SetOpName(val string) { + op.op_name = val +} + +func (op *BaseRewriteOperation) SetTokens(val TokenStream) { + op.tokens = val +} + +func (op *BaseRewriteOperation) Execute(buffer *bytes.Buffer) int { + return op.index +} + +func (op *BaseRewriteOperation) String() string { + return fmt.Sprintf("<%s@%d:\"%s\">", + op.op_name, + op.tokens.Get(op.GetIndex()), + op.text, + ) + +} + +type InsertBeforeOp struct { + BaseRewriteOperation +} + +func 
NewInsertBeforeOp(index int, text string, stream TokenStream) *InsertBeforeOp { + return &InsertBeforeOp{BaseRewriteOperation: BaseRewriteOperation{ + index: index, + text: text, + op_name: "InsertBeforeOp", + tokens: stream, + }} +} + +func (op *InsertBeforeOp) Execute(buffer *bytes.Buffer) int { + buffer.WriteString(op.text) + if op.tokens.Get(op.index).GetTokenType() != TokenEOF { + buffer.WriteString(op.tokens.Get(op.index).GetText()) + } + return op.index + 1 +} + +func (op *InsertBeforeOp) String() string { + return op.BaseRewriteOperation.String() +} + +// Distinguish between insert after/before to do the "insert afters" +// first and then the "insert befores" at same index. Implementation +// of "insert after" is "insert before index+1". + +type InsertAfterOp struct { + BaseRewriteOperation +} + +func NewInsertAfterOp(index int, text string, stream TokenStream) *InsertAfterOp { + return &InsertAfterOp{BaseRewriteOperation: BaseRewriteOperation{ + index: index + 1, + text: text, + tokens: stream, + }} +} + +func (op *InsertAfterOp) Execute(buffer *bytes.Buffer) int { + buffer.WriteString(op.text) + if op.tokens.Get(op.index).GetTokenType() != TokenEOF { + buffer.WriteString(op.tokens.Get(op.index).GetText()) + } + return op.index + 1 +} + +func (op *InsertAfterOp) String() string { + return op.BaseRewriteOperation.String() +} + +// I'm going to try replacing range from x..y with (y-x)+1 ReplaceOp +// instructions. 
+type ReplaceOp struct { + BaseRewriteOperation + LastIndex int +} + +func NewReplaceOp(from, to int, text string, stream TokenStream) *ReplaceOp { + return &ReplaceOp{ + BaseRewriteOperation: BaseRewriteOperation{ + index: from, + text: text, + op_name: "ReplaceOp", + tokens: stream, + }, + LastIndex: to, + } +} + +func (op *ReplaceOp) Execute(buffer *bytes.Buffer) int { + if op.text != "" { + buffer.WriteString(op.text) + } + return op.LastIndex + 1 +} + +func (op *ReplaceOp) String() string { + if op.text == "" { + return fmt.Sprintf("", + op.tokens.Get(op.index), op.tokens.Get(op.LastIndex)) + } + return fmt.Sprintf("", + op.tokens.Get(op.index), op.tokens.Get(op.LastIndex), op.text) +} + +type TokenStreamRewriter struct { + //Our source stream + tokens TokenStream + // You may have multiple, named streams of rewrite operations. + // I'm calling these things "programs." + // Maps String (name) → rewrite (List) + programs map[string][]RewriteOperation + last_rewrite_token_indexes map[string]int +} + +func NewTokenStreamRewriter(tokens TokenStream) *TokenStreamRewriter { + return &TokenStreamRewriter{ + tokens: tokens, + programs: map[string][]RewriteOperation{ + Default_Program_Name: make([]RewriteOperation, 0, Program_Init_Size), + }, + last_rewrite_token_indexes: map[string]int{}, + } +} + +func (tsr *TokenStreamRewriter) GetTokenStream() TokenStream { + return tsr.tokens +} + +// Rollback the instruction stream for a program so that +// the indicated instruction (via instructionIndex) is no +// longer in the stream. UNTESTED! 
+func (tsr *TokenStreamRewriter) Rollback(program_name string, instruction_index int) { + is, ok := tsr.programs[program_name] + if ok { + tsr.programs[program_name] = is[Min_Token_Index:instruction_index] + } +} + +func (tsr *TokenStreamRewriter) RollbackDefault(instruction_index int) { + tsr.Rollback(Default_Program_Name, instruction_index) +} + +// Reset the program so that no instructions exist +func (tsr *TokenStreamRewriter) DeleteProgram(program_name string) { + tsr.Rollback(program_name, Min_Token_Index) //TODO: double test on that cause lower bound is not included +} + +func (tsr *TokenStreamRewriter) DeleteProgramDefault() { + tsr.DeleteProgram(Default_Program_Name) +} + +func (tsr *TokenStreamRewriter) InsertAfter(program_name string, index int, text string) { + // to insert after, just insert before next index (even if past end) + var op RewriteOperation = NewInsertAfterOp(index, text, tsr.tokens) + rewrites := tsr.GetProgram(program_name) + op.SetInstructionIndex(len(rewrites)) + tsr.AddToProgram(program_name, op) +} + +func (tsr *TokenStreamRewriter) InsertAfterDefault(index int, text string) { + tsr.InsertAfter(Default_Program_Name, index, text) +} + +func (tsr *TokenStreamRewriter) InsertAfterToken(program_name string, token Token, text string) { + tsr.InsertAfter(program_name, token.GetTokenIndex(), text) +} + +func (tsr *TokenStreamRewriter) InsertBefore(program_name string, index int, text string) { + var op RewriteOperation = NewInsertBeforeOp(index, text, tsr.tokens) + rewrites := tsr.GetProgram(program_name) + op.SetInstructionIndex(len(rewrites)) + tsr.AddToProgram(program_name, op) +} + +func (tsr *TokenStreamRewriter) InsertBeforeDefault(index int, text string) { + tsr.InsertBefore(Default_Program_Name, index, text) +} + +func (tsr *TokenStreamRewriter) InsertBeforeToken(program_name string, token Token, text string) { + tsr.InsertBefore(program_name, token.GetTokenIndex(), text) +} + +func (tsr *TokenStreamRewriter) Replace(program_name 
string, from, to int, text string) { + if from > to || from < 0 || to < 0 || to >= tsr.tokens.Size() { + panic(fmt.Sprintf("replace: range invalid: %d..%d(size=%d)", + from, to, tsr.tokens.Size())) + } + var op RewriteOperation = NewReplaceOp(from, to, text, tsr.tokens) + rewrites := tsr.GetProgram(program_name) + op.SetInstructionIndex(len(rewrites)) + tsr.AddToProgram(program_name, op) +} + +func (tsr *TokenStreamRewriter) ReplaceDefault(from, to int, text string) { + tsr.Replace(Default_Program_Name, from, to, text) +} + +func (tsr *TokenStreamRewriter) ReplaceDefaultPos(index int, text string) { + tsr.ReplaceDefault(index, index, text) +} + +func (tsr *TokenStreamRewriter) ReplaceToken(program_name string, from, to Token, text string) { + tsr.Replace(program_name, from.GetTokenIndex(), to.GetTokenIndex(), text) +} + +func (tsr *TokenStreamRewriter) ReplaceTokenDefault(from, to Token, text string) { + tsr.ReplaceToken(Default_Program_Name, from, to, text) +} + +func (tsr *TokenStreamRewriter) ReplaceTokenDefaultPos(index Token, text string) { + tsr.ReplaceTokenDefault(index, index, text) +} + +func (tsr *TokenStreamRewriter) Delete(program_name string, from, to int) { + tsr.Replace(program_name, from, to, "") +} + +func (tsr *TokenStreamRewriter) DeleteDefault(from, to int) { + tsr.Delete(Default_Program_Name, from, to) +} + +func (tsr *TokenStreamRewriter) DeleteDefaultPos(index int) { + tsr.DeleteDefault(index, index) +} + +func (tsr *TokenStreamRewriter) DeleteToken(program_name string, from, to Token) { + tsr.ReplaceToken(program_name, from, to, "") +} + +func (tsr *TokenStreamRewriter) DeleteTokenDefault(from, to Token) { + tsr.DeleteToken(Default_Program_Name, from, to) +} + +func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndex(program_name string) int { + i, ok := tsr.last_rewrite_token_indexes[program_name] + if !ok { + return -1 + } + return i +} + +func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndexDefault() int { + return 
tsr.GetLastRewriteTokenIndex(Default_Program_Name) +} + +func (tsr *TokenStreamRewriter) SetLastRewriteTokenIndex(program_name string, i int) { + tsr.last_rewrite_token_indexes[program_name] = i +} + +func (tsr *TokenStreamRewriter) InitializeProgram(name string) []RewriteOperation { + is := make([]RewriteOperation, 0, Program_Init_Size) + tsr.programs[name] = is + return is +} + +func (tsr *TokenStreamRewriter) AddToProgram(name string, op RewriteOperation) { + is := tsr.GetProgram(name) + is = append(is, op) + tsr.programs[name] = is +} + +func (tsr *TokenStreamRewriter) GetProgram(name string) []RewriteOperation { + is, ok := tsr.programs[name] + if !ok { + is = tsr.InitializeProgram(name) + } + return is +} + +// Return the text from the original tokens altered per the +// instructions given to this rewriter. +func (tsr *TokenStreamRewriter) GetTextDefault() string { + return tsr.GetText( + Default_Program_Name, + NewInterval(0, tsr.tokens.Size()-1)) +} + +// Return the text from the original tokens altered per the +// instructions given to this rewriter. 
+func (tsr *TokenStreamRewriter) GetText(program_name string, interval *Interval) string { + rewrites := tsr.programs[program_name] + start := interval.Start + stop := interval.Stop + // ensure start/end are in range + stop = min(stop, tsr.tokens.Size()-1) + start = max(start, 0) + if rewrites == nil || len(rewrites) == 0 { + return tsr.tokens.GetTextFromInterval(interval) // no instructions to execute + } + buf := bytes.Buffer{} + // First, optimize instruction stream + indexToOp := reduceToSingleOperationPerIndex(rewrites) + // Walk buffer, executing instructions and emitting tokens + for i := start; i <= stop && i < tsr.tokens.Size(); { + op := indexToOp[i] + delete(indexToOp, i) // remove so any left have index size-1 + t := tsr.tokens.Get(i) + if op == nil { + // no operation at that index, just dump token + if t.GetTokenType() != TokenEOF { + buf.WriteString(t.GetText()) + } + i++ // move to next token + } else { + i = op.Execute(&buf) // execute operation and skip + } + } + // include stuff after end if it's last index in buffer + // So, if they did an insertAfter(lastValidIndex, "foo"), include + // foo if end==lastValidIndex. + if stop == tsr.tokens.Size()-1 { + // Scan any remaining operations after last token + // should be included (they will be inserts). + for _, op := range indexToOp { + if op.GetIndex() >= tsr.tokens.Size()-1 { + buf.WriteString(op.GetText()) + } + } + } + return buf.String() +} + +// We need to combine operations and report invalid operations (like +// overlapping replaces that are not completed nested). Inserts to +// same index need to be combined etc... 
Here are the cases: +// +// I.i.u I.j.v leave alone, nonoverlapping +// I.i.u I.i.v combine: Iivu +// +// R.i-j.u R.x-y.v | i-j in x-y delete first R +// R.i-j.u R.i-j.v delete first R +// R.i-j.u R.x-y.v | x-y in i-j ERROR +// R.i-j.u R.x-y.v | boundaries overlap ERROR +// +// Delete special case of replace (text==null): +// D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right) +// +// I.i.u R.x-y.v | i in (x+1)-y delete I (since insert before +// we're not deleting i) +// I.i.u R.x-y.v | i not in (x+1)-y leave alone, nonoverlapping +// R.x-y.v I.i.u | i in x-y ERROR +// R.x-y.v I.x.u R.x-y.uv (combine, delete I) +// R.x-y.v I.i.u | i not in x-y leave alone, nonoverlapping +// +// I.i.u = insert u before op @ index i +// R.x-y.u = replace x-y indexed tokens with u +// +// First we need to examine replaces. For any replace op: +// +// 1. wipe out any insertions before op within that range. +// 2. Drop any replace op before that is contained completely within +// that range. +// 3. Throw exception upon boundary overlap with any previous replace. +// +// Then we can deal with inserts: +// +// 1. for any inserts to same index, combine even if not adjacent. +// 2. for any prior replace with same left boundary, combine this +// insert with replace and delete this replace. +// 3. throw exception if index in same range as previous replace +// +// Don't actually delete; make op null in list. Easier to walk list. +// Later we can throw as we add to index → op map. +// +// Note that I.2 R.2-2 will wipe out I.2 even though, technically, the +// inserted stuff would be before the replace range. But, if you +// add tokens in front of a method body '{' and then delete the method +// body, I think the stuff before the '{' you added should disappear too. +// +// Return a map from token index to operation. 
+func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]RewriteOperation { + // WALK REPLACES + for i := 0; i < len(rewrites); i++ { + op := rewrites[i] + if op == nil { + continue + } + rop, ok := op.(*ReplaceOp) + if !ok { + continue + } + // Wipe prior inserts within range + for j := 0; j < i && j < len(rewrites); j++ { + if iop, ok := rewrites[j].(*InsertBeforeOp); ok { + if iop.index == rop.index { + // E.g., insert before 2, delete 2..2; update replace + // text to include insert before, kill insert + rewrites[iop.instruction_index] = nil + if rop.text != "" { + rop.text = iop.text + rop.text + } else { + rop.text = iop.text + } + } else if iop.index > rop.index && iop.index <= rop.LastIndex { + // delete insert as it's a no-op. + rewrites[iop.instruction_index] = nil + } + } + } + // Drop any prior replaces contained within + for j := 0; j < i && j < len(rewrites); j++ { + if prevop, ok := rewrites[j].(*ReplaceOp); ok { + if prevop.index >= rop.index && prevop.LastIndex <= rop.LastIndex { + // delete replace as it's a no-op. 
+ rewrites[prevop.instruction_index] = nil + continue + } + // throw exception unless disjoint or identical + disjoint := prevop.LastIndex < rop.index || prevop.index > rop.LastIndex + // Delete special case of replace (text==null): + // D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right) + if prevop.text == "" && rop.text == "" && !disjoint { + rewrites[prevop.instruction_index] = nil + rop.index = min(prevop.index, rop.index) + rop.LastIndex = max(prevop.LastIndex, rop.LastIndex) + println("new rop" + rop.String()) //TODO: remove console write, taken from Java version + } else if !disjoint { + panic("replace op boundaries of " + rop.String() + " overlap with previous " + prevop.String()) + } + } + } + } + // WALK INSERTS + for i := 0; i < len(rewrites); i++ { + op := rewrites[i] + if op == nil { + continue + } + //hack to replicate inheritance in composition + _, iok := rewrites[i].(*InsertBeforeOp) + _, aok := rewrites[i].(*InsertAfterOp) + if !iok && !aok { + continue + } + iop := rewrites[i] + // combine current insert with prior if any at same index + // deviating a bit from TokenStreamRewriter.java - hard to incorporate inheritance logic + for j := 0; j < i && j < len(rewrites); j++ { + if nextIop, ok := rewrites[j].(*InsertAfterOp); ok { + if nextIop.index == iop.GetIndex() { + iop.SetText(nextIop.text + iop.GetText()) + rewrites[j] = nil + } + } + if prevIop, ok := rewrites[j].(*InsertBeforeOp); ok { + if prevIop.index == iop.GetIndex() { + iop.SetText(iop.GetText() + prevIop.text) + rewrites[prevIop.instruction_index] = nil + } + } + } + // look for replaces where iop.index is in range; error + for j := 0; j < i && j < len(rewrites); j++ { + if rop, ok := rewrites[j].(*ReplaceOp); ok { + if iop.GetIndex() == rop.index { + rop.text = iop.GetText() + rop.text + rewrites[i] = nil + continue + } + if iop.GetIndex() >= rop.index && iop.GetIndex() <= rop.LastIndex { + panic("insert op " + iop.String() + " within boundaries of previous " + 
rop.String()) + } + } + } + } + m := map[int]RewriteOperation{} + for i := 0; i < len(rewrites); i++ { + op := rewrites[i] + if op == nil { + continue + } + if _, ok := m[op.GetIndex()]; ok { + panic("should only be one op per index") + } + m[op.GetIndex()] = op + } + return m +} + +/* + Quick fixing Go lack of overloads +*/ + +func max(a, b int) int { + if a > b { + return a + } else { + return b + } +} +func min(a, b int) int { + if a < b { + return a + } else { + return b + } +} diff --git a/runtime/Go/antlr/v4/tokenstream_rewriter_test.go b/runtime/Go/antlr/v4/tokenstream_rewriter_test.go new file mode 100644 index 0000000000..a00265128a --- /dev/null +++ b/runtime/Go/antlr/v4/tokenstream_rewriter_test.go @@ -0,0 +1,417 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. +package antlr + +import ( + "fmt" + "strings" + "sync" + "testing" + "unicode" +) + +/* Assume the following grammar for this test. 
+ +lexer grammar LexerA; +A : 'a'; +B : 'b'; +C : 'c'; + +*/ + +func TestInsertBeforeIndex0(t *testing.T) { + input := NewInputStream("abc") + lexer := NewLexerA(input) + stream := NewCommonTokenStream(lexer, 0) + stream.Fill() + tokens := NewTokenStreamRewriter(stream) + tokens.InsertBeforeDefault(0, "0") + result := tokens.GetTextDefault() + if result != "0abc" { + t.Errorf("test failed, got %s", result) + } +} + +func prepare_rewriter(str string) *TokenStreamRewriter { + input := NewInputStream(str) + lexer := NewLexerA(input) + stream := NewCommonTokenStream(lexer, 0) + stream.Fill() + return NewTokenStreamRewriter(stream) +} + +type LexerTest struct { + input string + expected string + description string + expected_exception []string + ops func(*TokenStreamRewriter) +} + +func NewLexerTest(input, expected, desc string, ops func(*TokenStreamRewriter)) LexerTest { + return LexerTest{input: input, expected: expected, description: desc, ops: ops} +} + +func NewLexerExceptionTest(input string, expected_err []string, desc string, ops func(*TokenStreamRewriter)) LexerTest { + return LexerTest{input: input, expected_exception: expected_err, description: desc, ops: ops} +} + +func panic_tester(t *testing.T, expected_msg []string, r *TokenStreamRewriter) { + defer func() { + r := recover() + if r == nil { + t.Errorf("Panic is expected, but finished normally") + } else { + s_e := r.(string) + for _, e := range expected_msg { + if !strings.Contains(s_e, e) { + t.Errorf("Element [%s] is not in error message: [%s]", e, s_e) + } + } + } + }() + r.GetTextDefault() +} + +func TestLexerA(t *testing.T) { + tests := []LexerTest{ + NewLexerTest("abc", "0abc", "InsertBeforeIndex0", + func(r *TokenStreamRewriter) { + r.InsertBeforeDefault(0, "0") + }), + NewLexerTest("abc", "abcx", "InsertAfterLastIndex", + func(r *TokenStreamRewriter) { + r.InsertAfterDefault(2, "x") + }), + NewLexerTest("abc", "axbxc", "2InsertBeforeAfterMiddleIndex", + func(r *TokenStreamRewriter) { + 
r.InsertBeforeDefault(1, "x") + r.InsertAfterDefault(1, "x") + }), + NewLexerTest("abc", "xbc", "ReplaceIndex0", + func(r *TokenStreamRewriter) { + r.ReplaceDefaultPos(0, "x") + }), + NewLexerTest("abc", "abx", "ReplaceLastIndex", + func(r *TokenStreamRewriter) { + r.ReplaceDefaultPos(2, "x") + }), + NewLexerTest("abc", "axc", "ReplaceMiddleIndex", + func(r *TokenStreamRewriter) { + r.ReplaceDefaultPos(1, "x") + }), + NewLexerTest("abc", "ayc", "2ReplaceMiddleIndex", + func(r *TokenStreamRewriter) { + r.ReplaceDefaultPos(1, "x") + r.ReplaceDefaultPos(1, "y") + }), + NewLexerTest("abc", "_ayc", "2ReplaceMiddleIndex1InsertBefore", + func(r *TokenStreamRewriter) { + r.InsertBeforeDefault(0, "_") + r.ReplaceDefaultPos(1, "x") + r.ReplaceDefaultPos(1, "y") + }), + NewLexerTest("abc", "ac", "ReplaceThenDeleteMiddleIndex", + func(r *TokenStreamRewriter) { + r.ReplaceDefaultPos(1, "x") + r.DeleteDefaultPos(1) + }), + NewLexerExceptionTest("abc", []string{"insert op", "within boundaries of previous"}, + "InsertInPriorReplace", + func(r *TokenStreamRewriter) { + r.ReplaceDefault(0, 2, "x") + r.InsertBeforeDefault(1, "0") + }), + NewLexerTest("abc", "0xbc", "InsertThenReplaceSameIndex", + func(r *TokenStreamRewriter) { + r.InsertBeforeDefault(0, "0") + r.ReplaceDefaultPos(0, "x") + }), + NewLexerTest("abc", "ayxbc", "2InsertMiddleIndex", + func(r *TokenStreamRewriter) { + r.InsertBeforeDefault(1, "x") + r.InsertBeforeDefault(1, "y") + }), + NewLexerTest("abc", "yxzbc", "2InsertThenReplaceIndex0", + func(r *TokenStreamRewriter) { + r.InsertBeforeDefault(0, "x") + r.InsertBeforeDefault(0, "y") + r.ReplaceDefaultPos(0, "z") + }), + NewLexerTest("abc", "abyx", "ReplaceThenInsertBeforeLastIndex", + func(r *TokenStreamRewriter) { + r.ReplaceDefaultPos(2, "x") + r.InsertBeforeDefault(2, "y") + }), + NewLexerTest("abc", "abyx", "InsertThenReplaceLastIndex", + func(r *TokenStreamRewriter) { + r.InsertBeforeDefault(2, "y") + r.ReplaceDefaultPos(2, "x") + }), + NewLexerTest("abc", 
"abxy", "ReplaceThenInsertAfterLastIndex", + func(r *TokenStreamRewriter) { + r.ReplaceDefaultPos(2, "x") + r.InsertAfterDefault(2, "y") + }), + NewLexerTest("abcccba", "abyxba", "ReplaceThenInsertAtLeftEdge", + func(r *TokenStreamRewriter) { + r.ReplaceDefault(2, 4, "x") + r.InsertBeforeDefault(2, "y") + }), + NewLexerTest("abcccba", "abyxba", "ReplaceThenInsertAtLeftEdge", + func(r *TokenStreamRewriter) { + r.ReplaceDefault(2, 4, "x") + r.InsertBeforeDefault(2, "y") + }), + NewLexerExceptionTest("abcccba", + []string{"insert op", "InsertBeforeOp", "within boundaries of previous", "ReplaceOp"}, + "ReplaceRangeThenInsertAtRightEdge", + func(r *TokenStreamRewriter) { + r.ReplaceDefault(2, 4, "x") + r.InsertBeforeDefault(4, "y") + }), + NewLexerTest("abcccba", "abxyba", "ReplaceRangeThenInsertAfterRightEdge", + func(r *TokenStreamRewriter) { + r.ReplaceDefault(2, 4, "x") + r.InsertAfterDefault(4, "y") + }), + NewLexerTest("abcccba", "x", "ReplaceAll", + func(r *TokenStreamRewriter) { + r.ReplaceDefault(0, 6, "x") + }), + NewLexerTest("abcccba", "abxyzba", "ReplaceSubsetThenFetch", + func(r *TokenStreamRewriter) { + r.ReplaceDefault(2, 4, "xyz") + }), + NewLexerExceptionTest("abcccba", + []string{"replace op boundaries of", "ReplaceOp", "overlap with previous"}, + "ReplaceThenReplaceSuperset", + func(r *TokenStreamRewriter) { + r.ReplaceDefault(2, 4, "xyz") + r.ReplaceDefault(3, 5, "foo") + }), + NewLexerExceptionTest("abcccba", + []string{"replace op boundaries of", "ReplaceOp", "overlap with previous"}, + "ReplaceThenReplaceLowerIndexedSuperset", + func(r *TokenStreamRewriter) { + r.ReplaceDefault(2, 4, "xyz") + r.ReplaceDefault(1, 3, "foo") + }), + NewLexerTest("abcba", "fooa", "ReplaceSingleMiddleThenOverlappingSuperset", + func(r *TokenStreamRewriter) { + r.ReplaceDefault(2, 2, "xyz") + r.ReplaceDefault(0, 3, "foo") + }), + NewLexerTest("abc", "yxabc", "CombineInserts", + func(r *TokenStreamRewriter) { + r.InsertBeforeDefault(0, "x") + r.InsertBeforeDefault(0, 
"y") + }), + NewLexerTest("abc", "yazxbc", "Combine3Inserts", + func(r *TokenStreamRewriter) { + r.InsertBeforeDefault(1, "x") + r.InsertBeforeDefault(0, "y") + r.InsertBeforeDefault(1, "z") + }), + NewLexerTest("abc", "zfoo", "CombineInsertOnLeftWithReplace", + func(r *TokenStreamRewriter) { + r.ReplaceDefault(0, 2, "foo") + r.InsertBeforeDefault(0, "z") + }), + NewLexerTest("abc", "z", "CombineInsertOnLeftWithDelete", + func(r *TokenStreamRewriter) { + r.DeleteDefault(0, 2) + r.InsertBeforeDefault(0, "z") + }), + NewLexerTest("abc", "zaxbyc", "DisjointInserts", + func(r *TokenStreamRewriter) { + r.InsertBeforeDefault(1, "x") + r.InsertBeforeDefault(2, "y") + r.InsertBeforeDefault(0, "z") + }), + NewLexerTest("abcc", "bar", "OverlappingReplace", + func(r *TokenStreamRewriter) { + r.ReplaceDefault(1, 2, "foo") + r.ReplaceDefault(0, 3, "bar") + }), + NewLexerExceptionTest("abcc", + []string{"replace op boundaries of", "ReplaceOp", "overlap with previous"}, + "OverlappingReplace2", + func(r *TokenStreamRewriter) { + r.ReplaceDefault(0, 3, "bar") + r.ReplaceDefault(1, 2, "foo") + }), + NewLexerTest("abcc", "barc", "OverlappingReplace3", + func(r *TokenStreamRewriter) { + r.ReplaceDefault(1, 2, "foo") + r.ReplaceDefault(0, 2, "bar") + }), + NewLexerTest("abcc", "abar", "OverlappingReplace4", + func(r *TokenStreamRewriter) { + r.ReplaceDefault(1, 2, "foo") + r.ReplaceDefault(1, 3, "bar") + }), + NewLexerTest("abcc", "afooc", "DropIdenticalReplace", + func(r *TokenStreamRewriter) { + r.ReplaceDefault(1, 2, "foo") + r.ReplaceDefault(1, 2, "foo") + }), + NewLexerTest("abc", "afoofoo", "DropPrevCoveredInsert", + func(r *TokenStreamRewriter) { + r.InsertBeforeDefault(1, "foo") + r.ReplaceDefault(1, 2, "foo") + }), + NewLexerTest("abcc", "axbfoo", "LeaveAloneDisjointInsert", + func(r *TokenStreamRewriter) { + r.InsertBeforeDefault(1, "x") + r.ReplaceDefault(2, 3, "foo") + }), + NewLexerTest("abcc", "axbfoo", "LeaveAloneDisjointInsert2", + func(r *TokenStreamRewriter) { + 
r.ReplaceDefault(2, 3, "foo") + r.InsertBeforeDefault(1, "x") + }), + NewLexerTest("abc", "aby", "InsertBeforeTokenThenDeleteThatToken", + func(r *TokenStreamRewriter) { + r.InsertBeforeDefault(2, "y") + r.DeleteDefaultPos(2) + }), + NewLexerTest("aa", "aa", "DistinguishBetweenInsertAfterAndInsertBeforeToPreserverOrder", + func(r *TokenStreamRewriter) { + r.InsertBeforeDefault(0, "") + r.InsertAfterDefault(0, "") + r.InsertBeforeDefault(1, "") + r.InsertAfterDefault(1, "") + }), + NewLexerTest("aa", "

    a

    a", "DistinguishBetweenInsertAfterAndInsertBeforeToPreserverOrder2", + func(r *TokenStreamRewriter) { + r.InsertBeforeDefault(0, "

    ") + r.InsertBeforeDefault(0, "") + r.InsertAfterDefault(0, "

    ") + r.InsertAfterDefault(0, "") + r.InsertBeforeDefault(1, "") + r.InsertAfterDefault(1, "") + }), + NewLexerTest("ab", "

    a

    !b", "DistinguishBetweenInsertAfterAndInsertBeforeToPreserverOrder2", + func(r *TokenStreamRewriter) { + r.InsertBeforeDefault(0, "

    ") + r.InsertBeforeDefault(0, "") + r.InsertBeforeDefault(0, "

    ") + r.InsertAfterDefault(0, "

    ") + r.InsertAfterDefault(0, "
    ") + r.InsertAfterDefault(0, "
    ") + r.InsertBeforeDefault(1, "!") + }), + } + + for _, c := range tests { + t.Run(c.description, func(t *testing.T) { + rewriter := prepare_rewriter(c.input) + c.ops(rewriter) + if len(c.expected_exception) > 0 { + panic_tester(t, c.expected_exception, rewriter) + } else { + result := rewriter.GetTextDefault() + if result != c.expected { + t.Errorf("Expected:%s | Result: %s", c.expected, result) + } + } + }) + } +} + +// Suppress unused import error +var _ = fmt.Printf +var _ = sync.Once{} +var _ = unicode.IsLetter + +type LexerA struct { + *BaseLexer + channelNames []string + modeNames []string + // TODO: EOF string +} + +var lexeraLexerStaticData struct { + once sync.Once + serializedATN []int32 + channelNames []string + modeNames []string + literalNames []string + symbolicNames []string + ruleNames []string + predictionContextCache *PredictionContextCache + atn *ATN + decisionToDFA []*DFA +} + +func lexeraLexerInit() { + staticData := &lexeraLexerStaticData + staticData.channelNames = []string{ + "DEFAULT_TOKEN_CHANNEL", "HIDDEN", + } + staticData.modeNames = []string{ + "DEFAULT_MODE", + } + staticData.literalNames = []string{ + "", "'a'", "'b'", "'c'", + } + staticData.symbolicNames = []string{ + "", "A", "B", "C", + } + staticData.ruleNames = []string{ + "A", "B", "C", + } + staticData.predictionContextCache = NewPredictionContextCache() + staticData.serializedATN = []int32{ + 4, 0, 3, 13, 6, -1, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 1, 0, 1, 0, 1, + 1, 1, 1, 1, 2, 1, 2, 0, 0, 3, 1, 1, 3, 2, 5, 3, 1, 0, 0, 12, 0, 1, 1, 0, + 0, 0, 0, 3, 1, 0, 0, 0, 0, 5, 1, 0, 0, 0, 1, 7, 1, 0, 0, 0, 3, 9, 1, 0, + 0, 0, 5, 11, 1, 0, 0, 0, 7, 8, 5, 97, 0, 0, 8, 2, 1, 0, 0, 0, 9, 10, 5, + 98, 0, 0, 10, 4, 1, 0, 0, 0, 11, 12, 5, 99, 0, 0, 12, 6, 1, 0, 0, 0, 1, + 0, 0, + } + deserializer := NewATNDeserializer(nil) + staticData.atn = deserializer.Deserialize(staticData.serializedATN) + atn := staticData.atn + staticData.decisionToDFA = make([]*DFA, len(atn.DecisionToState)) + 
decisionToDFA := staticData.decisionToDFA + for index, state := range atn.DecisionToState { + decisionToDFA[index] = NewDFA(state, index) + } +} + +// LexerAInit initializes any static state used to implement LexerA. By default the +// static state used to implement the lexer is lazily initialized during the first call to +// NewLexerA(). You can call this function if you wish to initialize the static state ahead +// of time. +func LexerAInit() { + staticData := &lexeraLexerStaticData + staticData.once.Do(lexeraLexerInit) +} + +// NewLexerA produces a new lexer instance for the optional input antlr.CharStream. +func NewLexerA(input CharStream) *LexerA { + LexerAInit() + l := new(LexerA) + l.BaseLexer = NewBaseLexer(input) + staticData := &lexeraLexerStaticData + l.Interpreter = NewLexerATNSimulator(l, staticData.atn, staticData.decisionToDFA, staticData.predictionContextCache) + l.channelNames = staticData.channelNames + l.modeNames = staticData.modeNames + l.RuleNames = staticData.ruleNames + l.LiteralNames = staticData.literalNames + l.SymbolicNames = staticData.symbolicNames + l.GrammarFileName = "LexerA.g4" + // TODO: l.EOF = antlr.TokenEOF + + return l +} + +// LexerA tokens. +const ( + LexerAA = 1 + LexerAB = 2 + LexerAC = 3 +) diff --git a/runtime/Go/antlr/v4/trace_listener.go b/runtime/Go/antlr/v4/trace_listener.go new file mode 100644 index 0000000000..7b663bf849 --- /dev/null +++ b/runtime/Go/antlr/v4/trace_listener.go @@ -0,0 +1,32 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. 
+ +package antlr + +import "fmt" + +type TraceListener struct { + parser *BaseParser +} + +func NewTraceListener(parser *BaseParser) *TraceListener { + tl := new(TraceListener) + tl.parser = parser + return tl +} + +func (t *TraceListener) VisitErrorNode(_ ErrorNode) { +} + +func (t *TraceListener) EnterEveryRule(ctx ParserRuleContext) { + fmt.Println("enter " + t.parser.GetRuleNames()[ctx.GetRuleIndex()] + ", LT(1)=" + t.parser.input.LT(1).GetText()) +} + +func (t *TraceListener) VisitTerminal(node TerminalNode) { + fmt.Println("consume " + fmt.Sprint(node.GetSymbol()) + " rule " + t.parser.GetRuleNames()[t.parser.ctx.GetRuleIndex()]) +} + +func (t *TraceListener) ExitEveryRule(ctx ParserRuleContext) { + fmt.Println("exit " + t.parser.GetRuleNames()[ctx.GetRuleIndex()] + ", LT(1)=" + t.parser.input.LT(1).GetText()) +} diff --git a/runtime/Go/antlr/v4/transition.go b/runtime/Go/antlr/v4/transition.go new file mode 100644 index 0000000000..36be4f7331 --- /dev/null +++ b/runtime/Go/antlr/v4/transition.go @@ -0,0 +1,428 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" + "strconv" + "strings" +) + +// atom, set, epsilon, action, predicate, rule transitions. +// +//

    This is a one way link. It emanates from a state (usually via a list of +// transitions) and has a target state.

    +// +//

    Since we never have to change the ATN transitions once we construct it, +// the states. We'll use the term Edge for the DFA to distinguish them from +// ATN transitions.

    + +type Transition interface { + getTarget() ATNState + setTarget(ATNState) + getIsEpsilon() bool + getLabel() *IntervalSet + getSerializationType() int + Matches(int, int, int) bool +} + +type BaseTransition struct { + target ATNState + isEpsilon bool + label int + intervalSet *IntervalSet + serializationType int +} + +func NewBaseTransition(target ATNState) *BaseTransition { + + if target == nil { + panic("target cannot be nil.") + } + + t := new(BaseTransition) + + t.target = target + // Are we epsilon, action, sempred? + t.isEpsilon = false + t.intervalSet = nil + + return t +} + +func (t *BaseTransition) getTarget() ATNState { + return t.target +} + +func (t *BaseTransition) setTarget(s ATNState) { + t.target = s +} + +func (t *BaseTransition) getIsEpsilon() bool { + return t.isEpsilon +} + +func (t *BaseTransition) getLabel() *IntervalSet { + return t.intervalSet +} + +func (t *BaseTransition) getSerializationType() int { + return t.serializationType +} + +func (t *BaseTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { + panic("Not implemented") +} + +const ( + TransitionEPSILON = 1 + TransitionRANGE = 2 + TransitionRULE = 3 + TransitionPREDICATE = 4 // e.g., {isType(input.LT(1))}? 
+ TransitionATOM = 5 + TransitionACTION = 6 + TransitionSET = 7 // ~(A|B) or ~atom, wildcard, which convert to next 2 + TransitionNOTSET = 8 + TransitionWILDCARD = 9 + TransitionPRECEDENCE = 10 +) + +var TransitionserializationNames = []string{ + "INVALID", + "EPSILON", + "RANGE", + "RULE", + "PREDICATE", + "ATOM", + "ACTION", + "SET", + "NOT_SET", + "WILDCARD", + "PRECEDENCE", +} + +//var TransitionserializationTypes struct { +// EpsilonTransition int +// RangeTransition int +// RuleTransition int +// PredicateTransition int +// AtomTransition int +// ActionTransition int +// SetTransition int +// NotSetTransition int +// WildcardTransition int +// PrecedencePredicateTransition int +//}{ +// TransitionEPSILON, +// TransitionRANGE, +// TransitionRULE, +// TransitionPREDICATE, +// TransitionATOM, +// TransitionACTION, +// TransitionSET, +// TransitionNOTSET, +// TransitionWILDCARD, +// TransitionPRECEDENCE +//} + +// TODO: make all transitions sets? no, should remove set edges +type AtomTransition struct { + *BaseTransition +} + +func NewAtomTransition(target ATNState, intervalSet int) *AtomTransition { + + t := new(AtomTransition) + t.BaseTransition = NewBaseTransition(target) + + t.label = intervalSet // The token type or character value or, signifies special intervalSet. 
+ t.intervalSet = t.makeLabel() + t.serializationType = TransitionATOM + + return t +} + +func (t *AtomTransition) makeLabel() *IntervalSet { + s := NewIntervalSet() + s.addOne(t.label) + return s +} + +func (t *AtomTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { + return t.label == symbol +} + +func (t *AtomTransition) String() string { + return strconv.Itoa(t.label) +} + +type RuleTransition struct { + *BaseTransition + + followState ATNState + ruleIndex, precedence int +} + +func NewRuleTransition(ruleStart ATNState, ruleIndex, precedence int, followState ATNState) *RuleTransition { + + t := new(RuleTransition) + t.BaseTransition = NewBaseTransition(ruleStart) + + t.ruleIndex = ruleIndex + t.precedence = precedence + t.followState = followState + t.serializationType = TransitionRULE + t.isEpsilon = true + + return t +} + +func (t *RuleTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { + return false +} + +type EpsilonTransition struct { + *BaseTransition + + outermostPrecedenceReturn int +} + +func NewEpsilonTransition(target ATNState, outermostPrecedenceReturn int) *EpsilonTransition { + + t := new(EpsilonTransition) + t.BaseTransition = NewBaseTransition(target) + + t.serializationType = TransitionEPSILON + t.isEpsilon = true + t.outermostPrecedenceReturn = outermostPrecedenceReturn + return t +} + +func (t *EpsilonTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { + return false +} + +func (t *EpsilonTransition) String() string { + return "epsilon" +} + +type RangeTransition struct { + *BaseTransition + + start, stop int +} + +func NewRangeTransition(target ATNState, start, stop int) *RangeTransition { + + t := new(RangeTransition) + t.BaseTransition = NewBaseTransition(target) + + t.serializationType = TransitionRANGE + t.start = start + t.stop = stop + t.intervalSet = t.makeLabel() + return t +} + +func (t *RangeTransition) makeLabel() *IntervalSet { + s := NewIntervalSet() + s.addRange(t.start, 
t.stop) + return s +} + +func (t *RangeTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { + return symbol >= t.start && symbol <= t.stop +} + +func (t *RangeTransition) String() string { + var sb strings.Builder + sb.WriteByte('\'') + sb.WriteRune(rune(t.start)) + sb.WriteString("'..'") + sb.WriteRune(rune(t.stop)) + sb.WriteByte('\'') + return sb.String() +} + +type AbstractPredicateTransition interface { + Transition + IAbstractPredicateTransitionFoo() +} + +type BaseAbstractPredicateTransition struct { + *BaseTransition +} + +func NewBasePredicateTransition(target ATNState) *BaseAbstractPredicateTransition { + + t := new(BaseAbstractPredicateTransition) + t.BaseTransition = NewBaseTransition(target) + + return t +} + +func (a *BaseAbstractPredicateTransition) IAbstractPredicateTransitionFoo() {} + +type PredicateTransition struct { + *BaseAbstractPredicateTransition + + isCtxDependent bool + ruleIndex, predIndex int +} + +func NewPredicateTransition(target ATNState, ruleIndex, predIndex int, isCtxDependent bool) *PredicateTransition { + + t := new(PredicateTransition) + t.BaseAbstractPredicateTransition = NewBasePredicateTransition(target) + + t.serializationType = TransitionPREDICATE + t.ruleIndex = ruleIndex + t.predIndex = predIndex + t.isCtxDependent = isCtxDependent // e.g., $i ref in pred + t.isEpsilon = true + return t +} + +func (t *PredicateTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { + return false +} + +func (t *PredicateTransition) getPredicate() *Predicate { + return NewPredicate(t.ruleIndex, t.predIndex, t.isCtxDependent) +} + +func (t *PredicateTransition) String() string { + return "pred_" + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.predIndex) +} + +type ActionTransition struct { + *BaseTransition + + isCtxDependent bool + ruleIndex, actionIndex, predIndex int +} + +func NewActionTransition(target ATNState, ruleIndex, actionIndex int, isCtxDependent bool) *ActionTransition { + + t := 
new(ActionTransition) + t.BaseTransition = NewBaseTransition(target) + + t.serializationType = TransitionACTION + t.ruleIndex = ruleIndex + t.actionIndex = actionIndex + t.isCtxDependent = isCtxDependent // e.g., $i ref in pred + t.isEpsilon = true + return t +} + +func (t *ActionTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { + return false +} + +func (t *ActionTransition) String() string { + return "action_" + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.actionIndex) +} + +type SetTransition struct { + *BaseTransition +} + +func NewSetTransition(target ATNState, set *IntervalSet) *SetTransition { + + t := new(SetTransition) + t.BaseTransition = NewBaseTransition(target) + + t.serializationType = TransitionSET + if set != nil { + t.intervalSet = set + } else { + t.intervalSet = NewIntervalSet() + t.intervalSet.addOne(TokenInvalidType) + } + + return t +} + +func (t *SetTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { + return t.intervalSet.contains(symbol) +} + +func (t *SetTransition) String() string { + return t.intervalSet.String() +} + +type NotSetTransition struct { + *SetTransition +} + +func NewNotSetTransition(target ATNState, set *IntervalSet) *NotSetTransition { + + t := new(NotSetTransition) + + t.SetTransition = NewSetTransition(target, set) + + t.serializationType = TransitionNOTSET + + return t +} + +func (t *NotSetTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { + return symbol >= minVocabSymbol && symbol <= maxVocabSymbol && !t.intervalSet.contains(symbol) +} + +func (t *NotSetTransition) String() string { + return "~" + t.intervalSet.String() +} + +type WildcardTransition struct { + *BaseTransition +} + +func NewWildcardTransition(target ATNState) *WildcardTransition { + + t := new(WildcardTransition) + t.BaseTransition = NewBaseTransition(target) + + t.serializationType = TransitionWILDCARD + return t +} + +func (t *WildcardTransition) Matches(symbol, minVocabSymbol, 
maxVocabSymbol int) bool { + return symbol >= minVocabSymbol && symbol <= maxVocabSymbol +} + +func (t *WildcardTransition) String() string { + return "." +} + +type PrecedencePredicateTransition struct { + *BaseAbstractPredicateTransition + + precedence int +} + +func NewPrecedencePredicateTransition(target ATNState, precedence int) *PrecedencePredicateTransition { + + t := new(PrecedencePredicateTransition) + t.BaseAbstractPredicateTransition = NewBasePredicateTransition(target) + + t.serializationType = TransitionPRECEDENCE + t.precedence = precedence + t.isEpsilon = true + + return t +} + +func (t *PrecedencePredicateTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { + return false +} + +func (t *PrecedencePredicateTransition) getPredicate() *PrecedencePredicate { + return NewPrecedencePredicate(t.precedence) +} + +func (t *PrecedencePredicateTransition) String() string { + return fmt.Sprint(t.precedence) + " >= _p" +} diff --git a/runtime/Go/antlr/v4/tree.go b/runtime/Go/antlr/v4/tree.go new file mode 100644 index 0000000000..85b4f137b5 --- /dev/null +++ b/runtime/Go/antlr/v4/tree.go @@ -0,0 +1,253 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +// The basic notion of a tree has a parent, a payload, and a list of children. +// It is the most abstract interface for all the trees used by ANTLR. 
+/// + +var TreeInvalidInterval = NewInterval(-1, -2) + +type Tree interface { + GetParent() Tree + SetParent(Tree) + GetPayload() interface{} + GetChild(i int) Tree + GetChildCount() int + GetChildren() []Tree +} + +type SyntaxTree interface { + Tree + + GetSourceInterval() *Interval +} + +type ParseTree interface { + SyntaxTree + + Accept(Visitor ParseTreeVisitor) interface{} + GetText() string + + ToStringTree([]string, Recognizer) string +} + +type RuleNode interface { + ParseTree + + GetRuleContext() RuleContext + GetBaseRuleContext() *BaseRuleContext +} + +type TerminalNode interface { + ParseTree + + GetSymbol() Token +} + +type ErrorNode interface { + TerminalNode + + errorNode() +} + +type ParseTreeVisitor interface { + Visit(tree ParseTree) interface{} + VisitChildren(node RuleNode) interface{} + VisitTerminal(node TerminalNode) interface{} + VisitErrorNode(node ErrorNode) interface{} +} + +type BaseParseTreeVisitor struct{} + +var _ ParseTreeVisitor = &BaseParseTreeVisitor{} + +func (v *BaseParseTreeVisitor) Visit(tree ParseTree) interface{} { return tree.Accept(v) } +func (v *BaseParseTreeVisitor) VisitChildren(node RuleNode) interface{} { return nil } +func (v *BaseParseTreeVisitor) VisitTerminal(node TerminalNode) interface{} { return nil } +func (v *BaseParseTreeVisitor) VisitErrorNode(node ErrorNode) interface{} { return nil } + +// TODO +//func (this ParseTreeVisitor) Visit(ctx) { +// if (Utils.isArray(ctx)) { +// self := this +// return ctx.map(function(child) { return VisitAtom(self, child)}) +// } else { +// return VisitAtom(this, ctx) +// } +//} +// +//func VisitAtom(Visitor, ctx) { +// if (ctx.parser == nil) { //is terminal +// return +// } +// +// name := ctx.parser.ruleNames[ctx.ruleIndex] +// funcName := "Visit" + Utils.titleCase(name) +// +// return Visitor[funcName](ctx) +//} + +type ParseTreeListener interface { + VisitTerminal(node TerminalNode) + VisitErrorNode(node ErrorNode) + EnterEveryRule(ctx ParserRuleContext) + ExitEveryRule(ctx 
ParserRuleContext) +} + +type BaseParseTreeListener struct{} + +var _ ParseTreeListener = &BaseParseTreeListener{} + +func (l *BaseParseTreeListener) VisitTerminal(node TerminalNode) {} +func (l *BaseParseTreeListener) VisitErrorNode(node ErrorNode) {} +func (l *BaseParseTreeListener) EnterEveryRule(ctx ParserRuleContext) {} +func (l *BaseParseTreeListener) ExitEveryRule(ctx ParserRuleContext) {} + +type TerminalNodeImpl struct { + parentCtx RuleContext + + symbol Token +} + +var _ TerminalNode = &TerminalNodeImpl{} + +func NewTerminalNodeImpl(symbol Token) *TerminalNodeImpl { + tn := new(TerminalNodeImpl) + + tn.parentCtx = nil + tn.symbol = symbol + + return tn +} + +func (t *TerminalNodeImpl) GetChild(i int) Tree { + return nil +} + +func (t *TerminalNodeImpl) GetChildren() []Tree { + return nil +} + +func (t *TerminalNodeImpl) SetChildren(tree []Tree) { + panic("Cannot set children on terminal node") +} + +func (t *TerminalNodeImpl) GetSymbol() Token { + return t.symbol +} + +func (t *TerminalNodeImpl) GetParent() Tree { + return t.parentCtx +} + +func (t *TerminalNodeImpl) SetParent(tree Tree) { + t.parentCtx = tree.(RuleContext) +} + +func (t *TerminalNodeImpl) GetPayload() interface{} { + return t.symbol +} + +func (t *TerminalNodeImpl) GetSourceInterval() *Interval { + if t.symbol == nil { + return TreeInvalidInterval + } + tokenIndex := t.symbol.GetTokenIndex() + return NewInterval(tokenIndex, tokenIndex) +} + +func (t *TerminalNodeImpl) GetChildCount() int { + return 0 +} + +func (t *TerminalNodeImpl) Accept(v ParseTreeVisitor) interface{} { + return v.VisitTerminal(t) +} + +func (t *TerminalNodeImpl) GetText() string { + return t.symbol.GetText() +} + +func (t *TerminalNodeImpl) String() string { + if t.symbol.GetTokenType() == TokenEOF { + return "" + } + + return t.symbol.GetText() +} + +func (t *TerminalNodeImpl) ToStringTree(s []string, r Recognizer) string { + return t.String() +} + +// Represents a token that was consumed during reSynchronization 
+// rather than during a valid Match operation. For example, +// we will create this kind of a node during single token insertion +// and deletion as well as during "consume until error recovery set" +// upon no viable alternative exceptions. + +type ErrorNodeImpl struct { + *TerminalNodeImpl +} + +var _ ErrorNode = &ErrorNodeImpl{} + +func NewErrorNodeImpl(token Token) *ErrorNodeImpl { + en := new(ErrorNodeImpl) + en.TerminalNodeImpl = NewTerminalNodeImpl(token) + return en +} + +func (e *ErrorNodeImpl) errorNode() {} + +func (e *ErrorNodeImpl) Accept(v ParseTreeVisitor) interface{} { + return v.VisitErrorNode(e) +} + +type ParseTreeWalker struct { +} + +func NewParseTreeWalker() *ParseTreeWalker { + return new(ParseTreeWalker) +} + +// Performs a walk on the given parse tree starting at the root and going down recursively +// with depth-first search. On each node, EnterRule is called before +// recursively walking down into child nodes, then +// ExitRule is called after the recursive call to wind up. 
+func (p *ParseTreeWalker) Walk(listener ParseTreeListener, t Tree) { + switch tt := t.(type) { + case ErrorNode: + listener.VisitErrorNode(tt) + case TerminalNode: + listener.VisitTerminal(tt) + default: + p.EnterRule(listener, t.(RuleNode)) + for i := 0; i < t.GetChildCount(); i++ { + child := t.GetChild(i) + p.Walk(listener, child) + } + p.ExitRule(listener, t.(RuleNode)) + } +} + +// Enters a grammar rule by first triggering the generic event {@link ParseTreeListener//EnterEveryRule} +// then by triggering the event specific to the given parse tree node +func (p *ParseTreeWalker) EnterRule(listener ParseTreeListener, r RuleNode) { + ctx := r.GetRuleContext().(ParserRuleContext) + listener.EnterEveryRule(ctx) + ctx.EnterRule(listener) +} + +// Exits a grammar rule by first triggering the event specific to the given parse tree node +// then by triggering the generic event {@link ParseTreeListener//ExitEveryRule} +func (p *ParseTreeWalker) ExitRule(listener ParseTreeListener, r RuleNode) { + ctx := r.GetRuleContext().(ParserRuleContext) + ctx.ExitRule(listener) + listener.ExitEveryRule(ctx) +} + +var ParseTreeWalkerDefault = NewParseTreeWalker() diff --git a/runtime/Go/antlr/v4/trees.go b/runtime/Go/antlr/v4/trees.go new file mode 100644 index 0000000000..d7dbb03228 --- /dev/null +++ b/runtime/Go/antlr/v4/trees.go @@ -0,0 +1,138 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import "fmt" + +/** A set of utility routines useful for all kinds of ANTLR trees. */ + +// Print out a whole tree in LISP form. {@link //getNodeText} is used on the +// +// node payloads to get the text for the nodes. Detect +// parse trees and extract data appropriately. 
+func TreesStringTree(tree Tree, ruleNames []string, recog Recognizer) string { + + if recog != nil { + ruleNames = recog.GetRuleNames() + } + + s := TreesGetNodeText(tree, ruleNames, nil) + + s = EscapeWhitespace(s, false) + c := tree.GetChildCount() + if c == 0 { + return s + } + res := "(" + s + " " + if c > 0 { + s = TreesStringTree(tree.GetChild(0), ruleNames, nil) + res += s + } + for i := 1; i < c; i++ { + s = TreesStringTree(tree.GetChild(i), ruleNames, nil) + res += (" " + s) + } + res += ")" + return res +} + +func TreesGetNodeText(t Tree, ruleNames []string, recog Parser) string { + if recog != nil { + ruleNames = recog.GetRuleNames() + } + + if ruleNames != nil { + switch t2 := t.(type) { + case RuleNode: + t3 := t2.GetRuleContext() + altNumber := t3.GetAltNumber() + + if altNumber != ATNInvalidAltNumber { + return fmt.Sprintf("%s:%d", ruleNames[t3.GetRuleIndex()], altNumber) + } + return ruleNames[t3.GetRuleIndex()] + case ErrorNode: + return fmt.Sprint(t2) + case TerminalNode: + if t2.GetSymbol() != nil { + return t2.GetSymbol().GetText() + } + } + } + + // no recog for rule names + payload := t.GetPayload() + if p2, ok := payload.(Token); ok { + return p2.GetText() + } + + return fmt.Sprint(t.GetPayload()) +} + +// Return ordered list of all children of this node +func TreesGetChildren(t Tree) []Tree { + list := make([]Tree, 0) + for i := 0; i < t.GetChildCount(); i++ { + list = append(list, t.GetChild(i)) + } + return list +} + +// Return a list of all ancestors of this node. The first node of +// +// list is the root and the last is the parent of this node. +func TreesgetAncestors(t Tree) []Tree { + ancestors := make([]Tree, 0) + t = t.GetParent() + for t != nil { + f := []Tree{t} + ancestors = append(f, ancestors...) 
+ t = t.GetParent() + } + return ancestors +} + +func TreesFindAllTokenNodes(t ParseTree, ttype int) []ParseTree { + return TreesfindAllNodes(t, ttype, true) +} + +func TreesfindAllRuleNodes(t ParseTree, ruleIndex int) []ParseTree { + return TreesfindAllNodes(t, ruleIndex, false) +} + +func TreesfindAllNodes(t ParseTree, index int, findTokens bool) []ParseTree { + nodes := make([]ParseTree, 0) + treesFindAllNodes(t, index, findTokens, &nodes) + return nodes +} + +func treesFindAllNodes(t ParseTree, index int, findTokens bool, nodes *[]ParseTree) { + // check this node (the root) first + + t2, ok := t.(TerminalNode) + t3, ok2 := t.(ParserRuleContext) + + if findTokens && ok { + if t2.GetSymbol().GetTokenType() == index { + *nodes = append(*nodes, t2) + } + } else if !findTokens && ok2 { + if t3.GetRuleIndex() == index { + *nodes = append(*nodes, t3) + } + } + // check children + for i := 0; i < t.GetChildCount(); i++ { + treesFindAllNodes(t.GetChild(i).(ParseTree), index, findTokens, nodes) + } +} + +func TreesDescendants(t ParseTree) []ParseTree { + nodes := []ParseTree{t} + for i := 0; i < t.GetChildCount(); i++ { + nodes = append(nodes, TreesDescendants(t.GetChild(i).(ParseTree))...) + } + return nodes +} diff --git a/runtime/Go/antlr/v4/utils.go b/runtime/Go/antlr/v4/utils.go new file mode 100644 index 0000000000..9fad5d916b --- /dev/null +++ b/runtime/Go/antlr/v4/utils.go @@ -0,0 +1,352 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. 
+ +package antlr + +import ( + "bytes" + "errors" + "fmt" + "math/bits" + "strconv" + "strings" +) + +func intMin(a, b int) int { + if a < b { + return a + } + return b +} + +func intMax(a, b int) int { + if a > b { + return a + } + return b +} + +// A simple integer stack + +type IntStack []int + +var ErrEmptyStack = errors.New("Stack is empty") + +func (s *IntStack) Pop() (int, error) { + l := len(*s) - 1 + if l < 0 { + return 0, ErrEmptyStack + } + v := (*s)[l] + *s = (*s)[0:l] + return v, nil +} + +func (s *IntStack) Push(e int) { + *s = append(*s, e) +} + +type comparable interface { + Equals(other Collectable[any]) bool +} + +func standardEqualsFunction(a Collectable[any], b Collectable[any]) bool { + + return a.Equals(b) +} + +func standardHashFunction(a interface{}) int { + if h, ok := a.(hasher); ok { + return h.Hash() + } + + panic("Not Hasher") +} + +type hasher interface { + Hash() int +} + +const bitsPerWord = 64 + +func indexForBit(bit int) int { + return bit / bitsPerWord +} + +func wordForBit(data []uint64, bit int) uint64 { + idx := indexForBit(bit) + if idx >= len(data) { + return 0 + } + return data[idx] +} + +func maskForBit(bit int) uint64 { + return uint64(1) << (bit % bitsPerWord) +} + +func wordsNeeded(bit int) int { + return indexForBit(bit) + 1 +} + +type BitSet struct { + data []uint64 +} + +func NewBitSet() *BitSet { + return &BitSet{} +} + +func (b *BitSet) add(value int) { + idx := indexForBit(value) + if idx >= len(b.data) { + size := wordsNeeded(value) + data := make([]uint64, size) + copy(data, b.data) + b.data = data + } + b.data[idx] |= maskForBit(value) +} + +func (b *BitSet) clear(index int) { + idx := indexForBit(index) + if idx >= len(b.data) { + return + } + b.data[idx] &= ^maskForBit(index) +} + +func (b *BitSet) or(set *BitSet) { + // Get min size necessary to represent the bits in both sets. 
+ bLen := b.minLen() + setLen := set.minLen() + maxLen := intMax(bLen, setLen) + if maxLen > len(b.data) { + // Increase the size of len(b.data) to repesent the bits in both sets. + data := make([]uint64, maxLen) + copy(data, b.data) + b.data = data + } + // len(b.data) is at least setLen. + for i := 0; i < setLen; i++ { + b.data[i] |= set.data[i] + } +} + +func (b *BitSet) remove(value int) { + b.clear(value) +} + +func (b *BitSet) contains(value int) bool { + idx := indexForBit(value) + if idx >= len(b.data) { + return false + } + return (b.data[idx] & maskForBit(value)) != 0 +} + +func (b *BitSet) minValue() int { + for i, v := range b.data { + if v == 0 { + continue + } + return i*bitsPerWord + bits.TrailingZeros64(v) + } + return 2147483647 +} + +func (b *BitSet) equals(other interface{}) bool { + otherBitSet, ok := other.(*BitSet) + if !ok { + return false + } + + if b == otherBitSet { + return true + } + + // We only compare set bits, so we cannot rely on the two slices having the same size. Its + // possible for two BitSets to have different slice lengths but the same set bits. So we only + // compare the relevant words and ignore the trailing zeros. 
+ bLen := b.minLen() + otherLen := otherBitSet.minLen() + + if bLen != otherLen { + return false + } + + for i := 0; i < bLen; i++ { + if b.data[i] != otherBitSet.data[i] { + return false + } + } + + return true +} + +func (b *BitSet) minLen() int { + for i := len(b.data); i > 0; i-- { + if b.data[i-1] != 0 { + return i + } + } + return 0 +} + +func (b *BitSet) length() int { + cnt := 0 + for _, val := range b.data { + cnt += bits.OnesCount64(val) + } + return cnt +} + +func (b *BitSet) String() string { + vals := make([]string, 0, b.length()) + + for i, v := range b.data { + for v != 0 { + n := bits.TrailingZeros64(v) + vals = append(vals, strconv.Itoa(i*bitsPerWord+n)) + v &= ^(uint64(1) << n) + } + } + + return "{" + strings.Join(vals, ", ") + "}" +} + +type AltDict struct { + data map[string]interface{} +} + +func NewAltDict() *AltDict { + d := new(AltDict) + d.data = make(map[string]interface{}) + return d +} + +func (a *AltDict) Get(key string) interface{} { + key = "k-" + key + return a.data[key] +} + +func (a *AltDict) put(key string, value interface{}) { + key = "k-" + key + a.data[key] = value +} + +func (a *AltDict) values() []interface{} { + vs := make([]interface{}, len(a.data)) + i := 0 + for _, v := range a.data { + vs[i] = v + i++ + } + return vs +} + +type DoubleDict struct { + data map[int]map[int]interface{} +} + +func NewDoubleDict() *DoubleDict { + dd := new(DoubleDict) + dd.data = make(map[int]map[int]interface{}) + return dd +} + +func (d *DoubleDict) Get(a, b int) interface{} { + data := d.data[a] + + if data == nil { + return nil + } + + return data[b] +} + +func (d *DoubleDict) set(a, b int, o interface{}) { + data := d.data[a] + + if data == nil { + data = make(map[int]interface{}) + d.data[a] = data + } + + data[b] = o +} + +func EscapeWhitespace(s string, escapeSpaces bool) string { + + s = strings.Replace(s, "\t", "\\t", -1) + s = strings.Replace(s, "\n", "\\n", -1) + s = strings.Replace(s, "\r", "\\r", -1) + if escapeSpaces { + s = 
strings.Replace(s, " ", "\u00B7", -1) + } + return s +} + +func TerminalNodeToStringArray(sa []TerminalNode) []string { + st := make([]string, len(sa)) + + for i, s := range sa { + st[i] = fmt.Sprintf("%v", s) + } + + return st +} + +func PrintArrayJavaStyle(sa []string) string { + var buffer bytes.Buffer + + buffer.WriteString("[") + + for i, s := range sa { + buffer.WriteString(s) + if i != len(sa)-1 { + buffer.WriteString(", ") + } + } + + buffer.WriteString("]") + + return buffer.String() +} + +// murmur hash +func murmurInit(seed int) int { + return seed +} + +func murmurUpdate(h int, value int) int { + const c1 uint32 = 0xCC9E2D51 + const c2 uint32 = 0x1B873593 + const r1 uint32 = 15 + const r2 uint32 = 13 + const m uint32 = 5 + const n uint32 = 0xE6546B64 + + k := uint32(value) + k *= c1 + k = (k << r1) | (k >> (32 - r1)) + k *= c2 + + hash := uint32(h) ^ k + hash = (hash << r2) | (hash >> (32 - r2)) + hash = hash*m + n + return int(hash) +} + +func murmurFinish(h int, numberOfWords int) int { + var hash = uint32(h) + hash ^= uint32(numberOfWords) << 2 + hash ^= hash >> 16 + hash *= 0x85ebca6b + hash ^= hash >> 13 + hash *= 0xc2b2ae35 + hash ^= hash >> 16 + + return int(hash) +} diff --git a/runtime/Go/antlr/v4/utils_set.go b/runtime/Go/antlr/v4/utils_set.go new file mode 100644 index 0000000000..c9bd6751e3 --- /dev/null +++ b/runtime/Go/antlr/v4/utils_set.go @@ -0,0 +1,235 @@ +package antlr + +import "math" + +const ( + _initalCapacity = 16 + _initalBucketCapacity = 8 + _loadFactor = 0.75 +) + +type Set interface { + Add(value interface{}) (added interface{}) + Len() int + Get(value interface{}) (found interface{}) + Contains(value interface{}) bool + Values() []interface{} + Each(f func(interface{}) bool) +} + +type array2DHashSet struct { + buckets [][]Collectable[any] + hashcodeFunction func(interface{}) int + equalsFunction func(Collectable[any], Collectable[any]) bool + + n int // How many elements in set + threshold int // when to expand + + 
currentPrime int // jump by 4 primes each expand or whatever + initialBucketCapacity int +} + +func (as *array2DHashSet) Each(f func(interface{}) bool) { + if as.Len() < 1 { + return + } + + for _, bucket := range as.buckets { + for _, o := range bucket { + if o == nil { + break + } + if !f(o) { + return + } + } + } +} + +func (as *array2DHashSet) Values() []interface{} { + if as.Len() < 1 { + return nil + } + + values := make([]interface{}, 0, as.Len()) + as.Each(func(i interface{}) bool { + values = append(values, i) + return true + }) + return values +} + +func (as *array2DHashSet) Contains(value Collectable[any]) bool { + return as.Get(value) != nil +} + +func (as *array2DHashSet) Add(value Collectable[any]) interface{} { + if as.n > as.threshold { + as.expand() + } + return as.innerAdd(value) +} + +func (as *array2DHashSet) expand() { + old := as.buckets + + as.currentPrime += 4 + + var ( + newCapacity = len(as.buckets) << 1 + newTable = as.createBuckets(newCapacity) + newBucketLengths = make([]int, len(newTable)) + ) + + as.buckets = newTable + as.threshold = int(float64(newCapacity) * _loadFactor) + + for _, bucket := range old { + if bucket == nil { + continue + } + + for _, o := range bucket { + if o == nil { + break + } + + b := as.getBuckets(o) + bucketLength := newBucketLengths[b] + var newBucket []Collectable[any] + if bucketLength == 0 { + // new bucket + newBucket = as.createBucket(as.initialBucketCapacity) + newTable[b] = newBucket + } else { + newBucket = newTable[b] + if bucketLength == len(newBucket) { + // expand + newBucketCopy := make([]Collectable[any], len(newBucket)<<1) + copy(newBucketCopy[:bucketLength], newBucket) + newBucket = newBucketCopy + newTable[b] = newBucket + } + } + + newBucket[bucketLength] = o + newBucketLengths[b]++ + } + } +} + +func (as *array2DHashSet) Len() int { + return as.n +} + +func (as *array2DHashSet) Get(o Collectable[any]) interface{} { + if o == nil { + return nil + } + + b := as.getBuckets(o) + bucket := 
as.buckets[b] + if bucket == nil { // no bucket + return nil + } + + for _, e := range bucket { + if e == nil { + return nil // empty slot; not there + } + if as.equalsFunction(e, o) { + return e + } + } + + return nil +} + +func (as *array2DHashSet) innerAdd(o Collectable[any]) interface{} { + b := as.getBuckets(o) + + bucket := as.buckets[b] + + // new bucket + if bucket == nil { + bucket = as.createBucket(as.initialBucketCapacity) + bucket[0] = o + + as.buckets[b] = bucket + as.n++ + return o + } + + // look for it in bucket + for i := 0; i < len(bucket); i++ { + existing := bucket[i] + if existing == nil { // empty slot; not there, add. + bucket[i] = o + as.n++ + return o + } + + if as.equalsFunction(existing, o) { // found existing, quit + return existing + } + } + + // full bucket, expand and add to end + oldLength := len(bucket) + bucketCopy := make([]Collectable[any], oldLength<<1) + copy(bucketCopy[:oldLength], bucket) + bucket = bucketCopy + as.buckets[b] = bucket + bucket[oldLength] = o + as.n++ + return o +} + +func (as *array2DHashSet) getBuckets(value Collectable[any]) int { + hash := as.hashcodeFunction(value) + return hash & (len(as.buckets) - 1) +} + +func (as *array2DHashSet) createBuckets(cap int) [][]Collectable[any] { + return make([][]Collectable[any], cap) +} + +func (as *array2DHashSet) createBucket(cap int) []Collectable[any] { + return make([]Collectable[any], cap) +} + +func newArray2DHashSetWithCap( + hashcodeFunction func(interface{}) int, + equalsFunction func(Collectable[any], Collectable[any]) bool, + initCap int, + initBucketCap int, +) *array2DHashSet { + if hashcodeFunction == nil { + hashcodeFunction = standardHashFunction + } + + if equalsFunction == nil { + equalsFunction = standardEqualsFunction + } + + ret := &array2DHashSet{ + hashcodeFunction: hashcodeFunction, + equalsFunction: equalsFunction, + + n: 0, + threshold: int(math.Floor(_initalCapacity * _loadFactor)), + + currentPrime: 1, + initialBucketCapacity: initBucketCap, 
+ } + + ret.buckets = ret.createBuckets(initCap) + return ret +} + +func newArray2DHashSet( + hashcodeFunction func(interface{}) int, + equalsFunction func(Collectable[any], Collectable[any]) bool, +) *array2DHashSet { + return newArray2DHashSetWithCap(hashcodeFunction, equalsFunction, _initalCapacity, _initalBucketCapacity) +} diff --git a/runtime/Go/antlr/v4/utils_test.go b/runtime/Go/antlr/v4/utils_test.go new file mode 100644 index 0000000000..ed274ef339 --- /dev/null +++ b/runtime/Go/antlr/v4/utils_test.go @@ -0,0 +1,62 @@ +package antlr + +import "testing" + +func testBitSet(t *testing.T, bs *BitSet, str string, length int, contains []int, minValue int, minLen int) { + t.Helper() + if got, want := bs.String(), str; got != want { + t.Errorf("%+v.String() = %q, want %q", bs, got, want) + } + if got, want := bs.length(), length; got != want { + t.Errorf("%+v.length() = %q, want %q", bs, got, want) + } + for i := 0; i < len(bs.data)*bitsPerWord; i++ { + var want bool + for _, val := range contains { + if i == val { + want = true + break + } + } + if got := bs.contains(i); got != want { + t.Errorf("%+v.contains(%v) = %v, want %v", bs, i, got, want) + } + } + if got, want := bs.minValue(), minValue; got != want { + t.Errorf("%+v.minValue() = %v, want %v", bs, got, want) + } + if got, want := bs.minLen(), minLen; got != want { + t.Errorf("%+v.minLen() = %v, want %v", bs, got, want) + } +} + +func TestBitSet(t *testing.T) { + bs1 := NewBitSet() + testBitSet(t, bs1, "{}", 0, []int{}, 2147483647, 0) + bs1.add(0) + testBitSet(t, bs1, "{0}", 1, []int{0}, 0, 1) + bs1.add(63) + testBitSet(t, bs1, "{0, 63}", 2, []int{0, 63}, 0, 1) + bs1.remove(0) + testBitSet(t, bs1, "{63}", 1, []int{63}, 63, 1) + bs1.add(20) + testBitSet(t, bs1, "{20, 63}", 2, []int{20, 63}, 20, 1) + bs1.clear(63) + testBitSet(t, bs1, "{20}", 1, []int{20}, 20, 1) + bs2 := NewBitSet() + bs2.add(64) + bs1.or(bs2) + testBitSet(t, bs1, "{20, 64}", 2, []int{20, 64}, 20, 2) + bs1.remove(20) + testBitSet(t, bs1, 
"{64}", 1, []int{64}, 64, 2) + bs3 := NewBitSet() + bs3.add(63) + bs1.or(bs3) + testBitSet(t, bs1, "{63, 64}", 2, []int{63, 64}, 63, 2) + bs1.clear(64) + bs4 := NewBitSet() + bs4.or(bs1) + if got, want := bs4.equals(bs1), true; got != want { + t.Errorf("%+v.equals(%+v) = %v, want %v", bs4, bs1, got, want) + } +} diff --git a/runtime/Java/pom.xml b/runtime/Java/pom.xml index 49a84a8c0a..ca002d6179 100644 --- a/runtime/Java/pom.xml +++ b/runtime/Java/pom.xml @@ -9,7 +9,7 @@ org.antlr antlr4-master - 4.10.2-SNAPSHOT + 4.11.0-SNAPSHOT ../../pom.xml antlr4-runtime @@ -85,6 +85,7 @@ org.antlr.antlr4.runtime org.antlr.antlr4-runtime + org.antlr.v4.gui;resolution:=optional, * diff --git a/runtime/Java/src/org/antlr/v4/runtime/BufferedTokenStream.java b/runtime/Java/src/org/antlr/v4/runtime/BufferedTokenStream.java index e1dc4b1977..056ea963a1 100644 --- a/runtime/Java/src/org/antlr/v4/runtime/BufferedTokenStream.java +++ b/runtime/Java/src/org/antlr/v4/runtime/BufferedTokenStream.java @@ -449,7 +449,7 @@ public String getText(Interval interval) { int start = interval.a; int stop = interval.b; if ( start<0 || stop<0 ) return ""; - fill(); + sync(stop); if ( stop>=tokens.size() ) stop = tokens.size()-1; StringBuilder buf = new StringBuilder(); diff --git a/runtime/Java/src/org/antlr/v4/runtime/Parser.java b/runtime/Java/src/org/antlr/v4/runtime/Parser.java index 5b52fa786c..34ab288108 100644 --- a/runtime/Java/src/org/antlr/v4/runtime/Parser.java +++ b/runtime/Java/src/org/antlr/v4/runtime/Parser.java @@ -27,6 +27,7 @@ import org.antlr.v4.runtime.tree.pattern.ParseTreePattern; import org.antlr.v4.runtime.tree.pattern.ParseTreePatternMatcher; +import java.io.PrintStream; import java.util.ArrayList; import java.util.Collections; import java.util.List; @@ -873,16 +874,20 @@ public List getDFAStrings() { } } - /** For debugging and other purposes. */ public void dumpDFA() { + dumpDFA(System.out); + } + + /** For debugging and other purposes. 
*/ + public void dumpDFA(PrintStream dumpStream) { synchronized (_interp.decisionToDFA) { boolean seenOne = false; for (int d = 0; d < _interp.decisionToDFA.length; d++) { DFA dfa = _interp.decisionToDFA[d]; if ( !dfa.states.isEmpty() ) { - if ( seenOne ) System.out.println(); - System.out.println("Decision " + dfa.decision + ":"); - System.out.print(dfa.toString(getVocabulary())); + if ( seenOne ) dumpStream.println(); + dumpStream.println("Decision " + dfa.decision + ":"); + dumpStream.print(dfa.toString(getVocabulary())); seenOne = true; } } diff --git a/runtime/Java/src/org/antlr/v4/runtime/ParserRuleContext.java b/runtime/Java/src/org/antlr/v4/runtime/ParserRuleContext.java index 4513b09862..635b1a473b 100644 --- a/runtime/Java/src/org/antlr/v4/runtime/ParserRuleContext.java +++ b/runtime/Java/src/org/antlr/v4/runtime/ParserRuleContext.java @@ -40,6 +40,8 @@ * satisfy the superclass interface. */ public class ParserRuleContext extends RuleContext { + public static final ParserRuleContext EMPTY = new ParserRuleContext(); + /** If we are debugging or building a parse tree for a visitor, * we need to track all of the tokens and rule invocations associated * with this rule's context. This is empty for parsing w/o tree constr. diff --git a/runtime/Java/src/org/antlr/v4/runtime/RuleContext.java b/runtime/Java/src/org/antlr/v4/runtime/RuleContext.java index 8b7f168b2b..e87e41e58f 100644 --- a/runtime/Java/src/org/antlr/v4/runtime/RuleContext.java +++ b/runtime/Java/src/org/antlr/v4/runtime/RuleContext.java @@ -66,8 +66,6 @@ * @see ParserRuleContext */ public class RuleContext implements RuleNode { - public static final ParserRuleContext EMPTY = new ParserRuleContext(); - /** What context invoked this rule? 
*/ public RuleContext parent; diff --git a/runtime/Java/src/org/antlr/v4/runtime/RuntimeMetaData.java b/runtime/Java/src/org/antlr/v4/runtime/RuntimeMetaData.java index a7cda7bedb..31fa435525 100644 --- a/runtime/Java/src/org/antlr/v4/runtime/RuntimeMetaData.java +++ b/runtime/Java/src/org/antlr/v4/runtime/RuntimeMetaData.java @@ -67,7 +67,7 @@ public class RuntimeMetaData { * omitted. * */ - public static final String VERSION = "4.10.1"; + public static final String VERSION = "4.10.2-SNAPSHOT"; /** * Gets the currently executing version of the ANTLR 4 runtime library. diff --git a/runtime/Java/src/org/antlr/v4/runtime/atn/ATNConfig.java b/runtime/Java/src/org/antlr/v4/runtime/atn/ATNConfig.java index a3e4d1d8b6..c24739ab1f 100644 --- a/runtime/Java/src/org/antlr/v4/runtime/atn/ATNConfig.java +++ b/runtime/Java/src/org/antlr/v4/runtime/atn/ATNConfig.java @@ -7,8 +7,11 @@ package org.antlr.v4.runtime.atn; import org.antlr.v4.runtime.Recognizer; +import org.antlr.v4.runtime.misc.DoubleKeyMap; import org.antlr.v4.runtime.misc.MurmurHash; +import java.util.Objects; + /** A tuple: (ATN state, predicted alt, syntactic, semantic context). 
* The syntactic context is a graph-structured stack node whose * path(s) to the root is the rule invocation(s) @@ -76,7 +79,7 @@ public ATNConfig(ATNState state, int alt, PredictionContext context) { - this(state, alt, context, SemanticContext.NONE); + this(state, alt, context, SemanticContext.Empty.Instance); } public ATNConfig(ATNState state, @@ -168,7 +171,7 @@ else if (other == null) { return this.state.stateNumber==other.state.stateNumber && this.alt==other.alt - && (this.context==other.context || (this.context != null && this.context.equals(other.context))) + && Objects.equals(this.context, other.context) && this.semanticContext.equals(other.semanticContext) && this.isPrecedenceFilterSuppressed() == other.isPrecedenceFilterSuppressed(); } @@ -206,7 +209,7 @@ public String toString(Recognizer recog, boolean showAlt) { buf.append(context.toString()); buf.append("]"); } - if ( semanticContext!=null && semanticContext != SemanticContext.NONE ) { + if ( semanticContext!=null && semanticContext != SemanticContext.Empty.Instance ) { buf.append(","); buf.append(semanticContext); } diff --git a/runtime/Java/src/org/antlr/v4/runtime/atn/ATNConfigSet.java b/runtime/Java/src/org/antlr/v4/runtime/atn/ATNConfigSet.java index d40dfb4e37..5c40e327e9 100755 --- a/runtime/Java/src/org/antlr/v4/runtime/atn/ATNConfigSet.java +++ b/runtime/Java/src/org/antlr/v4/runtime/atn/ATNConfigSet.java @@ -137,7 +137,7 @@ public boolean add( DoubleKeyMap mergeCache) { if ( readonly ) throw new IllegalStateException("This set is readonly"); - if ( config.semanticContext!=SemanticContext.NONE ) { + if ( config.semanticContext != SemanticContext.Empty.Instance ) { hasSemanticContext = true; } if (config.getOuterContextDepth() > 0) { @@ -199,7 +199,7 @@ public BitSet getAlts() { public List getPredicates() { List preds = new ArrayList(); for (ATNConfig c : configs) { - if ( c.semanticContext!=SemanticContext.NONE ) { + if ( c.semanticContext!=SemanticContext.Empty.Instance ) { 
preds.add(c.semanticContext); } } diff --git a/runtime/Java/src/org/antlr/v4/runtime/atn/EmptyPredictionContext.java b/runtime/Java/src/org/antlr/v4/runtime/atn/EmptyPredictionContext.java index a11ff159ec..9feaad3dc6 100644 --- a/runtime/Java/src/org/antlr/v4/runtime/atn/EmptyPredictionContext.java +++ b/runtime/Java/src/org/antlr/v4/runtime/atn/EmptyPredictionContext.java @@ -7,7 +7,13 @@ package org.antlr.v4.runtime.atn; public class EmptyPredictionContext extends SingletonPredictionContext { - public EmptyPredictionContext() { + /** + * Represents {@code $} in local context prediction, which means wildcard. + * {@code *+x = *}. + */ + public static final EmptyPredictionContext Instance = new EmptyPredictionContext(); + + private EmptyPredictionContext() { super(null, EMPTY_RETURN_STATE); } diff --git a/runtime/Java/src/org/antlr/v4/runtime/atn/LL1Analyzer.java b/runtime/Java/src/org/antlr/v4/runtime/atn/LL1Analyzer.java index 8d5117b68d..97f0dca4e7 100644 --- a/runtime/Java/src/org/antlr/v4/runtime/atn/LL1Analyzer.java +++ b/runtime/Java/src/org/antlr/v4/runtime/atn/LL1Analyzer.java @@ -45,7 +45,7 @@ public IntervalSet[] getDecisionLookahead(ATNState s) { look[alt] = new IntervalSet(); Set lookBusy = new HashSet(); boolean seeThruPreds = false; // fail to get lookahead upon pred - _LOOK(s.transition(alt).target, null, PredictionContext.EMPTY, + _LOOK(s.transition(alt).target, null, EmptyPredictionContext.Instance, look[alt], lookBusy, new BitSet(), seeThruPreds, false); // Wipe out lookahead for this alternative if we found nothing // or we had a predicate when we !seeThruPreds @@ -167,7 +167,7 @@ else if (ctx.isEmpty() && addEOF) { return; } - if ( ctx != PredictionContext.EMPTY ) { + if ( ctx != EmptyPredictionContext.Instance ) { // run thru all possible stack tops in ctx boolean removed = calledRuleStack.get(s.ruleIndex); try { diff --git a/runtime/Java/src/org/antlr/v4/runtime/atn/LexerATNConfig.java 
b/runtime/Java/src/org/antlr/v4/runtime/atn/LexerATNConfig.java index ad8766c49b..a48abd28e9 100644 --- a/runtime/Java/src/org/antlr/v4/runtime/atn/LexerATNConfig.java +++ b/runtime/Java/src/org/antlr/v4/runtime/atn/LexerATNConfig.java @@ -21,7 +21,7 @@ public LexerATNConfig(ATNState state, int alt, PredictionContext context) { - super(state, alt, context, SemanticContext.NONE); + super(state, alt, context, SemanticContext.Empty.Instance); this.passedThroughNonGreedyDecision = false; this.lexerActionExecutor = null; } @@ -31,7 +31,7 @@ public LexerATNConfig(ATNState state, PredictionContext context, LexerActionExecutor lexerActionExecutor) { - super(state, alt, context, SemanticContext.NONE); + super(state, alt, context, SemanticContext.Empty.Instance); this.lexerActionExecutor = lexerActionExecutor; this.passedThroughNonGreedyDecision = false; } diff --git a/runtime/Java/src/org/antlr/v4/runtime/atn/LexerATNSimulator.java b/runtime/Java/src/org/antlr/v4/runtime/atn/LexerATNSimulator.java index 79d9030530..254e17839b 100644 --- a/runtime/Java/src/org/antlr/v4/runtime/atn/LexerATNSimulator.java +++ b/runtime/Java/src/org/antlr/v4/runtime/atn/LexerATNSimulator.java @@ -380,7 +380,7 @@ protected ATNState getReachableTarget(Transition trans, int t) { protected ATNConfigSet computeStartState(CharStream input, ATNState p) { - PredictionContext initialContext = PredictionContext.EMPTY; + PredictionContext initialContext = EmptyPredictionContext.Instance; ATNConfigSet configs = new OrderedATNConfigSet(); for (int i=0; i splitAccordingToSemanticValidity( ATNConfigSet succeeded = new ATNConfigSet(configs.fullCtx); ATNConfigSet failed = new ATNConfigSet(configs.fullCtx); for (ATNConfig c : configs) { - if ( c.semanticContext!=SemanticContext.NONE ) { + if ( c.semanticContext!=SemanticContext.Empty.Instance ) { boolean predicateEvaluationResult = evalSemanticContext(c.semanticContext, outerContext, c.alt, configs.fullCtx); if ( predicateEvaluationResult ) { succeeded.add(c); 
@@ -1370,7 +1373,7 @@ protected BitSet evalSemanticContext(DFAState.PredPrediction[] predPredictions, { BitSet predictions = new BitSet(); for (DFAState.PredPrediction pair : predPredictions) { - if ( pair.pred==SemanticContext.NONE ) { + if ( pair.pred==SemanticContext.Empty.Instance ) { predictions.set(pair.alt); if (!complete) { break; @@ -1468,7 +1471,7 @@ protected void closureCheckingStopState(ATNConfig config, for (int i = 0; i < config.context.size(); i++) { if ( config.context.getReturnState(i)==PredictionContext.EMPTY_RETURN_STATE ) { if (fullCtx) { - configs.add(new ATNConfig(config, config.state, PredictionContext.EMPTY), mergeCache); + configs.add(new ATNConfig(config, config.state, EmptyPredictionContext.Instance), mergeCache); continue; } else { @@ -2184,11 +2187,13 @@ public Parser getParser() { public static String getSafeEnv(String envName) { try { - return System.getenv(envName); - } - catch(SecurityException e) { - // use the default value - } + return AccessController.doPrivileged(new PrivilegedAction() { + @Override + public String run() { + return System.getenv(envName); + } + }); + } catch (SecurityException e) { } return null; } } diff --git a/runtime/Java/src/org/antlr/v4/runtime/atn/PredictionContext.java b/runtime/Java/src/org/antlr/v4/runtime/atn/PredictionContext.java index e2a90bf373..b5f204c237 100644 --- a/runtime/Java/src/org/antlr/v4/runtime/atn/PredictionContext.java +++ b/runtime/Java/src/org/antlr/v4/runtime/atn/PredictionContext.java @@ -6,6 +6,7 @@ package org.antlr.v4.runtime.atn; +import org.antlr.v4.runtime.ParserRuleContext; import org.antlr.v4.runtime.Recognizer; import org.antlr.v4.runtime.RuleContext; import org.antlr.v4.runtime.misc.DoubleKeyMap; @@ -19,14 +20,9 @@ import java.util.IdentityHashMap; import java.util.List; import java.util.Map; +import java.util.concurrent.atomic.AtomicInteger; public abstract class PredictionContext { - /** - * Represents {@code $} in local context prediction, which means wildcard. 
- * {@code *+x = *}. - */ - public static final EmptyPredictionContext EMPTY = new EmptyPredictionContext(); - /** * Represents {@code $} in an array in full context mode, when {@code $} * doesn't mean wildcard: {@code $ + x = [$,x]}. Here, @@ -36,8 +32,8 @@ public abstract class PredictionContext { private static final int INITIAL_HASH = 1; - public static int globalNodeCount = 0; - public final int id = globalNodeCount++; + private static final AtomicInteger globalNodeCount = new AtomicInteger(); + public final int id = globalNodeCount.getAndIncrement(); /** * Stores the computed hash code of this {@link PredictionContext}. The hash @@ -67,19 +63,19 @@ protected PredictionContext(int cachedHashCode) { } /** Convert a {@link RuleContext} tree to a {@link PredictionContext} graph. - * Return {@link #EMPTY} if {@code outerContext} is empty or null. + * Return {@link EmptyPredictionContext#Instance} if {@code outerContext} is empty or null. */ public static PredictionContext fromRuleContext(ATN atn, RuleContext outerContext) { - if ( outerContext==null ) outerContext = RuleContext.EMPTY; + if ( outerContext==null ) outerContext = ParserRuleContext.EMPTY; // if we are in RuleContext of start rule, s, then PredictionContext // is EMPTY. Nobody called us. 
(if we are empty, return empty) - if ( outerContext.parent==null || outerContext==RuleContext.EMPTY ) { - return PredictionContext.EMPTY; + if ( outerContext.parent==null || outerContext==ParserRuleContext.EMPTY ) { + return EmptyPredictionContext.Instance; } // If we have a parent, convert it to a PredictionContext graph - PredictionContext parent = EMPTY; + PredictionContext parent = EmptyPredictionContext.Instance; parent = PredictionContext.fromRuleContext(atn, outerContext.parent); ATNState state = atn.states.get(outerContext.invokingState); @@ -93,9 +89,9 @@ public static PredictionContext fromRuleContext(ATN atn, RuleContext outerContex public abstract int getReturnState(int index); - /** This means only the {@link #EMPTY} (wildcard? not sure) context is in set. */ + /** This means only the {@link EmptyPredictionContext#Instance} (wildcard? not sure) context is in set. */ public boolean isEmpty() { - return this == EMPTY; + return this == EmptyPredictionContext.Instance; } public boolean hasEmptyPath() { @@ -270,18 +266,18 @@ public static PredictionContext mergeSingletons( /** * Handle case where at least one of {@code a} or {@code b} is - * {@link #EMPTY}. In the following diagrams, the symbol {@code $} is used - * to represent {@link #EMPTY}. + * {@link EmptyPredictionContext#Instance}. In the following diagrams, the symbol {@code $} is used + * to represent {@link EmptyPredictionContext#Instance}. * *

    Local-Context Merges

    * *

    These local-context merge operations are used when {@code rootIsWildcard} * is true.

    * - *

    {@link #EMPTY} is superset of any graph; return {@link #EMPTY}.
    + *

    {@link EmptyPredictionContext#Instance} is superset of any graph; return {@link EmptyPredictionContext#Instance}.
    *

    * - *

    {@link #EMPTY} and anything is {@code #EMPTY}, so merged parent is + *

    {@link EmptyPredictionContext#Instance} and anything is {@code #EMPTY}, so merged parent is * {@code #EMPTY}; return left graph.
    *

    * @@ -295,7 +291,7 @@ public static PredictionContext mergeSingletons( * *

    * - *

    Must keep all contexts; {@link #EMPTY} in array is a special value (and + *

    Must keep all contexts; {@link EmptyPredictionContext#Instance} in array is a special value (and * null parent).
    *

    * @@ -311,19 +307,19 @@ public static PredictionContext mergeRoot(SingletonPredictionContext a, boolean rootIsWildcard) { if ( rootIsWildcard ) { - if ( a == EMPTY ) return EMPTY; // * + b = * - if ( b == EMPTY ) return EMPTY; // a + * = * + if ( a == EmptyPredictionContext.Instance) return EmptyPredictionContext.Instance; // * + b = * + if ( b == EmptyPredictionContext.Instance) return EmptyPredictionContext.Instance; // a + * = * } else { - if ( a == EMPTY && b == EMPTY ) return EMPTY; // $ + $ = $ - if ( a == EMPTY ) { // $ + x = [x,$] + if ( a == EmptyPredictionContext.Instance && b == EmptyPredictionContext.Instance) return EmptyPredictionContext.Instance; // $ + $ = $ + if ( a == EmptyPredictionContext.Instance) { // $ + x = [x,$] int[] payloads = {b.returnState, EMPTY_RETURN_STATE}; PredictionContext[] parents = {b.parent, null}; PredictionContext joined = new ArrayPredictionContext(parents, payloads); return joined; } - if ( b == EMPTY ) { // x + $ = [x,$] ($ is always last if present) + if ( b == EmptyPredictionContext.Instance) { // x + $ = [x,$] ($ is always last if present) int[] payloads = {a.returnState, EMPTY_RETURN_STATE}; PredictionContext[] parents = {a.parent, null}; PredictionContext joined = @@ -521,7 +517,7 @@ public int compare(PredictionContext o1, PredictionContext o2) { } for (PredictionContext current : nodes) { - if ( current==EMPTY ) continue; + if ( current== EmptyPredictionContext.Instance) continue; for (int i = 0; i < current.size(); i++) { if ( current.getParent(i)==null ) continue; String s = String.valueOf(current.id); @@ -585,7 +581,7 @@ public static PredictionContext getCachedContext( PredictionContext updated; if (parents.length == 0) { - updated = EMPTY; + updated = EmptyPredictionContext.Instance; } else if (parents.length == 1) { updated = SingletonPredictionContext.create(parents[0], context.getReturnState(0)); @@ -651,7 +647,7 @@ public String toString(Recognizer recog) { } public String[] toStrings(Recognizer 
recognizer, int currentState) { - return toStrings(recognizer, EMPTY, currentState); + return toStrings(recognizer, EmptyPredictionContext.Instance, currentState); } // FROM SAM diff --git a/runtime/Java/src/org/antlr/v4/runtime/atn/PredictionContextCache.java b/runtime/Java/src/org/antlr/v4/runtime/atn/PredictionContextCache.java index 11711ee226..0db6f4d1fd 100644 --- a/runtime/Java/src/org/antlr/v4/runtime/atn/PredictionContextCache.java +++ b/runtime/Java/src/org/antlr/v4/runtime/atn/PredictionContextCache.java @@ -22,7 +22,7 @@ public class PredictionContextCache { * Protect shared cache from unsafe thread access. */ public PredictionContext add(PredictionContext ctx) { - if ( ctx==PredictionContext.EMPTY ) return PredictionContext.EMPTY; + if ( ctx==EmptyPredictionContext.Instance ) return EmptyPredictionContext.Instance; PredictionContext existing = cache.get(ctx); if ( existing!=null ) { // System.out.println(name+" reuses "+existing); diff --git a/runtime/Java/src/org/antlr/v4/runtime/atn/PredictionMode.java b/runtime/Java/src/org/antlr/v4/runtime/atn/PredictionMode.java index 51a51d2c50..b023606f02 100644 --- a/runtime/Java/src/org/antlr/v4/runtime/atn/PredictionMode.java +++ b/runtime/Java/src/org/antlr/v4/runtime/atn/PredictionMode.java @@ -228,7 +228,7 @@ public static boolean hasSLLConflictTerminatingPrediction(PredictionMode mode, A // dup configs, tossing out semantic predicates ATNConfigSet dup = new ATNConfigSet(); for (ATNConfig c : configs) { - c = new ATNConfig(c,SemanticContext.NONE); + c = new ATNConfig(c,SemanticContext.Empty.Instance); dup.add(c); } configs = dup; diff --git a/runtime/Java/src/org/antlr/v4/runtime/atn/SemanticContext.java b/runtime/Java/src/org/antlr/v4/runtime/atn/SemanticContext.java index 299ec99965..783b8ea454 100644 --- a/runtime/Java/src/org/antlr/v4/runtime/atn/SemanticContext.java +++ b/runtime/Java/src/org/antlr/v4/runtime/atn/SemanticContext.java @@ -28,12 +28,6 @@ * {@link SemanticContext} within the scope of 
this outer class.

    */ public abstract class SemanticContext { - /** - * The default {@link SemanticContext}, which is semantically equivalent to - * a predicate of the form {@code {true}?}. - */ - public static final SemanticContext NONE = new Predicate(); - /** * For context independent predicates, we evaluate them without a local * context (i.e., null context). That way, we can evaluate them without @@ -57,7 +51,7 @@ public abstract class SemanticContext { * @return The simplified semantic context after precedence predicates are * evaluated, which will be one of the following values. *
      - *
    • {@link #NONE}: if the predicate simplifies to {@code true} after + *
    • {@link Empty#Instance}: if the predicate simplifies to {@code true} after * precedence predicates are evaluated.
    • *
    • {@code null}: if the predicate simplifies to {@code false} after * precedence predicates are evaluated.
    • @@ -71,6 +65,19 @@ public SemanticContext evalPrecedence(Recognizer parser, RuleContext parser return this; } + public static class Empty extends SemanticContext { + /** + * The default {@link SemanticContext}, which is semantically equivalent to + * a predicate of the form {@code {true}?}. + */ + public static final Empty Instance = new Empty(); + + @Override + public boolean eval(Recognizer parser, RuleContext parserCallStack) { + return false; + } + } + public static class Predicate extends SemanticContext { public final int ruleIndex; public final int predIndex; @@ -139,7 +146,7 @@ public boolean eval(Recognizer parser, RuleContext parserCallStack) { @Override public SemanticContext evalPrecedence(Recognizer parser, RuleContext parserCallStack) { if (parser.precpred(parserCallStack, precedence)) { - return SemanticContext.NONE; + return Empty.Instance; } else { return null; @@ -266,7 +273,7 @@ public SemanticContext evalPrecedence(Recognizer parser, RuleContext parse // The AND context is false if any element is false return null; } - else if (evaluated != NONE) { + else if (evaluated != Empty.Instance) { // Reduce the result by skipping true elements operands.add(evaluated); } @@ -278,7 +285,7 @@ else if (evaluated != NONE) { if (operands.isEmpty()) { // all elements were true, so the AND context is true - return NONE; + return Empty.Instance; } SemanticContext result = operands.get(0); @@ -359,9 +366,9 @@ public SemanticContext evalPrecedence(Recognizer parser, RuleContext parse for (SemanticContext context : opnds) { SemanticContext evaluated = context.evalPrecedence(parser, parserCallStack); differs |= (evaluated != context); - if (evaluated == NONE) { + if (evaluated == Empty.Instance) { // The OR context is true if any element is true - return NONE; + return Empty.Instance; } else if (evaluated != null) { // Reduce the result by skipping false elements @@ -393,8 +400,8 @@ public String toString() { } public static SemanticContext and(SemanticContext 
a, SemanticContext b) { - if ( a == null || a == NONE ) return b; - if ( b == null || b == NONE ) return a; + if ( a == null || a == Empty.Instance ) return b; + if ( b == null || b == Empty.Instance ) return a; AND result = new AND(a, b); if (result.opnds.length == 1) { return result.opnds[0]; @@ -410,7 +417,7 @@ public static SemanticContext and(SemanticContext a, SemanticContext b) { public static SemanticContext or(SemanticContext a, SemanticContext b) { if ( a == null ) return b; if ( b == null ) return a; - if ( a == NONE || b == NONE ) return NONE; + if ( a == Empty.Instance || b == Empty.Instance ) return Empty.Instance; OR result = new OR(a, b); if (result.opnds.length == 1) { return result.opnds[0]; diff --git a/runtime/Java/src/org/antlr/v4/runtime/atn/SingletonPredictionContext.java b/runtime/Java/src/org/antlr/v4/runtime/atn/SingletonPredictionContext.java index ca5ea66097..98631f23b3 100644 --- a/runtime/Java/src/org/antlr/v4/runtime/atn/SingletonPredictionContext.java +++ b/runtime/Java/src/org/antlr/v4/runtime/atn/SingletonPredictionContext.java @@ -20,7 +20,7 @@ public class SingletonPredictionContext extends PredictionContext { public static SingletonPredictionContext create(PredictionContext parent, int returnState) { if ( returnState == EMPTY_RETURN_STATE && parent == null ) { // someone can pass in the bits of an array ctx that mean $ - return EMPTY; + return EmptyPredictionContext.Instance; } return new SingletonPredictionContext(parent, returnState); } diff --git a/runtime/Java/src/org/antlr/v4/runtime/misc/Array2DHashSet.java b/runtime/Java/src/org/antlr/v4/runtime/misc/Array2DHashSet.java index 9ec8566852..3f6f5afa15 100644 --- a/runtime/Java/src/org/antlr/v4/runtime/misc/Array2DHashSet.java +++ b/runtime/Java/src/org/antlr/v4/runtime/misc/Array2DHashSet.java @@ -26,10 +26,12 @@ public class Array2DHashSet implements Set { /** How many elements in set */ protected int n = 0; - protected int threshold = (int)Math.floor(INITAL_CAPACITY * 
LOAD_FACTOR); // when to expand - protected int currentPrime = 1; // jump by 4 primes each expand or whatever - protected int initialBucketCapacity = INITAL_BUCKET_CAPACITY; + + /** when to expand */ + protected int threshold; + protected final int initialCapacity; + protected final int initialBucketCapacity; public Array2DHashSet() { this(null, INITAL_CAPACITY, INITAL_BUCKET_CAPACITY); @@ -45,8 +47,10 @@ public Array2DHashSet(AbstractEqualityComparator comparator, int init } this.comparator = comparator; - this.buckets = createBuckets(initialCapacity); + this.initialCapacity = initialCapacity; this.initialBucketCapacity = initialBucketCapacity; + this.buckets = createBuckets(initialCapacity); + this.threshold = (int)Math.floor(initialCapacity * LOAD_FACTOR); } /** @@ -381,9 +385,9 @@ public boolean removeAll(Collection c) { @Override public void clear() { - buckets = createBuckets(INITAL_CAPACITY); n = 0; - threshold = (int)Math.floor(INITAL_CAPACITY * LOAD_FACTOR); + buckets = createBuckets(this.initialCapacity); + threshold = (int)Math.floor(this.initialCapacity * LOAD_FACTOR); } @Override diff --git a/runtime/Java/src/org/antlr/v4/runtime/misc/FlexibleHashMap.java b/runtime/Java/src/org/antlr/v4/runtime/misc/FlexibleHashMap.java index 3c49eb2d0b..cc28bb0edc 100644 --- a/runtime/Java/src/org/antlr/v4/runtime/misc/FlexibleHashMap.java +++ b/runtime/Java/src/org/antlr/v4/runtime/misc/FlexibleHashMap.java @@ -41,10 +41,12 @@ public String toString() { /** How many elements in set */ protected int n = 0; - protected int threshold = (int)(INITAL_CAPACITY * LOAD_FACTOR); // when to expand - protected int currentPrime = 1; // jump by 4 primes each expand or whatever - protected int initialBucketCapacity = INITAL_BUCKET_CAPACITY; + + /** when to expand */ + protected int threshold; + protected final int initialCapacity; + protected final int initialBucketCapacity; public FlexibleHashMap() { this(null, INITAL_CAPACITY, INITAL_BUCKET_CAPACITY); @@ -60,8 +62,10 @@ public 
FlexibleHashMap(AbstractEqualityComparator comparator, int ini } this.comparator = comparator; - this.buckets = createEntryListArray(initialBucketCapacity); + this.initialCapacity = initialCapacity; this.initialBucketCapacity = initialBucketCapacity; + this.threshold = (int)Math.floor(initialCapacity * LOAD_FACTOR); + this.buckets = createEntryListArray(initialBucketCapacity); } private static LinkedList>[] createEntryListArray(int length) { @@ -209,8 +213,9 @@ public boolean isEmpty() { @Override public void clear() { - buckets = createEntryListArray(INITAL_CAPACITY); + buckets = createEntryListArray(this.initialCapacity); n = 0; + threshold = (int)Math.floor(this.initialCapacity * LOAD_FACTOR); } @Override diff --git a/runtime/Java/src/org/antlr/v4/runtime/misc/IntegerList.java b/runtime/Java/src/org/antlr/v4/runtime/misc/IntegerList.java index d6c700d8f6..9381b7c997 100644 --- a/runtime/Java/src/org/antlr/v4/runtime/misc/IntegerList.java +++ b/runtime/Java/src/org/antlr/v4/runtime/misc/IntegerList.java @@ -17,7 +17,7 @@ */ public class IntegerList { - private static int[] EMPTY_DATA = new int[0]; + private final static int[] EMPTY_DATA = new int[0]; private static final int INITIAL_SIZE = 4; private static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8; diff --git a/runtime/Java/src/org/antlr/v4/runtime/misc/Interval.java b/runtime/Java/src/org/antlr/v4/runtime/misc/Interval.java index ab3f46ba46..a07d035407 100644 --- a/runtime/Java/src/org/antlr/v4/runtime/misc/Interval.java +++ b/runtime/Java/src/org/antlr/v4/runtime/misc/Interval.java @@ -11,16 +11,11 @@ public class Interval { public static final Interval INVALID = new Interval(-1,-2); - static Interval[] cache = new Interval[INTERVAL_POOL_MAX_VALUE+1]; + static final Interval[] cache = new Interval[INTERVAL_POOL_MAX_VALUE+1]; public int a; public int b; - public static int creates = 0; - public static int misses = 0; - public static int hits = 0; - public static int outOfRange = 0; - public Interval(int 
a, int b) { this.a=a; this.b=b; } /** Interval objects are used readonly so share all with the diff --git a/runtime/Java/src/org/antlr/v4/runtime/tree/xpath/XPathLexer.java b/runtime/Java/src/org/antlr/v4/runtime/tree/xpath/XPathLexer.java index 4d120a4cca..5178b3e5b4 100644 --- a/runtime/Java/src/org/antlr/v4/runtime/tree/xpath/XPathLexer.java +++ b/runtime/Java/src/org/antlr/v4/runtime/tree/xpath/XPathLexer.java @@ -20,7 +20,7 @@ public class XPathLexer extends Lexer { public static final int TOKEN_REF=1, RULE_REF=2, ANYWHERE=3, ROOT=4, WILDCARD=5, BANG=6, ID=7, STRING=8; - public static String[] modeNames = { + public final static String[] modeNames = { "DEFAULT_MODE" }; diff --git a/runtime/JavaScript/package-lock.json b/runtime/JavaScript/package-lock.json index e37cde8f42..5a69a0c10d 100644 --- a/runtime/JavaScript/package-lock.json +++ b/runtime/JavaScript/package-lock.json @@ -1,12 +1,12 @@ { "name": "antlr4", - "version": "4.10.1", + "version": "4.11.0", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "antlr4", - "version": "4.10.1", + "version": "4.11.0", "license": "BSD-3-Clause", "devDependencies": { "@babel/preset-env": "^7.16.11", diff --git a/runtime/JavaScript/package.json b/runtime/JavaScript/package.json index 157249a387..8fda104885 100644 --- a/runtime/JavaScript/package.json +++ b/runtime/JavaScript/package.json @@ -1,6 +1,6 @@ { "name": "antlr4", - "version": "4.10.1", + "version": "4.11.0", "type": "module", "description": "JavaScript runtime for ANTLR4", "main": "src/antlr4/index.js", diff --git a/runtime/JavaScript/src/antlr4/Recognizer.js b/runtime/JavaScript/src/antlr4/Recognizer.js index e6b9852597..e1ab6b8bcb 100644 --- a/runtime/JavaScript/src/antlr4/Recognizer.js +++ b/runtime/JavaScript/src/antlr4/Recognizer.js @@ -15,7 +15,7 @@ export default class Recognizer { } checkVersion(toolVersion) { - const runtimeVersion = "4.10.1"; + const runtimeVersion = "4.11.0"; if (runtimeVersion!==toolVersion) { console.log("ANTLR 
runtime and generated code versions disagree: "+runtimeVersion+"!="+toolVersion); } diff --git a/runtime/Python2/setup.py b/runtime/Python2/setup.py index ebc4bf1d1c..f7a7e9bd8d 100644 --- a/runtime/Python2/setup.py +++ b/runtime/Python2/setup.py @@ -1,6 +1,6 @@ from setuptools import setup -v = '4.10.1' +v = '4.11.0' setup( name='antlr4-python2-runtime', version=v, @@ -10,5 +10,5 @@ package_dir={'': 'src'}, author='Eric Vergnaud, Terence Parr, Sam Harwell', author_email='eric.vergnaud@wanadoo.fr', - description='ANTLR %s runtime for Python 2.7.12' % v -) + description='ANTLR '+v+' runtime for Python 2.7.12' +) \ No newline at end of file diff --git a/runtime/Python2/src/antlr4/Recognizer.py b/runtime/Python2/src/antlr4/Recognizer.py index 8df1419933..c141d9cbed 100644 --- a/runtime/Python2/src/antlr4/Recognizer.py +++ b/runtime/Python2/src/antlr4/Recognizer.py @@ -30,7 +30,7 @@ def extractVersion(self, version): return major, minor def checkVersion(self, toolVersion): - runtimeVersion = "4.10.1" + runtimeVersion = "4.11.0" rvmajor, rvminor = self.extractVersion(runtimeVersion) tvmajor, tvminor = self.extractVersion(toolVersion) if rvmajor!=tvmajor or rvminor!=tvminor: diff --git a/runtime/Python2/src/antlr4/atn/SemanticContext.py b/runtime/Python2/src/antlr4/atn/SemanticContext.py index 03b6ebd3d8..22ec04221e 100644 --- a/runtime/Python2/src/antlr4/atn/SemanticContext.py +++ b/runtime/Python2/src/antlr4/atn/SemanticContext.py @@ -92,6 +92,9 @@ def orContext(a, b): def filterPrecedencePredicates(collection): return [context for context in collection if isinstance(context, PrecedencePredicate)] +class EmptySemanticContext(SemanticContext): + pass + class Predicate(SemanticContext): def __init__(self, ruleIndex=-1, predIndex=-1, isCtxDependent=False): @@ -315,4 +318,4 @@ def __unicode__(self): return buf.getvalue() -SemanticContext.NONE = Predicate() \ No newline at end of file +SemanticContext.NONE = EmptySemanticContext() diff --git a/runtime/Python3/setup.py 
b/runtime/Python3/setup.py index a5e0870b6a..c58524f557 100644 --- a/runtime/Python3/setup.py +++ b/runtime/Python3/setup.py @@ -1,6 +1,6 @@ from setuptools import setup -v = '4.10.1' +v = '4.11.0' setup( name='antlr4-python3-runtime', @@ -15,5 +15,5 @@ author='Eric Vergnaud, Terence Parr, Sam Harwell', author_email='eric.vergnaud@wanadoo.fr', entry_points={'console_scripts': ['pygrun=antlr4._pygrun:main']}, - description=f'ANTLR {v} runtime for Python 3' + description='ANTLR %s runtime for Python 3' % v ) diff --git a/runtime/Python3/src/antlr4/Recognizer.py b/runtime/Python3/src/antlr4/Recognizer.py index b94de2980c..6174969593 100644 --- a/runtime/Python3/src/antlr4/Recognizer.py +++ b/runtime/Python3/src/antlr4/Recognizer.py @@ -34,7 +34,7 @@ def extractVersion(self, version): return major, minor def checkVersion(self, toolVersion): - runtimeVersion = "4.10.1" + runtimeVersion = "4.11.0" rvmajor, rvminor = self.extractVersion(runtimeVersion) tvmajor, tvminor = self.extractVersion(toolVersion) if rvmajor!=tvmajor or rvminor!=tvminor: diff --git a/runtime/Python3/src/antlr4/atn/SemanticContext.py b/runtime/Python3/src/antlr4/atn/SemanticContext.py index 8f4dc31088..cd0d5ee417 100644 --- a/runtime/Python3/src/antlr4/atn/SemanticContext.py +++ b/runtime/Python3/src/antlr4/atn/SemanticContext.py @@ -94,6 +94,9 @@ def filterPrecedencePredicates(collection:set): return [context for context in collection if isinstance(context, PrecedencePredicate)] +class EmptySemanticContext(SemanticContext): + pass + class Predicate(SemanticContext): __slots__ = ('ruleIndex', 'predIndex', 'isCtxDependent') @@ -320,4 +323,4 @@ def __str__(self): return buf.getvalue() -SemanticContext.NONE = Predicate() +SemanticContext.NONE = EmptySemanticContext() diff --git a/Package.swift b/runtime/Swift/Package.swift similarity index 51% rename from Package.swift rename to runtime/Swift/Package.swift index b2c571d739..9de0120e70 100644 --- a/Package.swift +++ b/runtime/Swift/Package.swift @@ -1,6 
+1,5 @@ // swift-tools-version:5.6 - import PackageDescription let package = Package( @@ -8,29 +7,21 @@ let package = Package( products: [ .library( name: "Antlr4", - targets: ["Antlr4"] - ), - .library( - name: "Antlr4Dynamic", type: .dynamic, - targets: ["Antlr4"] - ), - .library( - name: "Antlr4Static", - type: .static, - targets: ["Antlr4"] - ) + targets: ["Antlr4"]), ], targets: [ .target( name: "Antlr4", dependencies: [], - path: "./runtime/Swift/Sources/Antlr4" - ), + path: "Sources/Antlr4"), .testTarget( name: "Antlr4Tests", dependencies: ["Antlr4"], - path:"./runtime/Swift/Tests/Antlr4Tests" + path:"Tests/Antlr4Tests", + exclude: [ + "VisitorBasic.g4", "VisitorCalc.g4", "LexerA.g4", "LexerB.g4", "Threading.g4" + ] ) ] ) diff --git a/runtime/Swift/Sources/Antlr4/ParserRuleContext.swift b/runtime/Swift/Sources/Antlr4/ParserRuleContext.swift index 060317920f..b78379f87a 100644 --- a/runtime/Swift/Sources/Antlr4/ParserRuleContext.swift +++ b/runtime/Swift/Sources/Antlr4/ParserRuleContext.swift @@ -5,28 +5,30 @@ /// A rule invocation record for parsing. -/// +/// /// Contains all of the information about the current rule not stored in the /// RuleContext. It handles parse tree children list, Any ATN state /// tracing, and the default values available for rule invocations: /// start, stop, rule index, current alt number. -/// +/// /// Subclasses made for each rule and grammar track the parameters, /// return values, locals, and labels specific to that rule. These /// are the objects that are returned from rules. -/// +/// /// Note text is not an actual field of a rule return value; it is computed /// from start and stop using the input stream's toString() method. I /// could add a ctor to this so that we can pass in and store the input /// stream, but I'm not sure we want to do that. It would seem to be undefined /// to get the .text property anyway if the rule matches tokens from multiple /// input streams. 
-/// +/// /// I do not use getters for fields of objects that are used simply to /// group values such as this aggregate. The getters/setters are there to /// satisfy the superclass interface. -/// +/// open class ParserRuleContext: RuleContext { + public static let EMPTY = ParserRuleContext() + public var visited = false /// If we are debugging or building a parse tree for a visitor, @@ -34,7 +36,7 @@ open class ParserRuleContext: RuleContext { /// with this rule's context. This is empty for parsing w/o tree constr. /// operation because we don't the need to track the details about /// how we parse this rule. - /// + /// public var children: [ParseTree]? /// For debugging/tracing purposes, we want to track all of the nodes in @@ -44,23 +46,23 @@ open class ParserRuleContext: RuleContext { /// ATN nodes and other rules used to match rule invocations. It /// traces the rule invocation node itself but nothing inside that /// other rule's ATN submachine. - /// + /// /// There is NOT a one-to-one correspondence between the children and /// states list. There are typically many nodes in the ATN traversed /// for each element in the children list. For example, for a rule /// invocation there is the invoking state and the following state. - /// + /// /// The parser setState() method updates field s and adds it to this list /// if we are debugging/tracing. - /// + /// /// This does not trace states visited during prediction. - /// + /// public var start: Token?, stop: Token? - /// + /// /// The exception that forced this rule to return. If the rule successfully /// completed, this is `null`. - /// + /// public var exception: RecognitionException? public override init() { @@ -73,15 +75,15 @@ open class ParserRuleContext: RuleContext { /// COPY a ctx (I'm deliberately not using copy constructor) to avoid /// confusion with creating node with parent. Does not copy children. 
- /// + /// /// This is used in the generated parser code to flip a generic XContext /// node for rule X to a YContext for alt label Y. In that sense, it is /// not really a generic copy function. - /// + /// /// If we do an error sync() at start of a rule, we might add error nodes /// to the generic XContext so this function must copy those nodes to /// the YContext as well else they are lost! - /// + /// open func copyFrom(_ ctx: ParserRuleContext) { self.parent = ctx.parent self.invokingState = ctx.invokingState @@ -112,13 +114,13 @@ open class ParserRuleContext: RuleContext { /// internal and leaf nodes. Does not set parent link; /// other add methods must do that. Other addChild methods /// call this. - /// + /// /// We cannot set the parent pointer of the incoming node /// because the existing interfaces do not have a setParent() /// method and I don't want to break backward compatibility for this. - /// + /// /// - Since: 4.7 - /// + /// open func addAnyChild(_ t: ParseTree) { if children == nil { children = [ParseTree]() @@ -146,7 +148,7 @@ open class ParserRuleContext: RuleContext { /// Used by enterOuterAlt to toss out a RuleContext previously added as /// we entered a rule. If we have # label, we will need to remove /// generic ruleContext object. - /// + /// open func removeLastChild() { children?.removeLast() } @@ -241,19 +243,19 @@ open class ParserRuleContext: RuleContext { return Interval.of(start.getTokenIndex(), stop.getTokenIndex()) } - /// + /// /// Get the initial token in this context. /// Note that the range from start to stop is inclusive, so for rules that do not consume anything /// (for example, zero length or error productions) this token may exceed stop. - /// + /// open func getStart() -> Token? { return start } - /// + /// /// Get the final token in this context. 
/// Note that the range from start to stop is inclusive, so for rules that do not consume anything /// (for example, zero length or error productions) this token may precede start. - /// + /// open func getStop() -> Token? { return stop } diff --git a/runtime/Swift/Sources/Antlr4/RuleContext.swift b/runtime/Swift/Sources/Antlr4/RuleContext.swift index 0afad6065e..0178dc5a78 100644 --- a/runtime/Swift/Sources/Antlr4/RuleContext.swift +++ b/runtime/Swift/Sources/Antlr4/RuleContext.swift @@ -5,20 +5,20 @@ /// A rule context is a record of a single rule invocation. -/// +/// /// We form a stack of these context objects using the parent /// pointer. A parent pointer of null indicates that the current /// context is the bottom of the stack. The ParserRuleContext subclass /// as a children list so that we can turn this data structure into a /// tree. -/// +/// /// The root node always has a null pointer and invokingState of ATNState.INVALID_STATE_NUMBER. -/// +/// /// Upon entry to parsing, the first invoked rule function creates a /// context object (asubclass specialized for that rule such as /// SContext) and makes it the root of a parse tree, recorded by field /// Parser._ctx. -/// +/// /// public final SContext s() throws RecognitionException { /// SContext _localctx = new SContext(_ctx, getState()); <-- create new node /// enterRule(_localctx, 0, RULE_s); <-- push it @@ -26,38 +26,36 @@ /// exitRule(); <-- pop back to _localctx /// return _localctx; /// } -/// +/// /// A subsequent rule invocation of r from the start rule s pushes a /// new context object for r whose parent points at s and use invoking /// state is the state with r emanating as edge label. -/// +/// /// The invokingState fields from a context object to the root /// together form a stack of rule indication states where the root /// (bottom of the stack) has a -1 sentinel value. 
If we invoke start /// symbol s then call r1, which calls r2, the would look like /// this: -/// +/// /// SContext[-1] <- root node (bottom of the stack) /// R1Context[p] <- p in rule s called r1 /// R2Context[q] <- q in rule r1 called r2 -/// +/// /// So the top of the stack, _ctx, represents a call to the current /// rule and it holds the return address from another rule that invoke /// to this rule. To invoke a rule, we must always have a current context. -/// +/// /// The parent contexts are useful for computing lookahead sets and /// getting error information. -/// +/// /// These objects are used during parsing and prediction. /// For the special case of parsers, we use the subclass /// ParserRuleContext. -/// +/// /// - SeeAlso: org.antlr.v4.runtime.ParserRuleContext -/// +/// open class RuleContext: RuleNode { - public static let EMPTY = ParserRuleContext() - /// What context invoked this rule? public weak var parent: RuleContext? @@ -65,7 +63,7 @@ open class RuleContext: RuleNode { /// The "return address" is the followState of invokingState /// If parent is null, this should be ATNState.INVALID_STATE_NUMBER /// this context object represents the start rule. - /// + /// public var invokingState = ATNState.INVALID_STATE_NUMBER public init() { @@ -89,7 +87,7 @@ open class RuleContext: RuleNode { /// A context is empty if there is no invoking state; meaning nobody called /// current context. - /// + /// open func isEmpty() -> Bool { return invokingState == ATNState.INVALID_STATE_NUMBER } @@ -118,11 +116,11 @@ open class RuleContext: RuleNode { /// Return the combined text of all child nodes. This method only considers /// tokens which have been added to the parse tree. - /// + /// /// Since tokens on hidden channels (e.g. whitespace or comments) are not /// added to the parse trees, they will not appear in the output of this /// method. 
- /// + /// open func getText() -> String { let length = getChildCount() @@ -174,7 +172,7 @@ open class RuleContext: RuleNode { /// Print out a whole tree, not just a node, in LISP format /// (root child1 .. childN). Print just a node if this is a leaf. - /// + /// public func toStringTree(_ ruleNames: [String]?) -> String { return Trees.toStringTree(self, ruleNames) } diff --git a/runtime/Swift/Sources/Antlr4/RuntimeMetaData.swift b/runtime/Swift/Sources/Antlr4/RuntimeMetaData.swift index d08a985e69..6138856300 100644 --- a/runtime/Swift/Sources/Antlr4/RuntimeMetaData.swift +++ b/runtime/Swift/Sources/Antlr4/RuntimeMetaData.swift @@ -63,7 +63,7 @@ public class RuntimeMetaData { /// omitted, the `-` (hyphen-minus) appearing before it is also /// omitted. /// - public static let VERSION: String = "4.10.1" + public static let VERSION: String = "4.11.0" /// /// Gets the currently executing version of the ANTLR 4 runtime library. diff --git a/runtime/Swift/Sources/Antlr4/UnbufferedCharStream.swift b/runtime/Swift/Sources/Antlr4/UnbufferedCharStream.swift index 6b2841d2ae..4d1c0f610a 100644 --- a/runtime/Swift/Sources/Antlr4/UnbufferedCharStream.swift +++ b/runtime/Swift/Sources/Antlr4/UnbufferedCharStream.swift @@ -334,17 +334,15 @@ fileprivate struct UInt8StreamIterator: IteratorProtocol { } switch stream.streamStatus { - case .notOpen, .writing, .closed: - preconditionFailure() - case .atEnd: - return nil - case .error: - hasErrorOccurred = true - return nil - case .opening, .open, .reading: - break - @unknown default: - fatalError() + case .notOpen, .writing, .closed: + preconditionFailure() + case .atEnd: + return nil + case .error: + hasErrorOccurred = true + return nil + case .opening, .open, .reading: + break } let count = stream.read(&buffer, maxLength: buffer.count) diff --git a/runtime/Swift/Sources/Antlr4/atn/ATNConfig.swift b/runtime/Swift/Sources/Antlr4/atn/ATNConfig.swift index 5e5539c1fa..16bdbbd3a6 100644 --- 
a/runtime/Swift/Sources/Antlr4/atn/ATNConfig.swift +++ b/runtime/Swift/Sources/Antlr4/atn/ATNConfig.swift @@ -1,57 +1,57 @@ -/// +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. -/// +/// -/// +/// /// A tuple: (ATN state, predicted alt, syntactic, semantic context). /// The syntactic context is a graph-structured stack node whose /// path(s) to the root is the rule invocation(s) /// chain used to arrive at the state. The semantic context is /// the tree of semantic predicates encountered before reaching /// an ATN state. -/// +/// public class ATNConfig: Hashable, CustomStringConvertible { - /// + /// /// This field stores the bit mask for implementing the /// _#isPrecedenceFilterSuppressed_ property as a bit within the /// existing _#reachesIntoOuterContext_ field. - /// + /// private static let SUPPRESS_PRECEDENCE_FILTER: Int = 0x40000000 - /// + /// /// The ATN state associated with this configuration - /// + /// public final let state: ATNState - /// + /// /// What alt (or lexer rule) is predicted by this configuration - /// + /// public final let alt: Int - /// + /// /// The stack of invoking states leading to the rule/states associated /// with this config. We track only those contexts pushed during /// execution of the ATN simulator. - /// + /// public internal(set) final var context: PredictionContext? - /// + /// /// We cannot execute predicates dependent upon local context unless /// we know for sure we are in the correct context. Because there is /// no way to do this efficiently, we simply cannot evaluate /// dependent predicates unless we are in the rule that initially /// invokes the ATN simulator. - /// - /// + /// + /// /// closure() tracks the depth of how far we dip into the outer context: /// depth > 0. Note that it may not be totally accurate depth since I /// don't ever decrement. 
TODO: make it a boolean then - /// - /// + /// + /// /// For memory efficiency, the _#isPrecedenceFilterSuppressed_ method /// is also backed by this field. Since the field is publicly accessible, the /// highest bit which would not cause the value to become negative is used to @@ -61,7 +61,7 @@ public class ATNConfig: Hashable, CustomStringConvertible { /// constructors as well as certain operations like /// _org.antlr.v4.runtime.atn.ATNConfigSet#add(org.antlr.v4.runtime.atn.ATNConfig, DoubleKeyMap)_ method are /// __completely__ unaffected by the change. - /// + /// public internal(set) final var reachesIntoOuterContext: Int = 0 public final let semanticContext: SemanticContext @@ -69,7 +69,7 @@ public class ATNConfig: Hashable, CustomStringConvertible { public init(_ state: ATNState, _ alt: Int, _ context: PredictionContext?, - _ semanticContext: SemanticContext = SemanticContext.NONE) { + _ semanticContext: SemanticContext = SemanticContext.Empty.Instance) { self.state = state self.alt = alt self.context = context @@ -105,11 +105,11 @@ public class ATNConfig: Hashable, CustomStringConvertible { self.reachesIntoOuterContext = c.reachesIntoOuterContext } - /// + /// /// This method gets the value of the _#reachesIntoOuterContext_ field /// as it existed prior to the introduction of the /// _#isPrecedenceFilterSuppressed_ method. - /// + /// public final func getOuterContextDepth() -> Int { return reachesIntoOuterContext & ~Self.SUPPRESS_PRECEDENCE_FILTER } @@ -145,7 +145,7 @@ public class ATNConfig: Hashable, CustomStringConvertible { if let context = context { buf += ",[\(context)]" } - if semanticContext != SemanticContext.NONE { + if semanticContext != SemanticContext.Empty.Instance { buf += ",\(semanticContext)" } let outerDepth = getOuterContextDepth() @@ -167,7 +167,7 @@ public func ==(lhs: ATNConfig, rhs: ATNConfig) -> Bool { if lhs === rhs { return true } - + if let l = lhs as? LexerATNConfig, let r = rhs as? 
LexerATNConfig { return l == r diff --git a/runtime/Swift/Sources/Antlr4/atn/ATNConfigSet.swift b/runtime/Swift/Sources/Antlr4/atn/ATNConfigSet.swift index 5d940a3ea0..946af07aba 100644 --- a/runtime/Swift/Sources/Antlr4/atn/ATNConfigSet.swift +++ b/runtime/Swift/Sources/Antlr4/atn/ATNConfigSet.swift @@ -1,23 +1,23 @@ -/// +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. -/// +/// -/// +/// /// Specialized _java.util.Set_`<`_org.antlr.v4.runtime.atn.ATNConfig_`>` that can track /// info about the set, with support for combining similar configurations using a /// graph-structured stack. /// public final class ATNConfigSet: Hashable, CustomStringConvertible { - /// + /// /// The reason that we need this is because we don't want the hash map to use /// the standard hash code and equals. We need all configurations with the same /// `(s,i,_,semctx)` to be equal. Unfortunately, this key effectively doubles /// the number of objects associated with ATNConfigs. The other solution is to /// use a hash table that lets us specify the equals/hashcode operation. - /// + /// /// @@ -29,27 +29,27 @@ public final class ATNConfigSet: Hashable, CustomStringConvertible { /// private var readonly = false - /// + /// /// All configs but hashed by (s, i, _, pi) not including context. Wiped out /// when we go readonly as this set becomes a DFA state. - /// + /// private var configLookup: LookupDictionary - /// + /// /// Track the elements as they are added to the set; supports get(i) - /// + /// public private(set) var configs = [ATNConfig]() // TODO: these fields make me pretty uncomfortable but nice to pack up info together, saves recomputation // TODO: can we track conflicts as they are added to save scanning configs later? 
public internal(set) var uniqueAlt = ATN.INVALID_ALT_NUMBER //TODO no default - /// + /// /// Currently this is only used when we detect SLL conflict; this does /// not necessarily represent the ambiguous alternatives. In fact, /// I should also point out that this seems to include predicated alternatives /// that have predicates that evaluate to false. Computed in computeTargetState(). - /// + /// internal var conflictingAlts: BitSet? // Used in parser and lexer. In lexer, it indicates we hit a pred @@ -59,11 +59,11 @@ public final class ATNConfigSet: Hashable, CustomStringConvertible { public internal(set) var dipsIntoOuterContext = false //TODO no default - /// + /// /// Indicates that this configuration set is part of a full context /// LL prediction. It will be used to determine how to merge $. With SLL /// it's a wildcard whereas it is not for LL context merge. - /// + /// public let fullCtx: Bool private var cachedHashCode = -1 @@ -80,16 +80,16 @@ public final class ATNConfigSet: Hashable, CustomStringConvertible { return try add(config, &mergeCache) } - /// + /// /// Adding a new config means merging contexts with existing configs for /// `(s, i, pi, _)`, where `s` is the /// _org.antlr.v4.runtime.atn.ATNConfig#state_, `i` is the _org.antlr.v4.runtime.atn.ATNConfig#alt_, and /// `pi` is the _org.antlr.v4.runtime.atn.ATNConfig#semanticContext_. We use /// `(s,i,pi)` as key. - /// + /// /// This method updates _#dipsIntoOuterContext_ and /// _#hasSemanticContext_ when necessary. 
- /// + /// @discardableResult public func add( _ config: ATNConfig, @@ -98,7 +98,7 @@ public final class ATNConfigSet: Hashable, CustomStringConvertible { throw ANTLRError.illegalState(msg: "This set is readonly") } - if config.semanticContext != SemanticContext.NONE { + if config.semanticContext != SemanticContext.Empty.Instance { hasSemanticContext = true } if config.getOuterContextDepth() > 0 { @@ -137,9 +137,9 @@ public final class ATNConfigSet: Hashable, CustomStringConvertible { } - /// + /// /// Return a List holding list of configs - /// + /// public func elements() -> [ATNConfig] { return configs } @@ -152,14 +152,14 @@ public final class ATNConfigSet: Hashable, CustomStringConvertible { return states } - /// + /// /// Gets the complete set of represented alternatives for the configuration /// set. - /// + /// /// - returns: the set of represented alternatives in this configuration set - /// + /// /// - since: 4.3 - /// + /// public func getAlts() -> BitSet { let alts = BitSet() for config in configs { @@ -171,7 +171,7 @@ public final class ATNConfigSet: Hashable, CustomStringConvertible { public func getPredicates() -> [SemanticContext] { var preds = [SemanticContext]() for config in configs { - if config.semanticContext != SemanticContext.NONE { + if config.semanticContext != SemanticContext.Empty.Instance { preds.append(config.semanticContext) } } @@ -279,11 +279,11 @@ public final class ATNConfigSet: Hashable, CustomStringConvertible { return buf } - /// + /// /// override /// public func toArray(a : [T]) -> [T] { /// return configLookup.toArray(a); - /// + /// private func configHash(_ stateNumber: Int,_ context: PredictionContext?) 
-> Int{ var hashCode = MurmurHash.initialize(7) hashCode = MurmurHash.update(hashCode, stateNumber) @@ -428,11 +428,11 @@ public final class ATNConfigSet: Hashable, CustomStringConvertible { } if !config.isPrecedenceFilterSuppressed() { - /// + /// /// In the future, this elimination step could be updated to also /// filter the prediction context for alternatives predicting alt>1 /// (basically a graph subtraction algorithm). - /// + /// let context = statesFromAlt1[config.state.stateNumber] if context != nil && context == config.context { // eliminated @@ -456,9 +456,9 @@ public final class ATNConfigSet: Hashable, CustomStringConvertible { var nPredAlts = 0 for i in 1...nalts { if altToPred[i] == nil { - altToPred[i] = SemanticContext.NONE + altToPred[i] = SemanticContext.Empty.Instance } - else if altToPred[i] != SemanticContext.NONE { + else if altToPred[i] != SemanticContext.Empty.Instance { nPredAlts += 1 } } @@ -487,23 +487,23 @@ public final class ATNConfigSet: Hashable, CustomStringConvertible { return alts.getMinElement() } - /// + /// /// Walk the list of configurations and split them according to /// those that have preds evaluating to true/false. If no pred, assume /// true pred and include in succeeded set. Returns Pair of sets. - /// + /// /// Create a new set so as not to alter the incoming parameter. - /// + /// /// Assumption: the input stream has been restored to the starting point /// prediction, which is where predicates need to evaluate. 
- /// + /// public func splitAccordingToSemanticValidity( _ outerContext: ParserRuleContext, _ evalSemanticContext: (SemanticContext, ParserRuleContext, Int, Bool) throws -> Bool) rethrows -> (ATNConfigSet, ATNConfigSet) { let succeeded = ATNConfigSet(fullCtx) let failed = ATNConfigSet(fullCtx) for config in configs { - if config.semanticContext != SemanticContext.NONE { + if config.semanticContext != SemanticContext.Empty.Instance { let predicateEvaluationResult = try evalSemanticContext(config.semanticContext, outerContext, config.alt,fullCtx) if predicateEvaluationResult { try! succeeded.add(config) @@ -520,7 +520,7 @@ public final class ATNConfigSet: Hashable, CustomStringConvertible { public func dupConfigsWithoutSemanticPredicates() -> ATNConfigSet { let dup = ATNConfigSet() for config in configs { - let c = ATNConfig(config, SemanticContext.NONE) + let c = ATNConfig(config, SemanticContext.Empty.Instance) try! dup.add(c) } return dup diff --git a/runtime/Swift/Sources/Antlr4/atn/ATNDeserializer.swift b/runtime/Swift/Sources/Antlr4/atn/ATNDeserializer.swift index 8677ebf6fb..e9b5e71b9d 100644 --- a/runtime/Swift/Sources/Antlr4/atn/ATNDeserializer.swift +++ b/runtime/Swift/Sources/Antlr4/atn/ATNDeserializer.swift @@ -47,7 +47,7 @@ public class ATNDeserializer { continue } - var ruleIndex = data[p] + let ruleIndex = data[p] p += 1 let s = try stateFactory(stype, ruleIndex)! if stype == ATNState.LOOP_END { @@ -102,7 +102,7 @@ public class ATNDeserializer { ruleToStartState.append(startState) if atn.grammarType == ATNType.lexer { - var tokenType = data[p] + let tokenType = data[p] p += 1 ruleToTokenType.append(tokenType) } @@ -179,9 +179,9 @@ public class ATNDeserializer { for _ in 0.. [IntervalSet?]? 
{ - + guard let s = s else { return nil } @@ -39,7 +39,7 @@ public class LL1Analyzer { look[alt] = IntervalSet() var lookBusy = Set() let seeThruPreds = false // fail to get lookahead upon pred - _LOOK(s.transition(alt).target, nil, PredictionContext.EMPTY, + _LOOK(s.transition(alt).target, nil, EmptyPredictionContext.Instance, look[alt]!, &lookBusy, BitSet(), seeThruPreds, false) // Wipe out lookahead for this alternative if we found nothing // or we had a predicate when we !seeThruPreds @@ -50,44 +50,44 @@ public class LL1Analyzer { return look } - /// + /// /// Compute set of tokens that can follow `s` in the ATN in the /// specified `ctx`. - /// + /// /// If `ctx` is `null` and the end of the rule containing /// `s` is reached, _org.antlr.v4.runtime.Token#EPSILON_ is added to the result set. /// If `ctx` is not `null` and the end of the outermost rule is /// reached, _org.antlr.v4.runtime.Token#EOF_ is added to the result set. - /// + /// /// - parameter s: the ATN state /// - parameter ctx: the complete parser context, or `null` if the context /// should be ignored - /// + /// /// - returns: The set of tokens that can follow `s` in the ATN in the /// specified `ctx`. - /// + /// public func LOOK(_ s: ATNState, _ ctx: RuleContext?) -> IntervalSet { return LOOK(s, nil, ctx) } - /// + /// /// Compute set of tokens that can follow `s` in the ATN in the /// specified `ctx`. - /// + /// /// If `ctx` is `null` and the end of the rule containing /// `s` is reached, _org.antlr.v4.runtime.Token#EPSILON_ is added to the result set. /// If `ctx` is not `null` and the end of the outermost rule is /// reached, _org.antlr.v4.runtime.Token#EOF_ is added to the result set. - /// + /// /// - parameter s: the ATN state /// - parameter stopState: the ATN state to stop at. This can be a /// _org.antlr.v4.runtime.atn.BlockEndState_ to detect epsilon paths through a closure. 
/// - parameter ctx: the complete parser context, or `null` if the context /// should be ignored - /// + /// /// - returns: The set of tokens that can follow `s` in the ATN in the /// specified `ctx`. - /// + /// public func LOOK(_ s: ATNState, _ stopState: ATNState?, _ ctx: RuleContext?) -> IntervalSet { let r = IntervalSet() @@ -98,16 +98,16 @@ public class LL1Analyzer { return r } - /// + /// /// Compute set of tokens that can follow `s` in the ATN in the /// specified `ctx`. - /// + /// /// If `ctx` is `null` and `stopState` or the end of the /// rule containing `s` is reached, _org.antlr.v4.runtime.Token#EPSILON_ is added to /// the result set. If `ctx` is not `null` and `addEOF` is /// `true` and `stopState` or the end of the outermost rule is /// reached, _org.antlr.v4.runtime.Token#EOF_ is added to the result set. - /// + /// /// - parameter s: the ATN state. /// - parameter stopState: the ATN state to stop at. This can be a /// _org.antlr.v4.runtime.atn.BlockEndState_ to detect epsilon paths through a closure. @@ -127,7 +127,7 @@ public class LL1Analyzer { /// - parameter addEOF: Add _org.antlr.v4.runtime.Token#EOF_ to the result if the end of the /// outermost context is reached. This parameter has no effect if `ctx` /// is `null`. - /// + /// internal func _LOOK(_ s: ATNState, _ stopState: ATNState?, _ ctx: PredictionContext?, @@ -168,7 +168,7 @@ public class LL1Analyzer { return } - if ctx != PredictionContext.EMPTY { + if ctx != EmptyPredictionContext.Instance { let removed = try! calledRuleStack.get(s.ruleIndex!) try! calledRuleStack.clear(s.ruleIndex!) defer { diff --git a/runtime/Swift/Sources/Antlr4/atn/LexerATNConfig.swift b/runtime/Swift/Sources/Antlr4/atn/LexerATNConfig.swift index 38e55dc5e6..8c241a8ffd 100644 --- a/runtime/Swift/Sources/Antlr4/atn/LexerATNConfig.swift +++ b/runtime/Swift/Sources/Antlr4/atn/LexerATNConfig.swift @@ -1,14 +1,14 @@ -/// +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. 
/// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. -/// +/// public class LexerATNConfig: ATNConfig { - /// + /// /// This is the backing field for _#getLexerActionExecutor_. - /// + /// private let lexerActionExecutor: LexerActionExecutor? fileprivate let passedThroughNonGreedyDecision: Bool @@ -19,7 +19,7 @@ public class LexerATNConfig: ATNConfig { self.passedThroughNonGreedyDecision = false self.lexerActionExecutor = nil - super.init(state, alt, context, SemanticContext.NONE) + super.init(state, alt, context, SemanticContext.Empty.Instance) } public init(_ state: ATNState, @@ -29,7 +29,7 @@ public class LexerATNConfig: ATNConfig { self.lexerActionExecutor = lexerActionExecutor self.passedThroughNonGreedyDecision = false - super.init(state, alt, context, SemanticContext.NONE) + super.init(state, alt, context, SemanticContext.Empty.Instance) } public init(_ c: LexerATNConfig, _ state: ATNState) { @@ -60,10 +60,10 @@ public class LexerATNConfig: ATNConfig { return source.passedThroughNonGreedyDecision || target is DecisionState && (target as! DecisionState).nonGreedy } - /// + /// /// Gets the _org.antlr.v4.runtime.atn.LexerActionExecutor_ capable of executing the embedded /// action(s) for the current configuration. - /// + /// public final func getLexerActionExecutor() -> LexerActionExecutor? 
{ return lexerActionExecutor } diff --git a/runtime/Swift/Sources/Antlr4/atn/LexerATNSimulator.swift b/runtime/Swift/Sources/Antlr4/atn/LexerATNSimulator.swift index 5b373c6448..207d956305 100644 --- a/runtime/Swift/Sources/Antlr4/atn/LexerATNSimulator.swift +++ b/runtime/Swift/Sources/Antlr4/atn/LexerATNSimulator.swift @@ -389,7 +389,7 @@ open class LexerATNSimulator: ATNSimulator { final func computeStartState(_ input: CharStream, _ p: ATNState) throws -> ATNConfigSet { - let initialContext = PredictionContext.EMPTY + let initialContext = EmptyPredictionContext.Instance let configs = ATNConfigSet(true, isOrdered: true) let length = p.getNumberOfTransitions() for i in 0..? // LAME globals to avoid parameters!!!!! I need these down deep in predTransition @@ -389,18 +389,18 @@ open class ParserATNSimulator: ATNSimulator { } - /// + /// /// Performs ATN simulation to compute a predicted alternative based /// upon the remaining input, but also updates the DFA cache to avoid /// having to traverse the ATN again for the same input sequence. - /// + /// /// There are some key conditions we're looking for after computing a new /// set of ATN configs (proposed DFA state): /// if the set is empty, there is no viable alternative for current symbol /// does the state uniquely predict an alternative? /// does the state have a conflict that would prevent us from /// putting it on the work list? 
- /// + /// /// We also have some key operations to do: /// add an edge from previous DFA state to potentially new DFA state, D, /// upon current symbol but only if adding to work list, which means in all @@ -412,14 +412,14 @@ open class ParserATNSimulator: ATNSimulator { /// reporting an ambiguity /// reporting a context sensitivity /// reporting insufficient predicates - /// + /// /// cover these cases: /// dead end /// single alt /// single alt + preds /// conflict /// conflict + preds - /// + /// final func execATN(_ dfa: DFA, _ s0: DFAState, _ input: TokenStream, _ startIndex: Int, _ outerContext: ParserRuleContext) throws -> Int { @@ -537,17 +537,17 @@ open class ParserATNSimulator: ATNSimulator { } } - /// + /// /// Get an existing target state for an edge in the DFA. If the target state /// for the edge has not yet been computed or is otherwise not available, /// this method returns `null`. - /// + /// /// - parameter previousD: The current DFA state /// - parameter t: The next input symbol /// - returns: The existing target DFA state for the given input symbol /// `t`, or `null` if the target state for this edge is not /// already cached - /// + /// func getExistingTargetState(_ previousD: DFAState, _ t: Int) -> DFAState? { let edges = previousD.edges if edges == nil || (t + 1) < 0 || (t + 1) >= (edges!.count) { @@ -557,18 +557,18 @@ open class ParserATNSimulator: ATNSimulator { return edges![t + 1] } - /// + /// /// Compute a target state for an edge in the DFA, and attempt to add the /// computed state and corresponding edge to the DFA. - /// + /// /// - parameter dfa: The DFA /// - parameter previousD: The current DFA state /// - parameter t: The next input symbol - /// + /// /// - returns: The computed target DFA state for the given input symbol /// `t`. If `t` does not lead to a valid DFA state, this method /// returns _#ERROR_. 
- /// + /// func computeTargetState(_ dfa: DFA, _ previousD: DFAState, _ t: Int) throws -> DFAState { guard let reach = try computeReachSet(previousD.configs, t, false) else { @@ -721,30 +721,30 @@ open class ParserATNSimulator: ATNSimulator { // We do not check predicates here because we have checked them // on-the-fly when doing full context prediction. - /// + /// /// In non-exact ambiguity detection mode, we might actually be able to /// detect an exact ambiguity, but I'm not going to spend the cycles /// needed to check. We only emit ambiguity warnings in exact ambiguity /// mode. - /// + /// /// For example, we might know that we have conflicting configurations. /// But, that does not mean that there is no way forward without a /// conflict. It's possible to have nonconflicting alt subsets as in: - /// + /// /// LL altSubSets=[{1, 2}, {1, 2}, {1}, {1, 2}] - /// + /// /// from - /// + /// /// [(17,1,[5 $]), (13,1,[5 10 $]), (21,1,[5 10 $]), (11,1,[$]), /// (13,2,[5 10 $]), (21,2,[5 10 $]), (11,2,[$])] - /// + /// /// In this case, (17,1,[5 $]) indicates there is some next sequence that /// would resolve this without conflict to alternative 1. Any other viable /// next sequence, however, is associated with a conflict. We stop /// looking for input because no amount of further lookahead will alter /// the fact that we should predict alternative 1. We just can't say for /// sure that there is an ambiguity without looking further. - /// + /// reportAmbiguity(dfa, D, startIndex, input.index(), foundExactAmbig, reach.getAlts(), reach) } @@ -764,17 +764,17 @@ open class ParserATNSimulator: ATNSimulator { let intermediate = ATNConfigSet(fullCtx) - /// + /// /// Configurations already in a rule stop state indicate reaching the end /// of the decision rule (local context) or end of the start rule (full /// context). 
Once reached, these configurations are never updated by a /// closure operation, so they are handled separately for the performance /// advantage of having a smaller intermediate set when calling closure. - /// + /// /// For full-context reach operations, separate handling is required to /// ensure that the alternative matching the longest overall sequence is /// chosen when multiple such configurations can match the input. - /// + /// var skippedStopStates: [ATNConfig]? = nil // First figure out where we can reach on input t @@ -810,16 +810,16 @@ open class ParserATNSimulator: ATNSimulator { var reach: ATNConfigSet? = nil - /// + /// /// This block optimizes the reach operation for intermediate sets which /// trivially indicate a termination state for the overall /// adaptivePredict operation. - /// + /// /// The conditions assume that intermediate /// contains all configurations relevant to the reach set, but this /// condition is not true when one or more configurations have been /// withheld in skippedStopStates, or when the current symbol is EOF. - /// + /// if skippedStopStates == nil && t != CommonToken.EOF { if intermediate.size() == 1 { // Don't pursue the closure if there is just one state. @@ -836,10 +836,10 @@ open class ParserATNSimulator: ATNSimulator { } } - /// + /// /// If the reach set could not be trivially determined, perform a closure /// operation on the intermediate set to compute its initial value. - /// + /// if reach == nil { reach = ATNConfigSet(fullCtx) var closureBusy = Set() @@ -850,28 +850,28 @@ open class ParserATNSimulator: ATNSimulator { } if t == BufferedTokenStream.EOF { - /// + /// /// After consuming EOF no additional input is possible, so we are /// only interested in configurations which reached the end of the /// decision rule (local context) or end of the start rule (full /// context). Update reach to contain only these configurations. 
This /// handles both explicit EOF transitions in the grammar and implicit /// EOF transitions following the end of the decision or start rule. - /// + /// /// When reach==intermediate, no closure operation was performed. In /// this case, removeAllConfigsNotInRuleStopState needs to check for /// reachable rule stop states as well as configurations already in /// a rule stop state. - /// + /// /// This is handled before the configurations in skippedStopStates, /// because any configurations potentially added from that list are /// already guaranteed to meet this condition whether or not it's /// required. - /// + /// reach = removeAllConfigsNotInRuleStopState(reach!, reach! === intermediate) } - /// + /// /// If skippedStopStates is not null, then it contains at least one /// configuration. For full-context reach operations, these /// configurations reached the end of the start rule, in which case we @@ -879,7 +879,7 @@ open class ParserATNSimulator: ATNSimulator { /// closure operation reached such a state. This ensures adaptivePredict /// chooses an alternative matching the longest overall sequence when /// multiple alternatives are viable. - /// + /// if let reach = reach { if let skippedStopStates = skippedStopStates, (!fullCtx || !PredictionMode.hasConfigInRuleStopState(reach)) { assert(!skippedStopStates.isEmpty, "Expected: !skippedStopStates.isEmpty()") @@ -895,26 +895,26 @@ open class ParserATNSimulator: ATNSimulator { return reach } - /// + /// /// Return a configuration set containing only the configurations from /// `configs` which are in a _org.antlr.v4.runtime.atn.RuleStopState_. If all /// configurations in `configs` are already in a rule stop state, this /// method simply returns `configs`. 
- /// + /// /// When `lookToEndOfRule` is true, this method uses /// _org.antlr.v4.runtime.atn.ATN#nextTokens_ for each configuration in `configs` which is /// not already in a rule stop state to see if a rule stop state is reachable /// from the configuration via epsilon-only transitions. - /// + /// /// - parameter configs: the configuration set to update /// - parameter lookToEndOfRule: when true, this method checks for rule stop states /// reachable by epsilon-only transitions from each configuration in /// `configs`. - /// + /// /// - returns: `configs` if all configurations in `configs` are in a /// rule stop state, otherwise return a new configuration set containing only /// the configurations from `configs` which are in a rule stop state - /// + /// final func removeAllConfigsNotInRuleStopState(_ configs: ATNConfigSet, _ lookToEndOfRule: Bool) -> ATNConfigSet { return configs.removeAllConfigsNotInRuleStopState(&mergeCache,lookToEndOfRule,atn) } @@ -934,10 +934,10 @@ open class ParserATNSimulator: ATNSimulator { return configs } - /// + /// /// parrt internal source braindump that doesn't mess up /// external API spec. - /// + /// /// applyPrecedenceFilter is an optimization to avoid highly /// nonlinear prediction of expressions and other left recursive /// rules. The precedence predicates such as {3>=prec}? Are highly @@ -948,23 +948,23 @@ open class ParserATNSimulator: ATNSimulator { /// these predicates out of context, the resulting conflict leads /// to full LL evaluation and nonlinear prediction which shows up /// very clearly with fairly large expressions. - /// + /// /// Example grammar: - /// + /// /// e : e '*' e /// | e '+' e /// | INT /// ; - /// + /// /// We convert that to the following: - /// + /// /// e[int prec] /// : INT /// ( {3>=prec}? '*' e[4] /// | {2>=prec}? '+' e[3] /// )* /// ; - /// + /// /// The (..)* loop has a decision for the inner block as well as /// an enter or exit decision, which is what concerns us here. 
At /// the 1st + of input 1+2+3, the loop entry sees both predicates @@ -976,7 +976,7 @@ open class ParserATNSimulator: ATNSimulator { /// cannot evaluate those predicates because we have fallen off /// the edge of the stack and will in general not know which prec /// parameter is the right one to use in the predicate. - /// + /// /// Because we have special information, that these are precedence /// predicates, we can resolve them without failing over to full /// LL despite their context sensitive nature. We make an @@ -991,7 +991,7 @@ open class ParserATNSimulator: ATNSimulator { /// the same value and so we can decide to enter the loop instead /// of matching it later. That means we can strip out the other /// configuration for the exit branch. - /// + /// /// So imagine we have (14,1,$,{2>=prec}?) and then /// (14,2,$-dipsIntoOuterContext,{2>=prec}?). The optimization /// allows us to collapse these two configurations. We know that @@ -1003,33 +1003,33 @@ open class ParserATNSimulator: ATNSimulator { /// enter the loop as it is consistent with the notion of operator /// precedence. It's also how the full LL conflict resolution /// would work. - /// + /// /// The solution requires a different DFA start state for each /// precedence level. - /// + /// /// The basic filter mechanism is to remove configurations of the /// form (p, 2, pi) if (p, 1, pi) exists for the same p and pi. In /// other words, for the same ATN state and predicate context, /// remove any configuration associated with an exit branch if /// there is a configuration associated with the enter branch. - /// + /// /// It's also the case that the filter evaluates precedence /// predicates and resolves conflicts according to precedence /// levels. 
For example, for input 1+2+3 at the first +, we see /// prediction filtering - /// + /// /// [(11,1,[$],{3>=prec}?), (14,1,[$],{2>=prec}?), (5,2,[$],up=1), /// (11,2,[$],up=1), (14,2,[$],up=1)],hasSemanticContext=true,dipsIntoOuterContext - /// + /// /// to - /// + /// /// [(11,1,[$]), (14,1,[$]), (5,2,[$],up=1)],dipsIntoOuterContext - /// + /// /// This filters because {3>=prec}? evals to true and collapses /// (11,1,[$],{3>=prec}?) and (11,2,[$],up=1) since early conflict /// resolution based upon rules of operator precedence fits with /// our usual match first alt upon conflict. - /// + /// /// We noticed a problem where a recursive call resets precedence /// to 0. Sam's fix: each config has flag indicating if it has /// returned from an expr[0] call. then just don't filter any @@ -1040,15 +1040,15 @@ open class ParserATNSimulator: ATNSimulator { /// after leaving the rule stop state of the LR rule containing /// state p, corresponding to a rule invocation with precedence /// level 0" - /// + /// - /// + /// /// This method transforms the start state computed by /// _#computeStartState_ to the special start state used by a /// precedence DFA for a particular precedence value. The transformation /// process applies the following changes to the start state's configuration /// set. - /// + /// /// * Evaluate the precedence predicates for each configuration using /// _org.antlr.v4.runtime.atn.SemanticContext#evalPrecedence_. /// * When _org.antlr.v4.runtime.atn.ATNConfig#isPrecedenceFilterSuppressed_ is `false`, @@ -1056,7 +1056,7 @@ open class ParserATNSimulator: ATNSimulator { /// for which another configuration that predicts alternative 1 is in the /// same ATN state with the same prediction context. 
This transformation is /// valid for the following reasons: - /// + /// /// * The closure block cannot contain any epsilon transitions which bypass /// the body of the closure, so all states reachable via alternative 1 are /// part of the precedence alternatives of the transformed left-recursive @@ -1069,7 +1069,7 @@ open class ParserATNSimulator: ATNSimulator { /// _org.antlr.v4.runtime.atn.ATNConfig#isPrecedenceFilterSuppressed_ property marks ATN /// configurations which do not meet this condition, and therefore are not /// eligible for elimination during the filtering process. - /// + /// /// The prediction context must be considered by this filter to address /// situations like the following. /// ``` @@ -1085,13 +1085,13 @@ open class ParserATNSimulator: ATNSimulator { /// configurations distinguishes between them, and prevents the alternative /// which stepped out to `prog` (and then back in to `statement` /// from being eliminated by the filter. - /// + /// /// - parameter configs: The configuration set computed by /// _#computeStartState_ as the start state for the DFA. /// - returns: The transformed configuration set representing the start state /// for a precedence DFA at a particular precedence level (determined by /// calling _org.antlr.v4.runtime.Parser#getPrecedence_). - /// + /// final internal func applyPrecedenceFilter(_ configs: ATNConfigSet) throws -> ATNConfigSet { return try configs.applyPrecedenceFilter(&mergeCache,parser,_outerContext) } @@ -1109,18 +1109,18 @@ open class ParserATNSimulator: ATNSimulator { _ configs: ATNConfigSet, _ nalts: Int) -> [SemanticContext?]? { // REACH=[1|1|[]|0:0, 1|2|[]|0:1] - /// + /// /// altToPred starts as an array of all null contexts. The entry at index i /// corresponds to alternative i. altToPred[i] may have one of three values: /// 1. null: no ATNConfig c is found such that c.alt==i - /// 2. 
SemanticContext.NONE: At least one ATNConfig c exists such that - /// c.alt==i and c.semanticContext==SemanticContext.NONE. In other words, + /// 2. SemanticContext.Empty.Instance: At least one ATNConfig c exists such that + /// c.alt==i and c.semanticContext==SemanticContext.Empty.Instance. In other words, /// alt i has at least one unpredicated config. /// 3. Non-NONE Semantic Context: There exists at least one, and for all - /// ATNConfig c such that c.alt==i, c.semanticContext!=SemanticContext.NONE. - /// + /// ATNConfig c such that c.alt==i, c.semanticContext!=SemanticContext.Empty.Instance. + /// /// From this, it is clear that NONE||anything==NONE. - /// + /// let altToPred = configs.getPredsForAmbigAlts(ambigAlts,nalts) if debug { print("getPredsForAmbigAlts result \(String(describing: altToPred))") @@ -1134,13 +1134,13 @@ open class ParserATNSimulator: ATNSimulator { var containsPredicate = false for (i, pred) in altToPred.enumerated().dropFirst() { - // unpredicated is indicated by SemanticContext.NONE + // unpredicated is indicated by SemanticContext.Empty.Instance assert(pred != nil, "Expected: pred!=null") if let ambigAlts = ambigAlts, try! ambigAlts.get(i) { pairs.append(DFAState.PredPrediction(pred!, i)) } - if pred != SemanticContext.NONE { + if pred != SemanticContext.Empty.Instance { containsPredicate = true } } @@ -1152,25 +1152,25 @@ open class ParserATNSimulator: ATNSimulator { return pairs ///pairs.toArray(new, DFAState.PredPrediction[pairs.size()]); } - /// + /// /// This method is used to improve the localization of error messages by /// choosing an alternative rather than throwing a /// _org.antlr.v4.runtime.NoViableAltException_ in particular prediction scenarios where the /// _#ERROR_ state was reached during ATN simulation. - /// + /// /// The default implementation of this method uses the following /// algorithm to identify an ATN configuration which successfully parsed the /// decision entry rule. 
Choosing such an alternative ensures that the /// _org.antlr.v4.runtime.ParserRuleContext_ returned by the calling rule will be complete /// and valid, and the syntax error will be reported later at a more /// localized location. - /// + /// /// * If a syntactically valid path or paths reach the end of the decision rule and /// they are semantically valid if predicated, return the min associated alt. /// * Else, if a semantically invalid but syntactically valid path exist /// or paths exist, return the minimum associated alt. /// * Otherwise, return _org.antlr.v4.runtime.atn.ATN#INVALID_ALT_NUMBER_. - /// + /// /// In some scenarios, the algorithm described above could predict an /// alternative which will result in a _org.antlr.v4.runtime.FailedPredicateException_ in /// the parser. Specifically, this could occur if the __only__ configuration @@ -1182,16 +1182,16 @@ open class ParserATNSimulator: ATNSimulator { /// predicate which is preventing the parser from successfully parsing the /// decision rule, which helps developers identify and correct logic errors /// in semantic predicates. - /// + /// /// - parameter configs: The ATN configurations which were valid immediately before /// the _#ERROR_ state was reached /// - parameter outerContext: The is the \gamma_0 initial parser context from the paper /// or the parser stack at the instant before prediction commences. - /// + /// /// - returns: The value to return from _#adaptivePredict_, or /// _org.antlr.v4.runtime.atn.ATN#INVALID_ALT_NUMBER_ if a suitable alternative was not /// identified and _#adaptivePredict_ should report an error instead. 
- /// + /// final internal func getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(_ configs: ATNConfigSet, _ outerContext: ParserRuleContext) throws -> Int { let (semValidConfigs, semInvalidConfigs) = try splitAccordingToSemanticValidity(configs, outerContext) @@ -1216,16 +1216,16 @@ open class ParserATNSimulator: ATNSimulator { return configs.getAltThatFinishedDecisionEntryRule() } - /// + /// /// Walk the list of configurations and split them according to /// those that have preds evaluating to true/false. If no pred, assume /// true pred and include in succeeded set. Returns Pair of sets. - /// + /// /// Create a new set so as not to alter the incoming parameter. - /// + /// /// Assumption: the input stream has been restored to the starting point /// prediction, which is where predicates need to evaluate. - /// + /// final internal func splitAccordingToSemanticValidity( _ configs: ATNConfigSet, _ outerContext: ParserRuleContext) throws -> (ATNConfigSet, ATNConfigSet) { @@ -1233,19 +1233,19 @@ open class ParserATNSimulator: ATNSimulator { return try configs.splitAccordingToSemanticValidity(outerContext, evalSemanticContext) } - /// + /// /// Look through a list of predicate/alt pairs, returning alts for the /// pairs that win. A `NONE` predicate indicates an alt containing an /// unpredicated config which behaves as "always true." If !complete /// then we stop at the first predicate that evaluates to true. This /// includes pairs with null predicates. - /// + /// final internal func evalSemanticContext(_ predPredictions: [DFAState.PredPrediction], _ outerContext: ParserRuleContext, _ complete: Bool) throws -> BitSet { let predictions = BitSet() for pair in predPredictions { - if pair.pred == SemanticContext.NONE { + if pair.pred == SemanticContext.Empty.Instance { try! 
predictions.set(pair.alt) if !complete { break @@ -1273,13 +1273,13 @@ open class ParserATNSimulator: ATNSimulator { return predictions } - /// + /// /// Evaluate a semantic context within a specific parser context. - /// + /// /// This method might not be called for every semantic context evaluated /// during the prediction process. In particular, we currently do not /// evaluate the following but it may change in the future: - /// + /// /// * Precedence predicates (represented by /// _org.antlr.v4.runtime.atn.SemanticContext.PrecedencePredicate_) are not currently evaluated /// through this method. @@ -1289,7 +1289,7 @@ open class ParserATNSimulator: ATNSimulator { /// Implementations which require evaluation results from individual /// predicates should override this method to explicitly handle evaluation of /// the operands within operator predicates. - /// + /// /// - parameter pred: The semantic context to evaluate /// - parameter parserCallStack: The parser context in which to evaluate the /// semantic context @@ -1297,20 +1297,20 @@ open class ParserATNSimulator: ATNSimulator { /// - parameter fullCtx: `true` if the evaluation is occurring during LL /// prediction; otherwise, `false` if the evaluation is occurring /// during SLL prediction - /// + /// /// - since: 4.3 - /// + /// internal func evalSemanticContext(_ pred: SemanticContext, _ parserCallStack: ParserRuleContext, _ alt: Int, _ fullCtx: Bool) throws -> Bool { return try pred.eval(parser, parserCallStack) } - /// + /// /// TODO: If we are doing predicates, there is no point in pursuing /// closure operations if we reach a DFA state that uniquely predicts /// alternative. We will not be caching that DFA state and it is a /// waste to pursue the closure. 
Might have to advance when we do /// ambig detection thought :( - /// + /// final internal func closure(_ config: ATNConfig, _ configs: ATNConfigSet, _ closureBusy: inout Set, @@ -1344,7 +1344,7 @@ open class ParserATNSimulator: ATNSimulator { for i in 0.., @@ -1478,50 +1478,50 @@ open class ParserATNSimulator: ATNSimulator { //print("That took: "+(finishTime-startTime)+ " ms"); } - /// + /// /// Implements first-edge (loop entry) elimination as an optimization /// during closure operations. See antlr/antlr4#1398. - /// + /// /// The optimization is to avoid adding the loop entry config when /// the exit path can only lead back to the same /// StarLoopEntryState after popping context at the rule end state /// (traversing only epsilon edges, so we're still in closure, in /// this same rule). - /// + /// /// We need to detect any state that can reach loop entry on /// epsilon w/o exiting rule. We don't have to look at FOLLOW /// links, just ensure that all stack tops for config refer to key /// states in LR rule. - /// + /// /// To verify we are in the right situation we must first check /// closure is at a StarLoopEntryState generated during LR removal. /// Then we check that each stack top of context is a return state /// from one of these cases: - /// + /// /// 1. 'not' expr, '(' type ')' expr. The return state points at loop entry state /// 2. expr op expr. The return state is the block end of internal block of (...)* /// 3. 'between' expr 'and' expr. The return state of 2nd expr reference. /// That state points at block end of internal block of (...)*. /// 4. expr '?' expr ':' expr. The return state points at block end, /// which points at loop entry state. - /// + /// /// If any is true for each stack top, then closure does not add a /// config to the current config set for edge[0], the loop entry branch. - /// + /// /// Conditions fail if any context for the current config is: - /// + /// /// a. 
empty (we'd fall out of expr to do a global FOLLOW which could /// even be to some weird spot in expr) or, /// b. lies outside of expr or, /// c. lies within expr but at a state not the BlockEndState /// generated during LR removal - /// + /// /// Do we need to evaluate predicates ever in closure for this case? - /// + /// /// No. Predicates, including precedence predicates, are only /// evaluated when computing a DFA start state. I.e., only before /// the lookahead (but not parser) consumes a token. - /// + /// /// There are no epsilon edges allowed in LR rule alt blocks or in /// the "primary" part (ID here). If closure is in /// StarLoopEntryState any lookahead operation will have consumed a @@ -1533,9 +1533,9 @@ open class ParserATNSimulator: ATNSimulator { /// closure starting at edges[0], edges[1] emanating from /// StarLoopEntryState. That means it is not performing closure on /// StarLoopEntryState during compute-start-state. - /// + /// /// How do we know this always gives same prediction answer? - /// + /// /// Without predicates, loop entry and exit paths are ambiguous /// upon remaining input +b (in, say, a+b). Either paths lead to /// valid parses. Closure can lead to consuming + immediately or by @@ -1543,30 +1543,30 @@ open class ParserATNSimulator: ATNSimulator { /// again to StarLoopEntryState to match +b. In this special case, /// we choose the more efficient path, which is to take the bypass /// path. - /// + /// /// The lookahead language has not changed because closure chooses /// one path over the other. Both paths lead to consuming the same /// remaining input during a lookahead operation. If the next token /// is an operator, lookahead will enter the choice block with /// operators. If it is not, lookahead will exit expr. Same as if /// closure had chosen to enter the choice block immediately. - /// + /// /// Closure is examining one config (some loopentrystate, some alt, /// context) which means it is considering exactly one alt. 
Closure /// always copies the same alt to any derived configs. - /// + /// /// How do we know this optimization doesn't mess up precedence in /// our parse trees? - /// + /// /// Looking through expr from left edge of stat only has to confirm /// that an input, say, a+b+c; begins with any valid interpretation /// of an expression. The precedence actually doesn't matter when /// making a decision in stat seeing through expr. It is only when /// parsing rule expr that we must use the precedence to get the /// right interpretation and, hence, parse tree. - /// + /// /// - 4.6 - /// + /// internal func canDropLoopEntryEdgeInLeftRecursiveRule(_ config: ATNConfig) -> Bool { if ParserATNSimulator.TURN_OFF_LR_LOOP_ENTRY_BRANCH_OPT { return false @@ -1799,27 +1799,27 @@ open class ParserATNSimulator: ATNSimulator { return ATNConfig(config, t.target, newContext) } - /// + /// /// Gets a _java.util.BitSet_ containing the alternatives in `configs` /// which are part of one or more conflicting alternative subsets. - /// + /// /// - parameter configs: The _org.antlr.v4.runtime.atn.ATNConfigSet_ to analyze. /// - returns: The alternatives in `configs` which are part of one or more /// conflicting alternative subsets. If `configs` does not contain any /// conflicting subsets, this method returns an empty _java.util.BitSet_. - /// + /// final func getConflictingAlts(_ configs: ATNConfigSet) -> BitSet { let altsets = PredictionMode.getConflictingAltSubsets(configs) return PredictionMode.getAlts(altsets) } - /// + /// /// Sam pointed out a problem with the previous definition, v3, of /// ambiguous states. If we have another state associated with conflicting /// alternatives, we should keep going. For example, the following grammar - /// + /// /// s : (ID | ID ID?) ';' ; - /// + /// /// When the ATN simulation reaches the state before ';', it has a DFA /// state that looks like: [12|1|[], 6|2|[], 12|2|[]]. 
Naturally /// 12|1|[] and 12|2|[] conflict, but we cannot stop processing this node @@ -1831,13 +1831,13 @@ open class ParserATNSimulator: ATNSimulator { /// ignore the conflict between alts 1 and 2. We ignore a set of /// conflicting alts when there is an intersection with an alternative /// associated with a single alt state in the state→config-list map. - /// + /// /// It's also the case that we might have two conflicting configurations but /// also a 3rd nonconflicting configuration for a different alternative: /// [1|1|[], 1|2|[], 8|3|[]]. This can come about from grammar: - /// + /// /// a : A | A | A B ; - /// + /// /// After matching input A, we reach the stop state for rule A, state 1. /// State 8 is the state right before B. Clearly alternatives 1 and 2 /// conflict and no amount of further lookahead will separate the two. @@ -1848,7 +1848,7 @@ open class ParserATNSimulator: ATNSimulator { /// looking for input reasonably, I don't declare the state done. We /// ignore a set of conflicting alts when we have an alternative /// that we still need to pursue. - /// + /// final func getConflictingAltsOrUniqueAlt(_ configs: ATNConfigSet) -> BitSet { var conflictingAlts: BitSet if configs.uniqueAlt != ATN.INVALID_ALT_NUMBER { @@ -1878,11 +1878,11 @@ open class ParserATNSimulator: ATNSimulator { return try getTokenName(input.LA(1)) } - /// + /// /// Used for debugging in adaptivePredict around execATN but I cut /// it out for clarity now that alg. works well. We can leave this /// "dead" code for a bit. - /// + /// public final func dumpDeadEndConfigs(_ nvae: NoViableAltException) { errPrint("dead end configs: ") for c in nvae.getDeadEndConfigs()!.configs { @@ -1921,24 +1921,24 @@ open class ParserATNSimulator: ATNSimulator { return alt } - /// + /// /// Add an edge to the DFA, if possible. This method calls /// _#addDFAState_ to ensure the `to` state is present in the /// DFA. 
If `from` is `null`, or if `t` is outside the /// range of edges that can be represented in the DFA tables, this method /// returns without adding the edge to the DFA. - /// + /// /// If `to` is `null`, this method returns `null`. /// Otherwise, this method returns the _org.antlr.v4.runtime.dfa.DFAState_ returned by calling /// _#addDFAState_ for the `to` state. - /// + /// /// - parameter dfa: The DFA /// - parameter from: The source state for the edge /// - parameter t: The input symbol /// - parameter to: The target state for the edge - /// + /// /// - returns: the result of calling _#addDFAState_ on `to` - /// + /// @discardableResult private final func addDFAEdge(_ dfa: DFA, _ from: DFAState, @@ -1969,38 +1969,38 @@ open class ParserATNSimulator: ATNSimulator { return to } - /// + /// /// Add state `D` to the DFA if it is not already present, and return /// the actual instance stored in the DFA. If a state equivalent to `D` /// is already in the DFA, the existing state is returned. Otherwise this /// method returns `D` after adding it to the DFA. - /// + /// /// If `D` is _#ERROR_, this method returns _#ERROR_ and /// does not change the DFA. - /// + /// /// - parameter dfa: The dfa /// - parameter D: The DFA state to add /// - returns: The state stored in the DFA. This will be either the existing /// state if `D` is already in the DFA, or `D` itself if the /// state was not already present. - /// + /// private final func addDFAState(_ dfa: DFA, _ D: DFAState) -> DFAState { if D == ATNSimulator.ERROR { return D } - + return dfa.statesMutex.synchronized { if let existing = dfa.states[D] { return existing } D.stateNumber = dfa.states.count - + if !D.configs.isReadonly() { try! 
D.configs.optimizeConfigs(self) D.configs.setReadonly(true) } - + dfa.states[D] = D if debug { print("adding new DFA state: \(D)") @@ -2026,9 +2026,9 @@ open class ParserATNSimulator: ATNSimulator { parser.getErrorListenerDispatch().reportContextSensitivity(parser, dfa, startIndex, stopIndex, prediction, configs) } - /// + /// /// If context sensitive parsing, we know it's ambiguity not conflict - /// + /// // configs that LL not SLL considered conflictin internal func reportAmbiguity(_ dfa: DFA, _ D: DFAState, // the DFA state from execATN() that had SLL conflicts diff --git a/runtime/Swift/Sources/Antlr4/atn/PredictionContext.swift b/runtime/Swift/Sources/Antlr4/atn/PredictionContext.swift index d7ab6f9f68..8ef0a166aa 100644 --- a/runtime/Swift/Sources/Antlr4/atn/PredictionContext.swift +++ b/runtime/Swift/Sources/Antlr4/atn/PredictionContext.swift @@ -1,24 +1,18 @@ -/// +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. -/// +/// import Foundation public class PredictionContext: Hashable, CustomStringConvertible { - /// - /// Represents `$` in local context prediction, which means wildcard. - /// `+x = *`. - /// - public static let EMPTY = EmptyPredictionContext() - - /// + /// /// Represents `$` in an array in full context mode, when `$` /// doesn't mean wildcard: `$ + x = [$,x]`. Here, /// `$` = _#EMPTY_RETURN_STATE_. - /// + /// public static let EMPTY_RETURN_STATE = Int(Int32.max) private static let INITIAL_HASH = UInt32(1) @@ -31,44 +25,44 @@ public class PredictionContext: Hashable, CustomStringConvertible { return oldGlobalNodeCount }() - /// + /// /// Stores the computed hash code of this _org.antlr.v4.runtime.atn.PredictionContext_. The hash /// code is computed in parts to match the following reference algorithm. 
- /// - /// + /// + /// /// private int referenceHashCode() { /// int hash = _org.antlr.v4.runtime.misc.MurmurHash#initialize MurmurHash.initialize_(_#INITIAL_HASH_); - /// + /// /// for (int i = 0; i < _#size()_; i++) { /// hash = _org.antlr.v4.runtime.misc.MurmurHash#update MurmurHash.update_(hash, _#getParent getParent_(i)); /// } - /// + /// /// for (int i = 0; i < _#size()_; i++) { /// hash = _org.antlr.v4.runtime.misc.MurmurHash#update MurmurHash.update_(hash, _#getReturnState getReturnState_(i)); /// } - /// + /// /// hash = _org.antlr.v4.runtime.misc.MurmurHash#finish MurmurHash.finish_(hash, 2 * _#size()_); /// return hash; /// } - /// - /// + /// + /// public let cachedHashCode: Int init(_ cachedHashCode: Int) { self.cachedHashCode = cachedHashCode } - /// + /// /// Convert a _org.antlr.v4.runtime.RuleContext_ tree to a _org.antlr.v4.runtime.atn.PredictionContext_ graph. /// Return _#EMPTY_ if `outerContext` is empty or null. - /// + /// public static func fromRuleContext(_ atn: ATN, _ outerContext: RuleContext?) -> PredictionContext { - let _outerContext = outerContext ?? RuleContext.EMPTY + let _outerContext = outerContext ?? ParserRuleContext.EMPTY // if we are in RuleContext of start rule, s, then PredictionContext // is EMPTY. Nobody called us. (if we are empty, return empty) - if (_outerContext.parent == nil || _outerContext === RuleContext.EMPTY) { - return PredictionContext.EMPTY + if (_outerContext.parent == nil || _outerContext === ParserRuleContext.EMPTY) { + return EmptyPredictionContext.Instance } // If we have a parent, convert it to a PredictionContext graph @@ -94,11 +88,11 @@ public class PredictionContext: Hashable, CustomStringConvertible { } - /// + /// /// This means only the _#EMPTY_ context is in set. 
- /// + /// public func isEmpty() -> Bool { - return self === PredictionContext.EMPTY + return self === EmptyPredictionContext.Instance } public func hasEmptyPath() -> Bool { @@ -176,33 +170,33 @@ public class PredictionContext: Hashable, CustomStringConvertible { rootIsWildcard, &mergeCache) } - /// + /// /// Merge two _org.antlr.v4.runtime.atn.SingletonPredictionContext_ instances. - /// + /// /// Stack tops equal, parents merge is same; return left graph. - /// - /// + /// + /// /// Same stack top, parents differ; merge parents giving array node, then /// remainders of those graphs. A new root node is created to point to the /// merged parents. - /// - /// + /// + /// /// Different stack tops pointing to same parent. Make array node for the /// root where both element in the root point to the same (original) /// parent. - /// - /// + /// + /// /// Different stack tops pointing to different parents. Make array node for /// the root where each element points to the corresponding original /// parent. - /// - /// + /// + /// /// - parameter a: the first _org.antlr.v4.runtime.atn.SingletonPredictionContext_ /// - parameter b: the second _org.antlr.v4.runtime.atn.SingletonPredictionContext_ /// - parameter rootIsWildcard: `true` if this is a local-context merge, /// otherwise false to indicate a full-context merge /// - parameter mergeCache: - /// + /// public static func mergeSingletons( _ a: SingletonPredictionContext, _ b: SingletonPredictionContext, @@ -285,66 +279,66 @@ public class PredictionContext: Hashable, CustomStringConvertible { } } - /// + /// /// Handle case where at least one of `a` or `b` is /// _#EMPTY_. In the following diagrams, the symbol `$` is used /// to represent _#EMPTY_. - /// + /// /// Local-Context Merges - /// + /// /// These local-context merge operations are used when `rootIsWildcard` /// is true. - /// + /// /// _#EMPTY_ is superset of any graph; return _#EMPTY_. 
- /// - /// + /// + /// /// _#EMPTY_ and anything is `#EMPTY`, so merged parent is /// `#EMPTY`; return left graph. - /// - /// + /// + /// /// Special case of last merge if local context. - /// - /// + /// + /// /// Full-Context Merges - /// + /// /// These full-context merge operations are used when `rootIsWildcard` /// is false. - /// - /// - /// + /// + /// + /// /// Must keep all contexts; _#EMPTY_ in array is a special value (and /// null parent). - /// - /// - /// - /// + /// + /// + /// + /// /// - parameter a: the first _org.antlr.v4.runtime.atn.SingletonPredictionContext_ /// - parameter b: the second _org.antlr.v4.runtime.atn.SingletonPredictionContext_ /// - parameter rootIsWildcard: `true` if this is a local-context merge, /// otherwise false to indicate a full-context merge - /// + /// public static func mergeRoot(_ a: SingletonPredictionContext, _ b: SingletonPredictionContext, _ rootIsWildcard: Bool) -> PredictionContext? { if rootIsWildcard { - if a === PredictionContext.EMPTY { - return PredictionContext.EMPTY + if a === EmptyPredictionContext.Instance { + return EmptyPredictionContext.Instance } // * + b = * - if b === PredictionContext.EMPTY { - return PredictionContext.EMPTY + if b === EmptyPredictionContext.Instance { + return EmptyPredictionContext.Instance } // a + * = * } else { - if a === PredictionContext.EMPTY && b === PredictionContext.EMPTY { - return PredictionContext.EMPTY + if a === EmptyPredictionContext.Instance && b === EmptyPredictionContext.Instance { + return EmptyPredictionContext.Instance } // $ + $ = $ - if a === PredictionContext.EMPTY { + if a === EmptyPredictionContext.Instance { // $ + x = [$,x] let payloads = [b.returnState, EMPTY_RETURN_STATE] let parents = [b.parent, nil] let joined = ArrayPredictionContext(parents, payloads) return joined } - if b === PredictionContext.EMPTY { + if b === EmptyPredictionContext.Instance { // x + $ = [$,x] ($ is always first if present) let payloads = [a.returnState, 
EMPTY_RETURN_STATE] let parents = [a.parent, nil] @@ -355,25 +349,25 @@ public class PredictionContext: Hashable, CustomStringConvertible { return nil } - /// + /// /// Merge two _org.antlr.v4.runtime.atn.ArrayPredictionContext_ instances. - /// + /// /// Different tops, different parents. - /// - /// + /// + /// /// Shared top, same parents. - /// - /// + /// + /// /// Shared top, different parents. - /// - /// + /// + /// /// Shared top, all shared parents. - /// - /// + /// + /// /// Equal tops, merge parents and reduce top to /// _org.antlr.v4.runtime.atn.SingletonPredictionContext_. - /// - /// + /// + /// public static func mergeArrays( _ a: ArrayPredictionContext, _ b: ArrayPredictionContext, @@ -530,7 +524,7 @@ public class PredictionContext: Hashable, CustomStringConvertible { } for current in nodes { - if current === EMPTY { + if current === EmptyPredictionContext.Instance { continue } let length = current.size() @@ -601,7 +595,7 @@ public class PredictionContext: Hashable, CustomStringConvertible { let updated: PredictionContext if parents.isEmpty { - updated = EMPTY + updated = EmptyPredictionContext.Instance } else if parents.count == 1 { updated = SingletonPredictionContext.create(parents[0], context.getReturnState(0)) @@ -648,7 +642,7 @@ public class PredictionContext: Hashable, CustomStringConvertible { } public func toStrings(_ recognizer: Recognizer, _ currentState: Int) -> [String] { - return toStrings(recognizer, PredictionContext.EMPTY, currentState) + return toStrings(recognizer, EmptyPredictionContext.Instance, currentState) } // FROM SAM diff --git a/runtime/Swift/Sources/Antlr4/atn/PredictionContextCache.swift b/runtime/Swift/Sources/Antlr4/atn/PredictionContextCache.swift index 491c40ce10..813c6223b7 100644 --- a/runtime/Swift/Sources/Antlr4/atn/PredictionContextCache.swift +++ b/runtime/Swift/Sources/Antlr4/atn/PredictionContextCache.swift @@ -1,15 +1,15 @@ -/// +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. 
/// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. -/// +/// -/// +/// /// Used to cache _org.antlr.v4.runtime.atn.PredictionContext_ objects. Its used for the shared /// context cash associated with contexts in DFA states. This cache /// can be used for both lexers and parsers. -/// +/// public final class PredictionContextCache { private var cache = [PredictionContext: PredictionContext]() @@ -21,11 +21,11 @@ public final class PredictionContextCache { /// Add a context to the cache and return it. If the context already exists, /// return that one instead and do not add a new context to the cache. /// Protect shared cache from unsafe thread access. - /// + /// @discardableResult public func add(_ ctx: PredictionContext) -> PredictionContext { - if ctx === PredictionContext.EMPTY { - return PredictionContext.EMPTY + if ctx === EmptyPredictionContext.Instance { + return EmptyPredictionContext.Instance } if let existing = cache[ctx] { // print(name+" reuses "+existing); diff --git a/runtime/Swift/Sources/Antlr4/atn/SemanticContext.swift b/runtime/Swift/Sources/Antlr4/atn/SemanticContext.swift index ef4b1fdefe..e14a7fdde7 100644 --- a/runtime/Swift/Sources/Antlr4/atn/SemanticContext.swift +++ b/runtime/Swift/Sources/Antlr4/atn/SemanticContext.swift @@ -1,49 +1,43 @@ -/// +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. -/// +/// -/// +/// /// A tree structure used to record the semantic context in which /// an ATN configuration is valid. It's either a single predicate, /// a conjunction `p1&&p2`, or a sum of products `p1||p2`. 
-/// +/// /// I have scoped the _org.antlr.v4.runtime.atn.SemanticContext.AND_, _org.antlr.v4.runtime.atn.SemanticContext.OR_, and _org.antlr.v4.runtime.atn.SemanticContext.Predicate_ subclasses of /// _org.antlr.v4.runtime.atn.SemanticContext_ within the scope of this outer class. -/// +/// import Foundation public class SemanticContext: Hashable, CustomStringConvertible { - /// - /// The default _org.antlr.v4.runtime.atn.SemanticContext_, which is semantically equivalent to - /// a predicate of the form `{true`?}. - /// - public static let NONE: SemanticContext = Predicate() - - /// + /// /// For context independent predicates, we evaluate them without a local /// context (i.e., null context). That way, we can evaluate them without /// having to create proper rule-specific context during prediction (as /// opposed to the parser, which creates them naturally). In a practical /// sense, this avoids a cast exception from RuleContext to myruleContext. - /// + /// /// For context dependent predicates, we must pass in a local context so that /// references such as $arg evaluate properly as _localctx.arg. We only /// capture context dependent predicates in the context in which we begin /// prediction, so we passed in the outer context here in case of context /// dependent predicate evaluation. - /// + /// public func eval(_ parser: Recognizer, _ parserCallStack: RuleContext) throws -> Bool { fatalError(#function + " must be overridden") } - /// + /// /// Evaluate the precedence predicates for the context and reduce the result. - /// + /// /// - parameter parser: The parser instance. /// - parameter parserCallStack: /// - returns: The simplified semantic context after precedence predicates are @@ -56,7 +50,7 @@ public class SemanticContext: Hashable, CustomStringConvertible { /// precedence predicate evaluation. /// * A non-`null` _org.antlr.v4.runtime.atn.SemanticContext_: the new simplified /// semantic context after precedence predicates are evaluated. 
- /// + /// public func evalPrecedence(_ parser: Recognizer, _ parserCallStack: RuleContext) throws -> SemanticContext? { return self } @@ -69,6 +63,22 @@ public class SemanticContext: Hashable, CustomStringConvertible { fatalError(#function + " must be overridden") } + public class Empty: SemanticContext { + // + /// The default _org.antlr.v4.runtime.atn.SemanticContext_, which is semantically equivalent to + /// a predicate of the form `{true?}. + /// + public static let Instance: Empty = Empty() + + public override func hash(into hasher: inout Hasher) { + } + + override + public var description: String { + return "{true}?" + } + } + public class Predicate: SemanticContext { public let ruleIndex: Int public let predIndex: Int @@ -127,7 +137,7 @@ public class SemanticContext: Hashable, CustomStringConvertible { override public func evalPrecedence(_ parser: Recognizer, _ parserCallStack: RuleContext) throws -> SemanticContext? { if parser.precpred(parserCallStack, precedence) { - return SemanticContext.NONE + return SemanticContext.Empty.Instance } else { return nil } @@ -145,32 +155,32 @@ public class SemanticContext: Hashable, CustomStringConvertible { } } - /// + /// /// This is the base class for semantic context "operators", which operate on /// a collection of semantic context "operands". - /// + /// /// - 4.3 - /// + /// public class Operator: SemanticContext { - /// + /// /// Gets the operands for the semantic context operator. - /// + /// /// - returns: a collection of _org.antlr.v4.runtime.atn.SemanticContext_ operands for the /// operator. - /// + /// /// - 4.3 - /// + /// public func getOperands() -> Array { fatalError(#function + " must be overridden") } } - /// + /// /// A semantic context which is true whenever none of the contained contexts /// is false. 
- /// + /// public class AND: Operator { public let opnds: [SemanticContext] @@ -211,13 +221,13 @@ public class SemanticContext: Hashable, CustomStringConvertible { hasher.combine(opnds) } - /// - /// - /// - /// + /// + /// + /// + /// /// The evaluation of predicates by this context is short-circuiting, but /// unordered. - /// + /// override public func eval(_ parser: Recognizer, _ parserCallStack: RuleContext) throws -> Bool { for opnd in opnds { @@ -242,7 +252,7 @@ public class SemanticContext: Hashable, CustomStringConvertible { // The AND context is false if any element is false return nil } - else if evaluated != SemanticContext.NONE { + else if evaluated != SemanticContext.Empty.Instance { // Reduce the result by skipping true elements operands.append(evaluated!) } @@ -252,7 +262,7 @@ public class SemanticContext: Hashable, CustomStringConvertible { return self } - return operands.reduce(SemanticContext.NONE, SemanticContext.and) + return operands.reduce(SemanticContext.Empty.Instance, SemanticContext.and) } override @@ -262,10 +272,10 @@ public class SemanticContext: Hashable, CustomStringConvertible { } } - /// + /// /// A semantic context which is true whenever at least one of the contained /// contexts is true. - /// + /// public class OR: Operator { public final var opnds: [SemanticContext] @@ -305,13 +315,13 @@ public class SemanticContext: Hashable, CustomStringConvertible { hasher.combine(opnds) } - /// - /// - /// - /// + /// + /// + /// + /// /// The evaluation of predicates by this context is short-circuiting, but /// unordered. 
- /// + /// override public func eval(_ parser: Recognizer, _ parserCallStack: RuleContext) throws -> Bool { for opnd in opnds { @@ -329,9 +339,9 @@ public class SemanticContext: Hashable, CustomStringConvertible { for context in opnds { let evaluated = try context.evalPrecedence(parser, parserCallStack) differs = differs || (evaluated != context) - if evaluated == SemanticContext.NONE { + if evaluated == SemanticContext.Empty.Instance { // The OR context is true if any element is true - return SemanticContext.NONE + return SemanticContext.Empty.Instance } else if let evaluated = evaluated { // Reduce the result by skipping false elements @@ -354,10 +364,10 @@ public class SemanticContext: Hashable, CustomStringConvertible { } public static func and(_ a: SemanticContext?, _ b: SemanticContext?) -> SemanticContext { - if a == nil || a == SemanticContext.NONE { + if a == nil || a == SemanticContext.Empty.Instance { return b! } - if b == nil || b == SemanticContext.NONE { + if b == nil || b == SemanticContext.Empty.Instance { return a! } let result: AND = AND(a!, b!) @@ -368,10 +378,10 @@ public class SemanticContext: Hashable, CustomStringConvertible { return result } - /// - /// + /// + /// /// - seealso: org.antlr.v4.runtime.atn.ParserATNSimulator#getPredsForAmbigAlts - /// + /// public static func or(_ a: SemanticContext?, _ b: SemanticContext?) -> SemanticContext { if a == nil { return b! @@ -379,8 +389,8 @@ public class SemanticContext: Hashable, CustomStringConvertible { if b == nil { return a! } - if a == SemanticContext.NONE || b == SemanticContext.NONE { - return SemanticContext.NONE + if a == SemanticContext.Empty.Instance || b == SemanticContext.Empty.Instance { + return SemanticContext.Empty.Instance } let result: OR = OR(a!, b!) 
if result.opnds.count == 1 { diff --git a/runtime/Swift/Sources/Antlr4/atn/SingletonPredictionContext.swift b/runtime/Swift/Sources/Antlr4/atn/SingletonPredictionContext.swift index e40971492d..2947efa2d1 100644 --- a/runtime/Swift/Sources/Antlr4/atn/SingletonPredictionContext.swift +++ b/runtime/Swift/Sources/Antlr4/atn/SingletonPredictionContext.swift @@ -1,8 +1,8 @@ -/// +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. -/// +/// @@ -24,7 +24,7 @@ public class SingletonPredictionContext: PredictionContext { public static func create(_ parent: PredictionContext?, _ returnState: Int) -> SingletonPredictionContext { if returnState == PredictionContext.EMPTY_RETURN_STATE && parent == nil { // someone can pass in the bits of an array ctx that mean $ - return PredictionContext.EMPTY + return EmptyPredictionContext.Instance } return SingletonPredictionContext(parent, returnState) } diff --git a/runtime/Swift/Sources/Antlr4/dfa/DFAState.swift b/runtime/Swift/Sources/Antlr4/dfa/DFAState.swift index 360abb9278..acc2f8ed82 100644 --- a/runtime/Swift/Sources/Antlr4/dfa/DFAState.swift +++ b/runtime/Swift/Sources/Antlr4/dfa/DFAState.swift @@ -1,11 +1,11 @@ -/// +/// /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. -/// +/// -/// +/// /// A DFA state represents a set of possible ATN configurations. /// As Aho, Sethi, Ullman p. 117 says "The DFA uses its state /// to keep track of all possible states the ATN can be in after @@ -21,22 +21,22 @@ /// jump from rule to rule, emulating rule invocations (method calls). /// I have to add a stack to simulate the proper lookahead sequences for /// the underlying LL grammar from which the ATN was derived. 
-/// +/// /// I use a set of ATNConfig objects not simple states. An ATNConfig /// is both a state (ala normal conversion) and a RuleContext describing /// the chain of rules (if any) followed to arrive at that state. -/// +/// /// A DFA state may have multiple references to a particular state, /// but with different ATN contexts (with same or different alts) /// meaning that state was reached via a different set of rule invocations. -/// +/// public final class DFAState: Hashable, CustomStringConvertible { public internal(set) var stateNumber = ATNState.INVALID_STATE_NUMBER public internal(set) var configs: ATNConfigSet - /// + /// /// `edges[symbol]` points to target of symbol. Shift up by 1 so (-1) /// _org.antlr.v4.runtime.Token#EOF_ maps to `edges[0]`. /// @@ -44,51 +44,51 @@ public final class DFAState: Hashable, CustomStringConvertible { public internal(set) var isAcceptState = false - /// + /// /// if accept state, what ttype do we match or alt do we predict? /// This is set to _org.antlr.v4.runtime.atn.ATN#INVALID_ALT_NUMBER_ when _#predicates_`!=null` or /// _#requiresFullContext_. - /// + /// public internal(set) var prediction = ATN.INVALID_ALT_NUMBER public internal(set) var lexerActionExecutor: LexerActionExecutor? - /// + /// /// Indicates that this state was created during SLL prediction that /// discovered a conflict between the configurations in the state. Future /// _org.antlr.v4.runtime.atn.ParserATNSimulator#execATN_ invocations immediately jumped doing /// full context prediction if this field is true. - /// + /// public internal(set) var requiresFullContext = false - /// + /// /// During SLL parsing, this is a list of predicates associated with the /// ATN configurations of the DFA state. When we have predicates, /// _#requiresFullContext_ is `false` since full context prediction evaluates predicates /// on-the-fly. If this is not null, then _#prediction_ is /// _org.antlr.v4.runtime.atn.ATN#INVALID_ALT_NUMBER_. 
- /// + /// /// We only use these for non-_#requiresFullContext_ but conflicting states. That /// means we know from the context (it's $ or we don't dip into outer /// context) that it's an ambiguity not a conflict. - /// + /// /// This list is computed by _org.antlr.v4.runtime.atn.ParserATNSimulator#predicateDFAState_. - /// + /// public internal(set) var predicates: [PredPrediction]? - + /// /// mutex for states changes. /// internal private(set) var mutex = Mutex() - /// + /// /// Map a predicate to a predicted alternative. - /// + /// public final class PredPrediction: CustomStringConvertible { public let pred: SemanticContext - // never null; at least SemanticContext.NONE + // never null; at least SemanticContext.Empty.Instance public let alt: Int public init(_ pred: SemanticContext, _ alt: Int) { @@ -105,10 +105,10 @@ public final class DFAState: Hashable, CustomStringConvertible { self.configs = configs } - /// + /// /// Get the set of all alts mentioned by all ATN configurations in this /// DFA state. - /// + /// public func getAltSet() -> Set? { return configs.getAltSet() } diff --git a/runtime/Swift/Sources/Antlr4/misc/Utils.swift b/runtime/Swift/Sources/Antlr4/misc/Utils.swift index 0866e20c50..78d70a14de 100644 --- a/runtime/Swift/Sources/Antlr4/misc/Utils.swift +++ b/runtime/Swift/Sources/Antlr4/misc/Utils.swift @@ -2,7 +2,7 @@ /// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. /// Use of this file is governed by the BSD 3-clause license that /// can be found in the LICENSE.txt file in the project root. -/// +/// import Foundation @@ -39,24 +39,4 @@ public class Utils { } return m } - - public static func bitLeftShift(_ n: Int) -> Int64 { - return (Int64(1) &<< n) - } - - - public static func testBitLeftShiftArray(_ nArray: [Int],_ bitsShift: Int) -> Bool { - let test: Bool = (((nArray[0] - bitsShift) & ~0x3f) == 0) - - var temp: Int64 = Int64(nArray[0] - bitsShift) - temp = (temp < 0) ? 
(64 + (temp % 64 )) : (temp % 64) - let test1: Int64 = (Int64(1) << temp) - - var test2: Int64 = Utils.bitLeftShift(nArray[1] - bitsShift) - - for i in 1 ..< nArray.count { - test2 = test2 | Utils.bitLeftShift(nArray[i] - bitsShift) - } - return test && (( test1 & test2 ) != 0) - } } diff --git a/runtime/Swift/Sources/Antlr4/misc/utils/Mutex.swift b/runtime/Swift/Sources/Antlr4/misc/utils/Mutex.swift index 5be0c28a8a..082ce54959 100644 --- a/runtime/Swift/Sources/Antlr4/misc/utils/Mutex.swift +++ b/runtime/Swift/Sources/Antlr4/misc/utils/Mutex.swift @@ -1,42 +1,29 @@ import Foundation -/// +/// /// Using class so it can be shared even if /// it appears to be a field in a class. -/// +/// class Mutex { - - /// + /// /// The mutex instance. - /// - private var mutex = pthread_mutex_t() - - /// - /// Initialization - /// - init() { - pthread_mutex_init(&mutex, nil) - } - - /// + /// + private let semaphore = DispatchSemaphore(value: 1) + + /// /// Running the supplied closure synchronously. 
- /// + /// /// - Parameter closure: the closure to run /// - Returns: the value returned by the closure /// - Throws: the exception populated by the closure run - /// + /// @discardableResult func synchronized(closure: () throws -> R) rethrows -> R { - pthread_mutex_lock(&mutex) + semaphore.wait() defer { - pthread_mutex_unlock(&mutex) + semaphore.signal() } return try closure() } - - deinit { - // free the mutex resource - pthread_mutex_destroy(&mutex) - } } diff --git a/runtime/Swift/Tests/Antlr4Tests/RuntimeMetaDataTests.swift b/runtime/Swift/Tests/Antlr4Tests/RuntimeMetaDataTests.swift index 0bb12fd98b..b1cb150040 100644 --- a/runtime/Swift/Tests/Antlr4Tests/RuntimeMetaDataTests.swift +++ b/runtime/Swift/Tests/Antlr4Tests/RuntimeMetaDataTests.swift @@ -29,6 +29,9 @@ class RuntimeMetaDataTests: XCTestCase { doGetMajorMinorVersionTest("4.9.2-SNAPSHOT", "4.9") doGetMajorMinorVersionTest("4.9.3-SNAPSHOT", "4.9") doGetMajorMinorVersionTest("4.10-SNAPSHOT", "4.10") + doGetMajorMinorVersionTest("4.10.1", "4.10") + doGetMajorMinorVersionTest("4.11.0", "4.11") + doGetMajorMinorVersionTest("4.11-SNAPSHOT", "4.11") } } diff --git a/scripts/files-to-update.txt b/scripts/files-to-update.txt index 81293b4d0d..746ee74434 100644 --- a/scripts/files-to-update.txt +++ b/scripts/files-to-update.txt @@ -7,6 +7,7 @@ docker/Dockerfile runtime/Go/antlr/recognizer.go +runtime/Go/antlr/v4/recognizer.go runtime/Python3/setup.py runtime/Python3/src/antlr4/Recognizer.py runtime/Python2/setup.py @@ -32,12 +33,12 @@ runtime/JavaScript/src/antlr4/Recognizer.js # Allow doc files to have multiple values updated * doc/swift-target.md * doc/dart-target.md -doc/releasing-antlr.md # Do this one manually; it'll fail because of multiple lines but we get warning +# doc/releasing-antlr.md * doc/getting-started.md * doc/swift-target.md * doc/getting-started.md -#runtime/Swift/Tests/Antlr4Tests/RuntimeMetaDataTests.swift This is special so must manually edit +# 
runtime/Swift/Tests/Antlr4Tests/RuntimeMetaDataTests.swift This is special so must manually edit # Just documenting that these are generated... diff --git a/tool-testsuite/pom.xml b/tool-testsuite/pom.xml index 6b138a0f5f..5527aad160 100644 --- a/tool-testsuite/pom.xml +++ b/tool-testsuite/pom.xml @@ -10,7 +10,7 @@ org.antlr antlr4-master - 4.10.2-SNAPSHOT + 4.11.0-SNAPSHOT antlr4-tool-testsuite ANTLR 4 Tool Tests @@ -22,54 +22,60 @@ 2009 + + 5.9.0 + + org.antlr ST4 - 4.3.1 + 4.3.4 test org.antlr antlr4-runtime-testsuite ${project.version} - test - test-jar + test + test-jar org.antlr antlr4 ${project.version} - test - - - junit - junit - 4.13.1 test + + org.junit.jupiter + junit-jupiter-api + ${jUnitVersion} + test + + + org.junit.jupiter + junit-jupiter-engine + ${jUnitVersion} + test + + + org.junit.jupiter + junit-jupiter-params + ${jUnitVersion} + test + - src test org.apache.maven.plugins maven-surefire-plugin - 2.12.4 + 2.22.2 -Dfile.encoding=UTF-8 - - **/Test*.java - - - ../../antlr4-python2/src - ../../antlr4-python3/src - ../../antlr4-cpp/src - ../../antlr4-javascript/src - diff --git a/tool-testsuite/resources/junit-platform.properties b/tool-testsuite/resources/junit-platform.properties new file mode 100644 index 0000000000..ad19ea833b --- /dev/null +++ b/tool-testsuite/resources/junit-platform.properties @@ -0,0 +1,3 @@ +junit.jupiter.execution.parallel.enabled = true +junit.jupiter.execution.parallel.mode.default = concurrent +junit.jupiter.execution.parallel.mode.classes.default = concurrent \ No newline at end of file diff --git a/tool-testsuite/test/org/antlr/v4/test/tool/BaseJavaToolTest.java b/tool-testsuite/test/org/antlr/v4/test/tool/BaseJavaToolTest.java deleted file mode 100644 index f3d0f7177c..0000000000 --- a/tool-testsuite/test/org/antlr/v4/test/tool/BaseJavaToolTest.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. 
- * Use of this file is governed by the BSD 3-clause license that - * can be found in the LICENSE.txt file in the project root. - */ - -package org.antlr.v4.test.tool; - -import org.antlr.v4.Tool; -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.ErrorQueue; -import org.antlr.v4.test.runtime.java.BaseJavaTest; - -import java.io.File; - -import static org.junit.Assert.assertEquals; - -public class BaseJavaToolTest extends BaseJavaTest { - - public void testErrors(String[] pairs, boolean printTree) { - for (int i = 0; i < pairs.length; i+=2) { - String grammarStr = pairs[i]; - String expect = pairs[i+1]; - - String[] lines = grammarStr.split("\n"); - String fileName = getFilenameFromFirstLineOfGrammar(lines[0]); - ErrorQueue equeue = BaseRuntimeTest.antlrOnString(getTempDirPath(), null, fileName, grammarStr, false); // use default language target in case test overrides - - String actual = equeue.toString(true); - actual = actual.replace(getTempDirPath() + File.separator, ""); -// System.err.println(actual); - String msg = grammarStr; - msg = msg.replace("\n","\\n"); - msg = msg.replace("\r","\\r"); - msg = msg.replace("\t","\\t"); - - assertEquals("error in: "+msg,expect,actual); - } - } - - public String getFilenameFromFirstLineOfGrammar(String line) { - String fileName = "A" + Tool.GRAMMAR_EXTENSION; - int grIndex = line.lastIndexOf("grammar"); - int semi = line.lastIndexOf(';'); - if ( grIndex>=0 && semi>=0 ) { - int space = line.indexOf(' ', grIndex); - fileName = line.substring(space+1, semi)+Tool.GRAMMAR_EXTENSION; - } - if ( fileName.length()==Tool.GRAMMAR_EXTENSION.length() ) fileName = "A" + Tool.GRAMMAR_EXTENSION; - return fileName; - } -} diff --git a/runtime-testsuite/test/org/antlr/v4/test/runtime/MockIntTokenStream.java b/tool-testsuite/test/org/antlr/v4/test/tool/MockIntTokenStream.java similarity index 97% rename from runtime-testsuite/test/org/antlr/v4/test/runtime/MockIntTokenStream.java rename to 
tool-testsuite/test/org/antlr/v4/test/tool/MockIntTokenStream.java index 06eacddfb1..25303fe666 100644 --- a/runtime-testsuite/test/org/antlr/v4/test/runtime/MockIntTokenStream.java +++ b/tool-testsuite/test/org/antlr/v4/test/tool/MockIntTokenStream.java @@ -1,4 +1,4 @@ -package org.antlr.v4.test.runtime; +package org.antlr.v4.test.tool; import org.antlr.v4.runtime.*; import org.antlr.v4.runtime.misc.IntegerList; diff --git a/tool-testsuite/test/org/antlr/v4/test/tool/TestASTStructure.gunit b/tool-testsuite/test/org/antlr/v4/test/tool/TestASTStructure.gunit deleted file mode 100644 index 9641b13d75..0000000000 --- a/tool-testsuite/test/org/antlr/v4/test/tool/TestASTStructure.gunit +++ /dev/null @@ -1,155 +0,0 @@ -/** Test ANTLRParser's AST construction. Translate to junit tests with: - * - * $ java org.antlr.v4.gunit.Gen TestASTStructure.gunit - - NO LONGER using gunit!!! - - */ -gunit TestASTStructure; - -@header {package org.antlr.v4.test;} -options { - adaptor = org.antlr.v4.parse.GrammarASTAdaptor; - parser = org.antlr.v4.parse.ANTLRParser; - lexer = org.antlr.v4.parse.ANTLRLexer; -} - -grammarSpec: - "parser grammar P; a : A;" - -> (PARSER_GRAMMAR P (RULES (RULE a (BLOCK (ALT A))))) - - << - parser grammar P; - tokens { A; B='33'; } - @header {foo} - a : A; - >> - -> - (PARSER_GRAMMAR P - (tokens { A (= B '33')) - (@ header {foo}) - (RULES (RULE a (BLOCK (ALT A))))) - - << - parser grammar P; - @header {foo} - tokens { A; B='33'; } - a : A; - >> - -> - (PARSER_GRAMMAR P - (@ header {foo}) - (tokens { A (= B '33')) - (RULES (RULE a (BLOCK (ALT A))))) - - << - parser grammar P; - import A=B, C; - a : A; - >> - -> - (PARSER_GRAMMAR P - (import (= A B) C) - (RULES (RULE a (BLOCK (ALT A))))) - -delegateGrammars: - "import A;" -> (import A) - -rule: - "a : A;" -> - (RULE a (BLOCK (ALT (A (ELEMENT_OPTIONS X (= Y a.b.c)))))) - "A : B+;" -> (RULE A (BLOCK (ALT (+ (BLOCK (ALT B)))))) - - << - public a[int i] returns [int y] - options {backtrack=true;} - @init {blort} - 
: ID ; - >> - -> - (RULE a - (RULEMODIFIERS public) - int i - (returns int y) - (OPTIONS (= backtrack true)) - (@ init {blort}) - (BLOCK (ALT ID))) - - << - a[int i] returns [int y] - @init {blort} - options {backtrack=true;} - : ID; - >> - -> - (RULE a int i - (returns int y) - (@ init {blort}) - (OPTIONS (= backtrack true)) - (BLOCK (ALT ID))) - - << - a : ID ; - catch[A b] {foo} - finally {bar} - >> - -> - (RULE a (BLOCK (ALT ID)) - (catch A b {foo}) (finally {bar})) - - << - a : ID ; - catch[A a] {foo} - catch[B b] {fu} - finally {bar} - >> - -> - (RULE a (BLOCK (ALT ID)) - (catch A a {foo}) (catch B b {fu}) (finally {bar})) - - << - a[int i] - locals [int a, float b] - : A - ; - >> - -> (RULE a int i (locals int a, float b) (BLOCK (ALT A))) - - << - a[int i] throws a.b.c - : A - ; - >> - -> (RULE a int i (throws a.b.c) (BLOCK (ALT A))) - -ebnf: - "(A|B)" -> (BLOCK (ALT A) (ALT B)) - "(A|B)?" -> (? (BLOCK (ALT A) (ALT B))) - "(A|B)*" -> (* (BLOCK (ALT A) (ALT B))) - "(A|B)+" -> (+ (BLOCK (ALT A) (ALT B))) - -element: - "~A" -> (~ (SET A)) - "b+" -> (+ (BLOCK (ALT b))) - "(b)+" -> (+ (BLOCK (ALT b))) - "b?" -> (? (BLOCK (ALT b))) - "(b)?" -> (? (BLOCK (ALT b))) - "(b)*" -> (* (BLOCK (ALT b))) - "b*" -> (* (BLOCK (ALT b))) - "'while'*" -> (* (BLOCK (ALT 'while'))) - "'a'+" -> (+ (BLOCK (ALT 'a'))) - "a[3]" -> (a 3) - "'a'..'z'+" -> (+ (BLOCK (ALT (.. 'a' 'z')))) - "x=ID" -> (= x ID) - "x=ID?" -> (? 
(BLOCK (ALT (= x ID)))) - "x=ID*" -> (* (BLOCK (ALT (= x ID)))) - "x=b" -> (= x b) - "x=(A|B)" -> (= x (BLOCK (ALT A) (ALT B))) - "x=~(A|B)" -> (= x (~ (SET A B))) - "x+=~(A|B)" -> (+= x (~ (SET A B))) - "x+=~(A|B)+"-> (+ (BLOCK (ALT (+= x (~ (SET A B)))))) - "x=b+" -> (+ (BLOCK (ALT (= x b)))) - "x+=ID*" -> (* (BLOCK (ALT (+= x ID)))) - "x+='int'*" -> (* (BLOCK (ALT (+= x 'int')))) - "x+=b+" -> (+ (BLOCK (ALT (+= x b)))) - "({blort} 'x')*" -> (* (BLOCK (ALT {blort} 'x'))) diff --git a/tool-testsuite/test/org/antlr/v4/test/tool/TestASTStructure.java b/tool-testsuite/test/org/antlr/v4/test/tool/TestASTStructure.java index c1d6238362..23ad3b6530 100644 --- a/tool-testsuite/test/org/antlr/v4/test/tool/TestASTStructure.java +++ b/tool-testsuite/test/org/antlr/v4/test/tool/TestASTStructure.java @@ -15,14 +15,12 @@ import org.antlr.runtime.TokenStream; import org.antlr.runtime.tree.Tree; import org.antlr.runtime.tree.TreeAdaptor; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.lang.reflect.Constructor; import java.lang.reflect.Method; -import static org.junit.Assert.assertEquals; - -// NO LONGER using gunit!!! 
+import static org.junit.jupiter.api.Assertions.assertEquals; public class TestASTStructure { String lexerClassName = "org.antlr.v4.parse.ANTLRLexer"; @@ -65,7 +63,7 @@ public Object execParser( RuleReturnScope rstruct = (RuleReturnScope)execParser("grammarSpec", "parser grammar P; a : A;", 15); Object actual = ((Tree)rstruct.getTree()).toStringTree(); Object expecting = "(PARSER_GRAMMAR P (RULES (RULE a (BLOCK (ALT A)))))"; - assertEquals("testing rule grammarSpec", expecting, actual); + assertEquals(expecting, actual, "testing rule grammarSpec"); } @Test public void test_grammarSpec2() throws Exception { @@ -73,7 +71,7 @@ public Object execParser( RuleReturnScope rstruct = (RuleReturnScope)execParser("grammarSpec", "\n parser grammar P;\n tokens { A, B }\n @header {foo}\n a : A;\n ", 18); Object actual = ((Tree)rstruct.getTree()).toStringTree(); Object expecting = "(PARSER_GRAMMAR P (tokens { A B) (@ header {foo}) (RULES (RULE a (BLOCK (ALT A)))))"; - assertEquals("testing rule grammarSpec", expecting, actual); + assertEquals(expecting, actual, "testing rule grammarSpec"); } @Test public void test_grammarSpec3() throws Exception { @@ -81,7 +79,7 @@ public Object execParser( RuleReturnScope rstruct = (RuleReturnScope)execParser("grammarSpec", "\n parser grammar P;\n @header {foo}\n tokens { A,B }\n a : A;\n ", 30); Object actual = ((Tree)rstruct.getTree()).toStringTree(); Object expecting = "(PARSER_GRAMMAR P (@ header {foo}) (tokens { A B) (RULES (RULE a (BLOCK (ALT A)))))"; - assertEquals("testing rule grammarSpec", expecting, actual); + assertEquals(expecting, actual, "testing rule grammarSpec"); } @Test public void test_grammarSpec4() throws Exception { @@ -89,19 +87,19 @@ public Object execParser( RuleReturnScope rstruct = (RuleReturnScope)execParser("grammarSpec", "\n parser grammar P;\n import A=B, C;\n a : A;\n ", 42); Object actual = ((Tree)rstruct.getTree()).toStringTree(); Object expecting = "(PARSER_GRAMMAR P (import (= A B) C) (RULES (RULE a (BLOCK 
(ALT A)))))"; - assertEquals("testing rule grammarSpec", expecting, actual); + assertEquals(expecting, actual, "testing rule grammarSpec"); } @Test public void test_delegateGrammars1() throws Exception { // gunit test on line 53 RuleReturnScope rstruct = (RuleReturnScope)execParser("delegateGrammars", "import A;", 53); Object actual = ((Tree)rstruct.getTree()).toStringTree(); Object expecting = "(import A)"; - assertEquals("testing rule delegateGrammars", expecting, actual); + assertEquals(expecting, actual, "testing rule delegateGrammars"); } @Test public void test_rule1() throws Exception { // gunit test on line 56 RuleReturnScope rstruct = (RuleReturnScope)execParser("rule", "a : A;", 56); Object actual = ((Tree)rstruct.getTree()).toStringTree(); Object expecting = "(RULE a (BLOCK (ALT (A (ELEMENT_OPTIONS X (= Y a.b.c))))))"; - assertEquals("testing rule rule", expecting, actual); + assertEquals(expecting, actual, "testing rule rule"); } @Test public void test_rule2() throws Exception { @@ -109,7 +107,7 @@ public Object execParser( RuleReturnScope rstruct = (RuleReturnScope)execParser("rule", "A : B+;", 58); Object actual = ((Tree)rstruct.getTree()).toStringTree(); Object expecting = "(RULE A (BLOCK (ALT (+ (BLOCK (ALT B))))))"; - assertEquals("testing rule rule", expecting, actual); + assertEquals(expecting, actual, "testing rule rule"); } @Test public void test_rule3() throws Exception { @@ -117,7 +115,7 @@ public Object execParser( RuleReturnScope rstruct = (RuleReturnScope)execParser("rule", "\n a[int i] returns [int y]\n @init {blort}\n : ID ;\n ", 60); Object actual = ((Tree)rstruct.getTree()).toStringTree(); Object expecting = "(RULE a int i (returns int y) (@ init {blort}) (BLOCK (ALT ID)))"; - assertEquals("testing rule rule", expecting, actual); + assertEquals(expecting, actual, "testing rule rule"); } @Test public void test_rule4() throws Exception { @@ -125,7 +123,7 @@ public Object execParser( RuleReturnScope rstruct = 
(RuleReturnScope)execParser("rule", "\n a[int i] returns [int y]\n @init {blort}\n options {backtrack=true;}\n : ID;\n ", 75); Object actual = ((Tree)rstruct.getTree()).toStringTree(); Object expecting = "(RULE a int i (returns int y) (@ init {blort}) (OPTIONS (= backtrack true)) (BLOCK (ALT ID)))"; - assertEquals("testing rule rule", expecting, actual); + assertEquals(expecting, actual, "testing rule rule"); } @Test public void test_rule5() throws Exception { @@ -133,7 +131,7 @@ public Object execParser( RuleReturnScope rstruct = (RuleReturnScope)execParser("rule", "\n a : ID ;\n catch[A b] {foo}\n finally {bar}\n ", 88); Object actual = ((Tree)rstruct.getTree()).toStringTree(); Object expecting = "(RULE a (BLOCK (ALT ID)) (catch A b {foo}) (finally {bar}))"; - assertEquals("testing rule rule", expecting, actual); + assertEquals(expecting, actual, "testing rule rule"); } @Test public void test_rule6() throws Exception { @@ -141,7 +139,7 @@ public Object execParser( RuleReturnScope rstruct = (RuleReturnScope)execParser("rule", "\n a : ID ;\n catch[A a] {foo}\n catch[B b] {fu}\n finally {bar}\n ", 97); Object actual = ((Tree)rstruct.getTree()).toStringTree(); Object expecting = "(RULE a (BLOCK (ALT ID)) (catch A a {foo}) (catch B b {fu}) (finally {bar}))"; - assertEquals("testing rule rule", expecting, actual); + assertEquals(expecting, actual, "testing rule rule"); } @Test public void test_rule7() throws Exception { @@ -149,7 +147,7 @@ public Object execParser( RuleReturnScope rstruct = (RuleReturnScope)execParser("rule", "\n\ta[int i]\n\tlocals [int a, float b]\n\t\t:\tA\n\t\t;\n\t", 107); Object actual = ((Tree)rstruct.getTree()).toStringTree(); Object expecting = "(RULE a int i (locals int a, float b) (BLOCK (ALT A)))"; - assertEquals("testing rule rule", expecting, actual); + assertEquals(expecting, actual, "testing rule rule"); } @Test public void test_rule8() throws Exception { @@ -157,13 +155,13 @@ public Object execParser( RuleReturnScope rstruct = 
(RuleReturnScope)execParser("rule", "\n\ta[int i] throws a.b.c\n\t\t:\tA\n\t\t;\n\t", 115); Object actual = ((Tree)rstruct.getTree()).toStringTree(); Object expecting = "(RULE a int i (throws a.b.c) (BLOCK (ALT A)))"; - assertEquals("testing rule rule", expecting, actual); + assertEquals(expecting, actual, "testing rule rule"); } @Test public void test_ebnf1() throws Exception { // gunit test on line 123 RuleReturnScope rstruct = (RuleReturnScope)execParser("ebnf", "(A|B)", 123); Object actual = ((Tree)rstruct.getTree()).toStringTree(); Object expecting = "(BLOCK (ALT A) (ALT B))"; - assertEquals("testing rule ebnf", expecting, actual); + assertEquals(expecting, actual, "testing rule ebnf"); } @Test public void test_ebnf2() throws Exception { @@ -171,7 +169,7 @@ public Object execParser( RuleReturnScope rstruct = (RuleReturnScope)execParser("ebnf", "(A|B)?", 124); Object actual = ((Tree)rstruct.getTree()).toStringTree(); Object expecting = "(? (BLOCK (ALT A) (ALT B)))"; - assertEquals("testing rule ebnf", expecting, actual); + assertEquals(expecting, actual, "testing rule ebnf"); } @Test public void test_ebnf3() throws Exception { @@ -179,7 +177,7 @@ public Object execParser( RuleReturnScope rstruct = (RuleReturnScope)execParser("ebnf", "(A|B)*", 125); Object actual = ((Tree)rstruct.getTree()).toStringTree(); Object expecting = "(* (BLOCK (ALT A) (ALT B)))"; - assertEquals("testing rule ebnf", expecting, actual); + assertEquals(expecting, actual, "testing rule ebnf"); } @Test public void test_ebnf4() throws Exception { @@ -187,13 +185,13 @@ public Object execParser( RuleReturnScope rstruct = (RuleReturnScope)execParser("ebnf", "(A|B)+", 126); Object actual = ((Tree)rstruct.getTree()).toStringTree(); Object expecting = "(+ (BLOCK (ALT A) (ALT B)))"; - assertEquals("testing rule ebnf", expecting, actual); + assertEquals(expecting, actual, "testing rule ebnf"); } @Test public void test_element1() throws Exception { // gunit test on line 129 RuleReturnScope rstruct = 
(RuleReturnScope)execParser("element", "~A", 129); Object actual = ((Tree)rstruct.getTree()).toStringTree(); Object expecting = "(~ (SET A))"; - assertEquals("testing rule element", expecting, actual); + assertEquals(expecting, actual, "testing rule element"); } @Test public void test_element2() throws Exception { @@ -201,7 +199,7 @@ public Object execParser( RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "b+", 130); Object actual = ((Tree)rstruct.getTree()).toStringTree(); Object expecting = "(+ (BLOCK (ALT b)))"; - assertEquals("testing rule element", expecting, actual); + assertEquals(expecting, actual, "testing rule element"); } @Test public void test_element3() throws Exception { @@ -209,7 +207,7 @@ public Object execParser( RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "(b)+", 131); Object actual = ((Tree)rstruct.getTree()).toStringTree(); Object expecting = "(+ (BLOCK (ALT b)))"; - assertEquals("testing rule element", expecting, actual); + assertEquals(expecting, actual, "testing rule element"); } @Test public void test_element4() throws Exception { @@ -217,7 +215,7 @@ public Object execParser( RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "b?", 132); Object actual = ((Tree)rstruct.getTree()).toStringTree(); Object expecting = "(? (BLOCK (ALT b)))"; - assertEquals("testing rule element", expecting, actual); + assertEquals(expecting, actual, "testing rule element"); } @Test public void test_element5() throws Exception { @@ -225,7 +223,7 @@ public Object execParser( RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "(b)?", 133); Object actual = ((Tree)rstruct.getTree()).toStringTree(); Object expecting = "(? 
(BLOCK (ALT b)))"; - assertEquals("testing rule element", expecting, actual); + assertEquals(expecting, actual, "testing rule element"); } @Test public void test_element6() throws Exception { @@ -233,7 +231,7 @@ public Object execParser( RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "(b)*", 134); Object actual = ((Tree)rstruct.getTree()).toStringTree(); Object expecting = "(* (BLOCK (ALT b)))"; - assertEquals("testing rule element", expecting, actual); + assertEquals(expecting, actual, "testing rule element"); } @Test public void test_element7() throws Exception { @@ -241,7 +239,7 @@ public Object execParser( RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "b*", 135); Object actual = ((Tree)rstruct.getTree()).toStringTree(); Object expecting = "(* (BLOCK (ALT b)))"; - assertEquals("testing rule element", expecting, actual); + assertEquals(expecting, actual, "testing rule element"); } @Test public void test_element8() throws Exception { @@ -249,7 +247,7 @@ public Object execParser( RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "'while'*", 136); Object actual = ((Tree)rstruct.getTree()).toStringTree(); Object expecting = "(* (BLOCK (ALT 'while')))"; - assertEquals("testing rule element", expecting, actual); + assertEquals(expecting, actual, "testing rule element"); } @Test public void test_element9() throws Exception { @@ -257,7 +255,7 @@ public Object execParser( RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "'a'+", 137); Object actual = ((Tree)rstruct.getTree()).toStringTree(); Object expecting = "(+ (BLOCK (ALT 'a')))"; - assertEquals("testing rule element", expecting, actual); + assertEquals(expecting, actual, "testing rule element"); } @Test public void test_element10() throws Exception { @@ -265,7 +263,7 @@ public Object execParser( RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "a[3]", 138); Object actual = ((Tree)rstruct.getTree()).toStringTree(); Object expecting = 
"(a 3)"; - assertEquals("testing rule element", expecting, actual); + assertEquals(expecting, actual, "testing rule element"); } @Test public void test_element11() throws Exception { @@ -273,7 +271,7 @@ public Object execParser( RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "'a'..'z'+", 139); Object actual = ((Tree)rstruct.getTree()).toStringTree(); Object expecting = "(+ (BLOCK (ALT (.. 'a' 'z'))))"; - assertEquals("testing rule element", expecting, actual); + assertEquals(expecting, actual, "testing rule element"); } @Test public void test_element12() throws Exception { @@ -281,7 +279,7 @@ public Object execParser( RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "x=ID", 140); Object actual = ((Tree)rstruct.getTree()).toStringTree(); Object expecting = "(= x ID)"; - assertEquals("testing rule element", expecting, actual); + assertEquals(expecting, actual, "testing rule element"); } @Test public void test_element13() throws Exception { @@ -289,7 +287,7 @@ public Object execParser( RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "x=ID?", 141); Object actual = ((Tree)rstruct.getTree()).toStringTree(); Object expecting = "(? 
(BLOCK (ALT (= x ID))))"; - assertEquals("testing rule element", expecting, actual); + assertEquals(expecting, actual, "testing rule element"); } @Test public void test_element14() throws Exception { @@ -297,7 +295,7 @@ public Object execParser( RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "x=ID*", 142); Object actual = ((Tree)rstruct.getTree()).toStringTree(); Object expecting = "(* (BLOCK (ALT (= x ID))))"; - assertEquals("testing rule element", expecting, actual); + assertEquals(expecting, actual, "testing rule element"); } @Test public void test_element15() throws Exception { @@ -305,7 +303,7 @@ public Object execParser( RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "x=b", 143); Object actual = ((Tree)rstruct.getTree()).toStringTree(); Object expecting = "(= x b)"; - assertEquals("testing rule element", expecting, actual); + assertEquals(expecting, actual, "testing rule element"); } @Test public void test_element16() throws Exception { @@ -313,7 +311,7 @@ public Object execParser( RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "x=(A|B)", 144); Object actual = ((Tree)rstruct.getTree()).toStringTree(); Object expecting = "(= x (BLOCK (ALT A) (ALT B)))"; - assertEquals("testing rule element", expecting, actual); + assertEquals(expecting, actual, "testing rule element"); } @Test public void test_element17() throws Exception { @@ -321,7 +319,7 @@ public Object execParser( RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "x=~(A|B)", 145); Object actual = ((Tree)rstruct.getTree()).toStringTree(); Object expecting = "(= x (~ (SET A B)))"; - assertEquals("testing rule element", expecting, actual); + assertEquals(expecting, actual, "testing rule element"); } @Test public void test_element18() throws Exception { @@ -329,7 +327,7 @@ public Object execParser( RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "x+=~(A|B)", 146); Object actual = ((Tree)rstruct.getTree()).toStringTree(); 
Object expecting = "(+= x (~ (SET A B)))"; - assertEquals("testing rule element", expecting, actual); + assertEquals(expecting, actual, "testing rule element"); } @Test public void test_element19() throws Exception { @@ -337,7 +335,7 @@ public Object execParser( RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "x+=~(A|B)+", 147); Object actual = ((Tree)rstruct.getTree()).toStringTree(); Object expecting = "(+ (BLOCK (ALT (+= x (~ (SET A B))))))"; - assertEquals("testing rule element", expecting, actual); + assertEquals(expecting, actual, "testing rule element"); } @Test public void test_element20() throws Exception { @@ -345,7 +343,7 @@ public Object execParser( RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "x=b+", 148); Object actual = ((Tree)rstruct.getTree()).toStringTree(); Object expecting = "(+ (BLOCK (ALT (= x b))))"; - assertEquals("testing rule element", expecting, actual); + assertEquals(expecting, actual, "testing rule element"); } @Test public void test_element21() throws Exception { @@ -353,7 +351,7 @@ public Object execParser( RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "x+=ID*", 149); Object actual = ((Tree)rstruct.getTree()).toStringTree(); Object expecting = "(* (BLOCK (ALT (+= x ID))))"; - assertEquals("testing rule element", expecting, actual); + assertEquals(expecting, actual, "testing rule element"); } @Test public void test_element22() throws Exception { @@ -361,7 +359,7 @@ public Object execParser( RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "x+='int'*", 150); Object actual = ((Tree)rstruct.getTree()).toStringTree(); Object expecting = "(* (BLOCK (ALT (+= x 'int'))))"; - assertEquals("testing rule element", expecting, actual); + assertEquals(expecting, actual, "testing rule element"); } @Test public void test_element23() throws Exception { @@ -369,7 +367,7 @@ public Object execParser( RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "x+=b+", 151); 
Object actual = ((Tree)rstruct.getTree()).toStringTree(); Object expecting = "(+ (BLOCK (ALT (+= x b))))"; - assertEquals("testing rule element", expecting, actual); + assertEquals(expecting, actual, "testing rule element"); } @Test public void test_element24() throws Exception { @@ -377,6 +375,6 @@ public Object execParser( RuleReturnScope rstruct = (RuleReturnScope)execParser("element", "({blort} 'x')*", 152); Object actual = ((Tree)rstruct.getTree()).toStringTree(); Object expecting = "(* (BLOCK (ALT {blort} 'x')))"; - assertEquals("testing rule element", expecting, actual); + assertEquals(expecting, actual, "testing rule element"); } } diff --git a/tool-testsuite/test/org/antlr/v4/test/tool/TestATNConstruction.java b/tool-testsuite/test/org/antlr/v4/test/tool/TestATNConstruction.java index 0452b2bec9..be7267379a 100644 --- a/tool-testsuite/test/org/antlr/v4/test/tool/TestATNConstruction.java +++ b/tool-testsuite/test/org/antlr/v4/test/tool/TestATNConstruction.java @@ -13,32 +13,25 @@ import org.antlr.v4.runtime.atn.ATN; import org.antlr.v4.runtime.atn.ATNState; import org.antlr.v4.test.runtime.ErrorQueue; +import org.antlr.v4.test.runtime.RuntimeTestUtils; import org.antlr.v4.tool.ErrorType; import org.antlr.v4.tool.Grammar; import org.antlr.v4.tool.LexerGrammar; import org.antlr.v4.tool.ast.GrammarAST; import org.antlr.v4.tool.ast.GrammarRootAST; import org.antlr.v4.tool.ast.RuleAST; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.util.Arrays; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; -public class TestATNConstruction extends BaseJavaToolTest { - @Before - @Override - public void testSetUp() throws Exception { - super.testSetUp(); - } - - @Test - public void testA() 
throws Exception { +public class TestATNConstruction { + @Test public void testA() throws Exception { Grammar g = new Grammar( "parser grammar P;\n"+ "a : A;"); @@ -47,7 +40,7 @@ public void testA() throws Exception { "s2-A->s3\n" + "s3->RuleStop_a_1\n" + "RuleStop_a_1-EOF->s4\n"; - checkRuleATN(g, "a", expecting); + RuntimeTestUtils.checkRuleATN(g, "a", expecting); } @Test public void testAB() throws Exception { Grammar g = new Grammar( @@ -59,7 +52,7 @@ public void testA() throws Exception { "s3-B->s4\n" + "s4->RuleStop_a_1\n" + "RuleStop_a_1-EOF->s5\n"; - checkRuleATN(g, "a", expecting); + RuntimeTestUtils.checkRuleATN(g, "a", expecting); } @Test public void testAorB() throws Exception { Grammar g = new Grammar( @@ -74,7 +67,7 @@ public void testA() throws Exception { "BlockEnd_6->RuleStop_a_1\n" + "s4-action_0:-1->BlockEnd_6\n" + "RuleStop_a_1-EOF->s7\n"; - checkRuleATN(g, "a", expecting); + RuntimeTestUtils.checkRuleATN(g, "a", expecting); } @Test public void testSetAorB() throws Exception { Grammar g = new Grammar( @@ -85,7 +78,7 @@ public void testA() throws Exception { "s2-{A, B}->s3\n" + "s3->RuleStop_a_1\n" + "RuleStop_a_1-EOF->s4\n"; - checkRuleATN(g, "a", expecting); + RuntimeTestUtils.checkRuleATN(g, "a", expecting); } @Test public void testLexerIsntSetMultiCharString() throws Exception { LexerGrammar g = new LexerGrammar( @@ -265,7 +258,7 @@ public void testA() throws Exception { "s2-{'b', A}->s3\n" + "s3->RuleStop_a_1\n" + "RuleStop_a_1-EOF->s4\n"; - checkRuleATN(g, "a", expecting); + RuntimeTestUtils.checkRuleATN(g, "a", expecting); } @Test public void testABorCD() throws Exception { Grammar g = new Grammar( @@ -281,7 +274,7 @@ public void testA() throws Exception { "s5-D->BlockEnd_7\n" + "BlockEnd_7->RuleStop_a_1\n" + "RuleStop_a_1-EOF->s8\n"; - checkRuleATN(g, "a", expecting); + RuntimeTestUtils.checkRuleATN(g, "a", expecting); } @Test public void testbA() throws Exception { Grammar g = new Grammar( @@ -294,13 +287,13 @@ public void testA() throws 
Exception { "s5-A->s6\n" + "s6->RuleStop_a_1\n" + "RuleStop_a_1-EOF->s9\n"; - checkRuleATN(g, "a", expecting); + RuntimeTestUtils.checkRuleATN(g, "a", expecting); expecting = "RuleStart_b_2->s7\n" + "s7-B->s8\n" + "s8->RuleStop_b_3\n" + "RuleStop_b_3->s5\n"; - checkRuleATN(g, "b", expecting); + RuntimeTestUtils.checkRuleATN(g, "b", expecting); } @Test public void testFollow() throws Exception { Grammar g = new Grammar( @@ -314,7 +307,7 @@ public void testA() throws Exception { "s10->RuleStop_b_3\n" + "RuleStop_b_3->s7\n" + "RuleStop_b_3->s12\n"; - checkRuleATN(g, "b", expecting); + RuntimeTestUtils.checkRuleATN(g, "b", expecting); } @Test public void testAorEpsilon() throws Exception { Grammar g = new Grammar( @@ -328,7 +321,7 @@ public void testA() throws Exception { "s3->BlockEnd_5\n" + "BlockEnd_5->RuleStop_a_1\n" + "RuleStop_a_1-EOF->s6\n"; - checkRuleATN(g, "a", expecting); + RuntimeTestUtils.checkRuleATN(g, "a", expecting); } @Test public void testAOptional() throws Exception { Grammar g = new Grammar( @@ -341,7 +334,7 @@ public void testA() throws Exception { "s2-A->BlockEnd_4\n" + "BlockEnd_4->RuleStop_a_1\n" + "RuleStop_a_1-EOF->s5\n"; - checkRuleATN(g, "a", expecting); + RuntimeTestUtils.checkRuleATN(g, "a", expecting); } @Test public void testAorBoptional() throws Exception { Grammar g = new Grammar( @@ -357,7 +350,7 @@ public void testA() throws Exception { "BlockEnd_6->RuleStop_a_1\n" + "s3-action_0:-1->BlockEnd_6\n" + "RuleStop_a_1-EOF->s7\n"; - checkRuleATN(g, "a", expecting); + RuntimeTestUtils.checkRuleATN(g, "a", expecting); } @Test public void testSetAorBoptional() throws Exception { Grammar g = new Grammar( @@ -370,7 +363,7 @@ public void testA() throws Exception { "s2-{A, B}->BlockEnd_4\n" + "BlockEnd_4->RuleStop_a_1\n" + "RuleStop_a_1-EOF->s5\n"; - checkRuleATN(g, "a", expecting); + RuntimeTestUtils.checkRuleATN(g, "a", expecting); } @Test public void testAorBthenC() throws Exception { Grammar g = new Grammar( @@ -382,7 +375,7 @@ public void 
testA() throws Exception { "s3-C->s4\n" + "s4->RuleStop_a_1\n" + "RuleStop_a_1-EOF->s5\n"; - checkRuleATN(g, "a", expecting); + RuntimeTestUtils.checkRuleATN(g, "a", expecting); } @Test public void testAplus() throws Exception { Grammar g = new Grammar( @@ -397,7 +390,7 @@ public void testA() throws Exception { "PlusLoopBack_5->s6\n" + "s6->RuleStop_a_1\n" + "RuleStop_a_1-EOF->s7\n"; - checkRuleATN(g, "a", expecting); + RuntimeTestUtils.checkRuleATN(g, "a", expecting); } @Test public void testAplusSingleAltHasPlusASTPointingAtLoopBackState() throws Exception { Grammar g = new Grammar( @@ -413,7 +406,7 @@ public void testA() throws Exception { "PlusLoopBack_10->s11\n" + "s11->RuleStop_a_3\n" + "RuleStop_a_3->s5\n"; - checkRuleATN(g, "a", expecting); + RuntimeTestUtils.checkRuleATN(g, "a", expecting); // Get all AST -> ATNState relationships. Make sure loopback is covered when no loop entry decision List ruleNodes = g.ast.getNodesWithType(ANTLRParser.RULE); RuleAST a = (RuleAST)ruleNodes.get(1); @@ -442,7 +435,7 @@ public void testA() throws Exception { "PlusLoopBack_7->s8\n" + "s8->RuleStop_a_1\n" + "RuleStop_a_1-EOF->s9\n"; - checkRuleATN(g, "a", expecting); + RuntimeTestUtils.checkRuleATN(g, "a", expecting); } @Test public void testAorBorEmptyPlus() throws Exception { Grammar g = new Grammar( @@ -461,7 +454,7 @@ public void testA() throws Exception { "PlusLoopBack_7->s8\n" + "s8->RuleStop_a_1\n" + "RuleStop_a_1-EOF->s9\n"; - checkRuleATN(g, "a", expecting); + RuntimeTestUtils.checkRuleATN(g, "a", expecting); } @Test public void testEmptyOrEmpty() throws Exception { Grammar g = new Grammar( @@ -475,7 +468,7 @@ public void testA() throws Exception { "s3->BlockEnd_5\n"+ "BlockEnd_5->RuleStop_a_1\n"+ "RuleStop_a_1-EOF->s6\n"; - checkRuleATN(g, "a", expecting); + RuntimeTestUtils.checkRuleATN(g, "a", expecting); } @Test public void testAStar() throws Exception { Grammar g = new Grammar( @@ -491,7 +484,7 @@ public void testA() throws Exception { "RuleStop_a_1-EOF->s8\n" 
+ "BlockEnd_4->StarLoopBack_7\n" + "StarLoopBack_7->StarLoopEntry_5\n"; - checkRuleATN(g, "a", expecting); + RuntimeTestUtils.checkRuleATN(g, "a", expecting); } @Test public void testNestedAstar() throws Exception { Grammar g = new Grammar( @@ -514,7 +507,7 @@ public void testA() throws Exception { "BlockEnd_5->StarLoopBack_8\n" + "StarLoopBack_13->StarLoopEntry_11\n" + "StarLoopBack_8->StarLoopEntry_6\n"; - checkRuleATN(g, "a", expecting); + RuntimeTestUtils.checkRuleATN(g, "a", expecting); } @Test public void testAorBstar() throws Exception { Grammar g = new Grammar( @@ -533,7 +526,7 @@ public void testA() throws Exception { "BlockEnd_6->StarLoopBack_9\n" + "s4-action_0:-1->BlockEnd_6\n" + "StarLoopBack_9->StarLoopEntry_7\n"; - checkRuleATN(g, "a", expecting); + RuntimeTestUtils.checkRuleATN(g, "a", expecting); } @Test public void testPredicatedAorB() throws Exception { Grammar g = new Grammar( @@ -549,7 +542,7 @@ public void testA() throws Exception { "s5-B->BlockEnd_7\n" + "BlockEnd_7->RuleStop_a_1\n" + "RuleStop_a_1-EOF->s8\n"; - checkRuleATN(g, "a", expecting); + RuntimeTestUtils.checkRuleATN(g, "a", expecting); } @Test public void testParserRuleRefInLexerRule() throws Exception { @@ -638,7 +631,7 @@ public void testA() throws Exception { "StarLoopBack_29->StarLoopEntry_27\n"+ "s20-e->RuleStart_e_2\n"+ "s21->BlockEnd_26\n"; - checkRuleATN(g, "e", expecting); + RuntimeTestUtils.checkRuleATN(g, "e", expecting); } diff --git a/tool-testsuite/test/org/antlr/v4/test/tool/TestATNDeserialization.java b/tool-testsuite/test/org/antlr/v4/test/tool/TestATNDeserialization.java index 25d8127ad0..1398fb795b 100644 --- a/tool-testsuite/test/org/antlr/v4/test/tool/TestATNDeserialization.java +++ b/tool-testsuite/test/org/antlr/v4/test/tool/TestATNDeserialization.java @@ -12,23 +12,18 @@ import org.antlr.v4.runtime.misc.IntegerList; import org.antlr.v4.tool.Grammar; import org.antlr.v4.tool.LexerGrammar; -import org.junit.Before; -import org.junit.Test; +import 
org.junit.jupiter.api.Test; import java.util.Arrays; import static org.antlr.v4.runtime.atn.ATNDeserializer.encodeIntsWith16BitWords; import static org.antlr.v4.runtime.atn.ATNDeserializer.decodeIntsEncodedAs16BitWords; -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertEquals; +import static org.antlr.v4.test.tool.ToolTestUtils.createATN; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; -public class TestATNDeserialization extends BaseJavaToolTest { - @Before - @Override - public void testSetUp() throws Exception { - super.testSetUp(); - } +public class TestATNDeserialization { @Test public void testSimpleNoBlock() throws Exception { Grammar g = new Grammar( "parser grammar T;\n"+ diff --git a/tool-testsuite/test/org/antlr/v4/test/tool/TestATNInterpreter.java b/tool-testsuite/test/org/antlr/v4/test/tool/TestATNInterpreter.java index 758d565182..a3dcdc7ce7 100644 --- a/tool-testsuite/test/org/antlr/v4/test/tool/TestATNInterpreter.java +++ b/tool-testsuite/test/org/antlr/v4/test/tool/TestATNInterpreter.java @@ -16,28 +16,22 @@ import org.antlr.v4.runtime.atn.LexerATNSimulator; import org.antlr.v4.runtime.dfa.DFA; import org.antlr.v4.runtime.misc.IntegerList; -import org.antlr.v4.test.runtime.MockIntTokenStream; import org.antlr.v4.tool.DOTGenerator; import org.antlr.v4.tool.Grammar; import org.antlr.v4.tool.LexerGrammar; import org.antlr.v4.tool.Rule; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.Test; -import static org.antlr.v4.test.runtime.RuntimeTestUtils.getTokenTypesViaATN; -import static org.junit.Assert.assertEquals; +import static org.antlr.v4.test.tool.ToolTestUtils.createATN; +import static org.antlr.v4.test.tool.ToolTestUtils.getTokenTypesViaATN; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.fail; // NOTICE: TOKENS IN LEXER, PARSER MUST BE 
SAME OR TOKEN TYPE MISMATCH // NOTICE: TOKENS IN LEXER, PARSER MUST BE SAME OR TOKEN TYPE MISMATCH // NOTICE: TOKENS IN LEXER, PARSER MUST BE SAME OR TOKEN TYPE MISMATCH -public class TestATNInterpreter extends BaseJavaToolTest { - @Before - @Override - public void testSetUp() throws Exception { - super.testSetUp(); - } - +public class TestATNInterpreter { @Test public void testSimpleNoBlock() throws Exception { LexerGrammar lg = new LexerGrammar( "lexer grammar L;\n" + @@ -94,7 +88,7 @@ public void testSetUp() throws Exception { checkMatchedAlt(lg, g, "abc", 2); } - @Test(expected = NoViableAltException.class) + @Test public void testMustTrackPreviousGoodAltWithEOF() throws Exception { LexerGrammar lg = new LexerGrammar( "lexer grammar L;\n" + @@ -110,11 +104,11 @@ public void testMustTrackPreviousGoodAltWithEOF() throws Exception { try { checkMatchedAlt(lg, g, "ac", 1); + fail(); } catch (NoViableAltException re) { assertEquals(1, re.getOffendingToken().getTokenIndex()); assertEquals(3, re.getOffendingToken().getType()); - throw re; } } @@ -138,7 +132,7 @@ public void testMustTrackPreviousGoodAltWithEOF() throws Exception { checkMatchedAlt(lg, g, "abcd", 3); } - @Test(expected = NoViableAltException.class) + @Test public void testMustTrackPreviousGoodAlt2WithEOF() throws Exception { LexerGrammar lg = new LexerGrammar( "lexer grammar L;\n" + @@ -156,11 +150,11 @@ public void testMustTrackPreviousGoodAlt2WithEOF() throws Exception { try { checkMatchedAlt(lg, g, "abd", 1); + fail(); } catch (NoViableAltException re) { assertEquals(2, re.getOffendingToken().getTokenIndex()); assertEquals(4, re.getOffendingToken().getType()); - throw re; } } @@ -184,7 +178,7 @@ public void testMustTrackPreviousGoodAlt2WithEOF() throws Exception { checkMatchedAlt(lg, g, "abcd", 3); } - @Test(expected = NoViableAltException.class) + @Test public void testMustTrackPreviousGoodAlt3WithEOF() throws Exception { LexerGrammar lg = new LexerGrammar( "lexer grammar L;\n" + @@ -202,11 +196,11 @@ 
public void testMustTrackPreviousGoodAlt3WithEOF() throws Exception { try { checkMatchedAlt(lg, g, "abd", 1); + fail(); } catch (NoViableAltException re) { assertEquals(2, re.getOffendingToken().getTokenIndex()); assertEquals(4, re.getOffendingToken().getType()); - throw re; } } @@ -271,7 +265,7 @@ public void testMustTrackPreviousGoodAlt3WithEOF() throws Exception { checkMatchedAlt(lg, g, "abcd", 3); } - @Test(expected = NoViableAltException.class) + @Test public void testAmbigAltChooseFirst2WithEOF() throws Exception { LexerGrammar lg = new LexerGrammar( "lexer grammar L;\n" + @@ -288,11 +282,11 @@ public void testAmbigAltChooseFirst2WithEOF() throws Exception { try { checkMatchedAlt(lg, g, "abd", 1); + fail(); } catch (NoViableAltException re) { assertEquals(2, re.getOffendingToken().getTokenIndex()); assertEquals(4, re.getOffendingToken().getType()); - throw re; } } diff --git a/tool-testsuite/test/org/antlr/v4/test/tool/TestATNLexerInterpreter.java b/tool-testsuite/test/org/antlr/v4/test/tool/TestATNLexerInterpreter.java index 9af6c6ec95..9f5987734d 100644 --- a/tool-testsuite/test/org/antlr/v4/test/tool/TestATNLexerInterpreter.java +++ b/tool-testsuite/test/org/antlr/v4/test/tool/TestATNLexerInterpreter.java @@ -6,20 +6,22 @@ package org.antlr.v4.test.tool; -import org.antlr.v4.runtime.CharStream; -import org.antlr.v4.runtime.CharStreams; +import org.antlr.v4.runtime.*; import org.antlr.v4.runtime.atn.ATN; import org.antlr.v4.runtime.atn.ATNState; +import org.antlr.v4.runtime.atn.LexerATNSimulator; +import org.antlr.v4.runtime.dfa.DFA; import org.antlr.v4.runtime.misc.Utils; -import org.antlr.v4.test.runtime.RuntimeTestUtils; +import org.antlr.v4.test.runtime.states.ExecutedState; import org.antlr.v4.tool.DOTGenerator; import org.antlr.v4.tool.LexerGrammar; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.Test; +import java.util.ArrayList; import java.util.List; -import static org.junit.Assert.assertEquals; +import static 
org.antlr.v4.test.tool.ToolTestUtils.*; +import static org.junit.jupiter.api.Assertions.assertEquals; /** * Lexer rules are little quirky when it comes to wildcards. Problem @@ -34,13 +36,7 @@ * want, but occasionally there are some quirks as you'll see from * the tests below. */ -public class TestATNLexerInterpreter extends BaseJavaToolTest { - @Before - @Override - public void testSetUp() throws Exception { - super.testSetUp(); - } - +public class TestATNLexerInterpreter { @Test public void testLexerTwoRules() throws Exception { LexerGrammar lg = new LexerGrammar( "lexer grammar L;\n"+ @@ -429,9 +425,9 @@ public void testSetUp() throws Exception { "lexer grammar L;\n" + "options { caseInsensitive = true; }\n" + "LITERAL_WITH_NOT: ~'f';\n"; // ~('f' | 'F) - execLexer("L.g4", grammar, "L", "F"); + ExecutedState executedState = execLexer("L.g4", grammar, "L", "F"); - assertEquals("line 1:0 token recognition error at: 'F'\n", getParseErrors()); + assertEquals("line 1:0 token recognition error at: 'F'\n", executedState.errors); } @Test public void testLexerCaseInsensitiveSetWithNegation() { @@ -439,9 +435,9 @@ public void testSetUp() throws Exception { "lexer grammar L;\n" + "options { caseInsensitive = true; }\n" + "SET_WITH_NOT: ~[a-c];\n"; // ~[a-cA-C] - execLexer("L.g4", grammar, "L", "B"); + ExecutedState executedState = execLexer("L.g4", grammar, "L", "B"); - assertEquals("line 1:0 token recognition error at: 'B'\n", getParseErrors()); + assertEquals("line 1:0 token recognition error at: 'B'\n", executedState.errors); } @Test public void testLexerCaseInsensitiveFragments() throws Exception { @@ -521,22 +517,47 @@ public void testSetUp() throws Exception { "options { caseInsensitive=true; }\n" + "STRING options { caseInsensitive=false; } : 'N'? 
'\\'' (~'\\'' | '\\'\\'')* '\\'';\n"; - execLexer("L.g4", grammar, "L", "n'sample'"); - assertEquals("line 1:0 token recognition error at: 'n'\n", getParseErrors()); + ExecutedState executedState = execLexer("L.g4", grammar, "L", "n'sample'"); + assertEquals("line 1:0 token recognition error at: 'n'\n", executedState.errors); } - protected void checkLexerMatches(LexerGrammar lg, String inputString, String expecting) { + private void checkLexerMatches(LexerGrammar lg, String inputString, String expecting) { ATN atn = createATN(lg, true); CharStream input = CharStreams.fromString(inputString); ATNState startState = atn.modeNameToStartState.get("DEFAULT_MODE"); DOTGenerator dot = new DOTGenerator(lg); // System.out.println(dot.getDOT(startState, true)); - List tokenTypes = RuntimeTestUtils.getTokenTypes(lg, atn, input); + List tokenTypes = getTokenTypes(lg, atn, input); String result = Utils.join(tokenTypes.iterator(), ", "); // System.out.println(tokenTypes); assertEquals(expecting, result); } + private static List getTokenTypes(LexerGrammar lg, ATN atn, CharStream input) { + LexerATNSimulator interp = new LexerATNSimulator(atn, new DFA[]{new DFA(atn.modeToStartState.get(Lexer.DEFAULT_MODE))}, null); + List tokenTypes = new ArrayList<>(); + int ttype; + boolean hitEOF = false; + do { + if ( hitEOF ) { + tokenTypes.add("EOF"); + break; + } + int t = input.LA(1); + ttype = interp.match(input, Lexer.DEFAULT_MODE); + if ( ttype== Token.EOF ) { + tokenTypes.add("EOF"); + } + else { + tokenTypes.add(lg.typeToTokenList.get(ttype)); + } + + if ( t== IntStream.EOF ) { + hitEOF = true; + } + } while ( ttype!=Token.EOF ); + return tokenTypes; + } } diff --git a/tool-testsuite/test/org/antlr/v4/test/tool/TestATNParserPrediction.java b/tool-testsuite/test/org/antlr/v4/test/tool/TestATNParserPrediction.java index 3fb0dc9e61..6831287813 100644 --- a/tool-testsuite/test/org/antlr/v4/test/tool/TestATNParserPrediction.java +++ 
b/tool-testsuite/test/org/antlr/v4/test/tool/TestATNParserPrediction.java @@ -17,32 +17,24 @@ import org.antlr.v4.runtime.atn.PredictionContextCache; import org.antlr.v4.runtime.dfa.DFA; import org.antlr.v4.runtime.misc.IntegerList; -import org.antlr.v4.test.runtime.MockIntTokenStream; import org.antlr.v4.tool.DOTGenerator; import org.antlr.v4.tool.Grammar; import org.antlr.v4.tool.LeftRecursiveRule; import org.antlr.v4.tool.LexerGrammar; import org.antlr.v4.tool.Rule; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.util.Arrays; -import static org.antlr.v4.test.runtime.RuntimeTestUtils.getTokenTypesViaATN; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.antlr.v4.test.tool.ToolTestUtils.*; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; // NOTICE: TOKENS IN LEXER, PARSER MUST BE SAME OR TOKEN TYPE MISMATCH // NOTICE: TOKENS IN LEXER, PARSER MUST BE SAME OR TOKEN TYPE MISMATCH // NOTICE: TOKENS IN LEXER, PARSER MUST BE SAME OR TOKEN TYPE MISMATCH -public class TestATNParserPrediction extends BaseJavaToolTest { - @Before - @Override - public void testSetUp() throws Exception { - super.testSetUp(); - } - +public class TestATNParserPrediction { @Test public void testAorB() throws Exception { LexerGrammar lg = new LexerGrammar( "lexer grammar L;\n" + @@ -499,7 +491,6 @@ public void testSetUp() throws Exception { public void checkPredictedAlt(LexerGrammar lg, Grammar g, int decision, String inputString, int expectedAlt) { - Tool.internalOption_ShowATNConfigsInDFA = true; ATN lexatn = createATN(lg, true); LexerATNSimulator lexInterp = new LexerATNSimulator(lexatn,new DFA[] { new DFA(lexatn.modeToStartState.get(Lexer.DEFAULT_MODE)) },new PredictionContextCache()); diff --git a/tool-testsuite/test/org/antlr/v4/test/tool/TestATNSerialization.java 
b/tool-testsuite/test/org/antlr/v4/test/tool/TestATNSerialization.java index dd4abc8a6f..e07baddbd3 100644 --- a/tool-testsuite/test/org/antlr/v4/test/tool/TestATNSerialization.java +++ b/tool-testsuite/test/org/antlr/v4/test/tool/TestATNSerialization.java @@ -9,26 +9,19 @@ import org.antlr.v4.runtime.atn.ATN; import org.antlr.v4.runtime.atn.ATNSerializer; import org.antlr.v4.runtime.misc.IntegerList; -import org.antlr.v4.tool.DOTGenerator; import org.antlr.v4.tool.Grammar; import org.antlr.v4.tool.LexerGrammar; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.util.Arrays; import static org.antlr.v4.runtime.atn.ATNDeserializer.encodeIntsWith16BitWords; import static org.antlr.v4.runtime.atn.ATNDeserializer.decodeIntsEncodedAs16BitWords; -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertEquals; - -public class TestATNSerialization extends BaseJavaToolTest { - @Before - @Override - public void testSetUp() throws Exception { - super.testSetUp(); - } +import static org.antlr.v4.test.tool.ToolTestUtils.createATN; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +public class TestATNSerialization { @Test public void testSimpleNoBlock() throws Exception { Grammar g = new Grammar( "parser grammar T;\n"+ diff --git a/tool-testsuite/test/org/antlr/v4/test/tool/TestActionSplitter.java b/tool-testsuite/test/org/antlr/v4/test/tool/TestActionSplitter.java index d376c7bee5..82908eb347 100644 --- a/tool-testsuite/test/org/antlr/v4/test/tool/TestActionSplitter.java +++ b/tool-testsuite/test/org/antlr/v4/test/tool/TestActionSplitter.java @@ -10,22 +10,15 @@ import org.antlr.runtime.Token; import org.antlr.v4.parse.ActionSplitter; import org.antlr.v4.semantics.BlankActionSplitterListener; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.util.ArrayList; import 
java.util.List; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; -public class TestActionSplitter extends BaseJavaToolTest { - @Before - @Override - public void testSetUp() throws Exception { - super.testSetUp(); - } - - static String[] exprs = { +public class TestActionSplitter { + final static String[] exprs = { "foo", "['foo'<" + ActionSplitter.TEXT + ">]", "$x", "['$x'<" + ActionSplitter.ATTR + ">]", "\\$x", "['\\$x'<" + ActionSplitter.TEXT + ">]", @@ -42,16 +35,17 @@ public void testSetUp() throws Exception { "$foo.get(\"ick\");", "['$foo'<" + ActionSplitter.ATTR + ">, '.get(\"ick\");'<" + ActionSplitter.TEXT + ">]", }; - @Test public void testExprs() { - for (int i = 0; i < exprs.length; i+=2) { - String input = exprs[i]; - String expect = exprs[i+1]; - List chunks = getActionChunks(input); - assertEquals("input: "+input, expect, chunks.toString()); - } - } + @Test + public void testExprs() { + for (int i = 0; i < exprs.length; i += 2) { + String input = exprs[i]; + String expect = exprs[i + 1]; + List chunks = getActionChunks(input); + assertEquals(expect, chunks.toString(), "input: " + input); + } + } - public static List getActionChunks(String a) { + private static List getActionChunks(String a) { List chunks = new ArrayList(); ActionSplitter splitter = new ActionSplitter(new ANTLRStringStream(a), new BlankActionSplitterListener()); diff --git a/tool-testsuite/test/org/antlr/v4/test/tool/TestActionTranslation.java b/tool-testsuite/test/org/antlr/v4/test/tool/TestActionTranslation.java index e601cec5d6..80314a91d9 100644 --- a/tool-testsuite/test/org/antlr/v4/test/tool/TestActionTranslation.java +++ b/tool-testsuite/test/org/antlr/v4/test/tool/TestActionTranslation.java @@ -6,19 +6,25 @@ package org.antlr.v4.test.tool; +import org.antlr.v4.analysis.AnalysisPipeline; +import org.antlr.v4.automata.ATNFactory; +import org.antlr.v4.automata.LexerATNFactory; +import 
org.antlr.v4.automata.ParserATNFactory; +import org.antlr.v4.codegen.CodeGenerator; +import org.antlr.v4.semantics.SemanticPipeline; +import org.antlr.v4.test.runtime.ErrorQueue; import org.antlr.v4.tool.Grammar; -import org.junit.Before; -import org.junit.Test; +import org.antlr.v4.tool.LexerGrammar; +import org.junit.jupiter.api.Test; +import org.stringtemplate.v4.ST; +import org.stringtemplate.v4.STGroup; +import org.stringtemplate.v4.STGroupString; + +import static org.junit.jupiter.api.Assertions.assertEquals; /** */ @SuppressWarnings("unused") -public class TestActionTranslation extends BaseJavaToolTest { - @Before - @Override - public void testSetUp() throws Exception { - super.testSetUp(); - } - +public class TestActionTranslation { String attributeTemplate = "attributeTemplate(members,init,inline,finally,inline2) ::= <<\n" + "parser grammar A;\n"+ @@ -217,188 +223,39 @@ public void testSetUp() throws Exception { Grammar g = new Grammar(gS); } - -/* - @Test public void testSimplePlusEqualLabel() throws Exception { - String action = "$ids.size();"; // must be qualified - } - @Test public void testPlusEqualStringLabel() throws Exception { - String action = "$ids.size();"; // must be qualified - } - @Test public void testPlusEqualSetLabel() throws Exception { - String action = "$ids.size();"; // must be qualified - } - @Test public void testPlusEqualWildcardLabel() throws Exception { - String action = "$ids.size();"; // must be qualified - } - @Test public void testImplicitTokenLabel() throws Exception { - String action = "$ID; $ID.text; $ID.getText()"; - } - - @Test public void testImplicitRuleLabel() throws Exception { - String action = "$r.start;"; - } - - @Test public void testReuseExistingLabelWithImplicitRuleLabel() throws Exception { - String action = "$r.start;"; - } - - @Test public void testReuseExistingListLabelWithImplicitRuleLabel() throws Exception { - String action = "$r.start;"; - } - - @Test public void 
testReuseExistingLabelWithImplicitTokenLabel() throws Exception { - String action = "$ID.text;"; - } - - @Test public void testReuseExistingListLabelWithImplicitTokenLabel() throws Exception { - String action = "$ID.text;"; - } - - @Test public void testRuleLabelWithoutOutputOption() throws Exception { - } - @Test public void testMissingArgs() throws Exception { - } - @Test public void testArgsWhenNoneDefined() throws Exception { - } - @Test public void testReturnInitValue() throws Exception { - } - @Test public void testMultipleReturnInitValue() throws Exception { - } - @Test public void testCStyleReturnInitValue() throws Exception { - } - @Test public void testArgsWithInitValues() throws Exception { - } - @Test public void testArgsOnToken() throws Exception { - } - @Test public void testArgsOnTokenInLexer() throws Exception { - } - @Test public void testLabelOnRuleRefInLexer() throws Exception { - String action = "$i.text"; - } - - @Test public void testRefToRuleRefInLexer() throws Exception { - String action = "$ID.text"; - } - - @Test public void testRefToRuleRefInLexerNoAttribute() throws Exception { - String action = "$ID"; - } - - @Test public void testCharLabelInLexer() throws Exception { - } - @Test public void testCharListLabelInLexer() throws Exception { - } - @Test public void testWildcardCharLabelInLexer() throws Exception { - } - @Test public void testWildcardCharListLabelInLexer() throws Exception { - } - @Test public void testMissingArgsInLexer() throws Exception { - } - @Test public void testLexerRulePropertyRefs() throws Exception { - String action = "$text $type $line $pos $channel $index $start $stop"; - } - - @Test public void testLexerLabelRefs() throws Exception { - String action = "$a $b.text $c $d.text"; - } - - @Test public void testSettingLexerRulePropertyRefs() throws Exception { - String action = "$text $type=1 $line=1 $pos=1 $channel=1 $index"; - } - - @Test public void testArgsOnTokenInLexerRuleOfCombined() throws Exception { - } - 
@Test public void testMissingArgsOnTokenInLexerRuleOfCombined() throws Exception { - } - @Test public void testTokenLabelTreeProperty() throws Exception { - String action = "$id.tree;"; - } - - @Test public void testTokenRefTreeProperty() throws Exception { - String action = "$ID.tree;"; - } - - @Test public void testAmbiguousTokenRef() throws Exception { - String action = "$ID;"; - } - - @Test public void testAmbiguousTokenRefWithProp() throws Exception { - String action = "$ID.text;"; - } - - @Test public void testRuleRefWithDynamicScope() throws Exception { - String action = "$field::x = $field.st;"; - } - - @Test public void testAssignToOwnRulenameAttr() throws Exception { - String action = "$rule.tree = null;"; - } - - @Test public void testAssignToOwnParamAttr() throws Exception { - String action = "$rule.i = 42; $i = 23;"; - } - - @Test public void testIllegalAssignToOwnRulenameAttr() throws Exception { - String action = "$rule.stop = 0;"; - } - - @Test public void testIllegalAssignToLocalAttr() throws Exception { - String action = "$tree = null; $st = null; $start = 0; $stop = 0; $text = 0;"; - } - - @Test public void testIllegalAssignRuleRefAttr() throws Exception { - String action = "$other.tree = null;"; - } - - @Test public void testIllegalAssignTokenRefAttr() throws Exception { - String action = "$ID.text = \"test\";"; - } - - @Test public void testAssignToTreeNodeAttribute() throws Exception { - String action = "$tree.scope = localScope;"; - } - - @Test public void testDoNotTranslateAttributeCompare() throws Exception { - String action = "$a.line == $b.line"; - } - - @Test public void testDoNotTranslateScopeAttributeCompare() throws Exception { - String action = "if ($rule::foo == \"foo\" || 1) { System.out.println(\"ouch\"); }"; - } - - @Test public void testTreeRuleStopAttributeIsInvalid() throws Exception { - String action = "$r.x; $r.start; $r.stop"; - } - - @Test public void testRefToTextAttributeForCurrentTreeRule() throws Exception { - String 
action = "$text"; - } - - @Test public void testTypeOfGuardedAttributeRefIsCorrect() throws Exception { - String action = "int x = $b::n;"; - } - - @Test public void testBracketArgParsing() throws Exception { - } - - @Test public void testStringArgParsing() throws Exception { - String action = "34, '{', \"it's<\", '\"', \"\\\"\", 19"; - } - @Test public void testComplicatedSingleArgParsing() throws Exception { - String action = "(*a).foo(21,33,\",\")"; - } - @Test public void testArgWithLT() throws Exception { - String action = "34<50"; - } - @Test public void testGenericsAsArgumentDefinition() throws Exception { - String action = "$foo.get(\"ick\");"; - } - @Test public void testGenericsAsArgumentDefinition2() throws Exception { - String action = "$foo.get(\"ick\"); x=3;"; - } - @Test public void testGenericsAsReturnValue() throws Exception { + private static void testActions(String templates, String actionName, String action, String expected) throws org.antlr.runtime.RecognitionException { + int lp = templates.indexOf('('); + String name = templates.substring(0, lp); + STGroup group = new STGroupString(templates); + ST st = group.getInstanceOf(name); + st.add(actionName, action); + String grammar = st.render(); + ErrorQueue equeue = new ErrorQueue(); + Grammar g = new Grammar(grammar, equeue); + if ( g.ast!=null && !g.ast.hasErrors ) { + SemanticPipeline sem = new SemanticPipeline(g); + sem.process(); + + ATNFactory factory = new ParserATNFactory(g); + if ( g.isLexer() ) factory = new LexerATNFactory((LexerGrammar)g); + g.atn = factory.createATN(); + + AnalysisPipeline anal = new AnalysisPipeline(g); + anal.process(); + + CodeGenerator gen = CodeGenerator.create(g); + ST outputFileST = gen.generateParser(false); + String output = outputFileST.render(); + //System.out.println(output); + String b = "#" + actionName + "#"; + int start = output.indexOf(b); + String e = "#end-" + actionName + "#"; + int end = output.indexOf(e); + String snippet = 
output.substring(start+b.length(),end); + assertEquals(expected, snippet); + } + if ( equeue.size()>0 ) { +// System.err.println(equeue.toString()); + } } -*/ - // TODO: nonlocal $rule::x } diff --git a/tool-testsuite/test/org/antlr/v4/test/tool/TestAmbigParseTrees.java b/tool-testsuite/test/org/antlr/v4/test/tool/TestAmbigParseTrees.java index a91ade8153..a4ce0ee513 100644 --- a/tool-testsuite/test/org/antlr/v4/test/tool/TestAmbigParseTrees.java +++ b/tool-testsuite/test/org/antlr/v4/test/tool/TestAmbigParseTrees.java @@ -24,11 +24,12 @@ import org.antlr.v4.tool.Grammar; import org.antlr.v4.tool.GrammarParserInterpreter; import org.antlr.v4.tool.LexerGrammar; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.util.List; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; + public class TestAmbigParseTrees { @Test public void testParseDecisionWithinAmbiguousStartRule() throws Exception { diff --git a/tool-testsuite/test/org/antlr/v4/test/tool/TestAttributeChecks.java b/tool-testsuite/test/org/antlr/v4/test/tool/TestAttributeChecks.java index f36511ebf7..15397c5cc4 100644 --- a/tool-testsuite/test/org/antlr/v4/test/tool/TestAttributeChecks.java +++ b/tool-testsuite/test/org/antlr/v4/test/tool/TestAttributeChecks.java @@ -8,20 +8,15 @@ import org.antlr.runtime.RecognitionException; import org.antlr.v4.tool.ErrorType; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.stringtemplate.v4.ST; import org.stringtemplate.v4.STGroup; import org.stringtemplate.v4.misc.ErrorBuffer; -/** */ -public class TestAttributeChecks extends BaseJavaToolTest { - @Before - @Override - public void testSetUp() throws Exception { - super.testSetUp(); - } +import static org.antlr.v4.test.tool.ToolTestUtils.testErrors; +/** */ +public class TestAttributeChecks { String attributeTemplate = "parser grammar A;\n"+ "@members {}\n" + @@ -245,16 +240,16 @@ public void 
testSetUp() throws Exception { testErrors(new String[] {grammar, expected}, false); } - public void testActions(String location, String[] pairs, String template) { - for (int i = 0; i < pairs.length; i+=2) { - String action = pairs[i]; - String expected = pairs[i+1]; - STGroup g = new STGroup('<','>'); - g.setListener(new ErrorBuffer()); // hush warnings - ST st = new ST(g, template); - st.add(location, action); - String grammar = st.render(); - testErrors(new String[] {grammar, expected}, false); - } - } + private static void testActions(String location, String[] pairs, String template) { + for (int i = 0; i < pairs.length; i += 2) { + String action = pairs[i]; + String expected = pairs[i + 1]; + STGroup g = new STGroup('<', '>'); + g.setListener(new ErrorBuffer()); // hush warnings + ST st = new ST(g, template); + st.add(location, action); + String grammar = st.render(); + testErrors(new String[]{grammar, expected}, false); + } + } } diff --git a/tool-testsuite/test/org/antlr/v4/test/tool/TestBasicSemanticErrors.java b/tool-testsuite/test/org/antlr/v4/test/tool/TestBasicSemanticErrors.java index f6e2ad2238..4a41630baf 100644 --- a/tool-testsuite/test/org/antlr/v4/test/tool/TestBasicSemanticErrors.java +++ b/tool-testsuite/test/org/antlr/v4/test/tool/TestBasicSemanticErrors.java @@ -7,18 +7,13 @@ package org.antlr.v4.test.tool; import org.antlr.v4.tool.ErrorType; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.stringtemplate.v4.ST; -public class TestBasicSemanticErrors extends BaseJavaToolTest { - @Before - @Override - public void testSetUp() throws Exception { - super.testSetUp(); - } +import static org.antlr.v4.test.tool.ToolTestUtils.testErrors; - static String[] U = { +public class TestBasicSemanticErrors { + final static String[] U = { // INPUT "parser grammar U;\n" + "options { foo=bar; k=3;}\n" + @@ -50,7 +45,7 @@ public void testSetUp() throws Exception { "warning(" + ErrorType.ILLEGAL_OPTION.code + "): 
U.g4:16:16: unsupported option x\n", }; - @Test public void testU() { super.testErrors(U, false); } + @Test public void testU() { testErrors(U, false); } /** * Regression test for #25 "Don't allow labels on not token set subrules". diff --git a/tool-testsuite/test/org/antlr/v4/test/tool/TestBufferedTokenStream.java b/tool-testsuite/test/org/antlr/v4/test/tool/TestBufferedTokenStream.java index b5548fa2df..1fd83a6c99 100644 --- a/tool-testsuite/test/org/antlr/v4/test/tool/TestBufferedTokenStream.java +++ b/tool-testsuite/test/org/antlr/v4/test/tool/TestBufferedTokenStream.java @@ -14,18 +14,11 @@ import org.antlr.v4.runtime.TokenSource; import org.antlr.v4.runtime.TokenStream; import org.antlr.v4.tool.LexerGrammar; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.Test; -import static org.junit.Assert.assertEquals; - -public class TestBufferedTokenStream extends BaseJavaToolTest { - @Before - @Override - public void testSetUp() throws Exception { - super.testSetUp(); - } +import static org.junit.jupiter.api.Assertions.assertEquals; +public class TestBufferedTokenStream { protected TokenStream createTokenStream(TokenSource src) { return new BufferedTokenStream(src); } diff --git a/tool-testsuite/test/org/antlr/v4/test/tool/TestCharSupport.java b/tool-testsuite/test/org/antlr/v4/test/tool/TestCharSupport.java index eb3ba3ad18..ac63663a80 100644 --- a/tool-testsuite/test/org/antlr/v4/test/tool/TestCharSupport.java +++ b/tool-testsuite/test/org/antlr/v4/test/tool/TestCharSupport.java @@ -8,110 +8,112 @@ import org.antlr.v4.misc.CharSupport; import org.antlr.v4.runtime.misc.IntervalSet; -import org.junit.Assert; -import org.junit.Test; +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNull; public class TestCharSupport { @Test public void testGetANTLRCharLiteralForChar() { - Assert.assertEquals("''", + assertEquals("''", 
CharSupport.getANTLRCharLiteralForChar(-1)); - Assert.assertEquals("'\\n'", + assertEquals("'\\n'", CharSupport.getANTLRCharLiteralForChar('\n')); - Assert.assertEquals("'\\\\'", + assertEquals("'\\\\'", CharSupport.getANTLRCharLiteralForChar('\\')); - Assert.assertEquals("'\\''", + assertEquals("'\\''", CharSupport.getANTLRCharLiteralForChar('\'')); - Assert.assertEquals("'b'", + assertEquals("'b'", CharSupport.getANTLRCharLiteralForChar('b')); - Assert.assertEquals("'\\uFFFF'", + assertEquals("'\\uFFFF'", CharSupport.getANTLRCharLiteralForChar(0xFFFF)); - Assert.assertEquals("'\\u{10FFFF}'", + assertEquals("'\\u{10FFFF}'", CharSupport.getANTLRCharLiteralForChar(0x10FFFF)); } @Test public void testGetCharValueFromGrammarCharLiteral() { - Assert.assertEquals(-1, + assertEquals(-1, CharSupport.getCharValueFromGrammarCharLiteral(null)); - Assert.assertEquals(-1, + assertEquals(-1, CharSupport.getCharValueFromGrammarCharLiteral("")); - Assert.assertEquals(-1, + assertEquals(-1, CharSupport.getCharValueFromGrammarCharLiteral("b")); - Assert.assertEquals(111, + assertEquals(111, CharSupport.getCharValueFromGrammarCharLiteral("foo")); } @Test public void testGetStringFromGrammarStringLiteral() { - Assert.assertNull(CharSupport + assertNull(CharSupport .getStringFromGrammarStringLiteral("foo\\u{bbb")); - Assert.assertNull(CharSupport + assertNull(CharSupport .getStringFromGrammarStringLiteral("foo\\u{[]bb")); - Assert.assertNull(CharSupport + assertNull(CharSupport .getStringFromGrammarStringLiteral("foo\\u[]bb")); - Assert.assertNull(CharSupport + assertNull(CharSupport .getStringFromGrammarStringLiteral("foo\\ubb")); - Assert.assertEquals("oo»b", CharSupport + assertEquals("oo»b", CharSupport .getStringFromGrammarStringLiteral("foo\\u{bb}bb")); } @Test public void testGetCharValueFromCharInGrammarLiteral() { - Assert.assertEquals(102, + assertEquals(102, CharSupport.getCharValueFromCharInGrammarLiteral("f")); - Assert.assertEquals(-1, + assertEquals(-1, 
CharSupport.getCharValueFromCharInGrammarLiteral("\' ")); - Assert.assertEquals(-1, + assertEquals(-1, CharSupport.getCharValueFromCharInGrammarLiteral("\\ ")); - Assert.assertEquals(39, + assertEquals(39, CharSupport.getCharValueFromCharInGrammarLiteral("\\\'")); - Assert.assertEquals(10, + assertEquals(10, CharSupport.getCharValueFromCharInGrammarLiteral("\\n")); - Assert.assertEquals(-1, + assertEquals(-1, CharSupport.getCharValueFromCharInGrammarLiteral("foobar")); - Assert.assertEquals(4660, + assertEquals(4660, CharSupport.getCharValueFromCharInGrammarLiteral("\\u1234")); - Assert.assertEquals(18, + assertEquals(18, CharSupport.getCharValueFromCharInGrammarLiteral("\\u{12}")); - Assert.assertEquals(-1, + assertEquals(-1, CharSupport.getCharValueFromCharInGrammarLiteral("\\u{")); - Assert.assertEquals(-1, + assertEquals(-1, CharSupport.getCharValueFromCharInGrammarLiteral("foo")); } @Test public void testParseHexValue() { - Assert.assertEquals(-1, CharSupport.parseHexValue("foobar", -1, 3)); - Assert.assertEquals(-1, CharSupport.parseHexValue("foobar", 1, -1)); - Assert.assertEquals(-1, CharSupport.parseHexValue("foobar", 1, 3)); - Assert.assertEquals(35, CharSupport.parseHexValue("123456", 1, 3)); + assertEquals(-1, CharSupport.parseHexValue("foobar", -1, 3)); + assertEquals(-1, CharSupport.parseHexValue("foobar", 1, -1)); + assertEquals(-1, CharSupport.parseHexValue("foobar", 1, 3)); + assertEquals(35, CharSupport.parseHexValue("123456", 1, 3)); } @Test public void testCapitalize() { - Assert.assertEquals("Foo", CharSupport.capitalize("foo")); + assertEquals("Foo", CharSupport.capitalize("foo")); } @Test public void testGetIntervalSetEscapedString() { - Assert.assertEquals("", + assertEquals("", CharSupport.getIntervalSetEscapedString(new IntervalSet())); - Assert.assertEquals("'\\u0000'", + assertEquals("'\\u0000'", CharSupport.getIntervalSetEscapedString(new IntervalSet(0))); - Assert.assertEquals("'\\u0001'..'\\u0003'", + 
assertEquals("'\\u0001'..'\\u0003'", CharSupport.getIntervalSetEscapedString(new IntervalSet(3, 1, 2))); } @Test public void testGetRangeEscapedString() { - Assert.assertEquals("'\\u0002'..'\\u0004'", + assertEquals("'\\u0002'..'\\u0004'", CharSupport.getRangeEscapedString(2, 4)); - Assert.assertEquals("'\\u0002'", + assertEquals("'\\u0002'", CharSupport.getRangeEscapedString(2, 2)); } } diff --git a/tool-testsuite/test/org/antlr/v4/test/tool/TestCodeGeneration.java b/tool-testsuite/test/org/antlr/v4/test/tool/TestCodeGeneration.java index de16751680..524eb9d683 100644 --- a/tool-testsuite/test/org/antlr/v4/test/tool/TestCodeGeneration.java +++ b/tool-testsuite/test/org/antlr/v4/test/tool/TestCodeGeneration.java @@ -14,8 +14,7 @@ import org.antlr.v4.test.runtime.ErrorQueue; import org.antlr.v4.tool.Grammar; import org.antlr.v4.tool.LexerGrammar; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.stringtemplate.v4.AutoIndentWriter; import org.stringtemplate.v4.InstanceScope; import org.stringtemplate.v4.Interpreter; @@ -30,16 +29,10 @@ import java.util.ArrayList; import java.util.List; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotEquals; - -public class TestCodeGeneration extends BaseJavaToolTest { - @Before - @Override - public void testSetUp() throws Exception { - super.testSetUp(); - } +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +public class TestCodeGeneration { @Test public void testArgDecl() throws Exception { // should use template not string /*ErrorQueue equeue = */new ErrorQueue(); String g = @@ -49,7 +42,7 @@ public void testSetUp() throws Exception { System.out.println(evals); for (int i = 0; i < evals.size(); i++) { String eval = evals.get(i); - assertFalse("eval should not be POJO: "+eval, eval.startsWith(" factory) { assertEquals(1, tokens.size()); } - @Test(expected = 
IllegalStateException.class) - public void testCannotConsumeEOF() throws Exception { + @Test + public void testCannotConsumeEOF() { TokenSource lexer = new TokenSource() { @Override @@ -288,6 +282,6 @@ public void setTokenFactory(TokenFactory factory) { assertEquals(Token.EOF, tokens.LA(1)); assertEquals(0, tokens.index()); assertEquals(1, tokens.size()); - tokens.consume(); + assertThrows(IllegalStateException.class, tokens::consume); } } diff --git a/tool-testsuite/test/org/antlr/v4/test/tool/TestCompositeGrammars.java b/tool-testsuite/test/org/antlr/v4/test/tool/TestCompositeGrammars.java index 330ca7fa77..7d4fa79823 100644 --- a/tool-testsuite/test/org/antlr/v4/test/tool/TestCompositeGrammars.java +++ b/tool-testsuite/test/org/antlr/v4/test/tool/TestCompositeGrammars.java @@ -6,39 +6,40 @@ package org.antlr.v4.test.tool; -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.ErrorQueue; -import org.antlr.v4.test.runtime.RuntimeTestUtils; +import org.antlr.runtime.RecognitionException; +import org.antlr.v4.test.runtime.*; +import org.antlr.v4.test.runtime.java.JavaRunner; +import org.antlr.v4.test.runtime.states.ExecutedState; +import org.antlr.v4.test.runtime.states.JavaCompiledState; import org.antlr.v4.tool.ANTLRMessage; import org.antlr.v4.tool.ErrorType; import org.antlr.v4.tool.Grammar; import org.antlr.v4.tool.GrammarSemanticsMessage; -import org.junit.Before; -import org.junit.Test; + +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; import java.io.File; +import java.io.IOException; +import java.nio.file.Path; +import java.util.*; -import static org.antlr.v4.test.runtime.BaseRuntimeTest.writeFile; -import static org.antlr.v4.test.runtime.RuntimeTestUtils.sort; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNull; +import static org.antlr.v4.test.runtime.FileUtils.writeFile; +import static org.antlr.v4.test.runtime.RuntimeTestUtils.PathSeparator; +import static 
org.antlr.v4.test.tool.ToolTestUtils.*; +import static org.junit.jupiter.api.Assertions.*; -public class TestCompositeGrammars extends BaseJavaToolTest { +public class TestCompositeGrammars { protected boolean debug = false; - @Before - @Override - public void testSetUp() throws Exception { - super.testSetUp(); - } - - @Test public void testImportFileLocationInSubdir() throws Exception { + @Test public void testImportFileLocationInSubdir(@TempDir Path tempDir) { + String tempDirPath = tempDir.toString(); String slave = "parser grammar S;\n" + "a : B {System.out.println(\"S.a\");} ;\n"; - RuntimeTestUtils.mkdir(getTempDirPath()); - String subdir = getTempDirPath() + PATH_SEP + "sub"; - RuntimeTestUtils.mkdir(subdir); + FileUtils.mkdir(tempDirPath); + String subdir = tempDirPath + PathSeparator + "sub"; + FileUtils.mkdir(subdir); writeFile(subdir, "S.g4", slave); String master = "grammar M;\n" + @@ -46,65 +47,69 @@ public void testSetUp() throws Exception { "s : a ;\n" + "B : 'b' ;" + // defines B from inherited token space "WS : (' '|'\\n') -> skip ;\n" ; - writeFile(getTempDirPath(), "M.g4", master); - ErrorQueue equeue = BaseRuntimeTest.antlrOnString(getTempDirPath(), "Java", "M.g4", false, "-lib", subdir); + writeFile(tempDirPath, "M.g4", master); + ErrorQueue equeue = Generator.antlrOnString(tempDirPath, "Java", "M.g4", false, "-lib", subdir); assertEquals(0, equeue.size()); } // Test for https://github.com/antlr/antlr4/issues/1317 - @Test public void testImportSelfLoop() throws Exception { - RuntimeTestUtils.mkdir(getTempDirPath()); + @Test public void testImportSelfLoop(@TempDir Path tempDir) { + String tempDirPath = tempDir.toString(); + FileUtils.mkdir(tempDirPath); String master = "grammar M;\n" + "import M;\n" + "s : 'a' ;\n"; - writeFile(getTempDirPath(), "M.g4", master); - ErrorQueue equeue = BaseRuntimeTest.antlrOnString(getTempDirPath(), "Java", "M.g4", false, "-lib", getTempDirPath()); + writeFile(tempDirPath, "M.g4", master); + ErrorQueue equeue = 
Generator.antlrOnString(tempDirPath, "Java", "M.g4", false, "-lib", tempDirPath); assertEquals(0, equeue.size()); } - @Test public void testImportIntoLexerGrammar() throws Exception { - RuntimeTestUtils.mkdir(getTempDirPath()); + @Test public void testImportIntoLexerGrammar(@TempDir Path tempDir) { + String tempDirPath = tempDir.toString(); + FileUtils.mkdir(tempDirPath); String master = "lexer grammar M;\n" + "import S;\n" + "A : 'a';\n" + "B : 'b';\n"; - writeFile(getTempDirPath(), "M.g4", master); + writeFile(tempDirPath, "M.g4", master); String slave = "lexer grammar S;\n" + "C : 'c';\n"; - writeFile(getTempDirPath(), "S.g4", slave); + writeFile(tempDirPath, "S.g4", slave); - ErrorQueue equeue = BaseRuntimeTest.antlrOnString(getTempDirPath(), "Java", "M.g4", false, "-lib", getTempDirPath()); + ErrorQueue equeue = Generator.antlrOnString(tempDirPath, "Java", "M.g4", false, "-lib", tempDirPath); assertEquals(0, equeue.errors.size()); } - @Test public void testImportModesIntoLexerGrammar() throws Exception { - RuntimeTestUtils.mkdir(getTempDirPath()); + @Test public void testImportModesIntoLexerGrammar(@TempDir Path tempDir) { + String tempDirPath = tempDir.toString(); + FileUtils.mkdir(tempDirPath); String master = "lexer grammar M;\n" + "import S;\n" + "A : 'a' -> pushMode(X);\n" + "B : 'b';\n"; - writeFile(getTempDirPath(), "M.g4", master); + writeFile(tempDirPath, "M.g4", master); String slave = "lexer grammar S;\n" + "D : 'd';\n" + "mode X;\n" + "C : 'c' -> popMode;\n"; - writeFile(getTempDirPath(), "S.g4", slave); + writeFile(tempDirPath, "S.g4", slave); - ErrorQueue equeue = BaseRuntimeTest.antlrOnString(getTempDirPath(), "Java", "M.g4", false, "-lib", getTempDirPath()); + ErrorQueue equeue = Generator.antlrOnString(tempDirPath, "Java", "M.g4", false, "-lib", tempDirPath); assertEquals(0, equeue.errors.size()); } - @Test public void testImportChannelsIntoLexerGrammar() throws Exception { - RuntimeTestUtils.mkdir(getTempDirPath()); + @Test public void 
testImportChannelsIntoLexerGrammar(@TempDir Path tempDir) { + String tempDirPath = tempDir.toString(); + FileUtils.mkdir(tempDirPath); String master = "lexer grammar M;\n" + @@ -112,19 +117,20 @@ public void testSetUp() throws Exception { "channels {CH_A, CH_B}\n" + "A : 'a' -> channel(CH_A);\n" + "B : 'b' -> channel(CH_B);\n"; - writeFile(getTempDirPath(), "M.g4", master); + writeFile(tempDirPath, "M.g4", master); String slave = "lexer grammar S;\n" + "C : 'c';\n"; - writeFile(getTempDirPath(), "S.g4", slave); + writeFile(tempDirPath, "S.g4", slave); - ErrorQueue equeue = BaseRuntimeTest.antlrOnString(getTempDirPath(), "Java", "M.g4", false, "-lib", getTempDirPath()); + ErrorQueue equeue = Generator.antlrOnString(tempDirPath, "Java", "M.g4", false, "-lib", tempDirPath); assertEquals(0, equeue.errors.size()); } - @Test public void testImportMixedChannelsIntoLexerGrammar() throws Exception { - RuntimeTestUtils.mkdir(getTempDirPath()); + @Test public void testImportMixedChannelsIntoLexerGrammar(@TempDir Path tempDir) { + String tempDirPath = tempDir.toString(); + FileUtils.mkdir(tempDirPath); String master = "lexer grammar M;\n" + @@ -132,20 +138,21 @@ public void testSetUp() throws Exception { "channels {CH_A, CH_B}\n" + "A : 'a' -> channel(CH_A);\n" + "B : 'b' -> channel(CH_B);\n"; - writeFile(getTempDirPath(), "M.g4", master); + writeFile(tempDirPath, "M.g4", master); String slave = "lexer grammar S;\n" + "channels {CH_C}\n" + "C : 'c' -> channel(CH_C);\n"; - writeFile(getTempDirPath(), "S.g4", slave); + writeFile(tempDirPath, "S.g4", slave); - ErrorQueue equeue = BaseRuntimeTest.antlrOnString(getTempDirPath(), "Java", "M.g4", false, "-lib", getTempDirPath()); + ErrorQueue equeue = Generator.antlrOnString(tempDirPath, "Java", "M.g4", false, "-lib", tempDirPath); assertEquals(0, equeue.errors.size()); } - @Test public void testImportClashingChannelsIntoLexerGrammar() throws Exception { - RuntimeTestUtils.mkdir(getTempDirPath()); + @Test public void 
testImportClashingChannelsIntoLexerGrammar(@TempDir Path tempDir) { + String tempDirPath = tempDir.toString(); + FileUtils.mkdir(tempDirPath); String master = "lexer grammar M;\n" + @@ -154,20 +161,21 @@ public void testSetUp() throws Exception { "A : 'a' -> channel(CH_A);\n" + "B : 'b' -> channel(CH_B);\n" + "C : 'C' -> channel(CH_C);\n"; - writeFile(getTempDirPath(), "M.g4", master); + writeFile(tempDirPath, "M.g4", master); String slave = "lexer grammar S;\n" + "channels {CH_C}\n" + "C : 'c' -> channel(CH_C);\n"; - writeFile(getTempDirPath(), "S.g4", slave); + writeFile(tempDirPath, "S.g4", slave); - ErrorQueue equeue = BaseRuntimeTest.antlrOnString(getTempDirPath(), "Java", "M.g4", false, "-lib", getTempDirPath()); + ErrorQueue equeue = Generator.antlrOnString(tempDirPath, "Java", "M.g4", false, "-lib", tempDirPath); assertEquals(0, equeue.errors.size()); } - @Test public void testMergeModesIntoLexerGrammar() throws Exception { - RuntimeTestUtils.mkdir(getTempDirPath()); + @Test public void testMergeModesIntoLexerGrammar(@TempDir Path tempDir) { + String tempDirPath = tempDir.toString(); + FileUtils.mkdir(tempDirPath); String master = "lexer grammar M;\n" + @@ -175,21 +183,22 @@ public void testSetUp() throws Exception { "A : 'a' -> pushMode(X);\n" + "mode X;\n" + "B : 'b';\n"; - writeFile(getTempDirPath(), "M.g4", master); + writeFile(tempDirPath, "M.g4", master); String slave = "lexer grammar S;\n" + "D : 'd';\n" + "mode X;\n" + "C : 'c' -> popMode;\n"; - writeFile(getTempDirPath(), "S.g4", slave); + writeFile(tempDirPath, "S.g4", slave); - ErrorQueue equeue = BaseRuntimeTest.antlrOnString(getTempDirPath(), "Java", "M.g4", false, "-lib", getTempDirPath()); + ErrorQueue equeue = Generator.antlrOnString(tempDirPath, "Java", "M.g4", false, "-lib", tempDirPath); assertEquals(0, equeue.errors.size()); } - @Test public void testEmptyModesInLexerGrammar() throws Exception { - RuntimeTestUtils.mkdir(getTempDirPath()); + @Test public void 
testEmptyModesInLexerGrammar(@TempDir Path tempDir) { + String tempDirPath = tempDir.toString(); + FileUtils.mkdir(tempDirPath); String master = "lexer grammar M;\n" + @@ -197,21 +206,22 @@ public void testSetUp() throws Exception { "A : 'a';\n" + "C : 'e';\n" + "B : 'b';\n"; - writeFile(getTempDirPath(), "M.g4", master); + writeFile(tempDirPath, "M.g4", master); String slave = "lexer grammar S;\n" + "D : 'd';\n" + "mode X;\n" + "C : 'c' -> popMode;\n"; - writeFile(getTempDirPath(), "S.g4", slave); + writeFile(tempDirPath, "S.g4", slave); - ErrorQueue equeue = BaseRuntimeTest.antlrOnString(getTempDirPath(), "Java", "M.g4", false, "-lib", getTempDirPath()); + ErrorQueue equeue = Generator.antlrOnString(tempDirPath, "Java", "M.g4", false, "-lib", tempDirPath); assertEquals(0, equeue.errors.size()); } - @Test public void testCombinedGrammarImportsModalLexerGrammar() throws Exception { - RuntimeTestUtils.mkdir(getTempDirPath()); + @Test public void testCombinedGrammarImportsModalLexerGrammar(@TempDir Path tempDir) { + String tempDirPath = tempDir.toString(); + FileUtils.mkdir(tempDirPath); String master = "grammar M;\n" + @@ -219,16 +229,16 @@ public void testSetUp() throws Exception { "A : 'a';\n" + "B : 'b';\n" + "r : A B;\n"; - writeFile(getTempDirPath(), "M.g4", master); + writeFile(tempDirPath, "M.g4", master); String slave = "lexer grammar S;\n" + "D : 'd';\n" + "mode X;\n" + "C : 'c' -> popMode;\n"; - writeFile(getTempDirPath(), "S.g4", slave); + writeFile(tempDirPath, "S.g4", slave); - ErrorQueue equeue = BaseRuntimeTest.antlrOnString(getTempDirPath(), "Java", "M.g4", false, "-lib", getTempDirPath()); + ErrorQueue equeue = Generator.antlrOnString(tempDirPath, "Java", "M.g4", false, "-lib", tempDirPath); assertEquals(1, equeue.errors.size()); ANTLRMessage msg = equeue.errors.get(0); assertEquals(ErrorType.MODE_NOT_IN_LEXER, msg.getErrorType()); @@ -238,7 +248,8 @@ public void testSetUp() throws Exception { assertEquals("M.g4", new File(msg.fileName).getName()); 
} - @Test public void testDelegatesSeeSameTokenType() throws Exception { + @Test public void testDelegatesSeeSameTokenType(@TempDir Path tempDir) throws RecognitionException { + String tempDirPath = tempDir.toString(); String slaveS = "parser grammar S;\n"+ "tokens { A, B, C }\n"+ @@ -248,9 +259,9 @@ public void testSetUp() throws Exception { "tokens { C, B, A } // reverse order\n"+ "y : A ;\n"; - RuntimeTestUtils.mkdir(getTempDirPath()); - writeFile(getTempDirPath(), "S.g4", slaveS); - writeFile(getTempDirPath(), "T.g4", slaveT); + FileUtils.mkdir(tempDirPath); + writeFile(tempDirPath, "S.g4", slaveS); + writeFile(tempDirPath, "T.g4", slaveT); String master = "// The lexer will create rules to match letters a, b, c.\n"+ @@ -270,29 +281,30 @@ public void testSetUp() throws Exception { "A : 'a' ;\n"+ "C : 'c' ;\n"+ "WS : (' '|'\\n') -> skip ;\n"; - writeFile(getTempDirPath(), "M.g4", master); + writeFile(tempDirPath, "M.g4", master); ErrorQueue equeue = new ErrorQueue(); - Grammar g = new Grammar(getTempDirPath()+"/M.g4", master, equeue); + Grammar g = new Grammar(tempDirPath+"/M.g4", master, equeue); String expectedTokenIDToTypeMap = "{EOF=-1, B=1, A=2, C=3, WS=4}"; String expectedStringLiteralToTypeMap = "{'a'=2, 'b'=1, 'c'=3}"; String expectedTypeToTokenList = "[B, A, C, WS]"; assertEquals(expectedTokenIDToTypeMap, g.tokenNameToTypeMap.toString()); assertEquals(expectedStringLiteralToTypeMap, sort(g.stringLiteralToTypeMap).toString()); assertEquals(expectedTypeToTokenList, realElements(g.typeToTokenList).toString()); - assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size()); + assertEquals(0, equeue.errors.size(), "unexpected errors: "+equeue); } - @Test public void testErrorInImportedGetsRightFilename() throws Exception { + @Test public void testErrorInImportedGetsRightFilename(@TempDir Path tempDir) { + String tempDirPath = tempDir.toString(); String slave = "parser grammar S;\n" + "a : 'a' | c;\n"; - RuntimeTestUtils.mkdir(getTempDirPath()); - 
writeFile(getTempDirPath(), "S.g4", slave); + FileUtils.mkdir(tempDirPath); + writeFile(tempDirPath, "S.g4", slave); String master = "grammar M;\n" + "import S;\n"; - writeFile(getTempDirPath(), "M.g4", master); - ErrorQueue equeue = BaseRuntimeTest.antlrOnString(getTempDirPath(), "Java", "M.g4", false, "-lib", getTempDirPath()); + writeFile(tempDirPath, "M.g4", master); + ErrorQueue equeue = Generator.antlrOnString(tempDirPath, "Java", "M.g4", false, "-lib", tempDirPath); ANTLRMessage msg = equeue.errors.get(0); assertEquals(ErrorType.UNDEFINED_RULE_REF, msg.getErrorType()); assertEquals("c", msg.getArgs()[0]); @@ -301,13 +313,14 @@ public void testSetUp() throws Exception { assertEquals("S.g4", new File(msg.fileName).getName()); } - @Test public void testImportFileNotSearchedForInOutputDir() throws Exception { + @Test public void testImportFileNotSearchedForInOutputDir(@TempDir Path tempDir) { + String tempDirPath = tempDir.toString(); String slave = "parser grammar S;\n" + "a : B {System.out.println(\"S.a\");} ;\n"; - RuntimeTestUtils.mkdir(getTempDirPath()); - String outdir = getTempDirPath() + "/out"; - RuntimeTestUtils.mkdir(outdir); + FileUtils.mkdir(tempDirPath); + String outdir = tempDirPath + "/out"; + FileUtils.mkdir(outdir); writeFile(outdir, "S.g4", slave); String master = "grammar M;\n" + @@ -315,18 +328,19 @@ public void testSetUp() throws Exception { "s : a ;\n" + "B : 'b' ;" + // defines B from inherited token space "WS : (' '|'\\n') -> skip ;\n" ; - writeFile(getTempDirPath(), "M.g4", master); - ErrorQueue equeue = BaseRuntimeTest.antlrOnString(getTempDirPath(), "Java", "M.g4", false, "-o", outdir); + writeFile(tempDirPath, "M.g4", master); + ErrorQueue equeue = Generator.antlrOnString(tempDirPath, "Java", "M.g4", false, "-o", outdir); assertEquals(ErrorType.CANNOT_FIND_IMPORTED_GRAMMAR, equeue.errors.get(0).getErrorType()); } - @Test public void testOutputDirShouldNotEffectImports() throws Exception { + @Test public void 
testOutputDirShouldNotEffectImports(@TempDir Path tempDir) { + String tempDirPath = tempDir.toString(); String slave = "parser grammar S;\n" + "a : B {System.out.println(\"S.a\");} ;\n"; - RuntimeTestUtils.mkdir(getTempDirPath()); - String subdir = getTempDirPath() + "/sub"; - RuntimeTestUtils.mkdir(subdir); + FileUtils.mkdir(tempDirPath); + String subdir = tempDirPath + "/sub"; + FileUtils.mkdir(subdir); writeFile(subdir, "S.g4", slave); String master = "grammar M;\n" + @@ -334,57 +348,59 @@ public void testSetUp() throws Exception { "s : a ;\n" + "B : 'b' ;" + // defines B from inherited token space "WS : (' '|'\\n') -> skip ;\n" ; - writeFile(getTempDirPath(), "M.g4", master); - String outdir = getTempDirPath() + "/out"; - RuntimeTestUtils.mkdir(outdir); - ErrorQueue equeue = BaseRuntimeTest.antlrOnString(getTempDirPath(), "Java", "M.g4", false, "-o", outdir, "-lib", subdir); + writeFile(tempDirPath, "M.g4", master); + String outdir = tempDirPath + "/out"; + FileUtils.mkdir(outdir); + ErrorQueue equeue = Generator.antlrOnString(tempDirPath, "Java", "M.g4", false, "-o", outdir, "-lib", subdir); assertEquals(0, equeue.size()); } - @Test public void testTokensFileInOutputDirAndImportFileInSubdir() throws Exception { + @Test public void testTokensFileInOutputDirAndImportFileInSubdir(@TempDir Path tempDir) { + String tempDirPath = tempDir.toString(); String slave = "parser grammar S;\n" + "a : B {System.out.println(\"S.a\");} ;\n"; - RuntimeTestUtils.mkdir(getTempDirPath()); - String subdir = getTempDirPath() + "/sub"; - RuntimeTestUtils.mkdir(subdir); + FileUtils.mkdir(tempDirPath); + String subdir = tempDirPath + "/sub"; + FileUtils.mkdir(subdir); writeFile(subdir, "S.g4", slave); String parser = "parser grammar MParser;\n" + "import S;\n" + "options {tokenVocab=MLexer;}\n" + "s : a ;\n"; - writeFile(getTempDirPath(), "MParser.g4", parser); + writeFile(tempDirPath, "MParser.g4", parser); String lexer = "lexer grammar MLexer;\n" + "B : 'b' ;" + // defines B from 
inherited token space "WS : (' '|'\\n') -> skip ;\n" ; - writeFile(getTempDirPath(), "MLexer.g4", lexer); - String outdir = getTempDirPath() + "/out"; - RuntimeTestUtils.mkdir(outdir); - ErrorQueue equeue = BaseRuntimeTest.antlrOnString(getTempDirPath(), "Java", "MLexer.g4", false, "-o", outdir); + writeFile(tempDirPath, "MLexer.g4", lexer); + String outdir = tempDirPath + "/out"; + FileUtils.mkdir(outdir); + ErrorQueue equeue = Generator.antlrOnString(tempDirPath, "Java", "MLexer.g4", false, "-o", outdir); assertEquals(0, equeue.size()); - equeue = BaseRuntimeTest.antlrOnString(getTempDirPath(), "Java", "MParser.g4", false, "-o", outdir, "-lib", subdir); + equeue = Generator.antlrOnString(tempDirPath, "Java", "MParser.g4", false, "-o", outdir, "-lib", subdir); assertEquals(0, equeue.size()); } - @Test public void testImportedTokenVocabIgnoredWithWarning() throws Exception { + @Test public void testImportedTokenVocabIgnoredWithWarning(@TempDir Path tempDir) throws RecognitionException { + String tempDirPath = tempDir.toString(); ErrorQueue equeue = new ErrorQueue(); String slave = "parser grammar S;\n" + "options {tokenVocab=whatever;}\n" + "tokens { A }\n" + "x : A {System.out.println(\"S.x\");} ;\n"; - RuntimeTestUtils.mkdir(getTempDirPath()); - writeFile(getTempDirPath(), "S.g4", slave); + FileUtils.mkdir(tempDirPath); + writeFile(tempDirPath, "S.g4", slave); String master = "grammar M;\n" + "import S;\n" + "s : x ;\n" + "WS : (' '|'\\n') -> skip ;\n" ; - writeFile(getTempDirPath(), "M.g4", master); - Grammar g = new Grammar(getTempDirPath()+"/M.g4", master, equeue); + writeFile(tempDirPath, "M.g4", master); + Grammar g = new Grammar(tempDirPath+"/M.g4", master, equeue); Object expectedArg = "S"; ErrorType expectedMsgID = ErrorType.OPTIONS_IN_DELEGATE; @@ -392,50 +408,52 @@ public void testSetUp() throws Exception { new GrammarSemanticsMessage(expectedMsgID, g.fileName, null, expectedArg); checkGrammarSemanticsWarning(equeue, expectedMessage); - 
assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size()); - assertEquals("unexpected warnings: "+equeue, 1, equeue.warnings.size()); + assertEquals(0, equeue.errors.size(), "unexpected errors: "+equeue); + assertEquals(1, equeue.warnings.size(), "unexpected warnings: "+equeue); } - @Test public void testSyntaxErrorsInImportsNotThrownOut() throws Exception { + @Test public void testSyntaxErrorsInImportsNotThrownOut(@TempDir Path tempDir) throws RecognitionException { + String tempDirPath = tempDir.toString(); ErrorQueue equeue = new ErrorQueue(); String slave = "parser grammar S;\n" + "options {toke\n"; - RuntimeTestUtils.mkdir(getTempDirPath()); - writeFile(getTempDirPath(), "S.g4", slave); + FileUtils.mkdir(tempDirPath); + writeFile(tempDirPath, "S.g4", slave); String master = "grammar M;\n" + "import S;\n" + "s : x ;\n" + "WS : (' '|'\\n') -> skip ;\n" ; - writeFile(getTempDirPath(), "M.g4", master); - /*Grammar g =*/ new Grammar(getTempDirPath()+"/M.g4", master, equeue); + writeFile(tempDirPath, "M.g4", master); + /*Grammar g =*/ new Grammar(tempDirPath+"/M.g4", master, equeue); assertEquals(ErrorType.SYNTAX_ERROR, equeue.errors.get(0).getErrorType()); } // Make sure that M can import S that imports T. 
- @Test public void test3LevelImport() throws Exception { + @Test public void test3LevelImport(@TempDir Path tempDir) throws RecognitionException { + String tempDirPath = tempDir.toString(); ErrorQueue equeue = new ErrorQueue(); String slave = "parser grammar T;\n" + "a : T ;\n" ; - RuntimeTestUtils.mkdir(getTempDirPath()); - writeFile(getTempDirPath(), "T.g4", slave); + FileUtils.mkdir(tempDirPath); + writeFile(tempDirPath, "T.g4", slave); String slave2 = "parser grammar S;\n" + "import T;\n" + "a : S ;\n" ; - RuntimeTestUtils.mkdir(getTempDirPath()); - writeFile(getTempDirPath(), "S.g4", slave2); + FileUtils.mkdir(tempDirPath); + writeFile(tempDirPath, "S.g4", slave2); String master = "grammar M;\n" + "import S;\n" + "a : M ;\n" ; - writeFile(getTempDirPath(), "M.g4", master); - Grammar g = new Grammar(getTempDirPath()+"/M.g4", master, equeue); + writeFile(tempDirPath, "M.g4", master); + Grammar g = new Grammar(tempDirPath+"/M.g4", master, equeue); String expectedTokenIDToTypeMap = "{EOF=-1, M=1}"; // S and T aren't imported; overridden String expectedStringLiteralToTypeMap = "{}"; @@ -447,57 +465,54 @@ public void testSetUp() throws Exception { assertEquals(expectedTypeToTokenList, realElements(g.typeToTokenList).toString()); - assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size()); - - boolean ok = - rawGenerateAndBuildRecognizer("M.g4", master, "MParser", null); - boolean expecting = true; // should be ok - assertEquals(expecting, ok); + assertEquals(0, equeue.errors.size(), "unexpected errors: "+equeue); + assertTrue(compile("M.g4", master, "MParser", "a", tempDir)); } - @Test public void testBigTreeOfImports() throws Exception { + @Test public void testBigTreeOfImports(@TempDir Path tempDir) throws RecognitionException { + String tempDirPath = tempDir.toString(); ErrorQueue equeue = new ErrorQueue(); String slave = "parser grammar T;\n" + "tokens{T}\n" + "x : T ;\n" ; - RuntimeTestUtils.mkdir(getTempDirPath()); - writeFile(getTempDirPath(), 
"T.g4", slave); + FileUtils.mkdir(tempDirPath); + writeFile(tempDirPath, "T.g4", slave); slave = "parser grammar S;\n" + "import T;\n" + "tokens{S}\n" + "y : S ;\n" ; - RuntimeTestUtils.mkdir(getTempDirPath()); - writeFile(getTempDirPath(), "S.g4", slave); + FileUtils.mkdir(tempDirPath); + writeFile(tempDirPath, "S.g4", slave); slave = "parser grammar C;\n" + "tokens{C}\n" + "i : C ;\n" ; - RuntimeTestUtils.mkdir(getTempDirPath()); - writeFile(getTempDirPath(), "C.g4", slave); + FileUtils.mkdir(tempDirPath); + writeFile(tempDirPath, "C.g4", slave); slave = "parser grammar B;\n" + "tokens{B}\n" + "j : B ;\n" ; - RuntimeTestUtils.mkdir(getTempDirPath()); - writeFile(getTempDirPath(), "B.g4", slave); + FileUtils.mkdir(tempDirPath); + writeFile(tempDirPath, "B.g4", slave); slave = "parser grammar A;\n" + "import B,C;\n" + "tokens{A}\n" + "k : A ;\n" ; - RuntimeTestUtils.mkdir(getTempDirPath()); - writeFile(getTempDirPath(), "A.g4", slave); + FileUtils.mkdir(tempDirPath); + writeFile(tempDirPath, "A.g4", slave); String master = "grammar M;\n" + "import S,A;\n" + "tokens{M}\n" + "a : M ;\n" ; - writeFile(getTempDirPath(), "M.g4", master); - Grammar g = new Grammar(getTempDirPath()+"/M.g4", master, equeue); + writeFile(tempDirPath, "M.g4", master); + Grammar g = new Grammar(tempDirPath+"/M.g4", master, equeue); assertEquals("[]", equeue.errors.toString()); assertEquals("[]", equeue.warnings.toString()); @@ -510,33 +525,30 @@ public void testSetUp() throws Exception { assertEquals(expectedStringLiteralToTypeMap, g.stringLiteralToTypeMap.toString()); assertEquals(expectedTypeToTokenList, realElements(g.typeToTokenList).toString()); - - boolean ok = - rawGenerateAndBuildRecognizer("M.g4", master, "MParser", null); - boolean expecting = true; // should be ok - assertEquals(expecting, ok); + assertTrue(compile("M.g4", master, "MParser", "a", tempDir)); } - @Test public void testRulesVisibleThroughMultilevelImport() throws Exception { + @Test public void 
testRulesVisibleThroughMultilevelImport(@TempDir Path tempDir) throws RecognitionException { + String tempDirPath = tempDir.toString(); ErrorQueue equeue = new ErrorQueue(); String slave = "parser grammar T;\n" + "x : T ;\n" ; - RuntimeTestUtils.mkdir(getTempDirPath()); - writeFile(getTempDirPath(), "T.g4", slave); + FileUtils.mkdir(tempDirPath); + writeFile(tempDirPath, "T.g4", slave); String slave2 = "parser grammar S;\n" + // A, B, C token type order "import T;\n" + "a : S ;\n" ; - RuntimeTestUtils.mkdir(getTempDirPath()); - writeFile(getTempDirPath(), "S.g4", slave2); + FileUtils.mkdir(tempDirPath); + writeFile(tempDirPath, "S.g4", slave2); String master = "grammar M;\n" + "import S;\n" + "a : M x ;\n" ; // x MUST BE VISIBLE TO M - writeFile(getTempDirPath(), "M.g4", master); - Grammar g = new Grammar(getTempDirPath()+"/M.g4", master, equeue); + writeFile(tempDirPath, "M.g4", master); + Grammar g = new Grammar(tempDirPath+"/M.g4", master, equeue); String expectedTokenIDToTypeMap = "{EOF=-1, M=1, T=2}"; String expectedStringLiteralToTypeMap = "{}"; @@ -548,10 +560,11 @@ public void testSetUp() throws Exception { assertEquals(expectedTypeToTokenList, realElements(g.typeToTokenList).toString()); - assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size()); + assertEquals(0, equeue.errors.size(), "unexpected errors: "+equeue); } - @Test public void testNestedComposite() throws Exception { + @Test public void testNestedComposite(@TempDir Path tempDir) throws RecognitionException { + String tempDirPath = tempDir.toString(); // Wasn't compiling. 
http://www.antlr.org/jira/browse/ANTLR-438 ErrorQueue equeue = new ErrorQueue(); String gstr = @@ -560,30 +573,30 @@ public void testSetUp() throws Exception { "T2: '2';\n" + "T3: '3';\n" + "T4: '4';\n" ; - RuntimeTestUtils.mkdir(getTempDirPath()); - writeFile(getTempDirPath(), "L.g4", gstr); + FileUtils.mkdir(tempDirPath); + writeFile(tempDirPath, "L.g4", gstr); gstr = "parser grammar G1;\n" + "s: a | b;\n" + "a: T1;\n" + "b: T2;\n" ; - RuntimeTestUtils.mkdir(getTempDirPath()); - writeFile(getTempDirPath(), "G1.g4", gstr); + FileUtils.mkdir(tempDirPath); + writeFile(tempDirPath, "G1.g4", gstr); gstr = "parser grammar G2;\n" + "import G1;\n" + "a: T3;\n" ; - RuntimeTestUtils.mkdir(getTempDirPath()); - writeFile(getTempDirPath(), "G2.g4", gstr); + FileUtils.mkdir(tempDirPath); + writeFile(tempDirPath, "G2.g4", gstr); String G3str = "grammar G3;\n" + "import G2;\n" + "b: T4;\n" ; - RuntimeTestUtils.mkdir(getTempDirPath()); - writeFile(getTempDirPath(), "G3.g4", G3str); + FileUtils.mkdir(tempDirPath); + writeFile(tempDirPath, "G3.g4", G3str); - Grammar g = new Grammar(getTempDirPath()+"/G3.g4", G3str, equeue); + Grammar g = new Grammar(tempDirPath+"/G3.g4", G3str, equeue); String expectedTokenIDToTypeMap = "{EOF=-1, T4=1, T3=2}"; String expectedStringLiteralToTypeMap = "{}"; @@ -595,20 +608,18 @@ public void testSetUp() throws Exception { assertEquals(expectedTypeToTokenList, realElements(g.typeToTokenList).toString()); - assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size()); + assertEquals(0, equeue.errors.size(), "unexpected errors: "+equeue); - boolean ok = - rawGenerateAndBuildRecognizer("G3.g4", G3str, "G3Parser", null); - boolean expecting = true; // should be ok - assertEquals(expecting, ok); + assertTrue(compile("G3.g4", G3str, "G3Parser", "b", tempDir)); } - @Test public void testHeadersPropogatedCorrectlyToImportedGrammars() throws Exception { + @Test public void testHeadersPropogatedCorrectlyToImportedGrammars(@TempDir Path tempDir) { + String 
tempDirPath = tempDir.toString(); String slave = "parser grammar S;\n" + "a : B {System.out.print(\"S.a\");} ;\n"; - RuntimeTestUtils.mkdir(getTempDirPath()); - writeFile(getTempDirPath(), "S.g4", slave); + FileUtils.mkdir(tempDirPath); + writeFile(tempDirPath, "S.g4", slave); String master = "grammar M;\n" + "import S;\n" + @@ -616,7 +627,7 @@ public void testSetUp() throws Exception { "s : a ;\n" + "B : 'b' ;" + // defines B from inherited token space "WS : (' '|'\\n') -> skip ;\n" ; - ErrorQueue equeue = BaseRuntimeTest.antlrOnString(getTempDirPath(), "Java", "M.g4", master, false); + ErrorQueue equeue = Generator.antlrOnString(tempDirPath, "Java", "M.g4", master, false); int expecting = 0; // should be ok assertEquals(expecting, equeue.errors.size()); } @@ -629,18 +640,20 @@ public void testSetUp() throws Exception { */ // TODO: migrate to test framework @Test - public void testImportLargeGrammar() throws Exception { - String slave = load("Java.g4", "UTF-8"); + public void testImportLargeGrammar(@TempDir Path tempDir) throws IOException { + String tempDirPath = tempDir.toString(); + String slave = load("Java.g4"); String master = "grammar NewJava;\n" + "import Java;\n"; - RuntimeTestUtils.mkdir(getTempDirPath()); - writeFile(getTempDirPath(), "Java.g4", slave); - String found = execParser("NewJava.g4", master, "NewJavaParser", "NewJavaLexer", - null, null, "compilationUnit", "package Foo;", debug); - assertEquals(null, found); - assertNull(getParseErrors()); + FileUtils.mkdir(tempDirPath); + writeFile(tempDirPath, "Java.g4", slave); + ExecutedState executedState = execParser("NewJava.g4", master, + "NewJavaParser", "NewJavaLexer", "compilationUnit", "package Foo;", + debug, tempDir); + assertEquals("", executedState.output); + assertEquals("", executedState.errors); } /** @@ -650,7 +663,8 @@ public void testImportLargeGrammar() throws Exception { */ // TODO: migrate to test framework @Test - public void testImportLeftRecursiveGrammar() throws Exception { + 
public void testImportLeftRecursiveGrammar(@TempDir Path tempDir) { + String tempDirPath = tempDir.toString(); String slave = "grammar Java;\n" + "e : '(' e ')'\n" + @@ -663,11 +677,70 @@ public void testImportLeftRecursiveGrammar() throws Exception { "import Java;\n" + "s : e ;\n"; - RuntimeTestUtils.mkdir(getTempDirPath()); - writeFile(getTempDirPath(), "Java.g4", slave); - String found = execParser("T.g4", master, "TParser", "TLexer", - null, null, "s", "a=b", debug); - assertEquals(null, found); - assertNull(getParseErrors()); + FileUtils.mkdir(tempDirPath); + writeFile(tempDirPath, "Java.g4", slave); + ExecutedState executedState = execParser( + "T.g4", master, "TParser", "TLexer", "s", "a=b", debug, + tempDir); + assertEquals("", executedState.output); + assertEquals("", executedState.errors); + } + + // ISSUE: https://github.com/antlr/antlr4/issues/2296 + @Test + public void testCircularGrammarInclusion(@TempDir Path tempDir) { + String tempDirPath = tempDir.toString(); + String g1 = + "grammar G1;\n" + + "import G2;\n" + + "r : 'R1';"; + + String g2 = + "grammar G2;\n" + + "import G1;\n" + + "r : 'R2';"; + + FileUtils.mkdir(tempDirPath); + writeFile(tempDirPath, "G1.g4", g1); + ExecutedState executedState = execParser("G2.g4", g2, "G2Parser", "G2Lexer", "r", "R2", debug, tempDir); + assertEquals("", executedState.errors); + } + + private static void checkGrammarSemanticsWarning(ErrorQueue equeue, GrammarSemanticsMessage expectedMessage) { + ANTLRMessage foundMsg = null; + for (int i = 0; i < equeue.warnings.size(); i++) { + ANTLRMessage m = equeue.warnings.get(i); + if (m.getErrorType()==expectedMessage.getErrorType() ) { + foundMsg = m; + } + } + assertNotNull(foundMsg, "no error; "+expectedMessage.getErrorType()+" expected"); + assertTrue(foundMsg instanceof GrammarSemanticsMessage, "error is not a GrammarSemanticsMessage"); + assertEquals(Arrays.toString(expectedMessage.getArgs()), Arrays.toString(foundMsg.getArgs())); + if ( equeue.size()!=1 ) { + 
System.err.println(equeue); + } + } + + private static boolean compile(String grammarFileName, String grammarStr, String parserName, String startRuleName, + Path tempDirPath + ) { + RunOptions runOptions = createOptionsForJavaToolTests(grammarFileName, grammarStr, parserName, null, + false, false, startRuleName, null, + false, false, Stage.Compile, false); + try (JavaRunner runner = new JavaRunner(tempDirPath, false)) { + JavaCompiledState compiledState = (JavaCompiledState) runner.run(runOptions); + return !compiledState.containsErrors(); + } + } + + public static ,V> LinkedHashMap sort(Map data) { + LinkedHashMap dup = new LinkedHashMap(); + List keys = new ArrayList(data.keySet()); + Collections.sort(keys); + for (K k : keys) { + dup.put(k, data.get(k)); + } + return dup; } } diff --git a/tool-testsuite/test/org/antlr/v4/test/tool/TestDollarParser.java b/tool-testsuite/test/org/antlr/v4/test/tool/TestDollarParser.java index 91efcabe11..816a9fabd4 100644 --- a/tool-testsuite/test/org/antlr/v4/test/tool/TestDollarParser.java +++ b/tool-testsuite/test/org/antlr/v4/test/tool/TestDollarParser.java @@ -6,29 +6,22 @@ package org.antlr.v4.test.tool; -import org.junit.Before; -import org.junit.Test; +import org.antlr.v4.test.runtime.states.ExecutedState; +import org.junit.jupiter.api.Test; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; - -public class TestDollarParser extends BaseJavaToolTest { - @Before - @Override - public void testSetUp() throws Exception { - super.testSetUp(); - } +import static org.antlr.v4.test.tool.ToolTestUtils.execParser; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +public class TestDollarParser { @Test - public void testSimpleCall() throws Exception { + public void testSimpleCall() { String grammar = "grammar T;\n" + - "a : ID { System.out.println( $parser.getSourceName() ); }\n" + - " ;\n" + - "ID : 'a'..'z'+ ;\n"; - String 
found = execParser("T.g4", grammar, "TParser", "TLexer", - null, null, "a", "x", true); - assertTrue(found.indexOf(this.getClass().getSimpleName())>=0); - assertNull(getParseErrors()); + "a : ID { outStream.println(new java.io.File($parser.getSourceName()).getAbsolutePath()); }\n" + + " ;\n" + + "ID : 'a'..'z'+ ;\n"; + ExecutedState executedState = execParser("T.g4", grammar, "TParser", "TLexer", "a", "x", true); + assertTrue(executedState.output.contains("input")); + assertEquals("", executedState.errors); } - } diff --git a/tool-testsuite/test/org/antlr/v4/test/tool/TestErrorSets.java b/tool-testsuite/test/org/antlr/v4/test/tool/TestErrorSets.java index a0bdc18c8a..0896e4874f 100644 --- a/tool-testsuite/test/org/antlr/v4/test/tool/TestErrorSets.java +++ b/tool-testsuite/test/org/antlr/v4/test/tool/TestErrorSets.java @@ -6,23 +6,14 @@ package org.antlr.v4.test.tool; import org.antlr.v4.tool.ErrorType; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.Test; + +import static org.antlr.v4.test.tool.ToolTestUtils.testErrors; /** Test errors with the set stuff in lexer and parser */ -public class TestErrorSets extends BaseJavaToolTest { +public class TestErrorSets { protected boolean debug = false; - @Before - @Override - public void testSetUp() throws Exception { - super.testSetUp(); - } - - /** Public default constructor used by TestRig */ - public TestErrorSets() { - } - @Test public void testNotCharSetWithRuleRef() throws Exception { // might be a useful feature to add someday String[] pair = new String[] { @@ -32,7 +23,7 @@ public TestErrorSets() { "B : 'b' ;\n", "error(" + ErrorType.UNSUPPORTED_REFERENCE_IN_LEXER_SET.code + "): T.g4:3:10: rule reference B is not currently supported in a set\n" }; - super.testErrors(pair, true); + testErrors(pair, true); } @Test public void testNotCharSetWithString() throws Exception { @@ -44,8 +35,6 @@ public TestErrorSets() { "B : 'b' ;\n", "error(" + ErrorType.INVALID_LITERAL_IN_LEXER_SET.code + 
"): T.g4:3:10: multi-character literals are not allowed in lexer sets: 'aa'\n" }; - super.testErrors(pair, true); + testErrors(pair, true); } - - } diff --git a/tool-testsuite/test/org/antlr/v4/test/tool/TestEscapeSequenceParsing.java b/tool-testsuite/test/org/antlr/v4/test/tool/TestEscapeSequenceParsing.java index e4b3c37234..aa0bd50c78 100644 --- a/tool-testsuite/test/org/antlr/v4/test/tool/TestEscapeSequenceParsing.java +++ b/tool-testsuite/test/org/antlr/v4/test/tool/TestEscapeSequenceParsing.java @@ -8,10 +8,10 @@ import org.antlr.v4.misc.EscapeSequenceParsing; import org.antlr.v4.runtime.misc.IntervalSet; -import org.junit.Test; +import org.junit.jupiter.api.Test; import static org.antlr.v4.misc.EscapeSequenceParsing.Result; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; public class TestEscapeSequenceParsing { @Test diff --git a/tool-testsuite/test/org/antlr/v4/test/tool/TestFastQueue.java b/tool-testsuite/test/org/antlr/v4/test/tool/TestFastQueue.java index 3bf4f4acfc..cc57404a56 100644 --- a/tool-testsuite/test/org/antlr/v4/test/tool/TestFastQueue.java +++ b/tool-testsuite/test/org/antlr/v4/test/tool/TestFastQueue.java @@ -6,11 +6,11 @@ package org.antlr.v4.test.tool; import org.antlr.runtime.misc.FastQueue; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.util.NoSuchElementException; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; public class TestFastQueue { @Test public void testQueueNoRemove() throws Exception { @@ -38,7 +38,7 @@ public class TestFastQueue { buf.append(o); if ( q.size()>0 ) buf.append(" "); } - assertEquals("queue should be empty", 0, q.size()); + assertEquals(0, q.size(), "queue should be empty"); String expecting = "a b c d e"; String found = buf.toString(); assertEquals(expecting, found); @@ -57,7 +57,7 @@ public class TestFastQueue { buf.append(q.remove()); q.add("e"); 
buf.append(q.remove()); - assertEquals("queue should be empty", 0, q.size()); + assertEquals(0, q.size(), "queue should be empty"); String expecting = "abcde"; String found = buf.toString(); assertEquals(expecting, found); diff --git a/tool-testsuite/test/org/antlr/v4/test/tool/TestGrammarParserInterpreter.java b/tool-testsuite/test/org/antlr/v4/test/tool/TestGrammarParserInterpreter.java index def2642bf4..f3b2472dc6 100644 --- a/tool-testsuite/test/org/antlr/v4/test/tool/TestGrammarParserInterpreter.java +++ b/tool-testsuite/test/org/antlr/v4/test/tool/TestGrammarParserInterpreter.java @@ -14,9 +14,9 @@ import org.antlr.v4.tool.Grammar; import org.antlr.v4.tool.GrammarParserInterpreter; import org.antlr.v4.tool.LexerGrammar; -import org.junit.Test; +import org.junit.jupiter.api.Test; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; /** Tests to ensure GrammarParserInterpreter subclass of ParserInterpreter * hasn't messed anything up. 
diff --git a/tool-testsuite/test/org/antlr/v4/test/tool/TestGraphNodes.java b/tool-testsuite/test/org/antlr/v4/test/tool/TestGraphNodes.java index cb2cfcb2df..bf5808de54 100644 --- a/tool-testsuite/test/org/antlr/v4/test/tool/TestGraphNodes.java +++ b/tool-testsuite/test/org/antlr/v4/test/tool/TestGraphNodes.java @@ -7,36 +7,26 @@ package org.antlr.v4.test.tool; import org.antlr.v4.runtime.atn.ArrayPredictionContext; +import org.antlr.v4.runtime.atn.EmptyPredictionContext; import org.antlr.v4.runtime.atn.PredictionContext; -import org.antlr.v4.runtime.atn.PredictionContextCache; import org.antlr.v4.runtime.atn.SingletonPredictionContext; -import org.junit.Before; -import org.junit.Ignore; -import org.junit.Test; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Test; import java.util.ArrayDeque; import java.util.Deque; import java.util.IdentityHashMap; import java.util.Map; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; public class TestGraphNodes { - PredictionContextCache contextCache; - - @Before - public void setUp() { - PredictionContext.globalNodeCount = 1; - contextCache = new PredictionContextCache(); - } - public boolean rootIsWildcard() { return true; } public boolean fullCtx() { return false; } @Test public void test_$_$() { - PredictionContext r = PredictionContext.merge(PredictionContext.EMPTY, - PredictionContext.EMPTY, - rootIsWildcard(), null); + PredictionContext r = PredictionContext.merge( + EmptyPredictionContext.Instance, EmptyPredictionContext.Instance, rootIsWildcard(), null); // System.out.println(toDOTString(r, rootIsWildcard())); String expecting = "digraph G {\n" + @@ -47,9 +37,8 @@ public void setUp() { } @Test public void test_$_$_fullctx() { - PredictionContext r = PredictionContext.merge(PredictionContext.EMPTY, - PredictionContext.EMPTY, - fullCtx(), null); + PredictionContext r = PredictionContext.merge( + EmptyPredictionContext.Instance, 
EmptyPredictionContext.Instance, fullCtx(), null); // System.out.println(toDOTString(r, fullCtx())); String expecting = "digraph G {\n" + @@ -60,7 +49,7 @@ public void setUp() { } @Test public void test_x_$() { - PredictionContext r = PredictionContext.merge(x(), PredictionContext.EMPTY, rootIsWildcard(), null); + PredictionContext r = PredictionContext.merge(x(), EmptyPredictionContext.Instance, rootIsWildcard(), null); // System.out.println(toDOTString(r, rootIsWildcard())); String expecting = "digraph G {\n" + @@ -71,7 +60,7 @@ public void setUp() { } @Test public void test_x_$_fullctx() { - PredictionContext r = PredictionContext.merge(x(), PredictionContext.EMPTY, fullCtx(), null); + PredictionContext r = PredictionContext.merge(x(), EmptyPredictionContext.Instance, fullCtx(), null); // System.out.println(toDOTString(r, fullCtx())); String expecting = "digraph G {\n" + @@ -84,7 +73,7 @@ public void setUp() { } @Test public void test_$_x() { - PredictionContext r = PredictionContext.merge(PredictionContext.EMPTY, x(), rootIsWildcard(), null); + PredictionContext r = PredictionContext.merge(EmptyPredictionContext.Instance, x(), rootIsWildcard(), null); // System.out.println(toDOTString(r, rootIsWildcard())); String expecting = "digraph G {\n" + @@ -95,7 +84,7 @@ public void setUp() { } @Test public void test_$_x_fullctx() { - PredictionContext r = PredictionContext.merge(PredictionContext.EMPTY, x(), fullCtx(), null); + PredictionContext r = PredictionContext.merge(EmptyPredictionContext.Instance, x(), fullCtx(), null); // System.out.println(toDOTString(r, fullCtx())); String expecting = "digraph G {\n" + @@ -171,7 +160,7 @@ public void setUp() { } @Test public void test_aa$_a$_$_fullCtx() { - PredictionContext empty = PredictionContext.EMPTY; + PredictionContext empty = EmptyPredictionContext.Instance; PredictionContext child1 = createSingleton(empty, 8); PredictionContext right = PredictionContext.merge(empty, child1, false, null); PredictionContext left = 
createSingleton(right, 8); @@ -403,7 +392,7 @@ public void setUp() { assertEquals(expecting, toDOTString(r, fullCtx())); } - @Ignore("Known inefficiency but deferring resolving the issue for now") + @Disabled("Known inefficiency but deferring resolving the issue for now") @Test public void test_aex_bfx() { // TJP: this is inefficient as it leaves the top x nodes unmerged. PredictionContext x1 = x(); @@ -434,8 +423,8 @@ public void setUp() { // Array merges @Test public void test_A$_A$_fullctx() { - ArrayPredictionContext A1 = array(PredictionContext.EMPTY); - ArrayPredictionContext A2 = array(PredictionContext.EMPTY); + ArrayPredictionContext A1 = array(EmptyPredictionContext.Instance); + ArrayPredictionContext A2 = array(EmptyPredictionContext.Instance); PredictionContext r = PredictionContext.merge(A1, A2, fullCtx(), null); // System.out.println(toDOTString(r, fullCtx())); String expecting = @@ -757,39 +746,39 @@ public void setUp() { // ------------ SUPPORT ------------------------- protected SingletonPredictionContext a() { - return createSingleton(PredictionContext.EMPTY, 1); + return createSingleton(EmptyPredictionContext.Instance, 1); } private SingletonPredictionContext b() { - return createSingleton(PredictionContext.EMPTY, 2); + return createSingleton(EmptyPredictionContext.Instance, 2); } private SingletonPredictionContext c() { - return createSingleton(PredictionContext.EMPTY, 3); + return createSingleton(EmptyPredictionContext.Instance, 3); } private SingletonPredictionContext d() { - return createSingleton(PredictionContext.EMPTY, 4); + return createSingleton(EmptyPredictionContext.Instance, 4); } private SingletonPredictionContext u() { - return createSingleton(PredictionContext.EMPTY, 6); + return createSingleton(EmptyPredictionContext.Instance, 6); } private SingletonPredictionContext v() { - return createSingleton(PredictionContext.EMPTY, 7); + return createSingleton(EmptyPredictionContext.Instance, 7); } private SingletonPredictionContext w() { - 
return createSingleton(PredictionContext.EMPTY, 8); + return createSingleton(EmptyPredictionContext.Instance, 8); } private SingletonPredictionContext x() { - return createSingleton(PredictionContext.EMPTY, 9); + return createSingleton(EmptyPredictionContext.Instance, 9); } private SingletonPredictionContext y() { - return createSingleton(PredictionContext.EMPTY, 10); + return createSingleton(EmptyPredictionContext.Instance, 10); } public SingletonPredictionContext createSingleton(PredictionContext parent, int payload) { diff --git a/tool-testsuite/test/org/antlr/v4/test/tool/TestIntervalSet.java b/tool-testsuite/test/org/antlr/v4/test/tool/TestIntervalSet.java index 59bcba324a..034ae46e22 100644 --- a/tool-testsuite/test/org/antlr/v4/test/tool/TestIntervalSet.java +++ b/tool-testsuite/test/org/antlr/v4/test/tool/TestIntervalSet.java @@ -9,25 +9,16 @@ import org.antlr.v4.runtime.Lexer; import org.antlr.v4.runtime.Token; import org.antlr.v4.runtime.misc.IntervalSet; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.Test; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.*; -public class TestIntervalSet extends BaseJavaToolTest { +public class TestIntervalSet { /** Public default constructor used by TestRig */ public TestIntervalSet() { } - @Before - @Override - public void testSetUp() throws Exception { - super.testSetUp(); - } - @Test public void testSingleElement() throws Exception { IntervalSet s = IntervalSet.of(99); String expecting = "99"; @@ -431,6 +422,4 @@ public void testSetUp() throws Exception { String result = s.toString(); assertEquals(expecting, result); } - - } diff --git a/tool-testsuite/test/org/antlr/v4/test/tool/TestLeftRecursionToolIssues.java b/tool-testsuite/test/org/antlr/v4/test/tool/TestLeftRecursionToolIssues.java index 04159db536..34554972dd 100644 --- 
a/tool-testsuite/test/org/antlr/v4/test/tool/TestLeftRecursionToolIssues.java +++ b/tool-testsuite/test/org/antlr/v4/test/tool/TestLeftRecursionToolIssues.java @@ -7,19 +7,14 @@ package org.antlr.v4.test.tool; import org.antlr.v4.tool.ErrorType; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.Test; + +import static org.antlr.v4.test.tool.ToolTestUtils.testErrors; /** */ -public class TestLeftRecursionToolIssues extends BaseJavaToolTest { +public class TestLeftRecursionToolIssues { protected boolean debug = false; - @Before - @Override - public void testSetUp() throws Exception { - super.testSetUp(); - } - @Test public void testCheckForNonLeftRecursiveRule() throws Exception { String grammar = "grammar T;\n" + diff --git a/tool-testsuite/test/org/antlr/v4/test/tool/TestLexerActions.java b/tool-testsuite/test/org/antlr/v4/test/tool/TestLexerActions.java index 069c6d612b..8e9944231e 100644 --- a/tool-testsuite/test/org/antlr/v4/test/tool/TestLexerActions.java +++ b/tool-testsuite/test/org/antlr/v4/test/tool/TestLexerActions.java @@ -6,49 +6,43 @@ package org.antlr.v4.test.tool; -import org.junit.Before; -import org.junit.Test; +import org.antlr.v4.test.runtime.states.ExecutedState; +import org.junit.jupiter.api.Test; -import static org.junit.Assert.assertEquals; - -public class TestLexerActions extends BaseJavaToolTest { - - @Before - @Override - public void testSetUp() throws Exception { - super.testSetUp(); - } +import static org.antlr.v4.test.tool.ToolTestUtils.execLexer; +import static org.junit.jupiter.api.Assertions.assertEquals; +public class TestLexerActions { // ----- ACTIONS -------------------------------------------------------- @Test public void testActionExecutedInDFA() throws Exception { String grammar = "lexer grammar L;\n"+ - "I : '0'..'9'+ {System.out.println(\"I\");} ;\n"+ + "I : '0'..'9'+ {outStream.println(\"I\");} ;\n"+ "WS : (' '|'\\n') -> skip ;"; - String found = execLexer("L.g4", grammar, "L", "34 34"); + 
ExecutedState executedState = execLexer("L.g4", grammar, "L", "34 34"); String expecting = "I\n" + "I\n" + "[@0,0:1='34',<1>,1:0]\n" + "[@1,3:4='34',<1>,1:3]\n" + "[@2,5:4='',<-1>,1:5]\n"; - assertEquals(expecting, found); + assertEquals(expecting, executedState.output); } @Test public void testActionEvalsAtCorrectIndex() throws Exception { String grammar = "lexer grammar L;\n"+ - "I : [0-9] {System.out.println(\"2nd char: \"+(char)_input.LA(1));} [0-9]+ ;\n"+ + "I : [0-9] {outStream.println(\"2nd char: \"+(char)_input.LA(1));} [0-9]+ ;\n"+ "WS : (' '|'\\n') -> skip ;"; - String found = execLexer("L.g4", grammar, "L", "123 45"); + ExecutedState executedState = execLexer("L.g4", grammar, "L", "123 45"); String expecting = "2nd char: 2\n" + "2nd char: 5\n" + "[@0,0:2='123',<1>,1:0]\n" + "[@1,4:5='45',<1>,1:4]\n" + "[@2,6:5='',<-1>,1:6]\n"; - assertEquals(expecting, found); + assertEquals(expecting, executedState.output); } /** @@ -71,8 +65,8 @@ public void testSetUp() throws Exception { " return lexer._input.getText (new Interval (start_index, stop_index));\n" + " }\n" + "\n" + - " public void start () { start_index = lexer._input.index (); System.out.println (\"Start:\" + start_index);}\n" + - " public void stop () { stop_index = lexer._input.index (); System.out.println (\"Stop:\" + stop_index);}\n" + + " public void start () { start_index = lexer._input.index (); outStream.println (\"Start:\" + start_index);}\n" + + " public void stop () { stop_index = lexer._input.index (); outStream.println (\"Stop:\" + stop_index);}\n" + "\n" + " private int start_index = 0;\n" + " private int stop_index = 0;\n" + @@ -82,11 +76,11 @@ public void testSetUp() throws Exception { "Marker m_name = new Marker (this);\n" + "}\n" + "\n" + - "HELLO: 'hello' WS { m_name.start (); } NAME { m_name.stop (); } '\\n' { System.out.println (\"Hello: \" + m_name.getText ()); };\n" + + "HELLO: 'hello' WS { m_name.start (); } NAME { m_name.stop (); } '\\n' { outStream.println (\"Hello: \" + 
m_name.getText ()); };\n" + "NAME: ('a'..'z' | 'A'..'Z')+ ('\\n')?;\n" + "\n" + "fragment WS: [ \\r\\t\\n]+ ;\n"; - String found = execLexer("L.g4", grammar, "L", "hello Steve\n"); + ExecutedState executedState = execLexer("L.g4", grammar, "L", "hello Steve\n"); String expecting = "Start:6\n" + "Stop:11\n" + @@ -94,15 +88,15 @@ public void testSetUp() throws Exception { "\n" + "[@0,0:11='hello Steve\\n',<1>,1:0]\n" + "[@1,12:11='',<-1>,2:0]\n"; - assertEquals(expecting, found); + assertEquals(expecting, executedState.output); } @Test public void test2ActionsIn1Rule() throws Exception { String grammar = "lexer grammar L;\n"+ - "I : [0-9] {System.out.println(\"x\");} [0-9]+ {System.out.println(\"y\");} ;\n"+ + "I : [0-9] {outStream.println(\"x\");} [0-9]+ {outStream.println(\"y\");} ;\n"+ "WS : (' '|'\\n') -> skip ;"; - String found = execLexer("L.g4", grammar, "L", "123 45"); + ExecutedState executedState = execLexer("L.g4", grammar, "L", "123 45"); String expecting = "x\n" + "y\n" + @@ -111,39 +105,39 @@ public void testSetUp() throws Exception { "[@0,0:2='123',<1>,1:0]\n" + "[@1,4:5='45',<1>,1:4]\n" + "[@2,6:5='',<-1>,1:6]\n"; - assertEquals(expecting, found); + assertEquals(expecting, executedState.output); } @Test public void testAltActionsIn1Rule() throws Exception { String grammar = "lexer grammar L;\n"+ - "I : ( [0-9]+ {System.out.print(\"int\");}\n" + - " | [a-z]+ {System.out.print(\"id\");}\n" + + "I : ( [0-9]+ {outStream.print(\"int\");}\n" + + " | [a-z]+ {outStream.print(\"id\");}\n" + " )\n" + - " {System.out.println(\" last\");}\n" + + " {outStream.println(\" last\");}\n" + " ;\n"+ "WS : (' '|'\\n') -> skip ;"; - String found = execLexer("L.g4", grammar, "L", "123 ab"); + ExecutedState executedState = execLexer("L.g4", grammar, "L", "123 ab"); String expecting = "int last\n" + "id last\n" + "[@0,0:2='123',<1>,1:0]\n" + "[@1,4:5='ab',<1>,1:4]\n" + "[@2,6:5='',<-1>,1:6]\n"; - assertEquals(expecting, found); + assertEquals(expecting, executedState.output); 
} @Test public void testActionPlusCommand() throws Exception { String grammar = "lexer grammar L;\n"+ - "I : '0'..'9'+ {System.out.println(\"I\");} -> skip ;\n"+ + "I : '0'..'9'+ {outStream.println(\"I\");} -> skip ;\n"+ "WS : (' '|'\\n') -> skip ;"; - String found = execLexer("L.g4", grammar, "L", "34 34"); + ExecutedState executedState = execLexer("L.g4", grammar, "L", "34 34"); String expecting = "I\n" + "I\n" + "[@0,5:4='',<-1>,1:5]\n"; - assertEquals(expecting, found); + assertEquals(expecting, executedState.output); } // ----- COMMANDS -------------------------------------------------------- @@ -151,60 +145,60 @@ public void testSetUp() throws Exception { @Test public void testSkipCommand() throws Exception { String grammar = "lexer grammar L;\n"+ - "I : '0'..'9'+ {System.out.println(\"I\");} ;\n"+ + "I : '0'..'9'+ {outStream.println(\"I\");} ;\n"+ "WS : (' '|'\\n') -> skip ;"; - String found = execLexer("L.g4", grammar, "L", "34 34"); + ExecutedState executedState = execLexer("L.g4", grammar, "L", "34 34"); String expecting = "I\n" + "I\n" + "[@0,0:1='34',<1>,1:0]\n" + "[@1,3:4='34',<1>,1:3]\n" + "[@2,5:4='',<-1>,1:5]\n"; - assertEquals(expecting, found); + assertEquals(expecting, executedState.output); } @Test public void testMoreCommand() throws Exception { String grammar = "lexer grammar L;\n"+ - "I : '0'..'9'+ {System.out.println(\"I\");} ;\n"+ + "I : '0'..'9'+ {outStream.println(\"I\");} ;\n"+ "WS : '#' -> more ;"; - String found = execLexer("L.g4", grammar, "L", "34#10"); + ExecutedState executedState = execLexer("L.g4", grammar, "L", "34#10"); String expecting = "I\n" + "I\n" + "[@0,0:1='34',<1>,1:0]\n" + "[@1,2:4='#10',<1>,1:2]\n" + "[@2,5:4='',<-1>,1:5]\n"; - assertEquals(expecting, found); + assertEquals(expecting, executedState.output); } @Test public void testTypeCommand() throws Exception { String grammar = "lexer grammar L;\n"+ - "I : '0'..'9'+ {System.out.println(\"I\");} ;\n"+ + "I : '0'..'9'+ {outStream.println(\"I\");} ;\n"+ "HASH : '#' -> 
type(HASH) ;"; - String found = execLexer("L.g4", grammar, "L", "34#"); + ExecutedState executedState = execLexer("L.g4", grammar, "L", "34#"); String expecting = "I\n" + "[@0,0:1='34',<1>,1:0]\n" + "[@1,2:2='#',<2>,1:2]\n" + "[@2,3:2='',<-1>,1:3]\n"; - assertEquals(expecting, found); + assertEquals(expecting, executedState.output); } @Test public void testCombinedCommand() throws Exception { String grammar = - "lexer grammar L;\n"+ - "I : '0'..'9'+ {System.out.println(\"I\");} ;\n"+ + "lexer grammar L;\n" + + "I : '0'..'9'+ {outStream.println(\"I\");} ;\n"+ "HASH : '#' -> type(100), skip, more ;"; - String found = execLexer("L.g4", grammar, "L", "34#11"); + ExecutedState executedState = execLexer("L.g4", grammar, "L", "34#11"); String expecting = "I\n" + "I\n" + "[@0,0:1='34',<1>,1:0]\n" + "[@1,2:4='#11',<1>,1:2]\n" + "[@2,5:4='',<-1>,1:5]\n"; - assertEquals(expecting, found); + assertEquals(expecting, executedState.output); } @Test public void testLexerMode() throws Exception { @@ -215,12 +209,12 @@ public void testSetUp() throws Exception { "mode STRING_MODE;\n"+ "STRING : '\"' -> popMode;\n"+ "ANY : . -> more;\n"; - String found = execLexer("L.g4", grammar, "L", "\"abc\" \"ab\""); + ExecutedState executedState = execLexer("L.g4", grammar, "L", "\"abc\" \"ab\""); String expecting = "[@0,0:4='\"abc\"',<2>,1:0]\n" + "[@1,6:9='\"ab\"',<2>,1:6]\n" + "[@2,10:9='',<-1>,1:10]\n"; - assertEquals(expecting, found); + assertEquals(expecting, executedState.output); } @Test public void testLexerPushPopModeAction() throws Exception { @@ -231,12 +225,12 @@ public void testSetUp() throws Exception { "mode STRING_MODE;\n"+ "STRING : '\"' -> popMode ;\n"+ // token type 2 "ANY : . 
-> more ;\n"; - String found = execLexer("L.g4", grammar, "L", "\"abc\" \"ab\""); + ExecutedState executedState = execLexer("L.g4", grammar, "L", "\"abc\" \"ab\""); String expecting = "[@0,0:4='\"abc\"',<2>,1:0]\n" + "[@1,6:9='\"ab\"',<2>,1:6]\n" + "[@2,10:9='',<-1>,1:10]\n"; - assertEquals(expecting, found); + assertEquals(expecting, executedState.output); } @Test public void testLexerModeAction() throws Exception { @@ -247,12 +241,12 @@ public void testSetUp() throws Exception { "mode STRING_MODE;\n"+ "STRING : '\"' -> mode(DEFAULT_MODE) ;\n"+ // ttype 2 since '"' ambiguity "ANY : . -> more ;\n"; - String found = execLexer("L.g4", grammar, "L", "\"abc\" \"ab\""); + ExecutedState executedState = execLexer("L.g4", grammar, "L", "\"abc\" \"ab\""); String expecting = "[@0,0:4='\"abc\"',<2>,1:0]\n" + "[@1,6:9='\"ab\"',<2>,1:6]\n" + "[@2,10:9='',<-1>,1:10]\n"; - assertEquals(expecting, found); + assertEquals(expecting, executedState.output); } // ----- PREDICATES -------------------------------------------------------- @@ -282,7 +276,7 @@ public void testFailingPredicateEvalIsNotCached() { "Item: name of item\n" + "Another line.\n" + "More line.\n"; - String found = execLexer("TestLexer.g4", grammar, "TestLexer", input); + ExecutedState executedState = execLexer("TestLexer.g4", grammar, "TestLexer", input); String expecting = "[@0,0:12='A line here.\\n',<1>,1:0]\n" + "[@1,13:17='Item:',<2>,2:0]\n" + @@ -291,7 +285,7 @@ public void testFailingPredicateEvalIsNotCached() { "[@4,32:45='Another line.\\n',<1>,3:0]\n" + "[@5,46:56='More line.\\n',<1>,4:0]\n" + "[@6,57:56='',<-1>,5:0]\n"; - assertEquals(expecting, found); + assertEquals(expecting, executedState.output); } } diff --git a/tool-testsuite/test/org/antlr/v4/test/tool/TestLookaheadTrees.java b/tool-testsuite/test/org/antlr/v4/test/tool/TestLookaheadTrees.java index e560fd567d..776bc8d45e 100644 --- a/tool-testsuite/test/org/antlr/v4/test/tool/TestLookaheadTrees.java +++ 
b/tool-testsuite/test/org/antlr/v4/test/tool/TestLookaheadTrees.java @@ -17,11 +17,11 @@ import org.antlr.v4.tool.Grammar; import org.antlr.v4.tool.GrammarParserInterpreter; import org.antlr.v4.tool.LexerGrammar; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.util.List; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; public class TestLookaheadTrees { public static final String lexerText = diff --git a/tool-testsuite/test/org/antlr/v4/test/tool/TestParseTreeMatcher.java b/tool-testsuite/test/org/antlr/v4/test/tool/TestParseTreeMatcher.java index c5af50097c..54fa647d93 100644 --- a/tool-testsuite/test/org/antlr/v4/test/tool/TestParseTreeMatcher.java +++ b/tool-testsuite/test/org/antlr/v4/test/tool/TestParseTreeMatcher.java @@ -6,38 +6,29 @@ package org.antlr.v4.test.tool; -import org.antlr.v4.runtime.CharStream; -import org.antlr.v4.runtime.CommonTokenStream; import org.antlr.v4.runtime.InputMismatchException; import org.antlr.v4.runtime.Lexer; import org.antlr.v4.runtime.NoViableAltException; import org.antlr.v4.runtime.Parser; import org.antlr.v4.runtime.Token; -import org.antlr.v4.runtime.TokenStream; -import org.antlr.v4.runtime.tree.ParseTree; +import org.antlr.v4.runtime.misc.Pair; import org.antlr.v4.runtime.tree.pattern.ParseTreeMatch; import org.antlr.v4.runtime.tree.pattern.ParseTreePattern; import org.antlr.v4.runtime.tree.pattern.ParseTreePatternMatcher; -import org.junit.Before; -import org.junit.Test; +import org.antlr.v4.test.runtime.RunOptions; +import org.antlr.v4.test.runtime.Stage; +import org.antlr.v4.test.runtime.java.JavaRunner; +import org.antlr.v4.test.runtime.states.JavaCompiledState; +import org.antlr.v4.test.runtime.states.JavaExecutedState; +import org.junit.jupiter.api.Test; -import java.lang.reflect.Constructor; import java.util.List; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static 
org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; - -public class TestParseTreeMatcher extends BaseJavaToolTest { - @Before - @Override - public void testSetUp() throws Exception { - super.testSetUp(); - } +import static org.antlr.v4.test.tool.ToolTestUtils.createOptionsForJavaToolTests; +import static org.junit.jupiter.api.Assertions.*; - @Test public void testChunking() throws Exception { +public class TestParseTreeMatcher { + @Test public void testChunking() { ParseTreePatternMatcher m = new ParseTreePatternMatcher(null, null); assertEquals("[ID, ' = ', expr, ' ;']", m.split(" = ;").toString()); assertEquals("[' ', ID, ' = ', expr]", m.split(" = ").toString()); @@ -47,7 +38,7 @@ public void testSetUp() throws Exception { assertEquals("['foo bar ', tag]", m.split("foo \\ bar ").toString()); } - @Test public void testDelimiters() throws Exception { + @Test public void testDelimiters() { ParseTreePatternMatcher m = new ParseTreePatternMatcher(null, null); m.setDelimiters("<<", ">>", "$"); String result = m.split("<> = <> ;$<< ick $>>").toString(); @@ -101,16 +92,10 @@ public void testSetUp() throws Exception { "ID : [a-z]+ ;\n" + "INT : [0-9]+ ;\n" + "WS : [ \\r\\n\\t]+ -> skip ;\n"; - boolean ok = - rawGenerateAndBuildRecognizer("X1.g4", grammar, "X1Parser", "X1Lexer", false); - assertTrue(ok); - - ParseTreePatternMatcher m = getPatternMatcher("X1"); + ParseTreePatternMatcher m = getPatternMatcher("X1.g4", grammar, "X1Parser", "X1Lexer", "s"); List tokens = m.tokenize(" = ;"); - String results = tokens.toString(); - String expected = "[ID:3, [@-1,1:1='=',<1>,1:1], expr:7, [@-1,1:1=';',<2>,1:1]]"; - assertEquals(expected, results); + assertEquals("[ID:3, [@-1,1:1='=',<1>,1:1], expr:7, [@-1,1:1=';',<2>,1:1]]", tokens.toString()); } @Test @@ -122,16 +107,10 @@ public void testCompilingPattern() throws Exception { "ID : [a-z]+ ;\n" + "INT : [0-9]+ ;\n" + "WS : [ \\r\\n\\t]+ -> skip ;\n"; - boolean 
ok = - rawGenerateAndBuildRecognizer("X2.g4", grammar, "X2Parser", "X2Lexer", false); - assertTrue(ok); - - ParseTreePatternMatcher m = getPatternMatcher("X2"); + ParseTreePatternMatcher m = getPatternMatcher("X2.g4", grammar, "X2Parser", "X2Lexer", "s"); ParseTreePattern t = m.compile(" = ;", m.getParser().getRuleIndex("s")); - String results = t.getPatternTree().toStringTree(m.getParser()); - String expected = "(s = (expr ) ;)"; - assertEquals(expected, results); + assertEquals("(s = (expr ) ;)", t.getPatternTree().toStringTree(m.getParser())); } @Test @@ -143,11 +122,7 @@ public void testCompilingPatternConsumesAllTokens() throws Exception { "ID : [a-z]+ ;\n" + "INT : [0-9]+ ;\n" + "WS : [ \\r\\n\\t]+ -> skip ;\n"; - boolean ok = - rawGenerateAndBuildRecognizer("X2.g4", grammar, "X2Parser", "X2Lexer", false); - assertTrue(ok); - - ParseTreePatternMatcher m = getPatternMatcher("X2"); + ParseTreePatternMatcher m = getPatternMatcher("X2.g4", grammar, "X2Parser", "X2Lexer", "s"); boolean failed = false; try { @@ -168,11 +143,7 @@ public void testPatternMatchesStartRule() throws Exception { "ID : [a-z]+ ;\n" + "INT : [0-9]+ ;\n" + "WS : [ \\r\\n\\t]+ -> skip ;\n"; - boolean ok = - rawGenerateAndBuildRecognizer("X2.g4", grammar, "X2Parser", "X2Lexer", false); - assertTrue(ok); - - ParseTreePatternMatcher m = getPatternMatcher("X2"); + ParseTreePatternMatcher m = getPatternMatcher("X2.g4", grammar, "X2Parser", "X2Lexer", "s"); boolean failed = false; try { @@ -193,11 +164,7 @@ public void testPatternMatchesStartRule2() throws Exception { "ID : [a-z]+ ;\n" + "INT : [0-9]+ ;\n" + "WS : [ \\r\\n\\t]+ -> skip ;\n"; - boolean ok = - rawGenerateAndBuildRecognizer("X2.g4", grammar, "X2Parser", "X2Lexer", false); - assertTrue(ok); - - ParseTreePatternMatcher m = getPatternMatcher("X2"); + ParseTreePatternMatcher m = getPatternMatcher("X2.g4", grammar, "X2Parser", "X2Lexer", "s"); boolean failed = false; try { @@ -218,16 +185,10 @@ public void 
testHiddenTokensNotSeenByTreePatternParser() throws Exception { "ID : [a-z]+ ;\n" + "INT : [0-9]+ ;\n" + "WS : [ \\r\\n\\t]+ -> channel(HIDDEN) ;\n"; - boolean ok = - rawGenerateAndBuildRecognizer("X2.g4", grammar, "X2Parser", "X2Lexer", false); - assertTrue(ok); - - ParseTreePatternMatcher m = getPatternMatcher("X2"); + ParseTreePatternMatcher m = getPatternMatcher("X2.g4", grammar, "X2Parser", "X2Lexer", "s"); ParseTreePattern t = m.compile(" = ;", m.getParser().getRuleIndex("s")); - String results = t.getPatternTree().toStringTree(m.getParser()); - String expected = "(s = (expr ) ;)"; - assertEquals(expected, results); + assertEquals("(s = (expr ) ;)", t.getPatternTree().toStringTree(m.getParser())); } @Test @@ -237,11 +198,7 @@ public void testCompilingMultipleTokens() throws Exception { "s : ID '=' ID ';' ;\n" + "ID : [a-z]+ ;\n" + "WS : [ \\r\\n\\t]+ -> skip ;\n"; - boolean ok = - rawGenerateAndBuildRecognizer("X2.g4", grammar, "X2Parser", "X2Lexer", false); - assertTrue(ok); - - ParseTreePatternMatcher m = getPatternMatcher("X2"); + ParseTreePatternMatcher m = getPatternMatcher("X2.g4", grammar, "X2Parser", "X2Lexer", "s"); ParseTreePattern t = m.compile(" = ;", m.getParser().getRuleIndex("s")); String results = t.getPatternTree().toStringTree(m.getParser()); @@ -417,7 +374,7 @@ public void testCompilingMultipleTokens() throws Exception { checkPatternMatch(grammar, "expr", input, pattern, "X6"); } - public ParseTreeMatch checkPatternMatch(String grammar, String startRule, + private static ParseTreeMatch checkPatternMatch(String grammar, String startRule, String input, String pattern, String grammarName) throws Exception @@ -425,7 +382,7 @@ public ParseTreeMatch checkPatternMatch(String grammar, String startRule, return checkPatternMatch(grammar, startRule, input, pattern, grammarName, false); } - public ParseTreeMatch checkPatternMatch(String grammar, String startRule, + private static ParseTreeMatch checkPatternMatch(String grammar, String startRule, String 
input, String pattern, String grammarName, boolean invertMatch) throws Exception @@ -433,45 +390,36 @@ public ParseTreeMatch checkPatternMatch(String grammar, String startRule, String grammarFileName = grammarName+".g4"; String parserName = grammarName+"Parser"; String lexerName = grammarName+"Lexer"; - boolean ok = - rawGenerateAndBuildRecognizer(grammarFileName, grammar, parserName, lexerName, false); - assertTrue(ok); - - ParseTree result = execParser(startRule, input, parserName, lexerName); - - ParseTreePattern p = getPattern(grammarName, pattern, startRule); - ParseTreeMatch match = p.match(result); - boolean matched = match.succeeded(); - if ( invertMatch ) assertFalse(matched); - else assertTrue(matched); - return match; - } - - public ParseTreePattern getPattern(String grammarName, String pattern, String ruleName) - throws Exception - { - Class lexerClass = loadLexerClassFromTempDir(grammarName + "Lexer"); - Constructor ctor = lexerClass.getConstructor(CharStream.class); - Lexer lexer = ctor.newInstance((CharStream) null); - - Class parserClass = loadParserClassFromTempDir(grammarName + "Parser"); - Constructor pctor = parserClass.getConstructor(TokenStream.class); - Parser parser = pctor.newInstance(new CommonTokenStream(lexer)); - - return parser.compileParseTreePattern(pattern, parser.getRuleIndex(ruleName)); + RunOptions runOptions = createOptionsForJavaToolTests(grammarFileName, grammar, parserName, lexerName, + false, false, startRule, input, + false, false, Stage.Execute, true); + try (JavaRunner runner = new JavaRunner()) { + JavaExecutedState executedState = (JavaExecutedState)runner.run(runOptions); + JavaCompiledState compiledState = (JavaCompiledState)executedState.previousState; + Parser parser = compiledState.initializeLexerAndParser("").b; + + ParseTreePattern p = parser.compileParseTreePattern(pattern, parser.getRuleIndex(startRule)); + + ParseTreeMatch match = p.match(executedState.parseTree); + boolean matched = match.succeeded(); + if ( 
invertMatch ) assertFalse(matched); + else assertTrue(matched); + return match; + } } - public ParseTreePatternMatcher getPatternMatcher(String grammarName) - throws Exception - { - Class lexerClass = loadLexerClassFromTempDir(grammarName + "Lexer"); - Constructor ctor = lexerClass.getConstructor(CharStream.class); - Lexer lexer = ctor.newInstance((CharStream) null); + private static ParseTreePatternMatcher getPatternMatcher( + String grammarFileName, String grammar, String parserName, String lexerName, String startRule + ) throws Exception { + RunOptions runOptions = createOptionsForJavaToolTests(grammarFileName, grammar, parserName, lexerName, + false, false, startRule, null, + false, false, Stage.Compile, false); + try (JavaRunner runner = new JavaRunner()) { + JavaCompiledState compiledState = (JavaCompiledState) runner.run(runOptions); - Class parserClass = loadParserClassFromTempDir(grammarName + "Parser"); - Constructor pctor = parserClass.getConstructor(TokenStream.class); - Parser parser = pctor.newInstance(new CommonTokenStream(lexer)); + Pair lexerParserPair = compiledState.initializeLexerAndParser(""); - return new ParseTreePatternMatcher(lexer, parser); + return new ParseTreePatternMatcher(lexerParserPair.a, lexerParserPair.b); + } } } diff --git a/tool-testsuite/test/org/antlr/v4/test/tool/TestParserExec.java b/tool-testsuite/test/org/antlr/v4/test/tool/TestParserExec.java index 8ebe137ad0..9522a302d7 100644 --- a/tool-testsuite/test/org/antlr/v4/test/tool/TestParserExec.java +++ b/tool-testsuite/test/org/antlr/v4/test/tool/TestParserExec.java @@ -6,13 +6,15 @@ package org.antlr.v4.test.tool; -import org.junit.Before; -import org.junit.Ignore; -import org.junit.Test; +import org.antlr.v4.test.runtime.states.ExecutedState; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNull; -import static 
org.junit.Assert.assertTrue; +import java.nio.file.Path; + +import static org.antlr.v4.test.tool.ToolTestUtils.*; +import static org.junit.jupiter.api.Assertions.assertEquals; /** Test parser execution. * @@ -45,18 +47,12 @@ * Nongreedy loops match as much input as possible while still allowing * the remaining input to match. */ -public class TestParserExec extends BaseJavaToolTest { - @Before - @Override - public void testSetUp() throws Exception { - super.testSetUp(); - } - +public class TestParserExec { /** * This is a regression test for antlr/antlr4#118. * https://github.com/antlr/antlr4/issues/118 */ - @Ignore("Performance impact of passing this test may not be worthwhile") + @Disabled("Performance impact of passing this test may not be worthwhile") // TODO: port to test framework (not ported because test currently fails) @Test public void testStartRuleWithoutEOF() { String grammar = @@ -66,16 +62,15 @@ public void testSetUp() throws Exception { "ID : 'a'..'z'+ ;\n"+ "INT : '0'..'9'+ ;\n"+ "WS : (' '|'\\t'|'\\n')+ -> skip ;\n"; - String result = execParser("T.g4", grammar, "TParser", "TLexer", - null, null, "s", - "abc 34", true); + ExecutedState executedState = execParser("T.g4", grammar, "TParser", "TLexer", + "s", "abc 34", true); String expecting = "Decision 0:\n" + "s0-ID->s1\n" + "s1-INT->s2\n" + "s2-EOF->:s3=>1\n"; // Must point at accept state - assertEquals(expecting, result); - assertNull(getParseErrors()); + assertEquals(expecting, executedState.output); + assertEquals("", executedState.errors); } /** @@ -85,10 +80,11 @@ public void testSetUp() throws Exception { */ // TODO: port to test framework (can we simplify the Psl grammar?) @Test public void testFailedPredicateExceptionState() throws Exception { - String grammar = load("Psl.g4", "UTF-8"); - String found = execParser("Psl.g4", grammar, "PslParser", "PslLexer", null, null, "floating_constant", " . 
234", false); - assertEquals(null, found); - assertEquals("line 1:6 rule floating_constant DEC:A floating-point constant cannot have internal white space\n", getParseErrors()); + String grammar = load("Psl.g4"); + ExecutedState executedState = execParser("Psl.g4", grammar, + "PslParser", "PslLexer", "floating_constant", " . 234", false); + assertEquals("", executedState.output); + assertEquals("line 1:6 rule floating_constant DEC:A floating-point constant cannot have internal white space\n", executedState.errors); } /** @@ -97,7 +93,7 @@ public void testSetUp() throws Exception { * https://github.com/antlr/antlr4/issues/563 */ // TODO: port to test framework (missing templates) - @Test public void testAlternateQuotes() throws Exception { + @Test public void testAlternateQuotes(@TempDir Path tempDir) { String lexerGrammar = "lexer grammar ModeTagsLexer;\n" + "\n" + @@ -120,16 +116,14 @@ public void testSetUp() throws Exception { " | '«' '/' ID '»'\n" + " ;"; - boolean success = rawGenerateAndBuildRecognizer("ModeTagsLexer.g4", - lexerGrammar, - null, - "ModeTagsLexer"); - assertTrue(success); - - String found = execParser("ModeTagsParser.g4", parserGrammar, "ModeTagsParser", "ModeTagsLexer", - null, null, "file", "", false); - assertEquals(null, found); - assertNull(getParseErrors()); + execLexer("ModeTagsLexer.g4", lexerGrammar, "ModeTagsLexer", "", + tempDir, true); + ExecutedState executedState = execParser("ModeTagsParser.g4", parserGrammar, + "ModeTagsParser", "ModeTagsLexer", + "file", "", false, + tempDir); + assertEquals("", executedState.output); + assertEquals("", executedState.errors); } /** @@ -138,13 +132,13 @@ public void testSetUp() throws Exception { * https://github.com/antlr/antlr4/issues/672 */ // TODO: port to test framework (missing templates) - @Test public void testAttributeValueInitialization() throws Exception { + @Test public void testAttributeValueInitialization() { String grammar = "grammar Data; \n" + "\n" + "file : group+ EOF; \n" + 
"\n" + - "group: INT sequence {System.out.println($sequence.values.size());} ; \n" + + "group: INT sequence {outStream.println($sequence.values.size());} ; \n" + "\n" + "sequence returns [List values = new ArrayList()] \n" + " locals[List localValues = new ArrayList()]\n" + @@ -155,10 +149,10 @@ public void testSetUp() throws Exception { "WS : [ \\t\\n\\r]+ -> skip ; // toss out all whitespace\n"; String input = "2 9 10 3 1 2 3"; - String found = execParser("Data.g4", grammar, "DataParser", "DataLexer", - null, null, "file", input, false); - assertEquals("6\n", found); - assertNull(getParseErrors()); + ExecutedState executedState = execParser("Data.g4", grammar, + "DataParser", "DataLexer", "file", input, false); + assertEquals("6\n", executedState.output); + assertEquals("", executedState.errors); } @Test public void testCaseInsensitiveInCombinedGrammar() throws Exception { @@ -175,10 +169,11 @@ public void testSetUp() throws Exception { "WS: [ \\t\\n\\r]+ -> skip;"; String input = "NEW Abc (Not a AND not B)"; - execParser( + ExecutedState executedState = execParser( "CaseInsensitiveGrammar.g4", grammar, "CaseInsensitiveGrammarParser", "CaseInsensitiveGrammarLexer", - null, null, "e", input, false); - assertNull(getParseErrors()); + "e", input, false); + assertEquals("", executedState.output); + assertEquals("", executedState.errors); } } diff --git a/tool-testsuite/test/org/antlr/v4/test/tool/TestParserInterpreter.java b/tool-testsuite/test/org/antlr/v4/test/tool/TestParserInterpreter.java index 7b001f3190..6b3396bbf7 100644 --- a/tool-testsuite/test/org/antlr/v4/test/tool/TestParserInterpreter.java +++ b/tool-testsuite/test/org/antlr/v4/test/tool/TestParserInterpreter.java @@ -13,19 +13,12 @@ import org.antlr.v4.runtime.tree.ParseTree; import org.antlr.v4.tool.Grammar; import org.antlr.v4.tool.LexerGrammar; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.Test; -import static org.junit.Assert.assertEquals; +import static 
org.junit.jupiter.api.Assertions.assertEquals; -public class TestParserInterpreter extends BaseJavaToolTest { - @Before - @Override - public void testSetUp() throws Exception { - super.testSetUp(); - } - +public class TestParserInterpreter { @Test public void testEmptyStartRule() throws Exception { LexerGrammar lg = new LexerGrammar( "lexer grammar L;\n" + diff --git a/tool-testsuite/test/org/antlr/v4/test/tool/TestParserProfiler.java b/tool-testsuite/test/org/antlr/v4/test/tool/TestParserProfiler.java index fe89349e0c..38feb0f733 100644 --- a/tool-testsuite/test/org/antlr/v4/test/tool/TestParserProfiler.java +++ b/tool-testsuite/test/org/antlr/v4/test/tool/TestParserProfiler.java @@ -6,40 +6,46 @@ package org.antlr.v4.test.tool; +import org.antlr.runtime.RecognitionException; import org.antlr.v4.runtime.ANTLRInputStream; import org.antlr.v4.runtime.CommonTokenStream; import org.antlr.v4.runtime.LexerInterpreter; import org.antlr.v4.runtime.ParserInterpreter; import org.antlr.v4.runtime.ParserRuleContext; import org.antlr.v4.runtime.atn.DecisionInfo; +import org.antlr.v4.test.runtime.RunOptions; +import org.antlr.v4.test.runtime.Stage; +import org.antlr.v4.test.runtime.java.JavaRunner; +import org.antlr.v4.test.runtime.states.ExecutedState; import org.antlr.v4.tool.Grammar; import org.antlr.v4.tool.LexerGrammar; import org.antlr.v4.tool.Rule; -import org.junit.Before; -import org.junit.Ignore; -import org.junit.Test; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Test; import java.util.Arrays; -import static org.junit.Assert.assertEquals; +import static org.antlr.v4.test.tool.ToolTestUtils.createOptionsForJavaToolTests; +import static org.junit.jupiter.api.Assertions.assertEquals; @SuppressWarnings("unused") -public class TestParserProfiler extends BaseJavaToolTest { - LexerGrammar lg; +public class TestParserProfiler { + final static LexerGrammar lg; - @Before - @Override - public void testSetUp() throws Exception { - super.testSetUp(); - lg 
= new LexerGrammar( - "lexer grammar L;\n" + - "WS : [ \\r\\t\\n]+ -> channel(HIDDEN) ;\n" + - "SEMI : ';' ;\n" + - "DOT : '.' ;\n" + - "ID : [a-zA-Z]+ ;\n" + - "INT : [0-9]+ ;\n" + - "PLUS : '+' ;\n" + - "MULT : '*' ;\n"); + static { + try { + lg = new LexerGrammar( + "lexer grammar L;\n" + + "WS : [ \\r\\t\\n]+ -> channel(HIDDEN) ;\n" + + "SEMI : ';' ;\n" + + "DOT : '.' ;\n" + + "ID : [a-zA-Z]+ ;\n" + + "INT : [0-9]+ ;\n" + + "PLUS : '+' ;\n" + + "MULT : '*' ;\n"); + } catch (RecognitionException e) { + throw new RuntimeException(e); + } } @Test public void testLL1() throws Exception { @@ -157,7 +163,7 @@ public void testSetUp() throws Exception { assertEquals(expecting, info[1].toString()); } - @Ignore + @Disabled @Test public void testSimpleLanguage() throws Exception { Grammar g = new Grammar(TestXPath.grammar); String input = @@ -174,7 +180,7 @@ public void testSetUp() throws Exception { assertEquals(1, info.length); } - @Ignore + @Disabled @Test public void testDeepLookahead() throws Exception { Grammar g = new Grammar( "parser grammar T;\n" + @@ -200,7 +206,7 @@ public void testSetUp() throws Exception { assertEquals(expecting, Arrays.toString(info)); } - @Test public void testProfilerGeneratedCode() throws Exception { + @Test public void testProfilerGeneratedCode() { String grammar = "grammar T;\n" + "s : a+ ID EOF ;\n" + @@ -215,15 +221,19 @@ public void testSetUp() throws Exception { "PLUS : '+' ;\n" + "MULT : '*' ;\n"; - String found = execParser("T.g4", grammar, "TParser", "TLexer", null, null, "s", - "xyz;abc;z.q", false, true); - String expecting = - "[{decision=0, contextSensitivities=0, errors=0, ambiguities=0, SLL_lookahead=6, SLL_ATNTransitions=4, " + - "SLL_DFATransitions=2, LL_Fallback=0, LL_lookahead=0, LL_ATNTransitions=0}," + - " {decision=1, contextSensitivities=0, errors=0, ambiguities=0, SLL_lookahead=6, " + - "SLL_ATNTransitions=3, SLL_DFATransitions=3, LL_Fallback=0, LL_lookahead=0, LL_ATNTransitions=0}]\n"; - assertEquals(expecting, 
found); - assertEquals(null, getParseErrors()); + RunOptions runOptions = createOptionsForJavaToolTests("T.g4", grammar, "TParser", "TLexer", + false, false, "s", "xyz;abc;z.q", + true, false, Stage.Execute, false); + try (JavaRunner runner = new JavaRunner()) { + ExecutedState state = (ExecutedState) runner.run(runOptions); + String expecting = + "[{decision=0, contextSensitivities=0, errors=0, ambiguities=0, SLL_lookahead=6, SLL_ATNTransitions=4, " + + "SLL_DFATransitions=2, LL_Fallback=0, LL_lookahead=0, LL_ATNTransitions=0}," + + " {decision=1, contextSensitivities=0, errors=0, ambiguities=0, SLL_lookahead=6, " + + "SLL_ATNTransitions=3, SLL_DFATransitions=3, LL_Fallback=0, LL_lookahead=0, LL_ATNTransitions=0}]\n"; + assertEquals(expecting, state.output); + assertEquals("", state.errors); + } } public DecisionInfo[] interpAndGetDecisionInfo( diff --git a/tool-testsuite/test/org/antlr/v4/test/tool/TestPerformance.java b/tool-testsuite/test/org/antlr/v4/test/tool/TestPerformance.java index 86127703ba..b56216389f 100644 --- a/tool-testsuite/test/org/antlr/v4/test/tool/TestPerformance.java +++ b/tool-testsuite/test/org/antlr/v4/test/tool/TestPerformance.java @@ -21,7 +21,6 @@ import org.antlr.v4.runtime.RecognitionException; import org.antlr.v4.runtime.Recognizer; import org.antlr.v4.runtime.Token; -import org.antlr.v4.runtime.TokenSource; import org.antlr.v4.runtime.TokenStream; import org.antlr.v4.runtime.atn.ATN; import org.antlr.v4.runtime.atn.ATNConfig; @@ -40,12 +39,13 @@ import org.antlr.v4.runtime.tree.ParseTreeListener; import org.antlr.v4.runtime.tree.ParseTreeWalker; import org.antlr.v4.runtime.tree.TerminalNode; -import org.antlr.v4.test.runtime.BaseRuntimeTest; -import org.antlr.v4.test.runtime.ErrorQueue; -import org.antlr.v4.test.runtime.RuntimeTestUtils; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; +import org.antlr.v4.test.runtime.*; +import org.antlr.v4.test.runtime.java.JavaRunner; +import 
org.antlr.v4.test.runtime.states.JavaCompiledState; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.api.io.TempDir; import java.io.File; import java.io.FilenameFilter; @@ -56,8 +56,7 @@ import java.lang.reflect.Constructor; import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; -import java.net.URL; -import java.net.URLClassLoader; +import java.nio.file.Path; import java.util.ArrayList; import java.util.Arrays; import java.util.BitSet; @@ -79,13 +78,15 @@ import java.util.logging.Level; import java.util.logging.Logger; -import static org.antlr.v4.test.runtime.BaseRuntimeTest.writeFile; -import static org.hamcrest.CoreMatchers.instanceOf; -import static org.junit.Assert.assertThat; -import static org.junit.Assert.assertTrue; +import static org.antlr.v4.test.runtime.FileUtils.writeFile; +import static org.antlr.v4.test.runtime.RuntimeTestUtils.NewLine; +import static org.antlr.v4.test.tool.ToolTestUtils.createOptionsForJavaToolTests; +import static org.antlr.v4.test.tool.ToolTestUtils.load; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; @SuppressWarnings("unused") -public class TestPerformance extends BaseJavaToolTest { +public class TestPerformance { /** * Parse all java files under this package within the JDK_SOURCE_ROOT * (environment variable or property defined on the Java command line). 
@@ -386,24 +387,19 @@ public class TestPerformance extends BaseJavaToolTest { private final AtomicIntegerArray tokenCount = new AtomicIntegerArray(PASSES); - @Before - @Override - public void testSetUp() throws Exception { - super.testSetUp(); - } - @Test - @org.junit.Ignore + @Disabled public void compileJdk() throws IOException, InterruptedException, ExecutionException { String jdkSourceRoot = getSourceRoot("JDK"); - assertTrue("The JDK_SOURCE_ROOT environment variable must be set for performance testing.", jdkSourceRoot != null && !jdkSourceRoot.isEmpty()); + assertTrue(jdkSourceRoot != null && !jdkSourceRoot.isEmpty(), + "The JDK_SOURCE_ROOT environment variable must be set for performance testing."); - compileJavaParser(USE_LR_GRAMMAR); + JavaCompiledState javaCompiledState = compileJavaParser(USE_LR_GRAMMAR); final String lexerName = USE_LR_GRAMMAR ? "JavaLRLexer" : "JavaLexer"; final String parserName = USE_LR_GRAMMAR ? "JavaLRParser" : "JavaParser"; final String listenerName = USE_LR_GRAMMAR ? "JavaLRBaseListener" : "JavaBaseListener"; final String entryPoint = "compilationUnit"; - final ParserFactory factory = getParserFactory(lexerName, parserName, listenerName, entryPoint); + final ParserFactory factory = getParserFactory(javaCompiledState, listenerName, entryPoint); if (!TOP_PACKAGE.isEmpty()) { jdkSourceRoot = jdkSourceRoot + '/' + TOP_PACKAGE.replace('.', '/'); @@ -682,13 +678,6 @@ private String getSourceRoot(String prefix) { return sourceRoot; } - @Override - public void eraseTempDir() { - if (DELETE_TEMP_FILES) { - super.eraseTempDir(); - } - } - public static String getOptionsDescription(String topPackage) { StringBuilder builder = new StringBuilder(); builder.append("Input="); @@ -702,20 +691,20 @@ public static String getOptionsDescription(String topPackage) { builder.append(", Grammar=").append(USE_LR_GRAMMAR ? 
"LR" : "Standard"); builder.append(", ForceAtn=").append(FORCE_ATN); - builder.append(NEW_LINE); + builder.append(NewLine); builder.append("Op=Lex").append(RUN_PARSER ? "+Parse" : " only"); builder.append(", Strategy=").append(BAIL_ON_ERROR ? BailErrorStrategy.class.getSimpleName() : DefaultErrorStrategy.class.getSimpleName()); builder.append(", BuildParseTree=").append(BUILD_PARSE_TREES); builder.append(", WalkBlankListener=").append(BLANK_LISTENER); - builder.append(NEW_LINE); + builder.append(NewLine); builder.append("Lexer=").append(REUSE_LEXER ? "setInputStream" : "newInstance"); builder.append(", Parser=").append(REUSE_PARSER ? "setInputStream" : "newInstance"); builder.append(", AfterPass=").append(CLEAR_DFA ? "newInstance" : "setInputStream"); - builder.append(NEW_LINE); + builder.append(NewLine); return builder.toString(); } @@ -1091,11 +1080,11 @@ private static long sum(long[] array) { return result; } - protected void compileJavaParser(boolean leftRecursive) throws IOException { + protected JavaCompiledState compileJavaParser(boolean leftRecursive) throws IOException { String grammarFileName = leftRecursive ? "JavaLR.g4" : "Java.g4"; String parserName = leftRecursive ? "JavaLRParser" : "JavaParser"; String lexerName = leftRecursive ? 
"JavaLRLexer" : "JavaLexer"; - String body = load(grammarFileName, null); + String body = load(grammarFileName); List extraOptions = new ArrayList(); extraOptions.add("-Werror"); if (FORCE_ATN) { @@ -1110,10 +1099,14 @@ protected void compileJavaParser(boolean leftRecursive) throws IOException { extraOptions.add("-XdbgSTWait"); } } - extraOptions.add("-visitor"); String[] extraOptionsArray = extraOptions.toArray(new String[0]); - boolean success = rawGenerateAndBuildRecognizer(grammarFileName, body, parserName, lexerName, true, extraOptionsArray); - assertTrue(success); + + RunOptions runOptions = createOptionsForJavaToolTests(grammarFileName, body, parserName, lexerName, + false, true, null, null, + false, false, Stage.Compile, false); + try (RuntimeRunner runner = new JavaRunner()) { + return (JavaCompiledState) runner.run(runOptions); + } } private static void updateChecksum(MurmurHashChecksum checksum, int value) { @@ -1134,19 +1127,16 @@ private static void updateChecksum(MurmurHashChecksum checksum, Token token) { updateChecksum(checksum, token.getChannel()); } - protected ParserFactory getParserFactory(String lexerName, String parserName, String listenerName, final String entryPoint) { + protected ParserFactory getParserFactory(JavaCompiledState javaCompiledState, String listenerName, final String entryPoint) { try { - ClassLoader loader = new URLClassLoader(new URL[] { getTempTestDir().toURI().toURL() }, ClassLoader.getSystemClassLoader()); - final Class lexerClass = loader.loadClass(lexerName).asSubclass(Lexer.class); - final Class parserClass = loader.loadClass(parserName).asSubclass(Parser.class); - final Class listenerClass = loader.loadClass(listenerName).asSubclass(ParseTreeListener.class); + ClassLoader loader = javaCompiledState.loader; + final Class listenerClass = loader.loadClass(listenerName).asSubclass(ParseTreeListener.class); - final Constructor lexerCtor = lexerClass.getConstructor(CharStream.class); - final Constructor parserCtor = 
parserClass.getConstructor(TokenStream.class); + final Constructor lexerCtor = javaCompiledState.lexer.getConstructor(CharStream.class); + final Constructor parserCtor = javaCompiledState.parser.getConstructor(TokenStream.class); // construct initial instances of the lexer and parser to deserialize their ATNs - TokenSource tokenSource = lexerCtor.newInstance(new ANTLRInputStream("")); - parserCtor.newInstance(new CommonTokenStream(tokenSource)); + javaCompiledState.initializeLexerAndParser(""); return new ParserFactory() { @@ -1260,7 +1250,7 @@ public FileParseResult parseFile(CharStream input, int currentPass, int thread) parser.setErrorHandler(new BailErrorStrategy()); } - Method parseMethod = parserClass.getMethod(entryPoint); + Method parseMethod = javaCompiledState.parser.getMethod(entryPoint); Object parseResult; try { @@ -1333,7 +1323,7 @@ public FileParseResult parseFile(CharStream input, int currentPass, int thread) parseResult = parseMethod.invoke(parser); } - assertThat(parseResult, instanceOf(ParseTree.class)); + assertTrue(parseResult instanceof ParseTree); if (COMPUTE_CHECKSUM && BUILD_PARSE_TREES) { ParseTreeWalker.DEFAULT.walk(new ChecksumParseTreeListener(checksum), (ParseTree)parseResult); } @@ -1354,7 +1344,7 @@ public FileParseResult parseFile(CharStream input, int currentPass, int thread) }; } catch (Exception e) { e.printStackTrace(System.out); - Assert.fail(e.getMessage()); + fail(e.getMessage()); throw new IllegalStateException(e); } } @@ -1553,7 +1543,7 @@ protected ATNConfigSet computeReachSet(ATNConfigSet closure, int t, boolean full } private static class DescriptiveErrorListener extends BaseErrorListener { - public static DescriptiveErrorListener INSTANCE = new DescriptiveErrorListener(); + public final static DescriptiveErrorListener INSTANCE = new DescriptiveErrorListener(); @Override public void syntaxError(Recognizer recognizer, Object offendingSymbol, @@ -1944,8 +1934,10 @@ public int getValue() { } } - @Test(timeout = 20000) - 
public void testExponentialInclude() { + @Test + @Timeout(20) + public void testExponentialInclude(@TempDir Path tempDir) { + String tempDirPath = tempDir.toString(); String grammarFormat = "parser grammar Level_%d_%d;\n" + "\n" + @@ -1953,7 +1945,7 @@ public void testExponentialInclude() { "\n" + "rule_%d_%d : EOF;\n"; - RuntimeTestUtils.mkdir(getTempDirPath()); + FileUtils.mkdir(tempDirPath); long startTime = System.nanoTime(); @@ -1961,15 +1953,15 @@ public void testExponentialInclude() { for (int level = 0; level < levels; level++) { String leafPrefix = level == levels - 1 ? "//" : ""; String grammar1 = String.format(grammarFormat, level, 1, leafPrefix, level + 1, level + 1, level, 1); - writeFile(getTempDirPath(), "Level_" + level + "_1.g4", grammar1); + writeFile(tempDirPath, "Level_" + level + "_1.g4", grammar1); if (level > 0) { String grammar2 = String.format(grammarFormat, level, 2, leafPrefix, level + 1, level + 1, level, 1); - writeFile(getTempDirPath(), "Level_" + level + "_2.g4", grammar2); + writeFile(tempDirPath, "Level_" + level + "_2.g4", grammar2); } } - ErrorQueue equeue = BaseRuntimeTest.antlrOnString(getTempDirPath(), "Java", "Level_0_1.g4", false); - Assert.assertTrue(equeue.errors.isEmpty()); + ErrorQueue equeue = Generator.antlrOnString(tempDirPath, "Java", "Level_0_1.g4", false); + assertTrue(equeue.errors.isEmpty()); long endTime = System.nanoTime(); System.out.format("%s milliseconds.%n", (endTime - startTime) / 1000000.0); diff --git a/tool-testsuite/test/org/antlr/v4/test/tool/TestScopeParsing.java b/tool-testsuite/test/org/antlr/v4/test/tool/TestScopeParsing.java index 64db0e8e8e..75584b545a 100644 --- a/tool-testsuite/test/org/antlr/v4/test/tool/TestScopeParsing.java +++ b/tool-testsuite/test/org/antlr/v4/test/tool/TestScopeParsing.java @@ -10,92 +10,91 @@ import org.antlr.v4.parse.ScopeParser; import org.antlr.v4.tool.Attribute; import org.antlr.v4.tool.Grammar; -import org.junit.Before; -import org.junit.Test; -import 
org.junit.runner.RunWith; -import org.junit.runners.Parameterized; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; import java.util.ArrayList; import java.util.Collection; import java.util.LinkedHashMap; import java.util.List; -import static org.junit.Assert.assertEquals; -@RunWith(Parameterized.class) -public class TestScopeParsing extends BaseJavaToolTest { - static String[] argPairs = { - "", "", - " ", "", - "int i", "i:int", - "int[] i, int j[]", "i:int[], j:int []", - "Map[] i, int j[]", "i:Map[], j:int []", - "Map>[] i", "i:Map>[]", - "int i = 34+a[3], int j[] = new int[34]", - "i:int=34+a[3], j:int []=new int[34]", - "char *[3] foo = {1,2,3}", "foo:char *[3]={1,2,3}", // not valid C really, C is "type name" however so this is cool (this was broken in 4.5 anyway) - "String[] headers", "headers:String[]", +import static org.junit.jupiter.api.Assertions.assertEquals; - // C++ - "std::vector x", "x:std::vector", // yuck. Don't choose :: as the : of a declaration +public class TestScopeParsing { + final static String[] argPairs = { + "", "", + " ", "", + "int i", "i:int", + "int[] i, int j[]", "i:int[], j:int []", + "Map[] i, int j[]", "i:Map[], j:int []", + "Map>[] i", "i:Map>[]", + "int i = 34+a[3], int j[] = new int[34]", + "i:int=34+a[3], j:int []=new int[34]", + "char *[3] foo = {1,2,3}", "foo:char *[3]={1,2,3}", // not valid C really, C is "type name" however so this is cool (this was broken in 4.5 anyway) + "String[] headers", "headers:String[]", - // python/ruby style - "i", "i", - "i,j", "i, j", - "i\t,j, k", "i, j, k", + // C++ + "std::vector x", "x:std::vector", // yuck. 
Don't choose :: as the : of a declaration - // swift style - "x: int", "x:int", - "x :int", "x:int", - "x:int", "x:int", - "x:int=3", "x:int=3", - "r:Rectangle=Rectangle(fromLength: 6, fromBreadth: 12)", "r:Rectangle=Rectangle(fromLength: 6, fromBreadth: 12)", - "p:pointer to int", "p:pointer to int", - "a: array[3] of int", "a:array[3] of int", - "a \t:\tfunc(array[3] of int)", "a:func(array[3] of int)", - "x:int, y:float", "x:int, y:float", - "x:T?, f:func(array[3] of int), y:int", "x:T?, f:func(array[3] of int), y:int", + // python/ruby style + "i", "i", + "i,j", "i, j", + "i\t,j, k", "i, j, k", - // go is postfix type notation like "x int" but must use either "int x" or "x:int" in [...] actions - "float64 x = 3", "x:float64=3", - "map[string]int x", "x:map[string]int", - }; + // swift style + "x: int", "x:int", + "x :int", "x:int", + "x:int", "x:int", + "x:int=3", "x:int=3", + "r:Rectangle=Rectangle(fromLength: 6, fromBreadth: 12)", "r:Rectangle=Rectangle(fromLength: 6, fromBreadth: 12)", + "p:pointer to int", "p:pointer to int", + "a: array[3] of int", "a:array[3] of int", + "a \t:\tfunc(array[3] of int)", "a:func(array[3] of int)", + "x:int, y:float", "x:int, y:float", + "x:T?, f:func(array[3] of int), y:int", "x:T?, f:func(array[3] of int), y:int", - String input; - String output; + // go is postfix type notation like "x int" but must use either "int x" or "x:int" in [...] 
actions + "float64 x = 3", "x:float64=3", + "map[string]int x", "x:map[string]int", + }; - public TestScopeParsing(String input, String output) { - this.input = input; - this.output = output; - } + @ParameterizedTest + @MethodSource("getAllTestDescriptors") + public void testArgs(Parameter parameter) throws Exception { + Grammar dummy = new Grammar("grammar T; a:'a';"); - @Before - @Override - public void testSetUp() throws Exception { - super.testSetUp(); + LinkedHashMap attributes = ScopeParser.parseTypedArgList(null, parameter.input, dummy).attributes; + List out = new ArrayList<>(); + for (String arg : attributes.keySet()) { + Attribute attr = attributes.get(arg); + out.add(attr.toString()); + } + String actual = Utils.join(out.toArray(), ", "); + assertEquals(parameter.output, actual); } - @Test - public void testArgs() throws Exception { - Grammar dummy = new Grammar("grammar T; a:'a';"); - - LinkedHashMap attributes = ScopeParser.parseTypedArgList(null, input, dummy).attributes; - List out = new ArrayList<>(); - for (String arg : attributes.keySet()) { - Attribute attr = attributes.get(arg); - out.add(attr.toString()); - } - String actual = Utils.join(out.toArray(), ", "); - assertEquals(output, actual); - } - - @Parameterized.Parameters(name="{0}") - public static Collection getAllTestDescriptors() { - List tests = new ArrayList<>(); + private static Collection getAllTestDescriptors() { + List tests = new ArrayList<>(); for (int i = 0; i < argPairs.length; i+=2) { String arg = argPairs[i]; String output = argPairs[i+1]; - tests.add(new Object[]{arg,output}); + tests.add(new Parameter(arg, output)); } return tests; } + + static class Parameter { + public final String input; + public final String output; + + public Parameter(String input, String output) { + this.input = input; + this.output = output; + } + + @Override + public String toString() { + return input; + } + } } diff --git a/tool-testsuite/test/org/antlr/v4/test/tool/TestSymbolIssues.java 
b/tool-testsuite/test/org/antlr/v4/test/tool/TestSymbolIssues.java index f83f3c1a4a..c1efd0ce85 100644 --- a/tool-testsuite/test/org/antlr/v4/test/tool/TestSymbolIssues.java +++ b/tool-testsuite/test/org/antlr/v4/test/tool/TestSymbolIssues.java @@ -8,14 +8,15 @@ import org.antlr.v4.tool.ErrorType; import org.antlr.v4.tool.LexerGrammar; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.Test; -import static org.junit.Assert.assertEquals; +import static org.antlr.v4.test.tool.ToolTestUtils.realElements; +import static org.antlr.v4.test.tool.ToolTestUtils.testErrors; +import static org.junit.jupiter.api.Assertions.assertEquals; /** */ -public class TestSymbolIssues extends BaseJavaToolTest { - static String[] A = { +public class TestSymbolIssues { + final static String[] A = { // INPUT "grammar A;\n" + "options { opt='sss'; k=3; }\n" + @@ -43,7 +44,7 @@ public class TestSymbolIssues extends BaseJavaToolTest { "error(" + ErrorType.MISSING_RULE_ARGS.code + "): A.g4:10:31: missing argument(s) on rule reference: a\n" }; - static String[] B = { + final static String[] B = { // INPUT "parser grammar B;\n" + "tokens { ID, FOO, X, Y }\n" + @@ -61,7 +62,7 @@ public class TestSymbolIssues extends BaseJavaToolTest { "error(" + ErrorType.IMPLICIT_STRING_DEFINITION.code + "): B.g4:4:20: cannot create implicit token for string literal in non-combined grammar: '.'\n" }; - static String[] D = { + final static String[] D = { // INPUT "parser grammar D;\n" + "tokens{ID}\n" + @@ -78,7 +79,7 @@ public class TestSymbolIssues extends BaseJavaToolTest { "error(" + ErrorType.RETVAL_CONFLICTS_WITH_ARG.code + "): D.g4:6:22: return value i conflicts with parameter with same name\n" }; - static String[] E = { + final static String[] E = { // INPUT "grammar E;\n" + "tokens {\n" + @@ -92,7 +93,7 @@ public class TestSymbolIssues extends BaseJavaToolTest { "warning(" + ErrorType.TOKEN_NAME_REASSIGNMENT.code + "): E.g4:3:4: token name A is already defined\n" }; - static 
String[] F = { + final static String[] F = { // INPUT "lexer grammar F;\n" + "A: 'a';\n" + @@ -106,17 +107,11 @@ public class TestSymbolIssues extends BaseJavaToolTest { "error(" + ErrorType.MODE_CONFLICTS_WITH_TOKEN.code + "): F.g4:3:0: mode M1 conflicts with token with same name\n" }; - @Before - @Override - public void testSetUp() throws Exception { - super.testSetUp(); - } - - @Test public void testA() { super.testErrors(A, false); } - @Test public void testB() { super.testErrors(B, false); } - @Test public void testD() { super.testErrors(D, false); } - @Test public void testE() { super.testErrors(E, false); } - @Test public void testF() { super.testErrors(F, false); } + @Test public void testA() { testErrors(A, false); } + @Test public void testB() { testErrors(B, false); } + @Test public void testD() { testErrors(D, false); } + @Test public void testE() { testErrors(E, false); } + @Test public void testF() { testErrors(F, false); } @Test public void testStringLiteralRedefs() throws Exception { String grammar = diff --git a/tool-testsuite/test/org/antlr/v4/test/tool/TestTokenPositionOptions.java b/tool-testsuite/test/org/antlr/v4/test/tool/TestTokenPositionOptions.java index a226f73b4a..baef7f31e6 100644 --- a/tool-testsuite/test/org/antlr/v4/test/tool/TestTokenPositionOptions.java +++ b/tool-testsuite/test/org/antlr/v4/test/tool/TestTokenPositionOptions.java @@ -11,21 +11,14 @@ import org.antlr.v4.runtime.misc.IntervalSet; import org.antlr.v4.tool.Grammar; import org.antlr.v4.tool.ast.GrammarAST; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.util.ArrayList; import java.util.List; -import static org.junit.Assert.assertEquals; - -public class TestTokenPositionOptions extends BaseJavaToolTest { - @Before - @Override - public void testSetUp() throws Exception { - super.testSetUp(); - } +import static org.junit.jupiter.api.Assertions.assertEquals; +public class TestTokenPositionOptions { @Test public void 
testLeftRecursionRewrite() throws Exception { Grammar g = new Grammar( "grammar T;\n" + diff --git a/tool-testsuite/test/org/antlr/v4/test/tool/TestTokenTypeAssignment.java b/tool-testsuite/test/org/antlr/v4/test/tool/TestTokenTypeAssignment.java index 4c8d7fe4b4..804c8ee5a2 100644 --- a/tool-testsuite/test/org/antlr/v4/test/tool/TestTokenTypeAssignment.java +++ b/tool-testsuite/test/org/antlr/v4/test/tool/TestTokenTypeAssignment.java @@ -9,21 +9,17 @@ import org.antlr.v4.runtime.Token; import org.antlr.v4.tool.Grammar; import org.antlr.v4.tool.LexerGrammar; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.util.HashSet; import java.util.LinkedHashSet; import java.util.Set; import java.util.StringTokenizer; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.*; -public class TestTokenTypeAssignment extends BaseJavaToolTest { - - @Test - public void testParserSimpleTokens() throws Exception { +public class TestTokenTypeAssignment { + @Test public void testParserSimpleTokens() throws Exception { Grammar g = new Grammar( "parser grammar t;\n"+ "a : A | B;\n" + @@ -182,14 +178,12 @@ protected void checkSymbols(Grammar g, StringTokenizer st = new StringTokenizer(allValidTokensStr, ", "); while ( st.hasMoreTokens() ) { String tokenName = st.nextToken(); - assertTrue("token "+tokenName+" expected, but was undefined", - g.getTokenType(tokenName) != Token.INVALID_TYPE); + assertTrue(g.getTokenType(tokenName) != Token.INVALID_TYPE, "token "+tokenName+" expected, but was undefined"); tokens.remove(tokenName); } // make sure there are not any others (other than etc...) 
for (String tokenName : tokens) { - assertTrue("unexpected token name "+tokenName, - g.getTokenType(tokenName) < Token.MIN_USER_TOKEN_TYPE); + assertTrue(g.getTokenType(tokenName) < Token.MIN_USER_TOKEN_TYPE, "unexpected token name "+tokenName); } // make sure all expected rules are there @@ -197,14 +191,11 @@ protected void checkSymbols(Grammar g, int n = 0; while ( st.hasMoreTokens() ) { String ruleName = st.nextToken(); - assertNotNull("rule "+ruleName+" expected", g.getRule(ruleName)); + assertNotNull(g.getRule(ruleName), "rule "+ruleName+" expected"); n++; } //System.out.println("rules="+rules); // make sure there are no extra rules - assertEquals("number of rules mismatch; expecting "+n+"; found "+g.rules.size(), - n, g.rules.size()); - + assertEquals(n, g.rules.size(), "number of rules mismatch; expecting "+n+"; found "+g.rules.size()); } - } diff --git a/tool-testsuite/test/org/antlr/v4/test/tool/TestToolSyntaxErrors.java b/tool-testsuite/test/org/antlr/v4/test/tool/TestToolSyntaxErrors.java index 4f883d0828..a501a023b5 100644 --- a/tool-testsuite/test/org/antlr/v4/test/tool/TestToolSyntaxErrors.java +++ b/tool-testsuite/test/org/antlr/v4/test/tool/TestToolSyntaxErrors.java @@ -8,12 +8,13 @@ import org.antlr.v4.Tool; import org.antlr.v4.tool.ErrorType; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.Test; -public class TestToolSyntaxErrors extends BaseJavaToolTest { - static String[] A = { +import static org.antlr.v4.test.tool.ToolTestUtils.testErrors; +import static org.junit.jupiter.api.Assertions.assertNotEquals; + +public class TestToolSyntaxErrors { + final static String[] A = { // INPUT "grammar A;\n" + "", @@ -55,23 +56,17 @@ public class TestToolSyntaxErrors extends BaseJavaToolTest { "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:2:15: syntax error: mismatched input ';' expecting COLON while matching a lexer rule\n", }; - @Before - @Override - public void testSetUp() throws Exception { - 
super.testSetUp(); - } - @Test public void AllErrorCodesDistinct() { ErrorType[] errorTypes = ErrorType.class.getEnumConstants(); for (int i = 0; i < errorTypes.length; i++) { for (int j = i + 1; j < errorTypes.length; j++) { - Assert.assertNotEquals(errorTypes[i].code, errorTypes[j].code); + assertNotEquals(errorTypes[i].code, errorTypes[j].code); } } } - @Test public void testA() { super.testErrors(A, true); } + @Test public void testA() { testErrors(A, true); } @Test public void testExtraColon() { String[] pair = new String[] { @@ -80,7 +75,7 @@ public void AllErrorCodesDistinct() { "b : B ;", "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:2:4: syntax error: ':' came as a complete surprise to me while matching alternative\n", }; - super.testErrors(pair, true); + testErrors(pair, true); } @Test public void testMissingRuleSemi() { @@ -90,7 +85,7 @@ public void AllErrorCodesDistinct() { "b : B ;", "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:3:0: syntax error: unterminated rule (missing ';') detected at 'b :' while looking for rule element\n", }; - super.testErrors(pair, true); + testErrors(pair, true); } @Test public void testMissingRuleSemi2() { @@ -100,7 +95,7 @@ public void AllErrorCodesDistinct() { "B : 'b' ;", "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:3:2: syntax error: unterminated rule (missing ';') detected at ': 'b'' while looking for lexer rule element\n", }; - super.testErrors(pair, true); + testErrors(pair, true); } @Test public void testMissingRuleSemi3() { @@ -110,7 +105,7 @@ public void AllErrorCodesDistinct() { "b[int i] returns [int y] : B ;", "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:3:9: syntax error: unterminated rule (missing ';') detected at 'returns int y' while looking for rule element\n" }; - super.testErrors(pair, true); + testErrors(pair, true); } @Test public void testMissingRuleSemi4() { @@ -122,7 +117,7 @@ public void AllErrorCodesDistinct() { "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:2:4: syntax error: 
unterminated rule (missing ';') detected at 'b catch' while looking for rule element\n" }; - super.testErrors(pair, true); + testErrors(pair, true); } @Test public void testMissingRuleSemi5() { @@ -133,7 +128,7 @@ public void AllErrorCodesDistinct() { "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:2:4: syntax error: unterminated rule (missing ';') detected at 'A catch' while looking for rule element\n" }; - super.testErrors(pair, true); + testErrors(pair, true); } @Test public void testBadRulePrequelStart() { @@ -144,7 +139,7 @@ public void AllErrorCodesDistinct() { "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:2:4: syntax error: 'options {' came as a complete surprise to me while looking for an identifier\n" }; - super.testErrors(pair, true); + testErrors(pair, true); } @Test public void testBadRulePrequelStart2() { @@ -155,7 +150,7 @@ public void AllErrorCodesDistinct() { "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:2:2: syntax error: '}' came as a complete surprise to me while matching rule preamble\n" }; - super.testErrors(pair, true); + testErrors(pair, true); } @Test public void testModeInParser() { @@ -168,7 +163,7 @@ public void AllErrorCodesDistinct() { "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:4:0: syntax error: 'b' came as a complete surprise to me\n" + "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:4:6: syntax error: mismatched input ';' expecting COLON while matching a lexer rule\n" }; - super.testErrors(pair, true); + testErrors(pair, true); } /** @@ -184,7 +179,7 @@ public void AllErrorCodesDistinct() { "error(" + ErrorType.UNTERMINATED_STRING_LITERAL.code + "): A.g4:2:4: unterminated string literal\n" }; - super.testErrors(pair, true); + testErrors(pair, true); } /** @@ -199,7 +194,7 @@ public void AllErrorCodesDistinct() { "error(" + ErrorType.SYNTAX_ERROR.code + "): A.g4:2:0: syntax error: '_' came as a complete surprise to me\n" }; - super.testErrors(pair, true); + testErrors(pair, true); } /** @@ -215,7 +210,7 @@ public 
void AllErrorCodesDistinct() { "" }; - super.testErrors(pair, true); + testErrors(pair, true); } /** @@ -230,7 +225,7 @@ public void AllErrorCodesDistinct() { "" }; - super.testErrors(pair, true); + testErrors(pair, true); } /** @@ -245,7 +240,7 @@ public void AllErrorCodesDistinct() { "" }; - super.testErrors(pair, true); + testErrors(pair, true); } @Test public void testEmptyTokensBlock() { @@ -256,7 +251,7 @@ public void AllErrorCodesDistinct() { "" }; - super.testErrors(pair, true); + testErrors(pair, true); } /** @@ -275,7 +270,7 @@ public void AllErrorCodesDistinct() { "error(" + ErrorType.INVALID_LEXER_COMMAND.code + "): A.g4:4:14: lexer command popmode does not exist or is not supported by the current target\n" + "error(" + ErrorType.INVALID_LEXER_COMMAND.code + "): A.g4:5:14: lexer command token does not exist or is not supported by the current target\n" }; - super.testErrors(pair, true); + testErrors(pair, true); } @Test public void testLexerCommandArgumentValidation() { @@ -289,7 +284,7 @@ public void AllErrorCodesDistinct() { "error(" + ErrorType.UNWANTED_LEXER_COMMAND_ARGUMENT.code + "): A.g4:4:14: lexer command popMode does not take any arguments\n" + "error(" + ErrorType.MISSING_LEXER_COMMAND_ARGUMENT.code + "): A.g4:5:14: missing argument for lexer command type\n" }; - super.testErrors(pair, true); + testErrors(pair, true); } @Test public void testRuleRedefinition() { @@ -304,7 +299,7 @@ public void AllErrorCodesDistinct() { "error(" + ErrorType.RULE_REDEFINITION.code + "): Oops.g4:4:0: rule ret_ty redefinition; previous at line 3\n" }; - super.testErrors(pair, true); + testErrors(pair, true); } @Test public void testEpsilonClosureAnalysis() { @@ -325,7 +320,7 @@ public void AllErrorCodesDistinct() { expected }; - super.testErrors(pair, true); + testErrors(pair, true); } // Test for https://github.com/antlr/antlr4/issues/1203 @@ -342,7 +337,7 @@ public void AllErrorCodesDistinct() { expected }; - super.testErrors(pair, true); + testErrors(pair, 
true); } // Test for https://github.com/antlr/antlr4/issues/2860, https://github.com/antlr/antlr4/issues/1105 @@ -360,7 +355,7 @@ public void AllErrorCodesDistinct() { expected }; - super.testErrors(pair, true); + testErrors(pair, true); } // Test for https://github.com/antlr/antlr4/issues/3359 @@ -378,7 +373,7 @@ public void AllErrorCodesDistinct() { expected }; - super.testErrors(pair, true); + testErrors(pair, true); } // Test for https://github.com/antlr/antlr4/issues/1203 @@ -395,7 +390,7 @@ public void AllErrorCodesDistinct() { expected }; - super.testErrors(pair, true); + testErrors(pair, true); } @Test public void testEpsilonOptionalAnalysis() { @@ -414,7 +409,7 @@ public void AllErrorCodesDistinct() { expected }; - super.testErrors(pair, true); + testErrors(pair, true); } /** @@ -444,7 +439,7 @@ public void AllErrorCodesDistinct() { ""; String[] pair = new String[] { grammar, expected }; - super.testErrors(pair, true); + testErrors(pair, true); } /** @@ -476,7 +471,7 @@ public void AllErrorCodesDistinct() { expected }; - super.testErrors(pair, true); + testErrors(pair, true); } /** @@ -496,7 +491,7 @@ public void AllErrorCodesDistinct() { expected }; - super.testErrors(pair, true); + testErrors(pair, true); } /** @@ -516,7 +511,7 @@ public void AllErrorCodesDistinct() { expected }; - super.testErrors(pair, true); + testErrors(pair, true); } /** @@ -538,7 +533,7 @@ public void AllErrorCodesDistinct() { expected }; - super.testErrors(pair, true); + testErrors(pair, true); } /** @@ -565,7 +560,7 @@ public void AllErrorCodesDistinct() { expected }; - super.testErrors(pair, true); + testErrors(pair, true); } @Test public void testInvalidCharSetsAndStringLiterals() { @@ -597,7 +592,7 @@ public void AllErrorCodesDistinct() { expected }; - super.testErrors(pair, true); + testErrors(pair, true); } @Test public void testInvalidUnicodeEscapesInCharSet() { @@ -637,7 +632,7 @@ public void AllErrorCodesDistinct() { expected }; - super.testErrors(pair, true); + 
testErrors(pair, true); } /** @@ -659,7 +654,7 @@ public void AllErrorCodesDistinct() { expected }; - super.testErrors(pair, true); + testErrors(pair, true); } /** @@ -688,7 +683,7 @@ public void AllErrorCodesDistinct() { expected }; - super.testErrors(pair, true); + testErrors(pair, true); } /** @@ -709,7 +704,7 @@ public void AllErrorCodesDistinct() { expected }; - super.testErrors(pair, true); + testErrors(pair, true); } /** @@ -718,7 +713,7 @@ public void AllErrorCodesDistinct() { * https://github.com/antlr/antlr4/issues/649 * Stops before processing the lexer */ - @Test public void testInvalidLanguageInGrammarWithLexerCommand() throws Exception { + @Test public void testInvalidLanguageInGrammarWithLexerCommand() { String grammar = "grammar T;\n" + "options { language=Foo; }\n" + @@ -731,7 +726,7 @@ public void AllErrorCodesDistinct() { expected }; - super.testErrors(pair, true); + testErrors(pair, true); } /** @@ -739,7 +734,7 @@ public void AllErrorCodesDistinct() { * null ptr exception.". 
* https://github.com/antlr/antlr4/issues/649 */ - @Test public void testInvalidLanguageInGrammar() throws Exception { + @Test public void testInvalidLanguageInGrammar() { String grammar = "grammar T;\n" + "options { language=Foo; }\n" + @@ -752,7 +747,7 @@ public void AllErrorCodesDistinct() { expected }; - super.testErrors(pair, true); + testErrors(pair, true); } @Test public void testChannelDefinitionInLexer() throws Exception { @@ -770,7 +765,7 @@ public void AllErrorCodesDistinct() { String expected = ""; String[] pair = { grammar, expected }; - super.testErrors(pair, true); + testErrors(pair, true); } @Test public void testChannelDefinitionInParser() throws Exception { @@ -788,7 +783,7 @@ public void AllErrorCodesDistinct() { "error(" + ErrorType.CHANNELS_BLOCK_IN_PARSER_GRAMMAR.code + "): T.g4:3:0: custom channels are not supported in parser grammars\n"; String[] pair = { grammar, expected }; - super.testErrors(pair, true); + testErrors(pair, true); } @Test public void testChannelDefinitionInCombined() throws Exception { @@ -811,7 +806,7 @@ public void AllErrorCodesDistinct() { "error(" + ErrorType.CHANNELS_BLOCK_IN_COMBINED_GRAMMAR.code + "): T.g4:3:0: custom channels are not supported in combined grammars\n"; String[] pair = { grammar, expected }; - super.testErrors(pair, true); + testErrors(pair, true); } /** @@ -838,7 +833,7 @@ public void AllErrorCodesDistinct() { "error(" + ErrorType.CONSTANT_VALUE_IS_NOT_A_RECOGNIZED_CHANNEL_NAME.code + "): T.g4:10:34: NEWLINE_CHANNEL is not a recognized channel name\n"; String[] pair = { grammar, expected }; - super.testErrors(pair, true); + testErrors(pair, true); } // Test for https://github.com/antlr/antlr4/issues/1556 @@ -854,14 +849,14 @@ public void AllErrorCodesDistinct() { expected }; - super.testErrors(pair, true); + testErrors(pair, true); } @Test public void testRuleNamesAsTree() { String grammar = "grammar T;\n" + "tree : 'X';"; - super.testErrors(new String[] { grammar, "" }, true); + testErrors(new 
String[] { grammar, "" }, true); } @Test public void testLexerRuleLabel() { @@ -869,7 +864,7 @@ public void AllErrorCodesDistinct() { "grammar T;\n" + "a : A;\n" + "A : h=~('b'|'c') ;"; - super.testErrors(new String[] { + testErrors(new String[] { grammar, "error(" + ErrorType.SYNTAX_ERROR.code + "): T.g4:3:5: syntax error: '=' came as a complete surprise to me while looking for lexer rule element\n" }, false); } diff --git a/tool-testsuite/test/org/antlr/v4/test/tool/TestTopologicalSort.java b/tool-testsuite/test/org/antlr/v4/test/tool/TestTopologicalSort.java index 7e3dfefe19..6f5635c3bc 100644 --- a/tool-testsuite/test/org/antlr/v4/test/tool/TestTopologicalSort.java +++ b/tool-testsuite/test/org/antlr/v4/test/tool/TestTopologicalSort.java @@ -6,21 +6,14 @@ package org.antlr.v4.test.tool; import org.antlr.v4.misc.Graph; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.util.List; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; /** Test topo sort in GraphNode. 
*/ -public class TestTopologicalSort extends BaseJavaToolTest { - @Before - @Override - public void testSetUp() throws Exception { - super.testSetUp(); - } - +public class TestTopologicalSort { @Test public void testFairlyLargeGraph() throws Exception { Graph g = new Graph(); diff --git a/tool-testsuite/test/org/antlr/v4/test/tool/TestUnbufferedCharStream.java b/tool-testsuite/test/org/antlr/v4/test/tool/TestUnbufferedCharStream.java index 11f781d64f..a738f5bed1 100644 --- a/tool-testsuite/test/org/antlr/v4/test/tool/TestUnbufferedCharStream.java +++ b/tool-testsuite/test/org/antlr/v4/test/tool/TestUnbufferedCharStream.java @@ -14,23 +14,17 @@ import org.antlr.v4.runtime.UnbufferedCharStream; import org.antlr.v4.runtime.misc.Interval; import org.antlr.v4.tool.LexerGrammar; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.io.Reader; import java.io.StringReader; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; @SuppressWarnings("unused") -public class TestUnbufferedCharStream extends BaseJavaToolTest { - @Before - @Override - public void testSetUp() throws Exception { - super.testSetUp(); - } - - @Test public void testNoChar() throws Exception { +public class TestUnbufferedCharStream { + @Test public void testNoChar() { CharStream input = createStream(""); assertEquals(IntStream.EOF, input.LA(1)); assertEquals(IntStream.EOF, input.LA(2)); @@ -41,18 +35,17 @@ public void testSetUp() throws Exception { * EOF symbol is consumed, but {@link UnbufferedCharStream} handles this * particular case by throwing an {@link IllegalStateException}. 
*/ - @Test(expected = IllegalStateException.class) - public void testConsumeEOF() throws Exception { + @Test + public void testConsumeEOF() { CharStream input = createStream(""); assertEquals(IntStream.EOF, input.LA(1)); - input.consume(); - input.consume(); + assertThrows(IllegalStateException.class, input::consume); } - @Test(expected = IllegalArgumentException.class) + @Test public void testNegativeSeek() { CharStream input = createStream(""); - input.seek(-1); + assertThrows(IllegalArgumentException.class, () -> input.seek(-1)); } @Test @@ -69,12 +62,12 @@ public void testSeekPastEOF() { * {@link UnbufferedCharStream} handles this case by throwing an * {@link IllegalStateException}. */ - @Test(expected = IllegalStateException.class) + @Test public void testMarkReleaseOutOfOrder() { CharStream input = createStream(""); int m1 = input.mark(); int m2 = input.mark(); - input.release(m1); + assertThrows(IllegalStateException.class, () -> input.release(m1)); } /** @@ -82,12 +75,12 @@ public void testMarkReleaseOutOfOrder() { * is released twice, but {@link UnbufferedCharStream} handles this case by * throwing an {@link IllegalStateException}. */ - @Test(expected = IllegalStateException.class) + @Test public void testMarkReleasedTwice() { CharStream input = createStream(""); int m1 = input.mark(); input.release(m1); - input.release(m1); + assertThrows(IllegalStateException.class, () -> input.release(m1)); } /** @@ -95,13 +88,13 @@ public void testMarkReleasedTwice() { * is released twice, but {@link UnbufferedCharStream} handles this case by * throwing an {@link IllegalStateException}. 
*/ - @Test(expected = IllegalStateException.class) + @Test public void testNestedMarkReleasedTwice() { CharStream input = createStream(""); int m1 = input.mark(); int m2 = input.mark(); input.release(m2); - input.release(m2); + assertThrows(IllegalStateException.class, () -> input.release(m2)); } /** @@ -109,30 +102,30 @@ public void testNestedMarkReleasedTwice() { * {@link UnbufferedCharStream} creates marks in such a way that this * invalid usage results in an {@link IllegalArgumentException}. */ - @Test(expected = IllegalArgumentException.class) + @Test public void testMarkPassedToSeek() { CharStream input = createStream(""); int m1 = input.mark(); - input.seek(m1); + assertThrows(IllegalArgumentException.class, () -> input.seek(m1)); } - @Test(expected = IllegalArgumentException.class) + @Test public void testSeekBeforeBufferStart() { CharStream input = createStream("xyz"); input.consume(); int m1 = input.mark(); assertEquals(1, input.index()); input.consume(); - input.seek(0); + assertThrows(IllegalArgumentException.class, () -> input.seek(0)); } - @Test(expected = UnsupportedOperationException.class) + @Test public void testGetTextBeforeBufferStart() { CharStream input = createStream("xyz"); input.consume(); int m1 = input.mark(); assertEquals(1, input.index()); - input.getText(new Interval(0, 1)); + assertThrows(UnsupportedOperationException.class, () -> input.getText(new Interval(0, 1))); } @Test @@ -322,19 +315,19 @@ public void testLastChar() { assertEquals("\uFFFF", input.getBuffer()); } - @Test(expected = RuntimeException.class) - public void testDanglingHighSurrogateAtEOFThrows() throws Exception { - createStream("\uD83C"); + @Test + public void testDanglingHighSurrogateAtEOFThrows() { + assertThrows(RuntimeException.class, () -> createStream("\uD83C")); } - @Test(expected = RuntimeException.class) - public void testDanglingHighSurrogateThrows() throws Exception { - createStream("\uD83C\u0123"); + @Test + public void testDanglingHighSurrogateThrows() { 
+ assertThrows(RuntimeException.class, () -> createStream("\uD83C\u0123")); } - @Test(expected = RuntimeException.class) - public void testDanglingLowSurrogateThrows() throws Exception { - createStream("\uDF0E"); + @Test + public void testDanglingLowSurrogateThrows() { + assertThrows(RuntimeException.class, () -> createStream("\uDF0E")); } protected static TestingUnbufferedCharStream createStream(String text) { diff --git a/tool-testsuite/test/org/antlr/v4/test/tool/TestUnbufferedTokenStream.java b/tool-testsuite/test/org/antlr/v4/test/tool/TestUnbufferedTokenStream.java index 700fe74f26..69d62595a5 100644 --- a/tool-testsuite/test/org/antlr/v4/test/tool/TestUnbufferedTokenStream.java +++ b/tool-testsuite/test/org/antlr/v4/test/tool/TestUnbufferedTokenStream.java @@ -14,25 +14,19 @@ import org.antlr.v4.runtime.TokenStream; import org.antlr.v4.runtime.UnbufferedTokenStream; import org.antlr.v4.tool.LexerGrammar; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.io.StringReader; import java.util.Arrays; import java.util.Collections; import java.util.List; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; @SuppressWarnings("unused") -public class TestUnbufferedTokenStream extends BaseJavaToolTest { - @Before - @Override - public void testSetUp() throws Exception { - super.testSetUp(); - } - - @Test public void testLookahead() throws Exception { +public class TestUnbufferedTokenStream { + @Test + public void testLookahead() throws Exception { LexerGrammar g = new LexerGrammar( "lexer grammar t;\n"+ "ID : 'a'..'z'+;\n" + diff --git a/tool-testsuite/test/org/antlr/v4/test/tool/TestUnicodeData.java b/tool-testsuite/test/org/antlr/v4/test/tool/TestUnicodeData.java index a707598104..c50409ae16 100644 --- a/tool-testsuite/test/org/antlr/v4/test/tool/TestUnicodeData.java +++ b/tool-testsuite/test/org/antlr/v4/test/tool/TestUnicodeData.java @@ -5,24 +5,13 @@ */ package 
org.antlr.v4.test.tool; - -import java.util.Map; - import org.antlr.v4.unicode.UnicodeData; -import org.antlr.v4.runtime.misc.IntervalSet; -import org.junit.Test; -import org.junit.Rule; -import org.junit.rules.ExpectedException; +import org.junit.jupiter.api.Test; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.*; public class TestUnicodeData { - @Rule - public ExpectedException thrown = ExpectedException.none(); - @Test public void testUnicodeGeneralCategoriesLatin() { assertTrue(UnicodeData.getPropertyCodePoints("Lu").contains('X')); @@ -141,53 +130,52 @@ public void testUnicodeBlockAliases() { @Test public void testEnumeratedPropertyEquals() { assertFalse( - "U+1F47E ALIEN MONSTER is not an emoji modifier", - UnicodeData.getPropertyCodePoints("Grapheme_Cluster_Break=E_Base").contains(0x1F47E)); + UnicodeData.getPropertyCodePoints("Grapheme_Cluster_Break=E_Base").contains(0x1F47E), + "U+1F47E ALIEN MONSTER is not an emoji modifier"); assertFalse( - "U+1038 MYANMAR SIGN VISARGA is not a spacing mark", - UnicodeData.getPropertyCodePoints("Grapheme_Cluster_Break=E_Base").contains(0x1038)); + UnicodeData.getPropertyCodePoints("Grapheme_Cluster_Break=E_Base").contains(0x1038), + "U+1038 MYANMAR SIGN VISARGA is not a spacing mark"); assertTrue( - "U+00A1 INVERTED EXCLAMATION MARK has ambiguous East Asian Width", - UnicodeData.getPropertyCodePoints("East_Asian_Width=Ambiguous").contains(0x00A1)); + UnicodeData.getPropertyCodePoints("East_Asian_Width=Ambiguous").contains(0x00A1), + "U+00A1 INVERTED EXCLAMATION MARK has ambiguous East Asian Width"); assertFalse( - "U+00A2 CENT SIGN does not have ambiguous East Asian Width", - UnicodeData.getPropertyCodePoints("East_Asian_Width=Ambiguous").contains(0x00A2)); - + UnicodeData.getPropertyCodePoints("East_Asian_Width=Ambiguous").contains(0x00A2), + "U+00A2 CENT SIGN does not have ambiguous 
East Asian Width"); } @Test public void extendedPictographic() { assertTrue( - "U+1F588 BLACK PUSHPIN is in Extended Pictographic", - UnicodeData.getPropertyCodePoints("Extended_Pictographic").contains(0x1F588)); + UnicodeData.getPropertyCodePoints("Extended_Pictographic").contains(0x1F588), + "U+1F588 BLACK PUSHPIN is in Extended Pictographic"); assertFalse( - "0 is not in Extended Pictographic", - UnicodeData.getPropertyCodePoints("Extended_Pictographic").contains('0')); + UnicodeData.getPropertyCodePoints("Extended_Pictographic").contains('0'), + "0 is not in Extended Pictographic"); } @Test public void emojiPresentation() { assertTrue( - "U+1F4A9 PILE OF POO is in EmojiPresentation=EmojiDefault", - UnicodeData.getPropertyCodePoints("EmojiPresentation=EmojiDefault").contains(0x1F4A9)); + UnicodeData.getPropertyCodePoints("EmojiPresentation=EmojiDefault").contains(0x1F4A9), + "U+1F4A9 PILE OF POO is in EmojiPresentation=EmojiDefault"); assertFalse( - "0 is not in EmojiPresentation=EmojiDefault", - UnicodeData.getPropertyCodePoints("EmojiPresentation=EmojiDefault").contains('0')); + UnicodeData.getPropertyCodePoints("EmojiPresentation=EmojiDefault").contains('0'), + "0 is not in EmojiPresentation=EmojiDefault"); assertFalse( - "A is not in EmojiPresentation=EmojiDefault", - UnicodeData.getPropertyCodePoints("EmojiPresentation=EmojiDefault").contains('A')); + UnicodeData.getPropertyCodePoints("EmojiPresentation=EmojiDefault").contains('A'), + "A is not in EmojiPresentation=EmojiDefault"); assertFalse( - "U+1F4A9 PILE OF POO is not in EmojiPresentation=TextDefault", - UnicodeData.getPropertyCodePoints("EmojiPresentation=TextDefault").contains(0x1F4A9)); + UnicodeData.getPropertyCodePoints("EmojiPresentation=TextDefault").contains(0x1F4A9), + "U+1F4A9 PILE OF POO is not in EmojiPresentation=TextDefault"); assertTrue( - "0 is in EmojiPresentation=TextDefault", - UnicodeData.getPropertyCodePoints("EmojiPresentation=TextDefault").contains('0')); + 
UnicodeData.getPropertyCodePoints("EmojiPresentation=TextDefault").contains('0'), + "0 is in EmojiPresentation=TextDefault"); assertFalse( - "A is not in EmojiPresentation=TextDefault", - UnicodeData.getPropertyCodePoints("EmojiPresentation=TextDefault").contains('A')); + UnicodeData.getPropertyCodePoints("EmojiPresentation=TextDefault").contains('A'), + "A is not in EmojiPresentation=TextDefault"); } @Test @@ -205,8 +193,7 @@ public void testPropertyDashSameAsUnderscore() { @Test public void modifyingUnicodeDataShouldThrow() { - thrown.expect(IllegalStateException.class); - thrown.expectMessage("can't alter readonly IntervalSet"); - UnicodeData.getPropertyCodePoints("L").add(0x12345); + IllegalStateException exception = assertThrows(IllegalStateException.class, () -> UnicodeData.getPropertyCodePoints("L").add(0x12345)); + assertEquals("can't alter readonly IntervalSet", exception.getMessage()); } } diff --git a/tool-testsuite/test/org/antlr/v4/test/tool/TestUnicodeEscapes.java b/tool-testsuite/test/org/antlr/v4/test/tool/TestUnicodeEscapes.java index 66941b9b6f..9bcbece485 100644 --- a/tool-testsuite/test/org/antlr/v4/test/tool/TestUnicodeEscapes.java +++ b/tool-testsuite/test/org/antlr/v4/test/tool/TestUnicodeEscapes.java @@ -7,9 +7,9 @@ package org.antlr.v4.test.tool; import org.antlr.v4.codegen.UnicodeEscapes; -import org.junit.Test; +import org.junit.jupiter.api.Test; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; public class TestUnicodeEscapes { @Test diff --git a/tool-testsuite/test/org/antlr/v4/test/tool/TestUnicodeGrammar.java b/tool-testsuite/test/org/antlr/v4/test/tool/TestUnicodeGrammar.java index fb19531722..ad8cc4fb37 100644 --- a/tool-testsuite/test/org/antlr/v4/test/tool/TestUnicodeGrammar.java +++ b/tool-testsuite/test/org/antlr/v4/test/tool/TestUnicodeGrammar.java @@ -15,15 +15,15 @@ import org.antlr.v4.runtime.tree.ParseTree; import org.antlr.v4.tool.Grammar; import 
org.antlr.v4.tool.GrammarParserInterpreter; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.io.ByteArrayInputStream; import java.io.InputStreamReader; import java.nio.charset.StandardCharsets; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; -public class TestUnicodeGrammar extends BaseJavaToolTest { +public class TestUnicodeGrammar { @Test public void unicodeBMPLiteralInGrammar() throws Exception { String grammarText = diff --git a/tool-testsuite/test/org/antlr/v4/test/tool/TestUtils.java b/tool-testsuite/test/org/antlr/v4/test/tool/TestUtils.java index c9ba151888..732ab8156f 100644 --- a/tool-testsuite/test/org/antlr/v4/test/tool/TestUtils.java +++ b/tool-testsuite/test/org/antlr/v4/test/tool/TestUtils.java @@ -3,30 +3,31 @@ import org.antlr.runtime.Token; import org.antlr.v4.misc.Utils; import org.antlr.v4.tool.ast.GrammarAST; -import org.junit.Assert; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.util.ArrayList; +import static org.junit.jupiter.api.Assertions.*; + public class TestUtils { @Test public void testStripFileExtension() { - Assert.assertNull(Utils.stripFileExtension(null)); - Assert.assertEquals("foo", Utils.stripFileExtension("foo")); - Assert.assertEquals("foo", Utils.stripFileExtension("foo.txt")); + assertNull(Utils.stripFileExtension(null)); + assertEquals("foo", Utils.stripFileExtension("foo")); + assertEquals("foo", Utils.stripFileExtension("foo.txt")); } @Test public void testJoin() { - Assert.assertEquals("foobbar", + assertEquals("foobbar", Utils.join(new String[]{"foo", "bar"}, "b")); - Assert.assertEquals("foo,bar", + assertEquals("foo,bar", Utils.join(new String[]{"foo", "bar"}, ",")); } @Test public void testSortLinesInString() { - Assert.assertEquals("bar\nbaz\nfoo\n", + assertEquals("bar\nbaz\nfoo\n", Utils.sortLinesInString("foo\nbar\nbaz")); } @@ -37,18 +38,18 @@ public void testNodesToStrings() { values.add(new 
GrammarAST(Token.DOWN)); values.add(new GrammarAST(Token.UP)); - Assert.assertNull(Utils.nodesToStrings(null)); - Assert.assertNotNull(Utils.nodesToStrings(values)); + assertNull(Utils.nodesToStrings(null)); + assertNotNull(Utils.nodesToStrings(values)); } @Test public void testCapitalize() { - Assert.assertEquals("Foo", Utils.capitalize("foo")); + assertEquals("Foo", Utils.capitalize("foo")); } @Test public void testDecapitalize() { - Assert.assertEquals("fOO", Utils.decapitalize("FOO")); + assertEquals("fOO", Utils.decapitalize("FOO")); } @Test @@ -68,8 +69,8 @@ public Object exec(Object arg1) { retval.add("baz"); retval.add("baz"); - Assert.assertEquals(retval, Utils.select(strings, func1)); - Assert.assertNull(Utils.select(null, null)); + assertEquals(retval, Utils.select(strings, func1)); + assertNull(Utils.select(null, null)); } @Test @@ -77,9 +78,9 @@ public void testFind() { ArrayList strings = new ArrayList<>(); strings.add("foo"); strings.add("bar"); - Assert.assertEquals("foo", Utils.find(strings, String.class)); + assertEquals("foo", Utils.find(strings, String.class)); - Assert.assertNull(Utils.find(new ArrayList<>(), String.class)); + assertNull(Utils.find(new ArrayList<>(), String.class)); } @Test @@ -93,8 +94,8 @@ public boolean select(Object o) { return true; } }; - Assert.assertEquals(0, Utils.indexOf(strings, filter)); - Assert.assertEquals(-1, Utils.indexOf(new ArrayList<>(), null)); + assertEquals(0, Utils.indexOf(strings, filter)); + assertEquals(-1, Utils.indexOf(new ArrayList<>(), null)); } @Test @@ -108,8 +109,8 @@ public boolean select(Object o) { return true; } }; - Assert.assertEquals(1, Utils.lastIndexOf(strings, filter)); - Assert.assertEquals(-1, Utils.lastIndexOf(new ArrayList<>(), null)); + assertEquals(1, Utils.lastIndexOf(strings, filter)); + assertEquals(-1, Utils.lastIndexOf(new ArrayList<>(), null)); } @Test @@ -118,12 +119,12 @@ public void testSetSize() { strings.add("foo"); strings.add("bar"); strings.add("baz"); - 
Assert.assertEquals(3, strings.size()); + assertEquals(3, strings.size()); Utils.setSize(strings, 2); - Assert.assertEquals(2, strings.size()); + assertEquals(2, strings.size()); Utils.setSize(strings, 4); - Assert.assertEquals(4, strings.size()); + assertEquals(4, strings.size()); } } diff --git a/tool-testsuite/test/org/antlr/v4/test/tool/TestVocabulary.java b/tool-testsuite/test/org/antlr/v4/test/tool/TestVocabulary.java index 6a880c7c4a..d766f4e1a5 100644 --- a/tool-testsuite/test/org/antlr/v4/test/tool/TestVocabulary.java +++ b/tool-testsuite/test/org/antlr/v4/test/tool/TestVocabulary.java @@ -8,26 +8,20 @@ import org.antlr.v4.runtime.Token; import org.antlr.v4.runtime.Vocabulary; import org.antlr.v4.runtime.VocabularyImpl; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.*; /** * * @author Sam Harwell */ -public class TestVocabulary extends BaseJavaToolTest { - @Before - @Override - public void testSetUp() throws Exception { - super.testSetUp(); - } - +public class TestVocabulary { @Test public void testEmptyVocabulary() { - Assert.assertNotNull(VocabularyImpl.EMPTY_VOCABULARY); - Assert.assertEquals("EOF", VocabularyImpl.EMPTY_VOCABULARY.getSymbolicName(Token.EOF)); - Assert.assertEquals("0", VocabularyImpl.EMPTY_VOCABULARY.getDisplayName(Token.INVALID_TYPE)); + assertNotNull(VocabularyImpl.EMPTY_VOCABULARY); + assertEquals("EOF", VocabularyImpl.EMPTY_VOCABULARY.getSymbolicName(Token.EOF)); + assertEquals("0", VocabularyImpl.EMPTY_VOCABULARY.getDisplayName(Token.INVALID_TYPE)); } @Test @@ -38,24 +32,23 @@ public void testVocabularyFromTokenNames() { }; Vocabulary vocabulary = VocabularyImpl.fromTokenNames(tokenNames); - Assert.assertNotNull(vocabulary); - Assert.assertEquals("EOF", vocabulary.getSymbolicName(Token.EOF)); + assertNotNull(vocabulary); + assertEquals("EOF", vocabulary.getSymbolicName(Token.EOF)); for (int i = 0; i < 
tokenNames.length; i++) { - Assert.assertEquals(tokenNames[i], vocabulary.getDisplayName(i)); + assertEquals(tokenNames[i], vocabulary.getDisplayName(i)); if (tokenNames[i].startsWith("'")) { - Assert.assertEquals(tokenNames[i], vocabulary.getLiteralName(i)); - Assert.assertNull(vocabulary.getSymbolicName(i)); + assertEquals(tokenNames[i], vocabulary.getLiteralName(i)); + assertNull(vocabulary.getSymbolicName(i)); } else if (Character.isUpperCase(tokenNames[i].charAt(0))) { - Assert.assertNull(vocabulary.getLiteralName(i)); - Assert.assertEquals(tokenNames[i], vocabulary.getSymbolicName(i)); + assertNull(vocabulary.getLiteralName(i)); + assertEquals(tokenNames[i], vocabulary.getSymbolicName(i)); } else { - Assert.assertNull(vocabulary.getLiteralName(i)); - Assert.assertNull(vocabulary.getSymbolicName(i)); + assertNull(vocabulary.getLiteralName(i)); + assertNull(vocabulary.getSymbolicName(i)); } } } - } diff --git a/tool-testsuite/test/org/antlr/v4/test/tool/TestXPath.java b/tool-testsuite/test/org/antlr/v4/test/tool/TestXPath.java index d3b0b0a2b3..4001f6be64 100644 --- a/tool-testsuite/test/org/antlr/v4/test/tool/TestXPath.java +++ b/tool-testsuite/test/org/antlr/v4/test/tool/TestXPath.java @@ -6,24 +6,28 @@ package org.antlr.v4.test.tool; -import org.antlr.v4.runtime.Lexer; import org.antlr.v4.runtime.Parser; import org.antlr.v4.runtime.RuleContext; import org.antlr.v4.runtime.misc.Pair; import org.antlr.v4.runtime.tree.ParseTree; import org.antlr.v4.runtime.tree.TerminalNode; import org.antlr.v4.runtime.tree.xpath.XPath; -import org.junit.Before; -import org.junit.Test; +import org.antlr.v4.test.runtime.RunOptions; +import org.antlr.v4.test.runtime.Stage; +import org.antlr.v4.test.runtime.java.JavaRunner; +import org.antlr.v4.test.runtime.states.JavaCompiledState; +import org.antlr.v4.test.runtime.states.JavaExecutedState; +import org.junit.jupiter.api.Test; import java.util.ArrayList; +import java.util.Collection; import java.util.List; -import static 
org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; +import static org.antlr.v4.test.tool.ToolTestUtils.createOptionsForJavaToolTests; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; -public class TestXPath extends BaseJavaToolTest { +public class TestXPath { public static final String grammar = "grammar Expr;\n" + "prog: func+ ;\n" + @@ -58,18 +62,7 @@ public class TestXPath extends BaseJavaToolTest { "def f(x,y) { x = 3+4; y; ; }\n" + "def g(x) { return 1+2*x; }\n"; - @Before - @Override - public void testSetUp() throws Exception { - super.testSetUp(); - } - @Test public void testValidPaths() throws Exception { - boolean ok = - rawGenerateAndBuildRecognizer("Expr.g4", grammar, "ExprParser", - "ExprLexer", false); - assertTrue(ok); - String xpath[] = { "/prog/func", // all funcs under prog at root "/prog/*", // all children of prog at root @@ -118,96 +111,61 @@ public void testSetUp() throws Exception { }; for (int i=0; i nodes = getNodeStrings(SAMPLE_PROGRAM, xpath[i], "prog", "ExprParser", "ExprLexer"); + List nodes = getNodeStrings("Expr.g4", grammar, SAMPLE_PROGRAM, xpath[i], "prog", "ExprParser", "ExprLexer"); String result = nodes.toString(); - assertEquals("path "+xpath[i]+" failed", expected[i], result); + assertEquals(expected[i], result, "path "+xpath[i]+" failed"); } } @Test public void testWeirdChar() throws Exception { - boolean ok = - rawGenerateAndBuildRecognizer("Expr.g4", grammar, "ExprParser", - "ExprLexer", false); - assertTrue(ok); - String path = "&"; String expected = "Invalid tokens or characters at index 0 in path '&'"; - testError(SAMPLE_PROGRAM, path, expected, "prog", "ExprParser", "ExprLexer"); + testError("Expr.g4", grammar, SAMPLE_PROGRAM, path, expected, "prog", "ExprParser", "ExprLexer"); } @Test public void testWeirdChar2() throws Exception { - boolean ok = - 
rawGenerateAndBuildRecognizer("Expr.g4", grammar, "ExprParser", - "ExprLexer", false); - assertTrue(ok); - String path = "//w&e/"; String expected = "Invalid tokens or characters at index 3 in path '//w&e/'"; - testError(SAMPLE_PROGRAM, path, expected, "prog", "ExprParser", "ExprLexer"); + testError("Expr.g4", grammar, SAMPLE_PROGRAM, path, expected, "prog", "ExprParser", "ExprLexer"); } @Test public void testBadSyntax() throws Exception { - boolean ok = - rawGenerateAndBuildRecognizer("Expr.g4", grammar, "ExprParser", - "ExprLexer", false); - assertTrue(ok); - String path = "///"; String expected = "/ at index 2 isn't a valid rule name"; - testError(SAMPLE_PROGRAM, path, expected, "prog", "ExprParser", "ExprLexer"); + testError("Expr.g4", grammar, SAMPLE_PROGRAM, path, expected, "prog", "ExprParser", "ExprLexer"); } @Test public void testMissingWordAtEnd() throws Exception { - boolean ok = - rawGenerateAndBuildRecognizer("Expr.g4", grammar, "ExprParser", - "ExprLexer", false); - assertTrue(ok); - String path = "//"; String expected = "Missing path element at end of path"; - testError(SAMPLE_PROGRAM, path, expected, "prog", "ExprParser", "ExprLexer"); + testError("Expr.g4", grammar, SAMPLE_PROGRAM, path, expected, "prog", "ExprParser", "ExprLexer"); } @Test public void testBadTokenName() throws Exception { - boolean ok = - rawGenerateAndBuildRecognizer("Expr.g4", grammar, "ExprParser", - "ExprLexer", false); - assertTrue(ok); - String path = "//Ick"; String expected = "Ick at index 2 isn't a valid token name"; - testError(SAMPLE_PROGRAM, path, expected, "prog", "ExprParser", "ExprLexer"); + testError("Expr.g4", grammar, SAMPLE_PROGRAM, path, expected, "prog", "ExprParser", "ExprLexer"); } @Test public void testBadRuleName() throws Exception { - boolean ok = - rawGenerateAndBuildRecognizer("Expr.g4", grammar, "ExprParser", - "ExprLexer", false); - assertTrue(ok); - String path = "/prog/ick"; String expected = "ick at index 6 isn't a valid rule name"; - 
testError(SAMPLE_PROGRAM, path, expected, "prog", "ExprParser", "ExprLexer"); + testError("Expr.g4", grammar, SAMPLE_PROGRAM, path, expected, "prog", "ExprParser", "ExprLexer"); } - protected void testError(String input, String path, String expected, - String startRuleName, - String parserName, String lexerName) + private void testError(String grammarFileName, String grammar, String input, String xpath, String expected, + String startRuleName, String parserName, String lexerName) throws Exception { - Pair pl = getParserAndLexer(input, parserName, lexerName); - Parser parser = pl.a; - ParseTree tree = execStartRule(startRuleName, parser); - IllegalArgumentException e = null; try { - XPath.findAll(tree, path, parser); + compileAndExtract(grammarFileName, grammar, input, xpath, startRuleName, parserName, lexerName); } catch (IllegalArgumentException iae) { e = iae; @@ -216,20 +174,18 @@ protected void testError(String input, String path, String expected, assertEquals(expected, e.getMessage()); } - public List getNodeStrings(String input, String xpath, - String startRuleName, - String parserName, String lexerName) + private List getNodeStrings(String grammarFileName, String grammar, String input, String xpath, + String startRuleName, String parserName, String lexerName) throws Exception { - Pair pl = getParserAndLexer(input, parserName, lexerName); - Parser parser = pl.a; - ParseTree tree = execStartRule(startRuleName, parser); + Pair> result = compileAndExtract( + grammarFileName, grammar, input, xpath, startRuleName, parserName, lexerName); - List nodes = new ArrayList(); - for (ParseTree t : XPath.findAll(tree, xpath, parser) ) { + List nodes = new ArrayList<>(); + for (ParseTree t : result.b) { if ( t instanceof RuleContext) { RuleContext r = (RuleContext)t; - nodes.add(parser.getRuleNames()[r.getRuleIndex()]); + nodes.add(result.a[r.getRuleIndex()]); } else { TerminalNode token = (TerminalNode)t; @@ -238,4 +194,21 @@ public List getNodeStrings(String input, String 
xpath, } return nodes; } + + private Pair> compileAndExtract(String grammarFileName, String grammar, + String input, String xpath, String startRuleName, + String parserName, String lexerName + ) throws Exception { + RunOptions runOptions = createOptionsForJavaToolTests(grammarFileName, grammar, parserName, lexerName, + false, false, startRuleName, input, + false, false, Stage.Execute, true); + try (JavaRunner runner = new JavaRunner()) { + JavaExecutedState executedState = (JavaExecutedState)runner.run(runOptions); + JavaCompiledState compiledState = (JavaCompiledState)executedState.previousState; + Parser parser = compiledState.initializeLexerAndParser(input).b; + Collection found = XPath.findAll(executedState.parseTree, xpath, parser); + + return new Pair<>(parser.getRuleNames(), found); + } + } } diff --git a/tool-testsuite/test/org/antlr/v4/test/tool/ToolTestUtils.java b/tool-testsuite/test/org/antlr/v4/test/tool/ToolTestUtils.java new file mode 100644 index 0000000000..b68286ae35 --- /dev/null +++ b/tool-testsuite/test/org/antlr/v4/test/tool/ToolTestUtils.java @@ -0,0 +1,205 @@ +/* + * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + * Use of this file is governed by the BSD 3-clause license that + * can be found in the LICENSE.txt file in the project root. 
+ */ + +package org.antlr.v4.test.tool; + +import org.antlr.v4.Tool; +import org.antlr.v4.automata.LexerATNFactory; +import org.antlr.v4.automata.ParserATNFactory; +import org.antlr.v4.runtime.ANTLRInputStream; +import org.antlr.v4.runtime.Lexer; +import org.antlr.v4.runtime.Token; +import org.antlr.v4.runtime.atn.ATN; +import org.antlr.v4.runtime.atn.ATNDeserializer; +import org.antlr.v4.runtime.atn.ATNSerializer; +import org.antlr.v4.runtime.atn.LexerATNSimulator; +import org.antlr.v4.runtime.misc.IntegerList; +import org.antlr.v4.semantics.SemanticPipeline; +import org.antlr.v4.test.runtime.*; +import org.antlr.v4.test.runtime.java.JavaRunner; +import org.antlr.v4.test.runtime.states.ExecutedState; +import org.antlr.v4.test.runtime.states.State; +import org.antlr.v4.tool.Grammar; +import org.antlr.v4.tool.LexerGrammar; + +import java.io.File; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.List; + +import static org.antlr.v4.test.runtime.FileUtils.deleteDirectory; +import static org.antlr.v4.test.runtime.Generator.antlrOnString; +import static org.antlr.v4.test.runtime.RuntimeTestUtils.TempDirectory; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.fail; + +public class ToolTestUtils { + public static ExecutedState execLexer(String grammarFileName, String grammarStr, String lexerName, String input) { + return execLexer(grammarFileName, grammarStr, lexerName, input, null, false); + } + + public static ExecutedState execLexer(String grammarFileName, String grammarStr, String lexerName, String input, + Path tempDir, boolean saveTestDir) { + return execRecognizer(grammarFileName, grammarStr, null, lexerName, + null, input, false, tempDir, saveTestDir); + } + + public static ExecutedState execParser(String grammarFileName, String grammarStr, + String parserName, String lexerName, String 
startRuleName, + String input, boolean showDiagnosticErrors + ) { + return execParser(grammarFileName, grammarStr, parserName, lexerName, startRuleName, + input, showDiagnosticErrors, null); + } + + public static ExecutedState execParser(String grammarFileName, String grammarStr, + String parserName, String lexerName, String startRuleName, + String input, boolean showDiagnosticErrors, Path workingDir + ) { + return execRecognizer(grammarFileName, grammarStr, parserName, lexerName, + startRuleName, input, showDiagnosticErrors, workingDir, false); + } + + private static ExecutedState execRecognizer(String grammarFileName, String grammarStr, + String parserName, String lexerName, String startRuleName, + String input, boolean showDiagnosticErrors, + Path workingDir, boolean saveTestDir) { + RunOptions runOptions = createOptionsForJavaToolTests(grammarFileName, grammarStr, parserName, lexerName, + false, true, startRuleName, input, + false, showDiagnosticErrors, Stage.Execute, false); + try (JavaRunner runner = new JavaRunner(workingDir, saveTestDir)) { + State result = runner.run(runOptions); + if (!(result instanceof ExecutedState)) { + fail(result.getErrorMessage()); + } + return (ExecutedState) result; + } + } + + public static RunOptions createOptionsForJavaToolTests( + String grammarFileName, String grammarStr, String parserName, String lexerName, + boolean useListener, boolean useVisitor, String startRuleName, + String input, boolean profile, boolean showDiagnosticErrors, + Stage endStage, boolean returnObject + ) { + return new RunOptions(grammarFileName, grammarStr, parserName, lexerName, useListener, useVisitor, startRuleName, + input, profile, showDiagnosticErrors, false, endStage, returnObject, "Java", + JavaRunner.runtimeTestParserName); + } + + public static void testErrors(String[] pairs, boolean printTree) { + for (int i = 0; i < pairs.length; i += 2) { + String grammarStr = pairs[i]; + String expect = pairs[i + 1]; + + String[] lines = 
grammarStr.split("\n"); + String fileName = getFilenameFromFirstLineOfGrammar(lines[0]); + + String tempDirName = "AntlrTestErrors-" + Thread.currentThread().getName() + "-" + System.currentTimeMillis(); + String tempTestDir = Paths.get(TempDirectory, tempDirName).toString(); + + try { + ErrorQueue equeue = antlrOnString(tempTestDir, null, fileName, grammarStr, false); + + String actual = equeue.toString(true); + actual = actual.replace(tempTestDir + File.separator, ""); + String msg = grammarStr; + msg = msg.replace("\n", "\\n"); + msg = msg.replace("\r", "\\r"); + msg = msg.replace("\t", "\\t"); + + assertEquals(expect, actual, "error in: " + msg); + } + finally { + try { + deleteDirectory(new File(tempTestDir)); + } catch (IOException ignored) { + } + } + } + } + + public static String getFilenameFromFirstLineOfGrammar(String line) { + String fileName = "A" + Tool.GRAMMAR_EXTENSION; + int grIndex = line.lastIndexOf("grammar"); + int semi = line.lastIndexOf(';'); + if ( grIndex>=0 && semi>=0 ) { + int space = line.indexOf(' ', grIndex); + fileName = line.substring(space+1, semi)+Tool.GRAMMAR_EXTENSION; + } + if ( fileName.length()==Tool.GRAMMAR_EXTENSION.length() ) fileName = "A" + Tool.GRAMMAR_EXTENSION; + return fileName; + } + + public static List realElements(List elements) { + return elements.subList(Token.MIN_USER_TOKEN_TYPE, elements.size()); + } + + public static String load(String fileName) + throws IOException { + if ( fileName==null ) { + return null; + } + + String fullFileName = ToolTestUtils.class.getPackage().getName().replace('.', '/')+'/'+fileName; + int size = 65000; + InputStream fis = ToolTestUtils.class.getClassLoader().getResourceAsStream(fullFileName); + try (InputStreamReader isr = new InputStreamReader(fis)) { + char[] data = new char[size]; + int n = isr.read(data); + return new String(data, 0, n); + } + } + + public static ATN createATN(Grammar g, boolean useSerializer) { + if ( g.atn==null ) { + semanticProcess(g); + assertEquals(0, 
g.tool.getNumErrors()); + + ParserATNFactory f = g.isLexer() ? new LexerATNFactory((LexerGrammar) g) : new ParserATNFactory(g); + + g.atn = f.createATN(); + assertEquals(0, g.tool.getNumErrors()); + } + + ATN atn = g.atn; + if ( useSerializer ) { + // sets some flags in ATN + IntegerList serialized = ATNSerializer.getSerialized(atn); + return new ATNDeserializer().deserialize(serialized.toArray()); + } + + return atn; + } + + public static void semanticProcess(Grammar g) { + if ( g.ast!=null && !g.ast.hasErrors ) { +// System.out.println(g.ast.toStringTree()); + Tool antlr = new Tool(); + SemanticPipeline sem = new SemanticPipeline(g); + sem.process(); + if ( g.getImportedGrammars()!=null ) { // process imported grammars (if any) + for (Grammar imp : g.getImportedGrammars()) { + antlr.processNonCombinedGrammar(imp, false); + } + } + } + } + + public static IntegerList getTokenTypesViaATN(String input, LexerATNSimulator lexerATN) { + ANTLRInputStream in = new ANTLRInputStream(input); + IntegerList tokenTypes = new IntegerList(); + int ttype; + do { + ttype = lexerATN.match(in, Lexer.DEFAULT_MODE); + tokenTypes.add(ttype); + } while ( ttype!= Token.EOF ); + return tokenTypes; + } +} diff --git a/tool/MIGRATION.txt b/tool/MIGRATION.txt deleted file mode 100644 index c4aacd8fe0..0000000000 --- a/tool/MIGRATION.txt +++ /dev/null @@ -1,23 +0,0 @@ -Parsers - -* Full context LL(*) not SLL(*) -* Adaptive, takes all but indirect left-recursion - -Actions/scopes - -* no global scopes. no scope[n]. 
- -Trees - -* no ASTs -* no tree grammars -* parse trees created by default -* moved methods to Trees - -Lexers - -* Added [Abc] notation - -* unicode rule/token names - -* -> skip notation \ No newline at end of file diff --git a/tool/playground/Main.java b/tool/playground/Main.java deleted file mode 100644 index 8001d315a1..0000000000 --- a/tool/playground/Main.java +++ /dev/null @@ -1,44 +0,0 @@ -public class Main -{ -// public static void main(String[] args) -// { -// TParser parser = new TParser(new CommonTokenStream(new TLexer(new ANTLRInputStream("b")))); -// parser.addParseListener(new MyTBaseListener()); -// -// parser.a(); -// -// System.out.println("######################"); -// parser = new TParser(new CommonTokenStream(new TLexer(new ANTLRInputStream("x")))); -// parser.addParseListener(new MyTBaseListener()); -// parser.b(); -// } -// -// private static class MyTBaseListener extends TBaseListener { -// @Override -// public void enterAlt1(TParser.Alt1Context ctx) -// { -// System.out.println("entering alt1"); -// } -// -// @Override -// public void exitAlt1(TParser.Alt1Context ctx) -// { -// System.out.println("exiting alt1"); -// } -// -// @Override -// public void enterB(TParser.BContext ctx) { -// System.out.println("enter b"); -// } -// -// @Override -// public void exitB(TParser.BContext ctx) { -// System.out.println("exiting b"); -// } -// -// @Override -// public void enterEveryRule(ParserRuleContext ctx) { -// System.out.println("enterEveryRule"); -// } -// } -} diff --git a/tool/playground/T.g4 b/tool/playground/T.g4 deleted file mode 100644 index 47265ebbfc..0000000000 --- a/tool/playground/T.g4 +++ /dev/null @@ -1,12 +0,0 @@ -grammar T; - -a - : 'b' #alt1 - | 'c' #alt2 - ; - -b : 'x' | 'y' {} ; - -e : e '*' e - | 'foo' - ; diff --git a/tool/pom.xml b/tool/pom.xml index b559c3980e..5ceb0df332 100644 --- a/tool/pom.xml +++ b/tool/pom.xml @@ -9,7 +9,7 @@ org.antlr antlr4-master - 4.10.2-SNAPSHOT + 4.11.0-SNAPSHOT antlr4 ANTLR 4 Tool @@ -34,7 
+34,7 @@ org.antlr ST4 - 4.3.3 + 4.3.4 org.abego.treelayout @@ -44,12 +44,12 @@ org.glassfish javax.json - 1.0.4 + 1.1.4 com.ibm.icu icu4j - 69.1 + 71.1 diff --git a/tool/resources/org/antlr/v4/tool/templates/codegen/CSharp/CSharp.stg b/tool/resources/org/antlr/v4/tool/templates/codegen/CSharp/CSharp.stg index 2df658642f..e0dd9a9887 100644 --- a/tool/resources/org/antlr/v4/tool/templates/codegen/CSharp/CSharp.stg +++ b/tool/resources/org/antlr/v4/tool/templates/codegen/CSharp/CSharp.stg @@ -557,7 +557,7 @@ ErrorHandler.Sync(this); = TokenStream.LT(1); switch (TokenStream.LA(1)) { - + break;}; separator="\n"> default: @@ -569,7 +569,7 @@ LL1OptionalBlock(choice, alts, error) ::= << State = ; ErrorHandler.Sync(this); switch (TokenStream.LA(1)) { - + break;}; separator="\n"> default: @@ -676,7 +676,7 @@ Sync(s) ::= "Sync();" ThrowNoViableAlt(t) ::= "throw new NoViableAltException(this);" TestSetInline(s) ::= << -}; separator=" || "> +}; separator=" || "> >> // Java language spec 15.19 - shift operators mask operands rather than overflow to 0... 
need range test @@ -684,9 +684,9 @@ testShiftInRange(shiftAmount) ::= << (() & ~0x3f) == 0 >> -// produces smaller bytecode only when bits.ttypes contains more than two items +// produces smaller bytecode only when bits.tokens contains more than two items bitsetBitfieldComparison(s, bits) ::= <% -(})> && ((1L \<\< ) & ()}; separator=" | ">)) != 0) +})> && ((1L \<\< ) & L) != 0 %> isZero ::= [ @@ -698,13 +698,12 @@ offsetShift(shiftAmount, offset) ::= <% ( - ) %> -// produces more efficient bytecode when bits.ttypes contains at most two items bitsetInlineComparison(s, bits) ::= <% -==}; separator=" || "> +==}; separator=" || "> %> -cases(ttypes) ::= << -:}; separator="\n"> +cases(tokens) ::= << +:}; separator="\n"> >> InvokeRule(r, argExprsChunks) ::= << diff --git a/tool/resources/org/antlr/v4/tool/templates/codegen/Cpp/Cpp.stg b/tool/resources/org/antlr/v4/tool/templates/codegen/Cpp/Cpp.stg index cd48bbe004..81a6162a6f 100644 --- a/tool/resources/org/antlr/v4/tool/templates/codegen/Cpp/Cpp.stg +++ b/tool/resources/org/antlr/v4/tool/templates/codegen/Cpp/Cpp.stg @@ -143,7 +143,7 @@ struct StaticData final { std::unique_ptr\ atn; }; -std::once_flag LexerOnceFlag; +::antlr4::internal::OnceFlag LexerOnceFlag; StaticData *LexerStaticData = nullptr; void LexerInitialize() { @@ -238,7 +238,7 @@ bool ::sempred(RuleContext *context, size_t ruleIndex, size_t predic void ::initialize() { - std::call_once(LexerOnceFlag, LexerInitialize); + ::antlr4::internal::call_once(LexerOnceFlag, LexerInitialize); } >> @@ -363,7 +363,7 @@ struct StaticData final { std::unique_ptr\ atn; }; -std::once_flag ParserOnceFlag; +::antlr4::internal::OnceFlag ParserOnceFlag; StaticData *ParserStaticData = nullptr; void ParserInitialize() { @@ -435,7 +435,7 @@ bool ::sempred(RuleContext *context, size_t ruleIndex, size_t predi void ::initialize() { - std::call_once(ParserOnceFlag, ParserInitialize); + ::antlr4::internal::call_once(ParserOnceFlag, ParserInitialize); } >> @@ -652,7 +652,7 @@ 
_errHandler->sync(this); LL1AltBlock(choice, preamble, alts, error) = _input->LT(1); switch (_input->LA(1)) { - { + { break; \} @@ -667,7 +667,7 @@ LL1OptionalBlock(choice, alts, error) ::= << setState(); _errHandler->sync(this); switch (_input->LA(1)) { - { + { break; \} @@ -793,7 +793,7 @@ ThrowNoViableAlt(t) ::= "throw NoViableAltException(this);" TestSetInlineHeader(s) ::= "" TestSetInline(s) ::= << -}; separator=" || "> +}; separator=" || "> >> // Java language spec 15.19 - shift operators mask operands rather than overflow to 0... need range test @@ -801,10 +801,9 @@ testShiftInRange(shiftAmount) ::= << (( & ~ 0x3fULL) == 0) >> -// produces smaller bytecode only when bits.ttypes contains more than two items bitsetBitfieldComparison(s, bits) ::= << -(})> && - ((1ULL \<\< ) & ()}; separator = "\n | ">)) != 0) +})> && + ((1ULL \<\< ) & ) != 0 >> isZero ::= [ @@ -816,13 +815,12 @@ offsetShift(shiftAmount, offset, prefix = false) ::= <% (:: - ):: %> -// produces more efficient bytecode when bits.ttypes contains at most two items bitsetInlineComparison(s, bits) ::= <% - == ::}; separator = "\n\n|| "> + == ::}; separator = "\n\n|| "> %> -cases(ttypes) ::= << -:::}; separator="\n"> +cases(tokens) ::= << +:::}; separator="\n"> >> InvokeRuleHeader(r, argExprsChunks) ::= "InvokeRuleHeader" @@ -1104,9 +1102,15 @@ recRuleAltPredicate(ruleName,opPrec) ::= "precpred(_ctx, )" recRuleSetReturnAction(src,name) ::= "recRuleSetReturnAction(src,name) $=$.;" recRuleSetStopToken() ::= "_ctx->stop = _input->LT(-1);" -recRuleAltStartAction(ruleName, ctxName, label) ::= << +recRuleAltStartAction(ruleName, ctxName, label, isListLabel) ::= << _localctx = _tracker.createInstance\<Context>(parentContext, parentState); -_localctx->

    ." diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Cpp.test.stg b/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Cpp.test.stg index 5597a9a737..61efce7a28 100644 --- a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Cpp.test.stg +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Cpp.test.stg @@ -258,7 +258,7 @@ void foo() { >> Declare_foo() ::= <> diff --git a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Dart.test.stg b/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Dart.test.stg index 0f3e109b4b..a9c224fe47 100644 --- a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Dart.test.stg +++ b/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Dart.test.stg @@ -242,7 +242,7 @@ TokenGetterListener(X) ::= << class LeafListener extends TBaseListener { void exitA(AContext ctx) { if (ctx.childCount==2) - TEST_platformStdoutWrite("${ctx.INT(0)?.symbol.text} ${ctx.INT(1)?.symbol.text} ${ctx.INTs()}"); + TEST_platformStdoutWrite("${ctx.INT(0)?.symbol.text} ${ctx.INT(1)?.symbol.text} ${ctx.INTs()}\n"); else print(ctx.ID()?.symbol); } @@ -255,7 +255,7 @@ RuleGetterListener(X) ::= << class LeafListener extends TBaseListener { void exitA(AContext ctx) { if (ctx.childCount==2) { - TEST_platformStdoutWrite("${ctx.b(0)?.start?.text} ${ctx.b(1)?.start?.text} ${ctx.bs()[0].start?.text}"); + TEST_platformStdoutWrite("${ctx.b(0)?.start?.text} ${ctx.b(1)?.start?.text} ${ctx.bs()[0].start?.text}\n"); } else print(ctx.b(0)?.start?.text); } @@ -281,7 +281,7 @@ LRWithLabelsListener(X) ::= << @parser::definitions { class LeafListener extends TBaseListener { void exitCall(CallContext ctx) { - TEST_platformStdoutWrite("${ctx.e()?.start?.text} ${ctx.eList()}"); + TEST_platformStdoutWrite("${ctx.e()?.start?.text} ${ctx.eList()}\n"); } void exitInt(IntContext ctx) { print(ctx.INT()?.symbol.text); diff --git 
a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Explorer.test.stg b/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Explorer.test.stg deleted file mode 100644 index cf2802edcd..0000000000 --- a/runtime-testsuite/resources/org/antlr/v4/test/runtime/templates/Explorer.test.stg +++ /dev/null @@ -1,300 +0,0 @@ -writeln(s) ::= < + '\\n';>> -write(s) ::= <;>> -writeList(s) ::= <;>> - -False() ::= "false" - -True() ::= "true" - -Not(v) ::= "!" - -Assert(s) ::= "" - -Cast(t,v) ::= "" - -Append(a,b) ::= " + " - -AppendStr(a,b) ::= <%%> - -Concat(a,b) ::= "" - -AssertIsList(v) ::= <> - -AssignLocal(s,v) ::= " = ;" - -InitIntMember(n,v) ::= <%this. = ;%> - -InitBooleanMember(n,v) ::= <%this. = ;%> - -InitIntVar(n,v) ::= <%%> - -IntArg(n) ::= "" - -VarRef(n) ::= "" - -GetMember(n) ::= <%this.%> - -SetMember(n,v) ::= <%this. = ;%> - -AddMember(n,v) ::= <%this. += ;%> - -MemberEquals(n,v) ::= <%this. === %> - -ModMemberEquals(n,m,v) ::= <%this. % === %> - -ModMemberNotEquals(n,m,v) ::= <%this. 
% != %> - -DumpDFA() ::= "this.dumpDFA();" - -Pass() ::= "" - -StringList() ::= "list" - -BuildParseTrees() ::= "this.buildParseTrees = true;" - -BailErrorStrategy() ::= <%this._errHandler = new antlr4.error.BailErrorStrategy();%> - -ToStringTree(s) ::= <%.toStringTree(null, this)%> - -Column() ::= "this.column" - -Text() ::= "this.text" - -ValEquals(a,b) ::= <%===%> - -TextEquals(a) ::= <%this.text===""%> - -PlusText(a) ::= <%"" + this.text%> - -InputText() ::= "this._input.getText()" - -LTEquals(i, v) ::= <%this._input.LT().text===%> - -LANotEquals(i, v) ::= <%this._input.LA()!=%> - -TokenStartColumnEquals(i) ::= <%this._tokenStartColumn===%> - -ImportListener(X) ::= << -@parser::header { -var Listener = require('./Listener').Listener; -} ->> - -GetExpectedTokenNames() ::= "this.getExpectedTokens().toString(this.literalNames)" - -RuleInvocationStack() ::= "antlr4.Utils.arrayToString(this.getRuleInvocationStack())" - -LL_EXACT_AMBIG_DETECTION() ::= <> - -ParserToken(parser, token) ::= <%.%> - -Production(p) ::= <%