changeset 33435:066a27297ba3 bytecode-interpreter

maint: Merge default to bytecode-interpreter
author Arun Giridhar <arungiridhar@gmail.com>
date Fri, 19 Apr 2024 12:57:20 -0400
parents e4e172cb662c (diff) 69eb4c27d8c8 (current diff)
children b6bb53ff12b4
files etc/NEWS.9.md libinterp/parse-tree/pt-eval.cc
diffstat 149 files changed, 28362 insertions(+), 345 deletions(-) [+]
line wrap: on
line diff
--- a/.github/workflows/make.yaml	Fri Apr 19 18:30:39 2024 +0200
+++ b/.github/workflows/make.yaml	Fri Apr 19 12:57:20 2024 -0400
@@ -14,7 +14,7 @@
       matrix:
         # For available GitHub-hosted runners, see:
         # https://docs.github.com/en/actions/using-github-hosted-runners/about-github-hosted-runners
-        os: [ubuntu-22.04, ubuntu-20.04]
+        os: [ubuntu-22.04]
         compiler: [gcc, clang]
         include:
           - compiler: gcc
@@ -30,15 +30,9 @@
           # "ccache" on Ubuntu 20.04 doesn't compress the cache.
           # Clang seems to generally require less cache size (smaller object files?).
           - ccache-max: 1.2G
-          - os: ubuntu-20.04
-            compiler: gcc
-            ccache-max: 6G
           - os: ubuntu-22.04
             compiler: gcc
             ccache-max: 1.2G
-          - os: ubuntu-20.04
-            compiler: clang
-            ccache-max: 3G
           - os: ubuntu-22.04
             compiler: clang
             ccache-max: 400M
@@ -140,7 +134,9 @@
 
       - name: check
         timeout-minutes: 60
-        run: XDG_RUNTIME_DIR=$RUNNER_TEMP xvfb-run -a make -C ./.build check | tee ./test-suite.log
+        run: |
+          XDG_RUNTIME_DIR=$RUNNER_TEMP xvfb-run -a \
+            make -C ./.build check-tree-evaluator | tee ./test-suite.log
 
       - name: display test suite log
         continue-on-error: true
@@ -148,6 +144,31 @@
         timeout-minutes: 5
         run: cat ./.build/test/fntests.log
 
+      - name: check with bytecode interpreter
+        timeout-minutes: 60
+        run: |
+          mv ./.build/test/fntests.log ./.build/test/fntests-no-bci.log
+          XDG_RUNTIME_DIR=$RUNNER_TEMP xvfb-run -a \
+            make -C ./.build check-bytecode-evaluator | tee ./test-suite-bci.log
+
+      - name: display test suite log with bytecode interpreter
+        continue-on-error: true
+        # Displaying the log shouldn't take long. Cancel the step if it does.
+        timeout-minutes: 5
+        run: cat ./.build/test/fntests.log
+
+      - name: difference between logs with and without bytecode interpreter
+        continue-on-error: true
+        run: |
+          echo "::group::diff test-suite.log"
+          echo "diff -urN ./test-suite.log ./test-suite-bci.log"
+          diff -urN ./test-suite.log ./test-suite-bci.log || true
+          echo "::endgroup::"
+          echo "::group::diff fntests.log"
+          echo "diff -urN ./.build/test/fntests-no-bci.log ./.build/test/fntests.log"
+          diff -urN ./.build/test/fntests-no-bci.log ./.build/test/fntests.log || true
+          echo "::endgroup::"
+
       - name: test history file creation
         # see bug #62365
         # Pipe to an interactive session to trigger appending the command to
@@ -174,6 +195,8 @@
         run:  |
           [ -n "$(grep -e "FAIL\s*0" ./test-suite.log)" ] || exit 1
           [ -z "$(grep -e "REGRESSION" ./test-suite.log)" ] || exit 1
+          [ -n "$(grep -e "FAIL\s*0" ./test-suite-bci.log)" ] || exit 1
+          [ -z "$(grep -e "REGRESSION" ./test-suite-bci.log)" ] || exit 1
           echo No unknown failing tests.
 
 
@@ -364,8 +387,8 @@
       - name: check
         timeout-minutes: 60
         run: |
-          XDG_RUNTIME_DIR=$RUNNER_TEMP \
-            xvfb-run -a make -C ./.build check | tee ./test-suite.log
+          XDG_RUNTIME_DIR=$RUNNER_TEMP xvfb-run -a \
+            make -C ./.build check-tree-evaluator | tee ./test-suite.log
 
       - name: display test suite log
         continue-on-error: true
@@ -373,6 +396,31 @@
         timeout-minutes: 5
         run: cat ./.build/test/fntests.log
 
+      - name: check with bytecode interpreter
+        timeout-minutes: 60
+        run: |
+          mv ./.build/test/fntests.log ./.build/test/fntests-no-bci.log
+          XDG_RUNTIME_DIR=$RUNNER_TEMP xvfb-run -a \
+            make -C ./.build check-bytecode-evaluator | tee ./test-suite-bci.log
+
+      - name: display test suite log with bytecode interpreter
+        continue-on-error: true
+        # Displaying the log shouldn't take long. Cancel the step if it does.
+        timeout-minutes: 5
+        run: cat ./.build/test/fntests.log
+
+      - name: difference between logs with and without bytecode interpreter
+        continue-on-error: true
+        run: |
+          echo "::group::diff test-suite.log"
+          echo "diff -urN ./test-suite.log ./test-suite-bci.log"
+          diff -urN ./test-suite.log ./test-suite-bci.log || true
+          echo "::endgroup::"
+          echo "::group::diff fntests.log"
+          echo "diff -urN ./.build/test/fntests-no-bci.log ./.build/test/fntests.log"
+          diff -urN ./.build/test/fntests-no-bci.log ./.build/test/fntests.log || true
+          echo "::endgroup::"
+
       - name: test history file creation
         # see bug #62365
         # Pipe to an interactive session to trigger appending the command to
@@ -398,6 +446,10 @@
             || echo "::warning::At least one test failed"
           [ -z "$(grep -e "REGRESSION" ./test-suite.log)" ] \
             || echo "::warning::At least one regression in test suite"
+          [ -n "$(grep -e "FAIL\s*0" ./test-suite-bci.log)" ] \
+            || echo "::warning::At least one test failed with bytecode interpreter"
+          [ -z "$(grep -e "REGRESSION" ./test-suite-bci.log)" ] \
+            || echo "::warning::At least one regression in test suite with bytecode interpreter"
           echo Finished analyzing test suite results.
 
 
@@ -412,7 +464,7 @@
 
       matrix:
         # For available GitHub-hosted runners, see: https://docs.github.com/en/actions/using-github-hosted-runners/about-github-hosted-runners
-        os: [macos-14, macos-13]
+        os: [macos-14]
         # Most (or all) homebrew packages are compiled with clang and link
         # against libc++.  So we also use clang to avoid issues with symbols
         # that don't match.
@@ -431,14 +483,6 @@
             # Qt6 doesn't install pkg-config files on macOS by default. See: https://bugreports.qt.io/browse/QTBUG-86080
             # Homebrew installs "unofficial" .pc files for it in a non-default location.
             qt-pkg-config-path: opt/qt@6/libexec/lib/pkgconfig
-          - os: macos-13
-            qt: "5"
-            cc: "clang"
-            cxx: "clang++"
-            # SUNDIALS >= 6.5.0 doesn't compile with the default flags determined
-            # by the configure script. Set CXX manually to enable building with it.
-            cxx-compiler-flags: "-std=gnu++14"
-            qt-pkg-config-path: opt/qt@5/lib/pkgconfig
 
     env:
       CC: ${{ matrix.cc }}
@@ -581,7 +625,7 @@
         timeout-minutes: 60
         run: |
           echo 'makeinfo_program ("${HOMEBREW_PREFIX}/opt/texinfo/bin/makeinfo");' >> ./scripts/startup/site-rcfile
-          make -C ./.build check | tee ./test-suite.log
+          make -C ./.build check-tree-evaluator | tee ./test-suite.log
 
       - name: display test suite log
         continue-on-error: true
@@ -589,6 +633,30 @@
         timeout-minutes: 5
         run: cat ./.build/test/fntests.log
 
+      - name: check with bytecode interpreter
+        timeout-minutes: 60
+        run: |
+          mv ./.build/test/fntests.log ./.build/test/fntests-no-bci.log
+          make -C ./.build check-bytecode-evaluator | tee ./test-suite-bci.log
+
+      - name: display test suite log with bytecode interpreter
+        continue-on-error: true
+        # Displaying the log shouldn't take long. Cancel the step if it does.
+        timeout-minutes: 5
+        run: cat ./.build/test/fntests.log
+
+      - name: difference between logs with and without bytecode interpreter
+        continue-on-error: true
+        run: |
+          echo "::group::diff test-suite.log"
+          echo "diff -urN ./test-suite.log ./test-suite-bci.log"
+          diff -urN ./test-suite.log ./test-suite-bci.log || true
+          echo "::endgroup::"
+          echo "::group::diff fntests.log"
+          echo "diff -urN ./.build/test/fntests-no-bci.log ./.build/test/fntests.log"
+          diff -urN ./.build/test/fntests-no-bci.log ./.build/test/fntests.log || true
+          echo "::endgroup::"
+
       - name: test history file creation
         # see bug #62365
         # Pipe to an interactive session to trigger appending the command to
@@ -616,6 +684,8 @@
         run: |
           [ -n "$(grep -e "FAIL\s*0" ./test-suite.log)" ] || exit 1
           [ -z "$(grep -e "REGRESSION" ./test-suite.log)" ] || exit 1
+          [ -n "$(grep -e "FAIL\s*0" ./test-suite-bci.log)" ] || exit 1
+          [ -z "$(grep -e "REGRESSION" ./test-suite-bci.log)" ] || exit 1
           echo No unknown failing tests.
 
 
@@ -636,7 +706,7 @@
       matrix:
         # For available GitHub-hosted runners, see: https://docs.github.com/en/actions/using-github-hosted-runners/about-github-hosted-runners
         os: [windows-latest]
-        msystem: [MINGW64, CLANG64]
+        msystem: [MINGW64]
         include:
           - msystem: MINGW64
             mingw-prefix: mingw64
@@ -647,22 +717,6 @@
             extra-config-flags: ""
             ccache-max: 0.9G
             allow-error: false
-          - msystem: CLANG64
-            mingw-prefix: clang64
-            target-prefix: mingw-w64-clang-x86_64
-            cc: clang
-            # It looks like we and graphicsmagick++ aren't ready for C++17 yet.
-            cxx: "clang++ -std=gnu++14"
-            f77: flang
-            # Clang seems to require a different set of dllexport attributes than GCC.
-            # autoconf and libtool still need help to correctly invoke flang.
-            extra-config-flags:
-              --disable-lib-visibility-flags
-              ac_cv_f77_compiler_gnu=yes
-              lt_cv_prog_gnu_ld=yes
-            ccache-max: 400M
-            # This configuration is not fully supported yet
-            allow-error: true
 
     env:
       CHERE_INVOKING: 1
@@ -836,7 +890,7 @@
           echo "makeinfo_program (sprintf ('%s && cd %s && perl makeinfo', [s=nthargout(2, @system, 'cygpath -w /usr/bin')](1:2), strtrim (s)));" \
             >> ./scripts/startup/site-rcfile
           export PATH=$(echo "$PATH" | sed -e "s|$MINGW_PREFIX/lib/ccache/bin:||g")
-          make -C ./.build check RUN_OCTAVE_OPTIONS="--no-gui-libs" | tee ./test-suite.log
+          make -C ./.build check-tree-evaluator RUN_OCTAVE_OPTIONS="--no-gui-libs" | tee ./test-suite.log
 
       - name: display test suite log
         continue-on-error: true
@@ -844,11 +898,35 @@
         timeout-minutes: 5
         run: cat ./.build/test/fntests.log
 
+      - name: check with bytecode interpreter
+        timeout-minutes: 60
+        run: |
+          mv ./.build/test/fntests.log ./.build/test/fntests-no-bci.log
+          make -C ./.build check-bytecode-evaluator RUN_OCTAVE_OPTIONS="--no-gui-libs" | tee ./test-suite-bci.log
+
+      - name: display test suite log with bytecode interpreter
+        continue-on-error: true
+        # Displaying the log shouldn't take long. Cancel the step if it does.
+        timeout-minutes: 5
+        run: cat ./.build/test/fntests.log
+
+      - name: difference between logs with and without bytecode interpreter
+        continue-on-error: true
+        run: |
+          echo "::group::diff test-suite.log"
+          echo "diff -urN ./test-suite.log ./test-suite-bci.log"
+          diff -urN ./test-suite.log ./test-suite-bci.log || true
+          echo "::endgroup::"
+          echo "::group::diff fntests.log"
+          echo "diff -urN ./.build/test/fntests-no-bci.log ./.build/test/fntests.log"
+          diff -urN ./.build/test/fntests-no-bci.log ./.build/test/fntests.log || true
+          echo "::endgroup::"
+
       - name: test history file creation
         # see bug #62365
         # Pipe to an interactive session to trigger appending the command to
         # the history.  This will trigger the creation of a history file.
-        run:  |
+        run: |
           echo "history_file (make_absolute_filename ('./a/b/c/history')); disp ('test')" | ./.build/run-octave -i
           [ -f ./a/b/c/history ] || echo "::warning::Creating history file failed"
 
@@ -873,270 +951,6 @@
         run: |
           [ -n "$(grep -e "FAIL\s*0" ./test-suite.log)" ] || echo "::warning::At least one test failed"
           [ -z "$(grep -e "REGRESSION" ./test-suite.log)" ] || echo "::warning::At least one regression in test suite"
+          [ -n "$(grep -e "FAIL\s*0" ./test-suite-bci.log)" ] || echo "::warning::At least one test failed with bytecode interpreter"
+          [ -z "$(grep -e "REGRESSION" ./test-suite-bci.log)" ] || echo "::warning::At least one regression in test suite with bytecode interpreter"
           echo Finished analyzing test suite results.
-
-      - name: compress build artifact
-        if: matrix.msystem == 'MINGW64'
-        continue-on-error: true
-        run: |
-          cd /c/octave/pkg
-          tar -cvzf octave.tar.gz *
-
-      - name: upload build artifact
-        if: matrix.msystem == 'MINGW64'
-        continue-on-error: true
-        uses: actions/upload-artifact@v4
-        with:
-          name: ${{ matrix.target-prefix }}-octave-${{ steps.ccache-prepare.outputs.timestamp }}
-          path: C:\octave\pkg\octave.tar.gz
-          retention-days: 7
-
-
-  cygwin:
-    runs-on: ${{ matrix.os }}
-
-    defaults:
-      run:
-        # Use Cygwin bash as default shell
-        shell: C:\cygwin\bin\bash.exe --login -eo pipefail -o igncr '{0}'
-
-    strategy:
-      # Allow other runners in the matrix to continue if some fail
-      fail-fast: false
-
-      matrix:
-        # For available GitHub-hosted runners, see: https://docs.github.com/en/actions/using-github-hosted-runners/about-github-hosted-runners
-        os: [windows-latest]
-
-    env:
-      CHERE_INVOKING: "1"
-      CC:  gcc
-      CXX:  g++
-      F77:  gfortran
-      # ccache 3.1 doesn't read configuration files
-      CCACHE_COMPRESS: "1"
-
-    steps:
-      - name: get CPU name
-        shell: pwsh
-        run : |
-          Get-CIMInstance -Class Win32_Processor | Select-Object -Property Name
-
-      - name: checkout repository
-        # This must be done before installing Cygwin because their git is
-        # incompatible with this action.
-        uses: actions/checkout@v4
-
-      - name: install Cygwin build environment
-        uses: cygwin/cygwin-install-action@v4
-        with:
-          # The packages are listed in (alphabetically sorted) blocks:
-          # The first block is for mandatory dependencies.
-          # The second block is for optional dependencies needed when building from a release tarball.
-          # The third block is for additional dependencies needed when building from a repository checkout.
-          # The fourth block is for additional run-time dependencies (to run test suite) that aren't needed to build.
-
-          # FIXME: libklu-devel should depend on libbtf-devel.  Install it manually even if Octave doesn't explicitly use it.
-          packages: >-
-            autoconf
-            automake
-            make
-            gcc-g++
-            gcc-fortran
-            bison
-            dash
-            flex
-            gperf
-            libtool
-            liblapack-devel
-            libpcre2-devel
-            libreadline-devel
-
-            ghostscript
-            gnuplot-base
-            libamd-devel
-            libarpack-devel
-            libbtf-devel
-            libbz2-devel
-            libcamd-devel
-            libccolamd-devel
-            libcholmod-devel
-            libcolamd-devel
-            libcurl-devel
-            libcxsparse-devel
-            libfftw3-devel
-            libfltk-devel
-            libfontconfig-devel
-            libfreetype-devel
-            libGL-devel
-            libgl2ps-devel
-            libglpk-devel
-            libGLU-devel
-            libgomp1
-            libGraphicsMagick-devel
-            libhdf5-devel
-            libiconv-devel
-            libklu-devel
-            libportaudio-devel
-            libqhull-devel
-            libqrupdate-devel
-            libqscintilla2_qt5-devel
-            libQt5Core-devel
-            libQt5Gui-devel
-            libQt5Help-devel
-            libsndfile-devel
-            libsuitesparseconfig-devel
-            libsundials-devel
-            libspqr-devel
-            libumfpack-devel
-            qt5-doc-tools
-            rapidjson-devel
-
-            ccache
-            git
-            icoutils
-            rsvg
-            texinfo
-
-            unzip
-            zip
-
-      - name: prepare ccache
-        # create key with human readable timestamp
-        # used in action/cache/restore and action/cache/save steps
-        id: ccache-prepare
-        run: |
-          echo "ccachedir=$(cygpath -m ~/.ccache)" >> $GITHUB_OUTPUT
-          echo "key=ccache:${{ matrix.os }}:cygwin:${{ github.ref }}:$(date +"%Y-%m-%d_%H-%M-%S"):${{ github.sha }}" >> $GITHUB_OUTPUT
-
-      - name: restore ccache
-        # Setup a github cache used to maintain the ccache from one run to the next
-        uses: actions/cache/restore@v4
-        with:
-          path: ${{ steps.ccache-prepare.outputs.ccachedir }}
-          key: ${{ steps.ccache-prepare.outputs.key }}
-          restore-keys: |
-            ccache:${{ matrix.os }}:cygwin:${{ github.ref }}
-            ccache:${{ matrix.os }}:cygwin:refs/heads/default
-
-      - name: configure ccache
-        run: |
-          # work around issue with ccache calling itself recursively
-          [ -f /usr/bin/ccache.exe ] && mv /usr/bin/ccache.exe /usr/bin/ccache
-          which ccache
-          ccache -V
-          # ccache 3.1 doesn't read configuration files
-          # test -d ${{ steps.ccache-prepare.outputs.ccachedir }} || mkdir -p ${{ steps.ccache-prepare.outputs.ccachedir }}
-          # echo "max_size = 0.9G" > ${{ steps.ccache-prepare.outputs.ccachedir }}/ccache.conf
-          # echo "compression = true" >> ${{ steps.ccache-prepare.outputs.ccachedir }}/ccache.conf
-          # limit maximum cache size to avoid exceeding the total disk or cache quota
-          ccache -M 0.9G
-          ccache -s
-          # create ccache symlinks for all compilers
-          test -d /usr/lib/ccache/bin || mkdir -p /usr/lib/ccache/bin
-          ln -s /usr/bin/ccache /usr/lib/ccache/bin/${CC}.exe
-          ln -s /usr/bin/ccache /usr/lib/ccache/bin/${CXX}.exe
-          ln -s /usr/bin/ccache /usr/lib/ccache/bin/${F77}.exe
-          # prepend path to ccache symlinks to PATH
-          echo 'export PATH="/usr/lib/ccache/bin:$PATH"' >> ~/.bash_profile
-
-      - name: bootstrap
-        run: GNULIB_URL=https://github.com/coreutils/gnulib.git ./bootstrap
-
-      - name: configure
-        # FIXME: Fix building with Java support.  Override JAVA_HOME for now.
-        # FIXME: How do we get a working TeX environment in Cygwin?  Disable building the documentation for now.
-        # FIXME: Exporting symbols for instantiated template classes doesn't work currently. Maybe, test again when Cygwin updated to a newer compiler than GCC 11.4.0?
-        run: |
-          echo $PATH
-          which $CC
-          echo $CC --version
-          $CC --version
-          which $CXX
-          echo $CXX --version
-          $CXX --version
-          which $F77
-          echo $F77 --version
-          $F77 --version
-          mkdir .build
-          cd .build && ../configure \
-            --libexecdir=/usr/lib \
-            --enable-shared \
-            --disable-java \
-            --disable-docs \
-            --disable-lib-visibility-flags \
-            JAVA_HOME="" \
-            EGREP="grep -E" \
-            FLIBS="-lgfortran -lquadmath"
-
-      - name: build
-        # Spawning processes seems to have a big overhead on this platform.  Use a somewhat larger number of parallel processes to compensate for that.
-        run: |
-          EGREP="grep -E" make -C ./.build all -j8 V=1
-
-      - name: ccache status
-        continue-on-error: true
-        run: ccache -s
-
-      - name: save ccache
-        # Save the cache after we are done (successfully) building
-        uses: actions/cache/save@v4
-        with:
-          path: ${{ steps.ccache-prepare.outputs.ccachedir }}
-          key: ${{ steps.ccache-prepare.outputs.key }}
-
-      - name: rebase binaries
-        run: |
-          find ${GITHUB_WORKSPACE}/.build -name "*.oct" -or -name "*.dll" | tee binaries.list
-          rebase -O -T binaries.list
-
-      - name: check
-        # Continuing on error in this step means that jobs will be "green" even
-        # if the test suite crashes.  But if we don't continue, we'll loose the
-        # ccache.
-        # It would be nice if we could mark the job as "yellow" (and continue)
-        # in that case.  The second best thing is to display a warning in the
-        # job summary (see below).
-        continue-on-error: true
-        timeout-minutes: 60
-
-        # mkoctfile seems to have trouble when gcc is called via ccache.  So,
-        # remove the directory with the ccache wrappers from PATH.
-
-        run: |
-          export PATH=$(echo "$PATH" | sed -e "s|/usr/lib/ccache/bin:||g")
-          EGREP="grep -E" make -C ./.build check | tee ./test-suite.log
-
-      - name: display test suite log
-        continue-on-error: true
-        # Displaying the log shouldn't take long.  Cancel the step if it does.
-        timeout-minutes: 5
-        run: cat ./.build/test/fntests.log
-
-      - name: test history file creation
-        # see bug #62365
-        # Pipe to an interactive session to trigger appending the command to
-        # the history.  This will trigger the creation of a history file.
-        run: |
-          echo "history_file (make_absolute_filename ('./a/b/c/history')); disp ('test')" | ./.build/run-octave -i
-          [ -f ./a/b/c/history ] || echo "::warning::Creating history file failed"
-
-      - name: install
-        run: |
-          make -C ./.build install
-
-      - name: test stand-alone executable
-        run: |
-          unset CC
-          unset CXX
-          export PATH=$(echo "$PATH" | sed -e "s|/usr/lib/ccache/bin:||g")
-          cd examples/code
-          mkoctfile --link-stand-alone embedded.cc -o embedded
-          ./embedded.exe
-
-      - name: analyze test suite results
-        # Make sure the test summary lists 0 "FAIL"s and no "REGRESSION"
-        run: |
-          [ -n "$(grep -e "FAIL\s*0" ./test-suite.log)" ] || echo "::warning::At least one test failed"
-          [ -z "$(grep -e "REGRESSION" ./test-suite.log)" ] || echo "::warning::At least one regression in test suite"
-          echo Finished analyzing test suite results.
--- a/Makefile.am	Fri Apr 19 18:30:39 2024 +0200
+++ b/Makefile.am	Fri Apr 19 12:57:20 2024 -0400
@@ -349,6 +349,14 @@
 	$(AM_V_GEN)$(call simple-filter-rule,build-aux/subst-script-vals.sh) && \
 	chmod a+rx $@
 
+if AMCOND_ENABLE_BYTECODE_EVALUATOR
+check-bytecode-evaluator:
+	$(MAKE) -C test check-bytecode-evaluator
+
+check-tree-evaluator:
+	$(MAKE) -C test check-tree-evaluator
+endif
+
 octave-config.h: config.h build-aux/mk-octave-config-h.sh
 	$(AM_V_GEN)$(SHELL) $(srcdir)/build-aux/mk-octave-config-h.sh $< > $@-t && \
 	$(simple_move_if_change_rule)
--- a/build-aux/mk-octave-config-h.sh	Fri Apr 19 18:30:39 2024 +0200
+++ b/build-aux/mk-octave-config-h.sh	Fri Apr 19 12:57:20 2024 -0400
@@ -104,6 +104,7 @@
 
 $SED -n 's/#\(\(undef\|define\) OCTAVE_ENABLE_64.*$\)/#  \1/p' $config_h_file
 $SED -n 's/#\(\(undef\|define\) OCTAVE_ENABLE_BOUNDS_CHECK.*$\)/#  \1/p' $config_h_file
+$SED -n 's/#\(\(undef\|define\) OCTAVE_ENABLE_BYTECODE_EVALUATOR.*$\)/#  \1/p' $config_h_file
 $SED -n 's/#\(\(undef\|define\) OCTAVE_ENABLE_INTERNAL_CHECKS.*$\)/#  \1/p' $config_h_file
 $SED -n 's/#\(\(undef\|define\) OCTAVE_ENABLE_LIB_VISIBILITY_FLAGS.*$\)/#  \1/p' $config_h_file
 $SED -n 's/#\(\(undef\|define\) OCTAVE_ENABLE_OPENMP.*$\)/#  \1/p' $config_h_file
--- a/configure.ac	Fri Apr 19 18:30:39 2024 +0200
+++ b/configure.ac	Fri Apr 19 12:57:20 2024 -0400
@@ -1372,6 +1372,21 @@
     [Define to 1 to use Bison's push parser interface in the command line REPL.])
 fi
 
+### Configure compilation of *experimental* Virtual Machine evaluator.
+
+AC_C_BIGENDIAN()
+
+ENABLE_BYTECODE_EVALUATOR=yes
+AC_ARG_ENABLE([bytecode-evaluator],
+  [AS_HELP_STRING([--disable-bytecode-evaluator],
+    [don't compile *experimental* bytecode evaluator])],
+  [if test "$enableval" = no; then ENABLE_BYTECODE_EVALUATOR=no; fi], [])
+if test $ENABLE_BYTECODE_EVALUATOR = yes; then
+  AC_DEFINE(OCTAVE_ENABLE_BYTECODE_EVALUATOR, 1,
+    [Define to 1 to build experimental Virtual Machine evaluator.])
+fi
+AM_CONDITIONAL([AMCOND_ENABLE_BYTECODE_EVALUATOR], [test $ENABLE_BYTECODE_EVALUATOR = yes])
+
 ### Check for PCRE2 or PCRE regex library, requiring one to exist.
 
 have_pcre2=no
@@ -3356,6 +3371,7 @@
   Default pager:                 $DEFAULT_PAGER
   gnuplot:                       $GNUPLOT_BINARY
 
+  Build Bytecode Evaluator:             $ENABLE_BYTECODE_EVALUATOR
   Build Octave Qt GUI:                  $BUILD_QT_SUMMARY_MSG
   Build Java interface:                 $build_java
   Build static libraries:               $STATIC_LIBS
--- a/doc/interpreter/doccheck/aspell-octave.en.pws	Fri Apr 19 18:30:39 2024 +0200
+++ b/doc/interpreter/doccheck/aspell-octave.en.pws	Fri Apr 19 12:57:20 2024 -0400
@@ -89,6 +89,8 @@
 ButtonDownFcn
 BV
 ByRows
+bytecode
+Bytecode
 BZ
 bzip
 CallbackObject
--- a/doc/interpreter/doccheck/mk-undocumented-list.pl	Fri Apr 19 18:30:39 2024 +0200
+++ b/doc/interpreter/doccheck/mk-undocumented-list.pl	Fri Apr 19 12:57:20 2024 -0400
@@ -116,6 +116,7 @@
 # Exception list of functions not requiring a DOCSTRING
 ################################################################################
 # desktop : Remove when terminal widget is no longer experimental
+# bytecode : Remove when VM is no longer experimental
 ################################################################################
 __DATA__
 angle
@@ -125,6 +126,7 @@
 besselk
 bessely
 bug_report
+bytecode
 chdir
 dbnext
 debug
--- a/doc/interpreter/vectorize.txi	Fri Apr 19 18:30:39 2024 +0200
+++ b/doc/interpreter/vectorize.txi	Fri Apr 19 12:57:20 2024 -0400
@@ -43,6 +43,7 @@
 * Function Application::       Applying functions to arrays, cells, and structs
 * Accumulation::               Accumulation functions
 * Memoization::                Memoization techniques
+* Bytecode interpreter::       Bytecode interpreter
 * Miscellaneous Techniques::   Other techniques for speeding up code
 * Examples::
 @end menu
@@ -628,6 +629,15 @@
 
 @DOCSTRING(clearAllMemoizedCaches)
 
+@node Bytecode interpreter
+@section Bytecode interpreter
+
+Octave's bytecode interpreter allows faster execution of existing code.
+Beginning with Octave version 9 the bytecode interpreter is built-in by
+default.  However, it is still considered experimental and the related
+functions are not explicitly documented in this manual.  Instead, the command
+@code{bytecode} describes how to invoke it.
+
 @node Miscellaneous Techniques
 @section Miscellaneous Techniques
 @cindex execution speed
--- a/etc/NEWS.9.md	Fri Apr 19 18:30:39 2024 +0200
+++ b/etc/NEWS.9.md	Fri Apr 19 12:57:20 2024 -0400
@@ -76,6 +76,14 @@
 
 ### General improvements
 
+- Octave now has an experimental bytecode interpreter for m-code.  Speedups
+  from 2X to 40X have been observed for different kinds of m-code.  This
+  feature is considered experimental for now.  M-code that cannot yet be
+  handled by the bytecode interpreter falls back automatically to the existing
+  tree-walker interpreter.  User tests of the bytecode interpreter are
+  encouraged.  To learn more, type `bytecode` or `help bytecode` at the Octave
+  prompt.
+
 - `dec2base`, `dec2bin`, and `dec2hex` have all been overhauled.  All three
   functions now accommodate negative inputs and fractional inputs, and repeated
   code between the functions has been reduced or eliminated.  Previously only
--- a/libinterp/corefcn/call-stack.cc	Fri Apr 19 18:30:39 2024 +0200
+++ b/libinterp/corefcn/call-stack.cc	Fri Apr 19 12:57:20 2024 -0400
@@ -41,6 +41,9 @@
 #include "ov-fcn.h"
 #include "ov-usr-fcn.h"
 #include "pager.h"
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+#  include "pt-bytecode-vm.h"
+#endif
 #include "stack-frame.h"
 #include "syminfo.h"
 #include "symrec.h"
@@ -467,6 +470,110 @@
   m_curr_frame = new_frame_idx;
 }
 
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+
+void call_stack::push (vm &vm, octave_user_script *fcn, int nargout, int nargin)
+{
+  std::size_t new_frame_idx;
+  std::shared_ptr<stack_frame> parent_link;
+  std::shared_ptr<stack_frame> static_link;
+
+  get_new_frame_index_and_links (new_frame_idx, parent_link, static_link);
+
+  std::shared_ptr<stack_frame> new_frame =
+    stack_frame::create_bytecode_script (
+       m_evaluator,
+       fcn,
+       vm,
+       new_frame_idx, // ??? index
+       parent_link,
+       static_link,
+       nargout,
+       nargin);
+
+  m_cs.push_back (new_frame);
+
+  m_curr_frame = new_frame_idx;
+}
+
+void call_stack::push (vm &vm, octave_user_function *fcn, int nargout, int nargin,
+                       const std::shared_ptr<stack_frame>& closure_frames)
+{
+  std::size_t new_frame_idx;
+  std::shared_ptr<stack_frame> parent_link;
+  std::shared_ptr<stack_frame> static_link;
+
+  get_new_frame_index_and_links (new_frame_idx, parent_link, static_link);
+
+  std::shared_ptr<stack_frame> new_frame;
+
+  if (fcn->is_nested_function ())
+    new_frame = stack_frame::create_bytecode_nested (m_evaluator, fcn, vm,
+                                           new_frame_idx,
+                                           parent_link, static_link, closure_frames,
+                                           nargout, nargin);
+  else
+    {
+      CHECK_PANIC (fcn->is_anonymous_function ());
+      new_frame = stack_frame::create_bytecode_anon (m_evaluator, fcn, vm,
+                                                     new_frame_idx,
+                                                     parent_link, static_link, closure_frames,
+                                                     nargout, nargin);
+    }
+
+  m_cs.push_back (new_frame);
+
+  m_curr_frame = new_frame_idx;
+}
+
+void call_stack::push (vm &vm, octave_user_function *fcn, int nargout, int nargin)
+{
+  std::size_t new_frame_idx;
+  std::shared_ptr<stack_frame> parent_link;
+  std::shared_ptr<stack_frame> static_link;
+
+  get_new_frame_index_and_links (new_frame_idx, parent_link, static_link);
+
+  if (fcn->is_nested_function())
+    {
+      std::shared_ptr<stack_frame> new_frame
+        = stack_frame::create_bytecode_nested (m_evaluator, fcn, vm,
+                                               new_frame_idx, // ??? index
+                                               parent_link, static_link, nullptr,
+                                               nargout, nargin);
+
+      m_cs.push_back (new_frame);
+
+      m_curr_frame = new_frame_idx;
+    }
+  else if (fcn->is_anonymous_function())
+    {
+      std::shared_ptr<stack_frame> new_frame
+        = stack_frame::create_bytecode (m_evaluator, fcn, vm,
+                                        new_frame_idx, // ??? index
+                                        parent_link, static_link,
+                                        nargout, nargin);
+
+      m_cs.push_back (new_frame);
+
+      m_curr_frame = new_frame_idx;
+    }
+  else
+    {
+      std::shared_ptr<stack_frame> new_frame
+        = stack_frame::create_bytecode (m_evaluator, fcn, vm,
+                                        new_frame_idx, // ??? index
+                                        parent_link, static_link,
+                                        nargout, nargin);
+
+      m_cs.push_back (new_frame);
+
+      m_curr_frame = new_frame_idx;
+    }
+}
+
+#endif
+
 void
 call_stack::push (octave_function *fcn)
 {
@@ -1221,6 +1328,12 @@
   m_cs[m_curr_frame]->set_nargout (nargout);
 }
 
+void
+call_stack::set_active_bytecode_ip (int ip)
+{
+  m_cs[m_curr_frame]->set_active_bytecode_ip (ip);
+}
+
 octave_value
 call_stack::get_auto_fcn_var (stack_frame::auto_var_type avt) const
 {
--- a/libinterp/corefcn/call-stack.h	Fri Apr 19 18:30:39 2024 +0200
+++ b/libinterp/corefcn/call-stack.h	Fri Apr 19 12:57:20 2024 -0400
@@ -50,6 +50,10 @@
 class symbol_info_list;
 class unwind_protect;
 
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+class vm;
+#endif
+
 class OCTINTERP_API call_stack
 {
 public:
@@ -164,6 +168,17 @@
 
   void push (octave_function *fcn);
 
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+
+  void push (vm &vm, octave_user_function *fcn, int nargout, int nargin);
+
+  void push (vm &vm, octave_user_script *fcn, int nargout, int nargin);
+
+  void push (vm &vm, octave_user_function *fcn, int nargout, int nargin,
+             const std::shared_ptr<stack_frame>& closure_frames);
+
+#endif
+
   void set_location (int l, int c)
   {
     if (! m_cs.empty ())
@@ -304,6 +319,8 @@
 
   octave_value get_auto_fcn_var (stack_frame::auto_var_type avt) const;
 
+  void set_active_bytecode_ip (int ip);
+
 private:
 
   void get_new_frame_index_and_links
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libinterp/corefcn/compile.cc	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,706 @@
+////////////////////////////////////////////////////////////////////////
+//
+// Copyright (C) 2023-2024 The Octave Project Developers
+//
+// See the file COPYRIGHT.md in the top-level directory of this
+// distribution or <https://octave.org/copyright/>.
+//
+// This file is part of Octave.
+//
+// Octave is free software: you can redistribute it and/or modify it
+// under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// Octave is distributed in the hope that it will be useful, but
+// WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with Octave; see the file COPYING.  If not, see
+// <https://www.gnu.org/licenses/>.
+//
+////////////////////////////////////////////////////////////////////////
+
+#if defined (HAVE_CONFIG_H)
+#  include "config.h"
+#endif
+
+#include "ovl.h"
+#include "ov.h"
+#include "defun.h"
+#include "variables.h"
+#include "interpreter.h"
+
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+#  include "pt-bytecode-vm.h"
+#  include "pt-bytecode-walk.h"
+#endif
+
+OCTAVE_BEGIN_NAMESPACE(octave)
+
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+
+// If TRUE, use VM evaluator rather than tree walker.
+bool V__vm_enable__ = true;
+
+// Cleverly hidden in pt-bytecode-vm.cc to prevent inlining here
+extern "C" void dummy_mark_1 (void);
+extern "C" void dummy_mark_2 (void);
+
+#endif
+
+DEFUN (__dummy_mark_1__, , ,
+       doc: /* -*- texinfo -*-
+@deftypefn {} {} __dummy_mark_1__ ()
+
+Dummy function that calls the C-function @code{void dummy_mark_1 (void)}
+that does nothing.
+
+This is useful for marking start and end for Callgrind analysis or as an entry
+point for @code{gdb}.
+
+@end deftypefn */)
+{
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+
+  dummy_mark_1 ();
+
+  return {};
+
+#else
+
+  err_disabled_feature ("__dummy_mark_1__", "byte-compiled functions");
+
+#endif
+}
+
+DEFUN (__dummy_mark_2__, , ,
+       doc: /* -*- texinfo -*-
+@deftypefn {} {} __dummy_mark_2__ ()
+
+Dummy function that calls the C-function @code{void dummy_mark_2 (void)}
+that does nothing.
+
+This is useful for marking start and end for Callgrind analysis or as an entry
+point for @code{gdb}.
+
+@end deftypefn */)
+{
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+
+  dummy_mark_2 ();
+
+  return {};
+
+#else
+
+  err_disabled_feature ("__dummy_mark_2__", "byte-compiled functions");
+
+#endif
+}
+
+DEFUN (__vm_clear_cache__, , ,
+  doc: /* -*- texinfo -*-
+@deftypefn {} {@var{val} =} __vm_clear_cache__ ()
+Internal function.
+
+Clear cache of bytecode-compiled functions.
+
+@c FIXME: Use seealso macro when functions are no longer experimental.
+See also: __vm_compile__.
+@end deftypefn */)
+{
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+
+  octave::load_path::signal_clear_fcn_cache ();
+
+  return octave_value {true};
+
+#else
+
+  err_disabled_feature ("__vm_clear_cache__", "byte-compiled functions");
+
+#endif
+}
+
+DEFUN (__vm_print_trace__, , ,
+  doc: /* -*- texinfo -*-
+@deftypefn {} {@var{print_trace} =} __vm_print_trace__ ()
+Internal function.
+
+Print a debug trace from the VM@.
+
+The print state toggles on or off with each call to the function.
+
+There must be a breakpoint set in an m-file for the trace to actually print
+anything.
+
+The return value is true if a trace will be printed and false otherwise.
+
+@end deftypefn */)
+{
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+
+  vm::m_trace_enabled = !vm::m_trace_enabled;
+
+  return octave_value {vm::m_trace_enabled};
+
+#else
+
+  err_disabled_feature ("__vm_print_trace__", "byte-compiled functions");
+
+#endif
+}
+
+DEFUN (__ref_count__, args, ,
+  doc: /* -*- texinfo -*-
+@deftypefn {} {@var{count} =} __ref_count__ (@var{obj})
+Internal function.
+
+Return the reference count for an object.
+@end deftypefn */)
+{
+  int nargin = args.length ();
+
+  if (nargin != 1)
+    print_usage ();
+
+  octave_value ov = args (0);
+
+  return octave_value {ov.get_count ()};
+}
+
+DEFMETHOD (__vm_is_executing__, interp, , ,
+  doc: /* -*- texinfo -*-
+@deftypefn {} {@var{tf} =} __vm_is_executing__ ()
+Internal function.
+
+Return true if the VM is executing the function calling
+@code{__vm_is_executing__ ()}, and false otherwise.
+
+@c FIXME: Use seealso macro when functions are no longer experimental.
+See also: __vm_enable__.
+@end deftypefn */)
+{
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+
+  auto frame = interp.get_evaluator ().get_current_stack_frame ();
+  if (!frame)
+    error ("Invalid current frame");
+
+  auto caller_frame = frame->static_link ();
+  if (!caller_frame)
+    error ("Invalid caller frame");
+
+  bool bytecode_running = caller_frame->is_bytecode_fcn_frame ();
+
+  return octave_value {bytecode_running};
+
+#else
+
+  octave_unused_parameter (interp);
+
+  err_disabled_feature ("__vm_is_executing__", "byte-compiled functions");
+
+#endif
+}
+
+DEFMETHOD (__vm_profile__, interp, args, ,
+  doc: /* -*- texinfo -*-
+@deftypefn  {} {} __vm_profile__ on
+@deftypefnx {} {} __vm_profile__ off
+@deftypefnx {} {} __vm_profile__ resume
+@deftypefnx {} {} __vm_profile__ clear
+@deftypefnx {} {@var{T} =} __vm_profile__ ("info")
+@deftypefnx {} {} __vm_profile__
+Internal function.
+
+Profile code running in the VM.
+
+@table @code
+@item __vm_profile__ on
+Start the profiler.  Any previously collected data is cleared.
+
+@item __vm_profile__ off
+Stop profiling.  The collected data can be retrieved and examined with
+@code{T = profile ("info")}.
+
+@item __vm_profile__ clear
+Clear all collected profiler data.
+
+@item __vm_profile__ resume
+Restart profiling without clearing existing data.  All newly collected
+statistics are added to the existing ones.
+
+@item __vm_profile__
+Toggle between profiling and printing the result of the profiler.
+Clears the profiler on each print.
+
+@item __vm_profile__ info
+Print the profiler data.
+
+@end table
+
+Programming Note: The calling form that returns profiler data in a variable
+is not implemented yet.
+
+@end deftypefn */)
+{
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+
+  int nargin = args.length ();
+
+  auto &evaler = interp.get_evaluator ();
+
+  std::string arg0;
+
+  if (nargin >= 1)
+   arg0 = args (0).string_value ();
+
+  if (!arg0.size ())
+    {
+      if (!vm::m_vm_profiler)
+        {
+          vm::m_vm_profiler = std::make_shared<vm_profiler> ();
+
+          vm::m_profiler_enabled = true;
+          evaler.vm_set_profiler_active (true);
+        }
+      else
+        {
+          evaler.vm_set_profiler_active (false);
+          vm::m_profiler_enabled = false;
+          auto p = vm::m_vm_profiler;
+          vm::m_vm_profiler = nullptr;
+
+          auto cpy = *p;
+          cpy.print_to_stdout ();
+        }
+    }
+  else if (arg0 == "on")
+    {
+      vm::m_profiler_enabled = false;
+      vm::m_vm_profiler = std::make_shared<vm_profiler> ();
+      vm::m_profiler_enabled = true;
+      evaler.vm_set_profiler_active (true);
+    }
+  else if (arg0 == "resume")
+    {
+      if (!vm::m_vm_profiler)
+        vm::m_vm_profiler = std::make_shared<vm_profiler> ();
+
+      vm::m_profiler_enabled = true;
+      evaler.vm_set_profiler_active (true);
+    }
+  else if (arg0 == "off")
+    {
+      evaler.vm_set_profiler_active (false);
+      vm::m_profiler_enabled = false;
+    }
+  else if (arg0 == "clear")
+    {
+      evaler.vm_set_profiler_active (false);
+      vm::m_profiler_enabled = false;
+      vm::m_vm_profiler = nullptr;
+    }
+  else if (arg0 == "info")
+    {
+      auto p_vm_profiler = vm::m_vm_profiler;
+      if (p_vm_profiler)
+        {
+          auto cpy = *p_vm_profiler;
+          cpy.print_to_stdout ();
+        }
+      else
+        warning ("Nothing recorded.");
+    }
+  else
+    print_usage ();
+
+  return octave_value {true};
+
+#else
+
+  octave_unused_parameter (interp);
+  octave_unused_parameter (args);
+
+  err_disabled_feature ("__vm_profile__", "byte-compiled functions");
+
+#endif
+}
+
+DEFMETHOD (__vm_print_bytecode__, interp, args, ,
+  doc: /* -*- texinfo -*-
+@deftypefn  {} {@var{code} =} __vm_print_bytecode__ (@var{fcn_name})
+@deftypefnx {} {@var{code} =} __vm_print_bytecode__ (@var{fcn_handle})
+Internal function.
+
+Print the bytecode of a function name or function handle.
+
+@c FIXME: Use seealso macro when functions are no longer experimental.
+See also: __vm_compile__.
+@end deftypefn */)
+{
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+
+  int nargin = args.length ();
+
+  if (nargin != 1)
+    print_usage ();
+
+  octave_value ov;
+
+  if (args (0).is_string ())
+    {
+      std::string fn_name = args(0).string_value ();
+      symbol_table& symtab = interp.get_symbol_table ();
+
+      ov = symtab.find_function (fn_name);
+
+      if (!ov.is_defined ())
+        {
+          error ("Function not defined: %s", fn_name.c_str ());
+        }
+    }
+  else
+    ov = args (0);
+
+  octave_user_code *ufn = nullptr;
+  octave_fcn_handle *h = nullptr;
+
+  if (ov.is_function_handle ())
+    {
+      h = ov.fcn_handle_value ();
+      if (!h)
+        error ("Invalid function handle");
+      ufn = h->user_function_value ();
+    }
+  else
+   ufn = ov.user_code_value ();
+
+  std::string fn_name = ufn->name ();
+
+  if (!ufn || (!ufn->is_user_function () && !ufn->is_user_script ()))
+    {
+      error ("Function not a user function or script: %s", fn_name.c_str ());
+    }
+
+  // Nested functions need to be compiled via their parent
+  bool is_nested = ufn->is_nested_function ();
+
+  bool try_compile = !ufn->is_compiled () && V__vm_enable__ && !is_nested;
+
+  if (try_compile && h && h->is_anonymous ())
+    h->compile ();
+  else if (try_compile)
+    vm::maybe_compile_or_compiled (ufn, 0);
+  else if (!ufn->is_compiled ())
+    error ("Function not compiled: %s", fn_name.c_str ());
+
+  if (!ufn->is_compiled ())
+    error ("Function can't be compiled: %s", fn_name.c_str ());
+
+  auto bc = ufn->get_bytecode ();
+
+  print_bytecode (bc);
+
+  return octave_value {true};
+
+#else
+
+  octave_unused_parameter (interp);
+  octave_unused_parameter (args);
+
+  err_disabled_feature ("__vm_print_bytecode__", "byte-compiled functions");
+
+#endif
+}
+
+DEFMETHOD (__vm_is_compiled__, interp, args, ,
+  doc: /* -*- texinfo -*-
+@deftypefn  {} {@var{tf} =} __vm_is_compiled__ (@var{fcn_name})
+@deftypefnx {} {@var{tf} =} __vm_is_compiled__ (@var{fcn_handle})
+Internal function.
+
+Return true if the specified function name or function handle has been
+compiled to bytecode, and false otherwise.
+
+@c FIXME: Use seealso macro when functions are no longer experimental.
+See also: __vm_compile__.
+@end deftypefn */)
+{
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+
+  int nargin = args.length ();
+
+  if (nargin != 1)
+    print_usage ();
+
+  std::string fcn_to_compile;
+  octave_fcn_handle *handle_to_compile = nullptr;
+
+  bool do_handle = false;
+
+  if (args (0).is_string ())
+    fcn_to_compile = args (0).string_value ();
+  else if (args (0).is_function_handle ())
+    {
+      handle_to_compile = args (0).fcn_handle_value ();
+      do_handle = true;
+    }
+  else
+    error ("First argument need to be a function name or function handle.");
+
+  try
+    {
+      if (do_handle)
+        {
+          octave_user_function *ufn = handle_to_compile->user_function_value ();
+          if (!ufn)
+            return octave_value {false};
+          return octave_value {ufn->is_compiled ()};
+        }
+      else
+        {
+          std::string name = fcn_to_compile;
+          symbol_table& symtab = interp.get_symbol_table ();
+          octave_value ov = symtab.find_function (name);
+
+          if (!ov.is_defined ())
+            return octave_value {false};
+
+          octave_user_code *ufn = ov.user_code_value ();
+          if (!ufn)
+            return octave_value {false};
+
+          return octave_value {ufn->is_compiled ()};
+        }
+    }
+  catch (execution_exception &)
+    {
+      return octave_value {false};
+    }
+
+#else
+
+  octave_unused_parameter (interp);
+  octave_unused_parameter (args);
+
+  err_disabled_feature ("__vm_is_compiled__", "byte-compiled functions");
+
+#endif
+}
+
+DEFMETHOD (__vm_compile__, interp, args, ,
+       doc: /* -*- texinfo -*-
+@deftypefn  {} {@var{status} =} __vm_compile__ (@var{fcn_name})
+@deftypefnx {} {@var{status} =} __vm_compile__ (@var{fcn_name}, "print")
+@deftypefnx {} {@var{status} =} __vm_compile__ (@var{fcn_name}, "clear")
+Internal function.
+
+Compile the specified function to bytecode.
+
+The compiled function and its subfunctions will be executed by the VM when
+called.
+
+The @qcode{"print"} option prints the bytecode after compilation.
+
+The @qcode{"clear"} option removes the bytecode from the VM instead.
+
+Return true on success, and false otherwise.
+
+@strong{Note:} Do not recompile or clear the bytecode of a running function
+with @code{__vm_compile__}.
+
+@c FIXME: Use seealso macro when functions are no longer experimental.
+See also: __vm_print_bytecode__, __vm_clear_cache__.
+@end deftypefn */)
+{
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+
+  int nargin = args.length ();
+
+  if (! nargin)
+    print_usage ();
+
+  std::string fcn_to_compile;
+  octave_fcn_handle *handle_to_compile = nullptr;
+
+  bool do_clear = false;
+  bool do_print = false;
+
+  if (args (0).is_string ())
+    fcn_to_compile = args (0).string_value ();
+  else if (args (0).is_function_handle ())
+    handle_to_compile = args (0).fcn_handle_value ();
+  else
+    error ("First argument need to be a function name or function handle.");
+
+  for (int i = 1; i < nargin; i++)
+    {
+      auto arg = args(i);
+
+      if (! arg.is_string())
+        error ("Non string argument");
+
+      std::string arg_s = arg.string_value ();
+
+      if (arg_s == "clear")
+        do_clear = true;
+
+      if (arg_s == "print")
+        do_print = true;
+    }
+
+  if (do_clear && handle_to_compile)
+    {
+      octave_user_function *ufn = handle_to_compile->user_function_value ();
+      if (!ufn)
+        error ("Invalid function handle");
+
+      ufn->clear_bytecode ();
+
+      return octave_value {true};
+    }
+  else if (do_clear)
+    {
+      std::string name = fcn_to_compile;
+      symbol_table& symtab = interp.get_symbol_table ();
+      octave_value ov = symtab.find_function (name);
+
+      if (!ov.is_defined ())
+        {
+          error ("Function not defined: %s", name.c_str ());
+        }
+
+      octave_user_code *ufn = ov.user_code_value ();
+
+      if (!ufn || (!ufn->is_user_function () && !ufn->is_user_script ()))
+        {
+          error ("Function not an user function or script: %s", name.c_str ());
+        }
+
+      ufn->clear_bytecode ();
+
+      return octave_value {true};
+    }
+
+  if (handle_to_compile)
+    {
+      octave_user_function *ufn = handle_to_compile->user_function_value ();
+      if (!ufn)
+        error ("Invalid function handle");
+
+      if (ufn->is_nested_function ())
+        error ("Nested functions need to be compiled via their parent");
+
+      // Anonymous functions need to be compiled via their handle
+      // to get the locals.
+      if (handle_to_compile->is_anonymous ())
+        {
+          handle_to_compile->compile ();
+          if (do_print && ufn->is_compiled ())
+            {
+              auto bc = ufn->get_bytecode ();
+              print_bytecode (bc);
+            }
+
+            return octave_value {true};
+        }
+      else
+        {
+          // Throws on errors
+          compile_user_function (*ufn, do_print);
+
+          return octave_value {true};
+        }
+    }
+  else
+    {
+      std::string name = fcn_to_compile;
+      symbol_table& symtab = interp.get_symbol_table ();
+      octave_value ov = symtab.find_function (name);
+
+      if (!ov.is_defined ())
+        {
+          error ("Function not defined: %s", name.c_str ());
+        }
+
+      if (!ov.is_user_function () && !ov.is_user_script ())
+        {
+          error ("Function is not a user function or script: %s", name.c_str ());
+        }
+
+      octave_user_code *ufn = ov.user_code_value ();
+
+      if (!ufn || (!ufn->is_user_function () && !ufn->is_user_script ()))
+        {
+          error ("Function is not really user function or script: %s", name.c_str ());
+        }
+
+      if (ufn->is_nested_function ())
+        error ("Nested functions need to be compiled via their parent");
+
+      // Throws on errors
+      compile_user_function (*ufn, do_print);
+    }
+
+  return octave_value {true};
+
+#else
+
+  octave_unused_parameter (interp);
+  octave_unused_parameter (args);
+
+  err_disabled_feature ("__vm_compile__", "byte-compiled functions");
+
+#endif
+}
+
+DEFUN (__vm_enable__, args, nargout,
+       doc: /* -*- texinfo -*-
+@deftypefn  {} {@var{val} =} __vm_enable__ ()
+@deftypefnx {} {@var{old_val} =} __vm_enable__ (@var{new_val})
+@deftypefnx {} {@var{old_val} =} __vm_enable__ (@var{new_val}, "local")
+Query or set the internal variable that determines whether Octave automatically
+compiles functions to bytecode and executes them in a virtual machine (VM).
+
+@strong{Warning:} The virtual machine feature is experimental.
+
+The default value is false while the VM is still experimental.  Users must
+explicitly call @code{__vm_enable__ (1)} to use it.
+
+When false, Octave uses a traditional tree walker to evaluate statements parsed
+from m-code.  When true, Octave translates parsed statements to an intermediate
+representation that is then evaluated by a virtual machine.
+
+When called from inside a function with the @qcode{"local"} option, the setting
+is changed locally for the function and any subroutines it calls.  The original
+setting is restored when exiting the function.
+
+Once compiled to bytecode, the function will always be evaluated by the VM
+regardless of the state of @code{__vm_enable__}, until the bytecode is cleared
+by, e.g., @qcode{"clear all"}, or a modification to the function's m-file.
+
+@c FIXME: Use seealso macro when functions are no longer experimental.
+See also: __vm_compile__.
+@end deftypefn */)
+{
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+
+  return set_internal_variable (V__vm_enable__, args, nargout,
+                                "__vm_enable__");
+
+#else
+
+  octave_unused_parameter (args);
+  octave_unused_parameter (nargout);
+
+  err_disabled_feature ("__vm_enable__", "byte-compiled functions");
+
+#endif
+}
+
+OCTAVE_END_NAMESPACE(octave)
--- a/libinterp/corefcn/load-path.cc	Fri Apr 19 18:30:39 2024 +0200
+++ b/libinterp/corefcn/load-path.cc	Fri Apr 19 12:57:20 2024 -0400
@@ -256,6 +256,8 @@
 m_dir_info_list (), m_init_dirs (), m_command_line_path ()
 { }
 
+std::atomic<octave_idx_type> load_path::s_n_updated;
+
 void
 load_path::initialize (bool set_initial_path)
 {
@@ -297,6 +299,8 @@
 void
 load_path::clear ()
 {
+  signal_clear_fcn_cache ();
+
   m_dir_info_list.clear ();
 
   m_top_level_package.clear ();
@@ -420,6 +424,8 @@
   // preserve the correct directory ordering for new files that
   // have appeared.
 
+  signal_clear_fcn_cache ();
+
   m_top_level_package.clear ();
 
   m_package_map.clear ();
--- a/libinterp/corefcn/load-path.h	Fri Apr 19 18:30:39 2024 +0200
+++ b/libinterp/corefcn/load-path.h	Fri Apr 19 12:57:20 2024 -0400
@@ -213,7 +213,15 @@
   static const int OCT_FILE = 2;
   static const int MEX_FILE = 4;
 
+  static octave_idx_type get_weak_n_updated () { return s_n_updated; }
+
+  static void signal_clear_fcn_cache ()
+  {
+    s_n_updated++;
+  }
+
 private:
+  static std::atomic<octave_idx_type> s_n_updated;
 
   class dir_info
   {
--- a/libinterp/corefcn/module.mk	Fri Apr 19 18:30:39 2024 +0200
+++ b/libinterp/corefcn/module.mk	Fri Apr 19 12:57:20 2024 -0400
@@ -145,6 +145,7 @@
   %reldir%/coct-hdf5-types.c \
   %reldir%/colamd.cc \
   %reldir%/colloc.cc \
+  %reldir%/compile.cc \
   %reldir%/conv2.cc \
   %reldir%/daspk.cc \
   %reldir%/dasrt.cc \
--- a/libinterp/corefcn/stack-frame.cc	Fri Apr 19 18:30:39 2024 +0200
+++ b/libinterp/corefcn/stack-frame.cc	Fri Apr 19 12:57:20 2024 -0400
@@ -52,6 +52,11 @@
 #include "utils.h"
 #include "variables.h"
 
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+#  include "ov-ref.h"
+#  include "pt-bytecode-vm.h"
+#endif
+
 OCTAVE_BEGIN_NAMESPACE(octave)
 
 // FIXME: There should probably be a display method for the script,
@@ -85,6 +90,11 @@
 class script_stack_frame;
 class user_fcn_stack_frame;
 class scope_stack_frame;
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+class bytecode_fcn_stack_frame;
+class bytecode_script_stack_frame;
+class bytecode_nested_fcn_stack_frame;
+#endif
 
 class stack_frame_walker
 {
@@ -109,8 +119,1685 @@
 
   virtual void
   visit_scope_stack_frame (scope_stack_frame&) = 0;
+
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+
+  virtual void
+  visit_bytecode_fcn_stack_frame (bytecode_fcn_stack_frame&) = 0;
+
+  virtual void
+  visit_bytecode_script_stack_frame (bytecode_script_stack_frame&) = 0;
+
+  virtual void
+  visit_bytecode_nested_fcn_stack_frame (bytecode_nested_fcn_stack_frame&) = 0;
+
+#endif
 };
 
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+
+// Base class for bytecode_fcn_stack_frame, bytecode_script_stack_frame and bytecode_nested_fcn_stack_frame
+// covering common functionality.
+//
+// The bytecode interpreter stack does not directly translate to the tree_evaluator's stack,
+// so there is a translation table to make accessing the bytecode interpreter stack opaque
+// for non-bytecode interpreter use.
+//
+// The octave_values are not actually stored in the bytecode_fcn_stack_frame
+// object, but on the bytecode interpreter's stack. Extra values created by e.g., 'eval ("e = 3;")'
+// are stored in these frame objects though.
+
+class bytecode_frame : public stack_frame
+{
+public:
+  // Don't allow copying, moving, etc.
+  bytecode_frame () = delete;
+  bytecode_frame (const bytecode_frame& elt) = delete;
+  bytecode_frame& operator = (const bytecode_frame& elt) = delete;
+  bytecode_frame& operator = (bytecode_frame&& elt) = delete;
+
+  bytecode_frame (tree_evaluator& tw,
+                  std::size_t index,
+                  const std::shared_ptr<stack_frame>& parent_link,
+                  const std::shared_ptr<stack_frame>& static_link,
+                  const std::shared_ptr<stack_frame>& access_link,
+                  vm &vm, octave_user_code *fcn,
+                  int nargout, int nargin) 
+    : stack_frame (tw, index, parent_link, static_link, access_link),
+    m_unwind_data (vm.m_unwind_data), // TODO: m_unwind_data should be replaced with a bytecode pointer instead
+    m_stack_start (vm.m_sp),
+    m_fcn (fcn),
+    m_name_data (vm.m_name_data),
+    m_code (vm.m_code),
+    m_ip (0),
+    m_nargout (nargout),
+    m_nargin (nargin)
+    {}
+
+  unwind_data *m_unwind_data;
+  stack_element *m_stack_start;
+  octave_user_code *m_fcn;
+  std::string *m_name_data;
+  unsigned char *m_code;
+  int m_ip;
+  int m_nargout;
+  int m_nargin;
+
+  // Needed to see how many shared_pointers are pointing to the frame object
+  std::weak_ptr<stack_frame> m_weak_ptr_to_self;
+  
+  // Number of symbols at compile time (includes VM internal symbols)
+  unsigned n_compiled_syms () const
+  {
+    return m_unwind_data->m_ids_size;
+  }
+
+  // Number of user symbols at compile time
+  unsigned n_compiled_usr_syms () const
+  {
+    return m_unwind_data->m_n_orig_scope_size;
+  }
+
+  unsigned n_extra_size () const
+  {
+    return m_lazy_data ? m_lazy_data->m_extra_slots.size () : 0;
+  }
+
+  unsigned n_compiled_frame_depth () const
+  {
+    return m_unwind_data->m_external_frame_offset_to_internal.size ();
+  }
+
+  void vm_clear_for_cache ()
+  {
+    m_parent_link = nullptr;
+    m_static_link = nullptr;
+    m_access_link = nullptr;
+    m_dispatch_class.clear ();
+
+    dispose ();
+  }
+
+  std::shared_ptr<stack_frame> get_ith_access_link (unsigned n) const
+  {
+    std::shared_ptr<stack_frame> nxt = access_link ();
+
+    while (n--)
+      {
+        if (nxt)
+          nxt = nxt->access_link ();
+      }
+
+    return nxt;
+  }
+
+  // vm_clear_for_cache () and the dtor need to mirror each other
+  // so they both call dispose()
+  void dispose ()
+  {
+    if (m_lazy_data)
+      {
+        if (m_lazy_data->m_stack_cpy)
+          {
+            // Note: int nargout at offset 0
+            for (unsigned i = 1; i < n_compiled_syms (); i++)
+              m_lazy_data->m_stack_cpy[i].ov.~octave_value ();
+            delete [] m_lazy_data->m_stack_cpy;
+          }
+        delete m_lazy_data->m_unwind_protect_frame;
+        delete m_lazy_data;
+        m_lazy_data = nullptr;
+      }
+  }
+
+  // Since a reference to the stack frame can be saved somewhere
+  // we need to check at stack unwind in the VM if that is the case
+  // and save the variables on the VM stack in this frame object so
+  // they can be accessed.
+  void vm_unwinds ()
+  {
+    bool is_alone = m_weak_ptr_to_self.use_count () <= 2; // Two seems about right
+
+    if (m_lazy_data)
+      {
+        delete m_lazy_data->m_unwind_protect_frame;
+        m_lazy_data->m_unwind_protect_frame = nullptr;
+
+        // Restore warningstates
+        if (m_fcn)
+          {
+            auto usr_fn_p = m_fcn->user_function_value (true);
+            if (usr_fn_p)
+              usr_fn_p->restore_warning_states (); // TODO: octave_user_function::restore_warning_states() could be static.
+          }
+      }
+
+    if (is_alone)
+      {
+        if (m_lazy_data)
+          delete m_lazy_data;
+
+        // Zero these so it is easier to find a "use-after-unwind"
+        // error
+        m_lazy_data = nullptr;
+        m_stack_start = nullptr;
+        m_code = nullptr;
+        m_name_data = nullptr;
+        m_unwind_data = nullptr;
+
+        return;
+      }
+
+    // Copy the stack to the frame
+    size_t stack_slots = n_compiled_syms ();
+
+    lazy_data ();
+
+    m_lazy_data->m_stack_cpy = new octave::stack_element[stack_slots];
+
+    m_lazy_data->m_stack_cpy[0].i = m_stack_start[0].i; // Copy int nargout at offset 0
+    // Copy each octave_value in slots on the stack
+    for (unsigned i = 1; i < n_compiled_syms (); i++)
+      new (&m_lazy_data->m_stack_cpy[i].ov) octave_value {m_stack_start[i].ov};
+
+    m_stack_start = m_lazy_data->m_stack_cpy;
+  }
+
+  // To keep down the footprint of the frame some seldom-used
+  // variables are lazily initialized and stored in *m_lazy_data
+  struct lazy_data_struct
+  {
+    octave_value m_ignored;
+    octave_value m_arg_names;
+    octave_value m_saved_warnings_states;
+
+    std::vector<octave_value> m_extra_slots;
+
+    unwind_protect *m_unwind_protect_frame = nullptr;
+    stack_element *m_stack_cpy = nullptr;
+    bool m_is_script;
+  };
+
+  lazy_data_struct & lazy_data ()
+  {
+    if (! m_lazy_data)
+      m_lazy_data = new lazy_data_struct {};
+    return *m_lazy_data;
+  }
+
+  lazy_data_struct & clazy_data () const
+  {
+    if (! m_lazy_data)
+      panic ("VM internal panic. Lazy data not set");
+    return *m_lazy_data;
+  }
+
+  lazy_data_struct *m_lazy_data = nullptr;
+
+  bool slot_is_global (std::size_t local_offset) const
+  {
+    octave_value *pov;
+
+    if (local_offset >= n_compiled_syms ())
+      {
+        CHECK_PANIC (m_lazy_data);
+        pov = &m_lazy_data->m_extra_slots.at (local_offset - n_compiled_syms ());
+      }
+    else
+      pov = &m_stack_start [local_offset].ov;
+    
+    if (! pov->is_ref ())
+      return false;
+
+    return pov->ref_rep ()->get_scope_flag () == GLOBAL;
+  }
+
+  bool slot_is_persistent (std::size_t local_offset) const
+  {
+    octave_value *pov;
+
+    if (local_offset >= n_compiled_syms ())
+      {
+        CHECK_PANIC (m_lazy_data);
+        pov = &m_lazy_data->m_extra_slots.at (local_offset - n_compiled_syms ());
+      }
+    else
+      pov = &m_stack_start [local_offset].ov;
+
+    if (! pov->is_ref ())
+      return false;
+    return pov->ref_rep ()->get_scope_flag () == PERSISTENT;
+  }
+
+  // Bytecode frames have another slot numbering than the scope objects,
+  // so there are internal functions that deal in "internal offset"
+
+  std::size_t internal_size () const
+  {
+    return n_compiled_syms () + n_extra_size ();
+  }
+
+  // Overloads of functions in the parent class stack_frame
+
+  symbol_scope get_scope () const
+  {
+    return m_fcn->scope ();
+  }
+
+  octave_function * function () const { return m_fcn; }
+
+  std::size_t size () const
+  {
+    return n_compiled_usr_syms () + n_extra_size ();
+  }
+
+  bool is_bytecode_fcn_frame () const { return true; }
+
+  int line () const
+  {
+    loc_entry loc = vm::find_loc (m_ip, m_unwind_data->m_loc_entry); // TODO: Does not work in nested bytecode stack frames
+    return loc.m_line;
+  }
+
+  int column () const
+  {
+    loc_entry loc = vm::find_loc (m_ip, m_unwind_data->m_loc_entry);
+    return loc.m_col;
+  }
+
+  unwind_protect *unwind_protect_frame ()
+  {
+    if (! lazy_data ().m_unwind_protect_frame)
+      lazy_data ().m_unwind_protect_frame = new unwind_protect ();
+
+    return lazy_data ().m_unwind_protect_frame;
+  }
+
+  octave_value get_active_bytecode_call_arg_names ()
+  {
+
+    // Handle ARG_NAMES
+    if (! m_unwind_data)
+      return Cell {};
+
+    int best_match = -1;
+    int best_start = -1;
+
+    auto &entries = m_unwind_data->m_argname_entries;
+    for (unsigned i = 0; i < entries.size (); i++)
+      {
+        int start = entries[i].m_ip_start;
+        int end = entries[i].m_ip_end;
+
+        if (start > (m_ip - 1) || end < (m_ip - 1))
+          continue;
+
+        if (best_match != -1)
+          {
+            if (best_start > start)
+              continue;
+          }
+
+        best_match = i;
+        best_start = start;
+      }
+
+    if (best_match == -1)
+      return Cell {};
+
+    Cell c = entries[best_match].m_arg_names;
+    return c;
+  }
+
+  // Find the name of the symbol with the given external data offset (and
+  // frame offset) by a linear scan of the scope's symbol records.
+  // Returns "" if no record matches.
+  std::string sym_name_from_external_offset (std::size_t external_offset, std::size_t frame_offset = 0)
+  {
+    auto scope = get_scope ();
+
+    for (auto &kv : scope.symbols()) // TODO: Lookup on external offset?
+      {
+        auto &rec = kv.second;
+
+        if (rec.data_offset () != external_offset)
+          continue;
+        if (rec.frame_offset () != frame_offset)
+          continue;
+
+        return rec.name ();
+      }
+
+    return "";
+  }
+
+  // Record the current VM instruction pointer so that line (), column ()
+  // and ARG_NAMES lookups can resolve locations for this frame.
+  void set_active_bytecode_ip (int ip)
+  {
+    m_ip = ip;
+  }
+
+  // Name of input argument N (0-based) as written at the call site,
+  // taken from the ARG_NAMES auto variable.  If IDS_ONLY is true, only
+  // names that are variables in the calling frame are returned; anything
+  // else yields "".
+  std::string inputname (int n, bool ids_only) const
+  {
+    std::string name;
+
+    octave_value ov_arg_names = get_auto_fcn_var (stack_frame::ARG_NAMES);
+    Array<std::string> arg_names = ov_arg_names.cellstr_value ();
+
+    if (n >= 0 && n < arg_names.numel ())
+      {
+        name = arg_names(n);
+
+        if (ids_only && ! m_static_link->is_variable (name))
+          name = "";
+      }
+
+    return name;
+  }
+
+  void set_nargin (int nargin) { m_nargin = nargin; }
+  void set_nargout (int nargout) { m_nargout = nargout; }
+
+  // Break reference cycles between closures and this frame by visiting
+  // every value on the VM stack slice owned by this frame, the extra
+  // slots, and then recursing through the access link chain.
+  void break_closure_cycles (const std::shared_ptr<stack_frame> &frame)
+  {
+    if (m_stack_start)
+      {
+        // NOTE(review): the loop starts at slot 1, presumably skipping a
+        // reserved slot 0 -- confirm against the VM stack layout.
+        for (unsigned i = 1; i < n_compiled_syms (); i++)
+          {
+            // break_closure_cycles () is making a clone of nil values
+            // but we want nil values to have the same m_rep pointer, on the
+            // VM stack. is_nil () won't return true on a cloned nil value.
+            if (! m_stack_start[i].ov.is_nil ())
+              m_stack_start[i].ov.break_closure_cycles (frame);
+          }
+      }
+
+    if (m_lazy_data)
+      {
+        for (octave_value &ov : m_lazy_data->m_extra_slots)
+          {
+            if (! ov.is_nil ())
+              ov.break_closure_cycles (frame);
+          }
+      }
+
+    if (m_access_link)
+      m_access_link->break_closure_cycles (frame);
+  }
+
+  // Convert external offset to internal slot offset.
+  //
+  // Offsets present in the compile-time translation table map directly;
+  // offsets not in the table must refer to extra slots that were added
+  // dynamically after compilation and are placed after the compiled
+  // slots.
+  std::size_t external_to_local_offset (std::size_t external_offset, std::size_t frame_offset = 0) const
+  {
+    auto it = m_unwind_data->m_external_frame_offset_to_internal.at (frame_offset).find (external_offset);
+    if (it == m_unwind_data->m_external_frame_offset_to_internal[frame_offset].end ())
+    {
+      CHECK_PANIC (frame_offset == 0);
+      // If the offset is smaller than the number of user symbols, it should have been in the table
+      CHECK_PANIC (external_offset >= n_compiled_usr_syms ());
+
+      // The offsets that are not in the original translation table are in the extra slots added dynamically
+      return n_compiled_syms () + (external_offset - n_compiled_usr_syms ());
+    }
+
+    return it->second;
+  }
+
+  // Value of the slot at an *internal* offset.  Compiled symbols live on
+  // the VM stack; anything beyond them lives in the extra slots held by
+  // the lazy data.  Reference objects (globals/persistents) are
+  // dereferenced before returning.
+  octave_value varval_internal (std::size_t internal_offset) const
+  {
+    octave_value *pov;
+
+    if (internal_offset < n_compiled_syms ())
+      {
+        // The symbol is on the VM stack
+        pov = &m_stack_start[internal_offset].ov;
+      }
+    else
+      {
+        // The symbol is in the extra slots in this frame object
+        pov = &clazy_data ().m_extra_slots.at (internal_offset - n_compiled_syms ());
+      }
+
+    // globals, persistent are stored as 'octave_value_ref' objects. We need to dereference those.
+    if (pov->is_ref ())
+      return pov->ref_rep ()->deref ();
+    return *pov;
+  }
+
+  // The using declaration will avoid warnings about partially-overloaded
+  // virtual functions.
+  using stack_frame::varval;
+  using stack_frame::varref;
+
+  // Value of the slot at an *external* offset (translated to an internal
+  // offset first).
+  octave_value varval (std::size_t external_offset) const
+  {
+    std::size_t local_offset = external_to_local_offset (external_offset);
+    return varval_internal (local_offset);
+  }
+
+  // Writable reference to the slot at an *internal* offset.  If
+  // DEREF_REFS is true and the slot holds an octave_value_ref
+  // (global/persistent), the reference is followed so the caller writes
+  // through to the underlying storage.
+  octave_value& varref_internal (std::size_t local_offset, bool deref_refs)
+  {
+    std::size_t extra_size = n_extra_size ();
+    std::size_t n_stack_slots = n_compiled_syms ();
+
+    octave_value *pov;
+
+    if (local_offset < n_stack_slots)
+      {
+        // Compiled symbol: lives on the VM stack.
+        pov = &m_stack_start [local_offset].ov;
+      }
+    else
+      {
+        // Dynamically added symbol: lives in the extra slots.
+        std::size_t extra_offset = local_offset - n_stack_slots;
+
+        CHECK_PANIC (m_lazy_data);
+        CHECK_PANIC (extra_offset < extra_size);
+
+        pov = &m_lazy_data->m_extra_slots.at (extra_offset);
+      }
+
+    if (deref_refs && pov->is_ref ())
+      return pov->ref_rep ()->ref ();
+    return *pov;
+  }
+
+  // Writable reference to the slot at an *external* offset (translated
+  // to an internal offset first).
+  octave_value& varref (std::size_t external_offset, bool deref_refs)
+  {
+    std::size_t local_offset = external_to_local_offset (external_offset);
+    return varref_internal (local_offset, deref_refs);
+  }
+
+  // Read an automatic function variable (nargin, nargout, saved warning
+  // states, ignored outputs, argument names).  nargin/nargout are kept
+  // in dedicated members; the rest live in the lazy data.  Panics on an
+  // unsupported auto_var_type.
+  octave_value get_auto_fcn_var (auto_var_type avt) const
+  {
+    switch (avt)
+      {
+        case stack_frame::NARGIN:
+          return octave_value {m_nargin};
+        case stack_frame::NARGOUT:
+          return octave_value {m_nargout};
+        case stack_frame::SAVED_WARNING_STATES:
+          if (!m_lazy_data)
+            return {};
+          else
+            return m_lazy_data->m_saved_warnings_states;
+        case stack_frame::IGNORED:
+          if (!m_lazy_data)
+            return {};
+          else
+            return m_lazy_data->m_ignored;
+        case stack_frame::ARG_NAMES:
+        {
+          // If the current bytecode stack frame is the root one in the VM, the caller
+          // sets ARG_NAMES in the root bytecode stack frame
+          if (m_lazy_data)
+            {
+              octave_value ov = m_lazy_data->m_arg_names;
+              if (ov.is_defined ())
+                return ov;
+            }
+          // In bytecode stack frames, the arg names are stored in the caller frame.
+          return m_parent_link->get_active_bytecode_call_arg_names ();
+        }
+        default:
+          panic ("bytecode_frame::get_auto_fcn_var() : Invalid call idx=%d", static_cast<int> (avt));
+      }
+  }
+
+  // Store an automatic function variable.  Mirrors get_auto_fcn_var ():
+  // nargin/nargout go to dedicated members, the rest to the lazy data
+  // (created on demand).  Panics on an unsupported auto_var_type.
+  void set_auto_fcn_var (auto_var_type avt, const octave_value& val)
+  {
+    switch (avt)
+      {
+        case stack_frame::NARGIN:
+          m_nargin = val.int_value ();
+          return;
+        case stack_frame::NARGOUT:
+          m_nargout = val.int_value ();
+          return;
+        case stack_frame::SAVED_WARNING_STATES:
+          lazy_data ().m_saved_warnings_states = val;
+          return;
+        case stack_frame::IGNORED:
+          lazy_data ().m_ignored = val;
+          return;
+        case stack_frame::ARG_NAMES:
+          lazy_data ().m_arg_names = val;
+          return;
+        default:
+          panic ("bytecode_frame::set_auto_fcn_var() : Invalid call idx=%d", static_cast<int> (avt));
+      }
+  }
+
+  // Grow the frame's internal size to ARG slots by enlarging the extra
+  // slots vector.  Never shrinks: a request smaller than the current
+  // size is a no-op.
+  void internal_resize (std::size_t arg)
+  {
+    int diff = static_cast<int> (arg) - static_cast<int> (internal_size ());
+
+    if (diff > 0)
+      {
+        auto &lazy = lazy_data ();
+        lazy.m_extra_slots.resize (lazy.m_extra_slots.size () + diff);
+      }
+  }
+
+  // Change the global/persistent/local state of the slot at
+  // INTERNAL_OFFSET.  Globals and persistents are represented by
+  // octave_value_ref objects placed in the slot; marking LOCAL removes
+  // such a ref.  EXTERNAL_OFFSET is needed to recover the symbol name
+  // (or persistent offset) when creating a ref.  Panics on invalid
+  // state transitions (e.g. persistent -> global).
+  void set_scope_flag_internal (std::size_t internal_offset, std::size_t external_offset, std::size_t frame_offset, scope_flags flag)
+  {
+    scope_flags current_flag = get_scope_flag_internal (internal_offset);
+
+    bool is_global = current_flag == GLOBAL;
+    bool is_pers = current_flag == PERSISTENT;
+
+
+    octave_value *ov; // Pointer to the slot
+
+    if (internal_offset >= n_compiled_syms ())
+      {
+        CHECK_PANIC (m_lazy_data);
+        ov = &m_lazy_data->m_extra_slots.at (internal_offset - n_compiled_syms ());
+      }
+    else
+      {
+        ov = &m_stack_start [internal_offset].ov;
+      }
+
+    if (flag == GLOBAL)
+      {
+        if (is_global)
+          return;
+
+        CHECK_PANIC (! is_pers); // Invalid state
+        CHECK_PANIC (frame_offset == 0);
+
+        // The global ref needs the symbol's name.
+        std::string name;
+        if (internal_offset >= n_compiled_syms ())
+          name = sym_name_from_external_offset (external_offset);
+        else
+          name = m_name_data [internal_offset];
+
+        CHECK_PANIC (name != "");
+
+        // A local ref (pointing into another frame) is marked global in
+        // its owning frame instead of being replaced here.
+        if (ov->is_ref ())
+          {
+            octave_value_ref *r = ov->ref_rep ();
+            if (r->is_local_ref ())
+              r->mark_globalness_in_owning_frame (true);
+            else
+              *ov = octave_value {new octave_value_ref_global {name}};
+          }
+        else
+          *ov = octave_value {new octave_value_ref_global {name}};
+        return;
+      }
+
+    if (flag == PERSISTENT)
+      {
+        if (is_pers)
+          return;
+
+        CHECK_PANIC (! is_global);
+        CHECK_PANIC (frame_offset == 0);
+
+        *ov = octave_value {new octave_value_ref_persistent {get_scope (), static_cast<int> (external_offset)}};
+
+        return;
+      }
+
+    if (flag == LOCAL)
+      {
+        if (! is_global && ! is_pers)
+          return;
+
+        // Clear the global or persistent ref on the stack
+        if (is_global || is_pers)
+          {
+            // Clear the ref in its slot
+            if (ov->is_ref ())
+              {
+                octave_value_ref *r = ov->ref_rep ();
+                if (r->is_local_ref ())
+                  r->mark_globalness_in_owning_frame (false);
+                else
+                  *ov = octave_value {};
+              }
+            else
+              *ov = octave_value {};
+          }
+
+        return;
+      }
+
+    panic ("VM internal error: Strange state: %d", flag);
+  }
+
+  // External-offset wrapper around set_scope_flag_internal ().
+  void set_scope_flag (std::size_t external_offset, scope_flags flag)
+  {
+    std::size_t internal_offset = external_to_local_offset (external_offset);
+
+    set_scope_flag_internal (internal_offset, external_offset, 0, flag);
+  }
+
+  // External-offset wrapper around get_scope_flag_internal ().
+  stack_frame::scope_flags get_scope_flag (std::size_t external_offset) const
+  {
+    return get_scope_flag_internal (external_to_local_offset (external_offset));
+  }
+
+  // Scope flag (LOCAL/GLOBAL/PERSISTENT) of the slot at an internal
+  // offset.  Out-of-range offsets and slots not holding an
+  // octave_value_ref are LOCAL; otherwise the flag is taken from the
+  // ref object itself.
+  stack_frame::scope_flags get_scope_flag_internal (std::size_t internal_offset) const
+  {
+    if (internal_offset >= internal_size ())
+      return LOCAL;
+
+    octave_value *pov;
+
+    // Is the slot on the original bytecode stack frame?
+    if (internal_offset < n_compiled_syms ())
+      {
+        pov = &m_stack_start [internal_offset].ov;
+      }
+    else
+      {
+        size_t extra_offset = internal_offset - n_compiled_syms ();
+        pov = &m_lazy_data->m_extra_slots.at (extra_offset);
+      }
+
+    if (! pov->is_ref ())
+      return LOCAL;
+    return pov->ref_rep ()->get_scope_flag ();
+  }
+};
+
+// Stack frame for an ordinary (non-script, non-nested) user function
+// compiled to bytecode.  Offset translation and slot storage are
+// inherited from bytecode_frame.
+class bytecode_fcn_stack_frame : public bytecode_frame
+{
+public:
+
+  bytecode_fcn_stack_frame () = delete;
+
+  bytecode_fcn_stack_frame (tree_evaluator& tw,
+                            octave_user_code *fcn,
+                            std::size_t index,
+                            const std::shared_ptr<stack_frame>& parent_link,
+                            const std::shared_ptr<stack_frame>& static_link,
+                            vm &vm,
+                            int nargout, int nargin)
+    : bytecode_frame (tw, index, parent_link, static_link,
+                   nullptr, vm, fcn, nargout, nargin)
+  {
+    // Scripts and nested functions have their own frame classes.
+    CHECK_PANIC (! fcn->is_user_script () && ! fcn->is_nested_function());
+  }
+
+  bytecode_fcn_stack_frame&
+  operator = (const bytecode_fcn_stack_frame& elt) = delete;
+
+  bytecode_fcn_stack_frame&
+  operator = (bytecode_fcn_stack_frame&& elt) = delete;
+
+  ~bytecode_fcn_stack_frame ()
+  {
+    // vm_clear_for_cache () need to mirror the dtor
+    dispose ();
+  }
+
+  // Grow the external size of the frame; never shrinks.
+  void resize (std::size_t arg)
+  {
+    int diff = static_cast<int> (arg) - static_cast<int> (size ());
+
+    if (diff > 0)
+      internal_resize (internal_size () + diff);
+  }
+
+  stack_frame::scope_flags scope_flag (const symbol_record& sym) const
+  {
+    std::size_t external_offset = sym.data_offset ();
+    std::size_t frame_offset = sym.frame_offset ();
+
+    // Plain functions never access symbols through a frame offset.
+    CHECK_PANIC (frame_offset == 0);
+
+    return get_scope_flag_internal (external_to_local_offset (external_offset));
+  }
+
+  // We only need to override one of each of these functions.  The
+  // using declaration will avoid warnings about partially-overloaded
+  // virtual functions.
+  using stack_frame::varval;
+  using stack_frame::varref;
+
+  octave_value varval (const symbol_record& sym) const
+  {
+    std::size_t external_offset = sym.data_offset ();
+    std::size_t frame_offset = sym.frame_offset ();
+
+    if (frame_offset >= n_compiled_frame_depth ())
+      return {};
+
+    std::size_t internal_offset = external_to_local_offset (external_offset, frame_offset);
+
+    // If the offset is out of range we return a nil ov. A varref() call would add
+    // an extra slot.
+    if (internal_offset >= internal_size ())
+      return {};
+
+    return varval_internal (internal_offset);
+  }
+
+  octave_value& varref (const symbol_record& sym, bool deref_refs)
+  {
+    std::size_t external_offset = sym.data_offset ();
+    std::size_t frame_offset = sym.frame_offset ();
+
+    CHECK_PANIC (frame_offset < n_compiled_frame_depth ());
+
+    std::size_t local_offset = external_to_local_offset (external_offset, frame_offset);
+
+    // If the offset is out of range we make room for it
+    if (local_offset >= internal_size ())
+      internal_resize (local_offset + 1);
+
+    return varref_internal (local_offset, deref_refs);
+  }
+
+  void mark_scope (const symbol_record& sym,
+                   scope_flags flag)
+  {
+    std::size_t external_offset = sym.data_offset ();
+    std::size_t frame_offset = sym.frame_offset ();
+
+    CHECK_PANIC (frame_offset == 0);
+
+    std::size_t local_offset = external_to_local_offset (external_offset);
+
+    // Make room for the slot if it does not exist yet.
+    if (local_offset >= internal_size ())
+      internal_resize (local_offset + 1);
+
+    set_scope_flag_internal (local_offset, external_offset, frame_offset, flag);
+  }
+
+  bool is_user_fcn_frame () const { return true; }
+
+  // Look NAME up in this frame's scope, then walk the access-link chain
+  // until found.  Returns an invalid symbol_record on failure.
+  symbol_record lookup_symbol (const std::string& name) const
+  {
+    const stack_frame *frame = this;
+
+    while (frame)
+      {
+        symbol_scope scope = frame->get_scope ();
+
+        symbol_record sym = scope.lookup_symbol (name);
+
+        if (sym)
+          return sym;
+
+        std::shared_ptr<stack_frame> nxt = frame->access_link ();
+        frame = nxt.get ();
+      }
+
+    return symbol_record ();
+  }
+
+  // Look NAME up in this frame's scope, inserting it (and allocating a
+  // slot for it) if it does not exist yet.
+  symbol_record insert_symbol (const std::string& name)
+  {
+    symbol_scope scope = get_scope ();
+
+    symbol_record sym = scope.lookup_symbol (name);
+
+    if (!sym)
+      {
+        // If we have not created the extra slots, now is the time
+        lazy_data ();
+
+        sym = scope.find_symbol (name);
+
+        CHECK_PANIC (sym.is_valid ());
+        CHECK_PANIC (sym.frame_offset () == 0);
+
+        unsigned local_offset = external_to_local_offset (sym.data_offset ());
+
+        if (local_offset >= internal_size ())
+          internal_resize (local_offset + 1);
+      }
+
+    return sym;
+  }
+
+  void accept (stack_frame_walker& sfw);
+
+  void display (bool follow = true) const;
+};
+
+
+// Class for bytecode scripts
+//
+// Much of the functionality is delegated to the eval frame
+// (i.e. the top REPL frame or the calling function's frame).
+
+class bytecode_script_stack_frame : public bytecode_frame
+{
+public:
+
+  bytecode_script_stack_frame () = delete;
+
+  bytecode_script_stack_frame (tree_evaluator& tw,
+                               octave_user_code *fcn,
+                               std::size_t index,
+                               const std::shared_ptr<stack_frame>& parent_link,
+                               const std::shared_ptr<stack_frame>& static_link,
+                               vm &vm,
+                               int nargout, int nargin)
+    : bytecode_frame (tw, index, parent_link, static_link,
+                   nullptr, vm, fcn, nargout, nargin)
+  {
+    CHECK_PANIC (fcn->is_user_script () && ! fcn->is_nested_function());
+  }
+
+  bytecode_script_stack_frame&
+  operator = (const bytecode_script_stack_frame& elt) = delete;
+
+  bytecode_script_stack_frame&
+  operator = (bytecode_script_stack_frame&& elt) = delete;
+
+  ~bytecode_script_stack_frame ()
+  {
+    // vm_clear_for_cache () needs to mirror the dtor
+    dispose ();
+  }
+
+  stack_frame::scope_flags scope_flag (const symbol_record& sym) const
+  {
+    // Delegate the problem to the eval frame
+    auto sym_in_root = access_link ()->lookup_symbol (sym.name ());
+    if (! sym_in_root)
+      return LOCAL; // Not found, return nil
+
+    return access_link ()->scope_flag (sym_in_root);
+  }
+
+  // We only need to override one of each of these functions.  The
+  // using declaration will avoid warnings about partially-overloaded
+  // virtual functions.
+  using stack_frame::varval;
+  using stack_frame::varref;
+
+  octave_value varval (const symbol_record& sym) const
+  {
+    // Delegate the problem to the eval frame
+    auto sym_in_root = access_link ()->lookup_symbol (sym.name ());
+    if (! sym_in_root)
+      return {}; // Not found, return nil
+
+    return access_link ()->varval (sym_in_root);
+  }
+
+  octave_value& varref (const symbol_record& sym, bool deref_refs)
+  {
+    // Delegate the problem to the eval frame
+    auto sym_in_root = access_link ()->insert_symbol (sym.name ());
+    CHECK_PANIC (sym_in_root.frame_offset () == 0);
+
+    return access_link ()->varref (sym_in_root, deref_refs);
+  }
+
+  void mark_scope (const symbol_record& sym,
+                   scope_flags flag)
+  {
+    // Delegate the problem to the eval frame
+    auto sym_in_root = access_link ()->insert_symbol (sym.name ());
+    CHECK_PANIC (sym_in_root.frame_offset () == 0);
+
+    // TODO: script_stack_frame seems to error if frame offset is non-zero ->
+    //   if (frame_offset > 1)
+    //     error ("variables must be made PERSISTENT or GLOBAL in the first scope in which they are used");
+
+    return access_link ()->mark_scope (sym_in_root, flag);
+  }
+
+  bool is_user_script_frame () const { return true; }
+
+  // Look NAME up in the script's own scope first, then in the eval
+  // frame.  A symbol found in the eval frame is returned with its frame
+  // offset bumped by one so callers resolve it through the access link.
+  symbol_record lookup_symbol (const std::string& name) const
+  {
+    symbol_scope scope = get_scope ();
+
+    symbol_record sym = scope.lookup_symbol (name);
+
+    if (sym)
+      {
+        CHECK_PANIC (sym.frame_offset () == 0);
+        return sym;
+      }
+
+    // Lookup in the eval frame
+    sym = m_access_link->lookup_symbol (name);
+
+    // Return symbol record with adjusted frame offset.
+    symbol_record new_sym = sym.dup ();
+
+    new_sym.set_frame_offset (sym.frame_offset () + 1);
+
+    return new_sym;
+  }
+
+  // Like lookup_symbol (), but inserts NAME in the eval frame when it is
+  // not present in the script's own scope.
+  symbol_record insert_symbol (const std::string& name)
+  {
+    symbol_scope scope = get_scope ();
+
+    symbol_record sym = scope.lookup_symbol (name);
+
+    if (sym)
+      {
+        CHECK_PANIC(sym.frame_offset () == 0);
+        return sym;
+      }
+
+    // Insert the symbol in the eval frame
+    sym = m_access_link->insert_symbol (name);
+
+    // Return symbol record with adjusted frame offset.
+    symbol_record new_sym = sym.dup ();
+
+    new_sym.set_frame_offset (sym.frame_offset () + 1);
+
+    return new_sym;
+  }
+
+  void accept (stack_frame_walker& sfw);
+
+  void display (bool follow = true) const;
+
+  // Called when the VM enters this script frame.  Moves the user-local
+  // values of the eval frame (and, if the caller is a bytecode frame
+  // other than the eval frame, the caller's values too) into the frames
+  // that will execute, leaving octave_value_ref_vmlocal forwarding
+  // objects behind so the moved values remain reachable by name.
+  void vm_enter_script ()
+  {
+    CHECK_PANIC (m_fcn->is_user_script ());
+    // Check that there are no "extra slots" in the current frame. Those should have been added to the eval frame.
+    CHECK_PANIC (!(m_lazy_data && m_lazy_data->m_extra_slots.size () != 0));
+
+    lazy_data ().m_is_script = true;
+
+    auto eval_frame = access_link ();
+    auto parent_frame = static_link ();
+
+    // Set nargin to match the value of nargin in the eval frame
+    set_nargin (eval_frame->get_auto_fcn_var (stack_frame::NARGIN).int_value()); // TODO: Kinda wasteful fn calls
+
+    bool caller_is_eval_frame = eval_frame == parent_frame;
+    bool eval_frame_is_bytecode = eval_frame->is_bytecode_fcn_frame ();
+
+    // If the parent frame is a bytecode frame, and not the eval frame, we need to
+    // move the parent frame's values to the eval frame
+    if (!caller_is_eval_frame && parent_frame->is_bytecode_fcn_frame ())
+      {
+        auto *parent_frame_bc = static_cast<bytecode_fcn_stack_frame*> (parent_frame.get ());
+
+        // Check that there are no "extra slots" in the parent frame. Those should have been added to the eval frame
+        CHECK_PANIC (!(parent_frame_bc->m_lazy_data && parent_frame_bc->m_lazy_data->m_extra_slots.size () != 0));
+
+        // Move all user symbol values from the parent frame to the eval frame.
+        // Replace the values in the parent frame with a pointer-like object "octave_value_ref_vmlocal"
+        // pointing to the eval frame.
+        for (const auto &kv : parent_frame_bc->m_unwind_data->m_map_user_locals_names_to_slot)
+          {
+            const std::string &id_name = kv.first;
+            symbol_record sr_eval = eval_frame->lookup_symbol (id_name);
+            if (!sr_eval.is_valid ())
+              sr_eval = eval_frame->insert_symbol (id_name);
+            eval_frame->varref (sr_eval, false); // A bit silly, but allocates space for it
+
+            // We need to use the varref(size_t) since it gets in "behind" any global value in
+            // top scope, directly to the m_value vector. While the varref(symbol_record) returns
+            // a ref to the global value itself. Unless the frame offset is set, in which case we
+            // need to use the varref(symbol_record) variant to walk access frames properly. E.g.
+            // 'nest.tst' need frame offset when a script tries to access a variable from a nested
+            // function.
+            // TODO: Fix this hack
+            octave_value *ov_eval = sr_eval.frame_offset () ?
+                                      &eval_frame->varref (sr_eval, false) :
+                                      &eval_frame->varref (sr_eval.data_offset (), false);
+
+            symbol_record sr_parent = parent_frame->lookup_symbol (id_name); // TODO: Store slot nr instead?
+            CHECK_PANIC (sr_parent.is_valid () && sr_parent.frame_offset () == 0);
+            octave_value &ov_parent = parent_frame->varref (sr_parent.data_offset (), false);
+
+            // Assert that the ov in the parent is not a local vm ref, as that would be a leak.
+            // Unless the current frame has been moved by e.g. evalin() in which case there could
+            // be local vm ref:s in the eval frame from a different call chain than the current one.
+            CHECK_PANIC (!(ov_parent.is_ref () && ov_parent.ref_rep ()->is_local_ref ()) || !stacks_in_order ());
+
+            bool is_global_in_eval_frame = eval_frame->is_global (sr_eval);
+            bool is_global_in_parent_frame = parent_frame->is_global (sr_parent);
+            CHECK_PANIC (is_global_in_eval_frame == is_global_in_parent_frame);
+
+            if (is_global_in_parent_frame)
+              CHECK_PANIC (ov_parent.is_ref () && ov_parent.ref_rep ()->is_global_ref ());
+
+            if (!is_global_in_parent_frame || eval_frame_is_bytecode)
+              *ov_eval = ov_parent;
+            else
+              *ov_eval = {};
+
+            ov_parent = octave_value {new octave_value_ref_vmlocal {sr_eval, eval_frame.get ()}};
+          }
+      }
+
+    // Move all user symbols from the eval frame to the current frame we are entering.
+    // Replace the moved values in the eval frame with a pointer-like object "octave_value_ref_vmlocal"
+    // pointing to the current frame.
+    for (const auto &kv : m_unwind_data->m_map_user_locals_names_to_slot)
+      {
+        const std::string &id_name = kv.first;
+        symbol_record sr_eval = eval_frame->lookup_symbol (id_name);
+        if (!sr_eval.is_valid ())
+          sr_eval = eval_frame->insert_symbol (id_name);
+        eval_frame->varref (sr_eval, false); // A bit silly, but allocates space for it
+
+        octave_value *ov_eval = sr_eval.frame_offset () ?
+                                  &eval_frame->varref (sr_eval, false) :
+                                  &eval_frame->varref (sr_eval.data_offset (), false);
+
+        symbol_record sr_current = lookup_symbol (id_name);
+        CHECK_PANIC (sr_current.is_valid () && sr_current.frame_offset () == 0);
+        octave_value &ov_current = varref (sr_current.data_offset (), false);
+
+        CHECK_PANIC (!(ov_current.is_ref () && ov_current.ref_rep ()->is_local_ref ()) || !stacks_in_order ());
+        CHECK_PANIC (!(ov_eval->is_ref () && ov_eval->ref_rep ()->is_local_ref ()) || !stacks_in_order ());
+
+        bool is_global_in_eval_frame = eval_frame->is_global (sr_eval);
+
+        // Globals are represented locally by a fresh global ref rather
+        // than by moving the eval frame's value.
+        if (is_global_in_eval_frame)
+          ov_current = octave_value {new octave_value_ref_global {id_name}};
+        else
+          ov_current = *ov_eval;
+
+        *ov_eval = octave_value {new octave_value_ref_vmlocal {sr_current, this}};
+      }
+  }
+
+  // Called when the VM leaves this script frame.  Reverses
+  // vm_enter_script (): user-local values are moved back to the eval
+  // frame, and values the parent bytecode frame owns are restored to it.
+  void vm_exit_script ()
+  {
+    if (!m_fcn->is_user_script ()) // Nothing to do for non-script frames
+      return;
+
+    // Restore values from the VM stack frame to the original frame
+
+    // Check that there are no "extra slots" in the current frame. Those should have been added to the eval frame.
+    CHECK_PANIC (!(m_lazy_data && m_lazy_data->m_extra_slots.size () != 0));
+
+    lazy_data ().m_is_script = true;
+
+    auto eval_frame = access_link ();
+    auto parent_frame = static_link ();
+
+    bool caller_is_eval_frame = eval_frame == parent_frame;
+    bool eval_frame_is_bytecode = eval_frame->is_bytecode_fcn_frame ();
+
+    // Move all user symbols from the current frame to the eval frame.
+    for (const auto &kv : m_unwind_data->m_map_user_locals_names_to_slot)
+      {
+        const std::string &id_name = kv.first;
+        symbol_record sr_eval = eval_frame->lookup_symbol (id_name);
+        CHECK_PANIC (sr_eval.is_valid ());
+
+        octave_value *ov_eval = sr_eval.frame_offset () ?
+                                  &eval_frame->varref (sr_eval, false) :
+                                  &eval_frame->varref (sr_eval.data_offset (), false);
+
+        symbol_record sr_current = lookup_symbol (id_name);
+        CHECK_PANIC (sr_current.is_valid () && sr_current.frame_offset () == 0);
+        octave_value &ov_current = varref (sr_current.data_offset (), false);
+
+        CHECK_PANIC (!(ov_current.is_ref () && ov_current.ref_rep ()->is_local_ref ()) || !stacks_in_order ());
+
+        bool is_global_in_eval_frame = eval_frame->is_global (sr_eval);
+        bool is_global_in_current_frame = is_global (sr_current);
+        CHECK_PANIC (is_global_in_eval_frame == is_global_in_current_frame);
+
+        if (is_global_in_current_frame)
+          CHECK_PANIC (ov_current.is_ref () && ov_current.ref_rep ()->is_global_ref ());
+
+        if (!is_global_in_current_frame || eval_frame_is_bytecode)
+          *ov_eval = ov_current;
+        else
+          *ov_eval = {};
+
+        ov_current = {};
+      }
+
+    // Move all values the parent frame needs to it from the eval frame,
+    // if the parent frame is a bytecode frame.
+    if (!caller_is_eval_frame && parent_frame->is_bytecode_fcn_frame ())
+      {
+        auto *parent_frame_bc = static_cast<bytecode_fcn_stack_frame*> (parent_frame.get ());
+
+        // Check that there are no "extra slots" in the parent frame. Those should have been added to the eval frame
+        CHECK_PANIC (!(parent_frame_bc->m_lazy_data && parent_frame_bc->m_lazy_data->m_extra_slots.size () != 0));
+
+        // Move all values the parent frame needs to it from the eval frame.
+        // In the eval frame, put a pointer-like object "octave_value_ref_vmlocal"
+        // pointing to the parent frame
+        for (const auto &kv : parent_frame_bc->m_unwind_data->m_map_user_locals_names_to_slot)
+          {
+            const std::string &id_name = kv.first;
+            symbol_record sr_eval = eval_frame->lookup_symbol (id_name);
+            CHECK_PANIC (sr_eval.is_valid ());
+            octave_value *ov_eval = sr_eval.frame_offset () ?
+                                      &eval_frame->varref (sr_eval, false) :
+                                      &eval_frame->varref (sr_eval.data_offset (), false);
+
+            symbol_record sr_parent = parent_frame->lookup_symbol (id_name); // TODO: Store slot nr instead?
+            CHECK_PANIC (sr_parent.is_valid () && sr_parent.frame_offset () == 0);
+            octave_value &ov_parent = parent_frame->varref (sr_parent.data_offset (), false);
+
+            CHECK_PANIC (!(ov_eval->is_ref () && ov_eval->ref_rep ()->is_local_ref ()) || !stacks_in_order ());
+
+            bool is_global_in_eval_frame = eval_frame->is_global (sr_eval);
+            bool is_global_in_parent_frame = parent_frame->is_global (sr_parent);
+            CHECK_PANIC (is_global_in_eval_frame == is_global_in_parent_frame);
+
+            if (!is_global_in_parent_frame || eval_frame_is_bytecode)
+              ov_parent = *ov_eval;
+            else
+              ov_parent = octave_value {new octave_value_ref_global {id_name}};
+
+            *ov_eval = octave_value {new octave_value_ref_vmlocal {sr_parent, parent_frame.get ()}};
+          }
+      }
+  }
+
+private:
+
+  // Returns true if the stack frames under this frame are in order, i.e.
+  // there is no active evalin(), dbup/dbdown or similar.
+  bool stacks_in_order ()
+  {
+    auto frame = parent_link ();
+    unsigned expected_idx = index ();
+
+    while (frame)
+      {
+        if (frame->index () != expected_idx)
+          return false;
+
+        frame = frame->parent_link ();
+        expected_idx--;
+      }
+
+    return true;
+  }
+};
+
+class bytecode_nested_fcn_stack_frame : public bytecode_frame
+{
+public:
+
+  bytecode_nested_fcn_stack_frame () = delete;
+
+  // Construct a frame for a nested bytecode function.  ACCESS_LINK is
+  // non-null when calling through a function-handle closure; otherwise
+  // the access frame is computed from the static link by
+  // get_access_link ().
+  bytecode_nested_fcn_stack_frame (tree_evaluator& tw,
+                                   octave_user_code *fcn,
+                                   std::size_t index,
+                                   const std::shared_ptr<stack_frame>& parent_link,
+                                   const std::shared_ptr<stack_frame>& static_link,
+                                   const std::shared_ptr<stack_frame>& access_link,
+                                   vm &vm,
+                                   int nargout, int nargin)
+    : bytecode_frame (tw, index, parent_link, static_link,
+                      // If a access_link is provided, we are dealing with a closure from a function handle.
+                      // Otherwise find the access frame with get_access_link()
+                      access_link ? access_link : get_access_link (static_cast<octave_user_function*> (fcn), static_link),
+                    vm, fcn, nargout, nargin)
+  { }
+
+  ~bytecode_nested_fcn_stack_frame ()
+  {
+    // vm_clear_for_cache () needs to mirror the dtor
+    dispose ();
+  }
+
+  // Compute the access link (the frame whose variables a nested function
+  // may see) for FCN given the caller's STATIC_LINK, based on the
+  // relative nesting depths of the two scopes.  Errors out on an
+  // invalid call-stack configuration.
+  static std::shared_ptr<stack_frame>
+  get_access_link (octave_user_function *fcn,
+                   const std::shared_ptr<stack_frame>& static_link)
+  {
+    std::shared_ptr<stack_frame> alink;
+
+    symbol_scope fcn_scope = fcn->scope ();
+
+    CHECK_PANIC(fcn_scope.is_nested ());
+
+    if (! static_link)
+      error ("internal call stack error (invalid static link)");
+
+    symbol_scope caller_scope = static_link->get_scope ();
+
+    int nesting_depth = fcn_scope.nesting_depth ();
+    int caller_nesting_depth = caller_scope.nesting_depth ();
+
+    if (caller_nesting_depth < nesting_depth)
+      {
+        // FIXME: do we need to ensure that the called
+        // function is a child of the caller?  Does it hurt
+        // to panic_unless this condition, at least for now?
+
+        alink = static_link;
+      }
+    else
+      {
+        // FIXME: do we need to check that the parent of the
+        // called function is also a parent of the caller?
+        // Does it hurt to panic_unless this condition, at least
+        // for now?
+
+        // Walk up the access-link chain far enough to reach a frame at
+        // the nesting depth just above the callee's.
+        int links_to_follow = caller_nesting_depth - nesting_depth + 1;
+
+        alink = static_link;
+
+        while (alink && --links_to_follow >= 0)
+          alink = alink->access_link ();
+
+        if (! alink)
+          error ("internal function nesting error (invalid access link)");
+      }
+
+    return alink;
+  }
+
+  // Grow the external size of the frame; never shrinks.
+  void resize (std::size_t arg)
+  {
+    int diff = static_cast<int> (arg) - static_cast<int> (size ());
+
+    if (diff > 0)
+      internal_resize (internal_size () + diff);
+  }
+
+  stack_frame::scope_flags scope_flag (const symbol_record& sym) const
+  {
+    std::size_t external_offset = sym.data_offset ();
+    std::size_t frame_offset = sym.frame_offset ();
+
+    std::size_t internal_offset;
+
+    // Note, quirk: A variable that is not added to a nested frame is not to be reported as global
+    // even though it is global in the parent frame.
+
+    if (frame_offset)
+      {
+        auto it = m_unwind_data->m_external_frame_offset_to_internal.at (frame_offset).find (external_offset);
+        bool found = it != m_unwind_data->m_external_frame_offset_to_internal[frame_offset].end ();
+        if (! found)
+          return LOCAL;
+
+        internal_offset = it->second;
+      }
+    else
+      internal_offset = external_to_local_offset (external_offset);
+
+    return get_scope_flag_internal (internal_offset);
+  }
+
+  // We only need to override one of each of these functions.  The
+  // using declaration will avoid warnings about partially-overloaded
+  // virtual functions.
+  using stack_frame::varval;
+  using stack_frame::varref;
+
+  octave_value varval (const symbol_record& sym) const
+  {
+    std::size_t external_offset = sym.data_offset ();
+    std::size_t frame_offset = sym.frame_offset ();
+
+    if (frame_offset >= n_compiled_frame_depth ())
+      return {};
+
+    if (frame_offset)
+      {
+        auto access_frame = get_ith_access_link (frame_offset - 1);
+        CHECK_PANIC (access_frame);
+
+        return access_frame->varval (external_offset);
+      }
+
+    std::size_t internal_offset = external_to_local_offset (external_offset, frame_offset);
+
+    // If the offset is out of range we return a nil ov. A varref() call would add
+    // an extra slot.
+    if (internal_offset >= internal_size ())
+      return {};
+
+    return varval_internal (internal_offset);
+  }
+
+  octave_value& varref (const symbol_record& sym, bool deref_refs)
+  {
+    std::size_t external_offset = sym.data_offset ();
+    std::size_t frame_offset = sym.frame_offset ();
+
+    CHECK_PANIC (frame_offset < n_compiled_frame_depth ());
+
+    if (frame_offset)
+      {
+        auto access_frame = get_ith_access_link (frame_offset - 1);
+        CHECK_PANIC (access_frame);
+
+        return access_frame->varref (external_offset, deref_refs);
+      }
+
+    std::size_t local_offset = external_to_local_offset (external_offset, frame_offset);
+
+    // If the offset is out of range we make room for it
+    if (local_offset >= internal_size ())
+      internal_resize (local_offset + 1);
+
+    return varref_internal (local_offset, deref_refs);
+  }
+
+  void mark_scope (const symbol_record& sym,
+                   scope_flags flag)
+  {
+    std::size_t external_offset = sym.data_offset ();
+    std::size_t frame_offset = sym.frame_offset ();
+
+    if (frame_offset > 0 && (flag == PERSISTENT || flag == GLOBAL))
+      error ("variables must be made PERSISTENT or GLOBAL in the first scope in which they are used");
+    else if (frame_offset > 0)
+      return;
+
+    std::size_t local_offset = external_to_local_offset (external_offset, frame_offset);
+
+    if (local_offset >= internal_size ())
+      internal_resize (local_offset + 1);
+
+    set_scope_flag_internal (local_offset, external_offset, frame_offset, flag);
+  }
+
+  bool is_user_fcn_frame () const { return true; }
+
+  symbol_record lookup_symbol (const std::string& name) const
+  {
+    // Search the "scope" object of this and any nested frame
+    // The scope object will have e.g. variables added by scripts, global declares or eval
+    const stack_frame *frame = this;
+    std::size_t frame_cntr = 0;
+    while (frame)
+      {
+        symbol_scope scope = frame->get_scope ();
+
+        symbol_record sym = scope.lookup_symbol (name);
+
+        if (sym)
+          {
+            // Return symbol record with adjusted frame offset relative
+            // to the one lookup is done on, i.e. the 'this' frame.
+            symbol_record new_sym = sym.dup ();
+            new_sym.set_frame_offset (new_sym.frame_offset () + frame_cntr);
+            return new_sym;
+          }
+
+        std::shared_ptr<stack_frame> nxt = frame->access_link ();
+        frame = nxt.get ();
+        frame_cntr++;
+      }
+
+    return symbol_record ();
+  }
+
+  symbol_record insert_symbol (const std::string& name)
+  {
+    symbol_scope scope = get_scope ();
+
+    symbol_record sym = scope.lookup_symbol (name);
+
+    if (! sym)
+      {
+        // If we have not created the extra slots, now is the time
+        lazy_data ();
+
+        sym = scope.find_symbol (name);
+
+        CHECK_PANIC (sym.is_valid ());
+        CHECK_PANIC (sym.frame_offset () == 0);
+
+        unsigned local_offset = external_to_local_offset (sym.data_offset ());
+
+        if (local_offset >= internal_size ())
+          internal_resize (local_offset + 1);
+      }
+
+    return sym;
+  }
+
+  void accept (stack_frame_walker& sfw);
+
+  void display (bool follow = true) const;
+
+  void vm_enter_nested ()
+  {
+    // We have multiple scenarios.
+    //
+    // 1. The parents are bytecode functions in the order
+    //    according to the nesting of nested functions. This should be the
+    //    most common use. Self-recursive calls are handled too.
+    //
+    // 2. If a nested function calls a sibling nested function, the parent frames
+    //    which are not direct parents to the nested sibling function need to be skipped
+    //    while searching for the matriarch frame.
+    //
+    // 3. A nested function is called via a handle.
+
+    bool is_direct_call = true;
+
+    auto parent_frame = static_link ();
+
+    bytecode_frame *child_bc_frame = this;
+
+    // Walk the parent(s) to see if they are in direct order and all bytecode frames.
+    // Collect the bsp. (base stack pointer).
+
+    // TODO: There might be a need to check that all children share access frame?
+    int n_nested_depth = m_unwind_data->m_n_nested_fn;
+    std::vector<stack_element*> v_parent_bsps;
+    int i;
+    for (i = 0; i < n_nested_depth; i++)
+      {
+        bool collect_frame_bsp = true;
+        bool forward_child_ptr = true;
+
+        if (! parent_frame->is_bytecode_fcn_frame ())
+          {
+            is_direct_call = false;
+            break;
+          }
+
+        auto *parent_bc_frame = static_cast<bytecode_frame*> (parent_frame.get ());
+
+        // Recursive self call?
+        if (parent_bc_frame->m_unwind_data->m_id == child_bc_frame->m_unwind_data->m_id)
+          {
+            collect_frame_bsp = false;
+            i--;
+          }
+        // Not direct parent?
+        else if (parent_bc_frame->m_unwind_data->m_id != child_bc_frame->m_unwind_data->m_parent_id)
+          {
+            // Sibling or their children?
+            if (parent_bc_frame->m_unwind_data->m_matriarch_id == child_bc_frame->m_unwind_data->m_matriarch_id)
+              {
+                collect_frame_bsp = false;
+                forward_child_ptr = false; // Keep looking for the parent of the current child ptr
+                i--;
+              }
+            else
+              {
+                // At the last depth the access frame might be the final parent.
+                if (i + 1 != n_nested_depth)
+                  {
+                    is_direct_call = false;
+                    break;
+                  }
+
+                auto access_frame = access_link ();
+
+                if (! access_frame || ! access_frame->is_bytecode_fcn_frame ())
+                  {
+                    is_direct_call = false;
+                    break;
+                  }
+
+                auto access_bc_frame = static_cast<bytecode_fcn_stack_frame*> (access_frame.get ());
+                // Check that the access frame is the matriarch of the current frame.
+                if (access_bc_frame->m_unwind_data->m_id != m_unwind_data->m_matriarch_id)
+                  {
+                    is_direct_call = false;
+                    break;
+                  }
+
+                // The access frame is the last parent
+                parent_bc_frame = access_bc_frame;
+              }
+          }
+
+        // We don't collect recursive self-calls' or siblings' or siblings' childrens' frames
+        if (collect_frame_bsp)
+          v_parent_bsps.push_back (parent_bc_frame->m_stack_start);
+
+        // Skip copying the shared pointer if we don't need it as there is no next iteration
+        // since shared_ptrs are quite expensive to use.
+        if (i + 1 == n_nested_depth)
+          break;
+
+        parent_frame = parent_frame->static_link ();
+        if (forward_child_ptr)
+          child_bc_frame = parent_bc_frame;
+      }
+
+    if (is_direct_call)
+      {
+        for (unwind_data::nested_var_offset &d : m_unwind_data->m_v_nested_vars)
+        {
+          int parent_slot = d.m_slot_parent;
+          int nested_slot = d.m_slot_nested;
+          int depth = d.m_depth;
+
+          stack_element *owner_bsp = v_parent_bsps.at (depth - 1);
+
+          octave_value &orig_ov = owner_bsp[parent_slot].ov; // On the parent stack
+          octave_value &nested_ov = m_stack_start[nested_slot].ov; // On the current stack
+
+          // If the ov on the parent stack is a pointer reference we need to follow it.
+          if (orig_ov.is_ref ())
+            {
+              auto ref_rep = orig_ov.ref_rep ();
+              if (ref_rep->is_ptr_ref ())
+                orig_ov = ref_rep->ref ();
+            }
+
+          CHECK_PANIC (&orig_ov != &nested_ov);
+
+          // Make the nested ov reference the ov on the parent stack.
+          nested_ov = new octave_value_ref_ptr (&orig_ov);
+        }
+      }
+    else
+      {
+        // For a nested function at nesting depth n we need to collect n
+        // access links.
+        auto first_context_frame = access_link ();
+        CHECK_PANIC (first_context_frame);
+
+        std::vector<decltype(first_context_frame)> v_frames {std::move (first_context_frame)};
+
+        // TODO: Need to make test cases to verify that this works for nested nested function handles
+        //       used in functions other than the original root function.
+        for (int j = 1; j < m_unwind_data->m_n_nested_fn; j++) // 1, since first one already added
+          {
+            auto &upper_frame = v_frames.back ();
+            auto lower_frame = upper_frame->access_link ();
+
+            CHECK_PANIC (lower_frame);
+            v_frames.push_back (lower_frame);
+          }
+
+        // For each variable that refer to variables on the parent frames, we need to link
+        // the local variable of the current frame to the correct slot on the parents'.
+        for (unwind_data::nested_var_offset &d : m_unwind_data->m_v_nested_vars)
+          {
+            int parent_slot = d.m_slot_parent;
+            int nested_slot = d.m_slot_nested;
+            int depth = d.m_depth;
+
+            octave_value &nested_ov = m_stack_start[nested_slot].ov; // On the current stack
+
+            auto &context_frame = v_frames.at (depth - 1);
+            auto context_scope = context_frame->get_scope ();
+
+            auto sym = context_scope.find_symbol (m_name_data [nested_slot]);
+            CHECK_PANIC (sym.is_valid ());
+
+            // For bytecode frames we just do a pointer octave_value object referring to
+            // an address in memory. For other frames we need to access it via the dynamic frame pointer
+            // (since the memory in those can move around)
+            if (context_frame->is_bytecode_fcn_frame ())
+              {
+                auto *context_bc_frame = static_cast<bytecode_fcn_stack_frame*> (context_frame.get ());
+                auto owner_bsp = context_bc_frame->m_stack_start;
+                octave_value &orig_ov = owner_bsp[parent_slot].ov; // On the parent stack
+
+                // If the ov on the parent stack is a pointer reference we need to follow it.
+                if (orig_ov.is_ref ())
+                  {
+                    auto ref_rep = orig_ov.ref_rep ();
+                    if (ref_rep->is_ptr_ref ())
+                      orig_ov = ref_rep->ref ();
+                  }
+
+                CHECK_PANIC (&orig_ov != &nested_ov);
+
+                nested_ov = new octave_value_ref_ptr (&orig_ov); // Pointer object octave_value_ref_ptr to parent stack
+              }
+            else
+              nested_ov = new octave_value_ref_vmlocal (sym, context_frame.get ());
+          }
+      }
+  }
+};
+
+void bytecode_fcn_stack_frame::accept (stack_frame_walker& sfw)
+{
+  sfw.visit_bytecode_fcn_stack_frame (*this);
+}
+
+void bytecode_script_stack_frame::accept (stack_frame_walker& sfw)
+{
+  sfw.visit_bytecode_script_stack_frame (*this);
+}
+
+void bytecode_nested_fcn_stack_frame::accept (stack_frame_walker& sfw)
+{
+  sfw.visit_bytecode_nested_fcn_stack_frame (*this);
+}
+
+void bytecode_fcn_stack_frame::display (bool) const
+{
+  std::ostream& os = octave_stdout;
+
+  os << "-- [bytecode_fcn_stack_frame] (" << this << ") --" << std::endl;
+
+  os << "fcn: " << m_fcn->name ()
+     << " (" << m_fcn->type_name () << ")" << std::endl;
+
+  display_scope (os, get_scope ());
+}
+
+void bytecode_script_stack_frame::display (bool) const
+{
+  std::ostream& os = octave_stdout;
+
+  os << "-- [bytecode_script_stack_frame] (" << this << ") --" << std::endl;
+
+  os << "fcn: " << m_fcn->name ()
+     << " (" << m_fcn->type_name () << ")" << std::endl;
+
+  display_scope (os, get_scope ());
+}
+
+void bytecode_nested_fcn_stack_frame::display (bool) const
+{
+  std::ostream& os = octave_stdout;
+
+  os << "-- [bytecode_nested_fcn_stack_frame] (" << this << ") --" << std::endl;
+
+  os << "fcn: " << m_fcn->name ()
+     << " (" << m_fcn->type_name () << ")" << std::endl;
+
+  display_scope (os, get_scope ());
+}
+
+#endif
+
 class compiled_fcn_stack_frame : public stack_frame
 {
 public:
@@ -184,12 +1871,20 @@
     return m_static_link->varval (sym);
   }
 
-  octave_value& varref (const symbol_record& sym)
+  octave_value& varref (const symbol_record& sym
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+                        , bool deref_refs
+#endif
+                        )
   {
     // Look in closest stack frame that contains values (either the
     // top scope, or a user-defined function or script).
 
-    return m_static_link->varref (sym);
+    return m_static_link->varref (sym
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+                                  , deref_refs
+#endif
+                                  );
   }
 
   std::string inputname (int n, bool ids_only) const
@@ -323,7 +2018,11 @@
 
   octave_value varval (const symbol_record& sym) const;
 
-  octave_value& varref (const symbol_record& sym);
+  octave_value& varref (const symbol_record& sym
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+                        , bool deref_refs
+#endif
+                        );
 
   std::string inputname (int n, bool ids_only) const
   {
@@ -417,11 +2116,51 @@
 
   void set_scope_flag (std::size_t data_offset, scope_flags flag)
   {
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+
+    bool was_global = m_flags.at (data_offset) == scope_flags::GLOBAL;
+
+#endif
+
     m_flags.at (data_offset) = flag;
+
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+
+    bool is_global = flag == scope_flags::GLOBAL;
+
+    // If the VM is running scripts it places octave_value_ref objects in
+    // the top scope to "steal" the variables from it to be able to keep the
+    // canonical copy on its active stack frame. When it unwinds, the top scope
+    // gets the value back.
+    //
+    // If e.g. evalin () changes the globalness state of a symbol in the top scope,
+    // the VM needs to be notified, if the variable is on the VM stack.
+    if (was_global != is_global)
+      {
+        octave_value ov = m_values.at (data_offset);
+        // Only the VM spreads ref objects around, so this is only true if the VM is running
+        if (ov.is_ref ())
+          {
+            octave_value cpy = ov;
+            // Pop the value in the top scope to avoid recursive loop, since mark_globalness_in_owning_frame()
+            // will call mark_global (), which will walk the stack frames down to the root.
+            m_values.at (data_offset) = octave_value {};
+            octave_value_ref *ref = cpy.ref_rep ();
+            ref->mark_globalness_in_owning_frame (is_global);
+            // We need the ref back in place if the flag changes again
+            m_values.at (data_offset) = cpy;
+          }
+      }
+
+#endif
   }
 
   octave_value get_auto_fcn_var (auto_var_type avt) const
   {
+    if (avt != stack_frame::auto_var_type::ARG_NAMES)
+      return m_auto_vars.at (avt);
+    if (m_parent_link->is_bytecode_fcn_frame ())
+      return m_parent_link->get_active_bytecode_call_arg_names ();
     return m_auto_vars.at (avt);
   }
 
@@ -441,7 +2180,11 @@
     return m_values.at (data_offset);
   }
 
-  octave_value& varref (std::size_t data_offset)
+  octave_value& varref (std::size_t data_offset
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+                        , bool
+#endif
+                        )
   {
     return m_values.at (data_offset);
   }
@@ -564,7 +2307,11 @@
 
   octave_value varval (const symbol_record& sym) const;
 
-  octave_value& varref (const symbol_record& sym);
+  octave_value& varref (const symbol_record& sym
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+                        , bool deref_refs
+#endif
+                        );
 
   std::string inputname (int n, bool ids_only) const;
 
@@ -636,7 +2383,11 @@
 
   octave_value varval (const symbol_record& sym) const;
 
-  octave_value& varref (const symbol_record& sym);
+  octave_value& varref (const symbol_record& sym
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+                        , bool deref_refs
+#endif
+                        );
 
   std::string inputname (int, bool) const
   {
@@ -724,6 +2475,40 @@
       alink->accept (*this);
   }
 
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+
+  void visit_bytecode_fcn_stack_frame (bytecode_fcn_stack_frame& frame)
+  {
+    clean_frame (frame);
+
+    std::shared_ptr<stack_frame> alink = frame.access_link ();
+
+    if (alink)
+      alink->accept (*this);
+  }
+
+#endif
+
+  void visit_bytecode_script_stack_frame (bytecode_script_stack_frame& frame)
+  {
+    clean_frame (frame);
+
+    std::shared_ptr<stack_frame> alink = frame.access_link ();
+
+    if (alink)
+      alink->accept (*this);
+  }
+
+  void visit_bytecode_nested_fcn_stack_frame (bytecode_nested_fcn_stack_frame& frame)
+  {
+    clean_frame (frame);
+
+    std::shared_ptr<stack_frame> alink = frame.access_link ();
+
+    if (alink)
+      alink->accept (*this);
+  }
+
 private:
 
   void maybe_clear_symbol (stack_frame& frame, const symbol_record& sym)
@@ -964,6 +2749,39 @@
       alink->accept (*this);
   }
 
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+
+  void visit_bytecode_fcn_stack_frame (bytecode_fcn_stack_frame& frame)
+  {
+    append_list (frame);
+
+    std::shared_ptr<stack_frame> alink = frame.access_link ();
+
+    if (alink)
+      alink->accept (*this);
+  }
+
+#endif
+
+  void visit_bytecode_script_stack_frame (bytecode_script_stack_frame& frame)
+  {
+    // For scripts, only collect symbol info in the outer most frame
+    std::shared_ptr<stack_frame> alink = frame.access_link ();
+
+    if (alink)
+      alink->accept (*this);
+  }
+
+  void visit_bytecode_nested_fcn_stack_frame (bytecode_nested_fcn_stack_frame& frame)
+  {
+    append_list (frame);
+
+    std::shared_ptr<stack_frame> alink = frame.access_link ();
+
+    if (alink)
+      alink->accept (*this);
+  }
+
 private:
 
   typedef std::pair<std::string, symbol_info_list> syminf_list_elt;
@@ -1131,6 +2949,143 @@
   return new scope_stack_frame (tw, scope, index, parent_link, static_link);
 }
 
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+
+std::shared_ptr<stack_frame> stack_frame::create_bytecode_script (
+                   tree_evaluator& tw,
+                   octave_user_script *fcn,
+                   vm &vm,
+                   std::size_t index,
+                   const std::shared_ptr<stack_frame>& parent_link,
+                   const std::shared_ptr<stack_frame>& static_link,
+                   int nargout, int nargin)
+{
+  bytecode_script_stack_frame *new_frame_raw
+    = new bytecode_script_stack_frame (tw, fcn, index, parent_link, static_link,
+                                       vm, nargout, nargin);
+  std::shared_ptr<stack_frame> frame (new_frame_raw);
+
+  // The bytecode stackframe needs to know if it needs to save away
+  // all the stack variables. So it needs to keep track of whether it is saved
+  // somewhere outside the VM
+  new_frame_raw->m_weak_ptr_to_self = frame;
+
+  std::shared_ptr<stack_frame> eval_frame = static_link;
+
+  while (true)
+    {
+      if (eval_frame->is_user_script_frame ())
+        eval_frame = eval_frame->access_link ();
+      else if (eval_frame->is_bytecode_fcn_frame ())
+        {
+          bytecode_fcn_stack_frame *bcf = static_cast<bytecode_fcn_stack_frame *> (eval_frame.get ());
+          if (bcf->m_lazy_data && bcf->m_lazy_data->m_is_script)
+            eval_frame = eval_frame->access_link ();
+          else
+            break;
+        }
+      else
+        break;
+    }
+
+  frame->m_access_link = eval_frame;
+
+  return frame;
+}
+
+std::shared_ptr<stack_frame> stack_frame::create_bytecode_nested (
+                   tree_evaluator& tw,
+                   octave_user_code *fcn,
+                   vm &vm,
+                   std::size_t index,
+                   const std::shared_ptr<stack_frame>& parent_link,
+                   const std::shared_ptr<stack_frame>& static_link,
+                   const std::shared_ptr<stack_frame>& access_link,
+                   int nargout, int nargin)
+{
+  bytecode_nested_fcn_stack_frame *new_frame_raw
+    = new bytecode_nested_fcn_stack_frame (tw, fcn, index, parent_link, static_link, access_link,
+                                        vm, nargout, nargin);
+  std::shared_ptr<stack_frame> frame (new_frame_raw);
+
+  CHECK_PANIC (frame->m_access_link); // Should always be set
+
+  // The bytecode stackframe needs to know if it needs to save away
+  // all the stack variables. So it needs to keep track of whether it is saved
+  // somewhere outside the VM
+  new_frame_raw->m_weak_ptr_to_self = frame;
+
+  return frame;
+}
+
+std::shared_ptr<stack_frame> stack_frame::create_bytecode_anon (
+                   tree_evaluator& tw,
+                   octave_user_code *fcn,
+                   vm &vm,
+                   std::size_t index,
+                   const std::shared_ptr<stack_frame>& parent_link,
+                   const std::shared_ptr<stack_frame>& static_link,
+                   const std::shared_ptr<stack_frame>& access_link,
+                   int nargout, int nargin)
+{
+  auto new_frame = create_bytecode (tw, fcn, vm, index, parent_link, static_link, nargout, nargin);
+  new_frame->m_access_link = access_link;
+
+  return new_frame;
+}
+
+std::shared_ptr<stack_frame> stack_frame::create_bytecode (
+                   tree_evaluator& tw,
+                   octave_user_code *fcn,
+                   vm &vm,
+                   std::size_t index,
+                   const std::shared_ptr<stack_frame>& parent_link,
+                   const std::shared_ptr<stack_frame>& static_link,
+                   int nargout, int nargin)
+{
+  // If we have any cached shared_ptr to empty bytecode_fcn_stack_frame objects
+  // we use one of those
+  if (vm.m_frame_ptr_cache.size ())
+    {
+      std::shared_ptr<stack_frame> new_frame = std::move (vm.m_frame_ptr_cache.back ());
+      vm.m_frame_ptr_cache.pop_back ();
+
+      bytecode_fcn_stack_frame *p = static_cast<bytecode_fcn_stack_frame*> (new_frame.get ());
+      // Most objects were cleared when the shared_ptr was put into the cache but call the
+      // dtor anyway to be sure.
+      p->~bytecode_fcn_stack_frame ();
+      // Placement new into the storage managed by the shared_ptr
+      new (p) bytecode_fcn_stack_frame (tw, fcn, index, parent_link, static_link, vm, nargout, nargin);
+
+      // The bytecode stackframe needs to know if it needs to save away
+      // all the stack variables. So it needs to keep track of whether it is saved
+      // somewhere outside the VM
+      p->m_weak_ptr_to_self = new_frame;
+
+      CHECK_PANIC(! fcn->is_nested_function ());
+
+      return new_frame;
+    }
+  else
+    {
+      bytecode_fcn_stack_frame *new_frame_raw
+        = new bytecode_fcn_stack_frame (tw, fcn, index, parent_link, static_link,
+                                        vm, nargout, nargin);
+      std::shared_ptr<stack_frame> new_frame (new_frame_raw);
+
+      // The bytecode stackframe needs to know if it needs to save away
+      // all the stack variables. So it needs to keep track of whether it is saved
+      // somewhere outside the VM
+      new_frame_raw->m_weak_ptr_to_self = new_frame;
+
+      CHECK_PANIC(! fcn->is_nested_function ());
+
+      return new_frame;
+    }
+}
+
+#endif
+
 // This function is only implemented and should only be called for
 // user_fcn stack frames.  Anything else indicates an error in the
 // implementation, but we'll simply warn if that happens.
@@ -1393,7 +3348,11 @@
 }
 
 octave_value&
-stack_frame::varref (std::size_t)
+stack_frame::varref (std::size_t
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+                                   , bool
+#endif
+                                   )
 {
   // This function should only be called for user_fcn_stack_frame or
   // scope_stack_frame objects.  Anything else indicates an error in
@@ -2079,7 +4038,11 @@
 }
 
 octave_value&
-script_stack_frame::varref (const symbol_record& sym)
+script_stack_frame::varref (const symbol_record& sym
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+                                          , bool deref_refs
+#endif
+                                          )
 {
   std::size_t frame_offset;
   std::size_t data_offset;
@@ -2102,7 +4065,11 @@
   switch (frame->get_scope_flag (data_offset))
     {
     case LOCAL:
-      return frame->varref (data_offset);
+      return frame->varref (data_offset
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+                            , deref_refs
+#endif
+                            );
 
     case PERSISTENT:
       {
@@ -2422,7 +4389,11 @@
 }
 
 octave_value&
-user_fcn_stack_frame::varref (const symbol_record& sym)
+user_fcn_stack_frame::varref (const symbol_record& sym
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+                                            , bool deref_refs
+#endif
+                                            )
 {
   std::size_t frame_offset = sym.frame_offset ();
   std::size_t data_offset = sym.data_offset ();
@@ -2444,7 +4415,11 @@
   switch (frame->get_scope_flag (data_offset))
     {
     case LOCAL:
-      return frame->varref (data_offset);
+      return frame->varref (data_offset
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+                            , deref_refs
+#endif
+                            );
 
     case PERSISTENT:
       {
@@ -2465,8 +4440,18 @@
 {
   std::string name;
 
-  Array<std::string> arg_names
-    = m_auto_vars.at (stack_frame::ARG_NAMES).cellstr_value ();
+  Array<std::string> arg_names;
+
+  auto parent_frame = parent_link ();
+
+  if (parent_frame && parent_frame->is_bytecode_fcn_frame ())
+    {
+      // The bytecode interpreter does not set ARG_NAMES for called non bytecode functions,
+      // since the bytecode interpreter looks up ARG_NAMES in the calling stack frame.
+      arg_names = parent_frame->get_active_bytecode_call_arg_names ().cellstr_value ();
+    }
+  else
+    arg_names = m_auto_vars.at (stack_frame::ARG_NAMES).cellstr_value ();
 
   if (n >= 0 && n < arg_names.numel ())
     {
@@ -2575,8 +4560,16 @@
   switch (get_scope_flag (data_offset))
     {
     case LOCAL:
-      return m_values.at (data_offset);
-
+      {
+        octave_value ov = m_values.at (data_offset);
+
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+        if (ov.is_ref ())
+          return ov.ref_rep ()->deref ();
+#endif
+
+        return ov;
+      }
     case PERSISTENT:
       return m_scope.persistent_varval (data_offset);
 
@@ -2588,7 +4581,11 @@
 }
 
 octave_value&
-scope_stack_frame::varref (const symbol_record& sym)
+scope_stack_frame::varref (const symbol_record& sym
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+                                         , bool deref_refs
+#endif
+                                         )
 {
   // There is no access link for scope frames, so the frame
   // offset must be zero.
@@ -2601,8 +4598,16 @@
   switch (get_scope_flag (data_offset))
     {
     case LOCAL:
-      return m_values.at (data_offset);
-
+      {
+        octave_value &ov = m_values.at (data_offset);
+
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+        if (deref_refs && ov.is_ref ())
+          return ov.ref_rep ()->ref ();
+#endif
+
+        return ov;
+      }
     case PERSISTENT:
       return m_scope.persistent_varref (data_offset);
 
--- a/libinterp/corefcn/stack-frame.h	Fri Apr 19 18:30:39 2024 +0200
+++ b/libinterp/corefcn/stack-frame.h	Fri Apr 19 12:57:20 2024 -0400
@@ -105,6 +105,10 @@
 
 class stack_frame_walker;
 
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+class vm;
+#endif
+
 class stack_frame
 {
 public:
@@ -182,6 +186,49 @@
           const std::shared_ptr<stack_frame>& parent_link,
           const std::shared_ptr<stack_frame>& static_link);
 
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+
+  // Bytecode function stackframe
+  static std::shared_ptr<stack_frame>
+  create_bytecode (tree_evaluator& tw,
+                   octave_user_code *fcn,
+                   vm &vm,
+                   std::size_t index,
+                   const std::shared_ptr<stack_frame>& parent_link,
+                   const std::shared_ptr<stack_frame>& static_link,
+                   int nargout, int nargin);
+
+  static std::shared_ptr<stack_frame>
+  create_bytecode_nested (tree_evaluator& tw,
+                          octave_user_code *fcn,
+                          vm &vm,
+                          std::size_t index,
+                          const std::shared_ptr<stack_frame>& parent_link,
+                          const std::shared_ptr<stack_frame>& static_link,
+                          const std::shared_ptr<stack_frame>& access_link,
+                          int nargout, int nargin);
+
+  static std::shared_ptr<stack_frame>
+  create_bytecode_anon (tree_evaluator& tw,
+                        octave_user_code *fcn,
+                        vm &vm,
+                        std::size_t index,
+                        const std::shared_ptr<stack_frame>& parent_link,
+                        const std::shared_ptr<stack_frame>& static_link,
+                        const std::shared_ptr<stack_frame>& access_link,
+                        int nargout, int nargin);
+
+  static std::shared_ptr<stack_frame>
+  create_bytecode_script (tree_evaluator& tw,
+                          octave_user_script *fcn,
+                          vm &vm,
+                          std::size_t index,
+                          const std::shared_ptr<stack_frame>& parent_link,
+                          const std::shared_ptr<stack_frame>& static_link,
+                          int nargout, int nargin);
+
+#endif
+
   stack_frame (const stack_frame& elt) = default;
 
   stack_frame& operator = (const stack_frame& elt) = delete;
@@ -196,6 +243,7 @@
   virtual bool is_user_script_frame () const { return false; }
   virtual bool is_user_fcn_frame () const { return false; }
   virtual bool is_scope_frame () const { return false; }
+  virtual bool is_bytecode_fcn_frame () const { return false; }
 
   virtual void clear_values ();
 
@@ -432,6 +480,16 @@
     install_variable (sym, value, global);
   }
 
+  virtual octave_value get_active_bytecode_call_arg_names ()
+  {
+    panic_impossible (); // Only bytecode frames need to implement this
+  }
+
+  virtual void set_active_bytecode_ip (int)
+  {
+    panic_impossible (); // Only bytecode frames need to implement this
+  }
+
   virtual octave_value get_auto_fcn_var (auto_var_type) const = 0;
 
   virtual void set_auto_fcn_var (auto_var_type, const octave_value&) = 0;
@@ -451,9 +509,17 @@
   }
 
 
-  virtual octave_value& varref (const symbol_record& sym) = 0;
+  virtual octave_value& varref (const symbol_record& sym
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+                                , bool deref_refs = true
+#endif
+                                ) = 0;
 
-  virtual octave_value& varref (std::size_t data_offset);
+  virtual octave_value& varref (std::size_t data_offset
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+                                , bool deref_refs = true
+#endif
+                                );
 
   virtual std::string inputname (int n, bool ids_only) const;
 
@@ -595,6 +661,18 @@
 
   bool is_closure_context () const { return m_is_closure_context; }
 
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+
+  // The VM needs to tell the bytecode stackframe that it unwinds so
+  // that it can check whether to save the stack.
+  virtual void vm_unwinds () {}
+  virtual void vm_clear_for_cache () {}
+  virtual void vm_enter_script () { panic_impossible (); }
+  virtual void vm_exit_script () { panic_impossible (); }
+  virtual void vm_enter_nested () { panic_impossible (); }
+
+#endif
+
 protected:
 
   // Reference to the call stack that contains this frame.  Global
--- a/libinterp/corefcn/toplev.cc	Fri Apr 19 18:30:39 2024 +0200
+++ b/libinterp/corefcn/toplev.cc	Fri Apr 19 12:57:20 2024 -0400
@@ -368,6 +368,12 @@
         { "ENABLE_64", false },
 #endif
 
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+        { "ENABLE_BYTECODE_EVALUATOR", true },
+#else
+        { "ENABLE_BYTECODE_EVALUATOR", false },
+#endif
+
 #if defined (OCTAVE_ENABLE_COMMAND_LINE_PUSH_PARSER)
         { "ENABLE_COMMAND_LINE_PUSH_PARSER", true },
 #else
--- a/libinterp/corefcn/variables.cc	Fri Apr 19 18:30:39 2024 +0200
+++ b/libinterp/corefcn/variables.cc	Fri Apr 19 12:57:20 2024 -0400
@@ -1236,6 +1236,11 @@
 
   string_vector argv = args.make_argv ("clear");
 
+  // FIXME: This action should probably happen in the functions that
+  // are called below, not here.
+  // Mark any function cache in use by the VM as invalid
+  octave::load_path::signal_clear_fcn_cache ();
+
   if (argc == 1)
     {
       do_clear_variables (interp, argv, argc, true);
--- a/libinterp/octave-value/module.mk	Fri Apr 19 18:30:39 2024 +0200
+++ b/libinterp/octave-value/module.mk	Fri Apr 19 12:57:20 2024 -0400
@@ -62,6 +62,7 @@
   %reldir%/ov-perm.h \
   %reldir%/ov-range-traits.h \
   %reldir%/ov-range.h \
+  %reldir%/ov-ref.h \
   %reldir%/ov-re-diag.h \
   %reldir%/ov-re-mat.h \
   %reldir%/ov-scalar.h \
@@ -70,6 +71,7 @@
   %reldir%/ov-typeinfo.h \
   %reldir%/ov-usr-fcn.h \
   %reldir%/ov.h \
+  %reldir%/ov-vm.h \
   %reldir%/ovl.h \
   $(OV_INTTYPE_INC) \
   $(OV_SPARSE_INC)
@@ -131,6 +133,7 @@
   %reldir%/ov-oncleanup.cc \
   %reldir%/ov-perm.cc \
   %reldir%/ov-range.cc \
+  %reldir%/ov-ref.cc \
   %reldir%/ov-re-diag.cc \
   %reldir%/ov-re-mat.cc \
   %reldir%/ov-scalar.cc \
--- a/libinterp/octave-value/ov-base-diag.h	Fri Apr 19 18:30:39 2024 +0200
+++ b/libinterp/octave-value/ov-base-diag.h	Fri Apr 19 12:57:20 2024 -0400
@@ -245,6 +245,15 @@
 
   OCTINTERP_API octave_value fast_elem_extract (octave_idx_type n) const;
 
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+
+  octave_base_value::vm_call_dispatch_type vm_dispatch_call ()
+  {
+    return vm_call_dispatch_type::OCT_SUBSREF;
+  }
+
+#endif
+
 protected:
 
   DMT m_matrix;
--- a/libinterp/octave-value/ov-base-mat.cc	Fri Apr 19 18:30:39 2024 +0200
+++ b/libinterp/octave-value/ov-base-mat.cc	Fri Apr 19 12:57:20 2024 -0400
@@ -600,3 +600,55 @@
   else
     return false;
 }
+
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+
+template <typename MT>
+octave_value
+octave_base_matrix<MT>::checked_full_matrix_elem (octave_idx_type i) const
+{
+  return m_matrix.checkelem (i);
+}
+
+template <typename MT>
+octave_value
+octave_base_matrix<MT>::checked_full_matrix_elem (octave_idx_type i, octave_idx_type j) const
+{
+  return m_matrix.checkelem (i, j);
+}
+
+template <typename MT>
+octave_value
+octave_base_matrix<MT>::vm_extract_forloop_value (octave_idx_type counter)
+{
+  // TODO: Maybe this is slow? Should preferably be done once per loop
+  octave_value_list idx;
+  octave_value arg = octave_value (this, true);
+
+  dim_vector dv = arg.dims ().redim (2);
+  octave_idx_type nrows = dv(0);
+
+  if (arg.ndims () > 2)
+    arg = arg.reshape (dv);
+
+  octave_idx_type iidx;
+
+  // for row vectors, use single index to speed things up.
+  if (nrows == 1)
+    {
+      idx.resize (1);
+      iidx = 0;
+    }
+  else
+    {
+      idx.resize (2);
+      idx(0) = octave_value::magic_colon_t;
+      iidx = 1;
+    }
+
+  // One based indexing
+  idx(iidx) = counter + 1;
+  return arg.index_op (idx).storable_value ();
+}
+
+#endif
--- a/libinterp/octave-value/ov-base-mat.h	Fri Apr 19 18:30:39 2024 +0200
+++ b/libinterp/octave-value/ov-base-mat.h	Fri Apr 19 12:57:20 2024 -0400
@@ -87,6 +87,16 @@
   OCTINTERP_OVERRIDABLE_FUNC_API
   void maybe_economize () { m_matrix.maybe_economize (); }
 
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+
+  OCTINTERP_OVERRIDABLE_FUNC_API bool
+  vm_need_storable_call () const { return true; }
+
+  OCTINTERP_OVERRIDABLE_FUNC_API bool
+  is_maybe_function () const { return false; }
+
+#endif
+
   // We don't need to override all three forms of subsref.  The using
   // declaration will avoid warnings about partially-overloaded virtual
   // functions.
@@ -233,6 +243,25 @@
   OCTINTERP_OVERRIDABLE_FUNC_API const void *
   mex_get_data () const { return m_matrix.data (); }
 
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+
+  OCTINTERP_API octave_value
+  vm_extract_forloop_value (octave_idx_type idx);
+
+  OCTINTERP_API octave_value
+  checked_full_matrix_elem (octave_idx_type i) const;
+
+  OCTINTERP_API octave_value
+  checked_full_matrix_elem (octave_idx_type i, octave_idx_type j) const;
+
+  OCTINTERP_OVERRIDABLE_FUNC_API octave_base_value::vm_call_dispatch_type
+  vm_dispatch_call ()
+  {
+    return vm_call_dispatch_type::OCT_SUBSREF;
+  }
+
+#endif
+
 protected:
 
   MT m_matrix;
--- a/libinterp/octave-value/ov-base-scalar.cc	Fri Apr 19 18:30:39 2024 +0200
+++ b/libinterp/octave-value/ov-base-scalar.cc	Fri Apr 19 12:57:20 2024 -0400
@@ -231,3 +231,14 @@
   else
     return false;
 }
+
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+
+template <typename ST>
+octave_value
+octave_base_scalar<ST>::vm_extract_forloop_value (octave_idx_type)
+{
+  return octave_value (scalar);
+}
+
+#endif
--- a/libinterp/octave-value/ov-base-scalar.h	Fri Apr 19 18:30:39 2024 +0200
+++ b/libinterp/octave-value/ov-base-scalar.h	Fri Apr 19 12:57:20 2024 -0400
@@ -191,6 +191,28 @@
   OCTINTERP_API bool
   fast_elem_insert_self (void *where, builtin_type_t btyp) const;
 
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+
+  OCTINTERP_API octave_value
+  vm_extract_forloop_value (octave_idx_type idx);
+
+  OCTINTERP_OVERRIDABLE_FUNC_API bool vm_need_dispatch_assign_rhs ()
+  { return false; }
+
+  OCTINTERP_OVERRIDABLE_FUNC_API bool vm_need_dispatch_assign_lhs ()
+  { return false; }
+
+  OCTINTERP_OVERRIDABLE_FUNC_API bool vm_need_dispatch_push ()
+  { return false; }
+
+  OCTINTERP_OVERRIDABLE_FUNC_API octave_base_value::vm_call_dispatch_type
+  vm_dispatch_call ()
+  {
+    return vm_call_dispatch_type::OCT_SUBSREF;
+  }
+
+#endif
+
 protected:
 
   // The value of this scalar.
--- a/libinterp/octave-value/ov-base-sparse.h	Fri Apr 19 18:30:39 2024 +0200
+++ b/libinterp/octave-value/ov-base-sparse.h	Fri Apr 19 12:57:20 2024 -0400
@@ -240,6 +240,15 @@
 
   OCTINTERP_API octave_value fast_elem_extract (octave_idx_type n) const;
 
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+
+  octave_base_value::vm_call_dispatch_type vm_dispatch_call ()
+  {
+    return vm_call_dispatch_type::OCT_SUBSREF;
+  }
+
+#endif
+
 protected:
 
   OCTINTERP_API octave_value
--- a/libinterp/octave-value/ov-base.cc	Fri Apr 19 18:30:39 2024 +0200
+++ b/libinterp/octave-value/ov-base.cc	Fri Apr 19 12:57:20 2024 -0400
@@ -280,6 +280,28 @@
   octave::err_invalid_index (nm.c_str ());
 }
 
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+
+octave_value
+octave_base_value::vm_extract_forloop_value (octave_idx_type idx)
+{
+  return fast_elem_extract (idx).as_double_or_copy ();
+}
+
+double
+octave_base_value::vm_extract_forloop_double (octave_idx_type)
+{
+  error ("Type error extracting for loop iterator double value for VM");
+}
+
+bool
+octave_base_value::maybe_update_double (double)
+{
+  return false;
+}
+
+#endif
+
 octave_value
 octave_base_value::simple_subsasgn (char type, octave_value_list& idx,
                                     const octave_value& rhs)
@@ -942,6 +964,35 @@
   return nullptr;
 }
 
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+
+octave_base_value::vm_call_dispatch_type
+octave_base_value::vm_dispatch_call ()
+{
+  // This is the fallback way to determine the dispatch type
+  // for octave_base_value classes that do not implement vm_dispatch_call ()
+
+  bool has_function_cache = this->has_function_cache ();
+  bool is_defined = this->is_defined ();
+
+  if (! has_function_cache && is_defined)
+    return vm_call_dispatch_type::OCT_SUBSREF;
+  else if (has_function_cache)
+      return vm_call_dispatch_type::OCT_CALL;
+  else
+      return vm_call_dispatch_type::OCT_FN_LOOKUP;
+}
+
+octave_value_ref *
+octave_base_value::ref_rep ()
+{
+  err_wrong_type_arg ("octave_base_value::ref_value()", type_name ());
+
+  return nullptr;
+}
+
+#endif
+
 octave_user_function *
 octave_base_value::user_function_value (bool silent)
 {
@@ -1416,6 +1467,22 @@
   return false;
 }
 
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+
+octave_value
+octave_base_value::checked_full_matrix_elem (octave_idx_type) const
+{
+  err_wrong_type_arg ("octave_base_value::checked_full_matrix_elem (octave_idx_type)", type_name ());
+}
+
+octave_value
+octave_base_value::checked_full_matrix_elem (octave_idx_type, octave_idx_type) const
+{
+  err_wrong_type_arg ("octave_base_value::checked_full_matrix_elem (octave_idx_type, octave_idx_type)", type_name ());
+}
+
+#endif
+
 static octave_base_value *
 oct_conv_matrix_conv (const octave_base_value&)
 {
--- a/libinterp/octave-value/ov-base.h	Fri Apr 19 18:30:39 2024 +0200
+++ b/libinterp/octave-value/ov-base.h	Fri Apr 19 12:57:20 2024 -0400
@@ -79,6 +79,12 @@
 class octave_fcn_handle;
 class octave_value_list;
 
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+class octave_value_ref;
+class octave_fcn_cache;
+class octave_value_vm;
+#endif
+
 enum builtin_type_t
 {
   btyp_double,
@@ -277,6 +283,10 @@
 
   friend class octave_value;
 
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+  friend class octave_value_vm;
+#endif
+
   OCTINTERP_API octave_base_value ();
 
   octave_base_value (const octave_base_value&) : octave_base_value () { }
@@ -831,6 +841,45 @@
 
   virtual OCTINTERP_API octave_base_value * make_storable_value ();
 
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+
+  virtual bool vm_need_storable_call () const { return false; }
+
+  virtual bool vm_need_dispatch_assign_rhs () { return true; }
+
+  virtual bool vm_need_dispatch_assign_lhs () { return true; }
+
+  virtual bool vm_need_dispatch_push () { return true; }
+
+  enum class vm_call_dispatch_type {
+    OCT_SUBSREF,
+    OCT_FN_LOOKUP,
+    OCT_CALL,
+    OCT_HANDLE,
+    OCT_OBJECT,
+    OCT_NESTED_HANDLE,
+  };
+
+  virtual vm_call_dispatch_type vm_dispatch_call ();
+
+  virtual bool is_ref () const { return false; }
+
+  virtual bool is_vm_chainargs_wrapper () const { return false; }
+
+  virtual octave_value_ref * ref_rep ();
+
+  virtual octave_value
+  vm_extract_forloop_value (octave_idx_type idx);
+
+  virtual double
+  vm_extract_forloop_double (octave_idx_type idx);
+
+  virtual bool maybe_update_double (double d);
+
+  virtual bool is_trivial_range () const { return false; };
+
+#endif
+
   // Standard mappers.  Register new ones here.
   enum unary_mapper_t
   {
@@ -920,6 +969,16 @@
   virtual OCTINTERP_API bool
   fast_elem_insert_self (void *where, builtin_type_t btyp) const;
 
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+
+  virtual octave_value
+  checked_full_matrix_elem (octave_idx_type i) const;
+
+  virtual octave_value
+  checked_full_matrix_elem (octave_idx_type i, octave_idx_type j) const;
+
+#endif
+
 protected:
 
   // This should only be called for derived types.
--- a/libinterp/octave-value/ov-cell.h	Fri Apr 19 18:30:39 2024 +0200
+++ b/libinterp/octave-value/ov-cell.h	Fri Apr 19 12:57:20 2024 -0400
@@ -179,6 +179,15 @@
   // You should not use it anywhere else.
   const void * mex_get_data () const;
 
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+
+  octave_base_value::vm_call_dispatch_type vm_dispatch_call ()
+  {
+    return vm_call_dispatch_type::OCT_SUBSREF;
+  }
+
+#endif
+
   octave_value_list
   simple_subsref (char type, octave_value_list& idx, int nargout);
 
--- a/libinterp/octave-value/ov-classdef.h	Fri Apr 19 18:30:39 2024 +0200
+++ b/libinterp/octave-value/ov-classdef.h	Fri Apr 19 12:57:20 2024 -0400
@@ -229,6 +229,15 @@
 
   OCTINTERP_API std::string file_name () const;
 
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+
+  vm_call_dispatch_type vm_dispatch_call ()
+  {
+    return vm_call_dispatch_type::OCT_SUBSREF;
+  }
+
+#endif
+
 private:
 
   octave::cdef_meta_object m_object;
--- a/libinterp/octave-value/ov-colon.h	Fri Apr 19 18:30:39 2024 +0200
+++ b/libinterp/octave-value/ov-colon.h	Fri Apr 19 12:57:20 2024 -0400
@@ -75,6 +75,15 @@
   OCTINTERP_API void print_raw (std::ostream& os,
                                 bool pr_as_read_syntax = false) const;
 
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+
+  octave_base_value::vm_call_dispatch_type vm_dispatch_call ()
+  {
+    return vm_call_dispatch_type::OCT_SUBSREF;
+  }
+
+#endif
+
 private:
 
   DECLARE_OV_TYPEID_FUNCTIONS_AND_DATA_API (OCTINTERP_API)
--- a/libinterp/octave-value/ov-fcn-handle.cc	Fri Apr 19 18:30:39 2024 +0200
+++ b/libinterp/octave-value/ov-fcn-handle.cc	Fri Apr 19 12:57:20 2024 -0400
@@ -57,6 +57,9 @@
 #include "pr-output.h"
 #include "pt-arg-list.h"
 #include "pt-assign.h"
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+#  include "pt-bytecode-walk.h"
+#endif
 #include "pt-cmd.h"
 #include "pt-eval.h"
 #include "pt-exp.h"
@@ -229,9 +232,24 @@
   friend bool is_equal_to (const simple_fcn_handle& fh1,
                            const simple_fcn_handle& fh2);
 
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+  octave_function *
+  get_cached_fcn (const octave_value_list &args);
+
+  octave_function *
+  get_cached_fcn (void *beg, void *end);
+
+  bool has_function_cache () const;
+#endif
+
 private:
 
   octave_value m_fcn;
+
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+  // Only used by the VM via get_cached_fcn() and has_function_cache()
+  octave_fcn_cache m_cache;
+#endif
 };
 
 class scoped_fcn_handle : public base_fcn_handle
@@ -302,6 +320,17 @@
   friend bool is_equal_to (const scoped_fcn_handle& fh1,
                            const scoped_fcn_handle& fh2);
 
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+  octave_function *
+  get_cached_fcn (void *, void *) { return m_fcn.function_value (); }
+
+  octave_function *
+  get_cached_fcn (const octave_value_list&) { return m_fcn.function_value (); }
+
+  bool
+  has_function_cache () const { return true; }
+#endif
+
 protected:
 
   void find_function ();
@@ -373,6 +402,17 @@
   void print_raw (std::ostream&, bool pr_as_read_syntax,
                   int current_print_indent_level) const;
 
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+  octave_function *
+  get_cached_fcn (void *, void *) { return m_fcn.function_value (); }
+
+  octave_function *
+  get_cached_fcn (const octave_value_list&) { return m_fcn.function_value (); }
+
+  bool
+  has_function_cache () const { return true; }
+#endif
+
 protected:
 
   // The function we are handling.
@@ -442,6 +482,11 @@
     return m_stack_context;
   }
 
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+  std::shared_ptr<octave::stack_frame>
+  get_closure_frame () { return m_stack_context; }
+#endif
+
 protected:
 
   // Pointer to closure stack frames.
@@ -474,6 +519,11 @@
   friend bool is_equal_to (const weak_nested_fcn_handle& fh1,
                            const weak_nested_fcn_handle& fh2);
 
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+  std::shared_ptr<octave::stack_frame>
+  get_closure_frame () { return m_stack_context.lock (); }
+#endif
+
 protected:
 
   // Pointer to closure stack frames.
@@ -686,6 +736,21 @@
 
   bool parse (const std::string& fcn_text);
 
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+  octave_function *
+  get_cached_fcn (const octave_value_list&) { return m_fcn.function_value (); }
+
+  octave_function *
+  get_cached_fcn (void *, void *) { return m_fcn.function_value (); }
+
+  // TODO: This is a hack to get uncompiled anonymous functions to be subsrefed in the VM
+  bool has_function_cache () const
+  {
+    octave_function *fn = m_fcn.function_value ();
+    return fn ? fn->is_compiled () : false;
+  }
+#endif
+
 protected:
 
   // The function we are handling.
@@ -737,6 +802,14 @@
     return m_stack_context;
   }
 
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+  // Compile the underlying function to bytecode for the VM.
+  void compile ();
+
+  std::shared_ptr<octave::stack_frame>
+  get_closure_frame () { return m_stack_context; }
+#endif
+
 protected:
 
   // Pointer to closure stack frames.
@@ -771,6 +844,11 @@
   friend bool is_equal_to (const weak_anonymous_fcn_handle& fh1,
                            const weak_anonymous_fcn_handle& fh2);
 
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+  std::shared_ptr<octave::stack_frame>
+  get_closure_frame () { return m_stack_context.lock (); }
+#endif
+
 protected:
 
   // Pointer to closure stack frames.
@@ -962,6 +1040,227 @@
     return false;
 }
 
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+
+// FIXME: Find a way to avoid duplication of code in
+// simple_fcn_handle::call
+
+octave_function *
+simple_fcn_handle::
+get_cached_fcn (const octave_value_list &args)
+{
+  if (m_cache.has_cached_function (args))
+    return m_cache.get_cached_fcn ();
+
+  {
+    // The lookup is done like in call()
+    interpreter& interp = __get_interpreter__ ();
+    symbol_table& symtab = interp.get_symbol_table ();
+
+    octave_value fcn_to_call;
+    octave_value ov_fcn = symtab.find_function (m_name, args);
+
+    if (m_fcn.is_defined ())
+      {
+        // A simple function was found when the handle was created.
+        // Use that unless we find a class method to override it.
+
+        fcn_to_call = m_fcn;
+
+        if (ov_fcn.is_defined ())
+          {
+            octave_function *fcn = ov_fcn.function_value ();
+
+            std::string dispatch_class = fcn->dispatch_class ();
+
+            if (fcn->is_class_method ())
+              {
+                // Function found through lookup is a class method
+                // so use it instead of the simple one found when
+                // the handle was created.
+
+                fcn_to_call = ov_fcn;
+              }
+          }
+      }
+    else
+      {
+        // There was no simple function found when the handle was
+        // created so use the one found here (if any).
+
+        fcn_to_call = ov_fcn;
+      }
+
+
+    if (! fcn_to_call.is_defined ())
+      err_invalid_fcn_handle (m_name);
+
+    m_cache.set_cached_function (fcn_to_call, args, 0);
+
+    return fcn_to_call.function_value ();
+  }
+}
+
+octave_function *
+simple_fcn_handle::
+get_cached_fcn (void *pbeg, void *pend)
+{
+  if (m_cache.has_cached_function (pbeg, pend))
+    return m_cache.get_cached_fcn ();
+
+  octave::stack_element *beg = static_cast<octave::stack_element *> (pbeg);
+  octave::stack_element *end = static_cast<octave::stack_element *> (pend);
+
+  octave_value_list args;
+  while (beg != end)
+    args.append ((beg++)->ov);
+
+  return get_cached_fcn (args); // TODO: Avoid extra call to has_cached_function()
+}
+
+// FIXME: Find a way to avoid duplication of code in
+// simple_fcn_handle::call
+// Like call(), but instead returns true if the call() would end
+// up with another call(), or false if there would be a subsref()
+// or an error on the path to subsref() call.
+bool
+simple_fcn_handle::has_function_cache () const
+{
+  // FIXME: if m_name has a '.' in the name, lookup first component.  If
+  // it is a classdef meta object, then build TYPE and IDX arguments and
+  // make a subsref call using them.
+
+  interpreter& interp = __get_interpreter__ ();
+
+  octave_value fcn_to_call;
+
+  // The following code is similar to part of
+  // tree_evaluator::visit_index_expression but simpler because it
+  // handles a more restricted case.
+
+  symbol_table& symtab = interp.get_symbol_table ();
+
+  std::size_t pos = m_name.find ('.');
+
+  if (pos != std::string::npos)
+    {
+      // FIXME: check to see which of these cases actually work in
+      // Octave and Matlab.  For the last two, assume handle is
+      // created before object is defined as an object.
+      //
+      // We can have one of
+      //
+      //   pkg-list . fcn  (args)
+      //   pkg-list . cls . meth (args)
+      //   class-name . method  (args)
+      //   class-name . static-method  (args)
+      //   object . method  (args)
+      //   object . static-method  (args)
+
+      // Evaluate package elements until we find a function,
+      // classdef object, or classdef_meta object that is not a
+      // package.  An object may only appear as the first element,
+      // then it must be followed directly by a function name.
+
+      std::size_t beg = 0;
+      std::size_t end = pos;
+
+      std::vector<std::string> idx_elts;
+
+      while (true)
+        {
+          end = m_name.find ('.', beg);
+
+          idx_elts.push_back (m_name.substr (beg, end-beg));
+
+          if (end == std::string::npos)
+            break;
+
+          beg = end+1;
+        }
+
+      std::size_t n_elts = idx_elts.size ();
+
+      bool have_object = false;
+      octave_value partial_expr_val;
+
+      // Lazy evaluation.  The first element was not known to be defined
+      // as an object in the scope where the handle was created.  See if
+      // there is a definition in the current scope.
+
+      partial_expr_val = interp.varval (idx_elts[0]);
+
+      if (partial_expr_val.is_defined ())
+        {
+          if (! partial_expr_val.is_classdef_object () || n_elts != 2)
+            return false;
+
+          have_object = true;
+        }
+      else
+        partial_expr_val = symtab.find_function (idx_elts[0], ovl ());
+
+      std::string type;
+      std::list<octave_value_list> arg_list;
+
+      for (std::size_t i = 1; i < n_elts; i++)
+        {
+          if (partial_expr_val.is_package ())
+            {
+              if (have_object)
+                return false;
+
+              type = ".";
+              arg_list.push_back (ovl (idx_elts[i]));
+
+              try
+                {
+                  // Silently ignore extra output values.
+
+                  octave_value_list tmp_list
+                    = partial_expr_val.subsref (type, arg_list, 0);
+
+                  partial_expr_val
+                    = tmp_list.length () ? tmp_list(0) : octave_value ();
+
+                  if (partial_expr_val.is_cs_list ())
+                    return false;
+
+                  arg_list.clear ();
+                }
+              catch (const index_exception&)
+                {
+                  return false;
+                }
+            }
+          else if (have_object || partial_expr_val.is_classdef_meta ())
+            {
+              // Object or class name must be the next to the last
+              // element (it was the previous one, so if this is the
+              // final element, it should be a classdef method,
+              // but we'll let the classdef or classdef_meta subsref
+              // function sort that out.)
+              return false;
+            }
+          else
+            return false;
+        }
+
+      // If we get here, we must have a function to call.
+
+      if (! partial_expr_val.is_function ())
+        return false;
+
+      return true;
+    }
+  else
+    {
+      return true;
+    }
+}
+
+#endif
+
 octave_value_list
 simple_fcn_handle::call (int nargout, const octave_value_list& args)
 {
@@ -1871,6 +2170,11 @@
 
   octave_user_function *oct_usr_fcn = m_fcn.user_function_value ();
 
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+  if (octave::vm::maybe_compile_or_compiled (oct_usr_fcn))
+    return octave::vm::call (tw, nargout, args, oct_usr_fcn, m_stack_context);
+#endif
+
   tw.push_stack_frame (oct_usr_fcn, m_stack_context);
 
   unwind_action act ([&tw] () { tw.pop_stack_frame (); });
@@ -2781,6 +3085,11 @@
 
   octave_user_function *oct_usr_fcn = m_fcn.user_function_value ();
 
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+  if (octave::vm::maybe_compile_or_compiled (oct_usr_fcn, &m_local_vars))
+    return octave::vm::call (tw, nargout, args, oct_usr_fcn);
+#endif
+
   tw.push_stack_frame (oct_usr_fcn, m_local_vars, m_stack_context);
 
   unwind_action act ([&tw] () { tw.pop_stack_frame (); });
@@ -2788,6 +3097,25 @@
   return oct_usr_fcn->execute (tw, nargout, args);
 }
 
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+
+void anonymous_fcn_handle::compile ()
+{
+  octave_user_code *usr_code = user_function_value ();
+
+  try
+    {
+      compile_anon_user_function (*usr_code, false, m_local_vars);
+    }
+  catch (std::exception &e)
+    {
+      warning ("Auto-compilation of anonymous function failed with message %s", e.what ());
+      usr_code->set_compilation_failed (true);
+    }
+}
+
+#endif
+
 octave_value
 anonymous_fcn_handle::workspace () const
 {
--- a/libinterp/octave-value/ov-fcn-handle.h	Fri Apr 19 18:30:39 2024 +0200
+++ b/libinterp/octave-value/ov-fcn-handle.h	Fri Apr 19 12:57:20 2024 -0400
@@ -165,6 +165,10 @@
 
   virtual void compile () { }
 
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+  virtual std::shared_ptr<octave::stack_frame> get_closure_frame () { return nullptr; }
+#endif
+
 protected:
 
   void warn_load (const char *file_type) const;
@@ -375,6 +379,33 @@
   friend bool
   is_equal_to (const octave_fcn_handle& fh1, const octave_fcn_handle& fh2);
 
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+
+  octave_function *
+  get_cached_fcn (void *beg, void *end) { return m_rep->get_cached_fcn (beg, end); }
+
+  octave_function *
+  get_cached_fcn (const octave_value_list& args) { return m_rep->get_cached_fcn (args); }
+
+  bool has_function_cache () const { return m_rep->has_function_cache (); }
+
+  vm_call_dispatch_type vm_dispatch_call ()
+  {
+    if (m_rep->is_nested () || m_rep->is_anonymous ())
+      return vm_call_dispatch_type::OCT_NESTED_HANDLE;
+    if (m_rep->has_function_cache ())
+      return vm_call_dispatch_type::OCT_CALL;
+
+    return vm_call_dispatch_type::OCT_SUBSREF;
+  }
+
+  void compile () { m_rep->compile (); }
+
+  std::shared_ptr<octave::stack_frame>
+  get_closure_frame () { return m_rep->get_closure_frame (); }
+
+#endif
+
 private:
 
   std::shared_ptr<octave::base_fcn_handle> m_rep;
--- a/libinterp/octave-value/ov-fcn.cc	Fri Apr 19 18:30:39 2024 +0200
+++ b/libinterp/octave-value/ov-fcn.cc	Fri Apr 19 12:57:20 2024 -0400
@@ -39,6 +39,8 @@
 #include "pt-eval.h"
 #include "symtab.h"
 
+#include "pt-bytecode-walk.h"
+
 octave_base_value *
 octave_function::clone () const
 {
@@ -67,9 +69,215 @@
 octave_function::call (octave::tree_evaluator& tw, int nargout,
                        const octave_value_list& args)
 {
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+  octave_user_function *usr = this->user_function_value(true);
+  if (octave::vm::maybe_compile_or_compiled (usr))
+    return octave::vm::call (tw, nargout, args, usr);
+#endif
+
   tw.push_stack_frame (this);
 
   octave::unwind_action act ([&tw] () { tw.pop_stack_frame (); });
 
   return execute (tw, nargout, args);
 }
+
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+
+bool
+octave_fcn_cache::has_cached_function (void *pbeg, void *pend) const
+{
+  octave::stack_element *beg = static_cast<octave::stack_element *> (pbeg);
+  octave::stack_element *end = static_cast<octave::stack_element *> (pend);
+
+  if (m_n_updated == 0)
+    return false;
+
+  unsigned vec_n = m_cached_args.size ();
+
+  unsigned n_args = end - beg;
+  if (n_args != vec_n)
+    return false;
+
+  for (unsigned i = 0; i < n_args; i++)
+    {
+      if (beg[i].ov.type_id () != m_cached_args [i])
+        return false;
+    }
+
+  return true;
+}
+
+void
+octave_fcn_cache::set_cached_function (octave_value ov,
+                                       const octave_value_list &args,
+                                       octave_idx_type current_n_updated)
+{
+  clear_cached_function ();
+
+  if (!ov.is_defined ())
+    return;
+  // Arbitrary limit on how many args we keep track of in caches.
+  if (args.length () > 32)
+    return;
+
+  // We need to keep a reference to the metaobject for as long as the function is alive
+  if (ov.is_classdef_meta ())
+    m_cached_object = ov;
+
+  std::vector<int> v_types;
+
+  for (int i = 0; i < args.length (); i++)
+    {
+      // FIXME: We don't cache methods or functions with class object
+      // arguments. Classes need some kind of unique simple key for this
+      // simple approach.
+      if (args(i).isobject())
+        return;
+
+      v_types.push_back (args (i).type_id ());
+    }
+
+  m_cached_args = v_types;
+  m_cached_function = ov;
+
+  m_n_updated = current_n_updated;
+}
+
+octave_value
+octave_fcn_cache::
+get_cached_obj ()
+{
+  octave_function *fcn = nullptr;
+
+  octave_idx_type current_n_updated = octave::load_path::get_weak_n_updated ();
+  if (has_cached_function (nullptr, nullptr))
+    {
+      if (m_n_updated == current_n_updated)
+        return m_cached_function;
+      else
+        clear_cached_function ();
+    }
+
+  if (! fcn)
+    {
+      octave::interpreter& interp =
+        octave::__get_interpreter__ ();
+
+      octave::symbol_table& symtab = interp.get_symbol_table ();
+      octave_value val = symtab.find_function (m_fcn_name, octave_value_list {});
+
+      if (val.is_function ())
+        {
+          fcn = val.function_value (true);
+          set_cached_function (val, octave_value_list {}, current_n_updated);
+          return val;
+        }
+
+      val = symtab.find_function (m_fcn_name);
+      if (val.is_function ())
+        {
+          return val;
+        }
+    }
+
+  return {};
+}
+
+octave_function *
+octave_fcn_cache::
+get_cached_fcn_internal (const octave_value_list& args)
+{
+  clear_cached_function ();
+
+  octave_function *fcn = nullptr;
+  octave_idx_type current_n_updated = octave::load_path::get_weak_n_updated ();
+
+  octave::interpreter& interp =
+    octave::__get_interpreter__ ();
+
+  octave::symbol_table& symtab = interp.get_symbol_table ();
+  octave_value val = symtab.find_function (m_fcn_name, args);
+
+  if (val.is_function ())
+    {
+      fcn = val.function_value (true);
+      set_cached_function (val, args, current_n_updated);
+      return fcn;
+    }
+
+  val = symtab.find_function (m_fcn_name);
+  if (val.is_function ())
+    {
+      return val.function_value (true);
+    }
+
+  return fcn;
+}
+
+octave_function *
+octave_fcn_cache::
+get_cached_fcn_if_fresh ()
+{
+  octave_idx_type current_n_updated = octave::load_path::get_weak_n_updated ();
+  if (m_n_updated == current_n_updated)
+    return get_cached_fcn ();
+  return nullptr;
+}
+
+octave_function *
+octave_fcn_cache::
+get_cached_fcn (const octave_value_list& args)
+{
+  octave_idx_type current_n_updated = octave::load_path::get_weak_n_updated ();
+  if (OCTAVE_LIKELY (has_cached_function (args)))
+    if (OCTAVE_LIKELY (m_n_updated == current_n_updated))
+      return m_cached_function.function_value (true);
+
+  return get_cached_fcn_internal (args);
+}
+
+octave_function *
+octave_fcn_cache::
+get_cached_fcn (void *pbeg, void *pend)
+{
+  octave_idx_type current_n_updated = octave::load_path::get_weak_n_updated ();
+  if (OCTAVE_LIKELY (has_cached_function (pbeg, pend)))
+    if (OCTAVE_LIKELY (m_n_updated == current_n_updated))
+      return m_cached_function.function_value (true);
+
+  octave::stack_element *beg = static_cast<octave::stack_element *> (pbeg);
+  octave::stack_element *end = static_cast<octave::stack_element *> (pend);
+
+  octave_value_list args;
+  for (; beg != end; beg++)
+    {
+      if (OCTAVE_UNLIKELY (beg->ov.is_cs_list ()))
+        args.append (beg->ov.list_value ());
+      else
+        args.append (beg->ov);
+    }
+
+  return get_cached_fcn_internal (args);
+}
+
+octave_value_list
+octave_fcn_cache::
+call (octave::tree_evaluator& tw,
+      octave_function *fcn,
+      const octave_value_list& args,
+      int nargout)
+{
+  try
+    {
+      return fcn->call (tw, nargout, args);
+    }
+  catch (octave::index_exception& ie)
+    {
+      error ("Proper error message here for function calls");
+      // Maybe return the octave_function pointer?
+      //tw.final_index_error (ie, m_expr);
+    }
+}
+
+#endif
--- a/libinterp/octave-value/ov-fcn.h	Fri Apr 19 18:30:39 2024 +0200
+++ b/libinterp/octave-value/ov-fcn.h	Fri Apr 19 12:57:20 2024 -0400
@@ -49,6 +49,93 @@
 
 // Functions.
 
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+
+// Class that holds a cached reference to a octave function
+// for use in the bytecode VM.
+class OCTINTERP_API octave_fcn_cache : public octave_base_value
+{
+public:
+  octave_fcn_cache (const std::string &name) :m_fcn_name (name) { }
+  octave_fcn_cache () {}
+
+  octave_base_value *
+  clone () const { return new octave_fcn_cache (*this); }
+
+  bool is_function_cache () const { return true; }
+
+  bool has_function_cache () const { return true; }
+
+  vm_call_dispatch_type vm_dispatch_call ()
+  {
+    return vm_call_dispatch_type::OCT_CALL;
+  }
+
+  octave_function *
+  get_cached_fcn (const octave_value_list& args);
+
+  octave_function *
+  get_cached_fcn (void *beg, void *end);
+
+  octave_function *
+  get_cached_fcn () { return m_cached_function.function_value (); }
+
+  octave_value
+  get_cached_obj ();
+
+  octave_value_list
+  call (octave::tree_evaluator& tw,
+        octave_function *fcn,
+        const octave_value_list& args,
+        int nargout);
+
+  void set_cached_function (octave_value ov, const octave_value_list &args, octave_idx_type current_n_updated);
+
+  bool has_cached_function (const octave_value_list &args) const
+  {
+    if (m_n_updated == 0)
+      return false;
+
+    unsigned vec_n = m_cached_args.size ();
+
+    unsigned n_args = args.length ();
+    if (n_args != vec_n)
+      return false;
+
+    for (unsigned i = 0; i < n_args; i++)
+      {
+        if (args (i).type_id () != m_cached_args [i])
+          return false;
+      }
+
+    return true;
+  }
+
+  bool has_cached_function (void *beg, void *end) const;
+
+  octave_function * get_cached_fcn_if_fresh ();
+
+private:
+
+  octave_function * get_cached_fcn_internal (const octave_value_list& args);
+
+  void clear_cached_function ()
+  {
+    m_cached_object = octave_value {};
+    m_cached_function = octave_value {};
+    m_n_updated = 0;
+    m_cached_args.clear ();
+  }
+
+  octave_value m_cached_object;
+  octave_value m_cached_function;
+  std::vector<int> m_cached_args;
+  octave_idx_type m_n_updated = 0;
+  std::string m_fcn_name;
+};
+
+#endif
+
 class OCTINTERP_API octave_function : public octave_base_value
 {
 public:
@@ -238,6 +325,21 @@
   execute (octave::tree_evaluator& tw, int nargout = 0,
            const octave_value_list& args = octave_value_list ()) = 0;
 
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+  vm_call_dispatch_type vm_dispatch_call ()
+  {
+    return vm_call_dispatch_type::OCT_CALL;
+  }
+
+  octave_function *
+  get_cached_fcn (void *, void *) { return function_value (); }
+
+  octave_function *
+  get_cached_fcn (const octave_value_list&) { return function_value (); }
+
+  bool has_function_cache () const { return true; }
+#endif
+
 protected:
 
   octave_function (const std::string& nm,
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libinterp/octave-value/ov-ref.cc	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,193 @@
+////////////////////////////////////////////////////////////////////////
+//
+// Copyright (C) 2007-2024 The Octave Project Developers
+//
+// See the file COPYRIGHT.md in the top-level directory of this
+// distribution or <https://octave.org/copyright/>.
+//
+// This file is part of Octave.
+//
+// Octave is free software: you can redistribute it and/or modify it
+// under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// Octave is distributed in the hope that it will be useful, but
+// WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with Octave; see the file COPYING.  If not, see
+// <https://www.gnu.org/licenses/>.
+//
+////////////////////////////////////////////////////////////////////////
+
+#if defined (HAVE_CONFIG_H)
+#  include "config.h"
+#endif
+
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+
+#include "ov.h"
+#include "ov-ref.h"
+
+#include "interpreter.h"
+#include "interpreter-private.h"
+
+DEFINE_OV_TYPEID_FUNCTIONS_AND_DATA (octave_value_ref_global,
+                                     "global value reference",
+                                     "global value reference");
+DEFINE_OV_TYPEID_FUNCTIONS_AND_DATA (octave_value_ref_persistent,
+                                     "global value persistent",
+                                     "global value persistent");
+
+DEFINE_OV_TYPEID_FUNCTIONS_AND_DATA (octave_value_ref_vmlocal,
+                                     "local vm value reference",
+                                     "local vm value reference");
+
+DEFINE_OV_TYPEID_FUNCTIONS_AND_DATA (octave_value_ref_ptr,
+                                     "local vm value pointer",
+                                     "local vm value pointer");
+
+void
+octave_value_ref::maybe_call_dtor ()
+{
+  ref ().m_rep->maybe_call_dtor ();
+}
+
+bool
+octave_value_ref::is_defined () const
+{
+  return const_cast<octave_value_ref*> (this)->ref ().m_rep->is_defined ();
+}
+
+bool
+octave_value_ref::is_maybe_function () const
+{
+  return const_cast<octave_value_ref*> (this)->ref ().m_rep->is_maybe_function ();
+}
+
+octave_base_value *
+octave_value_ref::unique_clone ()
+{
+  return ref ().m_rep->unique_clone ();
+}
+
+octave_value
+octave_value_ref::simple_subsasgn (char type, octave_value_list& idx, const octave_value& rhs)
+{
+  octave_value ans = ref ().m_rep->simple_subsasgn (type, idx, rhs);
+  ref () = ans;
+  return octave_value {this, true};
+}
+
+octave_value
+octave_value_ref::subsasgn (const std::string& type,
+                            const std::list<octave_value_list>& idx,
+                            const octave_value& rhs)
+{
+  octave_value ans = ref ().m_rep->subsasgn (type, idx, rhs);
+  ref () = ans;
+  return octave_value {this, true};
+}
+
+octave_value
+octave_value_ref_global::deref ()
+{
+  auto &interp = octave::__get_interpreter__();
+  return interp.global_varval (m_name);
+}
+
+void
+octave_value_ref_global::set_value (octave_value val)
+{
+  auto &interp = octave::__get_interpreter__();
+  interp.global_assign (m_name, val);
+}
+
+octave_value&
+octave_value_ref_global::ref ()
+{
+  auto& tw = octave::__get_evaluator__();
+  return tw.global_varref (m_name);
+}
+
+octave_value
+octave_value_ref_persistent::deref ()
+{
+  return m_scope.persistent_varval (m_offset);
+}
+
+void
+octave_value_ref_persistent::set_value (octave_value val)
+{
+  octave_value &ov_pers = m_scope.persistent_varref (m_offset);
+  ov_pers = val;
+}
+
+octave_value &
+octave_value_ref_persistent::ref ()
+{
+  return m_scope.persistent_varref (m_offset);
+}
+
+octave_value &
+octave_value_ref_vmlocal::ref ()
+{
+  return m_frame->varref (m_sym.data_offset ());
+}
+
+octave_value
+octave_value_ref_vmlocal::deref ()
+{
+  return m_frame->varval (m_sym.data_offset ());
+}
+
+void
+octave_value_ref_vmlocal::set_value (octave_value val)
+{
+  m_frame->varref (m_sym.data_offset ()) = val;
+}
+
+octave::stack_frame::scope_flags
+octave_value_ref_vmlocal::get_scope_flag ()
+{
+  return m_frame->get_scope_flag (m_sym.data_offset ());
+}
+
+void
+octave_value_ref_vmlocal::mark_globalness_in_owning_frame (bool should_be_global)
+{
+  if (should_be_global)
+    m_frame->set_scope_flag (m_sym.data_offset (), octave::stack_frame::scope_flags::GLOBAL);
+  else
+    m_frame->set_scope_flag (m_sym.data_offset (), octave::stack_frame::scope_flags::LOCAL);
+}
+
+octave_value &
+octave_value_ref_ptr::ref ()
+{
+  if (m_pov->is_ref ())
+    return m_pov->ref_rep ()->ref ();
+  return *m_pov;
+}
+
+octave_value
+octave_value_ref_ptr::deref ()
+{
+  if (m_pov->is_ref ())
+    return m_pov->ref_rep ()->deref ();
+  return *m_pov;
+}
+
+void
+octave_value_ref_ptr::set_value (octave_value val)
+{
+  if (m_pov->is_ref ())
+    m_pov->ref_rep ()->set_value (val);
+  else
+    *m_pov = val;
+}
+
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libinterp/octave-value/ov-ref.h	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,190 @@
+////////////////////////////////////////////////////////////////////////
+//
+// Copyright (C) 1996-2024 The Octave Project Developers
+//
+// See the file COPYRIGHT.md in the top-level directory of this
+// distribution or <https://octave.org/copyright/>.
+//
+// This file is part of Octave.
+//
+// Octave is free software: you can redistribute it and/or modify it
+// under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// Octave is distributed in the hope that it will be useful, but
+// WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with Octave; see the file COPYING.  If not, see
+// <https://www.gnu.org/licenses/>.
+//
+////////////////////////////////////////////////////////////////////////
+
+#if ! defined (octave_ov_ref_h)
+#define octave_ov_ref_h 1
+
+#include "octave-config.h"
+
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+
+#include "ov-base.h"
+#include "ovl.h"
+#include "symscope.h"
+#include "symrec.h"
+#include "stack-frame.h"
+#include <string>
+#include <memory>
+
+// octave_value_ref is to be used by the VM to implement
+// global and persistent values.
+//
+// octave_value_ref need to overload any virtual call
+// used by the assign and push slot op-codes.
+//
+// Any octave_value_ref should never leave the VM slots
+// on the VM stack.
+
+// Abstract type
+class OCTINTERP_API
+octave_value_ref : public octave_base_value
+{
+public:
+    octave_value_ref () = default;
+    ~octave_value_ref () = default;
+
+    octave_value_ref * ref_rep () { return this; }
+    bool is_ref () const { return true; }
+
+    virtual octave_value deref () = 0;
+    virtual void set_value (octave_value val) = 0;
+    virtual octave_value & ref () = 0;
+    virtual void maybe_save_state  () {};
+
+    virtual bool is_global_ref () const { return false; }
+    virtual bool is_persistent_ref () const { return false; }
+    virtual bool is_ptr_ref () const { return false; }
+    virtual bool is_local_ref () const { return false; }
+
+    virtual octave::stack_frame::scope_flags get_scope_flag () = 0;
+    virtual void mark_globalness_in_owning_frame (bool /* should_be_global */) {}
+
+    void maybe_call_dtor ();
+    octave_value simple_subsasgn (char type, octave_value_list& idx, const octave_value& rhs);
+    octave_value subsasgn (const std::string& type, const std::list<octave_value_list>& idx, const octave_value& rhs);
+    octave_base_value * unique_clone ();
+    bool is_defined () const;
+    bool is_maybe_function () const;
+};
+
+class OCTINTERP_API
+octave_value_ref_global : public octave_value_ref
+{
+public:
+    octave_value_ref_global () = default;
+    ~octave_value_ref_global () = default;
+    octave_value_ref_global (std::string name)
+        : m_name (name) {};
+
+    octave_value deref ();
+    octave_value & ref ();
+    void set_value (octave_value val);
+
+    bool is_global_ref () const { return true; }
+
+    octave::stack_frame::scope_flags get_scope_flag ()
+    {
+      return octave::stack_frame::scope_flags::GLOBAL;
+    }
+
+private:
+    std::string m_name;
+
+    DECLARE_OV_TYPEID_FUNCTIONS_AND_DATA
+};
+
+class OCTINTERP_API
+octave_value_ref_persistent : public octave_value_ref
+{
+public:
+    octave_value_ref_persistent () = default;
+    ~octave_value_ref_persistent () = default;
+    octave_value_ref_persistent (octave::symbol_scope scope, int offset)
+        : m_offset (offset), m_scope (scope) {};
+
+    octave_value deref ();
+    octave_value & ref ();
+    void set_value (octave_value val);
+
+    bool is_persistent_ref () const { return true; }
+
+    octave::stack_frame::scope_flags get_scope_flag ()
+    {
+      return octave::stack_frame::scope_flags::PERSISTENT;
+    }
+
+private:
+    int m_offset;
+    octave::symbol_scope m_scope = octave::symbol_scope::invalid ();
+
+    DECLARE_OV_TYPEID_FUNCTIONS_AND_DATA
+};
+
+class OCTINTERP_API
+octave_value_ref_vmlocal : public octave_value_ref
+{
+public:
+    octave_value_ref_vmlocal () = default;
+    ~octave_value_ref_vmlocal () = default;
+    octave_value_ref_vmlocal (octave::symbol_record sym, octave::stack_frame *frame)
+        : m_frame (frame), m_sym (sym) { }
+
+    octave_value deref ();
+    octave_value & ref ();
+    void set_value (octave_value val);
+
+    octave::stack_frame::scope_flags get_scope_flag ();
+
+    void mark_globalness_in_owning_frame (bool should_be_global);
+
+    bool is_local_ref () const { return true; }
+
+private:
+    octave::stack_frame *m_frame = nullptr;
+    octave::symbol_record m_sym;
+
+    DECLARE_OV_TYPEID_FUNCTIONS_AND_DATA
+};
+
+class OCTINTERP_API
+octave_value_ref_ptr : public octave_value_ref
+{
+public:
+    octave_value_ref_ptr () = default;
+    ~octave_value_ref_ptr () = default;
+    octave_value_ref_ptr (octave_value *pov)
+        : m_pov (pov) { }
+
+    octave_value deref ();
+    octave_value & ref ();
+    void set_value (octave_value val);
+
+    octave::stack_frame::scope_flags get_scope_flag ()
+    {
+      if (m_pov->is_ref ())
+        return m_pov->ref_rep ()->get_scope_flag ();
+      return octave::stack_frame::scope_flags::LOCAL;
+    }
+
+    bool is_ptr_ref () const { return true; }
+private:
+    octave_value *m_pov;
+
+    DECLARE_OV_TYPEID_FUNCTIONS_AND_DATA
+};
+
+#endif
+
+#endif
--- a/libinterp/octave-value/ov-struct.cc	Fri Apr 19 18:30:39 2024 +0200
+++ b/libinterp/octave-value/ov-struct.cc	Fri Apr 19 12:57:20 2024 -0400
@@ -1119,6 +1119,43 @@
   return retval;
 }
 
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+
+octave_value
+octave_struct::vm_extract_forloop_value (octave_idx_type counter)
+{
+  // TODO: Maybe this is slow? Should preferably be done once per loop
+  octave_value_list idx;
+  octave_value arg = octave_value_factory::make_copy (this);
+
+  dim_vector dv = arg.dims ().redim (2);
+  octave_idx_type nrows = dv(0);
+
+  if (arg.ndims () > 2)
+    arg = arg.reshape (dv);
+
+  octave_idx_type iidx;
+
+  // for row vectors, use single index to speed things up.
+  if (nrows == 1)
+    {
+      idx.resize (1);
+      iidx = 0;
+    }
+  else
+    {
+      idx.resize (2);
+      idx(0) = octave_value::magic_colon_t;
+      iidx = 1;
+    }
+
+  // One based indexing
+  idx(iidx) = counter + 1;
+  return arg.index_op (idx).storable_value ();
+}
+
+#endif
+
 DEFINE_OV_TYPEID_FUNCTIONS_AND_DATA(octave_scalar_struct, "scalar struct",
                                     "struct");
 
--- a/libinterp/octave-value/ov-struct.h	Fri Apr 19 18:30:39 2024 +0200
+++ b/libinterp/octave-value/ov-struct.h	Fri Apr 19 12:57:20 2024 -0400
@@ -100,6 +100,12 @@
 
   dim_vector dims () const { return m_map.dims (); }
 
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+
+  octave_value vm_extract_forloop_value (octave_idx_type idx);
+
+#endif
+
   std::size_t byte_size () const;
 
   // This is the number of elements in each field.  The total number
@@ -162,6 +168,15 @@
   bool
   fast_elem_insert (octave_idx_type n, const octave_value& x);
 
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+
+  octave_base_value::vm_call_dispatch_type vm_dispatch_call ()
+  {
+    return vm_call_dispatch_type::OCT_SUBSREF;
+  }
+
+#endif
+
 protected:
 
   // The associative array used to manage the structure data.
@@ -286,6 +301,15 @@
 
   bool fast_elem_insert_self (void *where, builtin_type_t btyp) const;
 
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+
+  octave_base_value::vm_call_dispatch_type vm_dispatch_call ()
+  {
+    return vm_call_dispatch_type::OCT_SUBSREF;
+  }
+
+#endif
+
 protected:
 
   // The associative array used to manage the structure data.
--- a/libinterp/octave-value/ov-usr-fcn.cc	Fri Apr 19 18:30:39 2024 +0200
+++ b/libinterp/octave-value/ov-usr-fcn.cc	Fri Apr 19 12:57:20 2024 -0400
@@ -55,6 +55,10 @@
 #include "pt-misc.h"
 #include "pt-pr-code.h"
 #include "pt-stmt.h"
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+#  include "pt-bytecode-vm.h"
+#  include "pt-bytecode-walk.h"
+#endif
 #include "pt-walk.h"
 #include "symtab.h"
 #include "interpreter-private.h"
@@ -111,6 +115,24 @@
              m_file_name.c_str ());
 }
 
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+
+void
+octave_user_code::clear_bytecode ()
+{
+  m_bytecode = octave::bytecode {};
+
+  auto subs = subfunctions ();
+  for (auto kv : subs)
+    {
+      octave_user_function *sub = kv.second.user_function_value ();
+      if (sub)
+        sub->clear_bytecode ();
+    }
+}
+
+#endif
+
 std::string
 octave_user_code::get_code_line (std::size_t line)
 {
@@ -196,6 +218,32 @@
 octave_user_script::call (octave::tree_evaluator& tw, int nargout,
                           const octave_value_list& args)
 {
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+  if (octave::vm::maybe_compile_or_compiled (this))
+    {
+      // Check that either:
+      //     * There is an eval frame and that it is the top scope or a bytecode frame
+      //     * The caller is top scope or a bytecode frame
+      // , to allow executing the script in the VM. I.e. don't execute scripts in the VM
+      // if the caller is an user function that is not compiled.
+
+      // TODO: "octave_value varval (std::size_t data_offset) const" and "varref" would need to
+      // follow ref_rep() like scope_stack_frame::varref() does for having un-compiled functions
+      // as an eval frame, but that might maybe degrade performance somewhat of the evaluator.
+
+      auto frame = tw.current_user_frame ();
+      auto access_frame = frame->access_link ();
+
+      bool access_frame_is_vm_or_top = access_frame && (access_frame->is_scope_frame () || access_frame->is_bytecode_fcn_frame ());
+      bool caller_is_vm_or_top = frame->is_scope_frame () || frame->is_bytecode_fcn_frame ();
+
+      if (access_frame_is_vm_or_top || caller_is_vm_or_top)
+        return octave::vm::call (tw, nargout, args, this);
+      else
+        warning ("Executing compiled scripts in the VM from an un-compiled function is not supported yet");
+    }
+#endif
+
   tw.push_stack_frame (this);
 
   octave::unwind_action act ([&tw] () { tw.pop_stack_frame (); });
@@ -555,6 +603,11 @@
 octave_user_function::call (octave::tree_evaluator& tw, int nargout,
                             const octave_value_list& args)
 {
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+  if (octave::vm::maybe_compile_or_compiled (this))
+    return octave::vm::call (tw, nargout, args, this);
+#endif
+
   tw.push_stack_frame (this);
 
   octave::unwind_action act ([&tw] () { tw.pop_stack_frame (); });
--- a/libinterp/octave-value/ov-usr-fcn.h	Fri Apr 19 18:30:39 2024 +0200
+++ b/libinterp/octave-value/ov-usr-fcn.h	Fri Apr 19 12:57:20 2024 -0400
@@ -38,6 +38,9 @@
 #include "symscope.h"
 #include "token.h"
 #include "unwind-prot.h"
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+#  include "pt-bytecode.h"
+#endif
 
 class string_vector;
 
@@ -125,8 +128,30 @@
 
   octave_value dump () const;
 
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+  void set_bytecode (octave::bytecode &bytecode)
+  {
+    m_bytecode = bytecode;
+  }
+
+  void clear_bytecode ();
+
+  bool is_compiled () const { return m_bytecode.m_code.size (); }
+
+  octave::bytecode &get_bytecode () { return m_bytecode; }
+
+  bool compilation_failed () { return m_compilation_failed; }
+  void set_compilation_failed (bool val) { m_compilation_failed = val; }
+#endif
+
 protected:
 
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+  bool m_compilation_failed = false;
+
+  octave::bytecode m_bytecode;
+#endif
+
   void get_file_info ();
 
   // Our symbol table scope.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libinterp/octave-value/ov-vm.h	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,241 @@
+////////////////////////////////////////////////////////////////////////
+//
+// Copyright (C) 1996-2024 The Octave Project Developers
+//
+// See the file COPYRIGHT.md in the top-level directory of this
+// distribution or <https://octave.org/copyright/>.
+//
+// This file is part of Octave.
+//
+// Octave is free software: you can redistribute it and/or modify it
+// under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// Octave is distributed in the hope that it will be useful, but
+// WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with Octave; see the file COPYING.  If not, see
+// <https://www.gnu.org/licenses/>.
+//
+////////////////////////////////////////////////////////////////////////
+
+#if ! defined (octave_ov_vm_h)
+#define octave_ov_vm_h 1
+
+#include "octave-config.h"
+
+#include "ov.h"
+#include "load-path.h"
+
+
+// octave_value_vm is to be used only by the VM
+// and need to have the same bit representation as
+// an octave_value.
+//
+// A octave_value_vm object might not be assigned
+// to itself or have a nullptr m_rep when being
+// assigned to.
+
+class octave_value_vm
+{
+public:
+  octave_value_vm ()
+      : m_rep (octave_value::nil_rep ())
+  {
+      m_rep->m_count++;
+  }
+
+  octave_value_vm (octave_base_value *rep, bool count_add1 = true)
+    : m_rep (rep)
+  {
+    if (count_add1)
+      m_rep->m_count++;
+  }
+
+  octave_value_vm (const octave_value_vm& a)
+      : m_rep (a.m_rep)
+  {
+      m_rep->m_count++;
+  }
+  octave_value_vm (const octave_value& a)
+      : m_rep (a.m_rep)
+  {
+      m_rep->m_count++;
+  }
+
+  octave_value_vm (octave_value_vm&& a)
+  : m_rep (a.m_rep)
+  {
+      a.m_rep = nullptr;
+  }
+  octave_value_vm (octave_value&& a)
+  : m_rep (a.m_rep)
+  {
+      a.m_rep = nullptr;
+  }
+
+  ~octave_value_vm () __attribute__ ((always_inline))
+  {
+      // Because we define a move constructor and a move assignment
+      // operator, rep may be a nullptr here.  We should only need to
+      // protect the move assignment operator in a similar way.
+
+    if (m_rep && --m_rep->m_count == 0)
+      delete m_rep;
+  }
+
+  octave_value_vm& operator = (const octave_value_vm& a)
+  {
+    if (--m_rep->m_count == 0)
+      delete m_rep;
+
+    m_rep = a.m_rep;
+    m_rep->m_count++;
+
+    return *this;
+  }
+
+  octave_value_vm& operator = (octave_value_vm&& a)
+  {
+    if (--m_rep->m_count == 0)
+      delete m_rep;
+
+    m_rep = a.m_rep;
+    a.m_rep = nullptr;
+
+    return *this;
+  }
+
+  octave_value_vm& operator = (octave_value&& a)
+  {
+    if (--m_rep->m_count == 0)
+      delete m_rep;
+
+    m_rep = a.m_rep;
+    a.m_rep = nullptr;
+
+    return *this;
+  }
+
+  static void release_rep (octave_base_value *rep)
+  {
+    if (--rep->m_count == 0)
+      delete rep;
+  }
+
+  void steal_ov_rep (octave_value &&ov)
+  {
+    if (m_rep && --m_rep->m_count == 0)
+      delete m_rep;
+
+    m_rep = ov.m_rep;
+    ov.m_rep = nullptr;
+  }
+
+  octave_base_value & get_rep () { return *m_rep; }
+
+  octave_value_vm& operator = (octave_base_value *rep)
+  {
+    if (--m_rep->m_count == 0)
+      delete m_rep;
+
+    m_rep = rep;
+
+    return *this;
+  }
+
+  bool vm_need_dispatch_push () __attribute__ ((pure, always_inline, nothrow))
+  { return m_rep->vm_need_dispatch_push (); }
+
+  bool vm_need_dispatch_assign_rhs () __attribute__ ((pure, always_inline, nothrow))
+  { return m_rep->vm_need_dispatch_assign_rhs (); }
+
+  bool vm_need_dispatch_assign_lhs () __attribute__ ((pure, always_inline, nothrow))
+  { return m_rep->vm_need_dispatch_assign_lhs (); }
+
+  int type_id() const __attribute__ ((pure, always_inline, nothrow))
+  { return m_rep->type_id (); }
+
+  bool is_matrix_type () const __attribute__ ((pure, always_inline, nothrow))
+  { return m_rep->is_matrix_type (); }
+
+  octave_base_value *m_rep;
+};
+
+class octave_cached_value : public octave_base_value
+{
+public:
+
+  octave_cached_value ()
+  {
+    m_n_updated = octave::load_path::get_weak_n_updated ();
+  }
+
+  void set_cached_obj (octave_value cache_obj)
+  {
+    m_cached_object = cache_obj;
+  }
+
+  octave_value get_cached_value ()
+  {
+    return m_cached_object;
+  }
+
+  bool cache_is_valid ()
+  {
+    return m_n_updated == octave::load_path::get_weak_n_updated () && m_cached_object.is_defined ();
+  }
+
+  bool is_defined () const { return true; }
+
+
+private:
+
+  octave_value m_cached_object;
+  octave_idx_type m_n_updated = 0;
+};
+
+// Class that is a wrapper around arguments and subsref type (i.e. '(','{' and '.')
+// that are needed for building up the args for a classdef subsref. The object
+// that will be called is set in the constructor of the wrapper.
+//
+// The reason it is in an octave_value is to make unwinding of the VM stack easier.
+//
+// INDEX_STRUCT_SUBCALL adds one set of args and type for each execution of itself,
+// and the last INDEX_STRUCT_SUBCALL makes a subsref of the classdef object with all
+// the args.
+class octave_vm_chainargs_wrapper : public octave_base_value
+{
+public:
+  octave_vm_chainargs_wrapper (octave_value obj_to_call) : m_obj_to_call (obj_to_call) {}
+
+  // Invalid to call after steal
+  void append_args (octave_value_list &&ovl)
+  {
+    m_idxs.push_back (ovl);
+  }
+
+  // Invalid to call after steal
+  void append_type (char type)
+  {
+    m_types.push_back (type);
+  }
+
+  bool is_vm_chainargs_wrapper () const { return true; }
+
+  // Only callable once
+  std::list<octave_value_list> && steal_idxs () { return std::move (m_idxs); }
+  std::string && steal_types () { return std::move (m_types); }
+  octave_value && steal_obj_to_call () { return std::move (m_obj_to_call); }
+
+private:
+  std::list<octave_value_list> m_idxs;
+  std::string m_types;
+  octave_value m_obj_to_call;
+};
+
+#endif
--- a/libinterp/octave-value/ov.cc	Fri Apr 19 18:30:39 2024 +0200
+++ b/libinterp/octave-value/ov.cc	Fri Apr 19 12:57:20 2024 -0400
@@ -1491,6 +1491,25 @@
     return *this;
 }
 
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+
+octave_value
+octave_value::maybe_as_trivial_range ()
+{
+  if (m_rep->is_trivial_range ())
+    return *this;
+  if (!is_range ())
+    return *this;
+
+  ov_range<double> range = range_value ();
+  if (!range.could_be_trivial_range ())
+    return *this;
+
+  return range.as_trivial_range ();
+}
+
+#endif
+
 octave_value
 octave_value::next_subsref (bool auto_add, const std::string& type,
                             const std::list<octave_value_list>& idx,
--- a/libinterp/octave-value/ov.h	Fri Apr 19 18:30:39 2024 +0200
+++ b/libinterp/octave-value/ov.h	Fri Apr 19 12:57:20 2024 -0400
@@ -53,6 +53,15 @@
 class scope_stack_frame;
 class base_value_stack_frame;
 
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+class vm;
+class bytecode_fcn_stack_frame;
+class bytecode_script_stack_frame;
+class bytecode_nested_fcn_stack_frame;
+class bytecode_anon_fcn_stack_frame;
+class bytecode_frame;
+#endif
+
 OCTAVE_END_NAMESPACE(octave)
 
 class Cell;
@@ -1556,6 +1565,113 @@
 
 protected:
 
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+
+  // Functions for use by the VM.
+  friend class octave_value_ref;
+  friend class octave_value_vm;
+  friend class octave::vm;
+  friend class octave::bytecode_fcn_stack_frame;
+  friend class octave::bytecode_script_stack_frame;
+  friend class octave::bytecode_nested_fcn_stack_frame;
+  friend class octave::bytecode_frame;
+  friend class octave::scope_stack_frame;
+  friend class octave::base_value_stack_frame;
+  friend class octave_value_ref_ptr;
+
+  bool is_ref () const { return m_rep->is_ref (); }
+
+  bool is_vm_chainargs_wrapper () const { return m_rep->is_vm_chainargs_wrapper (); }
+
+  octave_value_ref * ref_rep () { return m_rep->ref_rep (); }
+
+  bool is_nil () const { return m_rep == nil_rep (); }
+
+  // True for the types based on ov-base-mat
+  bool is_full_num_matrix () const
+  { return m_rep->is_full_num_matrix (); }
+
+  bool is_function_cache () const
+  { return m_rep->is_function_cache (); }
+
+  // function handles might have a function cache embedded
+  bool has_function_cache () const
+  { return m_rep->has_function_cache (); }
+
+  octave_function * get_cached_fcn (const octave_value_list& args)
+  { return m_rep->get_cached_fcn (args); }
+
+  // Arguments need to be pointing to union stack_element:s.
+  // Using void* to avoid polluting namespace.
+  octave_function * get_cached_fcn (void *beg, void *end)
+  { return m_rep->get_cached_fcn (beg, end); }
+
+  // Returns true if the octave_value is either undefined or
+  // or a function.
+  bool is_maybe_function () const
+  { return m_rep->is_maybe_function (); }
+
+  bool vm_need_storable_call () const
+  {
+    return m_rep->vm_need_storable_call ();
+  };
+
+  bool vm_need_dispatch_assign_rhs ()
+  {
+    return m_rep->vm_need_dispatch_assign_rhs ();
+  }
+
+  bool vm_need_dispatch_assign_lhs ()
+  {
+    return m_rep->vm_need_dispatch_assign_lhs ();
+  }
+
+  bool vm_need_dispatch_push ()
+  {
+    return m_rep->vm_need_dispatch_push ();
+  }
+
+  octave_base_value::vm_call_dispatch_type vm_dispatch_call ()
+  {
+    return m_rep->vm_dispatch_call ();
+  }
+
+  void maybe_call_dtor () { m_rep->maybe_call_dtor (); }
+
+  octave_value
+  checked_full_matrix_elem (octave_idx_type n) const
+  { return m_rep->checked_full_matrix_elem (n); }
+
+  octave_value
+  checked_full_matrix_elem (octave_idx_type i, octave_idx_type j) const
+  { return m_rep->checked_full_matrix_elem (i, j); }
+
+  octave_value
+  vm_extract_forloop_value (octave_idx_type idx)
+  {
+    return m_rep->vm_extract_forloop_value (idx);
+  }
+
+  double
+  vm_extract_forloop_double (octave_idx_type idx)
+  {
+    return m_rep->vm_extract_forloop_double (idx);
+  }
+
+  bool
+  maybe_update_double (double d)
+  {
+    return m_rep->maybe_update_double (d);
+  }
+
+  octave_value
+  maybe_as_trivial_range ();
+
+  bool
+  is_trivial_range () { return m_rep->is_trivial_range (); }
+
+#endif
+
   //! The real representation.
   octave_base_value *m_rep;
 
--- a/libinterp/parse-tree/module.mk	Fri Apr 19 18:30:39 2024 +0200
+++ b/libinterp/parse-tree/module.mk	Fri Apr 19 12:57:20 2024 -0400
@@ -15,6 +15,10 @@
   %reldir%/pt-assign.h \
   %reldir%/pt-binop.h \
   %reldir%/pt-bp.h \
+  %reldir%/pt-bytecode.h \
+  %reldir%/pt-bytecode-walk.h \
+  %reldir%/pt-bytecode-vm.h \
+  %reldir%/pt-bytecode-vm-internal.h \
   %reldir%/pt-cbinop.h \
   %reldir%/pt-cell.h \
   %reldir%/pt-check.h \
@@ -66,6 +70,8 @@
   %reldir%/pt-assign.cc \
   %reldir%/pt-binop.cc \
   %reldir%/pt-bp.cc \
+  %reldir%/pt-bytecode-walk.cc \
+  %reldir%/pt-bytecode-vm.cc \
   %reldir%/pt-cbinop.cc \
   %reldir%/pt-cell.cc \
   %reldir%/pt-check.cc \
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libinterp/parse-tree/pt-bytecode-vm-internal.h	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,493 @@
+////////////////////////////////////////////////////////////////////////
+//
+// Copyright (C) 2023-2024 The Octave Project Developers
+//
+// See the file COPYRIGHT.md in the top-level directory of this
+// distribution or <https://octave.org/copyright/>.
+//
+// This file is part of Octave.
+//
+// Octave is free software: you can redistribute it and/or modify it
+// under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// Octave is distributed in the hope that it will be useful, but
+// WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with Octave; see the file COPYING.  If not, see
+// <https://www.gnu.org/licenses/>.
+//
+////////////////////////////////////////////////////////////////////////
+
+#if ! defined (octave_pt_bytecode_vm_internal_h)
+#define octave_pt_bytecode_vm_internal_h 1
+
+#include "octave-config.h"
+
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+
+#define EXPAND_CSLIST_PUSH_N_OVL_ELEMENTS_TO_STACK(ovl,nargout) \
+do {\
+  if (nargout <= 1)\
+    PUSH_OV (ovl.first_or_nil_ov());\
+else /* TODO: Should be function call to keep code shorter. */\
+  {\
+    int actual_nargout = 0;\
+\
+    int n_retval = std::min (static_cast<int> (ovl.length ()), static_cast<int> (nargout));\
+    /* We want to push the ovl backwards */\
+    for (int ii = n_retval - 1; ii >= 0 && actual_nargout < nargout; ii--)\
+    {\
+      octave_value &arg = ovl (ii);\
+\
+      if (arg.is_cs_list ())\
+        {\
+          /* cs-list are also pushed backwards */\
+          octave_value_list args = arg.list_value ();\
+          /* We might need to skip the elements in the cs-list's end */ \
+          int n_left = nargout - actual_nargout;\
+          for (int j = std::min (static_cast<int> (args.length () - 1), n_left - 1);\
+                j >= 0 && actual_nargout < nargout; \
+                j--)\
+            {\
+              PUSH_OV (args (j));\
+              actual_nargout++;\
+            }\
+        }\
+      else\
+        {\
+          PUSH_OV (std::move (arg));\
+          actual_nargout++;\
+        }\
+    }\
+\
+    /* TODO: Need errors here for missing arguments in assignment somehow */ \
+    if (actual_nargout != nargout)\
+      {\
+        int diff = nargout - actual_nargout;\
+        stack_element *start = sp - actual_nargout;\
+        stack_lift (start, actual_nargout, diff);\
+        sp += diff;\
+      }\
+  }\
+} while (0)
+
+
+#define MAKE_BINOP(op) \
+{                                       \
+  octave_value &rhs = TOP_OV ();        \
+  octave_value &lhs = SEC_OV ();        \
+                                        \
+  try                                   \
+    {                                   \
+      octave_value ans =                \
+        binary_op (*m_ti,               \
+                    octave_value::op,   \
+                    lhs, rhs);          \
+      STACK_DESTROY (2);                \
+      PUSH_OV (std::move (ans));        \
+    }                                   \
+  CATCH_INTERRUPT_EXCEPTION             \
+  CATCH_INDEX_EXCEPTION                 \
+  CATCH_EXECUTION_EXCEPTION             \
+  CATCH_BAD_ALLOC                       \
+}                                       \
+
+#define MAKE_BINOP_SPECIALIZED(op_fn,jmp_target,op_target,target_type) \
+{                                                                                        \
+  octave_value &rhs = TOP_OV ();                                                         \
+  octave_value_vm &lhs = SEC_OV_VM ();                                                   \
+                                                                                         \
+  int rhs_type = rhs.type_id ();                                                         \
+  int lhs_type = lhs.type_id ();                                                         \
+  int t_type = target_type;                                                              \
+  if (OCTAVE_UNLIKELY (rhs_type != lhs_type || rhs_type != t_type))                      \
+    {                                                                                    \
+      ip[-2] = static_cast<unsigned char> (INSTR::op_target);                            \
+      goto jmp_target;                                                                   \
+    }                                                                                    \
+                                                                                         \
+  try                                                                                    \
+    {                                                                                    \
+      lhs = op_fn (lhs.get_rep (), rhs.get_rep ());                                      \
+      rhs.~octave_value ();                                                              \
+      STACK_SHRINK (1);                                                                  \
+    }                                                                                    \
+  CATCH_INTERRUPT_EXCEPTION                                                              \
+  CATCH_INDEX_EXCEPTION                                                                  \
+  CATCH_EXECUTION_EXCEPTION                                                              \
+  CATCH_BAD_ALLOC                                                                        \
+}                                                                                        \
+
+#define MAKE_BINOP_CST_SPECIALIZED(op_fn,jmp_target,op_target,target_type) \
+{                                                                              \
+  octave_value &cst = data [arg0];                                             \
+  octave_value &arg = TOP_OV ();                                               \
+  int lhs_is_cst = *ip++;                                                      \
+                                                                               \
+  int arg_type = arg.type_id ();                                               \
+  if (OCTAVE_UNLIKELY (arg_type != target_type))                               \
+    {                                                                          \
+      ip[-3] = static_cast<unsigned char> (INSTR::op_target);                  \
+      ip--;                                                                    \
+      goto jmp_target;                                                         \
+    }                                                                          \
+                                                                               \
+  try                                                                          \
+    {                                                                          \
+      octave_value ret =  lhs_is_cst ?                                         \
+        op_fn (cst.get_rep (), arg.get_rep ()) :                               \
+        op_fn (arg.get_rep (), cst.get_rep ());                                \
+      STACK_DESTROY (1);                                                       \
+      PUSH_OV (ret);                                                           \
+    }                                                                          \
+  CATCH_INTERRUPT_EXCEPTION                                                    \
+  CATCH_INDEX_EXCEPTION                                                        \
+  CATCH_EXECUTION_EXCEPTION                                                    \
+  CATCH_BAD_ALLOC                                                              \
+}                                                                              \
+
+#define MAKE_UNOP_SPECIALIZED(op_fn, jmp_target, op_target, target_type) \
+{                                                                                        \
+  octave_value &ov = TOP_OV ();                                                          \
+                                                                                         \
+  if (OCTAVE_UNLIKELY (ov.type_id () != target_type))                                    \
+    {                                                                                    \
+      /* Change the specialized opcode to the generic one */                             \
+      ip[-2] = static_cast<unsigned char> (INSTR::op_target);                            \
+      goto jmp_target;                                                                   \
+    }                                                                                    \
+                                                                                         \
+  try                                                                                    \
+    {                                                                                    \
+      ov = op_fn (ov.get_rep ());                                          \
+    }                                                                                    \
+  CATCH_INTERRUPT_EXCEPTION                                                              \
+  CATCH_INDEX_EXCEPTION                                                                  \
+  CATCH_EXECUTION_EXCEPTION                                                              \
+  CATCH_BAD_ALLOC                                                                        \
+}                                                                                        \
+
+#define MAKE_BINOP_SELFMODIFYING(op, jmp_target, op_target) \
+{                                                                                          \
+  octave_value &rhs = TOP_OV ();                                                           \
+  octave_value &lhs = SEC_OV ();                                                           \
+                                                                                           \
+  int rhs_type = rhs.type_id ();                                                           \
+  int lhs_type = lhs.type_id ();                                                           \
+  if (rhs_type == lhs_type && rhs_type == m_scalar_typeid)                                 \
+    {                                                                                      \
+      ip[-2] = static_cast<unsigned char> (INSTR::op_target);                              \
+      goto jmp_target;                                                                     \
+    }                                                                                      \
+                                                                                           \
+  try                                                                                      \
+    {                                                                                      \
+      octave_value ans =                                                                   \
+        binary_op (*m_ti,                                                                  \
+                    octave_value::op,                                                      \
+                    lhs, rhs);                                                             \
+      STACK_DESTROY (2);                                                                   \
+      PUSH_OV (std::move (ans));                                                           \
+    }                                                                                      \
+  CATCH_INTERRUPT_EXCEPTION                                                                \
+  CATCH_INDEX_EXCEPTION                                                                    \
+  CATCH_EXECUTION_EXCEPTION                                                                \
+  CATCH_BAD_ALLOC                                                                          \
+}
+
+#define MAKE_BINOP_CST_SELFMODIFYING(op, jmp_target, op_target) \
+{                                                                                             \
+  octave_value &cst = data [arg0];                                                            \
+  octave_value &arg = TOP_OV ();                                                              \
+  int lhs_is_cst = *ip++;                                                                     \
+                                                                                              \
+  int cst_type = cst.type_id ();                                                              \
+  int arg_type = arg.type_id ();                                                              \
+  if (OCTAVE_UNLIKELY (cst_type == arg_type && cst_type == m_scalar_typeid))                  \
+    {                                                                                         \
+      ip[-3] = static_cast<unsigned char> (INSTR::op_target);                                 \
+      ip--;                                                                                   \
+      goto jmp_target;                                                                        \
+    }                                                                                         \
+                                                                                              \
+  try                                                                                         \
+    {                                                                                         \
+      octave_value ans = lhs_is_cst ?                                                         \
+          binary_op (*m_ti,                                                                   \
+                      octave_value::op,                                                       \
+                      cst, arg)                                                               \
+        : binary_op (*m_ti,                                                                   \
+                      octave_value::op,                                                       \
+                      arg, cst);                                                              \
+      STACK_DESTROY (1);                                                                      \
+      PUSH_OV (std::move (ans));                                                              \
+    }                                                                                         \
+  CATCH_INTERRUPT_EXCEPTION                                                                   \
+  CATCH_INDEX_EXCEPTION                                                                       \
+  CATCH_EXECUTION_EXCEPTION                                                                   \
+  CATCH_BAD_ALLOC                                                                             \
+}
+
+#define CATCH_INDEX_EXCEPTION \
+catch (index_exception& ie)                              \
+{                                                        \
+  (*sp++).pee = ie.dup ();                               \
+  (*sp++).i = static_cast<int> (error_type::INDEX_ERROR);\
+  goto unwind;                                           \
+}                                                        \
+
+#define CATCH_INDEX_EXCEPTION_WITH_NAME \
+catch (index_exception& ie)                              \
+{                                                        \
+  ie.set_var (name_data [slot]);                         \
+  (*sp++).pee = ie.dup ();                               \
+  (*sp++).i = static_cast<int> (error_type::INDEX_ERROR);\
+  goto unwind;                                           \
+}                                                        \
+
+#define CATCH_INDEX_EXCEPTION_WITH_MAYBE_NAME(has_name)  \
+catch (index_exception& ie)                              \
+{                                                        \
+  if (has_name)                                          \
+    ie.set_var (name_data [slot]);                       \
+  (*sp++).pee = ie.dup ();                               \
+  (*sp++).i = static_cast<int> (error_type::INDEX_ERROR);\
+  goto unwind;                                           \
+}                                                        \
+
+#define CATCH_INTERRUPT_EXCEPTION \
+catch (interrupt_exception& e)                                          \
+  {                                                                     \
+    (*sp++).i = static_cast<int> (error_type::INTERRUPT_EXC);           \
+    goto unwind;                                                        \
+  }                                                                     \
+
+#define CATCH_EXECUTION_EXCEPTION \
+catch (execution_exception& e)                                          \
+  {                                                                     \
+    /* TODO: Id? */                                                     \
+    (*sp++).pee = new execution_exception {e};                          \
+    (*sp++).i = static_cast<int> (error_type::EXECUTION_EXC);           \
+                                                                        \
+    goto unwind;                                                        \
+  }                                                                     \
+
+#define CATCH_STACKPUSH_EXECUTION_EXCEPTION \
+catch (execution_exception& e)                                          \
+  {                                                                     \
+    m_could_not_push_frame = true;                                      \
+    (*sp++).pee = new execution_exception {e};                          \
+    (*sp++).i = static_cast<int> (error_type::EXECUTION_EXC);           \
+                                                                        \
+    goto unwind;                                                        \
+  }                                                                     \
+
+#define CATCH_STACKPUSH_BAD_ALLOC \
+catch (const std::bad_alloc&)                                           \
+{                                                                       \
+  m_could_not_push_frame = true;                                        \
+  (*sp++).i = static_cast<int> (error_type::BAD_ALLOC);                 \
+                                                                        \
+  goto unwind;                                                          \
+}
+
+#define CATCH_EXIT_EXCEPTION \
+catch (const exit_exception& e)                                         \
+{                                                                       \
+  (*sp++).i = e.exit_status ();                                         \
+  (*sp++).i = e.safe_to_return ();                                      \
+  (*sp++).i = static_cast<int> (error_type::EXIT_EXCEPTION);            \
+                                                                        \
+  goto unwind;                                                          \
+}
+
+#define CATCH_BAD_ALLOC \
+catch (const std::bad_alloc&)                                           \
+{                                                                       \
+  (*sp++).i = static_cast<int> (error_type::BAD_ALLOC);                 \
+                                                                        \
+  goto unwind;                                                          \
+}
+
+#define MAKE_BYTECODE_CALL \
+if (sp + stack_min_for_new_call >= m_stack + stack_size)                                          \
+  {                                                                                               \
+    (*sp++).pee = new execution_exception {"error","","VM is running out of stack space"};        \
+    (*sp++).i = static_cast<int> (error_type::EXECUTION_EXC);                                     \
+    goto unwind;                                                                                  \
+  }                                                                                               \
+/* We are now going to call another function */                                                   \
+/* compiled to bytecode */                                                                        \
+                                                                                                  \
+m_tw->set_active_bytecode_ip (ip - code);                                                         \
+stack_element *first_arg = sp - n_args_on_stack;                                                  \
+                                                                                                  \
+/* Push address to first arg (or where one would have been */                                     \
+/* if there are no args), so we can restore the sp at return */                                   \
+(*sp++).pse = first_arg;                                                                          \
+                                                                                                  \
+/* Push unwind data */                                                                            \
+(*sp++).pud = unwind_data;                                                                        \
+                                                                                                  \
+/* Push code */                                                                                   \
+(*sp++).puc = code;                                                                               \
+                                                                                                  \
+/* Push data */                                                                                   \
+(*sp++).pov = data;                                                                               \
+                                                                                                  \
+/* Push id names */                                                                               \
+(*sp++).ps = name_data;                                                                           \
+                                                                                                  \
+/* Push bsp */                                                                                    \
+(*sp++).pse = bsp;                                                                                \
+                                                                                                  \
+/* Push the instruction pointer */                                                                \
+(*sp++).puc = ip;                                                                                 \
+                                                                                                  \
+/* The amount of return values the caller actually wants. Not necessarily the */                  \
+/* same as the amount of return values the caller wants the callee to produce. */                 \
+/* (last on caller stack) */                                                                      \
+(*sp++).u = caller_nvalback;                                                                      \
+                                                                                                  \
+/* set callee bsp */                                                                              \
+m_sp = bsp = sp;                                                                                  \
+                                                                                                  \
+/* Push nargout (first on callee stack) */                                                        \
+(*sp++).u = nargout;                                                                              \
+                                                                                                  \
+/* Set the new data, code etc */                                                                  \
+bytecode &bc = usr_fcn->get_bytecode ();                                                          \
+if (OCTAVE_UNLIKELY (m_profiler_enabled))                                                         \
+  {                                                                                               \
+    auto p = vm::m_vm_profiler;                                                                   \
+    if (p)                                                                                        \
+      {                                                                                           \
+        std::string caller_name = data[2].string_value (); /* profiler_name () queried at compile time */ \
+        p->enter_fn (caller_name, bc);                                                            \
+      }                                                                                           \
+  }                                                                                               \
+m_data = data = bc.m_data.data ();                                                                \
+m_code = code = bc.m_code.data ();                                                                \
+m_name_data = name_data = bc.m_ids.data ();                                                       \
+m_unwind_data = unwind_data = &bc.m_unwind_data;                                                  \
+                                                                                                  \
+                                                                                                  \
+/* Set the ip to 0 */                                                                             \
+ip = code;                                                                                        \
+int n_returns_callee = static_cast<signed char> (*ip++); /* Negative for varargout */             \
+if (OCTAVE_UNLIKELY (n_returns_callee < 0))                                                       \
+  {                                                                                               \
+    if (n_returns_callee == -128) /* Anonymous function */                                        \
+      n_returns_callee = 1;                                                                       \
+    else                                                                                          \
+      n_returns_callee = -n_returns_callee;                                                       \
+  }                                                                                               \
+int n_args_callee = static_cast<signed char> (*ip++); /* Negative for varargin */                 \
+int n_locals_callee = POP_CODE_USHORT ();                                                         \
+                                                                                                  \
+if (n_args_callee < 0)                                                                            \
+{                                                                                                 \
+  sp[0].pv = static_cast<void*> (usr_fcn);                                                        \
+  goto varargin_call;                                                                             \
+}                                                                                                 \
+                                                                                                  \
+/* Construct return values - note nargout */                                                      \
+/* is already pushed as a uint64 */                                                               \
+for (int ii = 1; ii < n_returns_callee; ii++)                                                     \
+  PUSH_OV ();                                                                                     \
+                                                                                                  \
+int n_args_on_callee_stack = 0;                                                                   \
+bool all_too_many_args = false;                                                                   \
+/* Move the args to the new stack */                                                              \
+for (int ii = 0; ii < n_args_on_stack; ii++)                                                      \
+  {                                                                                               \
+    octave_value &arg = first_arg[ii].ov;                                                         \
+                                                                                                  \
+    if (arg.is_cs_list ())                                                                        \
+      {                                                                                           \
+        octave_value_list args = arg.list_value ();                                               \
+        octave_idx_type n_el = args.length ();                                                    \
+        if (n_el + n_args_on_callee_stack > 512)                                                  \
+          {                                                                                       \
+            all_too_many_args = true;                                                             \
+          }                                                                                       \
+        else                                                                                      \
+          {                                                                                       \
+            for (int j = 0; j < n_el; j++)                                                        \
+              {                                                                                   \
+                PUSH_OV (args (j));                                                               \
+                n_args_on_callee_stack++;                                                         \
+              }                                                                                   \
+          }                                                                                       \
+      }                                                                                           \
+    else                                                                                          \
+      {                                                                                           \
+        PUSH_OV (std::move (arg));                                                                \
+        n_args_on_callee_stack++;                                                                 \
+      }                                                                                           \
+    /* Destroy the args */                                                                        \
+    arg.~octave_value ();                                                                         \
+  }                                                                                               \
+/* Construct missing args */                                                                      \
+for (int ii = n_args_on_callee_stack; ii < n_args_callee; ii++)                                   \
+  PUSH_OV ();                                                                                     \
+                                                                                                  \
+/* Construct locals */                                                                            \
+int n_locals_to_ctor =                                                                            \
+  n_locals_callee - n_args_callee - n_returns_callee;                                             \
+for (int ii = 0; ii < n_locals_to_ctor; ii++)                                                     \
+  PUSH_OV ();                                                                                     \
+                                                                                                  \
+try                                                                                               \
+  {                                                                                               \
+    m_tw->push_stack_frame(*this, usr_fcn, nargout, n_args_on_callee_stack);                      \
+  }                                                                                               \
+CATCH_STACKPUSH_EXECUTION_EXCEPTION /* Sets m_could_not_push_frame to true */                     \
+CATCH_STACKPUSH_BAD_ALLOC                                                                         \
+                                                                                                  \
+if (OCTAVE_UNLIKELY (m_output_ignore_data))                                                       \
+  {                                                                                               \
+    /* Called fn needs to know about ignored outputs .e.g. [~, a] = foo() */                      \
+    m_output_ignore_data->push_frame (*this);                                                     \
+  }                                                                                               \
+                                                                                                  \
+/* "auto var" in the frame object. This is needed if nargout() etc are called */                  \
+set_nargout (nargout);                                                                            \
+                                                                                                  \
+if (all_too_many_args)                                                                            \
+  {                                                                                               \
+    std::string fn_name = unwind_data->m_name;                                                    \
+    (*sp++).pee = new execution_exception {"error", "Octave:invalid-fun-call",                    \
+                                           fn_name + ": function called with over 512 inputs."    \
+                                           " Consider using varargin."};                          \
+    (*sp++).i = static_cast<int> (error_type::EXECUTION_EXC);                                     \
+    goto unwind;                                                                                  \
+  }                                                                                               \
+if (n_args_on_callee_stack > n_args_callee)                                                       \
+  {                                                                                               \
+    std::string fn_name = unwind_data->m_name;                                                    \
+    (*sp++).pee = new execution_exception {"error", "Octave:invalid-fun-call",                    \
+                                           fn_name + ": function called with too many inputs"};   \
+    (*sp++).i = static_cast<int> (error_type::EXECUTION_EXC);                                     \
+    goto unwind;                                                                                  \
+  }                                                                                               \
+/* N_RETURNS is negative for varargout */                                                         \
+int n_returns = N_RETURNS () - 1; /* %nargout in N_RETURNS */                                     \
+if (n_returns >= 0 && nargout > n_returns)                                                        \
+  {                                                                                               \
+    std::string fn_name = unwind_data->m_name;                                                    \
+    (*sp++).pee = new execution_exception {"error", "Octave:invalid-fun-call",                    \
+                                           fn_name + ": function called with too many outputs"};  \
+    (*sp++).i = static_cast<int> (error_type::EXECUTION_EXC);                                     \
+    goto unwind;                                                                                  \
+  }                                                                                               \
+
+
+#endif
+
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libinterp/parse-tree/pt-bytecode-vm.cc	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,8667 @@
+////////////////////////////////////////////////////////////////////////
+//
+// Copyright (C) 2023-2024 The Octave Project Developers
+//
+// See the file COPYRIGHT.md in the top-level directory of this
+// distribution or <https://octave.org/copyright/>.
+//
+// This file is part of Octave.
+//
+// Octave is free software: you can redistribute it and/or modify it
+// under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// Octave is distributed in the hope that it will be useful, but
+// WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with Octave; see the file COPYING.  If not, see
+// <https://www.gnu.org/licenses/>.
+//
+////////////////////////////////////////////////////////////////////////
+
+#if defined (HAVE_CONFIG_H)
+#  include "config.h"
+#endif
+
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+
+#include <iostream>
+#include <iomanip>
+
+#include "time-wrappers.h"
+
+#include "pt-bytecode-vm.h"
+#include "pt-bytecode-vm-internal.h"
+#include "pt-bytecode-walk.h"
+#include "ov.h"
+#include "error.h"
+#include "symtab.h"
+#include "interpreter-private.h"
+#include "interpreter.h"
+#include "pt-eval.h"
+#include "pt-tm-const.h"
+#include "pt-stmt.h"
+#include "ov-classdef.h"
+#include "ov-cs-list.h"
+#include "ov-ref.h"
+#include "ov-range.h"
+#include "ov-inline.h"
+#include "ov-cell.h"
+
+#include "ov-vm.h"
+#include "ov-fcn-handle.h"
+#include "ov-cs-list.h"
+
+//#pragma GCC optimize("O0")
+
+// Returns the uint16 value stored at 'p' taking endianness into account
+#ifdef WORDS_BIGENDIAN
+#define USHORT_FROM_UCHAR_PTR(p) (((p)[0] << 8) + (p)[1])
+#else
+#define USHORT_FROM_UCHAR_PTR(p) ((p)[0] + ((p)[1] << 8))
+#endif
+
+// Returns the uint16 value from two unsigned chars taking endianness into account
+#ifdef WORDS_BIGENDIAN
+#define USHORT_FROM_UCHARS(c1,c2) ((c1 << 8) | (c2))
+#else
+#define USHORT_FROM_UCHARS(c1,c2) ((c1) | (c2 << 8))
+#endif
+
+static bool ov_need_stepwise_subsrefs (octave_value &ov);
+static void copy_many_args_to_caller (octave::stack_element *sp, octave::stack_element *caller_stack_end,
+                                      int n_args_to_move, int n_args_caller_expects);
+static int lhs_assign_numel (octave_value &ov, const std::string& type, const std::list<octave_value_list>& idx);
+
+// Diagnostic helper macros for the VM.  Each raises an interpreter
+// error tagged with the C++ source line (__LINE__) where it fired:
+//   TODO  -- construct not implemented yet
+//   ERR   -- generic VM error
+//   CHECK -- internal consistency assertion (stringizes the condition)
+//   PANIC -- state that should be unreachable
+#define TODO(msg) error("Not done yet %d: " msg, __LINE__)
+#define ERR(msg) error("VM error %d: " msg, __LINE__)
+#define CHECK(cond)                                                            \
+  do {                                                                         \
+    if (!(cond))                                                               \
+      ERR("Internal VM consistency check failed, " #cond);                     \
+  } while ((0))
+#define PANIC(msg) error("VM panic %d: " msg, __LINE__)
+
+using namespace octave;
+
+// Assemble a 32-bit unsigned value from the four bytes at 'p', honoring
+// the host byte order (WORDS_BIGENDIAN), matching how the bytecode
+// compiler emitted the operand.
+static unsigned
+chars_to_uint (unsigned char *p)
+{
+  unsigned u = 0;
+#ifdef WORDS_BIGENDIAN
+  u |= *p++ << 24;
+  u |= *p++ << 16;
+  u |= *p++ << 8;
+  u |= *p;
+#else
+  u |= *p++;
+  u |= *p++ << 8;
+  u |= *p++ << 16;
+  u |= *p << 24;
+#endif
+
+  return u;
+}
+
+// Convenience overload: disassemble a whole bytecode object using its
+// own code vector and identifier-name table.
+std::vector<std::pair<int, std::string>>
+octave::opcodes_to_strings (bytecode &bc)
+{
+  return opcodes_to_strings (bc.m_code, bc.m_ids);
+}
+
+// Disassemble the raw bytecode in 'v_code' into human-readable rows.
+// Each returned pair is {code offset, opcode text with operands};
+// 'names' maps slot numbers to identifier names used to annotate
+// slot-referencing operands.  Raises an error on truncated or unknown
+// opcodes.
+std::vector<std::pair<int, std::string>>
+octave::opcodes_to_strings (std::vector<unsigned char> &v_code, std::vector<std::string> &names)
+{
+  unsigned char *p = v_code.data ();
+  unsigned char *code = p;
+  int n = v_code.size ();
+  bool wide_opext_active = false;
+
+  // Skip some framedata
+  p += 4;
+
+  std::vector<std::pair<int, std::string>> v_pair_row_str;
+
+  // Helper macros for the decode loop below.  CASE_START/CASE_END
+  // bracket one opcode's case: they record the code offset in 'ip',
+  // build the text in 's' and collect referenced id names in 'v_ids'.
+  // The P* macros each consume and print one operand: PCHAR (byte),
+  // PSHORT (16-bit), PINT (32-bit), PSSLOT/PWSLOT (narrow/wide slot,
+  // annotated with its name), PSLOT (narrow or wide depending on a
+  // preceding WIDE prefix tracked in 'wide_opext_active'), and
+  // PCHAR_AS_CHAR (byte shown as a character literal).
+#define CASE_START(type) \
+  case INSTR::type:                     \
+  { /* Line stored in s */              \
+  std::string s;                        \
+  /* Code offset */                     \
+  int ip = static_cast<int> (p - code); \
+  s += #type;                           \
+  /* vec for id names */                \
+  std::vector<std::string> v_ids;       \
+
+#define CASE_END()        \
+    if (v_ids.size ())                  \
+      {                                 \
+        s += " #";                      \
+        for (auto ss : v_ids)           \
+          s += " " + ss;                \
+      }                                 \
+    v_pair_row_str.push_back ({ip, s}); \
+    break;}                             \
+
+#define PRINT_OP(type)    \
+  CASE_START (type)       \
+  CASE_END ()             \
+
+#define PCHAR() \
+    {\
+      if (wide_opext_active)                  \
+        {                                     \
+          wide_opext_active = false;          \
+          PSHORT()                            \
+        }                                     \
+      else                                    \
+        {                                     \
+          p++;                                \
+          CHECK_END ();                       \
+          s += " " + std::to_string (*p);     \
+        }\
+    }
+
+#define PCHAR_AS_CHAR() \
+    {p++;                                     \
+    CHECK_END ();                             \
+    s += std::string {" '"} + static_cast<char> (*p) + "'";}
+
+#define PSHORT() \
+    {p++;                                        \
+    CHECK_END ();                                \
+    unsigned char b0 = *p;                       \
+    p++;                                         \
+    CHECK_END ();                                \
+    unsigned char b1 = *p;                       \
+    unsigned u = USHORT_FROM_UCHARS (b0, b1);    \
+    s += " " + std::to_string (u);}
+
+#define PSSLOT() \
+    {p++;                                                            \
+    CHECK_END ();                                                    \
+    s += " " + std::to_string (*p);                                  \
+    v_ids.push_back (std::string {*p < names.size() ?                \
+                                      names[*p].c_str() :            \
+                                      "INVALID SLOT"});}
+
+#define PSLOT() \
+    {if (wide_opext_active)                                         \
+      PWSLOT ()                                                     \
+    else                                                            \
+      PSSLOT ()                                                     \
+    wide_opext_active = false;}
+
+#define PWSLOT() \
+    {p++;                                                           \
+    CHECK_END ();                                                   \
+    unsigned char b0 = *p;                                          \
+    p++;                                                            \
+    CHECK_END ();                                                   \
+    unsigned char b1 = *p;                                          \
+    unsigned u = USHORT_FROM_UCHARS (b0, b1);                       \
+    s += " " + std::to_string (u);                                  \
+    v_ids.push_back (std::string {u < names.size() ?                \
+                                      names[u].c_str() :            \
+                                      "INVALID SLOT"});}
+
+#define CHECK_END() \
+  do {if (p >= v_code.data () + v_code.size ()) { error ("Invalid bytecode\n");}} while((0))
+
+#ifdef WORDS_BIGENDIAN
+#define PINT() \
+  do {\
+    unsigned u = 0;\
+    p++;\
+    CHECK_END ();\
+    u |= *p++ << 24;\
+    CHECK_END ();\
+    u |= *p++ << 16;\
+    CHECK_END ();\
+    u |= *p++ << 8;\
+    CHECK_END ();\
+    u |= *p;\
+    s += " " + std::to_string (u);\
+  } while (0);
+#else
+#define PINT() \
+  do {\
+    unsigned u = 0;\
+    p++;\
+    CHECK_END ();\
+    u |= *p++;\
+    CHECK_END ();\
+    u |= *p++ << 8;\
+    CHECK_END ();\
+    u |= *p++ << 16;\
+    CHECK_END ();\
+    u |= *p << 24;\
+    s += " " + std::to_string (u);\
+  } while (0);
+#endif
+
+  // Decode one opcode per iteration until the code vector is exhausted.
+  while (p < code + n)
+    {
+      switch (static_cast<INSTR> (*p))
+        {
+          PRINT_OP (ANON_MAYBE_SET_IGNORE_OUTPUTS)
+          PRINT_OP (EXT_NARGOUT)
+          PRINT_OP (POP)
+          PRINT_OP (DUP)
+          PRINT_OP (MUL)
+          PRINT_OP (MUL_DBL)
+          PRINT_OP (ADD)
+          PRINT_OP (ADD_DBL)
+          PRINT_OP (SUB)
+          PRINT_OP (SUB_DBL)
+          PRINT_OP (DIV)
+          PRINT_OP (DIV_DBL)
+          PRINT_OP (RET)
+          PRINT_OP (RET_ANON)
+          PRINT_OP (LE)
+          PRINT_OP (LE_DBL)
+          PRINT_OP (LE_EQ)
+          PRINT_OP (LE_EQ_DBL)
+          PRINT_OP (GR)
+          PRINT_OP (GR_DBL)
+          PRINT_OP (GR_EQ)
+          PRINT_OP (GR_EQ_DBL)
+          PRINT_OP (EQ)
+          PRINT_OP (EQ_DBL)
+          PRINT_OP (NEQ)
+          PRINT_OP (NEQ_DBL)
+          PRINT_OP (TRANS_MUL)
+          PRINT_OP (MUL_TRANS)
+          PRINT_OP (HERM_MUL)
+          PRINT_OP (MUL_HERM)
+          PRINT_OP (INCR_PREFIX)
+          PRINT_OP (ROT)
+          PRINT_OP (TRANS_LDIV)
+          PRINT_OP (HERM_LDIV)
+          PRINT_OP (POW_DBL)
+          PRINT_OP (POW)
+          PRINT_OP (LDIV)
+          PRINT_OP (EL_MUL)
+          PRINT_OP (EL_DIV)
+          PRINT_OP (EL_POW)
+          PRINT_OP (EL_AND)
+          PRINT_OP (EL_OR)
+          PRINT_OP (EL_LDIV)
+          PRINT_OP (NOT_DBL)
+          PRINT_OP (NOT_BOOL)
+          PRINT_OP (NOT)
+          PRINT_OP (UADD)
+          PRINT_OP (USUB)
+          PRINT_OP (USUB_DBL)
+          PRINT_OP (TRANS)
+          PRINT_OP (HANDLE_SIGNALS)
+          PRINT_OP (HERM)
+          PRINT_OP (UNARY_TRUE)
+          PRINT_OP (PUSH_TRUE)
+          PRINT_OP (PUSH_FALSE)
+          PRINT_OP (COLON2)
+          PRINT_OP (COLON3)
+          PRINT_OP (COLON2_CMD)
+          PRINT_OP (COLON3_CMD)
+          PRINT_OP (FOR_SETUP)
+          PRINT_OP (PUSH_NIL);
+          PRINT_OP (THROW_IFERROBJ);
+          PRINT_OP (BRAINDEAD_PRECONDITION);
+          PRINT_OP (PUSH_DBL_0);
+          PRINT_OP (PUSH_DBL_1);
+          PRINT_OP (PUSH_DBL_2);
+          PRINT_OP (ENTER_SCRIPT_FRAME);
+          PRINT_OP (EXIT_SCRIPT_FRAME);
+          PRINT_OP (ENTER_NESTED_FRAME);
+
+          // WIDE is a prefix opcode: it widens the slot/char operand of
+          // the next opcode (consumed via 'wide_opext_active' in
+          // PSLOT/PCHAR above).
+          CASE_START (WIDE)
+            wide_opext_active = true;
+          CASE_END ()
+
+          CASE_START (PUSH_FOLDED_CST) PSLOT () PSHORT () CASE_END ()
+          CASE_START (SET_FOLDED_CST) PSLOT () CASE_END ()
+
+          CASE_START (LOAD_CST)       PCHAR () CASE_END ()
+          CASE_START (LOAD_CST_ALT2)  PCHAR () CASE_END ()
+          CASE_START (LOAD_CST_ALT3)  PCHAR () CASE_END ()
+          CASE_START (LOAD_CST_ALT4)  PCHAR () CASE_END ()
+          CASE_START (LOAD_2_CST)     PCHAR () CASE_END ()
+          CASE_START (POP_N_INTS)     PCHAR () CASE_END ()
+          CASE_START (DUP_MOVE)       PCHAR () CASE_END ()
+
+          CASE_START (INDEX_STRUCT_SUBCALL) PCHAR ()  PCHAR () PCHAR () PCHAR () PCHAR_AS_CHAR () CASE_END ()
+
+          CASE_START (MUL_CST)        PCHAR () PCHAR () CASE_END ()
+          CASE_START (MUL_CST_DBL)    PCHAR () PCHAR () CASE_END ()
+          CASE_START (DIV_CST)        PCHAR () PCHAR () CASE_END ()
+          CASE_START (DIV_CST_DBL)    PCHAR () PCHAR () CASE_END ()
+          CASE_START (ADD_CST)        PCHAR () PCHAR () CASE_END ()
+          CASE_START (ADD_CST_DBL)    PCHAR () PCHAR () CASE_END ()
+          CASE_START (SUB_CST)        PCHAR () PCHAR () CASE_END ()
+          CASE_START (SUB_CST_DBL)    PCHAR () PCHAR () CASE_END ()
+          CASE_START (LE_CST)         PCHAR () PCHAR () CASE_END ()
+          CASE_START (LE_CST_DBL)     PCHAR () PCHAR () CASE_END ()
+          CASE_START (LE_EQ_CST)      PCHAR () PCHAR () CASE_END ()
+          CASE_START (LE_EQ_CST_DBL)  PCHAR () PCHAR () CASE_END ()
+          CASE_START (GR_CST)         PCHAR () PCHAR () CASE_END ()
+          CASE_START (GR_CST_DBL)     PCHAR () PCHAR () CASE_END ()
+          CASE_START (GR_EQ_CST)      PCHAR () PCHAR () CASE_END ()
+          CASE_START (GR_EQ_CST_DBL)  PCHAR () PCHAR () CASE_END ()
+          CASE_START (EQ_CST)         PCHAR () PCHAR () CASE_END ()
+          CASE_START (EQ_CST_DBL)     PCHAR () PCHAR () CASE_END ()
+          CASE_START (NEQ_CST)        PCHAR () PCHAR () CASE_END ()
+          CASE_START (NEQ_CST_DBL)    PCHAR () PCHAR () CASE_END ()
+          CASE_START (POW_CST)        PCHAR () PCHAR () CASE_END ()
+          CASE_START (POW_CST_DBL)    PCHAR () PCHAR () CASE_END ()
+
+          CASE_START (PUSH_CELL)      PCHAR () PCHAR () CASE_END ()
+          CASE_START (PUSH_CELL_BIG)  PINT () PINT () CASE_END ()
+
+          CASE_START (APPEND_CELL)    PCHAR () CASE_END ()
+
+          CASE_START (ASSIGN)                     PSLOT() CASE_END ()
+          CASE_START (BIND_ANS)                   PSLOT() CASE_END ()
+          CASE_START (INCR_ID_PREFIX)             PSLOT() CASE_END ()
+          CASE_START (INCR_ID_POSTFIX)            PSLOT() CASE_END ()
+          CASE_START (DECR_ID_PREFIX)             PSLOT() CASE_END ()
+          CASE_START (DECR_ID_POSTFIX)            PSLOT() CASE_END ()
+          CASE_START (INCR_ID_PREFIX_DBL)         PSLOT() CASE_END ()
+          CASE_START (INCR_ID_POSTFIX_DBL)        PSLOT() CASE_END ()
+          CASE_START (DECR_ID_PREFIX_DBL)         PSLOT() CASE_END ()
+          CASE_START (DECR_ID_POSTFIX_DBL)        PSLOT() CASE_END ()
+          CASE_START (FORCE_ASSIGN)               PSLOT() CASE_END ()
+          CASE_START (PUSH_SLOT_NARGOUT1)         PSLOT() CASE_END ()
+          CASE_START (PUSH_PI)                    PSLOT() CASE_END ()
+          CASE_START (PUSH_I)                     PSLOT() CASE_END ()
+          CASE_START (PUSH_E)                     PSLOT() CASE_END ()
+          CASE_START (PUSH_SLOT_NARGOUT1_SPECIAL) PSLOT() CASE_END ()
+          CASE_START (PUSH_SLOT_INDEXED)          PSLOT() CASE_END ()
+          CASE_START (PUSH_FCN_HANDLE)            PSLOT() CASE_END ()
+          CASE_START (PUSH_SLOT_NARGOUT0)         PSLOT() CASE_END ()
+          CASE_START (SET_SLOT_TO_STACK_DEPTH)    PSLOT() CASE_END ()
+
+          CASE_START (DISP)           PSLOT() PWSLOT() CASE_END ()
+          CASE_START (PUSH_SLOT_DISP) PSLOT() PWSLOT() CASE_END ()
+
+          CASE_START (JMP_IFDEF)                PSHORT() CASE_END ()
+          CASE_START (JMP_IFNCASEMATCH)         PSHORT() CASE_END ()
+          CASE_START (JMP)                      PSHORT() CASE_END ()
+          CASE_START (JMP_IF)                   PSHORT() CASE_END ()
+          CASE_START (JMP_IFN)                  PSHORT() CASE_END ()
+          CASE_START (JMP_IF_BOOL)              PSHORT() CASE_END ()
+          CASE_START (JMP_IFN_BOOL)             PSHORT() CASE_END ()
+          CASE_START (FOR_COMPLEX_SETUP)        PSHORT() CASE_END ()
+
+          CASE_START (INSTALL_FUNCTION)       PSLOT () PINT() CASE_END ()
+
+          CASE_START (ASSIGN_COMPOUND)        PSLOT () PCHAR () CASE_END ()
+
+          CASE_START (INDEX_ID_NARGOUT0)      PSLOT () PCHAR () CASE_END ()
+          CASE_START (INDEX_ID_NARGOUT1)      PSLOT () PCHAR () CASE_END ()
+          CASE_START (INDEX_IDNX)             PSLOT () PCHAR () CASE_END ()
+          CASE_START (INDEX_ID1_MAT_2D)       PSLOT () PCHAR () CASE_END ()
+          CASE_START (INDEX_ID1_MAT_1D)       PSLOT () PCHAR () CASE_END ()
+
+          CASE_START (INDEX_CELL_ID_NARGOUT0) PSLOT () PCHAR () CASE_END ()
+          CASE_START (INDEX_CELL_ID_NARGOUT1) PSLOT () PCHAR () CASE_END ()
+          CASE_START (INDEX_CELL_IDNX)        PSLOT () PCHAR () CASE_END ()
+
+          CASE_START (INDEX_CELL_ID_NARGOUTN) PSLOT () PCHAR () PCHAR () CASE_END ()
+          CASE_START (INDEX_IDN)              PSLOT () PCHAR () PCHAR () CASE_END ()
+
+          CASE_START (SUBASSIGN_OBJ)          PCHAR () PCHAR () CASE_END ()
+          CASE_START (MATRIX)                 PCHAR () PCHAR () CASE_END ()
+          CASE_START (DUPN)                   PCHAR () PCHAR () CASE_END ()
+
+          CASE_START (INDEX_ID1_MATHY_UFUN)   PCHAR () PSLOT () PCHAR () CASE_END ()
+
+          CASE_START (INDEX_OBJ)              PCHAR () PCHAR () PWSLOT () PCHAR () PCHAR () CASE_END ()
+
+          CASE_START (FOR_COND) PSLOT () PSHORT () CASE_END ()
+
+          CASE_START (FOR_COMPLEX_COND) PSHORT () PWSLOT () PWSLOT () CASE_END ()
+
+          CASE_START (INDEX_STRUCT_NARGOUTN)  PCHAR () PWSLOT () PWSLOT () CASE_END ()
+          CASE_START (END_ID)                 PSLOT () PCHAR () PCHAR () CASE_END ()
+
+          CASE_START (PUSH_SLOT_NX)           PSLOT () PCHAR () CASE_END ()
+          CASE_START (PUSH_SLOT_NARGOUTN)     PSLOT () PCHAR () CASE_END ()
+          CASE_START (BRAINDEAD_WARNING)      PSLOT () PCHAR () CASE_END ()
+          CASE_START (SUBASSIGN_STRUCT)       PSLOT () PWSLOT () CASE_END ()
+
+          CASE_START (SUBASSIGN_ID)         PSLOT () PCHAR () CASE_END ()
+          CASE_START (SUBASSIGN_ID_MAT_1D)  PSLOT () PCHAR () CASE_END ()
+          CASE_START (SUBASSIGN_ID_MAT_2D)  PSLOT () PCHAR () CASE_END ()
+          CASE_START (SUBASSIGN_CELL_ID)    PSLOT () PCHAR () CASE_END ()
+
+          CASE_START (EVAL) PCHAR () PINT () CASE_END ()
+
+          CASE_START (PUSH_ANON_FCN_HANDLE) PINT () CASE_END ()
+
+          CASE_START (INDEX_STRUCT_CALL)
+            PCHAR ()
+            PWSLOT ()
+            PCHAR ()
+            PCHAR_AS_CHAR ()
+          CASE_END ()
+
+          CASE_START (LOAD_FAR_CST) PINT () CASE_END ()
+
+          CASE_START (END_OBJ) PSLOT () PCHAR () PCHAR () CASE_END ()
+
+          CASE_START (WORDCMD_NX) PSLOT () PCHAR () CASE_END ()
+          CASE_START (WORDCMD) PSLOT () PCHAR () PCHAR () CASE_END ()
+
+          // Variable-length operand lists: a count byte followed by
+          // that many operands.
+          CASE_START (SET_IGNORE_OUTPUTS)
+            PCHAR ()
+            int nn = *p;
+            PCHAR ()
+            for (int i = 0; i < nn; i++)
+              PCHAR ()
+          CASE_END ()
+
+          CASE_START (CLEAR_IGNORE_OUTPUTS)
+            PCHAR ()
+            int nn = *p;
+            for (int i = 0; i < nn; i++)
+              {
+                PWSLOT ()
+              }
+          CASE_END ()
+
+          CASE_START (END_X_N)
+            PCHAR ()
+
+            int nn = *p;
+            for (int i = 0; i < nn; i++)
+              {
+                PCHAR ()
+                PCHAR ()
+                PCHAR ()
+                PWSLOT ()
+              }
+          CASE_END ()
+
+          CASE_START (MATRIX_UNEVEN)
+            s += " TYPE";
+            PCHAR ()
+            int type = *p;
+
+            if (type == 1)
+              {
+                s += " ROWS"; PINT ();
+                s += " COLS"; PINT ();
+              }
+            else
+              {
+                if (p + 3 >= code + n)
+                  error ("invalid bytecode");
+                int i = chars_to_uint (p + 1);
+                s += " ROWS"; PINT ();
+                s += " COLS";
+                for (int j = 0; j < i; j++)
+                  PINT ();
+              }
+          CASE_END ()
+
+          CASE_START (SUBASSIGN_CHAINED)
+            PSLOT ();
+            PCHAR (); // op
+            PCHAR (); // nchained
+            int nn = *p;
+            for (int i = 0; i < nn; i++)
+              {
+                PCHAR ();
+                PCHAR ();
+              }
+          CASE_END ()
+
+          CASE_START (GLOBAL_INIT)
+              p++;
+              CHECK_END ();
+              if (static_cast<global_type> (*p) == global_type::GLOBAL)
+                s += " 'GLOBAL'";
+              else if (static_cast<global_type> (*p) == global_type::PERSISTENT)
+                s += " 'PERSISTENT'";
+
+              PWSLOT ()
+              PWSLOT ()
+
+              s += " HAS-TARGET";
+              PCHAR ()
+              int has_target = *p;
+              if (has_target)
+                {
+                  s += " AFTER INIT";
+                  PSHORT ();
+                }
+          CASE_END ()
+
+          CASE_START (ASSIGNN)
+            PCHAR ()
+            int n_slots = *p;
+            for (int i = 0; i < n_slots; i++)
+              PWSLOT ()
+          CASE_END ()
+
+          default:
+            CHECK_END ();
+            error ("unknown op: %d", *p);
+            break;
+        }
+      p++;
+    }
+
+  return v_pair_row_str;
+}
+
+// Dump a bytecode object to stdout for debugging: metadata, frame
+// header bytes, slot table, source-location lookup table, debug tree
+// map, nested-symbol table, and the disassembled opcodes.
+void
+octave::print_bytecode(bytecode &bc)
+{
+  using std::cout;
+  using std::to_string;
+  using std::setw;
+
+  unsigned char *p = bc.m_code.data ();
+  int n = bc.m_code.size ();
+
+  CHECK (bc.m_data.size () >= 2);
+  cout << "metadata:\n";
+  cout << "\t" << bc.m_data[0].string_value () << "\n"; // function name
+  cout << "\t" << bc.m_data[1].string_value () << "\n\n"; // function type
+
+  // The first bytes of m_code hold the frame header (see the "Skip some
+  // framedata" offset in opcodes_to_strings ()).
+  cout << "frame:\n";
+  cout << "\t.n_return " << to_string (*p++) << "\n";
+  cout << "\t.n_args " << to_string (*p++) << "\n";
+  cout << "\t.n_locals " << to_string (*p++) << "\n\n";
+
+  // Slot number -> identifier name.
+  cout << "slots:\n";
+  int idx = 0;
+  for (std::string local : bc.m_ids)
+    cout << setw (5) << to_string (idx++) << ": " << local << "\n";
+  cout << "\n";
+
+  // Maps instruction-pointer ranges back to source line/column.
+  cout << "source code lut:\n";
+  for (auto it : bc.m_unwind_data.m_loc_entry)
+    {
+      cout << "\tl:" << setw (5) << it.m_line <<
+              " c:" << setw (5) <<  it.m_col <<
+              " ip0:" << setw (5) << it.m_ip_start <<
+              " ip1:" << setw (5) << it.m_ip_end <<
+              "\n";
+    }
+
+  cout << "dbg tree object:\n";
+  for (auto it : bc.m_unwind_data.m_ip_to_tree)
+    {
+      cout << "\tip:" << it.first << " obj=" << it.second << "\n";
+    }
+
+  if (bc.m_unwind_data.m_v_nested_vars.size ())
+    {
+      cout << "Nested symbols table:\n";
+      for (auto it : bc.m_unwind_data.m_v_nested_vars)
+        {
+          cout << it.m_depth << ":nth parent's slot: " << it.m_slot_parent << ", child slot: " << it.m_slot_nested << "\n";
+        }
+    }
+
+  cout << "code: (n=" << n << ")\n";
+  auto v_ls = opcodes_to_strings (bc);
+  for (auto ls : v_ls)
+    {
+      cout << "\t" << setw(5) << ls.first << ": " << ls.second << "\n";
+    }
+}
+
+// Read the 32-bit operand that ends just before 'ip' (bytes at
+// ip-4 .. ip-1), assembled in native byte order.  Paired with the
+// POP_CODE_INT macro, which advances ip past the operand before
+// calling this.  The unsigned value is returned through an int;
+// callers appear to treat it as a non-negative count/offset.
+static int pop_code_int (unsigned char *ip)
+{
+  unsigned int ans;
+  ip -= 4;
+#ifdef WORDS_BIGENDIAN
+  ans = *ip++ << 24;
+  ans |= *ip++ << 16;
+  ans |= *ip++ << 8;
+  ans |= *ip++;
+#else
+  ans = *ip++;
+  ans |= *ip++ << 8;
+  ans |= *ip++ << 16;
+  ans |= *ip++ << 24;
+#endif
+  return ans;
+}
+
+// Read the 16-bit operand that ends just before 'ip' (bytes at
+// ip-2 .. ip-1), assembled in native byte order.  Paired with the
+// POP_CODE_USHORT macro, which advances ip past the operand first.
+static int pop_code_ushort (unsigned char *ip)
+{
+  unsigned int ans;
+  ip -= 2;
+#ifdef WORDS_BIGENDIAN
+  ans = *ip++ << 8;
+  ans |= *ip++;
+#else
+  ans = *ip++;
+  ans |= *ip++ << 8;
+#endif
+  return ans;
+}
+
+
+
+// Debug functions that are easy to break into from gdb. Called by __dummy_mark_1__() in Octave
+extern "C" void dummy_mark_1 (void);
+extern "C" void dummy_mark_2 (void);
+
+// Instruction-stream readers.  These expect 'ip' in scope at the
+// expansion site and advance it past what they consume.
+#define POP_CODE() *ip++
+#define POP_CODE_INT() (ip++,ip++,ip++,ip++,pop_code_int (ip))
+#define POP_CODE_USHORT() (ip++, ip++, pop_code_ushort (ip))
+
+// VM stack push helpers.  The stack is raw storage, so values are
+// placement-new constructed into the next slot; 'sp' is the expansion
+// site's stack pointer and is bumped one element per push.
+#define PUSH_OV(ov) \
+  do {                           \
+    new (sp++) octave_value (ov);  \
+  } while ((0))
+
+#define PUSH_OVB(ovb) \
+  do {                           \
+    new (sp++) octave_value_vm (ovb);  \
+  } while ((0))
+
+#define PUSH_OV_VM(ov) \
+  do {                           \
+    new (sp++) octave_value_vm (ov);  \
+  } while ((0))
+
+// Pop yields the raw stack element; the caller is responsible for
+// destroying any octave_value it contains.
+#define POP() (*--sp)
+
+// Accessors for the topmost stack elements, viewed through the
+// different members of the stack_element union.
+#define TOP_OVB() (sp[-1]).ovb
+#define SEC_OVB() (sp[-2]).ovb
+
+#define TOP_OV_VM() (sp[-1]).ov_vm
+#define SEC_OV_VM() (sp[-2]).ov_vm
+
+#define TOP_OV() (sp[-1]).ov
+#define SEC_OV() (sp[-2]).ov
+#define THIRD_OV() (sp[-3]).ov
+#define FOURTH_OV() (sp[-4]).ov
+
+#define TOP() (sp[-1])
+#define SEC() (sp[-2])
+#define THIRD() (sp[-3])
+
+// SHRINK/GROW move sp without running any constructors/destructors;
+// STACK_DESTROY pops n elements and destroys the octave_value in each.
+#define STACK_SHRINK(n) sp -= n
+#define STACK_GROW(n) sp += n
+#define STACK_DESTROY(n)               \
+  do {                                 \
+    for (int iii = 0; iii < n; iii++)  \
+      (*--sp).ov.~octave_value ();     \
+  } while ((0))
+
+// Shift 'n_elem' stack elements at 'start' by 'n_lift' positions
+// (upward for positive n_lift, downward for negative), leaving
+// default-constructed octave_values in the vacated slots.  The values
+// are staged through an octave_value_list so the move is safe even when
+// source and destination ranges overlap.
+static void stack_lift (stack_element *start, int n_elem, int n_lift)
+{
+  octave_value_list tmp;
+  for (int i = 0; i < n_elem; i++)
+    tmp.append (std::move (start[i].ov));
+  // Destroy the moved-from originals before re-using their storage.
+  for (int i = 0; i < n_elem; i++)
+    start[i].ov.~octave_value ();
+
+  // Negative n_lift means we need to erase
+  for (int i = n_lift; i < 0; i++)
+    {
+      start[i].ov.~octave_value ();
+      new (start + i) octave_value;
+    }
+  // Positive n_lift: fill the gap below the moved block with empties.
+  for (int i = 0; i < n_lift; i++)
+    new (start + i) octave_value;
+
+  // Re-construct the saved elements at their shifted positions.
+  for (int i = 0; i < n_elem; i++)
+    new (start + n_lift + i) octave_value (std::move (tmp.xelem (i)));
+}
+
+// Append every element of the cs-list value 'ov_cs' to 'ovl'.
+static void append_cslist_to_ovl (octave_value_list &ovl, octave_value ov_cs)
+{
+  octave_value_list cslist = ov_cs.list_value (); // TODO: Wasteful copy. octave_cs_list has no const ref to m_list.
+  ovl.append (cslist);
+}
+
+
+// Note: The function assumes the ip points to the next opcode. I.e.
+// the current opcode it searches for entries for is at ip - 1.
+// Note: The function assumes the ip points to the next opcode. I.e.
+// the current opcode it searches for entries for is at ip - 1.
+//
+// Find the argument-name entry whose [m_ip_start, m_ip_end] range
+// covers the current opcode.  When several ranges cover it, the one
+// with the largest start wins — presumably the innermost (most
+// specific) range.  Returns a default-constructed entry if none match.
+arg_name_entry get_argname_entry (int ip, unwind_data *unwind_data)
+{
+  int best_match = -1;
+  int best_start = -1;
+
+  auto &entries = unwind_data->m_argname_entries;
+  for (unsigned i = 0; i < entries.size (); i++)
+    {
+      int start = entries[i].m_ip_start;
+      int end = entries[i].m_ip_end;
+
+      // Skip entries whose range does not cover the current opcode.
+      if (start > (ip - 1) || end < (ip - 1))
+        continue;
+
+      // Keep the existing candidate if it starts later (tighter range).
+      if (best_match != -1)
+        {
+          if (best_start > start)
+            continue;
+        }
+
+      best_match = i;
+      best_start = start;
+    }
+
+  if (best_match == -1)
+    return {};
+
+  return entries[best_match];
+}
+
+// Expand any cs-list among the args on the stack.
+// I.e. if there is e.g. a two element cs-list
+// among the args, we need to expand it and move up the
+// rest of the args, on the stack.
+//
+// Modifies sp and n_args_on_stack ...
+//
+// Requires 'sp', 'n_args_on_stack' and expand_cslist_inplace () to be
+// in scope at the expansion site.  expand_cslist_inplace () reports the
+// net change in element count, which is applied to all three cursors.
+#define EXPAND_ARGS_CSLISTS_ON_STACK \
+{                                                                 \
+  stack_element *pov = &sp[-n_args_on_stack];                     \
+  while (pov != sp)                                               \
+    {                                                             \
+      if (OCTAVE_UNLIKELY (pov->ov.is_cs_list ()))                \
+        {                                                         \
+          int n_change = expand_cslist_inplace (pov, sp);         \
+          sp += n_change;                                         \
+          pov += n_change;                                        \
+          n_args_on_stack += n_change;                            \
+        }                                                         \
+                                                                  \
+      pov++;                                                      \
+    }                                                             \
+}
+
+// Move the stack range [beg, end) into the octave_value_list 'ovl',
+// expanding any cs-list element into its individual values via
+// append_cslist_to_ovl (). Every moved octave_value is destroyed and
+// sp is reset to 'beg', i.e. the range is popped off the stack.
+#define POP_STACK_RANGE_TO_OVL(ovl, beg, end) \
+do {                                               \
+  stack_element *pbeg = beg;                       \
+  stack_element *pend = end;                       \
+  while (pbeg != pend)                             \
+    {                                              \
+      if (OCTAVE_UNLIKELY (pbeg->ov.is_cs_list ()))\
+        {                                          \
+          append_cslist_to_ovl (ovl, pbeg->ov);    \
+          pbeg->ov.~octave_value ();               \
+        }                                          \
+      else                                         \
+        {                                          \
+          ovl.append (std::move (pbeg->ov));       \
+          pbeg->ov.~octave_value ();               \
+        }                                          \
+                                                   \
+      pbeg++;                                      \
+    }                                              \
+  sp = beg;                                        \
+                                                   \
+} while (0)
+
+// Copy the stack range [beg, end) into the octave_value_list 'ovl',
+// expanding any cs-list element into its individual values. Unlike the
+// pop variant above, the stack elements stay alive and sp is unchanged.
+#define COPY_STACK_RANGE_TO_OVL(ovl, beg, end) \
+do {                                \
+  stack_element *pbeg = beg;        \
+  stack_element *pend = end;        \
+  while (pbeg != pend)                             \
+    {                                              \
+      if (OCTAVE_UNLIKELY (pbeg->ov.is_cs_list ()))\
+        {                                          \
+          append_cslist_to_ovl (ovl, pbeg->ov);    \
+        }                                          \
+      else                                         \
+        {                                          \
+          ovl.append (pbeg->ov);                   \
+        }                                          \
+                                                   \
+      pbeg++;                                      \
+    }                                              \
+} while (0)
+
+// Helper so that a literal ',' can be passed inside a macro argument.
+#define COMMA ,
+// Debug helper: print 'msg' followed by a dump of the VM registers
+// (sp, bsp, ip, code, data, name table), the current function name and
+// the next opcode, to stdout.
+#define PRINT_VM_STATE(msg)                                                         \
+  do {                                                                              \
+    std::cout << msg;                                                               \
+    std::cout << "\n";                                                              \
+    std::cout << "sp  : " << sp << "\n";                                            \
+    std::cout << "bsp : " << bsp << "\n";                                           \
+    std::cout << "sp i: " << sp - bsp << "\n";                                      \
+    std::cout << "sp ii: " << sp - m_stack << "\n";                                 \
+    std::cout << "ip  : " << ip - code << "\n";                                     \
+    std::cout << "code: " << code << "\n";                                          \
+    std::cout << "data: " << data << "\n";                                          \
+    std::cout << "ids : " << name_data << "\n";                                     \
+    std::cout << "fn  : " << m_tw->get_current_stack_frame ()->fcn_name () << "\n"; \
+    std::cout << "Next op: " << std::to_string (*ip) << "\n\n";                     \
+  } while ((0))
+
+// Stack sanity check: verify that the magic guard words in the padding
+// on both sides of the VM stack are intact, and that sp (including n
+// pending pushes) lies within the stack area.
+#define CHECK_STACK(n) \
+  do {\
+    for (unsigned i = 0; i < stack_pad; i++)\
+      {\
+        CHECK (m_stack0[i].u == stack_magic_int);\
+        CHECK (m_stack0[i + stack_size].u == stack_magic_int);\
+      }\
+    CHECK (sp <= m_stack + stack_size);\
+    CHECK (sp + n <= m_stack + stack_size);\
+    CHECK (sp >= m_stack);\
+  } while (0)
+
+// Cheaper variant: only check that there is room for n more elements.
+#define CHECK_STACK_N(n) CHECK (sp + n <= m_stack + stack_size)
+
+// Access the octave_base_value as subclass type of an octave_value ov
+#define REP(type,ov) static_cast<type&> (const_cast<octave_base_value &> (ov.get_rep()))
+
+// Fetch-and-dispatch for ops with an argument byte: read the opcode at
+// ip[0] and its argument byte at ip[1] into arg0, advance ip past both,
+// then jump through the computed-goto table 'instr'. Diverts to the
+// debug_check label when the debug/profile/echo flag is set.
+#define DISPATCH() do { \
+  if (OCTAVE_UNLIKELY (m_tw->vm_dbgprofecho_flag ())) /* Do we need to check for breakpoints? */\
+    goto debug_check;\
+  int opcode = ip[0];\
+  arg0 = ip[1];\
+  ip += 2;\
+  goto *instr [opcode]; /* Dispatch to next instruction */\
+} while ((0))
+
+// Dispatch after a 1-byte op: the opcode was already loaded into arg0
+// by the previous dispatch, so only one byte (the following
+// instruction's arg0/opcode byte) is consumed here.
+#define DISPATCH_1BYTEOP() do { \
+  if (OCTAVE_UNLIKELY (m_tw->vm_dbgprofecho_flag ())) /* Do we need to check for breakpoints? */\
+    goto debug_check_1b;\
+  int opcode = arg0;\
+  arg0 = *ip++;\
+  goto *instr [opcode]; /* Dispatch to next instruction */\
+} while ((0))
+
+// Static (class-wide) VM state: the profiler object and the flags
+// checked in execute_code () to decide whether profiling is active.
+std::shared_ptr<vm_profiler> vm::m_vm_profiler;
+bool vm::m_profiler_enabled;
+bool vm::m_trace_enabled;  // NOTE(review): presumably gates opcode tracing -- usage not visible here
+
+// These two are used for pushing true and false ov:s to the
+// operand stack.
+static octave_value ov_true {true};
+static octave_value ov_false {false};
+// Preconstructed constants, so that pushing them is a cheap copy
+// instead of a fresh octave_value construction each time.
+#if defined (M_PI)
+  static octave_value ov_pi {M_PI};
+#else
+  // Initialized in vm::vm()
+  static octave_value ov_pi;
+#endif
+static octave_value ov_dbl_0 {0.0};
+static octave_value ov_dbl_1 {1.0};
+static octave_value ov_dbl_2 {2.0};
+
+// The imaginary unit.
+static octave_value ov_i {Complex (0.0, 1.0)};
+#if defined (M_E)
+  static octave_value ov_e {M_E};
+#else
+  // Initialized in vm::vm()
+  static octave_value ov_e;
+#endif
+
+// TODO: Push non-nil and nil ov instead of true false to make some checks
+//       faster? Would they be faster?
+
+octave_value_list
+vm::execute_code (const octave_value_list &root_args, int root_nargout)
+{
+  // This field is set to true at each return from this function so we can
+  // assure in the caller that no exception escapes the VM in some way.
+  this->m_dbg_proper_return = false;
+
+  // Array of label pointers, corresponding to opcodes by position in
+  // the array. "&&" is label address, not rvalue reference.
+  static const void* instr[] =
+    {
+      &&pop,                                               // POP,
+      &&dup,                                               // DUP,
+      &&load_cst,                                          // LOAD_CST,
+      &&mul,                                               // MUL,
+      &&div,                                               // DIV,
+      &&add,                                               // ADD,
+      &&sub,                                               // SUB,
+      &&ret,                                               // RET,
+      &&assign,                                            // ASSIGN,
+      &&jmp_if,                                            // JMP_IF,
+      &&jmp,                                               // JMP,
+      &&jmp_ifn,                                           // JMP_IFN,
+      &&push_slot_nargout0,                                // PUSH_SLOT_NARGOUT0,
+      &&le,                                                // LE,
+      &&le_eq,                                             // LE_EQ,
+      &&gr,                                                // GR,
+      &&gr_eq,                                             // GR_EQ,
+      &&eq,                                                // EQ,
+      &&neq,                                               // NEQ,
+      &&index_id_nargout0,                                 // INDEX_ID_NARGOUT0,
+      &&push_slot_indexed,                                 // PUSH_SLOT_INDEXED,
+      &&pow,                                               // POW,
+      &&ldiv,                                              // LDIV,
+      &&el_mul,                                            // EL_MUL,
+      &&el_div,                                            // EL_DIV,
+      &&el_pow,                                            // EL_POW,
+      &&el_and,                                            // EL_AND,
+      &&el_or,                                             // EL_OR,
+      &&el_ldiv,                                           // EL_LDIV,
+      &&op_not,                                            // NOT,
+      &&uadd,                                              // UADD,
+      &&usub,                                              // USUB,
+      &&trans,                                             // TRANS,
+      &&herm,                                              // HERM,
+      &&incr_id_prefix,                                    // INCR_ID_PREFIX,
+      &&decr_id_prefix,                                    // DECR_ID_PREFIX,
+      &&incr_id_postfix,                                   // INCR_ID_POSTFIX,
+      &&decr_id_postfix,                                   // DECR_ID_POSTFIX,
+      &&for_setup,                                         // FOR_SETUP,
+      &&for_cond,                                          // FOR_COND,
+      &&pop_n_ints,                                        // POP_N_INTS,
+      &&push_slot_nargout1,                                // PUSH_SLOT_NARGOUT1,
+      &&index_id1,                                         // INDEX_ID_NARGOUT1,
+      &&push_fcn_handle,                                   // PUSH_FCN_HANDLE,
+      &&colon,                                             // COLON3,
+      &&colon,                                             // COLON2,
+      &&colon_cmd,                                         // COLON3_CMD,
+      &&colon_cmd,                                         // COLON2_CMD,
+      &&push_true,                                         // PUSH_TRUE,
+      &&push_false,                                        // PUSH_FALSE,
+      &&unary_true,                                        // UNARY_TRUE,
+      &&index_idn,                                         // INDEX_IDN,
+      &&assign_n,                                          // ASSIGNN,
+      &&push_slot_nargoutn,                                // PUSH_SLOT_NARGOUTN,
+      &&subassign_id,                                      // SUBASSIGN_ID,
+      &&end_id,                                            // END_ID,
+      &&matrix,                                            // MATRIX,
+      &&trans_mul,                                         // TRANS_MUL,
+      &&mul_trans,                                         // MUL_TRANS,
+      &&herm_mul,                                          // HERM_MUL,
+      &&mul_herm,                                          // MUL_HERM,
+      &&trans_ldiv,                                        // TRANS_LDIV,
+      &&herm_ldiv,                                         // HERM_LDIV,
+      &&wordcmd,                                           // WORDCMD,
+      &&handle_signals,                                    // HANDLE_SIGNALS,
+      &&push_cell,                                         // PUSH_CELL,
+      &&index_cell_id0,                                    // INDEX_CELL_ID_NARGOUT0,
+      &&index_cell_id1,                                    // INDEX_CELL_ID_NARGOUT1,
+      &&index_cell_idn,                                    // INDEX_CELL_ID_NARGOUTN,
+      &&incr_prefix,                                       // INCR_PREFIX,
+      &&rot,                                               // ROT,
+      &&init_global,                                       // GLOBAL_INIT,
+      &&assign_compound,                                   // ASSIGN_COMPOUND,
+      &&jmp_ifdef,                                         // JMP_IFDEF,
+      &&switch_cmp,                                        // JMP_IFNCASEMATCH,
+      &&braindead_precond,                                 // BRAINDEAD_PRECONDITION,
+      &&braindead_warning,                                 // BRAINDEAD_WARNING,
+      &&force_assign,                                      // FORCE_ASSIGN, // Accepts undefined rhs
+      &&push_nil,                                          // PUSH_NIL,
+      &&throw_iferrorobj,                                  // THROW_IFERROBJ,
+      &&index_struct_n,                                    // INDEX_STRUCT_NARGOUTN,
+      &&subasgn_struct,                                    // SUBASSIGN_STRUCT,
+      &&subasgn_cell_id,                                   // SUBASSIGN_CELL_ID,
+      &&index_obj,                                         // INDEX_OBJ,
+      &&subassign_obj,                                     // SUBASSIGN_OBJ,
+      &&matrix_big,                                        // MATRIX_UNEVEN,
+      &&load_far_cst,                                      // LOAD_FAR_CST,
+      &&end_obj,                                           // END_OBJ,
+      &&set_ignore_outputs,                                // SET_IGNORE_OUTPUTS,
+      &&clear_ignore_outputs,                              // CLEAR_IGNORE_OUTPUTS,
+      &&subassign_chained,                                 // SUBASSIGN_CHAINED,
+      &&set_slot_to_stack_depth,                           // SET_SLOT_TO_STACK_DEPTH,
+      &&dupn,                                              // DUPN,
+      &&debug,                                             // DEBUG,
+      &&index_struct_call,                                 // INDEX_STRUCT_CALL,
+      &&end_x_n,                                           // END_X_N,
+      &&eval,                                              // EVAL,
+      &&bind_ans,                                          // BIND_ANS,
+      &&push_anon_fcn_handle,                              // PUSH_ANON_FCN_HANDLE,
+      &&for_complex_setup,                                 // FOR_COMPLEX_SETUP, // opcode
+      &&for_complex_cond,                                  // FOR_COMPLEX_COND,
+      &&push_slot1_special,                                // PUSH_SLOT_NARGOUT1_SPECIAL,
+      &&disp,                                              // DISP,
+      &&push_slot_disp,                                    // PUSH_SLOT_DISP,
+      &&load_cst_alt2,                                     // LOAD_CST_ALT2,
+      &&load_cst_alt3,                                     // LOAD_CST_ALT3,
+      &&load_cst_alt4,                                     // LOAD_CST_ALT4,
+      &&load_2_cst,                                        // LOAD_2_CST,
+      &&mul_dbl,                                           // MUL_DBL,
+      &&add_dbl,                                           // ADD_DBL,
+      &&sub_dbl,                                           // SUB_DBL,
+      &&div_dbl,                                           // DIV_DBL,
+      &&pow_dbl,                                           // POW_DBL,
+      &&le_dbl,                                            // LE_DBL,
+      &&le_eq_dbl,                                         // LE_EQ_DBL,
+      &&gr_dbl,                                            // GR_DBL,
+      &&gr_eq_dbl,                                         // GR_EQ_DBL,
+      &&eq_dbl,                                            // EQ_DBL,
+      &&neq_dbl,                                           // NEQ_DBL,
+      &&index_id1_mat_1d,                                  // INDEX_ID1_MAT_1D,
+      &&index_id1_mat_2d,                                  // INDEX_ID1_MAT_2D,
+      &&push_pi,                                           // PUSH_PI,
+      &&index_math_ufun_id1,                               // INDEX_ID1_MATHY_UFUN,
+      &&subassign_id_mat_1d,                               // SUBASSIGN_ID_MAT_1D,
+      &&incr_id_prefix_dbl,                                // INCR_ID_PREFIX_DBL,
+      &&decr_id_prefix_dbl,                                // DECR_ID_PREFIX_DBL,
+      &&incr_id_postfix_dbl,                               // INCR_ID_POSTFIX_DBL,
+      &&decr_id_postfix_dbl,                               // DECR_ID_POSTFIX_DBL,
+      &&push_cst_dbl_0,                                    // PUSH_DBL_0,
+      &&push_cst_dbl_1,                                    // PUSH_DBL_1,
+      &&push_cst_dbl_2,                                    // PUSH_DBL_2,
+      &&jmp_if_bool,                                       // JMP_IF_BOOL,
+      &&jmp_ifn_bool,                                      // JMP_IFN_BOOL,
+      &&usub_dbl,                                          // USUB_DBL,
+      &&not_dbl,                                           // NOT_DBL,
+      &&not_bool,                                          // NOT_BOOL,
+      &&push_folded_cst,                                   // PUSH_FOLDED_CST,
+      &&set_folded_cst,                                    // SET_FOLDED_CST,
+      &&wide,                                              // WIDE
+      &&subassign_id_mat_2d,
+      &&enter_script_frame,
+      &&exit_script_frame,
+      &&ret_anon,
+      &&index_idnx,
+      &&index_cell_idnx,
+      &&push_slot_nx,
+      &&ext_nargout,
+      &&wordcmd_nx,
+      &&anon_maybe_set_ignore_output,
+      &&enter_nested_frame,
+      &&install_function,
+      &&dup_move,
+      &&mul_cst_dbl,
+      &&mul_cst,
+      &&add_cst_dbl,
+      &&add_cst,
+      &&div_cst_dbl,
+      &&div_cst,
+      &&sub_cst_dbl,
+      &&sub_cst,
+      &&le_cst_dbl,
+      &&le_cst,
+      &&le_eq_cst_dbl,
+      &&le_eq_cst,
+      &&gr_cst_dbl,
+      &&gr_cst,
+      &&gr_eq_cst_dbl,
+      &&gr_eq_cst,
+      &&eq_cst_dbl,
+      &&eq_cst,
+      &&neq_cst_dbl,
+      &&neq_cst,
+      &&pow_cst_dbl,
+      &&pow_cst,
+      &&push_i,
+      &&push_e,
+      &&index_struct_subcall,
+      &&push_cell_big,
+      &&append_cell,
+    };
+
+  if (OCTAVE_UNLIKELY (m_profiler_enabled))
+    {
+      auto p = vm::m_vm_profiler;
+      if (p)
+        {
+          std::string fn_name = m_data[2].string_value (); // profiler_name () querried at compile time
+          p->enter_fn (fn_name, "", m_unwind_data, m_name_data, m_code);
+        }
+    }
+
+#if defined (__GNUC__) && defined (__x86_64__)
+  // We strongly suggest to GCC to put sp, ip and bsp in actual registers with
+  // the "local register variable" extension.
+  //
+  // If GCC is not nudged to put these in registers, its register allocator
+  // might make the VM spend quite some time pushing and popping of the C-stack.
+  register int arg0 asm("r12");
+  register stack_element *sp asm("r14");    // Stack pointer register
+  register unsigned char *ip asm("r15");    // The instruction pointer register
+  register stack_element *bsp asm("r13");   // Base stack pointer
+#else
+  int arg0;
+  stack_element *sp;
+  unsigned char *ip;
+  stack_element *bsp;
+#endif
+
+  unsigned char *code; // The instruction base register
+
+  stack_element *rsp; // Root stack pointer. Marks the beginning of the VM stack
+
+  octave_value *data = m_data;
+  std::string *name_data = m_name_data;
+  unwind_data *unwind_data = m_unwind_data;
+
+  code = m_code;
+  ip = code;
+  m_ip = 0;
+
+  m_sp = m_bsp = m_rsp = sp = bsp = rsp = m_stack;
+
+  // Read the meta data for constructing a stack frame.
+  {
+#define N_RETURNS() static_cast<signed char> (code[0])
+#define N_ARGS() static_cast<signed char> (code[1])
+#define N_LOCALS() USHORT_FROM_UCHAR_PTR (code + 2)
+
+    int n_returns = static_cast<signed char> (*ip++);
+    // n_args is negative for varargin calls
+    int n_args = static_cast<signed char> (*ip++);
+    int n_locals = POP_CODE_USHORT (); // Note: An arg and return can share slot
+
+    bool is_varargin = n_args < 0;
+    bool is_varargout = n_returns < 0;
+
+    int n_root_args = root_args.length ();
+
+    if (is_varargin)
+      n_args = -n_args;
+    if (OCTAVE_UNLIKELY (n_returns < 0))  // Negative for varargout and anonymous functions
+      {
+        if (n_returns != -128)
+          n_returns = -n_returns;
+        else
+          n_returns = 1;
+      }
+
+    // The first return is always nargout, as a uint64
+    (*sp++).u = root_nargout;
+
+    // Construct nil octave_values for the return slots
+    for (int i = 1; i < n_returns; i++)
+      PUSH_OV (); // TODO: Might be an arg i.e "[a,i] = foo (i,b)"
+
+    // Push the args onto the stack, filling their local slots
+    if (!is_varargin)
+      {
+        int i = 0;
+        for (i = 0; i < n_root_args; i++)
+          PUSH_OV (root_args (i));
+        // If not all args are given, fill up with nil objects
+        for (; i < n_args; i++)
+          PUSH_OV ();
+
+        set_nargin (n_root_args);   // Needed for nargin function
+      }
+    else
+      {
+        // Dont push varargin arguments
+        int n_args_to_push = std::min (n_args - 1, n_root_args);
+        int ii = 0;
+        for (ii = 0; ii < n_args_to_push; ii++)
+          PUSH_OV (root_args (ii));
+
+        // Construct missing args (if any)
+        for (; ii < n_args - 1; ii++)
+          PUSH_OV ();
+
+        // The rest of the args are to be put in a cell and be put
+        // in the last argument slot
+        int n_varargin = n_root_args - n_args_to_push;
+
+        if (n_varargin > 0)
+          {
+            Cell cell(1, n_varargin);
+            int i;
+            for (i = 0; i < n_varargin; i++)
+              {
+                cell (0, i) = root_args (ii + i);
+              }
+            PUSH_OV (cell);
+          }
+        else
+          PUSH_OV (Cell (0,0)); // Empty cell into varargin's slot
+
+        set_nargin (n_args_to_push + n_varargin);
+      }
+    // Construct nil octave_values for locals in their slots
+    for (int i = 0; i < n_locals - n_args - n_returns; i++)
+      PUSH_OV ();
+
+    /* We do the number of args check after frame init so that the unwind is easier. */
+    if (!is_varargin && n_args < n_root_args)
+      {
+        std::string fn_name = unwind_data->m_name;
+        (*sp++).pee = new execution_exception {"error", "Octave:invalid-fun-call",
+                                               fn_name + ": function called with too many inputs"};
+        (*sp++).i = static_cast<int> (error_type::EXECUTION_EXC);
+        ip++; // unwind expects ip to point to two after the opcode being executed
+        goto unwind;
+      }
+    if (!is_varargout && root_nargout > n_returns - 1) // n_returns includes %nargout, so subtract one
+      {
+        std::string fn_name = unwind_data->m_name;
+        (*sp++).pee = new execution_exception {"error", "Octave:invalid-fun-call",
+                                               fn_name + ": function called with too many outputs"};
+        (*sp++).i = static_cast<int> (error_type::EXECUTION_EXC);
+        ip++;
+        goto unwind;
+      }
+
+    m_original_lvalue_list = m_tw->lvalue_list ();
+    m_tw->set_lvalue_list (nullptr);
+  }
+
+  // Go go go
+  DISPATCH ();
+
+pop:
+  {
+    (*--sp).ov.~octave_value ();
+    DISPATCH_1BYTEOP ();
+  }
+dup:
+  {
+    new (sp) octave_value ((sp[-1]).ov);
+    sp++;
+    DISPATCH_1BYTEOP ();
+  }
+load_cst:
+  {
+    // The next instruction is the offset in the data.
+    int offset = arg0;
+
+    // Copy construct it into the top of the stack
+    new (sp++) octave_value (data [offset]);
+
+    DISPATCH ();
+  }
+mul_dbl:
+  MAKE_BINOP_SPECIALIZED (m_fn_dbl_mul, mul, MUL, m_scalar_typeid)
+  DISPATCH_1BYTEOP();
+mul:
+  MAKE_BINOP_SELFMODIFYING (binary_op::op_mul, mul_dbl, MUL_DBL)
+  DISPATCH_1BYTEOP();
+div_dbl:
+  MAKE_BINOP_SPECIALIZED (m_fn_dbl_div, div, DIV, m_scalar_typeid)
+  DISPATCH_1BYTEOP();
+div:
+  MAKE_BINOP_SELFMODIFYING (binary_op::op_div, div_dbl, DIV_DBL)
+  DISPATCH_1BYTEOP();
+add_dbl:
+  MAKE_BINOP_SPECIALIZED (m_fn_dbl_add, add, ADD, m_scalar_typeid)
+  DISPATCH_1BYTEOP();
+add:
+  MAKE_BINOP_SELFMODIFYING (binary_op::op_add, add_dbl, ADD_DBL)
+  DISPATCH_1BYTEOP();
+sub_dbl:
+  MAKE_BINOP_SPECIALIZED (m_fn_dbl_sub, sub, SUB, m_scalar_typeid)
+  DISPATCH_1BYTEOP();
+sub:
+  MAKE_BINOP_SELFMODIFYING (binary_op::op_sub, sub_dbl, SUB_DBL)
+  DISPATCH_1BYTEOP();
+ret:
+  {
+    // If we have any active ~/"black hole", e.g. [~] = foo() in the stack
+    // the m_output_ignore_data pointer is live. We need to pop and reset
+    // lvalue lists for the tree walker.
+    if (OCTAVE_UNLIKELY (m_output_ignore_data))
+      {
+        m_output_ignore_data->pop_frame (*this);
+        output_ignore_data::maybe_delete_ignore_data (*this, 0);
+      }
+
+    // We need to tell the bytecode frame we are unwinding so that it can save
+    // variables on the VM stack if it is referenced from somewhere else.
+    m_tw->get_current_stack_frame ()->vm_unwinds ();
+
+    // Assert that the stack pointer is back where it should be
+    panic_unless (bsp + N_LOCALS() == sp);
+
+    int n_returns_callee = N_RETURNS ();
+
+    bool is_varargout = n_returns_callee < 0;
+    if (OCTAVE_UNLIKELY (is_varargout))
+      n_returns_callee = -n_returns_callee;
+    panic_unless (n_returns_callee > 0);
+
+    int n_locals_callee = N_LOCALS ();
+
+    // Destroy locals
+    //
+    // Note that we destroy from the bottom towards
+    // the top of the stack to calls ctors in the same
+    // order as the treewalker.
+    int n_dtor = n_locals_callee - n_returns_callee;
+
+    stack_element *first = sp - n_dtor;
+    while (first != sp)
+      {
+        (*first++).ov.~octave_value ();
+      }
+    sp -= n_dtor;
+
+    if (OCTAVE_UNLIKELY (is_varargout))
+      {
+        // Check that varargout is a cell or undefined
+        octave_value &ov_vararg = sp[-1].ov;
+
+        bool vararg_defined = ov_vararg.is_defined ();
+        if (vararg_defined && !ov_vararg.iscell ())
+          {
+            (*sp++).pee = new execution_exception {"error","","varargout must be a cell array object"};
+            (*sp++).i = static_cast<int> (error_type::EXECUTION_EXC);
+            goto unwind;
+          }
+      }
+
+    if (OCTAVE_UNLIKELY (m_profiler_enabled))
+      {
+        auto p = vm::m_vm_profiler;
+        if (p)
+          {
+            std::string fn_name = data[2].string_value (); // profiler_name () querried at compile time
+            p->exit_fn (fn_name);
+          }
+      }
+
+    // Are we at the root routine?
+    if (bsp == rsp)
+      {
+        CHECK (m_output_ignore_data == nullptr); // This can't be active
+
+        // Collect return values in octave_value_list.
+        // Skip %nargout, the first value, which is an integer.
+        // n_returns_callee includes %nargout, but root_nargout doesn't.
+
+        octave_value_list ret;
+
+        int j;
+        // nargout 0 should still give one return value, if there is one
+        int n_root_wanted = std::max (root_nargout, 1);
+
+        if (is_varargout)
+          {
+            CHECK_PANIC(n_returns_callee >= 2);
+
+            octave_value ov_vararg = sp[-1].ov; // varargout on the top of the stack
+
+            bool vararg_defined = ov_vararg.is_defined ();
+
+            for (j = 1; j < n_returns_callee - 1 && j < n_root_wanted + 1; j++)
+              {
+                if (OCTAVE_UNLIKELY (bsp[j].ov.is_ref ()))
+                  ret.append (bsp[j].ov.ref_rep ()->deref ());
+                else
+                  ret.append (std::move (bsp[j].ov));
+                bsp[j].ov.~octave_value ();
+              }
+            // Append varargout to ret
+            if (vararg_defined && j < n_root_wanted + 1)
+              {
+                // Push the cell array elements to the stack
+                Cell cell_vararg = ov_vararg.cell_value ();
+                for (int i = 0; i < cell_vararg.numel () && j + i < n_root_wanted + 1; i++)
+                  {
+                    octave_value &arg = cell_vararg(i);
+                    ret.append (std::move (arg));
+                  }
+              }
+
+            // Destroy varargout and rest of return values, if any
+            for (; j < n_returns_callee; j++)
+              bsp[j].ov.~octave_value ();
+          }
+        else
+          {
+            for (j = 1; j < n_returns_callee && j < (n_root_wanted + 1); j++)
+              {
+                if (OCTAVE_UNLIKELY (bsp[j].ov.is_ref ()))
+                  ret.append (bsp[j].ov.ref_rep ()->deref ());
+                else
+                  ret.append (std::move (bsp[j].ov));
+                bsp[j].ov.~octave_value ();
+              }
+            // Destroy rest of return values, if any
+            for (; j < n_returns_callee; j++)
+              bsp[j].ov.~octave_value ();
+          }
+
+        //Note: Stack frame object popped by caller
+        CHECK_STACK (0);
+        this->m_dbg_proper_return = true;
+
+        m_tw->set_lvalue_list (m_original_lvalue_list);
+        return ret;
+      }
+
+    // If the root stack pointer is not the same as the base pointer,
+    // we are returning from a bytecode routine to another bytecode routine,
+    // so we have to restore the caller stack frame and cleanup the callee's.
+    //
+    // Essentially do the same thing as in the call but in reverse order.
+
+    // The sp now points one past the last return value
+    stack_element *caller_stack_end = sp - n_returns_callee;
+    sp = caller_stack_end; // sp points to one past caller stack
+
+    // The amount of return values the caller wants, as stored last on the caller stack.
+    // Note that this is not necessarily the same as nargout, the amount of return values the caller
+    // want the callee to produce, stored first on callee stack.
+    int caller_nval_back = (*--sp).u;
+
+    // Restore ip
+    ip = (*--sp).puc;
+
+    // Restore bsp
+    bsp = (*--sp).pse;
+
+    // Restore id names
+    name_data = (*--sp).ps;
+
+    // Restore data
+    data = (*--sp).pov;
+
+    // Restore code
+    code = (*--sp).puc;
+
+    // Restore unwind data
+    unwind_data = (*--sp).pud;
+
+    // Restore the stack pointer. The stored address is the first arg
+    // on the caller stack, or where it would have been if there are no args.
+    // The args were moved to the callee stack and destroyed on the caller
+    // stack in the call.
+    sp = sp[-1].pse;
+
+    // We now have the object that was called on the stack, destroy it
+    STACK_DESTROY (1);
+
+    // Move the callee's return values to the top of the stack of the caller.
+    // Renaming variables to keep my sanity.
+    int n_args_caller_expects = caller_nval_back;
+    int n_args_callee_has = n_returns_callee - 1; // Exclude %nargout
+    int n_args_actually_moved = 0;
+
+    if (OCTAVE_UNLIKELY (is_varargout))
+      {
+        // Expand the cell array and push the elements to the end of the callee stack
+        octave_value ov_vararg = std::move (caller_stack_end[n_args_callee_has].ov);
+
+        n_args_callee_has--; // Assume empty varargout
+
+        bool vararg_defined = ov_vararg.is_defined ();
+
+        if (vararg_defined)
+          {
+            // Push the cell array elements to the stack
+            Cell cell_vararg = ov_vararg.cell_value ();
+            octave_idx_type n = cell_vararg.numel ();
+
+            octave_idx_type n_to_push;
+            // Atleast one (for 'ans') and deduct the amount of args already on the stack
+            n_to_push = std::max (1, n_args_caller_expects) - n_args_callee_has;
+            // Can't be negative
+            n_to_push = n_to_push < 0 ? 0 : n_to_push;
+            // Can't push more than amount of elements in cell
+            n_to_push = std::min (n , n_to_push);
+
+            CHECK_STACK_N (n_to_push);
+
+            int i = 0;
+            for (; i < n_to_push; i++)
+              {
+                octave_value &arg = cell_vararg(i);
+                // Construct octave_value with placement new, in the end of the callee stack
+                new (&caller_stack_end[n_args_callee_has + 1 + i].ov) octave_value (std::move (arg)); // +1 for %nargout
+              }
+
+            n_args_callee_has += i;
+          }
+        else if (n_args_caller_expects)
+          {
+            // Push an empty varargout
+            new (&caller_stack_end[n_args_callee_has + 1].ov) octave_value {};
+
+            n_args_callee_has++;
+          }
+      }
+
+    int n_args_to_move = std::min (n_args_caller_expects, n_args_callee_has);
+
+    // If no return values is requested but there exists return values,
+    // we need to push one to be able to write it to ans.
+    if (n_args_caller_expects == 0 && n_args_callee_has)
+      {
+        n_args_actually_moved++;
+        PUSH_OV (std::move (caller_stack_end[1].ov));
+      }
+    // If the callee isn't returning anything, we need to push a
+    // nil object, since the caller always anticipates at least
+    // one object, even for nargout == 0.
+    else if (n_args_caller_expects == 0 && !n_args_callee_has)
+      PUSH_OV();
+    // If the stacks will overlap due to many returns, do copy via container
+    else if (sp + n_args_caller_expects >= caller_stack_end)
+      {
+        // This pushes 'n_args_to_move' number of return values and 'n_args_caller_expects - n_args_to_move'
+        // number of nils.
+        copy_many_args_to_caller (sp, caller_stack_end + 1, n_args_to_move, n_args_caller_expects);
+        n_args_actually_moved = n_args_caller_expects;
+        sp += n_args_actually_moved;
+      }
+    // Move 'n_args_to_move' return value from callee to caller
+    else
+      {
+        // If the caller wants '[a, b, ~]' and the callee has 'd e'
+        // we need to push 'nil' 'd' 'e'
+        for (int i = n_args_to_move; i < n_args_caller_expects; i++)
+          PUSH_OV ();
+        for (int i = 0; i < n_args_to_move; i++)
+          {
+            // Move into caller stack. Note that the order is reversed, such that
+            // a b c on the callee stack becomes c b a on the caller stack.
+            int idx = n_args_to_move - 1 - i;
+            octave_value &arg = caller_stack_end[1 + idx].ov;
+
+            PUSH_OV (std::move (arg));
+          }
+        n_args_actually_moved = n_args_caller_expects;
+      }
+
+    // Destroy the unused return values on the callee stack
+    for (int i = 0; i < n_args_callee_has; i++)
+      {
+        int idx = n_args_callee_has - 1 - i;
+        caller_stack_end[1 + idx].ov.~octave_value (); // Destroy ov in callee
+      }
+
+    // Pop the current dynamic stack frame
+    std::shared_ptr<stack_frame> fp = m_tw->pop_return_stack_frame ();
+    // If the pointer is not shared, stash it in a cache which is used
+    // to avoid having to allocate shared pointers each frame push.
+    // If it is a closure context, there might be weak pointers to it from function handles.
+    if (fp.unique () && m_frame_ptr_cache.size () < 8 && ! fp->is_closure_context () 
+        && fp->is_user_fcn_frame ())
+      {
+        fp->vm_clear_for_cache ();
+        m_frame_ptr_cache.push_back (std::move (fp));
+      }
+
+    // Continue execution back in the caller
+  }
+  DISPATCH ();
+assign:
+  {
+    // The next instruction is the slot number
+    int slot = arg0;
+
+    octave_value_vm &ov_rhs = TOP_OV_VM ();
+    octave_value_vm &ov_lhs = bsp[slot].ov_vm;
+
+    // Handle undefined, cs-lists, objects that need an unique call etc
+    // in a separate code block to keep assign short.
+    if (OCTAVE_UNLIKELY (ov_rhs.vm_need_dispatch_assign_rhs () ||
+                         ov_lhs.vm_need_dispatch_assign_lhs ()))
+        goto assign_dispath;
+
+    ov_lhs = std::move (ov_rhs); // Note move
+
+    ov_rhs.~octave_value_vm (); // Destroy the top of the stack.
+    STACK_SHRINK (1);
+  }
+  DISPATCH();
+
+// Note: Not an op-code. Only jumped to from assign above.
+assign_dispath:
+{
+  // Extract the slot number again
+  int slot = arg0;
+
+  octave_value &ov_rhs = TOP_OV ();
+  octave_value &ov_lhs = bsp[slot].ov;
+
+  // If rhs is a "comma separated list" we just assign the first one.
+  // E.g.:
+  // a = {1,2,3};
+  // b = a{:}; % This assignment
+  //
+  // TODO: Do some smart function in ov for this?
+  //       Combine with undefined check?
+  if (ov_rhs.is_cs_list ())
+    {
+      const octave_value_list lst = ov_rhs.list_value ();
+
+      if (lst.empty ())
+        {
+          // TODO: Need id, name
+          (*sp++).i = static_cast<int> (error_type::INVALID_N_EL_RHS_IN_ASSIGNMENT);
+          goto unwind;
+        }
+
+      ov_rhs = lst(0);
+    }
+
+  if (ov_rhs.is_undefined ())
+    {
+      // TODO: Need id, name
+      (*sp++).i = static_cast<int> (error_type::RHS_UNDEF_IN_ASSIGNMENT);
+      goto unwind;
+    }
+
+  // If the object in the slot is the last one of it, we need
+  // to call its object dtor.
+  // TODO: Probably not needed since the Octave dtor will be called
+  //       by the C++ dtor of ov_lhs's m_count is 0??? The assign
+  //       function calls this function though ...
+  ov_lhs.maybe_call_dtor ();
+
+  if (ov_rhs.vm_need_storable_call ())
+    ov_rhs.make_storable_value (); // Some types have lazy copy
+
+  if (OCTAVE_LIKELY (!ov_lhs.is_ref ()))
+    ov_lhs = std::move (ov_rhs); // Note move
+  else
+    ov_lhs.ref_rep ()->set_value (std::move (ov_rhs));
+
+  STACK_DESTROY (1);
+}
+DISPATCH();
+
+jmp_if_bool:
+{
+  octave_value_vm &ov_1 = TOP_OV_VM ();
+
+  if (OCTAVE_UNLIKELY (ov_1.type_id () != m_bool_typeid))
+    {
+      // Change the specialized opcode to the generic one
+      ip[-2] = static_cast<unsigned char> (INSTR::JMP_IF);
+      goto jmp_if;
+    }
+
+  unsigned char b0 = arg0;
+  unsigned char b1 = *ip++;
+
+  int target = USHORT_FROM_UCHARS (b0, b1);
+
+  octave_bool &ovb_bool = REP (octave_bool, ov_1);
+
+  bool is_true = ovb_bool.octave_bool::is_true ();
+
+  ov_1.~octave_value_vm ();
+  STACK_SHRINK (1);
+
+  if (is_true)
+    ip = code + target;
+}
+DISPATCH ();
+
+jmp_if:
+  {
+    octave_value &ov_1 = TOP_OV ();
+
+    if (OCTAVE_UNLIKELY (ov_1.type_id () == m_bool_typeid))
+      {
+        // Change the generic opcode to the specialized one
+        ip[-2] = static_cast<unsigned char> (INSTR::JMP_IF_BOOL);
+        goto jmp_if_bool;
+      }
+
+    unsigned char b0 = arg0;
+    unsigned char b1 = *ip++;
+
+    int target = USHORT_FROM_UCHARS (b0, b1);
+
+    bool is_true;
+    if (ov_1.is_defined ())
+      {
+        try
+          {
+            is_true = ov_1.is_true ();
+          }
+        CATCH_INTERRUPT_EXCEPTION
+        CATCH_INDEX_EXCEPTION
+        CATCH_EXECUTION_EXCEPTION
+        CATCH_BAD_ALLOC
+        CATCH_EXIT_EXCEPTION
+      }
+    else
+      {
+        (*sp++).i = static_cast<int> (error_type::IF_UNDEFINED);
+        goto unwind;
+      }
+
+    STACK_DESTROY (1);
+
+    if (is_true)
+      ip = code + target;
+  }
+  DISPATCH();
+jmp:
+  {
+    unsigned char b0 = arg0;
+    unsigned char b1 = *ip++;
+
+    int target = USHORT_FROM_UCHARS (b0, b1);
+    ip = code + target;
+  }
+  DISPATCH ();
+jmp_ifn_bool:
+{
+  octave_value_vm &ov_1 = TOP_OV_VM ();
+
+  if (OCTAVE_UNLIKELY (ov_1.type_id () != m_bool_typeid))
+    {
+      // Change the specialized opcode to the generic one
+      ip[-2] = static_cast<unsigned char> (INSTR::JMP_IFN);
+      goto jmp_ifn;
+    }
+
+  unsigned char b0 = arg0;
+  unsigned char b1 = *ip++;
+
+  int target = USHORT_FROM_UCHARS (b0, b1);
+
+  octave_bool &ovb_bool = REP (octave_bool, ov_1);
+
+  bool is_true = ovb_bool.octave_bool::is_true ();
+
+  ov_1.~octave_value_vm ();
+  STACK_SHRINK (1);
+
+  if (!is_true)
+    ip = code + target;
+}
+DISPATCH ();
+
+jmp_ifn:
+  {
+    octave_value &ov_1 = TOP_OV ();
+
+    if (OCTAVE_UNLIKELY (ov_1.type_id () == m_bool_typeid))
+      {
+        // Change the generic opcode to the specialized one
+        ip[-2] = static_cast<unsigned char> (INSTR::JMP_IFN_BOOL);
+        goto jmp_ifn_bool;
+      }
+
+    unsigned char b0 = arg0;
+    unsigned char b1 = *ip++;
+
+    int target = USHORT_FROM_UCHARS (b0, b1);
+
+    bool is_true;
+    if (ov_1.is_defined ()) //10
+      {
+        try
+          {
+            is_true = ov_1.is_true ();
+          }
+        CATCH_INTERRUPT_EXCEPTION
+        CATCH_INDEX_EXCEPTION
+        CATCH_EXECUTION_EXCEPTION
+        CATCH_BAD_ALLOC
+        CATCH_EXIT_EXCEPTION
+      }
+    else
+      {
+        (*sp++).i = static_cast<int> (error_type::IF_UNDEFINED);
+        goto unwind;
+      }
+
+    STACK_DESTROY (1);
+
+    if (!is_true)
+      ip = code + target;
+  }
+  DISPATCH ();
+push_slot_nargoutn:
+  {
+    // The next instruction is the slot number
+    int slot = arg0;
+
+    octave_value &ov = bsp[slot].ov;
+
+    // Handle undefined (which might be an error or function
+    // call on command form) or a function object.
+    if (ov.is_maybe_function ())
+      goto cmd_fcn_or_undef_error;
+
+    ip++; // nargout not needed
+
+    // Push the value in the slot to the stack
+    if (OCTAVE_LIKELY (!ov.is_ref ()))
+      PUSH_OV (ov);
+    else
+      PUSH_OV (ov.ref_rep ()->deref ()); // global, persistent ... need dereferencing
+  }
+  DISPATCH();
+set_folded_cst:
+{
+  int slot = arg0;
+  octave_cached_value *ovb = static_cast<octave_cached_value*> (bsp[slot].ovb);
+  ovb->set_cached_obj (std::move (TOP_OV ()));
+  STACK_DESTROY (1);
+}
+DISPATCH();
+push_folded_cst:
+  {
+    int slot = arg0;
+    unsigned char b0 = *ip++;
+    unsigned char b1 = *ip++;
+
+    // If the slot value is defined it is a octave_cached_value, since only
+    // this opcode and SET_FOLDED_CST writes to the slot.
+
+    bool did_it = false;
+    octave_base_value *ovb = bsp[slot].ovb;
+    if (ovb->is_defined ())
+      {
+        octave_cached_value *ovbc = static_cast<octave_cached_value *> (ovb);
+        if (ovbc->cache_is_valid ())
+          {
+            // Use the cached value. Push it to the stack.
+            PUSH_OV (ovbc->get_cached_value ());
+            // Jump over the initialization code (the folded code) of the
+            // cached value
+            int target = USHORT_FROM_UCHARS (b0, b1);
+            ip = code + target;
+
+            did_it = true;
+          }
+      }
+
+    if (! did_it)
+      {
+        // Put a octave_cached_value in the slot for SET_FOLDED_CST
+        bsp[slot].ov = octave_value {new octave_cached_value};
+      }
+  }
+  DISPATCH();
+
+push_slot_nargout0:
+push_slot_nargout1:
+push_slot1_special:
+push_slot_nx:
+  {
+    int slot = arg0;
+
+    octave_base_value *ovb = bsp[slot].ovb;
+
+    // Some ov:s need some checks before pushing
+    if (OCTAVE_UNLIKELY (ovb->vm_need_dispatch_push ()))
+      goto push_slot_dispatch;
+
+    PUSH_OVB (ovb);
+  }
+  DISPATCH();
+// This is not an op-code and is only jumped to from above opcode.
+push_slot_dispatch:
+  {
+    int slot = arg0;
+
+    octave_value &ov = bsp[slot].ov;
+
+    // Handle some special cases separately.
+    // I.e. cmd fn calls or classdef metas.
+    // Also error if no function-ish thing is found
+    // in lookups.
+    if (ov.is_maybe_function ())
+      goto cmd_fcn_or_undef_error;
+
+    // Push the value in the slot to the stack
+    if (OCTAVE_LIKELY (!ov.is_ref ()))
+      PUSH_OV (ov);
+    else
+      PUSH_OV (ov.ref_rep ()->deref ()); // global, persistent ... need dereferencing
+  }
+  DISPATCH();
+
+disp:
+  {
+    octave_value &ov = TOP_OV ();
+    // 0 is magic slot number that indicates no name or always not a command
+    // for this opcode.
+    int slot = arg0;
+    int slot_was_cmd = POP_CODE_USHORT (); // Marker for if the preceding call was a command call
+
+    bool call_was_cmd = false;
+    if (slot_was_cmd)
+      {
+        octave_value &ov_call_was_cmd = bsp[slot_was_cmd].ov;
+        if (ov_call_was_cmd.is_defined ())
+          call_was_cmd = true;
+      }
+
+    if (m_tw->statement_printing_enabled () && ov.is_defined ())
+      {
+        interpreter& interp = m_tw->get_interpreter ();
+
+        if (ov.is_cs_list ())
+          {
+            octave_value_list ovl = ov.list_value ();
+
+            for (int i = 0; i < ovl.length (); i++)
+              {
+                octave_value el_ov = ovl(i);
+                // We are not printing undefined elements
+                if (el_ov.is_undefined ())
+                  continue;
+                octave_value_list el_ovl {el_ov};
+                el_ovl.stash_name_tags (string_vector ("ans"));
+                m_tw->set_active_bytecode_ip (ip - code); // Needed if display calls inputname()
+
+                try
+                  {
+                    interp.feval ("display", el_ovl);
+                  }
+                CATCH_INTERRUPT_EXCEPTION
+                CATCH_INDEX_EXCEPTION
+                CATCH_EXECUTION_EXCEPTION
+                CATCH_BAD_ALLOC
+                CATCH_EXIT_EXCEPTION
+              }
+          }
+        else
+          {
+            octave_value_list ovl;
+            ovl.append (ov);
+
+            if (call_was_cmd)
+              ovl.stash_name_tags (string_vector ("ans"));
+            else if (slot != 0)
+              ovl.stash_name_tags (string_vector (name_data[slot]));
+            else
+              ovl.stash_name_tags (string_vector {});
+
+            m_tw->set_active_bytecode_ip (ip - code); // Needed if display calls inputname()
+
+            try
+              {
+                interp.feval ("display", ovl);
+              }
+            CATCH_INTERRUPT_EXCEPTION
+            CATCH_INDEX_EXCEPTION
+            CATCH_EXECUTION_EXCEPTION
+            CATCH_BAD_ALLOC
+            CATCH_EXIT_EXCEPTION
+
+          }
+      }
+
+    STACK_DESTROY (1);
+  }
+  DISPATCH ();
+
+push_slot_disp:
+  {
+    int slot = arg0;
+    int slot_was_cmd = POP_CODE_USHORT ();
+    octave_value &ov = bsp[slot].ov;
+    octave_value &ov_was_cmd = bsp[slot_was_cmd].ov;
+
+    // Handle some special cases separately.
+    // I.e. cmd fn calls or classdef metas.
+    // Also error if no function-ish thing is found
+    // in lookups.
+
+    // Assume that the pushed slot will not be a cmd.
+    // disp will later use the ov_was_cmd slot to choose between printing
+    // 'ans = ...' or 'foo = ...'
+    ov_was_cmd = octave_value ();
+
+    if (ov.is_maybe_function ())
+      {
+        if (ov.is_undefined ()) // class objects are defined
+          ov_was_cmd = true;
+        ip -= 2; // Rewind to slot so the state matches 'push_slot_nargoutn' and 'push_slot_dispatch'.
+        goto cmd_fcn_or_undef_error;
+      }
+
+    // Push the value in the slot to the stack
+    if (OCTAVE_LIKELY (!ov.is_ref ()))
+      PUSH_OV (ov);
+    else
+      PUSH_OV (ov.ref_rep ()->deref ()); // global, persistent ... need dereferencing
+  }
+  DISPATCH();
+
+// Some kludge to handle the possibility of command form function calls.
+cmd_fcn_or_undef_error:
+  {
+    int slot = arg0;
+    octave_value ov = bsp[slot].ov;
+    bool is_ref = ov.is_ref ();
+    if (is_ref)
+      ov = ov.ref_rep ()->deref ();
+
+    // Check to opcode to see how many nargout there are.
+    // Also skip ip to the end of the opcode.
+    int nargout;
+    bool push_classdef_metas = false;
+    int wide_opcode_offset = slot < 256 ? 0 : -1; // If WIDE is used, we need to look further back
+
+    INSTR opcode = static_cast<INSTR> (*(ip - 2 + wide_opcode_offset));
+    if (opcode == INSTR::PUSH_SLOT_NARGOUT1 ||
+        opcode == INSTR::PUSH_PI || opcode == INSTR::PUSH_I || opcode == INSTR::PUSH_E)
+      nargout = 1;
+    else if (opcode == INSTR::PUSH_SLOT_NARGOUT0)
+      nargout = 0;
+    else if (opcode == INSTR::PUSH_SLOT_NARGOUTN)
+      nargout = *ip++;
+    else if (opcode == INSTR::PUSH_SLOT_NARGOUT1_SPECIAL)
+      {
+        push_classdef_metas = true;
+        nargout = 1;
+      }
+    else if (opcode == INSTR::PUSH_SLOT_DISP)
+      {
+        nargout = 0;
+        ip += 2; // Skip the maybe command slot
+      }
+    else if (opcode == INSTR::PUSH_SLOT_NX)
+      {
+        nargout = bsp[0].i;
+      }
+    else
+      PANIC ("Invalid opcode");
+
+    bool ov_defined1 = ov.is_defined ();
+
+    if (!ov_defined1 && ov.is_nil ())
+      {
+        ov = octave_value (new octave_fcn_cache (name_data[slot]));
+        if (bsp[slot].ov.is_ref ())
+          bsp[slot].ov.ref_rep ()->set_value (ov);
+        else
+          bsp[slot].ov = ov;
+      }
+
+    if (!ov_defined1 && ov.is_function_cache ())
+      {
+        try
+          {
+            octave_fcn_cache &cache = REP (octave_fcn_cache, ov);
+            ov = cache.get_cached_obj ();
+          }
+        CATCH_EXECUTION_EXCEPTION
+      }
+
+    if (! ov.is_defined ())
+      {
+        (*sp++).ps = new std::string {name_data[slot]};
+        (*sp++).i = static_cast<int> (error_type::ID_UNDEFINED);
+        goto unwind;
+      }
+
+    // When executing op-code PUSH_SLOT_NARGOUT1_SPECIAL ...
+    // Essentially if we have a foo{1} where foo is a classdef
+    // we need to push it for the {1} indexing.
+    if (push_classdef_metas && ov.is_classdef_meta ())
+      PUSH_OV (ov);
+    else if (ov.is_function ())
+      {
+        octave_function *fcn = ov.function_value (true); //TODO: Unwind on error?
+
+        // TODO: Bytecode call
+        if (fcn)
+          {
+
+            if (fcn->is_compiled ())
+              {
+                octave_user_code *usr_fcn = static_cast<octave_user_code *> (fcn);
+
+                // A lot of code in this define
+                PUSH_OV (ov); // Calling convention anticipates object to call on the stack.
+                int n_args_on_stack = 0;
+                int caller_nvalback = nargout; // Caller wants as many values returned as it wants the callee to produce
+                MAKE_BYTECODE_CALL
+
+                // Now dispatch to first instruction in the
+                // called function
+              }
+            else
+              {
+              try
+                {
+                  m_tw->set_active_bytecode_ip (ip - code);
+                  octave_value_list ovl = fcn->call (*m_tw, nargout);
+
+                  EXPAND_CSLIST_PUSH_N_OVL_ELEMENTS_TO_STACK (ovl, nargout);
+                }
+              CATCH_INTERRUPT_EXCEPTION
+              CATCH_INDEX_EXCEPTION
+              CATCH_EXECUTION_EXCEPTION
+              CATCH_BAD_ALLOC
+              CATCH_EXIT_EXCEPTION
+            }
+          }
+        else
+          PUSH_OV (ov); // TODO: The walker does this. Sane?
+      }
+    else
+      PUSH_OV (ov); // TODO: The walker does this. Sane?
+  }
+  DISPATCH ();
+le_dbl:
+  MAKE_BINOP_SPECIALIZED (m_fn_dbl_le, le, LE, m_scalar_typeid)
+  DISPATCH_1BYTEOP ();
+le:
+  MAKE_BINOP_SELFMODIFYING (binary_op::op_lt, le_dbl, LE_DBL)
+  DISPATCH_1BYTEOP ();
+le_eq_dbl:
+  MAKE_BINOP_SPECIALIZED (m_fn_dbl_le_eq, le_eq, LE_EQ, m_scalar_typeid)
+  DISPATCH_1BYTEOP();
+le_eq:
+  MAKE_BINOP_SELFMODIFYING(binary_op::op_le, le_eq_dbl, LE_EQ_DBL)
+  DISPATCH_1BYTEOP();
+gr_dbl:
+  MAKE_BINOP_SPECIALIZED (m_fn_dbl_gr, gr, GR, m_scalar_typeid)
+  DISPATCH_1BYTEOP();
+gr:
+  MAKE_BINOP_SELFMODIFYING(binary_op::op_gt, gr_dbl, GR_DBL)
+  DISPATCH_1BYTEOP();
+gr_eq_dbl:
+  MAKE_BINOP_SPECIALIZED (m_fn_dbl_gr_eq, gr_eq, GR_EQ, m_scalar_typeid)
+  DISPATCH_1BYTEOP();
+gr_eq:
+  MAKE_BINOP_SELFMODIFYING(binary_op::op_ge, gr_eq_dbl, GR_EQ_DBL)
+  DISPATCH_1BYTEOP();
+eq_dbl:
+  MAKE_BINOP_SPECIALIZED(m_fn_dbl_eq, eq, EQ, m_scalar_typeid)
+  DISPATCH_1BYTEOP();
+eq:
+  MAKE_BINOP_SELFMODIFYING(binary_op::op_eq, eq_dbl, EQ_DBL)
+  DISPATCH_1BYTEOP();
+neq_dbl:
+  MAKE_BINOP_SPECIALIZED(m_fn_dbl_neq, neq, NEQ, m_scalar_typeid)
+  DISPATCH_1BYTEOP();
+neq:
+  MAKE_BINOP_SELFMODIFYING(binary_op::op_ne, neq_dbl, NEQ_DBL)
+  DISPATCH_1BYTEOP();
+
+
+index_id1_mat_1d:
+{
+  int slot = arg0;
+  ip++; // n_args_on_stack ignored
+
+  octave_base_value *arg1 = TOP_OVB ();
+  octave_value &mat = SEC_OV ();
+
+  bool is_scalar = arg1->type_id () == m_scalar_typeid; // scalar is C "double"
+  bool is_mat = mat.is_full_num_matrix ();
+  // If the args have changed types we need to use the generic index opcode
+  if (OCTAVE_UNLIKELY (!is_scalar || !is_mat))
+    {
+      // Rewind ip to n_args_on_stack
+      ip -= 1;
+      int wide_opcode_offset = slot < 256 ? 0 : -1; // If WIDE is used, we need to look further back
+      // Change the specialized opcode to the generic one
+      ip[-2 + wide_opcode_offset] = static_cast<unsigned char> (INSTR::INDEX_ID_NARGOUT1);
+      goto index_id1;
+    }
+
+  try
+    {
+      octave_scalar *arg1_double = static_cast<octave_scalar*> (arg1);
+
+      double idx_double = arg1_double->double_value ();
+      octave_idx_type idx = static_cast<octave_idx_type> (idx_double);
+
+      if (static_cast<double> (idx) != idx_double)
+        err_invalid_index (idx_double - 1, // Expects zero-indexed index
+                           1,  // The 1st index has the wrong dimension
+                           1); // Total amount of dimensions
+      if (idx <= 0)
+        err_invalid_index (idx - 1, 1, 1);
+
+      // Arguments are one-indexed but checked_full_matrix_elem() is 0-indexed.
+      octave_value ans = mat.checked_full_matrix_elem (idx - 1);
+      STACK_DESTROY (2);
+      PUSH_OV (std::move (ans));
+    }
+  CATCH_INTERRUPT_EXCEPTION
+  CATCH_INDEX_EXCEPTION_WITH_NAME
+  CATCH_EXECUTION_EXCEPTION
+  CATCH_BAD_ALLOC
+  CATCH_EXIT_EXCEPTION
+}
+DISPATCH();
+
+index_id1_mat_2d:
+{
+  int slot = arg0;
+  ip++; // n_args_on_stack ignored
+
+  octave_base_value *arg2 = TOP_OVB (); // Column index
+  octave_base_value *arg1 = SEC_OVB (); // Row index
+  octave_value &mat = THIRD_OV ();
+
+  bool is_scalar; // scalar as in C "double"
+  is_scalar = arg1->type_id () == m_scalar_typeid;
+  is_scalar = arg2->type_id () == m_scalar_typeid && is_scalar;
+
+  bool is_mat = mat.is_full_num_matrix ();
+  // If the args have changed types we need to use the generic index opcode
+  if (OCTAVE_UNLIKELY (!is_scalar || !is_mat))
+    {
+      // Rewind ip to n_args_on_stack
+      ip -= 1;
+      int wide_opcode_offset = slot < 256 ? 0 : -1; // If WIDE is used, we need to look further back
+      // Change the specialized opcode to the generic one
+      ip[-2 + wide_opcode_offset] = static_cast<unsigned char> (INSTR::INDEX_ID_NARGOUT1);
+      goto index_id1;
+    }
+
+  try
+    {
+      octave_scalar *arg1_double = static_cast<octave_scalar*> (arg1);
+
+      double idx1_double = arg1_double->double_value ();
+      octave_idx_type idx1 = static_cast<octave_idx_type> (idx1_double);
+
+      if (static_cast<double> (idx1) != idx1_double)
+        err_invalid_index (idx1_double - 1, // Expects zero-indexed index
+                           1,  // The 1st index has the wrong dimension
+                           2); // Total amount of dimensions
+      if (idx1 <= 0)
+        err_invalid_index (idx1 - 1, 1, 2);
+
+      octave_scalar *arg2_double = static_cast<octave_scalar*> (arg2);
+
+      double idx2_double = arg2_double->double_value ();
+      octave_idx_type idx2 = static_cast<octave_idx_type> (idx2_double);
+
+      if (static_cast<double> (idx2) != idx2_double)
+        err_invalid_index (idx2_double - 1, // Expects zero-indexed index
+                           2,  // The 2nd index has the wrong dimension
+                           2); // Total amount of dimensions
+      if (idx2 <= 0)
+        err_invalid_index (idx2 - 1, 2, 2);
+
+      // Arguments are one-indexed but checked_full_matrix_elem() is 0-indexed.
+      octave_value ans = mat.checked_full_matrix_elem (idx1 - 1, idx2 - 1);
+      STACK_DESTROY (3);
+      PUSH_OV (std::move (ans));
+    }
+  CATCH_INTERRUPT_EXCEPTION
+  CATCH_INDEX_EXCEPTION_WITH_NAME
+  CATCH_EXECUTION_EXCEPTION
+  CATCH_BAD_ALLOC
+  CATCH_EXIT_EXCEPTION
+}
+DISPATCH();
+
+index_math_ufun_id1:
+{
+  auto ufn = static_cast<octave_base_value::unary_mapper_t> (arg0);
+  ip++; // slot number ignored
+  ip++; // "n_args_on_stack" ignored. Always 1
+
+  // The object to index is before the arg on the stack
+  octave_value &arg = TOP_OV ();
+  octave_value &ov = SEC_OV ();
+
+  if (OCTAVE_UNLIKELY (arg.type_id () != m_scalar_typeid ||
+      !ov.is_function_cache ()))
+    {
+      ip -= 1; // Rewind ip to n_args_on_stack
+      arg0 = ip[-1]; // set arg0 to slot
+      goto index_math_ufun_id1_dispatch;
+    }
+
+  // We need to check so the user has not defined some function
+  // that overrides the builtin ones.
+  octave_function *fcn;
+  try
+    {
+      octave_fcn_cache &cache = REP (octave_fcn_cache, ov);
+      fcn = cache.get_cached_fcn (&sp[-1], &sp[0]); // sp[-1] is the arg, sp[0] is the stack end
+    }
+  CATCH_EXECUTION_EXCEPTION // parse errors might throw in classdefs
+
+  if (OCTAVE_UNLIKELY (!fcn->is_builtin_function ()))
+    {
+      ip -= 1; // Rewind ip to n_args_on_stack
+      arg0 = ip[-1]; // set arg0 to slot
+      goto index_math_ufun_id1_dispatch;
+    }
+
+  octave_scalar *ovb_arg = static_cast<octave_scalar*> (TOP_OVB ());
+
+  SEC_OV () = ovb_arg->octave_scalar::map (ufn);
+  STACK_DESTROY (1);
+}
+DISPATCH ();
+
+push_pi:
+// Specialization to push pi fast as a scalar.
+//
+// If the user have messed up 'pi' opcode PUSH_SLOT_NARGOUT1
+// is used instead.
+{
+  // The next instruction is the slot number
+  int slot = arg0;
+
+  octave_value &ov = bsp[slot].ov;
+  // If the slot value is not a function cache we do a
+  // PUSH_SLOT_NARGOUT1 which will most likely put a
+  // function cache in the slot (unless the user has done a
+  // "pi = 123;" or whatever).
+  if (OCTAVE_UNLIKELY (!ov.is_function_cache ()))
+    {
+      goto push_slot_nargout1;
+    }
+
+  // We need to check so the user has not defined some pi function
+  octave_function *fcn;
+  try
+    {
+      octave_fcn_cache &cache = REP (octave_fcn_cache, ov);
+      fcn = cache.get_cached_fcn_if_fresh ();
+      if (! fcn)
+        fcn = cache.get_cached_fcn (static_cast<octave_value*> (nullptr), static_cast<octave_value*> (nullptr));
+    }
+  CATCH_EXECUTION_EXCEPTION // parse errors might throw in classdefs
+
+  if (OCTAVE_UNLIKELY (fcn != m_pi_builtin_fn))
+    {
+      goto push_slot_nargout1;
+    }
+
+  // The user wants to push 3.1415...
+  PUSH_OV (ov_pi);
+}
+DISPATCH();
+
+push_i:
+// Specialization to push i (the imaginary unit) fast as a scalar.
+//
+// If the user use i as a variable opcode PUSH_SLOT_NARGOUT1
+// is used instead.
+{
+  int slot = arg0;
+
+  octave_value &ov = bsp[slot].ov;
+  // If the slot value is not a function cache we do a
+  // PUSH_SLOT_NARGOUT1 which will most likely put a
+  // function cache in the slot (unless the user has done a
+  // "i = 123;" or whatever).
+  if (OCTAVE_UNLIKELY (!ov.is_function_cache ()))
+    {
+      goto push_slot_nargout1;
+    }
+
+  // We need to check so the user has not defined some i function
+  octave_function *fcn;
+  try
+    {
+      octave_fcn_cache &cache = REP (octave_fcn_cache, ov);
+      fcn = cache.get_cached_fcn_if_fresh ();
+      if (! fcn)
+        fcn = cache.get_cached_fcn (static_cast<octave_value*> (nullptr), static_cast<octave_value*> (nullptr));
+    }
+  CATCH_EXECUTION_EXCEPTION // parse errors might throw in classdefs
+
+  if (OCTAVE_UNLIKELY (fcn != m_i_builtin_fn))
+    {
+      goto push_slot_nargout1;
+    }
+
+  // The user wants to push i ...
+  PUSH_OV (ov_i);
+}
+DISPATCH();
+
+push_e:
+// Specialization to push e fast as a scalar.
+//
+// If the user use 'e' as a variable opcode PUSH_SLOT_NARGOUT1
+// is used instead.
+{
+  int slot = arg0;
+
+  octave_value &ov = bsp[slot].ov;
+  // If the slot value is not a function cache we do a
+  // PUSH_SLOT_NARGOUT1 which will most likely put a
+  // function cache in the slot (unless the user has done a
+  // "e = 123;" or whatever).
+  if (OCTAVE_UNLIKELY (!ov.is_function_cache ()))
+    {
+      goto push_slot_nargout1;
+    }
+
+  // We need to check so the user has not defined some pi function
+  octave_function *fcn;
+  try
+    {
+      octave_fcn_cache &cache = REP (octave_fcn_cache, ov);
+      fcn = cache.get_cached_fcn_if_fresh ();
+      if (! fcn)
+        fcn = cache.get_cached_fcn (static_cast<octave_value*> (nullptr), static_cast<octave_value*> (nullptr));
+    }
+  CATCH_EXECUTION_EXCEPTION // parse errors might throw in classdefs
+
+  if (OCTAVE_UNLIKELY (fcn != m_e_builtin_fn))
+    {
+      goto push_slot_nargout1;
+    }
+
+  // The user wants to push e...
+  PUSH_OV (ov_e);
+}
+DISPATCH();
+
+  {
+    // TODO: Too much code. Should be broken out?
+
+    // Note: Beautifully interleaved if branches and goto labels
+    int nargout, slot;
+    bool specialization_ok;
+    if (0)
+      {
+index_idnx:
+        slot = arg0;
+        nargout = bsp[0].i;
+        specialization_ok = false;
+      }
+    else if (0)
+      {
+index_idn:
+        slot = arg0; // Needed if we need a function lookup
+        nargout = *ip++;
+        specialization_ok = false;
+      }
+    else if (0)
+      {
+index_id1:
+        slot = arg0;
+        nargout = 1;
+        specialization_ok = true;
+      }
+    else if (0)
+      {
+index_id_nargout0:
+        slot = arg0;
+        nargout = 0;
+        specialization_ok = false;
+      }
+    else
+      {
+index_math_ufun_id1_dispatch: // Escape dispatch for index_math_ufun_id1 specialization
+        slot = arg0;
+        nargout = 1;
+        specialization_ok = false;
+      }
+
+    int n_args_on_stack = *ip++;
+
+    // The object to index is before the args on the stack
+    octave_value &ov = (sp[-1 - n_args_on_stack]).ov;
+
+    switch (ov.vm_dispatch_call ())
+      {
+        case octave_base_value::vm_call_dispatch_type::OCT_SUBSREF:
+          {
+            // Make an ovl with the args
+            octave_value_list ovl;
+
+            // The operands are on the top of the stack
+            bool all_args_double = true;
+            for (int i = n_args_on_stack - 1; i >= 0; i--)
+              {
+                octave_value &arg = sp[-1 - i].ov;
+                int type = arg.type_id ();
+                if (type != m_scalar_typeid)
+                  all_args_double = false;
+
+                if (OCTAVE_UNLIKELY (type == m_cslist_typeid))
+                  ovl.append (arg.list_value ());
+                else
+                  ovl.append (arg); // TODO: copied, not moved
+              }
+
+            // If the ov is a "full matrix", i.e. based on octave_base_matrix,
+            // and the arguments are all scalar, we modify this opcode to a
+            // specialized opcode for matrix scalar indexing.
+            if (nargout == 1 && all_args_double && ov.is_full_num_matrix () && specialization_ok)
+              {
+                if (n_args_on_stack == 1)
+                  {
+                    ip -= 1;
+                    int wide_opcode_offset = slot < 256 ? 0 : -1; // If WIDE is used, we need to look further back
+
+                    CHECK (ip[-2 + wide_opcode_offset] == static_cast<unsigned char> (INSTR::INDEX_ID_NARGOUT1));
+                    ip[-2 + wide_opcode_offset] = static_cast<unsigned char> (INSTR::INDEX_ID1_MAT_1D);
+
+                    goto index_id1_mat_1d;
+                  }
+                else if (n_args_on_stack == 2)
+                  {
+                    ip -= 1;
+                    int wide_opcode_offset = slot < 256 ? 0 : -1; // If WIDE is used, we need to look further back
+
+                    CHECK (ip[-2 + wide_opcode_offset] == static_cast<unsigned char> (INSTR::INDEX_ID_NARGOUT1));
+                    ip[-2 + wide_opcode_offset] = static_cast<unsigned char> (INSTR::INDEX_ID1_MAT_2D);
+
+                    goto index_id1_mat_2d;
+                  }
+              }
+
+            octave_value_list retval;
+
+            CHECK_PANIC (! ov.is_function () || ov.is_classdef_meta ()); // TODO: Remove
+
+            try
+              {
+                m_tw->set_active_bytecode_ip (ip - code);
+                retval = ov.simple_subsref ('(', ovl, nargout);
+                ovl.clear ();
+              }
+            CATCH_INTERRUPT_EXCEPTION
+            CATCH_INDEX_EXCEPTION_WITH_NAME
+            CATCH_EXECUTION_EXCEPTION
+            CATCH_BAD_ALLOC
+            CATCH_EXIT_EXCEPTION
+
+            ov = octave_value ();
+
+            STACK_DESTROY (n_args_on_stack + 1);
+            EXPAND_CSLIST_PUSH_N_OVL_ELEMENTS_TO_STACK (retval, nargout);
+          }
+        break;
+
+        case octave_base_value::vm_call_dispatch_type::OCT_FN_LOOKUP:
+          {
+            // It is probably a function call
+            CHECK_PANIC (ov.is_nil ()); // TODO :Remove
+
+            // Put a function cache object in the slot and in the local ov
+            ov = octave_value (new octave_fcn_cache (name_data[slot]));
+            if (OCTAVE_UNLIKELY (bsp[slot].ov.is_ref ()))
+              bsp[slot].ov.ref_rep ()->set_value (ov);
+            else
+              bsp[slot].ov = ov;
+          }
+          // Fallthrough
+        case octave_base_value::vm_call_dispatch_type::OCT_CALL:
+        case octave_base_value::vm_call_dispatch_type::OCT_HANDLE:
+        case octave_base_value::vm_call_dispatch_type::OCT_OBJECT:
+          {
+            CHECK_PANIC (ov.has_function_cache ()); // TODO :Remove
+
+            octave_function *fcn;
+            try
+              {
+                stack_element *first_arg = &sp[-n_args_on_stack];
+                stack_element *end_arg = &sp[0];
+                fcn = ov.get_cached_fcn (first_arg, end_arg);
+              }
+            CATCH_EXECUTION_EXCEPTION // parse errors might throw in classdefs
+
+            if (! fcn)
+              {
+                (*sp++).ps = new std::string {name_data[slot]};
+                (*sp++).i = static_cast<int> (error_type::ID_UNDEFINED);
+                goto unwind;
+              }
+            else if (fcn->is_compiled ())
+              {
+                octave_user_code *usr_fcn = static_cast<octave_user_code *> (fcn);
+
+                // Alot of code in this define
+                int caller_nvalback = nargout; // Caller wants as many values returned as it wants the callee to produce
+                MAKE_BYTECODE_CALL
+
+                // Now dispatch to first instruction in the
+                // called function
+              }
+            else
+              {
+                try
+                  {
+                    octave_value_list ovl;// = octave_value_list::make_ovl_from_stack_range (sp - n_args_on_stack, sp);
+                    //sp = sp - n_args_on_stack;
+                    // The operands are on the top of the stack
+                    POP_STACK_RANGE_TO_OVL (ovl, sp - n_args_on_stack, sp);
+
+                    m_tw->set_active_bytecode_ip (ip - code);
+                    octave_value_list ret = fcn->call (*m_tw, nargout, ovl);
+
+                    STACK_DESTROY (1);
+                    EXPAND_CSLIST_PUSH_N_OVL_ELEMENTS_TO_STACK (ret, nargout);
+                  }
+                CATCH_INTERRUPT_EXCEPTION
+                CATCH_INDEX_EXCEPTION
+                CATCH_EXECUTION_EXCEPTION
+                CATCH_BAD_ALLOC
+                CATCH_EXIT_EXCEPTION
+              }
+          }
+        break;
+
+        case octave_base_value::vm_call_dispatch_type::OCT_NESTED_HANDLE:
+          {
+            (*sp++).i = n_args_on_stack;
+            (*sp++).i = nargout;
+            (*sp++).i = nargout; // "caller_nvalback". Caller wants as many values returned as it wants the callee to produce
+            (*sp++).i = slot;
+            goto make_nested_handle_call;
+          }
+      }
+  }
+  DISPATCH ();
+
+push_slot_indexed:
+  {
+    // The next instruction is the slot number
+    int slot = arg0;
+    octave_value &ov = bsp[slot].ov;
+
+    // Unlike push_slot this can't be a command function call
+    // so we don't need to check if this is a function.
+
+    // Push the value in the slot to the stack
+    if (OCTAVE_LIKELY (!ov.is_ref ()))
+      PUSH_OV (ov);
+    else
+      PUSH_OV (ov.ref_rep ()->deref ()); // global, persistent ... need dereferencing
+
+  }
+  DISPATCH();
+
+pow_dbl:
+  MAKE_BINOP_SPECIALIZED (m_fn_dbl_pow, pow, POW, m_scalar_typeid)
+  DISPATCH_1BYTEOP();
+pow:
+  MAKE_BINOP_SELFMODIFYING(binary_op::op_pow, pow_dbl, POW_DBL)
+  DISPATCH_1BYTEOP();
+ldiv:
+  MAKE_BINOP(binary_op::op_ldiv)
+  DISPATCH_1BYTEOP();
+el_mul:
+  MAKE_BINOP(binary_op::op_el_mul)
+  DISPATCH_1BYTEOP();
+el_div:
+  MAKE_BINOP(binary_op::op_el_div)
+  DISPATCH_1BYTEOP();
+el_pow:
+  MAKE_BINOP(binary_op::op_el_pow)
+  DISPATCH_1BYTEOP();
+el_and:
+  MAKE_BINOP(binary_op::op_el_and)
+  DISPATCH_1BYTEOP();
+el_or:
+  MAKE_BINOP(binary_op::op_el_or)
+  DISPATCH_1BYTEOP();
+el_ldiv:
+  MAKE_BINOP(binary_op::op_el_ldiv)
+  DISPATCH_1BYTEOP();
+
+not_dbl:
+MAKE_UNOP_SPECIALIZED (m_fn_dbl_not, op_not, NOT, m_scalar_typeid);
+DISPATCH_1BYTEOP ();
+
+not_bool:
+MAKE_UNOP_SPECIALIZED (m_fn_bool_not, op_not, NOT, m_bool_typeid);
+DISPATCH_1BYTEOP ();
+
+op_not:
+  {
+    octave_value &ov = TOP_OV ();
+
+    int type_id = ov.type_id ();
+    if (OCTAVE_UNLIKELY (type_id == m_scalar_typeid))
+      {
+        // Change the generic opcode to the specialized one
+        ip[-2] = static_cast<unsigned char> (INSTR::NOT_DBL);
+        goto not_dbl;
+      }
+    else if (OCTAVE_UNLIKELY (type_id == m_bool_typeid))
+      {
+        // Change the generic opcode to the specialized one
+        ip[-2] = static_cast<unsigned char> (INSTR::NOT_BOOL);
+        goto not_bool;
+      }
+
+    try
+      {
+        octave_value ans = unary_op (*m_ti, octave_value::unary_op::op_not,
+                                     ov);
+        ov.~octave_value ();
+
+        STACK_SHRINK (1);
+
+        new (sp++) octave_value (std::move (ans));
+      }
+    CATCH_INTERRUPT_EXCEPTION
+    CATCH_INDEX_EXCEPTION
+    CATCH_EXECUTION_EXCEPTION
+    CATCH_BAD_ALLOC
+    CATCH_EXIT_EXCEPTION
+  }
+  DISPATCH_1BYTEOP();
+uadd:
+  {
+    octave_value &ov = TOP_OV ();
+
+    try
+      {
+        octave_value ans = unary_op (*m_ti, octave_value::unary_op::op_uplus,
+                                     ov);
+        ov.~octave_value ();
+
+        STACK_SHRINK (1);
+
+        new (sp++) octave_value (std::move (ans));
+      }
+    CATCH_INTERRUPT_EXCEPTION
+    CATCH_INDEX_EXCEPTION
+    CATCH_EXECUTION_EXCEPTION
+    CATCH_BAD_ALLOC
+    CATCH_EXIT_EXCEPTION
+  }
+  DISPATCH_1BYTEOP();
+
+usub_dbl:
+MAKE_UNOP_SPECIALIZED (m_fn_dbl_usub, usub, USUB, m_scalar_typeid);
+DISPATCH_1BYTEOP ();
+usub:
+  {
+    octave_value &ov = TOP_OV ();
+
+    if (OCTAVE_UNLIKELY (ov.type_id () == m_scalar_typeid))
+      {
+        // Change the generic opcode to the specialized one
+        ip[-2] = static_cast<unsigned char> (INSTR::USUB_DBL);
+        goto usub_dbl;
+      }
+
+    try
+      {
+        octave_value ans = unary_op (*m_ti, octave_value::unary_op::op_uminus,
+                                     ov);
+        ov.~octave_value ();
+
+        STACK_SHRINK (1);
+
+        new (sp++) octave_value (std::move (ans));
+      }
+    CATCH_INTERRUPT_EXCEPTION
+    CATCH_INDEX_EXCEPTION
+    CATCH_EXECUTION_EXCEPTION
+    CATCH_BAD_ALLOC
+    CATCH_EXIT_EXCEPTION
+  }
+  DISPATCH_1BYTEOP();
+trans:
+  {
+    octave_value &ov = TOP_OV ();
+
+    try
+      {
+        octave_value ans = unary_op (*m_ti,
+                                     octave_value::unary_op::op_transpose,
+                                     ov);
+        ov.~octave_value ();
+
+        STACK_SHRINK (1);
+
+        new (sp++) octave_value (std::move (ans));
+      }
+    CATCH_INTERRUPT_EXCEPTION
+    CATCH_INDEX_EXCEPTION
+    CATCH_EXECUTION_EXCEPTION
+    CATCH_BAD_ALLOC
+    CATCH_EXIT_EXCEPTION
+  }
+  DISPATCH_1BYTEOP();
+herm:
+  {
+    octave_value &ov = TOP_OV ();
+
+    try
+      {
+        octave_value ans = unary_op (*m_ti,
+                                     octave_value::unary_op::op_hermitian,
+                                     ov);
+        ov.~octave_value ();
+
+        STACK_SHRINK (1);
+
+        new (sp++) octave_value (std::move (ans));
+      }
+    CATCH_INTERRUPT_EXCEPTION
+    CATCH_INDEX_EXCEPTION
+    CATCH_EXECUTION_EXCEPTION
+    CATCH_BAD_ALLOC
+    CATCH_EXIT_EXCEPTION
+  }
+  DISPATCH_1BYTEOP();
+
+incr_id_prefix_dbl:
+  {
+    int slot = arg0;
+
+    octave_value &ov = bsp[slot].ov;
+
+    if (ov.type_id () != m_scalar_typeid)
+      {
+        int wide_opcode_offset = slot < 256 ? 0 : -1; // If WIDE is used, we need to look further back
+        // Change the specialized opcode to the generic one
+        ip[-2 + wide_opcode_offset] = static_cast<unsigned char> (INSTR::INCR_ID_PREFIX);
+        goto incr_id_prefix;
+      }
+
+    octave_scalar &scalar = REP (octave_scalar, ov);
+    double val = scalar.octave_scalar::double_value ();
+
+    if (!scalar.octave_scalar::maybe_update_double (val + 1))
+      ov = octave_value_factory::make (val + 1);
+
+    PUSH_OV (ov);
+  }
+  DISPATCH();
+incr_id_prefix:
+  {
+    int slot = arg0;
+
+    octave_value &ov = bsp[slot].ov;
+
+    if (ov.type_id () == m_scalar_typeid)
+      {
+        int wide_opcode_offset = slot < 256 ? 0 : -1; // If WIDE is used, we need to look further back
+        // Change the generic opcode to the specialized one
+        ip[-2 + wide_opcode_offset] = static_cast<unsigned char> (INSTR::INCR_ID_PREFIX_DBL);
+        goto incr_id_prefix_dbl;
+      }
+
+    try
+      {
+        if (OCTAVE_LIKELY (!ov.is_ref ()))
+          {
+            ov.non_const_unary_op (octave_value::unary_op::op_incr);
+            PUSH_OV (ov);
+          }
+        else
+          {
+            octave_value &ov_glb = ov.ref_rep ()->ref ();
+            ov_glb.non_const_unary_op (octave_value::unary_op::op_incr);
+            PUSH_OV (ov_glb);
+          }
+      }
+    CATCH_INTERRUPT_EXCEPTION
+    CATCH_INDEX_EXCEPTION
+    CATCH_EXECUTION_EXCEPTION
+    CATCH_BAD_ALLOC
+    CATCH_EXIT_EXCEPTION
+  }
+  DISPATCH();
+
+decr_id_prefix_dbl:
+  {
+    int slot = arg0;
+
+    octave_value &ov = bsp[slot].ov;
+
+    if (ov.type_id () != m_scalar_typeid)
+      {
+        int wide_opcode_offset = slot < 256 ? 0 : -1; // If WIDE is used, we need to look further back
+        ip[-2 + wide_opcode_offset] = static_cast<unsigned char> (INSTR::DECR_ID_PREFIX);
+        goto decr_id_prefix;
+      }
+
+    octave_scalar &scalar = REP (octave_scalar, ov);
+    double val = scalar.octave_scalar::double_value ();
+
+    if (!scalar.octave_scalar::maybe_update_double (val - 1))
+      ov = octave_value_factory::make (val - 1);
+
+    PUSH_OV (ov);
+  }
+  DISPATCH();
+decr_id_prefix:
+  {
+    int slot = arg0;
+
+    octave_value &ov = bsp[slot].ov;
+
+    if (ov.type_id () == m_scalar_typeid)
+      {
+        int wide_opcode_offset = slot < 256 ? 0 : -1; // If WIDE is used, we need to look further back
+        ip[-2 + wide_opcode_offset] = static_cast<unsigned char> (INSTR::DECR_ID_PREFIX_DBL);
+        goto decr_id_prefix_dbl;
+      }
+
+    try
+      {
+        if (OCTAVE_LIKELY (!ov.is_ref ()))
+          {
+            ov.non_const_unary_op (octave_value::unary_op::op_decr);
+            PUSH_OV (ov);
+          }
+        else
+          {
+            octave_value &ov_glb = ov.ref_rep ()->ref ();
+            ov_glb.non_const_unary_op (octave_value::unary_op::op_decr);
+            PUSH_OV (ov_glb);
+          }
+      }
+    CATCH_INTERRUPT_EXCEPTION
+    CATCH_INDEX_EXCEPTION
+    CATCH_EXECUTION_EXCEPTION
+    CATCH_BAD_ALLOC
+    CATCH_EXIT_EXCEPTION
+  }
+  DISPATCH();
+incr_id_postfix_dbl:
+  {
+    int slot = arg0;
+
+    octave_value &ov = bsp[slot].ov;
+
+    if (ov.type_id () != m_scalar_typeid)
+      {
+        int wide_opcode_offset = slot < 256 ? 0 : -1; // If WIDE is used, we need to look further back
+        ip[-2 + wide_opcode_offset] = static_cast<unsigned char> (INSTR::INCR_ID_POSTFIX);
+        goto incr_id_postfix;
+      }
+
+    octave_scalar &scalar = REP (octave_scalar, ov);
+    double val = scalar.octave_scalar::double_value ();
+
+    PUSH_OV (std::move (ov));
+    ov = octave_value_factory::make (val + 1);
+  }
+  DISPATCH();
+incr_id_postfix:
+  {
+    int slot = arg0;
+
+    octave_value &ov = bsp[slot].ov;
+
+    if (ov.type_id () == m_scalar_typeid)
+      {
+        int wide_opcode_offset = slot < 256 ? 0 : -1; // If WIDE is used, we need to look further back
+        ip[-2 + wide_opcode_offset] = static_cast<unsigned char> (INSTR::INCR_ID_POSTFIX_DBL);
+        goto incr_id_postfix_dbl;
+      }
+
+    try
+      {
+        if (OCTAVE_LIKELY (!ov.is_ref ()))
+          {
+            octave_value copy = ov;
+            ov.non_const_unary_op (octave_value::unary_op::op_incr);
+            PUSH_OV (std::move (copy));
+          }
+        else
+          {
+            octave_value &ov_glb = ov.ref_rep ()->ref ();
+            octave_value copy = ov_glb;
+            ov_glb.non_const_unary_op (octave_value::unary_op::op_incr);
+            PUSH_OV (std::move (copy));
+          }
+      }
+    CATCH_INTERRUPT_EXCEPTION
+    CATCH_INDEX_EXCEPTION
+    CATCH_EXECUTION_EXCEPTION
+    CATCH_BAD_ALLOC
+    CATCH_EXIT_EXCEPTION
+  }
+  DISPATCH();
+decr_id_postfix_dbl:
+  {
+    int slot = arg0;
+
+    octave_value &ov = bsp[slot].ov;
+
+    if (ov.type_id () != m_scalar_typeid)
+      {
+        int wide_opcode_offset = slot < 256 ? 0 : -1; // If WIDE is used, we need to look further back
+        ip[-2 + wide_opcode_offset] = static_cast<unsigned char> (INSTR::DECR_ID_POSTFIX);
+        goto decr_id_postfix;
+      }
+
+    octave_scalar &scalar = REP (octave_scalar, ov);
+    double val = scalar.octave_scalar::double_value ();
+
+    PUSH_OV (std::move (ov));
+    ov = octave_value_factory::make (val - 1);
+  }
+  DISPATCH();
+decr_id_postfix:
+  {
+    int slot = arg0;
+
+    octave_value &ov = bsp[slot].ov;
+
+    if (ov.type_id () == m_scalar_typeid)
+      {
+        int wide_opcode_offset = slot < 256 ? 0 : -1; // If WIDE is used, we need to look further back
+        ip[-2 + wide_opcode_offset] = static_cast<unsigned char> (INSTR::DECR_ID_POSTFIX_DBL);
+        goto decr_id_postfix_dbl;
+      }
+
+    try
+      {
+        if (OCTAVE_LIKELY (!ov.is_ref ()))
+          {
+            octave_value copy = ov;
+            ov.non_const_unary_op (octave_value::unary_op::op_decr);
+            PUSH_OV (std::move (copy));
+          }
+        else
+          {
+            octave_value &ov_glb = ov.ref_rep ()->ref ();
+            octave_value copy = ov_glb;
+            ov_glb.non_const_unary_op (octave_value::unary_op::op_decr);
+            PUSH_OV (std::move (copy));
+          }
+      }
+    CATCH_INTERRUPT_EXCEPTION
+    CATCH_INDEX_EXCEPTION
+    CATCH_EXECUTION_EXCEPTION
+    CATCH_BAD_ALLOC
+    CATCH_EXIT_EXCEPTION
+  }
+  DISPATCH();
+for_setup:
+  {
+    octave_value &ov_range = TOP_OV ();
+
+    octave_idx_type n = ov_range.numel();
+
+    bool is_range = ov_range.is_range ();
+    //TODO: Kludge galore. Should be refactored into some virtual call.
+    if (is_range &&
+        (
+         ov_range.is_double_type () ||
+         ov_range.is_int64_type () ||
+         ov_range.is_uint64_type () ||
+         ov_range.is_int32_type () ||
+         ov_range.is_uint32_type () ||
+         ov_range.is_int16_type () ||
+         ov_range.is_uint16_type () ||
+         ov_range.is_int16_type () ||
+         ov_range.is_int8_type () ||
+         ov_range.is_int8_type () ||
+         ov_range.is_uint8_type () ||
+         ov_range.is_single_type()))
+      {
+        ov_range = ov_range.maybe_as_trivial_range ();
+      }
+    else if (is_range ||
+             ov_range.is_matrix_type () ||
+             ov_range.iscell () ||
+             ov_range.is_string () ||
+             ov_range.isstruct ())
+      {
+        // The iteration is column wise for these, so change
+        // n to the amount of columns rather then elements.
+        dim_vector dv = ov_range.dims ().redim (2);
+        n = dv(1);
+        if (ov_range.ndims () > 2)
+          ov_range = ov_range.reshape (dv);
+      }
+    else if (ov_range.is_scalar_type () || ov_range.is_undefined ())
+      ;
+    else
+      TODO ("Unsupported for rhs type");
+
+    // TODO: Kludgy classes.
+
+    if (!ov_range.is_trivial_range () && is_range)
+      {
+        // TODO: Wasteful copy of range.
+        auto range = ov_range.range_value ();
+        if (math::isinf (range.limit ()) || math::isinf (range.base ()))
+            warning_with_id ("Octave:infinite-loop",
+                     "FOR loop limit is infinite, will stop after %"
+                     OCTAVE_IDX_TYPE_FORMAT " steps", range.numel ());
+      }
+
+
+    // Push n to the stack
+    (*sp++).i = n;
+    // Push a counter to the stack, initialized so that it will
+    // increment to 0.
+    (*sp++).i = -1;
+
+    // For empty rhs just assign it to lhs
+    if (! n && ov_range.is_defined ())
+      {
+        // Slot from the for_cond that always follow a for_setup
+        int slot;
+        // The next opcode is in arg0, and is either WIDE or FOR_COND
+        if (arg0 == static_cast<int> (INSTR::WIDE))
+          {
+            // Byte layout: ip[-2]:FOR_SETUP, ip[-1]:WIDE, ip[0]:FOR_COND, ip[1:2]:wide slot
+            slot = USHORT_FROM_UCHAR_PTR (ip + 1);
+          }
+        else
+          {
+            // Byte layout: ip[-2]:FOR_SETUP, ip[-1]:FOR_COND, ip[0]:slot
+            slot = ip[0];
+          }
+        try
+        {
+          octave_value &lhs_ov = bsp[slot].ov;
+          if (!lhs_ov.is_ref ())
+            lhs_ov = ov_range.storable_value ();
+          else
+            lhs_ov.ref_rep ()->set_value (ov_range.storable_value ());
+        }
+        CATCH_EXECUTION_EXCEPTION
+      }
+  }
+DISPATCH_1BYTEOP ();
+
+for_cond:
+  {
+    // Check if we should exit the loop due to e.g. ctrl-c, or handle
+    // any other signals.
+    try
+      {
+        octave_quit ();
+      }
+    CATCH_INTERRUPT_EXCEPTION
+    CATCH_INDEX_EXCEPTION
+    CATCH_EXECUTION_EXCEPTION
+    CATCH_BAD_ALLOC
+    CATCH_EXIT_EXCEPTION
+
+    // Increase counter
+    TOP ().i++; // Wraps to zero first iteration
+
+    // Check if we done all iterations
+    // n is second element on the stack
+    if (TOP ().i == SEC ().i)
+      {
+        // The after address
+        unsigned char b0 = *ip++;
+        unsigned char b1 = *ip++;
+
+        int after = USHORT_FROM_UCHARS (b0, b1);
+
+        // goto after block
+        ip = code + after;
+      }
+    else
+      {
+        // Write the iteration's value to the for loop variable
+        int slot = arg0;
+        ip +=2; // Skip the after address
+
+        octave_idx_type counter = TOP ().i;
+
+        octave_value &ov_range = THIRD_OV ();
+        octave_value &ov_it = bsp[slot].ov;
+
+        if (ov_range.is_trivial_range ())
+          {
+            double val = REP (octave_trivial_range, ov_range).octave_trivial_range::vm_extract_forloop_double (counter);
+            if (!ov_it.maybe_update_double (val))
+              {
+                if (OCTAVE_LIKELY (!ov_it.is_ref ()))
+                  ov_it = octave_value_factory::make (val);
+                else
+                  ov_it.ref_rep ()->set_value (val);
+              }
+          }
+        else if (OCTAVE_LIKELY (!ov_it.is_ref ()))
+          ov_it = ov_range.vm_extract_forloop_value (counter);
+        else
+          ov_it.ref_rep ()->set_value (ov_range.vm_extract_forloop_value (counter));
+
+        // The next opcode is the start of the body
+      }
+  }
+  DISPATCH ();
+pop_n_ints:
+  {
+    sp -= arg0;
+    DISPATCH();
+  }
+push_fcn_handle:
+  {
+    int slot = arg0;
+
+    //octave_value &fcn_cache = bsp[slot].ov;
+
+    std::string handle_name = name_data[slot];
+
+    if (!handle_name.size () || handle_name[0] != '@')
+      TODO ("Strange handle name");
+
+    handle_name = handle_name.substr(1);
+
+    octave_value fcn_handle = m_tw->make_fcn_handle (handle_name);
+
+    PUSH_OV (std::move (fcn_handle));
+  }
+  DISPATCH ();
+colon:
+  {
+    bool is_for_cmd;
+
+    // Yes, we are doing this
+    if (0)
+      {
+colon_cmd:
+        is_for_cmd = true;
+      }
+    else
+      {
+        is_for_cmd = false;
+      }
+
+    bool has_incr = false;
+    if (ip[-2] == static_cast<int> (INSTR::COLON3) ||
+        ip[-2] == static_cast<int> (INSTR::COLON3_CMD))
+      has_incr = true;
+
+    octave_value ret;
+
+    if (has_incr)
+      {
+        octave_value &base = THIRD_OV ();
+        octave_value &incr = SEC_OV ();
+        octave_value &limit = TOP_OV ();
+
+        try
+        {
+          ret = colon_op(base, incr, limit, is_for_cmd);
+        }
+        CATCH_INTERRUPT_EXCEPTION
+        CATCH_INDEX_EXCEPTION
+        CATCH_EXECUTION_EXCEPTION
+
+        STACK_DESTROY (3);
+      }
+    else
+      {
+        octave_value &base = SEC_OV ();
+        octave_value &limit = TOP_OV ();
+
+        try
+        {
+          ret = colon_op(base, limit, is_for_cmd);
+        }
+        CATCH_INTERRUPT_EXCEPTION
+        CATCH_INDEX_EXCEPTION
+        CATCH_EXECUTION_EXCEPTION
+
+        STACK_DESTROY (2);
+      }
+
+    PUSH_OV (std::move (ret));
+  }
+  DISPATCH_1BYTEOP ();
+
+push_true:
+  {
+    PUSH_OV(ov_true);
+  }
+  DISPATCH_1BYTEOP ();
+push_false:
+  {
+    PUSH_OV(ov_false);
+  }
+  DISPATCH_1BYTEOP ();
+unary_true:
+  {
+    octave_value &op1 = TOP_OV ();
+
+    bool is_true;
+
+    try
+      {
+        is_true = op1.is_true ();
+      }
+    CATCH_INTERRUPT_EXCEPTION
+    CATCH_INDEX_EXCEPTION
+    CATCH_EXECUTION_EXCEPTION
+    CATCH_BAD_ALLOC
+    CATCH_EXIT_EXCEPTION
+
+    STACK_DESTROY (1);
+
+    if (is_true)
+      PUSH_OV (ov_true);
+    else
+      PUSH_OV (ov_false);
+
+  }
+  DISPATCH_1BYTEOP ();
+assign_n:
+  {
+    int n_slots = arg0;
+
+    int n_actual = 0;
+    do
+      {
+        // Move operand to the local at slot in relation to base stack pointer
+
+        octave_value &arg = (*--sp).ov;
+        int slot = POP_CODE_USHORT ();
+        octave_value &lhs_ov = bsp[slot].ov;
+
+
+        /* Expand cs_lists */
+        if (arg.is_cs_list ())
+          {
+            octave_value_list args = arg.list_value ();
+            for (int i = 0; i < args.length (); i++)
+              {
+                octave_value &ov_1 = args (i);
+
+                lhs_ov.maybe_call_dtor ();
+
+                if (ov_1.vm_need_storable_call ())
+                  ov_1.make_storable_value (); // Some types have lazy copy
+
+                if (ov_1.is_undefined ())
+                  {
+                    std::string &name = name_data[slot];
+
+                    // If the return value is ignored, undefined is OK
+                    bool is_ignored = false;
+                    if (name.size () >= 2 && name[0] == '%' && name[1] == '~')
+                      is_ignored = true;
+
+                    Matrix ignored;
+                    octave_value tmp = m_tw->get_auto_fcn_var (stack_frame::auto_var_type::IGNORED);
+                    if (tmp.is_defined ())
+                      {
+                        ignored = tmp.matrix_value ();
+
+                        int n_returns = N_RETURNS ();
+                        if (n_returns == -128)
+                          n_returns = 1;
+                        else if (n_returns < 0)
+                          n_returns = -n_returns;
+
+                        if (slot < n_returns)
+                          {
+                            int outputnum = n_returns - 1 - slot;
+
+                            octave_idx_type idx = ignored.lookup (outputnum);
+                            is_ignored = idx > 0 && ignored (idx - 1) == outputnum;
+                          }
+                      }
+
+                    if (!is_ignored)
+                      {
+                        (*sp++).pee = new execution_exception {"error", "", "element number " + std::to_string (n_actual + 1) + " undefined in return list"};
+                        (*sp++).i = static_cast<int> (error_type::EXECUTION_EXC);
+                        goto unwind;
+                      }
+                  }
+
+                if (OCTAVE_LIKELY (!lhs_ov.is_ref ()))
+                  lhs_ov = std::move (ov_1); // Note move
+                else
+                  lhs_ov.ref_rep ()->set_value (ov_1);
+                n_actual++;
+              }
+          }
+        else
+          {
+            lhs_ov.maybe_call_dtor ();
+
+            if (arg.vm_need_storable_call ())
+              arg.make_storable_value (); // Some types have lazy copy
+
+            if (arg.is_undefined ())
+              {
+                std::string &name = name_data[slot];
+
+                // If the return value is ignored, undefined is OK
+                bool is_ignored = false;
+                if (name.size () >= 2 && name[0] == '%' && name[1] == '~')
+                  is_ignored = true;
+
+                Matrix ignored;
+                octave_value tmp = m_tw->get_auto_fcn_var (stack_frame::auto_var_type::IGNORED);
+                if (tmp.is_defined ())
+                  {
+                    ignored = tmp.matrix_value ();
+
+                    int n_returns = N_RETURNS ();
+                    if (n_returns == -128)
+                      n_returns = 1;
+                    else if (n_returns < 0)
+                      n_returns = -n_returns;
+
+                    if (slot < n_returns)
+                      {
+                        int outputnum = n_returns - 1 - slot;
+
+                        octave_idx_type idx = ignored.lookup (outputnum);
+                        is_ignored = idx > 0 && ignored (idx - 1) == outputnum;
+                      }
+                  }
+                if (!is_ignored)
+                  {
+                    (*sp++).pee = new execution_exception {"error", "", "element number " + std::to_string (n_actual + 1) + " undefined in return list"};
+                    (*sp++).i = static_cast<int> (error_type::EXECUTION_EXC);
+                    goto unwind;
+                  }
+              }
+
+            if (OCTAVE_LIKELY (!lhs_ov.is_ref ()))
+              lhs_ov = std::move (arg); // Note move
+            else
+              lhs_ov.ref_rep ()->set_value (arg);
+
+            n_actual++;
+          }
+
+          arg.~octave_value (); // Destroy the operand
+      }
+    while (n_actual < n_slots);
+  }
+  DISPATCH ();
+
+subassign_id_mat_2d:
+{
+  int slot = arg0;
+  ip++; // nargs always two
+
+  // The top of the stack is the rhs value
+  octave_value &rhs = TOP_OV ();
+  octave_value &arg2 = SEC_OV ();
+  octave_value &arg1 = THIRD_OV ();
+  // The ov to subassign to
+  octave_value &mat_ov = bsp[slot].ov;
+
+  int rhs_type_id = rhs.type_id ();
+  int arg1_type_id = arg1.type_id ();
+  int arg2_type_id = arg2.type_id ();
+  int mat_type_id = mat_ov.type_id ();
+
+  if (rhs_type_id != m_scalar_typeid || mat_type_id != m_matrix_typeid ||
+    arg2_type_id != m_scalar_typeid || arg1_type_id != arg2_type_id)
+  {
+    // Rewind ip to the 2nd byte of the opcode
+    ip -= 1;
+    int wide_opcode_offset = slot < 256 ? 0 : -1; // If WIDE is used, we need to look further back
+    // Change the specialized opcode to the general one
+    ip[-2 + wide_opcode_offset] = static_cast<unsigned char> (INSTR::SUBASSIGN_ID);
+    goto subassign_id;
+  }
+
+  try
+    {
+      mat_ov.make_unique ();
+
+      octave_scalar &rhs_scalar = REP (octave_scalar, rhs);
+      octave_scalar &arg1_scalar = REP (octave_scalar, arg1);
+      octave_scalar &arg2_scalar = REP (octave_scalar, arg2);
+
+      double idx2_dbl = arg2_scalar.octave_scalar::double_value ();
+      octave_idx_type idx2 = idx2_dbl - 1;
+      double idx1_dbl = arg1_scalar.octave_scalar::double_value ();
+      octave_idx_type idx1 = idx1_dbl - 1;
+      double val = rhs_scalar.octave_scalar::double_value ();
+
+      octave_matrix &mat_ovb = REP (octave_matrix, mat_ov);
+      NDArray &arr = mat_ovb.matrix_ref ();
+      // Handle out-of-bound or non-integer index in the generic opcode
+      if (idx1 >= arr.rows () || idx1 < 0 ||
+          idx1 != idx1_dbl - 1)
+        {
+          // Rewind ip to the 2nd byte of the opcode
+          ip -= 1;
+          goto subassign_id;
+        }
+      if (idx2 >= arr.cols () || idx2 < 0 ||
+          idx2 != idx2_dbl - 1)
+        {
+          // Rewind ip to the 2nd byte of the opcode
+          ip -= 1;
+          goto subassign_id;
+        }
+      if (arr.dims ().ndims () != 2)
+        {
+          // Rewind ip to the 2nd byte of the opcode
+          ip -= 1;
+          goto subassign_id;
+        }
+
+      // The NDArray got its own m_rep that might be shared
+      arr.make_unique ();
+
+      arr.xelem (idx1, idx2) = val;
+    }
+    CATCH_INTERRUPT_EXCEPTION
+    CATCH_INDEX_EXCEPTION_WITH_NAME
+    CATCH_EXECUTION_EXCEPTION
+    CATCH_BAD_ALLOC
+    CATCH_EXIT_EXCEPTION
+
+  STACK_DESTROY (3);
+}
+DISPATCH ();
+
+subassign_id_mat_1d:
+{
+  int slot = arg0;
+  ip++; // nargs always one
+
+  // The top of the stack is the rhs value
+  octave_value &rhs = TOP_OV ();
+  octave_value &arg = SEC_OV ();
+  // The ov to subassign to
+  octave_value &mat_ov = bsp[slot].ov;
+
+  int rhs_type_id = rhs.type_id ();
+  int arg_type_id = arg.type_id ();
+  int mat_type_id = mat_ov.type_id ();
+
+  if (rhs_type_id != m_scalar_typeid || mat_type_id != m_matrix_typeid ||
+    arg_type_id != m_scalar_typeid)
+  {
+    // Rewind ip to the 2nd byte of the opcode
+    ip -= 1;
+    int wide_opcode_offset = slot < 256 ? 0 : -1; // If WIDE is used, we need to look further back
+    // Change the specialized opcode to the general one
+    ip[-2 + wide_opcode_offset] = static_cast<unsigned char> (INSTR::SUBASSIGN_ID);
+    goto subassign_id;
+  }
+
+  try
+    {
+      mat_ov.make_unique ();
+
+      octave_scalar &rhs_scalar = REP (octave_scalar, rhs);
+      octave_scalar &arg_scalar = REP (octave_scalar, arg);
+
+      double idx_dbl = arg_scalar.octave_scalar::double_value ();
+      octave_idx_type idx = idx_dbl - 1;
+      double val = rhs_scalar.octave_scalar::double_value ();
+
+      octave_matrix &mat_ovb = REP (octave_matrix, mat_ov);
+      NDArray &arr = mat_ovb.matrix_ref ();
+      // Handle out-of-bound or non-integer index in the generic opcode
+      if (idx >= arr.numel () || idx < 0 ||
+          idx != idx_dbl - 1)
+        {
+          // Rewind ip to the 2nd byte of the opcode
+          ip -= 1;
+          goto subassign_id;
+        }
+
+      // The NDArray got its own m_rep that might be shared
+      arr.make_unique ();
+
+      arr.xelem (idx) = val;
+    }
+    CATCH_INTERRUPT_EXCEPTION
+    CATCH_INDEX_EXCEPTION_WITH_NAME
+    CATCH_EXECUTION_EXCEPTION
+    CATCH_BAD_ALLOC
+    CATCH_EXIT_EXCEPTION
+
+  STACK_DESTROY (2);
+}
+DISPATCH ();
+
+subassign_id:
+  {
+    // The args to the subassign are on the operand stack
+    int slot = arg0;
+    int nargs = *ip++;
+
+    // The top of the stack is the rhs value
+    octave_value &rhs = TOP_OV ();
+    // First argument
+    stack_element *parg = sp - 1 - nargs;
+
+    // Move the args to an ovl
+    // TODO: Should actually be a move
+    bool all_args_are_scalar = true;
+    octave_value_list args;
+    for (int i = 0; i < nargs; i++)
+    {
+      octave_value &arg = parg[i].ov;
+      // We need to expand cs-lists
+      if (arg.type_id () != m_scalar_typeid)
+        all_args_are_scalar = false;
+      if (arg.is_cs_list ())
+        args.append (arg.list_value ());
+      else
+        args.append (arg);
+    }
+
+    // The ov to subassign to
+    octave_value &ov = bsp[slot].ov;
+
+    if ((nargs == 1 || nargs == 2) && all_args_are_scalar && ov.type_id () == m_matrix_typeid &&
+        rhs.type_id () == m_scalar_typeid)
+      {
+        int wide_opcode_offset = slot < 256 ? 0 : -1; // If WIDE is used, we need to look further back
+
+        unsigned char opcode = nargs == 1 ? static_cast<unsigned char> (INSTR::SUBASSIGN_ID_MAT_1D) : static_cast<unsigned char> (INSTR::SUBASSIGN_ID_MAT_2D);
+
+        // If the opcode allready is SUBASSIGN_ID_MAT_1D we were sent back to
+        // SUBASSIGN_ID to handle some error or edgecase, so don't go back.
+        if ( ip[-3 + wide_opcode_offset] != opcode)
+          {
+            // Rewind ip to the 2nd byte of the opcode
+            ip -= 1;
+            // Change the general opcode to the specialized one
+            ip[-2 + wide_opcode_offset] = opcode;
+            if (nargs == 1)
+              goto subassign_id_mat_1d;
+            else
+              goto subassign_id_mat_2d;
+          }
+      }
+
+    // TODO: Room for performance improvement here maybe
+    if (OCTAVE_LIKELY (!ov.is_ref ()))
+      ov.make_unique ();
+    else
+      ov.ref_rep ()->ref ().make_unique ();
+
+    if (rhs.is_cs_list ())
+      {
+        const octave_value_list lst = rhs.list_value ();
+
+        if (lst.empty ())
+          {
+            // TODO: Need id, name
+            // TODO: Make execution_exception like the others instead of its own error_type
+            (*sp++).i = static_cast<int> (error_type::INVALID_N_EL_RHS_IN_ASSIGNMENT);
+            goto unwind;
+          }
+
+        rhs = lst(0);
+      }
+
+    // E.g. scalars do not update themselves in place
+    // but create a new octave_value, so we need to
+    // copy the return value to the slot.
+
+    try
+      {
+        ov = ov.simple_subsasgn('(', args, rhs);
+      }
+    CATCH_INTERRUPT_EXCEPTION
+    CATCH_INDEX_EXCEPTION_WITH_NAME
+    CATCH_EXECUTION_EXCEPTION
+    CATCH_BAD_ALLOC
+    CATCH_EXIT_EXCEPTION
+
+    // Destroy the args on the operand stack as well as rhs
+    STACK_DESTROY (nargs + 1);
+  }
+  DISPATCH ();
+
+end_id:
+  {
+    // Indexed variable
+    int slot = arg0;
+    // Amount of args to the index, i.e. amount of dimensions
+    // being indexed.
+    // E.g. foo (1,2,3) => 3
+    int nargs = *ip++;
+    // Index of the end, in the index, counting from 0.
+    // E.g. foo (1, end, 3) => 1
+    int idx = *ip++;
+
+    octave_value ov = bsp[slot].ov;
+
+    if (ov.is_ref ())
+      ov = ov.ref_rep ()->deref ();
+
+    if (ov.is_undefined ())
+      {
+        (*sp++).pee = new execution_exception {"error","","invalid use of 'end': may only be used to index existing value"};
+        (*sp++).i = static_cast<int> (error_type::EXECUTION_EXC);
+        goto unwind;
+      }
+
+    octave_value end_idx;
+    if (ov.isobject ())
+      {
+        try
+          {
+            end_idx = handle_object_end (ov, idx, nargs);
+          }
+        CATCH_INTERRUPT_EXCEPTION
+        CATCH_INDEX_EXCEPTION
+        CATCH_EXECUTION_EXCEPTION
+        CATCH_BAD_ALLOC
+        CATCH_EXIT_EXCEPTION
+      }
+    else
+      end_idx = octave_value (ov.end_index (idx, nargs));
+
+    PUSH_OV (std::move (end_idx));
+  }
+  DISPATCH ();
+end_obj:
+  {
+    // Slot that stores the stack depth of the indexed object
+    int slot = arg0;
+    // Amount of args to the index, i.e. amount of dimensions
+    // being indexed.
+    // E.g. foo (1,2,3) => 3
+    int nargs = *ip++;
+    // Index of the end, in the index, counting from 0.
+    // E.g. foo (1, end, 3) => 1
+    int idx = *ip++;
+
+    octave_value &stack_depth = bsp[slot].ov;
+    // Indexed object
+    octave_value &ov = bsp[stack_depth.int_value () - 1].ov;
+
+    if (ov.is_undefined ())
+      {
+        (*sp++).pee = new execution_exception {"error","","invalid use of 'end': may only be used to index existing value"};
+        (*sp++).i = static_cast<int> (error_type::EXECUTION_EXC);
+        goto unwind;
+      }
+
+    octave_value end_idx;
+    if (ov.isobject ())
+      {
+        try
+          {
+            end_idx = handle_object_end (ov, idx, nargs);
+          }
+        CATCH_INTERRUPT_EXCEPTION
+        CATCH_INDEX_EXCEPTION
+        CATCH_EXECUTION_EXCEPTION
+        CATCH_BAD_ALLOC
+        CATCH_EXIT_EXCEPTION
+      }
+    else
+      end_idx = octave_value (ov.end_index (idx, nargs));
+
+    PUSH_OV (std::move (end_idx));
+  }
+  DISPATCH ();
+
+end_x_n:
+  {
+    // Since 'end' in "foo (bar (1, end))" can refer
+    // to the end of 'foo' if 'bar' is a function we
+    // need to scan inner to outer after a defined
+    // object to find the end of.
+
+    int n_ids = arg0;
+    int i;
+
+    for (i = 0; i < n_ids;)
+      {
+        // Amount of args to the index, i.e. amount of dimensions
+        // being indexed.
+        // E.g. foo (1,2,3) => 3
+        int nargs = *ip++;
+        // Index of the end, in the index, counting from 0.
+        // E.g. foo (1, end, 3) => 1
+        int idx = *ip++;
+        // type 0: Like 'end_id:'
+        // type 1: Like 'end_obj:'
+        int type = *ip++;
+        // Slot that stores:
+        //    the object that is being indexed for type 0
+        //    the stack depth of the indexed object for type 1
+        int slot = POP_CODE_USHORT ();
+
+        octave_value ov = bsp[slot].ov;
+
+        if (ov.is_ref ())
+          ov = ov.ref_rep ()->deref ();
+
+        // If the type is 1, the ov in the slot is the stack depth
+        // of the object being indexed.
+        if (type == 1)
+          ov = bsp[ov.int_value () - 1].ov;
+
+        bool is_undef = ov.is_undefined ();
+
+        // If the outer most indexed object is not defined
+        // it is an error.
+        if (is_undef && i + 1 == n_ids)
+          {
+            (*sp++).pee = new execution_exception {"error","","invalid use of 'end': may only be used to index existing value"};
+            (*sp++).i = static_cast<int> (error_type::EXECUTION_EXC);
+            goto unwind;
+          }
+        else if (is_undef)
+          {
+            i++;
+            continue; // Look if the next outer object is defined.
+          }
+
+        octave_value end_idx;
+        if (ov.isobject ())
+          {
+            try
+              {
+                end_idx = handle_object_end (ov, idx, nargs);
+              }
+            CATCH_INTERRUPT_EXCEPTION
+            CATCH_INDEX_EXCEPTION
+            CATCH_EXECUTION_EXCEPTION
+            CATCH_BAD_ALLOC
+            CATCH_EXIT_EXCEPTION
+
+          }
+        else
+          end_idx = octave_value (ov.end_index (idx, nargs));
+
+        PUSH_OV (std::move (end_idx));
+        i++;
+        break;
+      }
+
+    // Skip any unread objects to index
+    for (; i < n_ids; i++)
+      ip += 5;
+  }
+  DISPATCH ();
+
+eval:
+  {
+    int nargout = arg0;
+    int tree_idx = POP_CODE_INT ();
+    CHECK (tree_idx < 0); // Should always be negative to mark for eval. Otherwise it is debug data
+
+    auto it = unwind_data->m_ip_to_tree.find (tree_idx);
+    CHECK (it != unwind_data->m_ip_to_tree.end ());
+
+    tree_expression *te = static_cast <tree_expression*> (it->second);
+
+    octave_value_list retval;
+    try
+    {
+      retval = te->evaluate_n (*m_tw, nargout);
+    }
+    CATCH_INTERRUPT_EXCEPTION
+    CATCH_INDEX_EXCEPTION
+    CATCH_EXECUTION_EXCEPTION
+    CATCH_BAD_ALLOC
+    CATCH_EXIT_EXCEPTION
+
+    EXPAND_CSLIST_PUSH_N_OVL_ELEMENTS_TO_STACK (retval, nargout);
+  }
+  DISPATCH ();
+bind_ans:
+  {
+    int slot = arg0;
+    octave_value &ans_on_stack = TOP_OV ();
+    octave_value &ans_in_slot = bsp [slot].ov;
+
+    if (ans_on_stack.is_defined ())
+      {
+        if (!ans_on_stack.is_cs_list ())
+          {
+            ans_in_slot.maybe_call_dtor ();
+            if (ans_on_stack.vm_need_storable_call ())
+              ans_on_stack.make_storable_value ();
+
+            if (OCTAVE_LIKELY (!ans_in_slot.is_ref ()))
+              ans_in_slot = std::move (ans_on_stack); // Note move
+            else
+              ans_in_slot.ref_rep ()->set_value (ans_on_stack);
+          }
+        else
+          {
+            // We need to recursively expand any cs-list and assign
+            // the elements one by one to ans.
+            std::vector<octave_value> v_el;
+
+            std::vector<octave_value_list> v_ovl_stack; // "recursive" stacks
+            std::vector<int> v_ovl_idx_stack;
+
+            v_ovl_stack.push_back (ans_on_stack.list_value ());
+            v_ovl_idx_stack.push_back (0);
+
+            while (true)
+              {
+              redo:
+                octave_value_list &lst = v_ovl_stack.back ();
+                int &idx = v_ovl_idx_stack.back (); // Note: reference changes in loop
+
+                for (; idx < lst.length (); idx++)
+                  {
+                    octave_value ov = lst (idx);
+                    if (ov.is_cs_list ())
+                      {
+                        idx++;
+                        v_ovl_stack.push_back (ov.list_value ());
+                        v_ovl_idx_stack.push_back (0);
+                        goto redo;
+                      }
+                    else if (ov.is_defined ())
+                      v_el.push_back (ov);
+                  }
+
+                v_ovl_stack.pop_back ();
+                v_ovl_idx_stack.pop_back ();
+
+                if (v_ovl_stack.size () == 0)
+                  break;
+              }
+
+            // Assign all elements to ans one by one
+            for (auto &ov_rhs : v_el)
+              {
+                ans_in_slot.maybe_call_dtor ();
+                if (ov_rhs.vm_need_storable_call ())
+                  ov_rhs.make_storable_value ();
+
+                if (OCTAVE_LIKELY (!ans_in_slot.is_ref ()))
+                  ans_in_slot = std::move (ov_rhs); // Note move
+                else
+                  ans_in_slot.ref_rep ()->set_value (ov_rhs);
+              }
+          }
+      }
+
+    STACK_DESTROY (1);
+  }
+DISPATCH ();
+
+push_anon_fcn_handle:
+{
+  ip--; // Rewind ip for int macro underneath
+  int tree_idx = POP_CODE_INT ();
+
+  auto it = unwind_data->m_ip_to_tree.find (tree_idx);
+  CHECK (it != unwind_data->m_ip_to_tree.end ());
+
+  tree_anon_fcn_handle *tree_h = reinterpret_cast <tree_anon_fcn_handle*> (it->second);
+
+  octave_value ret = m_tw->evaluate_anon_fcn_handle (*tree_h);
+  octave_fcn_handle *fn_h = ret.fcn_handle_value ();
+  CHECK (fn_h);
+  fn_h->compile ();
+
+  PUSH_OV (ret);
+}
+DISPATCH ();
+
+for_complex_setup:
+{
+  octave_value &ov_rhs = TOP_OV ();
+  ov_rhs.make_unique (); // TODO: Dunno if needed
+  unsigned char b0 = arg0;
+  unsigned char b1 = *ip++;
+
+  int target = USHORT_FROM_UCHARS (b0, b1);
+
+  if (ov_rhs.is_undefined ())
+    {
+      (*sp++).i = 1; // Need two native ints on the stack so they can be popped by the POP_N_INTS
+      (*sp++).i = 2; // after the for loop body.
+      ip = code + target;
+      DISPATCH ();
+    }
+
+  if (!ov_rhs.isstruct ())
+    {
+      (*sp++).i = 1; // Need two native ints on the stack so they can be popped by the unwind.
+      (*sp++).i = 2;
+      (*sp++).pee = new execution_exception {"error", "", "in statement 'for [X, Y] = VAL', VAL must be a structure"};
+      (*sp++).i = static_cast<int> (error_type::EXECUTION_EXC);
+      goto unwind;
+    }
+
+  octave_map map = ov_rhs.map_value ();
+  string_vector keys = map.keys ();
+  octave_idx_type n = keys.numel ();
+
+  // Push n to the stack
+  (*sp++).i = n;
+  // Push a counter to the stack, initialized so that it will
+  // increment to 0.
+  (*sp++).i = -1;
+}
+DISPATCH ();
+
+for_complex_cond:
+{
+  // Increase counter
+  TOP ().i++; // Wraps to zero first iteration
+
+  // Check if we are done with all iterations.
+  // n is the second element on the stack
+  if (TOP ().i == SEC ().i)
+    {
+      // The after address
+      unsigned char b0 = arg0;
+      unsigned char b1 = *ip++;
+
+      int after = USHORT_FROM_UCHARS (b0, b1);
+
+      // goto after block
+      ip = code + after;
+    }
+  else
+    {
+      ip++; // Skip 2nd part of the after address
+      int slot_key = POP_CODE_USHORT ();
+      int slot_value = POP_CODE_USHORT ();
+      octave_idx_type counter = TOP ().i;
+
+      octave_value &ov_rhs = THIRD_OV (); // This is always a struct
+      octave_value &ov_key = bsp[slot_key].ov;
+      octave_value &ov_val = bsp[slot_value].ov;
+
+      // TODO: A bit wasteful copying map_value () each time, but whatever --
+      //       who uses complex for loops anyway.
+      std::string key = ov_rhs.map_value ().keys () [counter];
+      const Cell val_lst = ov_rhs.map_value ().contents (key);
+
+      octave_idx_type n = val_lst.numel ();
+      octave_value val = (n == 1) ? val_lst(0) : octave_value (val_lst);
+
+      if (counter == 0)
+        {
+          ov_val.maybe_call_dtor (); // The first iteration these could be class objects ...
+          ov_key.maybe_call_dtor ();
+        }
+
+      val.make_unique (); // TODO: Dunno if needed
+
+      if (ov_val.is_ref ())
+        ov_val.ref_rep ()->set_value (val);
+      else
+        ov_val = val;
+
+      if (ov_val.is_ref ())
+        ov_key.ref_rep ()->set_value (key);
+      else
+        ov_key = key;
+    }
+}
+DISPATCH ();
+
+/* For dynamic m*n matrix where m and n < 256 */
+matrix:
+  {
+    int nrows = arg0;
+    int ncols = *ip++;
+    int n_el = nrows * ncols;
+
+    // The first element is down the stack
+    // and the last element is at the top.
+    stack_element *first_arg = &sp[-n_el];
+
+    // The stack pointer is pointing to the first unused
+    // stack position, so it is the end pointer.
+    stack_element *end_arg = sp;
+
+    try
+      {
+        tm_const tmp (first_arg, end_arg, ncols, *m_tw);
+
+        octave_value &&ov = tmp.concat (' ');
+
+        STACK_DESTROY (n_el);
+
+        PUSH_OV (ov);
+      }
+    CATCH_INTERRUPT_EXCEPTION
+    CATCH_INDEX_EXCEPTION
+    CATCH_EXECUTION_EXCEPTION
+    CATCH_BAD_ALLOC
+    CATCH_EXIT_EXCEPTION
+  }
+  DISPATCH ();
+matrix_big:
+  {
+    int type = arg0;
+
+    /* type 0 indicates a matrix that has unequal length of the rows.
+     *
+     * Any other value than zero indicates a big "rectangle" matrix
+     * with more than 255 elements in a row or column. */
+    if (type == 0)
+      {
+        int nrows = POP_CODE_INT ();
+
+        std::vector<int> length_rows;
+
+        int n_el = 0;
+        for (int i = 0; i < nrows; i++)
+          {
+            int row_length = POP_CODE_INT ();
+            length_rows.push_back (row_length);
+            n_el += row_length;
+          }
+
+        // The first element is down the stack
+        // and the last element is at the top.
+        stack_element *first_arg = &sp[-n_el];
+
+        // The stack pointer is pointing to the first unused
+        // stack position, so it is the end pointer.
+        stack_element *end_arg = sp;
+
+        try
+          {
+            tm_const tmp (first_arg, end_arg, length_rows, *m_tw);
+
+            octave_value &&ov = tmp.concat (' ');
+
+            STACK_DESTROY (n_el);
+
+            PUSH_OV (ov);
+          }
+        CATCH_INTERRUPT_EXCEPTION
+        CATCH_INDEX_EXCEPTION
+        CATCH_EXECUTION_EXCEPTION
+        CATCH_BAD_ALLOC
+        CATCH_EXIT_EXCEPTION
+      }
+    else
+      {
+        int nrows = POP_CODE_INT ();
+        int ncols = POP_CODE_INT ();
+        int n_el = nrows * ncols;
+
+        // The first element is down the stack
+        // and the last element is at the top.
+        stack_element *first_arg = &sp[-n_el];
+
+        // The stack pointer is pointing to the first unused
+        // stack position, so it is the end pointer.
+        stack_element *end_arg = sp;
+
+        try
+          {
+            tm_const tmp (first_arg, end_arg, ncols, *m_tw);
+
+            octave_value &&ov = tmp.concat (' ');
+
+            STACK_DESTROY (n_el);
+
+            PUSH_OV (ov);
+          }
+        CATCH_INTERRUPT_EXCEPTION
+        CATCH_INDEX_EXCEPTION
+        CATCH_EXECUTION_EXCEPTION
+        CATCH_BAD_ALLOC
+        CATCH_EXIT_EXCEPTION
+      }
+  }
+  DISPATCH ();
+trans_mul:
+  MAKE_BINOP(compound_binary_op::op_trans_mul)
+  DISPATCH_1BYTEOP();
+mul_trans:
+  MAKE_BINOP(compound_binary_op::op_mul_trans)
+  DISPATCH_1BYTEOP();
+herm_mul:
+  MAKE_BINOP(compound_binary_op::op_herm_mul)
+  DISPATCH_1BYTEOP();
+mul_herm:
+  MAKE_BINOP(compound_binary_op::op_mul_herm)
+  DISPATCH_1BYTEOP();
+trans_ldiv:
+  MAKE_BINOP(compound_binary_op::op_trans_ldiv)
+  DISPATCH_1BYTEOP();
+herm_ldiv:
+  MAKE_BINOP(compound_binary_op::op_herm_ldiv)
+  DISPATCH_1BYTEOP();
+
+  {
+    int slot; // Needed if we need a function lookup
+    int nargout;
+    int n_args_on_stack;
+
+    if (0)
+      {
+wordcmd_nx:
+        slot = arg0;
+        nargout = bsp[0].i;
+        n_args_on_stack = *ip++;
+      }
+    else if (0)
+      {
+wordcmd:
+        slot = arg0;
+        nargout = *ip++;
+        n_args_on_stack = *ip++;
+      }
+
+    // The object to index is before the args on the stack
+    octave_value &ov = (sp[-1 - n_args_on_stack]).ov;
+
+    switch (ov.vm_dispatch_call ())
+      {
+        case octave_base_value::vm_call_dispatch_type::OCT_FN_LOOKUP:
+          {
+            CHECK_PANIC (ov.is_nil ()); // TODO: Remove
+
+            // Put a function cache object in the slot and in the local ov
+            ov = octave_value (new octave_fcn_cache (name_data[slot]));
+            if (bsp[slot].ov.is_ref ())
+              bsp[slot].ov.ref_rep ()->set_value (ov);
+            else
+              bsp[slot].ov = ov;
+          }
+          // Fallthrough
+        case octave_base_value::vm_call_dispatch_type::OCT_CALL:
+        case octave_base_value::vm_call_dispatch_type::OCT_HANDLE:
+        case octave_base_value::vm_call_dispatch_type::OCT_OBJECT:
+          {
+            octave_function *fcn;
+            try
+              {
+                stack_element *first_arg = &sp[-n_args_on_stack];
+                stack_element *end_arg = &sp[0];
+                fcn = ov.get_cached_fcn (first_arg, end_arg);
+              }
+            CATCH_EXECUTION_EXCEPTION
+
+            if (! fcn)
+              {
+                (*sp++).ps = new std::string {name_data[slot]};
+                (*sp++).i = static_cast<int> (error_type::ID_UNDEFINED);
+                goto unwind;
+              }
+
+            if (fcn->is_compiled ())
+              {
+                octave_user_code *usr_fcn = static_cast<octave_user_code *> (fcn);
+                // A lot of code in this define
+                int caller_nvalback = nargout; // Caller wants as many values returned as it wants the callee to produce
+                MAKE_BYTECODE_CALL
+
+                // Now dispatch to first instruction in the
+                // called function
+              }
+            else
+              {
+
+                octave_value_list ovl;
+                // The operands are on the top of the stack
+                POP_STACK_RANGE_TO_OVL (ovl, sp - n_args_on_stack, sp);
+
+                try
+                  {
+                    m_tw->set_active_bytecode_ip (ip - code);
+                    octave_value_list ret = fcn->call (*m_tw, nargout, ovl);
+
+                    STACK_DESTROY (1);
+                    EXPAND_CSLIST_PUSH_N_OVL_ELEMENTS_TO_STACK (ret, nargout);
+                  }
+                CATCH_INTERRUPT_EXCEPTION
+                CATCH_INDEX_EXCEPTION_WITH_NAME
+                CATCH_EXECUTION_EXCEPTION
+                CATCH_BAD_ALLOC
+                CATCH_EXIT_EXCEPTION
+
+              }
+          }
+          break;
+
+        case octave_base_value::vm_call_dispatch_type::OCT_SUBSREF:
+        case octave_base_value::vm_call_dispatch_type::OCT_NESTED_HANDLE:
+          PANIC ("Invalid dispatch");
+      }
+  }
+  DISPATCH ();
+handle_signals:
+  {
+    // Check if there is any signal to handle
+    try
+      {
+        octave_quit ();
+      }
+    CATCH_INTERRUPT_EXCEPTION
+    CATCH_INDEX_EXCEPTION
+    CATCH_EXECUTION_EXCEPTION
+    CATCH_BAD_ALLOC
+    CATCH_EXIT_EXCEPTION
+  }
+  DISPATCH_1BYTEOP ();
+push_cst_dbl_0:
+{
+  PUSH_OV (ov_dbl_0);
+}
+DISPATCH_1BYTEOP ();
+push_cst_dbl_1:
+{
+  PUSH_OV (ov_dbl_1);
+}
+DISPATCH_1BYTEOP ();
+push_cst_dbl_2:
+{
+  PUSH_OV (ov_dbl_2);
+}
+DISPATCH_1BYTEOP ();
+
+  // The PUSH_CELL and PUSH_CELL_BIG opcodes pushes a cell to the stack which later code will assign
+  // elements to with APPEND_CELL. Two counters for the active column and row are also pushed.
+  {
+    int n_rows;
+    int n_cols;
+
+    if (0)
+      {
+push_cell:
+        n_rows = arg0;
+        n_cols = POP_CODE ();
+      }
+    else if (0)
+      {
+push_cell_big:
+        ip--; // Rewind ip so that it points to the first byte after the PUSH_CELL_BIG opcode
+        n_rows = POP_CODE_INT ();
+        n_cols = POP_CODE_INT ();
+      }
+
+    // The size is a guess. In the end, the size might differ.
+    Cell cell(n_rows, n_cols);
+
+    PUSH_OV (cell);
+
+    // The APPEND_CELL opcodes need to keep track of which row and column
+    // they are supposed to add to, since any element can be a cs-list,
+    // and the index to assign to can't be statically decided.
+    PUSH_OV (new octave_int64_scalar {});
+    PUSH_OV (new octave_int64_scalar {});
+  }
+  DISPATCH ();
+
+append_cell:
+{
+  // The stack looks like this:
+  // top: Element to add
+  //  -1: Row counter
+  //  -2: Column counter
+  //  -3: The cell to add elements to
+
+  // Essentially there is an APPEND_CELL opcode after
+  // each element argument. The last APPEND_CELL in a row
+  // has arg0 set to a number to indicate if it is the last
+  // column in the row, with distinction between:
+  //
+  // a middle row == 1
+  // the last row of many == 2
+  // the last row of one == 3
+  // the first row of many == 4
+  //
+  // This is needed since the first row sets how many columns the
+  // other rows need to have and the last rows need to pop the two
+  // counters on the stack.
+
+  // Note that 'b = {}; c = {b{:}, b{:}}' makes c size 2x0
+  // while 'b = {}; c = {b{:}}' makes c size 0x0
+
+  int last = arg0;
+
+  // The element we need to insert into the cell
+  octave_value ov = std::move (TOP_OV ());
+  STACK_SHRINK (1);
+
+  // The cell we are adding the element to
+  octave_value &ov_cell = THIRD_OV (); 
+  octave_cell &ovb_cell = REP (octave_cell, ov_cell);
+
+  Cell &cell = ovb_cell.octave_cell::matrix_ref ();
+
+  octave_idx_type n_rows = cell.rows ();
+  octave_idx_type n_cols = cell.cols ();
+
+  // The column counter
+  octave_value &ov_i_col = SEC_OV ();
+  octave_int64_scalar &ovb_i_col = REP (octave_int64_scalar, ov_i_col);
+  auto &i_col = ovb_i_col.octave_int64_scalar::scalar_ref ();
+
+  octave_idx_type i_col_idx = i_col;
+
+  // The row counter
+  octave_value &ov_i_row = TOP_OV ();
+  octave_int64_scalar &ovb_i_row = REP (octave_int64_scalar, ov_i_row);
+  auto &i_row = ovb_i_row.octave_int64_scalar::scalar_ref ();
+
+  octave_idx_type i_row_idx = i_row;
+
+  if (ov.is_cs_list ())
+    {
+      octave_value_list ovl = ov.list_value ();
+      octave_idx_type n = ovl.length ();
+
+      // If we are operating on the first row, increase its size if we
+      // are about to overflow.
+      if (i_row_idx == 0 && i_col_idx + n > n_cols)
+        {
+          cell.resize (dim_vector (n_rows, i_col_idx + n));
+          n_cols = i_col_idx + n;
+        }
+      
+      // If there is room in the row, insert the elements into it.
+      // Note that if there is no room, no element will be added to the cell,
+      // there will be an error after the row's last element's arg is executed.
+      // I.e. all the arg expressions in the row are always executed before
+      // the error.
+      if (i_col_idx + n <= n_cols)
+        {
+          // Insert the elements of the cs-list into to the row of the cell.
+          for (octave_idx_type i = 0; i < n; i++)
+            cell (i_row_idx, i_col_idx + i) = ovl (i);
+        }
+
+      i_col += n;
+      i_col_idx += n;
+    }
+  else if (ov.is_defined ())
+    {
+      // If we are operating on the first row, increase its size if we
+      // are about to overflow.
+      if (i_row_idx == 0 && i_col_idx >= n_cols)
+        {
+          cell.resize (dim_vector (1, i_col_idx + 1));
+          n_cols++;
+        }
+
+      // If there is room in the row, insert the element into it.
+      // Note that if there is no room, no element will be added to the cell,
+      // there will be an error after the row's last element's arg is executed.
+      // I.e. all the arg expressions in the row are always executed before
+      // the error.
+      if (i_col_idx < n_cols)
+        cell (i_row_idx, i_col_idx) = ov;
+
+      i_col = i_col + static_cast<octave_int64> (1);
+      i_col_idx++;
+    }
+  else
+    {
+      ; // If the arg is undefined, nothing is added to the row in the cell.
+    }
+
+  if (last == 1) // Last element in a middle row in a cell with multiple rows.
+    {
+      // The amount of columns in a row has to match the first row's.
+      if (i_col_idx && i_col_idx != n_cols)
+        {
+          (*sp++).pee = new execution_exception {"error","","number of columns must match"};
+          (*sp++).i = static_cast<int> (error_type::EXECUTION_EXC);
+          goto unwind;
+        }
+
+      // Prepare for APPEND_CELL to operate on the next row.
+      i_row +=  i_col_idx ? 1L : 0; // Only advance row idx if something was inserted.
+      i_col = 0;
+    }
+  else if (last == 2) // Last element in the last row in a cell with multiple rows.
+    {
+      // The amount of columns in a row has to match the first row's unless
+      // the amount of columns in the current row is zero.
+      if (i_col_idx && i_col_idx != n_cols)
+        {
+          (*sp++).pee = new execution_exception {"error","","number of columns must match"};
+          (*sp++).i = static_cast<int> (error_type::EXECUTION_EXC);
+          goto unwind;
+        }
+
+      if (i_col_idx)
+        i_row_idx += 1; // If this row was not empty
+      else if (n_cols == 0)
+        i_row_idx += 1; // If this row was empty and is supposed to be empty.
+
+      // If all the args for a row were empty, the next row's args were inserted into the empty row,
+      // so there might be trailing empty rows that we need to remove.
+      if (i_row_idx != n_rows)
+        {
+          cell.resize (dim_vector (i_row_idx, n_cols));
+        }
+
+      // Destroy the col and row counters
+      STACK_DESTROY (2);
+      // The cell is now on the top of the stack
+    }
+  else if (last == 3) // Last element in row in a cell with one row
+    {
+      // If a smaller number of columns were inserted than there are arguments
+      // (the amounts of args sets the initial size) we need to shrink the cell
+      if (i_col_idx < n_cols)
+        {
+          // If the row is empty, the resulting cell should be 0x0.
+          //   "b = {}; c = {b{:}}" yields c as a 0x0 cell
+          // but:
+          //   "b = {}; c = {b{:}; b{:}}" yields c as a 2x0 cell
+          cell.resize (dim_vector (i_col_idx ? 1 : 0, i_col_idx));
+        }
+
+      // Destroy the col and row counters
+      STACK_DESTROY (2);
+      // The cell is now on the top of the stack
+    }
+  else if (last == 4) // Last element in the first row, more than one row total
+    {
+      // If a smaller number of columns were inserted than there are arguments
+      // (the amounts of args sets the initial size) we need to shrink the cell
+      if (i_col_idx < n_cols)
+        {
+          cell.resize (dim_vector (n_rows, i_col_idx));
+        }
+
+      // Prepare for APPEND_CELL to operate on the next row
+      i_col = 0;
+      // Always advance to next row, even if first row was empty since
+      // if the first row was empty, all rows need to be empty.
+      i_row += 1L;
+    }
+}
+DISPATCH ();
+
+  {
+    // TODO: Too much code. Should be broken out?
+    // Something made sp not be in r15.
+
+    int nargout, slot;
+    if (0)
+      {
+index_cell_idnx:
+        slot = arg0; // Needed if we need a function lookup
+        nargout = bsp[0].i;
+      }
+    else if (0)
+      {
+index_cell_idn:
+        slot = arg0; // Needed if we need a function lookup
+        nargout = *ip++;
+      }
+    else if (0)
+index_cell_id1:
+      {
+        slot = arg0;
+        nargout = 1;
+      }
+    else if (0)
+index_cell_id0:
+      {
+        slot = arg0;
+        nargout = 0;
+      }
+
+    int n_args_on_stack = *ip++;
+
+    // The object to index is before the args on the stack
+    octave_value &ov = (sp[-1 - n_args_on_stack]).ov;
+
+    switch (ov.vm_dispatch_call ())
+      {
+        case octave_base_value::vm_call_dispatch_type::OCT_SUBSREF:
+          {
+            std::list<octave_value_list> idx; // TODO: mallocs!
+
+            // Make an ovl with the args
+            octave_value_list ovl;
+            // The operands are on the top of the stack
+            POP_STACK_RANGE_TO_OVL (ovl, sp - n_args_on_stack, sp);
+
+            idx.push_back(ovl);
+
+            // TODO: subsref might throw index error
+            octave_value_list retval;
+
+            try
+              {
+                m_tw->set_active_bytecode_ip (ip - code);
+                retval = ov.subsref("{", idx, nargout);
+                idx.clear ();
+              }
+            CATCH_INTERRUPT_EXCEPTION
+            CATCH_INDEX_EXCEPTION_WITH_NAME
+            CATCH_EXECUTION_EXCEPTION
+            CATCH_BAD_ALLOC
+            CATCH_EXIT_EXCEPTION
+
+            bool is_fcn = (retval.length () ?
+                            retval(0).is_function() : false);
+
+            // "FIXME: when can the following happen?  In what case does indexing
+            //  result in a value that is a function?  Classdef method calls?
+            //  Something else?"
+
+            if (OCTAVE_LIKELY (!is_fcn))
+              {
+                idx.clear ();
+                // TODO: Necessary? I guess it might trigger dtors
+                // or something?
+                ov = octave_value ();
+              }
+            else
+              {
+                octave_value val = retval(0);
+                octave_function *fcn = val.function_value (true);
+
+                if (fcn)
+                  {
+                    octave_value_list final_args;
+
+                    if (! idx.empty ())
+                      final_args = idx.front ();
+
+                    try
+                      {
+                        m_tw->set_active_bytecode_ip (ip - code);
+                        retval = fcn->call (*m_tw, nargout, final_args);
+                      }
+                    CATCH_INTERRUPT_EXCEPTION
+                    CATCH_INDEX_EXCEPTION
+                    CATCH_EXECUTION_EXCEPTION
+                    CATCH_BAD_ALLOC
+                    CATCH_EXIT_EXCEPTION
+                  }
+
+                idx.clear ();
+                ov = octave_value ();
+                val = octave_value ();
+              }
+
+            STACK_DESTROY (1);
+            EXPAND_CSLIST_PUSH_N_OVL_ELEMENTS_TO_STACK (retval, nargout);
+          }
+          break;
+
+        case octave_base_value::vm_call_dispatch_type::OCT_FN_LOOKUP:
+          {
+            // Put a function cache object in the slot and in the local ov
+            ov = octave_value (new octave_fcn_cache (name_data[slot]));
+            if (bsp[slot].ov.is_ref ())
+              bsp[slot].ov.ref_rep ()->set_value (ov);
+            else
+              bsp[slot].ov = ov;
+          }
+          // Fallthrough
+        case octave_base_value::vm_call_dispatch_type::OCT_CALL:
+        case octave_base_value::vm_call_dispatch_type::OCT_HANDLE:
+        case octave_base_value::vm_call_dispatch_type::OCT_OBJECT:
+          {
+            octave_function *fcn;
+            try
+              {
+                stack_element *first_arg = &sp[-n_args_on_stack];
+                stack_element *end_arg = &sp[0];
+                fcn = ov.get_cached_fcn (first_arg, end_arg);
+              }
+            CATCH_EXECUTION_EXCEPTION
+
+            if (! fcn)
+              {
+                (*sp++).ps = new std::string {name_data[slot]};
+                (*sp++).i = static_cast<int> (error_type::ID_UNDEFINED);
+                goto unwind;
+              }
+
+            if (fcn->is_compiled ())
+              {
+                octave_user_code *usr_fcn = static_cast<octave_user_code *> (fcn);
+                // A lot of code in this define
+                int caller_nvalback = nargout; // Caller wants as many values returned as it wants the callee to produce
+                MAKE_BYTECODE_CALL
+
+                // Now dispatch to first instruction in the
+                // called function
+              }
+            else
+              {
+                // Make an ovl with the args
+                octave_value_list ovl;
+                // The operands are on the top of the stack
+                POP_STACK_RANGE_TO_OVL (ovl, sp - n_args_on_stack, sp);
+
+                try
+                  {
+                    m_tw->set_active_bytecode_ip (ip - code);
+                    octave_value_list ret = fcn->call (*m_tw, nargout, ovl);
+
+                    STACK_DESTROY (1);
+                    EXPAND_CSLIST_PUSH_N_OVL_ELEMENTS_TO_STACK (ret, nargout);
+                  }
+                CATCH_INTERRUPT_EXCEPTION
+                CATCH_INDEX_EXCEPTION_WITH_NAME
+                CATCH_EXECUTION_EXCEPTION
+                CATCH_BAD_ALLOC
+                CATCH_EXIT_EXCEPTION
+
+              }
+          }
+          break;
+
+        case octave_base_value::vm_call_dispatch_type::OCT_NESTED_HANDLE:
+          {
+            (*sp++).i = n_args_on_stack;
+            (*sp++).i = nargout;
+            (*sp++).i = nargout; // "caller_nvalback". Caller wants as many values returned as it wants the callee to produce
+            (*sp++).i = slot;
+            goto make_nested_handle_call;
+          }
+      }
+  }
+  DISPATCH ();
+
+incr_prefix:
+  {
+    octave_value &ov = TOP_OV ();
+    // Inplace
+    ov.non_const_unary_op (octave_value::unary_op::op_incr);
+  }
+  DISPATCH_1BYTEOP ();
+
+rot:
+  {
+    octave_value top_ov = TOP_OV ();
+    octave_value sec_ov = SEC_OV ();
+    STACK_DESTROY (2);
+    PUSH_OV (top_ov);
+    PUSH_OV (sec_ov);
+  }
+  DISPATCH_1BYTEOP ();
+
+varargin_call:
+  {
+    // We jump to here when a bytecode call notices it is
+    // calling a function with varargin.
+    //
+    // Continue where we left off. Restore temp variables from the stack.
+
+    octave_user_function *usr_fcn = static_cast<octave_user_function *> (sp[0].pv);
+
+    int n_returns_callee = static_cast<signed char> (ip[-4]);
+    if (OCTAVE_UNLIKELY (n_returns_callee < 0))
+      {
+        if (n_returns_callee == -128) /* Anonymous function */
+          n_returns_callee = 1;
+        else
+          n_returns_callee = -n_returns_callee;
+      }
+    int n_args_callee = -static_cast<signed char> (ip[-3]); // Note: Minus
+    int n_locals_callee = USHORT_FROM_UCHAR_PTR (ip - 2);
+
+    int nargout = sp[-1].i;
+
+    // Recreate first arg and n_args_on_stack
+    // from the stack
+    stack_element *first_arg = sp[-9].pse;
+    int n_args_on_stack = (sp - 9) - first_arg;
+
+    // Construct return values - note nargout
+    // is allready pushed as a uint64
+    for (int i = 1; i < n_returns_callee; i++)
+      PUSH_OV ();
+
+    int n_args_before_varargin =
+      std::min (n_args_callee - 1,
+                n_args_on_stack);
+    // Move the args to the new stack, except varargin
+    //
+    // We need to expand any cs-list, but only until the next
+    // argument would be in varargin. Those need to end up
+    // in the varargin cell array.
+    int ii;
+    int n_args_on_callee_stack = 0;
+    octave_value_list cs_args;
+    int cs_args_idx = 0;
+    for (ii = 0; ii < n_args_before_varargin; ii++)
+      {
+        octave_value &arg = first_arg[ii].ov;
+        if (arg.is_cs_list ())
+          {
+            cs_args = arg.list_value ();
+            cs_args_idx = 0;
+            for (int j = 0; j < cs_args.length ()
+                            && n_args_on_callee_stack < n_args_callee - 1; j++)
+              {
+                PUSH_OV (cs_args (j));
+                n_args_on_callee_stack++;
+                cs_args_idx++;
+              }
+          }
+        else
+          {
+            PUSH_OV (std::move (arg));
+            n_args_on_callee_stack++;
+          }
+
+        // Destroy the args
+        first_arg[ii].ov.~octave_value ();
+      }
+      // TODO: Expand cl_list? Smarter way? Do it in beginning ...
+
+    // Construct missing args, if any
+    for (int i = n_args_on_callee_stack; i < n_args_callee - 1; i++)
+      PUSH_OV ();
+
+    int n_args_in_varargin = n_args_on_stack - n_args_callee + 1; // "Surplus" args
+    int n_cells_left = cs_args.length () - cs_args_idx; // Amount of leftover cell ellements that need to go into varargin
+
+    int idx_cell = 0;
+    if (n_args_in_varargin > 0 || n_cells_left) // Anything to put in the varargin cell?
+      {
+        // TODO: Preallocate whole cell
+        Cell cell(n_cells_left ? 1 : 0, n_cells_left);
+
+        // Put the leftover objects from the cs-list expansion
+        // in the varargin cell, if any
+        for (int i = 0; i < n_cells_left; i++)
+          cell (0, idx_cell++) = cs_args (cs_args_idx + i);
+
+        // We need to expand cs-lists here too ...
+        for (int i = 0; i < n_args_in_varargin; i++)
+          {
+            // int col = n_args_in_varargin - 1 - i;
+            octave_value &arg = first_arg[ii + i].ov;
+
+            if (arg.is_cs_list ())
+              {
+                octave_value_list cs_args_i = arg.list_value ();
+                for (int j = 0; j < cs_args_i.length (); j++)
+                  {
+                    if (cell.numel () <= idx_cell)
+                      cell.resize (dim_vector {1, idx_cell + 1});
+                    cell (0, idx_cell++) = cs_args_i (j);
+                  }
+              }
+            else
+              {
+                if (cell.numel () <= idx_cell)
+                  cell.resize (dim_vector {1, idx_cell + 1});
+                cell (0, idx_cell++) = std::move (arg);
+              }
+
+            arg.~octave_value ();
+          }
+
+        // Push varargin to the stack
+        PUSH_OV (cell);
+      }
+    else
+      PUSH_OV (Cell (0,0)); // Empty cell into varargin's slot
+
+    // Construct locals
+    int n_locals_to_ctor =
+      n_locals_callee - n_args_callee - n_returns_callee;
+
+    CHECK_STACK (n_locals_to_ctor);
+    for (int i = 0; i < n_locals_to_ctor; i++)
+      PUSH_OV ();
+
+    int nargin = n_args_on_callee_stack + idx_cell; // n_args_callee count includes varargin
+
+try
+  {
+    m_tw->push_stack_frame(*this, usr_fcn, nargout, n_args_on_callee_stack);
+  }
+CATCH_STACKPUSH_EXECUTION_EXCEPTION /* Sets m_could_not_push_frame to true */
+CATCH_STACKPUSH_BAD_ALLOC
+
+  m_tw->set_nargin (nargin);
+
+if (OCTAVE_UNLIKELY (m_output_ignore_data))
+  {
+    /* Called fn needs to know about ignored outputs .e.g. [~, a] = foo() */
+m_output_ignore_data->push_frame (*this);
+  }
+
+    /* N_RETURNS is negative for varargout */
+    int n_returns = N_RETURNS () - 1; /* %nargout in N_RETURNS */
+    if (n_returns >= 0 && nargout > n_returns)
+      {
+        (*sp++).pee = new execution_exception {"error","","function called with too many outputs"};
+        (*sp++).i = static_cast<int> (error_type::EXECUTION_EXC);
+        goto unwind;
+      }
+
+    // Now dispatch to first instruction in the
+    // called function
+  }
+  DISPATCH ();
+
+// Not an opcode. Some opcodes jump here to handle nested handle function calls
+make_nested_handle_call:
+{
+  // Restore values from the stack
+  int slot = (*--sp).i;
+  int caller_nvalback = (*--sp).i;
+  int nargout = (*--sp).i;
+  int n_args_on_stack = (*--sp).i;
+
+  octave_value &ov = (sp[-1 - n_args_on_stack]).ov;
+
+  octave_function *fcn;
+  try
+    {
+      stack_element *first_arg = &sp[-n_args_on_stack];
+      stack_element *end_arg = &sp[0];
+      fcn = ov.get_cached_fcn (first_arg, end_arg);
+    }
+  CATCH_EXECUTION_EXCEPTION
+
+  if (! fcn)
+    {
+      (*sp++).ps = new std::string {name_data[slot]};
+      (*sp++).i = static_cast<int> (error_type::ID_UNDEFINED);
+      goto unwind;
+    }
+
+  if (fcn->is_compiled ())
+    {
+      octave_user_code *usr_fcn = static_cast<octave_user_code *> (fcn);
+
+      // The code bellow is like MAKE_BYTECODE_CALL, but with support for setting an access frame from the handle
+      if (sp + stack_min_for_new_call >= m_stack + stack_size)
+        {
+          (*sp++).pee = new execution_exception {"error","","VM is running out of stack space"};
+          (*sp++).i = static_cast<int> (error_type::EXECUTION_EXC);
+          goto unwind;
+        }
+      /* We are now going to call another function */
+      /* compiled to bytecode */
+
+      m_tw->set_active_bytecode_ip (ip - code);
+      stack_element *first_arg = sp - n_args_on_stack;
+
+      /* Push address to first arg (or would one would have been */
+      /* if there are no args), so we can restore the sp at return */
+      (*sp++).pse = first_arg;
+
+      /* Push unwind data */
+      (*sp++).pud = unwind_data;
+
+      /* Push code */
+      (*sp++).puc = code;
+
+      /* Push data */
+      (*sp++).pov = data;
+
+      /* Push id names */
+      (*sp++).ps = name_data;
+
+      /* Push bsp */
+      (*sp++).pse = bsp;
+
+      /* Push the instruction pointer */
+      (*sp++).puc = ip;
+
+      /* The amount of return values the caller actually wants. Not necesserely the */
+      /* same as the amount of return values the caller wants the callee to produce. */
+      /* (last on caller stack) */
+      (*sp++).u = caller_nvalback;
+
+      /* set callee bsp */
+      m_sp = bsp = sp;
+
+      /* Push nargout (first on callee stack) */
+      (*sp++).u = nargout;
+
+      /* Set the new data, code etc */
+      bytecode &bc = usr_fcn->get_bytecode ();
+      if (OCTAVE_UNLIKELY (m_profiler_enabled))
+        {
+          auto p = vm::m_vm_profiler;
+          if (p)
+            {
+              std::string caller_name = data[2].string_value (); /* profiler_name () querried at compile time */
+              p->enter_fn (caller_name, bc);
+            }
+        }
+      m_data = data = bc.m_data.data ();
+      m_code = code = bc.m_code.data ();
+      m_name_data = name_data = bc.m_ids.data ();
+      m_unwind_data = unwind_data = &bc.m_unwind_data;
+
+
+      /* Set the ip to 0 */
+      ip = code;
+      int n_returns_callee = static_cast<signed char> (*ip++); /* Negative for varargout */
+      if (OCTAVE_UNLIKELY (n_returns_callee < 0))
+        {
+          if (n_returns_callee == -128) /* Anonymous function */
+            n_returns_callee = 1;
+          else
+            n_returns_callee = -n_returns_callee;
+        }
+      int n_args_callee = static_cast<signed char> (*ip++); /* Negative for varargin */
+      int n_locals_callee = POP_CODE_USHORT ();
+
+      if (n_args_callee < 0)
+      {
+        sp[0].pv = static_cast<void*> (usr_fcn);
+        goto varargin_call;
+      }
+
+      /* Construct return values - note nargout */
+      /* is allready pushed as a uint64 */
+      for (int ii = 1; ii < n_returns_callee; ii++)
+        PUSH_OV ();
+
+      int n_args_on_callee_stack = 0;
+      bool all_too_many_args = false;
+      /* Move the args to the new stack */
+      for (int ii = 0; ii < n_args_on_stack; ii++)
+        {
+          octave_value &arg = first_arg[ii].ov;
+
+          if (arg.is_cs_list ())
+            {
+              octave_value_list args = arg.list_value ();
+              octave_idx_type n_el = args.length ();
+              if (n_el + n_args_on_callee_stack > 512)
+                {
+                  all_too_many_args = true;
+                }
+              else
+                {
+                  for (int j = 0; j < n_el; j++)
+                    {
+                      PUSH_OV (args (j));
+                      n_args_on_callee_stack++;
+                    }
+                }
+            }
+          else
+            {
+              PUSH_OV (std::move (arg));
+              n_args_on_callee_stack++;
+            }
+          /* Destroy the args */
+          arg.~octave_value ();
+        }
+      /* Construct missing args */
+      for (int ii = n_args_on_callee_stack; ii < n_args_callee; ii++)
+        PUSH_OV ();
+
+      /* Construct locals */
+      int n_locals_to_ctor =
+        n_locals_callee - n_args_callee - n_returns_callee;
+      for (int ii = 0; ii < n_locals_to_ctor; ii++)
+        PUSH_OV ();
+
+      try
+        {
+          octave_fcn_handle *h = ov.fcn_handle_value();
+          CHECK_PANIC (h);
+          CHECK_PANIC (h->is_nested () || h->is_anonymous ());
+          auto closure_frame = h->get_closure_frame ();
+
+          m_tw->push_stack_frame(*this, usr_fcn, nargout, n_args_on_callee_stack, closure_frame);
+        }
+      CATCH_STACKPUSH_EXECUTION_EXCEPTION /* Sets m_could_not_push_frame to true */
+      CATCH_STACKPUSH_BAD_ALLOC
+
+      if (OCTAVE_UNLIKELY (m_output_ignore_data))
+        {
+          /* Called fn needs to know about ignored outputs .e.g. [~, a] = foo() */
+          m_output_ignore_data->push_frame (*this);
+        }
+
+      /* "auto var" in the frame object. This is needed if nargout() etc are called */
+      set_nargout (nargout);
+
+      if (all_too_many_args)
+        {
+          std::string fn_name = unwind_data->m_name;
+          (*sp++).pee = new execution_exception {"error", "Octave:invalid-fun-call",
+                                                fn_name + ": function called with over 512 inputs."
+                                                " Consider using varargin."};
+          (*sp++).i = static_cast<int> (error_type::EXECUTION_EXC);
+          goto unwind;
+        }
+      if (n_args_on_callee_stack > n_args_callee)
+        {
+          std::string fn_name = unwind_data->m_name;
+          (*sp++).pee = new execution_exception {"error", "Octave:invalid-fun-call",
+                                                fn_name + ": function called with too many inputs"};
+          (*sp++).i = static_cast<int> (error_type::EXECUTION_EXC);
+          goto unwind;
+        }
+      /* N_RETURNS is negative for varargout */
+      int n_returns = N_RETURNS () - 1; /* %nargout in N_RETURNS */
+      if (n_returns >= 0 && nargout > n_returns)
+        {
+          std::string fn_name = unwind_data->m_name;
+          (*sp++).pee = new execution_exception {"error", "Octave:invalid-fun-call",
+                                                fn_name + ": function called with too many outputs"};
+          (*sp++).i = static_cast<int> (error_type::EXECUTION_EXC);
+          goto unwind;
+        }
+
+      // Now dispatch to first instruction in the
+      // called function
+    }
+  else
+    {
+      // Make an ovl with the args
+      octave_value_list ovl;
+      // The operands are on the top of the stack
+      POP_STACK_RANGE_TO_OVL (ovl, sp - n_args_on_stack, sp);
+
+      try
+        {
+          m_tw->set_active_bytecode_ip (ip - code);
+          octave_value_list ret = ov.simple_subsref ('(', ovl, nargout);
+          ovl.clear ();
+
+          STACK_DESTROY (1);
+          EXPAND_CSLIST_PUSH_N_OVL_ELEMENTS_TO_STACK (ret, nargout);
+        }
+      CATCH_INTERRUPT_EXCEPTION
+      CATCH_INDEX_EXCEPTION_WITH_MAYBE_NAME(slot != 0)
+      CATCH_EXECUTION_EXCEPTION
+      CATCH_BAD_ALLOC
+      CATCH_EXIT_EXCEPTION
+    }
+}
+DISPATCH ();
+
+unwind:
+  {
+    ip--; // Rewind ip to after the opcode (i.e. arg0's position in the code)
+    // Push VM state
+    m_sp = sp;
+    m_bsp = bsp;
+    m_rsp = rsp;
+    m_code = code;
+    m_data = data;
+    m_name_data = name_data;
+    m_ip = ip - code;
+    m_unwind_data = unwind_data;
+
+    m_echo_prior_op_was_cond = false; // Used by the echo functionality
+
+    // Ther error_type is put on the stack before the jump to unwind.
+    error_type et = static_cast<error_type> (m_sp[-1].i);
+    m_sp--;
+
+    // Save current exception to the error system in handle_error ()
+    error_data errdat = handle_error (et);
+
+    // Run only unwind_protect code if the exception is the interrupt exception.
+    // I.e. no 'throw ... catch' code.
+    bool only_unwind_protect = et == error_type::INTERRUPT_EXC;
+
+    while (1)
+      {
+        // Find unwind entry for current value of the instruction pointer, unless we are dealing
+        // with a debug quit exception in which case no unwind entry is used.
+        unwind_entry *entry = nullptr;
+        if (et != error_type::DEBUG_QUIT)
+          entry = find_unwind_entry_for_current_state (only_unwind_protect);
+
+        unwind_entry_type type = unwind_entry_type::INVALID;
+        if (entry)
+          type = entry->m_unwind_entry_type;
+
+        // We need to figure out what stack depth we want.
+        // If we are unwinding in a try catch we need to save any
+        // nesting switch or for loop stack objects on the stack.
+        int target_stack_depth = N_LOCALS();
+        if (entry)
+          {
+            target_stack_depth += entry->m_stack_depth;
+          }
+
+        // Unwind the stack down to the locals
+        //
+        // If we got here from return op code we might allready have
+        // destroyed the locals when an error triggered.
+        while (m_sp - m_bsp > target_stack_depth)
+          {
+            // If the stack depth matches a for loop we need to
+            // pop some native ints.
+            //
+            // TODO: Wasteful search for forloop each iteration
+            int current_stack_depth = m_sp - m_bsp - N_LOCALS ();
+            int stack_depth_for_forloop =
+              find_unwind_entry_for_forloop (current_stack_depth);
+
+            if (stack_depth_for_forloop != -1 &&
+                current_stack_depth == stack_depth_for_forloop + 3)
+              {
+                m_sp -= 2; // Pop two ints
+                (*--m_sp).ov.~octave_value (); // Pop ov
+              }
+            else
+              (*--m_sp).ov.~octave_value ();
+          }
+
+        if (type == unwind_entry_type::UNWIND_PROTECT ||
+            type == unwind_entry_type::TRY_CATCH)
+          {
+            // Need to set some stuff for last_error etc and make the
+            // interpreter happy by reseting stuff
+            error_system& es = m_tw->get_interpreter().get_error_system ();
+
+            octave_scalar_map err_map;
+
+            err_map.assign ("message", es.last_error_message ());
+            err_map.assign ("identifier", es.last_error_id ());
+            err_map.assign ("stack", es.last_error_stack ());
+
+            m_tw->get_interpreter().recover_from_exception ();
+
+            // Set stack pointer and ip and dispatch
+            m_ip = entry->m_ip_target;
+            code = m_code;
+            ip = m_code + m_ip;
+            sp = m_sp;
+
+            // Push the error object that is either just poped right
+            // away by a POP instruction or assigned to the catch
+            // clause identifier.
+            PUSH_OV (err_map);
+
+            if (et == error_type::INTERRUPT_EXC)
+              m_unwinding_interrupt = true;
+
+            goto bail_unwind;
+          }
+
+        if (!m_could_not_push_frame)
+          {
+            auto sf = m_tw->get_current_stack_frame ();
+            if (sf->is_user_script_frame ())
+              sf->vm_exit_script ();
+            sf->vm_unwinds ();
+          }
+
+        // Destroy locals down to nargout
+        while (m_sp != m_bsp + 1)
+          {
+            (*--m_sp).ov.~octave_value ();
+          }
+
+        m_sp--; // nargout
+
+        if (m_sp == m_rsp)
+          break; // Got down to start of root stack frame
+
+        if (OCTAVE_UNLIKELY (m_profiler_enabled))
+          {
+            auto p = vm::m_vm_profiler;
+            if (p)
+              {
+                std::string fn_name = data[2].string_value (); // profiler_name () querried at compile time
+                p->exit_fn (fn_name);
+              }
+          }
+
+        // Skipp caller_nvalback
+        m_sp--;
+
+        // Restore ip
+        ip = (*--m_sp).puc;
+
+        // Restore bsp
+        bsp = m_bsp = (*--m_sp).pse;
+
+        // Restore id names
+        name_data = m_name_data = (*--m_sp).ps;
+
+        // Restore data
+        data = m_data = (*--m_sp).pov;
+
+        // Restore code
+        code = m_code = (*--m_sp).puc;
+        m_ip = ip - m_code;
+
+        // Restore unwind data
+        unwind_data = m_unwind_data = (*--m_sp).pud;
+
+        // Restore the stack pointer
+        sp = m_sp = m_sp[-1].pse;
+
+        // Pop dynamic stackframe (unless it was never pushed)
+        if (!m_could_not_push_frame)
+          m_tw->pop_stack_frame ();
+        else
+          m_could_not_push_frame = false;
+
+        // If we are messing with the interpreters lvalue_list due to some
+        // ~ we need to restore stuff.
+        if (m_output_ignore_data)
+          {
+            m_output_ignore_data->pop_frame (*this);
+            output_ignore_data::maybe_delete_ignore_data (*this, 0);
+          }
+      }
+
+    if (m_output_ignore_data)
+      {
+        CHECK_PANIC (m_output_ignore_data->m_external_root_ignorer);
+        output_ignore_data::maybe_delete_ignore_data (*this, 1);
+      }
+
+    CHECK_PANIC (!m_output_ignore_data);
+
+    CHECK_STACK (0);
+    this->m_dbg_proper_return = true;
+
+    m_tw->set_lvalue_list (m_original_lvalue_list);
+
+    // Rethrow exceptions out of the VM
+    if (et == error_type::INTERRUPT_EXC)
+      throw interrupt_exception {};
+    else if (et == error_type::DEBUG_QUIT)
+      throw quit_debug_exception {errdat.m_debug_quit_all};
+    else if (et == error_type::EXIT_EXCEPTION)
+      throw exit_exception (errdat.m_exit_status, errdat.m_safe_to_return);
+    else
+      {
+        error_system& es = m_tw->get_interpreter().get_error_system ();
+        es.rethrow_error (es.last_error_id (), es.last_error_message (), es.last_error_stack ());
+      }
+
+  }
+bail_unwind:
+  DISPATCH ();
+
+init_global:
+  {
+    // The next instruction tells whether we should init a global or persistent
+    // variable.
+    global_type type = static_cast<global_type> (arg0);
+
+    // The next instruction is the local slot number for the global variable
+    int slot = POP_CODE_USHORT();
+    POP_CODE_USHORT(); // Not used TODO: Remove. Make this opcode use WIDE
+
+    std::string& name = name_data[slot];
+
+    octave_value &ov_slot = bsp[slot].ov;
+    bool slot_already_live = ov_slot.is_defined ();
+
+    bool is_marked_in_VM = ov_slot.is_ref (); // TODO: Can this be other refs?
+
+    // The next instruction is whether the global declare has an
+    // initialization value
+    bool has_init_code = *ip++;
+
+    // If the global was not allready created we need to assign a
+    // empty double matrix to it.
+    // If there already is a defined local in the slot we initialize
+    // the global with the local
+    // TODO: Should be a decrapation warning here for this
+    octave_value ov_default;
+    if (slot_already_live && !is_marked_in_VM)
+      ov_default = std::move (ov_slot);
+    else
+      ov_default = Matrix ();
+
+    if (!is_marked_in_VM)
+      ov_slot = octave_value {};
+
+    bool global_is_new_in_callstack = false;
+
+    if (type == global_type::GLOBAL)
+      {
+        if (is_marked_in_VM && ov_slot.ref_rep ()->is_persistent_ref ())
+          {
+            (*sp++).pee = new execution_exception {"error", "",
+              "can't make persistent variable '" + name + "' global"};
+            (*sp++).i = static_cast<int> (error_type::EXECUTION_EXC);
+            goto unwind;
+          }
+
+        auto frame = m_tw->get_current_stack_frame ();
+        auto sym = frame->insert_symbol (name);
+        // Note: Install variable wont override global's value with nil ov from
+        //       the "{}" argument.
+        frame->install_variable (sym, {}, 1);
+
+        octave_value &ov_gbl = m_tw->global_varref (name);
+        global_is_new_in_callstack = ov_gbl.is_undefined ();
+
+        // We assign the default before the init
+        if (global_is_new_in_callstack)
+          m_tw->global_assign (name, ov_default);
+
+        if (!is_marked_in_VM)
+          {
+            ov_slot = octave_value {new octave_value_ref_global {name}};
+          }
+
+        // TODO: Assert global_is_new_in_callstack != global_is_marked_in_VM
+        // but does not work until the dynamic stack is implemented.
+
+        // CHECK (global_is_new_in_callstack != global_is_marked_in_VM);
+      }
+    else if (type == global_type::PERSISTENT)
+      {
+        if (is_marked_in_VM && ov_slot.ref_rep ()->is_global_ref ())
+          {
+            (*sp++).pee = new execution_exception {"error", "",
+              "can't make global variable '" + name + "' persistent"};
+            (*sp++).i = static_cast<int> (error_type::EXECUTION_EXC);
+            goto unwind;
+          }
+
+        auto frame = m_tw->get_current_stack_frame();
+
+        symbol_record sym = frame->lookup_symbol(name);
+        try
+          {
+            // Throws if global or formal parameter
+            frame->make_persistent(sym);
+          }
+        CATCH_EXECUTION_EXCEPTION
+
+        auto scope = frame->get_scope ();
+
+        // TODO: Put the offset in the op-code instead?
+        auto it = unwind_data->m_slot_to_persistent_slot.find (slot);
+        CHECK (it != unwind_data->m_slot_to_persistent_slot.end ());
+        int pers_offset = it->second;
+
+        octave_value &ov_gbl = scope.persistent_varref (pers_offset);
+
+        global_is_new_in_callstack = ov_gbl.is_undefined ();
+        if (global_is_new_in_callstack)
+          {
+            ov_gbl = ov_default;
+          }
+
+        if (!is_marked_in_VM)
+          {
+            ov_slot = octave_value {new octave_value_ref_persistent {std::move (scope), pers_offset}};
+          }
+      }
+    else
+      ERR ("Wrong global type");
+
+    // If there is init code, then there is also a offset to the first
+    // instruction after the init code, to where we jump if the global is
+    // alread live.
+    int after;
+    if (has_init_code)
+      {
+        unsigned char b0 = *ip++;
+        unsigned char b1 = *ip++;
+        after = USHORT_FROM_UCHARS (b0, b1);
+
+        if (!global_is_new_in_callstack || slot_already_live)
+          ip = code + after;
+      }
+
+    // Now dispatch to either next instruction if no init, init or after init
+  }
+  DISPATCH ();
+assign_compound:
+  {
+    // The next instruction is the slot number
+    int slot = arg0;
+    // The next instruction is the type of compound operation
+    octave_value::assign_op op =
+      static_cast<octave_value::assign_op> (*ip++);
+
+    octave_value &ov_rhs = TOP_OV ();
+    octave_value &ov_lhs = bsp[slot].ov;
+
+    if (!ov_lhs.is_defined ()) // TODO: Also checked in .assign() ...
+      {
+        (*sp++).pee = new execution_exception {"error", "",
+          "in computed assignment A OP= X, A must be defined first"};
+        (*sp++).i = static_cast<int> (error_type::EXECUTION_EXC);
+        goto unwind;
+      }
+
+    try
+      {
+        // TODO: assign makes some stupid empty list and slows everything down
+        if (OCTAVE_LIKELY (!ov_lhs.is_ref ()))
+          ov_lhs.assign (op, ov_rhs); // Move code into here?
+        else
+          {
+            octave_value &glb_ref = ov_lhs.ref_rep ()->ref ();
+            glb_ref.assign (op, ov_rhs);
+          }
+      }
+    CATCH_INTERRUPT_EXCEPTION
+    CATCH_INDEX_EXCEPTION_WITH_NAME
+    CATCH_EXECUTION_EXCEPTION
+    CATCH_BAD_ALLOC
+    CATCH_EXIT_EXCEPTION
+
+    STACK_DESTROY (1);
+  }
+  DISPATCH ();
+jmp_ifdef:
+  {
+    octave_value &ov_1 = TOP_OV ();
+    unsigned char b0 = arg0;
+    unsigned char b1 = *ip++;
+
+    int target = USHORT_FROM_UCHARS (b0, b1);
+
+    if (ov_1.is_defined () && !ov_1.is_magic_colon ())
+      ip = code + target;
+
+    STACK_DESTROY (1);
+  }
+  DISPATCH ();
+switch_cmp:
+  {
+    octave_value &ov_label = TOP_OV ();
+    octave_value &ov_switch = SEC_OV ();
+    unsigned char b0 = arg0;
+    unsigned char b1 = *ip++;
+
+    int target = USHORT_FROM_UCHARS (b0, b1);
+
+    bool do_it;
+    if (ov_label.is_undefined ())
+      do_it = false;
+    else if (!ov_label.iscell ())
+      do_it = ov_switch.is_equal (ov_label);
+    else
+      {
+        do_it = false;
+        // Match all cell elements. Any will do
+        Cell cell (ov_label.cell_value ());
+
+        for (octave_idx_type i = 0; i < cell.rows (); i++)
+          {
+            for (octave_idx_type j = 0; j < cell.columns (); j++)
+              {
+                do_it = ov_switch.is_equal (cell(i,j));
+
+                if (do_it)
+                  break;
+              }
+          }
+      }
+
+    STACK_DESTROY (2);
+
+    if (!do_it)
+      ip = code + target;
+  }
+  DISPATCH ();
+
+braindead_precond:
+  {
+    octave_value &ov = TOP_OV();
+
+    bool do_braindead = false;
+    if (ov.ndims () == 2 && ov.rows () == 1 && ov.columns () == 1)
+      do_braindead = true;
+
+    STACK_DESTROY (1);
+
+    if (do_braindead)
+      PUSH_OV (ov_true);
+    else
+      PUSH_OV (ov_false);
+  }
+  DISPATCH_1BYTEOP ();
+
+braindead_warning:
+  {
+    // A slot stores whether we allready printed this warning for a particular
+    // place where there could be a braindead short circuit
+    int slot = arg0;
+    // The next codepoint is the type of warning
+    int type = *ip++; // asci '|' or '&'
+
+    octave_value& ov_warning = bsp[slot].ov;
+
+    if (ov_warning.is_nil ())
+      {
+        ov_warning = ov_true; // Don't print the warning next time
+        m_tw->set_active_bytecode_ip (ip - code); // The warning needs to be able to get line numbers.
+
+        // It is possible to specify that certain warning should be an error, so we need a try here.
+        try
+          {
+            warning_with_id ("Octave:possible-matlab-short-circuit-operator",
+                            "Matlab-style short-circuit operation performed for operator %c",
+                            type);
+          }
+        CATCH_EXECUTION_EXCEPTION
+      }
+  }
+  DISPATCH ();
+force_assign:
+  {
+    // The next instruction is the slot number
+    int slot = arg0;
+
+    octave_value &ov_rhs = TOP_OV ();
+    octave_value &ov_lhs = bsp[slot].ov;
+
+    ov_lhs.maybe_call_dtor ();
+
+    if (ov_rhs.vm_need_storable_call ())
+      ov_rhs.make_storable_value (); // Some types have lazy copy
+
+    if (OCTAVE_LIKELY (!ov_lhs.is_ref ()))
+      ov_lhs = std::move (ov_rhs); // Note move
+    else
+      ov_lhs.ref_rep ()->set_value (std::move (ov_rhs));
+
+    STACK_DESTROY (1);
+  }
+  DISPATCH();
+push_nil:
+  {
+    PUSH_OV(octave_value{});
+  }
+  DISPATCH_1BYTEOP();
+throw_iferrorobj:
+  {
+    octave_value& ov_top = TOP_OV ();
+
+    if (ov_top.is_defined ())
+      {
+        // This "error object" is created by the unwind: code
+        // and e.g. not from a user's error
+        octave_scalar_map map = ov_top.scalar_map_value ();
+
+        bool is_err_obj = map.isfield("message") &&
+                          map.isfield ("identifier") &&
+                          map.isfield ("stack");
+
+        if (!is_err_obj)
+          PANIC ("Strange error object on stack");
+
+        octave_value msg = map.getfield ("message");
+        octave_value id = map.getfield ("identifier");
+
+        STACK_DESTROY (1);
+
+        std::string s_msg  = msg.string_value ();
+        std::string s_id = id.string_value ();
+
+        octave_map err_stack = map.contents ("stack").xmap_value ("ERR.STACK must be a struct");
+
+        // Are we unwinding an interrupt exception?
+        if (m_unwinding_interrupt)
+          {
+            (*sp++).i = static_cast<int> (error_type::INTERRUPT_EXC);
+            goto unwind;
+          }
+
+        // On a rethrow, the C++ exception is always base class execution_exception.
+        // We use rethrow_error() to recreate a stack info object from the octave_map
+        // in an easy way.
+        try
+        {
+          error_system& es = m_tw->get_interpreter().get_error_system ();
+          es.rethrow_error (s_id, s_msg, err_stack);
+        }
+        catch(execution_exception& e)
+        {
+          (*sp++).pee =  new execution_exception {e};
+        }
+
+        (*sp++).i = static_cast<int> (error_type::EXECUTION_EXC);
+        goto unwind;
+      }
+    else
+      STACK_DESTROY (1);
+  }
+  DISPATCH_1BYTEOP();
+
+index_struct_call:
+  {
+    // This opcode is a setup for a chain of opcodes that each does a part
+    // in a chained subsref.
+    //
+    // It is done like this since there can be bytecode function calls in the middle
+    // of the chain and the call can only return to an opcode, not some arbitrary point in
+    // the C++ code.
+
+    int nargout = arg0;
+    int slot = POP_CODE_USHORT ();
+    int n_args_on_stack = POP_CODE ();
+    char type = POP_CODE ();
+
+    // The object being indexed is on the stack under the arguments
+    octave_value &ov = sp[-1 - n_args_on_stack].ov;
+
+    // If there is a slot specified, we need to check if we need to call
+    // its octave_value (e.g. a handle) or a function corresponding to its name.
+    if (slot)
+      {
+        if (ov.is_nil ())
+          {
+            // The slot the object being indexed was pushed from
+            octave_value &slot_ov = bsp[slot].ov;
+
+            // Put a function cache object in the slot and in the local ov
+            ov = octave_value (new octave_fcn_cache (name_data[slot]));
+            if (slot_ov.is_ref())
+              slot_ov.ref_rep ()->set_value (ov);
+            else
+              slot_ov = ov;
+          }
+
+        // Should we call the object?
+        if (ov.vm_dispatch_call ()
+            == octave_base_value::vm_call_dispatch_type::OCT_CALL)
+          {
+            CHECK_PANIC (ov.has_function_cache ());
+
+            octave_function *fcn;
+
+            octave_value_list ovl;
+            // The operands are on the top of the stack
+            POP_STACK_RANGE_TO_OVL (ovl, sp - n_args_on_stack, sp);
+
+            try
+              {
+                if (type == '(')
+                  {
+                    fcn = ov.get_cached_fcn (ovl);
+                  }
+                else // { or .
+                  {
+                    fcn = ov.get_cached_fcn (static_cast<octave_value*> (nullptr), static_cast<octave_value*> (nullptr));
+                  }
+              }
+            CATCH_EXECUTION_EXCEPTION // parse errors might throw in classdefs
+
+            if (! fcn)
+              {
+                (*sp++).ps = new std::string {name_data[slot]};
+                (*sp++).i = static_cast<int> (error_type::ID_UNDEFINED);
+                goto unwind;
+              }
+
+            try
+              {
+                // TODO: Bytecode call
+
+                octave_value_list retval;
+                m_tw->set_active_bytecode_ip (ip - code);
+                if (type == '(')
+                  {
+                     // Skip the following subsref. Need to check for nargout extension.
+                    if (*ip == static_cast<unsigned char> (INSTR::EXT_NARGOUT))
+                      ip += 7; // Skip EXT_NARGOUT + STRUCT_INDEX_SUBCALL
+                    else
+                      ip += 6; // Skip STRUCT_INDEX_SUBCALL
+                    retval = fcn->call (*m_tw, nargout, ovl);
+                  }
+                else
+                  {
+                    retval = fcn->call (*m_tw, nargout, {});
+                  }
+
+                STACK_DESTROY (1); // Destroy the ov being indexed
+                PUSH_OV (retval.first_or_nil_ov ()); // Push the next ov to be indexed
+              }
+            CATCH_INTERRUPT_EXCEPTION
+            CATCH_INDEX_EXCEPTION
+            CATCH_EXECUTION_EXCEPTION
+            CATCH_BAD_ALLOC
+            CATCH_EXIT_EXCEPTION
+          }
+      }
+
+  // The next instruction is a INDEX_STRUCT_SUBCALL
+  // One for each part of the chain
+  }
+  DISPATCH (); // TODO: Make hardcoded goto to index_struct_subcall:
+
+index_struct_n:
+  {
+    int nargout = arg0;
+
+    int slot = POP_CODE_USHORT (); // Needed if we need a function lookup
+    int slot_for_field = POP_CODE_USHORT ();
+
+    octave_value &ov = TOP_OV ();
+
+    std::string field_name = name_data [slot_for_field];
+
+    octave_value ov_field_name {field_name};
+
+    octave_value_list retval;
+
+    // TODO: Should be a "simple_subsref for "{" and "."
+    octave_value_list ovl_idx;
+    ovl_idx.append (ov_field_name);
+
+    std::list<octave_value_list> idx;
+    idx.push_back (ovl_idx);
+
+    try
+      {
+        m_tw->set_active_bytecode_ip (ip - code);
+        retval = ov.subsref(".", idx, nargout);
+
+        // TODO: Kludge for e.g. "m = containers.Map;" which returns a function.
+        //       Should preferably be done by .subsref?
+        octave_value val = (retval.length () ? retval(0) : octave_value ());
+        if (val.is_function ())
+          {
+            octave_function *fcn = val.function_value (true);
+
+            if (fcn)
+              {
+                retval = fcn->call (*m_tw, nargout, {});
+              }
+          }
+
+        idx.clear ();
+      }
+    CATCH_INTERRUPT_EXCEPTION
+    CATCH_INDEX_EXCEPTION_WITH_NAME
+    CATCH_EXECUTION_EXCEPTION
+    CATCH_BAD_ALLOC
+    CATCH_EXIT_EXCEPTION
+
+    STACK_DESTROY (1);
+    EXPAND_CSLIST_PUSH_N_OVL_ELEMENTS_TO_STACK (retval, nargout);
+  }
+  DISPATCH ();
+
+subasgn_struct:
+  {
+    int slot = arg0;
+    int field_slot = POP_CODE_USHORT ();
+
+    // The top of the stack is the rhs value
+    octave_value &rhs = TOP_OV ();
+
+    // The ov to subassign to
+    octave_value &ov = bsp[slot].ov;
+
+    // TODO: Room for performance improvement here maybe
+    if (OCTAVE_LIKELY (!ov.is_ref ()))
+      ov.make_unique ();
+    else
+      ov.ref_rep ()->ref ().make_unique ();
+
+    // TODO: Ugly containers
+    std::list<octave_value_list> idx;
+    octave_value_list ovl;
+
+    std::string field_name = name_data[field_slot];
+
+    octave_value ov_field_name {field_name};
+
+    ovl.append (ov_field_name);
+
+    idx.push_back (ovl);
+
+    // E.g. scalars do not update themselves in place
+    // but create a new octave_value, so we need to
+    // copy the return value to the slot.
+    try
+      {
+        ov = ov.subsasgn (".", idx, rhs);
+      }
+    CATCH_INTERRUPT_EXCEPTION
+    CATCH_INDEX_EXCEPTION_WITH_NAME
+    CATCH_EXECUTION_EXCEPTION
+    CATCH_BAD_ALLOC
+    CATCH_EXIT_EXCEPTION
+
+    STACK_DESTROY (1);
+  }
+  DISPATCH ();
+
+subasgn_cell_id:
+  {
+    // The args to the subassign are on the operand stack
+    int slot = arg0;
+    int nargs = *ip++;
+
+    // The top of the stack is the rhs value
+    octave_value &rhs = TOP_OV ();
+    // First argument
+    stack_element *parg = sp - 1 - nargs;
+
+    // Move the args to an ovl
+    // TODO: Should actually be a move
+    octave_value_list args;
+    for (int i = 0; i < nargs; i++)
+    {
+      octave_value &arg = parg[i].ov;
+      // We need to expand cs-lists
+      if (arg.is_cs_list ())
+        args.append (arg.list_value ());
+      else
+        args.append (arg);
+    }
+
+    // The ov to subassign to
+    octave_value &ov = bsp[slot].ov;
+    // TODO: Room for performance improvement here maybe
+    if (OCTAVE_LIKELY (!ov.is_ref ()))
+      ov.make_unique ();
+    else
+      ov.ref_rep ()->ref ().make_unique ();
+
+    // TODO: Ugly containers
+    std::list<octave_value_list> idx;
+    idx.push_back (args);
+
+    try
+      {
+        // E.g. scalars do not update themselves in place
+        // but create a new octave_value, so we need to
+        // copy the return value to the slot.
+        ov = ov.subsasgn("{", idx, rhs);
+      }
+    CATCH_INTERRUPT_EXCEPTION
+    CATCH_INDEX_EXCEPTION_WITH_NAME
+    CATCH_EXECUTION_EXCEPTION
+    CATCH_BAD_ALLOC
+    CATCH_EXIT_EXCEPTION
+
+    // Destroy the args on the operand stack as well as rhs
+    STACK_DESTROY (nargs + 1);
+  }
+  DISPATCH ();
+
+subassign_obj:
+  {
+    // The args to the subassign are on the operand stack
+    int nargs = arg0;
+    char type = *ip++;
+
+    // First argument
+    stack_element *parg = sp - nargs;
+    // lhs is under the args -- the target for the subassign
+    octave_value &lhs = (sp - nargs - 1)->ov;
+    lhs.make_unique (); // TODO: Room for performance improvement here maybe
+    // rhs is under the lhs
+    octave_value &rhs = (sp - nargs - 2)->ov; // lhs is written to this stack position
+
+    // Move the args to an ovl
+    // TODO: Should actually be a move
+    octave_value_list args;
+    for (int i = 0; i < nargs; i++)
+    {
+      octave_value &arg = parg[i].ov;
+      // We need to expand cs-lists
+      if (arg.is_cs_list ())
+        args.append (arg.list_value ());
+      else
+        args.append (arg);
+    }
+
+    // TODO: Ugly containers
+    std::list<octave_value_list> idx;
+    idx.push_back (args);
+
+    try
+      {
+        // E.g. scalars do not update themselves in place
+        // but create a new octave_value, so we need to
+        // copy the return value to the slot.
+        lhs = lhs.subsasgn(std::string {type}, idx, rhs);
+      }
+    CATCH_INTERRUPT_EXCEPTION
+    CATCH_INDEX_EXCEPTION
+    CATCH_EXECUTION_EXCEPTION
+    CATCH_BAD_ALLOC
+    CATCH_EXIT_EXCEPTION
+
+    // We want lhs on the top of the stack after dropping all
+    // the args to SUBASSIGN_OBJ, so we move it to where rhs is
+    rhs = std::move (lhs);
+
+    // Destroy the args on the operand stack as well as the
+    // stack position that we moved lhs out of.
+    STACK_DESTROY (nargs + 1);
+
+    // lhs is on the top of the stack now
+  }
+  DISPATCH ();
+
+index_obj:
+  {
+    int nargout = arg0;
+    int has_slot = *ip++;
+    int slot = POP_CODE_USHORT ();
+    int n_args_on_stack = *ip++;
+    char type = *ip++;
+
+    // The object to index is before the args on the stack
+    octave_value &ov = (sp[-1 - n_args_on_stack]).ov;
+
+    switch (ov.vm_dispatch_call ())
+      {
+        case octave_base_value::vm_call_dispatch_type::OCT_NESTED_HANDLE:
+          PANIC ("Invalid dispatch");
+        case octave_base_value::vm_call_dispatch_type::OCT_SUBSREF:
+          {
+            // TODO: subsref should take ovl instead and be chained,
+            // or something smarter
+            std::list<octave_value_list> idx; // TODO: mallocs!
+
+            // Make an ovl with the args
+            octave_value_list ovl;
+            // The operands are on the top of the stack
+            POP_STACK_RANGE_TO_OVL (ovl, sp - n_args_on_stack, sp);
+
+            idx.push_back(ovl);
+
+            octave_value_list retval;
+
+            try
+              {
+                m_tw->set_active_bytecode_ip (ip - code);
+                retval = ov.subsref(std::string {type}, idx, nargout);
+                idx.clear ();
+              }
+            CATCH_INTERRUPT_EXCEPTION
+            CATCH_INDEX_EXCEPTION_WITH_MAYBE_NAME (has_slot)
+            CATCH_EXECUTION_EXCEPTION
+            CATCH_BAD_ALLOC
+            CATCH_EXIT_EXCEPTION
+
+            bool is_fcn = (retval.length () ?
+                            retval(0).is_function() : false);
+
+            // "FIXME: when can the following happen?  In what case does indexing
+            //  result in a value that is a function?  Classdef method calls?
+            //  Something else?"
+
+            if (OCTAVE_LIKELY (!is_fcn))
+              {
+                idx.clear ();
+                // TODO: Necessary? I guess it might trigger dtors
+                // or something?
+                ov = octave_value ();
+              }
+            else
+              {
+                octave_value val = retval(0);
+                octave_function *fcn = val.function_value (true);
+
+                if (fcn)
+                  {
+                    octave_value_list final_args;
+
+                    if (! idx.empty ())
+                      final_args = idx.front ();
+
+                    try
+                      {
+                        m_tw->set_active_bytecode_ip (ip - code);
+                        retval = fcn->call (*m_tw, nargout, final_args);
+                      }
+                    CATCH_INTERRUPT_EXCEPTION
+                    CATCH_INDEX_EXCEPTION_WITH_MAYBE_NAME (has_slot)
+                    CATCH_EXECUTION_EXCEPTION
+                    CATCH_BAD_ALLOC
+                    CATCH_EXIT_EXCEPTION
+                  }
+
+                idx.clear ();
+                ov = octave_value ();
+                val = octave_value ();
+              }
+
+            // Destroy the indexed variable on the stack
+            STACK_DESTROY (1);
+            EXPAND_CSLIST_PUSH_N_OVL_ELEMENTS_TO_STACK (retval, nargout);
+          }
+        break;
+
+        case octave_base_value::vm_call_dispatch_type::OCT_FN_LOOKUP:
+          {
+            // If the first object is not an identifier we can't look it up for
+            // a function call.
+            if (!has_slot)
+              {
+                (*sp++).ps = new std::string {"temporary object"};
+                (*sp++).i = static_cast<int> (error_type::ID_UNDEFINED);
+                goto unwind;
+              }
+
+            if (! ov.is_nil ())
+              {
+                TODO ("Not nil object for fcn cache replacement");
+              }
+
+            // It is probably a function call.
+            // Put a function cache object in the slot and in the local ov
+            // and jump into the if clause above to search for some function
+            // to call.
+            ov = octave_value (new octave_fcn_cache (name_data[slot]));
+            if (bsp[slot].ov.is_ref ())
+              bsp[slot].ov.ref_rep ()->set_value (ov);
+            else
+              bsp[slot].ov = ov;
+          }
+        // Fallthrough
+        case octave_base_value::vm_call_dispatch_type::OCT_CALL:
+        case octave_base_value::vm_call_dispatch_type::OCT_HANDLE:
+        case octave_base_value::vm_call_dispatch_type::OCT_OBJECT:
+          {
+            octave_function *fcn;
+            try
+              {
+                stack_element *first_arg = &sp[-n_args_on_stack];
+                stack_element *end_arg = &sp[0];
+                fcn = ov.get_cached_fcn (first_arg, end_arg);
+              }
+            CATCH_EXECUTION_EXCEPTION
+
+            if (! fcn)
+              {
+                if (has_slot)
+                  (*sp++).ps = new std::string {name_data[slot]};
+                else
+                  (*sp++).ps = new std::string {"temporary object"};
+                (*sp++).i = static_cast<int> (error_type::ID_UNDEFINED);
+                goto unwind;
+              }
+
+            if (fcn->is_compiled ())
+              {
+                octave_user_code *usr_fcn = static_cast<octave_user_code *> (fcn);
+                // A lot of code in this define
+                int caller_nvalback = nargout; // Caller wants as many values as it wants the callee to produce
+                MAKE_BYTECODE_CALL
+
+                // Now dispatch to first instruction in the
+                // called function
+              }
+            else
+              {
+                // Make an ovl with the args
+                octave_value_list ovl;
+                // The operands are on the top of the stack
+                POP_STACK_RANGE_TO_OVL (ovl, sp - n_args_on_stack, sp);
+
+                try
+                  {
+                    m_tw->set_active_bytecode_ip (ip - code);
+                    octave_value_list ret = fcn->call (*m_tw, nargout, ovl);
+
+                    STACK_DESTROY (1);
+                    EXPAND_CSLIST_PUSH_N_OVL_ELEMENTS_TO_STACK (ret, nargout);
+                  }
+                CATCH_INTERRUPT_EXCEPTION
+                CATCH_INDEX_EXCEPTION
+                CATCH_EXECUTION_EXCEPTION
+                CATCH_BAD_ALLOC
+                CATCH_EXIT_EXCEPTION
+
+              }
+          }
+      }
+
+  }
+  DISPATCH ();
+load_far_cst:
+  {
+    ip--;
+    int offset = POP_CODE_INT ();
+
+    // Copy construct it into the top of the stack
+    new (sp++) octave_value (data [offset]);
+
+    DISPATCH();
+  }
+
+anon_maybe_set_ignore_output:
+  {
+    if (m_output_ignore_data)
+      {
+        // We want to propagate the caller's ignore matrix.
+        octave_value current_ignore_matrix = m_tw->get_auto_fcn_var (stack_frame::auto_var_type::IGNORED);
+        m_output_ignore_data->set_ignore_anon (*this, current_ignore_matrix);
+      }
+  }
+  DISPATCH_1BYTEOP ();
+
+set_ignore_outputs:
+  {
+    if (!m_output_ignore_data)
+      {
+        m_output_ignore_data = new output_ignore_data;
+      }
+
+    int n_ignored = arg0;
+    int n_total = POP_CODE ();
+
+    Matrix M;
+    M.resize (1, n_ignored);
+
+    std::set<int> set_ignored;
+
+    for (int i = 0; i < n_ignored; i++)
+      {
+        int ignore_idx = POP_CODE ();
+        M (i) = ignore_idx;
+        set_ignored.insert (ignore_idx);
+      }
+
+    octave_value ignore_matrix {M};
+
+    // For calls into m-functions etc
+    auto *new_lvalue_list = new std::list<octave_lvalue> {};
+
+    for (int i = 0; i < n_total; i++)
+      {
+        octave_lvalue lval ({}, m_tw->get_current_stack_frame ());
+        if (set_ignored.find (i + 1) != set_ignored.end ())
+          lval.mark_black_hole ();
+        new_lvalue_list->push_back (lval);
+      }
+
+    m_output_ignore_data->set_ignore (*this, ignore_matrix, new_lvalue_list);
+  }
+  DISPATCH();
+
+clear_ignore_outputs:
+  {
+    if (m_output_ignore_data)
+      m_output_ignore_data->clear_ignore (*this);
+
+    output_ignore_data::maybe_delete_ignore_data (*this, 1);
+
+    // Clear any value written to the %~X slot(s)
+    int n_slots = arg0;
+    for (int i = 0; i < n_slots; i++)
+      {
+        int slot = POP_CODE_USHORT ();
+
+        octave_value &ov = bsp[slot].ov;
+
+        if (ov.get_count () == 1)
+          ov.call_object_destructor ();
+
+        ov = octave_value{};
+      }
+  }
+  DISPATCH();
+
+subassign_chained:
+  {
+    int slot = arg0;
+    octave_value::assign_op op = static_cast<octave_value::assign_op> (*ip++);
+    int n_chained = POP_CODE ();
+    std::vector<int> v_n_args;
+    std::string type (n_chained, 0);
+
+    for (int i = 0; i < n_chained; i++)
+      {
+        v_n_args.push_back (POP_CODE ());
+        type [i] = POP_CODE ();
+      }
+
+    std::list<octave_value_list> idx;
+    for (int i = 0; i < n_chained; i++)
+      {
+        octave_value_list ovl;
+        // foo (a1, a2).bar (a3, a4)
+        // are:
+        // TOP a4, a3, a2, a1
+        // on the stack now.
+        int n_args = v_n_args [n_chained - i - 1];
+        for (int j = 0; j < n_args; j++)
+          {
+            octave_value &arg = TOP_OV ();
+            if (arg.is_cs_list ())
+              ovl.append (arg.list_value ().reverse ()); // Expand cs-list
+            else
+              ovl.append (std::move (arg));
+            STACK_DESTROY (1);
+          }
+        ovl.reverse ();
+        idx.push_back (ovl);
+      }
+
+    idx.reverse ();
+
+    octave_value lhs = std::move (TOP_OV ());
+    STACK_DESTROY (1);
+    octave_value rhs = std::move (TOP_OV ());
+    STACK_DESTROY (1);
+
+    try
+      {
+        if (type.size () && type.back () != '(' && lhs_assign_numel (lhs, type, idx) != 1)
+          err_invalid_structure_assignment ();
+
+        if (slot)
+          {
+            octave_value &lhs_slot = bsp[slot].ov;
+
+            // We don't need the lhs value put on the stack since we are working on a slot.
+            // Clear it to make assigns not need a new copy.
+            lhs = octave_value {};
+
+            if (OCTAVE_UNLIKELY (lhs_slot.is_ref ()))
+              {
+                octave_value &ov_ref = lhs_slot.ref_rep ()->ref ();
+                ov_ref.make_unique ();
+                ov_ref.assign (op, type, idx, rhs);
+              }
+            else
+              lhs_slot.assign (op, type, idx, rhs);
+
+            // Push a dummy octave_value. Always popped by a POP opcode.
+            PUSH_OV (octave_value {});
+          }
+        else
+          {
+            lhs.assign (op, type, idx, rhs);
+            // The value is pushed and used for further chaining.
+            PUSH_OV (lhs);
+          }
+      }
+    CATCH_INTERRUPT_EXCEPTION
+    CATCH_INDEX_EXCEPTION
+    CATCH_EXECUTION_EXCEPTION
+    CATCH_BAD_ALLOC
+    CATCH_EXIT_EXCEPTION
+  }
+  DISPATCH ();
+
+set_slot_to_stack_depth:
+  {
+    int slot = arg0;
+    int stack_depth = sp - bsp;
+    bsp[slot].ov = octave_value {stack_depth};
+  }
+  DISPATCH ();
+dupn:
+  {
+    int offset = arg0;
+    int n = POP_CODE ();
+    stack_element *first = sp - n - offset;
+    for (int i = 0; i < n; i++)
+      PUSH_OV (first[i].ov);
+  }
+  DISPATCH ();
+load_cst_alt2:
+  {
+    int offset = arg0;
+
+    // Copy construct it into the top of the stack
+    new (sp++) octave_value (data [offset]);
+
+    DISPATCH ();
+  }
+load_cst_alt3:
+  {
+    int offset = arg0;
+
+    // Copy construct it into the top of the stack
+    new (sp++) octave_value (data [offset]);
+
+    DISPATCH ();
+  }
+load_cst_alt4:
+  {
+    int offset = arg0;
+
+    // Copy construct it into the top of the stack
+    new (sp++) octave_value (data [offset]);
+
+    DISPATCH ();
+  }
+load_2_cst:
+{
+  // We are pushing two constants to the stack. E.g. for "3 * 2".
+  // The next instruction is the offset in the data of the lhs.
+  // rhs is right after.
+  int offset = arg0;
+
+  // Copy construct the two constants onto the top of the stack
+  new (sp++) octave_value (data [offset]);     // lhs in a binop
+  new (sp++) octave_value (data [offset + 1]); // rhs
+
+  DISPATCH ();
+}
+
+ret_anon:
+  {
+    // We need to tell the bytecode frame we are unwinding so that it can save
+    // variables on the VM stack if it is referenced from somewhere else.
+    m_tw->get_current_stack_frame ()->vm_unwinds ();
+
+    panic_unless (N_RETURNS () == -128);
+
+    int n_returns_callee = bsp[0].i; // Nargout on stack
+    if (n_returns_callee == 0)
+      n_returns_callee = 1;
+
+    int n_locals_callee = N_LOCALS (); // Amount of arguments, purely local variables and %nargout
+
+    int n_ret_on_stack = sp - bsp - n_locals_callee;
+
+    // Assert that the stack pointer is back where it should be, i.e. that there are between
+    // zero and nargout return values.
+    panic_unless (n_ret_on_stack >= 0 && n_ret_on_stack <= n_returns_callee);
+
+    stack_element *first_ret = sp - n_ret_on_stack;
+
+    // Destroy locals
+    //
+    // Note that we destroy from the bottom towards
+    // the top of the stack to call dtors in the same
+    // order as the treewalker.
+
+    stack_element *first_pure_local = bsp + 1;
+    while (first_pure_local != first_ret)
+      {
+        (*first_pure_local++).ov.~octave_value ();
+      }
+
+    if (OCTAVE_UNLIKELY (m_profiler_enabled))
+      {
+        auto p = vm::m_vm_profiler;
+        if (p)
+          {
+            std::string fn_name = data[2].string_value (); // profiler_name () queried at compile time
+            p->exit_fn (fn_name);
+          }
+      }
+
+    // If we have any active ~/"black hole", e.g. [~] = foo() in the stack
+    // the m_output_ignore_data pointer is live. We need to pop and reset
+    // lvalue lists for the tree walker.
+    if (OCTAVE_UNLIKELY (m_output_ignore_data))
+      {
+        m_output_ignore_data->pop_frame (*this);
+        output_ignore_data::maybe_delete_ignore_data (*this, 0);
+      }
+
+    // Are we at the root routine?
+    if (bsp == rsp)
+      {
+        CHECK (!m_output_ignore_data); // Should not be active
+
+        // Collect return values in octave_value_list.
+        // Skip %nargout, the first value, which is an integer.
+        // n_returns_callee includes %nargout, but root_nargout doesn't.
+        octave_value_list ret;
+
+        int j;
+        // nargout 0 should still give one return value, if there is one
+        int n_root_wanted = std::max (root_nargout, 1);
+        for (j = 0; j < n_ret_on_stack && j < n_root_wanted; j++)
+          {
+            int idx = n_ret_on_stack - 1 - j;
+            ret.append (std::move (first_ret[idx].ov));
+            first_ret[idx].ov.~octave_value ();
+          }
+        // Destroy rest of return values, if any
+        for (; j < n_ret_on_stack; j++)
+          {
+            int idx = n_ret_on_stack - j;
+            first_ret[idx].ov.~octave_value ();
+          }
+
+        //Note: Stack frame object popped by caller
+        CHECK_STACK (0);
+        this->m_dbg_proper_return = true;
+
+        m_tw->set_lvalue_list (m_original_lvalue_list);
+        return ret;
+      }
+
+    // If the root stack pointer is not the same as the base pointer,
+    // we are returning from a bytecode routine to another bytecode routine,
+    // so we have to restore the caller stack frame and cleanup the callee's.
+    //
+    // Essentially do the same thing as in the call but in reverse order.
+
+    // Point sp one past the last return value
+    stack_element *caller_stack_end = bsp;
+    sp = caller_stack_end; // sp points to one past caller stack
+
+    // The amount of return values the caller wants, as stored last on the caller stack.
+    // Note that this is not necessarily the same as nargout, the amount of return values the caller
+    // want the callee to produce, stored first on callee stack.
+    int caller_nval_back = (*--sp).u;
+
+    // Restore ip
+    ip = (*--sp).puc;
+
+    // Restore bsp
+    bsp = (*--sp).pse;
+
+    // Restore id names
+    name_data = (*--sp).ps;
+
+    // Restore data
+    data = (*--sp).pov;
+
+    // Restore code
+    code = (*--sp).puc;
+
+    // Restore unwind data
+    unwind_data = (*--sp).pud;
+
+    // Restore the stack pointer. The stored address is the first arg
+    // on the caller stack, or where it would have been if there are no args.
+    // The args were moved to the callee stack and destroyed on the caller
+    // stack in the call.
+    sp = sp[-1].pse;
+
+    // We now have the object that was called on the stack, destroy it
+    STACK_DESTROY (1);
+
+    // Move the callee's return values to the top of the stack of the caller.
+    // Renaming variables to keep my sanity.
+    int n_args_caller_expects = caller_nval_back;
+    int n_args_callee_has = n_ret_on_stack; // Excludes %nargout
+    int n_args_to_move = std::min (n_args_caller_expects, n_args_callee_has);
+    int n_args_actually_moved = 0;
+
+    // If no return values is requested but there exists return values,
+    // we need to push one to be able to write it to ans.
+    if (n_args_caller_expects == 0 && n_args_callee_has)
+      {
+        n_args_actually_moved++;
+        PUSH_OV (std::move (first_ret[0].ov));
+      }
+    // If the callee isn't returning anything, we need to push a
+    // nil object, since the caller always anticipates at least
+    // one object, even for nargout == 0.
+    else if (n_args_caller_expects == 0 && !n_args_callee_has)
+      PUSH_OV();
+    // If the stacks will overlap due to many returns, do copy via container
+    else if (sp + n_args_caller_expects >= caller_stack_end)
+      {
+        // This pushes 'n_args_to_move' number of return values and 'n_args_caller_expects - n_args_to_move'
+        // number of nils.
+        copy_many_args_to_caller (sp, first_ret, n_args_to_move, n_args_caller_expects);
+        n_args_actually_moved = n_args_caller_expects;
+        sp += n_args_actually_moved;
+      }
+    // Move 'n_args_to_move' return value from callee to caller
+    else
+      {
+        // If the caller wants '[a, b, ~]' and the callee has 'd e'
+        // we need to push 'nil' 'd' 'e'
+        for (int i = n_args_to_move; i < n_args_caller_expects; i++)
+          PUSH_OV ();
+        for (int i = 0; i < n_args_to_move; i++)
+          {
+            // Move into caller stack. Note that the order is reversed, such that
+            // a b c on the callee stack becomes c b a on the caller stack.
+            octave_value &arg = first_ret[i].ov;
+
+            PUSH_OV (std::move (arg));
+          }
+        n_args_actually_moved = n_args_caller_expects;
+      }
+
+    // Destroy the unused return values on the callee stack
+    for (int i = 0; i < n_args_callee_has; i++)
+      {
+        int idx = n_args_callee_has - 1 - i;
+        first_ret[idx].ov.~octave_value (); // Destroy ov in callee
+      }
+
+    // Pop the current dynamic stack frame
+    std::shared_ptr<stack_frame> fp = m_tw->pop_return_stack_frame ();
+    // If the pointer is not shared, stash it in a cache which is used
+    // to avoid having to allocate shared pointers each frame push.
+    if (fp.unique () && m_frame_ptr_cache.size () < 8)
+      {
+        fp->vm_clear_for_cache ();
+        m_frame_ptr_cache.push_back (std::move (fp));
+      }
+
+    // Continue execution back in the caller
+  }
+  DISPATCH ();
+
+/* Check whether we should enter the debugger on the next ip */
+{
+  bool onebyte_op;
+  if (0)
+    debug_check:
+    onebyte_op = false;
+  else if (0)
+    debug_check_1b:
+    onebyte_op = true;
+
+  {
+    int tmp_ip = ip - code;
+    if (onebyte_op)
+      tmp_ip--;
+
+    if (OCTAVE_UNLIKELY (m_trace_enabled))
+      {
+        PRINT_VM_STATE ("Trace: ");
+      }
+
+    // Handle the VM profiler
+    if (OCTAVE_UNLIKELY (m_profiler_enabled))
+      {
+        int64_t t1 = vm_profiler::unow ();
+
+        auto p = m_vm_profiler;
+        if (!p) // Only happens as a race between m_profiler_enabled and m_vm_profiler
+          goto debug_check_end;
+
+        std::string fn_name = data[2].string_value (); // profiler_name () queried at compile time
+        vm_profiler::vm_profiler_fn_stats &stat = p->m_map_fn_stats[fn_name];
+
+        if (!stat.m_v_t.size ())
+          {
+            // The profiler got enabled after the current function was called.
+            p->enter_fn (fn_name, "", unwind_data, name_data, code);
+            stat.m_v_t.back () = -1;
+            stat.m_v_ip.back () = ip - code; // We are not at function start, so set ip to proper value.
+          }
+        else if (stat.m_v_t.back () != -1)
+          {
+            int64_t t0 = stat.m_v_t.back ();
+            int64_t dt = t1 - t0;
+
+            stat.add_t (dt);
+            p->add_t (dt);
+          }
+      }
+
+    // Handle the echo functionality.
+    if (m_tw->echo ())
+      {
+        int ip_offset = ip - code;
+        // In the beginning of functions we need to push an echo state for the function.
+        // push_echo_state () checks e.g. if the current function is supposed to be printed.
+        // The check is querried with echo_state ().
+        if (ip_offset == 4) // TODO: Make constexpr for first opcode offset
+          {
+            int type = m_unwind_data->m_is_script ? tree_evaluator::ECHO_SCRIPTS : tree_evaluator::ECHO_FUNCTIONS;
+            m_tw->push_echo_state (type, m_unwind_data->m_file);
+          }
+
+        if (!m_tw->echo_state ())
+          goto bail_echo;
+
+        auto it = unwind_data->m_ip_to_tree.find (tmp_ip);
+        if (it == unwind_data->m_ip_to_tree.end ())
+          goto bail_echo;
+
+        tree *t = it->second;
+        if (!t)
+          goto bail_echo;
+
+        int line = t->line ();
+        if (line < 0)
+          line = 1;
+
+        // We don't want to echo the condition checks in for loops, but
+        // reset the "last echoed" line to echo the next line properly.
+        switch (static_cast<INSTR> (*ip))
+          {
+            case INSTR::FOR_COND:
+            case INSTR::FOR_COMPLEX_COND:
+              m_echo_prior_op_was_cond = true;
+              goto bail_echo;
+            default:
+              break;
+          }
+
+        if (m_echo_prior_op_was_cond)
+          {
+            m_echo_prior_op_was_cond = false;
+            m_tw->set_echo_file_pos (line);
+          }
+
+        try
+          {
+            m_tw->echo_code (line);
+          }
+        catch (execution_exception &)
+          {
+            // echo_code() might throw if there is no file info
+            // attached to the executing function.
+            // Just ignore it.
+            // TODO: Might be a bug? Does this apply to the tree_evaluator?
+            //       Run "echo on all; test bytecode.tst" to recreate.
+          }
+
+        m_tw->set_echo_file_pos (line + 1);
+      }
+bail_echo:
+
+    // TODO: Check all trees one time and cache the result somewhere?
+    //       Until another bp is set? Debugging will be quite slow
+    //       with one check for each op-code.
+
+    if (m_tw->debug_mode ())
+      {
+        auto it = unwind_data->m_ip_to_tree.find (tmp_ip);
+
+        if (it == unwind_data->m_ip_to_tree.end ())
+          goto debug_check_end;
+
+        bool is_ret = *ip == static_cast<unsigned char> (INSTR::RET) || *ip == static_cast<unsigned char> (INSTR::RET_ANON);
+
+        m_sp = sp;
+        m_bsp = bsp;
+        m_rsp = rsp;
+        m_code = code;
+        m_data = data;
+        m_name_data = name_data;
+        m_ip = tmp_ip;
+        m_unwind_data = unwind_data;
+        m_tw->set_active_bytecode_ip (tmp_ip);
+
+        tree *t = it->second;
+
+        // do_breakpoint will check if there is a breakpoint attached
+        // to the relevant code and escape to the debugger repl
+        // if necessary.
+        if (t)
+          {
+            try
+              {
+                m_tw->do_breakpoint (t->is_active_breakpoint (*m_tw), is_ret);
+              }
+            CATCH_INTERRUPT_EXCEPTION
+            CATCH_INDEX_EXCEPTION
+            CATCH_EXECUTION_EXCEPTION
+            CATCH_BAD_ALLOC
+            CATCH_EXIT_EXCEPTION
+            catch (const quit_debug_exception &qde)
+              {
+                (*sp++).i = qde.all ();
+                (*sp++).i = static_cast<int>(error_type::DEBUG_QUIT);
+                goto unwind;
+              }
+          }
+      }
+  }
+  debug_check_end:
+  {
+    if (OCTAVE_UNLIKELY (m_profiler_enabled))
+      {
+        auto p = m_vm_profiler;
+
+        if (p)
+          {
+            std::string fn_name = data[2].string_value (); // profiler_name () queried at compile time
+            vm_profiler::vm_profiler_fn_stats &stat = m_vm_profiler->m_map_fn_stats[fn_name];
+
+            // If someone enabled profiling in the debugger we need to wait until
+            // the debug_check: block is ran next time.
+            if (stat.m_v_t.size())
+              {
+                int tmp_ip = ip - code;
+                if (onebyte_op)
+                  tmp_ip--;
+                stat.m_v_ip.back () = tmp_ip; // Sets a new 'currently running ip'
+                stat.m_v_t.back () = vm_profiler::unow (); // Sets a new timestamp for the current ip
+              }
+          }
+      }
+  }
+  if (onebyte_op)
+    {
+      int opcode = ip[-1];
+      arg0 = ip[0];
+      ip++;
+      goto *instr [opcode];
+    }
+  else
+    {
+      int opcode = ip[0];
+      arg0 = ip[1];
+      ip += 2;
+      goto *instr [opcode];
+    }
+}
+
+debug: // TODO: Remove
+  {
+    if (m_tw->debug_mode ())
+      {
+        m_ip = ip - code;
+        m_sp = sp;
+        m_tw->set_active_bytecode_ip (ip - code);
+
+        try
+          {
+            m_tw->enter_debugger ();
+          }
+        CATCH_INTERRUPT_EXCEPTION
+        CATCH_INDEX_EXCEPTION
+        CATCH_EXECUTION_EXCEPTION
+        CATCH_BAD_ALLOC
+        CATCH_EXIT_EXCEPTION
+      }
+  }
+  DISPATCH ();
+
+  wide:
+  {
+    int opcode = arg0; // The opcode to execute next is in arg0, i.e. ip[-1]
+    // The next opcode needs its arg0, which is an unsigned short instead of the usual byte
+    // that DISPATCH() writes to arg0.
+    arg0 = USHORT_FROM_UCHAR_PTR (ip);
+    ip += 2; // Forward ip so it points to after the widened argument
+    goto *instr [opcode];
+  }
+
+  ext_nargout:
+  {
+    // This opcode replaces the first opcode argument of the next opcode, with the
+    // current function's nargout.
+    //
+    // Anonymous functions need to have a dynamic "expression nargout" on the
+    // root expression since the "expression nargout" is decided by the caller.
+    // E.g. '[a b] = anon ()' yields 2 for the root expression in 'anon'.
+    //
+    // In an ordinary function "expression nargout" is decided by the source row.
+
+    int opcode = arg0; // The opcode to execute next is in arg0, i.e. ip[-1]
+    // The next opcode needs its arg0, which is supposed to be a nargout value
+    arg0 = bsp[0].i; // %nargout is stored in the first slot in the stack frame
+    ip++; // Forward ip so it points to after the nargout argument in the next opcode
+    goto *instr [opcode]; // Execute the next opcode
+  }
+
+  dup_move:
+  {
+    // Copy the top of the stack and move it n positions down the stack.
+    int n = arg0;
+
+    octave_value ov = sp[-1].ov;
+    sp[-1 - n].ov = ov;
+  }
+  DISPATCH ();
+
+  enter_script_frame:
+  {
+    auto fp = m_tw->get_current_stack_frame ();
+    fp->vm_enter_script ();
+  }
+  DISPATCH_1BYTEOP ();
+  exit_script_frame:
+  {
+    auto fp = m_tw->get_current_stack_frame ();
+    fp->vm_exit_script ();
+  }
+  DISPATCH_1BYTEOP ();
+
+  enter_nested_frame:
+  {
+    auto fp = m_tw->get_current_stack_frame ();
+    fp->vm_enter_nested ();
+  }
+  DISPATCH_1BYTEOP ();
+
+  install_function:
+  {
+    int slot = arg0;
+    int fn_cst_idx = POP_CODE_INT ();
+
+    std::string fn_name = name_data [slot];
+
+    octave_value fn = data[fn_cst_idx];
+
+    symbol_table& symtab = m_tw->get_interpreter ().get_symbol_table ();
+    symtab.install_cmdline_function (fn_name, fn);
+
+    // Make sure that any variable with the same name as the new
+    // function is cleared.
+    octave_value &ov = bsp[slot].ov;
+
+    if (ov.is_ref ())
+      ov.ref_rep ()->set_value (octave_value {});
+    else
+      ov = octave_value {};
+  }
+  DISPATCH ();
+
+  mul_cst:
+  MAKE_BINOP_CST_SELFMODIFYING (binary_op::op_mul, mul_cst_dbl, MUL_CST_DBL);
+  DISPATCH ();
+  mul_cst_dbl:
+  MAKE_BINOP_CST_SPECIALIZED (m_fn_dbl_mul, mul_cst, MUL_CST, m_scalar_typeid);
+  DISPATCH ();
+  add_cst:
+  MAKE_BINOP_CST_SELFMODIFYING (binary_op::op_add, add_cst_dbl, ADD_CST_DBL);
+  DISPATCH ();
+  add_cst_dbl:
+  MAKE_BINOP_CST_SPECIALIZED (m_fn_dbl_add, add_cst, ADD_CST, m_scalar_typeid);
+  DISPATCH ();
+  div_cst:
+  MAKE_BINOP_CST_SELFMODIFYING (binary_op::op_div, div_cst_dbl, DIV_CST_DBL);
+  DISPATCH ();
+  div_cst_dbl:
+  MAKE_BINOP_CST_SPECIALIZED (m_fn_dbl_div, div_cst, DIV_CST, m_scalar_typeid);
+  DISPATCH ();
+  sub_cst:
+  MAKE_BINOP_CST_SELFMODIFYING (binary_op::op_sub, sub_cst_dbl, SUB_CST_DBL);
+  DISPATCH ();
+  sub_cst_dbl:
+  MAKE_BINOP_CST_SPECIALIZED (m_fn_dbl_sub, sub_cst, SUB_CST, m_scalar_typeid);
+  DISPATCH ();
+  le_cst:
+  MAKE_BINOP_CST_SELFMODIFYING (binary_op::op_lt, le_cst_dbl, LE_CST_DBL);
+  DISPATCH ();
+  le_cst_dbl:
+  MAKE_BINOP_CST_SPECIALIZED (m_fn_dbl_le, le_cst, LE_CST, m_scalar_typeid);
+  DISPATCH ();
+  le_eq_cst:
+  MAKE_BINOP_CST_SELFMODIFYING (binary_op::op_le, le_eq_cst_dbl, LE_EQ_CST_DBL);
+  DISPATCH ();
+  le_eq_cst_dbl:
+  MAKE_BINOP_CST_SPECIALIZED (m_fn_dbl_le_eq, le_eq_cst, LE_EQ_CST, m_scalar_typeid);
+  DISPATCH ();
+  gr_cst_dbl:
+  MAKE_BINOP_CST_SPECIALIZED (m_fn_dbl_gr, gr_cst, GR_CST, m_scalar_typeid)
+  DISPATCH ();
+  gr_cst:
+  MAKE_BINOP_CST_SELFMODIFYING(binary_op::op_gt, gr_cst_dbl, GR_CST_DBL)
+  DISPATCH();
+  gr_eq_cst_dbl:
+  MAKE_BINOP_CST_SPECIALIZED (m_fn_dbl_gr_eq, gr_eq_cst, GR_EQ_CST, m_scalar_typeid)
+  DISPATCH();
+  gr_eq_cst:
+  MAKE_BINOP_CST_SELFMODIFYING(binary_op::op_ge, gr_eq_cst_dbl, GR_EQ_CST_DBL)
+  DISPATCH();
+  eq_cst_dbl:
+  MAKE_BINOP_CST_SPECIALIZED(m_fn_dbl_eq, eq_cst, EQ_CST, m_scalar_typeid)
+  DISPATCH();
+  eq_cst:
+  MAKE_BINOP_CST_SELFMODIFYING(binary_op::op_eq, eq_cst_dbl, EQ_CST_DBL)
+  DISPATCH();
+  neq_cst_dbl:
+  MAKE_BINOP_CST_SPECIALIZED(m_fn_dbl_neq, neq_cst, NEQ_CST, m_scalar_typeid)
+  DISPATCH();
+  neq_cst:
+  MAKE_BINOP_CST_SELFMODIFYING(binary_op::op_ne, neq_cst_dbl, NEQ_CST_DBL)
+  DISPATCH();
+  pow_cst_dbl:
+  MAKE_BINOP_CST_SPECIALIZED (m_fn_dbl_pow, pow_cst, POW_CST, m_scalar_typeid)
+  DISPATCH();
+  pow_cst:
+  MAKE_BINOP_CST_SELFMODIFYING(binary_op::op_pow, pow_cst_dbl, POW_CST_DBL)
+  DISPATCH();
+
+  index_struct_subcall:
+  {
+    // The INDEX_STRUCT_SUBCALL opcode is a chain of INDEX_STRUCT_SUBCALL opcodes,
+    // that are always preceded by INDEX_STRUCT_CALL.
+    //
+    // INDEX_STRUCT_SUBCALL executes a subexpression of a chain of subsrefs.
+    //
+    // The opcode is more or less a "stackification" of tree_index_expression::evaluate_n ().
+
+    int nargout = arg0;
+    // TODO: Kludge alert. Mirror the behaviour in ov_classdef::subsref
+    // where under certain conditions a magic number nargout of -1 is
+    // expected to maybe return a cs-list. "-1" in this context
+    // does not have the same meaning as in the VM, where it means
+    // a varargout with only one return symbol 'varargout'.
+    int subsref_nargout = nargout;
+    if (nargout == 255)
+      {
+        nargout = 1;
+        subsref_nargout = -1;
+      }
+    int i = *ip++;
+    int n = *ip++;
+    int n_args_on_stack = *ip++;
+    char type = *ip++;
+
+    // The object to index is before the args on the stack
+    octave_value &ov = (sp[-1 - n_args_on_stack]).ov;
+
+    bool ov_is_vm_chainargs_wrapper = ov.is_vm_chainargs_wrapper ();
+    bool need_stepwise_subsref = ov_need_stepwise_subsrefs(ov);
+
+    try
+      {
+        if (!ov_is_vm_chainargs_wrapper && need_stepwise_subsref)
+          {
+            switch (ov.vm_dispatch_call ())
+              {
+                case octave_base_value::vm_call_dispatch_type::OCT_NESTED_HANDLE:
+                  {
+                    // The last iteration the caller wants as many returned on the stack
+                    // as it wants the callee to produce.
+                    // In any other iteration the caller wants one value returned, but still
+                    // wants the callee to produce nargout number of return values.
+                    int caller_nvalback;
+                    if (i + 1 != n)
+                      caller_nvalback = 1;
+                    else
+                      caller_nvalback = nargout;
+                    
+                    (*sp++).i = n_args_on_stack;
+                    (*sp++).i = nargout;
+                    (*sp++).i = caller_nvalback;
+                    (*sp++).i = 0;
+                    goto make_nested_handle_call;
+                  }
+                case octave_base_value::vm_call_dispatch_type::OCT_FN_LOOKUP:
+                  {
+                    (*sp++).pee = new execution_exception {"error", "", "invalid undefined value in chained index expression"}; // TODO: Uninformative?
+                    (*sp++).i = static_cast<int> (error_type::EXECUTION_EXC);
+                    goto unwind;
+                  }
+                case octave_base_value::vm_call_dispatch_type::OCT_SUBSREF:
+                  {
+                    octave_value_list ovl;
+                    // The operands are on the top of the stack
+                    POP_STACK_RANGE_TO_OVL (ovl, sp - n_args_on_stack, sp);
+
+                    CHECK_PANIC (! ov.is_function () || ov.is_classdef_meta ()); // TODO: Remove
+
+                    octave_value_list retval;
+                    try
+                      {
+                        m_tw->set_active_bytecode_ip (ip - code);
+                        retval = ov.simple_subsref (type, ovl, subsref_nargout);
+                      }
+                    CATCH_INTERRUPT_EXCEPTION
+                    catch (index_exception& ie)
+                      {
+                        // Fetch the name of the left most object being indexed from the arg name entries.
+                        int ip_offset = ip - code;
+                        arg_name_entry entry = get_argname_entry (ip_offset, unwind_data);
+
+                        if (entry.m_obj_name != "")
+                          {
+                            // Only set the name if the object is defined (i.e. not a function call to e.g. 'zero')
+                            // to match the error messages in tree_evaluator and pass index.tst
+                            if (m_tw->get_current_stack_frame ()->varval (entry.m_obj_name).is_defined ())
+                              ie.set_var (entry.m_obj_name);
+                          }
+
+                        (*sp++).pee = ie.dup ();
+                        (*sp++).i = static_cast<int> (error_type::INDEX_ERROR);
+                        goto unwind;
+                      }
+                    CATCH_EXECUTION_EXCEPTION
+                    CATCH_BAD_ALLOC
+                    CATCH_EXIT_EXCEPTION
+
+                    ov = octave_value ();
+                    STACK_DESTROY (1); // Destroy the object being indexed
+
+                    ovl.clear (); // Destroy the args
+
+                    bool is_last_iteration = i + 1 == n;
+
+                    if (is_last_iteration)
+                      {
+                        // TODO: Kludge for e.g. "m = containers.Map;" which returns a function.
+                        //       Should preferably be done by .subsref?
+                        //       If only classdefs does this, this is not really needed here.
+                        octave_value val = (retval.length () ? retval(0) : octave_value ());
+                        if (val.is_function ())
+                          {
+                            octave_function *fcn = val.function_value (true);
+
+                            if (fcn)
+                              {
+                                retval = fcn->call (*m_tw, nargout, {});
+                              }
+                          }
+                      }
+
+                    // Push one value if this iteration of INDEX_STRUCT_SUBCALL
+                    // is not the last one.
+                    if (!is_last_iteration)
+                      nargout = 1;
+                    EXPAND_CSLIST_PUSH_N_OVL_ELEMENTS_TO_STACK (retval, nargout);
+                  }
+                break;
+
+                case octave_base_value::vm_call_dispatch_type::OCT_CALL:
+                case octave_base_value::vm_call_dispatch_type::OCT_HANDLE:
+                case octave_base_value::vm_call_dispatch_type::OCT_OBJECT:
+                  {
+                    CHECK_PANIC (ov.has_function_cache ()); // TODO :Remove
+
+                    octave_function *fcn;
+                    try
+                      {
+                        stack_element *first_arg = &sp[-n_args_on_stack];
+                        stack_element *end_arg = &sp[0];
+                        fcn = ov.get_cached_fcn (first_arg, end_arg);
+                      }
+                    CATCH_EXECUTION_EXCEPTION // parse errors might throw in classdefs
+
+                    if (! fcn)
+                      {
+                        (*sp++).pee = new execution_exception {"error", "", "invalid return value in chained index expression"}; // TODO: Uninformative?
+                        (*sp++).i = static_cast<int> (error_type::EXECUTION_EXC);
+                        goto unwind;
+                      }
+                    else if (fcn->is_compiled ())
+                      {
+                        octave_user_code *usr_fcn = static_cast<octave_user_code *> (fcn);
+
+                        // The last iteration the caller wants as many returned on the stack
+                        // as it wants the callee to produce.
+                        // In any other iteration the caller wants one value returned, but still
+                        // wants the callee to produce nargout number of return values.
+                        int caller_nvalback;
+                        if (i + 1 != n)
+                          caller_nvalback = 1;
+                        else
+                          caller_nvalback = nargout;
+
+                        MAKE_BYTECODE_CALL
+
+                        // Now dispatch to first instruction in the
+                        // called function
+                      }
+                    else
+                      {
+                        try
+                          {
+                            octave_value_list ovl;
+                            // The operands are on the top of the stack
+                            POP_STACK_RANGE_TO_OVL (ovl, sp - n_args_on_stack, sp);
+
+                            m_tw->set_active_bytecode_ip (ip - code);
+                            octave_value_list ret = fcn->call (*m_tw, nargout, ovl);
+
+                            STACK_DESTROY (1); // Destroy the object being indexed
+
+                            bool is_last_iteration = i + 1 == n;
+
+                            if (is_last_iteration)
+                              {
+                                // TODO: Kludge for e.g. "m = containsers.Map;" which returns a function.
+                                //       Should preferably be done by .subsref?
+                                //       If only classdefs does this, this is not really needed here.
+                                octave_value val = (ret.length () ? ret(0) : octave_value ());
+                                if (val.is_function ())
+                                  {
+                                    octave_function *fcn_final = val.function_value (true);
+
+                                    if (fcn_final)
+                                      {
+                                        ret = fcn_final->call (*m_tw, nargout, {});
+                                      }
+                                  }
+                              }
+
+                            // Push one value if this iteration of INDEX_STRUCT_SUBCALL
+                            // is not the last one.
+                            if (!is_last_iteration)
+                              nargout = 1;
+                            EXPAND_CSLIST_PUSH_N_OVL_ELEMENTS_TO_STACK (ret, nargout);
+                          }
+                        CATCH_INTERRUPT_EXCEPTION
+                        CATCH_INDEX_EXCEPTION
+                        CATCH_EXECUTION_EXCEPTION
+                        CATCH_BAD_ALLOC
+                        CATCH_EXIT_EXCEPTION
+                      }
+                  }
+                break;
+              }
+
+          }
+        else
+          {
+            if (!ov_is_vm_chainargs_wrapper)
+              {
+                // Replace the object being indexed on the stack with a wrapper object
+                // with the object being indexed in it.
+                ov = octave_value {new octave_vm_chainargs_wrapper {ov}};
+              }
+
+            octave_vm_chainargs_wrapper &ovb_chainargs = REP (octave_vm_chainargs_wrapper, ov);
+
+            { // Limit ovl to this scope to not pollute the whole C++ scope with an invalid ovl after the move
+              octave_value_list ovl;
+              // The operands are on the top of the stack
+              POP_STACK_RANGE_TO_OVL (ovl, sp - n_args_on_stack, sp);
+
+              ovb_chainargs.append_args (std::move (ovl));
+              ovb_chainargs.append_type (type);
+            }
+
+            // In the last INDEX_STRUCT_SUBCALL the args have been collected and it is
+            // time to make the call.
+            if (i + 1 == n)
+              {
+                std::list<octave_value_list> idxs = ovb_chainargs.steal_idxs ();
+                std::string types = ovb_chainargs.steal_types ();
+
+                // Replace the wrapper object with the object being indexed.
+                // Since ov and ovb_chainargs are the same octave value
+                // there has to be a intermediate copy to keep the refcounts
+                // correct.
+                {
+                  octave_value tmp = ovb_chainargs.steal_obj_to_call ();
+                  ov = tmp;
+                  // Note: 'ovb_chainargs' is invalid from now on
+                }
+
+                switch (ov.vm_dispatch_call ())
+                  {
+                    case octave_base_value::vm_call_dispatch_type::OCT_NESTED_HANDLE:
+                    case octave_base_value::vm_call_dispatch_type::OCT_FN_LOOKUP:
+                      PANIC ("Invalid dispatch");
+                      break;
+                    case octave_base_value::vm_call_dispatch_type::OCT_SUBSREF:
+                      {
+                        CHECK_PANIC (! ov.is_function () || ov.is_classdef_meta ()); // TODO: Remove
+
+                        octave_value_list retval;
+                        try
+                          {
+                            m_tw->set_active_bytecode_ip (ip - code);
+                            retval = ov.subsref (types, idxs, subsref_nargout);
+                            idxs.clear ();
+                          }
+                        CATCH_INTERRUPT_EXCEPTION
+                        catch (index_exception& ie)
+                          {
+                            // Fetch the name of the left most object being indexed from the arg name entries.
+                            int ip_offset = ip - code - 1; // Minus one since ip points at next opcode now
+                            arg_name_entry entry = get_argname_entry (ip_offset, unwind_data);
+
+                            if (entry.m_obj_name != "")
+                              {
+                                // Only set the name if the object is defined (i.e. not a function call to e.g. 'zero')
+                                // to match the error messages in tree_evaluator and pass index.tst
+                                if (m_tw->get_current_stack_frame ()->varval (entry.m_obj_name).is_defined ())
+                                  ie.set_var (entry.m_obj_name);
+                              }
+
+                            (*sp++).pee = ie.dup ();
+                            (*sp++).i = static_cast<int> (error_type::INDEX_ERROR);
+                            goto unwind;
+                          }
+                        CATCH_EXECUTION_EXCEPTION
+                        CATCH_BAD_ALLOC
+                        CATCH_EXIT_EXCEPTION
+
+                        // TODO: Kludge for e.g. "m = containsers.Map;" which returns a function.
+                        //       Should preferably be done by subsref()/call()?
+                        octave_value val = (retval.length () ? retval(0) : octave_value ());
+                        if (val.is_function ())
+                          {
+                            octave_function *fcn_final = val.function_value (true);
+
+                            if (fcn_final)
+                              {
+                                retval = fcn_final->call (*m_tw, nargout, {});
+                              }
+                          }
+
+                        STACK_DESTROY (1); // Destroy the object being indexed
+                        EXPAND_CSLIST_PUSH_N_OVL_ELEMENTS_TO_STACK (retval, nargout);
+                      }
+                    break;
+
+                    case octave_base_value::vm_call_dispatch_type::OCT_CALL:
+                    case octave_base_value::vm_call_dispatch_type::OCT_HANDLE:
+                    case octave_base_value::vm_call_dispatch_type::OCT_OBJECT:
+                      {
+                        CHECK_PANIC (ov.is_function ()); // TODO :Remove
+
+                        octave_function *fcn = ov.function_value ();
+
+                        // tree_evaluator silently ignored nullptr here
+                        if (! fcn)
+                          {
+                            (*sp++).pee = new execution_exception {"error", "", "invalid return value in chained index expression"}; // TODO: Uninformative?
+                            (*sp++).i = static_cast<int> (error_type::EXECUTION_EXC);
+                            goto unwind;
+                          }
+#if 0 // TODO: Support for bytecode calls to ctors
+                    else if (fcn->is_compiled ())
+                      {
+                        octave_user_code *usr_fcn = static_cast<octave_user_code *> (fcn);
+
+                        // Alot of code in this define
+                        int nargout = 1;
+                        MAKE_BYTECODE_CALL
+
+                        // Now dispatch to first instruction in the
+                        // called function
+                      }
+#endif
+                        else
+                          {
+                            try
+                              {
+                                if (idxs.size () != 1)
+                                  error ("unexpected extra index at end of expression");
+                                if (type != '(')
+                                  error ("invalid index type '%c' for function call",
+                                    type);
+
+                                octave_value_list final_args = idxs.front ();
+
+                                m_tw->set_active_bytecode_ip (ip - code);
+                                octave_value_list ret = fcn->call (*m_tw, nargout, final_args);
+
+                                // TODO: Kludge for e.g. "m = containsers.Map;" which returns a function.
+                                //       Should preferably be done by subsref()/call()?
+                                //       Is this actually executed? See pt-idx.cc
+                                octave_value val = (ret.length () ? ret(0) : octave_value ());
+                                if (val.is_function ())
+                                  {
+                                    octave_function *fcn_final = val.function_value (true);
+
+                                    if (fcn_final)
+                                      {
+                                        ret = fcn_final->call (*m_tw, nargout, final_args); // Called with same args as above ...
+                                      }
+                                  }
+
+                                STACK_DESTROY (1); // Destroy the object being indexed
+                                EXPAND_CSLIST_PUSH_N_OVL_ELEMENTS_TO_STACK (ret, nargout);
+                              }
+                            CATCH_INTERRUPT_EXCEPTION
+                            CATCH_INDEX_EXCEPTION
+                            CATCH_EXECUTION_EXCEPTION
+                            CATCH_BAD_ALLOC
+                            CATCH_EXIT_EXCEPTION
+                          }
+                      }
+                    break;
+                  }
+              }
+          }
+      }
+    CATCH_INTERRUPT_EXCEPTION
+    CATCH_INDEX_EXCEPTION
+    CATCH_EXECUTION_EXCEPTION
+    CATCH_BAD_ALLOC
+    CATCH_EXIT_EXCEPTION
+  }
+  DISPATCH ();
+}
+
+// Resolve the value of the magic "end" keyword when indexing the object OV.
+//
+// IDX is the zero-based position of the index being evaluated and NARGS
+// is the total number of indices in the whole index expression.  If OV's
+// class defines an "end" method it is dispatched through feval (note the
+// one-based index, hence idx+1); otherwise the value's built-in
+// end_index () is used.
+//
+// See tree_evaluator::evaluate_end_expression()
+octave_value
+vm::handle_object_end (octave_value ov, int idx, int nargs)
+{
+  octave_value ans;
+
+  auto &interpreter = m_tw->get_interpreter ();
+  std::string dispatch_class = ov.class_name ();
+  symbol_table& symtab = interpreter.get_symbol_table ();
+
+  // Look up a class-provided "end" method for OV's dispatch class.
+  octave_value meth = symtab.find_method ("end", dispatch_class);
+
+  if (meth.is_defined ())
+    // end (obj, k, n) convention: k is one-based, n is the index count.
+    ans = interpreter.feval (meth, ovl (ov, idx+1, nargs), 1).first_or_nil_ov ();
+  else
+    ans = octave_value (ov.end_index (idx, nargs));
+
+  return ans;
+}
+
+// Resolve NAME to a function value for a command-syntax call,
+// consulting the interpreter's symbol table.  Returns an undefined
+// octave_value if no function by that name is found.
+octave_value
+vm::find_fcn_for_cmd_call (std::string *name)
+{
+  symbol_table& symtab = __get_interpreter__ ().get_symbol_table ();
+
+  return symtab.find_function (*name);
+}
+
+vm::error_data
+vm::handle_error (error_type error_type)
+{
+  error_data ret;
+
+  error_system& es = m_tw->get_interpreter().get_error_system ();
+
+  std::stringstream ss;
+  // ip points to the "next" instruction, so search for the
+  // code location for ip - 1
+  loc_entry loc = find_loc (m_ip - 1, m_unwind_data->m_loc_entry);
+
+  switch (error_type)
+    {
+    case error_type::BAD_ALLOC:
+      {
+        execution_exception e {"error", "Octave:bad-alloc", "out of memory or dimension too large for Octave's index type"};
+        es.save_exception (e);
+
+        break;
+      }
+    case error_type::ID_UNDEFINED:
+      {
+        std::string *sp = m_sp [-1].ps;
+        m_sp--;
+        std::string id_name = *sp;
+        delete sp;
+
+        ss << "'" << id_name << "'" <<
+          " undefined near line " << loc.m_line <<
+          ", column " << loc.m_col;
+
+        execution_exception e { "error",
+          "Octave:undefined-function",
+          ss.str ()};
+
+        // Since the exception was made in the VM it has not been saved yet
+        es.save_exception (e);
+
+        break;
+      }
+    case error_type::IF_UNDEFINED:
+      {
+        // error ("%s: undefined value used in conditional expression", warn_for);
+        ss << "if's condition undefined near line " <<
+          loc.m_line << ", column " << loc.m_col;
+
+        execution_exception e {"error", "", ss.str ()};
+
+        es.save_exception (e);
+
+        break;
+      }
+    case error_type::INDEX_ERROR:
+      {
+        execution_exception *e = m_sp [-1].pee;
+
+        CHECK (e);
+        es.save_exception (*e);
+
+        delete e;
+
+        m_sp--;
+
+        break;
+      }
+    case error_type::EXECUTION_EXC:
+      {
+        execution_exception *e = m_sp [-1].pee;
+
+        CHECK (e);
+        es.save_exception (*e);
+
+        delete e;
+
+        m_sp--;
+
+        break;
+      }
+    case error_type::INTERRUPT_EXC:
+      break; // Do nothing
+    case error_type::EXIT_EXCEPTION:
+      ret.m_safe_to_return = (--m_sp)->i;
+      ret.m_exit_status = (--m_sp)->i;
+      break;
+    case error_type::INVALID_N_EL_RHS_IN_ASSIGNMENT:
+    {
+      execution_exception e {"error", "", "invalid number of elements on RHS of assignment"};
+
+      es.save_exception (e);
+
+      break;
+    }
+    case error_type::RHS_UNDEF_IN_ASSIGNMENT:
+    {
+      execution_exception e {"error", "", "value on right hand side of assignment is undefined"};
+
+      es.save_exception (e);
+
+      break;
+    }
+    case error_type::DEBUG_QUIT:
+    {
+      ret.m_debug_quit_all = m_sp[-1].i;
+      m_sp--;
+
+      break;
+    }
+    default:
+      TODO ("Unhandeled error type");
+    }
+
+  return ret;
+}
+
+// VM destructor: release the bytecode execution stack allocated in the
+// constructor.  By this point all output-ignore state must already have
+// been torn down, or something unwound incorrectly.
+vm::~vm ()
+{
+  delete [] m_stack0;
+
+  CHECK (m_output_ignore_data == nullptr);
+}
+
+// Construct a VM bound to the tree_evaluator TW, primed to run
+// INITIAL_BYTECODE.  Allocates the execution stack (with magic-value
+// guard pads on both ends), caches type-info function pointers used by
+// the specialized double/bool opcodes, and caches the built-in
+// functions for pi, i and e.
+vm::vm (tree_evaluator *tw, bytecode &initial_bytecode)
+{
+  m_ti = &__get_type_info__();
+  // Guard pads of stack_pad elements sit before and after the usable
+  // stack area and are filled with a magic value to detect over/underrun.
+  m_stack0 = new stack_element[stack_size + stack_pad * 2];
+
+  for (unsigned i = 0; i < stack_pad; i++)
+    {
+      m_stack0[i].u = stack_magic_int;
+      m_stack0[i + stack_size].u = stack_magic_int;
+    }
+
+  // The usable stack starts after the leading guard pad.
+  m_sp = m_stack = m_stack0 + stack_pad;
+  m_tw = tw;
+  m_symtab = &__get_symbol_table__();
+
+  // Point the VM at the entry bytecode's constant data, code, id names
+  // and unwind tables.
+  m_data = initial_bytecode.m_data.data ();
+  m_code = initial_bytecode.m_code.data ();
+  m_name_data = initial_bytecode.m_ids.data ();
+  m_unwind_data = &initial_bytecode.m_unwind_data;
+
+  // Check that the typeids are what the VM anticipates. If the id change, just change
+  // the constexpr.
+  CHECK (octave_scalar::static_type_id () == m_scalar_typeid);
+  CHECK (octave_bool::static_type_id () == m_bool_typeid);
+  CHECK (octave_matrix::static_type_id () == m_matrix_typeid);
+  CHECK (octave_cs_list::static_type_id () == m_cslist_typeid);
+
+  // Function pointer used for specialized op-codes
+  m_fn_dbl_mul = m_ti->lookup_binary_op (octave_value::binary_op::op_mul, m_scalar_typeid, m_scalar_typeid);
+  m_fn_dbl_div = m_ti->lookup_binary_op (octave_value::binary_op::op_div, m_scalar_typeid, m_scalar_typeid);
+  m_fn_dbl_add = m_ti->lookup_binary_op (octave_value::binary_op::op_add, m_scalar_typeid, m_scalar_typeid);
+  m_fn_dbl_sub = m_ti->lookup_binary_op (octave_value::binary_op::op_sub, m_scalar_typeid, m_scalar_typeid);
+  m_fn_dbl_pow = m_ti->lookup_binary_op (octave_value::binary_op::op_pow, m_scalar_typeid, m_scalar_typeid);
+
+  // NOTE: in the VM's naming, "le" denotes less-than (op_lt) and "le_eq"
+  // less-than-or-equal (op_le) — consistent with the LE_CST opcodes above.
+  m_fn_dbl_le = m_ti->lookup_binary_op (octave_value::binary_op::op_lt, m_scalar_typeid, m_scalar_typeid);
+  m_fn_dbl_le_eq = m_ti->lookup_binary_op (octave_value::binary_op::op_le, m_scalar_typeid, m_scalar_typeid);
+  m_fn_dbl_gr = m_ti->lookup_binary_op (octave_value::binary_op::op_gt, m_scalar_typeid, m_scalar_typeid);
+  m_fn_dbl_gr_eq = m_ti->lookup_binary_op (octave_value::binary_op::op_ge, m_scalar_typeid, m_scalar_typeid);
+  m_fn_dbl_eq = m_ti->lookup_binary_op (octave_value::binary_op::op_eq, m_scalar_typeid, m_scalar_typeid);
+  m_fn_dbl_neq = m_ti->lookup_binary_op (octave_value::binary_op::op_ne, m_scalar_typeid, m_scalar_typeid);
+
+  m_fn_dbl_usub = m_ti->lookup_unary_op (octave_value::unary_op::op_uminus, m_scalar_typeid);
+  m_fn_dbl_not = m_ti->lookup_unary_op (octave_value::unary_op::op_not, m_scalar_typeid);
+  m_fn_bool_not = m_ti->lookup_unary_op (octave_value::unary_op::op_not, m_bool_typeid);
+
+  m_pi_builtin_fn = m_symtab->find_built_in_function ("pi").function_value ();
+  m_i_builtin_fn = m_symtab->find_built_in_function ("i").function_value ();
+  m_e_builtin_fn = m_symtab->find_built_in_function ("e").function_value ();
+  // If the platform has no M_PI, M_E we need to initialize ov_pi and ov_e
+#if !defined (M_PI)
+  ov_pi = 4.0 * atan (1.0);
+#endif
+#if !defined (M_E)
+  ov_e = exp (1.0);
+#endif
+
+}
+
+// If there are too many return values we can't just move them since the stacks will overlap so we
+// need to copy the args first with this proc
+// If there are too many return values we can't just move them since the stacks will overlap so we
+// need to copy the args first with this proc.
+//
+// 'sp' is the destination stack pointer (used implicitly by PUSH_OV),
+// 'caller_stack_end' points at the first of the 'n_args_to_move'
+// values to relocate, and 'n_args_caller_expects' is padded out with
+// undefined values if larger.
+static void copy_many_args_to_caller (octave::stack_element *sp,
+                                      octave::stack_element *caller_stack_end,
+                                      int n_args_to_move, int n_args_caller_expects)
+{
+  // Move args to an ovl.  The moves leave the source slots in a
+  // harmless moved-from state, so the later pushes cannot alias them.
+  octave_value_list ovl;
+  for (int i = 0; i < n_args_to_move; i++)
+    {
+      octave_value &arg = caller_stack_end[i].ov;
+      ovl.append (std::move (arg));
+    }
+
+  // Replay them onto the stack in reverse so the caller pops them in
+  // the original order.
+  for (int i = 0; i < n_args_to_move; i++)
+    {
+      PUSH_OV (ovl(n_args_to_move - 1 - i)); // backwards
+    }
+
+  // Push missing args
+  for (int i = n_args_to_move; i < n_args_caller_expects; i++)
+    PUSH_OV ();
+}
+
+// Forward declaration; defined below.
+static octave_value xeval_for_numel (octave_value &ov, const std::string& type, const std::list<octave_value_list>& idx);
+
+// This function reimplements octave_lvalue::numel()
+// TODO: octave_lvalue::numel() could be broken out or made static and used instead. But don't mess with that code
+//       to keep the VM somewhat independent of other code.
+//
+// Given a value 'ov' about to be assigned through the subscript chain
+// described by 'type' (one of '(', '{', '.' per level) and 'idx',
+// return the number of values the left-hand side expands to.
+static int lhs_assign_numel (octave_value &ov, const std::string& type, const std::list<octave_value_list>& idx)
+{
+  // Return 1 if there is no index because without an index there
+  // should be no way to have a cs-list here.  Cs-lists may be passed
+  // around internally but they are not supposed to be stored as
+  // single symbols in a stack frame.
+
+  std::size_t num_indices = idx.size ();
+
+  if (num_indices == 0)
+    return 1;
+
+  // Dispatch on the *last* subscript in the chain.
+  switch (type[num_indices-1])
+    {
+    case '(':
+      // Paren indexing on the LHS always produces exactly one value.
+      return 1;
+
+    case '{':
+      {
+        // FIXME: Duplicate code in '.' case below...
+
+        // Evaluate, skipping the last index.
+
+        std::string tmp_type = type;
+        std::list<octave_value_list> tmp_idx = idx;
+
+        tmp_type.pop_back ();
+        tmp_idx.pop_back ();
+
+        octave_value tmp = xeval_for_numel (ov, tmp_type, tmp_idx);
+
+        octave_value_list tidx = idx.back ();
+
+        if (tmp.is_undefined ())
+          {
+            // Can't inquire (via ':') into something that doesn't exist.
+            if (tidx.has_magic_colon ())
+              err_invalid_inquiry_subscript ();
+
+            tmp = Cell ();
+          }
+        else if (tmp.is_zero_by_zero ()
+                 && (tmp.is_matrix_type () || tmp.is_string ()))
+          {
+            // Empty matrices/strings auto-convert to cells under '{'.
+            tmp = Cell ();
+          }
+
+        return tmp.xnumel (tidx);
+      }
+      break;
+
+    case '.':
+      {
+        // Evaluate, skipping either the last index or the last two
+        // indices if we are looking at "(idx).field".
+
+        std::string tmp_type = type;
+        std::list<octave_value_list> tmp_idx = idx;
+
+        tmp_type.pop_back ();
+        tmp_idx.pop_back ();
+
+        bool paren_dot = num_indices > 1 && type[num_indices-2] == '(';
+
+        // Index for paren operator, if any.
+        octave_value_list pidx;
+
+        if (paren_dot)
+          {
+            pidx = tmp_idx.back ();
+
+            tmp_type.pop_back ();
+            tmp_idx.pop_back ();
+          }
+
+        octave_value tmp = xeval_for_numel (ov, tmp_type, tmp_idx);
+
+        // Empty matrix/string/cell auto-converts to a struct under '.'.
+        bool autoconv = (tmp.is_zero_by_zero ()
+                         && (tmp.is_matrix_type () || tmp.is_string ()
+                             || tmp.iscell ()));
+
+        if (paren_dot)
+          {
+            // Use octave_map, not octave_scalar_map so that the
+            // dimensions are 0x0, not 1x1.
+
+            if (tmp.is_undefined ())
+              {
+                if (pidx.has_magic_colon ())
+                  err_invalid_inquiry_subscript ();
+
+                tmp = octave_map ();
+              }
+            else if (autoconv)
+              tmp = octave_map ();
+
+            return tmp.xnumel (pidx);
+          }
+        else if (tmp.is_undefined () || autoconv)
+          return 1;
+        else
+          return tmp.xnumel (octave_value_list ());
+      }
+      break;
+
+    default:
+      // The compiler only emits '(', '{' and '.' chains.
+      panic_impossible ();
+    }
+}
+
+// Evaluate 'ov' through the subscript chain 'type'/'idx', yielding the
+// intermediate value whose numel we want.  Any evaluation error is
+// swallowed and reported as an undefined value, since e.g. an
+// out-of-range index may simply mean a cell array is about to be
+// resized by the pending assignment.
+static octave_value xeval_for_numel (octave_value &ov, const std::string& type, const std::list<octave_value_list>& idx)
+{
+  try
+    {
+      if (! idx.empty () && ov.is_constant ())
+        return ov.subsref (type, idx);
+
+      return ov;
+    }
+  catch (const execution_exception&)
+    {
+      // Ignore the error and treat the value as undefined, after
+      // restoring the interpreter's error state.
+      __get_interpreter__ ().recover_from_exception ();
+
+      return octave_value ();
+    }
+}
+
+
+// Map an instruction offset 'ip' to its source-location entry.  When
+// several entries' [m_ip_start, m_ip_end) ranges cover 'ip', the last
+// one in the table wins.  Returns a value-initialized loc_entry when
+// nothing matches.
+loc_entry vm::find_loc (int ip, std::vector<octave::loc_entry> &loc_entries)
+{
+  // A linear scan is fine here: this is only reached while handling
+  // exceptions, so performance does not matter.
+  int match = -1;
+
+  int n_entries = loc_entries.size ();
+
+  for (int idx = 0; idx < n_entries; idx++)
+    {
+      loc_entry &cand = loc_entries[idx];
+
+      if (ip >= cand.m_ip_start && ip < cand.m_ip_end)
+        match = idx;
+    }
+
+  if (match < 0)
+    return {};
+
+  return loc_entries[match];
+}
+
+// Forward the callee's nargin to the tree evaluator's current frame.
+void vm::set_nargin (int nargin)
+{
+  m_tw->set_nargin (nargin);
+}
+
+// Called (before execution starts) when the calling, non-VM, code has
+// marked some outputs as ignored (e.g. "[~, b] = foo ()").  Sets up the
+// output-ignore bookkeeping so the root VM frame sees the caller's
+// lvalue list without taking ownership of it.
+void vm::caller_ignores_output ()
+{
+  m_output_ignore_data = new output_ignore_data;
+  // The lvalue list belongs to the external caller, so record that we
+  // must not delete it when the frame is popped.
+  m_output_ignore_data->m_v_lvalue_list.back () = m_tw->lvalue_list ();
+  m_output_ignore_data->m_v_owns_lvalue_list.back () = false;
+  m_output_ignore_data->m_external_root_ignorer = true;
+}
+
+// Forward the callee's nargout to the tree evaluator's current frame.
+void vm::set_nargout (int nargout)
+{
+  m_tw->set_nargout (nargout);
+}
+
+// Find the anticipated stack depth of the innermost enclosing for-loop
+// whose unwind entry covers the current instruction pointer and whose
+// depth is strictly below 'current_stack_depth'.  Returns -1 when there
+// is no such loop.
+int
+vm::find_unwind_entry_for_forloop (int current_stack_depth)
+{
+  int best_match = -1;
+
+  // Find a for loop entry that matches the current instruction pointer
+  // range and also got an anticipated stack depth less than current stack
+  // depth.
+  //
+  // I.e. if the ip is in a for loop, we want to unwind down the stack
+  // until we reach the stack depth of the for loop to be able to remove
+  // its native int:s properly.
+  //
+  // To be able to unwind nested for loops we look for smaller and
+  // smaller stack depths given by the current_stack_depth parameter.
+
+  for (unsigned i = 0; i < m_unwind_data->m_unwind_entries.size(); i++)
+    {
+      unwind_entry& entry = m_unwind_data->m_unwind_entries[i];
+      int start = entry.m_ip_start;
+      int end = entry.m_ip_end;
+      int stack_depth = entry.m_stack_depth;
+
+      // Skip entries that are not for-loop entries.
+      if (entry.m_unwind_entry_type != unwind_entry_type::FOR_LOOP)
+        continue;
+      // Is the ip in the entry's range?
+      if (start > m_ip || end <= m_ip)
+        continue;
+      // Is the stack depth ok?
+      if (stack_depth >= current_stack_depth)
+        continue;
+
+      // Is it better than the prior match?  Deeper (larger) depth
+      // means a more deeply nested, i.e. innermost, loop.
+      if (best_match != -1)
+        {
+          if (best_match > stack_depth)
+            continue;
+        }
+
+      best_match = stack_depth;
+    }
+
+  return best_match;
+}
+
+// Find the innermost non-for-loop unwind entry (try/catch or
+// unwind_protect) covering the current instruction pointer, or nullptr
+// if none.  When 'only_find_unwind_protect' is set (used e.g. for
+// interrupt exceptions) try/catch entries are skipped too.
+unwind_entry*
+vm::find_unwind_entry_for_current_state (bool only_find_unwind_protect)
+{
+  int best_match = -1;
+
+  // Find the entry with the highest start instruction offset, i.e. the
+  // innermost covering entry.
+  for (unsigned i = 0; i < m_unwind_data->m_unwind_entries.size(); i++)
+    {
+      unwind_entry& entry = m_unwind_data->m_unwind_entries[i];
+      int start = entry.m_ip_start;
+      int end = entry.m_ip_end;
+
+      // When unwinding for e.g. interrupt exceptions we are only looking for UNWIND_PROTECT
+      if (only_find_unwind_protect && (entry.m_unwind_entry_type != unwind_entry_type::UNWIND_PROTECT))
+        continue;
+
+      // Skip for loop entries; those are handled by
+      // find_unwind_entry_for_forloop ().
+      if (entry.m_unwind_entry_type == unwind_entry_type::FOR_LOOP)
+        continue;
+
+      // Is the ip in the entry's range?
+      if (start > m_ip || end <= m_ip) // TODO: end < m_ip ???
+        continue;
+
+      // Is it better than the prior match?
+      if (best_match != -1)
+        {
+          int best_start =
+            m_unwind_data->m_unwind_entries[best_match].m_ip_start;
+          if (best_start > start)
+            continue;
+        }
+
+      best_match = i;
+    }
+
+  if (best_match == -1)
+    return nullptr;
+
+  return &m_unwind_data->m_unwind_entries[best_match];
+}
+
+// True when 'ov' is a plain value whose subsrefs the VM can perform
+// stepwise itself: not an object, not a Java object, and not a
+// classdef meta object (package metas excepted).
+static bool ov_need_stepwise_subsrefs (octave_value &ov)
+{
+  if (ov.isobject () || ov.isjava ())
+    return false;
+
+  return ! (ov.is_classdef_meta () && ! ov.is_package ());
+}
+
+// Current monotonic-ish timestamp in nanoseconds, as provided by the
+// gettime wrapper.  All profiler bookkeeping uses this clock.
+int64_t
+vm_profiler::unow ()
+{
+  return octave_gettime_ns_wrapper ();
+}
+
+// Attribute 'dt' nanoseconds of self time to the opcode this function
+// is currently executing (the ip on top of m_v_ip), growing the
+// per-opcode vectors as needed.
+void
+vm_profiler::vm_profiler_fn_stats::add_t (int64_t dt)
+{
+  int ip = m_v_ip.back ();
+  maybe_resize (ip);
+
+  m_v_cum_t[ip] += dt;
+  ++m_v_n_cum[ip];
+}
+
+// Attribute 'dt' nanoseconds of self time to the call currently on top
+// of the shadow call stack.  A no-op when nothing is being profiled.
+void
+vm_profiler::add_t (int64_t dt)
+{
+  if (m_shadow_call_stack.empty ())
+    return;
+
+  m_shadow_call_stack.back ().m_t_self_cum += dt;
+}
+
+// There is no std::format since we use C++ 11 so lets make our own.
+// The 'format' attribute gives nice compiler warnings on misuse.
+//
+// Formats like snprintf into a std::string, growing the buffer and
+// retrying until the formatted output fits.  Raises an Octave error on
+// an invalid format/argument combination (vsnprintf returning < 0).
+static
+std::string
+x_snprintf (const char *fmt, ...) __attribute__ ((format (printf, 1, 2)));
+
+static
+std::string
+x_snprintf (const char *fmt, ...)
+{
+    int n = 32;
+    do {
+      // std::vector owns the buffer, so no manual delete[] (and no
+      // try/catch) is needed for exception safety.
+      std::vector<char> buff (n);
+
+      // va_start/va_end are redone on every retry, as required to
+      // traverse the argument list again.
+      va_list va;
+      va_start (va, fmt);
+      int n_needed = vsnprintf (buff.data (), n, fmt, va);
+      va_end (va);
+
+      if (n_needed < 0)
+        error ("profiler internal error: Invalid call to x_snprintf()");
+      if (n_needed < n)
+        return std::string (buff.data ());
+
+      // Output was truncated: retry with a buffer that exactly fits.
+      n = n_needed + 1;
+    } while (1);
+}
+
+// Render the collected profiling data to stdout: a runtime-ordered and
+// a first-call-ordered summary, followed by per-function annotated
+// source and annotated bytecode listings with per-line hit counts,
+// cumulative times and percentage shares.
+void
+vm_profiler::print_to_stdout ()
+{
+  using std::string;
+  using std::vector;
+  using std::map;
+  using std::pair;
+
+  using std::cout;
+  using std::setw;
+
+  // These could probably be vectors, but we'll do with maps to keep the
+  // code easier to follow.
+  map<string, int64_t> map_fn_to_cum_t;
+  map<string, int64_t> map_fn_to_self_cum_t;
+  map<string, vector<string>> map_fn_to_sourcerows;
+  map<string, vector<pair<int, string>>> map_fn_to_opcodes_stringrows;
+  map<string, string> map_fn_to_annotated_source;
+  map<string, string> map_fn_to_annotated_bytecode;
+
+  // Calculate cumulative function time.
+  // Self time is opcode time only; total time also includes time spent
+  // in bytecode calls made from each opcode (m_v_cum_call_t).
+  for (auto kv : m_map_fn_stats)
+    {
+      string fn_name = kv.first;
+      vm_profiler_fn_stats &stats = kv.second;
+
+      int64_t t_fn_cum = 0;
+      int64_t t_fn_self_cum = 0;
+      unsigned n = stats.m_v_cum_t.size ();
+
+      for (unsigned ip = 0; ip < n; ip++)
+        {
+          t_fn_cum += stats.m_v_cum_t[ip];
+          t_fn_self_cum += stats.m_v_cum_t[ip];
+        }
+      for (unsigned ip = 0; ip < stats.m_v_cum_call_t.size (); ip++)
+        t_fn_cum += stats.m_v_cum_call_t[ip];
+
+      map_fn_to_cum_t[fn_name] = t_fn_cum;
+      map_fn_to_self_cum_t[fn_name] = t_fn_self_cum;
+    }
+
+  // Try to get the source code
+  for (auto kv : m_map_fn_stats)
+    {
+      string fn_name = kv.first;
+      vm_profiler_fn_stats &stats = kv.second;
+      string file = stats.m_fn_file;
+
+      auto &interp = __get_interpreter__ ();
+
+      // Call type with the quiet flag to get the source
+      // Also works for functions without source code in files.
+      octave_value_list ans;
+      string source_text;
+
+      bool got_source_text = false;
+
+      // First attempt: look the source up by file name.
+      if (!got_source_text)
+        {
+          octave_value_list args;
+          args.append ("-q");
+          args.append (file);
+          try
+            {
+              if (file.size ())
+                ans = interp.feval ("type", args, 1);
+            }
+          catch (execution_exception &)
+            {
+              // Didn't work
+            }
+        }
+
+      if (ans.length () >= 1)
+        source_text = ans(0).string_value ();
+      if (source_text.size ())
+        got_source_text = true;
+
+      // Second attempt: look the source up by function name.
+      if (!got_source_text)
+        {
+          octave_value_list args;
+          args.append ("-q");
+          args.append (fn_name);
+          try
+            {
+              if (fn_name.size ())
+                ans = interp.feval ("type", args, 1);
+            }
+          catch (execution_exception &)
+            {
+              // Didn't work
+            }
+        }
+
+      if (ans.length () >= 1)
+        source_text = ans(0).string_value ();
+      if (source_text.size ())
+        got_source_text = true;
+
+      if (got_source_text)
+        {
+          // Split source by row
+          vector<string> v_rows;
+
+          std::stringstream ss(source_text);
+          string buff;
+
+          while(std::getline (ss, buff, '\n'))
+              v_rows.push_back (buff);
+
+          map_fn_to_sourcerows[fn_name] = v_rows;
+        }
+    }
+
+  // Get bytecode "source code" rows, i.e. (ip, disassembly) pairs.
+  for (auto kv : m_map_fn_stats)
+    {
+      string fn_name = kv.first;
+      vm_profiler_fn_stats &stats = kv.second;
+
+      auto v_ls = opcodes_to_strings (stats.m_code, stats.m_ids);
+
+      map_fn_to_opcodes_stringrows[fn_name] = v_ls;
+    }
+
+  // Annotate bytecode
+  for (auto kv : m_map_fn_stats)
+    {
+      std::string ans;
+
+      string fn_name = kv.first;
+      vm_profiler_fn_stats &stats = kv.second;
+
+      auto v_ls = map_fn_to_opcodes_stringrows[fn_name];
+      int64_t fn_cum_t = map_fn_to_cum_t[fn_name];
+
+      for (auto ls : v_ls)
+        {
+          int ip = ls.first; // Opcode offset
+          string s = ls.second; // Text representation of the opcode
+
+          // Ignore strange data
+          if (ip < 0)
+            continue;
+
+          // Opcodes that were never hit get a padded row with no timing.
+          if (static_cast<unsigned> (ip) >= stats.m_v_cum_t.size () || (stats.m_v_cum_t[ip] == 0 && stats.m_v_cum_call_t[ip] == 0))
+          {
+            ans += x_snprintf ("\t%*s %5d: %s\n", 43, "", ip, s.c_str ());
+            continue;
+          }
+
+          int64_t n_hits = stats.m_v_n_cum[ip];
+          int64_t t_op = stats.m_v_cum_t[ip] + stats.m_v_cum_call_t[ip];
+          double share_of_fn = 100. * static_cast<double> (t_op) / fn_cum_t;
+
+          // Try to make the table neat around the decimal separator
+          int wholes = floor (share_of_fn);
+          int rest = (share_of_fn - wholes) * 100;
+
+          if (share_of_fn >= 0.1)
+            ans += x_snprintf ("\t%8lld %12lld ns %5d.%-3d %% %12d: %s\n", static_cast<long long> (n_hits), static_cast<long long> (t_op), wholes, rest, ip, s.c_str ());
+          else
+            ans += x_snprintf ("\t%8lld %12lld ns  %7.3e%% %12d: %s\n", static_cast<long long> (n_hits), static_cast<long long> (t_op), share_of_fn, ip, s.c_str ());
+        }
+
+      map_fn_to_annotated_bytecode[fn_name] = ans;
+    }
+
+  // Annotate source code
+  for (auto kv : m_map_fn_stats)
+    {
+      std::string ans;
+
+      string fn_name = kv.first;
+      vm_profiler_fn_stats &stats = kv.second;
+
+      // First we need to create a map between opcode offset and source line
+      auto v_ip_s = map_fn_to_opcodes_stringrows[fn_name];
+
+      map<int, int> map_op_offset_to_src_line;
+
+      for (auto ip_s : v_ip_s)
+        {
+          int ip = ip_s.first;
+          loc_entry loc = vm::find_loc (ip, stats.m_loc_entries);
+          map_op_offset_to_src_line[ip] = loc.m_line;
+        }
+
+      // Sum up the time spent on a source line
+      map<int, int64_t> map_srcline_to_tcum;
+      map<int, int64_t> map_srcline_to_nhits;
+
+      for (unsigned ip = 0; ip < stats.m_v_cum_t.size (); ip++)
+        {
+          int64_t tcum = stats.m_v_cum_t[ip] + stats.m_v_cum_call_t[ip];
+          int64_t nhits = stats.m_v_n_cum[ip];
+          int src_line = map_op_offset_to_src_line[ip];
+          map_srcline_to_tcum[src_line] += tcum;
+          map_srcline_to_nhits[src_line] += nhits;
+        }
+
+      auto v_src_rows = map_fn_to_sourcerows[fn_name];
+      // Annotate the source code
+
+      // Put all time spent in opcodes that does not correspond to any source line,
+      // on the first row with "function.*fnname" on.
+      // (Opcodes with no location end up under key -1 above.)
+      bool found = false;
+      for (unsigned i = 0; i < v_src_rows.size(); i++)
+        {
+          string &row = v_src_rows[i];
+          std::size_t func_idx = row.find ("function");
+          std::size_t name_idx = row.find (stats.m_fn_name);
+
+          if (func_idx == string::npos || name_idx == string::npos)
+            continue;
+
+          string def = row.substr (0, func_idx + strlen ("function"));
+
+          // Any comment making it a fake?
+          if (def.find ('#') != string::npos || def.find ('%') != string::npos)
+            continue;
+
+          int line_nr = i + 1;
+          map_srcline_to_tcum[line_nr] += map_srcline_to_tcum[-1];
+          map_srcline_to_nhits[line_nr] += map_srcline_to_nhits[-1];
+          found = true;
+          break;
+        }
+
+      // No function definition row found: attribute unlocated time to
+      // the first source line.
+      if (!found)
+      {
+        map_srcline_to_tcum[1] += map_srcline_to_tcum[-1];
+        map_srcline_to_nhits[1] += map_srcline_to_nhits[-1];
+      }
+      int64_t fn_cum_t = map_fn_to_cum_t[fn_name];
+
+      for (unsigned i = 0; i < v_src_rows.size(); i++)
+        {
+          int line_nr = i + 1;
+          int64_t t_line_cum = map_srcline_to_tcum[line_nr];
+          int64_t n_hits = map_srcline_to_nhits[line_nr];
+
+          double share_of_fn = 100. * static_cast<double> (t_line_cum) / fn_cum_t;
+
+          // Try to make the table neat around the decimal separator
+          int wholes = floor (share_of_fn);
+          int rest = (share_of_fn - wholes) * 100;
+
+          string src_line = v_src_rows[i];
+
+          if (share_of_fn == 0)
+            ans += x_snprintf ("\t%*s %5d: %s\n", 43, "", line_nr, src_line.c_str ());
+          else if (share_of_fn >= 0.1)
+            ans += x_snprintf ("\t%8lld %12lld ns %5d.%-3d %% %12d: %s\n", static_cast<long long> (n_hits), static_cast<long long> (t_line_cum), wholes, rest, line_nr, src_line.c_str ());
+          else
+            ans += x_snprintf ("\t%8lld %12lld ns  %7.3e%% %12d: %s\n", static_cast<long long> (n_hits), static_cast<long long> (t_line_cum), share_of_fn, line_nr, src_line.c_str ());
+        }
+
+      map_fn_to_annotated_source[fn_name] = ans;
+    }
+
+  // NOTE(review): functions with identical cumulative time would
+  // collide on this key and only one would be listed — presumably
+  // considered unlikely enough not to matter.
+  map<int64_t, string> map_cumt_to_fn;
+  for (auto &kv : map_fn_to_cum_t)
+    map_cumt_to_fn[kv.second] = kv.first;
+
+  int64_t t_tot = 0;
+  for (auto &kv : map_fn_to_cum_t)
+    t_tot += kv.second;
+
+  // Print stuff to the user
+
+  cout << "\n\n\nProfiled functions:\n";
+  cout << "\tRuntime order:\n";
+  for (auto it = map_cumt_to_fn.rbegin (); it != map_cumt_to_fn.rend (); it++)
+    printf ("\t\t%12lld ns %3.0f%% %s\n", static_cast<long long> (it->first), it->first * 100. / t_tot, it->second.c_str ());
+  cout << "\tFirst call order:\n";
+  for (string fn_name : m_fn_first_call_order)
+  {
+    int64_t tcum = map_fn_to_cum_t[fn_name];
+    printf ("\t\t%12lld ns %3.0f%% %s\n", static_cast<long long> (tcum), tcum * 100. / t_tot, fn_name.c_str ());
+  }
+
+  // Per-function details.
+  for (auto kv : m_map_fn_stats)
+    {
+      string fn_name = kv.first;
+      vm_profiler_fn_stats &stats = kv.second;
+
+      int64_t fn_cum_t = map_fn_to_cum_t[fn_name];
+      int64_t fn_self_cum_t = map_fn_to_self_cum_t[fn_name];
+      string annotated_source = map_fn_to_annotated_source[fn_name];
+      string annotated_bytecode = map_fn_to_annotated_bytecode[fn_name];
+
+      cout << "\n\n\nFunction: " << kv.first << "\n\n";
+      if (stats.m_fn_file.size ())
+        cout << "\tFile: " << stats.m_fn_file << "\n";
+      cout << "\tAmount of calls: " << static_cast<long long> (stats.m_n_calls) << "\n";
+      cout << "\tCallers:         ";
+      for (string caller : stats.m_set_callers)
+        cout << caller << " ";
+      cout << "\n";
+      printf ("\tCumulative time: %9.5gs %lld ns\n", fn_cum_t/1e9, static_cast<long long> (fn_cum_t));
+      printf ("\tCumulative self time: %9.5gs %lld ns\n", fn_self_cum_t/1e9, static_cast<long long> (fn_self_cum_t));
+      cout << "\n\n";
+
+      if (annotated_source.size ())
+      {
+         cout << "\tAnnotated source:\n";
+         cout << "\t     ops         time       share\n";
+         cout << "\n";
+         cout << annotated_source << "\n\n";
+      }
+      if (annotated_bytecode.size ())
+      {
+        cout << "\tAnnotated bytecode:\n";
+        cout << "\t     hits         time       share\n";
+        cout << "\n";
+        cout << annotated_bytecode << "\n\n";
+      }
+      cout << "\n";
+    }
+
+  cout << std::flush;
+}
+
+// Convenience overload: register entry into the function whose
+// bytecode is 'bc', called from 'caller_name'.
+void
+vm_profiler::enter_fn (std::string caller_name, bytecode &bc)
+{
+  unsigned char *code = bc.m_code.data ();
+  std::string *name_data = bc.m_ids.data ();
+  unwind_data *unwind_data = &bc.m_unwind_data;
+
+  std::string callee_name = bc.m_data[2].string_value (); // profiler_name () queried at compile time
+
+  enter_fn (callee_name, caller_name, unwind_data, name_data, code);
+}
+
+// Register entry into 'fn_name' from 'caller': push a call record on
+// the shadow call stack, update the callee's per-function statistics,
+// and (on first entry) snapshot the callee's bytecode and id table so
+// they can be disassembled when the report is printed.
+void
+vm_profiler::enter_fn (std::string fn_name, std::string caller, octave::unwind_data *unwind_data, std::string *name_data, unsigned char *code)
+{
+  // Remember the order in which functions were first seen, for the
+  // "First call order" listing.
+  if (!m_map_fn_stats.count (fn_name))
+    m_fn_first_call_order.push_back (fn_name);
+
+  vm_profiler_fn_stats &callee_stat = m_map_fn_stats[fn_name];
+
+  callee_stat.m_set_callers.insert (caller);
+  callee_stat.m_v_callers.push_back (caller);
+  callee_stat.m_n_calls++;
+
+  vm_profiler_call call{};
+  call.m_callee = fn_name;
+  call.m_caller = caller;
+
+  int64_t now = unow ();
+  call.m_entry_time = now;
+
+  m_shadow_call_stack.push_back (call);
+
+  // Start timing at ip 0 for this activation.
+  callee_stat.m_v_t.push_back (now);
+  callee_stat.m_v_ip.push_back (0);
+
+  // Bytecode already snapshotted on an earlier call.
+  if (callee_stat.m_code.size ())
+    return;
+
+  callee_stat.m_fn_file = unwind_data->m_file;
+  callee_stat.m_fn_name = unwind_data->m_name;
+
+  // We need to copy the bytecode with id names to the stat object to be able
+  // to print it later.
+  unsigned n_code = unwind_data->m_code_size;
+  unsigned n_ids = unwind_data->m_ids_size;
+  callee_stat.m_code = std::vector<unsigned char> (n_code);
+  callee_stat.m_ids = std::vector<std::string> (n_ids);
+
+  callee_stat.m_loc_entries = unwind_data->m_loc_entry;
+
+  for (unsigned i = 0; i < n_code; i++)
+    callee_stat.m_code[i] = code[i];
+  for (unsigned i = 0; i < n_ids; i++)
+    callee_stat.m_ids[i] = name_data[i];
+}
+
+// Called when the shadow call stack is detected to be inconsistent.
+// Drops all per-activation bookkeeping so that subsequent measurements
+// start from a clean (if less accurate) slate.
+void
+vm_profiler::purge_shadow_stack ()
+{
+  warning ("profiler shadow stack got messed up. Measurement results might be inaccurate");
+
+  m_shadow_call_stack.clear ();
+
+  for (auto &kv : m_map_fn_stats)
+    {
+      auto &stats = kv.second;
+
+      stats.m_v_callers.clear ();
+      stats.m_v_t.clear ();
+      stats.m_v_ip.clear ();
+    }
+}
+
+// Register exit from 'fn_name': close the top shadow-stack call record,
+// attribute the callee's cumulative time to the caller's current opcode,
+// and pop one activation level from the callee's statistics.  Any
+// inconsistency sends us to the error label, which purges the shadow
+// stack rather than reporting bogus numbers.
+void
+vm_profiler::exit_fn (std::string fn_name)
+{
+  // Scope block so the 'goto error' jumps below do not cross variable
+  // initializations.
+  {
+    int64_t t_exit = unow ();
+
+    vm_profiler_fn_stats &callee_stat = m_map_fn_stats[fn_name];
+
+    // Add the cost of the RET up till now to the callee
+    if (callee_stat.m_v_t.size () && callee_stat.m_v_t.back () != -1)
+      {
+        int64_t t0 = callee_stat.m_v_t.back ();
+        int64_t dt = t_exit - t0;
+
+        callee_stat.add_t (dt);
+        this->add_t (dt);
+      }
+
+    if (!m_shadow_call_stack.size ())
+      goto error;
+    if (!callee_stat.m_v_callers.size ())
+      goto error;
+
+    // Recursive calls must not be double-booked into the caller below.
+    bool is_recursive = false;
+    for (auto &call : m_shadow_call_stack)
+      {
+        if (call.m_caller == fn_name)
+          {
+            is_recursive = true;
+            break;
+          }
+      }
+
+    vm_profiler_call call = m_shadow_call_stack.back ();
+    m_shadow_call_stack.pop_back ();
+
+    std::string caller = call.m_caller;
+
+    std::string caller_according_to_callee = callee_stat.m_v_callers.back ();
+
+    // Pop one level
+    callee_stat.m_v_callers.pop_back ();
+    callee_stat.m_v_t.pop_back ();
+    callee_stat.m_v_ip.pop_back ();
+
+    // The two bookkeeping views must agree on who called whom.
+    if (caller_according_to_callee != caller)
+      goto error;
+
+    if (caller != "") // If the caller name is "" the callee has no profiled caller
+      {
+        vm_profiler_fn_stats &caller_stat = m_map_fn_stats[caller];
+
+        if (!caller_stat.m_v_t.size ())
+          goto error;
+
+        // Subtract the caller's own call overhead (time between the
+        // caller's last timestamp and the callee's entry) from the
+        // time attributed to the callee.
+        int64_t caller_enters_call = caller_stat.m_v_t.back ();
+        int64_t caller_enters_callee = call.m_entry_time;
+        int64_t caller_call_overhead = caller_enters_callee - caller_enters_call;
+        int64_t callee_dt = call.m_t_self_cum + call.m_t_call_cum - caller_call_overhead;
+
+        // Add the call's cumulative time to the caller's "time spent in bytecode call"-vector
+        // unless the call is recursive (to prevent confusing double book keeping of the time).
+        unsigned caller_ip = caller_stat.m_v_ip.back ();
+        caller_stat.maybe_resize (caller_ip);
+
+        if (!is_recursive)
+        {
+          // Add to cumulative spent in call from this ip, in caller
+          caller_stat.m_v_cum_call_t[caller_ip] += callee_dt;
+          // Add to cumulative time spent in *the latest call* to caller
+          if (m_shadow_call_stack.size ())
+            m_shadow_call_stack.back ().m_t_call_cum += callee_dt;
+        }
+        // Change the caller's last timestamp to now and subtract the caller's call overhead.
+        caller_stat.m_v_t.back () = unow () - caller_call_overhead;
+      }
+    return;
+  }
+error:
+  purge_shadow_stack ();
+  return;
+}
+
+// Called when a new VM frame is pushed while outputs are being ignored:
+// hand the pending ignore matrix to the new frame's IGNORED auto var
+// and save the caller's lvalue list so pop_frame () can restore it.
+void
+vm::output_ignore_data::push_frame (vm &vm)
+{
+  vm.m_tw->set_auto_fcn_var (stack_frame::IGNORED, m_ov_pending_ignore_matrix);
+  m_ov_pending_ignore_matrix = {}; // Clear ignore matrix so that the next call won't ignore anything
+  m_v_lvalue_list.push_back (vm.m_tw->lvalue_list ()); // Will be restored in output_ignore_data::pop_frame ()
+  m_v_owns_lvalue_list.push_back (false); // Caller owns the current lvalue
+
+  vm.m_tw->set_lvalue_list (nullptr); // There is no lvalue list set for the new frame
+}
+
+// Undo a pending set_ignore () for the current frame: free the lvalue
+// list if we own it, restore the previous one in the tree walker, and
+// drop the pending ignore matrix.  Unlike pop_frame () this keeps the
+// per-frame bookkeeping slot (only its contents are reset).
+void
+vm::output_ignore_data::clear_ignore (vm &vm)
+{
+  CHECK_PANIC (m_v_lvalue_list.size ());
+  CHECK_PANIC (m_v_owns_lvalue_list.size ());
+  CHECK_PANIC (m_v_owns_lvalue_list.size () == m_v_lvalue_list.size ());
+
+  // If the output_ignore_data object owns the current lvalue list
+  // we need to free it.
+  auto *current_lval_list = vm.m_tw->lvalue_list ();
+
+  bool owns_lval_list = m_v_owns_lvalue_list.back ();
+  m_v_owns_lvalue_list.back () = false;
+
+  if (owns_lval_list)
+    delete current_lval_list;
+
+  // Restore the prior lvalue list in the tree walker
+  vm.m_tw->set_lvalue_list (m_v_lvalue_list.back ());
+  m_v_lvalue_list.back () = nullptr;
+
+  m_ov_pending_ignore_matrix = {};
+}
+
+// Mirror of push_frame (): free the current lvalue list if owned,
+// restore the caller's lvalue list in the tree walker, and pop the
+// per-frame bookkeeping slot.
+void
+vm::output_ignore_data::pop_frame (vm &vm)
+{
+  CHECK_PANIC (m_v_lvalue_list.size ());
+  CHECK_PANIC (m_v_owns_lvalue_list.size ());
+  CHECK_PANIC (m_v_owns_lvalue_list.size () == m_v_lvalue_list.size ());
+
+  // If the output_ignore_data object owns the current lvalue list
+  // we need to free it.
+  auto *current_lval_list = vm.m_tw->lvalue_list ();
+
+  bool owns_lval_list = m_v_owns_lvalue_list.back ();
+  m_v_owns_lvalue_list.pop_back ();
+
+  if (owns_lval_list)
+    delete current_lval_list;
+
+  // Restore the prior lvalue list in the tree walker
+  vm.m_tw->set_lvalue_list (m_v_lvalue_list.back ());
+  m_v_lvalue_list.pop_back ();
+}
+
+// Arrange for an anonymous-function call to inherit the current ignore
+// matrix and lvalue list (anonymous functions are transparent to
+// output ignoring).
+void
+vm::output_ignore_data::set_ignore_anon (vm &vm, octave_value ignore_matrix)
+{
+  CHECK_PANIC (m_ov_pending_ignore_matrix.is_nil ());
+  CHECK_PANIC (m_v_lvalue_list.size ());
+  CHECK_PANIC (m_v_owns_lvalue_list.size ());
+  CHECK_PANIC (m_v_owns_lvalue_list.size () == m_v_lvalue_list.size ());
+
+  // For anonymous functions we propagate the current ignore matrix and lvalue list to the callee.
+
+  m_ov_pending_ignore_matrix = ignore_matrix;
+  // Since the caller owns the lvalue list, we need to note not to delete the lvalue list when popping
+  // the callee frame.
+  vm.m_tw->set_lvalue_list (m_v_lvalue_list.back ());
+}
+
+// Arrange for the next call to see 'ignore_matrix' as its ignored
+// outputs and 'new_lval_list' as its lvalue list.  The previous lvalue
+// list is stashed (and marked owned, so pop_frame ()/clear_ignore ()
+// will delete 'new_lval_list' when done with it).
+void
+vm::output_ignore_data::set_ignore (vm &vm, octave_value ignore_matrix,
+                                    std::list<octave_lvalue> *new_lval_list)
+{
+  CHECK_PANIC (m_ov_pending_ignore_matrix.is_nil ());
+  CHECK_PANIC (m_v_lvalue_list.size ());
+  CHECK_PANIC (m_v_owns_lvalue_list.size ());
+  CHECK_PANIC (m_v_owns_lvalue_list.size () == m_v_lvalue_list.size ());
+
+  m_ov_pending_ignore_matrix = ignore_matrix;
+  m_v_owns_lvalue_list.back () = true;
+  m_v_lvalue_list.back () = vm.m_tw->lvalue_list ();
+  vm.m_tw->set_lvalue_list (new_lval_list);
+}
+
+// Return true iff 'fn' is (or after this call, has become) compiled to
+// bytecode.  Compiles on demand when the VM is enabled and a previous
+// attempt has not already failed.  'locals' is required (and only used)
+// for anonymous functions, whose captured variables must be known at
+// compile time.  Compilation errors are downgraded to a warning.
+bool
+vm::maybe_compile_or_compiled (octave_user_code *fn, stack_frame::local_vars_map *locals)
+{
+  if (!fn)
+    return false;
+
+  if (fn->is_compiled ())
+    return true;
+
+  if (V__vm_enable__ && !fn->compilation_failed ())
+    {
+      try
+        {
+          if (fn->is_anonymous_function ())
+            {
+              CHECK_PANIC (locals);
+              octave::compile_anon_user_function (*fn, false, *locals);
+            }
+          else
+            octave::compile_user_function (*fn, false);
+
+          return true;
+        }
+      catch (std::exception &e)
+        {
+          // Remember the failure (via compilation_failed ()) is done by
+          // the compiler; here we just warn and fall back to the tree
+          // evaluator.
+          warning_with_id ("Octave:bytecode-compilation",
+                           "auto-compilation of %s failed with message %s",
+                           fn->name().c_str (), e.what ());
+          return false;
+        }
+    }
+
+  return false;
+}
+
+// Execute the already-compiled user code 'fn' in a fresh VM: push a
+// bytecode stack frame (with optional closure 'context' for nested
+// functions), run the bytecode, and pop the frame again — also on the
+// exceptional paths.  Returns the function's output list.
+octave_value_list
+vm::call (tree_evaluator& tw, int nargout, const octave_value_list& xargs,
+          octave_user_code *fn, std::shared_ptr<stack_frame> context)
+{
+  // If number of outputs unknown, pass nargout=1 to the function being called
+  if (nargout < 0)
+    nargout = 1;
+
+  CHECK_PANIC (fn);
+  CHECK_PANIC (fn->is_compiled ());
+
+  bool call_script = fn->is_user_script ();
+
+  // Scripts take no arguments and return nothing.
+  if (call_script && (xargs.length () != 0 || nargout != 0))
+    error ("invalid call to script %s", fn->name ().c_str ());
+
+  if (tw.m_call_stack.size () >= static_cast<std::size_t> (tw.m_max_recursion_depth))
+    error ("max_recursion_depth exceeded");
+
+  octave_value_list args (xargs);
+
+  bytecode &bc = fn->get_bytecode ();
+
+  vm vm (&tw, bc);
+
+  bool caller_is_bytecode = tw.get_current_stack_frame ()->is_bytecode_fcn_frame ();
+
+  // Pushes a bytecode stackframe. nargin is set inside the VM.
+  if (context)
+    tw.push_stack_frame (vm, fn, nargout, 0, context); // Closure context for nested frames
+  else
+    tw.push_stack_frame (vm, fn, nargout, 0);
+
+  // The arg names of root stackframe in VM need to be set here, unless the caller is bytecode.
+  // The caller can be bytecode if evalin("caller", ...) is used in some uncompiled function.
+  if (!caller_is_bytecode)
+    tw.set_auto_fcn_var (stack_frame::ARG_NAMES, Cell (xargs.name_tags ()));
+  if (!call_script)
+    {
+      // Propagate "[~, x] = foo ()"-style ignored outputs into the frame.
+      Matrix ignored_outputs = tw.ignored_fcn_outputs ();
+      if (ignored_outputs.numel())
+        {
+          vm.caller_ignores_output ();
+          tw.set_auto_fcn_var (stack_frame::IGNORED, ignored_outputs);
+        }
+    }
+
+  octave_value_list ret;
+
+  // NOTE(review): if quit_debug_exception were ever derived from
+  // std::exception the first handler would shadow the second — confirm
+  // it is an independent class.
+  try {
+    ret = vm.execute_code (args, nargout);
+  } catch (std::exception &e) {
+    if (vm.m_dbg_proper_return == false)
+      {
+        std::cout << e.what () << std::endl;
+        // TODO: Replace with panic when the VM almost works
+
+        // Some test code eats errors messages, so we print to stderr too.
+        std::cerr << "VM error " << __LINE__ << ": Exception in " << fn->name () << " escaped the VM\n";
+        error("VM error %d: " "Exception in %s escaped the VM\n", __LINE__, fn->name ().c_str());
+      }
+
+    tw.pop_stack_frame ();
+    throw;
+  } catch (const quit_debug_exception &qde) {
+    if (vm.m_dbg_proper_return == false)
+      panic ("quit debug exception escaping the vm");
+
+    tw.pop_stack_frame ();
+    throw;
+  }
+
+
+  tw.pop_stack_frame ();
+  return ret;
+}
+
+// Debugging functions to be called from gdb
+
+// gdb helper: print an octave_base_value to stdout.
+void
+vm_debug_print_obv (octave_base_value *obv)
+{
+  obv->print (std::cout);
+}
+
+// gdb helper: print an octave_value to stdout.
+void
+vm_debug_print_ov (octave_value ov)
+{
+  ov.print (std::cout);
+}
+
+// gdb helper: print every element of an octave_value_list to stdout.
+void
+vm_debug_print_ovl (octave_value_list ovl)
+{
+  int len = ovl.length ();
+
+  for (int idx = 0; idx < len; idx++)
+    ovl (idx).print (std::cout);
+}
+
+
+// Empty extern "C" functions usable as breakpoint anchors or
+// instrumentation markers from gdb; the asm statement keeps the
+// compiler from optimizing the calls away.
+extern "C" void dummy_mark_1 (void)
+{
+  asm ("");
+}
+
+extern "C" void dummy_mark_2 (void)
+{
+  asm ("");
+}
+
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libinterp/parse-tree/pt-bytecode-vm.h	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,653 @@
+////////////////////////////////////////////////////////////////////////
+//
+// Copyright (C) 2023-2024 The Octave Project Developers
+//
+// See the file COPYRIGHT.md in the top-level directory of this
+// distribution or <https://octave.org/copyright/>.
+//
+// This file is part of Octave.
+//
+// Octave is free software: you can redistribute it and/or modify it
+// under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// Octave is distributed in the hope that it will be useful, but
+// WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with Octave; see the file COPYING.  If not, see
+// <https://www.gnu.org/licenses/>.
+//
+////////////////////////////////////////////////////////////////////////
+
+/*
+
+  -- About the experimental VM for GNU Octave
+
+  The VM is a "monkey tracing" stack based VM, executing linear bytecode compiled
+  from the abstract syntax tree (class tree_expression etc.).
+
+  Files of interest are:
+    * pt-bytecode-walk.cc:
+        The compiler translating the AST to bytecode
+    * pt-bytecode-vm.cc:
+        The VM
+    * stack-frame.cc:
+      bytecode_fcn_stack_frame, is the dynamic stack frame
+
+  -- Stack
+    The VM has one stack where it put arguments, returns, locals and temporaries.
+    The stack elements have the type 'union stack_element', which is a union of
+    octave_value and some pointers and native long long, double etc.
+
+    I.e. octave_value:s are constructed in place on the stack. Not all stack elements
+    are octave_value:s.
+
+    Nested calls to compiled bytecode functions use the same stack.
+
+    The stack area does not grow. If the stack space runs out, the execution aborts.
+
+    To access arguments, returns and locals their "slot number" is used. I.e. offset
+    from the base stack register.
+
+    At VM termination, the end and start of the stack are checked for magic numbers
+    that should be there, and the VM aborts if they are changed.
+
+  -- Registers
+    The VM uses the following pseudo-register:
+      * instruction pointer ('ip')
+      * base instruction register ('code')
+      * stack register ('sp')
+      * base stack register ('bsp')
+          The start of the current stack frame
+      * constant base register ('data')
+          A pointer to an array of octave_value "literal constants", like "3"
+
+    The registers are popped and pushed in each return or call together with the
+    following auxiliary data:
+      * unwind data
+      * name data (names of the identifiers)
+      * nargout
+      * nargin
+
+    Note that 'argnames' is lazy in the VM. There is more kludge state stored in
+    the VM object other than the happy-path state noted above.
+
+  -- Dynamic stack frame
+    The VM uses its own stack frames, but also pushes a 'stack_frame' of the subclass
+    'bytecode_fcn_stack_frame' to the 'tree_evaluator', to be able to cooperate
+    with C++-compiled functions and the 'tree_evaluator'.
+
+    'bytecode_fcn_stack_frame' is quite lazy and lets e.g. a compiled function
+    or user code executed with the 'tree_evaluator' create, read and write variables
+    on the VM stack.
+
+  -- Monkey tracing
+    During execution of some op-codes the VM checks the type of the operands
+    and might modify the bytecode to execute a specialized op-code.
+
+    E.g. the "index identifier"-opcode becomes the "index matrix with one scalar"-opcode
+    if the index is one double and the object to index is a matrix.
+
+    If later the preconditions are not true, the specialized opcode replaces itself
+    with the general opcode.
+
+    "Monkey tracing" is a made up term for this concept.
+
+  -- Function caching
+    Function lookups are cached in the corresponding slot of an identifier on
+    the VM stack. If any function is added to the symbol table, the current
+    directory is changed or 'clear' is called, all function caches are invalidated.
+
+    The function cache is dependent on the argument types. If the argument types
+    change, the cache is invalidated.
+
+    Binary and unary operators for doubles are looked up on VM start and cached in
+    the VM. They are not invalidated as long as the VM is running.
+
+  -- Compilation
+    At runtime when user code is about to be executed, it is compiled, if VM
+    evaluation is turned on.
+
+    It is also possible to compile ahead of time.
+
+    Compiled code is reused the next invocation of the user function. If the
+    user function is changed, the compiled code is cleared.
+
+    Compilation is done by the 'bytecode_walker' class in 'pt-bytecode-walk.cc'.
+
+  -- Opcodes
+
+  The op-codes are byte aligned and some are variable length.
+
+  The first byte always identifies the op-code and is also the offset used in the
+  dispatch table 'instr'.
+
+  'octave_value' is abbreviated 'ov' in this table.
+  "<-" means, state of stack before operation. The right most element is the toppiest of the stack.
+  "->" means, state of stack after operation.
+
+    ** Binary math operations
+      Pop two 'ov:s' off the stack and do the appropriate operation, then push the
+      resulting 'ov'. The top of the stack is the right hand side.
+
+      For all:
+      <- (ov lhs) (ov rhs)
+      -> (ov ans)
+
+      -- MUL DIV ADD SUB POW LDIV EL_MUL EL_DIV EL_POW EL_LDIV
+          *   /   +   -   ^   \     .*     ./     .^     .\
+
+      The following specializations for double arguments exist:
+        MUL_DBL, ADD_DBL, SUB_DBL, DIV_DBL, POW_DBL
+
+    ** Compound math operations
+      Pop two 'ov:s' off the stack and do the appropriate operation, then push the
+      resulting 'ov'. The top of the stack is the right hand side.
+
+      The opcodes are combinations of an unary math operation and a binary.
+
+      <- (ov lhs) (ov rhs)
+      -> (ov ans)
+
+      -- TRANS_MUL  MUL_TRANS  HERM_MUL  MUL_HERM  TRANS_LDIV HERM_LDIV
+          a.'*b      a*b.'      a'*b      a*b'      a.'\b      a'\b
+
+    ** Unary math operations
+      Pop one 'ov' and do the appropriate operation, then push the
+      resulting 'ov'.
+
+      <- (ov arg)
+      -> (ov ans)
+
+      -- UADD
+        Unary addition. Note that unary plus is not a nop in GNU Octave. It
+        can be overloaded to do whatever.
+      -- USUB
+        Unary subtraction
+      -- TRANS
+        Transpose, ".'"
+      -- HERM
+        Hermitian, "'"
+
+    ** Logical unary operations
+      Pop one 'ov' and do the appropriate operation, then push the
+      resulting 'ov'.
+
+      <- (ov arg)
+      -> (ov ans)
+
+      -- NOT
+        "!", "~"
+
+      -- UNARY_TRUE
+        Converts an ov on the stack to either ov false or ov true.
+        The op-code is used to construct control flow for e.g. shortcircuits.
+        User values' truthness are checked by JMP_IF and JMP_IFN which errors
+        on undefined values.
+
+    ** Logical binary operations
+      Pop two 'ov:s' off the stack and do the appropriate operation, then push the
+      resulting 'ov'. The top of the stack is the right hand side.
+
+      For all:
+      <- (ov lhs) (ov rhs)
+      -> (ov ans)
+
+      -- LE  GR  EQ  NEQ  GR_EQ  LE_EQ EL_AND EL_OR
+         <   >   ==  !=   >=     <=     &      |
+
+      Note that EL_AND and EL_OR does not emulate braindamaged if-conditions. That
+      is done by the bytecode compiler with the help of the opcodes
+      BRAINDEAD_PRECONDITION and BRAINDEAD_WARNING together with some convoluted
+      bytecode.
+
+      The following specializations exist:
+        LE_DBL, LE_EQ_DBL, GR_DBL, GR_EQ_DBL, EQ_DBL, NEQ_DBL
+
+    ** Stack control
+      -- POP
+      <- (ov)
+      ->
+        Pop one 'ov' element off the stack.
+
+      -- DUP
+      <- (ov1)
+      -> (ov1) (ov1)
+
+        Duplicate the 'ov' on top of the stack and push it to the stack.
+
+      -- ROT
+      <- (ov1) (ov2)
+      -> (ov2) (ov1)
+        Rotate the top two 'ov:s' on the stack.
+
+      -- DUPN (uint8 offset) (uint8 n)
+      <- (ov -offset - n) ... (ov -offset) ... (offset amount of stack elements)
+      -> The range "(ov -offset - n) ... (ov -offset)" copied to the top of the stack in the same order.
+        Pushes 'n' ov:s from the stack at depth 'offset' to the top of the
+        stack. The copies have the same order on the stack as the originals.
+        An 'offset' of 0 means the top element of the stack.
+
+      -- PUSH_SLOT_NARGOUT0 (uint8 slot)
+      -- PUSH_SLOT_NARGOUT1 (uint8 slot)
+      -- PUSH_SLOT_NARGOUTN (uint8 slot) (uint8 nargout)
+      <-
+      -> (ov 1) (ov 2)? ... (ov n)?
+        If the local 'ov' at 'bsp[slot]' is an ordinary variable, push it
+        to the stack.
+
+        If the local is undefined, assume it is a command call function,
+        look the function name up, and call it with the nargout 0, 1 or n.
+
+        If the local is a function object, call it with the nargout 0, 1 or n.
+
+        PUSH_SLOT_NARGOUT1_SPECIAL is like PUSH_SLOT_NARGOUT1 but pushes 'classdef_metas'
+        instead of trying to execute them. PUSH_SLOT_DISP keeps track of whether the
+        slot variable was executed or not for a correct display call.
+
+      -- PUSH_SLOT_INDEXED (uint8 slot)
+      <-
+      -> (ov)
+        Push the local 'ov' at 'bsp[slot]' to the stack. This opcode is used for
+        e.g. pushing 'x' in "x(2)".
+
+      -- PUSH_PI (uint8 slot)
+      <-
+      -> (ov 1) (ov 2)? ... (ov n)?
+        Like PUSH_SLOT_NARGOUT1, but if the slot variable resolves to a call to
+        the builtin function 'pi', just push pi to the stack as a double ov.
+
+      -- PUSH_OV_U64
+      <-
+      -> (ov1)
+        Push an ov of the type uint64 with the value 0, to the stack.
+
+      -- PUSH_CELL
+      <- (ov i) (ov j) ... [a mess of ov:s]
+      -> (ov ans)
+        Create a cell ov on the stack with up to i*j ov objects in it. Note that the last row can be
+        shorter than the other rows and that any row can be empty and ignored.
+
+        Each row is initially pushed as following:
+          1. element by element, if any
+          2. an integer ov with the row length
+
+      -- PUSH_NIL
+      <-
+      -> (ov nil)
+        Push a default constructed 'octave_value' to the stack.
+
+      -- POP_N_INTS (uint8 n)
+      <- (int i1) ... (int in)
+      ->
+        Pops 'n' native values off the stack. It could be pointers or doubles, not just int:s.
+
+    ** Data control
+      -- LOAD_CST (uint8 offset)
+      <-
+      -> (ov)
+        Load the 'ov' at 'data[offset]' and push it to the stack.
+        LOAD_CST_ALTx are duplicates of the opcode existing for branch prediction reasons.
+
+      -- LOAD_FAR_CST (int32 offset)
+      <-
+      -> (ov)
+        Load the 'ov' at 'data[offset]' and push it to the stack.
+
+      -- INIT_GLOBAL (uint8 type) (uint8 slot) (uint8 unused) (bool has_init_code) (uint16 target)?
+        Initializes a persistent or global variable depending on 'type'.
+        If 'has_init_code' is true, jumps to 'target' if the variable does not exist yet
+        in the global namespace. If 'has_init_code' is false, it is the end of the instruction.
+
+    ** Flow control
+      -- JMP (uint16 target)
+        Set the instruction register to the instruction base register plus target.
+
+      -- JMP_IF (uint16 target)
+      -- JMP_IFN (uint16 target)
+      <- (ov)
+      ->
+        Set the instruction register to the instruction base register plus target
+        if the argument is true/untrue.
+
+      -- RET
+      <- [saved caller frame] (int nargout) (ov ret1) ... (ov retn) (ov arg1) ... (ov argn) (ov local1) ... (ov localn)
+      -> (ov retn) ... (ov ret1)
+        Return from a bytecode frame to another.
+
+        There is always at least one ov on the stack after RET is executed. It might be the nil ov.
+
+      -- FOR_SETUP FOR_COND (uint16 after_target) (uint8 slot)
+      <- (ov range1)
+      -> (ov range1) (int64 n) (int64 i)
+        Executes a for-loop setup. Then falls through to the FOR_COND op-code which checks
+        if a loop body is to be executed.
+
+        FOR_SETUP is always followed by a FOR_COND opcode.
+
+        The 'slot' is the slot for the iteration variable.
+
+        The 'after_target' is the instruction offset to after the loop body.
+
+        The end of the loop body jumps to the FOR_COND op-code.
+
+        After the loop body, and at each escape point in the body,
+        the two native integers and the ov range are popped.
+
+        FOR_COMPLEX_SETUP and FOR_COMPLEX_COND is similar for "struct key-value for-loops"
+        but needs two slots.
+
+      -- THROW_IFERROBJ
+      <- (ov)
+      ->
+        Unwinds the stack until any exception handler if ov is an error object.
+
+
+      ... there are more op-codes.
+*/
+
+
+
+
+#if ! defined (octave_pt_bytecode_vm_h)
+#define octave_pt_bytecode_vm_h 1
+
+#include "octave-config.h"
+
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+
+#include "octave-config.h"
+
+#include <cstddef>
+#include <cstdint>
+#include <vector>
+#include <memory>
+
+#include "oct-lvalue.h"
+#include "ovl.h"
+
+#include "interpreter-private.h"
+#include "symtab.h"
+
+#include "pt-bytecode.h"
+
+#if defined(__FILE_NAME__)
+#define CHECK_PANIC(cond) \
+do { if (!(cond)) panic ("VM internal error at %s:%d, " #cond, __FILE_NAME__, __LINE__);} while ((0))
+#else
+#define CHECK_PANIC(cond) \
+do { if (!(cond)) panic ("VM internal error at %d, " #cond, __LINE__);} while ((0))
+#endif
+
+
+OCTAVE_BEGIN_NAMESPACE(octave)
+
+class tree_evaluator;
+
+struct vm_profiler
+{
+  struct vm_profiler_call
+  {
+    std::string m_caller;
+    std::string m_callee;
+    int64_t m_entry_time;
+    int64_t m_t_self_cum; // Time spent in callee it-self
+    int64_t m_t_call_cum; // Time spent in bytecode calls, called from callee
+  };
+
+  struct vm_profiler_fn_stats
+  {
+    // Cumulative ns time at op-code at offset
+    std::vector<int64_t> m_v_cum_t;
+    // Cumulative hits at op-code at offset
+    std::vector<int64_t> m_v_n_cum;
+    // Cumulative time spent in nested calls to a bytecode function at op-code at offset
+    std::vector<int64_t> m_v_cum_call_t;
+
+    void maybe_resize (unsigned ip)
+    {
+      if (ip >= m_v_cum_t.size ())
+        m_v_cum_t.resize (ip + 1);
+      if (ip >= m_v_n_cum.size ())
+        m_v_n_cum.resize (ip + 1);
+      if (ip >= m_v_cum_call_t.size ())
+        m_v_cum_call_t.resize (ip + 1);
+    }
+
+    // The last bytecode timestamp, i.e. the start of the currently running opcode. One level per call
+    std::vector<int64_t> m_v_t;
+    // The last ip, i.e. the ip being executed. One level per call
+    std::vector<int> m_v_ip;
+    // Set of callers. One entry for each caller
+    std::set<std::string> m_set_callers;
+    // Amount of calls to this function
+    int64_t m_n_calls;
+
+    // Data structures to keep track of calls. One level per call
+    std::vector<std::string> m_v_callers; // Used in callee to change the last timestamp of caller
+
+    std::string m_fn_name;
+    std::string m_fn_file;
+    std::vector<unsigned char> m_code; // Copy of the actual opcodes executed
+    std::vector<std::string> m_ids; // Copy of the name data
+    std::vector<loc_entry> m_loc_entries; // Copy of source code location data
+
+    void add_t (int64_t dt);
+  };
+
+  void add_t (int64_t dt);
+
+  std::vector<vm_profiler_call> m_shadow_call_stack;
+
+  std::map<std::string, vm_profiler_fn_stats> m_map_fn_stats;
+
+  std::vector<std::string> m_fn_first_call_order;
+
+  static int64_t unow ();
+  void print_to_stdout ();
+  void enter_fn (std::string callee_name, std::string caller_name, octave::unwind_data *unwind_data, std::string *name_data, unsigned char *code);
+  void enter_fn (std::string caller_name, bytecode &bc);
+  void exit_fn (std::string fn);
+  void purge_shadow_stack ();
+};
+
+class vm
+{
+ public:
+
+  static constexpr size_t stack_size = 2048 * 8;
+  static constexpr size_t stack_pad = 32;
+
+#if SIZE_MAX == 0xFFFFFFFF
+  static constexpr size_t stack_magic_int = 0xBABEBEEF; // 32bit systems
+#else
+  static constexpr size_t stack_magic_int = 0xBABEBEEFCAFE1234;
+#endif
+  static constexpr size_t stack_min_for_new_call = 1024;
+
+  vm (tree_evaluator *tw, bytecode &initial_bytecode);
+
+  ~vm ();
+
+  bool m_dbg_proper_return = false;
+  bool m_could_not_push_frame = false;
+  bool m_unwinding_interrupt = false;
+  stack_element *m_stack0 = nullptr;
+
+  std::vector<std::shared_ptr<stack_frame>> m_frame_ptr_cache;
+
+  tree_evaluator *m_tw;
+  type_info *m_ti;
+  symbol_table *m_symtab;
+  stack_element *m_stack = nullptr;
+  stack_element *m_sp = 0;
+  stack_element *m_bsp = 0;
+  stack_element *m_rsp = 0;
+
+  type_info::binary_op_fcn m_fn_dbl_mul = nullptr;
+  type_info::binary_op_fcn m_fn_dbl_add = nullptr;
+  type_info::binary_op_fcn m_fn_dbl_sub = nullptr;
+  type_info::binary_op_fcn m_fn_dbl_div = nullptr;
+  type_info::binary_op_fcn m_fn_dbl_pow = nullptr;
+  type_info::binary_op_fcn m_fn_dbl_le = nullptr;
+  type_info::binary_op_fcn m_fn_dbl_le_eq = nullptr;
+  type_info::binary_op_fcn m_fn_dbl_gr = nullptr;
+  type_info::binary_op_fcn m_fn_dbl_gr_eq = nullptr;
+  type_info::binary_op_fcn m_fn_dbl_eq = nullptr;
+  type_info::binary_op_fcn m_fn_dbl_neq = nullptr;
+
+  type_info::unary_op_fcn m_fn_dbl_usub = nullptr;
+  type_info::unary_op_fcn m_fn_dbl_not = nullptr;
+  type_info::unary_op_fcn m_fn_bool_not = nullptr;
+
+  octave_function * m_pi_builtin_fn = nullptr;
+  octave_function * m_i_builtin_fn = nullptr;
+  octave_function * m_e_builtin_fn = nullptr;
+
+  static int constexpr m_scalar_typeid = 2;
+  static int constexpr m_matrix_typeid = 4;
+  static int constexpr m_bool_typeid = 10;
+  static int constexpr m_cslist_typeid = 36;
+
+  // If there are any ignored outputs, e.g. "[x, ~] = foo ()", we need to push a separate
+  // stack frame with the ignored outputs for isargout () to be able to query for ignored
+  // outputs in the callees.
+  //
+  //
+  struct output_ignore_data {
+    octave_value m_ov_pending_ignore_matrix;
+    std::vector<const std::list<octave::octave_lvalue>*> m_v_lvalue_list;
+    std::vector<bool> m_v_owns_lvalue_list; // If true, should call delete on active lvalue list
+    // A sanity check flag. Set to true if the first ignorer is calling from outside the VM
+    bool m_external_root_ignorer = false;
+
+    output_ignore_data ()
+    {
+      m_v_lvalue_list.push_back (nullptr);
+      m_v_owns_lvalue_list.push_back (false);
+    }
+
+    static void maybe_delete_ignore_data (vm &vm, unsigned target_depth)
+    {
+      if (!vm.m_output_ignore_data)
+        return;
+      if (vm.m_output_ignore_data->m_v_owns_lvalue_list.size () > target_depth)
+        return;
+
+      delete vm.m_output_ignore_data;
+      vm.m_output_ignore_data = nullptr;
+    }
+
+    void push_frame (vm &vm);
+    void pop_frame (vm &vm);
+    void clear_ignore (vm &vm);
+    void set_ignore (vm &vm, octave_value ignore_matrix,
+                     std::list<octave_lvalue> *new_lval_list);
+
+    void set_ignore_anon (vm &vm, octave_value ignore_matrix);
+
+    octave_value get_and_null_ignore_matrix ()
+    {
+      octave_value ret = m_ov_pending_ignore_matrix;
+      m_ov_pending_ignore_matrix = {};
+
+      return ret;
+    }
+
+    octave_value get_ignore_matrix ()
+    {
+      return m_ov_pending_ignore_matrix;
+    }
+
+    const std::list<octave::octave_lvalue>* pop_lvalue_list ()
+    {
+      auto *p = m_v_lvalue_list.back ();
+      m_v_lvalue_list.pop_back ();
+      return p;
+    }
+  };
+
+  output_ignore_data *m_output_ignore_data = nullptr;
+  const std::list<octave::octave_lvalue> *m_original_lvalue_list = nullptr;
+
+  unsigned char *m_code;
+  octave_value *m_data;
+  std::string *m_name_data;
+  unwind_data *m_unwind_data;
+
+  bool m_echo_prior_op_was_cond = false;
+
+  int m_ip;
+
+  // Generic data container to recreate exceptions
+  struct error_data
+  {
+    // Execution exception
+    int m_exit_status;
+    bool m_safe_to_return;
+    // Debug quit
+    bool m_debug_quit_all;
+  };
+
+  error_data
+  handle_error (error_type et);
+
+  static
+  loc_entry find_loc (int ip, std::vector<octave::loc_entry> &loc_entries);
+
+  // Disable some optimizations in GCC that are not suitable for dynamic label dispatch
+#if defined (__has_attribute) && __has_attribute (optimize)
+#  define OCTAVE_VM_EXECUTE_ATTR __attribute__ ((optimize("no-gcse","no-crossjumping")))
+#else
+#  define OCTAVE_VM_EXECUTE_ATTR
+#endif
+
+  // Returns true if the VM should be used to call the function
+  static bool maybe_compile_or_compiled (octave_user_code *fn, stack_frame::local_vars_map *locals = nullptr);
+
+  // Allocate a VM and call the function
+  static octave_value_list call (tree_evaluator& tw,
+                                 int nargout,
+                                 const octave_value_list& args,
+                                 octave_user_code *fn,
+                                 std::shared_ptr<stack_frame> context = nullptr);
+
+  octave_value_list execute_code (const octave_value_list &args, int root_nargout) OCTAVE_VM_EXECUTE_ATTR;
+
+  octave_value find_fcn_for_cmd_call (std::string *name);
+  octave_value handle_object_end (octave_value ov, int idx, int nargs);
+
+  void set_nargin (int nargin);
+
+  void set_nargout (int nargout);
+
+  void caller_ignores_output ();
+
+  unwind_entry* find_unwind_entry_for_current_state (bool only_find_unwind_protect);
+  int find_unwind_entry_for_forloop (int current_stack_depth);
+
+  static std::shared_ptr<vm_profiler> m_vm_profiler;
+  static bool m_profiler_enabled;
+  static bool m_trace_enabled;
+};
+
+OCTINTERP_API
+void print_bytecode (bytecode &bc);
+
+OCTINTERP_API
+std::vector<std::pair<int, std::string>>
+opcodes_to_strings (bytecode &bc);
+
+OCTINTERP_API
+std::vector<std::pair<int, std::string>>
+opcodes_to_strings (std::vector<unsigned char> &code, std::vector<std::string> &names);
+
+OCTAVE_END_NAMESPACE(octave)
+
+#endif
+
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libinterp/parse-tree/pt-bytecode-walk.cc	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,5958 @@
+////////////////////////////////////////////////////////////////////////
+//
+// Copyright (C) 2023-2024 The Octave Project Developers
+//
+// See the file COPYRIGHT.md in the top-level directory of this
+// distribution or <https://octave.org/copyright/>.
+//
+// This file is part of Octave.
+//
+// Octave is free software: you can redistribute it and/or modify it
+// under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// Octave is distributed in the hope that it will be useful, but
+// WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with Octave; see the file COPYING.  If not, see
+// <https://www.gnu.org/licenses/>.
+//
+////////////////////////////////////////////////////////////////////////
+
+#if defined (HAVE_CONFIG_H)
+#  include "config.h"
+#endif
+
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+
+#include "pt-all.h"
+#include "pt-bytecode-walk.h"
+#include "symrec.h"
+#include "pt-walk.h"
+#include "ov-scalar.h"
+#include "file-ops.h"
+
+//#pragma GCC optimize("Og")
+
+using namespace octave;
+
+#define ERR(msg) error("VM error %d: " msg, __LINE__)
+
+#define TODO(msg) error("VM error, Not done yet %d: " msg, __LINE__)
+
+#define CHECK(cond)                                                            \
+  do {                                                                         \
+    if (!(cond))                                                               \
+      ERR("internal VM compiler consistency check failed, " #cond);             \
+  } while ((0))
+
+#define CHECK_NONNULL(ptr) if (!ptr) error ("unexpected null %d", __LINE__)
+
+// Compiles an anonymous function.
+//
+// The compilation needs to happen at runtime since the values of the variables are embedded into the bytecode
+// as constants. I.e. when the anonymous function is created by running e.g. "a = @() b;"
+void octave::compile_anon_user_function (octave_user_code &ufn, bool do_print, stack_frame::local_vars_map &locals)
+{
+  try
+    {
+      if (!ufn.is_anonymous_function ())
+        error ("compile_anon_user_function (): Function is not anonymous");
+
+      if (ufn.is_classdef_constructor ())
+        error ("classdef constructors are not supported by the VM yet"); // Needs special handling
+
+      // Begin with clearing the old bytecode, if any
+      ufn.clear_bytecode ();
+
+      bytecode_walker bw;
+
+      // The value of each variable that is taken from the calling scope
+      bw.m_anon_local_values = &locals;
+      bw.m_is_anon = true; // Flag used during compilation
+
+      // We need to insert all the locals into the scope object. The tree_evaluator
+      // does this as part of pushing the frame object, but just do it here once instead.
+      auto scope = ufn.scope ();
+      for (auto it : locals)
+        {
+          scope.insert (it.first);
+        }
+
+      ufn.accept (bw);
+
+      if (do_print)
+        print_bytecode (bw.m_code);
+
+      ufn.set_bytecode (bw.m_code);
+
+      // Compile the subfunctions
+      auto subs = ufn.subfunctions ();
+      for (auto kv : subs)
+        {
+          octave_user_function *sub = kv.second.user_function_value ();
+          compile_user_function (*sub, do_print);
+          sub->get_bytecode ().m_unwind_data.m_file = ufn.fcn_file_name ();
+        }
+    }
+  catch(...)
+  {
+    ufn.clear_bytecode ();
+    throw;
+  }
+}
+
+void octave::compile_nested_user_function (octave_user_function &ufn, bool do_print, std::vector<octave_user_function *> v_parent_fns)
+{
+  try
+    {
+      if (ufn.is_classdef_constructor ())
+        error ("classdef constructors are not supported by the VM yet"); // Needs special handling
+      CHECK (ufn.is_nested_function ());
+
+      // Begin with clearing the old bytecode, if any
+      ufn.clear_bytecode ();
+
+      bytecode_walker bw;
+      bw.m_n_nested_fn = v_parent_fns.size ();
+      bw.m_v_parent_fns = v_parent_fns;
+      bw.m_code.m_unwind_data.m_parent_id = v_parent_fns.back ()->get_bytecode ().m_unwind_data.m_id; // Direct parent (root or another nested function)
+      bw.m_code.m_unwind_data.m_matriarch_id = v_parent_fns.front ()->get_bytecode ().m_unwind_data.m_id; // Root parent (the function with all nested functions in it)
+
+      ufn.accept (bw);
+
+      if (do_print)
+        print_bytecode (bw.m_code);
+
+      bw.m_code.m_unwind_data.m_file = v_parent_fns.front ()->get_bytecode ().m_unwind_data.m_file;
+
+      ufn.set_bytecode (bw.m_code);
+
+      v_parent_fns.push_back (&ufn);
+
+      // Compile the subfunctions
+      auto subs = ufn.subfunctions ();
+      for (auto kv : subs)
+        {
+          octave_user_function *sub = kv.second.user_function_value ();
+
+          CHECK (sub->is_nested_function ());
+
+          compile_nested_user_function (*sub, do_print, v_parent_fns);
+        }
+    }
+  catch(...)
+  {
+    ufn.clear_bytecode ();
+    throw;
+  }
+}
+
+void octave::compile_user_function (octave_user_code &ufn, bool do_print)
+{
+  try
+    {
+      if (ufn.is_classdef_constructor ())
+        error ("classdef constructors are not supported by the VM yet"); // Needs special handling
+
+      // Begin with clearing the old bytecode, if any
+      ufn.clear_bytecode ();
+
+      bytecode_walker bw;
+
+      ufn.accept (bw);
+
+      if (do_print)
+        print_bytecode (bw.m_code);
+
+      ufn.set_bytecode (bw.m_code);
+
+      // Compile the subfunctions
+      auto subs = ufn.subfunctions ();
+      for (auto kv : subs)
+        {
+          octave_user_function *sub = kv.second.user_function_value ();
+
+          if (sub->is_nested_function ())
+            {
+              std::vector<octave_user_function *> v_parent_fns;
+              v_parent_fns.push_back (static_cast<octave_user_function *> (&ufn));
+              compile_nested_user_function (*sub, do_print, v_parent_fns);
+            }
+          else
+            compile_user_function (*sub, do_print);
+
+          sub->get_bytecode ().m_unwind_data.m_file = ufn.fcn_file_name ();
+        }
+    }
+  catch(...)
+  {
+    ufn.clear_bytecode ();
+    throw;
+  }
+}
+
+// Class to walk the tree and collect most id:s that are assigned to.
+// E.g. any assign to index expression "pi(3) = 4;" is not collected.
+// This walker is used to try to figure out if any "i" identifier is used
+// as a variable or the imaginary unit.
+class find_assigned_ids_walker : tree_walker
+{
+public:
+  static std::set<std::string>
+  find_ids (octave_user_code &e)
+  {
+    find_assigned_ids_walker walker;
+    e.accept (walker);
+
+    return walker.m_set_of_ids;
+  }
+
+  std::set<std::string> m_set_of_ids; // ... that are assigned to
+
+  void visit_simple_assignment (tree_simple_assignment &t)
+  {
+    auto lhs = t.left_hand_side ();
+
+    if (lhs->is_identifier ())
+      m_set_of_ids.insert (lhs->name ());
+
+    t.right_hand_side ()->accept (*this);
+  }
+
+  void visit_multi_assignment (tree_multi_assignment &t)
+  {
+    octave::tree_argument_list *lhs = t.left_hand_side ();
+    if (!lhs)
+      return;
+
+    for (auto it = lhs->begin (); it != lhs->end (); it++)
+      {
+        if ((*it)->is_identifier ())
+          m_set_of_ids.insert ((*it)->name ());
+      }
+
+    t.right_hand_side ()->accept (*this);
+  }
+
+  void visit_simple_for_command (tree_simple_for_command& cmd)
+  {
+    tree_expression *lhs = cmd.left_hand_side ();
+    if (lhs->is_identifier ())
+      m_set_of_ids.insert (lhs->name ());
+
+    tree_statement_list *list = cmd.body ();
+    if (list)
+      list->accept (*this);
+  }
+
+  void visit_complex_for_command (tree_complex_for_command& cmd)
+  {
+    octave::tree_argument_list *lhs = cmd.left_hand_side ();
+
+    CHECK (lhs);
+    CHECK (lhs->size () == 2);
+
+    auto p = lhs->begin ();
+    tree_expression *val = *p++;
+    tree_expression *key = *p++;
+
+    CHECK (val); CHECK (key);
+
+    CHECK (val->is_identifier ());
+    CHECK (key->is_identifier ());
+
+    m_set_of_ids.insert (val->name ());
+    m_set_of_ids.insert (key->name ());
+
+    tree_statement_list *list = cmd.body ();
+    if (list)
+      list->accept (*this);
+  }
+
+  void visit_octave_user_function (octave_user_function& fcn)
+  {
+    octave::tree_parameter_list *paras = fcn.parameter_list ();
+    if (paras)
+      {
+        for (auto it = paras->begin (); it != paras->end (); it++)
+          {
+            CHECK_NONNULL (*it);
+            CHECK ((*it)->ident ());
+            m_set_of_ids.insert ((*it)->name ());
+          }
+      }
+
+    // Walk body
+    tree_statement_list *cmd_list = fcn.body ();
+    if (cmd_list)
+      cmd_list->accept (*this);
+  }
+};
+
+// Class to walk the tree and see if a index expression has
+// an end in it.
+//
+// Does not walk nested index expressions.
+class find_end_walker : tree_walker
+{
+public:
+  static bool has_end (tree &e)
+  {
+    find_end_walker walker;
+    e.accept (walker);
+
+    return walker.m_has_end;
+  }
+
+  bool m_has_end = false;
+
+  void visit_identifier (tree_identifier &id)
+  {
+    std::string name = id.name ();
+    if (name == "end")
+      m_has_end = true;
+  }
+};
+
+// Tree walker that decides whether an expression tree consists solely
+// of constants combined with unary and binary operators, i.e. whether
+// it is a candidate for constant folding.
+class is_foldable_walker : tree_walker
+{
+public:
+  static bool is_foldable (tree_binary_expression &e)
+  {
+    return is_foldable_internal (e);
+  }
+
+  static bool is_foldable (tree_prefix_expression &e)
+  {
+    return is_foldable_internal (e);
+  }
+
+  static bool is_foldable (tree_postfix_expression &e)
+  {
+    return is_foldable_internal (e);
+  }
+
+private:
+  // Walk E and report whether every visited node was foldable.
+  static bool is_foldable_internal (tree &e)
+  {
+    is_foldable_walker walker;
+
+    e.accept (walker);
+
+    return walker.m_is_foldable;
+  }
+
+  // A node is foldable if it is a constant or another unary or binary
+  // expression (whose operands are then checked in turn).
+  bool is_foldable_expr (tree_expression *e)
+  {
+    return e->is_binary_expression () || e->is_unary_expression () || e->is_constant ();
+  }
+
+  // Shared logic for prefix and postfix expressions: check the single
+  // operand and descend into it.
+  void check_unary_operand (tree_expression *op)
+  {
+    if (!m_is_foldable)
+      return;
+
+    if (is_foldable_expr (op))
+      op->accept (*this);
+    else
+      m_is_foldable = false;
+  }
+
+  void visit_postfix_expression (tree_postfix_expression& e)
+  {
+    check_unary_operand (e.operand ());
+  }
+
+  void visit_prefix_expression (tree_prefix_expression& e)
+  {
+    check_unary_operand (e.operand ());
+  }
+
+  void visit_binary_expression (tree_binary_expression &e)
+  {
+    if (!m_is_foldable)
+      return;
+
+    tree_expression *lhs = e.lhs ();
+    tree_expression *rhs = e.rhs ();
+
+    if (!is_foldable_expr (lhs) || !is_foldable_expr (rhs))
+      {
+        m_is_foldable = false;
+        return;
+      }
+
+    lhs->accept (*this);
+    if (m_is_foldable)
+      rhs->accept (*this);
+  }
+
+  bool m_is_foldable = true;
+};
+
+// Tree walker that collects the names (and symbol offsets) of all
+// identifiers in a statement list or an expression.  The magic "~"
+// identifier is skipped, and neither anonymous function handles nor
+// nested function definitions are descended into.
+class collect_idnames_walker : tree_walker
+{
+public:
+  struct id_data { std::string m_name; std::size_t m_offset; std::size_t m_frame_offset; };
+
+  static std::vector<id_data> collect_id_names (tree_statement_list &l)
+  {
+    collect_idnames_walker walker;
+
+    for (tree_statement *stmt : l)
+      {
+        if (stmt)
+          stmt->accept (walker);
+      }
+
+    return walker.m_id_names_and_offset;
+  }
+
+  static std::vector<id_data> collect_id_names (tree_expression &e)
+  {
+    collect_idnames_walker walker;
+    e.accept (walker);
+
+    return walker.m_id_names_and_offset;
+  }
+
+  std::vector<id_data> m_id_names_and_offset;
+
+  void visit_identifier (tree_identifier &id)
+  {
+    std::string name = id.name ();
+    if (name == "~") // The magic tilde id is never collected
+      return;
+
+    m_id_names_and_offset.push_back ({name, id.symbol ().data_offset (), id.symbol ().frame_offset ()});
+  }
+
+  void visit_anon_fcn_handle (tree_anon_fcn_handle &)
+  {
+    // Ids inside anonymous function handles are not collected, since
+    // the original scope doesn't collect them either.
+  }
+
+  void visit_function_def (tree_function_def &)
+  {
+    // Ids inside a function definition (functions embedded in scripts)
+    // are not collected.
+  }
+};
+
+// Remove the last element from a vector-like container V and return it.
+template <class T>
+typename T::value_type vector_pop (T &v)
+{
+  typename T::value_type last = v.back ();
+  v.pop_back ();
+  return last;
+}
+
+// Append one byte (opcode or operand) to the code vector, checked to
+// fit in a byte.
+#define PUSH_CODE(code_) do {\
+    int code_check_s_ = static_cast<int> (code_); \
+    unsigned char code_s_ = static_cast<unsigned char> (code_check_s_);     \
+    CHECK (code_check_s_ < 256 && code_check_s_ >= -128); \
+    m_code.m_code.push_back(code_s_);        \
+  } while ((0))
+
+// Emit a load of the constant at OFFSET in the data vector, choosing
+// between narrow, wide and far encodings depending on the offset size.
+#define PUSH_CODE_LOAD_CST(offset) do {\
+  unsigned offset_ = offset; \
+  if (offset_ < 65536)\
+    {\
+      if (offset_ >= 256) \
+        PUSH_CODE (INSTR::WIDE); \
+      emit_alt (m_cnt_alts_cst, {INSTR::LOAD_CST, INSTR::LOAD_CST_ALT2, \
+        INSTR::LOAD_CST_ALT3, INSTR::LOAD_CST_ALT4});\
+      if (offset_ >= 256) \
+        PUSH_CODE_SHORT (offset_);\
+      else\
+        PUSH_CODE (offset_);\
+    }\
+  else\
+    {\
+      PUSH_CODE (INSTR::LOAD_FAR_CST);\
+      PUSH_CODE_INT (offset_);\
+    }\
+} while (0)
+
+// Push a short (one byte) or wide (two byte) slot number.
+#define PUSH_SSLOT(sslot) PUSH_CODE(sslot)
+#define PUSH_WSLOT(wslot) PUSH_CODE_SHORT(wslot)
+#define NEED_WIDE_SLOTS() (m_map_locals_to_slot.size () >= 256)
+
+// Prefix the following opcode with WIDE when SLOT does not fit in one byte.
+#define MAYBE_PUSH_WIDE_OPEXT(slot) \
+do {\
+  if (slot >= 256)\
+    PUSH_CODE (INSTR::WIDE);\
+} while ((0))
+
+// Anonymous functions need dynamic nargout
+// The extension opcode EXT_NARGOUT replaces the arg0
+// (the static opcode embedded 'nargout') of the following opcode
+// with the %nargout on the stack.
+// -1 is the marker value for the need of the dynamic nargout
+#define MAYBE_PUSH_ANON_NARGOUT_OPEXT(nargout) \
+do {\
+  if ((nargout) == -1)\
+    PUSH_CODE (INSTR::EXT_NARGOUT);\
+} while ((0))
+
+// Push SLOT in whichever width it needs.
+#define PUSH_SLOT(slot) \
+do {\
+  if (slot >= 256)\
+    PUSH_WSLOT (slot);\
+  else\
+    PUSH_SSLOT (slot);\
+} while ((0))
+
+// Access to the emitted bytecode: current size and byte at offset X.
+#define CODE_SIZE() m_code.m_code.size()
+#define CODE(x) m_code.m_code[x]
+// Append a 16-bit little-endian value to the code vector.
+#define PUSH_CODE_SHORT(code_) do {   \
+  unsigned u = code_;                 \
+  unsigned char b0 = u & 0xFF;        \
+  unsigned char b1 = (u >> 8) & 0xFF; \
+  int code_check_ss_ = static_cast<int> (u);                  \
+  CHECK (code_check_ss_ < 65536 && code_check_ss_ >= -32768); \
+  PUSH_CODE (b0);                     \
+  PUSH_CODE (b1);                     \
+  } while ((0))
+// Append a 32-bit little-endian value to the code vector.
+#define PUSH_CODE_INT(code_) do {   \
+  unsigned u = code_;                 \
+  unsigned char b0 = u & 0xFF;        \
+  unsigned char b1 = (u >> 8) & 0xFF; \
+  unsigned char b2 = (u >> 16) & 0xFF;\
+  unsigned char b3 = (u >> 24) & 0xFF;\
+  PUSH_CODE (b0);                     \
+  PUSH_CODE (b1);                     \
+  PUSH_CODE (b2);                     \
+  PUSH_CODE (b3);                     \
+  } while ((0))
+
+// Patch a previously emitted 16-bit value at OFFSET (used to fill in
+// forward jump targets once they are known).
+#define SET_CODE_SHORT(offset, value) do {  \
+  int tmp = offset;                         \
+  unsigned u = value;                       \
+  unsigned char b0 = u & 0xFF;              \
+  unsigned char b1 = (u >> 8) & 0xFF;       \
+  int code_check_s_ = static_cast<int> (u); \
+  CHECK (code_check_s_ < 65536 && code_check_s_ >= -32768); \
+  CODE (tmp) = b0;                          \
+  CODE (tmp + 1) = b1;                      \
+  } while ((0))
+
+// The constant data vector: append a constant, query current size.
+#define PUSH_DATA(cst) m_code.m_data.push_back(cst)
+#define DATA_SIZE() m_code.m_data.size()
+
+// TODO: This optimization is nice and we should get it working again.
+#define PUSH_ALL_PATHS_TERMINATED() m_all_paths_terminated.push_back (false)
+#define POP_ALL_PATHS_TERMINATED() vector_pop (m_all_paths_terminated)
+#define PEEK_ALL_PATHS_TERMINATED() m_all_paths_terminated.back ()
+#define SET_ALL_PATHS_TERMINATED() m_all_paths_terminated.back () = true
+
+// Per-loop stack of offsets of break jumps that still need their
+// target patched in.
+#define PUSH_BREAKS() m_need_break_target.push_back ({})
+#define POP_BREAKS() vector_pop (m_need_break_target)
+#define PUSH_NEED_BREAK(offset) m_need_break_target.back ().push_back (offset)
+#define N_BREAKS() m_need_break_target.size ()
+
+// Per-loop stack of offsets of continue jumps needing a target.
+#define PUSH_CONTINUE_TARGET(target) m_continue_target.push_back ({})
+#define POP_CONTINUE_TARGET() vector_pop (m_continue_target)
+#define PUSH_NEED_CONTINUE_TARGET(offset) \
+  m_continue_target.back ().push_back (offset)
+
+#define SLOT(name) get_slot (name)
+
+// Stack of identifiers currently being indexed, e.g. the "foo" in
+// "foo (1, 2)" while its arguments are compiled.
+#define PUSH_ID_BEGIN_INDEXED(slot, idx, narg, is_obj) \
+  m_indexed_id.push_back ({slot, idx, narg, is_obj})
+#define POP_ID_BEING_INDEXED() m_indexed_id.pop_back ()
+#define ID_IS_BEING_INDEXED() (m_indexed_id.size () != 0)
+#define N_IDS_BEING_INDEXED() (m_indexed_id.size ())
+#define PEEK_ID_BEING_INDEXED() m_indexed_id.back ()
+#define IDS_BEING_INDEXED(idx) m_indexed_id[idx]
+
+// Stack of statement types we are nested in, used by unwind code to
+// know what is on the VM stack.
+#define PUSH_NESTING_STATEMENT(type) m_nesting_statement.push_back (type)
+#define POP_NESTING_STATEMENT() m_nesting_statement.pop_back ()
+#define NESTING_STATEMENTS() m_nesting_statement
+
+// Track how many expression deep we are in the walk.
+// I.e. identifiers need to know if they are:
+//   foo; %depth 1
+// or
+//   foo * 2; %depth 2 for id foo
+//
+// so that nargout for a command call at root is zero.
+// E.g.:
+//   tic;
+#define INC_DEPTH() ++m_depth
+#define DEC_DEPTH() --m_depth
+#define DEPTH() m_depth
+
+// We need to track the expected amount of output variables
+// for each expression. E.g.:
+// foo(); %0
+// a = foo (); %1
+// [a b] = foo (); %2
+// [a b] = foo (foo () + foo ()); %2 for outer, 1 for inner foo
+
+#define NARGOUT() m_nargout.back ()
+#define PUSH_NARGOUT(nargout) m_nargout.push_back (nargout)
+#define POP_NARGOUT() vector_pop (m_nargout)
+
+#define PUSH_ARGNAMES_ENTRY(arg_nm_e) m_code.m_unwind_data.m_argname_entries.push_back (arg_nm_e)
+
+// Per-unwind-protect stack of offsets of return jumps that need to go
+// through the cleanup code before actually returning.
+#define PUSH_UNWIND_RETURN_TARGETS() m_need_unwind_target.push_back ({})
+#define POP_UNWIND_RETURN_TARGET() vector_pop (m_need_unwind_target)
+#define N_UNWIND_RETURN_TARGETS() m_need_unwind_target.size ()
+#define PUSH_A_UNWIND_RETURN_TARGET(offset) \
+  m_need_unwind_target.back ().push_back (offset)
+
+// Source-location entries mapping bytecode ranges to line/column.
+#define PUSH_LOC() m_code.m_unwind_data.m_loc_entry.push_back ({})
+#define LOC(i) m_code.m_unwind_data.m_loc_entry[i]
+#define N_LOC() m_code.m_unwind_data.m_loc_entry.size ()
+
+// Unwind-table entries (try-catch, unwind-protect, ...).
+#define PUSH_UNWIND() m_code.m_unwind_data.m_unwind_entries.push_back ({})
+#define UNWIND(i) m_code.m_unwind_data.m_unwind_entries[i]
+#define N_UNWIND() m_code.m_unwind_data.m_unwind_entries.size ()
+
+// Track which identifiers were declared global or persistent.
+#define PUSH_GLOBAL(name) do {m_map_id_is_global[name] = 1;} while ((0))
+#define IS_GLOBAL(name) (m_map_id_is_global.find (name) !=\
+                                                  m_map_id_is_global.end ())
+
+#define PUSH_PERSISTENT(name) do {m_map_id_is_persistent[name] = 1;} while ((0))
+#define IS_PERSISTENT(name) (m_map_id_is_persistent.find (name) !=\
+                             m_map_id_is_persistent.end ())
+
+// Note that the placement of PUSH_TREE_FOR_DBG() need to mirror the walk in pt-bp.cc
+#define PUSH_TREE_FOR_DBG(ptree) do { m_code.m_unwind_data.m_ip_to_tree[CODE_SIZE ()] = ptree; } while(0)
+#define PUSH_TREE_FOR_EVAL(ptree) do { m_code.m_unwind_data.m_ip_to_tree[-CODE_SIZE ()] = ptree; } while(0)
+
+// Walk every statement of LST, pushing an expected nargout for each.
+void
+bytecode_walker::
+visit_statement_list (tree_statement_list& lst)
+{
+  for (tree_statement *stmt : lst)
+    {
+      CHECK_NONNULL (stmt);
+
+      // Anonymous functions have a dynamic nargout, marked with -1.
+      // Everything else at statement level runs with nargout 0.
+      PUSH_NARGOUT (m_is_anon ? -1 : 0);
+
+      stmt->accept (*this);
+      POP_NARGOUT ();
+    }
+}
+
+// Compile one statement: an expression statement gets a source-location
+// entry covering its emitted code and a debugger tree mapping; a command
+// statement is just dispatched to its visitor.
+void
+bytecode_walker::
+visit_statement (tree_statement& stmt)
+{
+  if (stmt.is_expression ())
+    {
+      // Open a location entry so errors and the debugger can map the
+      // emitted bytecode range back to this statement.
+      int loc_id = N_LOC ();
+      PUSH_LOC ();
+      LOC (loc_id).m_ip_start = CODE_SIZE ();
+
+      tree_expression *expr = stmt.expression ();
+      // Validate the pointer before recording it for the debugger.
+      CHECK_NONNULL (expr);
+      PUSH_TREE_FOR_DBG (expr);
+      expr->accept (*this);
+
+      LOC (loc_id).m_ip_end = CODE_SIZE ();
+      LOC (loc_id).m_col = stmt.column ();
+      LOC (loc_id).m_line = stmt.line ();
+    }
+  else if (stmt.is_command ())
+    {
+      tree_command *cmd = stmt.command ();
+      CHECK_NONNULL (cmd);
+      cmd->accept (*this);
+    }
+  else
+    TODO ();
+}
+
+// Emit one opcode from ALTS, cycling through the alternatives on
+// successive calls by means of the caller-owned counter CNTR.
+void
+bytecode_walker::emit_alt (int &cntr, std::vector<INSTR> alts)
+{
+  unsigned n_alts = alts.size ();
+  PUSH_CODE (alts[cntr++ % n_alts]);
+}
+
+// Begin emitting an unwind-protect construct: create the unwind-table
+// entry, start tracking return targets and save any pending break
+// targets.  Returns the state needed by the matching
+// emit_unwind_protect_code_before_cleanup() and
+// emit_unwind_protect_code_end() calls, which the caller invokes around
+// the body and cleanup code respectively.
+bytecode_walker::emit_unwind_protect_data
+bytecode_walker::emit_unwind_protect_code_start ()
+{
+  emit_unwind_protect_data D; // Keeps track of state for emit_unwind_protect_code_before_cleanup() and emit_unwind_protect_code_end()
+
+  // Unwind protect has a body and cleanup part that always
+  // is executed.
+  //
+  // If the VM is unwinding it enters the cleanup code with an
+  // error object on the stack. The body puts a nil object on the
+  // stack.
+  //
+  // If there is an error object on the stack at the end of the cleanup
+  // code it rethrows it.
+  //
+  // Returns in the body jump to the cleanup code before actually
+  // returning.  If a return is reached in the body, a true object is
+  // pushed to the stack, which is checked in the cleanup code to know
+  // if we are falling through or are supposed to return.
+  //
+  // The same applies to breaks, so the code gets a bit messy.
+
+  int unwind_idx = N_UNWIND ();
+  PUSH_UNWIND();
+
+  UNWIND (unwind_idx).m_ip_start = CODE_SIZE ();
+
+  UNWIND (unwind_idx).m_unwind_entry_type =
+    unwind_entry_type::UNWIND_PROTECT;
+
+  UNWIND (unwind_idx).m_stack_depth = n_on_stack_due_to_stmt();
+
+  // Returns need to execute the unwind cleanup code before
+  // returning, so we need to keep track of offsets that need
+  // to jump to the cleanup code.
+  PUSH_UNWIND_RETURN_TARGETS ();
+
+  // We need to store away the pending "need breaks" since any break in the
+  // unwind protect body need to jump to the cleanup code.
+  std::vector<int> v_need_breaks_initial;
+  bool break_stack_populated = N_BREAKS ();
+  if (break_stack_populated)
+    {
+      v_need_breaks_initial = POP_BREAKS ();
+      PUSH_BREAKS ();
+    }
+
+  D.m_break_stack_populated = break_stack_populated;
+  D.m_idx_unwind = unwind_idx;
+  D.m_v_need_breaks_initial = v_need_breaks_initial;
+
+  return D;
+}
+
+// Emit the glue between the end of an unwind-protect body and the start
+// of its cleanup code.  Pairs with emit_unwind_protect_code_start();
+// the shared state travels in D.
+void
+bytecode_walker::emit_unwind_protect_code_before_cleanup (emit_unwind_protect_data &D)
+{
+  // If the vm is unwinding it will push an error object to
+  // the stack. If we are just done executing the body we
+  // push a nil ov to the stack.
+  //
+  // At the end of the cleanup code we check the ov on the stack
+  // and continue to unwind if it is an error object, otherwise
+  // just execute the next instruction.
+  PUSH_CODE (INSTR::PUSH_NIL);
+  // For unwinding we need to keep track of the ov we pushed.
+  PUSH_NESTING_STATEMENT (nesting_statement::ONE_OV_ON_STACK);
+
+  auto v_need_cleanup = POP_UNWIND_RETURN_TARGET ();
+  int n_need_cleanup = v_need_cleanup.size ();
+
+  std::vector<int> v_need_break;
+  if (D.m_break_stack_populated)
+    v_need_break = POP_BREAKS();
+
+  int n_need_break = v_need_break.size ();
+
+  // If there is a return statement inside the unwind body it needs
+  // to jump to the cleanup code before the actual return. The return
+  // statement pushed a true ov to the stack, which is checked at the end of the
+  // cleanup code, since we use the same code for just falling through too.
+  //
+  // The same applies to breaks, and also the combination of a possibility of
+  // breaks and returns.
+  int n_falses = 0;
+  if (n_need_break && n_need_cleanup)
+    {
+      n_falses = 2;
+      // These nil ovs are the break and return marker if we are falling
+      // through to the cleanup code from the body.
+      // We have an error object on the stack.
+      PUSH_CODE (INSTR::PUSH_FALSE); // return marker
+      PUSH_CODE (INSTR::PUSH_FALSE); // break marker
+      PUSH_CODE (INSTR::JMP);
+      int need_after = CODE_SIZE ();
+      PUSH_CODE_SHORT (-1);
+
+      // For unwinding we need to keep track of the ovs we pushed.
+      PUSH_NESTING_STATEMENT (nesting_statement::ONE_OV_ON_STACK);
+      PUSH_NESTING_STATEMENT (nesting_statement::ONE_OV_ON_STACK);
+
+      // Set the offset for all the break jumps that need to go to here
+      for (int need : v_need_break)
+        SET_CODE_SHORT (need, CODE_SIZE ());
+
+
+      PUSH_CODE (INSTR::PUSH_NIL); // error object
+      PUSH_CODE (INSTR::PUSH_FALSE); // return marker
+      PUSH_CODE (INSTR::PUSH_TRUE);// break marker
+      PUSH_CODE (INSTR::JMP);
+      int also_need_after = CODE_SIZE ();
+      PUSH_CODE_SHORT (-1);
+
+      // Set the offset for all the return jumps that need to go to here
+      for (int need_cleanup : v_need_cleanup)
+        SET_CODE_SHORT (need_cleanup, CODE_SIZE ());
+
+      PUSH_CODE (INSTR::PUSH_NIL); // error object
+      PUSH_CODE (INSTR::PUSH_TRUE);// return marker
+      PUSH_CODE (INSTR::PUSH_FALSE); // break marker
+
+      // If we were falling through the body to the cleanup we jump to here
+      SET_CODE_SHORT(need_after, CODE_SIZE ());
+      SET_CODE_SHORT(also_need_after, CODE_SIZE ());
+    }
+  else if (n_need_break)
+    {
+      n_falses = 1;
+      // This nil ov is the break marker if we are falling through to the
+      // cleanup code from the body. We have an error object on the stack.
+      PUSH_CODE (INSTR::PUSH_FALSE); // break marker
+      PUSH_CODE (INSTR::JMP);
+      int need_after = CODE_SIZE ();
+      PUSH_CODE_SHORT (-1);
+
+      // For unwinding we need to keep track of the ov we pushed.
+      PUSH_NESTING_STATEMENT (nesting_statement::ONE_OV_ON_STACK);
+
+      // Set the offset for all the break jumps that need to go to here
+      for (int need : v_need_break)
+        SET_CODE_SHORT (need, CODE_SIZE ());
+
+      PUSH_CODE (INSTR::PUSH_NIL); // error object
+      PUSH_CODE (INSTR::PUSH_TRUE);// break marker
+
+      SET_CODE_SHORT(need_after, CODE_SIZE ());
+    }
+  else if (n_need_cleanup)
+    {
+      n_falses = 1;
+      // This nil ov is the return marker if we are falling through to the
+      // cleanup code from the body
+      PUSH_CODE (INSTR::PUSH_FALSE); // return marker
+      PUSH_CODE (INSTR::JMP); // We need to skip the pushes for the returns
+      int need_after = CODE_SIZE ();
+      PUSH_CODE_SHORT (-1);
+
+      // For unwinding we need to keep track of the ov we pushed.
+      PUSH_NESTING_STATEMENT (nesting_statement::ONE_OV_ON_STACK);
+
+      // Set the offset for all the jumps that need to go to here
+      for (int need_cleanup : v_need_cleanup)
+        SET_CODE_SHORT (need_cleanup, CODE_SIZE ());
+
+      PUSH_CODE (INSTR::PUSH_NIL); // error object
+      PUSH_CODE (INSTR::PUSH_TRUE);// return marker
+
+      SET_CODE_SHORT(need_after, CODE_SIZE ());
+    }
+
+  // This is the end of protected code
+  UNWIND (D.m_idx_unwind).m_ip_end = CODE_SIZE ();
+
+  if (n_falses)
+    {
+      // Fallthrough code do not need false pushes
+      PUSH_CODE (INSTR::JMP);
+      int need_after = CODE_SIZE ();
+      PUSH_CODE_SHORT (-1);
+      // An unwind will jump to here and needs some
+      // falses pushed to mark return or breaks
+      UNWIND (D.m_idx_unwind).m_ip_target = CODE_SIZE ();
+      for (int i = 0; i < n_falses; i++)
+        PUSH_CODE (INSTR::PUSH_FALSE);
+
+      SET_CODE_SHORT(need_after, CODE_SIZE ());
+    }
+  else
+    UNWIND (D.m_idx_unwind).m_ip_target = CODE_SIZE ();
+
+  // The body will just fall through into the unwind clean up code
+
+  // There might be breaks in the cleanup code too
+  if (D.m_break_stack_populated)
+    PUSH_BREAKS();
+
+  D.m_n_need_break = n_need_break;
+  D.m_n_need_cleanup = n_need_cleanup;
+}
+
+// Finish emitting an unwind-protect construct: after the cleanup code,
+// dispatch on the break/return markers left on the stack, restore the
+// saved break targets, and rethrow any pending error object.  Pairs
+// with emit_unwind_protect_code_start(); D carries the shared state.
+void
+bytecode_walker::emit_unwind_protect_code_end (emit_unwind_protect_data &D)
+{
+  std::vector<int> v_need_break_cleanup;
+  if (D.m_break_stack_populated)
+    v_need_break_cleanup = POP_BREAKS ();
+
+  if (v_need_break_cleanup.size ())
+    TODO ("break in cleanup code");
+
+  if (D.m_break_stack_populated)
+    {
+      // Restore the initial "need breaks"
+      PUSH_BREAKS ();
+      for (int offset : D.m_v_need_breaks_initial)
+        PUSH_NEED_BREAK (offset);
+    }
+
+  if (D.m_n_need_break && D.m_n_need_cleanup)
+    TODO ("Return and break nested");
+  if (D.m_n_need_break)
+    {
+      // The break ov marker is on the stack.
+      // If it is not true, we skip the break jump
+      PUSH_CODE (INSTR::JMP_IFN);
+      int need_after = CODE_SIZE ();
+      PUSH_CODE_SHORT (-1);
+
+      POP_NESTING_STATEMENT (); // The jump ate the break marker
+
+      PUSH_CODE (INSTR::POP); // Pop the error object
+
+      // So, we break jump from here.
+      // The visitor for the loop will write to proper target
+      PUSH_CODE (INSTR::JMP);
+      int need_break = CODE_SIZE ();
+      PUSH_CODE_SHORT (-1);
+      PUSH_NEED_BREAK (need_break); // Nesting loop need to know
+
+      // If we are not breaking we jump to here
+      SET_CODE_SHORT (need_after, CODE_SIZE ());
+    }
+  // Check if we are doing a return unwind
+  else if (D.m_n_need_cleanup)
+    {
+      // If we are in another unwind protect we need to jump to its cleanup
+      // code if the return ov marker is true
+      if (N_UNWIND_RETURN_TARGETS())
+        {
+          // The return ov marker is on the stack.
+          // If it is not true, we skip the "jump bridge"
+          PUSH_CODE (INSTR::JMP_IFN);
+          int need_after = CODE_SIZE ();
+          PUSH_CODE_SHORT (-1);
+
+          POP_NESTING_STATEMENT (); // The jump ate the return marker
+
+          PUSH_CODE (INSTR::POP); // Pop the error object
+
+          PUSH_CODE (INSTR::JMP); // Jump to the nesting unwind protect
+          int need_unwind = CODE_SIZE ();
+          PUSH_CODE_SHORT (-1);
+          PUSH_A_UNWIND_RETURN_TARGET (need_unwind);
+
+          // If we are not returning we jump to here
+          SET_CODE_SHORT (need_after, CODE_SIZE ());
+        }
+      // Return if the return marker on the stack is true
+      else
+        {
+          // The return ov marker is on the stack.
+          // If it is not true, we skip the return
+          PUSH_CODE (INSTR::JMP_IFN);
+          int need_after = CODE_SIZE ();
+          PUSH_CODE_SHORT (-1);
+
+          POP_NESTING_STATEMENT (); // The jump ate the return marker
+
+          // Generate code for a return
+          emit_return ();
+
+          // If we are not returning we jump to here
+          SET_CODE_SHORT (need_after, CODE_SIZE ());
+        }
+    }
+  // If there is an error object on top of the stack we
+  // need to continue unwinding.
+  PUSH_CODE (INSTR::THROW_IFERROBJ);
+
+  POP_NESTING_STATEMENT ();
+}
+
+// Emit a complete unwind-protect construct around BODY and CLEANUP_CODE.
+//
+// Unwind protect has a body and a cleanup part that is always executed.
+//
+// If the VM is unwinding it enters the cleanup code with an error
+// object on the stack.  The body puts a nil object on the stack.
+// If there is an error object on the stack at the end of the cleanup
+// code it rethrows it.
+//
+// Returns in the body jump to the cleanup code before actually
+// returning, as do breaks; boolean markers pushed to the stack tell
+// the cleanup epilogue whether it is falling through, breaking or
+// returning.
+//
+// The body_expr, cleanup_expr and cleanup_instructions parameters are
+// for when we need to emit some internal cleanup code that has no
+// corresponding unwind_protect in the user code.
+//
+// All the bookkeeping (unwind-table entry, return/break jump targets
+// and stack markers) is implemented by the piecewise emitters
+// emit_unwind_protect_code_start(), *_before_cleanup() and *_end()
+// defined above, so delegate to them instead of duplicating their
+// logic here.
+void
+bytecode_walker::
+emit_unwind_protect_code (tree_statement_list *body,
+                          tree_statement_list *cleanup_code,
+                          tree_expression *body_expr,
+                          tree_expression *cleanup_expr,
+                          std::vector<int> cleanup_instructions)
+{
+  emit_unwind_protect_data D = emit_unwind_protect_code_start ();
+
+  // Walk the body
+  if (body)
+    body->accept (*this);
+  if (body_expr)
+    body_expr->accept (*this);
+
+  emit_unwind_protect_code_before_cleanup (D);
+
+  // Walk the clean up code
+  if (cleanup_code)
+    cleanup_code->accept (*this);
+  if (cleanup_expr)
+    cleanup_expr->accept (*this);
+
+  // Used to e.g. always call op CLEAR_IGNORE_OUTPUTS
+  for (auto instr : cleanup_instructions)
+    PUSH_CODE (instr);
+
+  emit_unwind_protect_code_end (D);
+}
+
+void
+bytecode_walker::
+visit_unwind_protect_command (tree_unwind_protect_command& cmd)
+{
+  // Emit the generic unwind-protect scaffolding around the user's
+  // body and cleanup statement lists.
+  emit_unwind_protect_code (cmd.body (), cmd.cleanup ());
+}
+
+// Emit bytecode for a try-catch statement.
+//
+// Emitted layout:
+//
+//   [try body]  JMP after
+//   catch: [ASSIGN err-id | POP]  [catch body]
+//   after:
+//
+// The unwind-table entry records the "try zone" (m_ip_start to
+// m_ip_end) and the catch target (m_ip_target) so the VM can
+// transfer control here when an error is thrown inside the body.
+//
+// Fix: removed a redundant trailing "return;" in this void function.
+void
+bytecode_walker::
+visit_try_catch_command (tree_try_catch_command& cmd)
+{
+  // So we are in a try catch.
+  //
+  int unwind_idx = N_UNWIND ();
+  PUSH_UNWIND();
+
+  UNWIND (unwind_idx).m_ip_start = CODE_SIZE ();
+
+  UNWIND (unwind_idx).m_unwind_entry_type =
+    unwind_entry_type::TRY_CATCH;
+
+  tree_statement_list *try_code = cmd.body ();
+
+  // Walk the body for the code
+  if (try_code)
+    try_code->accept (*this);
+  // We need to jump past the catch code that will come after
+  PUSH_CODE (INSTR::JMP);
+  int need_after = CODE_SIZE ();
+  PUSH_CODE_SHORT (-1);
+
+  // Mark an end to the "try zone"
+  UNWIND (unwind_idx).m_ip_end = CODE_SIZE ();
+
+  // We put the catch code right after the try body
+  UNWIND (unwind_idx).m_ip_target = CODE_SIZE ();
+
+  // For loops add two native ints and one ov to the stack,
+  // and switches add one ov to the stack, so we need to
+  // record how many things we have added to the stack,
+  // from for loops and switches.
+  UNWIND (unwind_idx).m_stack_depth = n_on_stack_due_to_stmt ();
+
+  // The optional identifier "catch id"
+  tree_identifier *expr_id = cmd.identifier ();
+
+  // The unwind code in the vm will push an error object ...
+  if (expr_id)
+    {
+      // ... so assign it to the identifiers in its slot.
+      std::string name = expr_id->name ();
+      int slot = add_id_to_table (name);
+      MAYBE_PUSH_WIDE_OPEXT (slot);
+      PUSH_CODE (INSTR::ASSIGN);
+      PUSH_SLOT (slot);
+    }
+  else
+    {
+      // ... just pop the error object unceremoniously.
+      PUSH_CODE (INSTR::POP);
+    }
+
+  // Walk the catch code
+  tree_statement_list *catch_code = cmd.cleanup ();
+  if (catch_code)
+    catch_code->accept (*this);
+
+  // The body jumps to here
+  SET_CODE_SHORT (need_after, CODE_SIZE ());
+}
+
+// For loops add two native ints and one ov to the stack,
+// and switches etc. add one ov to the stack, so we need to
+// know how many things we have added to the stack by the
+// currently nested statements.
+int
+bytecode_walker::
+n_on_stack_due_to_stmt ()
+{
+  auto stmts = NESTING_STATEMENTS();
+  int n_things_on_stack = 0;
+
+  // The total is a plain sum, so iteration order does not matter.
+  for (nesting_statement stmt : stmts)
+    {
+      switch (stmt)
+        {
+        case nesting_statement::FOR_LOOP:
+          n_things_on_stack += 3;
+          break;
+        case nesting_statement::ONE_OV_ON_STACK:
+          n_things_on_stack += 1;
+          break;
+        default:
+          ERR("Invalid state");
+        }
+    }
+
+  return n_things_on_stack;
+}
+
+// Emit bytecode for a declaration command, i.e. a "global" or
+// "persistent" statement, with optional initializer expressions.
+void
+bytecode_walker::
+visit_decl_command (tree_decl_command& cmd)
+{
+  tree_decl_init_list *lst = cmd.initializer_list ();
+
+  CHECK_NONNULL (lst);
+
+  // A decl list might contain multiple declarations.
+  // E.g. "global a b = 3 c"
+  for (auto it = lst->begin (); it != lst->end (); it++)
+    {
+      tree_decl_elt *el = *it;
+      CHECK_NONNULL (el);
+
+      std::string name = el->name ();
+
+      int slot = add_id_to_table (name);
+
+      if (el->is_global () || el->is_persistent())
+        {
+          if (el->is_global ())
+            PUSH_GLOBAL (name);
+          if (el->is_persistent())
+            PUSH_PERSISTENT (name);
+
+          // Slot for variable to keep track of whether the variable is
+          // actually a global. Prepended with "#" to not collide. "+" for
+          // persistent.
+          //
+          // We need this since the same identifier in a function can be both
+          // a local or a global depending on whether the global declare
+          // statement is reached or not.
+          //
+          // Since the name of the identifier that is declared global might
+          // already be used as a local, we also need to store the slot number
+          // of the #-marker in the code too. If this feature is removed, we
+          // can save some space in the OP-codes making the slot number implicit
+          // +1.
+          std::string prefix;
+          if (el->is_global ())
+            prefix = "#";
+          else
+            prefix = "+";
+
+          int prefix_slot = add_id_to_table (prefix + name);
+
+          PUSH_CODE (INSTR::GLOBAL_INIT);
+          if (el->is_global ())
+            PUSH_CODE (global_type::GLOBAL);
+          else if (el->is_persistent ())
+            {
+              PUSH_CODE (global_type::PERSISTENT);
+              // We need a "offset" for the persistent variable that
+              // matches the exact offset the treewalker would use
+              tree_identifier *id = el->ident ();
+              CHECK_NONNULL (id);
+              int offset = id->symbol ().data_offset ();
+
+              // The VM need to know the special persistent variable offset
+              // so we store it in the unwind data
+              m_code.m_unwind_data.
+                m_slot_to_persistent_slot[slot] = offset;
+            }
+          PUSH_WSLOT (slot);
+          PUSH_WSLOT (prefix_slot);
+
+          tree_expression *expr = el->expression ();
+          bool has_init = expr;
+
+          PUSH_CODE (has_init); // has initialization code
+
+          // The global has an initialization expression
+          if (has_init)
+            {
+              // Placeholder for address to after init code.
+              // GLOBAL_INIT jumps to there if the global is
+              // already initialized.
+              int need_after = CODE_SIZE ();
+              PUSH_CODE_SHORT (-1);
+
+              // We want the value of the initialization on
+              // the operand stack.
+
+              INC_DEPTH();
+              PUSH_NARGOUT(1);
+
+              // Walk for the initialization code
+              expr->accept (*this);
+              // The value of rhs is on the operand stack now.
+              // So we need to write it to its local slot and then
+              // write that to its global value.
+              MAYBE_PUSH_WIDE_OPEXT (slot);
+              PUSH_CODE (INSTR::ASSIGN); // Write operand stack top ...
+              PUSH_SLOT (slot);   // to the local slot of the global
+
+              // I think only this makes sense
+              CHECK (DEPTH () == 1);
+
+              POP_NARGOUT ();
+              DEC_DEPTH ();
+
+              // Write the instruction address to the placeholder
+              SET_CODE_SHORT (need_after, CODE_SIZE ());
+            }
+        }
+      else
+        ERR ("Strange state");
+    }
+}
+
+// Emit bytecode for a postfix expression, e.g. "a'" or "a++".
+//
+// "++"/"--" on a plain identifier are done directly in the
+// variable's slot; indexed forms like "m(1)++" fall back to EVAL.
+void
+bytecode_walker::
+visit_postfix_expression (tree_postfix_expression& expr)
+{
+  m_unknown_nargout++;
+  INC_DEPTH();
+
+  tree_expression *e = expr.operand ();
+  CHECK_NONNULL (e);
+
+  octave_value::unary_op op = expr.op_type ();
+
+  int folded_need_after = -1;
+  int fold_slot = -1;
+  // Check if we should do a constant fold. It only makes sense in loops since the expression is folded at runtime.
+  // Essentially there is a PUSH_FOLDED_CST opcode that is tied to a cache. If the cache is valid, push it and jump
+  // past the initialization code, otherwise run the initialization code and set the cache with SET_FOLDED_CST
+  if (m_n_nested_loops && !m_is_folding && is_foldable_walker::is_foldable (expr))
+    {
+      m_is_folding = true;
+
+      std::string fold_name = "#cst_fold_" + std::to_string (m_n_folds++);
+      fold_slot = add_id_to_table (fold_name);
+
+      MAYBE_PUSH_WIDE_OPEXT (fold_slot);
+      PUSH_CODE (INSTR::PUSH_FOLDED_CST);
+      PUSH_SLOT (fold_slot);
+      folded_need_after = CODE_SIZE ();
+      PUSH_CODE_SHORT (-1);
+    }
+
+  int slot = -1;
+  // For ++ and -- we don't want a local pushed to the stack, but operate
+  // directly in the slot, and then pushing the slot.
+  if (e->is_identifier() && (op == octave_value::unary_op::op_decr ||
+                             op == octave_value::unary_op::op_incr))
+    {
+      // Just add the symbol to the table
+      // TODO: Could there be command function calls messing this up?
+      //       I.e. foo++ could be a foo()++?
+      slot = add_id_to_table (e->name ());
+    }
+  // We handle e.g. m("qwe")++ with eval
+  else if (op != octave_value::unary_op::op_incr && op != octave_value::unary_op::op_decr)
+    {
+      PUSH_NARGOUT (1);
+      e->accept (*this);
+      POP_NARGOUT ();
+    }
+
+  maybe_emit_anon_maybe_ignore_outputs ();
+
+  switch (op)
+    {
+    case octave_value::unary_op::op_not:
+      PUSH_CODE (INSTR::NOT);
+      break;
+    case octave_value::unary_op::op_uplus:
+      PUSH_CODE (INSTR::UADD);
+      break;
+    case octave_value::unary_op::op_uminus:
+      PUSH_CODE (INSTR::USUB);
+      break;
+    case octave_value::unary_op::op_transpose:
+      PUSH_CODE (INSTR::TRANS);
+      break;
+    case octave_value::unary_op::op_hermitian:
+      PUSH_CODE (INSTR::HERM);
+      break;
+    case octave_value::unary_op::op_incr:
+      {
+        if (! e->is_identifier ())
+          {
+            // TODO: Cheating with eval
+            PUSH_TREE_FOR_EVAL (&expr);
+            int tree_idx = -CODE_SIZE (); // NB: Negative to not collide with debug data
+
+            int nargout = NARGOUT ();
+            MAYBE_PUSH_ANON_NARGOUT_OPEXT (nargout);
+            PUSH_CODE (INSTR::EVAL);
+            PUSH_CODE (nargout);
+            PUSH_CODE_INT (tree_idx);
+          }
+        else
+          {
+            MAYBE_PUSH_WIDE_OPEXT (slot);
+            PUSH_CODE (INSTR::INCR_ID_POSTFIX);
+            PUSH_SLOT (slot);
+          }
+      }
+      break;
+    case octave_value::unary_op::op_decr:
+      {
+        if (! e->is_identifier ())
+          {
+            // TODO: Cheating with eval
+            PUSH_TREE_FOR_EVAL (&expr);
+            int tree_idx = -CODE_SIZE (); // NB: Negative to not collide with debug data
+
+            int nargout = NARGOUT ();
+            MAYBE_PUSH_ANON_NARGOUT_OPEXT (nargout);
+            PUSH_CODE (INSTR::EVAL);
+            PUSH_CODE (nargout);
+            PUSH_CODE_INT (tree_idx);
+          }
+        else
+          {
+            MAYBE_PUSH_WIDE_OPEXT (slot);
+            PUSH_CODE (INSTR::DECR_ID_POSTFIX);
+            PUSH_SLOT (slot);
+          }
+      }
+      break;
+    default:
+      TODO ("not covered");
+    }
+
+  // Close the fold: cache the computed value and patch the jump
+  // that skips the initialization code on later iterations.
+  if (fold_slot != -1)
+    {
+      m_is_folding = false;
+
+      PUSH_CODE (INSTR::DUP);
+      MAYBE_PUSH_WIDE_OPEXT (fold_slot);
+      PUSH_CODE (INSTR::SET_FOLDED_CST);
+      PUSH_SLOT (fold_slot);
+
+      SET_CODE_SHORT (folded_need_after, CODE_SIZE ());
+    }
+
+  maybe_emit_bind_ans_and_disp (expr);
+
+  DEC_DEPTH();
+  m_unknown_nargout--;
+}
+
+// Emit bytecode for a prefix expression, e.g. "!a" or "++a".
+//
+// Mirrors visit_postfix_expression: "++"/"--" on a plain identifier
+// operate directly in the slot (INCR/DECR_ID_PREFIX); indexed forms
+// like "++m(1)" fall back to EVAL.
+void
+bytecode_walker::
+visit_prefix_expression (tree_prefix_expression& expr)
+{
+  m_unknown_nargout++;
+  INC_DEPTH();
+
+  tree_expression *e = expr.operand ();
+  CHECK_NONNULL (e);
+
+  octave_value::unary_op op = expr.op_type ();
+
+  int folded_need_after = -1;
+  int fold_slot = -1;
+  // Check if we should do a constant fold. It only makes sense in loops since the expression is folded at runtime.
+  // Essentially there is a PUSH_FOLDED_CST opcode that is tied to a cache. If the cache is valid, push it and jump
+  // past the initialization code, otherwise run the initialization code and set the cache with SET_FOLDED_CST
+  if (m_n_nested_loops && !m_is_folding && is_foldable_walker::is_foldable (expr))
+    {
+      m_is_folding = true;
+
+      std::string fold_name = "#cst_fold_" + std::to_string (m_n_folds++);
+      fold_slot = add_id_to_table (fold_name);
+
+      MAYBE_PUSH_WIDE_OPEXT (fold_slot);
+      PUSH_CODE (INSTR::PUSH_FOLDED_CST);
+      PUSH_SLOT (fold_slot);
+      folded_need_after = CODE_SIZE ();
+      PUSH_CODE_SHORT (-1);
+    }
+
+  int slot = -1;
+  // For ++ and -- we don't want a local pushed to the stack, but operate
+  // directly in the slot, and then pushing the slot.
+  if (e->is_identifier() && (op == octave_value::unary_op::op_decr ||
+                             op == octave_value::unary_op::op_incr))
+    {
+      // Just add the symbol to the table
+      // TODO: Could there be command function calls messing this up?
+      //       I.e. foo++ could be a foo()++?
+      slot = add_id_to_table (e->name ());
+    }
+  // We handle e.g. m("qwe")++ with eval
+  else if (op != octave_value::unary_op::op_incr && op != octave_value::unary_op::op_decr)
+    {
+      PUSH_NARGOUT (1);
+      e->accept (*this);
+      POP_NARGOUT ();
+    }
+
+  maybe_emit_anon_maybe_ignore_outputs ();
+
+  switch (op)
+    {
+    case octave_value::unary_op::op_not:
+      PUSH_CODE (INSTR::NOT);
+      break;
+    case octave_value::unary_op::op_uplus:
+      PUSH_CODE (INSTR::UADD);
+      break;
+    case octave_value::unary_op::op_uminus:
+      PUSH_CODE (INSTR::USUB);
+      break;
+    case octave_value::unary_op::op_transpose:
+      PUSH_CODE (INSTR::TRANS);
+      break;
+    case octave_value::unary_op::op_hermitian:
+      PUSH_CODE (INSTR::HERM);
+      break;
+    case octave_value::unary_op::op_incr:
+      {
+        if (! e->is_identifier ())
+          {
+            // TODO: Cheating with eval
+            PUSH_TREE_FOR_EVAL (&expr);
+            int tree_idx = -CODE_SIZE (); // NB: Negative to not collide with debug data
+
+            int nargout = NARGOUT ();
+            MAYBE_PUSH_ANON_NARGOUT_OPEXT (nargout);
+            PUSH_CODE (INSTR::EVAL);
+            PUSH_CODE (nargout);
+            PUSH_CODE_INT (tree_idx);
+          }
+        else
+          {
+            MAYBE_PUSH_WIDE_OPEXT (slot);
+            PUSH_CODE (INSTR::INCR_ID_PREFIX);
+            PUSH_SLOT (slot);
+          }
+      }
+      break;
+    case octave_value::unary_op::op_decr:
+      {
+        if (! e->is_identifier ())
+          {
+            // TODO: Cheating with eval
+            PUSH_TREE_FOR_EVAL (&expr);
+            int tree_idx = -CODE_SIZE (); // NB: Negative to not collide with debug data
+
+            int nargout = NARGOUT ();
+            MAYBE_PUSH_ANON_NARGOUT_OPEXT (nargout);
+            PUSH_CODE (INSTR::EVAL);
+            PUSH_CODE (nargout);
+            PUSH_CODE_INT (tree_idx);
+          }
+        else
+          {
+            MAYBE_PUSH_WIDE_OPEXT (slot);
+            PUSH_CODE (INSTR::DECR_ID_PREFIX);
+            PUSH_SLOT (slot);
+          }
+      }
+      break;
+    default:
+      TODO ("not covered");
+    }
+
+  // Close the fold: cache the computed value and patch the jump
+  // that skips the initialization code on later iterations.
+  if (fold_slot != -1)
+    {
+      m_is_folding = false;
+
+      PUSH_CODE (INSTR::DUP);
+      MAYBE_PUSH_WIDE_OPEXT (fold_slot);
+      PUSH_CODE (INSTR::SET_FOLDED_CST);
+      PUSH_SLOT (fold_slot);
+
+      SET_CODE_SHORT (folded_need_after, CODE_SIZE ());
+    }
+
+  maybe_emit_bind_ans_and_disp (expr);
+
+  DEC_DEPTH();
+  m_unknown_nargout--;
+}
+
+// Emit bytecode for a short-circuiting boolean expression
+// ("&&" or "||"), leaving a true/false ov on the operand stack.
+void
+bytecode_walker::
+visit_boolean_expression(tree_boolean_expression& expr)
+{
+  INC_DEPTH ();
+  PUSH_NARGOUT (1);
+
+  // Since || and && have short circuit behavior
+  // we need to build up the expression from multiple opcodes.
+  //
+  // Note that UNARY_TRUE accepts operands that are not
+  // "is_defined ()" where as IF or IF_N would error on those,
+  // so we need UNARY_TRUE before the IFs.
+  if (expr.op_type() == tree_boolean_expression::bool_and)
+    {
+      // We want lhs on the operand stack
+      tree_expression *op1 = expr.lhs ();
+      CHECK_NONNULL (op1);
+      op1->accept (*this);
+
+      // If false, jump to push false
+      PUSH_CODE (INSTR::UNARY_TRUE);
+      PUSH_CODE (INSTR::JMP_IFN);
+      int need_false0 = CODE_SIZE ();
+      PUSH_CODE_SHORT (-1);
+
+      // If lhs was true, we want rhs on the
+      // operand stack too.
+      tree_expression *op2 = expr.rhs ();
+      CHECK_NONNULL (op2);
+      op2->accept (*this);
+
+      // If false, jump to push false
+      PUSH_CODE (INSTR::UNARY_TRUE);
+      PUSH_CODE (INSTR::JMP_IFN);
+      int need_false1 = CODE_SIZE ();
+      PUSH_CODE_SHORT (-1);
+
+      // If both lhs and rhs were true,
+      // we fallthrough to push true
+      PUSH_CODE (INSTR::PUSH_TRUE);
+      PUSH_CODE (INSTR::JMP); // Jump past PUSH_FALSE
+      int need_after = CODE_SIZE ();
+      PUSH_CODE_SHORT (-1);
+
+      // The 2 JMP_IFN goes here, to PUSH_FALSE
+      int offset_false = CODE_SIZE ();
+      PUSH_CODE (INSTR::PUSH_FALSE);
+      // The JMP after PUSH_TRUE goes here
+
+      // Set the addresses for the false jumps
+      SET_CODE_SHORT (need_false0, offset_false);
+      SET_CODE_SHORT (need_false1, offset_false);
+      // The true push jumps to after
+      SET_CODE_SHORT (need_after, CODE_SIZE ());
+    }
+  else
+    {
+      // We want lhs on the operand stack
+      tree_expression *op1 = expr.lhs ();
+      CHECK_NONNULL (op1);
+      op1->accept (*this);
+
+      // If true, jump to push true
+      PUSH_CODE (INSTR::UNARY_TRUE);
+      PUSH_CODE (INSTR::JMP_IF);
+      int need_true0 = CODE_SIZE ();
+      PUSH_CODE_SHORT (-1);
+
+      // If lhs was false, we want rhs on the
+      // operand stack too.
+      tree_expression *op2 = expr.rhs ();
+      CHECK_NONNULL (op2);
+      op2->accept (*this);
+
+      // If true, jump to push true
+      PUSH_CODE (INSTR::UNARY_TRUE);
+      PUSH_CODE (INSTR::JMP_IF);
+      int need_true1 = CODE_SIZE ();
+      PUSH_CODE_SHORT (-1);
+
+      // If both lhs and rhs were false,
+      // we fallthrough to here, push false
+      PUSH_CODE (INSTR::PUSH_FALSE);
+      PUSH_CODE (INSTR::JMP); // Jump past PUSH_TRUE
+      int need_after = CODE_SIZE ();
+      PUSH_CODE_SHORT (-1);
+
+      // The 2 JMP_IF goes here, to PUSH_TRUE
+      int offset_true = CODE_SIZE ();
+      PUSH_CODE (INSTR::PUSH_TRUE);
+      // The JMP after PUSH_FALSE goes here
+
+      // Set the addresses for the true jumps
+      SET_CODE_SHORT (need_true0, offset_true);
+      SET_CODE_SHORT (need_true1, offset_true);
+      // The false push jumps to after
+      SET_CODE_SHORT (need_after, CODE_SIZE ());
+    }
+
+  maybe_emit_bind_ans_and_disp (expr);
+
+  DEC_DEPTH ();
+  POP_NARGOUT ();
+}
+
+// Emit bytecode for a fused compound binary expression.
+void
+bytecode_walker::
+visit_compound_binary_expression (tree_compound_binary_expression &expr)
+{
+  // Compound expressions are expressions that are more efficient to
+  // execute fused for a matrix, like M'*A etc.
+  INC_DEPTH();
+  PUSH_NARGOUT (1);
+  m_unknown_nargout++;
+
+  // Evaluate the (already de-fused) lhs operand onto the stack ...
+  tree_expression *op1 = expr.clhs ();
+
+  CHECK_NONNULL (op1);
+  op1->accept (*this);
+
+  // ... then the rhs operand.
+  tree_expression *op2 = expr.crhs ();
+
+  CHECK_NONNULL (op2);
+  op2->accept (*this);
+
+  maybe_emit_anon_maybe_ignore_outputs ();
+
+  // One fused opcode per compound op type.
+  switch (expr.cop_type ())
+    {
+    case octave_value::compound_binary_op::op_trans_mul:
+      PUSH_CODE (INSTR::TRANS_MUL);
+      break;
+    case octave_value::compound_binary_op::op_mul_trans:
+      PUSH_CODE (INSTR::MUL_TRANS);
+      break;
+    case octave_value::compound_binary_op::op_herm_mul:
+      PUSH_CODE (INSTR::HERM_MUL);
+      break;
+    case octave_value::compound_binary_op::op_mul_herm:
+      PUSH_CODE (INSTR::MUL_HERM);
+      break;
+    case octave_value::compound_binary_op::op_trans_ldiv:
+      PUSH_CODE (INSTR::TRANS_LDIV);
+      break;
+    case octave_value::compound_binary_op::op_herm_ldiv:
+      PUSH_CODE (INSTR::HERM_LDIV);
+      break;
+    default:
+      TODO ("not covered");
+    }
+
+  maybe_emit_bind_ans_and_disp (expr);
+
+  POP_NARGOUT ();
+  DEC_DEPTH();
+  m_unknown_nargout--;
+}
+
+// Emit bytecode for a binary expression, e.g. "a + b".
+//
+// Special cases handled here:
+//   * "braindead" short circuit of & and | in if/while conditions
+//   * runtime constant folding of foldable subexpressions in loops
+//   * <OP>_CST / LOAD_2_CST superopcodes when operands are constants
+//
+// Fix: removed a duplicated (unreachable) "break;" in the op_mul case.
+void
+bytecode_walker::
+visit_binary_expression (tree_binary_expression& expr)
+{
+  INC_DEPTH ();
+  PUSH_NARGOUT (1);
+  m_unknown_nargout++;
+
+  std::vector<int> need_after;
+  int fold_slot = -1;
+
+  // "&" and "|" have a braindead short circuit behavior when
+  // in if or while conditions, so we need special handling of those.
+  if (expr.is_braindead ())
+    {
+      if (expr.op_type() == octave_value::binary_op::op_el_and)
+        {
+          // We use a slot to store whether a warning has been issued
+          // or not
+          std::string id_warning = "%braindead_warning_" +
+            std::to_string(CODE_SIZE ());
+          int slot = add_id_to_table(id_warning);
+
+          // The left most expression is always evaled
+          tree_expression *op1 = expr.lhs ();
+
+          CHECK_NONNULL (op1);
+          op1->accept (*this);
+
+          // We need to check if lhs value is scalar
+          PUSH_CODE (INSTR::DUP);
+          PUSH_CODE (INSTR::BRAINDEAD_PRECONDITION);
+
+          // If the precondition is not true, we do a
+          // normal binop. Note that lhs is evaled twice
+          // since that is what the treewalker does.
+          PUSH_CODE (INSTR::JMP_IFN);
+          int need_target_not_braindead = CODE_SIZE ();
+          PUSH_CODE_SHORT (-1);
+
+          // Now we do the braindead short circuit
+
+          // If the lhs expression is true we check the rhs
+          PUSH_CODE (INSTR::UNARY_TRUE);
+          PUSH_CODE (INSTR::JMP_IF);
+          int need_target_true = CODE_SIZE ();
+          PUSH_CODE_SHORT (-1);
+
+          // The lhs was false which means we need to issue a warning
+          // and push a false
+          MAYBE_PUSH_WIDE_OPEXT (slot);
+          PUSH_CODE (INSTR::BRAINDEAD_WARNING);
+          PUSH_SLOT (slot);
+          PUSH_CODE ('&'); // The operand type to print in the warning
+          PUSH_CODE (INSTR::PUSH_FALSE);
+          PUSH_CODE (INSTR::JMP);
+          need_after.push_back (CODE_SIZE ());
+          PUSH_CODE_SHORT (-1);
+
+          // If lhs was true we jump to here
+          SET_CODE_SHORT (need_target_true, CODE_SIZE ());
+          // Walk rhs
+          tree_expression *op2 = expr.rhs ();
+
+          CHECK_NONNULL (op2);
+          op2->accept (*this);
+
+          // With rhs on the stack, check if it is true and jump to
+          // a true push, otherwise push false and jump to after
+          PUSH_CODE (INSTR::UNARY_TRUE);
+          PUSH_CODE (INSTR::JMP_IF);
+          need_target_true = CODE_SIZE ();
+          PUSH_CODE_SHORT (-1);
+
+          // Push false jump to after
+          PUSH_CODE (INSTR::PUSH_FALSE);
+          PUSH_CODE (INSTR::JMP);
+          need_after.push_back (CODE_SIZE ());
+          PUSH_CODE_SHORT (-1);
+
+          // Push true jump to after
+          SET_CODE_SHORT (need_target_true, CODE_SIZE ());
+          PUSH_CODE (INSTR::PUSH_TRUE);
+          PUSH_CODE (INSTR::JMP);
+          need_after.push_back (CODE_SIZE ());
+          PUSH_CODE_SHORT (-1);
+
+          // If the precondition was false we need to do the ordinary binary op
+          SET_CODE_SHORT (need_target_not_braindead, CODE_SIZE ());
+          PUSH_CODE (INSTR::POP); // Pop the evaled lhs value
+        }
+      else if (expr.op_type() == octave_value::binary_op::op_el_or)
+        {
+          // We use a slot to store whether a warning has been issued
+          // or not
+          std::string id_warning = "%braindead_warning_" +
+            std::to_string(CODE_SIZE ());
+          int slot = add_id_to_table(id_warning);
+
+          // The left most expression is always evaled
+          tree_expression *op1 = expr.lhs ();
+
+          CHECK_NONNULL (op1);
+          op1->accept (*this);
+
+          // We need to check if lhs value is scalar
+          PUSH_CODE (INSTR::DUP);
+          PUSH_CODE (INSTR::BRAINDEAD_PRECONDITION);
+
+          // If the precondition is not true, we do a
+          // normal binop. Note that lhs is evaled twice
+          // since that is what the treewalker does.
+          PUSH_CODE (INSTR::JMP_IFN);
+          int need_target_not_braindead = CODE_SIZE ();
+          PUSH_CODE_SHORT (-1);
+
+          // Now we do the braindead short circuit for "or"
+
+          // If the lhs expression is true we issue a
+          // warning, push a true and jump to after.
+          // If lhs is false we instead need to check rhs too.
+          PUSH_CODE (INSTR::UNARY_TRUE);
+          PUSH_CODE (INSTR::JMP_IFN);
+          int need_target_check_rhs = CODE_SIZE ();
+          PUSH_CODE_SHORT (-1);
+
+          MAYBE_PUSH_WIDE_OPEXT (slot);
+          PUSH_CODE (INSTR::BRAINDEAD_WARNING);
+          PUSH_SLOT (slot);
+          PUSH_CODE ('|'); // The operand type to print in the warning
+          PUSH_CODE (INSTR::PUSH_TRUE);
+          PUSH_CODE (INSTR::JMP);
+          need_after.push_back (CODE_SIZE ());
+          PUSH_CODE_SHORT (-1);
+
+          // If lhs was false we jump to here
+          SET_CODE_SHORT (need_target_check_rhs, CODE_SIZE ());
+          // Walk rhs
+          tree_expression *op2 = expr.rhs ();
+
+          CHECK_NONNULL (op2);
+          op2->accept (*this);
+
+          // With rhs on the stack, check if it is true and jump to
+          // a true push, otherwise push false and jump to after
+          PUSH_CODE (INSTR::UNARY_TRUE);
+          PUSH_CODE (INSTR::JMP_IF);
+          int need_target_true = CODE_SIZE ();
+          PUSH_CODE_SHORT (-1);
+
+          // Push false jump to after
+          PUSH_CODE (INSTR::PUSH_FALSE);
+          PUSH_CODE (INSTR::JMP);
+          need_after.push_back (CODE_SIZE ());
+          PUSH_CODE_SHORT (-1);
+
+          // Push true jump to after
+          SET_CODE_SHORT (need_target_true, CODE_SIZE ());
+          PUSH_CODE (INSTR::PUSH_TRUE);
+          PUSH_CODE (INSTR::JMP);
+          need_after.push_back (CODE_SIZE ());
+          PUSH_CODE_SHORT (-1);
+
+          // If the precondition was false we need to do the ordinary binary op
+          SET_CODE_SHORT (need_target_not_braindead, CODE_SIZE ());
+          PUSH_CODE (INSTR::POP); // Pop the evaled lhs value
+        }
+      else
+        panic_impossible ();
+    }
+  // Check if we should do a constant fold. It only makes sense in loops since the expression is folded at runtime.
+  // Essentially there is a PUSH_FOLDED_CST opcode that is tied to a cache. If the cache is valid, push it and jump
+  // past the initialization code, otherwise run the initialization code and set the cache with SET_FOLDED_CST
+  else if (m_n_nested_loops && !m_is_folding && is_foldable_walker::is_foldable (expr))
+    {
+      m_is_folding = true;
+
+      std::string fold_name = "#cst_fold_" + std::to_string (m_n_folds++);
+      fold_slot = add_id_to_table (fold_name);
+
+      MAYBE_PUSH_WIDE_OPEXT (fold_slot);
+      PUSH_CODE (INSTR::PUSH_FOLDED_CST);
+      PUSH_SLOT (fold_slot);
+      need_after.push_back (CODE_SIZE ());
+      PUSH_CODE_SHORT (-1);
+    }
+
+  tree_expression *op1 = expr.lhs ();
+  tree_expression *op2 = expr.rhs ();
+  CHECK_NONNULL (op1);
+  CHECK_NONNULL (op2);
+
+  bool op1_is_cst = op1->is_constant ();
+  bool op2_is_cst = op2->is_constant ();
+  int n_cst = op1_is_cst + op2_is_cst;
+  int cst_offset = -1;
+
+  if (op1_is_cst && op2_is_cst && DATA_SIZE () < 255)
+    {
+      // If both rhs and lhs are constants we want to emit a super op-code
+      // as long as the WIDE op is not going to be used (<255)
+      emit_load_2_cst (op1, op2);
+    }
+  else if (n_cst == 1 &&
+           (expr.op_type () <= octave_value::binary_op::op_ne && expr.op_type () >= octave_value::binary_op::op_add) &&
+           DATA_SIZE () < 255 && expr.op_type () != octave_value::binary_op::op_ldiv)
+    {
+      // Exactly one operand is a constant and the op has a <OP>_CST
+      // superopcode: stash the constant in the data vector and only
+      // walk the non-constant operand.
+      if (op1_is_cst)
+        {
+          tree_constant *tree_cst = static_cast<tree_constant *> (op1);
+          octave_value ov_cst = tree_cst->value ();
+          cst_offset = DATA_SIZE ();
+          PUSH_DATA (ov_cst);
+
+          op2->accept (*this);
+        }
+      else
+        {
+          tree_constant *tree_cst = static_cast<tree_constant *> (op2);
+          octave_value ov_cst = tree_cst->value ();
+          cst_offset = DATA_SIZE ();
+          PUSH_DATA (ov_cst);
+
+          op1->accept (*this);
+        }
+    }
+  else
+    {
+      op1->accept (*this);
+      op2->accept (*this);
+    }
+
+  maybe_emit_anon_maybe_ignore_outputs ();
+
+  // Emit the binop opcode, or its _CST variant when one constant
+  // operand was stashed above (cst_offset != -1).
+  switch (expr.op_type ())
+    {
+    case octave_value::binary_op::op_mul:
+      if (cst_offset == -1)
+        PUSH_CODE (INSTR::MUL);
+      else
+        {
+          PUSH_CODE (INSTR::MUL_CST);
+          PUSH_CODE (cst_offset);
+          PUSH_CODE (op1_is_cst);
+        }
+      break;
+    case octave_value::binary_op::op_div:
+      if (cst_offset == -1)
+        PUSH_CODE (INSTR::DIV);
+      else
+        {
+          PUSH_CODE (INSTR::DIV_CST);
+          PUSH_CODE (cst_offset);
+          PUSH_CODE (op1_is_cst);
+        }
+      break;
+    case octave_value::binary_op::op_add:
+      if (cst_offset == -1)
+        PUSH_CODE (INSTR::ADD);
+      else
+        {
+          PUSH_CODE (INSTR::ADD_CST);
+          PUSH_CODE (cst_offset);
+          PUSH_CODE (op1_is_cst);
+        }
+      break;
+    case octave_value::binary_op::op_sub:
+      if (cst_offset == -1)
+        PUSH_CODE (INSTR::SUB);
+      else
+        {
+          PUSH_CODE (INSTR::SUB_CST);
+          PUSH_CODE (cst_offset);
+          PUSH_CODE (op1_is_cst);
+        }
+      break;
+    case octave_value::binary_op::op_lt:
+      if (cst_offset == -1)
+        PUSH_CODE (INSTR::LE);
+      else
+        {
+          PUSH_CODE (INSTR::LE_CST);
+          PUSH_CODE (cst_offset);
+          PUSH_CODE (op1_is_cst);
+        }
+      break;
+    case octave_value::binary_op::op_le:
+      if (cst_offset == -1)
+        PUSH_CODE (INSTR::LE_EQ);
+      else
+        {
+          PUSH_CODE (INSTR::LE_EQ_CST);
+          PUSH_CODE (cst_offset);
+          PUSH_CODE (op1_is_cst);
+        }
+      break;
+    case octave_value::binary_op::op_gt:
+      if (cst_offset == -1)
+        PUSH_CODE (INSTR::GR);
+      else
+        {
+          PUSH_CODE (INSTR::GR_CST);
+          PUSH_CODE (cst_offset);
+          PUSH_CODE (op1_is_cst);
+        }
+      break;
+    case octave_value::binary_op::op_ge:
+      if (cst_offset == -1)
+        PUSH_CODE (INSTR::GR_EQ);
+      else
+        {
+          PUSH_CODE (INSTR::GR_EQ_CST);
+          PUSH_CODE (cst_offset);
+          PUSH_CODE (op1_is_cst);
+        }
+      break;
+    case octave_value::binary_op::op_eq:
+      if (cst_offset == -1)
+        PUSH_CODE (INSTR::EQ);
+      else
+        {
+          PUSH_CODE (INSTR::EQ_CST);
+          PUSH_CODE (cst_offset);
+          PUSH_CODE (op1_is_cst);
+        }
+      break;
+    case octave_value::binary_op::op_ne:
+      if (cst_offset == -1)
+        PUSH_CODE (INSTR::NEQ);
+      else
+        {
+          PUSH_CODE (INSTR::NEQ_CST);
+          PUSH_CODE (cst_offset);
+          PUSH_CODE (op1_is_cst);
+        }
+      break;
+    case octave_value::binary_op::op_pow:
+      if (cst_offset == -1)
+        PUSH_CODE (INSTR::POW);
+      else
+        {
+          PUSH_CODE (INSTR::POW_CST);
+          PUSH_CODE (cst_offset);
+          PUSH_CODE (op1_is_cst);
+        }
+      break;
+    case octave_value::binary_op::op_ldiv:
+      PUSH_CODE (INSTR::LDIV);
+      break;
+    case octave_value::binary_op::op_el_mul:
+      PUSH_CODE (INSTR::EL_MUL);
+      break;
+    case octave_value::binary_op::op_el_div:
+      PUSH_CODE (INSTR::EL_DIV);
+      break;
+    case octave_value::binary_op::op_el_pow:
+      PUSH_CODE (INSTR::EL_POW);
+      break;
+    case octave_value::binary_op::op_el_and:
+      PUSH_CODE (INSTR::EL_AND);
+      break;
+    case octave_value::binary_op::op_el_or:
+      PUSH_CODE (INSTR::EL_OR);
+      break;
+    case octave_value::binary_op::op_el_ldiv:
+      PUSH_CODE (INSTR::EL_LDIV);
+      break;
+
+    default:
+      TODO ("not covered");
+    }
+
+  // Close the fold: cache the computed value in the fold slot.
+  if (fold_slot != -1)
+    {
+      m_is_folding = false;
+
+      PUSH_CODE (INSTR::DUP);
+      MAYBE_PUSH_WIDE_OPEXT (fold_slot);
+      PUSH_CODE (INSTR::SET_FOLDED_CST);
+      PUSH_SLOT (fold_slot);
+    }
+
+  // Patch all pending jumps (braindead exits and/or fold skip) to here.
+  for (int offset : need_after)
+    SET_CODE_SHORT (offset, CODE_SIZE ());
+
+  maybe_emit_bind_ans_and_disp (expr);
+
+  POP_NARGOUT ();
+
+  DEC_DEPTH ();
+  m_unknown_nargout--;
+}
+
+// Emit a LOAD_2_CST superopcode that pushes two constant operands
+// (lhs then rhs) onto the operand stack in one instruction.
+//
+// Both constants are appended to the data vector; the opcode only
+// encodes the offset of the lhs, the rhs being implicitly at the
+// next offset.  Caller guarantees the offsets fit in one byte.
+//
+// Fix: removed a stray blank line splitting the return type from
+// the qualified name, inconsistent with the rest of the file.
+void
+bytecode_walker::
+emit_load_2_cst (tree_expression *lhs, tree_expression *rhs)
+{
+  INC_DEPTH();
+
+  CHECK (DEPTH () > 1);
+
+  CHECK (lhs); CHECK (rhs);
+  CHECK (lhs->is_constant ());
+  CHECK (rhs->is_constant ());
+
+  tree_constant *lhs_cst = static_cast<tree_constant *> (lhs);
+  tree_constant *rhs_cst = static_cast<tree_constant *> (rhs);
+
+  octave_value ov_lhs = lhs_cst->value ();
+  octave_value ov_rhs = rhs_cst->value ();
+
+  PUSH_DATA (ov_lhs);
+  PUSH_DATA (ov_rhs);
+
+  unsigned cst_offset = DATA_SIZE () - 1;
+  CHECK (cst_offset < 256);
+
+  PUSH_CODE (INSTR::LOAD_2_CST);
+  PUSH_CODE (cst_offset - 1); // Offset of lhs
+
+  DEC_DEPTH();
+}
+
+// Emit code that pushes the value of a constant to the operand stack.
+// The doubles 0, 1 and 2 get specialized opcodes that need no entry in
+// the data vector; any other value is stored in the data vector and
+// loaded by its offset.
+void
+bytecode_walker::
+visit_constant (tree_constant& cst)
+{
+  INC_DEPTH();
+
+  octave_value ov_cst = cst.value ();
+
+  bool specialized = false;
+  if (ov_cst.type_id () == octave_scalar::static_type_id ())
+    {
+      double val = ov_cst.double_value ();
+      if (val == 0)
+        {
+          specialized = true;
+          PUSH_CODE (INSTR::PUSH_DBL_0);
+        }
+      else if (val == 1)
+        {
+          specialized = true;
+          PUSH_CODE (INSTR::PUSH_DBL_1);
+        }
+      else if (val == 2)
+        {
+          specialized = true;
+          PUSH_CODE (INSTR::PUSH_DBL_2);
+        }
+    }
+
+  if (!specialized)
+    {
+      PUSH_DATA (ov_cst);
+      PUSH_CODE_LOAD_CST (DATA_SIZE () - 1); // Offset of the constant
+    }
+
+  // A constant at root level binds to 'ans' and might need displaying.
+  maybe_emit_bind_ans_and_disp (cst);
+
+  DEC_DEPTH();
+}
+
+// Compile a user script to bytecode.  A script has no arguments and only
+// the dummy '%nargout' return.  Its user-visible locals are borrowed from
+// (and handed back to) the calling eval frame via
+// m_map_user_locals_names_to_slot; the ENTER_SCRIPT_FRAME opcode performs
+// that handover at runtime.
+void
+bytecode_walker::
+visit_octave_user_script (octave_user_script& fcn)
+{
+  m_is_script = true;
+
+  m_code.m_unwind_data.m_external_frame_offset_to_internal.push_back ({});
+
+  m_code.m_unwind_data.m_is_script = true;
+  m_code.m_unwind_data.m_name = fcn.name ();
+  m_code.m_unwind_data.m_file = fcn.fcn_file_name ();
+  PUSH_DATA (fcn.name ());
+  PUSH_DATA (std::string {"user-script"});
+  PUSH_DATA (fcn.profiler_name ());
+
+  tree_statement_list *cmd_list = fcn.body ();
+
+  // The first instruction is the amount of return variables.
+  PUSH_CODE (1); // Only the dummy return '%nargout'
+
+  // The second instruction is the amount of arguments
+  PUSH_CODE (0);
+
+  // The third is the amount of locals, which need to be set
+  // after compiling the function. So we need to store the offset
+  // to it for later
+  m_offset_n_locals = CODE_SIZE ();
+  PUSH_CODE (-1); // Placeholder
+  PUSH_CODE (-1);
+
+  // The first slot is a native int representation of nargout
+  // so we add a dummy slot object for it
+  add_id_to_table("%nargout");
+
+  // We always need the magic id "ans"
+  int slot_ans = add_id_to_table ("ans");
+  // m_map_user_locals_names_to_slot keeps track of what user symbols to borrow from and return back to
+  // the eval_frame when the script frame is pushed and popped.
+  m_code.m_unwind_data.m_map_user_locals_names_to_slot.insert ({"ans", slot_ans});
+
+  // We add all identifiers in the body to the id-table. We also
+  // make a map mapping the interpreters frame offset of an id
+  // to the frame offset in the bytecode VM frame.
+  if (cmd_list)
+    {
+      auto v_names_offsets = collect_idnames_walker::collect_id_names (*cmd_list);
+
+      for (auto name_offset : v_names_offsets)
+        {
+          std::string name = name_offset.m_name;
+          unsigned frame_offset = name_offset.m_frame_offset;
+          unsigned offset = name_offset.m_offset;
+
+          add_id_to_table (name);
+          int slot = SLOT (name);
+
+          if (frame_offset >= m_code.m_unwind_data.m_external_frame_offset_to_internal.size ())
+            m_code.m_unwind_data.m_external_frame_offset_to_internal.resize (frame_offset + 1);
+
+          m_code.m_unwind_data.m_external_frame_offset_to_internal[frame_offset][offset] = slot;
+
+          if (frame_offset == 0)
+            m_code.m_unwind_data.m_map_user_locals_names_to_slot.insert ({name, slot});
+        }
+    }
+
+  // The function name should be in the frame as an id too
+  std::string function_name = fcn.name ();
+  auto dot_idx = function_name.find_last_of ('.'); // Names might be e.g. "get.Count" but we only want "Count"
+  if (dot_idx != std::string::npos)
+    function_name = function_name.substr (dot_idx + 1);
+
+  // We need to keep track of which id is the function name so that
+  // we can add the id to the id-table and get its external offset.
+  //
+  // Note that the file 'bar.m' can have one function with the id name 'foo'
+  // which will be added to the scope by the parser, but the function name
+  // and thus call-name is 'bar'.
+  std::size_t idx_fn_name = 1; // "1" since 'ans' is always added first
+
+  for (auto p : fcn.scope ().symbols ())
+    {
+      std::string name = p.first;
+      symbol_record sym = p.second;
+      std::size_t offset = sym.data_offset ();
+
+      bool is_fn_id = offset == idx_fn_name; // Are we at the function name id?
+
+      auto it = m_map_locals_to_slot.find (name);
+      if (it == m_map_locals_to_slot.end ())
+        {
+          if (is_fn_id)
+            {
+              // Add the function name id to the table and add the correct external offset.
+              // (The name might not be the call-name of the function.)
+              int slot = add_id_to_table (name);
+              m_code.m_unwind_data.m_external_frame_offset_to_internal[0][offset] = slot;
+            }
+          else
+            continue;
+        }
+
+      // Map the special names to the slots that were added for them above
+      // or while walking the body.
+      if (name == "varargin")
+        m_code.m_unwind_data.m_external_frame_offset_to_internal[0][offset] = SLOT ("varargin");
+      else if (name == "varargout")
+        m_code.m_unwind_data.m_external_frame_offset_to_internal[0][offset] = SLOT ("varargout");
+      else if (name == "ans")
+        m_code.m_unwind_data.m_external_frame_offset_to_internal[0][offset] = SLOT ("ans");
+    }
+
+  PUSH_CODE (INSTR::ENTER_SCRIPT_FRAME); // Special opcode to steal the "eval scopes" values
+
+  CHECK_NONNULL (cmd_list);
+  cmd_list->accept (*this);
+
+  // EXIT_SCRIPT_FRAME is put before each RET during the walk.
+
+  // When the last byte of opcode, a 'RET', is to be executed, the VM reads the
+  // next byte of code and puts it in 'arg0'.  So, we need to add a dummy
+  // opcode afterwards to prevent out-of-bounds reads.
+  PUSH_CODE (INSTR::RET);
+
+  // Set the amount of locals that has a placeholder since earlier
+  SET_CODE_SHORT (m_offset_n_locals, m_n_locals);
+
+  // We want to add the locals to the scope in slot order
+  // so we push all the locals' names to a vector by their slot
+  // number
+  unsigned n_slots = m_map_locals_to_slot.size ();
+  CHECK (n_slots == static_cast<unsigned> (m_n_locals));
+  std::vector<std::string> names (n_slots);
+
+  auto iter = m_map_locals_to_slot.begin ();
+  for (unsigned i = 0; i < n_slots; i++)
+    {
+      auto kv = *iter++;
+
+      const std::string& name = kv.first;
+      int slot = kv.second;
+
+      CHECK (slot >= 0 && slot < static_cast<int> (n_slots));
+      CHECK (names[slot] == ""); // Check not duplicate slot number used
+
+      names[slot] = name;
+    }
+
+  // Check that the mapping between external offsets and internal slots has no holes in it
+  // (relies on std::map iterating its integer keys in ascending order).
+   int i = 0;
+  for (auto it : m_code.m_unwind_data.m_external_frame_offset_to_internal[0])
+    {
+      int external_offset = it.first;
+      CHECK (external_offset == i);
+      i++;
+    }
+
+  // Save how many symbols there were originally, so that the VM can easily check if more symbols
+  // have been added to the scope object when a frame is pushed to the call stack.
+  m_code.m_unwind_data.m_n_orig_scope_size = 0;
+  for (auto m : m_code.m_unwind_data.m_external_frame_offset_to_internal)
+    m_code.m_unwind_data.m_n_orig_scope_size += m.size ();
+
+  // The profiler needs to know these sizes when copying from pointers.
+  m_code.m_unwind_data.m_code_size = m_code.m_code.size ();
+  m_code.m_unwind_data.m_ids_size = m_code.m_ids.size ();
+
+  m_code.m_unwind_data.m_n_returns = 1; // Only %nargout
+  m_code.m_unwind_data.m_n_args = 0; // No args
+  m_code.m_unwind_data.m_n_locals = n_slots;
+}
+
+// Compile a user function to bytecode.  Lays out the VM frame (the
+// '%nargout' slot, then returns, then arguments, then locals), builds the
+// mapping from the interpreter's external frame offsets to internal VM
+// slots, emits setup code for anonymous-function captures, nested-function
+// frame sharing and default argument values, then walks the body to emit
+// its code and finally records the frame sizes in the unwind data.
+void
+bytecode_walker::
+visit_octave_user_function (octave_user_function& fcn)
+{
+  m_code.m_unwind_data.m_external_frame_offset_to_internal.push_back ({});
+  m_code.m_unwind_data.m_n_nested_fn = m_n_nested_fn;
+
+  m_code.m_unwind_data.m_name = fcn.name ();
+  m_code.m_unwind_data.m_file = fcn.fcn_file_name ();
+  PUSH_DATA (fcn.name ());
+  PUSH_DATA (std::string {"user-function"});
+  PUSH_DATA (fcn.profiler_name ());
+
+  tree_statement_list *cmd_list = fcn.body ();
+  tree_parameter_list *returns = fcn.return_list();
+  tree_parameter_list *paras = fcn.parameter_list ();
+
+  std::vector<std::string> v_paras;
+  if (paras) // paras is 0 if function args are missing, e.g. "function foo\nend"
+    {
+      for (auto it = paras->begin (); it != paras->end (); it++)
+        {
+          CHECK_NONNULL (*it);
+          CHECK ((*it)->ident ());
+          v_paras.push_back ((*it)->name ());
+        }
+    }
+
+  // Does the function output varargout?
+  m_varargout = returns ? returns->takes_varargs () : false;
+  // "varargout" is not in the 'returns' list (if in the proper last position)
+  // so add one to size if 'm_varargout' is true
+  int n_returns = returns ? returns->size () + m_varargout: 0;
+
+  // The first instruction is the amount of return variables. Negative for varargout.
+  // Anonymous functions have no return parameter list specified. For those we set
+  // the amount of returns to magic number -128.
+  if (!returns)
+    {
+      m_code.m_unwind_data.m_is_anon = true;
+      CHECK (m_is_anon); // m_is_anon is set by compile_anon_user_function ()
+      PUSH_CODE (-128);
+    }
+  else
+    PUSH_CODE (m_varargout ? -(n_returns + 1) : (n_returns + 1)); // +1 for native '%nargout' on the stack.
+
+  // Check if the last parameter is "varargin"
+  // If that is the case, we need to mess with the stacks
+  // in the vm, so mark the function as having negative
+  // amount of parameters.
+  bool is_varargin = paras ? paras->takes_varargs () : false;
+
+  // varargin is not among the parameter_lists elements, so
+  // add it to the vector of parameter names
+  if (is_varargin)
+    v_paras.push_back("varargin");
+
+  // The second instruction is the amount of arguments
+  int n_paras = v_paras.size ();
+  PUSH_CODE (is_varargin ? -n_paras : n_paras);
+
+  // The third is the amount of locals, which need to be set
+  // after compiling the function. So we need to store the offset
+  // to it for later
+  m_offset_n_locals = CODE_SIZE ();
+  PUSH_CODE (-1); // Placeholder
+  PUSH_CODE (-1);
+
+  // The first slot is a native int representation of nargout
+  // so we add a dummy slot object for it
+  add_id_to_table("%nargout");
+
+  // Then the return values
+  if (returns)
+    {
+      for (auto it = returns->begin (); it != returns->end (); it++)
+        {
+          tree_identifier *id = (*it)->ident ();
+          CHECK_NONNULL (id);
+          std::string name = id->name ();
+          int slot = add_id_to_table (name);
+          m_code.m_unwind_data.m_map_user_locals_names_to_slot.insert ({name, slot});
+        }
+    }
+
+  if (m_varargout)
+  {
+    int slot = add_id_to_table ("varargout"); // Not in the returns list. Need to be last
+    m_code.m_unwind_data.m_map_user_locals_names_to_slot.insert ({"varargout", slot});
+  }
+
+  // Then the arguments
+  for (std::string name : v_paras)
+    {
+      if (m_map_locals_to_slot.find (name) !=
+          m_map_locals_to_slot.end ())
+        {
+          // So the parameter is also a return value
+          // so we need to push it and assign
+          // it to the return value, since the caller
+          // will write the argument to the argument slot.
+          //
+          // We give the parameter a dummy name so it
+          // still occupies a slot, and assigns a dummy
+          // object to it after we copied it to the return
+          // slot.
+
+          std::string dummy_name = "!" + name;
+          int slot_dummy = add_id_to_table (dummy_name);
+
+          // PUSH_SLOT_INDEXED just pushes and does not check
+          // for doing a cmd function call.
+          MAYBE_PUSH_WIDE_OPEXT (slot_dummy);
+          PUSH_CODE (INSTR::PUSH_SLOT_INDEXED);
+          PUSH_SLOT (slot_dummy);
+          int slot = SLOT (name);
+          MAYBE_PUSH_WIDE_OPEXT (slot);
+          PUSH_CODE (INSTR::FORCE_ASSIGN); // Accepts undefined rhs
+          PUSH_SLOT (slot);
+          PUSH_CODE (INSTR::PUSH_FALSE); // False will do
+          MAYBE_PUSH_WIDE_OPEXT (slot_dummy);
+          PUSH_CODE (INSTR::ASSIGN);
+          PUSH_SLOT (slot_dummy);
+
+          continue;
+        }
+
+      int slot = add_id_to_table (name);
+      m_code.m_unwind_data.m_map_user_locals_names_to_slot.insert ({name, slot});
+    }
+
+  // We always need the magic id "ans"
+  int slot_ans = add_id_to_table ("ans");
+  m_code.m_unwind_data.m_map_user_locals_names_to_slot.insert ({"ans", slot_ans});
+
+  // We add all identifiers in the body to the id-table. We also
+  // make a map mapping the interpreters frame offset of an id
+  // to the frame offset in the bytecode VM frame.
+  if (cmd_list)
+    {
+      auto v_names_offsets = collect_idnames_walker::collect_id_names (*cmd_list);
+
+      for (auto name_offset : v_names_offsets)
+        {
+          std::string name = name_offset.m_name;
+          unsigned offset = name_offset.m_offset;
+          unsigned frame_offset = name_offset.m_frame_offset;
+          add_id_to_table (name);
+          int slot = SLOT (name);
+
+          if (frame_offset >= m_code.m_unwind_data.m_external_frame_offset_to_internal.size ())
+            m_code.m_unwind_data.m_external_frame_offset_to_internal.resize (frame_offset + 1);
+
+          m_code.m_unwind_data.m_external_frame_offset_to_internal[frame_offset][offset] = slot;
+
+          if (frame_offset == 0)
+            m_code.m_unwind_data.m_map_user_locals_names_to_slot.insert ({name, slot});
+        }
+    }
+  // We need the arguments and return id:s in the map too.
+  if (paras)
+    {
+      for (auto it = paras->begin (); it != paras->end (); it++)
+        {
+          CHECK_NONNULL (*it);
+          tree_identifier *id = (*it)->ident ();
+          int offset = id->symbol ().data_offset ();
+          std::string name = id->name ();
+          int slot = SLOT (name);
+          m_code.m_unwind_data.m_external_frame_offset_to_internal[0][offset] = slot;
+          m_code.m_unwind_data.m_map_user_locals_names_to_slot.insert ({name, slot});
+
+          // If the parameter has an init expression e.g.
+          // "function foo (a = sin (pi))"
+          // , we need to search it for id:s too.
+          tree_expression *init_expr = (*it)->expression ();
+          if (init_expr)
+            {
+              auto v_names_offsets = collect_idnames_walker::collect_id_names (*init_expr);
+              for (auto name_offset : v_names_offsets)
+                {
+                  std::string name_i = name_offset.m_name;
+                  int offset_i = name_offset.m_offset;
+                  int slot_i = add_id_to_table (name_i);
+
+                  m_code.m_unwind_data.m_external_frame_offset_to_internal[0][offset_i] = slot_i;
+                  m_code.m_unwind_data.m_map_user_locals_names_to_slot.insert ({name_i, slot_i});
+                }
+            }
+        }
+    }
+  if (returns)
+    {
+      for (auto it = returns->begin (); it != returns->end (); it++)
+        {
+          std::string name = (*it)->name();
+          tree_identifier *id = (*it)->ident ();
+          int frame_offset = id->symbol ().data_offset ();
+          int slot = SLOT (name);
+          m_code.m_unwind_data.m_external_frame_offset_to_internal[0][frame_offset] = slot;
+        }
+    }
+
+  // The function name should be in the frame as an id too, as well
+  // as 'varargin', 'varargout' and 'ans'.
+  //
+  // 'ans' is already added to the id table and 'varargin' and 'varargout' too
+  // if they are used, but we don't have their external offset.
+  //
+  // The function name is not added to the id table yet.
+  //
+  // Note that there might be symbols added to the original scope by
+  // eg. eval ("foo = 3"). We just ignore those.
+  //
+  // We need to keep track of which id is the function name so that
+  // we can add the id to the id-table and get its external offset.
+  //
+  // Note that the file 'bar.m' can have one function with the id name 'foo'
+  // which will be added to the scope by the parser, but the function name
+  // and thus call-name is 'bar'.
+  //
+  // Also, for nested functions any parent nesting function name need to
+  // be included too.
+  if (!fcn.is_anonymous_function ())
+    {
+      auto scope = fcn.scope ();
+      std::string fn_name = scope.fcn_name ();
+
+      // Names might be e.g. "get.Count" but we only want "Count"
+      auto dot_idx = fn_name.find_last_of ('.');
+      if (dot_idx != std::string::npos)
+        fn_name = fn_name.substr (dot_idx + 1);
+
+      symbol_record fn_sr = scope.find_symbol (fn_name);
+      CHECK (fn_sr.is_valid ());
+
+      std::size_t offset = fn_sr.data_offset ();
+      std::size_t frame_offset = fn_sr.frame_offset ();
+
+      if (frame_offset >= m_code.m_unwind_data.m_external_frame_offset_to_internal.size ())
+        m_code.m_unwind_data.m_external_frame_offset_to_internal.resize (frame_offset + 1);
+
+      int slot = add_id_to_table (fn_name);
+      m_code.m_unwind_data.m_external_frame_offset_to_internal[frame_offset][offset] = slot;
+
+      m_code.m_unwind_data.m_map_user_locals_names_to_slot.insert ({fn_name, slot});
+    }
+
+  // Add the parents' function names for nested functions
+  if (m_n_nested_fn)
+    {
+      auto scope = fcn.scope ();
+      for (std::string parent_name : scope.parent_fcn_names ())
+        {
+          symbol_record parent_sr = scope.find_symbol (parent_name);
+          CHECK (parent_sr.is_valid ());
+
+          std::size_t offset = parent_sr.data_offset ();
+          std::size_t frame_offset = parent_sr.frame_offset ();
+
+          if (frame_offset >= m_code.m_unwind_data.m_external_frame_offset_to_internal.size ())
+            m_code.m_unwind_data.m_external_frame_offset_to_internal.resize (frame_offset + 1);
+
+          int slot = add_id_to_table (parent_name);
+          m_code.m_unwind_data.m_external_frame_offset_to_internal[frame_offset][offset] = slot;
+
+          m_code.m_unwind_data.m_map_user_locals_names_to_slot.insert ({parent_name, slot});
+        }
+    }
+
+  // Add varargin, varargout, ans
+  for (auto p : fcn.scope ().symbols ())
+    {
+      std::string name = p.first;
+      symbol_record sym = p.second;
+      std::size_t offset = sym.data_offset ();
+      std::size_t frame_offset = sym.frame_offset ();
+
+      if (name == "varargin")
+        m_code.m_unwind_data.m_external_frame_offset_to_internal[0][offset] = SLOT ("varargin");
+      else if (name == "varargout")
+        m_code.m_unwind_data.m_external_frame_offset_to_internal[0][offset] = SLOT ("varargout");
+      else if (name == "ans")
+        m_code.m_unwind_data.m_external_frame_offset_to_internal[0][offset] = SLOT ("ans");
+      else if (offset == 1 && frame_offset == 0 && !fcn.is_anonymous_function ())
+        {
+          // If the function name and function identifier of the root function don't match ('foo.m' with 'bar' function)
+          // the identifier's name ('bar') will end up on offset 1. Make sure it is added by always adding
+          // the 1 offset symbol in the scope for nested functions.
+          m_code.m_unwind_data.m_external_frame_offset_to_internal[0][1] = add_id_to_table (sym.name ());
+        }
+    }
+
+  // If the id:s "i", "j", "I","J" or "e" are used we try to figure out if they are used as variables or as
+  // the imaginary unit, to choose between PUSH_I, PUSH_E (specialized) or a generic push op-code.
+  bool ije_used = m_map_locals_to_slot.find ("i") != m_map_locals_to_slot.end ();
+  ije_used |= m_map_locals_to_slot.find ("j") != m_map_locals_to_slot.end ();
+  ije_used |= m_map_locals_to_slot.find ("I") != m_map_locals_to_slot.end ();
+  ije_used |= m_map_locals_to_slot.find ("J") != m_map_locals_to_slot.end ();
+  ije_used |= m_map_locals_to_slot.find ("e") != m_map_locals_to_slot.end ();
+
+  if (ije_used)
+    m_set_assigned_ids = find_assigned_ids_walker::find_ids (fcn);
+
+  // A function cannot be both anonymous and nested.
+  CHECK (! (m_is_anon && m_n_nested_fn));
+
+  // Add code to initialize variables in anonymous functions that took their value from
+  // the parent scope.
+  if (m_is_anon)
+    {
+      CHECK_NONNULL (m_anon_local_values);
+      // external frame offset 1 is used for storing internal offsets to local
+      // variables in nested anonymous functions, so assure there is no other use
+      // of frame offset by checking the size is one.
+      CHECK (m_code.m_unwind_data.m_external_frame_offset_to_internal.size () == 1);
+
+      auto scope = fcn.scope ();
+
+      for (auto kv : *m_anon_local_values)
+        {
+          std::string name = kv.first;
+
+          // Add slots for the locals provided by the scope of the anonymous function
+          if (m_map_locals_to_slot.find (name) == m_map_locals_to_slot.end ())
+            {
+              int slot_added = add_id_to_table (name);
+
+              auto sym = scope.find_symbol (name);
+
+              CHECK_PANIC (sym.is_valid ());
+              CHECK_PANIC (sym.frame_offset () == 0);
+
+              // Increasing number for fake external offset
+              int external_offset = sym.data_offset ();
+
+              m_code.m_unwind_data.m_external_frame_offset_to_internal[0][external_offset] = slot_added;
+              m_code.m_unwind_data.m_map_user_locals_names_to_slot.insert ({name, slot_added});
+            }
+
+          int slot = SLOT (name);
+
+          int data_offset = DATA_SIZE ();
+          octave_value val = kv.second;
+          PUSH_DATA (val);
+
+          // Push the value to the operand stack
+          PUSH_CODE_LOAD_CST (data_offset);
+          // Assign the value on the stack to the slot.
+          MAYBE_PUSH_WIDE_OPEXT (slot);
+          PUSH_CODE (INSTR::FORCE_ASSIGN);
+          PUSH_SLOT (slot);
+        }
+    }
+  if (m_n_nested_fn)
+    {
+      // So we are compiling a nested function. We need to put references to the proper parent stack
+      // from the nested functions stack for each shared variable. To do this we store which id:s in
+      // the nested stack that are shared with the parent stack in the unwind date.
+      // Then the opcode ENTER_NESTED_FRAME sets up the references at runtime.
+      //
+      // All variables of the parent are shared with the nested function, including 'ans'. The arguments
+      // and returns of the nested function are local to it. The rule is recursive for nested nested functions.
+      //
+      // A local slot number is only added once, and outer scopes take precedence.
+      CHECK (fcn.is_nested_function ());
+
+      std::vector<unwind_data::nested_var_offset> v_nested_vars;
+      std::set<int> set_locals_added;
+
+      for (unsigned i = 0; i < m_v_parent_fns.size (); i++)
+        {
+          unsigned depth = m_v_parent_fns.size () - i;
+
+          octave_user_function *parent_fn = m_v_parent_fns[i];
+
+          bytecode &bc = parent_fn->get_bytecode ();
+
+          for (unsigned parent_slot_idx = 1; parent_slot_idx < bc.m_ids.size (); parent_slot_idx++) // 1, for %nargout
+            {
+              std::string &parent_id_name = bc.m_ids[parent_slot_idx];
+
+              // Skip special id:s
+              if (parent_id_name.size ())
+                {
+                  switch (parent_id_name.front ())
+                    {
+                      case '%':
+                      case '#':
+                      case '!':
+                        continue;
+                      default:
+                        ;
+                    }
+                }
+
+              auto it = m_map_locals_to_slot.find (parent_id_name);
+              if (it == m_map_locals_to_slot.end ())
+                continue;
+
+              int local_slot_nr = it->second;
+
+              // The return values and arguments of the nested function are not shared with parents
+              if (local_slot_nr < 1 + n_returns + n_paras)
+                continue;
+
+              unwind_data::nested_var_offset data;
+              data.m_depth = depth;
+              data.m_slot_nested = local_slot_nr;
+              data.m_slot_parent = parent_slot_idx;
+
+              // Add only a local slot nr once to v_nested_vars
+              if (!set_locals_added.count (local_slot_nr))
+                {
+                  v_nested_vars.push_back (data);
+                  set_locals_added.insert (local_slot_nr);
+                }
+            }
+        }
+
+      m_code.m_unwind_data.m_v_nested_vars = std::move (v_nested_vars);
+      PUSH_CODE (INSTR::ENTER_NESTED_FRAME);
+    }
+
+  // Add code to handle default arguments. If an argument is undefined or
+  // "magic colon" it is to get its default value.
+  if (paras)
+    {
+      for (auto it = paras->begin (); it != paras->end (); it++)
+        {
+          tree_expression *init_expr = (*it)->expression ();
+          // TODO: Default init for varargin?
+
+          if (init_expr)
+            {
+              // There is a default arg.
+
+              std::string name = (*it)->name ();
+              int slot = SLOT (name);
+
+              // Push the arg to the operand stack from its slot
+              MAYBE_PUSH_WIDE_OPEXT (slot);
+              PUSH_CODE (INSTR::PUSH_SLOT_INDEXED);
+              PUSH_SLOT (slot);
+              // If it is undefined or "Magic colon", execute the init code
+              // otherwise jump past it.
+              PUSH_CODE (INSTR::JMP_IFDEF);
+              int need_after = CODE_SIZE ();
+              PUSH_CODE_SHORT (-1); // Placeholder
+
+              INC_DEPTH();
+              PUSH_NARGOUT(1); // nargout is 1 for simple assignments
+
+              // Walk for the rhs code
+              init_expr->accept (*this);
+
+              // The value of rhs is now on the operand stack. Assign it
+              // to the arg.
+              MAYBE_PUSH_WIDE_OPEXT (slot);
+              PUSH_CODE (INSTR::ASSIGN);
+              PUSH_SLOT (slot);
+
+              POP_NARGOUT ();
+              DEC_DEPTH();
+
+              // The jump need to go here, if the argument is defined, so
+              // set the placeholder from above.
+              SET_CODE_SHORT (need_after, CODE_SIZE ());
+            }
+        }
+    }
+
+  CHECK_NONNULL (cmd_list);
+  cmd_list->accept (*this);
+
+  // Set the amount of locals that has a placeholder since earlier
+  SET_CODE_SHORT (m_offset_n_locals, m_n_locals);
+
+  // Anonymous functions has no end marker, so push RET_ANON here
+  if (m_is_anon)
+    PUSH_CODE (INSTR::RET_ANON);
+
+  // When the last byte of opcode, a 'RET', is to be executed, the VM reads the
+  // next byte of code and puts it in 'arg0'.  So, we need to add a dummy
+  // opcode afterwards to prevent out-of-bounds reads.
+  PUSH_CODE (INSTR::RET);
+
+  // We want to add the locals to the scope in slot order
+  // so we push all the locals' names to a vector by their slot
+  // number
+  unsigned n_slots = m_map_locals_to_slot.size ();
+  CHECK (n_slots == static_cast<unsigned> (m_n_locals));
+  std::vector<std::string> names (n_slots);
+
+  auto iter = m_map_locals_to_slot.begin ();
+  for (unsigned i = 0; i < n_slots; i++)
+    {
+      auto kv = *iter++;
+
+      const std::string& name = kv.first;
+      int slot = kv.second;
+
+      CHECK (slot >= 0 && slot < static_cast<int> (n_slots));
+      CHECK (names[slot] == ""); // Check not duplicate slot number used
+
+      names[slot] = name;
+    }
+
+  // Check that the mapping between external offsets and internal slots has no holes in it
+  // (relies on std::map iterating its integer keys in ascending order).
+  if (m_n_nested_fn == 0)
+    {
+      int i = 0;
+      for (auto it : m_code.m_unwind_data.m_external_frame_offset_to_internal[0])
+        {
+          int external_offset = it.first;
+          CHECK (external_offset == i);
+          i++;
+        }
+    }
+
+  // Save how many symbols there were originally, so that the VM can easily check if more symbols
+  // have been added to the scope object.
+  m_code.m_unwind_data.m_n_orig_scope_size = 0;
+  for (auto m : m_code.m_unwind_data.m_external_frame_offset_to_internal)
+    m_code.m_unwind_data.m_n_orig_scope_size += m.size ();
+
+  // The profiler needs to know these sizes when copying from pointers.
+  m_code.m_unwind_data.m_code_size = m_code.m_code.size ();
+  m_code.m_unwind_data.m_ids_size = m_code.m_ids.size ();
+
+  m_code.m_unwind_data.m_n_returns = n_returns;
+  m_code.m_unwind_data.m_n_args = n_paras;
+  m_code.m_unwind_data.m_n_locals = n_slots;
+}
+
+// Compile a multi-assignment, e.g. "[a, b] = foo ()".
+//
+// If every lhs element is a plain identifier the rhs is compiled with
+// nargout set to the number of lhs elements and the results are bound
+// with the ASSIGNN opcode.  Ignored outputs ("~") get renamed black-hole
+// slots ("%~N") and the whole rhs is wrapped in an unwind-protect region
+// so the VM's ignore-outputs state is always cleared, even on error.
+// Any other lhs form falls back to the tree evaluator via the EVAL
+// opcode (see the TODO below).
+void
+bytecode_walker::
+visit_multi_assignment (tree_multi_assignment& expr)
+{
+  INC_DEPTH();
+  int outer_nargout = NARGOUT ();
+
+  tree_argument_list *lhs = expr.left_hand_side ();
+  // Bug fix: check before the loop below dereferences 'lhs'; the check
+  // used to come after the first use, making it useless.
+  CHECK_NONNULL (lhs);
+
+  // Lists are annoying, move lhs elements to a vector
+  std::vector<tree_expression *> v_lhs;
+  for (auto it = lhs->begin (); it != lhs->end (); it++)
+    {
+      CHECK_NONNULL(*it);
+      v_lhs.push_back (*it);
+    }
+
+  // Set nargout
+  size_t n_args = v_lhs.size ();
+  PUSH_NARGOUT (n_args);
+
+  std::vector<std::string> v_arg_names;
+  std::vector<bool> v_is_blackhole;
+
+  // Can't nest ignored outputs as the code here is written. Is it even possible to nest those?
+  CHECK (m_pending_ignore_outputs == false);
+
+  // TODO:
+  //       Something smarter is needed to split of cs-lists among different lhs values
+  //       This does not work for e.g. [C{1:2}, D] = {1,2,3}{:}
+  //       Probably need some opcode ASSIGNNX or something.
+  //       See tree_multi_assignment::evaluate_n and octave_lvalue::eval_for_numel
+  //
+  //       There probably has to be another tree_walker figuring out how many elements
+  //       a lvalue will "ask for". In [C{1:2}] = deal (1,2) deal will have to have nargout 2
+  //       which is annoying since we will have to be able to set nargout dynamically.
+  //       With a slot maybe?
+  //
+  //       Maybe make a octave_lvalue and call numel() for simplicity?
+  //
+  //       Meanwhile, just call eval on it ...
+
+  bool any_lhs_not_id = false;
+  for (tree_expression *e : v_lhs)
+    if (! e->is_identifier ())
+      any_lhs_not_id = true;
+
+  if (any_lhs_not_id)
+    {
+      // The VM need to access the tree expr.
+      // Abuse the dbg info.
+      PUSH_TREE_FOR_EVAL (&expr);
+      int tree_idx = -CODE_SIZE (); // NB: Negative to not collide with debug data
+
+      MAYBE_PUSH_ANON_NARGOUT_OPEXT (outer_nargout);
+      PUSH_CODE (INSTR::EVAL);
+      PUSH_CODE (outer_nargout);
+      PUSH_CODE_INT (tree_idx);
+
+      if (!m_is_anon && DEPTH () == 1)
+        PUSH_CODE (INSTR::POP);
+
+      POP_NARGOUT ();
+      DEC_DEPTH ();
+      return;
+    }
+
+  int n_blackholes = 0;
+  int i = 0;
+  for (tree_expression *e : v_lhs)
+    {
+      if (!e->is_identifier ())
+        {
+          v_arg_names.push_back ("");
+          v_is_blackhole.push_back (false);
+          continue;
+        }
+
+      std::string name = e->name ();
+      if (name == "~") // We need to handle the special "ignore id" '~' as in [~, a] = foo ()
+        {
+          m_v_ignored.push_back (i + 1); // Output parameters are one-indexed
+          name = "%~" + std::to_string (n_blackholes++); // We rename it to "%~X"
+          v_is_blackhole.push_back (true);
+        }
+      else
+        v_is_blackhole.push_back (false);
+
+      v_arg_names.push_back (name);
+
+      add_id_to_table (name);
+
+      i++;
+    }
+
+  CHECK (v_arg_names.size () == n_args);
+
+  /* Handle ignored outputs. Since the called function can ask with isargout()
+   * whether an output is ignored or not we need to set a state for this. */
+  if (m_v_ignored.size ())
+    {
+      m_pending_ignore_outputs = 1;
+      m_ignored_of_total = n_args;
+    }
+
+  tree_expression *rhs = expr.right_hand_side ();
+  CHECK_NONNULL(rhs);
+
+  // We want push NARGOUT elements to the operand stack
+
+  emit_unwind_protect_data D;
+  if (m_pending_ignore_outputs)
+    D = emit_unwind_protect_code_start ();
+
+  rhs->accept (*this); // Walks rhs for NARGOUT elements
+
+  if (m_pending_ignore_outputs)
+    {
+      // The outer expression in rhs should have set m_ignored_ip_start to its ip offset.
+      CHECK (m_ignored_ip_start);
+      UNWIND (D.m_idx_unwind).m_ip_start = m_ignored_ip_start;
+    }
+
+  if (DEPTH () != 1)
+    TODO ("Only root multi assignment supported now");
+
+  PUSH_CODE (INSTR::ASSIGNN);
+  // Push the amount of slots
+  PUSH_CODE (v_lhs.size ());
+
+  // Push the slots
+  for (std::string &name : v_arg_names)
+    PUSH_WSLOT (SLOT (name));
+
+  // Emit code to disp if no ;
+  for (std::string &name : v_arg_names)
+    maybe_emit_push_and_disp_id (expr, name);
+
+  if (m_pending_ignore_outputs)
+    {
+      emit_unwind_protect_code_before_cleanup (D);
+
+      // As we are ignoring outputs we need to unwind protect to clear the VM state with opcode CLEAR_IGNORE_OUTPUTS
+      // We need to supply each black hole slot
+
+      PUSH_CODE (INSTR::CLEAR_IGNORE_OUTPUTS);
+      PUSH_CODE (n_blackholes);
+
+      for (unsigned j = 0; j < n_args; j++)
+        {
+          if (v_is_blackhole.at (j))
+            PUSH_WSLOT (SLOT (v_arg_names.at (j)));
+        }
+
+      emit_unwind_protect_code_end (D);
+    }
+
+  if (m_pending_ignore_outputs)
+    {
+      m_pending_ignore_outputs = 0;
+      m_v_ignored.clear ();
+      m_ignored_ip_start = 0;
+    }
+
+  POP_NARGOUT ();
+  DEC_DEPTH ();
+}
+
+// Lookup table mapping Octave function names to the corresponding
+// octave_base_value unary mapper enum value. Used by the walker to
+// emit a specialized opcode for calls to these elementwise functions
+// instead of a generic function call.
+std::map<std::string, octave_base_value::unary_mapper_t> bytecode_walker::m_name_to_unary_func =
+{
+{"abs",       octave_base_value::umap_abs},
+{"acos",      octave_base_value::umap_acos},
+{"acosh",     octave_base_value::umap_acosh},
+{"angle",     octave_base_value::umap_angle},
+{"arg",       octave_base_value::umap_arg},
+{"asin",      octave_base_value::umap_asin},
+{"asinh",     octave_base_value::umap_asinh},
+{"atan",      octave_base_value::umap_atan},
+{"atanh",     octave_base_value::umap_atanh},
+{"cbrt",      octave_base_value::umap_cbrt},
+{"ceil",      octave_base_value::umap_ceil},
+{"conj",      octave_base_value::umap_conj},
+{"cos",       octave_base_value::umap_cos},
+{"cosh",      octave_base_value::umap_cosh},
+{"erf",       octave_base_value::umap_erf},
+{"erfinv",    octave_base_value::umap_erfinv},
+{"erfcinv",   octave_base_value::umap_erfcinv},
+{"erfc",      octave_base_value::umap_erfc},
+{"erfcx",     octave_base_value::umap_erfcx},
+{"erfi",      octave_base_value::umap_erfi},
+{"dawson",    octave_base_value::umap_dawson},
+{"exp",       octave_base_value::umap_exp},
+{"expm1",     octave_base_value::umap_expm1},
+{"isfinite",  octave_base_value::umap_isfinite},
+{"fix",       octave_base_value::umap_fix},
+{"floor",     octave_base_value::umap_floor},
+{"gamma",     octave_base_value::umap_gamma},
+{"imag",      octave_base_value::umap_imag},
+{"isinf",     octave_base_value::umap_isinf},
+{"isna",      octave_base_value::umap_isna},
+{"isnan",     octave_base_value::umap_isnan},
+{"lgamma",    octave_base_value::umap_lgamma},
+{"log",       octave_base_value::umap_log},
+{"log2",      octave_base_value::umap_log2},
+{"log10",     octave_base_value::umap_log10},
+{"log1p",     octave_base_value::umap_log1p},
+{"real",      octave_base_value::umap_real},
+{"round",     octave_base_value::umap_round},
+{"roundb",    octave_base_value::umap_roundb},
+{"signum",    octave_base_value::umap_signum},
+{"sin",       octave_base_value::umap_sin},
+{"sinh",      octave_base_value::umap_sinh},
+{"sqrt",      octave_base_value::umap_sqrt},
+{"tan",       octave_base_value::umap_tan},
+{"tanh",      octave_base_value::umap_tanh},
+{"isalnum",   octave_base_value::umap_xisalnum},
+{"isalpha",   octave_base_value::umap_xisalpha},
+{"isascii",   octave_base_value::umap_xisascii},
+{"iscntrl",   octave_base_value::umap_xiscntrl},
+{"isdigit",   octave_base_value::umap_xisdigit},
+{"isgraph",   octave_base_value::umap_xisgraph},
+{"islower",   octave_base_value::umap_xislower},
+{"isprint",   octave_base_value::umap_xisprint},
+{"ispunct",   octave_base_value::umap_xispunct},
+{"isspace",   octave_base_value::umap_xisspace},
+{"isupper",   octave_base_value::umap_xisupper},
+{"isxdigit",  octave_base_value::umap_xisxdigit},
+{"signbit",   octave_base_value::umap_xsignbit},
+{"tolower",   octave_base_value::umap_xtolower},
+{"toupper",   octave_base_value::umap_xtoupper},
+};
+
+// Emit a DISP opcode that displays the object currently on top of the
+// operand stack. Preconditions: the expression asks for its result to
+// be printed (no trailing ';') and we are at root expression depth.
+void
+bytecode_walker::
+emit_disp_obj (tree_expression &expr)
+{
+  CHECK (expr.print_result ());
+  CHECK (DEPTH () == 1);
+  PUSH_CODE (INSTR::DISP);
+  // Magic slot number 0 (%nargout that is a native int) that
+  // will never be printed corresponds to "" name tag stashing of
+  // the ovl before calling display.
+  PUSH_SLOT (0);
+  PUSH_WSLOT (0); // never a command function call
+}
+
+// Emit code that pushes the value stored in NAME's slot and displays it,
+// but only when the expression's result should be printed (no trailing
+// ';') and NAME is not an internal variable (names starting with '%',
+// e.g. black holes).
+void
+bytecode_walker::
+maybe_emit_push_and_disp_id (tree_expression &expr, const std::string &name, const std::string maybe_cmd_name)
+{
+  if (!expr.print_result ())
+    return;
+
+  if (name.size () && name[0] == '%') // Don't print internal variables like black holes
+    return;
+
+  CHECK (DEPTH () == 1);
+  int slot = SLOT (name);
+  MAYBE_PUSH_WIDE_OPEXT (slot);
+  PUSH_CODE (INSTR::PUSH_SLOT_INDEXED);
+  PUSH_SLOT (slot);
+  maybe_emit_disp_id (expr, name, maybe_cmd_name); // Always prints here, since we returned early above if not
+}
+
+// Emit a DISP opcode for the identifier NAME, but only when the
+// expression's result should be printed and NAME is not an internal
+// variable. Also records an arg-name entry spanning the emitted code
+// so inputname () works if the user overloads display ().
+void
+bytecode_walker::
+maybe_emit_disp_id (tree_expression &expr, const std::string &name, const std::string maybe_cmd_name)
+{
+  if (!expr.print_result ())
+    return;
+
+  if (name.size () && name[0] == '%') // Don't print internal variables like black holes
+    return;
+
+  // The Octave function inputname (i) needs to be able to know the name
+  // of the argument to a function, so we need to make an entry of
+  // the id printed if the user overloads display()
+  arg_name_entry arg_name_entry;
+  arg_name_entry.m_arg_names = string_vector {name};
+
+  arg_name_entry.m_ip_start = CODE_SIZE ();
+
+  CHECK (DEPTH () == 1);
+  int slot = SLOT (name);
+  MAYBE_PUSH_WIDE_OPEXT (slot);
+  PUSH_CODE (INSTR::DISP);
+  PUSH_SLOT (slot);
+  // E.g. "x" might either be a command call x() that should print
+  // "ans = ..." or a variable that should print "x = ..." so we
+  // store the information on whether a certain symbol
+  // was a variable or command call in a slot.
+  // Some expressions like "1+1" are never command calls
+  // and have maybe_cmd_name as ""
+  if (maybe_cmd_name != "")
+    PUSH_WSLOT (SLOT (maybe_cmd_name));
+  else
+    PUSH_WSLOT (0);
+
+  arg_name_entry.m_ip_end = CODE_SIZE ();
+  PUSH_ARGNAMES_ENTRY (arg_name_entry);
+}
+
+// If compiling the root expression of an anonymous function, emit an
+// opcode that propagates the caller's ignored outputs to that expression.
+void
+bytecode_walker::
+maybe_emit_anon_maybe_ignore_outputs ()
+{
+  // The ignored outputs of the caller need to be propagated to the outer
+  // nested expression for anonymous functions.
+  // E.g.:
+  //  anon = @() foo (bar (baz ()))
+  // [x, ~] = anon (); % The foo call will have isargout(2) set to false
+  if (m_is_anon && DEPTH () == 1)
+    PUSH_CODE (INSTR::ANON_MAYBE_SET_IGNORE_OUTPUTS);
+}
+
+// For a root expression statement, bind the value on top of the stack to
+// "ans" and display it if the expression is not terminated by ';'.
+// Anonymous functions are excluded: their top-of-stack value is the
+// return value consumed by RET_ANON.
+void
+bytecode_walker::
+maybe_emit_bind_ans_and_disp (tree_expression &expr, const std::string maybe_cmd_name)
+{
+  bool print_result = expr.print_result ();
+
+  // Anonymous functions never print or write to ans.
+  // The value currently on the stack is used as the return value
+  // by RET_ANON. For normal functions the write to ans would pop it.
+  if (m_is_anon)
+    return;
+
+  // If this is a root expression we need to write the return value
+  // to ans.
+  if (DEPTH () == 1)
+    {
+      if (print_result)
+        PUSH_CODE (INSTR::DUP); // Keep a copy on the stack for the display below
+      int slot = SLOT ("ans");
+      MAYBE_PUSH_WIDE_OPEXT (slot);
+      PUSH_CODE (INSTR::BIND_ANS);
+      PUSH_SLOT (slot);
+    }
+
+  // Display under the identifier's own name if there is one, else as "ans"
+  if (expr.is_identifier ())
+    maybe_emit_disp_id (expr, expr.name (), maybe_cmd_name);
+  else
+    maybe_emit_disp_id (expr, "ans", maybe_cmd_name);
+}
+
+// Emit the opcodes for returning from the current frame: pop whatever
+// the enclosing nesting statements (for loops etc.) left on the stack,
+// then emit the appropriate RET variant for scripts, anonymous
+// functions or ordinary functions.
+void
+bytecode_walker::
+emit_return ()
+{
+  // For loops, unwind protect and switches etc have stuff on the stack
+  // inside them, so we need to pop those before executing the RET opcode.
+  auto v = NESTING_STATEMENTS();
+  // Walk it backwards (top to bottom)
+  for (auto it = v.rbegin () ;it !=  v.rend (); it++)
+    {
+      nesting_statement t = *it;
+      switch (t)
+        {
+        case nesting_statement::FOR_LOOP:
+          // We need to pop the counter and n
+          PUSH_CODE (INSTR::POP_N_INTS);
+          PUSH_CODE (2);
+          // Pop the rhs ov (the range)
+          PUSH_CODE (INSTR::POP);
+          break;
+        case nesting_statement::ONE_OV_ON_STACK:
+          PUSH_CODE (INSTR::POP);
+          break;
+        default:
+          ERR("Invalid state");
+        }
+    }
+
+  // Scripts need to tear down their eval frame before returning
+  if (m_is_script)
+    PUSH_CODE (INSTR::EXIT_SCRIPT_FRAME);
+
+  if (m_is_anon)
+    PUSH_CODE (INSTR::RET_ANON);
+  else
+    PUSH_CODE (INSTR::RET);
+}
+
+// Compile a "return" statement. Inside an unwind protect we cannot
+// return directly; instead we jump to a placeholder target so the
+// cleanup code runs first. Also records source-location debug info
+// for the emitted code.
+void
+bytecode_walker::
+visit_return_command (tree_return_command &cmd)
+{
+  int loc_id = N_LOC ();
+  PUSH_LOC ();
+  LOC (loc_id).m_ip_start = CODE_SIZE ();
+
+  // If we are in a unwind protect and returning we need to
+  // run the cleanup code before returning.
+  if (N_UNWIND_RETURN_TARGETS())
+    {
+      PUSH_CODE (INSTR::JMP);
+      int need_unwind = CODE_SIZE ();
+      PUSH_CODE_SHORT (-1); // Placeholder, patched to the cleanup code later
+      PUSH_A_UNWIND_RETURN_TARGET (need_unwind);
+    }
+  else
+    emit_return ();
+
+  LOC (loc_id).m_ip_end = CODE_SIZE ();
+  LOC (loc_id).m_col = cmd.column ();
+  LOC (loc_id).m_line = cmd.line ();
+}
+
+void
+bytecode_walker::
+visit_simple_assignment (tree_simple_assignment& expr)
+{
+  INC_DEPTH();
+  PUSH_NARGOUT(1); // nargout is 1 for simple assignments
+
+  tree_expression *lhs = expr.left_hand_side ();
+
+  CHECK_NONNULL (lhs);
+
+  if (!lhs->is_identifier() && !lhs->is_index_expression())
+    TODO ("lhs not identifier or index expression");
+
+  octave_value::assign_op op = expr.op_type ();
+
+  // There is a general op-code SUBASSIGN_CHAINED for "complex" index assignments
+  // and some specialized for "simple" ones
+  bool complex_index_assignment = false;
+  bool idx_has_ends = false;
+
+  if (lhs->is_index_expression ())
+    {
+      tree_index_expression *idx = dynamic_cast<tree_index_expression*> (lhs);
+      complex_index_assignment = idx->type_tags ().size () != 1;
+
+      // We want to know if there is any magic end index in the arguments
+      std::list<tree_argument_list *> args_lists = idx->arg_lists ();
+      for (auto it = args_lists.begin (); it != args_lists.end (); it++)
+        {
+          if (!*it)
+            continue;
+          tree_argument_list *args = *it;
+          for (auto it_args = args->begin (); it_args != args->end (); it_args++)
+            {
+              if (!*it_args)
+                continue;
+              if (find_end_walker::has_end (**it_args))
+                idx_has_ends = true;
+            }
+        }
+
+      if (op != octave_value::assign_op::op_asn_eq)
+        complex_index_assignment = true;
+    }
+
+  if (complex_index_assignment)
+    {
+      if (idx_has_ends)
+        {
+          // TODO: Need lvalue walk to figure out how big subexpression are for end.
+          //       Eval as workaround.
+
+          // The VM need to access the tree expr.
+          // Abuse the dbg info.
+          PUSH_TREE_FOR_EVAL (&expr);
+          int tree_idx = -CODE_SIZE (); // NB: Negative to not collide with debug data
+
+          PUSH_CODE (INSTR::EVAL);
+          PUSH_CODE (0); // nargout
+          PUSH_CODE_INT (tree_idx);
+
+          if (!m_is_anon && DEPTH () == 1)
+            PUSH_CODE (INSTR::POP);
+
+          POP_NARGOUT ();
+          DEC_DEPTH ();
+          return;
+        }
+
+      tree_index_expression *idx = dynamic_cast<tree_index_expression*> (lhs);
+
+      tree_expression *e = idx->expression ();
+      std::list<tree_argument_list *> args_lists = idx->arg_lists ();
+      std::list<tree_expression *> dyns_fields = idx->dyn_fields ();
+      std::list<string_vector> fields_names = idx->arg_names();
+      std::string type_tags = idx->type_tags ();
+
+      size_t n_chained = type_tags.size ();
+
+      // Begin with rhs
+      tree_expression *rhs = expr.right_hand_side ();
+      CHECK_NONNULL (rhs);
+      rhs->accept (*this);
+
+      // rhs is on the stack now. If the assignment is nested we need to DUP rhs.
+      // E.g. "a = b.c.d = 3" => a = 3 or "a = length (b.c.d = 3)" => a = 1
+      if (DEPTH () != 1)
+        PUSH_CODE (INSTR::DUP);
+
+      if (e->is_identifier ())
+        {
+          // Name of the identifier
+          std::string name = e->name ();
+
+          int slot = add_id_to_table (name);
+
+          MAYBE_PUSH_WIDE_OPEXT (slot);
+          PUSH_CODE (INSTR::PUSH_SLOT_INDEXED);
+          PUSH_SLOT (slot);
+        }
+      else
+        {
+          // Visit the lhs expression. This should put whatever
+          // we are assigning to, on the stack.
+          e->accept (*this);
+        }
+
+      // Subassigns are abit awkward on a stack VM, since we can't
+      // do this piecewise. We need to construct a list of lists of
+      // arguments to all the chained subassigns and feed them to
+      // ov.assign (). TODO: make a "ref_subsasgn()" call or what
+      // ever in ov.cc for the middle in the chain.
+      //
+      // Also, any inclusion of 'end' get quite annoying since we
+      // need to save subsrefs to each chained subexpression to be
+      // able to figure out the subexpressions sizes.
+
+      auto it_args_lists = args_lists.begin ();
+      auto it_dyns_fields = dyns_fields.begin ();
+      auto it_fields_names = fields_names.begin ();
+
+      int active_idx_slot = -1;
+      if (idx_has_ends)
+        {
+          // We need to store the active subexpression in a slot for end
+          // to be able to access it.
+          std::string name = "%active_idx_" + std::to_string (CODE_SIZE ());
+          add_id_to_table (name);
+          active_idx_slot = SLOT (name);
+          // Write the root value to the slot
+          // i.e. root(2:end)(3,end)
+          PUSH_CODE (INSTR::DUP);
+          MAYBE_PUSH_WIDE_OPEXT (active_idx_slot);
+          PUSH_CODE (INSTR::FORCE_ASSIGN);
+          PUSH_SLOT (active_idx_slot);
+        }
+
+      std::vector<int> n_args_per_part;
+
+      for (size_t i = 0; i < n_chained; i++)
+        {
+          // Amount of args in the subexpresseion
+          // E.g. foo(1,2).bar(1) = ... => 2 and 1
+          int n_args_in_part = 0;
+          char type = type_tags[i];
+
+          tree_argument_list *args = *it_args_lists++;
+          tree_expression *dyn_fields = *it_dyns_fields++;
+          string_vector field_names = *it_fields_names++;
+
+          if (type == '.' && dyn_fields)
+            {
+              INC_DEPTH ();
+              dyn_fields->accept (*this);
+              DEC_DEPTH ();
+              n_args_in_part++; // Dynamic struct fields are always one arg
+            }
+          else if (type == '.')
+            {
+              // We want to push the field name as a ovtave_string to the stack
+              std::string field_name = field_names.elem (0);
+              octave_value ov_field_name{field_name};
+              PUSH_DATA (ov_field_name); // Make a constant
+              // Load the constant
+              PUSH_CODE_LOAD_CST (DATA_SIZE () - 1); // Offset of the constant
+
+              n_args_in_part++;
+            }
+          else if (args)
+            {
+              // Push all the args to the stack
+
+              n_args_in_part = args->size ();
+              int j = 0;
+              // We want to push the args to the stack
+              // The order of eval is left to right
+              for (auto it = args->begin (); it != args->end (); it++, j++)
+                {
+                  INC_DEPTH ();
+                  // Any end will work on the active idx slot's object
+                  PUSH_ID_BEGIN_INDEXED (active_idx_slot, j, n_args_in_part, false);
+                  (*it)->accept (*this);
+                  POP_ID_BEING_INDEXED ();
+                  DEC_DEPTH ();
+                }
+            }
+
+          // If we have an end in the assignment we need to write the active subexpression
+          // to the designated slot for end to be able to access it.
+          // Unecessary for the last in the chain.
+          if (idx_has_ends && i + 1 != n_chained)
+            {
+              // Push the prior active index subexpression
+              MAYBE_PUSH_WIDE_OPEXT (active_idx_slot);
+              PUSH_CODE (INSTR::PUSH_SLOT_INDEXED);
+              PUSH_SLOT (active_idx_slot);
+              // Duplicate the args
+              PUSH_CODE (INSTR::DUPN);
+              PUSH_CODE (1); // offset, under the object being indexed
+              PUSH_CODE (n_args_in_part); // amount of objects to duplicate
+              // Index the prior active index subexpression
+              PUSH_CODE (INSTR::INDEX_OBJ);
+              PUSH_CODE (1); // nargout
+              PUSH_CODE (0); // "has slot"
+              PUSH_WSLOT (0); // The w/e slot
+              PUSH_CODE (n_args_in_part);
+              PUSH_CODE (type);
+              // Write the new active subexpression back to the slot
+              MAYBE_PUSH_WIDE_OPEXT (active_idx_slot);
+              PUSH_CODE (INSTR::FORCE_ASSIGN);
+              PUSH_SLOT (active_idx_slot);
+            }
+
+          n_args_per_part.push_back (n_args_in_part);
+        }
+
+      // So we have alot of arguments to different subexpression evaluated on the
+      // stack now.
+      //
+      // We want to put them in lists and feed them to a subsassgn() call. We use
+      // a special op-code for this.
+
+      int slot = 0;
+      if (e->is_identifier ())
+        slot = SLOT (e->name ());
+
+      MAYBE_PUSH_WIDE_OPEXT (slot);
+      PUSH_CODE (INSTR::SUBASSIGN_CHAINED);
+      PUSH_SLOT (slot);
+      PUSH_CODE (op); // =, += etc.
+      PUSH_CODE (n_chained);
+      for (unsigned i = 0; i < n_chained; i++)
+        {
+          PUSH_CODE (n_args_per_part[i]); // Amount of args, left to right
+           // The type, i.e. '.' or '(' or '{'
+          PUSH_CODE (type_tags[i]);
+        }
+
+      // Now we got the value that is subassigned to, on the stack
+      if (e->is_identifier ())
+        {
+          PUSH_CODE (INSTR::POP);
+
+          maybe_emit_push_and_disp_id (expr, e->name ());
+        }
+      else
+        {
+          if (expr.print_result ())
+            {
+              PUSH_CODE (INSTR::DUP);
+              emit_disp_obj (expr);
+            }
+          if (!m_is_anon && DEPTH () == 1)
+            PUSH_CODE (INSTR::POP);
+        }
+    }
+  else if (lhs->is_index_expression()) // eg "foo(2) = bar" or "foo.a = bar"?
+    {
+      /* We have differen op codes for struct, cell, () index assignement
+       * of ids and another for assignments where the rhs of the index is not
+       * an id, e.g. foo.("bar") = 2 */
+
+      CHECK (op == octave_value::assign_op::op_asn_eq);
+
+      // We want the arguments to the index expression on the
+      // operand stack. They are evaluated before the rhs expression.
+      tree_index_expression *idx = dynamic_cast<tree_index_expression*> (lhs);
+
+      tree_expression *ee = idx->expression ();
+      std::list<tree_argument_list *> arg_lists = idx->arg_lists ();
+      std::list<tree_expression *> dyn_fields = idx->dyn_fields ();
+      std::list<string_vector> field_names = idx->arg_names();
+
+      std::string type_tags = idx->type_tags ();
+      CHECK (type_tags.size () == 1);
+      CHECK (dyn_fields.size () == 1);
+      CHECK (arg_lists.size () == 1);
+      CHECK (field_names.size () == 1);
+
+      char type = type_tags[0];
+
+
+      bool is_id = ee->is_identifier ();
+      CHECK_NONNULL(ee);
+
+      bool is_dynamic_field = false;
+      if (type == '.')
+        {
+          tree_expression *dyn_field = dyn_fields.front ();
+          if (dyn_field)
+            is_dynamic_field = true;
+        }
+
+      if (!is_id && type != '.')
+        {
+          tree_argument_list *arg = *arg_lists.begin ();
+
+          // TODO: The other branches evaluate rhs after the arguments.
+          //       Has to be wrong?
+          tree_expression *rhs = expr.right_hand_side ();
+
+          CHECK_NONNULL (rhs);
+          rhs->accept (*this);
+          // The value of rhs is on the stack now
+
+          // Visit the lhs expression
+          ee->accept (*this);
+          // Pushed the left most lhs expression to the stack
+
+          int nargs = 0;
+
+          if (arg)
+            {
+              // If we are indexing an object, and have a magic end index
+              // we need to save the stack depth in a slot
+              bool obj_has_end = false;
+              for (auto it = arg->begin (); it != arg->end (); it++)
+                {
+                  CHECK_NONNULL (*it);
+                  tree_expression &t = **it;
+                  obj_has_end = find_end_walker::has_end (t);
+                  if (obj_has_end)
+                    break;
+                }
+
+              int obj_stack_depth_slot = -1;
+              if (obj_has_end)
+                {
+                  std::string obj_stack_depth_name = "%objsd_" + std::to_string (CODE_SIZE ());
+                  obj_stack_depth_slot = add_id_to_table (obj_stack_depth_name);
+
+                  MAYBE_PUSH_WIDE_OPEXT (obj_stack_depth_slot);
+                  PUSH_CODE (INSTR::SET_SLOT_TO_STACK_DEPTH);
+                  PUSH_SLOT (obj_stack_depth_slot);
+                }
+
+              nargs = arg->size ();
+              int i = 0;
+              // We want to push the args to the stack
+              for (auto it = arg->begin (); it != arg->end (); it++, i++)
+                {
+                  INC_DEPTH ();
+                  PUSH_ID_BEGIN_INDEXED (obj_stack_depth_slot, i, nargs, true);
+                  (*it)->accept (*this);
+                  POP_ID_BEING_INDEXED ();
+                  DEC_DEPTH ();
+                }
+            }
+          // rhs, lhs root expression, lhs's args on the stack now
+
+          PUSH_CODE (INSTR::SUBASSIGN_OBJ);
+          PUSH_CODE (nargs);
+          PUSH_CODE (type);
+
+          if (expr.print_result ())
+            {
+              PUSH_CODE (INSTR::DUP);
+              emit_disp_obj (expr);
+            }
+
+          // SUBASSIGN_OBJ puts the lhs back on the stack
+          // but since lhs is not an id from a slot we just
+          // pop it unless it is used by a chained assign.
+          if (!m_is_anon && DEPTH () == 1)
+            PUSH_CODE (INSTR::POP);
+        }
+      else if (type == '(')
+        {
+          // Name of the identifier
+          std::string name = ee->name ();
+
+          add_id_to_table (name);
+
+          tree_argument_list *arg = *arg_lists.begin ();
+
+          // rhs will be copied to this stack position if assigns are chained.
+          // (Chained as in b(1) = c(2) = 3)
+          if (DEPTH () != 1)
+            PUSH_CODE (INSTR::PUSH_NIL);
+
+          int nargs = 0;
+          if (arg)
+            {
+              nargs = arg->size ();
+              int i = 0;
+              // We want to push the args to the stack
+              for (auto it = arg->begin (); it != arg->end (); it++, i++)
+                {
+                  INC_DEPTH ();
+                  PUSH_ID_BEGIN_INDEXED (SLOT (name), i, nargs, false);
+                  (*it)->accept (*this);
+                  POP_ID_BEING_INDEXED ();
+                  DEC_DEPTH ();
+                }
+            }
+
+          tree_expression *rhs = expr.right_hand_side ();
+
+          CHECK_NONNULL (rhs);
+          rhs->accept (*this);
+          // The value of rhs is on the operand stack now
+
+          // If the assignment is not at root we want to keep the
+          // rhs value on the stack, e.g.
+          //   a = b(1) = 3;
+          //   Gives: a == 3
+          //
+          // If that is the case, we pushed a nil earlier, so we
+          // copy the top of the stack (rhs) to the nils place in the stack.
+          // The copy will then be in place to be rhs again.
+          if (DEPTH () != 1)
+            {
+              PUSH_CODE (INSTR::DUP_MOVE);
+              // There is rhs and n args on the stack, over the nil
+              // we want to copy rhs (the top of the stack) to.
+              PUSH_CODE (nargs + 1);
+            }
+
+          int slot = SLOT (name);
+          MAYBE_PUSH_WIDE_OPEXT (slot);
+          PUSH_CODE (INSTR::SUBASSIGN_ID);
+          PUSH_SLOT (slot);
+          PUSH_CODE (nargs);
+
+          maybe_emit_push_and_disp_id (expr, name);
+        }
+      else if (type == '.')
+        {
+          tree_expression *e = idx->expression ();
+          CHECK_NONNULL(e);
+
+          if (is_id && !is_dynamic_field)
+            {
+              // Name of the identifier
+              std::string name = e->name ();
+
+              add_id_to_table (name);
+
+              std::list<string_vector>  l_pv_nms = idx->arg_names ();
+              CHECK (l_pv_nms.size () == 1);
+              auto pv_nms = l_pv_nms.begin ();
+              CHECK (pv_nms->numel () == 1);
+
+              std::string field_name = pv_nms->elem (0);
+
+              // We just need the field's name in the VM
+              int slot_field = add_id_to_table (field_name);
+
+              tree_expression *rhs = expr.right_hand_side ();
+
+              CHECK_NONNULL (rhs);
+              rhs->accept (*this);
+              // The value of rhs is on the operand stack now
+
+              // If the assignment is not at root we want to keep the
+              // rhs value on the stack, e.g.
+              //   a = b(1) = 3;
+              //   Gives: a == 3
+              //
+              // If that is the case, dup rhs on the stack.
+              // The copy will then be in place to be rhs again.
+              if (DEPTH () != 1)
+                {
+                  // There is 1 rhs and no args on the stack, so just do a dup,
+                  // not a DUP_MOVE like for SUBASSIGN_ID and SUBASSIGN_CELL that
+                  // got args on the stack that need to be moved around.
+                  PUSH_CODE (INSTR::DUP);
+                }
+
+              int slot = SLOT (name);
+              MAYBE_PUSH_WIDE_OPEXT (slot);
+              PUSH_CODE (INSTR::SUBASSIGN_STRUCT);
+              PUSH_SLOT (slot);
+              PUSH_WSLOT (slot_field);
+
+              maybe_emit_push_and_disp_id (expr, name);
+            }
+          else if (is_dynamic_field && is_id)
+            {
+              // Name of the identifier
+              std::string name = e->name ();
+
+              add_id_to_table (name);
+
+              tree_expression *rhs = expr.right_hand_side ();
+              CHECK_NONNULL (rhs);
+              rhs->accept (*this);
+              // The value of rhs is on the stack now
+
+              // We want lhs on the stack
+              int slot = SLOT (name);
+              MAYBE_PUSH_WIDE_OPEXT (slot);
+              PUSH_CODE (INSTR::PUSH_SLOT_INDEXED);
+              PUSH_SLOT (slot);
+
+              // The argument, foo.(arg) = bar
+              tree_expression *dyn_expr = dyn_fields.front ();
+              CHECK_NONNULL (dyn_expr);
+
+              INC_DEPTH ();
+              PUSH_NARGOUT (1);
+              dyn_expr->accept (*this);
+              // The value of the arg on the stack, i.e. foo.(arg) = baz
+              POP_NARGOUT ();
+              DEC_DEPTH ();
+
+              PUSH_CODE (INSTR::SUBASSIGN_OBJ);
+              PUSH_CODE (1); // nargout
+              PUSH_CODE (type);
+
+              if (DEPTH () != 1) // Chained assignments?
+                PUSH_CODE (INSTR::DUP);
+
+              // Assign the assigned to value back to the slot
+              // TODO: Neccessary?
+              MAYBE_PUSH_WIDE_OPEXT (slot);
+              PUSH_CODE (INSTR::FORCE_ASSIGN);
+              PUSH_SLOT (slot);
+
+              maybe_emit_push_and_disp_id (expr, name);
+            }
+          else if (!is_dynamic_field && !is_id)
+            {
+              tree_expression *rhs = expr.right_hand_side ();
+              CHECK_NONNULL (rhs);
+              rhs->accept (*this);
+              // The value of rhs is on the operand stack now
+
+              // Visit the lhs expression
+              e->accept (*this);
+              // Pushed the left most lhs expression to the stack
+
+              string_vector ptr = field_names.front ();
+              CHECK (ptr.numel() == 1);
+              std::string field_name = ptr.elem (0);
+
+              /* Make a ov string with the field name in it that
+                * we store as a constant. */
+              octave_value ov_field_name{field_name};
+              PUSH_DATA (ov_field_name);
+
+              PUSH_CODE_LOAD_CST (DATA_SIZE () - 1); // Offset of the constant
+
+              PUSH_CODE (INSTR::SUBASSIGN_OBJ);
+              PUSH_CODE (1); // nargout
+              PUSH_CODE (type);
+
+              if (expr.print_result ())
+                {
+                  PUSH_CODE (INSTR::DUP);
+                  emit_disp_obj (expr);
+                }
+
+              // SUBASSIGN_OBJ puts the lhs back on the stack
+              // but since lhs is not an id from a slot we just
+              // pop it, unless there are chained assignments.
+              if (!m_is_anon && DEPTH () == 1)
+                PUSH_CODE (INSTR::POP);
+            }
+          else //(is_dynamic_field && !is_id)
+            {
+              tree_expression *rhs = expr.right_hand_side ();
+              CHECK_NONNULL (rhs);
+              rhs->accept (*this);
+              // The value of rhs is on the operand stack now
+
+              // Visit the lhs expression
+              e->accept (*this);
+              // Pushed the left most lhs expression to the stack
+
+              // The argument, foo.(arg) = bar
+              tree_expression *dyn_expr = dyn_fields.front ();
+              CHECK_NONNULL (dyn_expr);
+
+              INC_DEPTH ();
+              PUSH_NARGOUT (1);
+              dyn_expr->accept (*this);
+              // The value of the arg on the stack, i.e. foo.(arg) = baz
+              POP_NARGOUT ();
+              DEC_DEPTH ();
+
+              PUSH_CODE (INSTR::SUBASSIGN_OBJ);
+              PUSH_CODE (1); // nargout
+              PUSH_CODE (type);
+
+              if (expr.print_result ())
+                {
+                  PUSH_CODE (INSTR::DUP);
+                  emit_disp_obj (expr);
+                }
+
+              // SUBASSIGN_OBJ puts the lhs back on the stack
+              // but since lhs is not an id from a slot we just
+              // pop it, unless there are chained assignments.
+              if (!m_is_anon && DEPTH () == 1)
+                PUSH_CODE (INSTR::POP);
+            }
+        }
+      else if (type == '{')
+        {
+          tree_expression *e = idx->expression ();
+          CHECK_NONNULL(e);
+          CHECK (e->is_identifier ());
+
+          // Name of the identifier
+          std::string name = e->name ();
+
+          add_id_to_table (name);
+
+          CHECK (arg_lists.size ());
+          tree_argument_list *arg = *arg_lists.begin ();
+
+          // rhs will be copied to this stack position if assigns are chained.
+          // (Chained as in b(1) = c(2) = 3)
+          if (DEPTH () != 1)
+            PUSH_CODE (INSTR::PUSH_NIL);
+
+          int nargs = 0;
+          if (arg)
+            {
+              nargs = arg->size ();
+              int i = 0;
+              // We want to push the args to the stack
+              for (auto it = arg->begin (); it != arg->end (); it++, i++)
+                {
+                  INC_DEPTH ();
+                  PUSH_ID_BEGIN_INDEXED (SLOT (name), i, nargs, false);
+                  (*it)->accept (*this);
+                  POP_ID_BEING_INDEXED ();
+                  DEC_DEPTH ();
+                }
+            }
+
+          tree_expression *rhs = expr.right_hand_side ();
+
+          CHECK_NONNULL (rhs);
+          rhs->accept (*this);
+          // The value of rhs is on the operand stack now
+
+          // If the assignment is not at root we want to keep the
+          // rhs value on the stack, e.g.
+          //   a = b(1) = 3;
+          //   Gives: a == 3
+          //
+          // If that is the case, we pushed a nil earlier, so we
+          // copy the top of the stack (rhs) to the nils place in the stack.
+          // The copy will then be in place to be rhs again.
+          if (DEPTH () != 1)
+            {
+              PUSH_CODE (INSTR::DUP_MOVE);
+              // There is rhs and n args on the stack, over the nil
+              // we want to copy rhs (the top of the stack) to.
+              PUSH_CODE (nargs + 1);
+            }
+
+          int slot = SLOT (name);
+          MAYBE_PUSH_WIDE_OPEXT (slot);
+          PUSH_CODE (INSTR::SUBASSIGN_CELL_ID);
+          PUSH_SLOT (slot);
+          PUSH_CODE (nargs);
+
+          maybe_emit_push_and_disp_id (expr, name);
+        }
+      else
+        TODO ("Type of subassignment not done yet");
+    }
+  else if (lhs->is_identifier ())
+    {
+      std::string name = lhs->name ();
+
+      int slot = add_id_to_table (name);
+
+      tree_expression *rhs = expr.right_hand_side ();
+
+      CHECK_NONNULL (rhs);
+      rhs->accept (*this);
+      // The value of rhs is on the operand stack now
+
+      if (op != octave_value::assign_op::op_asn_eq)
+        {
+          // Compound assignment have the type of operation in the code
+          MAYBE_PUSH_WIDE_OPEXT (slot);
+          PUSH_CODE (INSTR::ASSIGN_COMPOUND);
+          PUSH_SLOT (slot);
+          PUSH_CODE (op);
+        }
+      else
+        {
+          // Ordinary assignment has its own opcode.
+          MAYBE_PUSH_WIDE_OPEXT (slot);
+          PUSH_CODE (INSTR::ASSIGN);
+          PUSH_SLOT (slot);
+        }
+
+      // If the assignment is not at root we want to keep the
+      // value on the stack, e.g.
+      // a = (b = 3);
+      if (DEPTH () != 1)
+        {
+          MAYBE_PUSH_WIDE_OPEXT (slot);
+          PUSH_CODE (INSTR::PUSH_SLOT_INDEXED);
+          PUSH_SLOT (slot);
+        }
+
+      maybe_emit_push_and_disp_id (expr, name);
+    }
+
+  POP_NARGOUT ();
+  DEC_DEPTH();
+}
+
+// Emit bytecode for a matrix literal, e.g. "[1 2; 3 4]".
+//
+// Each element expression is visited so that its value ends up on the
+// operand stack, then a MATRIX or MATRIX_UNEVEN opcode is emitted to
+// collect the pushed values into a matrix.
+void
+bytecode_walker::
+visit_matrix (tree_matrix &m)
+{
+  INC_DEPTH ();
+  m_unknown_nargout++;
+
+  bool is_rectangle = true;
+  std::vector<int> row_lengths;
+
+  /* We want to know if the matrix is rectangular. I.e.
+   * all rows are of equal length. */
+  size_t first_row_size = static_cast<size_t> (-1);
+  for (auto it = m.begin (); it != m.end (); it++)
+  {
+    // This is a row
+    tree_argument_list *row = *it;
+    size_t row_size = row->size ();
+
+    if (first_row_size == static_cast<size_t> (-1))
+      first_row_size = row_size;
+    else if (first_row_size != row_size)
+      is_rectangle = false;
+
+    row_lengths.push_back (row_size);
+  }
+
+
+  auto p = m.begin ();
+  int n_rows = 0;
+  int n_cols = 0; // After the loop: the column count of the last row
+
+  // Push each row element to operand stack
+  while (p != m.end ())
+    {
+      // This is a row
+      tree_argument_list *elt = *p++;
+
+      n_cols = 0;
+      CHECK_NONNULL (elt);
+      for (auto it = elt->begin (); it != elt->end (); it++)
+        {
+          // This is an element
+          tree_expression *e = *it;
+          CHECK_NONNULL (e);
+
+          INC_DEPTH ();
+          e->accept (*this);
+          DEC_DEPTH ();
+          n_cols++;
+        }
+      n_rows++;
+    }
+
+  CHECK (n_cols > 0);
+  CHECK (n_rows > 0);
+
+  // Small rectangular matrices fit their dimensions in single-byte
+  // opcode arguments; bigger or ragged ones need the wider encoding.
+  if (is_rectangle && n_cols < 256 && n_rows < 256) // Small rectangle matrix
+    {
+      PUSH_CODE (INSTR::MATRIX);
+      PUSH_CODE (n_rows);
+      PUSH_CODE (n_cols);
+    }
+  else if (is_rectangle) // Big rectangle matrix
+    {
+      PUSH_CODE (INSTR::MATRIX_UNEVEN);
+      PUSH_CODE (1); // Type 1, Big rectangle matrix
+      PUSH_CODE_INT (n_rows);
+      PUSH_CODE_INT (n_cols);
+    }
+  else // Uneven matrix
+    {
+      PUSH_CODE (INSTR::MATRIX_UNEVEN);
+      PUSH_CODE (0); // Type 0, Uneven matrix
+      PUSH_CODE_INT (n_rows);
+      for (int i : row_lengths)
+        PUSH_CODE_INT (i);
+    }
+
+  maybe_emit_bind_ans_and_disp (m);
+
+  DEC_DEPTH ();
+  m_unknown_nargout--;
+}
+
+// Emit bytecode for a cell literal, e.g. "{1, 'two'; 3, 4}".
+//
+// First a PUSH_CELL (or PUSH_CELL_BIG) opcode creates a cell sized by
+// an initial row/column guess, then each element is pushed and appended
+// with APPEND_CELL opcodes whose trailing marker byte encodes row/end
+// positions for the VM.
+void
+bytecode_walker::
+visit_cell (tree_cell &m)
+{
+  INC_DEPTH ();
+  m_unknown_nargout++;
+
+  octave_idx_type n_cols = 0;
+  octave_idx_type n_rows = 0;
+
+  // Count the amount of rows and columns for an initial guess on the size
+  // of the cell.
+  auto p = m.begin ();
+  while (p != m.end ())
+    {
+      // This is a row
+      tree_argument_list *elt = *p++;
+      n_rows++;
+
+      CHECK_NONNULL (elt);
+
+      octave_idx_type n_cols_this_row = 0;
+      for (auto it = elt->begin (); it != elt->end (); it++)
+        n_cols_this_row++;
+
+      // Keep the widest row as the column guess
+      if (n_cols_this_row > n_cols)
+        n_cols = n_cols_this_row;
+    }
+
+  if (n_cols < 256 && n_rows < 256)
+    {
+      PUSH_CODE (INSTR::PUSH_CELL);
+      PUSH_CODE (n_rows);
+      PUSH_CODE (n_cols);
+    }
+  else
+    {
+      PUSH_CODE (INSTR::PUSH_CELL_BIG);
+      PUSH_CODE_INT (n_rows);
+      PUSH_CODE_INT (n_cols);
+    }
+
+  // Code to push each row arg to operand stack, with a APPEND_CELL after it.
+  p = m.begin ();
+  octave_idx_type row_i = 0;
+  while (p != m.end ())
+    {
+      // This is a row
+      tree_argument_list *elt = *p++;
+
+      CHECK_NONNULL (elt);
+      octave_idx_type n_cols_this_row = 0;
+      for (auto it = elt->begin (); it != elt->end (); /* ++it in if below */)
+        {
+          // This is an element
+          tree_expression *e = *it;
+          CHECK_NONNULL (e);
+
+          PUSH_NARGOUT (1);
+          INC_DEPTH ();
+          e->accept (*this);
+          DEC_DEPTH ();
+          POP_NARGOUT ();
+
+          n_cols_this_row++;
+
+          // The last APPEND_CELL in a row need special markers
+          if (++it != elt->end ()) // Not last?
+            {
+              PUSH_CODE (INSTR::APPEND_CELL);
+              PUSH_CODE (0); // 0 => Not last APPEND_CELL in row
+            }
+        }
+
+      // If there are no args in the row, e.g. 'a = {b;;}', the APPEND_CELL still need something
+      // to grab on the stack, that will not be added to the row.
+      if (n_cols_this_row == 0)
+        PUSH_CODE (INSTR::PUSH_NIL); // Dummy value
+
+      // The APPEND_CELL opcode inserts element into a cell put on the stack by PUSH_CELL.
+      // The opcode after APPEND_CELL tells it whether the APPEND_CELL is the last in a row.
+      PUSH_CODE (INSTR::APPEND_CELL);
+      if (p == m.end ()) // Is this the last row?
+        {
+          if (n_rows == 1)
+            PUSH_CODE (3); // Last column in last row, only one row total
+          else
+            PUSH_CODE (2); // Last column in last row, more than one row total
+        }
+      else if (row_i == 0)
+        PUSH_CODE (4); // Last column in first row, more than one row total
+      else
+        PUSH_CODE (1); // Last column in row, more than one row total
+
+      row_i++;
+    }
+
+  maybe_emit_bind_ans_and_disp (m);
+
+  DEC_DEPTH ();
+  m_unknown_nargout--;
+}
+
+// Emit bytecode for a function definition inside a script.
+//
+// The function's octave_value is stored in the constant data area and
+// an INSTALL_FUNCTION opcode installs it at runtime when execution
+// reaches the definition's row.
+void
+bytecode_walker::
+visit_function_def (tree_function_def &cmd)
+{
+  // Function definitions in scripts are supposed to be installed
+  // at runtime when reaching the relevant row.
+  //
+  // So we store the function in the constant data and load it
+  // with INSTALL_FUNCTION.
+
+  CHECK (m_is_script);
+
+  octave_value fcn = cmd.function ();
+  octave_function *f = fcn.function_value ();
+
+  // tree_evaluator just does nothing on nullptr, but abort compilation instead here
+  CHECK_NONNULL (f);
+
+  std::string name = f->name ();
+  int slot = add_id_to_table (name);
+
+  int data_offset = DATA_SIZE ();
+  PUSH_DATA (fcn);
+
+  PUSH_CODE (INSTR::INSTALL_FUNCTION);
+  PUSH_SLOT (slot);
+  PUSH_CODE_INT (data_offset);
+}
+
+// Emit bytecode for an identifier reference.
+//
+// Handles the special pseudo-identifiers "__VM_DBG" and the magic
+// "end" inside indexing expressions, then dispatches on how the id is
+// used (postfix-indexed, statement at root, value with some nargout)
+// to pick the appropriate PUSH_SLOT_* opcode variant. Also records
+// source-location data for error messages and the debugger.
+void
+bytecode_walker::
+visit_identifier (tree_identifier& id)
+{
+  INC_DEPTH();
+
+  maybe_emit_anon_maybe_ignore_outputs ();
+
+  std::string name = id.name ();
+  if (name == "__VM_DBG")
+  {
+    PUSH_CODE (INSTR::PUSH_FALSE); // An id need to put something on the stack
+    PUSH_CODE (INSTR::DEBUG);
+  }
+  // The magic end id need special handling
+  else if (name == "end")
+    {
+      CHECK (ID_IS_BEING_INDEXED ());
+
+      // Since in e.g. "M = [1 2 3]; M (min (10, end))" the 'end' will
+      // refer to the end of M, not the function min, we need a special
+      // op-code for nested indexings that can refer to any outer object
+      int n_ids = N_IDS_BEING_INDEXED ();
+
+      if (n_ids == 1) // Simple case
+        {
+          id_being_indexed obj = PEEK_ID_BEING_INDEXED ();
+          if (obj.type == 0)
+            {
+              /* TODO: Is this op-code with slots really needed? */
+              MAYBE_PUSH_WIDE_OPEXT (obj.slot);
+              PUSH_CODE (INSTR::END_ID);
+              PUSH_SLOT (obj.slot); // The slot variable being indexed
+              PUSH_CODE (obj.nargs); // The amount of dimensions being indexed
+              PUSH_CODE (obj.idx); // The offset of the index being indexed right now
+            }
+          else if (obj.type == 1)
+            {
+              MAYBE_PUSH_WIDE_OPEXT (obj.slot);
+              PUSH_CODE (INSTR::END_OBJ);
+              // Slot for keeping the stack depth of the object being indexed
+              PUSH_SLOT (obj.slot);
+              PUSH_CODE (obj.nargs); // The amount of dimensions being indexed
+              PUSH_CODE (obj.idx); // The offset of the index being indexed right now
+            }
+          else
+            panic_impossible ();
+        }
+      else // Nested indexing
+        {
+          PUSH_CODE (INSTR::END_X_N);
+          PUSH_CODE (n_ids);
+
+          // Note: Pushing inner to outer.
+          // foo (bar (baz (1, end))) => 1: baz, 2: bar, 3: foo
+          for (int i = n_ids - 1; i >= 0; i--)
+            {
+              id_being_indexed obj = IDS_BEING_INDEXED (i);
+              PUSH_CODE (obj.nargs);
+              PUSH_CODE (obj.idx);
+              PUSH_CODE (obj.type);
+              PUSH_WSLOT (obj.slot);
+            }
+        }
+    }
+  else
+    {
+      int slot = add_id_to_table (name);
+
+      // Record source-location data spanning the emitted opcodes
+      int loc_id = N_LOC ();
+      PUSH_LOC ();
+      LOC (loc_id).m_ip_start = CODE_SIZE ();
+
+      if (m_pending_ignore_outputs && DEPTH () == 2)
+        {
+          PUSH_CODE (INSTR::SET_IGNORE_OUTPUTS);
+          PUSH_CODE (m_v_ignored.size ());
+          PUSH_CODE (m_ignored_of_total);
+          for (int i : m_v_ignored)
+            PUSH_CODE (i);
+          m_ignored_ip_start = CODE_SIZE (); // visit_multi_assignment () need the code offset to set the proper range for the unwind protect
+        }
+
+      if (id.is_postfix_indexed ())
+        {
+          // "foo.a" and "foo{1}" might be command function calls
+          // which is checked for in PUSH_SLOT_NARGOUT1_SPECIAL
+          // Also foo might be a classdef meta object.
+          MAYBE_PUSH_WIDE_OPEXT (slot);
+          if (id.postfix_index () != '(')
+            PUSH_CODE (INSTR::PUSH_SLOT_NARGOUT1_SPECIAL);
+          else
+            PUSH_CODE (INSTR::PUSH_SLOT_INDEXED);
+          PUSH_SLOT (slot);
+        }
+      else if (DEPTH () == 1 && NARGOUT () != -1)
+        {
+          CHECK (NARGOUT () == 0);
+
+          if (id.print_result ())
+            {
+              // Need to keep track of if this is a command call
+              // or not for display since "x" will print "x = 3"
+              // for e.g. variables but "ans = 3" for command calls.
+              std::string maybe_cmd_name = "%maybe_command";
+              int slot_cmd = add_id_to_table (maybe_cmd_name);
+              MAYBE_PUSH_WIDE_OPEXT (slot);
+              PUSH_CODE (INSTR::PUSH_SLOT_DISP);
+              PUSH_SLOT (slot);
+              PUSH_WSLOT (slot_cmd);
+
+              maybe_emit_bind_ans_and_disp (id, maybe_cmd_name);
+            }
+          else
+            {
+              MAYBE_PUSH_WIDE_OPEXT (slot);
+              PUSH_CODE (INSTR::PUSH_SLOT_NARGOUT0);
+              PUSH_SLOT (slot);
+
+              // Write the return value to ans. It is either the variables
+              // value straight off, or e.g. a cmd function call return value.
+              maybe_emit_bind_ans_and_disp (id);
+            }
+        }
+      else if (NARGOUT () == 1)
+        {
+          // Push the local at its slot number to the stack
+          MAYBE_PUSH_WIDE_OPEXT (slot);
+          if (name == "pi")
+            PUSH_CODE (INSTR::PUSH_PI); // Specialization that pushes pi fast
+          else if (name == "i" || name == "I" || name == "j" || name == "J")
+            {
+              // If the id is assigned to anywhere in the function, we don't use the
+              // specialization.
+              if (m_set_assigned_ids.find (name) == m_set_assigned_ids.end ())
+                PUSH_CODE (INSTR::PUSH_I); // Specialization that pushes imaginary unit fast
+              else
+                PUSH_CODE (INSTR::PUSH_SLOT_NARGOUT1);
+            }
+          else if (name == "e")
+            {
+              if (m_set_assigned_ids.find (name) == m_set_assigned_ids.end ())
+                PUSH_CODE (INSTR::PUSH_E); // Specialization that pushes e fast
+              else
+                PUSH_CODE (INSTR::PUSH_SLOT_NARGOUT1);
+            }
+          else
+            PUSH_CODE (INSTR::PUSH_SLOT_NARGOUT1);
+          PUSH_SLOT (slot);
+        }
+      else if (NARGOUT () == -1)
+        {
+          // -1 nargout means dynamic nargout (anonymous functions)
+          MAYBE_PUSH_WIDE_OPEXT (slot);
+          PUSH_CODE (INSTR::PUSH_SLOT_NX);
+          PUSH_SLOT (slot);
+        }
+      else if (NARGOUT() > 1)
+        {
+          MAYBE_PUSH_WIDE_OPEXT (slot);
+          PUSH_CODE (INSTR::PUSH_SLOT_NARGOUTN);
+          PUSH_SLOT (slot);
+          PUSH_CODE (NARGOUT ());
+        }
+      else
+        {
+          // Push the local at its slot number to the stack
+          MAYBE_PUSH_WIDE_OPEXT (slot);
+          PUSH_CODE (INSTR::PUSH_SLOT_NARGOUT0);
+          PUSH_SLOT (slot);
+        }
+
+      LOC (loc_id).m_ip_end = CODE_SIZE ();
+      LOC (loc_id).m_col = id.column ();
+      LOC (loc_id).m_line = id.line ();
+    }
+  DEC_DEPTH();
+}
+
+// Return the slot number for identifier 'name' in the local table,
+// adding a new slot if the id has not been seen before.
+int
+bytecode_walker::
+add_id_to_table (std::string name)
+{
+  // Is the id already added to the local table?
+  auto it = m_map_locals_to_slot.find (name);
+
+  if (it == m_map_locals_to_slot.end ())
+    {
+      // Push local
+      m_code.m_ids.push_back(name);
+      m_map_locals_to_slot[name] = m_n_locals++;
+
+      return m_n_locals - 1;
+    }
+
+  return it->second;
+}
+
+// Emit bytecode for a no-op command.
+//
+// Only the end-of-function/script marker produces code: a trailing
+// return so execution cannot run off the end of the bytecode.
+void
+bytecode_walker::
+visit_no_op_command (tree_no_op_command& cmd)
+{
+  if (cmd.is_end_of_fcn_or_script())
+    {
+      // Put a return in the end so that we don't fall off the edge
+      // of the world
+
+      int loc_id = N_LOC ();
+      PUSH_LOC ();
+      LOC (loc_id).m_ip_start = CODE_SIZE ();
+
+      PUSH_TREE_FOR_DBG (&cmd);
+      emit_return ();
+
+      LOC (loc_id).m_ip_end = CODE_SIZE ();
+      LOC (loc_id).m_col = cmd.column ();
+      LOC (loc_id).m_line = cmd.line ();
+    }
+}
+
+// Emit bytecode for a do-until loop.
+//
+// The body is emitted first, then the condition; a JMP_IFN jumps back
+// to the body start while the condition is false. 'continue' targets
+// the condition, 'break' targets the code after the loop.
+void
+bytecode_walker::
+visit_do_until_command (tree_do_until_command& cmd)
+{
+  tree_expression *expr = cmd.condition ();
+  int code_start = CODE_SIZE ();
+
+  tree_statement_list *list = cmd.body ();
+
+  PUSH_CONTINUE_TARGET ();
+  PUSH_BREAKS ();
+
+  // Push an opcode that checks for signals, e.g. ctrl-c
+  PUSH_CODE (INSTR::HANDLE_SIGNALS);
+
+  // An empty body will yield a null list pointer
+  m_n_nested_loops++;
+  if (list)
+    list->accept (*this);
+  m_n_nested_loops--;
+
+  // Any continue jumps to here (before the condition)
+  for (int offset : POP_CONTINUE_TARGET())
+    SET_CODE_SHORT (offset, CODE_SIZE ());
+
+  CHECK_NONNULL (expr);
+  INC_DEPTH (); // Since we need the value
+  PUSH_TREE_FOR_DBG (expr);
+  expr->accept (*this);
+  DEC_DEPTH ();
+
+  // The condition value is on the operand stack, do
+  // a jmp_ifn to the start of the body, on false
+  PUSH_CODE (INSTR::JMP_IFN);
+  PUSH_CODE_SHORT (code_start);
+
+  // The breaks jump to here
+  for (int offset : POP_BREAKS ())
+    SET_CODE_SHORT (offset, CODE_SIZE ());
+}
+
+// Emit bytecode for a while loop.
+//
+// The condition is emitted first with a JMP_IFN whose target (after
+// the body) is back-patched once the body size is known. The body ends
+// with an unconditional jump back to the condition. 'continue' targets
+// the condition, 'break' targets the code after the loop.
+void
+bytecode_walker::
+visit_while_command (tree_while_command& cmd)
+{
+  tree_expression *expr = cmd.condition ();
+
+  // Location data for the condition
+  int loc_id = N_LOC ();
+  PUSH_LOC ();
+  LOC (loc_id).m_ip_start = CODE_SIZE ();
+
+  int cond_offset = CODE_SIZE ();
+
+  CHECK_NONNULL (expr);
+  INC_DEPTH (); // Since we need the value
+  PUSH_TREE_FOR_DBG (expr);
+  expr->accept (*this);
+  DEC_DEPTH ();
+
+  // The condition value is on the operand stack, do
+  // a jmp_ifn to after the body, on false
+  PUSH_CODE (INSTR::JMP_IFN);
+  int offset_need_jmp_after = CODE_SIZE ();
+  PUSH_CODE_SHORT (-1); // Placeholder
+
+  LOC (loc_id).m_ip_end = CODE_SIZE ();
+  LOC (loc_id).m_col = expr->column ();
+  LOC (loc_id).m_line = expr->line ();
+
+  tree_statement_list *list = cmd.body ();
+
+  PUSH_CONTINUE_TARGET ();
+  PUSH_BREAKS ();
+
+  // Push an opcode that checks for signals, e.g. ctrl-c
+  PUSH_CODE (INSTR::HANDLE_SIGNALS);
+
+  // nullptr if body is empty
+  m_n_nested_loops++;
+  if (list)
+    list->accept (*this);
+  m_n_nested_loops--;
+
+  // The continue targets can now be set, to jump back
+  // to the condition.
+  for (int offset : POP_CONTINUE_TARGET())
+    SET_CODE_SHORT (offset, cond_offset);
+
+  // Jump back to the condition, TODO: unless all paths are terminated
+  PUSH_CODE (INSTR::JMP);
+  PUSH_CODE_SHORT (cond_offset);
+
+  // Now we can set where the condition should jump on false, i.e.
+  // to here, after the jump back to the condition
+  SET_CODE_SHORT (offset_need_jmp_after, CODE_SIZE ());
+
+  // The breaks jump to the same place
+  for (int offset : POP_BREAKS ())
+    SET_CODE_SHORT (offset, CODE_SIZE ());
+}
+
+// Emit bytecode for a switch statement.
+//
+// The switch value stays on the operand stack for the whole statement;
+// each case label DUPs it, compares with JMP_IFNCASEMATCH and falls
+// through to the next case on mismatch. The default case is emitted
+// last. Nested break/continue need bridge code that pops the switch
+// value before jumping out.
+void
+bytecode_walker::
+visit_switch_command (tree_switch_command& cmd)
+{
+  tree_expression *expr = cmd.switch_value ();
+  CHECK_NONNULL (expr);
+
+  tree_switch_case_list *lst = cmd.case_list ();
+
+  std::vector<int> need_after_all;
+
+  // First off we need the switch value on the stack
+  INC_DEPTH ();
+  PUSH_NARGOUT(1);
+
+  expr->accept (*this);
+
+  POP_NARGOUT ();
+  DEC_DEPTH ();
+
+  // Since the switch has a value on the stack through the whole switch
+  // statement we need to track that so returns can pop it.
+  PUSH_NESTING_STATEMENT (nesting_statement::ONE_OV_ON_STACK);
+
+  // Any nested continue or break need to pop the switch value
+  PUSH_CONTINUE_TARGET ();
+  PUSH_BREAKS ();
+
+  // We now have the switch value on the operand stack,
+  // so now we need to compare it with the first label
+  // either execute its code or skip it depending on
+  // whether the switch value and the label are "equal"
+
+  tree_switch_case *default_case = nullptr;
+
+  if (lst)
+    for (tree_switch_case *t : *lst)
+      {
+        // We want to do the default case last
+        if (t->is_default_case ())
+          {
+            default_case = t;
+            continue;
+          }
+
+        // We need to duplicate the switch value on the stack so
+        // each label will have its own
+        PUSH_CODE (INSTR::DUP);
+
+        INC_DEPTH ();
+        PUSH_NARGOUT(1);
+
+        // Walk for code for the case label expression
+        t->case_label()->accept(*this);
+
+        POP_NARGOUT ();
+        DEC_DEPTH ();
+
+        // case label value is now on the stack
+
+        PUSH_CODE (INSTR::JMP_IFNCASEMATCH);
+        int need_next = CODE_SIZE ();
+        PUSH_CODE_SHORT (-1);
+
+        // Walk for the case label body
+
+        tree_statement_list *stmt_lst = t->commands ();
+
+        if (stmt_lst)
+          stmt_lst->accept (*this);
+
+        // TODO: Unless the body is terminated we need to jump past
+        // the rest of the switch bodies
+        PUSH_CODE (INSTR::JMP);
+        need_after_all.push_back (CODE_SIZE ());
+        PUSH_CODE_SHORT (-1); // Placeholder, jump to after all
+
+        // If the label was not "true" we jump to here. Under there will be
+        // another case or the end of the switch
+        SET_CODE_SHORT (need_next, CODE_SIZE ()); // The placeholder above
+      }
+
+  // If there was a default case, generate code for it
+  if (default_case)
+    {
+      tree_statement_list *stmt_lst = default_case->commands();
+
+      if (stmt_lst)
+        stmt_lst->accept (*this);
+    }
+
+  // Any nested break or continue need to jump here to pop an ov
+  // and then jump to an outer break or continue block.
+  auto v_breaks = POP_BREAKS ();
+  auto v_continues = POP_CONTINUE_TARGET ();
+
+  if (v_breaks.size () || v_continues.size ())
+    {
+      // Fallthrough from default need to jump past break and continue bridges
+      PUSH_CODE (INSTR::JMP);
+      int offset = CODE_SIZE ();
+      need_after_all.push_back (offset);
+      PUSH_CODE_SHORT (-1);
+    }
+
+  if (v_breaks.size ())
+    {
+      for (int offset : v_breaks)
+        SET_CODE_SHORT (offset, CODE_SIZE ());
+      // We need to pop the switch value
+      PUSH_CODE (INSTR::POP);
+      // Jump to the outer break target
+      PUSH_CODE (INSTR::JMP);
+      int offset = CODE_SIZE ();
+      PUSH_CODE_SHORT (-1);
+      PUSH_NEED_BREAK (offset);
+    }
+  if (v_continues.size ())
+    {
+      // Nested continues should jump to here
+      int target_offset = CODE_SIZE ();
+      for (int offset : v_continues)
+        SET_CODE_SHORT (offset, target_offset);
+      // We need to pop the switch value
+      PUSH_CODE (INSTR::POP);
+      // Jump to the outer continue target (i.e. start of whatever loop)
+      PUSH_CODE (INSTR::JMP);
+      int need_continue_target = CODE_SIZE ();
+      PUSH_CODE_SHORT (-1);
+      PUSH_NEED_CONTINUE_TARGET (need_continue_target);
+    }
+
+  // Some code points might need a jump to after the switch statement
+  for (int offset : need_after_all)
+    SET_CODE_SHORT (offset, CODE_SIZE ());
+
+  // We need to pop the switch value
+  PUSH_CODE (INSTR::POP);
+
+  // We are out of the switch statement so pop it from the nesting stack
+  POP_NESTING_STATEMENT ();
+}
+
+// Emit bytecode for an if/elseif/else statement.
+//
+// Each non-else clause emits its condition followed by a JMP_IFN whose
+// target (the next clause) is back-patched after the body. Each body
+// except the last also emits a jump past the remaining clauses.
+void
+bytecode_walker::
+visit_if_command (tree_if_command& cmd)
+{
+  tree_if_command_list *list = cmd.cmd_list ();
+  CHECK_NONNULL (list);
+
+  // Offset to jump addresses that will need to jump
+  // to after all the if clauses and bodies. E.g.
+  // the end of each if body, if there are more than one if.
+  std::vector<int> need_after_all;
+
+  // Offset for the jump address of the condition test, which
+  // needs to go to after the body.
+  int need_after_body = -1;
+
+  std::size_t n = list->size ();
+
+  std::size_t idx = 0;
+  for (auto p = list->begin (); p != list->end (); p++, idx++)
+    {
+      bool is_last = idx + 1 == n;
+
+      tree_if_clause *elt = *p;
+      CHECK_NONNULL (elt);
+
+      tree_statement_list *body = elt->commands ();
+
+      bool is_not_else = ! elt->is_else_clause ();
+      // Condition
+      if (is_not_else)
+        {
+          tree_expression *cond = elt->condition ();
+          CHECK_NONNULL (cond);
+
+          // Location data for the condition
+          int loc_id = N_LOC ();
+          PUSH_LOC ();
+          LOC (loc_id).m_ip_start = CODE_SIZE ();
+
+          PUSH_TREE_FOR_DBG (elt); // We want the debug hit before the condition
+
+          PUSH_NARGOUT (1);
+          INC_DEPTH ();
+          cond->accept (*this);
+          DEC_DEPTH ();
+          POP_NARGOUT ();
+
+          // The condition is on the operand stack now
+          PUSH_CODE (INSTR::JMP_IFN);
+          need_after_body = CODE_SIZE ();
+          PUSH_CODE_SHORT (-1); // Placeholder, jump to after all
+
+          LOC (loc_id).m_ip_end = CODE_SIZE ();
+          LOC (loc_id).m_col = cond->column ();
+          LOC (loc_id).m_line = cond->line ();
+        }
+
+      // Body
+      // nullptr if body is empty
+      if (body)
+        body->accept (*this);
+
+      if (!is_last)
+        {
+          PUSH_CODE (INSTR::JMP);
+          need_after_all.push_back (CODE_SIZE ());
+          PUSH_CODE_SHORT (-1); // Placeholder, jump to after all
+        }
+
+      // Now we can set the address to which failed condition
+      // will jump
+      if (is_not_else)
+        SET_CODE_SHORT (need_after_body, CODE_SIZE ());
+    }
+
+  for (int offset : need_after_all)
+    SET_CODE_SHORT (offset, CODE_SIZE ());
+}
+
+// Emit bytecode for an anonymous function handle, e.g. "@(x) x + 1".
+//
+// The handle's parse tree is stored for later evaluation and a
+// PUSH_ANON_FCN_HANDLE opcode pushes the handle at runtime.
+void
+bytecode_walker::
+visit_anon_fcn_handle (tree_anon_fcn_handle &expr)
+{
+  INC_DEPTH ();
+
+  PUSH_TREE_FOR_EVAL (&expr);
+  int tree_idx = -CODE_SIZE (); // Negative to not collide with debug data
+
+  PUSH_CODE (INSTR::PUSH_ANON_FCN_HANDLE);
+  PUSH_CODE_INT (tree_idx);
+
+  maybe_emit_bind_ans_and_disp (expr);
+
+  DEC_DEPTH ();
+}
+
+// Push the arguments of an index expression to the operand stack.
+//
+// If the indexed object is not a plain identifier and any argument
+// contains a magic "end", the current stack depth is first saved in a
+// scratch slot so END_OBJ can locate the object on the stack.
+void
+bytecode_walker::
+emit_args_for_visit_index_expression (tree_argument_list *arg_list,
+                                      tree_expression *root_lhs_id)
+{
+  int nargs = arg_list->size ();
+  int idx = 0;
+  bool lhs_is_id = root_lhs_id ? root_lhs_id->is_identifier () : false;
+
+  // If we are indexing an object, and have a magic end index
+  // we need to save the stack depth in a slot
+  bool obj_has_end = false;
+  if (!lhs_is_id)
+    {
+      for (auto it = arg_list->begin (); it != arg_list->end (); it++)
+        {
+          CHECK_NONNULL (*it);
+          obj_has_end = find_end_walker::has_end (**it);
+          if (obj_has_end)
+            break;
+        }
+    }
+
+  int obj_stack_depth_slot = -1;
+  if (obj_has_end)
+    {
+      // Scratch slot name is made unique by the current code offset
+      std::string obj_stack_depth_name = "%objsd_" + std::to_string (CODE_SIZE ());
+      obj_stack_depth_slot = add_id_to_table (obj_stack_depth_name);
+
+      MAYBE_PUSH_WIDE_OPEXT (obj_stack_depth_slot);
+      PUSH_CODE (INSTR::SET_SLOT_TO_STACK_DEPTH);
+      PUSH_SLOT (obj_stack_depth_slot);
+    }
+
+  // We want to push the args to the stack
+  for (auto it = arg_list->begin (); it != arg_list->end (); it++, idx++)
+    {
+      INC_DEPTH ();
+      if (lhs_is_id)
+        PUSH_ID_BEGIN_INDEXED (SLOT (root_lhs_id->name ()), idx, nargs, false);
+      else
+        PUSH_ID_BEGIN_INDEXED (obj_stack_depth_slot, idx, nargs, true);
+
+      PUSH_NARGOUT (1);
+      (*it)->accept (*this);
+      POP_NARGOUT ();
+      POP_ID_BEING_INDEXED ();
+      DEC_DEPTH ();
+    }
+}
+
+// Emit the field part of a struct index expression, i.e. the "field"
+// in "foo.field" or the dynamic expression in "foo.(expr)".
+//
+// Static field names on an identifier root become slots; other static
+// names are pushed as string constants; dynamic fields are evaluated.
+// On return, *struct_is_id_dot_id (if non-null) tells the caller
+// whether the expression has the simple "id.id" form.
+void
+bytecode_walker::
+emit_fields_for_visit_index_expression (string_vector &field_names,
+                                        tree_expression *dyn_expr,
+                                        tree_expression *lhs_root,
+                                        bool *struct_is_id_dot_id)
+{
+  if (struct_is_id_dot_id)
+    *struct_is_id_dot_id = false;
+  // For struct the "arg" is the field and not executed.
+  // Just add it as an identifier so that we can get its
+  // name as a string in the VM.
+  CHECK (field_names.numel() == 1);
+
+  std::string field_name = field_names.elem (0);
+
+  if (lhs_root && lhs_root->is_identifier () && field_name.size ())
+    {
+      if (struct_is_id_dot_id)
+        *struct_is_id_dot_id = true;
+      add_id_to_table (field_name);
+    }
+  else if (field_name.size ())
+    {
+      // Static field name but non-identifier root: push the name as a
+      // string constant from the data area
+      octave_value ov_field_name{field_name};
+      PUSH_DATA (ov_field_name);
+
+      PUSH_CODE_LOAD_CST (DATA_SIZE () - 1); // Offset of the constant
+    }
+  else
+    {
+      // Dynamic field, e.g. "foo.(expr)": evaluate the expression
+      CHECK_NONNULL (dyn_expr);
+
+      INC_DEPTH ();
+      PUSH_NARGOUT (1);
+      dyn_expr->accept (*this);
+      POP_NARGOUT ();
+      DEC_DEPTH ();
+    }
+}
+
+// Fallback for index expressions the walker cannot compile directly:
+// store the parse tree and emit an EVAL opcode so the tree evaluator
+// handles it at runtime.
+void
+bytecode_walker::
+eval_visit_index_expression (tree_index_expression& expr)
+{
+  INC_DEPTH ();
+  tree_expression *e = expr.expression ();
+  CHECK_NONNULL(e);
+
+  maybe_emit_anon_maybe_ignore_outputs ();
+
+  PUSH_TREE_FOR_EVAL (&expr);
+  int tree_idx = -CODE_SIZE (); // NB: Negative to not collide with debug data
+
+  int nargout = NARGOUT ();
+  MAYBE_PUSH_ANON_NARGOUT_OPEXT (nargout);
+  PUSH_CODE (INSTR::EVAL);
+  PUSH_CODE (nargout);
+  PUSH_CODE_INT (tree_idx);
+
+  maybe_emit_bind_ans_and_disp (expr);
+
+  if (DEPTH () == 1 && NARGOUT () > 1)
+    TODO ("Silly state");
+
+  DEC_DEPTH ();
+}
+
+void
+bytecode_walker::
+simple_visit_index_expression (tree_index_expression& expr)
+{
+  INC_DEPTH ();
+  tree_expression *e = expr.expression ();
+  CHECK_NONNULL(e);
+
+  // Word commands are on the form:
+  // foo bar baz; <=> foo('bar', 'baz');
+  bool is_wordcmd = expr.is_word_list_cmd ();
+
+  std::string type_tags = expr.type_tags ();
+
+  size_t n_chained = type_tags.size ();
+  CHECK (n_chained == 1);
+
+  // Put the object to index on the stack
+  INC_DEPTH ();
+  e->accept (*this);
+  DEC_DEPTH ();
+
+  // The Octave function inputname (i) needs to be able to know the name
+  // of th nth argument to a function, so we need to make an entry of
+  // the names.
+  arg_name_entry arg_name_entry;
+
+  std::list<octave::tree_argument_list *> arg_lists = expr.arg_lists ();
+  std::list<string_vector> arg_names = expr.arg_names ();
+  std::list<octave::tree_expression *> dyn_fields = expr.dyn_fields ();
+
+  CHECK (arg_lists.size () == n_chained);
+  CHECK (arg_names.size () == n_chained);
+  CHECK (dyn_fields.size () == n_chained);
+  CHECK (type_tags.size () == n_chained);
+
+  auto arg_names_it = arg_names.begin ();
+  auto arg_lists_it = arg_lists.begin ();
+  auto arg_lists_dyn_it = dyn_fields.begin ();
+  auto arg_type_tags_it = type_tags.begin ();
+
+  char type = *arg_type_tags_it;
+
+  int nargout = NARGOUT ();
+
+  bool struct_is_id_dot_id = false;
+  if (type == '.')
+    emit_fields_for_visit_index_expression (*arg_names_it, *arg_lists_dyn_it, e, &struct_is_id_dot_id);
+  else if (*arg_lists_it)
+    {
+      emit_args_for_visit_index_expression (*arg_lists_it, e);
+      // Push the argnames for inputname ()
+      size_t n_args = arg_names_it->numel ();
+      string_vector names(n_args);
+      for (int i = 0; i < arg_names_it->numel (); i++)
+        names.elem (i) = arg_names_it->elem (i);
+      arg_name_entry.m_arg_names = names;
+    }
+
+  if (m_pending_ignore_outputs && DEPTH () == 2)
+    {
+      PUSH_CODE (INSTR::SET_IGNORE_OUTPUTS);
+      PUSH_CODE (m_v_ignored.size ());
+      PUSH_CODE (m_ignored_of_total);
+      for (int i : m_v_ignored)
+        PUSH_CODE (i);
+      m_ignored_ip_start = CODE_SIZE (); // visit_multi_assignment () need the code offset to set the proper range for the unwind protect
+    }
+
+  maybe_emit_anon_maybe_ignore_outputs ();
+
+  int loc_id = N_LOC ();
+  PUSH_LOC ();
+  LOC (loc_id).m_ip_start = CODE_SIZE ();
+
+  arg_name_entry.m_ip_start = CODE_SIZE ();
+
+  tree_argument_list *args = *arg_lists_it;
+
+  if (is_wordcmd)
+    {
+      CHECK (e->is_identifier ());
+
+      std::string id_name = e->name ();
+      int slot = SLOT (id_name);
+      MAYBE_PUSH_WIDE_OPEXT (slot);
+
+      if (nargout == -1) // Anonymous functions need dynamic nargout
+        {
+          PUSH_CODE (INSTR::WORDCMD_NX);
+          // The vm need the name of the identifier for function lookups
+          PUSH_SLOT (slot);
+          // Push nargin
+          PUSH_CODE (args ? args->size () : 0);
+        }
+      else
+        {
+          PUSH_CODE (INSTR::WORDCMD);
+          // The vm need the name of the identifier for function lookups
+          PUSH_SLOT (slot);
+          PUSH_CODE (nargout);
+          // Push nargin
+          PUSH_CODE (args ? args->size () : 0);
+        }
+    }
+  else if (e->is_identifier () && !(type == '.' && !struct_is_id_dot_id))
+    {
+      std::string id_name = e->name ();
+      int slot = SLOT (id_name);
+
+      if (type == '(')
+        {
+          if (nargout == 0)
+            {
+              MAYBE_PUSH_WIDE_OPEXT (slot);
+              PUSH_CODE (INSTR::INDEX_ID_NARGOUT0);
+              // The vm need the name of the identifier for function lookups
+              PUSH_SLOT (slot);
+            }
+          else if (nargout == 1)
+            {
+              // If the id is "sin", "cos", "round" etc, and there is one argument,
+              // in the end map(unary_mapper_t) will be called while executing,
+              // unless the user have overriden those.
+              // We do a special opcode for those to speed them up.
+              // Don't do the special opcode if it would need wide slots, i.e. slot nr >= 256.
+              auto umaped_fn_it = m_name_to_unary_func.find (id_name);
+              if (!args || args->size () != 1 || umaped_fn_it == m_name_to_unary_func.end () || slot >= 256)
+                {
+                  MAYBE_PUSH_WIDE_OPEXT (slot);
+                  PUSH_CODE (INSTR::INDEX_ID_NARGOUT1);
+                }
+              else
+                {
+                  octave_base_value::unary_mapper_t idx = umaped_fn_it->second;
+                  PUSH_CODE (INSTR::INDEX_ID1_MATHY_UFUN);
+                  PUSH_CODE (static_cast<int> (idx));
+                }
+
+              PUSH_SLOT (slot);
+            }
+          // Anonymous functions need to have dynamic nargout
+          else if (nargout == -1)
+            {
+              MAYBE_PUSH_WIDE_OPEXT (slot);
+              PUSH_CODE (INSTR::INDEX_IDNX);
+              PUSH_SLOT (slot);
+            }
+          else
+            {
+              MAYBE_PUSH_WIDE_OPEXT (slot);
+              PUSH_CODE (INSTR::INDEX_IDN);
+              PUSH_SLOT (slot);
+              PUSH_CODE (nargout);
+            }
+
+          // Push nargin
+          PUSH_CODE (args ? args->size () : 0);
+        }
+      else if (type == '{')
+        {
+          if (nargout == 0)
+            {
+              MAYBE_PUSH_WIDE_OPEXT (slot);
+              PUSH_CODE (INSTR::INDEX_CELL_ID_NARGOUT0);
+              // The vm need the name of the identifier for function lookups
+              PUSH_SLOT (slot);
+            }
+          else if (nargout == 1)
+            {
+              MAYBE_PUSH_WIDE_OPEXT (slot);
+              PUSH_CODE (INSTR::INDEX_CELL_ID_NARGOUT1);
+              PUSH_SLOT (slot);
+            }
+          else if (nargout == -1)
+            {
+              MAYBE_PUSH_WIDE_OPEXT (slot);
+              PUSH_CODE (INSTR::INDEX_CELL_IDNX);
+              PUSH_SLOT (slot);
+            }
+          else
+            {
+              MAYBE_PUSH_WIDE_OPEXT (slot);
+              PUSH_CODE (INSTR::INDEX_CELL_ID_NARGOUTN);
+              PUSH_SLOT (slot);
+              PUSH_CODE (nargout);
+            }
+
+          // Push nargin
+          PUSH_CODE (args ? args->size () : 0);
+        }
+      else if (type == '.')
+        {
+          MAYBE_PUSH_ANON_NARGOUT_OPEXT (nargout);
+          PUSH_CODE (INSTR::INDEX_STRUCT_NARGOUTN);
+          PUSH_CODE (nargout);
+
+          string_vector field_names = *arg_names_it;
+          CHECK (field_names.numel ());
+          std::string field_name = field_names.elem (0);
+
+          PUSH_WSLOT (slot);   // id to index
+          PUSH_WSLOT (SLOT (field_name)); // VM need name of the field
+        }
+      else
+        TODO ("Not implemeted typetag");
+    }
+  else
+    {
+      // We are not indexing an id, but e.g.:
+      // (foo).()
+      // I.e. a temporary object.
+      MAYBE_PUSH_ANON_NARGOUT_OPEXT (nargout);
+      PUSH_CODE (INSTR::INDEX_OBJ);
+      PUSH_CODE (nargout); // This is arg0, not used if EXT_NARGOUT is used
+      PUSH_CODE (0); // "has slot"
+      PUSH_WSLOT (0); // The w/e slot TODO: Remove?
+      // Push nargin
+      if (type == '.')
+        PUSH_CODE (1); // Nargin always one for struct indexing
+      else
+        PUSH_CODE (args ? args->size () : 0);
+      PUSH_CODE (type);
+    }
+
+  arg_name_entry.m_ip_end = CODE_SIZE ();
+  PUSH_ARGNAMES_ENTRY (arg_name_entry);
+
+  LOC (loc_id).m_ip_end = CODE_SIZE ();
+  LOC (loc_id).m_col = expr.column ();
+  LOC (loc_id).m_line = expr.line ();
+
+  maybe_emit_bind_ans_and_disp (expr);
+
+  if (DEPTH () == 1 && NARGOUT () > 1)
+    TODO ("Silly state");
+
+  DEC_DEPTH ();
+}
+
+// Compile a (possibly chained) index expression, e.g. "foo(1)" or
+// "foo.bar(2).baz{1}".
+//
+// Two special cases are dispatched to helper visitors:
+//   * simple_visit_index_expression () -- a single, un-chained index,
+//     compiled with specialized op-codes for speed.
+//   * eval_visit_index_expression () -- chains containing "end", which
+//     are compiled to a call to eval (see comment below).
+//
+// Otherwise the chain is compiled as: push the indexed object, then one
+// INDEX_STRUCT_CALL op-code for the first link followed by one
+// INDEX_STRUCT_SUBCALL op-code per remaining link.
+void
+bytecode_walker::
+visit_index_expression (tree_index_expression& expr)
+{
+
+  tree_expression *e = expr.expression ();
+  CHECK_NONNULL(e);
+
+  // Per-link data for the chain: argument lists, argument names,
+  // dynamic field expressions and the type tag ('(', '{' or '.').
+  std::list<octave::tree_argument_list *> arg_lists = expr.arg_lists ();
+  std::list<string_vector> arg_names = expr.arg_names ();
+  std::list<octave::tree_expression *> dyn_fields = expr.dyn_fields ();
+  std::string type_tags = expr.type_tags ();
+
+  // One type tag per link in the chain.
+  size_t n_chained = type_tags.size ();
+  CHECK (n_chained);
+
+  // TODO: Kludge alert. Mirror the behaviour in ov_classdef::subsref
+  // where under certain conditions a magic number nargout of -1 is
+  // expected to  maybe return a cs-list. "-1" in this context
+  // does not have the same meaning as in the VM, where it means
+  // a varargout with only one return symbol 'varargout'.
+  bool m1_magic_nargout = false;
+  if (m_unknown_nargout)
+    {
+      if ((type_tags.size () >= 1 && (type_tags[0] == '{' || type_tags[0] == '.')) ||
+          (type_tags.size () >= 2 && (type_tags[0] == '(' && type_tags[1] == '.')))
+      {
+          m1_magic_nargout = true;
+      }
+    }
+
+  // For un-chained index expressions we use specialized
+  // op-codes that have e.g. nargout and type '(','{' and '.'
+  // encoded in the op-code itself to speed things up.
+  if (n_chained == 1 && !m1_magic_nargout)
+    {
+      simple_visit_index_expression (expr);
+      return;
+    }
+
+  // If there is any struct in the chain and an end, we cheat and use
+  // eval. We can't use the existing end op-codes since they need a value
+  // to check the size of, but e.g. "foo.bar(end)" might be a
+  // class cmd form method call "foo.bar" and we can't know the size before
+  // that has been checked.
+  //
+  // TODO: Solve this. Maybe with some special if?
+  bool has_end = false;
+  for (auto outer_it = arg_lists.begin (); outer_it != arg_lists.end (); outer_it++)
+    {
+      auto arg_list = *outer_it;
+      if (!arg_list)
+        continue;
+      for (auto it = arg_list->begin (); it != arg_list->end (); it++)
+        {
+          CHECK_NONNULL (*it);
+          has_end = find_end_walker::has_end (**it);
+          if (has_end)
+            break;
+        }
+    }
+  if (has_end)
+    {
+      eval_visit_index_expression (expr);
+      return;
+    }
+
+  INC_DEPTH ();
+
+  // A chained index expression might be: foo.bar(2).baz{1} => n_chained == 4
+
+  int loc_id = N_LOC ();
+  PUSH_LOC ();
+  LOC (loc_id).m_ip_start = CODE_SIZE ();
+
+  // The Octave function inputname (i) needs to be able to know the name
+  // of the nth argument to a function, so we need to make an entry of
+  // the names.
+  arg_name_entry arg_name_entry;
+
+  // We push the first object to index to the stack.
+  // Subsequent indexings will have the prior index result on the
+  // stack.
+  INC_DEPTH ();
+  e->accept (*this);
+  DEC_DEPTH ();
+
+  // All the per-link lists must have exactly one entry per type tag.
+  CHECK (arg_lists.size () == n_chained);
+  CHECK (arg_names.size () == n_chained);
+  CHECK (dyn_fields.size () == n_chained);
+  CHECK (type_tags.size () == n_chained);
+
+  auto arg_names_it = arg_names.begin ();
+  auto arg_lists_it = arg_lists.begin ();
+  auto arg_lists_dyn_it = dyn_fields.begin ();
+  auto arg_type_tags_it = type_tags.begin ();
+
+  tree_expression *first_expression = e;
+  // Iterate over the chained subexpressions. Collect arg names and amount of args.
+  std::vector<int> v_n_args {}; // Number of arguments for each link in the chain ('.' counts as one)
+  std::vector<int> v_types {}; // The type tag ('.', '(' or '{') for each link
+  while (arg_lists_it != arg_lists.end ())
+    {
+      tree_argument_list *arg_list = *arg_lists_it++;
+      string_vector field_names = *arg_names_it++;
+      char type = *arg_type_tags_it++;
+
+      v_types.push_back (type);
+
+      if (type == '.')
+        v_n_args.push_back (1);
+      else if (arg_list)
+        {
+          v_n_args.push_back (arg_list->size ());
+          // Push the argnames for inputname ()
+          int n_args = field_names.numel ();
+          string_vector names(n_args);
+          for (int i = 0; i < n_args; i++)
+            names.elem (i) = field_names.elem (i);
+          arg_name_entry.m_arg_names = names;
+        }
+      else
+        v_n_args.push_back (0); // e.g. the call to "bar" in "foo.bar ()"
+    }
+
+  if (m_pending_ignore_outputs && DEPTH () == 2)
+    {
+      PUSH_CODE (INSTR::SET_IGNORE_OUTPUTS);
+      PUSH_CODE (m_v_ignored.size ());
+      PUSH_CODE (m_ignored_of_total);
+      for (int i : m_v_ignored)
+        PUSH_CODE (i);
+      m_ignored_ip_start = CODE_SIZE (); // visit_multi_assignment () needs the code offset to set the proper range for the unwind protect
+    }
+
+  // Reset the iterators to iterate over them all again
+  arg_names_it = arg_names.begin ();
+  arg_lists_it = arg_lists.begin ();
+  arg_lists_dyn_it = dyn_fields.begin ();
+  arg_type_tags_it = type_tags.begin ();
+
+  // The first subcall needs to be handled specially since it can be a function call
+  // by identifier name.
+  {
+    tree_argument_list *arg_list = *arg_lists_it++;
+    string_vector field_names = *arg_names_it++;
+    tree_expression *dyn_expr = *arg_lists_dyn_it++;
+    char type = *arg_type_tags_it++;
+
+    if (type == '.')
+      emit_fields_for_visit_index_expression (field_names, dyn_expr, nullptr, nullptr);
+    else if (arg_list)
+      emit_args_for_visit_index_expression (arg_list, nullptr);
+    // For e.g. the call to "bar" in "foo.bar ()" no args need to be emitted
+
+
+    maybe_emit_anon_maybe_ignore_outputs ();
+
+    int nargout = NARGOUT ();
+
+    arg_name_entry.m_ip_start = CODE_SIZE ();
+
+    if (first_expression && first_expression->is_identifier ())
+      {
+        // Name of the left most identifier needed for error messages
+        // since INDEX_STRUCT_SUBCALL:s don't have any slot specified
+        arg_name_entry.m_obj_name = first_expression->name ();
+
+        int slot = SLOT (first_expression->name ());
+        MAYBE_PUSH_ANON_NARGOUT_OPEXT (nargout);
+        PUSH_CODE (INSTR::INDEX_STRUCT_CALL);
+        if (m1_magic_nargout)
+          PUSH_CODE (1); // Note, 1, not -1. Since there is no subsref in INDEX_STRUCT_CALL
+        else
+          PUSH_CODE (nargout);
+        PUSH_WSLOT (slot); // the slot
+      }
+    else
+      {
+        // Not indexing an identifier, so there is no slot to look up.
+        MAYBE_PUSH_ANON_NARGOUT_OPEXT (nargout);
+        PUSH_CODE (INSTR::INDEX_STRUCT_CALL);
+        if (m1_magic_nargout)
+          PUSH_CODE (1); // Note, 1, not -1.
+        else
+          PUSH_CODE (nargout);
+        PUSH_WSLOT (0); // slot ignored
+      }
+
+    PUSH_CODE (v_n_args[0]);
+    PUSH_CODE (v_types[0]); // '.', '(' or '{'
+
+    // The first subcall (INDEX_STRUCT_CALL) checks the identifier to see if it is a function that should be called.
+    // Or a callable object that should be called.
+    //
+    // If it is, the following opcode is skipped for '(' type calls since those eat the args, whereas it is executed
+    // for '{' or '.' types.
+    MAYBE_PUSH_ANON_NARGOUT_OPEXT (nargout);
+    PUSH_CODE (INSTR::INDEX_STRUCT_SUBCALL);
+    if (m1_magic_nargout)
+      PUSH_CODE (-1);
+    else
+      PUSH_CODE (nargout);
+    PUSH_CODE (0);
+    PUSH_CODE (v_n_args.size ());
+    PUSH_CODE (v_n_args[0]);
+    PUSH_CODE (v_types[0]);
+  }
+
+  // Now do the rest of the subcalls
+  int cntr = 0; // The subcall number, starting at one since the first is already done above
+  while (arg_lists_it != arg_lists.end ())
+    {
+      cntr++; // Incremented before use, so the first subcall in this loop is number one
+      tree_argument_list *arg_list = *arg_lists_it++;
+      string_vector field_names = *arg_names_it++;
+      tree_expression *dyn_expr = *arg_lists_dyn_it++;
+      char type = *arg_type_tags_it++;
+
+      if (type == '.')
+        emit_fields_for_visit_index_expression (field_names, dyn_expr, nullptr, nullptr);
+      else if (arg_list)
+        emit_args_for_visit_index_expression (arg_list, nullptr);
+      // For e.g. the call to "bar" in "foo.bar ()" no args need to be emitted
+
+      int nargout = NARGOUT ();
+      MAYBE_PUSH_ANON_NARGOUT_OPEXT (nargout);
+      PUSH_CODE (INSTR::INDEX_STRUCT_SUBCALL);
+      if (m1_magic_nargout)
+        PUSH_CODE (-1);
+      else
+        PUSH_CODE (nargout);
+      PUSH_CODE (cntr);
+      PUSH_CODE (v_n_args.size ());
+      PUSH_CODE (v_n_args[cntr]);
+      PUSH_CODE (v_types[cntr]);
+    }
+
+  // Close the inputname () bookkeeping entry for this expression.
+  arg_name_entry.m_ip_end = CODE_SIZE ();
+  PUSH_ARGNAMES_ENTRY (arg_name_entry);
+  arg_name_entry = {}; // TODO: Remove?
+
+  LOC (loc_id).m_ip_end = CODE_SIZE ();
+  LOC (loc_id).m_col = expr.column ();
+  LOC (loc_id).m_line = expr.line ();
+
+  maybe_emit_bind_ans_and_disp (expr);
+
+  if (DEPTH () == 1 && NARGOUT () > 1)
+    TODO ("Silly state");
+
+  DEC_DEPTH ();
+}
+
+// For loops are setup like this:
+//
+// Setup block:
+//   * The range variable is on the top of the stack
+//   * Push the amount of iterations to the stack, octave_idx_type
+//   * Push a counter to the stack initialized to ~0, octave_idx_type,
+//     so that it wraps to zero after incrementing.
+//   * Fall through to condition block
+// Condition block:
+//   * Increase counter
+//   * If there are no iterations left, go to after.
+//   * Write the iteration's value to the local
+//   * Fall through to body
+// Body block:
+//   * Execute the body code
+//   * Jump to condition block
+// After block:
+//   * Pop the type, counter and limit variables
+//
+// FOR_SETUP = opcode
+// FOR_COND  = opcode, after address, local slot
+
+// Compile "for <id> = <expr> ... end".  See the block-layout comment
+// above for the emitted structure (setup / condition / body / after).
+// The loop variable is written to its slot by FOR_COND each iteration;
+// the range value plus two native int counters stay on the VM stack for
+// the whole loop and are popped in the suffix code emitted at the end.
+void
+bytecode_walker::
+visit_simple_for_command (tree_simple_for_command& cmd)
+{
+  tree_expression *lhs = cmd.left_hand_side ();
+
+  int loc_id = N_LOC ();
+  PUSH_LOC ();
+  LOC (loc_id).m_ip_start = CODE_SIZE ();
+
+  CHECK_NONNULL (lhs);
+  if (! lhs->is_identifier ())
+    TODO ("For loop with lhs not id ???");
+
+  std::string id_name = lhs->name ();
+  // We don't want the id pushed to the stack so we
+  // don't walk it.
+  int slot = add_id_to_table (id_name);
+
+  tree_expression *expr = cmd.control_expr ();
+  CHECK_NONNULL (expr);
+
+  PUSH_TREE_FOR_DBG (&cmd); // Debug hit before rhs
+
+  // We want the rhs expression on the stack
+  INC_DEPTH ();
+  PUSH_NARGOUT (1);
+  expr->accept (*this);
+  POP_NARGOUT ();
+  DEC_DEPTH ();
+
+  // For loops need a special unwind entry to destroy the
+  // native ints on the stack properly.
+  int unwind_idx = N_UNWIND ();
+  PUSH_UNWIND();
+  UNWIND (unwind_idx).m_ip_start = CODE_SIZE ();
+
+  UNWIND (unwind_idx).m_unwind_entry_type =
+    unwind_entry_type::FOR_LOOP;
+
+  // For loops add two native ints and one ov to the stack,
+  // and switches add one ov to the stack, so we need to
+  // record how many things we have added to the stack,
+  // not counting this for loop. From for loops and
+  // switches.
+  int n_things_on_stack = n_on_stack_due_to_stmt();
+
+  // Store added things on stack (due to for loops and switches)
+  // in the unwind table.
+  UNWIND (unwind_idx).m_stack_depth = n_things_on_stack;
+
+  PUSH_CODE (INSTR::FOR_SETUP);
+  // FOR_COND needs to come after FOR_SETUP:
+  // FOR_SETUP uses FOR_COND's operands the first loop iteration
+  PUSH_TREE_FOR_DBG (&cmd); // Debug hit at condition
+  int cond_offset = CODE_SIZE ();
+  MAYBE_PUSH_WIDE_OPEXT (slot);
+  PUSH_CODE (INSTR::FOR_COND);
+  PUSH_SLOT (slot); // The for loop variable
+  int need_after = CODE_SIZE ();
+  PUSH_CODE_SHORT (-1); // Placeholder for after address, patched below
+
+  LOC (loc_id).m_ip_end = CODE_SIZE ();
+  LOC (loc_id).m_col = cmd.column ();
+  LOC (loc_id).m_line = cmd.line ();
+
+  // Walk body
+  tree_statement_list *list = cmd.body ();
+
+  // The body can be empty
+  if (list)
+    {
+      m_n_nested_loops++;
+      PUSH_NESTING_STATEMENT (nesting_statement::FOR_LOOP);
+      PUSH_BREAKS ();
+      PUSH_CONTINUE_TARGET ();
+      list->accept (*this);
+      // Patch any "continue" jumps in the body to target the condition.
+      for (int offset : POP_CONTINUE_TARGET())
+        SET_CODE_SHORT (offset, cond_offset);
+      POP_NESTING_STATEMENT ();
+      m_n_nested_loops--;
+    }
+
+  // A new loc for the for loop suffix code, so that any time
+  // spent there ends up by the "for"-row in the profiler
+
+  int loc_id2 = N_LOC ();
+  PUSH_LOC ();
+  LOC (loc_id2).m_ip_start = CODE_SIZE ();
+
+  // Jump to condition block, TODO: unless all paths terminated
+  PUSH_CODE (INSTR::JMP);
+  PUSH_CODE_SHORT (cond_offset);
+
+  // Now we can set the after jump in cond
+  SET_CODE_SHORT (need_after, CODE_SIZE ());
+
+  if (list)
+    {
+      // Also all breaks jump to here
+      for (int need_break : POP_BREAKS ())
+        {
+          SET_CODE_SHORT (need_break, CODE_SIZE ());
+        }
+    }
+
+  // Mark an end to the special for loop unwind entry
+  UNWIND (unwind_idx).m_ip_end = CODE_SIZE ();
+
+  // We need to pop the counter, n and range
+  PUSH_CODE (INSTR::POP_N_INTS);
+  PUSH_CODE (2);
+  // Pop the rhs ov (the range)
+  PUSH_CODE (INSTR::POP);
+
+  LOC (loc_id2).m_ip_end = CODE_SIZE ();
+  LOC (loc_id2).m_col = cmd.column ();
+  LOC (loc_id2).m_line = cmd.line ();
+}
+
+// Compile "for [val, key] = <expr> ... end".  Mirrors
+// visit_simple_for_command () but uses FOR_COMPLEX_SETUP and
+// FOR_COMPLEX_COND, which write both the value and the key slot each
+// iteration.  FOR_COMPLEX_SETUP also carries a jump-to-after operand,
+// taken if the rhs is undefined.
+void
+bytecode_walker::
+visit_complex_for_command (tree_complex_for_command& cmd)
+{
+  tree_argument_list *lhs = cmd.left_hand_side ();
+
+  // Exactly two lhs targets: the value and the key identifiers.
+  CHECK (lhs);
+  CHECK (lhs->size () == 2);
+
+  auto p = lhs->begin ();
+  tree_expression *val = *p++;
+  tree_expression *key = *p++;
+
+  CHECK (val); CHECK (key);
+
+  CHECK (val->is_identifier ());
+  CHECK (key->is_identifier ());
+
+  std::string val_name = val->name ();
+  std::string key_name = key->name ();
+
+  // Neither id is walked; they only get slots to be written to.
+  add_id_to_table (val_name);
+  add_id_to_table (key_name);
+
+  tree_expression *expr = cmd.control_expr ();
+  CHECK_NONNULL (expr);
+
+  // We want the rhs expression on the stack
+  INC_DEPTH ();
+  PUSH_NARGOUT (1);
+  expr->accept (*this);
+  POP_NARGOUT ();
+  DEC_DEPTH ();
+
+  // For loops need a special unwind entry to destroy the
+  // native ints on the stack properly.
+  int unwind_idx = N_UNWIND ();
+  PUSH_UNWIND();
+  UNWIND (unwind_idx).m_ip_start = CODE_SIZE ();
+
+  UNWIND (unwind_idx).m_unwind_entry_type =
+    unwind_entry_type::FOR_LOOP;
+
+  // For loops add two native ints and one ov to the stack,
+  // and switches add one ov to the stack, so we need to
+  // record how many things we have added to the stack,
+  // not counting this for loop. From for loops and
+  // switches.
+  int n_things_on_stack = n_on_stack_due_to_stmt();
+
+  // Store added things on stack (due to for loops and switches)
+  // in the unwind table.
+  UNWIND (unwind_idx).m_stack_depth = n_things_on_stack;
+
+  PUSH_CODE (INSTR::FOR_COMPLEX_SETUP);
+  int need_after0 = CODE_SIZE ();
+  PUSH_CODE_SHORT (-1); // Placeholder for after address for a jump if rhs is undefined
+
+  int cond_offset = CODE_SIZE ();
+  PUSH_CODE (INSTR::FOR_COMPLEX_COND);
+  int need_after1 = CODE_SIZE ();
+  PUSH_CODE_SHORT (-1); // Placeholder for after address
+  PUSH_WSLOT (SLOT (key_name));
+  PUSH_WSLOT (SLOT (val_name));
+
+  // Walk body
+  tree_statement_list *list = cmd.body ();
+  // The body can be empty
+  if (list)
+    {
+      m_n_nested_loops++;
+      PUSH_NESTING_STATEMENT (nesting_statement::FOR_LOOP);
+      PUSH_BREAKS ();
+      PUSH_CONTINUE_TARGET ();
+      list->accept (*this);
+      // Patch any "continue" jumps in the body to target the condition.
+      for (int offset : POP_CONTINUE_TARGET())
+        SET_CODE_SHORT (offset, cond_offset);
+      POP_NESTING_STATEMENT ();
+      m_n_nested_loops--;
+    }
+
+  // Jump to condition block, TODO: unless all paths terminated
+  PUSH_CODE (INSTR::JMP);
+  PUSH_CODE_SHORT (cond_offset);
+
+  // Now we can set the after jump in cond and setup
+  SET_CODE_SHORT (need_after0, CODE_SIZE ());
+  SET_CODE_SHORT (need_after1, CODE_SIZE ());
+
+  if (list)
+    {
+      // Also all breaks jump to here
+      for (int need_break : POP_BREAKS ())
+        {
+          SET_CODE_SHORT (need_break, CODE_SIZE ());
+        }
+    }
+
+  // Mark an end to the special for loop unwind entry
+  UNWIND (unwind_idx).m_ip_end = CODE_SIZE ();
+
+  // We need to pop the counter, n and rhs struct
+  PUSH_CODE (INSTR::POP_N_INTS);
+  PUSH_CODE (2);
+  // Pop the rhs ov (the struct)
+  PUSH_CODE (INSTR::POP);
+}
+
+// Compile a function handle expression such as "@sin".
+// Emits PUSH_FCN_HANDLE with a slot ("@" + name) used to cache the
+// looked-up handle.  Handles whose name contains '.' (method handles)
+// are not supported yet.
+void
+bytecode_walker::
+visit_fcn_handle (tree_fcn_handle &handle)
+{
+  INC_DEPTH ();
+  std::string name = handle.name ();
+  // We prepend the handles with @ to not risk collisions with
+  // other identifiers in the id table
+  std::string aname = "@" + name;
+
+  if (name.find ('.') != std::string::npos)
+    TODO ("No support for method fcn handles yet");
+
+  // slot for the handle function cache
+  int slot = add_id_to_table(aname);
+
+  maybe_emit_anon_maybe_ignore_outputs ();
+
+  MAYBE_PUSH_WIDE_OPEXT (slot);
+  PUSH_CODE (INSTR::PUSH_FCN_HANDLE);
+  PUSH_SLOT (slot);
+
+  maybe_emit_bind_ans_and_disp (handle);
+
+  DEC_DEPTH ();
+}
+
+// Compile a colon (range) expression: "base:limit" or
+// "base:increment:limit".  The operands are pushed in order base,
+// increment (if present), limit; then COLON2/COLON3 (or the *_CMD
+// variants when the range is the control expression of a for command)
+// combine them into the range value.
+void
+bytecode_walker::
+visit_colon_expression (tree_colon_expression& expr)
+{
+  INC_DEPTH ();
+
+  tree_expression *op1 = expr.base ();
+
+  CHECK_NONNULL (op1);
+  op1->accept (*this);
+
+  // The increment is optional ("base:limit" has none).
+  tree_expression *op2 = expr.increment ();
+
+  if (op2)
+    op2->accept (*this);
+
+  tree_expression *op3 = expr.limit ();
+
+  CHECK_NONNULL (op3);
+  op3->accept (*this);
+
+  maybe_emit_anon_maybe_ignore_outputs ();
+
+  // Colon expressions have some different semantics
+  // in command expressions.
+  if (expr.is_for_cmd_expr ())
+    {
+      if (op2)
+        PUSH_CODE (INSTR::COLON3_CMD);
+      else
+        PUSH_CODE (INSTR::COLON2_CMD);
+    }
+  else
+    {
+      if (op2)
+        PUSH_CODE (INSTR::COLON3);
+      else
+        PUSH_CODE (INSTR::COLON2);
+    }
+
+  maybe_emit_bind_ans_and_disp (expr);
+
+  DEC_DEPTH ();
+}
+
+// Compile "break" as an unconditional jump with an unknown target.
+// The placeholder target is recorded via PUSH_NEED_BREAK and patched
+// by the enclosing loop visitor once the loop's end address is known.
+void
+bytecode_walker::
+visit_break_command (tree_break_command&)
+{
+  PUSH_CODE (INSTR::JMP);
+  // Need to set where to jump to after we know where the loop ends
+  PUSH_NEED_BREAK (CODE_SIZE ());
+  PUSH_CODE_SHORT (-1); // Placeholder
+}
+
+
+
+// Compile "continue" as an unconditional jump with an unknown target.
+void
+bytecode_walker::
+visit_continue_command (tree_continue_command&)
+{
+  PUSH_CODE (INSTR::JMP);
+  // The address to jump to needs to be set by the loop
+  // visitor (do until jumps forward), so push the code
+  // address that needs a target address.
+  PUSH_NEED_CONTINUE_TARGET (CODE_SIZE ());
+  PUSH_CODE_SHORT (-1); // Placeholder
+}
+
+// Out-of-class definition of unwind_data's static counter member.
+std::size_t unwind_data::m_id_cntr = 0;
+
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libinterp/parse-tree/pt-bytecode-walk.h	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,447 @@
+////////////////////////////////////////////////////////////////////////
+//
+// Copyright (C) 2023-2024 The Octave Project Developers
+//
+// See the file COPYRIGHT.md in the top-level directory of this
+// distribution or <https://octave.org/copyright/>.
+//
+// This file is part of Octave.
+//
+// Octave is free software: you can redistribute it and/or modify it
+// under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// Octave is distributed in the hope that it will be useful, but
+// WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with Octave; see the file COPYING.  If not, see
+// <https://www.gnu.org/licenses/>.
+//
+////////////////////////////////////////////////////////////////////////
+
+#if ! defined (octave_pt_bytecode_walk_h)
+#define octave_pt_bytecode_walk_h 1
+
+#include "octave-config.h"
+
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+
+#include <map>
+
+#include "pt-walk.h"
+#include "error.h"
+
+#include "pt-bytecode-vm.h"
+
+class octave_user_script;
+class octave_user_function;
+
+#define ERROR_NOT_IMPLEMENTED \
+{ error("not implemented %s:%d", __FILE__, __LINE__); }
+
+namespace octave
+{
+  void compile_user_function (octave_user_code &ufn, bool do_print);
+  void compile_nested_user_function (octave_user_function &ufn, bool do_print, std::vector<octave_user_function *> v_parent_fns);
+  void compile_anon_user_function (octave_user_code &ufn, bool do_print, stack_frame::local_vars_map &locals);
+
+  // No separate visitor needed
+  // Base classes only, so no need to include them.
+  //
+  //  class tree_array_list
+  //  class tree_unary_expression
+  //  class tree_black_hole
+
+  class tree_anon_fcn_handle;
+  class tree_arg_size_spec;
+  class tree_arg_validation;
+  class tree_arg_validation_fcns;
+  class tree_args_block_attribute_list;
+  class tree_args_block_validation_list;
+  class tree_argument_list;
+  class tree_arguments_block;
+  class tree_binary_expression;
+  class tree_boolean_expression;
+  class tree_compound_binary_expression;
+  class tree_break_command;
+  class tree_colon_expression;
+  class tree_continue_command;
+  class tree_decl_command;
+  class tree_decl_init_list;
+  class tree_decl_elt;
+  class tree_simple_for_command;
+  class tree_complex_for_command;
+  class tree_spmd_command;
+  class tree_function_def;
+  class tree_identifier;
+  class tree_if_clause;
+  class tree_if_command;
+  class tree_if_command_list;
+  class tree_switch_case;
+  class tree_switch_case_list;
+  class tree_switch_command;
+  class tree_index_expression;
+  class tree_matrix;
+  class tree_cell;
+  class tree_multi_assignment;
+  class tree_no_op_command;
+  class tree_constant;
+  class tree_fcn_handle;
+  class tree_parameter_list;
+  class tree_postfix_expression;
+  class tree_prefix_expression;
+  class tree_return_command;
+  class tree_simple_assignment;
+  //class tree_simple_index_expression;
+  class tree_statement;
+  //class tree_statement_cmd;
+  //class tree_statement_expression;
+  //class tree_statement_null;
+  class tree_statement_list;
+  class tree_try_catch_command;
+  class tree_unwind_protect_command;
+  class tree_while_command;
+  class tree_do_until_command;
+
+  class tree_superclass_ref;
+  class tree_metaclass_query;
+  class tree_classdef_attribute;
+  class tree_classdef_attribute_list;
+  class tree_classdef_superclass;
+  class tree_classdef_superclass_list;
+  class tree_classdef_property;
+  class tree_classdef_property_list;
+  class tree_classdef_properties_block;
+  class tree_classdef_methods_list;
+  class tree_classdef_methods_block;
+  class tree_classdef_event;
+  class tree_classdef_events_list;
+  class tree_classdef_events_block;
+  class tree_classdef_enum;
+  class tree_classdef_enum_list;
+  class tree_classdef_enum_block;
+  class tree_classdef_body;
+  class tree_classdef;
+
+  // Bookkeeping record for an identifier that is currently being
+  // indexed; the walker keeps a stack of these so that "end" inside an
+  // indexing expression can refer to the right variable.
+  struct id_being_indexed
+  {
+    int slot;  // Slot of the identifier being indexed
+    int idx;   // Presumably the current argument position -- confirm at use sites
+    int nargs; // Number of index arguments
+    int type;  // NOTE(review): looks like the index type tag ('(', '{' or '.') -- confirm
+  };
+
+  class bytecode_walker : public tree_walker
+  {
+  public:
+
+    enum class nesting_statement
+      {
+        INVALID,
+        FOR_LOOP,
+        ONE_OV_ON_STACK,
+      };
+
+    bytecode_walker () { }
+
+    virtual ~bytecode_walker () = default;
+
+    // The bytecode will be put in this container
+    bytecode m_code;
+    // The bytecode need its own scope object that will
+    // be written back to the octave_user_function object
+    symbol_scope m_scope = symbol_scope::invalid ();
+
+    bool m_varargout = false;
+    bool m_is_script = false;
+    bool m_is_anon = false;
+    int m_n_nested_fn = 0;
+    std::vector<octave_user_function*> m_v_parent_fns; // Parent functions for nested functions
+
+    std::vector<std::vector<int>> m_continue_target;
+    std::vector<std::vector<int>> m_need_break_target;
+    std::vector<int> m_loop_target;
+    std::vector<bool> m_all_paths_terminated;
+    std::vector<int> m_nargout;
+    std::vector<std::vector<int>> m_need_unwind_target;
+
+    std::vector<nesting_statement> m_nesting_statement;
+
+    std::set<std::string> m_set_assigned_ids; // Only populated if i,j,I,J,e are ids
+
+    // For "end" in indexing expression we need to know what variable is
+    // being indexed.
+    std::vector<id_being_indexed> m_indexed_id;
+
+    int m_depth = 0;
+    int m_offset_n_locals = -1;
+    int m_n_locals = 0;
+    int m_n_nested_loops = 0;
+
+    // Counter to choose different alternative op-codes in a try to help branch prediction
+    int m_cnt_alts_cst = 0;
+    int m_cnt_alts_mul = 0;
+    int m_cnt_alts_add = 0;
+    int m_cnt_alts_div = 0;
+
+    // Simple way to keep down amount of temporary slots made to store results
+    int m_n_multi_assign = 0;
+
+    // Need to keep track of ignored outputs with the '~'
+    bool m_pending_ignore_outputs = false;
+    int m_ignored_of_total = 0;
+    std::vector<int> m_v_ignored;
+    int m_ignored_ip_start = 0;
+
+    // The values that the locals of an anonymous function are supposed
+    // to be set to. The field is set before calling the first accept()
+    // for anonymous functions.
+    stack_frame::local_vars_map *m_anon_local_values = nullptr;
+
+    // TODO: Kludge alert. Mirror the behaviour in ov_classdef::subsref
+    // where under certain conditions a magic number nargout of -1 is
+    // expected to  maybe return a cs-list. "-1" in this context
+    // does not have the same meaning as in the VM, where it means
+    // a varargout with only one return symbol 'varargout'.
+    //
+    // We need to track "unknown nargout" for this.
+    int m_unknown_nargout = 0;
+
+    //
+    bool m_is_folding = false;
+    std::vector<tree*> m_v_trees_to_fold;
+    std::vector<int> m_v_offset_of_folds;
+    int m_n_folds = 0;
+
+    std::map<std::string, int> m_map_locals_to_slot;
+
+    std::map<std::string, bool> m_map_id_is_global;
+    std::map<std::string, bool> m_map_id_is_persistent;
+
+    static std::map<std::string, octave_base_value::unary_mapper_t> m_name_to_unary_func;
+
+    int add_id_to_table (std::string name);
+
+    int n_on_stack_due_to_stmt ();
+
+    void emit_return ();
+
+    void emit_alt (int &cntr, std::vector<INSTR> alts);
+
+    void emit_load_2_cst (tree_expression *lhs, tree_expression *rhs);
+
+    void maybe_emit_anon_maybe_ignore_outputs ();
+    void maybe_emit_bind_ans_and_disp (tree_expression &expr, const std::string maybe_cmd_name = "");
+    void maybe_emit_disp_id (tree_expression &expr, const std::string &name, const std::string maybe_cmd_name = "" );
+    void maybe_emit_push_and_disp_id (tree_expression &expr, const std::string &name, const std::string maybe_cmd_name = "");
+    void emit_disp_obj (tree_expression &expr);
+
+    int get_slot (std::string name)
+    {
+      auto it = m_map_locals_to_slot.find (name);
+      if (it == m_map_locals_to_slot.end ())
+        error ("VM internal error: Slot %s does not exist", name.c_str ());
+      return it->second;
+    }
+
+    void ctor_unary_map ();
+
+    // No copying!
+
+    bytecode_walker (const bytecode_walker&) = delete;
+
+    bytecode_walker& operator = (const bytecode_walker&) = delete;
+
+    void visit_anon_fcn_handle (tree_anon_fcn_handle&);
+
+    void visit_argument_list (tree_argument_list&) ERROR_NOT_IMPLEMENTED
+
+    void visit_arguments_block (tree_arguments_block&) ERROR_NOT_IMPLEMENTED
+
+    void visit_args_block_attribute_list (tree_args_block_attribute_list&)
+      ERROR_NOT_IMPLEMENTED
+
+    void visit_args_block_validation_list (tree_args_block_validation_list&)
+      ERROR_NOT_IMPLEMENTED
+
+    void visit_arg_validation (tree_arg_validation&) ERROR_NOT_IMPLEMENTED
+
+    void visit_arg_size_spec (tree_arg_size_spec&) ERROR_NOT_IMPLEMENTED
+
+    void visit_arg_validation_fcns (tree_arg_validation_fcns&)
+      ERROR_NOT_IMPLEMENTED
+
+    void visit_binary_expression (tree_binary_expression&);
+
+    void visit_boolean_expression (tree_boolean_expression&);
+
+    void visit_compound_binary_expression (tree_compound_binary_expression&);
+
+    void visit_break_command (tree_break_command&);
+
+    void visit_colon_expression (tree_colon_expression&);
+
+    void visit_continue_command (tree_continue_command&);
+
+    void visit_decl_command (tree_decl_command&);
+
+    void visit_decl_elt (tree_decl_elt&) ERROR_NOT_IMPLEMENTED
+
+    void visit_decl_init_list (tree_decl_init_list&) ERROR_NOT_IMPLEMENTED
+
+    void visit_simple_for_command (tree_simple_for_command&);
+
+    void visit_complex_for_command (tree_complex_for_command&);
+
+    void visit_spmd_command (tree_spmd_command&) ERROR_NOT_IMPLEMENTED
+
+    void visit_octave_user_script (octave_user_script&);
+
+    void visit_octave_user_function (octave_user_function&);
+
+    void visit_function_def (tree_function_def&);
+
+    void visit_identifier (tree_identifier&);
+
+    void visit_if_clause (tree_if_clause&) ERROR_NOT_IMPLEMENTED
+
+    void visit_if_command (tree_if_command&);
+
+    void visit_if_command_list (tree_if_command_list&) ERROR_NOT_IMPLEMENTED
+
+    void visit_switch_case (tree_switch_case&) ERROR_NOT_IMPLEMENTED
+
+    void visit_switch_case_list (tree_switch_case_list&) ERROR_NOT_IMPLEMENTED
+
+    void visit_switch_command (tree_switch_command&);
+
+    // Helper functions
+    void
+    emit_args_for_visit_index_expression (tree_argument_list *arg_list,
+                                          tree_expression *lhs_root);
+
+    void
+    emit_fields_for_visit_index_expression (string_vector &field_names,
+                                            tree_expression *dyn_expr,
+                                            tree_expression *lhs_root,
+                                            bool *struct_is_id_dot_id);
+
+    void simple_visit_index_expression (tree_index_expression&);
+    void eval_visit_index_expression (tree_index_expression&);
+
+    void visit_index_expression (tree_index_expression&);
+
+    //void visit_simple_index_expression (tree_simple_index_expression&);
+
+    void visit_matrix (tree_matrix&);
+
+    void visit_cell (tree_cell&);
+
+    void visit_multi_assignment (tree_multi_assignment&);
+
+    void visit_no_op_command (tree_no_op_command&);
+
+    void visit_constant (tree_constant&);
+
+    void visit_fcn_handle (tree_fcn_handle&);
+
+    void visit_parameter_list (tree_parameter_list&) ERROR_NOT_IMPLEMENTED
+
+    void visit_postfix_expression (tree_postfix_expression&);
+
+    void visit_prefix_expression (tree_prefix_expression&);
+
+    void visit_return_command (tree_return_command&);
+
+    void visit_simple_assignment (tree_simple_assignment&);
+
+    void visit_statement (tree_statement&);
+
+    void visit_statement_list (tree_statement_list&);
+
+    void visit_try_catch_command (tree_try_catch_command&);
+
+    void emit_unwind_protect_code (tree_statement_list *body,
+                                   tree_statement_list *cleanup_code,
+                                   tree_expression *body_expr = nullptr,
+                                   tree_expression *cleanup_expr = nullptr,
+                                   std::vector<int> cleanup_instructions = {});
+
+    struct emit_unwind_protect_data
+    {
+      int m_idx_unwind;
+      bool m_break_stack_populated;
+      std::vector<int> m_v_need_breaks_initial;
+      int m_n_need_break;
+      int m_n_need_cleanup;
+    };
+
+    emit_unwind_protect_data emit_unwind_protect_code_start ();
+    void emit_unwind_protect_code_before_cleanup (emit_unwind_protect_data &data);
+    void emit_unwind_protect_code_end (emit_unwind_protect_data &data);
+
+    void visit_unwind_protect_command (tree_unwind_protect_command&);
+
+    void visit_while_command (tree_while_command&);
+
+    void visit_do_until_command (tree_do_until_command&);
+
+    void visit_superclass_ref (tree_superclass_ref&) ERROR_NOT_IMPLEMENTED
+
+    void visit_metaclass_query (tree_metaclass_query&) ERROR_NOT_IMPLEMENTED
+
+    void visit_classdef_attribute (tree_classdef_attribute&)
+      ERROR_NOT_IMPLEMENTED
+
+    void visit_classdef_attribute_list (tree_classdef_attribute_list&)
+      ERROR_NOT_IMPLEMENTED
+
+    void visit_classdef_superclass (tree_classdef_superclass&)
+      ERROR_NOT_IMPLEMENTED
+
+    void visit_classdef_superclass_list (tree_classdef_superclass_list&)
+      ERROR_NOT_IMPLEMENTED
+
+    void visit_classdef_property (tree_classdef_property&) ERROR_NOT_IMPLEMENTED
+
+    void visit_classdef_property_list (tree_classdef_property_list&)
+      ERROR_NOT_IMPLEMENTED
+
+    void visit_classdef_properties_block (tree_classdef_properties_block&)
+      ERROR_NOT_IMPLEMENTED
+
+    void visit_classdef_methods_list (tree_classdef_methods_list&)
+      ERROR_NOT_IMPLEMENTED
+
+    void visit_classdef_methods_block (tree_classdef_methods_block&)
+      ERROR_NOT_IMPLEMENTED
+
+    void visit_classdef_event (tree_classdef_event&) ERROR_NOT_IMPLEMENTED
+
+    void visit_classdef_events_list (tree_classdef_events_list&)
+      ERROR_NOT_IMPLEMENTED
+
+    void visit_classdef_events_block (tree_classdef_events_block&)
+      ERROR_NOT_IMPLEMENTED
+
+    void visit_classdef_enum (tree_classdef_enum&) ERROR_NOT_IMPLEMENTED
+
+    void visit_classdef_enum_list (tree_classdef_enum_list&)
+      ERROR_NOT_IMPLEMENTED
+
+    void visit_classdef_enum_block (tree_classdef_enum_block&)
+      ERROR_NOT_IMPLEMENTED
+
+    void visit_classdef_body (tree_classdef_body&) ERROR_NOT_IMPLEMENTED
+
+    void visit_classdef (tree_classdef&) ERROR_NOT_IMPLEMENTED
+  };
+}
+
+#endif
+
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libinterp/parse-tree/pt-bytecode.h	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,367 @@
+////////////////////////////////////////////////////////////////////////
+//
+// Copyright (C) 2023-2024 The Octave Project Developers
+//
+// See the file COPYRIGHT.md in the top-level directory of this
+// distribution or <https://octave.org/copyright/>.
+//
+// This file is part of Octave.
+//
+// Octave is free software: you can redistribute it and/or modify it
+// under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// Octave is distributed in the hope that it will be useful, but
+// WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with Octave; see the file COPYING.  If not, see
+// <https://www.gnu.org/licenses/>.
+//
+////////////////////////////////////////////////////////////////////////
+
+#if ! defined (octave_pt_bytecode_h)
+#define octave_pt_bytecode_h 1
+
+#include "octave-config.h"
+
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+
+#include <vector>
+#include <map>
+
+#include "octave-config.h"
+#include "Cell.h"
+#include "ov-vm.h"
+
+OCTAVE_BEGIN_NAMESPACE(octave)
+
+class tree;
+
+enum class INSTR
+{
+  POP,
+  DUP,
+  LOAD_CST,
+  MUL,
+  DIV,
+  ADD,
+  SUB,
+  RET,
+  ASSIGN,
+  JMP_IF,
+  JMP,
+  JMP_IFN,
+  PUSH_SLOT_NARGOUT0,
+  LE,
+  LE_EQ,
+  GR,
+  GR_EQ,
+  EQ,
+  NEQ,
+  INDEX_ID_NARGOUT0,
+  PUSH_SLOT_INDEXED,
+  POW,
+  LDIV,
+  EL_MUL,
+  EL_DIV,
+  EL_POW,
+  EL_AND,
+  EL_OR,
+  EL_LDIV,
+  NOT,
+  UADD,
+  USUB,
+  TRANS,
+  HERM,
+  // TODO: These should have an inplace optimization (no push)
+  INCR_ID_PREFIX,
+  DECR_ID_PREFIX,
+  INCR_ID_POSTFIX,
+  DECR_ID_POSTFIX,
+  FOR_SETUP,
+  FOR_COND,
+  POP_N_INTS,
+  PUSH_SLOT_NARGOUT1,
+  INDEX_ID_NARGOUT1,
+  PUSH_FCN_HANDLE,
+  COLON3,
+  COLON2,
+  COLON3_CMD,
+  COLON2_CMD,
+  PUSH_TRUE,
+  PUSH_FALSE,
+  UNARY_TRUE,
+  INDEX_IDN,
+  ASSIGNN,
+  PUSH_SLOT_NARGOUTN,
+  SUBASSIGN_ID,
+  END_ID,
+  MATRIX,
+  TRANS_MUL,
+  MUL_TRANS,
+  HERM_MUL,
+  MUL_HERM,
+  TRANS_LDIV,
+  HERM_LDIV,
+  WORDCMD,
+  HANDLE_SIGNALS,
+  PUSH_CELL,
+  INDEX_CELL_ID_NARGOUT0,
+  INDEX_CELL_ID_NARGOUT1,
+  INDEX_CELL_ID_NARGOUTN,
+  INCR_PREFIX,
+  ROT,
+  GLOBAL_INIT,
+  ASSIGN_COMPOUND,
+  JMP_IFDEF,
+  JMP_IFNCASEMATCH,
+  BRAINDEAD_PRECONDITION,
+  BRAINDEAD_WARNING,
+  FORCE_ASSIGN, // Accepts undefined rhs
+  PUSH_NIL,
+  THROW_IFERROBJ,
+  INDEX_STRUCT_NARGOUTN,
+  SUBASSIGN_STRUCT,
+  SUBASSIGN_CELL_ID,
+  INDEX_OBJ,
+  SUBASSIGN_OBJ,
+  MATRIX_UNEVEN,
+  LOAD_FAR_CST,
+  END_OBJ,
+  SET_IGNORE_OUTPUTS,
+  CLEAR_IGNORE_OUTPUTS,
+  SUBASSIGN_CHAINED,
+  SET_SLOT_TO_STACK_DEPTH,
+  DUPN,
+  DEBUG,
+  INDEX_STRUCT_CALL,
+  END_X_N,
+  EVAL,
+  BIND_ANS,
+  PUSH_ANON_FCN_HANDLE,
+  FOR_COMPLEX_SETUP, // opcode
+  FOR_COMPLEX_COND,
+  PUSH_SLOT_NARGOUT1_SPECIAL,
+  DISP,
+  PUSH_SLOT_DISP,
+  LOAD_CST_ALT2,
+  LOAD_CST_ALT3,
+  LOAD_CST_ALT4,
+  LOAD_2_CST,
+  MUL_DBL,
+  ADD_DBL,
+  SUB_DBL,
+  DIV_DBL,
+  POW_DBL,
+  LE_DBL,
+  LE_EQ_DBL,
+  GR_DBL,
+  GR_EQ_DBL,
+  EQ_DBL,
+  NEQ_DBL,
+  INDEX_ID1_MAT_1D,
+  INDEX_ID1_MAT_2D,
+  PUSH_PI,
+  INDEX_ID1_MATHY_UFUN,
+  SUBASSIGN_ID_MAT_1D,
+  INCR_ID_PREFIX_DBL,
+  DECR_ID_PREFIX_DBL,
+  INCR_ID_POSTFIX_DBL,
+  DECR_ID_POSTFIX_DBL,
+  PUSH_DBL_0,
+  PUSH_DBL_1,
+  PUSH_DBL_2,
+  JMP_IF_BOOL,
+  JMP_IFN_BOOL,
+  USUB_DBL,
+  NOT_DBL,
+  NOT_BOOL,
+  PUSH_FOLDED_CST,
+  SET_FOLDED_CST,
+  WIDE,
+  SUBASSIGN_ID_MAT_2D,
+  ENTER_SCRIPT_FRAME,
+  EXIT_SCRIPT_FRAME,
+  RET_ANON,
+  INDEX_IDNX,
+  INDEX_CELL_IDNX,
+  PUSH_SLOT_NX,
+  EXT_NARGOUT,
+  WORDCMD_NX,
+  ANON_MAYBE_SET_IGNORE_OUTPUTS,
+  ENTER_NESTED_FRAME,
+  INSTALL_FUNCTION,
+  DUP_MOVE,
+  MUL_CST_DBL,
+  MUL_CST,
+  ADD_CST_DBL,
+  ADD_CST,
+  DIV_CST_DBL,
+  DIV_CST,
+  SUB_CST_DBL,
+  SUB_CST,
+  LE_CST_DBL,
+  LE_CST,
+  LE_EQ_CST_DBL,
+  LE_EQ_CST,
+  GR_CST_DBL,
+  GR_CST,
+  GR_EQ_CST_DBL,
+  GR_EQ_CST,
+  EQ_CST_DBL,
+  EQ_CST,
+  NEQ_CST_DBL,
+  NEQ_CST,
+  POW_CST_DBL,
+  POW_CST,
+  PUSH_I,
+  PUSH_E,
+  INDEX_STRUCT_SUBCALL,
+  PUSH_CELL_BIG,
+  APPEND_CELL,
+};
+
+enum class unwind_entry_type
+{
+  INVALID,
+  FOR_LOOP,
+  TRY_CATCH,
+  UNWIND_PROTECT,
+};
+
+struct unwind_entry
+{
+  int m_ip_start;
+  int m_ip_end;
+  int m_ip_target;
+  int m_stack_depth;
+  unwind_entry_type m_unwind_entry_type;
+};
+
+struct loc_entry
+{
+  int m_ip_start = -1;
+  int m_ip_end = -1;
+  int m_col = -1;
+  int m_line = -1;
+};
+
+struct arg_name_entry
+{
+  int m_ip_start;
+  int m_ip_end;
+  Cell m_arg_names;
+  std::string m_obj_name;
+};
+
+struct unwind_data
+{
+  // Id to let nested children recognize their parents when they look for them on the stack.
+  std::size_t m_id = 0;
+  std::size_t m_parent_id = 0; // Id of parent, which could be the root function or another nested function.
+  std::size_t m_matriarch_id = 0; // Id of the root function, which has nested functions. Common for all nested functions in that function.
+  static std::size_t m_id_cntr;
+
+  unwind_data ()
+  {
+    m_id = ++m_id_cntr;
+  }
+
+  std::vector<unwind_entry> m_unwind_entries;
+  std::vector<loc_entry> m_loc_entry;
+  std::map<int, int> m_slot_to_persistent_slot;
+  std::map<int, tree*> m_ip_to_tree;
+  std::vector<arg_name_entry> m_argname_entries;
+  std::vector<std::map<int,int>> m_external_frame_offset_to_internal;
+  std::map<std::string, int> m_map_user_locals_names_to_slot;
+
+  struct nested_var_offset { int m_depth; int m_slot_parent; int m_slot_nested; };
+  std::vector<nested_var_offset> m_v_nested_vars;
+
+  std::string m_name;
+  std::string m_file;
+
+  unsigned m_code_size;
+  unsigned m_ids_size;
+
+  bool m_is_script = false;
+  bool m_is_anon = false;
+  int m_n_nested_fn = 0;
+
+  // Note:
+  //  n locals includes n args and n returns.
+  //  n returns and n locals are not negative for varargout and varargin.
+  //  %nargout is included in the counts.
+  int m_n_returns;
+  int m_n_args;
+  int m_n_locals;
+  // The number of symbols originally in the scope object at compile time
+  int m_n_orig_scope_size;
+};
+
+struct bytecode
+{
+  std::vector<unsigned char> m_code;
+  std::vector<octave_value> m_data;
+  std::vector<std::string> m_ids;
+  unwind_data m_unwind_data;
+};
+
+union stack_element
+{
+  octave_value ov;
+  octave_value_vm ov_vm;
+  octave_base_value *ovb;
+  uint64_t u;
+  int64_t i;
+  double d;
+
+  void *pv;
+  const char *pcc;
+  unsigned char *puc;
+  stack_element *pse;
+  octave_value *pov;
+  std::string *ps;
+  unwind_data *pud;
+  execution_exception *pee;
+
+  stack_element(){}
+  ~stack_element(){}
+};
+
+// Enums to describe what error message to build
+enum class error_type
+{
+  INVALID,
+  ID_UNDEFINED,
+  ID_UNDEFINEDN,
+  IF_UNDEFINED,
+  INDEX_ERROR,
+  EXECUTION_EXC,
+  INTERRUPT_EXC,
+  INVALID_N_EL_RHS_IN_ASSIGNMENT,
+  RHS_UNDEF_IN_ASSIGNMENT,
+  BAD_ALLOC,
+  EXIT_EXCEPTION,
+  DEBUG_QUIT,
+};
+
+enum class global_type
+{
+  GLOBAL,
+  PERSISTENT,
+  GLOBAL_OR_PERSISTENT,
+};
+
+// If TRUE, use VM evaluator rather than tree walker.
+extern bool V__vm_enable__;
+
+OCTAVE_END_NAMESPACE(octave)
+
+#endif
+
+#endif
--- a/libinterp/parse-tree/pt-eval.cc	Fri Apr 19 18:30:39 2024 +0200
+++ b/libinterp/parse-tree/pt-eval.cc	Fri Apr 19 12:57:20 2024 -0400
@@ -72,6 +72,10 @@
 #include "unwind-prot.h"
 #include "utils.h"
 #include "variables.h"
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+#  include "pt-bytecode-vm.h"
+#  include "pt-bytecode-walk.h"
+#endif
 
 OCTAVE_BEGIN_NAMESPACE(octave)
 
@@ -83,25 +87,6 @@
 
 // Normal evaluator.
 
-class quit_debug_exception
-{
-public:
-
-  quit_debug_exception (bool all = false) : m_all (all) { }
-
-  quit_debug_exception (const quit_debug_exception&) = default;
-
-  quit_debug_exception& operator = (const quit_debug_exception&) = default;
-
-  ~quit_debug_exception () = default;
-
-  bool all () const { return m_all; }
-
-private:
-
-  bool m_all;
-};
-
 class debugger
 {
 public:
@@ -1387,12 +1372,19 @@
                   || m_dbstep_flag != 0
                   || m_break_on_next_stmt
                   || in_debug_repl ());
+
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+  update_vm_dbgprofecho_flag ();
+#endif
 }
 
 void
 tree_evaluator::reset_debug_state (bool mode)
 {
   m_debug_mode = mode;
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+  update_vm_dbgprofecho_flag ();
+#endif
 }
 
 void
@@ -2240,6 +2232,12 @@
 }
 
 void
+tree_evaluator::set_active_bytecode_ip (int ip)
+{
+  m_call_stack.set_active_bytecode_ip (ip);
+}
+
+void
 tree_evaluator::define_parameter_list_from_arg_vector
   (tree_parameter_list *param_list, const octave_value_list& args)
 {
@@ -2519,6 +2517,36 @@
   m_call_stack.push (fcn);
 }
 
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+
+void tree_evaluator::push_stack_frame (vm &vm, octave_user_function *fcn, int nargout, int nargin)
+{
+  m_call_stack.push (vm, fcn, nargout, nargin);
+}
+
+void tree_evaluator::push_stack_frame (vm &vm, octave_user_script *fcn, int nargout, int nargin)
+{
+  m_call_stack.push (vm, fcn, nargout, nargin);
+}
+
+void tree_evaluator::push_stack_frame (vm &vm, octave_user_code *fcn, int nargout, int nargin)
+{
+  if (fcn->is_user_function ())
+    m_call_stack.push (vm, static_cast<octave_user_function*> (fcn), nargout, nargin);
+  else
+    m_call_stack.push (vm, static_cast<octave_user_script*> (fcn), nargout, nargin);
+}
+
+void tree_evaluator::push_stack_frame (vm &vm, octave_user_code *fcn, int nargout, int nargin,
+                                       const std::shared_ptr<stack_frame>& closure_frames)
+{
+
+  CHECK_PANIC (fcn->is_user_function ());
+  m_call_stack.push (vm, static_cast<octave_user_function*> (fcn), nargout, nargin, closure_frames);
+}
+
+#endif
+
 void
 tree_evaluator::pop_stack_frame ()
 {
@@ -4743,7 +4771,12 @@
 tree_evaluator::add_autoload (const std::string& fcn,
                               const std::string& nm)
 {
-  m_autoload_map[fcn] = check_autoload_file (nm);
+  std::string file_name = check_autoload_file (nm);
+
+  // Signal to load path that the function cache is invalid
+  octave::load_path::signal_clear_fcn_cache ();
+
+  m_autoload_map[fcn] = file_name;
 }
 
 void
@@ -4752,6 +4785,9 @@
 {
   check_autoload_file (nm);
 
+  // Signal to load path that the function cache is invalid
+  octave::load_path::signal_clear_fcn_cache ();
+
   // Remove function from symbol table and autoload map.
   symbol_table& symtab = m_interpreter.get_symbol_table ();
 
@@ -5101,6 +5137,11 @@
   if (cleanup_pushed)
     maybe_set_echo_state ();
 
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+  // Since m_echo might have changed value we need to call this
+  update_vm_dbgprofecho_flag ();
+#endif
+
   return octave_value ();
 }
 
--- a/libinterp/parse-tree/pt-eval.h	Fri Apr 19 18:30:39 2024 +0200
+++ b/libinterp/parse-tree/pt-eval.h	Fri Apr 19 12:57:20 2024 -0400
@@ -62,12 +62,20 @@
 class push_parser;
 class unwind_protect;
 
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+class vm;
+#endif
+
 // How to evaluate the code that the parse trees represent.
 
 class OCTINTERP_API tree_evaluator : public tree_walker
 {
 public:
 
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+  friend class vm;
+#endif
+
   enum echo_state
   {
     ECHO_OFF = 0,
@@ -135,6 +143,9 @@
       m_silent_functions (false), m_string_fill_char (' '), m_PS4 ("+ "),
       m_dbstep_flag (0), m_break_on_next_stmt (false), m_echo (ECHO_OFF),
       m_echo_state (false), m_echo_file_name (),
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+      m_vm_dbg_profile_echo (false), m_vm_profiler_active (false),
+#endif
       m_echo_file_pos (1),
       m_echo_files (), m_in_top_level_repl (false),
       m_server_mode (false), m_in_loop_command (false),
@@ -403,6 +414,8 @@
 
   octave_value get_auto_fcn_var (stack_frame::auto_var_type avt) const;
 
+  void set_active_bytecode_ip (int ip);
+
   void define_parameter_list_from_arg_vector
   (tree_parameter_list *param_list, const octave_value_list& args);
 
@@ -438,6 +451,18 @@
 
   void push_stack_frame (octave_function *fcn);
 
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+
+  void push_stack_frame (vm &vm, octave_user_function *fcn, int nargout, int nargin);
+
+  void push_stack_frame (vm &vm, octave_user_script *fcn, int nargout, int nargin);
+
+  void push_stack_frame (vm &vm, octave_user_code *fcn, int nargout, int nargin);
+
+  void push_stack_frame (vm &vm, octave_user_code *fcn, int nargout, int nargin, const std::shared_ptr<stack_frame>& closure_frames);
+
+#endif
+
   void pop_stack_frame ();
 
   std::shared_ptr<stack_frame> pop_return_stack_frame ();
@@ -833,6 +858,9 @@
     int old_val = m_echo;
     m_echo = val;
 
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+    update_vm_dbgprofecho_flag ();
+#endif
     return old_val;
   }
 
@@ -860,6 +888,16 @@
     m_echo_file_pos = pos;
   }
 
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+  void vm_set_profiler_active (bool val)
+  {
+    m_vm_profiler_active = val;
+    update_vm_dbgprofecho_flag ();
+  }
+
+  bool vm_dbgprofecho_flag () { return m_vm_dbg_profile_echo; }
+#endif
+
 private:
 
   template <typename T>
@@ -975,6 +1013,22 @@
 
   std::string m_echo_file_name;
 
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+
+  // The VM needs to know whether the evaluation is in a debug, echo or
+  // profiler state.
+  bool m_vm_dbg_profile_echo; // Set to true if either echo, dbg or vm profiler active
+  bool m_vm_profiler_active; // VM specific profiler flag
+
+  // Set m_vm_dbg_profile_echo to its proper state. Needs to be done after
+  // each update to the underlying flags.
+  void update_vm_dbgprofecho_flag ()
+  {
+    m_vm_dbg_profile_echo = m_debug_mode || m_echo || m_vm_profiler_active;
+  }
+
+#endif
+
   // Next line to echo, counting from 1.  We use int here because the
   // parser does.  It also initializes line and column numbers to the
   // invalid value -1 and that can cause trouble if cast to an
@@ -1014,6 +1068,25 @@
   int m_num_indices;
 };
 
+class quit_debug_exception
+{
+public:
+
+  quit_debug_exception (bool all = false) : m_all (all) { }
+
+  quit_debug_exception (const quit_debug_exception&) = default;
+
+  quit_debug_exception& operator = (const quit_debug_exception&) = default;
+
+  ~quit_debug_exception () = default;
+
+  bool all () const { return m_all; }
+
+private:
+
+  bool m_all;
+};
+
 OCTAVE_END_NAMESPACE(octave)
 
 #endif
--- a/libinterp/parse-tree/pt-tm-const.cc	Fri Apr 19 18:30:39 2024 +0200
+++ b/libinterp/parse-tree/pt-tm-const.cc	Fri Apr 19 12:57:20 2024 -0400
@@ -38,6 +38,7 @@
 #include "ovl.h"
 #include "pt-arg-list.h"
 #include "pt-bp.h"
+#include "pt-bytecode-vm.h"
 #include "pt-eval.h"
 #include "pt-exp.h"
 #include "pt-mat.h"
@@ -60,6 +61,16 @@
 
 OCTAVE_BEGIN_NAMESPACE(octave)
 
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+
+tm_row_const::tm_row_const (const stack_element *beg, const stack_element *end)
+    : tm_info (beg == end), m_values ()
+{
+  init (beg, end);
+}
+
+#endif
+
 void tm_row_const::cellify ()
 {
   bool elt_changed = false;
@@ -224,6 +235,85 @@
     }
 }
 
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+
+// FIXME: This function is mostly a duplicate of
+//
+//   void tm_row_const::init (const tree_argument_list&, tree_evaluator&)
+//
+// The common parts should be factored out into a single function that
+// is used by the others.
+
+void tm_row_const::init (const stack_element *beg, const stack_element *end)
+{
+  bool first_elem = true;
+
+  for (; beg != end; beg++)
+    {
+      octave_quit ();
+
+      octave_value tmp = beg->ov;
+
+      if (tmp.is_undefined ())
+        error ("undefined element in matrix list");
+
+      if (tmp.is_cs_list ())
+        {
+          octave_value_list tlst = tmp.list_value ();
+
+          for (octave_idx_type i = 0; i < tlst.length (); i++)
+            {
+              octave_quit ();
+
+              init_element (tlst(i), first_elem);
+            }
+        }
+      else
+        init_element (tmp, first_elem);
+    }
+
+  if (m_any_cell && ! m_any_class && ! m_first_elem_is_struct)
+    cellify ();
+
+  first_elem = true;
+
+  for (const auto& val : m_values)
+    {
+      octave_quit ();
+
+      dim_vector this_elt_dv = val.dims ();
+
+      if (! this_elt_dv.zero_by_zero ())
+        {
+          m_all_empty = false;
+
+          if (first_elem)
+            {
+              first_elem = false;
+              m_dv = this_elt_dv;
+            }
+          else if ((! m_any_class) && (! m_dv.hvcat (this_elt_dv, 1)))
+            eval_error ("horizontal dimensions mismatch", m_dv, this_elt_dv);
+        }
+    }
+}
+
+tm_const::tm_const (const stack_element *beg, const stack_element *end,
+                    octave_idx_type n_rows, tree_evaluator& tw)
+  : tm_info (beg == end), m_evaluator (tw), m_tm_rows ()
+{
+  init (beg, end, n_rows);
+}
+
+tm_const::tm_const (const stack_element *beg, const stack_element *end,
+                    const std::vector<int>& row_lengths, tree_evaluator& tw)
+  : tm_info (beg == end), m_evaluator (tw), m_tm_rows ()
+{
+  init (beg, end, row_lengths);
+}
+
+#endif
+
 octave_value
 tm_const::concat (char string_fill_char) const
 {
@@ -432,6 +522,265 @@
     }
 }
 
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+
+// FIXME: This function is mostly a duplicate of both of the functions
+//
+//   void tm_const::init (const tree_matrix&)
+//   void tm_const::init (const octave_value *, const octave_value *,
+//                        octave_idx_type)
+//
+// The common parts should be factored out into a single function that
+// is used by the others.
+
+// For variable length rows
+void tm_const::init (const stack_element *beg, const stack_element *end,
+                     const std::vector<int>& row_lengths)
+{
+  bool first_elem = true;
+  bool first_elem_is_struct = false;
+
+  // Just eval and figure out if what we have is complex or all strings.
+  // We can't check columns until we know that this is a numeric matrix --
+  // collections of strings can have elements of different lengths.
+
+  for (int i = 0; beg != end; beg += row_lengths[i++])
+    {
+      octave_quit ();
+
+      if (beg + row_lengths[i] > end)
+        error ("invalid call to tm_const::init");
+
+      tm_row_const row (beg, beg + row_lengths[i]);
+
+      if (first_elem)
+        {
+          first_elem_is_struct = row.first_elem_struct_p ();
+
+          first_elem = false;
+        }
+
+      if (row.empty ())
+        continue;
+
+      if (m_all_strings && ! row.all_strings_p ())
+        m_all_strings = false;
+
+      if (m_all_sq_strings && ! row.all_sq_strings_p ())
+        m_all_sq_strings = false;
+
+      if (m_all_dq_strings && ! row.all_dq_strings_p ())
+        m_all_dq_strings = false;
+
+      if (! m_some_strings && row.some_strings_p ())
+        m_some_strings = true;
+
+      if (m_all_real && ! row.all_real_p ())
+        m_all_real = false;
+
+      if (m_all_complex && ! row.all_complex_p ())
+        m_all_complex = false;
+
+      if (m_all_empty && ! row.all_empty_p ())
+        m_all_empty = false;
+
+      if (! m_any_cell && row.any_cell_p ())
+        m_any_cell = true;
+
+      if (! m_any_sparse && row.any_sparse_p ())
+        m_any_sparse = true;
+
+      if (! m_any_class && row.any_class_p ())
+        m_any_class = true;
+
+      m_all_1x1 = m_all_1x1 && row.all_1x1_p ();
+
+      m_tm_rows.push_back (row);
+    }
+
+  if (m_any_cell && ! m_any_class && ! first_elem_is_struct)
+    {
+      for (auto& elt : m_tm_rows)
+        {
+          octave_quit ();
+
+          elt.cellify ();
+        }
+    }
+
+  first_elem = true;
+
+  for (const auto& elt : m_tm_rows)
+    {
+      octave_quit ();
+
+      octave_idx_type this_elt_nr = elt.rows ();
+      octave_idx_type this_elt_nc = elt.cols ();
+
+      std::string this_elt_class_name = elt.class_name ();
+      m_class_name = get_concat_class (m_class_name, this_elt_class_name);
+
+      dim_vector this_elt_dv = elt.dims ();
+
+      m_all_empty = false;
+
+      if (first_elem)
+        {
+          first_elem = false;
+
+          m_dv = this_elt_dv;
+        }
+      else if (m_all_strings && m_dv.ndims () == 2
+               && this_elt_dv.ndims () == 2)
+        {
+          // This is Octave's specialty.
+          // Character matrices support rows of unequal length.
+          if (m_dv.any_zero ())
+            {
+              // Empty existing element (bug #52542).
+              // Replace empty element with non-empty one.
+              m_dv = this_elt_dv;
+            }
+          else
+            {
+              if (this_elt_nc > cols ())
+                m_dv(1) = this_elt_nc;
+              m_dv(0) += this_elt_nr;
+            }
+        }
+      else if ((! m_any_class) && (! m_dv.hvcat (this_elt_dv, 0)))
+        eval_error ("vertical dimensions mismatch", m_dv, this_elt_dv);
+    }
+}
+
+// FIXME: This function is mostly a duplicate of both of the functions
+//
+//   void tm_const::init (const tree_matrix&)
+//   void tm_const::init (const octave_value *, const octave_value *,
+//                        const std::vector<int>&)
+//
+// The common parts should be factored out into a single function that
+// is used by the others.
+
+// Fixed row size
+void tm_const::init (const stack_element *beg, const stack_element *end,
+                     octave_idx_type row_length)
+{
+  bool first_elem = true;
+  bool first_elem_is_struct = false;
+
+  // Just eval and figure out if what we have is complex or all strings.
+  // We can't check columns until we know that this is a numeric matrix --
+  // collections of strings can have elements of different lengths.
+
+  for (;beg != end; beg += row_length)
+    {
+      octave_quit ();
+
+      tm_row_const row (beg, beg + row_length);
+
+      if (first_elem)
+        {
+          first_elem_is_struct = row.first_elem_struct_p ();
+
+          first_elem = false;
+        }
+
+      if (row.empty ())
+        continue;
+
+      if (m_all_strings && ! row.all_strings_p ())
+        m_all_strings = false;
+
+      if (m_all_sq_strings && ! row.all_sq_strings_p ())
+        m_all_sq_strings = false;
+
+      if (m_all_dq_strings && ! row.all_dq_strings_p ())
+        m_all_dq_strings = false;
+
+      if (! m_some_strings && row.some_strings_p ())
+        m_some_strings = true;
+
+      if (m_all_real && ! row.all_real_p ())
+        m_all_real = false;
+
+      if (m_all_complex && ! row.all_complex_p ())
+        m_all_complex = false;
+
+      if (m_all_empty && ! row.all_empty_p ())
+        m_all_empty = false;
+
+      if (! m_any_cell && row.any_cell_p ())
+        m_any_cell = true;
+
+      if (! m_any_sparse && row.any_sparse_p ())
+        m_any_sparse = true;
+
+      if (! m_any_class && row.any_class_p ())
+        m_any_class = true;
+
+      m_all_1x1 = m_all_1x1 && row.all_1x1_p ();
+
+      m_tm_rows.push_back (row);
+    }
+
+  if (m_any_cell && ! m_any_class && ! first_elem_is_struct)
+    {
+      for (auto& elt : m_tm_rows)
+        {
+          octave_quit ();
+
+          elt.cellify ();
+        }
+    }
+
+  first_elem = true;
+
+  for (const auto& elt : m_tm_rows)
+    {
+      octave_quit ();
+
+      octave_idx_type this_elt_nr = elt.rows ();
+      octave_idx_type this_elt_nc = elt.cols ();
+
+      std::string this_elt_class_name = elt.class_name ();
+      m_class_name = get_concat_class (m_class_name, this_elt_class_name);
+
+      dim_vector this_elt_dv = elt.dims ();
+
+      m_all_empty = false;
+
+      if (first_elem)
+        {
+          first_elem = false;
+
+          m_dv = this_elt_dv;
+        }
+      else if (m_all_strings && m_dv.ndims () == 2
+               && this_elt_dv.ndims () == 2)
+        {
+          // This is Octave's specialty.
+          // Character matrices support rows of unequal length.
+          if (m_dv.any_zero ())
+            {
+              // Empty existing element (bug #52542).
+              // Replace empty element with non-empty one.
+              m_dv = this_elt_dv;
+            }
+          else
+            {
+              if (this_elt_nc > cols ())
+                m_dv(1) = this_elt_nc;
+              m_dv(0) += this_elt_nr;
+            }
+        }
+      else if ((! m_any_class) && (! m_dv.hvcat (this_elt_dv, 0)))
+        eval_error ("vertical dimensions mismatch", m_dv, this_elt_dv);
+    }
+}
+
+#endif
+
 octave_value
 tm_const::char_array_concat (char string_fill_char) const
 {
--- a/libinterp/parse-tree/pt-tm-const.h	Fri Apr 19 18:30:39 2024 +0200
+++ b/libinterp/parse-tree/pt-tm-const.h	Fri Apr 19 12:57:20 2024 -0400
@@ -47,6 +47,10 @@
 
 class tree_evaluator;
 
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+union stack_element;
+#endif
+
 // Evaluate tree_matrix objects and convert them to octave_value
 // arrays (full and sparse numeric, char, cell, struct, class and
 // anything else that works like an array).  Use a separate class
@@ -157,6 +161,10 @@
     init (row, tw);
   }
 
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+  tm_row_const (const stack_element *beg, const stack_element *end);
+#endif
+
   tm_row_const (const tm_row_const&) = default;
 
   tm_row_const& operator = (const tm_row_const&) = delete;
@@ -182,6 +190,10 @@
   void init_element (const octave_value&, bool&);
 
   void init (const tree_argument_list&, tree_evaluator& tw);
+
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+  void init (const stack_element *beg, const stack_element *end);
+#endif
 };
 
 class tm_const : public tm_info
@@ -199,6 +211,14 @@
     init (tm);
   }
 
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+  tm_const (const stack_element *beg, const stack_element *end,
+            octave_idx_type n_rows, tree_evaluator& tw);
+
+  tm_const (const stack_element *beg, const stack_element *end,
+            const std::vector<int>& row_lengths, tree_evaluator& tw);
+#endif
+
   OCTAVE_DISABLE_COPY_MOVE (tm_const)
 
   ~tm_const () = default;
@@ -217,6 +237,14 @@
 
   void init (const tree_matrix& tm);
 
+#if defined (OCTAVE_ENABLE_BYTECODE_EVALUATOR)
+  void init (const stack_element *beg, const stack_element *end,
+             octave_idx_type row_length);
+
+  void init (const stack_element *beg, const stack_element *end,
+             const std::vector<int>& row_lengths);
+#endif
+
   octave_value char_array_concat (char string_fill_char) const;
 
   octave_value class_concat () const;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/scripts/help/bytecode.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,68 @@
+########################################################################
+##
+## Copyright (C) 2023-2024 The Octave Project Developers
+##
+## See the file COPYRIGHT.md in the top-level directory of this
+## distribution or <https://octave.org/copyright/>.
+##
+## This file is part of Octave.
+##
+## Octave is free software: you can redistribute it and/or modify it
+## under the terms of the GNU General Public License as published by
+## the Free Software Foundation, either version 3 of the License, or
+## (at your option) any later version.
+##
+## Octave is distributed in the hope that it will be useful, but
+## WITHOUT ANY WARRANTY; without even the implied warranty of
+## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+## GNU General Public License for more details.
+##
+## You should have received a copy of the GNU General Public License
+## along with Octave; see the file COPYING.  If not, see
+## <https://www.gnu.org/licenses/>.
+##
+########################################################################
+
+## -*- texinfo -*-
+## @deftypefn {} {} bytecode ()
+## Summary of commands related to Octave's bytecode interpreter.
+##
+## As of Octave 9, the bytecode interpreter is considered @emph{experimental}.
+## The user is encouraged to test it with that in mind.  All bytecode
+## functions, being experimental, may be renamed in future.
+##
+## To switch on the bytecode interpreter, type: @code{__vm_enable__ (1)}
+##
+## To switch it off, type: @code{__vm_enable__ (0)}
+##
+## To always use it, add @code{__vm_enable__ (1)}
+## to your Octave startup file (@file{.octaverc} or similar).
+##
+## For more information on each command and available options use
+## @code{help CMD}.
+##
+## Other useful bytecode commands available in Octave are:
+##
+## @table @code
+## @item __vm_compile__
+## Compile a specified function to bytecode.
+##
+## @item __vm_profile__
+## Profile the code running in the bytecode interpreter.
+##
+## @end table
+##
+## There are also several private functions whose names also begin with
+## @code{__vm_}.  These are intended for developer use.
+##
+## @c FIXME: Use seealso macro when functions are no longer experimental.
+## See also: __vm_enable__, __vm_compile__, __vm_profile__.
+## @end deftypefn
+
+function bytecode ()
+  help ("bytecode");
+endfunction
+
+
+## Mark file as being tested.  No real test needed for a documentation .m file
+%!assert (1)
--- a/scripts/help/module.mk	Fri Apr 19 18:30:39 2024 +0200
+++ b/scripts/help/module.mk	Fri Apr 19 12:57:20 2024 -0400
@@ -13,6 +13,7 @@
   %reldir%/__unimplemented__.m \
   %reldir%/ans.m \
   %reldir%/bessel.m \
+  %reldir%/bytecode.m \
   %reldir%/debug.m \
   %reldir%/doc.m \
   %reldir%/doc_cache_create.m \
--- a/scripts/help/warning_ids.m	Fri Apr 19 18:30:39 2024 +0200
+++ b/scripts/help/warning_ids.m	Fri Apr 19 12:57:20 2024 -0400
@@ -148,6 +148,11 @@
 ## the others.
 ## By default, the @code{Octave:charmat-truncated} warning is enabled.
 ##
+## @item Octave:bytecode-compilation
+## If the @code{Octave:bytecode-compilation} warning is enabled, a warning is
+## printed when the bytecode compiler encounters unsupported code constructs.
+## By default, the @code{Octave:bytecode-compilation} warning is enabled.
+##
 ## @item Octave:classdef-to-struct
 ## If the @code{Octave:classdef-to-struct} warning is enabled, a warning
 ## is issued when a classdef object is forcibly converted into a struct with
--- a/test/Makefile.am	Fri Apr 19 18:30:39 2024 +0200
+++ b/test/Makefile.am	Fri Apr 19 12:57:20 2024 -0400
@@ -120,6 +120,8 @@
 include classdef-multiple-inheritance/module.mk
 include classes/module.mk
 include colon-op/module.mk
+include compile-bench/module.mk
+include compile/module.mk
 include ctor-vs-method/module.mk
 include fcn-handle/module.mk
 include file-encoding/module.mk
@@ -155,6 +157,14 @@
 check-local: $(GENERATED_TEST_FILES) $(MEX_TEST_FUNCTIONS) | $(OCTAVE_INTERPRETER_TARGETS) $(octave_dirstamp)
 	$(AM_V_at)$(call run-octave-tests)
 
+if AMCOND_ENABLE_BYTECODE_EVALUATOR
+check-bytecode-evaluator: $(BUILT_SOURCES) $(GENERATED_TEST_FILES) $(MEX_TEST_FUNCTIONS) | $(OCTAVE_INTERPRETER_TARGETS) $(octave_dirstamp)
+	$(AM_V_at)$(call run-octave-tests,__vm_enable__ (1))
+
+check-tree-evaluator: $(BUILT_SOURCES) $(GENERATED_TEST_FILES) $(MEX_TEST_FUNCTIONS) | $(OCTAVE_INTERPRETER_TARGETS) $(octave_dirstamp)
+	$(AM_V_at)$(call run-octave-tests,__vm_enable__ (0))
+endif
+
 COVERAGE_DIR = coverage
 COVERAGE_INFO = $(COVERAGE_DIR)/$(PACKAGE).info
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile-bench/bench-octave/bench.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,181 @@
+function bench (varargin)
+
+  % The tests to run
+  %
+  % {name, {arg_type, n}, ...}
+  tests = get_bench_conf ();
+
+  reg = '';
+  calibrate = 0;
+  do_both = 1;
+  n_factor = 1;
+  no_compile = 0;
+  filter = "";
+  i = 1;
+  while i <= nargin
+    arg = varargin{i++};
+    if strcmp (arg, "reg")
+      assert (i <= nargin)
+      reg = varargin{i++};
+    elseif strcmp (arg, "calibrate")
+      calibrate = 1;
+    elseif strcmp (arg, "n_factor")
+      assert (i <= nargin)
+      n_factor = varargin{i++};
+    elseif strcmp (arg, "nocompile")
+      no_compile = 1;
+    end
+  end
+
+  % For compatibility with older releases and Matlab
+  if ~exist("__vm_compile__")
+    __vm_compile__ = @(varargin) true;
+  end
+  if ~exist("__dummy_mark_1__")
+    __dummy_mark_1__ = @() true;
+  end
+  if ~exist("__dummy_mark_2__")
+    __dummy_mark_2__ = @() true;
+  end
+
+  cal_res = {};
+
+  for i = 1:length(tests)
+
+    test = tests{i};
+    name = test{1};
+    complexity = test{3};
+    also_compile = test{4};
+    is_script = test{5};
+    j = 1;
+
+    % Skip on not matching regex, if there is one
+    if length (reg) && isempty (regexp (name, reg))
+      continue;
+    end
+
+    fn = str2func (name);
+
+    printf ("%s:\n", name);
+
+    n = 0;
+    arg = 0;
+    conf = test{2};
+    conf_type = conf {1}; %"n", "rand sq" etc
+    n_norm = conf{2};
+
+    if strcmp (conf_type, "n")
+      n = round (conf{2} * n_factor);
+      arg = n;
+    elseif strcmp (conf_type, "rand sq")
+      if exist ("rng") % rng is a kinda new function. Keep backwards compatibility
+        rng (0); % Reset rng
+      end
+      n = round (conf{2} * n_factor);
+      arg = randn (n);
+    elseif strcmp (conf_type, "rand rowvec")
+      if exist ("rng")
+        rng (0); % Reset rng
+      end
+      n = round (conf{2} * n_factor);
+      arg = randn (n, 1);
+    end
+    n = round (n);
+
+    iters = 1:1;
+    if calibrate
+      iters = 1:40;
+      e_i = 0;
+    end
+
+    for j = iters
+
+      if strcmp (conf_type, "n")
+        n = round (n_norm * n_factor);
+        arg = n;
+      elseif strcmp (conf_type, "rand sq")
+        if exist ("rng")
+          rng (0); % Reset rng
+        end
+        n = round (n_norm * n_factor);
+        arg = randn (n);
+      elseif strcmp (conf_type, "rand rowvec")
+        if exist ("rng")
+          rng (0); % Reset rng
+        end
+        n = round (n_norm * n_factor);
+        arg = randn (n, 1);
+      end
+      n = round (n);
+
+
+      tic;
+      [ccttot0, cctuser0, cctsys0] = cputime;
+      if !no_compile
+        if ! __vm_compile__ (name)
+          warning ("Could not compile %s, skipping ...", name)
+          continue;
+        end
+      end
+      [ccttot1, cctuser1, cctsys1] = cputime;
+      cctwall = toc;
+
+      try
+        [cttot0, ctuser0, ctsys0] = cputime;
+        tic;
+        __dummy_mark_1__ ();
+        if is_script
+          fn ();
+        else
+          fn (arg);
+        end
+        __dummy_mark_1__ ();
+        __dummy_mark_2__ ();
+        [cttot1, ctuser1, ctsys1] = cputime;
+        ctwall = toc;
+      catch err
+        warning ("Could not run %s due to '%s', skipping ...", name, err.message)
+        continue;
+      end
+
+      printf ("                %-16s %-16s %-16s %-16s %-16s\n", "t tic","t cpu", "t usr" , "t sys", "n");
+      printf ("    Runtime:    %-16g %-16g %-16g %-16g %-16g\n", ctwall, cttot1 - cttot0, ctuser1 - ctuser0, ctsys1 - ctsys0, n);
+      printf ("    Compiletime %-16g %-16g %-16g %-16g\n\n", cctwall, ccttot1 - ccttot0, cctuser1 - cctuser0, cctsys1 - cctsys0);
+
+      if calibrate
+        t_target = 1;
+        e = ctwall - t_target;
+        if e > 0.5
+          e = 0.5;
+        elseif e < -0.5
+          e = -0.5;
+        end
+
+        n_norm_new = n_norm * (1 - e);
+        if j > 30
+          n_norm = 0.998 * n_norm + 0.002 * n_norm_new;
+        elseif j > 20
+          n_norm = 0.99 * n_norm + 0.01 * n_norm_new;
+        elseif j > 10
+          n_norm = 0.95 * n_norm + 0.05 * n_norm_new;
+        else
+          n_norm = n_norm_new;
+        end
+
+        printf ("    n = %g, e = %g, e_i = %g\n", n_norm, e, e_i);
+      end
+    end
+
+    if calibrate
+      printf ("    Calibrated n: %d\n\n", n);
+      cal_res{end + 1} = {name, n};
+    end
+  end
+
+  if calibrate
+    printf ("Calibrated n:s for 1s\n\n");
+    for e = cal_res
+      printf ("%s %d\n", e{1}{1}, e{1}{2});
+    end
+  end
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile-bench/bench-octave/bench_cov.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,5 @@
+function bench_cov (v)
+  for i = 1:10000
+    cov (v, v);
+  end
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile-bench/bench-octave/bench_median.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,5 @@
+function bench_median (v)
+  for i = 1:10000
+    median (v);
+  end
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile-bench/bench-octave/bench_valgrind.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,77 @@
+function bench_valgrind (octbin_path, factor, benchname_filter, octbin_path_ref, logdir)
+
+  s = unix ("valgrind --version");
+  if s
+    error ("Valgrind probably not installed");
+  end
+
+  orig_dir = pwd;
+
+  path_to_bench_folder = which ("bench_valgrind");
+  path_to_bench_folder = strrep (path_to_bench_folder, "bench_valgrind.m", "");
+  logsubfolder_name = ["run_" datestr(now, "yyyy_mm_dd_HH_MM_ss")];
+
+  tests = get_bench_conf ();
+
+  result = {};
+
+  unwind_protect
+    mkdir (logdir);
+    cd (logdir);
+    mkdir (logsubfolder_name)
+    cd (logsubfolder_name)
+
+    for i = 1:length(tests)
+
+      test = tests{i};
+      name = test{1};
+      complexity = test{3};
+      also_compile = test{4};
+      is_script = test{5};
+
+      % Skip on not matching regex, if there is one
+      if length (benchname_filter) && isempty (regexp (name, benchname_filter))
+        continue;
+      end
+
+      % logfilename, octbin, bench folder, benchname filter
+      cmd_template = ["valgrind --tool=callgrind  --callgrind-out-file=%s  --separate-recs=10 " ...
+                      "--dump-instr=yes --collect-jumps=yes \"--dump-after=dummy_mark_1\" " ...
+                      " %s -W --eval \"__vm_enable__ (1); cd %s; bench('reg','%s','n_factor', %d);exit(0)\""];
+
+      logfilename1 = ["callgrind.out.log_mark_", name, "_", num2str(i), "_", name, ".log"];
+      logfilename2 = ["callgrind.out.log_ref1_", name, "_", num2str(i), "_", name, ".log"];
+
+      cmd_1 = sprintf (cmd_template, logfilename1, octbin_path, path_to_bench_folder, name, factor);
+      cmd_2 = sprintf (cmd_template, logfilename2, octbin_path_ref, path_to_bench_folder, name, factor);
+
+      [status1, stdout1] = unix (cmd_1);
+      [status2, stdout2] = unix (cmd_2);
+
+      log1 = fileread ([logfilename1 ".2"]);
+      log2 = fileread ([logfilename2 ".2"]);
+
+      [~,~,~,~, cost1] = regexp (log1, "summary:\\s+(\\d+)", 'once');
+      cost1 = str2num (cost1{1});
+      [~,~,~,~, cost2] = regexp (log2, "summary:\\s+(\\d+)", 'once');
+      cost2 = str2num (cost2{1});
+
+      result{end + 1} = {name, cost1, cost2, cost2/cost1*100};
+    endfor
+
+    summary = sprintf ("%30s %15s %15s %15s\n", "name", "mark", "ref", "ref/mark%");
+    for i = 1:length (result)
+      run_result = result{i};
+      summary = [summary, sprintf("%30s %15d %15d %15.7f\n", run_result{:})];
+    endfor
+
+    printf ("\n\n\n%s", summary);
+    % Save summary to file
+    f = fopen ("summary.log", "w");
+    fprintf (f, "%s", summary);
+    fclose (f);
+
+  unwind_protect_cleanup
+    cd (orig_dir);
+  end
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile-bench/bench-octave/cdef_ctor.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,12 @@
+function cdef_ctor (n)
+  for i = 1:n
+    obj = cdef_foo ("asd", i);
+  end
+end
+
+% bugg
+function cdef_ctor1 (n)
+  for i = 1:n
+    obj = cdef_ctor1 ("asd", i);
+  end
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile-bench/bench-octave/cdef_foo.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,25 @@
+classdef cdef_foo
+  properties
+    s = "";
+    a = 0;
+  end
+
+  methods
+    function obj = cdef_foo (s, a)
+      obj.s = s;
+      obj.a = a;
+    end
+
+    function c = method1 (obj, b)
+       c = b + obj.a;
+    end
+
+    function a = getter1 (obj)
+      a = obj.a;
+    end
+
+    function obj = setter1 (obj, a)
+      obj.a = a;
+    end
+  end
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile-bench/bench-octave/cdef_method1.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,6 @@
+function cdef_method1 (n)
+  obj = cdef_foo ("qwe", 123);
+  for i = 1:n
+    obj.method1 (2);
+  end
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile-bench/bench-octave/do_until_loop_empty.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,6 @@
+function do_until_loop_empty (n)
+  i = 0;
+  do
+    i++;
+  until i >= n
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile-bench/bench-octave/dummy_script.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,1 @@
+%%% empty
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile-bench/bench-octave/fib.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,8 @@
+function b = fib (n)
+    if n <= 1
+        b = 1;
+        return;
+    endif
+
+    b = fib (n - 1) + fib (n - 2);
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile-bench/bench-octave/for_loop_binop_1.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,5 @@
+function for_loop_binop_1 (n)
+  for i = 1:n
+    j = 1*2*3*4;
+  end
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile-bench/bench-octave/for_loop_binop_2.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,7 @@
+function for_loop_binop_2 (n)
+  for i = 1:n
+    j = 1*2*3*4 * i;
+    g = j / 2 + 1;
+    f = g + j - 3;
+  end
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile-bench/bench-octave/for_loop_binop_2_script.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,5 @@
+for i = 1:n
+  j = 1*2*3*4 * i;
+  g = j / 2 + 1;
+  f = g + j - 3;
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile-bench/bench-octave/for_loop_call_script_1.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,3 @@
+  for i = 1:n
+    dummy_script ();
+  end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile-bench/bench-octave/for_loop_empty.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,4 @@
+function for_loop_empty (n)
+  for i = 1:n
+  end
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile-bench/bench-octave/for_loop_fncall.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,5 @@
+function for_loop_fncall (n)
+  for i = 1:n
+    max (i, 2);
+  end
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile-bench/bench-octave/for_loop_ifs.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,29 @@
+function for_loop_ifs (n)
+  for i = 1:n
+    if i == 100
+      continue;
+    elseif i == 300
+      continue;
+    end
+
+    if i * 2 == 3002
+      continue;
+    end
+
+    if i < 0
+      break;
+    end
+
+    if i == -1024
+      disp ("foooo");
+    end
+
+    if i == n + 1
+      break;
+    end
+
+    if ~i
+      break;
+    end
+  end
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile-bench/bench-octave/for_loop_matselfmul.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,18 @@
+function A = for_loop_matselfmul (sq)
+  A = zeros (size (sq));
+
+  cols = size (sq, 2);
+  rows = size (sq, 1);
+
+  assert (cols == rows);
+
+  n = cols;
+
+  for i=1:n
+    for j=1:n
+      for k=1:n
+        A(i,j) = A(i,j)+sq(i,k)*sq(k,j);
+      end
+    end
+  end
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile-bench/bench-octave/for_loop_silly.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,5 @@
+function for_loop_silly (n)
+  for i = 1:n
+    j = i;
+  end
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile-bench/bench-octave/for_loop_sinpi.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,5 @@
+function for_loop_sinpi (n)
+  for i = 1:n
+    j = sin (pi * i);
+  end
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile-bench/bench-octave/for_loop_subfun_1.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,13 @@
+function for_loop_subfun_1 (n)
+
+  for i = 1:n
+    suby ();
+    suby2 (1, 2);
+  end
+end
+
+function suby ()
+end
+
+function c = suby2 (a, b)
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile-bench/bench-octave/for_sum_1.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,6 @@
+function for_sum_1 (mat)
+  sum = 0;
+  for i = 1:length (mat)
+    sum = sum + mat (i);
+  end
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile-bench/bench-octave/for_sum_2.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,6 @@
+function sum = for_sum_2 (arg)
+  sum = 0;
+  for i = arg'
+    sum = sum + i;
+  end
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile-bench/bench-octave/get_bench_conf.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,28 @@
+function conf = get_bench_conf ()
+  conf = {
+    % name              argument        O(n^x) dunno  is_script
+    {"for_loop_empty", {"n", 206824596}, 1, {}, 0},
+    {"for_loop_silly", {"n", 34894840}, 1, {}, 0},
+    {"for_loop_binop_1", {"n", 20300088}, 1, {}, 0},
+    {"for_loop_binop_2", {"n", 10300088}, 1, {}, 0},
+    {"for_loop_binop_2_script", {"n", 10300088}, 1, {}, 1},
+    {"for_loop_sinpi", {"n", 12991066}, 1, {}, 0},
+    {"for_loop_ifs", {"n", 5874007}, 1, {}, 0},
+    {"while_loop_empty", {"n", 24237997}, 1, {}, 0},
+    {"do_until_loop_empty", {"n", 27109647}, 1, {}, 0},
+    {"for_loop_subfun_1", {"n", 11930390}, 1, {}, 0},
+    {"for_loop_call_script_1", {"n", 11930390}, 1, {}, 1},
+    {"for_loop_matselfmul", {"rand sq",150}, 3, {}, 0},
+    {"for_sum_1", {"rand rowvec", 19267692}, 1, {}, 0},
+    {"for_sum_2", {"rand rowvec", 8742659}, 1, {}, 0},
+    {"qsort_recursive", {"rand rowvec", 107851}, 1, {}, 0}, % Mostly copies vectors around
+    {"qsort_iterative", {"rand rowvec", 344418}, 1, {}, 0},
+    {"for_loop_fncall", {"n", 2164885}, 1, {}, 0},
+    {"bench_median", {"rand rowvec", 1927}, 1, {}, 0},
+    {"bench_cov", {"rand rowvec", 15261}, 1, {}, 0},
+    {"str_mod", {"n", 2335290}, 1, {}, 0},
+    {"fib", {"n", 31}, 1, {}, 0},
+    {"cdef_ctor", {"n", 94964}, 1, {}, 0},
+    {"cdef_method1", {"n", 164837}, 1, {}, 0},
+  };
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile-bench/bench-octave/qsort_iterative.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,48 @@
+function A = qsort_iterative (A)
+  len = length (A);
+  stack = zeros (64, 1);
+
+  top = 0;
+
+  % Push initial indices to the stack
+  stack (++top) = 1;
+  stack (++top) = len;
+
+  while top > 0
+    % Pop indices
+    high = stack (top--);
+    low = stack (top--);
+
+    % Partition part of the algorithm
+    p = low - 1; % pivot index
+    x = A (high);% pivot value
+
+    % Swap so that there are two parts. One less than or equal to the pivot
+    % value and one higher
+    for j = low:high - 1
+      if A(j) <= x
+        p++;
+        tmp = A(j);
+        A(j) = A(p);
+        A(p) = tmp;
+      end
+    end
+    % Swap the pivot value with the first value bigger than the pivot
+    p++;
+    tmp = A(high);
+    A(high) = A(p);
+    A(p) = tmp;
+    % End partition
+
+    % Push left and right indices (if there are any value to the left or right)
+    if p - 1 > low
+      stack(++top) = low;
+      stack(++top) = p - 1;
+    end
+
+    if p + 1 < high
+      stack(++top) = p + 1;
+      stack(++top) = high;
+    end
+  end
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile-bench/bench-octave/qsort_recursive.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,36 @@
+% Most time is spent copying vectors anyway so terrible test really.
+
+function A = qsort_recursive (A)
+  if isempty (A) || length(A) == 1
+    return;
+  end
+
+  [p, A] = partion (A);
+
+  left = qsort_recursive (A(1:p - 1));
+  right = qsort_recursive (A(p + 1:end));
+
+  A = [left A(p) right];
+end
+
+function [p, A] = partion (A)
+  lo = 1;
+  hi = length (A);
+
+  pivot = A(hi);
+
+  p = lo - 1;
+
+  for j = lo:1:hi-1
+    if A(j) <= pivot
+      p++;
+      tmp = A(j);
+      A (j) = A (p);
+      A (p) = tmp;
+    end
+  end
+  p++;
+  tmp = A(hi);
+  A(hi) = A(p);
+  A(p) = tmp;
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile-bench/bench-octave/str_mod.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,9 @@
+function str_mod (n)
+
+  s1 = "qweasd";
+  s2 = "zxccvb";
+  for i = 1:n
+    s1 (2) = "f";
+    s2 (1) = s1(3);
+  end
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile-bench/bench-octave/while_loop_empty.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,6 @@
+function while_loop_empty (n)
+  i = 0;
+  while i < n
+    i++;
+  end
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile-bench/bench-py3/bench.py	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,149 @@
+#!/usr/bin/env python3
+
+from timeit import default_timer as timer
+from array import *
+import random
+import math
+
+def fib (n):
+    if n <= 1:
+        return 1
+
+    return fib (n - 1) + fib (n - 2)
+
+def for_loop_empty (n):
+    for i in range (1, n + 1):
+        continue
+
+def for_loop_silly (n):
+    for i in range (1, n + 1):
+        j = i
+
+def for_loop_binop_1(n):
+    for i in range (1, n + 1):
+        j = 1*2*3*4
+
+def for_loop_sinpi (n):
+    for i in range (1, n):
+        j = math.sin (math.pi * i)
+
+def for_loop_ifs (n):
+    for i in range (1, n + 1):
+        if i == 100.0:
+            continue
+        elif i == 300.0:
+            continue
+
+        if i * 2.0 == 3002.0:
+            continue
+
+        if i < 0.0:
+            break
+
+        if i == -1024.0:
+            print ("fooo")
+
+        if i == n + 1.0:
+            break
+
+        if not i:
+            break
+
+def while_loop_empty (n):
+    i = 0.0
+    while i < n:
+        i += 1
+
+def for_loop_subfun_1 (n):
+    for i in range (n):
+        suby ()
+
+def suby ():
+    return
+
+def qsort_iterative (A):
+    l = len (A)
+    stack = [0] * 128 # Probably big enough
+
+    top = 0
+    stack[top] = 0
+    top += 1
+    stack[top] = l - 1
+    top += 1
+
+    while top > 0:
+        top -= 1
+        high = stack[top]
+        top -= 1
+        low = stack[top]
+
+        p = low - 1
+        x = A[high]
+
+        for j in range (low, high):
+            if A[j] <= x:
+                p += 1
+                tmp = A[j]
+                A[j] = A[p]
+                A[p] = tmp
+        p += 1
+        tmp = A[high]
+        A[high] = A[p]
+        A[p] = tmp
+
+        if p - 1 > low:
+            stack[top] = low
+            top += 1
+            stack[top] = p - 1
+            top += 1
+        if p + 1 < high:
+            stack[top] = p + 1
+            top += 1
+            stack[top] = high
+            top += 1
+
+    return A
+
+def time_fn_call (fn, arg):
+    start = timer()
+    fn (arg)
+    end = timer()
+
+    return end - start
+
+def randn (rows, cols):
+    if rows == 1:
+        arr = array('d')
+        for i in range(cols):
+            arr.append(random.gauss (0, 1))
+        return arr
+
+    arr = []
+    for i in range (rows):
+        arr_row = array('d')
+        for j in range(cols):
+          arr_row.append(random.gauss (0, 1))
+        arr.append (arr_row)
+    return arr
+
+tests = \
+   [[for_loop_empty, 206824596],
+    [for_loop_silly, 34894840],
+    [for_loop_binop_1, 20300088],
+    [for_loop_sinpi, 12991066],
+    [for_loop_ifs, 5874007],
+    [while_loop_empty, 24237997],
+    [for_loop_subfun_1, 11930390],
+    [fib, 31],
+    [qsort_iterative, "rowvec", 344418]]
+
+def main():
+    for t in tests:
+        if (t[1] == "rowvec"):
+            dt = time_fn_call (t[0], randn (1, t[2]))
+        else:
+            dt = time_fn_call (t[0], t[1])
+        print (t[0].__name__ + " in %g s" % dt)
+
+if __name__ == "__main__":
+    main ()
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile-bench/module.mk	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,32 @@
+compile_bench_TEST_FILES = \
+  %reldir%/bench-octave/bench.m \
+  %reldir%/bench-octave/bench_cov.m \
+  %reldir%/bench-octave/bench_median.m \
+  %reldir%/bench-octave/bench_valgrind.m \
+  %reldir%/bench-octave/cdef_foo.m \
+  %reldir%/bench-octave/cdef_ctor.m \
+  %reldir%/bench-octave/cdef_method1.m \
+  %reldir%/bench-octave/dummy_script.m \
+  %reldir%/bench-octave/do_until_loop_empty.m \
+  %reldir%/bench-octave/fib.m \
+  %reldir%/bench-octave/for_loop_binop_1.m \
+  %reldir%/bench-octave/for_loop_binop_2.m \
+  %reldir%/bench-octave/for_loop_binop_2_script.m \
+  %reldir%/bench-octave/for_loop_call_script_1.m \
+  %reldir%/bench-octave/for_loop_empty.m \
+  %reldir%/bench-octave/for_loop_fncall.m \
+  %reldir%/bench-octave/for_loop_ifs.m \
+  %reldir%/bench-octave/for_loop_matselfmul.m \
+  %reldir%/bench-octave/for_loop_silly.m \
+  %reldir%/bench-octave/for_loop_sinpi.m \
+  %reldir%/bench-octave/for_loop_subfun_1.m \
+  %reldir%/bench-octave/for_sum_1.m \
+  %reldir%/bench-octave/for_sum_2.m \
+  %reldir%/bench-octave/get_bench_conf.m \
+  %reldir%/bench-octave/qsort_iterative.m \
+  %reldir%/bench-octave/qsort_recursive.m \
+  %reldir%/bench-octave/str_mod.m \
+  %reldir%/bench-octave/while_loop_empty.m \
+  %reldir%/bench-py3/bench.py
+
+TEST_FILES += $(compile_bench_TEST_FILES)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile/bytecode.tst	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,849 @@
+########################################################################
+##
+## Copyright (C) 2022-2024 The Octave Project Developers
+##
+## See the file COPYRIGHT.md in the top-level directory of this
+## distribution or <https://octave.org/copyright/>.
+##
+## This file is part of Octave.
+##
+## Octave is free software: you can redistribute it and/or modify it
+## under the terms of the GNU General Public License as published by
+## the Free Software Foundation, either version 3 of the License, or
+## (at your option) any later version.
+##
+## Octave is distributed in the hope that it will be useful, but
+## WITHOUT ANY WARRANTY; without even the implied warranty of
+## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+## GNU General Public License for more details.
+##
+## You should have received a copy of the GNU General Public License
+## along with Octave; see the file COPYING.  If not, see
+## <https://www.gnu.org/licenses/>.
+##
+########################################################################
+
+## Just clear the cached string in __prog_output_assert__() and clear
+## class-method lookups due to a possible bug
+%!test
+%! __prog_output_assert__ ("");
+%! % Overloading of class-methods seems to stick so we need to clear them since we overload
+%! % double's display. Is this a bug ???
+%! clear classes
+
+## Test binary expressions
+%!testif ENABLE_BYTECODE_EVALUATOR
+%! __vm_enable__ (0, "local"); % Disable the vm for the tree_walker run
+%!
+%! clear all % We want all compiled functions to be cleared so that we can run the tree_walker
+%!
+%! key = "10 -10 24 0.041666666666666664 1 -5.0915810909090906 13 1 0 1 0 truthy1 1 falsy3 falsy4 truthy5 1 truthy7 truthy8 1 falsy9 falsy11 0 1 0 1 0 0 1 1 0 1 0 1 1 1 0 1 0 1 0 0 0 0 1 1 1 1 1 1 1 1 ";
+%!
+%! __vm_compile__ bytecode_binops clear;
+%! bytecode_binops ();
+%! assert (__prog_output_assert__ (key));
+%!
+%! __vm_enable__ (1, "local");
+%! % We want to know that the function compiles, so do an explicit compile
+%! assert (__vm_compile__ ("bytecode_binops"));
+%! bytecode_binops ();
+%! assert (__prog_output_assert__ (key));
+
+## Test subfunctions
+%!testif ENABLE_BYTECODE_EVALUATOR
+%! __vm_enable__ (0, "local");
+%! clear all
+%! key = "2 2 2 11 30 10 30 5  0 0 double 1 2 1 2 double 30 11 5  0 0 double 1 2 1 2 double 11 11 12 13 1 1 double 14 1 1 double 11 11 5 13 1 1 double 14 1 1 double 11 3 3 3 2 2 2 313 ret32:1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 ret32:1 ret32:ret32:1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 take32:1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 1 18 59 64 ";
+%! a = 313;
+%! % Gets called to ensure anonymous functions calls with
+%! % externally scoped variables work
+%! h = @() __printf_assert__ ("%d ", a);
+%!
+%! __vm_compile__ bytecode_subfuncs clear;
+%! bytecode_subfuncs (h);
+%! assert (__prog_output_assert__ (key));
+%!
+%! __vm_enable__ (1, "local");
+%! assert (__vm_compile__ ("bytecode_subfuncs"));
+%! bytecode_subfuncs (h);
+%! assert (__prog_output_assert__ (key));
+
+## Test if:s
+%!testif ENABLE_BYTECODE_EVALUATOR
+%! __vm_enable__ (0, "local");
+%! clear all
+%! key = "0 1 2 3 4 5 6 7 8 1 2 yay1 3 5 7 8 1 yay1 3 4 yay2 5 6 7 yay3 ";
+%!
+%! __vm_compile__ bytecode_if clear;
+%! bytecode_if ();
+%! assert (__prog_output_assert__ (key));
+%!
+%! __vm_enable__ (1, "local");
+%! assert (__vm_compile__ ("bytecode_if"));
+%! bytecode_if ();
+%! assert (__prog_output_assert__ (key));
+
+## Test for:s
+%!testif ENABLE_BYTECODE_EVALUATOR
+%! __vm_enable__ (0, "local");
+%! clear all
+%! key = "1 2 3 4 4 1 3 5 5 1 4 4 4 3 2 1 1 0.200 0.300 0.400 0.400 0.300 0.200 0.100 0.000 0.000 NaN NaN NaN 1 4 2 2 16 4 3 3 256 3 2 1  double 1 3 size 2 size 1 2 4 size 2 size 1  double q size 1 size 1 w size 1 size 1 e size 1 size 1 char single single 5 1 11 2 12 key:a val:1 1val:1 key:b val:1 3val:2 4val:2 2key:c val:string ";
+%!
+%! __vm_compile__ bytecode_for clear;
+%! bytecode_for ();
+%! assert (__prog_output_assert__ (key));
+%!
+%! __vm_enable__ (1, "local");
+%! assert (__vm_compile__ ("bytecode_for"));
+%! bytecode_for ();
+%! assert (__prog_output_assert__ (key));
+
+## Test while
+%!testif ENABLE_BYTECODE_EVALUATOR
+%! __vm_enable__ (0, "local");
+%! clear all
+%! key = "5 4 3 2 1 3 5 4 4 3 3 4 1 2 1 3 2 8 3 1 3 ";
+%!
+%! __vm_compile__ bytecode_while clear;
+%! bytecode_while ();
+%! assert (__prog_output_assert__ (key));
+%!
+%! __vm_enable__ (1, "local");
+%! assert (__vm_compile__ ("bytecode_while"));
+%! bytecode_while ();
+%! assert (__prog_output_assert__ (key));
+
+## Test assign
+%!testif ENABLE_BYTECODE_EVALUATOR
+%! __vm_enable__ (0, "local");
+%! clear all
+%! key = "2 3 1 1 2 3 2 3 2 2 6 18 2.000000 2.000000 3.000000 4.000000 5.000000 1 4 double 729.000000 324.000000 182.250000 116.640000 4 1 double 37.000000 81.000000 54.000000 118.000000 2 2 double ";
+%!
+%! __vm_compile__ bytecode_assign clear;
+%! bytecode_assign ();
+%! assert (__prog_output_assert__ (key));
+%!
+%! __vm_enable__ (1, "local");
+%! assert (__vm_compile__ ("bytecode_assign"));
+%! bytecode_assign ();
+%! assert (__prog_output_assert__ (key));
+
+## Test unary
+%!testif ENABLE_BYTECODE_EVALUATOR
+%! __vm_enable__ (0, "local");
+%! clear all
+%! key = "-1 4 1 2 3 4 1 3 2 4 0 0 ";
+%!
+%! __vm_compile__ bytecode_unary clear;
+%! bytecode_unary ();
+%! assert (__prog_output_assert__ (key));
+%!
+%! __vm_enable__ (1, "local");
+%! assert (__vm_compile__ ("bytecode_unary"));
+%! bytecode_unary ();
+%! assert (__prog_output_assert__ (key));
+
+## Test range
+%!testif ENABLE_BYTECODE_EVALUATOR
+%! __vm_enable__ (0, "local");
+%! clear all
+%! key = "1 2 3 1 3 5 1 3 5 1 1.1 1.2 1.3 1.4 1 0.9 0.8 0.7 7 7 1 8 10 8 10 8 9 10 11 8 9 10 11 10 8 10 8 -10 -9 -8 -7 -10 -9 -8 -7 ";
+%!
+%! __vm_compile__ bytecode_range clear;
+%! bytecode_range ();
+%! assert (__prog_output_assert__ (key));
+%!
+%! __vm_enable__ (1, "local");
+%! assert (__vm_compile__ ("bytecode_range"));
+%! bytecode_range ();
+%! assert (__prog_output_assert__ (key));
+
+## Test multi assign
+%!testif ENABLE_BYTECODE_EVALUATOR
+%! __vm_enable__ (0, "local");
+%! clear all
+%! key = "3 4 2 2 1 2 3 4 1 2 3 4 1 1 3 2 3 4 1 1 1 2 3 4 1 1 1 2 3 4 1 2 3 ";
+%!
+%! __vm_compile__ bytecode_multi_assign clear;
+%! bytecode_multi_assign ();
+%! assert (__prog_output_assert__ (key));
+%!
+%! __vm_enable__ (1, "local");
+%! assert (__vm_compile__ ("bytecode_multi_assign"));
+%! bytecode_multi_assign ();
+%! assert (__prog_output_assert__ (key));
+
+## Test subsasgn
+%!testif ENABLE_BYTECODE_EVALUATOR
+%! __vm_enable__ (0, "local");
+%! clear all
+%! clear functions  % persistent variables in bytecode_subsasgn
+%! key = "3 5 9 8 11 13 1 2 3 4 5 6 77 88 99 1010 1 2 3 987 987 6 77 88 99 1010 0 0 0 0 0 13 double 3 2 4 2 3 cell 1 3 6 7 2 3 1 4 5 1 3 5 2 4 6 7 7 7 7 7 7 1 2 3 1 3 3 2 3 2 3 1 3 1 2 3 4 4 4 3 4 5 6 1 5 3 4 1 5 -1 4 1 5 -1 8 3 3 3 3 3 3 3 3 1 1 3 1 ";
+%!
+%! __vm_compile__ bytecode_subsasgn clear;
+%! bytecode_subsasgn ();
+%! assert (__prog_output_assert__ (key), "bytecode_subsasgn failed uncompiled");
+%!
+%! __vm_enable__ (1, "local");
+%! assert (__vm_compile__ ("bytecode_subsasgn"));
+%! bytecode_subsasgn ();
+%! assert (__prog_output_assert__ (key), "bytecode_subsasgn failed compiled");
+
+## Test end
+%!testif ENABLE_BYTECODE_EVALUATOR
+%! __vm_enable__ (0, "local");
+%! clear all
+%! key = "1 3 2 4 1 5 6 7 2 2 5 5 6 6 1 2 3 4 5 2 2 2 3 3 4 fs 2 3 1 foo oo ";
+%!
+%! __vm_compile__ bytecode_end clear;
+%! bytecode_end ();
+%! assert (__prog_output_assert__ (key));
+%!
+%! __vm_enable__ (1, "local");
+%! assert (__vm_compile__ ("bytecode_end"));
+%! bytecode_end ();
+%! assert (__prog_output_assert__ (key));
+
+## Test matrix
+%!testif ENABLE_BYTECODE_EVALUATOR
+%! __vm_enable__ (0, "local");
+%! clear all
+%! key = "1 2 3 4 1 4 1 2 3 4 4 1 1 3 2 4 2 2 1 3 1 3 2 4 2 4 4 2  0 0 a b c d 7 15 10 22 2 2 30 1 1 1 2 3 4 2 4 6 8 3 6 9 12 4 8 12 16 4 4 1 1 1 0.0333333 0.0666667 0.1 0.133333 0.0666667 0.133333 0.2 0.266667 0.1 0.2 0.3 0.4 0.133333 0.266667 0.4 0.533333 4 4 1 0 0 1 2 2 30 1 1 10 14 14 20 2 2 0.0333333 0.0666667 0.1 0.133333 0.0666667 0.133333 0.2 0.266667 0.1 0.2 0.3 0.4 0.133333 0.266667 0.4 0.533333 4 4 2.5 -0.5 2 0 2 2 2 6 4 8 2 2 2 3 4 5 3 4 5 6 4 5 6 7 5 6 7 8 4 4 3 4 5 6 1 4 3 4 5 6 1 4 -1 0 1 2 1 4 2 4 6 8 1 4 0.5 1 1.5 2 1 4 0.5 1 1.5 2 1 4 1 4 9 16 1 4 1 1 1 1 1 4 1 1 1 1 1 4 1 4 27 256 1 4 1 2 3 4 2 4 6 8 3 6 9 12 4 8 12 16 4 4 1 0.5 0.333333 0.25 2 1 0.666667 0.5 3 1.5 1 0.75 4 2 1.33333 1 4 4 1 2 3 4 0.5 1 1.5 2 0.333333 0.666667 1 1.33333 0.25 0.5 0.75 1 4 4 1 4 27 256 4 1 qzwxeca s d  zzxxccz x c  1 258 33264 258 1 33264 ";
+%!
+%! __vm_compile__ bytecode_matrix clear;
+%! bytecode_matrix ();
+%! assert (__prog_output_assert__ (key));
+%!
+%! __vm_enable__ (1, "local");
+%! assert (__vm_compile__ ("bytecode_matrix"));
+%! bytecode_matrix ();
+%! assert (__prog_output_assert__ (key));
+
+## Test return
+%!testif ENABLE_BYTECODE_EVALUATOR
+%! __vm_enable__ (0, "local");
+%! clear all
+%! key = "2 baaar bääär baaaaz bääääz bååååz booz 1 1 2 1 1 1 2 1 silly silly ";
+%!
+%! __vm_compile__ bytecode_return clear;
+%! bytecode_return ();
+%! assert (__prog_output_assert__ (key));
+%!
+%! __vm_enable__ (1, "local");
+%! assert (__vm_compile__ ("bytecode_return"));
+%! bytecode_return ();
+%! assert (__prog_output_assert__ (key));
+
+## Test word list command
+%!testif ENABLE_BYTECODE_EVALUATOR
+%! __vm_enable__ (0, "local");
+%! clear all
+%! key = "A B C QWE ";
+%!
+%! __vm_compile__ bytecode_wordlistcmd clear;
+%! bytecode_wordlistcmd ();
+%! assert (__prog_output_assert__ (key));
+%!
+%! __vm_enable__ (1, "local");
+%! assert (__vm_compile__ ("bytecode_wordlistcmd"));
+%! bytecode_wordlistcmd ();
+%! assert (__prog_output_assert__ (key));
+
+## Test do until
+%!testif ENABLE_BYTECODE_EVALUATOR
+%! __vm_enable__ (0, "local");
+%! clear all
+%! key = "5 3 5 5 4 4 3 4 1 2 1 3 2 12 3 0 3 ";
+%!
+%! __vm_compile__ bytecode_dountil clear;
+%! bytecode_dountil ();
+%! assert (__prog_output_assert__ (key));
+%!
+%! __vm_enable__ (1, "local");
+%! assert (__vm_compile__ ("bytecode_dountil"));
+%! bytecode_dountil ();
+%! assert (__prog_output_assert__ (key));
+
+## Test cell
+%!testif ENABLE_BYTECODE_EVALUATOR
+%! __vm_enable__ (0, "local");
+%! clear all
+%! key = "a b a b 1 2 b c b c 1 2 char b c c d b d c e 2 2 b d f h j l c e g i k m 6 2 1 2 2 3 1 3 2 4 1 3 1 2 1 3 2 4 2 2 double qwe 1 3 char 1 2 ";
+%!
+%! __vm_compile__ bytecode_cell clear;
+%! bytecode_cell ();
+%! assert (__prog_output_assert__ (key));
+%!
+%! __vm_enable__ (1, "local");
+%! assert (__vm_compile__ ("bytecode_cell"));
+%! bytecode_cell ();
+%! assert (__prog_output_assert__ (key));
+
+## Test varargin
+%!testif ENABLE_BYTECODE_EVALUATOR
+%! __vm_enable__ (0, "local");
+%! clear all
+%! key = "2 3 1 2 1 1 1 1 1 2 3 4 1 4 4 0 0 0 1 2 3 4 1 3 4 2 3 4 1 3 3 2 2 3 4 1 4 4 1 0 0 1 0 0 0 2 1 1 1 1 2 3 4 1 3 4 2 3 4 1 3 3 2 2 3 4 1 4 4 1 2 1 2 4 1 2 0 0 2 1 nob 0 0 1 noa nob 0 0 0 2 1 2 4 1 2 3 4 3 3 2 1 0 ";
+%!
+%! __vm_compile__ bytecode_varargin clear;
+%! bytecode_varargin (1,2,3);
+%! assert (__prog_output_assert__ (key));
+%!
+%! __vm_enable__ (1, "local");
+%! assert (__vm_compile__ ("bytecode_varargin"));
+%! bytecode_varargin (1,2,3);
+%! assert (__prog_output_assert__ (key));
+%!
+%! __vm_enable__ (0, "local");
+%! clear all
+%! key = "0 0 1 1 1 1 1 2 3 4 1 4 4 0 0 0 1 2 3 4 1 3 4 2 3 4 1 3 3 2 2 3 4 1 4 4 1 0 0 1 0 0 0 2 1 1 1 1 2 3 4 1 3 4 2 3 4 1 3 3 2 2 3 4 1 4 4 1 2 1 2 4 1 2 0 0 2 1 nob 0 0 1 noa nob 0 0 0 2 1 2 4 1 2 3 4 1 3 2 1 0 ";
+%!
+%! __vm_compile__ bytecode_varargin clear;
+%! bytecode_varargin (1);
+%! assert (__prog_output_assert__ (key));
+%!
+%! __vm_enable__ (1, "local");
+%! assert (__vm_compile__ ("bytecode_varargin"));
+%! bytecode_varargin (1);
+%! assert (__prog_output_assert__ (key));
+
+## Test global variables
+%!testif ENABLE_BYTECODE_EVALUATOR
+%! __vm_enable__ (0, "local");
+%! clear all
+%! key = "double 0 0 1 1 1 1 1 2 2 2 400 100 0 1 3 double 1 1 1 2 double 1 2 1 1 11 eclass:double 1 1 3 4 double 1 2 400 100 1 1 1 1 3 4 1 1 5 6 1 1 1 2 double 1 2 1 2 double 1 2 1 1 3 4 eclass:double 1 2 3 4 double 1 2 0 0 1 1 3 4 1 1 5 6 1 0 2 double 2 double 11 2 6 4 5 double 1 5 11 double 1 1 22 double 1 1 33 double 1 1 3 double 1 1 4 double 1 1 10 double 1 1 2 3 double 1 2 3 double 1 1 2 double 1 1 55 double 1 1 7 double 1 1 0 11 12 4 11 12 4 ";
+%!
+%! __vm_compile__ bytecode_global_1 clear;
+%! clear global a;
+%! clear global b;
+%! clear global q
+%! global q % Used in test function
+%! q = 55;
+%! bytecode_global_1 ();
+%! assert (__prog_output_assert__ (key));
+%! assert (length(who('global','a')));
+%! assert (length(who('global','b')));
+%!
+%! __vm_enable__ (1, "local");
+%! assert (__vm_compile__ ("bytecode_global_1"));
+%! clear global a;
+%! clear global b;
+%! clear global q;
+%! global q % Used in test function
+%! q = 55;
+%! bytecode_global_1 ();
+%! assert (length(who('global','a')));
+%! assert (length(who('global','b')));
+%! assert (__prog_output_assert__ (key));
+%!
+%! global a b;
+%! assert (a == 5);
+%! assert (b == 6);
+%!
+%! clear global a;
+%! clear global b;
+%! clear global q;
+
+## Test switch
+%!testif ENABLE_BYTECODE_EVALUATOR
+%! __vm_enable__ (0, "local");
+%! clear all
+%! key = "yay yay2 yay3 yay4 yay5 yay6 yay7 yay8 1 2 3 3 1 3 3 4 4 1 3 3 4 4 2 yoo 2 3 3 1:1 for-end:12:2 3:3 for-end:3breaking:4 ";
+%!
+%! __vm_compile__ bytecode_switch clear;
+%! bytecode_switch;
+%! assert (__prog_output_assert__ (key));
+%!
+%! __vm_enable__ (1, "local");
+%! assert (__vm_compile__ ("bytecode_switch"));
+%! bytecode_switch;
+%! assert (__prog_output_assert__ (key));
+
+## Test eval (dynamic stack frames)
+%!testif ENABLE_BYTECODE_EVALUATOR
+%! __vm_enable__ (0, "local");
+%! clear all
+%! key = "3.000000 1.000000 1.000000 double 4.000000 1.000000 1.000000 double 4.000000 1.000000 1.000000 double 5.000000 4.000000 2.000000 3.000000 1.000000 1.000000 double 4.000000 1.000000 1.000000 double 4.000000 1.000000 1.000000 double 5.000000 4.000000 2.000000 1:11.000000 2:22.000000 3:33.000000 4:3.000000 5:22.000000 6:3.000000 7:3.000000 3 3 2 2 3.000000 3.000000 ";
+%!
+%! __vm_compile__ bytecode_eval_1 clear;
+%! bytecode_eval_1;
+%! assert (__prog_output_assert__ (key));
+%!
+%! __vm_enable__ (1, "local");
+%! assert (__vm_compile__ ("bytecode_eval_1"));
+%! bytecode_eval_1;
+%! assert (__prog_output_assert__ (key));
+
+## Test evalin and assignin
+%!testif ENABLE_BYTECODE_EVALUATOR
+%! __vm_enable__ (0, "local");
+%! clear all
+%! % We want to test all combinations of compiled and uncompiled evalin_1 and 2.
+%!
+%! key = "2.000000 yoyo yobase 3.000000 yoyo2 yobase2 123.000000 124.000000 11.000000 33.000000 ";
+%!
+%! caller_a = 2;
+%!
+%!
+%! __vm_compile__ bytecode_evalin_1 clear;
+%! __vm_compile__ bytecode_evalin_2 clear;
+%! bytecode_evalin_1 ();
+%! assert (__prog_output_assert__ (key));
+%!
+%!
+%! __vm_enable__ (1, "local");
+%! assert (__vm_compile__ ("bytecode_evalin_1"));
+%! bytecode_evalin_1 ();
+%! assert (__prog_output_assert__ (key));
+%!
+%!
+%! __vm_compile__ bytecode_evalin_1 clear;
+%! __vm_compile__ bytecode_evalin_2 clear;
+%! assert (__vm_compile__ ("bytecode_evalin_1"));
+%! assert (__vm_compile__ ("bytecode_evalin_2"));
+%! bytecode_evalin_1 ();
+%! assert (__prog_output_assert__ (key));
+%!
+%! __vm_compile__ bytecode_evalin_1 clear;
+%! __vm_compile__ bytecode_evalin_2 clear;
+%! assert (__vm_compile__ ("bytecode_evalin_2"));
+%! bytecode_evalin_1 ();
+%! assert (__prog_output_assert__ (key));
+%!
+
+## Test error messages
+%!testif ENABLE_BYTECODE_EVALUATOR
+%! ## Interpreter reference
+%! __vm_enable__ (0, "local");
+%! clear all
+%! __vm_compile__ bytecode_errors clear;
+%! fail ("bytecode_errors (0)", ...
+%!       "'qweqwe' undefined near line 9, column 6");
+%! fail ("bytecode_errors (1)", ...
+%!       "'b' undefined near line 15, column 7");
+%! fail ("bytecode_errors (2)", ...
+%!       "'b' undefined near line 19, column 7");
+%! fail ("bytecode_errors (3)", ...
+%!       "'b' undefined near line 23, column 7");
+%! fail ("bytecode_errors (4)", ...
+%!       "'b' undefined near line 27, column 3");
+%! fail ("bytecode_errors (5)", ...
+%!       "'b' undefined near line 31, column 3");
+%! fail ("bytecode_errors (6)", ...
+%!       'a\(3\): out of bound 2 \(dimensions are 1x2\)');
+%! fail ("bytecode_errors (7)", ...
+%!       'a\(-1\): subscripts must be either integers 1 to \(2\^(31|63)\)-1 or logicals');
+%! fail ("bytecode_errors (8)", ...
+%!       'operator \+: nonconformant arguments \(op1 is 1x3, op2 is 1x2\)');
+%!
+%! __vm_enable__ (1, "local");
+%! ## Bytecode running the same errors
+%! __vm_compile__ bytecode_errors;
+%! fail ("bytecode_errors (0)", ...
+%!       "'qweqwe' undefined near line 9, column 6");
+%! fail ("bytecode_errors (1)", ...
+%!       "'b' undefined near line 15, column 7");
+%! fail ("bytecode_errors (2)", ...
+%!       "'b' undefined near line 19, column 7");
+%! fail ("bytecode_errors (3)", ...
+%!       "'b' undefined near line 23, column 7");
+%! fail ("bytecode_errors (4)", ...
+%!       "'b' undefined near line 27, column 3");
+%! fail ("bytecode_errors (5)", ...
+%!       "'b' undefined near line 31, column 3");
+%! fail ("bytecode_errors (6)", ...
+%!       'a\(3\): out of bound 2 \(dimensions are 1x2\)');
+%! fail ("bytecode_errors (7)", ...
+%!       'a\(-1\): subscripts must be either integers 1 to \(2\^(31|63)\)-1 or logicals');
+%! fail ("bytecode_errors (8)", ...
+%!       'operator \+: nonconformant arguments \(op1 is 1x3, op2 is 1x2\)');
+
+## Test try catch
+%!testif ENABLE_BYTECODE_EVALUATOR
+%! __vm_enable__ (0, "local");
+%! clear all
+%! key = "yay yay2 yay3 ooo yay2 yay3 ooo2 ooo2 yay3 yay4 Nested error yay5 yay6 In catch yay7 qwe yay8 Error in subfunction yay9 'asd' undefined near line 87, column 11 yay10 operator *: nonconformant arguments (op1 is 1x2, op2 is 1x3) yay11 yoyo yay12 foo yay12 foo yay12 foo yay13 foo yay13 foo yay13 foo ";
+%!
+%! __vm_compile__ bytecode_trycatch clear;
+%! bytecode_trycatch;
+%! assert (__prog_output_assert__ (key));
+%!
+%! __vm_enable__ (1, "local");
+%! assert (__vm_compile__ ("bytecode_trycatch"));
+%! bytecode_trycatch;
+%! assert (__prog_output_assert__ (key));
+
+## Test unwind protect
+%!testif ENABLE_BYTECODE_EVALUATOR
+%! __vm_enable__ (0, "local");
+%! clear all
+%! key = "yay1 yay2 yay3 e1 subyyay1 subyyay2 subyyay3 subyyay4 subyyay5 subyyay6 subyyay7 subyyay8 subyyay9 subyyay10 subyyay11 subyyay12 subyyay13 subyyay14 subyyay15 subyyay16 subyyay17 subyyay18 yay4 yay5 yay6 ";
+%! __vm_compile__ bytecode_unwind clear;
+%! bytecode_unwind;
+%! assert (__prog_output_assert__ (key));
+%!
+%! __vm_enable__ (1, "local");
+%! assert (__vm_compile__ ("bytecode_unwind"));
+%! bytecode_unwind;
+%! assert (__prog_output_assert__ (key));
+
+## Test persistent
+%!testif ENABLE_BYTECODE_EVALUATOR
+%! __vm_enable__ (0, "local");
+%! clear all
+%! clear functions  % clear persistent variables in bytecode_persistant
+%! key = "a:3 b: double 0 0 0 c:3 c:4 a:4 b:1 double 1 1 0 c:5 c:6 ";
+%!
+%! __vm_compile__ bytecode_persistant clear;
+%! bytecode_persistant;
+%! bytecode_persistant;
+%! assert (__prog_output_assert__ (key));
+%!
+%! clear all;
+%! __vm_enable__ (1, "local");
+%! key = "a:3 b: double 0 0 0 c:3 c:4 a:4 b:1 double 1 1 0 c:5 c:6 ";
+%! assert (__vm_compile__ ("bytecode_persistant"));
+%! bytecode_persistant;
+%!
+%! bytecode_persistant;
+%! assert (__prog_output_assert__ (key));
+%!
+%! __vm_enable__ (0, "local");
+%! __vm_compile__ bytecode_persistant clear;
+%! clear all;
+%! key = "a:3 b: double 0 0 0 c:3 c:4 a:4 b:1 double 1 1 0 c:5 c:6 ";
+%! bytecode_persistant;
+%! __vm_enable__ (1, "local");
+%! assert (__vm_compile__ ("bytecode_persistant"));
+%!
+%! bytecode_persistant;
+%! assert (__prog_output_assert__ (key));
+
+## Test structs
+%!testif ENABLE_BYTECODE_EVALUATOR
+%! __vm_enable__ (0, "local");
+%! clear all
+%! key = "1 2 double 1 1 struct 3 4 ";
+%! __vm_compile__ bytecode_struct clear;
+%! bytecode_struct;
+%! assert (__prog_output_assert__ (key));
+%!
+%! __vm_enable__ (1, "local");
+%! assert (__vm_compile__ ("bytecode_struct"));
+%! bytecode_struct;
+%! assert (__prog_output_assert__ (key));
+
+## Test indexing chained objects and strange indexing
+%!testif ENABLE_BYTECODE_EVALUATOR
+%! __vm_enable__ (0, "local");
+%! clear all
+%! key = "2 2 3 3 2 cell 1 1 3 3 2 3 22 double 33 3 4 matlab.lang.MemoizedFunction 2 ";
+%! __vm_compile__ bytecode_index_obj clear;
+%! bytecode_index_obj;
+%! assert (__prog_output_assert__ (key));
+%!
+%! __vm_enable__ (1, "local");
+%! assert (__vm_compile__ ("bytecode_index_obj"));
+%! bytecode_index_obj;
+%! assert (__prog_output_assert__ (key));
+
+## Test varargout
+%!testif ENABLE_BYTECODE_EVALUATOR
+%! __vm_enable__ (0, "local");
+%! clear all
+%! key = "7 8 1 1 2 1 0 0 0 1 0 1 0 0 0 1 0 ";
+%! __vm_compile__ bytecode_varargout clear;
+%! bytecode_varargout;
+%! assert (__prog_output_assert__ (key));
+%!
+%! __vm_enable__ (1, "local");
+%! assert (__vm_compile__ ("bytecode_varargout"));
+%! bytecode_varargout;
+%! assert (__prog_output_assert__ (key));
+
+## Test inputname
+%!testif ENABLE_BYTECODE_EVALUATOR
+%! __vm_enable__ (0, "local");
+%! clear all
+%! key = "a a b + 1  a a b b aa aa bb bb aa + 1  bb * 3  a + 1  b * 3  aa aa bb bb aa + 1  bb * 3  a a b b a + 1  b * 3  ";
+%! __vm_compile__ bytecode_inputname clear;
+%! a = 9; b = 8;
+%! bytecode_inputname (a, b + 1);
+%! assert (__prog_output_assert__ (key));
+%!
+%! __vm_enable__ (1, "local");
+%! assert (__vm_compile__ ("bytecode_inputname"));
+%! bytecode_inputname (a, b + 1);
+%! assert (__prog_output_assert__ (key));
+
+## Test ans
+%!testif ENABLE_BYTECODE_EVALUATOR
+%! __vm_enable__ (0, "local");
+%! clear all
+%! key = "2 5 1 1 1 ";
+%! __vm_compile__ bytecode_ans clear;
+%! bytecode_ans;
+%! assert (__prog_output_assert__ (key));
+%!
+%! __vm_enable__ (1, "local");
+%! assert (__vm_compile__ ("bytecode_ans"));
+%! bytecode_ans;
+%! assert (__prog_output_assert__ (key));
+
+## Test using classdef
+%!testif ENABLE_BYTECODE_EVALUATOR
+%! __vm_enable__ (0, "local");
+%! clear all
+%! global cdef_foo_ctor_cnt; clear global cdef_foo_ctor_cnt;
+%! global cdef_foo_dtor_cnt; clear global cdef_foo_dtor_cnt;
+%! key = ". 1 f1 . 2 f3 3 f2 . sumf2f3 2 . . call14 f4 . a a_1 . 5 f8 . 6 f10 7 f9 . sumf9f10 2 . . call18 f11 . 2 2 3 4 4 4 3 2 2 3 . 9 sumf9f10 10 f12 11 f13 12 f14 13 sumf2f3 14 f5 15 f6 16 f7 ";
+%! __vm_compile__ bytecode_cdef_use clear;
+%! bytecode_cdef_use ();
+%! assert (__prog_output_assert__ (key));
+%! global cdef_foo_ctor_cnt; global cdef_foo_dtor_cnt;
+%! assert (cdef_foo_ctor_cnt == cdef_foo_dtor_cnt); % Check that there were as many ctor as dtor executions
+%!
+%! __vm_enable__ (1, "local");
+%! global cdef_foo_ctor_cnt; clear global cdef_foo_ctor_cnt;
+%! global cdef_foo_dtor_cnt; clear global cdef_foo_dtor_cnt;
+%! assert (__vm_compile__ ("bytecode_cdef_use"));
+%! bytecode_cdef_use ();
+%! assert (__prog_output_assert__ (key));
+%! global cdef_foo_ctor_cnt; global cdef_foo_dtor_cnt;
+%! assert (cdef_foo_ctor_cnt == cdef_foo_dtor_cnt);
+%!
+%! global cdef_foo_ctor_cnt; clear global cdef_foo_ctor_cnt;
+%! global cdef_foo_dtor_cnt; clear global cdef_foo_dtor_cnt;
+%!
+%! clear global __assert_printf__
+
+## Test anonymous function handles
+%!testif ENABLE_BYTECODE_EVALUATOR
+%! __vm_enable__ (0, "local");
+%! clear all
+%! key = "1 2 12 3 4 4 4 1 2 3 1 2 3 1 2 1 2 11 12 11 12 1 4 4 1 1 1 2 1 2 1 2 3 1 2 3 1 3 1 3 9 0 1 fooo ~101 103 ~101 103 ~110 123 ~101 123 ~010 123 ~000 123 ~011 123 ";
+%! __vm_compile__ bytecode_anon_handles clear;
+%! bytecode_anon_handles;
+%!
+%! global __assert_printf__;
+%!
+%! assert (__prog_output_assert__ (key));
+%!
+%! __vm_enable__ (1, "local");
+%! assert (__vm_compile__ ("bytecode_anon_handles"));
+%! bytecode_anon_handles;
+%! global __assert_printf__;
+%! assert (__prog_output_assert__ (key));
+
+## Test compiling a function named differently from its
+## m-file
+%!testif ENABLE_BYTECODE_EVALUATOR
+%! clear all
+%! __vm_enable__ (1, "local");
+%! __vm_compile__ wrongname_fn clear;
+%! assert (__vm_compile__ ("wrongname_fn"));
+%!
+%! assert (wrongname_fn (77) == 78);
+
+## Test some misc stuff
+%!testif ENABLE_BYTECODE_EVALUATOR
+%! clear all
+%! __vm_enable__ (1, "local");
+%!
+%! bytecode_misc; % asserts internally
+
+## Leak check
+%!testif ENABLE_BYTECODE_EVALUATOR
+%! clear all
+%! __vm_enable__ (1, "local");
+%!
+%! c = 2;
+%! d = 3;
+%! n_c = __ref_count__ (c);
+%! n_d = __ref_count__ (d);
+%! bytecode_leaks (c, d); % asserts internally
+%!
+%! assert (n_c == __ref_count__ (c))
+%! assert (n_d == __ref_count__ (d))
+
+## Test scripts
+%!testif ENABLE_BYTECODE_EVALUATOR
+%! __vm_enable__ (0, "local");
+%! clear all
+%! key = "0 1 3 4 5 3 ";
+%! __vm_compile__ bytecode_scripts clear;
+%! bytecode_scripts;
+%! assert (__prog_output_assert__ (key));
+%!
+%! __vm_enable__ (1, "local");
+%! assert (__vm_compile__ ("bytecode_scripts"));
+%! bytecode_scripts;
+%! assert (__prog_output_assert__ (key));
+
+## Test nested functions.
+%!testif ENABLE_BYTECODE_EVALUATOR
+%! global cdef_bar_cnt
+%! cdef_bar_cnt = 0;
+%!
+%! __vm_enable__ (0, "local");
+%! clear all
+%! __vm_compile__ bytecode_nested clear;
+%! % These tests use asserts in themselves
+%! bytecode_nested;
+%!
+%! cdef_bar_cnt = 0;
+%!
+%! __vm_enable__ (1, "local");
+%! assert (__vm_compile__ ("bytecode_nested"));
+%! bytecode_nested;
+%!
+%! assert (cdef_bar_cnt == 0);
+%! clear -global cdef_bar_alive_objs cdef_bar_cnt glb_d glb_e glb_f
+
+## Test script interaction when called from top scope
+%!testif ENABLE_BYTECODE_EVALUATOR
+%!
+%! __vm_enable__ (0, "local");
+%! clear all
+%!
+%! global bytecode_script_topscope_place; % Used in bytecode_script_topscope to select where to evalin
+%! bytecode_script_topscope_place = "base";
+%!
+%! global bytecode_script_topscope_call_self
+%! bytecode_script_topscope_call_self = false;
+%!
+%! bytecode_script_topscope_setup; % Does some setups of globals and locals in base frame, from a function
+%! evalin ("base", "bytecode_script_topscope_setup_script"); % Also does some setups, but from a script
+%! evalin ("base", "bytecode_script_topscope"); % A script that does things a does some asserts
+%!
+%! bytecode_script_topscope_assert; % Does some asserts and cleans up the globals and locals added
+%!
+%! __vm_enable__ (1, "local");
+%! bytecode_script_topscope_setup;
+%! evalin ("base", "bytecode_script_topscope_setup_script");
+%! assert (__vm_compile__ ("bytecode_script_topscope"));
+%! evalin ("base", "bytecode_script_topscope");
+%! bytecode_script_topscope_assert;
+%!
+%! %% Redo the test, but nested in itself to get a stack frame in between.
+%! bytecode_script_topscope_call_self = true;
+%!
+%! bytecode_script_topscope_setup;
+%! evalin ("base", "bytecode_script_topscope_setup_script");
+%! assert (__vm_compile__ ("bytecode_script_topscope"));
+%! evalin ("base", "bytecode_script_topscope");
+%! bytecode_script_topscope_assert;
+%!
+%! bytecode_script_topscope_call_self = false;
+%!
+%! %% Redo the test, but in a command line function instead of in the top scope.
+%! bytecode_script_topscope_place = "caller"; % Governs some evalin() in bytecode_script_topscope. Switch to "caller"
+%!
+%! eval ("function bytecode_script_topscope_cli_fn ()\nbytecode_script_topscope_setup ('caller');\nbytecode_script_topscope_setup_script;\nbytecode_script_topscope;\nbytecode_script_topscope_assert ('caller');\nend");
+%!
+%! bytecode_script_topscope_cli_fn ();
+%! bytecode_script_topscope_call_self = true;
+%! bytecode_script_topscope_cli_fn ();
+%!
+%! %% Redo the test, but test all 15 different combinations of compiled and uncompiled functions and scripts
+%!
+%! __vm_enable__ (0, "local");
+%! clear all
+%!
+%! global bytecode_script_topscope_place % An assert in bytecode_script_topscope needs to know whether to check in caller or base
+%! global bytecode_script_topscope_call_self; % Whether bytecode_script_topscope should call itself to add another frame
+%!
+%! names = {"bytecode_script_topscope_cli_fn", "bytecode_script_topscope_setup", "bytecode_script_topscope", "bytecode_script_topscope_assert", "bytecode_script_topscope_setup_script"};
+%!
+%! for k = 0:5
+%!   choices = nchoosek (names, k);
+%!   for choise = choices'
+%!     clear bytecode_script_topscope_cli_fn bytecode_script_topscope_setup nbytecode_script_topscope nbytecode_script_topscope_assert bytecode_script_topscope_setup_script
+%!
+%!     eval ("function bytecode_script_topscope_cli_fn ()\nbytecode_script_topscope_setup ('caller');\nbytecode_script_topscope_setup_script;\nbytecode_script_topscope;\nbytecode_script_topscope_assert ('caller');\nend");
+%!
+%!     for fn = choise'
+%!       assert (__vm_compile__ (fn{1}))
+%!     end
+%!
+%!     % Check if it works in the CLI function
+%!     bytecode_script_topscope_place = "caller";
+%!     bytecode_script_topscope_call_self = false;
+%!     bytecode_script_topscope_cli_fn ();
+%!     bytecode_script_topscope_call_self = true;
+%!     bytecode_script_topscope_cli_fn ();
+%!
+%!     % Check if it works in base
+%!     bytecode_script_topscope_place = "base";
+%!     bytecode_script_topscope_call_self = false;
+%!     evalin ("base", "bytecode_script_topscope_setup ('base');\nbytecode_script_topscope_setup_script;\nbytecode_script_topscope;\nbytecode_script_topscope_assert ('base');")
+%!     bytecode_script_topscope_call_self = true;
+%!     evalin ("base", "bytecode_script_topscope_setup ('base');\nbytecode_script_topscope_setup_script;\nbytecode_script_topscope;\nbytecode_script_topscope_assert ('base');")
+%!   end
+%! end
+%!
+%! %% Test that nargin is set properly
+%!
+%! __vm_enable__ (0, "local");
+%! clear all
+%! bytecode_script_nargin_call_recursive = true; % The script calls it self once if this symbol is true
+%! bytecode_script_nargin_expected_value = nargin; % The expected value of nargin
+%! bytecode_script_nargin;
+%!
+%! % Call in top scope, to check that Octave cli start options set nargin to whatever the user started Octave with
+%! evalin ("base", "bytecode_script_nargin_call_recursive = true; bytecode_script_nargin_expected_value = nargin; bytecode_script_nargin;");
+%!
+%! % Call in function
+%! eval ("function foo (a,b) bytecode_script_nargin_expected_value = nargin; bytecode_script_nargin; end; foo (1, 2);")
+%!
+%! __vm_enable__ (1, "local");
+%! clear all
+%! bytecode_script_nargin_call_recursive = true;
+%! bytecode_script_nargin_expected_value = nargin;
+%! bytecode_script_nargin;
+%!
+%! evalin ("base", "bytecode_script_nargin_call_recursive = true; bytecode_script_nargin_expected_value = nargin; bytecode_script_nargin;");
+%! eval ("function foo (a,b) bytecode_script_nargin_expected_value = nargin; bytecode_script_nargin; end; foo (1, 2);")
+%!
+%! %% Cleanup after the tests
+%! clear all
+
+## Test save() and load() from scripts.
+%!testif ENABLE_BYTECODE_EVALUATOR
+%! clear all
+%!
+%! for use_vm = [true, false] % Test with and without VM
+%!   __vm_enable__ (use_vm, "local");
+%!
+%!   % We need to pass the file name to the scripts with a global
+%!   global bytecode_load_script_file
+%!   bytecode_load_script_file = [tempname(), ".mat"];
+%!
+%!   eval ("function bytecode_load_script_save_fn\n  bytecode_load_script_save\n;  bytecode_load_script_load_and_assert;\n  end\n")
+%!
+%!   if use_vm
+%!     assert (__vm_compile__ ("bytecode_load_script_save"))
+%!     assert (__vm_compile__ ("bytecode_load_script_load_and_assert"))
+%!     assert (__vm_compile__ ("bytecode_load_script_save_fn"))
+%!   end
+%!
+%!   unwind_protect
+%!     evalin ("base", "bytecode_load_script_save"); % Saves some variables to a file with save ()
+%!     evalin ("base", "bytecode_load_script_load_and_assert"); % Loads them back and checks their values etc
+%!   unwind_protect_cleanup
+%!     unlink (bytecode_load_script_file);
+%!     evalin ("base", "clear local_aa local_bb local_cc")
+%!     evalin ("base", "clear global glb_aa glb_bb glb_cc glb_dd glb_ee")
+%!   end_unwind_protect
+%!
+%!   bytecode_load_script_file = [tempname(), ".mat"];
+%!   %% Same test, but from a function
+%!   unwind_protect
+%!     bytecode_load_script_save_fn;
+%!   unwind_protect_cleanup
+%!     unlink (bytecode_load_script_file);
+%!   end_unwind_protect
+%!
+%!   clear all
+%! end
+%!
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile/bytecode_anon_handles.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,160 @@
+% TODO: The anonymous functions bodies are not compiled
+
+function bytecode_anon_handles ()
+  h1 = @(x) __printf_assert__ ("%d ", x);
+  h1 (1);
+  h1 (2);
+  h11 = h1;
+  h11 (12);
+
+  a = 3;
+  h2 = @() __printf_assert__ ("%d ", a);
+  h2 ();
+
+  h3 = @(a,b,c) a + b + c;
+  __printf_assert__ ("%d ", h3 (1, 2, 1));
+  __printf_assert__ ("%d ", h3 (1, 2, 1));
+
+  h3 (1, 2, 1);
+  __printf_assert__ ("%d ", ans);
+
+  h4 = @() {1,2,3}{:};
+  [a b c] = h4();
+  __printf_assert__ ("%d %d %d ", a, b, c);
+  [a b c] = h4();
+  __printf_assert__ ("%d %d %d ", a, b, c);
+  [a b] = h4();
+  __printf_assert__ ("%d %d ", a, b);
+  [a b] = h4();
+  __printf_assert__ ("%d %d ", a, b);
+
+  h5 = @(x) @(y) __printf_assert__ ("%d %d ", x, y);
+  h5(11)(12)
+  h5(11)(12)
+
+  % max not in parent scope
+  h6 = @(x, y) max (x, y);
+  __printf_assert__ ("%d ", h6 (-1, 1));
+
+  % Mess with the anon function's stackframe
+  a = 3;
+  h7 = @() foo () + a;
+  __printf_assert__ ("%d ", h7 ()); % 4
+  __printf_assert__ ("%d ", h7 ()); % also 4
+
+  % Nargout
+  h8 = @() expression_nargout ();
+  a = h8 ();
+  __printf_assert__ ("%d ", a);
+  a = h8 ();
+  __printf_assert__ ("%d ", a);
+
+  [a b] = h8 ();
+  __printf_assert__ ("%d ", a, b);
+  [a b] = h8 ();
+  __printf_assert__ ("%d ", a, b);
+
+  [a b c] = h8 ();
+  __printf_assert__ ("%d ", a, b, c);
+  [a b c] = h8 ();
+  __printf_assert__ ("%d ", a, b, c);
+
+  [a, ~, c] = h8 ();
+  __printf_assert__ ("%d ", a, c);
+  h8 = @() expression_nargout ();
+  [a, ~, c] = h8 ();
+  __printf_assert__ ("%d ", a, c);
+
+  % ans
+  h9 = @() 9;
+  h9 ();
+   __printf_assert__ ("%d ", ans);
+
+  % word command
+  h10 = @() nargout;
+  h10 ();
+  __printf_assert__ ("%d ", ans);
+  a = h10 ();
+  __printf_assert__ ("%d ", a);
+
+  % inputname
+  h11 = @(x) inputname (1);
+  fooo = 123;
+  __printf_assert__ ("%s ", h11 (fooo));
+
+  % Ignored outputs are propagated to nested calls
+  h12 = @() try_isargout ();
+  x = y = z = 0;
+  [x, ~, z] = h12 (); __printf_assert__ ("%d%d%d ", x, y, z);
+  [x, ~, z] = h12 (); __printf_assert__ ("%d%d%d ", x, y, z);
+  [x, y, ~] = h12 (); __printf_assert__ ("%d%d%d ", x, y, z);
+  [x, ~, z] = h12 (); __printf_assert__ ("%d%d%d ", x, y, z);
+  [~, y, ~] = h12 (); __printf_assert__ ("%d%d%d ", x, y, z);
+  [~, ~, ~] = h12 (); __printf_assert__ ("%d%d%d ", x, y, z);
+  [~, y, z] = h12 (); __printf_assert__ ("%d%d%d ", x, y, z);
+
+  % The optim package exposed a bug with EXPAND_CS_LIST during development
+  h1 = @ (p) - (p(1)^2 + 1 - p(2));
+  h2 = @ (p) {[], h1(p)}{:};
+  [~, a] = h2 ([-2 5]);
+  assert (a == 0)
+  [~, a] = h2 ([-2 5]);
+  assert (a == 0)
+
+  % Nested anon functions
+  h1 = @(y) y * 2;
+  h2 = @(yy) execute_handle (@(yyy) h1 (yyy), yy); %h1 captured here
+  assert (h2 (3) == 6)
+  h2 = @(yy) execute_handle (@(yyyy) execute_handle (@(yyy) h1 (yyy), yyyy), yy); % Nest some more
+  assert (h2 (3) == 6)
+
+  % Test not enough return values
+  threw = false;
+  try
+    h1 = @() 1;
+    [a, b] = h1 ();
+  catch
+    threw = true;
+  end
+
+  assert (threw);
+
+  h1 = @(varargin) varargin{:};
+  [a b c] = h1(1,2,3);
+  assert ([a b c] == [1 2 3])
+
+  threw = false;
+  try
+    [a b c d] = h1(1,2,3);
+  catch
+    threw = true;
+  end
+
+  assert (threw);
+end
+
+function [x, y, z] = try_isargout ()
+  __printf_assert__ ("~%d%d%d ", isargout (1), isargout (2), isargout (3))
+  x = 1; y = 2; z = 3;
+
+  bar (); % Does nothing. Check return from nested subfunction works with active ignore
+endfunction
+
+function bar
+end
+
+function ret = foo
+  evalin ("caller", "a++;"); % Should not change 'a' for the next time h7 is called
+  ret = 0;
+end
+
+function varargout = expression_nargout ()
+  varargout = cell (1, nargout);
+  for i = 1:nargout
+    varargout{i} = i;
+  end
+end
+
+function b = execute_handle (h, arg1)
+  b = h (arg1);
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile/bytecode_ans.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,13 @@
+function bytecode_ans ()
+    max (1, 2);
+    __printf_assert__ ("%d ", ans);
+    1 + 1 + 3;
+    __printf_assert__ ("%d ", ans);
+    !false;
+    __printf_assert__ ("%d ", ans);
+    true;
+    __printf_assert__ ("%d ", ans);
+
+    c = 13; % Not written to ans
+    __printf_assert__ ("%d ", ans);
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile/bytecode_assign.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,57 @@
+function bytecode_assign ()
+
+  a = 2;
+  __printf_assert__ ("%d ", a);
+
+  a = 3;
+  __printf_assert__ ("%d ", a);
+
+  b = a = 1;
+  __printf_assert__ ("%d ", a);
+  __printf_assert__ ("%d ", b);
+
+  c = [2 2; 3 3];
+  d = c (1,2);
+  __printf_assert__ ("%d ", c);
+  __printf_assert__ ("%d ", d);
+
+  % Compound assignment
+  d = 1;
+  d += 1;
+  __printf_assert__ ("%d ", d);
+  d += d * 2;
+  __printf_assert__ ("%d ", d);
+  d *= 3;
+  __printf_assert__ ("%d ", d);
+  d /= 9;
+  __printf_assert__ ("%f ", d);
+
+  b = [1 2 3 4];
+  b += 1;
+  __printf_assert__ ("%f ", b);
+  __printf_assert__ ("%d ", size(b));
+  __printf_assert__ ("%s ", class(b));
+  b \= 2;
+  b -= 2;
+  b *= 2;
+  b /= 2;
+  b += 2;
+
+  b .\= 2;
+  %b .-= 2; % TODO: Removed in interpreter. Remove in VM too.
+  b .*= 2;
+  b ./= 2;
+  %b .+= 2; % TODO: Removed in interpreter. Remove in VM too.
+  b .^= 2;
+
+  __printf_assert__ ("%f ", b);
+  __printf_assert__ ("%d ", size(b));
+  __printf_assert__ ("%s ", class(b));
+
+  b = [1 2; 3 4];
+  b ^= 3;
+
+  __printf_assert__ ("%f ", b);
+  __printf_assert__ ("%d ", size(b));
+  __printf_assert__ ("%s ", class(b));
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile/bytecode_binops.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,117 @@
+function bytecode_binops()
+  % Root level binary expressions
+  % should not mess up the operand stack
+  2 * 3 + 1;
+  max (3, 2) - min (3, 2);
+  sin (3) * sin (2);
+
+  % General
+  a = 1 + 2 + 3 + 4;
+  __printf_assert__ ("%.17g ", a);
+  a = -1 - 2 - 3 - 4;
+  __printf_assert__ ("%.17g ", a);
+  a = 1 * 2 * 3 * 4;
+  __printf_assert__ ("%.17g ", a);
+  a = 1 / 2 / 3 / 4;
+  __printf_assert__ ("%.17g ", a);
+  a = 1^2^3^4;
+  __printf_assert__ ("%.17g ", a);
+
+  % Order
+  a = 1 + 2 - 3 * 4 / 5 ^ 6 * 7 / 8 - 9 + 10 / 11;
+  __printf_assert__ ("%.17g ", a);
+
+  % Function calls
+  a = max (3, 2) * min (2, 1) + max (10, 9);
+  __printf_assert__ ("%.17g ", a);
+
+  % Logical
+  a = 1 && 2;
+  __printf_assert__ ("%d ", a);
+  a = 1 && 0;
+  __printf_assert__ ("%d ", a);
+  a = 1 || 2;
+  __printf_assert__ ("%d ", a);
+  a = 0 || 0;
+  __printf_assert__ ("%d ", a);
+
+  % Need to not linger on stack
+  1 && 1;
+  1 && 0;
+  0 && 1;
+  0 || 0;
+  0 || 1;
+  1 || 0;
+
+  % We want to make sure there actually is a short circuit
+  % and that the operands are only evaluated once
+  a = truthy (1) || falsy(2);
+  __printf_assert__ ("%d ", a);
+
+  a = falsy (3) || falsy (4) || truthy (5) || falsy (6);
+  __printf_assert__ ("%d ", a);
+
+  a = truthy (7) && truthy (8) || falsy (12);
+  __printf_assert__ ("%d ", a);
+
+  a = falsy (9) && truthy (10) || falsy (11);
+  __printf_assert__ ("%d ", a);
+
+  % Compares
+
+  a = 1 == 1;
+  __printf_assert__ ("%d ", a);
+  a = 1 == 2;
+  __printf_assert__ ("%d ", a);
+  a = 1 < 2;
+  __printf_assert__ ("%d ", a);
+  a = 2 < 1;
+  __printf_assert__ ("%d ", a);
+  a = 1 > 2;
+  __printf_assert__ ("%d ", a);
+  a = 2 > 1;
+  __printf_assert__ ("%d ", a);
+  a = 1 <= 2;
+  __printf_assert__ ("%d ", a);
+  a = 2 <= 1;
+  __printf_assert__ ("%d ", a);
+  a = 1 <= 1;
+  __printf_assert__ ("%d ", a);
+  a = 1 >= 2;
+  __printf_assert__ ("%d ", a);
+  a = 2 >= 1;
+  __printf_assert__ ("%d ", a);
+  a = 1 >= 1;
+  __printf_assert__ ("%d ", a);
+  a = 2 ~= 1;
+  __printf_assert__ ("%d ", a);
+  a = 1 ~= 1;
+  __printf_assert__ ("%d ", a);
+  a = 2 != 1;
+  __printf_assert__ ("%d ", a);
+  a = 1 != 1;
+  __printf_assert__ ("%d ", a);
+
+  a = 1 == 1 && 2 > 1 && 3 > -3 || 1 < 10;
+  __printf_assert__ ("%d ", a);
+
+  % Elementwise logical
+
+  a = ones (2,2) & zeros (2,2);
+  __printf_assert__ ("%d ", a);
+  a = ones (2,2) & ones (2,2);
+  __printf_assert__ ("%d ", a);
+  a = ones (2,2) | zeros (2,2);
+  __printf_assert__ ("%d ", a);
+
+endfunction
+
+function out = truthy (i)
+  __printf_assert__ ("truthy%d ", i);
+  out = 1;
+end
+
+function out = falsy (i)
+  __printf_assert__ ("falsy%d ", i);
+  out = 0;
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile/bytecode_cdef_use.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,82 @@
+function bytecode_cdef_use ()
+  % The classdef handle class cdef_foo prints its ctor argument
+  % in the dtor with __printf_assert__ as well as
+  % adds one to the globals cdef_foo_ctor_cnt and cdef_foo_dtor_cnt
+  % in the ctor and dtor.
+
+  h1 = cdef_foo("f1");
+  __printf_assert__ (". ");
+  h1 = 2; % Triggers dtor
+  __printf_assert__ (". ");
+
+  h4 = make_obj ("f2") + make_obj ("f3"); % Two dtors will trigger here
+  __printf_assert__ (". ");
+  __printf_assert__ ("%s %d ", h4.msg, h4.val);
+
+  __printf_assert__ (". ");
+  make_obj ("f4");          % Saved in ans
+  __printf_assert__ (". "); % Won't trigger dtor since no defined return value
+  print_arg_ret_one ("call1"); % ans' dtor executed after rhs eval
+  __printf_assert__ (". ");
+
+  % Test calling function in packages. Not really a classdef
+  u = matlab.lang.makeUniqueStrings ({"a","a"});
+  __printf_assert__ ("%s %s ", u{1}, u{2});
+
+  % Check dtor call order
+  a = make_obj ("f5");
+  b = make_obj ("f6");
+  c = make_obj ("f7");
+
+  suby (); % Same tests in a subfunction
+end
+
+function suby ()
+  h1 = cdef_foo("f8");
+  __printf_assert__ (". ");
+  h1 = 2; % Triggers dtor
+  __printf_assert__ (". ");
+
+  h4 = make_obj ("f9") + make_obj ("f10"); % Two dtors will trigger here
+  __printf_assert__ (". ");
+  __printf_assert__ ("%s %d ", h4.msg, h4.val);
+
+  __printf_assert__ (". ");
+  make_obj ("f11");          % Saved in ans
+  __printf_assert__ (". "); % Won't trigger dtor since no defined return value
+  print_arg_ret_one ("call1"); % ans' dtor executed after rhs eval
+  __printf_assert__ (". ");
+
+  % Check that the classdef object is called
+  m = containers.Map;
+  m("qwe") = 2;
+  __printf_assert__ ("%d ", m("qwe"));
+  __printf_assert__ ("%d ", m("qwe")++); % Test ++-- on objects
+  __printf_assert__ ("%d ", m("qwe"));
+  __printf_assert__ ("%d ", ++m("qwe"));
+  __printf_assert__ ("%d ", m("qwe"));
+  __printf_assert__ ("%d ", m("qwe")--);
+  __printf_assert__ ("%d ", m("qwe"));
+  __printf_assert__ ("%d ", --m("qwe"));
+  __printf_assert__ ("%d ", m("qwe"));
+  % Different op code than cmd form call
+  m = containers.Map();
+  m("qwe") = 3;
+  __printf_assert__ ("%d ", m("qwe"));
+
+  __printf_assert__ (". ");
+  % Check dtor call order
+  a = make_obj ("f12");
+  b = make_obj ("f13");
+  c = make_obj ("f14");
+end
+
+
+function h = make_obj (msg)
+    h = cdef_foo (msg);
+end
+
+function a = print_arg_ret_one (msg)
+  __printf_assert__ (msg);
+  a = 1;
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile/bytecode_cell.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,132 @@
+function bytecode_cell ()
+  a = {'a','b'};
+  __printf_assert__ ("%s ", a{1});
+  __printf_assert__ ("%s ", a{2});
+  __printf_assert__ ("%s ", a{:});
+  __printf_assert__ ("%d ", size (a));
+
+  b = 'b';
+  c = 'c';
+
+  a = {b, c};
+  __printf_assert__ ("%s ", a{1});
+  __printf_assert__ ("%s ", a{2});
+  __printf_assert__ ("%s ", a{:});
+  __printf_assert__ ("%d ", size (a));
+  __printf_assert__ ("%s ", class (a{1}));
+
+  d = 'd';
+  e = 'e';
+
+  a = {b, c; d, e};
+  __printf_assert__ ("%s ", a{1});
+  __printf_assert__ ("%s ", a{3});
+  __printf_assert__ ("%s ", a{1,2});
+  __printf_assert__ ("%s ", a{2,1});
+  __printf_assert__ ("%s ", a{:});
+  __printf_assert__ ("%d ", size (a));
+
+  f = 'f';
+  g = 'g';
+  h = 'h';
+  i = 'i';
+  a = {b, c; d, e; f g; h, i; 'j', 'k'; 'l', 'm'};
+  __printf_assert__ ("%s ", a{:});
+  __printf_assert__ ("%d ", size (a));
+
+  b = 1;
+  c = 2;
+  d = 3;
+  e = 4;
+  a = {b, c; d, e};
+  __printf_assert__ ("%d ", a{1});
+  __printf_assert__ ("%d ", a{3});
+  __printf_assert__ ("%d ", a{1,2});
+  __printf_assert__ ("%d ", a{2,1});
+  __printf_assert__ ("%d ", a{:});
+  __printf_assert__ ("%d ", a{:, 1});
+  __printf_assert__ ("%d ", a{1, :});
+  __printf_assert__ ("%d ", a{:, :});
+  __printf_assert__ ("%d ", size (a));
+  __printf_assert__ ("%s ", class (a{1}));
+
+  a = {'qwe','asd','zxc'};
+  f = a{:};
+  __printf_assert__ ("%s ", f);
+  __printf_assert__ ("%d ", size (f));
+  __printf_assert__ ("%s ", class (f));
+
+  % Command form function call subref
+  __printf_assert__ ("%d ", suby{:});
+
+  % Test making cells dynamically with unpacking of cells
+  a = {1,2};
+  b = {};
+  d = {11; 12};
+
+  c = {a{:}};
+  assert (c, {1, 2});
+
+  c = {d{:}};
+  assert (c, {11, 12});
+
+  c = {a{:}, 3, 4};
+  assert (c, {1, 2, 3, 4});
+
+  c = {b{:}, a{:}, 3, 4, b{:}};
+  assert (c, {1, 2, 3, 4});
+
+  c = {;;; a{:}; 3 4;;;; b{:}};
+  assert (c, {1, 2; 3, 4});
+
+  c = {b{:}};
+  assert (c, {});
+
+  c = {b{:}; b{:}};
+  assert (c, cell (2, 0));
+
+  threw = false;
+  try
+    c = {b{:}; 1 2};
+  catch e
+    assert (regexp (e.message, "number of columns must match"))
+    threw = true;
+  end
+
+  assert (threw)
+
+  threw = false;
+  try
+    c = {1 2 3; a{:}};
+  catch e
+    assert (regexp (e.message, "number of columns must match"))
+    threw = true;
+  end
+
+  assert (threw)
+
+  threw = false;
+  try
+    c = {1 2 3; a{:}; 4 5 6};
+  catch e
+    assert (regexp (e.message, "number of columns must match"))
+    threw = true;
+  end
+
+  assert (threw)
+
+  threw = false;
+  try
+    c = {a{:}; 1 2 3};
+  catch e
+    assert (regexp (e.message, "number of columns must match"))
+    threw = true;
+  end
+
+  assert (threw)
+
+end
+
+function a = suby()
+  a = {1,2};
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile/bytecode_dountil.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,115 @@
+function bytecode_dountil ()
+  i = 5;
+  do
+    __printf_assert__ ("%d ", i);
+    i--;
+  until i
+
+  i = 0;
+  do
+    i++;
+  until i >= 3
+  __printf_assert__ ("%d ", i);
+
+  i = 0;
+  ctr = 0;
+  do
+    ctr++;
+  until i++ >= 4
+  __printf_assert__ ("%d ", i);
+  __printf_assert__ ("%d ", ctr);
+
+  i = 0;
+  ctr = 0;
+  do
+    ctr++;
+  until ++i >= 4
+  __printf_assert__ ("%d ", i);
+  __printf_assert__ ("%d ", ctr);
+
+  i = 0;
+  ctr = 0;
+  do
+    i++;
+    if i == 2
+      continue
+    end
+    ctr++;
+  until i >= 4
+  __printf_assert__ ("%d ", ctr);
+  __printf_assert__ ("%d ", i);
+
+  i = 0;
+  ctr = 0;
+  do
+    i++;
+    if i == 2
+      break
+    end
+    ctr++;
+  until i >= 4
+  __printf_assert__ ("%d ", ctr);
+  __printf_assert__ ("%d ", i);
+
+  i = 0;
+  ctr = 0;
+  do
+    i++;
+    if i == 2
+      continue
+    elseif i == 3
+      break
+    end
+    ctr++;
+  until i >= 4
+  __printf_assert__ ("%d ", ctr);
+  __printf_assert__ ("%d ", i);
+
+  i = 0;
+  do
+    i++;
+    if i == 1
+      continue
+    else
+      break
+    end
+  until i > 100
+  __printf_assert__ ("%d ", i);
+
+  ctr = 0;
+  j = 0;
+  do
+    i = 0;
+    do
+      k = 0;
+      do
+        k++;
+        ctr++;
+      until k >= 2
+      i++;
+    until i > 2
+    j++;
+  until j >= 2
+  __printf_assert__ ("%d ", ctr);
+
+  i = 0;
+  do
+    i++;
+    if i == 4
+      break;
+    end
+    continue;
+  until i == 3
+  __printf_assert__ ("%d ", i);
+
+  i = 0;
+  do
+    break
+  until i++ > 2
+  __printf_assert__ ("%d ", i);
+
+  i = 0;
+  do
+  until i++ == 2
+  __printf_assert__ ("%d ", i);
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile/bytecode_end.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,58 @@
+function bytecode_end ()
+
+  A = [1 2; 3 4];
+  __printf_assert__ ("%d ", A(1:end));
+
+  A(2:end) = [5 6 7];
+  __printf_assert__ ("%d ", A);
+  __printf_assert__ ("%d ", size (A));
+
+  A = [1 2; 3 4];
+  A(end, end) = 5;
+  __printf_assert__ ("%d ", A(2, 2));
+  __printf_assert__ ("%d ", A(end, end));
+
+  A = [1 2; 3 4];
+  A(end - 1, end - 1) = 6;
+  __printf_assert__ ("%d ", A(1, 1));
+  __printf_assert__ ("%d ", A(end - 1, end - 1));
+
+  A = [1:4];
+  A(end + 1) = 5;
+  __printf_assert__ ("%d ", A);
+
+  A = 1;
+  A(end) = 2;
+  __printf_assert__ ("%d ", A);
+  __printf_assert__ ("%d ", A(end));
+
+  A(end + 1) = 3;
+  __printf_assert__ ("%d ", A);
+  __printf_assert__ ("%d ", A(end));
+
+  __printf_assert__ ("%d ", suby1()(end));
+
+  % End indexing an object that is not an id
+  s = {"ifs"};
+  a = s{1}(2:end);
+  __printf_assert__ ("%s ", a);
+
+  % Nested index expressions
+  M = [1 2 3 4];
+  __printf_assert__ ("%d ", M (min (2, end))); % End of M
+  __printf_assert__ ("%d ", M (max (3, min (2, end)))); % End of M
+
+  min_h = @min;
+  __printf_assert__ ("%d ", M (min_h (2, end))); % End of min_h
+
+  s = [struct struct struct];
+  s(2).name = "foo";
+  __printf_assert__ ("%s ", s(min (2, end)).name);
+
+  % end together with struct refs are annoying
+  __printf_assert__ ("%s ", s(2).name (end - 1: end));
+end
+
+function a = suby1()
+  a = [1 2 3 4];
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile/bytecode_errors.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,68 @@
+function bytecode_errors (idx)
+  % We put test dispatch last, so that we don't have
+  % to update all columns and rows each time we
+  % add a test ...
+  run_test (idx);
+end
+
+function if_undefined_value ()
+  if qweqwe
+    a = 2;
+  end
+end
+
+function assign_undef ()
+  a = b;
+end
+
+function subsref_undef_id ()
+  a = b(1,2,3);
+end
+
+function subsref_cell_undef_id ()
+  a = b{1,2,3};
+end
+
+function wordcmd_undef_id ()
+  b 1 2 3;
+end
+
+function binary_undef ()
+  b * a;
+end
+
+function id_index_oob_error_1 ()
+  a = [1, 2];
+  b = a (3);
+end
+
+function id_index_oob_error_2 ()
+  a = [1, 2];
+  b = a (-1);
+end
+
+function binary_wrong_size_1 ()
+  a = [1 2 3] + [1 2];
+end
+
+function run_test (idx)
+  if idx == 0
+    if_undefined_value ();
+  elseif idx == 1
+    assign_undef ();
+  elseif idx == 2
+    subsref_undef_id ();
+  elseif idx == 3
+    subsref_cell_undef_id ();
+  elseif idx == 4
+    wordcmd_undef_id ();
+  elseif idx == 5
+    binary_undef ();
+  elseif idx == 6
+    id_index_oob_error_1 ();
+  elseif idx == 7
+    id_index_oob_error_2 ();
+  elseif idx == 8
+    binary_wrong_size_1 ();
+  end
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile/bytecode_eval_1.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,126 @@
+function bytecode_eval_1 ()
+  % Simple
+  assert (2 == eval ("2"));
+  assert (2 == eval ("2;"));
+
+  v = eval("11");
+  assert (v == 11);
+
+  % ans
+  eval ("12;");
+  assert (ans == 12);
+
+  % Change variable value
+  a = 2;
+  eval ("a = 3;");
+
+  __printf_assert__ ("%f ", a)
+  __printf_assert__ ("%f ", size (a))
+  __printf_assert__ ("%s ", class (a));
+
+  % Create new variable in an eval
+  eval ("b = 4;");
+  __printf_assert__ ("%f ", b)
+  __printf_assert__ ("%f ", size (b))
+  __printf_assert__ ("%s ", class (b));
+
+  % Create new variable in an eval, that is also
+  % not in a bytecode slot
+  eval ("c = 4;");
+  __printf_assert__ ("%f ", eval("c"))
+  __printf_assert__ ("%f ", size (eval("c")))
+  __printf_assert__ ("%s ", class (eval("c")));
+  eval ("c = 5;");
+  __printf_assert__ ("%f ", eval("c"))
+
+  % Change a global in an eval
+  clear global d
+  global d = 3;
+  eval ("d = 4;")
+  __printf_assert__ ("%f ", d);
+  clear global d
+  d = 2;
+  __printf_assert__ ("%f ", d);
+
+  % Create a global in an eval
+
+  %% TODO: Not supported. Does it have to be?
+  % eval ("clear global e");
+  % eval ("global e = 5;")
+  % __printf_assert__ ("%f ", e);
+  % __printf_assert__ ("%d ", length(who('global','e')));
+
+  % Just test the same thing in a subfunction
+  sub1 ();
+
+  % Change the value of arguments and returns in an eval
+  % Also do nargin and nargout in a subfunction
+  [aa bb] = suby2 (11, 22, 33);
+  __printf_assert__ ("%f ", aa);
+  __printf_assert__ ("%f ", bb);
+end
+
+function sub1()
+  % Simple
+  assert (2 == eval ("2;"));
+  assert (2 == eval ("2;"));
+
+  v = eval("11;");
+  assert (v == 11);
+
+  % ans
+  eval ("12;");
+  assert (ans == 12);
+
+  % Change variable value
+  a = 2;
+  eval ("a = 3;");
+
+  __printf_assert__ ("%f ", a)
+  __printf_assert__ ("%f ", size (a))
+  __printf_assert__ ("%s ", class (a));
+
+  % Create new variable in an eval
+  eval ("b = 4;");
+  __printf_assert__ ("%f ", b)
+  __printf_assert__ ("%f ", size (b))
+  __printf_assert__ ("%s ", class (b));
+
+  % Create new variable in an eval, that is also
+  % not in a bytecode slot
+  eval ("c = 4;");
+  __printf_assert__ ("%f ", eval("c"))
+  __printf_assert__ ("%f ", size (eval("c")))
+  __printf_assert__ ("%s ", class (eval("c")));
+  eval ("c = 5;");
+  __printf_assert__ ("%f ", eval("c"))
+
+  % Change a global in an eval
+  clear global d
+  global d = 3;
+  eval ("d = 4;")
+  __printf_assert__ ("%f ", d);
+  clear global d
+  d = 2;
+  __printf_assert__ ("%f ", d);
+end
+
+function [c d] = suby2 (a, b, c)
+  __printf_assert__ ("1:%f ", a);
+  __printf_assert__ ("2:%f ", b);
+  __printf_assert__ ("3:%f ", c);
+
+  eval ("c = 3;")
+  eval ("a = c;")
+  eval ("d = a;")
+  __printf_assert__ ("4:%f ", a);
+  __printf_assert__ ("5:%f ", b);
+  __printf_assert__ ("6:%f ", c);
+  __printf_assert__ ("7:%f ", d);
+
+  __printf_assert__ ("%d ", nargin);
+  __printf_assert__ ("%d ", eval ("nargin"));
+
+  __printf_assert__ ("%d ", nargout);
+  __printf_assert__ ("%d ", eval ("nargout"));
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile/bytecode_evalin_1.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,35 @@
+function bytecode_evalin_1 ()
+  b = evalin ("caller", "caller_a");
+  __printf_assert__ ("%f ", b);
+
+  evalin ("caller", "__printf_assert__('yoyo ')");
+
+  evalin ("base", "__printf_assert__('yobase ')");
+
+  caller_b = 3;
+  sub1();
+  __printf_assert__ ("%f ", caller_e);
+  __printf_assert__ ("%f ", eval ("caller_f")); % No slot for caller_f
+
+  caller_c = 11;
+  bytecode_evalin_2 ();
+  __printf_assert__ ("%f ", caller_c); % Changes to 33
+
+  %%% TODO: Can't create a variable with evalin in the treewalker
+  %%%       need to verify it isn't working with the VM too.
+  %% __printf_assert__ ("%f ", caller_d); % Is initialized to 22
+
+
+end
+
+function sub1()
+  b = evalin ("caller", "caller_b");
+  __printf_assert__ ("%f ", b);
+
+  evalin ("caller", "__printf_assert__('yoyo2 ')");
+
+  evalin ("base", "__printf_assert__('yobase2 ')");
+
+  assignin ("caller", "caller_e", 123);
+  assignin ("caller", "caller_f", 124);
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile/bytecode_evalin_2.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,8 @@
+function bytecode_evalin_2 ()
+  __printf_assert__ ("%f ", evalin ("caller", "caller_c"));
+
+  evalin ("caller", "caller_c = 33;");
+
+  %% %Can't create local in caller in the treewalker
+  %% evalin ("caller", "caller_d = 22;");
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile/bytecode_for.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,228 @@
+function bytecode_for()
+  ctr = 0;
+
+  for i = 1:4
+    __printf_assert__ ("%d ", i);
+  end
+  __printf_assert__ ("%d ", i);
+
+  for i = 1:2:5
+    __printf_assert__ ("%d ", i);
+  end
+  __printf_assert__ ("%d ", i);
+
+  for i = 1:3:5
+    __printf_assert__ ("%d ", i);
+  end
+  __printf_assert__ ("%d ", i);
+
+  for i = 4:-1:1
+    __printf_assert__ ("%d ", i);
+  end
+  __printf_assert__ ("%d ", i);
+
+  for i = 0.2:0.1:0.4
+    __printf_assert__ ("%3.3f ", i);
+  end
+  __printf_assert__ ("%3.3f ", i);
+
+  for i = 0.3:-0.1:0
+    __printf_assert__ ("%3.3f ", i);
+  end
+  __printf_assert__ ("%3.3f ", i);
+
+  for i = 0:NaN:2
+    __printf_assert__ ("%3.3f ", i);
+  end
+  for i = 0:1:NaN
+    __printf_assert__ ("%3.3f ", i);
+  end
+  for i = NaN:1:2
+    __printf_assert__ ("%3.3f ", i);
+  end
+
+  for j = 1:4
+    break
+  end
+  __printf_assert__ ("%d ", j);
+
+  for j = 1:4
+    continue
+  end
+  __printf_assert__ ("%d ", j);
+
+  for j = 1:4
+    if j == 2
+      break
+    end
+  end
+  __printf_assert__ ("%d ", j);
+
+  for j = 1:4
+    if j == 2
+      break
+    else
+      continue
+    end
+  end
+  __printf_assert__ ("%d ", j);
+
+  ctr = 0;
+  for i = 1:4
+    for j = 1:4
+      ctr++;
+    end
+  end
+  __printf_assert__ ("%d ", ctr);
+
+  ctr = 0;
+  for i = 1:4
+    if i == 2
+      continue
+    end
+
+    for j = 1:4
+      if j == 2
+        continue
+      end
+
+      ctr++;
+
+      if j == 3
+        break
+      end
+    end
+
+    if i == 3
+      break
+    end
+  end
+  __printf_assert__ ("%d ", ctr);
+  __printf_assert__ ("%d ", i);
+  __printf_assert__ ("%d ", j);
+
+  ctr = 0;
+  for i = 1:2
+    for j = 1:2
+      for k = 1:2
+        for l = 1:2
+          for m = 1:2
+            for n = 1:2
+              for o = 1:2
+                for p = 1:2
+                  ctr++;
+                end
+              end
+            end
+          end
+        end
+      end
+    end
+  end
+  __printf_assert__ ("%d ", ctr);
+
+  for i = 1:3
+  end
+  __printf_assert__ ("%d ", i);
+
+  % scalar range, only executed once
+  for i = 2
+    __printf_assert__ ("%d ", i);
+  end
+
+  n = 1;
+  for i = 1:n
+    __printf_assert__ ("%d ", i);
+  end
+
+  n = 1;
+  for i = 2:n
+    __printf_assert__ ("boo");
+  end
+  __printf_assert__ ("%d ", i);
+  __printf_assert__ ("%s ", class (i));
+
+  %Matrix
+  M = [1 2; 3 4];
+  for i = M
+    __printf_assert__ ("%d ", i);
+    __printf_assert__ ("size %d ", size (i));
+  end
+
+  for i = []
+    __printf_assert__ ("boo");
+  end
+  __printf_assert__ ("%d ", i);
+  __printf_assert__ ("%s ", class (i));
+
+
+  n = 'qwe';
+  for i = n
+    __printf_assert__ ("%s ", i);
+    __printf_assert__ ("size %d ", size (i));
+  end
+  __printf_assert__ ("%s ", class (i));
+
+  % The iteration variable is a double
+  % ??? Changed in someone's patch? TODO: Bug?
+  for i = single(1):single(3)
+    if i == 1
+      __printf_assert__ ("%s ", class (i));
+    end
+  end
+
+  % ... unless rhs is a scalar ...
+  for i = single (1)
+  end
+  __printf_assert__ ("%s ", class (i));
+
+  % Test return from for loop (need to pop native integers from stack)
+  __printf_assert__ ("%d ", foo ());
+
+  % Iterate over struct
+  for s = struct ("a", {"1", "2"}, "b", {"11", "12"})
+    __printf_assert__ ("%s %s ", s.a, s.b);
+  end
+
+  % Complex for loop
+
+  x.a = 1;
+  x.b = [1, 2; 3, 4];
+  x.c = "string";
+  for [val, key] = x
+    __printf_assert__ ("key:%s ", key)
+    if isa(val, "char")
+      __printf_assert__ ("val:%s ", val)
+    else
+      __printf_assert__ ("val:%d %d", val, size(val))
+    end
+  endfor
+
+  for [val, key] = struct ()
+    __printf_assert__ ("boo");
+  end
+
+  % If rhs is an undefined value, the body should not be executed
+  % and the iteration variable not modified.
+  x = 1;
+  for x = __varval__ ("") % Use __varval__ to get an undefined value
+    __printf_assert__ ("booo");
+  end
+  assert (x == 1);
+
+  y = 2;
+  for [x, y] = __varval__ ("")
+    __printf_assert__ ("boo");
+  end
+  assert (x == 1);
+  assert (y == 2);
+end
+
+
+function i = foo ()
+  for i = 1:10
+    if i == 5
+      return
+    end
+  end
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile/bytecode_global_1.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,219 @@
+% TODO: When clear and dynamic stack works, test
+% that behaviour is the same when clearing globals
+% in another function while they are on the stack in the
+% caller.
+
+function bytecode_global_1 ()
+  % General test. a and b are also read and verified in test .tst file
+  global a b
+  __printf_assert__ ("%s ", class (a));
+  __printf_assert__ ("%d ", size (a));
+
+  __printf_assert__ ("%d ", length (who ('global','a')));
+  __printf_assert__ ("%d ", length (who ('global','b')));
+  __printf_assert__ ("%d ", isglobal ('a'));
+  __printf_assert__ ("%d ", isglobal ('b'));
+
+  a = 1;
+  __printf_assert__ ("%d ", a);
+  b = 2;
+  __printf_assert__ ("%d ", b);
+
+  a = b;
+  __printf_assert__ ("%d ", a);
+  __printf_assert__ ("%d ", b);
+  b = 100;
+  a = 3 * b + max (a, b);
+  __printf_assert__ ("%d ", a);
+  __printf_assert__ ("%d ", b);
+
+  % Test that we can make globals in subfunctions
+  global e %sub1 needs a global "e"
+  e = 11;
+  sub1 (1);
+
+  __printf_assert__ ("%d ", isglobal ('a'));
+  __printf_assert__ ("%d ", isglobal ('b'));
+  __printf_assert__ ("%d ", a);
+  __printf_assert__ ("%d ", b);
+
+  sub1 (0);
+
+  __printf_assert__ ("%d ", isglobal ('a'));
+  __printf_assert__ ("%d ", isglobal ('b'));
+  __printf_assert__ ("%d ", a);
+  __printf_assert__ ("%d ", b);
+
+  % Declare global, clear it, use identifier as local, declare
+  % it as global ...
+  global c
+  __printf_assert__ ("%d ", length (who ('global','c')));
+  clear global c;
+  __printf_assert__ ("%d ", length (who ('global','c')));
+
+  c = 2;
+  __printf_assert__ ("%d ", c);
+  __printf_assert__ ("%s ", class(c));
+
+  global c
+  __printf_assert__ ("%d ", c);
+  __printf_assert__ ("%s ", class(c));
+
+  % Subassign
+  global f
+  f = [1 2 3 4 5];
+  f(3) = 6;
+  f(1) = 11;
+  __printf_assert__ ("%d ", f);
+  __printf_assert__ ("%s ", class(f));
+  __printf_assert__ ("%d ", size (f));
+
+  % Multiassign
+  global g h
+  [g h f] = returns3 ();
+  __printf_assert__ ("%d ", g);
+  __printf_assert__ ("%s ", class(g));
+  __printf_assert__ ("%d ", size (g));
+  __printf_assert__ ("%d ", h);
+  __printf_assert__ ("%s ", class(h));
+  __printf_assert__ ("%d ", size (h));
+  __printf_assert__ ("%d ", f);
+  __printf_assert__ ("%s ", class(f));
+  __printf_assert__ ("%d ", size (f));
+
+  % Init expression
+  global k = 3;
+  __printf_assert__ ("%d ", k);
+  __printf_assert__ ("%s ", class(k));
+  __printf_assert__ ("%d ", size (k));
+
+  global l = 4 m = max (10,9) n = [2,3] o = k;
+  __printf_assert__ ("%d ", l);
+  __printf_assert__ ("%s ", class(l));
+  __printf_assert__ ("%d ", size (l));
+  __printf_assert__ ("%d ", m);
+  __printf_assert__ ("%s ", class(m));
+  __printf_assert__ ("%d ", size (m));
+  __printf_assert__ ("%d ", n);
+  __printf_assert__ ("%s ", class(n));
+  __printf_assert__ ("%d ", size (n));
+  __printf_assert__ ("%d ", o);
+  __printf_assert__ ("%s ", class(o));
+  __printf_assert__ ("%d ", size (o));
+
+  % Init expression for existing local
+  p = 2;
+  global p = 3;
+  __printf_assert__ ("%d ", p);
+  __printf_assert__ ("%s ", class(p));
+  __printf_assert__ ("%d ", size (p));
+  % q created in caller already
+  global q = 4;
+  __printf_assert__ ("%d ", q);
+  __printf_assert__ ("%s ", class(q));
+  __printf_assert__ ("%d ", size (q));
+
+  % Reinit does nothing
+  global r = 7
+  global r = 8
+  __printf_assert__ ("%d ", r);
+  __printf_assert__ ("%s ", class(r));
+  __printf_assert__ ("%d ", size (r));
+
+  clear global c
+  __printf_assert__ ("%d ", length (who ('global','c')));
+
+  % Assure some different subassignment operators are working
+  global s;
+  s = [1 2 3 4];
+  s(1) = 11;  %SUBASSIGN_ID
+  s(2) += 10; %SUBASSIGN_CHAINED
+  s(3) = [];  %SUBASSIGN_ID
+
+  __printf_assert__ ("%d ", s);
+
+  global t;
+  t.s = [1 2 3 4];
+  t.s(1) = 11;  %SUBASSIGN_CHAINED
+  t.s(2) += 10; %SUBASSIGN_CHAINED
+  t.s(3) = [];  %SUBASSIGN_CHAINED
+
+  __printf_assert__ ("%d ", t.s);
+
+  clear global d
+  clear global e
+  clear global f
+  clear global g
+  clear global h
+  clear global k
+  clear global l
+  clear global m
+  clear global n
+  clear global o
+  clear global p
+  clear global r
+  clear global s
+  clear global t
+end
+
+function [q w e] = returns3()
+  q = 11;
+  w = 22;
+  e = 33;
+end
+
+
+function sub1(make_global)
+  % Already defined local, later declared global
+  d = 3;
+  __printf_assert__ ("%d ", length(who('global','d')));
+  global d
+  __printf_assert__ ("%d ", length(who('global','d')));
+  __printf_assert__ ("%d ", d);
+  __printf_assert__ ("%s ", class(d));
+  __printf_assert__ ("%d ", size (d));
+  d = [1 2];
+  __printf_assert__ ("%d ", d);
+  __printf_assert__ ("%s ", class(d));
+  __printf_assert__ ("%d ", size (d));
+
+  % Already defined local, later declared global,
+  % but with the global already with a value from the caller
+  e = 4;
+  __printf_assert__ ("%d ", length(who('global','e')));
+  global e
+  __printf_assert__ ("%d ", length(who('global','e')));
+  __printf_assert__ ("%d ", e);
+  __printf_assert__ ("eclass:%s ", class(e));
+  __printf_assert__ ("%d ", size (e));
+  e = [3 4];
+  __printf_assert__ ("%d ", e);
+  __printf_assert__ ("%s ", class(e));
+  __printf_assert__ ("%d ", size (e));
+
+
+  % Conditionally global a and b
+  if make_global
+    global a
+    global b
+
+    __printf_assert__ ("%d ", a);
+    __printf_assert__ ("%d ", b);
+  end
+
+  __printf_assert__ ("%d ", isglobal ('a'));
+  __printf_assert__ ("%d ", isglobal ('b'));
+  __printf_assert__ ("%d ", length(who('global','a')));
+  __printf_assert__ ("%d ", length(who('global','b')));
+
+  a = 3;
+  b = 4;
+
+  __printf_assert__ ("%d ", a);
+  __printf_assert__ ("%d ", b);
+
+  if make_global
+    a = 5;
+    b = 6;
+  end
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile/bytecode_if.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,141 @@
+function bytecode_if()
+  ctr = 0;
+  a = 1;
+  b = 0;
+
+  if a
+    __printf_assert__ ("%d ", ctr++);
+  end
+
+  if a
+    __printf_assert__ ("%d ", ctr++);
+  else
+    __printf_assert__ ("booo ");
+  end
+
+  if a
+    __printf_assert__ ("%d ", ctr++);
+  elseif a
+    __printf_assert__ ("booo ");
+  else
+    __printf_assert__ ("booo ");
+  end
+
+  if b
+    __printf_assert__ ("booo ", ctr++);
+  end
+
+  if b
+    __printf_assert__ ("booo ");
+  else
+    __printf_assert__ ("%d ", ctr++);
+  end
+
+  if b
+    __printf_assert__ ("booo ");
+  elseif b
+    __printf_assert__ ("booo ");
+  else
+    __printf_assert__ ("%d ", ctr++);
+  end
+
+  if b
+    __printf_assert__ ("booo ");
+  elseif a
+    __printf_assert__ ("%d ", ctr++);
+  else
+    __printf_assert__ ("booo ");
+  end
+
+  if a
+    if a
+      if a
+        if a
+          if a
+            if b
+              __printf_assert__ ("booo ");
+            else
+              if a
+                if a
+                  if b
+                    __printf_assert__ ("booo ");
+                  elseif a
+                    if b
+                      __printf_assert__ ("booo ");
+                    else
+                      __printf_assert__ ("%d ", ctr++);
+                    end
+                  else
+                    __printf_assert__ ("booo ");
+                  end
+                end
+              end
+            end
+          end
+        else
+          __printf_assert__ ("booo ");
+        end
+      end
+    end
+  end
+
+  if 3 > 2
+    __printf_assert__ ("%d ", ctr++);
+  end
+
+  if []
+    __printf_assert__ ("booo ");
+  end
+
+  if ~b
+    __printf_assert__ ("%d ", ctr++);
+  end
+
+  if b
+  end
+
+  % "Braindead" short circuit
+  %
+  % We also check that there is a proper short circuit
+  if truthy (1) & truthy (2)
+    __printf_assert__ ("yay1 ");
+  end
+
+  if falsy (3) & truthy (4)
+    __printf_assert__ ("booo ");
+  end
+
+  if falsy (5) & falsy (6)
+    __printf_assert__ ("booo ");
+  end
+
+  if truthy (7) & falsy (8)
+    __printf_assert__ ("booo ");
+  end
+
+  if truthy (1)| truthy (2)
+    __printf_assert__ ("yay1 ");
+  end
+
+  if falsy (3) | truthy (4)
+    __printf_assert__ ("yay2 ");
+  end
+
+  if falsy (5) | falsy (6)
+    __printf_assert__ ("booo ");
+  end
+
+  if truthy (7) | falsy (8)
+    __printf_assert__ ("yay3 ");
+  end
+end
+
+function a = truthy (b)
+  __printf_assert__ ("%d ", b);
+  a = 1;
+end
+
+function a = falsy (b)
+  __printf_assert__ ("%d ", b);
+  a = 0;
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile/bytecode_index_obj.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,63 @@
+function bytecode_index_obj ()
+
+    % Chained indexing with struct
+    __printf_assert__ ("%d ", suby1 ().a);
+    __printf_assert__ ("%d ", suby1.a); % cmd fn call
+    __printf_assert__ ("%d ", suby1 ().b.a);
+    __printf_assert__ ("%d ", suby1.b.a);
+
+    % Chained indexing with matrix
+    __printf_assert__ ("%d ", suby_mat1 ()(2));
+
+    % Chained indexing with cells
+    __printf_assert__ ("%s ", class(suby_cell1 ()(2)));
+    __printf_assert__ ("%d ", size(suby_cell1 ()(2)));
+    __printf_assert__ ("%d ", suby_cell1 (){1}{3});
+    __printf_assert__ ("%d ", suby_cell1 (){1}{3}(1));
+    __printf_assert__ ("%d ", suby_cell1 ()(1){1}{2});
+    __printf_assert__ ("%d ", suby_cell1{1}{3}); % cmd fn call
+
+    % Dynamic struct field
+    s = struct;
+    s.qwe = 22;
+    s.asd = struct ("qwe", 33);
+
+    __printf_assert__ ("%d ", s.("qwe"));
+    __printf_assert__ ("%s ", class (s.("asd").("qwe")));
+    __printf_assert__ ("%d ", s.("asd").("qwe"));
+
+    % Subassign dynamic field
+    t.qwe = 3;
+    t.("asd") = 4;
+    __printf_assert__ ("%d ", t.qwe);
+    __printf_assert__ ("%d ", t.asd);
+
+    % : and end for eg. foo()(:,end) etc
+
+    % Check that classdef metas can be used to construct a classdef object
+    h = @sin;
+    o = matlab.lang.MemoizedFunction (h);
+    __printf_assert__ ("%s ", class (o))
+
+    % Check proper argument order
+    s = [struct struct ; struct struct];
+    M = [1:10 ; 11:20];
+    s(1,2).a = M;
+    __printf_assert__ ("%d ", s(1,2).a(1,2));
+end
+
+function s = suby1()
+    s = struct;
+    s.a = 2;
+    b = struct;
+    b.a = 3;
+    s.b = b;
+end
+
+function m = suby_mat1()
+    m = [1 2 3];
+end
+
+function c = suby_cell1()
+    c = {{1 2 3}, 4, 5};
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile/bytecode_inputname.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,28 @@
+function bytecode_inputname (arg1, arg2)
+    __printf_assert__ ("%s ", inputname (1, 0));
+    __printf_assert__ ("%s ", inputname (1, 1));
+    __printf_assert__ ("%s ", inputname (2, 0));
+    __printf_assert__ ("%s ", inputname (2, 1));
+
+    a = 2;
+    b = 3;
+    suby (a, b);
+    suby (a + 1, b * 3);
+
+    % inputname from non-compiled function
+    inputname_args (a, b);
+    inputname_args (a + 1, b * 3);
+end
+
+function suby (arg1, arg2)
+    __printf_assert__ ("%s ", inputname (1, 0));
+    __printf_assert__ ("%s ", inputname (1, 1));
+    __printf_assert__ ("%s ", inputname (2, 0));
+    __printf_assert__ ("%s ", inputname (2, 1));
+
+    aa = 22;
+    bb = 33;
+    % inputname from non-compiled function
+    inputname_args (aa, bb);
+    inputname_args (aa + 1, bb * 3);
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile/bytecode_leaks.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,168 @@
+#
+# We are looking for leaks of octave_value objects by checking the reference counts
+# in different code paths of the calling convention.
+#
+function [a b] = bytecode_leaks (c, d)
+  cc = c;
+  b = d;
+
+  e = 1+1;
+  refs_e = __ref_count__ (e);
+  suby1(e);
+  assert (refs_e, __ref_count__ (e))
+
+  aa = suby2(e);
+  assert (refs_e, __ref_count__ (e))
+
+  % Test special code path for assigning argument to return value
+  aa = suby3(e);
+  assert (refs_e + 1, __ref_count__ (e))
+  aa = 0;
+  assert (refs_e, __ref_count__ (e))
+
+  % varargin
+  suby4 (e,e,e,e);
+  assert (refs_e, __ref_count__ (e))
+  suby5 (e,e,e,e);
+  assert (refs_e, __ref_count__ (e))
+
+  % varargout
+  suby6(e);
+  assert (refs_e, __ref_count__ (e))
+  [tmp1, tmp2, tmp3, tmp4, tmp5] = suby6(e);
+  tmp1 = 0; tmp2 = 0; tmp3 = 0; tmp4 = 0; tmp5 = 0;
+  assert (refs_e, __ref_count__ (e))
+
+  suby7(e);
+  assert (refs_e, __ref_count__ (e))
+  [tmp1, tmp2, tmp3, tmp4, tmp5] = suby6(e);
+  tmp1 = 0; tmp2 = 0; tmp3 = 0; tmp4 = 0; tmp5 = 0;
+  assert (refs_e, __ref_count__ (e))
+
+  % Call non-vm function
+  sin (e);
+  assert (refs_e, __ref_count__ (e))
+
+  % Index matrix
+  m = [1 2 3 4];
+  m (e);
+  assert (refs_e, __ref_count__ (e))
+
+  % Ops
+  tmp1 = -e + e * e - e / e ^ e;
+  assert (refs_e, __ref_count__ (e))
+
+  % Dynamic matrix
+  m = [1 2 3 e; e 4 5 6];
+  m = 0;
+  assert (refs_e, __ref_count__ (e))
+
+  % Cell
+  m = {1,2,3, e; 4, 5, e, 6};
+  m = 0;
+  assert (refs_e, __ref_count__ (e))
+
+  % "command call" with disp. Do it in an evalc () to not clutter stdout
+  s_ans = evalc ("silent_disp_test ()");
+  assert (strfind (s_ans, "e = 2"))
+  assert (strfind (s_ans, "ans = 3.14"))
+
+  % no disp
+  e;
+  assert (refs_e + 1, __ref_count__ (e)) % in ans
+  ans = 0;
+  assert (refs_e, __ref_count__ (e))
+
+
+  % Too many or few args
+  try
+    suby1 (e,e,e);
+  catch
+  end
+  assert (refs_e, __ref_count__ (e))
+
+  try
+    subsuby5 (e);
+  catch
+  end
+  assert (refs_e, __ref_count__ (e))
+
+  try
+    m = [];
+    m(e)
+  catch
+  end
+  assert (refs_e, __ref_count__ (e))
+
+  try
+    m = [];
+    m(e) = 123;
+  catch
+  end
+  assert (refs_e, __ref_count__ (e))
+
+  % eval dynamic stack
+  suby8 (e);
+  assert (refs_e, __ref_count__ (e))
+end
+
+function silent_disp_test ()
+  assert (__vm_is_executing__);
+
+  e = 2;
+  refs_e = __ref_count__ (e);
+
+  e % Should print "e = 2"
+  assert (refs_e + 1, __ref_count__ (e)) % in ans
+  ans = 0;
+  assert (refs_e, __ref_count__ (e))
+
+  % This will be a function call and should print "ans = 3.14..."
+  pi
+end
+
+function suby1 (a)
+  aa = 1 + a;
+  bb = a;
+end
+
+function aa = suby2 (a)
+  aa = 1 + a;
+  bb = a;
+end
+
+function a = suby3(a)
+end
+
+function b = suby4(varargin)
+  b = 3 + varargin{1};
+  c = varargin{2};
+end
+
+function b = suby5(a, b, varargin)
+  b = 3 + varargin{1};
+  c = varargin{2};
+  d = a;
+end
+
+function varargout = suby6(a)
+  varargout{1} = 3;
+  varargout{2} = a;
+  varargout{3} = 1;
+  varargout{4} = 1;
+  varargout{5} = a;
+  cc = a;
+end
+
+function [aa bb varargout] = suby7(a)
+  varargout{1} = 3;
+  varargout{2} = a;
+  varargout{3} = 1;
+  aa = 1;
+  bb = a;
+  cc = a;
+end
+
+function suby8(a)
+  eval ("g = a;");
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile/bytecode_load_script_load_and_assert.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,26 @@
+local_cc = 33; %Defined before the load
+glb_ee = 123;
+
+assert (!isglobal ("glb_aa"))
+assert (!isglobal ("glb_bb"))
+assert (!isglobal ("glb_cc"))
+assert (!isglobal ("glb_dd"))
+assert (!isglobal ("glb_ee"))
+
+global bytecode_load_script_file
+load (bytecode_load_script_file)
+
+assert (isglobal ("glb_aa"))
+assert (isglobal ("glb_bb"))
+assert (isglobal ("glb_cc"))
+assert (isglobal ("glb_dd"))
+assert (isglobal ("glb_ee"))
+
+assert (local_aa == 1)
+assert (local_bb == 2)
+assert (local_cc == 3)
+assert (glb_aa == 1);
+assert (isempty (glb_bb));
+assert (isempty (glb_cc));
+assert (glb_dd == 4);
+assert (glb_ee == 5);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile/bytecode_load_script_save.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,16 @@
+global glb_aa = 1;
+global glb_bb
+eval ("global glb_cc;")
+eval ("global glb_dd = 4;")
+global glb_ee;
+glb_ee = 5;
+
+local_aa = 1;
+eval ("local_bb = 2;")
+local_cc = 3;
+
+global bytecode_load_script_file
+save (bytecode_load_script_file, "glb_aa", "glb_bb", "glb_cc", "glb_dd", "glb_ee", "local_aa", "local_bb", "local_cc")
+
+clear global glb_aa glb_bb glb_cc glb_dd glb_ee
+clear local_aa local_bb local_cc
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile/bytecode_matrix.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,222 @@
+function bytecode_matrix ()
+  % Dynamically built matrices, i.e. those
+  % that are not built by the parser, with a dynamic value
+  % inside the initializer.
+
+  a = 1;
+  b = 2;
+  c = 3;
+  d = 4;
+  A = [a b c d];
+  __printf_assert__ ("%g ", A);
+  __printf_assert__ ("%g ", size (A));
+
+  A = [a b c d]';
+  __printf_assert__ ("%g ", A);
+  __printf_assert__ ("%g ", size (A));
+
+  A = [a b; c d];
+  __printf_assert__ ("%g ", A);
+  __printf_assert__ ("%g ", size (A));
+
+  A = [A; A];
+  __printf_assert__ ("%g ", A);
+  __printf_assert__ ("%g ", size (A));
+
+  A = [];
+  __printf_assert__ ("%g ", A);
+  __printf_assert__ ("%g ", size (A));
+
+  % Evalorder
+  A = [hi('a') hi('b'); hi('c') hi('d')];
+
+  % Matrix multiply
+  A = [a b; c d] * [a b; c d];
+  __printf_assert__ ("%g ", A);
+  __printf_assert__ ("%g ", size (A));
+
+  A = [a b c d] * [a; b; c; d];
+  __printf_assert__ ("%g ", A);
+  __printf_assert__ ("%g ", size (A));
+
+  A = [a; b; c; d] * [a b c d];
+  __printf_assert__ ("%g ", A);
+  __printf_assert__ ("%g ", size (A));
+
+  % Matrix div
+  A = [a b c d] / [a b c d];
+  __printf_assert__ ("%g ", A);
+  __printf_assert__ ("%g ", size (A));
+
+  A = [a b c d] \ [a b c d];
+  __printf_assert__ ("%g ", A);
+  __printf_assert__ ("%g ", size (A));
+
+  A = [a b; c d] / [a b; c d];
+  __printf_assert__ ("%g ", A);
+  __printf_assert__ ("%g ", size (A));
+
+  % Compound mul herm
+  A = [a b c d] * [a b c d]';
+  __printf_assert__ ("%g ", A);
+  __printf_assert__ ("%g ", size (A));
+
+  % Compound herm mul
+  A = [a b; c d]' * [a b; c d];
+  __printf_assert__ ("%g ", A);
+  __printf_assert__ ("%g ", size (A));
+
+  % Compound trans ldiv
+  A = [a; b; c; d].' \ [a b c d];
+  __printf_assert__ ("%g ", A);
+  __printf_assert__ ("%g ", size (A));
+
+  % Compound trans ldiv
+  A = [a b; c d].' \ [a b; c d];
+  __printf_assert__ ("%g ", A);
+  __printf_assert__ ("%g ", size (A));
+
+  % Matrix add
+  A = [a b; c d] + [a b; c d];
+  __printf_assert__ ("%g ", A);
+  __printf_assert__ ("%g ", size (A));
+
+  A = [a b c d] + [a; b; c; d];
+  __printf_assert__ ("%g ", A);
+  __printf_assert__ ("%g ", size (A));
+
+  % Scalar
+  A = [a b c d] + 2;
+  __printf_assert__ ("%g ", A);
+  __printf_assert__ ("%g ", size (A));
+
+  A = 2 + [a b c d];
+  __printf_assert__ ("%g ", A);
+  __printf_assert__ ("%g ", size (A));
+
+  A = [a b c d] - 2;
+  __printf_assert__ ("%g ", A);
+  __printf_assert__ ("%g ", size (A));
+
+  A = 2 * [a b c d];
+  __printf_assert__ ("%g ", A);
+  __printf_assert__ ("%g ", size (A));
+
+  A = [a b c d] / 2;
+  __printf_assert__ ("%g ", A);
+  __printf_assert__ ("%g ", size (A));
+
+  A = 2 \ [a b c d];
+  __printf_assert__ ("%g ", A);
+  __printf_assert__ ("%g ", size (A));
+
+  % Dot
+
+  A = [a b c d] .* [a b c d];
+  __printf_assert__ ("%g ", A);
+  __printf_assert__ ("%g ", size (A));
+
+  A = [a b c d] ./ [a b c d];
+  __printf_assert__ ("%g ", A);
+  __printf_assert__ ("%g ", size (A));
+
+  A = [a b c d] .\ [a b c d];
+  __printf_assert__ ("%g ", A);
+  __printf_assert__ ("%g ", size (A));
+
+  A = [a b c d] .^ [a b c d];
+  __printf_assert__ ("%g ", A);
+  __printf_assert__ ("%g ", size (A));
+
+  A = [a b c d] .* [a b c d]';
+  __printf_assert__ ("%g ", A);
+  __printf_assert__ ("%g ", size (A));
+
+  A = [a b c d] ./ [a b c d]';
+  __printf_assert__ ("%g ", A);
+  __printf_assert__ ("%g ", size (A));
+
+  A = [a b c d] .\ [a b c d]';
+  __printf_assert__ ("%g ", A);
+  __printf_assert__ ("%g ", size (A));
+
+  A = [a b c d] .^ [a b c d]';
+  __printf_assert__ ("%g ", A);
+  __printf_assert__ ("%g ", size (A));
+
+  % Matrices with unequal row lengths
+  s = "zxc";
+  A = ["qweasd"; s];
+  __printf_assert__ ("%s ", A);
+  A = [s s; s];
+  __printf_assert__ ("%s ", A);
+
+  % Matrices with more than 255 elements use
+  % a different op-code.
+  a = 111;
+        % note a here
+  b = [ a 1   2   3   4   5   6   7   8   9  10 ...
+        11  12  13  14  15  16  17  18  19  20 ...
+        21  22  23  24  25  26  27  28  29  30 ...
+        31  32  33  34  35  36  37  38  39  40 ...
+        41  42  43  44  45  46  47  48  49  50 ...
+        51  52  53  54  55  56  57  58  59  60 ...
+        61  62  63  64  65  66  67  68  69  70 ...
+        71  72  73  74  75  76  77  78  79  80 ...
+        81  82  83  84  85  86  87  88  89  90 ...
+        91  92  93  94  95  96  97  98  99 100 ...
+        101 102 103 104 105 106 107 108 109 110 ...
+        111 112 113 114 115 116 117 118 119 120 ...
+        121 122 123 124 125 126 127 128 129 130 ...
+        131 132 133 134 135 136 137 138 139 140 ...
+        141 142 143 144 145 146 147 148 149 150 ...
+        151 152 153 154 155 156 157 158 159 160 ...
+        161 162 163 164 165 166 167 168 169 170 ...
+        171 172 173 174 175 176 177 178 179 180 ...
+        181 182 183 184 185 186 187 188 189 190 ...
+        191 192 193 194 195 196 197 198 199 200 ...
+        201 202 203 204 205 206 207 208 209 210 ...
+        211 212 213 214 215 216 217 218 219 220 ...
+        221 222 223 224 225 226 227 228 229 230 ...
+        231 232 233 234 235 236 237 238 239 240 ...
+        241 242 243 244 245 246 247 248 249 250 ...
+        251 252 253 254 255 256 257];
+  __printf_assert__ ("%d ", size (b));
+  __printf_assert__ ("%d ", sum (b));
+  a = 111;
+        % note a here
+  b = [ a;   1;   2;   3;   4;   5;   6;   7;   8;   9;  10; ...
+        11;  12;  13;  14;  15;  16;  17;  18;  19;  20; ...
+        21;  22;  23;  24;  25;  26;  27;  28;  29;  30; ...
+        31;  32;  33;  34;  35;  36;  37;  38;  39;  40; ...
+        41;  42;  43;  44;  45;  46;  47;  48;  49;  50; ...
+        51;  52;  53;  54;  55;  56;  57;  58;  59;  60; ...
+        61;  62;  63;  64;  65;  66;  67;  68;  69;  70; ...
+        71;  72;  73;  74;  75;  76;  77;  78;  79;  80; ...
+        81;  82;  83;  84;  85;  86;  87;  88;  89;  90; ...
+        91;  92;  93;  94;  95;  96;  97;  98;  99; 100; ...
+        101; 102; 103; 104; 105; 106; 107; 108; 109; 110; ...
+        111; 112; 113; 114; 115; 116; 117; 118; 119; 120; ...
+        121; 122; 123; 124; 125; 126; 127; 128; 129; 130; ...
+        131; 132; 133; 134; 135; 136; 137; 138; 139; 140; ...
+        141; 142; 143; 144; 145; 146; 147; 148; 149; 150; ...
+        151; 152; 153; 154; 155; 156; 157; 158; 159; 160; ...
+        161; 162; 163; 164; 165; 166; 167; 168; 169; 170; ...
+        171; 172; 173; 174; 175; 176; 177; 178; 179; 180; ...
+        181; 182; 183; 184; 185; 186; 187; 188; 189; 190; ...
+        191; 192; 193; 194; 195; 196; 197; 198; 199; 200; ...
+        201; 202; 203; 204; 205; 206; 207; 208; 209; 210; ...
+        211; 212; 213; 214; 215; 216; 217; 218; 219; 220; ...
+        221; 222; 223; 224; 225; 226; 227; 228; 229; 230; ...
+        231; 232; 233; 234; 235; 236; 237; 238; 239; 240; ...
+        241; 242; 243; 244; 245; 246; 247; 248; 249; 250; ...
+        251; 252; 253; 254; 255; 256; 257];
+  __printf_assert__ ("%d ", size (b));
+  __printf_assert__ ("%d ", sum (b));
+
+end
+
+function i = hi (s)
+  __printf_assert__ ("%s ", s);
+  i = 1;
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile/bytecode_misc.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,149 @@
+function bytecode_misc ()
+  % Assure "set_internal_variable" are reset properly
+  max_stack = max_stack_depth;
+  set_max_stack_depth_1p (max_stack + 1);
+  assert (max_stack_depth == max_stack);
+
+  % Check that the WIDE opcode extension works
+  wide_sub();
+
+  % Try to run out of allowed number of stack frames
+  threw_up = false;
+  try
+    stack_overflow (max_stack * 2); % Should hit the limit
+  catch
+    threw_up = true;
+  end
+
+  assert (threw_up);
+
+  % Try to run out of VM stack space
+  % Assure that the VM is running, since we will disable the tree_evaluators
+  % stack limit mechanism.
+  if __vm_is_executing__ ()
+    absurd_frame_limit = max_stack_depth * 10000;
+    max_stack_depth (absurd_frame_limit, "local");
+
+    threw_up = false;
+    try
+      stack_overflow (absurd_frame_limit); % Should hit the VM limit
+    catch
+      threw_up = true;
+    end
+    assert (threw_up);
+  end
+end
+
+function set_max_stack_depth_1p (x)
+  max_stack_depth (x + 1, "local");
+  assert (max_stack_depth == x + 1);
+end
+
+function stack_overflow (n)
+  if n != 0
+    stack_overflow (n - 1);
+  end
+end
+
+function wide_sub ()
+  % 512 locals
+  a001=1; a002=2; a003=3; a004=4; a005=5; a006=6; a007=7; a008=8; a009=9; a010=10; a011=11; a012=12; a013=13; a014=14; a015=15; a016=16; a017=17; a018=18; a019=19; a020=20; a021=21; a022=22; a023=23; a024=24; a025=25; a026=26; a027=27; a028=28; a029=29; a030=30; a031=31; a032=32; a033=33; a034=34; a035=35; a036=36; a037=37; a038=38; a039=39; a040=40; a041=41; a042=42; a043=43; a044=44; a045=45; a046=46; a047=47; a048=48; a049=49; a050=50; a051=51; a052=52; a053=53; a054=54; a055=55; a056=56; a057=57; a058=58; a059=59; a060=60; a061=61; a062=62; a063=63; a064=64; a065=65; a066=66; a067=67; a068=68; a069=69; a070=70; a071=71; a072=72; a073=73; a074=74; a075=75; a076=76; a077=77; a078=78; a079=79; a080=80; a081=81; a082=82; a083=83; a084=84; a085=85; a086=86; a087=87; a088=88; a089=89; a090=90; a091=91; a092=92; a093=93; a094=94; a095=95; a096=96; a097=97; a098=98; a099=99; a100=100; a101=101; a102=102; a103=103; a104=104; a105=105; a106=106; a107=107; a108=108; a109=109; a110=110; a111=111; a112=112; a113=113; a114=114; a115=115; a116=116; a117=117; a118=118; a119=119; a120=120; a121=121; a122=122; a123=123; a124=124; a125=125; a126=126; a127=127; a128=128; a129=129; a130=130; a131=131; a132=132; a133=133; a134=134; a135=135; a136=136; a137=137; a138=138; a139=139; a140=140; a141=141; a142=142; a143=143; a144=144; a145=145; a146=146; a147=147; a148=148; a149=149; a150=150; a151=151; a152=152; a153=153; a154=154; a155=155; a156=156; a157=157; a158=158; a159=159; a160=160; a161=161; a162=162; a163=163; a164=164; a165=165; a166=166; a167=167; a168=168; a169=169; a170=170; a171=171; a172=172; a173=173; a174=174; a175=175; a176=176; a177=177; a178=178; a179=179; a180=180; a181=181; a182=182; a183=183; a184=184; a185=185; a186=186; a187=187; a188=188; a189=189; a190=190; a191=191; a192=192; a193=193; a194=194; a195=195; a196=196; a197=197; a198=198; a199=199; a200=200; a201=201; a202=202; a203=203; a204=204; a205=205; a206=206; a207=207; a208=208; a209=209; a210=210; 
a211=211; a212=212; a213=213; a214=214; a215=215; a216=216; a217=217; a218=218; a219=219; a220=220; a221=221; a222=222; a223=223; a224=224; a225=225; a226=226; a227=227; a228=228; a229=229; a230=230; a231=231; a232=232; a233=233; a234=234; a235=235; a236=236; a237=237; a238=238; a239=239; a240=240; a241=241; a242=242; a243=243; a244=244; a245=245; a246=246; a247=247; a248=248; a249=249; a250=250; a251=251; a252=252; a253=253; a254=254; a255=255; a256=256; a257=257; a258=258; a259=259; a260=260; a261=261; a262=262; a263=263; a264=264; a265=265; a266=266; a267=267; a268=268; a269=269; a270=270; a271=271; a272=272; a273=273; a274=274; a275=275; a276=276; a277=277; a278=278; a279=279; a280=280; a281=281; a282=282; a283=283; a284=284; a285=285; a286=286; a287=287; a288=288; a289=289; a290=290; a291=291; a292=292; a293=293; a294=294; a295=295; a296=296; a297=297; a298=298; a299=299; a300=300; a301=301; a302=302; a303=303; a304=304; a305=305; a306=306; a307=307; a308=308; a309=309; a310=310; a311=311; a312=312; a313=313; a314=314; a315=315; a316=316; a317=317; a318=318; a319=319; a320=320; a321=321; a322=322; a323=323; a324=324; a325=325; a326=326; a327=327; a328=328; a329=329; a330=330; a331=331; a332=332; a333=333; a334=334; a335=335; a336=336; a337=337; a338=338; a339=339; a340=340; a341=341; a342=342; a343=343; a344=344; a345=345; a346=346; a347=347; a348=348; a349=349; a350=350; a351=351; a352=352; a353=353; a354=354; a355=355; a356=356; a357=357; a358=358; a359=359; a360=360; a361=361; a362=362; a363=363; a364=364; a365=365; a366=366; a367=367; a368=368; a369=369; a370=370; a371=371; a372=372; a373=373; a374=374; a375=375; a376=376; a377=377; a378=378; a379=379; a380=380; a381=381; a382=382; a383=383; a384=384; a385=385; a386=386; a387=387; a388=388; a389=389; a390=390; a391=391; a392=392; a393=393; a394=394; a395=395; a396=396; a397=397; a398=398; a399=399; a400=400; a401=401; a402=402; a403=403; a404=404; a405=405; a406=406; a407=407; a408=408; a409=409; a410=410; 
a411=411; a412=412; a413=413; a414=414; a415=415; a416=416; a417=417; a418=418; a419=419; a420=420; a421=421; a422=422; a423=423; a424=424; a425=425; a426=426; a427=427; a428=428; a429=429; a430=430; a431=431; a432=432; a433=433; a434=434; a435=435; a436=436; a437=437; a438=438; a439=439; a440=440; a441=441; a442=442; a443=443; a444=444; a445=445; a446=446; a447=447; a448=448; a449=449; a450=450; a451=451; a452=452; a453=453; a454=454; a455=455; a456=456; a457=457; a458=458; a459=459; a460=460; a461=461; a462=462; a463=463; a464=464; a465=465; a466=466; a467=467; a468=468; a469=469; a470=470; a471=471; a472=472; a473=473; a474=474; a475=475; a476=476; a477=477; a478=478; a479=479; a480=480; a481=481; a482=482; a483=483; a484=484; a485=485; a486=486; a487=487; a488=488; a489=489; a490=490; a491=491; a492=492; a493=493; a494=494; a495=495; a496=496; a497=497; a498=498; a499=499; a500=500; a501=501; a502=502; a503=503; a504=504; a505=505; a506=506; a507=507; a508=508; a509=509; a510=510; a511=511; a512=512;
+
+  assert (a511 == 511);
+  assert (a512 == 512);
+  assert (a400 + a500 == 900);
+
+  % Do some ops to check that WIDE does not mess things up
+
+  % Loop to test specializations and despecializations
+  for j = 1:4
+    b = 3;
+    c = 4;
+    d = b * c;
+    assert (d == 12);
+
+    e = [1 2 3 4];
+    two = 2;
+    if (j == 3)
+      e = single (e); % despecialization
+      two = single (two);
+    end
+
+    assert (e(2) == 2);
+    assert (e(2) == two);
+    e(3) = 11;
+    assert (e(3) == 11);
+
+    assert (e(end) == 4);
+    assert (e(end - 1) == 11);
+
+    f = [5 6 7 8; 9 10 11 12];
+    six = 6;
+    if (j == 3)
+      f = single (f); % despecialization
+      six = single (six);
+    end
+    assert (f(1,2) == 6);
+    assert (f(1,2) == six);
+    f(1,2) = 7;
+    assert (f(1,2) == 7);
+
+    g = 0;
+    if (j == 3)
+      g = single (g); % despecialization
+    end
+
+    g++;
+    assert (g == 1);
+    ++g;
+    assert (g == 2);
+    g += 3;
+    assert (g == 5);
+    eval ("assert (g == 5);")
+
+    sum = 0;
+    for i = 1:3 % WIDE FOR_COND
+      sum += i;
+    end
+    assert (sum == 6);
+
+    s.s = 2;
+    assert (s.s == 2);
+    s.w.s = 3;
+    assert (s.w.s == 3);
+
+    % Test command function calling
+    pi; % WIDE PUSH_PI
+    assert (round (100 * pi) == 314)
+
+    suby1;
+    assert (suby1 == 1);
+    suby2;
+    [a, b] = suby2;
+    assert (a == 1)
+    assert (b == 2)
+    [a, b] = suby2 ();
+    assert (a == 1)
+    assert (b == 2)
+  end
+
+  % Bug #64749: With no iterations, FOR_SETUP writes to the wrong id with wide slots
+  i = 123;
+  for i = 1:0
+  end
+  assert (isempty (i));
+  assert (size(i) == [1 0])
+
+  % Check that a001 to a512 have the correct values
+  for i = 1:512
+    eval (sprintf ("assert (a%03d == %d);", i, i));
+  end
+end
+
+function ret = suby1
+  ret = 1;
+end
+
+function [a, b] = suby2
+  a = 1; b = 2;
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile/bytecode_multi_assign.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,73 @@
+function bytecode_multi_assign ()
+  A = [1 2; 3 4];
+  [a, b] = max (A);
+  __printf_assert__ ("%d ", a);
+  __printf_assert__ ("%d ", b);
+
+  [a,b,c,d] = foo ();
+  __printf_assert__ ("%d ", a);
+  __printf_assert__ ("%d ", b);
+  __printf_assert__ ("%d ", c);
+  __printf_assert__ ("%d ", d);
+
+  % Non ids in lhs
+  % Eval is used as a cheat since
+  % rhs need to know how many lhs values
+  % there are.
+
+  [e, f.a, g, h.b] = foo ();
+  __printf_assert__ ("%d ", e);
+  __printf_assert__ ("%d ", f.a);
+  __printf_assert__ ("%d ", g);
+  __printf_assert__ ("%d ", h.b);
+
+  e = [1 2 3];
+  g = {1, 2, 3};
+  [e(2), f.a, g{2}, h.b] = foo ();
+  __printf_assert__ ("%d ", e);
+  __printf_assert__ ("%d ", f.a);
+  __printf_assert__ ("%d ", g{2});
+  __printf_assert__ ("%d ", h.b);
+
+  [e(end), f.a, g{min (100, end)}, h.b] = foo ();
+  __printf_assert__ ("%d ", e);
+  __printf_assert__ ("%d ", f.a);
+  __printf_assert__ ("%d ", g{min (100, end)});
+  __printf_assert__ ("%d ", h.b);
+
+  [e(end), f.a, ~, h.b] = foo ();
+  __printf_assert__ ("%d ", e);
+  __printf_assert__ ("%d ", f.a);
+  __printf_assert__ ("%d ", g{end});
+  __printf_assert__ ("%d ", h.b);
+
+
+  [C{1:2}, D] = {1,2,3}{:};
+  __printf_assert__ ("%d ", C{1});
+  __printf_assert__ ("%d ", C{2});
+  __printf_assert__ ("%d ", D);
+
+  % Check that opcodes SET_IGNORE_OUTPUTS and CLEAR_IGNORE_OUTPUTS
+  % do not mess up if a nested expression throws before SET_IGNORE_OUTPUTS
+  % is executed.
+  try
+    [~, b] = bar (baz_throws ());
+  catch
+  end
+end
+
+function [a,b,c,d] = foo ()
+  a = 1;
+  b = 2;
+  c = 3;
+  d = 4;
+end
+
+function [a b] = bar (c)
+  a = 1;
+  b = 0;
+end
+
+function a = baz_throws ()
+  error ("qwe");
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile/bytecode_nested.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,501 @@
+function bytecode_nested ()
+
+  %% Empty function
+  a = 1; b = 1; c = 1;
+  function nested1
+  end
+
+  nested1
+  nested1 ();
+
+  %% Changes value of a
+  function nested2 ()
+    assert (a == 1);
+    a = 111;
+    assert (a == 111);
+  end
+
+  nested2 ();
+  assert (a == 111);
+
+  %% Changes value of b
+  function nested3 (a)
+    assert (a == 2);
+    a = 222; % Does not update a in parent
+    b = 111;
+    assert (a == 222);
+  end
+
+  a = 1; b = 1; c = 1;
+  nested3 (2);
+  assert (a == 1); % a still same, since argument in nested3
+  assert (b == 111);
+
+  %% a, b and c are arguments and return values in nested4
+  %% and do not change in the outer frame
+  function c = nested4 (a, b)
+    assert (a == 3)
+    assert (b == 4);
+    a = 0;
+    b = 0;
+    c = 2;
+  end
+
+  a = 1; b = 1; c = 1;
+  assert (nested4 (3, 4) == 2);
+  assert (a == 1);
+  assert (b == 1);
+  assert (c == 1);
+
+  %% Sets local variable that should not leak to this frame
+  a = 1; b = 1; c = 1;
+  nested5;
+  assert (! exist ('d5'))
+
+  function nested5
+    d5 = 3;
+    assert (d5 == 3);
+  end
+
+  %% Calls empty nested function nested61
+  function nested6
+    function nested61
+    end
+  end
+
+  nested6;
+  nested6 ();
+
+  %% Nested function nested71
+  function nested7
+    a = 2;
+    d7 = 2;
+    function nested71
+      assert (a == 2)
+      assert (d7 == 2)
+      a = 3;
+      d7 = 3;
+    end
+
+    nested71;
+    assert (a == 3)
+    assert (d7 == 3)
+  end
+
+  a = 1; b = 1; c = 1;
+  nested7;
+  assert (a == 3)
+
+  %% Nested with args
+  function b = nested8(a)
+    assert (a == 2)
+    b = 2;
+
+    % Args and returns are not shared
+    function b = nested81(a)
+      assert (a == 3)
+      b = 3;
+      assert (b == 3)
+    end
+
+    nested81 (3);
+    assert (a == 2)
+    assert (b == 2)
+  end
+
+  a = 1; b = 1; c = 1;
+  nested8 (2);
+  assert (a == 1)
+  assert (b == 1)
+
+  %% Recursive nested function
+  function b = nested9 (a)
+    d9 = 0; % d9 is not shared with recursive calls
+    assert (c == a + 1)
+    c = a;
+
+    a_cpy = a; % Test that argument are not changed by children
+
+    if a == 0
+      b = 3;
+      d9 = 3; % d9 does not change in parent frames
+      return;
+    end
+
+    b = 0;
+
+    b_tmp = nested9 (a - 1);
+    assert (b == 0)
+    b = b_tmp;
+
+    assert (d9 == 0)
+    assert (b == 3)
+    assert (a == a_cpy)
+  end
+
+  a = 1; b = 1;
+  c = 9;
+  ret = nested9 (8);
+  assert (a == 1)
+  assert (b == 1)
+  assert (c == 0)
+  assert (ret == 3)
+
+  %% Call siblings
+  function b = nested10 (d10, e10=true)
+    a = 1; b = 1; c = 1;
+    nested7;
+    assert (a == 3)
+
+    a = 1; b = 1;
+    c = 9;
+    ret = nested9 (8);
+    assert (a == 1)
+    assert (b == 1)
+    assert (c == 0)
+    assert (ret == 3)
+
+    if ! d10
+      return;
+    end
+
+    nested10 (d10 - 1, e10); % Test siblings from recursive call
+
+    if e10
+      nested11 (); % Calls nested10
+    end
+  end
+
+  function nested11
+    nested10(2, false);
+
+    a = 1; b = 1; c = 1;
+    assert (nested4 (3, 4) == 2);
+    assert (a == 1);
+    assert (b == 1);
+    assert (c == 1);
+  end
+
+  a = 1; b = 1; c = 1;
+  nested10 (1);
+
+  %% Test globals
+
+  function nested12
+    assert (isglobal ("glb_d"))
+    % Note: If a global is not added to the frame of nested12,
+    % is is not marked as global.
+    assert (!isglobal ("glb_e")) % Not a global, since not added to nested12's frame
+    assert (glb_d == 3)
+    glb_d = 4;
+
+    eval ("glb_f = 24;"); % glb_f on dynamic frame in nested12
+
+    function nested12_1
+      assert (isglobal ("glb_d"))
+      assert (isglobal ("glb_e"))
+      assert (glb_d == 4)
+      assert (glb_e == 13)
+      eval ("assert (glb_f == 24)");
+      glb_d = 5;
+      glb_e = 14;
+    end
+
+    nested12_1;
+
+    nested_sibling13;
+  end
+
+  function nested_sibling13
+    assert (!isglobal ("glb_d"))
+    assert (isglobal ("glb_e"))
+    assert (glb_e == 14)
+    glb_e = 15;
+  end
+
+  global glb_d;
+  global glb_e;
+  global glb_f;
+
+  glb_d = 3;
+  glb_e = 13;
+  glb_f = 23;
+
+  nested12;
+  assert (glb_d == 5)
+  assert (glb_e == 15)
+  assert (glb_f == 24)
+
+  %% Can't add dynamic variables
+  function nested14
+    eval ("a14 = 3;")
+  end
+
+  try
+    nested14;
+  catch e
+    assert (regexp (e.message, "can not add variable"));
+  end
+
+  try
+    eval ("aaa = 3;") % Can't add dynamic variable in root either
+  catch e
+    assert (regexp (e.message, "can not add variable"));
+  end
+
+  %% evalin
+  a = 1; b = 1; c = 1;
+  function nested15
+    evalin ("caller", "assert (b == 3);");
+    a15 = 15;
+
+    function nested15_1 (a15)
+      evalin ("caller", "assert (a15 == 15);");
+    end
+    nested15_1 (10);
+  end
+
+  b = 3;
+  nested15;
+
+  %% nargout, isargout
+  a = 1; b = 1; c = 1;
+  function [a, b, c] = nested16 ()
+    function [a, b, c] = nested16_1 ()
+      a = nargout;
+      b = 2;
+      c = 3;
+    end
+    a = nargout;
+    b = 2;
+    c = 3;
+
+    glb_d = [isargout(1), isargout(2), isargout(3)];
+
+    [a2, b2, c2] = nested16_1 ();
+    assert (a2 == 3);
+    [a2, b2] = nested16_1 ();
+    assert (a2 == 2);
+    [a2] = nested16_1 ();
+    assert (a2 == 1);
+  end
+
+  [a, b, c] = nested16 ();
+  assert (a == 3);
+  assert (glb_d == [1 1 1]) % isargout stored in glb_d
+  [a, b] = nested16 ();
+  assert (a == 2);
+  assert (glb_d == [1 1 0])
+  [a] = nested16 ();
+  assert (a == 1);
+  assert (glb_d == [1 0 0])
+  [~] = nested16 ();
+  assert (glb_d == [0 0 0])
+  [~, b] = nested16 ();
+  assert (b == 2)
+  assert (glb_d == [0 1 0])
+  [~, b, ~] = nested16 ();
+  assert (b == 2)
+  assert (glb_d == [0 1 0])
+  [~, b, c] = nested16 ();
+  assert (b == 2)
+  assert (c == 3)
+  assert (glb_d == [0 1 1])
+
+  %% Call handle to nested function 1
+  function ret = nested17
+    ret = b;
+  end
+
+  b = 3;
+
+  h1 = @nested17;
+
+  assert (call_handle1 (h1) == 3);
+  assert (h1 () == 3)
+  b = 4;
+  assert (call_handle1 (h1) == 4);
+
+  %% Changes value in decoupled nested frame
+  h2 = sub_returns_nested_fn;
+  h3 = sub_returns_nested_fn;
+
+  assert (h2() == 33)
+  assert (h2() == 34)
+  assert (call_handle1(h2) == 35)
+  assert (h3() == 33)
+
+  c1 = cdef_bar ("1");
+
+  % Two levels of nested nesting
+  h4 = sub_returns_nested_fn2;
+  assert (h4 () == 1)
+  assert (h4 () == 2)
+
+  % Try a function with both nested and anonymous functions (bug #64703)
+  assert (sub_nestandanon (2) == [6; 8])
+
+  % Try some legacy inline functions
+  h1i = inline ("x + 1");
+  assert (h1i (2) == 3)
+  h2i = inline ("__vm_is_executing__()");
+  assert (h2i() == __vm_is_executing__);
+
+  %% Bug 64778. clone() of nil octave_value causing problems.
+  function nested_18
+    nested_18_local_a = 1;
+
+    function nested_18_1
+      nested_18_local_a = 2;
+    end
+
+    nested_18_1;
+    assert (nested_18_local_a == 2); % Assure value on the current stack frame changed
+
+    nested_18_local_a = 1;
+    hh = @nested_18_1;
+    hh();
+    % A handle call also changes the current stack frame, since the handle refers to the current frame
+    assert (nested_18_local_a == 2);
+  end
+
+  h5 = @nested_18; % Make a handle
+  nested_18 ();    % Call normally. Caused problem since "is_closure_frame ()" was used improperly.
+  subby2 (); % The slot value for subby2 was cloned and caused an internal error "ov.is_nil()" check to trigger
+  call_handle0 (h5); % Test calling from another frame too.
+
+  a = length ([1 2]);
+
+  ## Test calling through C++ code
+  function b = nested19 (c)
+    b = length (c);
+  end
+
+  c = {1, 2};
+  d = cellfun (@(x) nested19 (x), c);
+  d = cellfun (@nested19, c);
+
+  ## misc test
+  misc_nested;
+end
+
+function subby
+end
+
+function subby2
+end
+
+function a = call_handle1 (h)
+  a = h ();
+end
+
+function call_handle0 (h)
+  h ();
+end
+
+function h = sub_returns_nested_fn ()
+  a = 33;
+  c2 = cdef_bar ("2");
+  function ret = sub_nested1
+    ret = a;
+    a++;
+    c3 = cdef_bar ("3");
+  end
+
+  h = @sub_nested1;
+end
+
+function h1 = sub_returns_nested_fn2
+  c2 = cdef_bar ("4");
+
+  function h2 = nested_fn1
+    a = 1;
+    c3 = cdef_bar ("5");
+
+    function ret = nested_fn2
+      c4 = cdef_bar ("6");
+      ret = a;
+      a++;
+    end
+
+    h2 = @nested_fn2;
+  end
+
+  h1 = nested_fn1 ();
+end
+
+function retval = sub_nestandanon(x)
+  retval = zeros(2,1);
+
+  f1 = @(x) 3 .* x;
+  retval(1) = f1(x);
+
+  function [ret] = f2(x)
+    ret = 4 .* x;
+  endfunction
+
+  retval(2) = f2(x);
+endfunction
+
+function misc_nested ()
+  ## The behaviour here is kinda strange, but the bytecode interpreter
+  ## needs to do what the tree_evaluator does.
+
+  function nested1 ()
+    global a
+    global b
+    global c
+
+    b = 1;
+    c = 2;
+
+    function nested2 ()
+      assert (! isglobal ("a"))
+      assert (isglobal ("b"))
+      assert (b == 1)
+      assert (! isglobal ("c"))
+
+      b = 3;
+      bb = 3;
+
+      clear global a
+      clear global b
+      clear global c
+
+      clear global aa
+      clear global bb
+      clear global cc
+
+      assert (! isglobal ("a"))
+      assert (isglobal ("b")) % Yupp, should still be global
+      assert (! isglobal ("c"))
+
+      assert (! exist ("a"))
+      assert (! exist ("b"))
+      assert (! exist ("c"))
+
+      assert (! exist ("aa"))
+      assert (! exist ("bb"))
+      assert (! exist ("cc"))
+    end
+
+    nested2 ();
+
+    assert (! isglobal ("a"))
+    assert (  isglobal ("b")) % Yupp, should still be global
+    assert (! isglobal ("c"))
+
+    assert (! exist ("a"))
+    assert (! exist ("b"))
+    assert (! exist ("c"))
+
+    clear global a
+    clear global b
+    clear global c
+    assert (! isglobal ("a"))
+    assert (! isglobal ("b"))
+    assert (! isglobal ("c"))
+  end
+
+  nested1
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile/bytecode_persistant.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,23 @@
+function bytecode_persistant ()
+  q = 3;
+  l = 4;
+
+  persistent a = 3;
+  __printf_assert__ ("a:%d ", a++);
+
+  persistent b;
+  __printf_assert__ ("b:%d ", b);
+  __printf_assert__ ("%s ", class (b));
+  __printf_assert__ ("%d ", size (b));
+  b = 0;
+  __printf_assert__ ("%d ", b++);
+
+  suby ();
+  suby ();
+end
+
+function suby ()
+  persistent c = 2;
+  c++;
+  __printf_assert__ ("c:%d ", c);
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile/bytecode_range.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,68 @@
+function bytecode_range ()
+
+  % These should be range constants, but I think
+  % negative limits are not folded to range constants
+
+  a = 1:3;
+  __printf_assert__ ("%d ", a);
+
+  a = 1:2:6;
+  __printf_assert__ ("%d ", a);
+
+  a = 1:2:5;
+  __printf_assert__ ("%d ", a);
+
+  a = 1:0.1:1.4;
+  __printf_assert__ ("%d ", a);
+
+  a = 1:-0.1:0.7;
+  __printf_assert__ ("%d ", a);
+
+  a = 7:7;
+  __printf_assert__ ("%d ", a);
+
+  a = 7:-1:7;
+  __printf_assert__ ("%d ", a);
+
+  a = 7:-1:8;
+  __printf_assert__ ("%d ", isempty (a));
+
+  % Dynamically created with COLON2 or 3 opcode.
+  % Colons behave differently when in command expression,
+  % they use the COLONX_CMD opcodes.
+  %
+  % ??? I don't think the ranges are allocated as matrices
+  % when used in commands.
+  base = 8;
+  inc = 2;
+  lim = 11;
+
+  a = base : inc : lim;
+  __printf_assert__ ("%d ", a);
+  for i = base : inc : lim
+    __printf_assert__ ("%d ", i);
+  end
+
+  a = base : lim;
+  __printf_assert__ ("%d ", a);
+  for i = base : lim
+    __printf_assert__ ("%d ", i);
+  end
+
+  base = 10;
+  inc = -2;
+  lim = 7;
+
+  a = base : inc : lim;
+  __printf_assert__ ("%d ", a);
+  for i = base : inc : lim
+    __printf_assert__ ("%d ", i);
+  end
+
+  a = -base : -lim;
+  __printf_assert__ ("%d ", a);
+  for i = -base : -lim
+    __printf_assert__ ("%d ", i);
+  end
+
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile/bytecode_return.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,93 @@
+function bytecode_return ()
+  a = foo ();
+  __printf_assert__ ("%d ", a);
+
+  bar (1);
+  bar (0);
+
+  baz (0);
+  baz (1);
+  baz (2);
+
+  boz();
+
+  meh ();
+
+  a = return_1 ();
+  __printf_assert__ ("%d ", a);
+  [a b] = return_2 ();
+  __printf_assert__ ("%d ", a);
+  __printf_assert__ ("%d ", b);
+
+  % Drop one output variable
+  a = return_2 ();
+  __printf_assert__ ("%d ", a);
+
+  % Drop all output variables
+  return_2 ();
+
+  % Command form call
+  a = return_1;
+  __printf_assert__ ("%d ", a);
+
+  [a b] = return_2;
+  __printf_assert__ ("%d ", a);
+  __printf_assert__ ("%d ", b);
+
+  a = return_2;
+  __printf_assert__ ("%d ", a);
+
+  return_2;
+
+  silly();
+  silly(2);
+end
+
+function [a b] = silly(i)
+  __printf_assert__ ("silly ");
+end
+
+function a = return_1 ()
+  a = 1;
+end
+
+function [a b] = return_2 ()
+  a = 1;
+  b = 2;
+end
+
+function out = foo ()
+  out = 2;
+  return
+end
+
+function bar (i)
+  if i
+    __printf_assert__ ("baaar ");
+    return
+  end
+
+  __printf_assert__ ("bääär ");
+end
+
+function out = baz (i)
+  if i == 0
+    __printf_assert__ ("baaaaz ");
+    return
+  elseif i == 1
+    __printf_assert__ ("bääääz ");
+    return
+  end
+
+  __printf_assert__ ("bååååz ");
+end
+
+function boz ()
+  __printf_assert__ ("booz ");
+  return
+  __printf_assert__ ("booo ");
+end
+
+function meh ()
+  return
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile/bytecode_script_nargin.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,19 @@
+
+s_whos = whos;
+
+if length (s_whos)
+  s_whos = s_whos(end);
+
+  % Only in the top scope nargin is equal to the size of argv
+  if strcmp (s_whos.nesting.function, "top scope")
+    assert (length (argv ()) == nargin ())
+  end
+end
+
+assert (bytecode_script_nargin_expected_value == nargin)
+
+if exist ("bytecode_script_nargin_call_recursive") && bytecode_script_nargin_call_recursive
+  bytecode_script_nargin_call_recursive = false;
+  bytecode_script_nargin;
+  bytecode_script_nargin_call_recursive = true;
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile/bytecode_script_topscope.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,79 @@
+% (bug #64705)
+%
+% Test the interaction between a script called from top scope and
+% global and local variables
+
+% If this global is true, we do a recursive call to this script
+% to add another frame.
+global bytecode_script_topscope_call_self
+if bytecode_script_topscope_call_self
+  bytecode_script_topscope_call_self = false;
+  bytecode_script_topscope;
+  bytecode_script_topscope_call_self = true;
+  return
+end
+
+global bytecode_script_topscope_place % "caller" or "base", set in the test script
+
+assert (ans == 1234); % Last calculation in bytecode_script_topscope_setup_script
+4321; % Change ans to 4321
+evalin (bytecode_script_topscope_place, "assert (ans == 4321)"); % Check that ans changed in "caller" or "base" frame
+evalin ("caller", "assert (ans == 4321)"); % Should always change in caller
+
+% These should already be global
+assert (isglobal ("glb_a"))
+assert (isglobal ("glb_b"))
+assert (isglobal ("glb_c"))
+assert (isglobal ("glb_d"))
+assert (isglobal ("glb_f"))
+assert (isglobal ("glb_g"))
+
+global glb_a % "re-global"
+global glb_b % "re-global"
+global glb_c % "re-global"
+global glb_d = 1234; % re-global, init should not run due to being defined in top scope already
+global glb_e = 6; % Not defined in top scope
+eval ("global glb_f"); % "re-global", dynamic frame
+global glb_g % "re-global"
+
+assert (glb_a == 2)
+assert (glb_b == 3)
+assert (glb_c == 4)
+assert (glb_d == 5)
+assert (glb_e == 6)
+eval ("assert (glb_f == 7)")
+assert (glb_g == 8)
+
+assert (local_b == 103)
+
+glb_b = 33;
+
+clear global glb_c;
+
+glb_d = 55;
+
+%% Locals
+
+local_a = 102; % Will be added to top scope
+
+local_b = 113; % Added in top scope, change it
+
+clear local_c % Added in top scope, clear it here
+
+% Added in top scope. Clear after using
+local_d = 123;
+clear local_d;
+
+% Not added in top scope. Clear after using
+local_e = 123;
+clear local_e;
+
+% Not added in top scope. Clear from top scope ...
+local_f = 123;
+evalin (bytecode_script_topscope_place, "clear local_f");
+assert (!exist ("local_f"))
+
+% Added in top scope. Clear from top scope ...
+local_g = 123;
+evalin (bytecode_script_topscope_place, "clear local_g");
+assert (!exist ("local_g"))
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile/bytecode_script_topscope_assert.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,25 @@
+function bytecode_script_topscope_assert (place = "base")
+ evalin (place, "assert (isglobal ('glb_a'))");
+ evalin (place, "assert (isglobal ('glb_b'))");
+ evalin (place, "assert (!isglobal ('glb_c'))"); % Unglobalized in script
+ evalin (place, "assert (isglobal ('glb_d'))");
+ evalin (place, "assert (isglobal ('glb_e'))");
+ evalin (place, "assert (isglobal ('glb_g'))");
+ evalin (place, "assert (glb_a == 2)");
+ evalin (place, "assert (glb_b == 33)");
+ evalin (place, "assert (!exist ('glb_c'))");
+ evalin (place, "assert (glb_d == 55)");
+ evalin (place, "assert (glb_e == 6)"); % Added in the script
+ evalin (place, "assert (glb_g == 8)");
+ evalin (place, "assert (local_a == 102)"); % Local added in script
+ evalin (place, "assert (local_b == 113)");
+ evalin (place, "assert (!exist ('local_c'))"); % Cleared in script
+ evalin (place, "assert (!exist ('local_d'))"); % Cleared in script
+ evalin (place, "assert (!exist ('local_e'))"); % Cleared in script
+ evalin (place, "assert (!exist ('local_f'))"); % Cleared in script
+ evalin (place, "assert (!exist ('local_g'))"); % Cleared in script
+ evalin (place, "assert (local_h == 456)");
+
+ evalin (place, "clear global glb_a glb_b glb_c glb_d glb_e glb_f glb_g");
+ evalin (place, "clear local_a local_b local_c local_d local_e local_f local_g local_h");
+endfunction
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile/bytecode_script_topscope_setup.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,11 @@
+function bytecode_script_topscope_setup (place = "base")
+ evalin (place, "global glb_a = 2");
+ evalin (place, "global glb_b = 3");
+ evalin (place, "global glb_c = 4");
+ evalin (place, "global glb_d = 5");
+ evalin (place, "global glb_f = 7");
+ evalin (place, "local_b = 103;");
+ evalin (place, "local_c = 104;");
+ evalin (place, "local_d = 105;");
+ evalin (place, "local_g = 108;");
+endfunction
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile/bytecode_script_topscope_setup_script.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,7 @@
+% This script is a complement to bytecode_script_topscope_setup, but is a
+% script instead of a function.
+
+local_h = 456;
+global glb_g;
+glb_g = 8;
+1234; % Should assign 1234 to ans
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile/bytecode_scripts.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,25 @@
+function bytecode_scripts
+    % Each variable is named after the corresponding script that messes with them.
+    % When eval() is used, it is to avoid introducing the variable into the static precompiled
+    % stack frame, to test the dynamic stack frame.
+
+    b1 = 2;
+    __printf_assert__ ("%d ", exist ("a1"));
+    script1 (); % defines 'a1', 'b1', 'c1', 'd1'
+
+    __printf_assert__ ("%d ", eval ("a1;"));
+    __printf_assert__ ("%d ", b1);
+    __printf_assert__ ("%d ", eval ("c1;"));
+    __printf_assert__ ("%d ", d1);
+
+    try
+        script2 (); % defines 'a2 = 3' and then errors
+    catch
+        __printf_assert__ ("%d ", a2);
+    end
+
+    script3 ();
+
+    % Inline function defined in script1
+    assert (inlinefn2 (234) == 235)
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile/bytecode_struct.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,38 @@
+function bytecode_struct ()
+  s = struct ('a', 1, 'b', 2);
+  __printf_assert__ ("%d ", s.a);
+  __printf_assert__ ("%d ", s.b);
+  __printf_assert__ ("%s ", class (s.a));
+  __printf_assert__ ("%d ", size (s.a));
+
+  % Should not mess up stack
+  s.a;
+
+  % Test simple assigns
+  r.a = 3;
+
+  __printf_assert__ ("%s ", class (r));
+  __printf_assert__ ("%d ", r.a);
+
+  % Test word command struct subref
+
+  __printf_assert__ ("%d ", suby.b);
+
+  % Bug 64817
+  test_bug_64817;
+end
+
+function a = suby ()
+  a.b = 4;
+end
+
+function test_bug_64817
+  % Field id slot collided with variable slot when making
+  % the script frame.
+
+  script_defines_qweqwe; % Just does "qweqwe = 0;"
+
+  if 0
+    asd.qweqwe; % 'qweqwe' is a field here
+  end
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile/bytecode_subfuncs.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,128 @@
+function bytecode_subfuncs (h_to_anon_fn)
+  a = foo ();
+  __printf_assert__ ("%.17g ", a);
+  b = bar ();
+  __printf_assert__ ("%.17g ", b);
+  c = baz ();
+  __printf_assert__ ("%.17g ", c);
+
+  meh ();
+
+  a = para_ret_same_name (11);
+  __printf_assert__ ("%.17g ", a);
+
+  % Test default args
+  a = defaultarg ();
+  __printf_assert__ ("%.17g ", a);
+
+  a = defaultarg (10);
+  __printf_assert__ ("%.17g ", a);
+
+  a = defaultarg2 ();
+  __printf_assert__ ("%.17g ", a);
+
+  a = defaultarg2 (11);
+  __printf_assert__ ("%.17g ", a);
+
+  a = defaultarg2 (11, 12, 13, 14);
+  __printf_assert__ ("%.17g ", a);
+
+  % Magic colon
+  a = defaultarg2 (11, :, 13, 14);
+  __printf_assert__ ("%.17g ", a);
+
+  % Functions handles
+  h = @max;
+  __printf_assert__ ("%d ", h ([1 3]));
+  __printf_assert__ ("%d ", just_call_handle_with_arg (h, [1 3]));
+  __printf_assert__ ("%d ", just_call_handle_with_arg_bytecode (h, [1 3]));
+
+  h = @foo;
+  __printf_assert__ ("%d ", h ());
+  __printf_assert__ ("%d ", just_call_handle_with_arg (h));
+  __printf_assert__ ("%d ", just_call_handle_with_arg_bytecode (h));
+
+  % Call an anonymous function from the tst-file
+  h_to_anon_fn ();
+
+  % Many args and returns
+  [a01, a02, a03, a04, a05, a06, a07, a08, a09, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28, a29, a30, a31, a32] = ret32 ();
+  __printf_assert__ ("%d ", a01, a02, a03, a04, a05, a06, a07, a08, a09, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28, a29, a30, a31, a32);
+  __printf_assert__ ("%d ", ret32 ()); % nargout = 1
+  ret32 (); % nargout = 0
+  [args32{1:32}] = ret32 ();
+  __printf_assert__ ("%d ", args32{:});
+
+  [a01, a02, a03, a04, a05, a06, a07, a08, a09, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28, a29, a30, a31, a32] = ret32take32 (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32);
+  __printf_assert__ ("%d ", a01, a02, a03, a04, a05, a06, a07, a08, a09, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28, a29, a30, a31, a32);
+
+  take32 (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32);
+
+  [a01, a02, a03, a04, a05, a06, a07, a08, a09, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28, a29, a30, a31, a32, ...
+   a33, a34, a35, a36, a37, a38, a39, a40, a41, a42, a43, a44, a45, a46, a47, a48, a49, a50, a51, a52, a53, a54, a55, a56, a57, a58, a59, a60, a61, a62, a63, a64] = ...
+      takeXp32retXp32 (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, ...
+                       33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64);
+  __printf_assert__ ("%d ", a01, a18, a59, a64);
+end
+
+function [a01, a02, a03, a04, a05, a06, a07, a08, a09, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28, a29, a30, a31, a32, varargout] = takeXp32retXp32 (b01, b02, b03, b04, b05, b06, b07, b08, b09, b10, b11, b12, b13, b14, b15, b16, b17, b18, b19, b20, b21, b22, b23, b24, b25, b26, b27, b28, b29, b30, b31, b32, varargin)
+  for i = 1:32
+    eval (sprintf ("a%02.f = b%02.f;", i, i));
+  end
+  varargout = varargin;
+end
+
+function take32 (b01, b02, b03, b04, b05, b06, b07, b08, b09, b10, b11, b12, b13, b14, b15, b16, b17, b18, b19, b20, b21, b22, b23, b24, b25, b26, b27, b28, b29, b30, b31, b32)
+  __printf_assert__ ("take32:");
+  __printf_assert__ ("%d ", b01, b02, b03, b04, b05, b06, b07, b08, b09, b10, b11, b12, b13, b14, b15, b16, b17, b18, b19, b20, b21, b22, b23, b24, b25, b26, b27, b28, b29, b30, b31, b32);
+end
+
+function [a01, a02, a03, a04, a05, a06, a07, a08, a09, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28, a29, a30, a31, a32] = ret32 ()
+  __printf_assert__ ("ret32:");
+  for i = 1:32
+    eval (sprintf ("a%02.f = %f;", i, i));
+  end
+end
+
+function [a01, a02, a03, a04, a05, a06, a07, a08, a09, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28, a29, a30, a31, a32] = ret32take32 (b01, b02, b03, b04, b05, b06, b07, b08, b09, b10, b11, b12, b13, b14, b15, b16, b17, b18, b19, b20, b21, b22, b23, b24, b25, b26, b27, b28, b29, b30, b31, b32)
+  for i = 1:32
+    eval (sprintf ("a%02.f = b%02.f;", i, i));
+  end
+end
+
+function out = just_call_handle_with_arg_bytecode (h, varargin)
+  out = h (varargin{:});
+end
+
+function out = foo ()
+  out = 2;
+end
+
+function out = bar ()
+  out = foo ();
+end
+
+function out = baz ()
+  out = bar ();
+end
+
+function meh()
+end
+
+function i = para_ret_same_name (i)
+end
+
+function a = defaultarg (b = 30)
+  a = b;
+end
+
+function a = defaultarg2 (a = 30, b = max (4, 5), c = [], d = [1 2])
+  __printf_assert__ ("%.17g ", a);
+  __printf_assert__ ("%.17g ", b);
+  __printf_assert__ ("%.17g ", c);
+  __printf_assert__ ("%d ", size(c));
+  __printf_assert__ ("%s ", class(c));
+  __printf_assert__ ("%.17g ", d);
+  __printf_assert__ ("%d ", size(d));
+  __printf_assert__ ("%s ", class(d));
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile/bytecode_subsasgn.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,143 @@
+function bytecode_subsasgn ()
+  A = [1 2; 3 4];
+  A(1) = 3;
+  __printf_assert__ ("%d ", A(1));
+
+  A(1,2) = 5;
+  __printf_assert__ ("%d ", A(1,2));
+
+  A(:,1) = [9;8];
+  __printf_assert__ ("%d ", A(:,1));
+
+  A(:,:) = [11 12; 13 14];
+  __printf_assert__ ("%d ", A(:,1));
+
+  B = [1:10];
+  B(7:end) = [77 88 99 1010];
+  __printf_assert__ ("%d ", B);
+  B(4:min (5, end)) = 987;
+  __printf_assert__ ("%d ", B);
+
+  % Subassign to a undefined variable
+  C(3,2) = 13;
+  __printf_assert__ ("%d ", C);
+  __printf_assert__ ("%s ", class (C));
+  __printf_assert__ ("%d ", size (C));
+
+  % Subassign cells
+  D = {1,2,3};
+  D{1} = 4;
+  __printf_assert__ ("%d ", D{:});
+  __printf_assert__ ("%s ", class (D));
+  __printf_assert__ ("%d ", size (D));
+
+  D{2,3} = {6,7};
+  dd = D{2,3};
+  __printf_assert__ ("%d ", dd{:});
+  __printf_assert__ ("%d ", size (D));
+
+  E = {1,2,3};
+  E(2:3) = {4,5};
+  __printf_assert__ ("%d ", E{:});
+
+  % Use cells as a subscript
+  M = [1 2 3; 3 4 5; 5 6 7];
+  s = {":", [1;2]};
+  __printf_assert__ ("%d ", M(s{:}));
+  M(s{:}) = 7;
+  __printf_assert__ ("%d ", M(s{:}));
+
+  % Assure that sources are not modified
+  x = [1 2 3];
+  y = x;
+  y(2) = 3;
+  __printf_assert__ ("%d %d ", x, y);
+
+  x = {1 2 3};
+  y = x;
+  y{2} = 3;
+  __printf_assert__ ("%d %d ", x{2}, y{2});
+
+  % Chained assigns
+  a.b.c.d = 2;
+  __printf_assert__ ("%d ", a.b.c.d);
+  a.("a").c.d = 3;
+  __printf_assert__ ("%d ", a.a.c.d);
+  q.w = {{1},{2}};
+  __printf_assert__ ("%d ", q.w{1}{1});
+  q.w{1} = {3};
+  __printf_assert__ ("%d ", q.w{1}{1});
+
+  z.x.c = [1 2 3];
+  __printf_assert__ ("%d ", z.x.c);
+  z.x.c(:) = 4;
+  __printf_assert__ ("%d ", z.x.c);
+
+  x = {[1 2 3], [4 5 6]; [8 9 10], [11 12 13]};
+  q.y = {{}, {}; {} {}};
+  q.y{1, 2} = x;
+  q.y{1, 2}{2, 1} = 3;
+
+  __printf_assert__ ("%d ", q.y{1, 2}{2, 1});
+  __printf_assert__ ("%d ", q.y{1, 2}{1, 2});
+
+  % += etc
+  A = [1 2 3 4];
+  A(2) += 3;
+  __printf_assert__ ("%d ", A);
+  A(3) -= 4;
+  __printf_assert__ ("%d ", A);
+
+  C = struct ();
+  C.A = A;
+  C.A(4) *= 2;
+  __printf_assert__ ("%d ", C.A);
+
+  % Chained assignments
+  AA = BB.c = 3;
+  __printf_assert__ ("%d ", AA);
+  CC = DD.c.d = 3;
+  __printf_assert__ ("%d ", DD.c.d);
+  __printf_assert__ ("%d ", CC);
+  EE.a = FF.c.d = 3;
+  __printf_assert__ ("%d ", EE.a);
+  GG = HH.a = II.a.b = JJ.a.b.c = 3;
+  __printf_assert__ ("%d ", GG);
+  __printf_assert__ ("%d ", HH.a);
+  __printf_assert__ ("%d ", II.a.b);
+  __printf_assert__ ("%d ", JJ.a.b.c);
+
+  % cs-list in lhs using opcode SUBASSIGN_CHAINED
+  idx = {1, 2};
+  M = ones (2,2);
+  M(idx{:}) *= 3;
+  __printf_assert__ ("%d ", M);
+
+  % SUBASSIGN_ID need to handle magic [] constant when chained (bug #64704)
+  M1 = [1 2 3];
+  M2 = [4 5 6];
+  M3 = [7 8 9];
+
+  M1(1) = M2(2) = M3(3) = [];
+  assert (M1 == [2 3])
+  assert (M2 == [4 6])
+  assert (M3 == [7 8])
+
+  M1 = {1 2 3};
+  M2 = {4 5 6};
+  M3 = {7 8 9};
+
+  M1(1) = M2(2) = M3(3) = [];
+  assert (cell2mat (M1) == [2 3])
+  assert (cell2mat (M2) == [4 6])
+  assert (cell2mat (M3) == [7 8])
+
+  M1 = [1 2 3; 4 5 6];
+  M2 = {4 5 6; 7 8 9};
+  S3.a = "zxc";
+
+  M1(1,2) = M2{2,1} = S3.a = 123;
+  assert (M1 == [1 123 3; 4 5 6])
+  assert (cell2mat (M2) == [4 5 6; 123 8 9])
+  assert (S3.a == 123)
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile/bytecode_switch.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,164 @@
+function bytecode_switch ()
+  a = 2;
+  switch (a)
+    case 1
+      __printf_assert__ ("boo ");
+    case 2
+      __printf_assert__ ("yay ");
+    otherwise
+      __printf_assert__ ("boo ");
+  end
+
+  switch (a)
+    case 1
+      __printf_assert__ ("boo ");
+    case 3
+      __printf_assert__ ("boo ");
+    otherwise
+      __printf_assert__ ("yay2 ");
+  end
+
+  b = "yay3 ";
+  switch (b)
+    case "boo1"
+      __printf_assert__ ("boo ");
+    case "yay3 "
+      __printf_assert__ ("yay3 ");
+    otherwise
+      __printf_assert__ ("boo ");
+  end
+
+  % Cells
+  a = 2;
+  switch (a)
+    case {1,0}
+      __printf_assert__ ("boo ");
+    case {2,3}
+      __printf_assert__ ("yay4 ");
+    otherwise
+      __printf_assert__ ("boo ");
+  end
+
+  switch (a)
+    case {1,0}
+      __printf_assert__ ("boo ");
+    case {3,2} %Swapped
+      __printf_assert__ ("yay5 ");
+    otherwise
+      __printf_assert__ ("boo ");
+  end
+
+  switch (a)
+    case {1,0}
+      __printf_assert__ ("boo ");
+    case {3,4}
+      __printf_assert__ ("boo ");
+    otherwise
+      __printf_assert__ ("yay6 ");
+  end
+
+  % Silly
+  switch (a)
+    otherwise
+      __printf_assert__ ("yay7 ");
+  end
+
+  % Empty
+  switch (a)
+  end
+
+  % No default case
+  a = 2;
+  switch (a)
+    case 1
+      __printf_assert__ ("boo ");
+    case 2
+      __printf_assert__ ("yay8 ");
+  end
+
+  switch (a)
+    case 1
+      __printf_assert__ ("boo ");
+    case 3
+      __printf_assert__ ("boo ");
+  end
+
+  % Return from switch
+  __printf_assert__ ("%d ", returnfromswitch (1));
+  __printf_assert__ ("%d ", returnfromswitch (2));
+  __printf_assert__ ("%d ", returnfromswitch (3));
+  __printf_assert__ ("%d ", returnfromswitch2 (1));
+  __printf_assert__ ("%d ", returnfromswitch2 (2));
+  __printf_assert__ ("%d ", returnfromswitch2 (3));
+
+  % switch with continue
+
+  for i = 1:4
+    switch (i)
+      case 1
+        __printf_assert__ ("1:%d ", i);
+      case 2
+        __printf_assert__ ("2:%d ", i);
+        continue;
+      case 3
+        __printf_assert__ ("3:%d ", i);
+      otherwise
+        __printf_assert__ ("breaking:%d ", i);
+        break;
+    endswitch
+    __printf_assert__ ("for-end:%d", i);
+  end
+
+end
+
+
+function a = returnfromswitch (b)
+  switch (b)
+    case 1
+      a = 1;
+      return
+    case 2
+      a = 2;
+      return;
+    otherwise
+      a = 3;
+      return
+  end
+
+  __printf_assert__ ("boo ");
+end
+
+function a = returnfromswitch2 (b)
+  % switches and fors clean the stack at returns
+  % in a special way so test that that works properly
+  %
+  % The for loops put native ints on the stack, so we can't just pop
+  % the stack assuming everything is octave values.
+  %
+  for i = [1, 2] % Puts int n,int i and the range on the stack
+    switch (b) % Puts b on the stack
+      case 10
+        return
+      otherwise
+        for j = [3, 4] % Puts int n,int i and the range on the stack
+          __printf_assert__ ("%d ", j);
+          switch (b) % Puts b on the stack
+            case 1
+              a = 1;
+              return %pop, popint popint pop, pop, popint popint pop
+            case 2
+              a = 2;
+            otherwise
+              a = 3;
+              return
+          end
+
+          __printf_assert__ ("%d ", j);
+        end
+    end
+
+    __printf_assert__ ("%d ", i);
+  end
+
+  __printf_assert__ ("yoo ");
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile/bytecode_trycatch.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,173 @@
+function bytecode_trycatch ()
+  % TODO: Check identifier in error object too
+
+  try
+    __printf_assert__ ("yay ");
+  catch
+    __printf_assert__ ("boo ");
+  end
+
+  try
+    __printf_assert__ ("yay2 ");
+    error ("ooo");
+    __printf_assert__ ("boo ");
+  catch
+    __printf_assert__ ("yay3 ");
+
+    __printf_assert__ ("%s ", getfield (lasterror (), 'message'));
+  end
+
+  % Empty body/catch
+  try
+  catch
+    __printf_assert__ ("boo ");
+  end
+
+  try
+  catch
+  end
+
+  try
+  catch err
+  end
+
+  try
+    error("foo");
+  catch
+  end
+
+  % Error object
+  try
+    __printf_assert__ ("yay2 ");
+    error ("ooo2");
+    __printf_assert__ ("boo ");
+  catch err
+    __printf_assert__ ("yay3 ");
+
+    __printf_assert__ ("%s ", getfield (lasterror (), 'message'));
+    __printf_assert__ ("%s ", getfield (err, 'message'));
+  end
+
+  % Nested
+  try
+    __printf_assert__ ("yay3 ");
+    try
+      __printf_assert__ ("yay4 ");
+      error ("Nested error");
+      __printf_assert__ ("boo ");
+    catch
+      __printf_assert__ ("%s ", getfield (lasterror (), 'message'));
+      try
+        __printf_assert__ ("yay5 ");
+        error ("In catch");
+        __printf_assert__ ("boo ");
+      catch
+        __printf_assert__ ("yay6 ");
+        __printf_assert__ ("%s ", getfield (lasterror (), 'message'));
+      end
+    end
+
+    error ("qwe");
+    __printf_assert__ ("boo ");
+  catch
+    __printf_assert__ ("yay7 ");
+    __printf_assert__ ("%s ", getfield (lasterror (), 'message'));
+  end
+
+  % Unwind subfunction
+  try
+    suby ();
+  catch
+    __printf_assert__ ("yay8 ");
+    __printf_assert__ ("%s ", getfield (lasterror (), 'message'));
+  end
+
+  % Catch undefined id
+  try
+    qwe = asd;
+    __printf_assert__ ("boo ");
+  catch
+    __printf_assert__ ("yay9 ");
+    __printf_assert__ ("%s ", getfield (lasterror (), 'message'));
+  end
+  % Catch unconformant arguments
+  try
+    a = [1 2];
+    b = [1 2 3];
+    c = a * b;
+  catch
+    __printf_assert__ ("yay10 ");
+    __printf_assert__ ("%s ", getfield (lasterror (), 'message'));
+  end
+
+  % Rethrow
+  try
+    try
+      error ("yoyo");
+    catch err
+      rethrow (err);
+      __printf_assert__ ("boo ");
+    end
+  catch
+    __printf_assert__ ("yay11 ");
+    __printf_assert__ ("%s ", getfield (lasterror (), 'message'));
+  end
+
+  % There are some shenanigans going on popping native ints belonging
+  % to for-loops' iteration counters, so test that.
+  for i = 1:3
+    try
+      error ("foo");
+    catch
+      __printf_assert__ ("yay12 ");
+    __printf_assert__ ("%s ", getfield (lasterror (), 'message'));
+    end
+  end
+
+  % switch statements save the value on the stack so add some switches
+  % to check that they are unwound properly nested in fors etc
+
+  zxc = '1';
+  switch zxc
+    case '1'
+      for m = 1:3
+      end
+      switch m
+        case 2
+      end
+
+      for i = 1:3
+        try
+          qwe = '1';
+          switch qwe
+            case '1'
+              for j = 1:3
+                asd = '1';
+                switch asd
+                  case '1'
+                    error ("foo");
+                end
+
+                for k = 1:3
+                end
+              end
+          end
+        catch
+          __printf_assert__ ("yay13 ");
+          __printf_assert__ ("%s ", getfield (lasterror (), 'message'));
+        end
+      end
+
+      for l = 1:2
+      end
+  end
+  % TODO: Test more types of errors ...
+end
+
+function suby ()
+  for j = 1:2
+    for i = 1:3
+      error ("Error in subfunction");
+    end
+  end
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile/bytecode_unary.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,22 @@
+function bytecode_unary ()
+  a = 1;
+  a = -a;
+  __printf_assert__ ("%d ", a);
+
+  c = +4;
+  b = +c;
+  __printf_assert__ ("%d ", c);
+
+  a = [1 2; 3 4]';
+  __printf_assert__ ("%d ", a);
+  a = a';
+  __printf_assert__ ("%d ", a);
+
+  b = true;
+  b = ~b;
+  __printf_assert__ ("%d ", b);
+
+  b = true;
+  b = !b;
+  __printf_assert__ ("%d ", b);
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile/bytecode_unwind.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,170 @@
+function bytecode_unwind ()
+  unwind_protect
+    __printf_assert__ ("yay1 ");
+  unwind_protect_cleanup
+    __printf_assert__ ("yay2 ");
+  end
+
+  try
+    unwind_protect
+      error ("e1");
+      __printf_assert__ ("boo ");
+    unwind_protect_cleanup
+      __printf_assert__ ("yay3 ");
+    end
+  catch e
+    __printf_assert__ ("%s ", getfield (e, 'message'));
+  end
+
+  suby (1);
+  suby (2);
+
+  try
+    suby (3);
+  catch
+  end
+
+  suby (4);
+  suby (5);
+  suby (6);
+
+  % Break and returns that are not executed
+  %
+  % The combination "break and return", "break" and "return"
+  % takes different codepaths in the walker so we test all
+  % combinations here.
+
+  unwind_protect
+    for i = 1:3
+      if i == 4
+        break
+      elseif i == 5
+        return
+      end
+    end
+  unwind_protect_cleanup
+    __printf_assert__ ("yay4 ");
+  end
+
+  % Break only
+  unwind_protect
+    for i = 1:3
+      if i == 4
+        break
+      end
+    end
+  unwind_protect_cleanup
+    __printf_assert__ ("yay5 ");
+  end
+
+  % Return only
+  unwind_protect
+    for i = 1:3
+      if i == 4
+        return
+      end
+    end
+  unwind_protect_cleanup
+    __printf_assert__ ("yay6 ");
+  end
+end
+
+function suby (a)
+  if a == 1
+    unwind_protect
+      __printf_assert__ ("subyyay1 ");
+      return
+      __printf_assert__ ("boo ");
+    unwind_protect_cleanup
+      __printf_assert__ ("subyyay2 ");
+    end
+    __printf_assert__ ("boo ");
+  elseif a == 2
+    % Nested unwind protect with return in body
+    unwind_protect
+      unwind_protect
+        __printf_assert__ ("subyyay3 ");
+        return
+        __printf_assert__ ("boo ");
+      unwind_protect_cleanup
+        __printf_assert__ ("subyyay4 ");
+      end
+      __printf_assert__ ("boo ");
+    unwind_protect_cleanup
+      __printf_assert__ ("subyyay5 ");
+    end
+    __printf_assert__ ("boo ");
+  elseif a == 3
+    % Nested unwind protect with error in body
+    unwind_protect
+      unwind_protect
+        __printf_assert__ ("subyyay6 ");
+        error foooo
+        __printf_assert__ ("boo ");
+      unwind_protect_cleanup
+        __printf_assert__ ("subyyay7 ");
+      end
+      __printf_assert__ ("boo ");
+    unwind_protect_cleanup
+      __printf_assert__ ("subyyay8 ");
+    end
+    __printf_assert__ ("boo ");
+  elseif a == 4
+    for i = 1:3
+      unwind_protect
+        __printf_assert__ ("subyyay9 ");
+        break;
+        __printf_assert__ ("boo ");
+      unwind_protect_cleanup
+        __printf_assert__ ("subyyay10 ");
+      end
+      __printf_assert__ ("boo ");
+    end
+  elseif a == 5
+    for i = 1:3
+      unwind_protect
+        __printf_assert__ ("subyyay11 ");
+        for j = 1:3
+          unwind_protect
+            __printf_assert__ ("subyyay12 ");
+            break;
+            __printf_assert__ ("boo ");
+          unwind_protect_cleanup
+            __printf_assert__ ("subyyay13 ");
+          end
+          __printf_assert__ ("boo ");
+        end
+        break;
+        __printf_assert__ ("boo ");
+      unwind_protect_cleanup
+        __printf_assert__ ("subyyay14 ");
+      end
+      __printf_assert__ ("boo ");
+    end
+  elseif a == 6
+    % Mixing unwind protect with for loops. error and break
+    for i = 1:3
+      unwind_protect
+        __printf_assert__ ("subyyay15 ");
+        try
+          for j = 1:3
+            unwind_protect
+              __printf_assert__ ("subyyay16 ");
+              error ('qwe');
+              __printf_assert__ ("boo ");
+            unwind_protect_cleanup
+              __printf_assert__ ("subyyay17 ");
+            end
+            __printf_assert__ ("boo ");
+          end
+        catch
+          break;
+        end
+        __printf_assert__ ("boo ");
+      unwind_protect_cleanup
+        __printf_assert__ ("subyyay18 ");
+      end
+      __printf_assert__ ("boo ");
+    end
+  end
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile/bytecode_varargin.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,87 @@
+function bytecode_varargin (b, varargin)
+  __printf_assert__ ("%d ", varargin{:});
+  __printf_assert__ ("%d ", size (varargin))
+
+  varg1 (1);
+  varg1 (1,2,3,4);
+  varg1 ();
+
+  varg2 (1,2,3,4);
+  varg2 (1);
+
+  cslist = {1,2,3,4};
+  varg2 (cslist{:});
+
+  varg3 (cslist{:});
+  cslist = {1, 2};
+  varg3 (cslist{:});
+  cslist = {1};
+  varg3 (cslist{:});
+  cslist = {};
+  varg3 (cslist{:});
+
+  justnamenotpos (1, 2);
+
+  out = inout (1,2,3,4);
+  __printf_assert__ ("%d ", out{:});
+
+  __printf_assert__ ("%d ", nargin);
+
+  % TODO: Check in caller that return is the same
+  % b = varargin{:}
+
+  suby (1,2,3);
+end
+
+function varg1 (varargin)
+  __printf_assert__ ("%d ", varargin{:});
+  __printf_assert__ ("%d ", size (varargin));
+  __printf_assert__ ("%d ", nargin);
+end
+
+function varg2 (a, varargin)
+  __printf_assert__ ("%d ", a);
+  __printf_assert__ ("%d ", varargin{:});
+  __printf_assert__ ("%d ", size (varargin));
+  __printf_assert__ ("%d ", nargin);
+
+  varg1 (varargin{:})
+  varg1 (2, varargin{:})
+end
+
+function varg3 (a, b, varargin)
+  if exist ("a")
+    __printf_assert__ ("%d ", a);
+  else
+    __printf_assert__ ("noa ");
+  end
+  if exist ("b")
+    __printf_assert__ ("%d ", b);
+  else
+    __printf_assert__ ("nob ");
+  end
+  __printf_assert__ ("%d ", size (varargin));
+  __printf_assert__ ("%d ", nargin);
+end
+
+function justnamenotpos (varargin, a)
+  __printf_assert__ ("%d ", a);
+  __printf_assert__ ("%d ", varargin);
+  __printf_assert__ ("%d ", nargin);
+end
+
+function [varargin] = inout (varargin)
+  __printf_assert__ ("%d ", nargin);
+end
+
+function suby(a,b,c)
+  __printf_assert__ ("%d ", nargin);
+
+  if nargin == 3
+    suby (1,2);
+  elseif nargin == 2
+    suby (1);
+  elseif nargin == 1
+    suby ();
+  end
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile/bytecode_varargout.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,103 @@
+function bytecode_varargout ()
+
+    % Just check this works
+    [a b] = {7, 8}{:};
+    __printf_assert__ ("%d %d ", a, b);
+
+    % n amount of vargouts from suby1(n)
+    a = suby1 (1);
+    __printf_assert__ ("%d ", a);
+    [a b] = suby1 (2);
+    __printf_assert__ ("%d %d ", a, b);
+
+    % Test that ignored outputs are set properly
+    % when calling interpreted functions.
+    %
+    % return_isargout (n) returns isargout (n) in its
+    % first output.
+    [a b c] = return_isargout (2);
+    __printf_assert__ ("%d ", a);
+
+    [a b c] = return_isargout (4);
+    __printf_assert__ ("%d ", a);
+
+    [a, ~, c] = return_isargout (2);
+    __printf_assert__ ("%d ", a);
+
+    [a, ~, ~] = return_isargout (2);
+    __printf_assert__ ("%d ", a);
+    [a, ~, ~] = return_isargout (1);
+    __printf_assert__ ("%d ", a);
+    [a, ~, ~] = return_isargout (3);
+    __printf_assert__ ("%d ", a);
+
+    [~, ~, ~] = return_isargout (3);
+
+    % Do the same for a vm function
+    [a b c] = sub_return_isargout (2);
+    __printf_assert__ ("%d ", a);
+
+    [a b c] = sub_return_isargout (4);
+    __printf_assert__ ("%d ", a);
+
+    [a, ~, c] = sub_return_isargout (2);
+    __printf_assert__ ("%d ", a);
+
+    [a, ~, ~] = sub_return_isargout (2);
+    __printf_assert__ ("%d ", a);
+    [a, ~, ~] = sub_return_isargout (1);
+    __printf_assert__ ("%d ", a);
+    [a, ~, ~] = sub_return_isargout (3);
+    __printf_assert__ ("%d ", a);
+
+    [~, ~, ~] = sub_return_isargout (3);
+
+    % Check that 40000 return values wont cause a stack overflow
+    ans = 0;
+    suby1 (40000); % returns [varargout] => [1 2 3 ... n]
+    assert (ans == 1)
+
+    ans = 0;
+    suby2 (40000); % returns [a b varargout] => [1 2 3 ... n]
+    assert (ans == 1)
+
+    % Check dropping return values
+    [a b c] = suby1 (10);
+    assert (all ([a b c] == [1 2 3]))
+    [a b c] = suby2 (10);
+    assert (all ([a b c] == [1 2 3]))
+
+    % Check too few return values
+    threw = false;
+    try
+        [a b c] = suby1 (2);
+    catch
+        threw = true;
+    end
+    assert (threw);
+
+    % Bug #65029, stack overflowed when deal returned
+    nlay = 20000;
+    a = struct ("aa", {1:nlay});
+    tmp = {1:nlay};
+    [a(1:nlay).aa] = deal (tmp{:});
+end
+
+function [a b c] = sub_return_isargout (n)
+    b = 0; c = 0;
+    a = isargout (n);
+end
+
+function varargout = suby1(n)
+    for i = 1:n
+        varargout{i} = i;
+    end
+end
+
+function [a b varargout] = suby2(n)
+    a = 1; b = 2;
+    for i = 1:n-2
+        varargout{i} = i + 2;
+    end
+end
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile/bytecode_while.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,111 @@
+function bytecode_while ()
+  i = 5;
+  while i
+    __printf_assert__ ("%d ", i);
+    i--;
+  end
+
+  i = 0;
+  while i < 3
+    i++;
+  end
+  __printf_assert__ ("%d ", i);
+
+  i = 0;
+  ctr = 0;
+  while i++ < 4
+    ctr++;
+  end
+  __printf_assert__ ("%d ", i);
+  __printf_assert__ ("%d ", ctr);
+
+  i = 0;
+  ctr = 0;
+  while ++i < 4
+    ctr++;
+  end
+  __printf_assert__ ("%d ", i);
+  __printf_assert__ ("%d ", ctr);
+
+  i = 0;
+  ctr = 0;
+  while i < 4
+    i++;
+    if i == 2
+      continue
+    end
+    ctr++;
+  end
+  __printf_assert__ ("%d ", ctr);
+  __printf_assert__ ("%d ", i);
+
+  i = 0;
+  ctr = 0;
+  while i < 4
+    i++;
+    if i == 2
+      break
+    end
+    ctr++;
+  end
+  __printf_assert__ ("%d ", ctr);
+  __printf_assert__ ("%d ", i);
+
+  i = 0;
+  ctr = 0;
+  while i < 4
+    i++;
+    if i == 2
+      continue
+    elseif i == 3
+      break
+    end
+    ctr++;
+  end
+  __printf_assert__ ("%d ", ctr);
+  __printf_assert__ ("%d ", i);
+
+  i = 0;
+  while i < 4
+    i++;
+    if i == 1
+      continue
+    else
+      break
+    end
+  end
+  __printf_assert__ ("%d ", i);
+
+  ctr = 0;
+  j = 0;
+  while j < 2
+    i = 0;
+    while i < 2
+      k = 0;
+      while k < 2
+        k++;
+        ctr++;
+      end
+      i++;
+    end
+    j++;
+  end
+  __printf_assert__ ("%d ", ctr);
+
+  i = 0;
+  while i++ < 2
+    continue
+  end
+  __printf_assert__ ("%d ", i);
+
+  i = 0;
+  while i++ < 2
+    break
+  end
+  __printf_assert__ ("%d ", i);
+
+  i = 0;
+  while i++ < 2
+  end
+  __printf_assert__ ("%d ", i);
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile/bytecode_wordlistcmd.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,25 @@
+function bytecode_wordlistcmd ()
+  foo A B C;
+
+  bar QWE;
+
+  boz
+  boz A
+  boz A B
+end
+
+
+function foo (a,b,c)
+  __printf_assert__ ("%s ", a);
+  __printf_assert__ ("%s ", b);
+  __printf_assert__ ("%s ", c);
+end
+
+function [a b] = bar (c)
+  __printf_assert__ ("%s ", c);
+  a = 1;
+  b = 2;
+end
+
+function boz (a,b,c)
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile/cdef_bar.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,52 @@
+% classdef that keeps track of alive objects of itself too be able
+% to ensure all are destroyed.
+%
+% Inspect cdef_bar_alive_objs to see which object with what message
+% that is alive during debugging.
+
+classdef cdef_bar < handle
+  properties
+    msg = "";
+  end
+  methods
+    function f = cdef_bar(msg = "")
+        global cdef_bar_cnt = 0;
+        global cdef_bar_alive_objs = struct;
+        f.msg = msg;
+        cdef_bar_cnt++;
+
+        if isfield (cdef_bar_alive_objs, msg)
+          entry = cdef_bar_alive_objs.(msg);
+          entry.cnt++;
+          cdef_bar_alive_objs.(msg) = entry;
+        else
+          entry = struct;
+          entry.cnt = 1;
+          cdef_bar_alive_objs.(msg) = entry;
+        end
+
+        %printf ("ctored %s cnt=%d\n", msg, cdef_bar_cnt);
+    end
+
+    function delete (self)
+      global cdef_bar_cnt = 0;
+      global cdef_bar_alive_objs = struct;
+      cdef_bar_cnt--;
+
+      if isfield (cdef_bar_alive_objs, self.msg)
+        entry = cdef_bar_alive_objs.(self.msg);
+        entry.cnt--;
+
+        if entry.cnt
+          cdef_bar_alive_objs.(self.msg) = entry;
+        else
+          cdef_bar_alive_objs = rmfield (cdef_bar_alive_objs, self.msg);
+        end
+      else
+        printf ("Unexpected missing alive objects entry for cdef_bar in cdef_bar.m")
+      end
+
+      %printf ("dtored %s cnt=%d\n", self.msg, cdef_bar_cnt);
+    endfunction
+  endmethods
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile/cdef_foo.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,21 @@
+classdef cdef_foo < handle
+  properties
+    msg = "";
+    val = 1;
+  end
+  methods
+    function f = cdef_foo(msg)
+        global cdef_foo_ctor_cnt = 0;
+        f.msg = msg;
+        cdef_foo_ctor_cnt++;
+    end
+    function delete (self)
+      global cdef_foo_dtor_cnt = 0;
+      __printf_assert__ ("%d %s ", ++cdef_foo_dtor_cnt, self.msg);
+    endfunction
+    function c = plus (a, b)
+        c = cdef_foo (strcat("sum", a.msg, b.msg));
+        c.val = a.val + b.val;
+    end
+  endmethods
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile/inputname_args.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,6 @@
+function inputname_args (arg1, arg2)
+    __printf_assert__ ("%s ", inputname (1, 0));
+    __printf_assert__ ("%s ", inputname (1, 1));
+    __printf_assert__ ("%s ", inputname (2, 0));
+    __printf_assert__ ("%s ", inputname (2, 1));
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile/just_call_handle_with_arg.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,3 @@
+function a = just_call_handle_with_arg (h, varargin)
+    a = h (varargin{:});
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile/module.mk	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,63 @@
+compile_TEST_FILES = \
+  %reldir%/bytecode.tst \
+  %reldir%/bytecode_ans.m \
+  %reldir%/bytecode_assign.m \
+  %reldir%/bytecode_binops.m \
+  %reldir%/bytecode_anon_handles.m \
+  %reldir%/bytecode_cdef_use.m \
+  %reldir%/bytecode_cell.m \
+  %reldir%/bytecode_dountil.m \
+  %reldir%/bytecode_end.m \
+  %reldir%/bytecode_errors.m \
+  %reldir%/bytecode_eval_1.m \
+  %reldir%/bytecode_evalin_1.m \
+  %reldir%/bytecode_evalin_2.m \
+  %reldir%/bytecode_for.m \
+  %reldir%/bytecode_global_1.m \
+  %reldir%/bytecode_if.m \
+  %reldir%/bytecode_index_obj.m \
+  %reldir%/bytecode_inputname.m \
+  %reldir%/bytecode_leaks.m \
+  %reldir%/bytecode_load_script_save.m \
+  %reldir%/bytecode_load_script_load_and_assert.m \
+  %reldir%/bytecode_matrix.m \
+  %reldir%/bytecode_misc.m \
+  %reldir%/bytecode_multi_assign.m \
+  %reldir%/bytecode_nested.m \
+  %reldir%/bytecode_persistant.m \
+  %reldir%/bytecode_range.m \
+  %reldir%/bytecode_return.m \
+  %reldir%/bytecode_scripts.m \
+  %reldir%/bytecode_script_nargin.m \
+  %reldir%/bytecode_script_topscope.m \
+  %reldir%/bytecode_script_topscope_assert.m \
+  %reldir%/bytecode_script_topscope_setup.m \
+  %reldir%/bytecode_script_topscope_setup_script.m \
+  %reldir%/bytecode_struct.m \
+  %reldir%/bytecode_subfuncs.m \
+  %reldir%/bytecode_subsasgn.m \
+  %reldir%/bytecode_switch.m \
+  %reldir%/bytecode_trycatch.m \
+  %reldir%/bytecode_unary.m \
+  %reldir%/bytecode_unwind.m \
+  %reldir%/bytecode_varargin.m \
+  %reldir%/bytecode_varargout.m \
+  %reldir%/bytecode_while.m \
+  %reldir%/bytecode_wordlistcmd.m \
+  %reldir%/cdef_bar.m \
+  %reldir%/cdef_foo.m \
+  %reldir%/inputname_args.m \
+  %reldir%/just_call_handle_with_arg.m \
+  %reldir%/return_isargout.m \
+  %reldir%/script_defines_qweqwe.m \
+  %reldir%/script1.m \
+  %reldir%/script11.m \
+  %reldir%/script2.m \
+  %reldir%/script3.m \
+  %reldir%/shutup_operator_test/@double/display.m \
+  %reldir%/shutup-operator-test/@logical/display.m \
+  %reldir%/shutup-operator-test/bytecode_disp.m \
+  %reldir%/shutup-operator-test/bytecode-disp.tst \
+  %reldir%/wrongname_fn.m
+
+TEST_FILES += $(compile_TEST_FILES)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile/return_isargout.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,5 @@
+function [a b c] = return_isargout (n)
+    b = 0;
+    c = 0;
+    a = isargout (n);
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile/script1.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,27 @@
+a1 = 1;
+b1 = 3;
+eval ("c1 = 4;")
+eval ("d1 = 5;")
+
+% Test function definitions inlined in scripts
+function inlinefn1 (a)
+  assert (!exist ("a1")) % Ensure inlinefn1 has its own scope
+  b = inlinefn2 (a);
+  assert (b == a + 1);
+end
+
+inlinefn2 = 3; % Assure a function can be defined when id exists with that name
+
+function b = inlinefn2 (a)
+  b = a + 1;
+end
+
+assert (inlinefn2 (3) == 4);
+inlinefn1 (123);
+
+script11 ();
+
+assert (a11 == 1)
+assert (b11 == 3)
+eval ("assert (c11 == 4)")
+eval ("assert (d11 == 5)")
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile/script11.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,8 @@
+a11 = 1;
+b11 = 3;
+
+% Inline function in parent script
+assert (inlinefn2 (4) == 5);
+
+eval ("c11 = 4;")
+eval ("d11 = 5;")
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile/script2.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,3 @@
+a2 = 3;
+
+error ("Error in script");
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile/script3.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,7 @@
+% Check that variables from script one are accessable
+% and have the correct value.
+assert (a1 == 1);
+assert (b1 == 3);
+assert (c1 == 4);
+eval ("assert (d1 == 5)");
+assert (a2 == 3);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile/script_defines_qweqwe.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,1 @@
+qweqwe = 0;
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile/shutup-operator-test/@double/display.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,11 @@
+% For use by bytecode_disp().
+%
+% Overload display
+%
+function display (x)
+    if (inputname(1))
+        __printf_assert__ ("%s = %d ", inputname(1), x);
+    else
+        __printf_assert__ ("%d ", x);
+    end
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile/shutup-operator-test/@logical/display.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,11 @@
+% For use by bytecode_disp().
+%
+% Overload display
+%
+function display (x)
+    if (inputname(1))
+        __printf_assert__ ("%s = %d ", inputname(1), x);
+    else
+        __printf_assert__ ("%d ", x);
+    end
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile/shutup-operator-test/bytecode-disp.tst	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,18 @@
+## Test display due to no ";" at eol
+##
+## We are overloading display for double so we place this test
+## in its own folder to not mess double up for the other
+## tests.
+
+%!testif ENABLE_BYTECODE_EVALUATOR
+%! % Overloading of class-methods seems to stick so we need to clear them since we overload
+%! % double's display. Is this a bug ???
+%! clear classes
+%! key = "ans = 1 . ans = 5 . . ans = 0 . ans = 8 . ans = 3 . x = 3 . x = 1 y = 2 . x = 1 . . x = 1 . y = 2 . x = 1 ";
+%! __vm_compile__ bytecode_disp clear;
+%! bytecode_disp;
+%! assert (__prog_output_assert__ (key));
+%!
+%! assert (__vm_compile__ ("bytecode_disp"));
+%! bytecode_disp;
+%! assert (__prog_output_assert__ (key));
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile/shutup-operator-test/bytecode_disp.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,38 @@
+function bytecode_disp()
+  %  int8 type's display is overloaded with a __printf_assert__
+  1               % "ans = 1"
+  print_dot;
+  2 + 3           % "ans = 5"
+  print_dot;
+  0;              %
+  0 + 0;          %
+  print_dot;
+  ~4              % "ans = 0"
+  print_dot;
+  2^3             % "ans = 8"
+  print_dot       %
+
+  if 2
+    3             % "ans = 3"
+  end
+  print_dot;
+  x = 4 - 1       % "x = 3"
+
+  print_dot;
+  [x, y] = deal (1,2) % "x = 1" "y = 2"
+  print_dot;
+  [x, ~] = deal (1,2) % "x = 1"
+  print_dot;
+  % If all lvalues are black holes, nothing is printed
+  [~, ~] = deal (1,2) %
+  print_dot;
+  [x, ~, ~] = deal (1,2, 3) % "x = 1"
+  print_dot;
+  [~, y, ~] = deal (1,2, 3) % "y = 2"
+  print_dot;
+  x
+end
+
+function print_dot()
+  __printf_assert__(". ");
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compile/wrongname_fn.m	Fri Apr 19 12:57:20 2024 -0400
@@ -0,0 +1,3 @@
+function a = wrongname_fn (b)
+  a = b + 1;
+end