From 94104aea596bbec8f432f0c4c653996bd9b5a0dc Mon Sep 17 00:00:00 2001 From: Daniel Kroening Date: Wed, 3 Jan 2024 16:14:55 +0000 Subject: [PATCH 1/2] nuterm using libtorch Initial prototype of neural ranking function synthesis for liveness properties. --- .github/workflows/pull-request-checks.yaml | 49 ++ examples/Benchmarks/run_nuterm | 145 ++++++ regression/ebmc/neural-liveness/counter1.desc | 2 +- .../ebmc/neural-liveness/live_signal1.desc | 22 +- regression/nuterm/Makefile | 7 + regression/nuterm/counters/README | 3 + regression/nuterm/counters/counter1.desc | 9 + regression/nuterm/counters/counter1.v | 11 + regression/nuterm/counters/counter1/trace.1 | 39 ++ regression/nuterm/counters/counter1/trace.10 | 42 ++ regression/nuterm/counters/counter1/trace.2 | 41 ++ regression/nuterm/counters/counter1/trace.3 | 42 ++ regression/nuterm/counters/counter1/trace.4 | 43 ++ regression/nuterm/counters/counter1/trace.5 | 40 ++ regression/nuterm/counters/counter1/trace.6 | 44 ++ regression/nuterm/counters/counter1/trace.7 | 39 ++ regression/nuterm/counters/counter1/trace.8 | 41 ++ regression/nuterm/counters/counter1/trace.9 | 42 ++ regression/nuterm/counters/counter2.desc | 9 + regression/nuterm/counters/counter2.v | 13 + regression/nuterm/counters/counter2/trace.1 | 41 ++ regression/nuterm/counters/counter2/trace.10 | 44 ++ regression/nuterm/counters/counter2/trace.2 | 43 ++ regression/nuterm/counters/counter2/trace.3 | 44 ++ regression/nuterm/counters/counter2/trace.4 | 45 ++ regression/nuterm/counters/counter2/trace.5 | 42 ++ regression/nuterm/counters/counter2/trace.6 | 46 ++ regression/nuterm/counters/counter2/trace.7 | 41 ++ regression/nuterm/counters/counter2/trace.8 | 43 ++ regression/nuterm/counters/counter2/trace.9 | 44 ++ src/ebmc/live_signal.cpp | 9 +- src/ebmc/live_signal.h | 4 +- src/ebmc/neural_liveness.cpp | 31 +- src/nuterm/CMakeLists.txt | 13 + src/nuterm/README | 7 + src/nuterm/nuterm_main.cpp | 340 ++++++++++++++ src/nuterm/pytorch_tests.cpp | 439 ++++++++++++++++++ src/nuterm/training.cpp | 95 ++++ src/nuterm/training.h | 41 ++ src/nuterm/vcd_parser.cpp | 142 ++++++ src/nuterm/vcd_parser.h | 61 +++ 41 files changed, 2266 insertions(+), 32 deletions(-) create mode 100755 examples/Benchmarks/run_nuterm create mode 100644 regression/nuterm/Makefile create mode 100644 regression/nuterm/counters/README create mode 100644 regression/nuterm/counters/counter1.desc create mode 100644 regression/nuterm/counters/counter1.v create mode 100644 regression/nuterm/counters/counter1/trace.1 create mode 100644 regression/nuterm/counters/counter1/trace.10 create mode 100644 regression/nuterm/counters/counter1/trace.2 create mode 100644 regression/nuterm/counters/counter1/trace.3 create mode 100644 regression/nuterm/counters/counter1/trace.4 create mode 100644 regression/nuterm/counters/counter1/trace.5 create mode 100644 regression/nuterm/counters/counter1/trace.6 create mode 100644 regression/nuterm/counters/counter1/trace.7 create mode 100644 regression/nuterm/counters/counter1/trace.8 create mode 100644 regression/nuterm/counters/counter1/trace.9 create mode 100644 regression/nuterm/counters/counter2.desc create mode 100644 regression/nuterm/counters/counter2.v create mode 100644 regression/nuterm/counters/counter2/trace.1 create mode 100644 regression/nuterm/counters/counter2/trace.10 create mode 100644 regression/nuterm/counters/counter2/trace.2 create mode 100644 regression/nuterm/counters/counter2/trace.3 create mode 100644 regression/nuterm/counters/counter2/trace.4 create mode 100644 
regression/nuterm/counters/counter2/trace.5 create mode 100644 regression/nuterm/counters/counter2/trace.6 create mode 100644 regression/nuterm/counters/counter2/trace.7 create mode 100644 regression/nuterm/counters/counter2/trace.8 create mode 100644 regression/nuterm/counters/counter2/trace.9 create mode 100644 src/nuterm/CMakeLists.txt create mode 100644 src/nuterm/README create mode 100644 src/nuterm/nuterm_main.cpp create mode 100644 src/nuterm/pytorch_tests.cpp create mode 100644 src/nuterm/training.cpp create mode 100644 src/nuterm/training.h create mode 100644 src/nuterm/vcd_parser.cpp create mode 100644 src/nuterm/vcd_parser.h diff --git a/.github/workflows/pull-request-checks.yaml b/.github/workflows/pull-request-checks.yaml index ede4dc76e..60c99d903 100644 --- a/.github/workflows/pull-request-checks.yaml +++ b/.github/workflows/pull-request-checks.yaml @@ -191,3 +191,52 @@ jobs: run: make -C regression/verilog test-z3 - name: Print ccache stats run: ccache -s + + # This job takes approximately 1 minute + check-ubuntu-22_04-nuterm: + runs-on: ubuntu-22.04 + steps: + - uses: actions/checkout@v3 + with: + submodules: recursive + - name: Fetch dependencies + env: + # This is needed in addition to -yq to prevent apt-get from asking for + # user input + DEBIAN_FRONTEND: noninteractive + run: | + sudo apt-get update + sudo apt-get install --no-install-recommends -yq gcc g++ ccache cmake + - name: Prepare ccache + uses: actions/cache@v3 + with: + path: .ccache + key: ${{ runner.os }}-22.04-nuterm-${{ github.ref }}-${{ github.sha }}-PR + restore-keys: | + ${{ runner.os }}-22.04-nuterm-${{ github.ref }} + ${{ runner.os }}-22.04-nuterm + - name: ccache environment + run: | + echo "CCACHE_BASEDIR=$PWD" >> $GITHUB_ENV + echo "CCACHE_DIR=$PWD/.ccache" >> $GITHUB_ENV + - name: Zero ccache stats and limit in size + run: ccache -z --max-size=500M + - name: Get pytorch + run: | + cd src/nuterm + wget -q https://download.pytorch.org/libtorch/cpu/libtorch-cxx11-abi-shared-with-deps-2.1.2%2Bcpu.zip + unzip -q *.zip + - name: Build with cmake + run: | + cd src/nuterm + LIBTORCH=`pwd`/libtorch + mkdir build + cd build + cmake -DCMAKE_PREFIX_PATH=$LIBTORCH -DCMAKE_CXX_COMPILER_LAUNCHER=ccache .. + cmake --build . 
--config Release + - name: Run the unit tests + run: src/nuterm/build/pytorch_tests + - name: Run the system tests + run: make -C regression/nuterm + - name: Print ccache stats + run: ccache -s diff --git a/examples/Benchmarks/run_nuterm b/examples/Benchmarks/run_nuterm new file mode 100755 index 000000000..71724275a --- /dev/null +++ b/examples/Benchmarks/run_nuterm @@ -0,0 +1,145 @@ +#!/bin/sh + +set -e + +ebmc PWM_1.sv --neural-liveness --trace-steps 700 --traces 1 +ebmc PWM_2.sv --neural-liveness --trace-steps 1000 --traces 1 +ebmc PWM_3.sv --neural-liveness --trace-steps 3000 --traces 1 +ebmc PWM_4.sv --neural-liveness --trace-steps 5000 --traces 1 +ebmc PWM_5.sv --neural-liveness --trace-steps 10000 --traces 1 +ebmc PWM_6.sv --neural-liveness --trace-steps 20000 --traces 1 +ebmc PWM_7.sv --neural-liveness --trace-steps 40000 --traces 1 +ebmc PWM_8.sv --neural-liveness --trace-steps 80000 --traces 1 +ebmc PWM_9.sv --neural-liveness --trace-steps 160000 --traces 1 + +if false ; then +ebmc delay_1.sv --neural-liveness "750-cnt" +ebmc delay_2.sv --neural-liveness "1250-cnt" +ebmc delay_3.sv --neural-liveness "2500-cnt" +ebmc delay_4.sv --neural-liveness "5000-cnt" +ebmc delay_5.sv --neural-liveness "7500-cnt" +ebmc delay_6.sv --neural-liveness "10000-cnt" +ebmc delay_7.sv --neural-liveness "12500-cnt" +ebmc delay_8.sv --neural-liveness "15000-cnt" +ebmc delay_9.sv --neural-liveness "17500-cnt" +ebmc delay_10.sv --neural-liveness "20000-cnt" +ebmc delay_11.sv --neural-liveness "22500-cnt" +ebmc delay_12.sv --neural-liveness "25000-cnt" +ebmc delay_13.sv --neural-liveness "50000-cnt" +ebmc delay_14.sv --neural-liveness "100000-cnt" +ebmc delay_15.sv --neural-liveness "200000-cnt" +ebmc delay_16.sv --neural-liveness "400000-cnt" +fi + +ebmc gray_1.sv --neural-liveness +ebmc gray_2.sv --neural-liveness +ebmc gray_3.sv --neural-liveness +ebmc gray_4.sv --neural-liveness +ebmc gray_5.sv --neural-liveness +ebmc gray_6.sv --neural-liveness +ebmc gray_7.sv --neural-liveness +ebmc gray_8.sv --neural-liveness +ebmc gray_9.sv --neural-liveness +ebmc gray_10.sv --neural-liveness +ebmc gray_11.sv --neural-liveness + +if false ; then +ebmc i2c_1.sv --neural-liveness "2**9-cnt" +ebmc i2c_2.sv --neural-liveness "2**10-cnt" +ebmc i2c_3.sv --neural-liveness "2**11-cnt" +ebmc i2c_4.sv --neural-liveness "2**12-cnt" +ebmc i2c_5.sv --neural-liveness "2**13-cnt" +ebmc i2c_6.sv --neural-liveness "2**14-cnt" +ebmc i2c_7.sv --neural-liveness "2**15-cnt" +ebmc i2c_8.sv --neural-liveness "2**16-cnt" +ebmc i2c_9.sv --neural-liveness "2**17-cnt" +ebmc i2c_10.sv --neural-liveness "2**18-cnt" +ebmc i2c_11.sv --neural-liveness "2**19-cnt" +ebmc i2c_12.sv --neural-liveness "2**10-cnt" +ebmc i2c_13.sv --neural-liveness "2**10-cnt" +ebmc i2c_14.sv --neural-liveness "2**10-cnt" +ebmc i2c_15.sv --neural-liveness "2**10-cnt" +ebmc i2c_16.sv --neural-liveness "2**10-cnt" +ebmc i2c_17.sv --neural-liveness "2**10-cnt" +ebmc i2c_18.sv --neural-liveness "2**10-cnt" +ebmc i2c_19.sv --neural-liveness "2**10-cnt" +ebmc i2c_20.sv --neural-liveness "2**19-cnt" +fi + +if false ; then +ebmc lcd_1.sv --neural-liveness "{3-state,500-cnt}" +ebmc lcd_2.sv --neural-liveness "{3-state,1000-cnt}" +ebmc lcd_3.sv --neural-liveness "{3-state,1500-cnt}" +ebmc lcd_4.sv --neural-liveness "{3-state,2500-cnt}" +ebmc lcd_5.sv --neural-liveness "{3-state,5000-cnt}" +ebmc lcd_6.sv --neural-liveness "{3-state,7500-cnt}" +ebmc lcd_7.sv --neural-liveness "{3-state,10000-cnt}" +ebmc lcd_8.sv --neural-liveness "{3-state,12500-cnt}" +ebmc lcd_9.sv 
--neural-liveness "{3-state,15000-cnt}" +ebmc lcd_10.sv --neural-liveness "{3-state,17500-cnt}" +ebmc lcd_11.sv --neural-liveness "{3-state,20000-cnt}" +ebmc lcd_12.sv --neural-liveness "{3-state,22500-cnt}" +ebmc lcd_13.sv --neural-liveness "{3-state,90000-cnt}" +ebmc lcd_14.sv --neural-liveness "{3-state,180000-cnt}" +fi + +ebmc seven_seg_1.sv --neural-liveness --property SEVEN.property.p1 +ebmc seven_seg_2.sv --neural-liveness --property SEVEN.property.p1 +ebmc seven_seg_3.sv --neural-liveness --property SEVEN.property.p1 +ebmc seven_seg_4.sv --neural-liveness --property SEVEN.property.p1 +ebmc seven_seg_5.sv --neural-liveness --property SEVEN.property.p1 +ebmc seven_seg_6.sv --neural-liveness --property SEVEN.property.p1 +ebmc seven_seg_7.sv --neural-liveness --property SEVEN.property.p1 +ebmc seven_seg_8.sv --neural-liveness --property SEVEN.property.p1 +ebmc seven_seg_9.sv --neural-liveness --property SEVEN.property.p1 +ebmc seven_seg_10.sv --neural-liveness --property SEVEN.property.p1 +ebmc seven_seg_11.sv --neural-liveness --property SEVEN.property.p1 +ebmc seven_seg_12.sv --neural-liveness --property SEVEN.property.p1 +ebmc seven_seg_16.sv --neural-liveness --property SEVEN.property.p1 +ebmc seven_seg_17.sv --neural-liveness --property SEVEN.property.p1 +ebmc seven_seg_18.sv --neural-liveness --property SEVEN.property.p1 + +if false ; then +ebmc thermocouple_1.sv --neural-liveness "{2-state,2**5-cnt}" +ebmc thermocouple_2.sv --neural-liveness "{2-state,2**9-cnt}" +ebmc thermocouple_3.sv --neural-liveness "{2-state,2**10-cnt}" +ebmc thermocouple_4.sv --neural-liveness "{2-state,2**10-cnt}" +ebmc thermocouple_5.sv --neural-liveness "{2-state,2**11-cnt}" +ebmc thermocouple_6.sv --neural-liveness "{2-state,2**11-cnt}" +ebmc thermocouple_7.sv --neural-liveness "{2-state,2**12-cnt}" +ebmc thermocouple_8.sv --neural-liveness "{2-state,2**12-cnt}" +ebmc thermocouple_9.sv --neural-liveness "{2-state,2**13-cnt}" +ebmc thermocouple_10.sv --neural-liveness "{2-state,2**14-cnt}" +ebmc thermocouple_11.sv --neural-liveness "{2-state,2**14-cnt}" +ebmc thermocouple_12.sv --neural-liveness "{2-state,2**14-cnt}" +ebmc thermocouple_13.sv --neural-liveness "{2-state,2**15-cnt}" +ebmc thermocouple_14.sv --neural-liveness "{2-state,2**16-cnt}" +ebmc thermocouple_15.sv --neural-liveness "{2-state,2**17-cnt}" +ebmc thermocouple_16.sv --neural-liveness "{2-state,2**18-cnt}" +ebmc thermocouple_17.sv --neural-liveness "{2-state,2**19-cnt}" +fi + +ebmc uart_transmit_1.sv --neural-liveness --trace-steps 100 --traces 20 +ebmc uart_transmit_2.sv --neural-liveness --trace-steps 100 --traces 20 +ebmc uart_transmit_3.sv --neural-liveness --trace-steps 100 --traces 20 +ebmc uart_transmit_4.sv --neural-liveness --trace-steps 100 --traces 20 +ebmc uart_transmit_5.sv --neural-liveness --trace-steps 100 --traces 20 +ebmc uart_transmit_6.sv --neural-liveness --trace-steps 100 --traces 20 +ebmc uart_transmit_7.sv --neural-liveness --trace-steps 100 --traces 20 +ebmc uart_transmit_8.sv --neural-liveness --trace-steps 100 --traces 20 +ebmc uart_transmit_9.sv --neural-liveness --trace-steps 100 --traces 20 +ebmc uart_transmit_10.sv --neural-liveness --trace-steps 200 --traces 20 +ebmc uart_transmit_11.sv --neural-liveness --trace-steps 200 --traces 20 +ebmc uart_transmit_12.sv --neural-liveness --trace-steps 200 --traces 20 + +if false ; then +ebmc vga_1.sv --neural-liveness "{2**5-v_cnt,2**7-h_cnt}" +ebmc vga_2.sv --neural-liveness "{2**6-v_cnt,2**8-h_cnt}" +ebmc vga_3.sv --neural-liveness "{2**6-v_cnt,2**8-h_cnt}" +ebmc 
vga_4.sv --neural-liveness "{2**7-v_cnt,2**9-h_cnt}" +ebmc vga_5.sv --neural-liveness "{2**8-v_cnt,2**9-h_cnt}" +ebmc vga_6.sv --neural-liveness "{2**8-v_cnt,2**9-h_cnt}" +ebmc vga_7.sv --neural-liveness "{2**8-v_cnt,2**10-h_cnt}" +ebmc vga_8.sv --neural-liveness "{2**9-v_cnt,2**10-h_cnt}" +ebmc vga_9.sv --neural-liveness "{2**9-v_cnt,2**11-h_cnt}" +fi diff --git a/regression/ebmc/neural-liveness/counter1.desc b/regression/ebmc/neural-liveness/counter1.desc index da00acbd7..aad6e440b 100644 --- a/regression/ebmc/neural-liveness/counter1.desc +++ b/regression/ebmc/neural-liveness/counter1.desc @@ -1,6 +1,6 @@ CORE counter1.sv ---traces 2 --neural-liveness --neural-engine "echo Candidate: counter\\\\n" +--traces 2 --neural-liveness --neural-engine "echo RESULT: counter\\\\n" ^\[main\.property\.p0\] always s_eventually main.counter == 0: PROVED$ ^EXIT=0$ ^SIGNAL=0$ diff --git a/regression/ebmc/neural-liveness/live_signal1.desc b/regression/ebmc/neural-liveness/live_signal1.desc index 233ba63db..2a6b37471 100644 --- a/regression/ebmc/neural-liveness/live_signal1.desc +++ b/regression/ebmc/neural-liveness/live_signal1.desc @@ -1,17 +1,17 @@ CORE counter1.sv --traces 1 --neural-liveness --show-traces -^nuterm::live@0 = 0$ -^nuterm::live@1 = 0$ -^nuterm::live@2 = 0$ -^nuterm::live@3 = 0$ -^nuterm::live@4 = 0$ -^nuterm::live@5 = 0$ -^nuterm::live@6 = 0$ -^nuterm::live@7 = 0$ -^nuterm::live@8 = 0$ -^nuterm::live@9 = 0$ -^nuterm::live@10 = 1$ +^Verilog::main\.\$live@0 = 0$ +^Verilog::main\.\$live@1 = 0$ +^Verilog::main\.\$live@2 = 0$ +^Verilog::main\.\$live@3 = 0$ +^Verilog::main\.\$live@4 = 0$ +^Verilog::main\.\$live@5 = 0$ +^Verilog::main\.\$live@6 = 0$ +^Verilog::main\.\$live@7 = 0$ +^Verilog::main\.\$live@8 = 0$ +^Verilog::main\.\$live@9 = 0$ +^Verilog::main\.\$live@10 = 1$ ^EXIT=0$ ^SIGNAL=0$ -- diff --git a/regression/nuterm/Makefile b/regression/nuterm/Makefile new file mode 100644 index 000000000..e71292299 --- /dev/null +++ b/regression/nuterm/Makefile @@ -0,0 +1,7 @@ +default: test + +TEST_PL = ../../lib/cbmc/regression/test.pl + +test: + @$(TEST_PL) -c ../../../src/nuterm/build/nuterm + diff --git a/regression/nuterm/counters/README b/regression/nuterm/counters/README new file mode 100644 index 000000000..204b5d3f1 --- /dev/null +++ b/regression/nuterm/counters/README @@ -0,0 +1,3 @@ +Sample as follows: + +ebmc FILE.v --random-traces --vcd FILE/trace diff --git a/regression/nuterm/counters/counter1.desc b/regression/nuterm/counters/counter1.desc new file mode 100644 index 000000000..b97a3bb09 --- /dev/null +++ b/regression/nuterm/counters/counter1.desc @@ -0,0 +1,9 @@ +CORE +counter1 + +^weight clk = 0 .*$ +^weight counter = 1 .*$ +^bias: 0 .*$ +^RESULT: counter$ +^EXIT=0$ +^SIGNAL=0$ diff --git a/regression/nuterm/counters/counter1.v b/regression/nuterm/counters/counter1.v new file mode 100644 index 000000000..ce683060c --- /dev/null +++ b/regression/nuterm/counters/counter1.v @@ -0,0 +1,11 @@ +module main(input clk); + + reg [31:0] counter; + + always @(posedge clk) + if(counter != 0) + counter = counter - 1; + + wire \$live = counter == 0; + +endmodule diff --git a/regression/nuterm/counters/counter1/trace.1 b/regression/nuterm/counters/counter1/trace.1 new file mode 100644 index 000000000..019800113 --- /dev/null +++ b/regression/nuterm/counters/counter1/trace.1 @@ -0,0 +1,39 @@ +$date + Mon May 20 17:39:58 2024 +$end +$timescale + 1ns +$end +$scope module Verilog::main $end + $var wire 1 main.clk clk $end + $var reg 32 main.counter counter [31:0] $end + $var wire 1 main.$live $live 
$end +$upscope $end +$enddefinitions $end +#0 +0main.clk +b00010000000000000000000000000000 main.counter +0main.$live +#1 +1main.clk +b00001111111111111111111111111111 main.counter +#2 +b00001111111111111111111111111110 main.counter +#3 +0main.clk +b00001111111111111111111111111101 main.counter +#4 +1main.clk +b00001111111111111111111111111100 main.counter +#5 +b00001111111111111111111111111011 main.counter +#6 +b00001111111111111111111111111010 main.counter +#7 +b00001111111111111111111111111001 main.counter +#8 +b00001111111111111111111111111000 main.counter +#9 +b00001111111111111111111111110111 main.counter +#10 +b00001111111111111111111111110110 main.counter diff --git a/regression/nuterm/counters/counter1/trace.10 b/regression/nuterm/counters/counter1/trace.10 new file mode 100644 index 000000000..14af98364 --- /dev/null +++ b/regression/nuterm/counters/counter1/trace.10 @@ -0,0 +1,42 @@ +$date + Mon May 20 17:39:58 2024 +$end +$timescale + 1ns +$end +$scope module Verilog::main $end + $var wire 1 main.clk clk $end + $var reg 32 main.counter counter [31:0] $end + $var wire 1 main.$live $live $end +$upscope $end +$enddefinitions $end +#0 +0main.clk +b00010000000000000000000000000000 main.counter +0main.$live +#1 +1main.clk +b00001111111111111111111111111111 main.counter +#2 +0main.clk +b00001111111111111111111111111110 main.counter +#3 +b00001111111111111111111111111101 main.counter +#4 +1main.clk +b00001111111111111111111111111100 main.counter +#5 +0main.clk +b00001111111111111111111111111011 main.counter +#6 +b00001111111111111111111111111010 main.counter +#7 +b00001111111111111111111111111001 main.counter +#8 +1main.clk +b00001111111111111111111111111000 main.counter +#9 +b00001111111111111111111111110111 main.counter +#10 +0main.clk +b00001111111111111111111111110110 main.counter diff --git a/regression/nuterm/counters/counter1/trace.2 b/regression/nuterm/counters/counter1/trace.2 new file mode 100644 index 000000000..6d6433de1 --- /dev/null +++ b/regression/nuterm/counters/counter1/trace.2 @@ -0,0 +1,41 @@ +$date + Mon May 20 17:39:58 2024 +$end +$timescale + 1ns +$end +$scope module Verilog::main $end + $var wire 1 main.clk clk $end + $var reg 32 main.counter counter [31:0] $end + $var wire 1 main.$live $live $end +$upscope $end +$enddefinitions $end +#0 +0main.clk +b00010000000000000000000000000000 main.counter +0main.$live +#1 +b00001111111111111111111111111111 main.counter +#2 +1main.clk +b00001111111111111111111111111110 main.counter +#3 +0main.clk +b00001111111111111111111111111101 main.counter +#4 +b00001111111111111111111111111100 main.counter +#5 +b00001111111111111111111111111011 main.counter +#6 +b00001111111111111111111111111010 main.counter +#7 +b00001111111111111111111111111001 main.counter +#8 +1main.clk +b00001111111111111111111111111000 main.counter +#9 +0main.clk +b00001111111111111111111111110111 main.counter +#10 +1main.clk +b00001111111111111111111111110110 main.counter diff --git a/regression/nuterm/counters/counter1/trace.3 b/regression/nuterm/counters/counter1/trace.3 new file mode 100644 index 000000000..68b813118 --- /dev/null +++ b/regression/nuterm/counters/counter1/trace.3 @@ -0,0 +1,42 @@ +$date + Mon May 20 17:39:58 2024 +$end +$timescale + 1ns +$end +$scope module Verilog::main $end + $var wire 1 main.clk clk $end + $var reg 32 main.counter counter [31:0] $end + $var wire 1 main.$live $live $end +$upscope $end +$enddefinitions $end +#0 +1main.clk +b00010000000000000000000000000000 main.counter +0main.$live +#1 +0main.clk 
+b00001111111111111111111111111111 main.counter +#2 +b00001111111111111111111111111110 main.counter +#3 +1main.clk +b00001111111111111111111111111101 main.counter +#4 +b00001111111111111111111111111100 main.counter +#5 +b00001111111111111111111111111011 main.counter +#6 +b00001111111111111111111111111010 main.counter +#7 +0main.clk +b00001111111111111111111111111001 main.counter +#8 +1main.clk +b00001111111111111111111111111000 main.counter +#9 +0main.clk +b00001111111111111111111111110111 main.counter +#10 +1main.clk +b00001111111111111111111111110110 main.counter diff --git a/regression/nuterm/counters/counter1/trace.4 b/regression/nuterm/counters/counter1/trace.4 new file mode 100644 index 000000000..d9b50731b --- /dev/null +++ b/regression/nuterm/counters/counter1/trace.4 @@ -0,0 +1,43 @@ +$date + Mon May 20 17:39:58 2024 +$end +$timescale + 1ns +$end +$scope module Verilog::main $end + $var wire 1 main.clk clk $end + $var reg 32 main.counter counter [31:0] $end + $var wire 1 main.$live $live $end +$upscope $end +$enddefinitions $end +#0 +0main.clk +b00010000000000000000000000000000 main.counter +0main.$live +#1 +1main.clk +b00001111111111111111111111111111 main.counter +#2 +b00001111111111111111111111111110 main.counter +#3 +0main.clk +b00001111111111111111111111111101 main.counter +#4 +1main.clk +b00001111111111111111111111111100 main.counter +#5 +b00001111111111111111111111111011 main.counter +#6 +0main.clk +b00001111111111111111111111111010 main.counter +#7 +b00001111111111111111111111111001 main.counter +#8 +1main.clk +b00001111111111111111111111111000 main.counter +#9 +0main.clk +b00001111111111111111111111110111 main.counter +#10 +1main.clk +b00001111111111111111111111110110 main.counter diff --git a/regression/nuterm/counters/counter1/trace.5 b/regression/nuterm/counters/counter1/trace.5 new file mode 100644 index 000000000..21cbd62ac --- /dev/null +++ b/regression/nuterm/counters/counter1/trace.5 @@ -0,0 +1,40 @@ +$date + Mon May 20 17:39:58 2024 +$end +$timescale + 1ns +$end +$scope module Verilog::main $end + $var wire 1 main.clk clk $end + $var reg 32 main.counter counter [31:0] $end + $var wire 1 main.$live $live $end +$upscope $end +$enddefinitions $end +#0 +1main.clk +b00010000000000000000000000000000 main.counter +0main.$live +#1 +b00001111111111111111111111111111 main.counter +#2 +b00001111111111111111111111111110 main.counter +#3 +b00001111111111111111111111111101 main.counter +#4 +0main.clk +b00001111111111111111111111111100 main.counter +#5 +1main.clk +b00001111111111111111111111111011 main.counter +#6 +0main.clk +b00001111111111111111111111111010 main.counter +#7 +1main.clk +b00001111111111111111111111111001 main.counter +#8 +b00001111111111111111111111111000 main.counter +#9 +b00001111111111111111111111110111 main.counter +#10 +b00001111111111111111111111110110 main.counter diff --git a/regression/nuterm/counters/counter1/trace.6 b/regression/nuterm/counters/counter1/trace.6 new file mode 100644 index 000000000..936aff9d8 --- /dev/null +++ b/regression/nuterm/counters/counter1/trace.6 @@ -0,0 +1,44 @@ +$date + Mon May 20 17:39:58 2024 +$end +$timescale + 1ns +$end +$scope module Verilog::main $end + $var wire 1 main.clk clk $end + $var reg 32 main.counter counter [31:0] $end + $var wire 1 main.$live $live $end +$upscope $end +$enddefinitions $end +#0 +0main.clk +b00010000000000000000000000000000 main.counter +0main.$live +#1 +1main.clk +b00001111111111111111111111111111 main.counter +#2 +0main.clk +b00001111111111111111111111111110 main.counter +#3 
+b00001111111111111111111111111101 main.counter +#4 +1main.clk +b00001111111111111111111111111100 main.counter +#5 +b00001111111111111111111111111011 main.counter +#6 +0main.clk +b00001111111111111111111111111010 main.counter +#7 +1main.clk +b00001111111111111111111111111001 main.counter +#8 +0main.clk +b00001111111111111111111111111000 main.counter +#9 +1main.clk +b00001111111111111111111111110111 main.counter +#10 +0main.clk +b00001111111111111111111111110110 main.counter diff --git a/regression/nuterm/counters/counter1/trace.7 b/regression/nuterm/counters/counter1/trace.7 new file mode 100644 index 000000000..fda15d36b --- /dev/null +++ b/regression/nuterm/counters/counter1/trace.7 @@ -0,0 +1,39 @@ +$date + Mon May 20 17:39:58 2024 +$end +$timescale + 1ns +$end +$scope module Verilog::main $end + $var wire 1 main.clk clk $end + $var reg 32 main.counter counter [31:0] $end + $var wire 1 main.$live $live $end +$upscope $end +$enddefinitions $end +#0 +0main.clk +b00010000000000000000000000000000 main.counter +0main.$live +#1 +b00001111111111111111111111111111 main.counter +#2 +b00001111111111111111111111111110 main.counter +#3 +b00001111111111111111111111111101 main.counter +#4 +1main.clk +b00001111111111111111111111111100 main.counter +#5 +b00001111111111111111111111111011 main.counter +#6 +0main.clk +b00001111111111111111111111111010 main.counter +#7 +b00001111111111111111111111111001 main.counter +#8 +b00001111111111111111111111111000 main.counter +#9 +1main.clk +b00001111111111111111111111110111 main.counter +#10 +b00001111111111111111111111110110 main.counter diff --git a/regression/nuterm/counters/counter1/trace.8 b/regression/nuterm/counters/counter1/trace.8 new file mode 100644 index 000000000..86ee47095 --- /dev/null +++ b/regression/nuterm/counters/counter1/trace.8 @@ -0,0 +1,41 @@ +$date + Mon May 20 17:39:58 2024 +$end +$timescale + 1ns +$end +$scope module Verilog::main $end + $var wire 1 main.clk clk $end + $var reg 32 main.counter counter [31:0] $end + $var wire 1 main.$live $live $end +$upscope $end +$enddefinitions $end +#0 +0main.clk +b00010000000000000000000000000000 main.counter +0main.$live +#1 +1main.clk +b00001111111111111111111111111111 main.counter +#2 +0main.clk +b00001111111111111111111111111110 main.counter +#3 +b00001111111111111111111111111101 main.counter +#4 +1main.clk +b00001111111111111111111111111100 main.counter +#5 +0main.clk +b00001111111111111111111111111011 main.counter +#6 +1main.clk +b00001111111111111111111111111010 main.counter +#7 +b00001111111111111111111111111001 main.counter +#8 +b00001111111111111111111111111000 main.counter +#9 +b00001111111111111111111111110111 main.counter +#10 +b00001111111111111111111111110110 main.counter diff --git a/regression/nuterm/counters/counter1/trace.9 b/regression/nuterm/counters/counter1/trace.9 new file mode 100644 index 000000000..562f94619 --- /dev/null +++ b/regression/nuterm/counters/counter1/trace.9 @@ -0,0 +1,42 @@ +$date + Mon May 20 17:39:58 2024 +$end +$timescale + 1ns +$end +$scope module Verilog::main $end + $var wire 1 main.clk clk $end + $var reg 32 main.counter counter [31:0] $end + $var wire 1 main.$live $live $end +$upscope $end +$enddefinitions $end +#0 +1main.clk +b00010000000000000000000000000000 main.counter +0main.$live +#1 +0main.clk +b00001111111111111111111111111111 main.counter +#2 +1main.clk +b00001111111111111111111111111110 main.counter +#3 +b00001111111111111111111111111101 main.counter +#4 +0main.clk +b00001111111111111111111111111100 main.counter +#5 
+b00001111111111111111111111111011 main.counter +#6 +1main.clk +b00001111111111111111111111111010 main.counter +#7 +0main.clk +b00001111111111111111111111111001 main.counter +#8 +b00001111111111111111111111111000 main.counter +#9 +1main.clk +b00001111111111111111111111110111 main.counter +#10 +b00001111111111111111111111110110 main.counter diff --git a/regression/nuterm/counters/counter2.desc b/regression/nuterm/counters/counter2.desc new file mode 100644 index 000000000..d62db8dfd --- /dev/null +++ b/regression/nuterm/counters/counter2.desc @@ -0,0 +1,9 @@ +CORE +counter2 + +^weight clk = 0 .*$ +^weight counter = -1 .*$ +^bias: 0 .*$ +^RESULT: -1\*\$signed\(\{1'b0,counter\}\)$ +^EXIT=0$ +^SIGNAL=0$ diff --git a/regression/nuterm/counters/counter2.v b/regression/nuterm/counters/counter2.v new file mode 100644 index 000000000..367de8228 --- /dev/null +++ b/regression/nuterm/counters/counter2.v @@ -0,0 +1,13 @@ +module main(input clk); + + reg [31:0] counter; + + always @(posedge clk) + if(counter < 100) + counter = counter + 1; + else + counter = 0; + + wire \$live = counter == 0; + +endmodule diff --git a/regression/nuterm/counters/counter2/trace.1 b/regression/nuterm/counters/counter2/trace.1 new file mode 100644 index 000000000..3e2f4ea87 --- /dev/null +++ b/regression/nuterm/counters/counter2/trace.1 @@ -0,0 +1,41 @@ +$date + Mon May 20 17:40:02 2024 +$end +$timescale + 1ns +$end +$scope module Verilog::main $end + $var wire 1 main.clk clk $end + $var reg 32 main.counter counter [31:0] $end + $var wire 1 main.$live $live $end +$upscope $end +$enddefinitions $end +#0 +0main.clk +b10000000000000000000000000001000 main.counter +0main.$live +#1 +1main.clk +b00000000000000000000000000000000 main.counter +1main.$live +#2 +b00000000000000000000000000000001 main.counter +0main.$live +#3 +0main.clk +b00000000000000000000000000000010 main.counter +#4 +1main.clk +b00000000000000000000000000000011 main.counter +#5 +b00000000000000000000000000000100 main.counter +#6 +b00000000000000000000000000000101 main.counter +#7 +b00000000000000000000000000000110 main.counter +#8 +b00000000000000000000000000000111 main.counter +#9 +b00000000000000000000000000001000 main.counter +#10 +b00000000000000000000000000001001 main.counter diff --git a/regression/nuterm/counters/counter2/trace.10 b/regression/nuterm/counters/counter2/trace.10 new file mode 100644 index 000000000..0f0c351a4 --- /dev/null +++ b/regression/nuterm/counters/counter2/trace.10 @@ -0,0 +1,44 @@ +$date + Mon May 20 17:40:02 2024 +$end +$timescale + 1ns +$end +$scope module Verilog::main $end + $var wire 1 main.clk clk $end + $var reg 32 main.counter counter [31:0] $end + $var wire 1 main.$live $live $end +$upscope $end +$enddefinitions $end +#0 +0main.clk +b10000000000000000000000000001000 main.counter +0main.$live +#1 +1main.clk +b00000000000000000000000000000000 main.counter +1main.$live +#2 +0main.clk +b00000000000000000000000000000001 main.counter +0main.$live +#3 +b00000000000000000000000000000010 main.counter +#4 +1main.clk +b00000000000000000000000000000011 main.counter +#5 +0main.clk +b00000000000000000000000000000100 main.counter +#6 +b00000000000000000000000000000101 main.counter +#7 +b00000000000000000000000000000110 main.counter +#8 +1main.clk +b00000000000000000000000000000111 main.counter +#9 +b00000000000000000000000000001000 main.counter +#10 +0main.clk +b00000000000000000000000000001001 main.counter diff --git a/regression/nuterm/counters/counter2/trace.2 b/regression/nuterm/counters/counter2/trace.2 new file mode 100644 index 
000000000..02ca97c9f --- /dev/null +++ b/regression/nuterm/counters/counter2/trace.2 @@ -0,0 +1,43 @@ +$date + Mon May 20 17:40:02 2024 +$end +$timescale + 1ns +$end +$scope module Verilog::main $end + $var wire 1 main.clk clk $end + $var reg 32 main.counter counter [31:0] $end + $var wire 1 main.$live $live $end +$upscope $end +$enddefinitions $end +#0 +0main.clk +b10000000000000000000000000001000 main.counter +0main.$live +#1 +b00000000000000000000000000000000 main.counter +1main.$live +#2 +1main.clk +b00000000000000000000000000000001 main.counter +0main.$live +#3 +0main.clk +b00000000000000000000000000000010 main.counter +#4 +b00000000000000000000000000000011 main.counter +#5 +b00000000000000000000000000000100 main.counter +#6 +b00000000000000000000000000000101 main.counter +#7 +b00000000000000000000000000000110 main.counter +#8 +1main.clk +b00000000000000000000000000000111 main.counter +#9 +0main.clk +b00000000000000000000000000001000 main.counter +#10 +1main.clk +b00000000000000000000000000001001 main.counter diff --git a/regression/nuterm/counters/counter2/trace.3 b/regression/nuterm/counters/counter2/trace.3 new file mode 100644 index 000000000..72e7ab63a --- /dev/null +++ b/regression/nuterm/counters/counter2/trace.3 @@ -0,0 +1,44 @@ +$date + Mon May 20 17:40:02 2024 +$end +$timescale + 1ns +$end +$scope module Verilog::main $end + $var wire 1 main.clk clk $end + $var reg 32 main.counter counter [31:0] $end + $var wire 1 main.$live $live $end +$upscope $end +$enddefinitions $end +#0 +1main.clk +b10000000000000000000000000001000 main.counter +0main.$live +#1 +0main.clk +b00000000000000000000000000000000 main.counter +1main.$live +#2 +b00000000000000000000000000000001 main.counter +0main.$live +#3 +1main.clk +b00000000000000000000000000000010 main.counter +#4 +b00000000000000000000000000000011 main.counter +#5 +b00000000000000000000000000000100 main.counter +#6 +b00000000000000000000000000000101 main.counter +#7 +0main.clk +b00000000000000000000000000000110 main.counter +#8 +1main.clk +b00000000000000000000000000000111 main.counter +#9 +0main.clk +b00000000000000000000000000001000 main.counter +#10 +1main.clk +b00000000000000000000000000001001 main.counter diff --git a/regression/nuterm/counters/counter2/trace.4 b/regression/nuterm/counters/counter2/trace.4 new file mode 100644 index 000000000..0518fe3ed --- /dev/null +++ b/regression/nuterm/counters/counter2/trace.4 @@ -0,0 +1,45 @@ +$date + Mon May 20 17:40:02 2024 +$end +$timescale + 1ns +$end +$scope module Verilog::main $end + $var wire 1 main.clk clk $end + $var reg 32 main.counter counter [31:0] $end + $var wire 1 main.$live $live $end +$upscope $end +$enddefinitions $end +#0 +0main.clk +b10000000000000000000000000001000 main.counter +0main.$live +#1 +1main.clk +b00000000000000000000000000000000 main.counter +1main.$live +#2 +b00000000000000000000000000000001 main.counter +0main.$live +#3 +0main.clk +b00000000000000000000000000000010 main.counter +#4 +1main.clk +b00000000000000000000000000000011 main.counter +#5 +b00000000000000000000000000000100 main.counter +#6 +0main.clk +b00000000000000000000000000000101 main.counter +#7 +b00000000000000000000000000000110 main.counter +#8 +1main.clk +b00000000000000000000000000000111 main.counter +#9 +0main.clk +b00000000000000000000000000001000 main.counter +#10 +1main.clk +b00000000000000000000000000001001 main.counter diff --git a/regression/nuterm/counters/counter2/trace.5 b/regression/nuterm/counters/counter2/trace.5 new file mode 100644 index 000000000..60bd38a99 --- /dev/null +++ 
b/regression/nuterm/counters/counter2/trace.5 @@ -0,0 +1,42 @@ +$date + Mon May 20 17:40:02 2024 +$end +$timescale + 1ns +$end +$scope module Verilog::main $end + $var wire 1 main.clk clk $end + $var reg 32 main.counter counter [31:0] $end + $var wire 1 main.$live $live $end +$upscope $end +$enddefinitions $end +#0 +1main.clk +b10000000000000000000000000001000 main.counter +0main.$live +#1 +b00000000000000000000000000000000 main.counter +1main.$live +#2 +b00000000000000000000000000000001 main.counter +0main.$live +#3 +b00000000000000000000000000000010 main.counter +#4 +0main.clk +b00000000000000000000000000000011 main.counter +#5 +1main.clk +b00000000000000000000000000000100 main.counter +#6 +0main.clk +b00000000000000000000000000000101 main.counter +#7 +1main.clk +b00000000000000000000000000000110 main.counter +#8 +b00000000000000000000000000000111 main.counter +#9 +b00000000000000000000000000001000 main.counter +#10 +b00000000000000000000000000001001 main.counter diff --git a/regression/nuterm/counters/counter2/trace.6 b/regression/nuterm/counters/counter2/trace.6 new file mode 100644 index 000000000..d015d1828 --- /dev/null +++ b/regression/nuterm/counters/counter2/trace.6 @@ -0,0 +1,46 @@ +$date + Mon May 20 17:40:02 2024 +$end +$timescale + 1ns +$end +$scope module Verilog::main $end + $var wire 1 main.clk clk $end + $var reg 32 main.counter counter [31:0] $end + $var wire 1 main.$live $live $end +$upscope $end +$enddefinitions $end +#0 +0main.clk +b10000000000000000000000000001000 main.counter +0main.$live +#1 +1main.clk +b00000000000000000000000000000000 main.counter +1main.$live +#2 +0main.clk +b00000000000000000000000000000001 main.counter +0main.$live +#3 +b00000000000000000000000000000010 main.counter +#4 +1main.clk +b00000000000000000000000000000011 main.counter +#5 +b00000000000000000000000000000100 main.counter +#6 +0main.clk +b00000000000000000000000000000101 main.counter +#7 +1main.clk +b00000000000000000000000000000110 main.counter +#8 +0main.clk +b00000000000000000000000000000111 main.counter +#9 +1main.clk +b00000000000000000000000000001000 main.counter +#10 +0main.clk +b00000000000000000000000000001001 main.counter diff --git a/regression/nuterm/counters/counter2/trace.7 b/regression/nuterm/counters/counter2/trace.7 new file mode 100644 index 000000000..397fc0ff1 --- /dev/null +++ b/regression/nuterm/counters/counter2/trace.7 @@ -0,0 +1,41 @@ +$date + Mon May 20 17:40:02 2024 +$end +$timescale + 1ns +$end +$scope module Verilog::main $end + $var wire 1 main.clk clk $end + $var reg 32 main.counter counter [31:0] $end + $var wire 1 main.$live $live $end +$upscope $end +$enddefinitions $end +#0 +0main.clk +b10000000000000000000000000001000 main.counter +0main.$live +#1 +b00000000000000000000000000000000 main.counter +1main.$live +#2 +b00000000000000000000000000000001 main.counter +0main.$live +#3 +b00000000000000000000000000000010 main.counter +#4 +1main.clk +b00000000000000000000000000000011 main.counter +#5 +b00000000000000000000000000000100 main.counter +#6 +0main.clk +b00000000000000000000000000000101 main.counter +#7 +b00000000000000000000000000000110 main.counter +#8 +b00000000000000000000000000000111 main.counter +#9 +1main.clk +b00000000000000000000000000001000 main.counter +#10 +b00000000000000000000000000001001 main.counter diff --git a/regression/nuterm/counters/counter2/trace.8 b/regression/nuterm/counters/counter2/trace.8 new file mode 100644 index 000000000..fe600de06 --- /dev/null +++ b/regression/nuterm/counters/counter2/trace.8 @@ -0,0 +1,43 @@ +$date + 
Mon May 20 17:40:02 2024 +$end +$timescale + 1ns +$end +$scope module Verilog::main $end + $var wire 1 main.clk clk $end + $var reg 32 main.counter counter [31:0] $end + $var wire 1 main.$live $live $end +$upscope $end +$enddefinitions $end +#0 +0main.clk +b10000000000000000000000000001000 main.counter +0main.$live +#1 +1main.clk +b00000000000000000000000000000000 main.counter +1main.$live +#2 +0main.clk +b00000000000000000000000000000001 main.counter +0main.$live +#3 +b00000000000000000000000000000010 main.counter +#4 +1main.clk +b00000000000000000000000000000011 main.counter +#5 +0main.clk +b00000000000000000000000000000100 main.counter +#6 +1main.clk +b00000000000000000000000000000101 main.counter +#7 +b00000000000000000000000000000110 main.counter +#8 +b00000000000000000000000000000111 main.counter +#9 +b00000000000000000000000000001000 main.counter +#10 +b00000000000000000000000000001001 main.counter diff --git a/regression/nuterm/counters/counter2/trace.9 b/regression/nuterm/counters/counter2/trace.9 new file mode 100644 index 000000000..af98eb204 --- /dev/null +++ b/regression/nuterm/counters/counter2/trace.9 @@ -0,0 +1,44 @@ +$date + Mon May 20 17:40:02 2024 +$end +$timescale + 1ns +$end +$scope module Verilog::main $end + $var wire 1 main.clk clk $end + $var reg 32 main.counter counter [31:0] $end + $var wire 1 main.$live $live $end +$upscope $end +$enddefinitions $end +#0 +1main.clk +b10000000000000000000000000001000 main.counter +0main.$live +#1 +0main.clk +b00000000000000000000000000000000 main.counter +1main.$live +#2 +1main.clk +b00000000000000000000000000000001 main.counter +0main.$live +#3 +b00000000000000000000000000000010 main.counter +#4 +0main.clk +b00000000000000000000000000000011 main.counter +#5 +b00000000000000000000000000000100 main.counter +#6 +1main.clk +b00000000000000000000000000000101 main.counter +#7 +0main.clk +b00000000000000000000000000000110 main.counter +#8 +b00000000000000000000000000000111 main.counter +#9 +1main.clk +b00000000000000000000000000001000 main.counter +#10 +b00000000000000000000000000001001 main.counter diff --git a/src/ebmc/live_signal.cpp b/src/ebmc/live_signal.cpp index a0f8bc029..3686160a2 100644 --- a/src/ebmc/live_signal.cpp +++ b/src/ebmc/live_signal.cpp @@ -1,6 +1,6 @@ /*******************************************************************\ -Module: Live Signal +Module: Liveness Signal Author: Daniel Kroening, dkr@amazon.com @@ -13,9 +13,10 @@ Author: Daniel Kroening, dkr@amazon.com #include "transition_system.h" -void set_live_signal(transition_systemt &transition_system, exprt property) +void set_liveness_signal(transition_systemt &transition_system, exprt property) { - static const irep_idt identifier = "nuterm::live"; + static const irep_idt identifier = + id2string(transition_system.main_symbol->name) + ".$live"; // add symbol if needed if(!transition_system.symbol_table.has_symbol(identifier)) @@ -24,7 +25,7 @@ void set_live_signal(transition_systemt &transition_system, exprt property) identifier, bool_typet(), transition_system.main_symbol->mode}; live_symbol.module = transition_system.main_symbol->module; - live_symbol.base_name = "live"; + live_symbol.base_name = "$live"; const auto symbol_expr = live_symbol.symbol_expr(); diff --git a/src/ebmc/live_signal.h b/src/ebmc/live_signal.h index 4bc7a5246..5a8dba8c0 100644 --- a/src/ebmc/live_signal.h +++ b/src/ebmc/live_signal.h @@ -1,6 +1,6 @@ /*******************************************************************\ -Module: Live Signal +Module: Liveness Signal Author: Daniel 
Kroening, dkr@amazon.com @@ -13,6 +13,6 @@ Author: Daniel Kroening, dkr@amazon.com class transition_systemt; -void set_live_signal(transition_systemt &, exprt property); +void set_liveness_signal(transition_systemt &, exprt property); #endif // EBMC_LIVE_SIGNAL_H diff --git a/src/ebmc/neural_liveness.cpp b/src/ebmc/neural_liveness.cpp index d54eb1ce9..d7ccb508e 100644 --- a/src/ebmc/neural_liveness.cpp +++ b/src/ebmc/neural_liveness.cpp @@ -58,7 +58,7 @@ class neural_livenesst int show_traces(); void validate_properties(); - void set_live_signal(const ebmc_propertiest::propertyt &, const exprt &); + void set_liveness_signal(const ebmc_propertiest::propertyt &, const exprt &); void sample(std::function<void(trans_tracet)>); std::function<void(trans_tracet)> dump_vcd_files(temp_dirt &); exprt guess(ebmc_propertiest::propertyt &, const temp_dirt &); @@ -70,9 +70,6 @@ int neural_livenesst::operator()() if(cmdline.isset("show-traces")) return show_traces(); - if(!cmdline.isset("neural-engine")) - throw ebmc_errort() << "give a neural engine"; - transition_system = get_transition_system(cmdline, message.get_message_handler()); @@ -97,7 +94,7 @@ int neural_livenesst::operator()() continue; // Set the liveness signal for the property. - set_live_signal(property, original_trans_expr); + set_liveness_signal(property, original_trans_expr); // Now sample some traces. // Store the traces in a set of files, one per @@ -113,6 +110,8 @@ int neural_livenesst::operator()() if(verify(property, candidate).is_true()) break; + + throw ebmc_errort() << "giving up"; } } @@ -139,7 +138,7 @@ int neural_livenesst::show_traces() if(property.is_disabled()) continue; - set_live_signal(property, original_trans_expr); + set_liveness_signal(property, original_trans_expr); sample([&](trans_tracet trace) { namespacet ns(transition_system.symbol_table); @@ -180,7 +179,7 @@ void neural_livenesst::validate_properties() } } -void neural_livenesst::set_live_signal( +void neural_livenesst::set_liveness_signal( const ebmc_propertiest::propertyt &property, const exprt &original_trans_expr) { @@ -188,7 +187,7 @@ void neural_livenesst::set_live_signal( auto main_symbol_writeable = transition_system.symbol_table.get_writeable( transition_system.main_symbol->name); main_symbol_writeable->value = original_trans_expr; // copy - ::set_live_signal(transition_system, property.normalized_expr); + ::set_liveness_signal(transition_system, property.normalized_expr); } std::function<void(trans_tracet)> @@ -198,7 +197,7 @@ neural_livenesst::dump_vcd_files(temp_dirt &temp_dir) return [&, trace_nr = 0ull, outfile_prefix](trans_tracet trace) mutable -> void { namespacet ns(transition_system.symbol_table); - auto filename = outfile_prefix + std::to_string(trace_nr + 1); + auto filename = outfile_prefix + std::to_string(++trace_nr); std::ofstream out(widen_if_needed(filename)); if(!out) @@ -226,8 +225,7 @@ void neural_livenesst::sample(std::function<void(trans_tracet)> trace_consumer) return 100; // default }(); - const std::size_t number_of_trace_steps = [this]() -> std::size_t - { + const std::size_t number_of_trace_steps = [this]() -> std::size_t { if(cmdline.isset("trace-steps")) { auto trace_steps_opt = @@ -245,12 +243,14 @@ void neural_livenesst::sample(std::function<void(trans_tracet)> trace_consumer) message.status() << "Sampling " << number_of_traces << " traces with " << number_of_trace_steps << " steps" << messaget::eom; + null_message_handlert null_message_handler; + random_traces( transition_system, trace_consumer, number_of_traces, number_of_trace_steps, - message.get_message_handler()); + null_message_handler); } exprt 
neural_livenesst::guess( @@ -259,7 +259,10 @@ exprt neural_livenesst::guess( { message.status() << "Fitting a ranking function" << messaget::eom; - const auto engine = cmdline.get_value("neural-engine"); + const auto engine = cmdline.isset("neural-engine") + ? cmdline.get_value("neural-engine") + : "nuterm"; + temporary_filet engine_output("ebmc-neural", "txt"); const auto cmd = engine + " " + temp_dir.path + " | tee " + engine_output(); @@ -275,7 +278,7 @@ exprt neural_livenesst::guess( if(!in) throw ebmc_errort() << "failed to open " << engine_output(); - std::string prefix = "Candidate: "; + std::string prefix = "RESULT: "; std::string line; while(std::getline(in, line)) { diff --git a/src/nuterm/CMakeLists.txt b/src/nuterm/CMakeLists.txt new file mode 100644 index 000000000..fa2383654 --- /dev/null +++ b/src/nuterm/CMakeLists.txt @@ -0,0 +1,13 @@ +cmake_minimum_required(VERSION 3.18 FATAL_ERROR) +project(nuterm) + +find_package(Torch REQUIRED) +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${TORCH_CXX_FLAGS}") + +add_executable(pytorch_tests pytorch_tests.cpp) +target_link_libraries(pytorch_tests "${TORCH_LIBRARIES}") +set_property(TARGET pytorch_tests PROPERTY CXX_STANDARD 17) + +add_executable(nuterm nuterm_main.cpp vcd_parser.cpp training.cpp) +target_link_libraries(nuterm "${TORCH_LIBRARIES}") +set_property(TARGET nuterm PROPERTY CXX_STANDARD 17) diff --git a/src/nuterm/README b/src/nuterm/README new file mode 100644 index 000000000..d509a7131 --- /dev/null +++ b/src/nuterm/README @@ -0,0 +1,7 @@ +wget https://download.pytorch.org/libtorch/cpu/libtorch-macos-2.1.2.zip + +mkdir build +cd build +cmake -DCMAKE_OSX_ARCHITECTURES=x86_64 -DCMAKE_PREFIX_PATH=/Users/kroening/progr/2hw-cbmc/src/nuterm/libtorch .. +cmake --build . --config Release + diff --git a/src/nuterm/nuterm_main.cpp b/src/nuterm/nuterm_main.cpp new file mode 100644 index 000000000..ed57654bd --- /dev/null +++ b/src/nuterm/nuterm_main.cpp @@ -0,0 +1,340 @@ +/*******************************************************************\ + +Module: nuterm main + +Author: Daniel Kroening, dkr@amazon.com + +\*******************************************************************/ + +#include "training.h" +#include "vcd_parser.h" + +#include <algorithm> +#include <cassert> +#include <filesystem> +#include <fstream> +#include <iostream> +#include <vector> + +using tracest = std::list<vcdt>; + +tracest read_traces(const std::string &path) +{ + std::vector<std::filesystem::path> file_names; + + for(const auto &entry : std::filesystem::directory_iterator(path)) + file_names.push_back(entry.path()); + + // sort to get a fixed ordering + std::sort(file_names.begin(), file_names.end()); + + tracest traces; + + for(const auto &entry : file_names) + { + //std::cout << "Reading " << entry << '\n'; + std::ifstream in(entry); + traces.push_back(vcd_parser(in)); + } + + std::cout << "Read " << traces.size() << " trace files\n"; + + return traces; +} + +std::size_t number_of_transitions(const tracest &traces) +{ + std::size_t result = 0; + + for(auto &trace : traces) + result += trace.states.empty() ? 
0 : trace.states.size() - 1; + + return result; +} + +struct state_variablet +{ + std::size_t index; + std::string reference; +}; + +using state_variablest = std::map<std::string, state_variablet>; + +state_variablest state_variables(const tracest &traces) +{ + // number all identifiers + state_variablest state_variables; + + for(auto &trace : traces) + { + for(auto &[id, var] : trace.var_map) + { + if(state_variables.find(id) == state_variables.end()) + { + auto &state_variable = state_variables[id]; + state_variable.index = state_variables.size() - 1; + state_variable.reference = var.reference; + } + } + } + + return state_variables; +} + +bool has_suffix(const std::string &s, const std::string &suffix) +{ + if(s.length() >= suffix.length()) + return s.compare(s.length() - suffix.length(), suffix.length(), suffix) == + 0; + else + return false; +} + +#include <cmath> + +double vcd_to_value(const std::string &src) +{ + // VCD gives binary values + auto integer = std::stoull(src, nullptr, 2); + //std::cout << "VALUE " << src << " -> " << double(integer) << "\n"; + return integer; +} + +torch::Tensor state_to_tensor( + const state_variablest &state_variables, + const vcdt::statet &state) +{ + std::vector<double> data; + data.resize(state_variables.size(), 0); + for(auto &[id, var] : state_variables) + { + if(var.reference == "clk") + continue; + auto v_it = state.changes.find(id); + if(v_it != state.changes.end()) + data[var.index] = vcd_to_value(v_it->second); + } + + return torch::tensor(data, torch::kFloat64); +} + +torch::Tensor state_pair_to_tensor( + const state_variablest &state_variables, + const vcdt::statet &current, + const vcdt::statet &next) +{ + // We make a tensor that has dimensions 2 x |V|. + // The '2' allows for current and next state. + auto tensor_current = state_to_tensor(state_variables, current); + auto tensor_next = state_to_tensor(state_variables, next); + auto tensor_pair = torch::stack({tensor_current, tensor_next}, 0); + // std::cout << "P: " << tensor_pair << "\n" << std::flush; + return std::move(tensor_pair); +} + +bool is_live_state( + const std::string &liveness_signal, + const vcdt::statet &state) +{ + auto value_it = state.changes.find(liveness_signal); + if(value_it == state.changes.end()) + { + std::cerr << "state without liveness signal" << '\n'; + abort(); + } + return vcd_to_value(value_it->second) != 0; +} + +std::string liveness_signal(const state_variablest &state_variables) +{ + for(auto &[id, var] : state_variables) + if(var.reference == "$live") + return id; + + std::cout.flush(); + std::cerr << "failed to find liveness signal" << '\n'; + + abort(); +} + +std::vector<torch::Tensor> traces_to_tensors( + const state_variablest &state_variables, + const std::string &liveness_signal, + const tracest &traces) +{ + auto t = number_of_transitions(traces); + + std::vector<torch::Tensor> result; + result.reserve(t); + + for(auto &trace : traces) + { + const auto full_trace = trace.full_trace(); + + for(std::size_t t = 1; t < full_trace.size(); t++) + { + auto &current = full_trace[t - 1]; + auto &next = full_trace[t]; + + // We must discard transitions in/out of 'live' states. + // There is no need for the ranking function to decrease + // on such transitions. 
+ if( + !is_live_state(liveness_signal, current) && + !is_live_state(liveness_signal, next)) + { + // std::cout << "\n" << current << "---->\n" << next; + auto tensor = state_pair_to_tensor(state_variables, current, next); + assert(tensor.size(0) == 2); + assert(tensor.size(1) == state_variables.size()); + result.push_back(std::move(tensor)); + } + } + } + + return result; +} + +std::string sum(const std::vector<std::string> &terms) +{ + std::string result; + for(auto &term : terms) + { + if(result.empty()) + result = term; + else if(term != "" && term[0] == '-') + result += term; + else + result += "+" + term; + } + return result; +} + +std::string ranking_net_to_string( + const state_variablest &state_variables, + const std::shared_ptr<ranking_nett> net) +{ + std::vector<std::string> terms; + + auto weight = net->named_parameters()["fc1.weight"]; + auto bias = net->named_parameters()["fc1.bias"]; + + for(auto &[id, var] : state_variables) + { + assert(var.index < state_variables.size()); + long long weight_int = round(weight[0][var.index].item<double>()); + if(weight_int == 0) + { + } + else if(weight_int == 1) + { + terms.push_back(var.reference); + } + else + { + if(weight_int >= 0) + terms.push_back(std::to_string(weight_int) + "*" + var.reference); + else + { + // make signed, but first add a bit + terms.push_back( + std::to_string(weight_int) + "*$signed({1'b0," + var.reference + + "})"); + } + } + } + + long long bias_int = round(bias.item<double>()); + if(bias_int != 0) + terms.push_back(std::to_string(bias_int)); + + return sum(terms); +} + +int main(int argc, const char *argv[]) +{ + // The first argument is the directory with the VCD files. + if(argc != 2) + { + std::cout << "Usage: nuterm trace_directory\n"; + return 1; + } + + const auto traces = read_traces(argv[1]); + + auto state_variables = ::state_variables(traces); + + if(state_variables.empty()) + { + std::cout << "no state variables\n"; + return 1; + } + + auto liveness_signal = ::liveness_signal(state_variables); + + for(auto &[_, var] : state_variables) + std::cout << "V" << var.index << "=" << var.reference << '\n'; + + std::cout << "Liveness signal: " << liveness_signal << '\n'; + + torch::manual_seed(0); + + const auto tensors = + traces_to_tensors(state_variables, liveness_signal, traces); + + std::cout << "Got " << tensors.size() << " transitions to rank\n"; + + if(tensors.empty()) + { + return 0; + } + + const auto net = std::make_shared<ranking_nett>(state_variables.size()); + +#if 0 + for(auto &p : net->named_parameters()) + { + std::cout << p.key() << ": " << p.value() << "\n"; + } +#endif + + { + auto weight = net->named_parameters()["fc1.weight"]; + auto bias = net->named_parameters()["fc1.bias"]; + assert(weight.dim() == 2); + assert(bias.dim() == 1); + std::cout << "#weights: " << weight.size(1) << "\n"; + assert(weight.size(1) == state_variables.size()); + +#if 0 + for(auto &var : state_variables) + { + assert(var.second < state_variables.size()); + std::cout << "weight " << var.first << " = " << weight[0][var.second].item<double>() << '\n'; + } + + std::cout << "bias: " << bias.item<double>() << "\n"; +#endif + } + + std::cout << "TRAINING\n"; + ranking_function_training(net, tensors); + + { + auto weight = net->named_parameters()["fc1.weight"]; + auto bias = net->named_parameters()["fc1.bias"]; + for(auto &[_, var] : state_variables) + { + assert(var.index < state_variables.size()); + std::cout << "weight " << var.reference << " = " + << (long long)round(weight[0][var.index].item<double>()) << ' ' + << weight[0][var.index].item<double>() << '\n'; + } + + std::cout << "bias: " << (long 
long)(round(bias.item<double>())) << ' ' + << bias.item<double>() << "\n"; + } + + std::cout << "RESULT: " << ranking_net_to_string(state_variables, net) + << '\n'; +} diff --git a/src/nuterm/pytorch_tests.cpp b/src/nuterm/pytorch_tests.cpp new file mode 100644 index 000000000..82f2c1160 --- /dev/null +++ b/src/nuterm/pytorch_tests.cpp @@ -0,0 +1,439 @@ +#include <torch/torch.h> + +#include <cassert> +#include <iostream> + +struct test_net : torch::nn::Module +{ + test_net() + { + fc1 = register_module("fc1", torch::nn::Linear(1, 1)); + fc1->to(torch::kFloat64); + } + + torch::Tensor forward(torch::Tensor x) + { + return fc1->forward(x); + } + + torch::nn::Linear fc1{nullptr}; +}; + +struct test_batcht +{ + torch::Tensor data; + torch::Tensor target; +}; + +void train( + std::shared_ptr<test_net> net, + const std::vector<test_batcht> &batches) +{ + torch::optim::SGD optimizer(net->parameters(), /*lr=*/0.1); + + for(size_t epoch = 1; epoch <= 30; ++epoch) + { + size_t batch_index = 0; + + // Iterate the data loader to yield batches from the dataset. + for(auto &batch : batches) + { + // Reset gradients. + optimizer.zero_grad(); + + // Execute the model on the input data. + torch::Tensor prediction = net->forward(batch.data); + + // Compute a loss value to judge the prediction of our model. + torch::Tensor loss = torch::mse_loss(prediction, batch.target); + + // Compute gradients of the loss w.r.t. the parameters of our model. + loss.backward(); + + // Update the parameters based on the calculated gradients. + optimizer.step(); + } + } +} + +void pytorch_test1() +{ + torch::manual_seed(0); + + auto net = std::make_shared<test_net>(); + + std::vector<test_batcht> batches = { + {.data = torch::full({1}, /*value*/ 0, torch::kFloat64), + .target = torch::full({1}, /*value*/ 10, torch::kFloat64)}, + {.data = torch::full({1}, /*value*/ 1, torch::kFloat64), + .target = torch::full({1}, /*value*/ 11, torch::kFloat64)}, + {.data = torch::full({1}, /*value*/ 2, torch::kFloat64), + .target = torch::full({1}, /*value*/ 12, torch::kFloat64)}}; + + train(net, batches); + + assert(round(net->parameters()[0].item<double>()) == 1); // coefficient + assert(round(net->parameters()[1].item<double>()) == 10); // bias +} + +void pytorch_test2() +{ + torch::manual_seed(0); + + auto net = std::make_shared<test_net>(); + + std::vector<test_batcht> batches = { + {.data = torch::full({1}, /*value*/ 0, torch::kFloat64), + .target = torch::full({1}, /*value*/ 0, torch::kFloat64)}, + {.data = torch::full({1}, /*value*/ 1, torch::kFloat64), + .target = torch::full({1}, /*value*/ -1, torch::kFloat64)}, + {.data = torch::full({1}, /*value*/ 2, torch::kFloat64), + .target = torch::full({1}, /*value*/ -2, torch::kFloat64)}}; + + train(net, batches); + + assert(round(net->parameters()[0].item<double>()) == -1); // coefficient + assert(round(net->parameters()[1].item<double>()) == 0); // bias +} + +void pytorch_test3() +{ + torch::manual_seed(0); + + auto net = std::make_shared<test_net>(); + + std::vector<test_batcht> batches = { + {.data = torch::full({1}, /*value*/ 0, torch::kFloat64), + .target = torch::full({1}, /*value*/ 10, torch::kFloat64)}, + {.data = torch::full({1}, /*value*/ 1, torch::kFloat64), + .target = torch::full({1}, /*value*/ 9, torch::kFloat64)}, + {.data = torch::full({1}, /*value*/ 2, torch::kFloat64), + .target = torch::full({1}, /*value*/ 8, torch::kFloat64)}}; + + train(net, batches); + + assert(round(net->parameters()[0].item<double>()) == -1); // coefficient + assert(round(net->parameters()[1].item<double>()) == 10); // bias +} + +void pytorch_test4() +{ + torch::manual_seed(0); + + auto net = std::make_shared<test_net>(); + + std::vector<torch::Tensor> data = { + torch::full({1}, /*value*/ 0, torch::kFloat64), + 
torch::full({1}, /*value*/ 1, torch::kFloat64), + torch::full({1}, /*value*/ 2, torch::kFloat64), + torch::full({1}, /*value*/ 3, torch::kFloat64), + torch::full({1}, /*value*/ 4, torch::kFloat64), + torch::full({1}, /*value*/ 5, torch::kFloat64)}; + + auto my_loss = [](const torch::Tensor &input) -> torch::Tensor { + return input.abs(); + }; + + torch::optim::SGD optimizer(net->parameters(), /*lr=*/0.1); + + for(size_t epoch = 1; epoch <= 10; ++epoch) + { + size_t batch_index = 0; + + // Iterate the data loader to yield batches from the dataset. + for(auto &batch : data) + { + // Reset gradients. + optimizer.zero_grad(); + + // Execute the model on the input data. + torch::Tensor prediction = net->forward(batch); + + // Compute a loss value to judge the prediction of our model. + torch::Tensor loss = my_loss(prediction); + + // Compute gradients of the loss w.r.t. the parameters of our model. + loss.backward(); + + // Update the parameters based on the calculated gradients. + optimizer.step(); + } + } + + assert(round(net->parameters()[0].item()) == 0); // coefficient + assert(round(net->parameters()[1].item()) == 0); // bias +} + +struct test_net2 : torch::nn::Module +{ + explicit test_net2(std::size_t number_of_inputs) + { + fc1 = register_module("fc1", torch::nn::Linear(number_of_inputs, 1)); + fc1->to(torch::kFloat64); + } + + torch::Tensor forward(torch::Tensor x) + { + x = fc1->forward(x); + // the relu ensures x is bounded from below + x = torch::relu(x); + return x; + } + + torch::nn::Linear fc1{nullptr}; +}; + +void pytorch_test5() +{ + torch::manual_seed(0); + + auto net = std::make_shared(2); + + std::vector data = { + torch::tensor({0, 0}, torch::kFloat64), + torch::tensor({1, 0}, torch::kFloat64), + torch::tensor({2, 0}, torch::kFloat64), + torch::tensor({3, 0}, torch::kFloat64), + torch::tensor({4, 0}, torch::kFloat64), + torch::tensor({5, 0}, torch::kFloat64)}; + + auto my_loss = [](const torch::Tensor &input) -> torch::Tensor { + return input.abs(); + }; + + torch::optim::SGD optimizer(net->parameters(), /*lr=*/0.1); + + for(size_t epoch = 1; epoch <= 10; ++epoch) + { + size_t batch_index = 0; + + // Iterate the data loader to yield batches from the dataset. + for(auto &batch : data) + { + // Reset gradients. + optimizer.zero_grad(); + + // Execute the model on the input data. + torch::Tensor prediction = net->forward(batch); + + // Compute a loss value to judge the prediction of our model. + torch::Tensor loss = my_loss(prediction); + + // Compute gradients of the loss w.r.t. the parameters of our model. + loss.backward(); + + // Update the parameters based on the calculated gradients. 
+ optimizer.step(); + } + } + + assert(round(net->parameters()[0][0][0].item()) == 0); // coefficient + assert(round(net->parameters()[0][0][1].item()) == 0); // coefficient + assert(round(net->parameters()[1].item()) == -1); // bias +} + +void pytorch_test6() +{ + torch::manual_seed(0); + + auto net = std::make_shared(1); + + // one state variable, pairs are transitions + std::vector data = { + torch::stack( + {torch::tensor({5}, torch::kFloat64), + torch::tensor({4}, torch::kFloat64)}), + torch::stack( + {torch::tensor({4}, torch::kFloat64), + torch::tensor({3}, torch::kFloat64)}), + torch::stack( + {torch::tensor({3}, torch::kFloat64), + torch::tensor({2}, torch::kFloat64)}), + torch::stack( + {torch::tensor({2}, torch::kFloat64), + torch::tensor({1}, torch::kFloat64)}), + torch::stack( + {torch::tensor({1}, torch::kFloat64), + torch::tensor({0}, torch::kFloat64)})}; + + auto my_loss = + [](const torch::Tensor &curr, const torch::Tensor &next) -> torch::Tensor { + return torch::relu(next - curr + 1.0); + }; + + torch::optim::SGD optimizer(net->parameters(), /*lr=*/0.1); + + for(size_t epoch = 1; epoch <= 3; ++epoch) + { + size_t batch_index = 0; + + // Iterate the data loader to yield batches from the dataset. + for(auto &batch : data) + { + // Reset gradients. + optimizer.zero_grad(); + + // Execute the model on the input data. + torch::Tensor prediction_curr = net->forward(batch[0]); + torch::Tensor prediction_next = net->forward(batch[1]); + + // Compute a loss value to judge the prediction of our model. + torch::Tensor loss = my_loss(prediction_curr, prediction_next); + + // Compute gradients of the loss w.r.t. the parameters of our model. + loss.backward(); + + // Update the parameters based on the calculated gradients. + optimizer.step(); + } + } + + assert(round(net->parameters()[0].item()) == 1); // coefficient + assert(round(net->parameters()[1].item()) == 1); // bias +} + +void pytorch_test7() +{ + torch::manual_seed(0); + + auto net = std::make_shared(2); + + // two state variables, pairs are transitions + std::vector data = { + torch::stack( + {torch::tensor({0, 5}, torch::kFloat64), + torch::tensor({0, 4}, torch::kFloat64)}), + torch::stack( + {torch::tensor({0, 4}, torch::kFloat64), + torch::tensor({0, 3}, torch::kFloat64)}), + torch::stack( + {torch::tensor({0, 3}, torch::kFloat64), + torch::tensor({0, 2}, torch::kFloat64)}), + torch::stack( + {torch::tensor({0, 2}, torch::kFloat64), + torch::tensor({0, 1}, torch::kFloat64)}), + torch::stack( + {torch::tensor({0, 1}, torch::kFloat64), + torch::tensor({0, 0}, torch::kFloat64)})}; + + auto my_loss = + [](const torch::Tensor &curr, const torch::Tensor &next) -> torch::Tensor { + return torch::relu(next - curr + 1.0); + }; + + torch::optim::SGD optimizer(net->parameters(), /*lr=*/0.1); + + for(size_t epoch = 1; epoch <= 3; ++epoch) + { + size_t batch_index = 0; + + // Iterate the data loader to yield batches from the dataset. + for(auto &batch : data) + { + // Reset gradients. + optimizer.zero_grad(); + + // Execute the model on the input data. + torch::Tensor prediction_curr = net->forward(batch[0]); + torch::Tensor prediction_next = net->forward(batch[1]); + + // Compute a loss value to judge the prediction of our model. + torch::Tensor loss = my_loss(prediction_curr, prediction_next); + + // Compute gradients of the loss w.r.t. the parameters of our model. + loss.backward(); + //std::cout << "L " << loss.item() << "\n"; + + // Update the parameters based on the calculated gradients. 
+ optimizer.step(); + } + } + + //std::cout << "XX: " << net->parameters() << "\n"; + //std::cout << "C: " << net->parameters()[0][0][0].item() << "\n"; + //std::cout << "C: " << net->parameters()[0][0][1].item() << "\n"; + //std::cout << "B: " << net->parameters()[1].item() << "\n"; + assert(round(net->parameters()[0][0][0].item()) == 0); // coefficient + assert(round(net->parameters()[0][0][1].item()) == 1); // coefficient + assert(round(net->parameters()[1].item()) == 0); // bias +} + +void pytorch_test8() +{ + torch::manual_seed(0); + + auto net = std::make_shared(1); + + // one state variable, pairs are transitions + std::vector data = { + torch::stack( + {torch::tensor({1}, torch::kFloat64), + torch::tensor({2}, torch::kFloat64)}), + torch::stack( + {torch::tensor({2}, torch::kFloat64), + torch::tensor({3}, torch::kFloat64)}), + torch::stack( + {torch::tensor({3}, torch::kFloat64), + torch::tensor({4}, torch::kFloat64)}), + torch::stack( + {torch::tensor({4}, torch::kFloat64), + torch::tensor({5}, torch::kFloat64)}), + torch::stack( + {torch::tensor({5}, torch::kFloat64), + torch::tensor({6}, torch::kFloat64)})}; + + auto my_loss = + [](const torch::Tensor &curr, const torch::Tensor &next) -> torch::Tensor { + // std::cout << "LL " << from.item() << " -> " << to.item() << "\n"; + return torch::relu(next - curr + 1.0); + }; + + //torch::optim::SGD optimizer(net->parameters(), /*lr=*/0.1); + torch::optim::Adam optimizer(net->parameters(), /*lr=*/0.1); + + for(size_t epoch = 1; epoch <= 30; ++epoch) + { + size_t batch_index = 0; + + // Iterate the data loader to yield batches from the dataset. + for(auto &batch : data) + { + // Reset gradients. + optimizer.zero_grad(); + + // Execute the model on the input data. + torch::Tensor prediction_curr = net->forward(batch[0]); + torch::Tensor prediction_next = net->forward(batch[1]); + + // Compute a loss value to judge the prediction of our model. + torch::Tensor loss = my_loss(prediction_curr, prediction_next); + + // Compute gradients of the loss w.r.t. the parameters of our model. + loss.backward(); + + // Update the parameters based on the calculated gradients. 
+ optimizer.step(); + + // std::cout << "L " << loss.item() << "\n"; + } + } + + //std::cout << "C: " << net->parameters()[0].item() << "\n"; + //std::cout << "B: " << net->parameters()[1].item() << "\n"; + assert(round(net->parameters()[0].item()) == -1); // coefficient + assert(round(net->parameters()[1].item()) == 6); // bias +} + +int main() +{ + std::cout << "Running tests\n"; + + pytorch_test1(); + pytorch_test2(); + pytorch_test3(); + pytorch_test4(); + pytorch_test5(); + pytorch_test6(); + pytorch_test7(); + pytorch_test8(); +} diff --git a/src/nuterm/training.cpp b/src/nuterm/training.cpp new file mode 100644 index 000000000..bbad82829 --- /dev/null +++ b/src/nuterm/training.cpp @@ -0,0 +1,95 @@ +/*******************************************************************\ + +Module: Ranking Function Training + +Author: Daniel Kroening, dkr@amazon.com + +\*******************************************************************/ + +#include "training.h" + +#include + +const double delta = 1; + +#include + +void ranking_function_training( + std::shared_ptr net, + const std::vector &data) +{ + assert(net != nullptr); + + for(auto &batch : data) + assert(batch.size(0) == 2); + + auto ranking_function_loss = + [](const torch::Tensor &curr, const torch::Tensor &next) -> torch::Tensor { + assert(curr.dim() == 1 && next.dim() == 1); + // The ranking needs to decrease from 'curr' to 'next' + // by at least 'delta'. Anything less than that is loss. + return torch::relu(next - curr + delta); + }; + +#if 0 + torch::optim::SGD optimizer(net->parameters(), /*lr=*/0.1); +#endif +#if 1 + torch::optim::Adam optimizer(net->parameters(), /*lr=*/0.01); +#endif + + torch::Tensor last_loss = {}; + + for(size_t epoch = 1; epoch <= 10; ++epoch) + { + size_t batch_index = 0; + + // Iterate the data loader to yield batches from the dataset. + for(auto &batch : data) + { + //std::cout << "B: " << batch << "\n"; + + // Reset gradients. + optimizer.zero_grad(); + + // Execute the model on the input data. + assert(batch.size(0) == 2); + torch::Tensor prediction_curr = net->forward(batch[0]); + torch::Tensor prediction_next = net->forward(batch[1]); + // std::cout << "B: " << std::fixed << batch[0][1].item() << " -> " << batch[1][1].item() << "\n"; + // std::cout << "R: " << std::fixed << prediction_curr.item() << " -> " << prediction_next.item() << "\n"; + + // Compute a loss value to judge the prediction of our model. + torch::Tensor loss = + ranking_function_loss(prediction_curr, prediction_next); + + // std::cout << "L: " << loss << "\n"; + + // Compute gradients of the loss w.r.t. the parameters of our model. + loss.backward(); + + // Update the parameters based on the calculated gradients. 
+ optimizer.step(); + +#if 1 + if(1) + { + std::cout << "Epoch: " << epoch << " | Batch: " << batch_index + << " | Loss: " << std::fixed << std::setprecision(3) + << loss.item() << '\n'; + //torch::save(net, "net.pt"); + } +#endif + + batch_index++; + + last_loss = loss; + } + + if(last_loss.item() == 0) + break; // done + } + + std::cout << "Final loss: " << std::fixed << std::setprecision(3) + << last_loss.item() << '\n'; +} diff --git a/src/nuterm/training.h b/src/nuterm/training.h new file mode 100644 index 000000000..95b788023 --- /dev/null +++ b/src/nuterm/training.h @@ -0,0 +1,41 @@ +/*******************************************************************\ + +Module: Training + +Author: Daniel Kroening, dkr@amazon.com + +\*******************************************************************/ + +#include + +#include + +struct RankingNet : torch::nn::Module +{ + explicit RankingNet(std::size_t number_of_state_variables) + { + fc1 = + register_module("fc1", torch::nn::Linear(number_of_state_variables, 1)); + fc1->to(torch::kFloat64); +#if 0 + // The default values for nn::Linear are kaiming_uniform(sqrt(5)). + // Use zeros instead. + fc1->named_parameters()["weight"].fill_(0, torch::requires_grad()); + fc1->named_parameters()["bias"] = torch::zeros({1}); // .fill_(0, torch::requires_grad()); + std::cout << "CON: " << fc1->named_parameters()["bias"].item() << "\n"; +#endif + } + + torch::Tensor forward(torch::Tensor x) + { + // the relu ensures that the function is bounded from below by 0 + // return torch::relu(fc1->forward(x)); + return fc1->forward(x); + } + + torch::nn::Linear fc1{nullptr}; +}; + +void ranking_function_training( + const std::shared_ptr net, + const std::vector &); diff --git a/src/nuterm/vcd_parser.cpp b/src/nuterm/vcd_parser.cpp new file mode 100644 index 000000000..b1aae773f --- /dev/null +++ b/src/nuterm/vcd_parser.cpp @@ -0,0 +1,142 @@ +/*******************************************************************\ + +Module: VCD Parser + +Author: Daniel Kroening, dkr@amazon.com + +\*******************************************************************/ + +#include "vcd_parser.h" + +#include + +static bool is_whitespace(char ch) +{ + return ch == ' ' || ch == '\t' || ch == '\r' || ch == '\n'; +} + +static std::string vcd_token(std::istream &in) +{ + std::string token; + char ch; + while(!in.eof()) + { + ch = in.get(); + if(is_whitespace(ch)) + { + if(!token.empty()) + return token; + } + else + token += ch; + } + + return token; +} + +void vcd_command(vcdt &vcd, const std::string &token, std::istream &in) +{ + if(token == "$var") + { + vcdt::vart var; + var.type = vcd_token(in); + var.size = std::stoull(vcd_token(in)); + var.id = vcd_token(in); + var.reference = vcd_token(in); + vcd.var_map[var.id] = std::move(var); + } + + // look for $end + while(true) + { + auto t = vcd_token(in); + + if(t.empty() || t == "$end") + return; + } +} + +vcdt vcd_parser(std::istream &in) +{ + vcdt vcd; + + while(true) + { + auto token = vcd_token(in); + if(token.empty()) + break; + switch(token[0]) + { + case '$': + vcd_command(vcd, token, in); + break; + case '#': + // #decimal + vcd.simulation_time(token.substr(1, std::string::npos)); + break; + case '0': + case '1': + case 'x': + case 'X': + case 'z': + case 'Z': + // (0 | 1 | x | X | z | Z) identifier + // one token, no space + vcd.value_change(token.substr(0, 1), token.substr(1)); + break; + case 'b': + case 'B': + // (b | B) value identifier + // two tokens, space between value and identifier + vcd.value_change(token.substr(1), vcd_token(in)); 
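+      // e.g. the two tokens "b1010" and "!" record the binary value 1010
+      // for the variable with identifier '!' (example values illustrative)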
+ break; + case 'r': + case 'R': + // (r | R) value identifier + // two tokens, space between value and identifier + vcd.value_change(token.substr(1), vcd_token(in)); + break; + default: + break; + } + } + + return vcd; +} + +std::ostream &operator<<(std::ostream &out, const vcdt::statet &state) +{ + for(auto &[id, value] : state.changes) + out << id << " = " << std::stoull(value, nullptr, 2) << '\n'; + return out; +} + +std::vector vcdt::full_trace() const +{ + std::vector result; + result.reserve(states.size()); + + for(auto &state : states) + { + if(result.empty()) + { + // first state + result.push_back(state); + } + else + { + statet full_state(state.simulation_time); + + // copy the previous map + full_state.changes = result.back().changes; + + // apply the changes + for(auto &change : state.changes) + full_state.changes[change.first] = change.second; + + result.push_back(std::move(full_state)); + } + } + + return result; +} diff --git a/src/nuterm/vcd_parser.h b/src/nuterm/vcd_parser.h new file mode 100644 index 000000000..b009278e3 --- /dev/null +++ b/src/nuterm/vcd_parser.h @@ -0,0 +1,61 @@ +/*******************************************************************\ + +Module: VCD Parser + +Author: Daniel Kroening, dkr@amazon.com + +\*******************************************************************/ + +#include +#include +#include +#include + +class vcdt +{ +public: + struct vart + { + std::string type; + std::size_t size; + std::string id; + std::string reference; + }; + + using var_mapt = std::map; + var_mapt var_map; + + using value_mapt = std::map; + + struct statet + { + explicit statet(std::string t) : simulation_time(std::move(t)) + { + } + + std::string simulation_time; + value_mapt changes; + }; + + std::vector states; + + void simulation_time(const std::string &t) + { + states.push_back(statet(t)); + } + + void value_change(std::string value, const std::string &identifier) + { + if(states.empty()) + simulation_time(""); + states.back().changes[identifier] = std::move(value); + } + + // Expand the differential trace into a trace of full states, + // including all unchanged values. 
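+  // For example (identifiers and values illustrative): given the
+  // differential states
+  //   #0: { a = 0, b = 101 }    #1: { b = 110 }
+  // full_trace() expands this to
+  //   #0: { a = 0, b = 101 }    #1: { a = 0, b = 110 }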
+  std::vector<statet> full_trace() const;
+};
+
+std::ostream &operator<<(std::ostream &, const vcdt::statet &);
+
+vcdt vcd_parser(std::istream &);

From 7b5024427a6836977e4cdd00eaab93a11aa46ffd Mon Sep 17 00:00:00 2001
From: Daniel Kroening
Date: Sun, 26 May 2024 19:54:59 +0100
Subject: [PATCH 2/2] fx

---
 src/nuterm/pytorch_tests.cpp | 84 ++++++++++++++++++++++++++++++++++++
 1 file changed, 84 insertions(+)

diff --git a/src/nuterm/pytorch_tests.cpp b/src/nuterm/pytorch_tests.cpp
index 82f2c1160..ab3242005 100644
--- a/src/nuterm/pytorch_tests.cpp
+++ b/src/nuterm/pytorch_tests.cpp
@@ -424,6 +424,89 @@ void pytorch_test8()
   assert(round(net->parameters()[1].item<double>()) == 6); // bias
 }
 
+struct test_net3 : torch::nn::Module
+{
+  explicit test_net3(std::size_t number_of_inputs)
+  {
+    fc1 = register_module("fc1", torch::nn::Linear(number_of_inputs, 1));
+    fc1->to(torch::kFloat64);
+  }
+
+  torch::Tensor forward(torch::Tensor x)
+  {
+    x = fc1->forward(x);
+    return x;
+  }
+
+  torch::nn::Linear fc1{nullptr};
+};
+
+void pytorch_test9()
+{
+  torch::manual_seed(0);
+
+  auto net = std::make_shared<test_net3>(1);
+
+  // one state variable
+  // batch-size x state-vars
+  torch::Tensor batch_curr = torch::stack({
+    torch::tensor({1}, torch::kFloat64),
+    torch::tensor({2}, torch::kFloat64),
+    torch::tensor({3}, torch::kFloat64),
+    torch::tensor({4}, torch::kFloat64),
+    torch::tensor({1000}, torch::kFloat64)});
+
+  torch::Tensor batch_next = torch::stack({
+    torch::tensor({2}, torch::kFloat64),
+    torch::tensor({3}, torch::kFloat64),
+    torch::tensor({4}, torch::kFloat64),
+    torch::tensor({5}, torch::kFloat64),
+    torch::tensor({1001}, torch::kFloat64)});
+
+  auto my_loss =
+    [](const torch::Tensor &curr, const torch::Tensor &next) -> torch::Tensor {
+    // both curr and next are a batch of ranks
+    auto point_loss = torch::relu(next - curr + 1.0);
+    auto loss = torch::sum(point_loss);
+    return loss;
+  };
+
+  torch::optim::SGD optimizer(net->parameters(), /*lr=*/0.1);
+  //torch::optim::AdamW optimizer(net->parameters(), /*lr=*/0.01);
+
+  for(size_t epoch = 1; epoch <= 5; ++epoch)
+  {
+    size_t batch_index = 0;
+
+    // Iterate the data loader to yield batches from the dataset.
+    //for(auto &batch : data)
+    {
+      // Reset gradients.
+      optimizer.zero_grad();
+
+      // Execute the model on the input data.
+      torch::Tensor prediction_curr = net->forward(batch_curr);
+      torch::Tensor prediction_next = net->forward(batch_next);
+
+      // Compute a loss value to judge the prediction of our model.
+      torch::Tensor loss = my_loss(prediction_curr, prediction_next);
+
+      // Compute gradients of the loss w.r.t. the parameters of our model.
+      loss.backward();
+
+      // Update the parameters based on the calculated gradients.
+      optimizer.step();
+
+      // std::cout << "L " << loss.item<double>() << "\n";
+    }
+  }
+
+  //std::cout << "C: " << net->parameters()[0].item<double>() << "\n";
+  //std::cout << "B: " << net->parameters()[1].item<double>() << "\n";
+  assert(round(net->parameters()[0].item<double>()) == -1); // coefficient
+  assert(round(net->parameters()[1].item<double>()) == 1); // bias
+}
+
 int main()
 {
   std::cout << "Running tests\n";
@@ -436,4 +519,5 @@ int main()
   pytorch_test6();
   pytorch_test7();
   pytorch_test8();
+  pytorch_test9();
 }
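
For orientation, the pieces above compose roughly as follows; the real wiring lives in the nuterm_main.cpp added by patch 1. The sketch below is illustrative only: the trace file name, the VCD identifier "!", the single state variable, and the assumption that value strings are plain binary (no 'x'/'z' bits) are not taken from the patch.

#include "training.h"
#include "vcd_parser.h"

#include <torch/torch.h>

#include <fstream>
#include <memory>
#include <string>
#include <vector>

int main()
{
  // Parse one VCD trace and expand it into a trace of full states.
  std::ifstream in("trace.1"); // illustrative file name
  auto vcd = vcd_parser(in);
  auto states = vcd.full_trace();

  // Build (curr, next) transition pairs for one state variable;
  // "!" is an illustrative VCD identifier.
  std::vector<torch::Tensor> transitions;
  for(std::size_t i = 1; i < states.size(); i++)
  {
    double curr = std::stoull(states[i - 1].changes.at("!"), nullptr, 2);
    double next = std::stoull(states[i].changes.at("!"), nullptr, 2);
    transitions.push_back(torch::stack(
      {torch::full({1}, curr, torch::kFloat64),
       torch::full({1}, next, torch::kFloat64)}));
  }

  // One network input per state variable; training stops early once the
  // ranking loss is zero, i.e. the learned affine function drops by at
  // least delta on every observed transition.
  auto net = std::make_shared<RankingNet>(1);
  ranking_function_training(net, transitions);
}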