Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
79 changes: 79 additions & 0 deletions .github/workflows/local-benchmarks.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,79 @@
---
# CI workflow: builds the Lingua Franca toolchain and runs the PingPong
# benchmark locally (no remote embedded device needed), then archives the
# generated trace/CSV artifacts.
name: Local Benchmarks CI

on:
  push:
    branches: [main]
  pull_request:
    branches: [main]

jobs:
  local-benchmark-test:
    runs-on: ubuntu-22.04

    steps:
      # Submodules are required: lingua-franca (and its nested reactor-c)
      # are pulled in recursively.
      - name: Checkout repository with submodules
        uses: actions/checkout@v4
        with:
          submodules: recursive

      - name: Set up Java 17
        uses: actions/setup-java@v4
        with:
          distribution: 'temurin'
          java-version: '17'

      - name: Set up Python 3.11
        uses: actions/setup-python@v5
        with:
          # Quoted so YAML does not read it as the float 3.11
          python-version: '3.11'

      - name: Install Python dependencies
        run: |
          pip install paramiko pandas seaborn imageio matplotlib

      - name: Install system dependencies
        run: |
          sudo apt-get update
          sudo apt-get install -y build-essential cmake ninja-build

      - name: Build Lingua Franca compiler
        run: |
          cd lingua-franca
          ./gradlew assemble

      # trace_to_csv / trace_to_chrome convert the binary .lft traces
      # produced by reactor-c into analyzable formats.
      - name: Build and install trace utilities
        run: |
          cd lingua-franca/core/src/main/resources/lib/c/reactor-c/util/tracing
          make trace_to_csv trace_to_chrome
          sudo cp trace_to_csv trace_to_chrome /usr/local/bin/

      - name: Add lfc to PATH
        run: |
          echo "${{ github.workspace }}/lingua-franca/bin" >> $GITHUB_PATH

      - name: Verify lfc installation
        run: |
          lfc-dev --version
          trace_to_csv --help | head -3

      - name: Run local benchmark test (PingPong with NP scheduler)
        run: |
          cd benchmarks
          python3 scripts/run_benchmark.py --local --src=src --src-gen=src-gen --select=PingPong -f=--scheduler=NP

      - name: Verify benchmark output
        run: |
          # Check that CSV file was generated
          ls -la benchmarks/data/
          # Most recent run directory (run_benchmark.py creates one per run)
          CSV_DIR=$(ls -td benchmarks/data/*/ | head -1)
          echo "Checking directory: $CSV_DIR"
          ls -la "$CSV_DIR"
          test -f "${CSV_DIR}PingPong.csv" && echo "CSV file generated successfully"
          test -f "${CSV_DIR}PingPong.lft" && echo "LFT trace file generated successfully"

      - name: Upload benchmark artifacts
        uses: actions/upload-artifact@v4
        with:
          name: benchmark-results
          path: benchmarks/data/
          retention-days: 7
2 changes: 2 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -10,5 +10,7 @@

benchmarks/experiment-data
benchmarks/scripts/credentials.py
benchmarks/data
benchmarks/include

.vscode/
83 changes: 83 additions & 0 deletions Dockerfile
Original file line number Diff line number Diff line change
@@ -0,0 +1,83 @@
# Benchmark/WCET development image: LF compiler toolchain (Java), Python
# analysis deps, FlexPRET build tools (Verilator, Scala/sbt), a bare-metal
# RISC-V GCC, and WCET analysis dependencies.
FROM ubuntu:22.04

# Timezone and locale
ENV TZ="UTC"
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone

# Base packages
RUN apt-get update && apt-get install -y \
    git wget curl tar sudo apt-utils software-properties-common \
    && rm -rf /var/lib/apt/lists/*

# Build tools
RUN apt-get update && apt-get install -y \
    build-essential ninja-build cmake make pkg-config libssl-dev \
    && rm -rf /var/lib/apt/lists/*

# Java 17 (for Lingua Franca compiler)
RUN apt-get update && apt-get install -y openjdk-17-jdk \
    && rm -rf /var/lib/apt/lists/*
ENV JAVA_HOME=/usr/lib/jvm/java-17-openjdk-amd64

# Python 3
RUN apt-get update && apt-get install -y python3 python3-pip python3-venv \
    && rm -rf /var/lib/apt/lists/*

# Python dependencies for benchmarks
RUN pip3 install --no-cache-dir \
    paramiko==3.5.0 pandas==2.2.3 seaborn==0.13.2 imageio==2.36.0 matplotlib

# Python dependencies for EGS
RUN pip3 install --no-cache-dir \
    numpy==2.1.3 scipy==1.15.2 tensorflow==2.19.0

# Ruby (for WCET analysis tools)
RUN apt-get update && apt-get install -y ruby ruby-dev \
    && rm -rf /var/lib/apt/lists/*
RUN gem install bundler -v 2.4.22

# Verilator (for FlexPRET emulator)
RUN apt-get update && apt-get install -y verilator \
    && rm -rf /var/lib/apt/lists/*

# Scala/SBT (for FlexPRET build)
RUN apt-get update && apt-get install -y gnupg2 \
    && rm -rf /var/lib/apt/lists/*
RUN echo "deb https://repo.scala-sbt.org/scalasbt/debian all main" | tee /etc/apt/sources.list.d/sbt.list
RUN echo "deb https://repo.scala-sbt.org/scalasbt/debian /" | tee /etc/apt/sources.list.d/sbt_old.list
# BUG FIX: apt-key needs "-" to read the piped key from stdin; without it the
# key was never imported and apt-get update fails on the sbt repo signature.
# (apt-key is deprecated on Ubuntu 22.04 but still functional.)
RUN curl -sL "https://keyserver.ubuntu.com/pks/lookup?op=get&search=0x2EE0EA64E40A89B84B2DF73499E82A75642AC823" | apt-key add -
RUN apt-get update && apt-get install -y scala sbt \
    && rm -rf /var/lib/apt/lists/*

# RISC-V Toolchain (xPack)
ENV RISCV_VERSION=14.2.0-2
RUN mkdir -p /opt && cd /opt \
    && wget -q https://github.com/xpack-dev-tools/riscv-none-elf-gcc-xpack/releases/download/v${RISCV_VERSION}/xpack-riscv-none-elf-gcc-${RISCV_VERSION}-linux-x64.tar.gz \
    && tar -xzf xpack-riscv-none-elf-gcc-${RISCV_VERSION}-linux-x64.tar.gz \
    && rm xpack-riscv-none-elf-gcc-${RISCV_VERSION}-linux-x64.tar.gz
ENV RISCV_TOOL_PATH_PREFIX=/opt/xpack-riscv-none-elf-gcc-${RISCV_VERSION}
ENV PATH="${RISCV_TOOL_PATH_PREFIX}/bin:${PATH}"

# WCET analysis dependencies (from case-study)
RUN apt-get update && apt-get install -y \
    liblpsolve55-dev lp-solve \
    libc6-dev-i386 gcc-multilib \
    && rm -rf /var/lib/apt/lists/*

WORKDIR /workspace

# Setup environment script. The toolchain path is derived from the
# ${RISCV_VERSION} build-time ENV (expanded outside the single quotes) so the
# version is defined in exactly one place instead of being hard-coded again.
RUN echo '#!/bin/bash\n\
export RISCV_TOOL_PATH_PREFIX=/opt/xpack-riscv-none-elf-gcc-'"${RISCV_VERSION}"'\n\
export PATH="${RISCV_TOOL_PATH_PREFIX}/bin:${PATH}"\n\
if [ -d "/workspace/lingua-franca/build/install/lf-cli/bin" ]; then\n\
export PATH="/workspace/lingua-franca/build/install/lf-cli/bin:${PATH}"\n\
export LFC=/workspace/lingua-franca/build/install/lf-cli/bin/lfc\n\
fi\n\
if [ -f "/workspace/flexpret/env.bash" ]; then\n\
source /workspace/flexpret/env.bash\n\
fi' > /setup_env.sh && chmod +x /setup_env.sh

# Source the environment automatically in interactive root shells.
RUN echo 'source /setup_env.sh' >> /root/.bashrc

ENTRYPOINT ["/bin/bash"]
78 changes: 54 additions & 24 deletions benchmarks/scripts/experiment_timing.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,8 +13,8 @@
import imageio

# NOTE: Ensure that there is a credentials.py that defines the IP, username, and
# password of the target platform.
import credentials
# password of the target platform (only needed for remote mode).
# import credentials # Imported conditionally in main() when not in local mode

################## CONFIGS ##################

Expand Down Expand Up @@ -69,6 +69,19 @@
default="qnx/low_level_platform",
help="Specify the directory containing QNX support files."
)
parser.add_argument(
"-l",
"--local",
action="store_true",
help="Run benchmarks locally instead of on a remote embedded device."
)
parser.add_argument(
"-s",
"--select",
type=str,
action='append',
help="Select a list of programs to run instead of the entire directory (--select can be specified multiple times). No .lf extension is needed.",
)


def set_ssh_info():
Expand Down Expand Up @@ -558,7 +571,14 @@ def main(args=None):
# Parse arguments.
args = parser.parse_args(args)
platform = args.platform
IP, PW, UN = set_ssh_info()

# Conditionally import credentials and set SSH info (only for remote mode)
if not args.local and args.experiment_dir is None:
import credentials
globals()['credentials'] = credentials
IP, UN, PW = set_ssh_info()
else:
IP, UN, PW = None, None, None

# Variable declarations
expr_data_dirname = "experiment-data/"
Expand All @@ -579,10 +599,11 @@ def main(args=None):
timing_dir.mkdir(exist_ok=True)

# Create a directory for this experiment run.
# Time at which the script starts
# Time at which the script starts (command line --select takes priority over config)
programs_to_select = args.select if args.select else SELECT_PROGRAMS
if args.experiment_dir is None:
time = datetime.now().strftime("%Y-%m-%d_%H-%M-%S") # Format: Year-Month-Day_Hour-Minute-Second
expr_run_dir = timing_dir / (time + "-" + platform + "-" + "_".join(STATIC_SCHEDULER_NAMES) + "-" + ("_".join(SELECT_PROGRAMS) if len(SELECT_PROGRAMS) > 0 else "ALL"))
expr_run_dir = timing_dir / (time + "-" + platform + "-" + "_".join(STATIC_SCHEDULER_NAMES) + "-" + ("_".join(programs_to_select) if len(programs_to_select) > 0 else "ALL"))
expr_run_dir.mkdir()
else:
expr_run_dir = timing_dir / args.experiment_dir
Expand All @@ -601,26 +622,34 @@ def main(args=None):

if args.experiment_dir is None:
# Prepare arguments for each run_benchmark call.
args_1 = ["-hn=" + IP, "-un=" + UN, "-pwd=" + PW, "-pl=" + platform, "-f=--scheduler=NP", "-dd="+str(np_dir.resolve()), "--src=src", "--src-gen=src-gen"]
args_2 = ["-hn=" + IP, "-un=" + UN, "-pwd=" + PW, "-pl=" + platform, "-f=--scheduler=STATIC", "-f=--mapper=LB", "-dd="+str(lb_dir.resolve()), "--src=src", "--src-gen=src-gen"]
args_3 = ["-hn=" + IP, "-un=" + UN, "-pwd=" + PW, "-pl=" + platform, "-f=--scheduler=STATIC", "-f=--mapper=EGS", "-dd="+str(egs_dir.resolve()), "--src=src", "--src-gen=src-gen"]
if platform == "RPI4-QNX":
qnx_support_directory = Path(args.qnx_support_directory).resolve()
args_1.append("-qd=" + str(qnx_support_directory))
args_2.append("-qd=" + str(qnx_support_directory))
args_3.append("-qd=" + str(qnx_support_directory))

if args.local:
# LOCAL MODE: Use -l flag instead of SSH credentials
args_1 = ["-l", "-pl=" + platform, "-f=--scheduler=NP", "-dd="+str(np_dir.resolve()), "--src=src", "--src-gen=src-gen"]
args_2 = ["-l", "-pl=" + platform, "-f=--scheduler=STATIC", "-f=--mapper=LB", "-dd="+str(lb_dir.resolve()), "--src=src", "--src-gen=src-gen"]
args_3 = ["-l", "-pl=" + platform, "-f=--scheduler=STATIC", "-f=--mapper=EGS", "-dd="+str(egs_dir.resolve()), "--src=src", "--src-gen=src-gen"]
else:
# REMOTE MODE: Use SSH credentials
args_1 = ["-hn=" + IP, "-un=" + UN, "-pwd=" + PW, "-pl=" + platform, "-f=--scheduler=NP", "-dd="+str(np_dir.resolve()), "--src=src", "--src-gen=src-gen"]
args_2 = ["-hn=" + IP, "-un=" + UN, "-pwd=" + PW, "-pl=" + platform, "-f=--scheduler=STATIC", "-f=--mapper=LB", "-dd="+str(lb_dir.resolve()), "--src=src", "--src-gen=src-gen"]
args_3 = ["-hn=" + IP, "-un=" + UN, "-pwd=" + PW, "-pl=" + platform, "-f=--scheduler=STATIC", "-f=--mapper=EGS", "-dd="+str(egs_dir.resolve()), "--src=src", "--src-gen=src-gen"]
if platform == "RPI4-QNX":
qnx_support_directory = Path(args.qnx_support_directory).resolve()
args_1.append("-qd=" + str(qnx_support_directory))
args_2.append("-qd=" + str(qnx_support_directory))
args_3.append("-qd=" + str(qnx_support_directory))

if DASH_MODE:
args_2.append("-f=--dash")
args_3.append("-f=--dash")

# Select programs
if len(SELECT_PROGRAMS) > 0:
for prog in SELECT_PROGRAMS:

# Select programs from command line argument (takes priority over config)
programs_to_select = args.select if args.select else SELECT_PROGRAMS
if len(programs_to_select) > 0:
for prog in programs_to_select:
args_1.append("--select="+prog)
args_2.append("--select="+prog)
args_3.append("--select="+prog)

# Exclude programs based on scheduler.
if len(EXCLUDED_PROGRAMS) > 0:
for prog in EXCLUDED_PROGRAMS['NP']:
Expand All @@ -629,11 +658,11 @@ def main(args=None):
args_2.append("--exclude="+prog)
for prog in EXCLUDED_PROGRAMS['EGS']:
args_3.append("--exclude="+prog)

# Run the benchmark runner using the NP and the STATIC scheduler.
# NOTE: The 2nd run's src-gen is copied back the host. So it's better to
# be static because we want to inspect the graphs.
run_benchmark.main(args_1) # NP
run_benchmark.main(args_1) # NP
run_benchmark.main(args_2) # LB
run_benchmark.main(args_3) # EGS

Expand All @@ -646,9 +675,10 @@ def main(args=None):
except Exception as e:
print(f"Error occurred: {e}")

# Get a list of benchmark names
if len(SELECT_PROGRAMS) > 0:
program_names = SELECT_PROGRAMS
# Get a list of benchmark names (command line --select takes priority over config)
programs_to_select = args.select if args.select else SELECT_PROGRAMS
if len(programs_to_select) > 0:
program_names = programs_to_select
else:
# Extract all program names from the benchmark directory.
program_names = [str(file).split("/")[-1][:-3] for file in timing_benchmark_dir.glob('*') if file.is_file()]
Expand Down
Loading