diff --git a/.github/workflows/build-and-run.yml b/.github/workflows/build-and-run.yml index 1551f320..46f0696d 100644 --- a/.github/workflows/build-and-run.yml +++ b/.github/workflows/build-and-run.yml @@ -76,11 +76,11 @@ jobs: echo "Detected TornadoVM SDK: $FULL_SDK" # Export for current shell session - export TORNADO_SDK="$FULL_SDK" + export TORNADOVM_HOME="$FULL_SDK" export PATH="$FULL_SDK/bin:$JAVA_HOME/bin:$PATH" # Save for subsequent steps - echo "TORNADO_SDK=$FULL_SDK" >> $GITHUB_ENV + echo "TORNADOVM_HOME=$FULL_SDK" >> $GITHUB_ENV echo "PATH=$PATH" >> $GITHUB_ENV echo "=== Checking tornado CLI ===" @@ -89,105 +89,105 @@ jobs: - name: Build GPULlama3.java run: | cd ${{ github.workspace }} - echo "Using TORNADO_SDK=$TORNADO_SDK" - export PATH="$TORNADO_SDK/bin:$JAVA_HOME/bin:$PATH" + echo "Using TORNADOVM_HOME=$TORNADOVM_HOME" + export PATH="$TORNADOVM_HOME/bin:$JAVA_HOME/bin:$PATH" tornado --version ./mvnw clean package -DskipTests - name: FP16 - Run Llama-3.2-1B-Instruct-F16.gguf run: | cd ${{ github.workspace }} - export PATH="$TORNADO_SDK/bin:$JAVA_HOME/bin:$PATH" + export PATH="$TORNADOVM_HOME/bin:$JAVA_HOME/bin:$PATH" ./llama-tornado --gpu --${{ matrix.backend.name }} \ --model $MODELS_DIR/Llama-3.2-1B-Instruct-F16.gguf \ --prompt "Say hello" - name: FP16 - Run Qwen3-4B-f16.gguf run: | cd ${{ github.workspace }} - export PATH="$TORNADO_SDK/bin:$JAVA_HOME/bin:$PATH" + export PATH="$TORNADOVM_HOME/bin:$JAVA_HOME/bin:$PATH" ./llama-tornado --gpu --${{ matrix.backend.name }} \ --model $MODELS_DIR/Qwen3-4B-f16.gguf \ --prompt "Say hello" - name: FP16 - Run Mistral-7B-Instruct-v0.3.fp16.gguf run: | cd ${{ github.workspace }} - export PATH="$TORNADO_SDK/bin:$JAVA_HOME/bin:$PATH" + export PATH="$TORNADOVM_HOME/bin:$JAVA_HOME/bin:$PATH" ./llama-tornado --gpu --${{ matrix.backend.name }} \ --model $MODELS_DIR/Mistral-7B-Instruct-v0.3.fp16.gguf \ --prompt "Say hello" - name: FP16 - Run Qwen2.5-1.5b-instruct-fp16.gguf run: | cd ${{ github.workspace }} - export 
PATH="$TORNADO_SDK/bin:$JAVA_HOME/bin:$PATH" + export PATH="$TORNADOVM_HOME/bin:$JAVA_HOME/bin:$PATH" ./llama-tornado --gpu --${{ matrix.backend.name }} \ --model $MODELS_DIR/qwen2.5-1.5b-instruct-fp16.gguf \ --prompt "Say hello" - name: FP16 - Run Phi-3-mini-4k-instruct-fp16.gguf run: | cd ${{ github.workspace }} - export PATH="$TORNADO_SDK/bin:$JAVA_HOME/bin:$PATH" + export PATH="$TORNADOVM_HOME/bin:$JAVA_HOME/bin:$PATH" ./llama-tornado --gpu --${{ matrix.backend.name }} \ --model /$MODELS_DIR/Phi-3-mini-4k-instruct-fp16.gguf \ --prompt "Say hello" - name: FP16 - Run Granite-3.2-2b-instruct-f16.gguf run: | cd ${{ github.workspace }} - export PATH="$TORNADO_SDK/bin:$JAVA_HOME/bin:$PATH" + export PATH="$TORNADOVM_HOME/bin:$JAVA_HOME/bin:$PATH" ./llama-tornado --gpu --${{ matrix.backend.name }} \ --model /$MODELS_DIR/granite-3.2-2b-instruct-f16.gguf \ --prompt "Say hello" - name: FP16 - Run Granite-4.0-1b-F16.gguf run: | cd ${{ github.workspace }} - export PATH="$TORNADO_SDK/bin:$JAVA_HOME/bin:$PATH" + export PATH="$TORNADOVM_HOME/bin:$JAVA_HOME/bin:$PATH" ./llama-tornado --gpu --${{ matrix.backend.name }} \ --model /$MODELS_DIR/granite-4.0-1b-F16.gguf \ --prompt "Say hello" - name: Q8 - Run Llama-3.2-1B-Instruct-Q8_0.gguf run: | cd ${{ github.workspace }} - export PATH="$TORNADO_SDK/bin:$JAVA_HOME/bin:$PATH" + export PATH="$TORNADOVM_HOME/bin:$JAVA_HOME/bin:$PATH" ./llama-tornado --gpu --${{ matrix.backend.name }} \ --model $MODELS_DIR/Llama-3.2-1B-Instruct-Q8_0.gguf \ --prompt "Say hello" - name: Q8 - Run Qwen3-0.6B-Q8_0.gguf run: | cd ${{ github.workspace }} - export PATH="$TORNADO_SDK/bin:$JAVA_HOME/bin:$PATH" + export PATH="$TORNADOVM_HOME/bin:$JAVA_HOME/bin:$PATH" ./llama-tornado --gpu --${{ matrix.backend.name }} \ --model $MODELS_DIR/Qwen3-0.6B-Q8_0.gguf \ --prompt "Say hello" - name: Q8 - Run Phi-3-mini-4k-instruct-Q8_0.gguf run: | cd ${{ github.workspace }} - export PATH="$TORNADO_SDK/bin:$JAVA_HOME/bin:$PATH" + export 
PATH="$TORNADOVM_HOME/bin:$JAVA_HOME/bin:$PATH" ./llama-tornado --gpu --${{ matrix.backend.name }} \ --model $MODELS_DIR/Phi-3-mini-4k-instruct-Q8_0.gguf \ --prompt "Say hello" - name: Q8 - Run Qwen2.5-1.5b-instruct-q8_0.gguf run: | cd ${{ github.workspace }} - export PATH="$TORNADO_SDK/bin:$JAVA_HOME/bin:$PATH" + export PATH="$TORNADOVM_HOME/bin:$JAVA_HOME/bin:$PATH" ./llama-tornado --gpu --${{ matrix.backend.name }} \ --model $MODELS_DIR/qwen2.5-1.5b-instruct-q8_0.gguf \ --prompt "Say hello" - name: Q8 - Mistral-7B-Instruct-v0.3.Q8_0.gguf run: | cd ${{ github.workspace }} - export PATH="$TORNADO_SDK/bin:$JAVA_HOME/bin:$PATH" + export PATH="$TORNADOVM_HOME/bin:$JAVA_HOME/bin:$PATH" ./llama-tornado --gpu --${{ matrix.backend.name }} \ --model $MODELS_DIR/Mistral-7B-Instruct-v0.3.Q8_0.gguf \ --prompt "Say hello" - name: Q8 - Run Granite-3.2-2b-instruct-Q8.gguf run: | cd ${{ github.workspace }} - export PATH="$TORNADO_SDK/bin:$JAVA_HOME/bin:$PATH" + export PATH="$TORNADOVM_HOME/bin:$JAVA_HOME/bin:$PATH" ./llama-tornado --gpu --${{ matrix.backend.name }} \ --model /$MODELS_DIR/granite-3.2-2b-instruct-Q8_0.gguf \ --prompt "Say hello" - name: Q8 - Run Granite-4.0-1b-Q8_0.gguf run: | cd ${{ github.workspace }} - export PATH="$TORNADO_SDK/bin:$JAVA_HOME/bin:$PATH" + export PATH="$TORNADOVM_HOME/bin:$JAVA_HOME/bin:$PATH" ./llama-tornado --gpu --${{ matrix.backend.name }} \ --model /$MODELS_DIR/granite-4.0-1b-Q8_0.gguf \ --prompt "Say hello" diff --git a/.github/workflows/deploy-maven-central.yml b/.github/workflows/deploy-maven-central.yml index a30c48ef..d0714a8f 100644 --- a/.github/workflows/deploy-maven-central.yml +++ b/.github/workflows/deploy-maven-central.yml @@ -3,8 +3,7 @@ name: Deploy to Maven Central on: push: tags: - - 'v*' - - '[0-9]+.[0-9]+.[0-9]+*' + - 'v[0-9]+.[0-9]+.[0-9]+' workflow_run: workflows: ["Finalize GPULlama3 Release"] types: [completed] diff --git a/LlamaTornadoCli.java b/LlamaTornadoCli.java index 5a502aeb..cb0bf01c 100755 --- 
a/LlamaTornadoCli.java +++ b/LlamaTornadoCli.java @@ -1,8 +1,8 @@ //JAVA 21 //PREVIEW //DEPS io.github.beehive-lab:gpu-llama3:0.3.2-dev -//DEPS io.github.beehive-lab:tornado-api:2.1.0 -//DEPS io.github.beehive-lab:tornado-runtime:2.1.0 +//DEPS io.github.beehive-lab:tornado-api:2.2.0 +//DEPS io.github.beehive-lab:tornado-runtime:2.2.0 //SOURCES TornadoFlags.java // === Set to not get annoying warnings about annotation processing diff --git a/README.md b/README.md index e1591ea8..a9bb8822 100644 --- a/README.md +++ b/README.md @@ -70,8 +70,6 @@ Ensure you have the following installed and configured: ### Install, Build, and Run -When cloning this repository, use the `--recursive` flag to ensure that TornadoVM is properly included as submodule: - ```bash # Clone the repository with all submodules git clone https://github.com/beehive-lab/GPULlama3.java.git @@ -80,66 +78,32 @@ git clone https://github.com/beehive-lab/GPULlama3.java.git #### Install the TornadoVM SDK on Linux or macOS Ensure that your JAVA_HOME points to a supported JDK before using the SDK. Download an SDK package matching your OS, architecture, and accelerator backend (opencl, ptx). -All pre-built SDKs are available on the TornadoVM [Releases Page](https://github.com/beehive-lab/TornadoVM/releases). -#After extracting the SDK, add its bin/ directory to your PATH so the `tornado` command becomes available. +TornadoVM is distributed through our [**official website**](https://www.tornadovm.org/downloads) and **SDKMAN!**. Install a version that matches your OS, architecture, and accelerator backend. -##### Linux (x86_64) +All TornadoVM SDKs are available on the [SDKMAN! TornadoVM page](https://sdkman.io/sdks/tornadovm/). 
-```bash -wget https://github.com/beehive-lab/TornadoVM/releases/download/v2.1.0/tornadovm-2.1.0-opencl-linux-amd64.zip -unzip tornadovm-2.1.0-opencl-linux-amd64.zip -# Replace manually with the absolute path of the extracted folder -export TORNADOVM_HOME="/tornadovm-2.1.0-opencl" -export PATH=$TORNADO_SDK/bin:$PATH +#### SDKMAN! Installation (Recommended) -tornado --devices -tornado --version +##### Install SDKMAN! if not installed already +```bash +curl -s "https://get.sdkman.io" | bash +source "$HOME/.sdkman/bin/sdkman-init.sh" +sdk version ``` - -##### macOS (Apple Silicon) - +##### Install TornadoVM via SDKMAN! ```bash -wget https://github.com/beehive-lab/TornadoVM/releases/download/v2.1.0/tornadovm-2.1.0-opencl-mac-aarch64.zip -unzip tornadovm-2.1.0-opencl-mac-aarch64.zip -# Replace manually with the absolute path of the extracted folder -export TORNADOVM_HOME="/tornadovm-2.1.0-opencl" -export PATH=$TORNADO_SDK/bin:$PATH - -tornado --devices -tornado --version +sdk install tornadovm ``` -#### Build the GPULlama3.java - +#### Verify TornadoVM is Installed Correctly ```bash -# Navigate to the project directory -cd GPULlama3.java - -# Source the project-specific environment paths -> this will ensure the correct paths are set for the project and the TornadoVM SDK -# Expect to see: [INFO] Environment configured for Llama3 with TornadoVM at: $TORNADO_SDK -source set_paths - -# Build the project using Maven (skip tests for faster build) -# mvn clean package -DskipTests or just make -make - -# Run the model (make sure you have downloaded the model file first - see below) -./llama-tornado --gpu --verbose-init --opencl --model beehive-llama-3.2-1b-instruct-fp16.gguf --prompt "tell me a joke" +tornado --devices ``` - - ---------- ### TornadoVM-Accelerated Inference Performance and Optimization Status We are at the early stages of Java entering the AI world with features added to the JVM that enable faster execution such as GPU acceleration, Vector acceleration, 
high-performance access to off-heap memory and others. -

This repository provides the first Java-native implementation of Llama3 that automatically compiles and executes Java code on GPUs via TornadoVM. -The baseline numbers presented below provide a solid starting point for achieving more competitive performance compared to llama.cpp or native CUDA implementations. -[Our roadmap](https://github.com/beehive-lab/GPULlama3.java/blob/main/docs/GPULlama3_ROADMAP.md) provides the upcoming set of features that will dramatically improve the numbers below with the clear target being to achieve performance parity with the fastest implementations. -

-If you achieve additional performance data points (e.g. new hardware or platforms) please let us know to add them below. -

-In addition, if you are interested to learn more about the challenges of managed programming languages and GPU acceleration, you can read [our book](https://link.springer.com/book/10.1007/978-3-031-49559-5) or consult the [TornadoVM educational pages](https://www.tornadovm.org/resources). | Vendor / Backend | Hardware | Llama-3.2-1B-Instruct | Llama-3.2-3B-Instruct | Optimizations | diff --git a/TornadoFlags.java b/TornadoFlags.java index df956937..36d4e276 100644 --- a/TornadoFlags.java +++ b/TornadoFlags.java @@ -13,7 +13,7 @@ //JAVA_OPTIONS -XX:+UseParallelGC // === Native library path === -//JAVA_OPTIONS -Djava.library.path=${env.TORNADO_SDK}/lib +//JAVA_OPTIONS -Djava.library.path=${env.TORNADOVM_HOME}/lib // === Tornado runtime classes === //JAVA_OPTIONS -Dtornado.load.api.implementation=uk.ac.manchester.tornado.runtime.tasks.TornadoTaskGraph @@ -23,8 +23,8 @@ //JAVA_OPTIONS -Dtornado.load.annotation.parallel=uk.ac.manchester.tornado.api.annotations.Parallel // === Module system === -//JAVA_OPTIONS --module-path ${env.TORNADO_SDK}/share/java/tornado -//JAVA_OPTIONS --upgrade-module-path ${env.TORNADO_SDK}/share/java/graalJars +//JAVA_OPTIONS --module-path ${env.TORNADOVM_HOME}/share/java/tornado +//JAVA_OPTIONS --upgrade-module-path ${env.TORNADOVM_HOME}/share/java/graalJars //JAVA_OPTIONS --add-modules ALL-SYSTEM,tornado.runtime,tornado.annotation,tornado.drivers.common,tornado.drivers.opencl // === Common exports === diff --git a/llama-tornado b/llama-tornado index c98090f8..57a50f1c 100755 --- a/llama-tornado +++ b/llama-tornado @@ -26,12 +26,12 @@ class LlamaRunner: def __init__(self): self.java_home = os.environ.get("JAVA_HOME") - self.tornado_sdk = os.environ.get("TORNADO_SDK") + self.tornado_sdk = os.environ.get("TORNADOVM_HOME") self.llama_root = os.environ.get("LLAMA_ROOT") if not all([self.java_home, self.tornado_sdk, self.llama_root]): print("Error: Required environment variables not set") - print("Please ensure JAVA_HOME, TORNADO_SDK, and 
LLAMA_ROOT are defined") + print("Please ensure JAVA_HOME, TORNADOVM_HOME, and LLAMA_ROOT are defined") print("Note: check set_path in root dir -> source set_path") sys.exit(1) @@ -39,7 +39,7 @@ class LlamaRunner: """Validate that required paths exist.""" paths_to_check = { "JAVA_HOME": self.java_home, - "TORNADO_SDK": self.tornado_sdk, + "TORNADOVM_HOME": self.tornado_sdk, "LLAMA_ROOT": self.llama_root, } diff --git a/pom.xml b/pom.xml index 4e38bdb8..4dde1b1a 100644 --- a/pom.xml +++ b/pom.xml @@ -54,12 +54,12 @@ io.github.beehive-lab tornado-api - 2.1.0 + 2.2.0 io.github.beehive-lab tornado-runtime - 2.1.0 + 2.2.0 diff --git a/set_paths b/set_paths index 0f356cc8..84661272 100644 --- a/set_paths +++ b/set_paths @@ -8,13 +8,13 @@ export LLAMA_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" # Add TornadoVM and LLaMA bin directories to PATH -export PATH="${PATH}:${TORNADO_SDK}/bin:${LLAMA_ROOT}" +export PATH="${PATH}:${TORNADOVM_HOME}/bin:${LLAMA_ROOT}" # Optional: Set JAVA_HOME if required # export JAVA_HOME=/path/to/graalvm # export PATH="${JAVA_HOME}/bin:${PATH}" -echo "[INFO] Environment configured for LLaMA3 with TornadoVM at: $TORNADO_SDK" +echo "[INFO] Environment configured for LLaMA3 with TornadoVM at: $TORNADOVM_HOME" # ===== Notes ===== # After sourcing this script: # 1. TornadoVM will be available for GPU computation diff --git a/set_paths.cmd b/set_paths.cmd index 4a5fafbf..be531ff3 100644 --- a/set_paths.cmd +++ b/set_paths.cmd @@ -7,12 +7,8 @@ REM Resolve the absolute path to this script's directory set "LLAMA_ROOT=%~dp0" set "LLAMA_ROOT=%LLAMA_ROOT:~0,-1%" -REM Set TornadoVM root and SDK paths -set "TORNADO_ROOT=%LLAMA_ROOT%\external\tornadovm" -set "TORNADO_SDK=%TORNADO_ROOT%\bin\sdk" - REM Add TornadoVM SDK and LLaMA3 bin to PATH -set "PATH=%TORNADO_SDK%;%LLAMA_ROOT%;%PATH%" +set "PATH=%TORNADOVM_HOME%\bin;%LLAMA_ROOT%;%PATH%" REM Optional: Set JAVA_HOME if needed REM set "JAVA_HOME=C:\Path\To\GraalVM"