diff --git a/DirectProgramming/C++SYCL_FPGA/ReferenceDesigns/eigen/CMakeLists.txt b/DirectProgramming/C++SYCL_FPGA/ReferenceDesigns/eigen/CMakeLists.txt
new file mode 100755
index 0000000000..b2a53283e0
--- /dev/null
+++ b/DirectProgramming/C++SYCL_FPGA/ReferenceDesigns/eigen/CMakeLists.txt
@@ -0,0 +1,20 @@
+if(UNIX)
+ # Direct CMake to use icpx rather than the default C++ compiler/linker
+ set(CMAKE_CXX_COMPILER icpx)
+else() # Windows
+ # Force CMake to use icx-cl rather than the default C++ compiler/linker
+ # (needed on Windows only)
+ include (CMakeForceCompiler)
+ CMAKE_FORCE_CXX_COMPILER (icx-cl IntelDPCPP)
+ include (Platform/Windows-Clang)
+endif()
+
+cmake_minimum_required (VERSION 3.4)
+
+project(EIGEN CXX)
+
+set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR})
+set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR})
+set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR})
+
+add_subdirectory (src)
diff --git a/DirectProgramming/C++SYCL_FPGA/ReferenceDesigns/eigen/README.md b/DirectProgramming/C++SYCL_FPGA/ReferenceDesigns/eigen/README.md
new file mode 100755
index 0000000000..c5b72cbd2d
--- /dev/null
+++ b/DirectProgramming/C++SYCL_FPGA/ReferenceDesigns/eigen/README.md
@@ -0,0 +1,320 @@
+# `QRD` Sample
+
+The `QRD` reference design demonstrates high-performance QR decomposition of real and complex matrices on an FPGA.
+
+| Area | Description
+|:--- |:---
+| What you will learn | How to implement a high performance FPGA version of QR decomposition algorithm.
+| Time to complete | 1 hr (not including compile time)
+| Category | Reference Designs and End to End
+
+## Purpose
+
+This FPGA reference design demonstrates QR decomposition of matrices of complex/real numbers, a common operation employed in linear algebra. Matrix *A* (input) is decomposed into a product of an orthogonal matrix *Q* and an upper triangular matrix *R*.
+
+The algorithms employed by the reference design are the **Gram-Schmidt process** QR decomposition algorithm and the thin **QR factorization** method. The original algorithm has been modified and optimized for performance on FPGAs in this implementation.
+
+QR decomposition is used extensively in signal processing applications such as beamforming, multiple-input multiple-output (MIMO) processing, and Space Time Adaptive Processing (STAP).
+
+>**Note**: Read the *[QR decomposition](https://en.wikipedia.org/wiki/QR_decomposition)* Wikipedia article for more information on the algorithms.
+
+## Prerequisites
+
+This sample is part of the FPGA code samples.
+It is categorized as a Tier 4 sample that demonstrates a reference design.
+
+```mermaid
+flowchart LR
+ tier1("Tier 1: Get Started")
+ tier2("Tier 2: Explore the Fundamentals")
+ tier3("Tier 3: Explore the Advanced Techniques")
+ tier4("Tier 4: Explore the Reference Designs")
+
+ tier1 --> tier2 --> tier3 --> tier4
+
+ style tier1 fill:#0071c1,stroke:#0071c1,stroke-width:1px,color:#fff
+ style tier2 fill:#0071c1,stroke:#0071c1,stroke-width:1px,color:#fff
+ style tier3 fill:#0071c1,stroke:#0071c1,stroke-width:1px,color:#fff
+ style tier4 fill:#f96,stroke:#333,stroke-width:1px,color:#fff
+```
+
+Find more information about how to navigate this part of the code samples in the [FPGA top-level README.md](/DirectProgramming/C++SYCL_FPGA/README.md).
+You can also find more information about [troubleshooting build errors](/DirectProgramming/C++SYCL_FPGA/README.md#troubleshooting), [running the sample on the Intel® DevCloud](/DirectProgramming/C++SYCL_FPGA/README.md#build-and-run-the-samples-on-intel-devcloud-optional), [using Visual Studio Code with the code samples](/DirectProgramming/C++SYCL_FPGA/README.md#use-visual-studio-code-vs-code-optional), [links to selected documentation](/DirectProgramming/C++SYCL_FPGA/README.md#documentation), etc.
+
+| Optimized for | Description
+|:--- |:---
+| OS | Ubuntu* 18.04/20.04 <br> RHEL*/CentOS* 8 <br> SUSE* 15 <br> Windows* 10
+| Hardware | Intel® Agilex™, Arria® 10, and Stratix® 10 FPGAs
+| Software | Intel® oneAPI DPC++/C++ Compiler
+
+> **Note**: Even though the Intel® oneAPI DPC++/C++ Compiler is enough to compile for emulation, generating reports and generating RTL, there are extra software requirements for the simulation flow and FPGA compiles.
+>
+> For using the simulator flow, Intel® Quartus® Prime Pro Edition and one of the following simulators must be installed and accessible through your PATH:
+> - Questa*-Intel® FPGA Edition
+> - Questa*-Intel® FPGA Starter Edition
+> - ModelSim® SE
+>
+> When using the hardware compile flow, Intel® Quartus® Prime Pro Edition must be installed and accessible through your PATH.
+>
+> :warning: Make sure you add the device files associated with the FPGA that you are targeting to your Intel® Quartus® Prime installation.
+
+### Performance
+
+Performance results are based on testing as of July 29, 2020.
+
+> **Note**: Refer to the [Performance Disclaimers](/DirectProgramming/C++SYCL_FPGA/README.md#performance-disclaimers) section for important performance information.
+
+| Device | Throughput
+|:--- |:---
+| Intel® PAC with Intel® Arria® 10 GX FPGA | 24k matrices/s for complex matrices of size 128 * 128
+| Intel® FPGA PAC D5005 (with Intel Stratix® 10 SX) | 7k matrices/s for complex matrices of size 256 * 256
+
+
+## Key Implementation Details
+
+The QR decomposition algorithm factors a complex _m_ × _n_ matrix, where _m_ ≥ _n_. The algorithm computes the vector dot product of two columns of the matrix. In our FPGA implementation, the dot product is computed in a loop over the column's _m_ elements. The loop is unrolled fully to maximize throughput. The *m* complex multiplication operations are performed in parallel on the FPGA followed by sequential additions to compute the dot product result.
+
+The design uses the `-fp-relaxed` option, which permits the compiler to reorder floating point additions (to assume that floating point addition is commutative). The compiler reorders the additions so that the dot product arithmetic can be optimally implemented using the specialized floating point DSP (Digital Signal Processing) hardware in the FPGA.
+
+With this optimization, our FPGA implementation requires 4*m* DSPs to compute the complex floating point dot product or 2*m* DSPs for the real case. The matrix size is constrained by the total FPGA DSP resources available.
+
+By default, the design is parameterized to process 128 × 128 matrices when compiled targeting an Intel® Arria® 10 FPGA. It is parameterized to process 256 × 256 matrices when compiled targeting an Intel® Stratix® 10 or Intel® Agilex™ FPGA; however, the design can process matrices from 4 x 4 to 512 x 512.
+
+To optimize the performance-critical loop in its algorithm, the design leverages concepts discussed in the following FPGA tutorials:
+
+- **Triangular Loop Optimization** (triangular_loop)
+- **Explicit Pipelining with `fpga_reg`** (fpga_register)
+- **Loop `ivdep` Attribute** (loop_ivdep)
+- **Unrolling Loops** (loop_unroll)
+
+The key optimization techniques used are as follows:
+
+1. Refactoring the original Gram-Schmidt algorithm to merge two dot products into one, reducing the total number of dot products needed from three to two. This helps us reduce the DSPs required for the implementation.
+2. Converting the nested loop into a single merged loop and applying Triangular Loop optimizations. This allows us to generate a design that is very well pipelined.
+3. Fully vectorizing the dot products using loop unrolling.
+4. Using the compiler flag -Xsfp-relaxed to re-order floating point operations and allowing the inference of a specialized dot-product DSP. This further reduces the number of DSP blocks needed by the implementation, the overall latency, and pipeline depth.
+5. Using an efficient memory banking scheme to generate high performance hardware.
+6. Using the `fpga_reg` attribute to insert more pipeline stages where needed to improve the frequency achieved by the design.
+
+### Compiler Flags Used
+
+| Flag | Description
+|:--- |:---
+| `-Xshardware` | Target FPGA hardware (as opposed to FPGA emulator)
+| `-Xsclock=360MHz` | The FPGA backend attempts to achieve 360 MHz
+| `-Xsfp-relaxed` | Allows the FPGA backend to re-order floating point arithmetic operations (e.g. permit assuming (a + b + c) == (c + a + b) )
+| `-Xsparallel=2` | Use 2 cores when compiling the bitstream through Intel® Quartus®
+| `-Xsseed` | Specifies the Intel® Quartus® compile seed, to yield slightly higher fmax
+
+Additionally, the CMake build system can be configured using the following parameters:
+
+| cmake option | Description
+|:--- |:---
+| `-DSET_SIZE` | Specifies the number of rows/columns of the square matrix
+| `-DSET_FIXED_ITERATIONS` | Used to set the ivdep safelen attribute for the performance critical triangular loop
+| `-DIGNORE_DEFAULT_SEED` | Ignores the default Intel® Quartus® compile seed (`-Xsseed`) selected for the targeted board
+
+>**Note**: The default values for `-Xsseed`, `-DSET_FIXED_ITERATIONS` and `-DSET_SIZE` depend on the board being targeted.
+
+## Build the `QRD` Design
+
+> **Note**: When working with the command-line interface (CLI), you should configure the oneAPI toolkits using environment variables.
+> Set up your CLI environment by sourcing the `setvars` script located in the root of your oneAPI installation every time you open a new terminal window.
+> This practice ensures that your compiler, libraries, and tools are ready for development.
+>
+> Linux*:
+> - For system wide installations: `. /opt/intel/oneapi/setvars.sh`
+> - For private installations: ` . ~/intel/oneapi/setvars.sh`
+> - For non-POSIX shells, like csh, use the following command: `bash -c 'source <install-dir>/setvars.sh ; exec csh'`
+>
+> Windows*:
+> - `C:\Program Files(x86)\Intel\oneAPI\setvars.bat`
+> - Windows PowerShell*, use the following command: `cmd.exe "/K" '"C:\Program Files (x86)\Intel\oneAPI\setvars.bat" && powershell'`
+>
+> For more information on configuring environment variables, see [Use the setvars Script with Linux* or macOS*](https://www.intel.com/content/www/us/en/develop/documentation/oneapi-programming-guide/top/oneapi-development-environment-setup/use-the-setvars-script-with-linux-or-macos.html) or [Use the setvars Script with Windows*](https://www.intel.com/content/www/us/en/develop/documentation/oneapi-programming-guide/top/oneapi-development-environment-setup/use-the-setvars-script-with-windows.html).
+
+### On Linux*
+
+1. Change to the sample directory.
+2. Configure the build system for the Agilex™ device family, which is the default.
+
+ ```
+ mkdir build
+ cd build
+ cmake ..
+ ```
+
+ > **Note**: You can change the default target by using the command:
+ > ```
+   > cmake .. -DFPGA_DEVICE=<FPGA device family or FPGA part number>
+ > ```
+ >
+ > Alternatively, you can target an explicit FPGA board variant and BSP by using the following command:
+ > ```
+   > cmake .. -DFPGA_DEVICE=<board-support-package>:<board-variant>
+ > ```
+ >
+ > You will only be able to run an executable on the FPGA if you specified a BSP.
+
+3. Compile the design. (The provided targets match the recommended development flow.)
+
+ 1. Compile for emulation (fast compile time, targets emulated FPGA device).
+ ```
+ make fpga_emu
+ ```
+ 2. Compile for simulation (fast compile time, targets simulator FPGA device):
+ ```
+ make fpga_sim
+ ```
+ 3. Generate HTML performance report.
+ ```
+ make report
+ ```
+      The report resides at `eigen_report.prj/reports/report.html`.
+
+ 4. Compile for FPGA hardware (longer compile time, targets FPGA device).
+ ```
+ make fpga
+ ```
+
+### On Windows*
+
+1. Change to the sample directory.
+2. Configure the build system for the Agilex™ device family, which is the default.
+ ```
+ mkdir build
+ cd build
+ cmake -G "NMake Makefiles" ..
+ ```
+
+ > **Note**: You can change the default target by using the command:
+ > ```
+   > cmake -G "NMake Makefiles" .. -DFPGA_DEVICE=<FPGA device family or FPGA part number>
+ > ```
+ >
+ > Alternatively, you can target an explicit FPGA board variant and BSP by using the following command:
+ > ```
+   > cmake -G "NMake Makefiles" .. -DFPGA_DEVICE=<board-support-package>:<board-variant>
+ > ```
+ >
+ > You will only be able to run an executable on the FPGA if you specified a BSP.
+
+3. Compile the design. (The provided targets match the recommended development flow.)
+
+ 1. Compile for emulation (fast compile time, targets emulated FPGA device).
+ ```
+ nmake fpga_emu
+ ```
+ 2. Compile for simulation (fast compile time, targets simulator FPGA device):
+ ```
+ nmake fpga_sim
+ ```
+ 3. Generate HTML performance report.
+ ```
+ nmake report
+ ```
+      The report resides at `eigen_report.prj/reports/report.html`.
+
+ 4. Compile for FPGA hardware (longer compile time, targets FPGA device).
+ ```
+ nmake fpga
+ ```
+>**Note**: If you encounter any issues with long paths when compiling under Windows*, you may have to create your ‘build’ directory in a shorter path, for example `C:\samples\build`. You can then run cmake from that directory, and provide cmake with the full path to your sample directory.
+
+## Run the `QRD` Design
+
+### Configurable Parameters
+
+| Argument | Description
+|:--- |:---
+| `<num_repetitions>` | (Optional) Specifies the number of times to repeat the decomposition of a set of 8 matrices (only 1 matrix when running simulation). Its default value is **16** for the emulation flow, **1** for the simulation flow and **819200** for the FPGA flow.
+
+You can perform the QR decomposition of the set of matrices repeatedly. This step performs the following:
+- Generates the set of random matrices.
+- Computes the QR decomposition of the set of matrices.
+- Repeats the decomposition multiple times (specified as a command line argument) to evaluate performance.
+
+### On Linux
+
+#### Run on FPGA Emulator
+
+1. Increase the amount of memory that the emulator runtime is permitted to allocate by exporting the `CL_CONFIG_CPU_FORCE_PRIVATE_MEM_SIZE` environment variable before running the executable.
+2. Run the sample on the FPGA emulator (the kernel executes on the CPU).
+ ```
+ export CL_CONFIG_CPU_FORCE_PRIVATE_MEM_SIZE=32MB
+   ./eigen.fpga_emu
+ ```
+
+#### Run on FPGA Simulator
+
+1. Run the sample on the FPGA simulator.
+ ```
+   CL_CONTEXT_MPSIM_DEVICE_INTELFPGA=1 ./eigen.fpga_sim
+ ```
+
+#### Run on FPGA
+
+1. Run the sample on the FPGA device (only if you ran `cmake` with `-DFPGA_DEVICE=<board-support-package>:<board-variant>`).
+   ```
+   ./eigen.fpga
+ ```
+
+### On Windows
+
+#### Run on FPGA Emulator
+
+1. Increase the amount of memory that the emulator runtime is permitted to allocate by setting the `CL_CONFIG_CPU_FORCE_PRIVATE_MEM_SIZE` environment variable before running the executable.
+2. Run the sample on the FPGA emulator (the kernel executes on the CPU).
+ ```
+ set CL_CONFIG_CPU_FORCE_PRIVATE_MEM_SIZE=32MB
+   eigen.fpga_emu.exe
+ ```
+
+#### Run on FPGA Simulator
+
+1. Run the sample on the FPGA simulator.
+ ```
+ set CL_CONTEXT_MPSIM_DEVICE_INTELFPGA=1
+   eigen.fpga_sim.exe
+ set CL_CONTEXT_MPSIM_DEVICE_INTELFPGA=
+ ```
+
+#### Run on FPGA
+
+1. Run the sample on the FPGA device (only if you ran `cmake` with `-DFPGA_DEVICE=<board-support-package>:<board-variant>`).
+   ```
+   eigen.fpga.exe
+ ```
+
+## Example Output
+
+Example Output when running on **Intel® PAC with Intel® Arria® 10 GX FPGA** for 8 matrices 819200 times (each matrix consisting of 128x128 complex numbers).
+
+```
+Device name: pac_a10 : Intel PAC Platform (pac_f000000)
+Generating 8 random complex matrices of size 128x128
+Running QR decomposition of 8 matrices 819200 times
+ Total duration: 268.733 s
+Throughput: 24.387k matrices/s
+Verifying results...
+PASSED
+```
+
+Example output when running on **Intel® FPGA PAC D5005 (with Intel Stratix® 10 SX)** for the decomposition of 8 matrices 819200 times (each matrix consisting of 256x256 complex numbers).
+
+```
+Device name: pac_s10 : Intel PAC Platform (pac_f100000)
+Generating 8 random complex matrices of size 256x256
+Running QR decomposition of 8 matrices 819200 times
+ Total duration: 888.077 s
+Throughput: 7.37954k matrices/s
+Verifying results...
+PASSED
+```
+
+## License
+
+Code samples are licensed under the MIT license. See [License.txt](/License.txt) for details.
+
+Third party program Licenses can be found here: [third-party-programs.txt](/third-party-programs.txt).
diff --git a/DirectProgramming/C++SYCL_FPGA/ReferenceDesigns/eigen/sample.json b/DirectProgramming/C++SYCL_FPGA/ReferenceDesigns/eigen/sample.json
new file mode 100755
index 0000000000..62c88881be
--- /dev/null
+++ b/DirectProgramming/C++SYCL_FPGA/ReferenceDesigns/eigen/sample.json
@@ -0,0 +1,77 @@
+{
+ "guid": "3228581F-9DF8-4696-9B1C-0B31286B97C3",
+ "name": "QRD",
+ "categories": ["Toolkit/oneAPI Direct Programming/C++SYCL FPGA/Reference Designs"],
+  "description": "Reference design demonstrating high-performance QR Decomposition (QRD) of real and complex matrices on an Intel® FPGA",
+ "toolchain": ["icpx"],
+ "os": ["linux", "windows"],
+ "builder": ["ide", "cmake"],
+ "targetDevice": ["FPGA"],
+ "languages": [{"cpp":{}}],
+ "commonFolder": {
+ "base": "../..",
+ "include": [
+ "README.md",
+      "ReferenceDesigns/eigen",
+ "include"
+ ],
+ "exclude": []
+ },
+ "ciTests": {
+ "linux": [
+ {
+ "id": "fpga_emu",
+ "env": [
+ "export CL_CONFIG_CPU_FORCE_PRIVATE_MEM_SIZE=32MB"
+ ],
+ "steps": [
+ "icpx --version",
+ "mkdir build",
+ "cd build",
+ "cmake ..",
+ "make fpga_emu",
+        "./eigen.fpga_emu"
+ ]
+ },
+ {
+ "id": "report",
+ "steps": [
+ "icpx --version",
+ "mkdir build",
+ "cd build",
+ "cmake ..",
+ "make report"
+ ]
+ }
+ ],
+ "windows": [
+ {
+ "id": "fpga_emu",
+ "env": [
+ "set CL_CONFIG_CPU_FORCE_PRIVATE_MEM_SIZE=32MB"
+ ],
+ "steps": [
+ "icpx --version",
+ "cd ../..",
+ "mkdir build",
+ "cd build",
+        "cmake -G \"NMake Makefiles\" ../ReferenceDesigns/eigen",
+        "nmake fpga_emu",
+        "eigen.fpga_emu.exe"
+ ]
+ },
+ {
+ "id": "report",
+ "steps": [
+ "icpx --version",
+ "cd ../..",
+ "mkdir build",
+ "cd build",
+        "cmake -G \"NMake Makefiles\" ../ReferenceDesigns/eigen",
+ "nmake report"
+ ]
+ }
+ ]
+ },
+ "expertise": "Reference Designs and End to End"
+}
diff --git a/DirectProgramming/C++SYCL_FPGA/ReferenceDesigns/eigen/src/CMakeLists.txt b/DirectProgramming/C++SYCL_FPGA/ReferenceDesigns/eigen/src/CMakeLists.txt
new file mode 100644
index 0000000000..459d8bc1fe
--- /dev/null
+++ b/DirectProgramming/C++SYCL_FPGA/ReferenceDesigns/eigen/src/CMakeLists.txt
@@ -0,0 +1,147 @@
+set(TARGET_NAME eigen)
+set(SOURCE_FILE eigen_demo.cpp)
+set(EMULATOR_TARGET ${TARGET_NAME}.fpga_emu)
+set(SIMULATOR_TARGET ${TARGET_NAME}.fpga_sim)
+set(FPGA_TARGET ${TARGET_NAME}.fpga)
+set(FPGA_EARLY_IMAGE ${TARGET_NAME}_report.a)
+
+# FPGA board selection
+if(NOT DEFINED FPGA_DEVICE)
+ set(FPGA_DEVICE "Agilex")
+ set(DEVICE_FLAG "Agilex")
+ message(STATUS "FPGA_DEVICE was not specified.\
+ \nConfiguring the design to the default FPGA family: ${FPGA_DEVICE}\
+ \nPlease refer to the README for information on target selection.")
+ set(BSP_FLAG "")
+else()
+ string(TOLOWER ${FPGA_DEVICE} FPGA_DEVICE_NAME)
+ if(FPGA_DEVICE_NAME MATCHES ".*a10.*" OR FPGA_DEVICE_NAME MATCHES ".*arria10.*")
+ set(DEVICE_FLAG "A10")
+ elseif(FPGA_DEVICE_NAME MATCHES ".*s10.*" OR FPGA_DEVICE_NAME MATCHES ".*stratix10.*")
+ set(DEVICE_FLAG "S10")
+ elseif(FPGA_DEVICE_NAME MATCHES ".*agilex.*")
+ set(DEVICE_FLAG "Agilex")
+ endif()
+ message(STATUS "Configuring the design with the following target: ${FPGA_DEVICE}")
+
+ # Check if the target is a BSP
+ if(IS_BSP MATCHES "1" OR FPGA_DEVICE MATCHES ".*pac_a10.*|.*pac_s10.*")
+ set(BSP_FLAG "-DIS_BSP")
+ else()
+ set(BSP_FLAG "")
+ message(STATUS "The selected target ${FPGA_DEVICE} is assumed to be an FPGA part number, so the IS_BSP macro will not be passed to your C++ code.")
+ message(STATUS "If the target is actually a BSP, run cmake with -DIS_BSP=1 to pass the IS_BSP macro to your C++ code.")
+ endif()
+endif()
+
+if(NOT DEFINED DEVICE_FLAG)
+ message(FATAL_ERROR "An unrecognized or custom board was passed, but DEVICE_FLAG was not specified. \
+ Make sure you have set -DDEVICE_FLAG=A10, -DDEVICE_FLAG=S10 or -DDEVICE_FLAG=Agilex")
+endif()
+
+# This is a Windows-specific flag that enables error handling in host code
+if(WIN32)
+ set(AC_TYPES_COMPILE_FLAG "/Qactypes")
+ set(AC_TYPES_LINK_FLAG "/Qactypes")
+ set(PLATFORM_SPECIFIC_COMPILE_FLAGS "/EHsc /Wall /fp:precise -Xsfp-relaxed")
+ set(PLATFORM_SPECIFIC_LINK_FLAGS "/fp:precise -Xsfp-relaxed")
+ set(STACK_FLAG "/F 12582912")
+else()
+ set(AC_TYPES_COMPILE_FLAG "-qactypes")
+ set(AC_TYPES_LINK_FLAG "")
+ set(PLATFORM_SPECIFIC_COMPILE_FLAGS "-Wall -fno-finite-math-only")
+ set(PLATFORM_SPECIFIC_LINK_FLAGS "")
+ set(STACK_FLAG "")
+endif()
+
+
+if(DEVICE_FLAG MATCHES "A10")
+ # A10 parameters
+ set(SIZE 4)
+ set(FIXED_ITERATIONS 64)
+ set(SEED "-Xsseed=7")
+elseif(DEVICE_FLAG MATCHES "S10")
+ # S10 parameters
+ set(SIZE 4)
+ set(FIXED_ITERATIONS 110)
+ set(SEED "-Xsseed=2")
+elseif(DEVICE_FLAG MATCHES "Agilex")
+ # Agilex™ parameters
+ set(SIZE 4)
+ set(FIXED_ITERATIONS 110)
+ set(SEED "-Xsseed=5")
+else()
+ message(FATAL_ERROR "Unreachable")
+endif()
+
+
+if(IGNORE_DEFAULT_SEED)
+ set(SEED "")
+endif()
+
+if(DEFINED SET_SIZE)
+ set(SIZE ${SET_SIZE})
+endif()
+
+if(DEFINED SET_FIXED_ITERATIONS)
+ set(FIXED_ITERATIONS ${SET_FIXED_ITERATIONS})
+endif()
+
+message(STATUS "SIZE=${SIZE}")
+message(STATUS "FIXED_ITERATIONS=${FIXED_ITERATIONS}")
+message(STATUS "SEED=${SEED}")
+
+# A SYCL ahead-of-time (AoT) compile processes the device code in two stages.
+# 1. The "compile" stage compiles the device code to an intermediate representation (SPIR-V).
+# 2. The "link" stage invokes the compiler's FPGA backend before linking.
+# For this reason, FPGA backend flags must be passed as link flags in CMake.
+set(EMULATOR_COMPILE_FLAGS "-fsycl -fintelfpga -Wall ${PLATFORM_SPECIFIC_COMPILE_FLAGS} ${AC_TYPES_COMPILE_FLAG} -Wformat-security -Werror=format-security -fbracket-depth=512 -DFIXED_ITERATIONS=${FIXED_ITERATIONS} -DCOMPLEX=${COMPLEX} -DSIZE=${SIZE} -DFPGA_EMULATOR ${BSP_FLAG}")
+set(EMULATOR_LINK_FLAGS "-fsycl -fintelfpga ${PLATFORM_SPECIFIC_LINK_FLAGS} ${STACK_FLAG} ${AC_TYPES_LINK_FLAG} ${BSP_FLAG}")
+set(SIMULATOR_COMPILE_FLAGS "-fsycl -fintelfpga -Wall ${PLATFORM_SPECIFIC_COMPILE_FLAGS} ${AC_TYPES_COMPILE_FLAG} -DFPGA_SIMULATOR -fbracket-depth=512 -DFIXED_ITERATIONS=${FIXED_ITERATIONS} -DCOMPLEX=${COMPLEX} -DSIZE=${SIZE} ${USER_HARDWARE_FLAGS} ${BSP_FLAG}")
+set(SIMULATOR_LINK_FLAGS "-fsycl -fintelfpga ${PLATFORM_SPECIFIC_LINK_FLAGS} ${STACK_FLAG} -Xssimulation -Xsghdl -Xstarget=${FPGA_DEVICE} ${USER_SIMULATOR_FLAGS} ${AC_TYPES_LINK_FLAG} ${BSP_FLAG}")
+set(HARDWARE_COMPILE_FLAGS "-fsycl -fintelfpga -Wall ${PLATFORM_SPECIFIC_COMPILE_FLAGS} ${AC_TYPES_COMPILE_FLAG} -Wformat-security -Werror=format-security -fbracket-depth=512 -DFIXED_ITERATIONS=${FIXED_ITERATIONS} -DCOMPLEX=${COMPLEX} -DSIZE=${SIZE} -DFPGA_HARDWARE ${BSP_FLAG}")
+set(REPORT_LINK_FLAGS "-fsycl -fintelfpga -Xshardware ${PLATFORM_SPECIFIC_LINK_FLAGS} -Xsparallel=2 ${SEED} -Xstarget=${FPGA_DEVICE} ${USER_HARDWARE_FLAGS} ${AC_TYPES_LINK_FLAG} ${BSP_FLAG}")
+set(HARDWARE_LINK_FLAGS "${REPORT_LINK_FLAGS} ${STACK_FLAG} ${BSP_FLAG}")
+# use cmake -D USER_HARDWARE_FLAGS= to set extra flags for FPGA backend compilation
+
+###############################################################################
+### FPGA Emulator
+###############################################################################
+add_executable(${EMULATOR_TARGET} ${SOURCE_FILE})
+target_include_directories(${EMULATOR_TARGET} PRIVATE ../../../include)
+set_target_properties(${EMULATOR_TARGET} PROPERTIES COMPILE_FLAGS "${EMULATOR_COMPILE_FLAGS}")
+set_target_properties(${EMULATOR_TARGET} PROPERTIES LINK_FLAGS "${EMULATOR_LINK_FLAGS}")
+add_custom_target(fpga_emu DEPENDS ${EMULATOR_TARGET})
+
+###############################################################################
+### Generate Report
+###############################################################################
+# The compile output is not an executable, but an intermediate compilation result unique to SYCL.
+add_executable(${FPGA_EARLY_IMAGE} EXCLUDE_FROM_ALL ${SOURCE_FILE})
+target_include_directories(${FPGA_EARLY_IMAGE} PRIVATE ../../../include)
+add_custom_target(report DEPENDS ${FPGA_EARLY_IMAGE})
+set_target_properties(${FPGA_EARLY_IMAGE} PROPERTIES COMPILE_FLAGS "${HARDWARE_COMPILE_FLAGS}")
+set_target_properties(${FPGA_EARLY_IMAGE} PROPERTIES LINK_FLAGS "${REPORT_LINK_FLAGS} -fsycl-link=early")
+# fsycl-link=early stops the compiler after RTL generation, before invoking Quartus
+
+###############################################################################
+### FPGA Simulator
+###############################################################################
+add_executable(${SIMULATOR_TARGET} EXCLUDE_FROM_ALL ${SOURCE_FILE})
+target_include_directories(${SIMULATOR_TARGET} PRIVATE ../../../include)
+add_custom_target(fpga_sim DEPENDS ${SIMULATOR_TARGET})
+set_target_properties(${SIMULATOR_TARGET} PROPERTIES COMPILE_FLAGS "${SIMULATOR_COMPILE_FLAGS}")
+set_target_properties(${SIMULATOR_TARGET} PROPERTIES LINK_FLAGS "${SIMULATOR_LINK_FLAGS} -reuse-exe=${CMAKE_BINARY_DIR}/${SIMULATOR_TARGET}")
+# The -reuse-exe flag enables rapid recompilation of host-only code changes.
+# See C++SYCL_FPGA/GettingStarted/fast_recompile for details.
+
+###############################################################################
+### FPGA Hardware
+###############################################################################
+add_executable(${FPGA_TARGET} EXCLUDE_FROM_ALL ${SOURCE_FILE})
+target_include_directories(${FPGA_TARGET} PRIVATE ../../../include)
+add_custom_target(fpga DEPENDS ${FPGA_TARGET})
+set_target_properties(${FPGA_TARGET} PROPERTIES COMPILE_FLAGS "${HARDWARE_COMPILE_FLAGS}")
+set_target_properties(${FPGA_TARGET} PROPERTIES LINK_FLAGS "${HARDWARE_LINK_FLAGS} -reuse-exe=${CMAKE_BINARY_DIR}/${FPGA_TARGET}")
+# The -reuse-exe flag enables rapid recompilation of host-only code changes.
+# See C++SYCL_FPGA/GettingStarted/fast_recompile for details.
diff --git a/DirectProgramming/C++SYCL_FPGA/ReferenceDesigns/eigen/src/eigen.hpp b/DirectProgramming/C++SYCL_FPGA/ReferenceDesigns/eigen/src/eigen.hpp
new file mode 100644
index 0000000000..62fd5f19f9
--- /dev/null
+++ b/DirectProgramming/C++SYCL_FPGA/ReferenceDesigns/eigen/src/eigen.hpp
@@ -0,0 +1,162 @@
+#ifndef __Eigen_HPP__
+#define __Eigen_HPP__
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "memory_transfers.hpp"
+#include "streaming_eigen.hpp"
+#include "tuple.hpp"
+
+// Forward declare the kernel and pipe names
+// (This prevents unwanted name mangling in the optimization report.)
+class EigenDDRToLocalMem;
+class Eigen;
+class EigenLocalMemToDDRQ;
+class EigenLocalMemToDDRR;
+class APipe;
+class QPipe;
+class RPipe;
+
+/*
+  Implementation of an Eigen values/vectors solver using multiple streaming
+  kernels. Can be configured by datatype and matrix size; works with real
+  square matrices.
+*/
+template
+void EigenImpl(
+ std::vector &a_matrix, // Input matrix to decompose
+ std::vector &q_matrix, // Output matrix Q
+ std::vector &eigen_values_matrix, // Output Eigen values
+ sycl::queue &q, // Device queue
+ int matrix_count, // Number of matrices to decompose
+ int repetitions // Number of repetitions, for performance evaluation
+) {
+ constexpr int kMatrixSize = k_size * k_size;
+ constexpr int kEValuesSize = k_size;
+ constexpr int kNumElementsPerDDRBurst = 8;
+
+ using PipeType = fpga_tools::NTuple;
+
+ // Pipes to communicate the A, Q and R matrices between kernels
+ using AMatrixPipe = sycl::ext::intel::pipe;
+ using QMatrixPipe = sycl::ext::intel::pipe;
+ using EValuesPipe =
+ sycl::ext::intel::pipe;
+
+ // Allocate FPGA DDR memory.
+#if defined(IS_BSP)
+ T *a_device = sycl::malloc_device(kMatrixSize * matrix_count, q);
+ T *q_device = sycl::malloc_device(kMatrixSize * matrix_count, q);
+ T *eigen_values_device =
+ sycl::malloc_device(kEValuesSize * matrix_count, q);
+#else
+  // malloc_device is not supported when targeting an FPGA part/family
+ T *a_device = sycl::malloc_shared(kMatrixSize * matrix_count, q);
+ T *q_device = sycl::malloc_shared(kMatrixSize * matrix_count, q);
+ T *eigen_values_device =
+ sycl::malloc_shared(kEValuesSize * matrix_count, q);
+#endif
+
+ if (a_device == nullptr) {
+ std::cout << "Failed to allocated memory for the A matrix" << std::endl;
+ std::terminate();
+ }
+
+ std::cout << "kMatrixSize " << kMatrixSize << std::endl;
+ std::cout << "matrix_count " << matrix_count << std::endl;
+ std::cout << "sizeof(T) " << int(sizeof(T)) << std::endl;
+
+ q.memcpy(a_device, a_matrix.data(), kMatrixSize * matrix_count * sizeof(T))
+ .wait();
+
+ auto ddr_write_event = q.submit([&](sycl::handler &h) {
+ h.single_task([=]() [[intel::kernel_args_restrict]] {
+ MatrixReadFromDDRToPipe(a_device, matrix_count, repetitions);
+ });
+ });
+
+ // Read the A matrix from the AMatrixPipe pipe and compute the QR
+ // decomposition. Write the Q and R output matrices to the QMatrixPipe
+ // and EValuesPipe pipes.
+ q.single_task(
+ fpga_linalg::StreamingEigen());
+
+ auto q_event =
+ q.single_task([=]() [[intel::kernel_args_restrict]] {
+ // Read the Q matrix from the QMatrixPipe pipe and copy it to the
+ // FPGA DDR
+ MatrixReadPipeToDDR(q_device, matrix_count, repetitions);
+ });
+
+ auto r_event = q.single_task([=
+ ]() [[intel::kernel_args_restrict]] {
+ // Read the R matrix from the EValuesPipe pipe and copy it to the
+ // FPGA DDR
+
+#if defined(IS_BSP)
+ // When targeting a BSP, we instruct the compiler that this pointer
+ // lives on the device.
+ // Knowing this, the compiler won't generate hardware to
+ // potentially get data from the host.
+ sycl::device_ptr vector_ptr_located(eigen_values_device);
+#else
+ // Device pointers are not supported when targeting an FPGA
+ // family/part
+ T* vector_ptr_located(eigen_values_device);
+#endif
+
+ // Repeat matrix_count complete R matrix pipe reads
+ // for as many repetitions as needed
+ for (int repetition_index = 0; repetition_index < repetitions;
+ repetition_index++) {
+      [[intel::loop_coalesce(2)]] // NO-FORMAT: Attribute
+ for (int matrix_index = 0; matrix_index < matrix_count; matrix_index++) {
+ for (int r_idx = 0; r_idx < kEValuesSize; r_idx++) {
+ vector_ptr_located[matrix_index * kEValuesSize + r_idx] =
+ EValuesPipe::read();
+ } // end of r_idx
+      } // end of matrix_index
+    } // end of repetition_index
+ });
+
+ q_event.wait();
+ r_event.wait();
+
+ // Compute the total time the execution lasted
+ auto start_time = ddr_write_event.template get_profiling_info<
+ sycl::info::event_profiling::command_start>();
+ auto end_time = q_event.template get_profiling_info<
+ sycl::info::event_profiling::command_end>();
+ double diff = (end_time - start_time) / 1.0e9;
+ q.throw_asynchronous();
+
+ std::cout << " Total duration: " << diff << " s" << std::endl;
+ std::cout << "Throughput: " << repetitions * matrix_count / diff * 1e-3
+ << "k matrices/s" << std::endl;
+
+ // Copy the Q and R matrices result from the FPGA DDR to the host memory
+ q.memcpy(q_matrix.data(), q_device, kMatrixSize * matrix_count * sizeof(T))
+ .wait();
+ q.memcpy(eigen_values_matrix.data(), eigen_values_device,
+ kEValuesSize * matrix_count * sizeof(T))
+ .wait();
+
+ // Clean allocated FPGA memory
+ free(a_device, q);
+ free(q_device, q);
+ free(eigen_values_device, q);
+}
+
+#endif /* __Eigen_HPP__ */
diff --git a/DirectProgramming/C++SYCL_FPGA/ReferenceDesigns/eigen/src/eigen_demo.cpp b/DirectProgramming/C++SYCL_FPGA/ReferenceDesigns/eigen/src/eigen_demo.cpp
new file mode 100644
index 0000000000..32d30e967b
--- /dev/null
+++ b/DirectProgramming/C++SYCL_FPGA/ReferenceDesigns/eigen/src/eigen_demo.cpp
@@ -0,0 +1,518 @@
+#include
+
+#include
+#include
+
+#include
+
+#include "exception_handler.hpp"
+
+#include "eigen.hpp"
+
+#define DEBUG
+
+#ifdef FPGA_SIMULATOR
+#define SIZE_V 8
+#else
+#define SIZE_V SIZE
+#endif
+
+/*
+ COLS_COMPONENT, ROWS_COMPONENT and FIXED_ITERATIONS are defined
+ by the build system.
+
+ Function arguments:
+ - a_matrix: The input matrix. Interpreted as a transposed matrix.
+ - q_matrix: The Q matrix. The function will overwrite this matrix.
+ - eigen_values_matrix: The eigen values output vector. The function will
+ overwrite this vector with one computed
+ eigen value per row of the input matrix.
+ - q: The device queue.
+ - matrix_count: Number of matrices to decompose.
+ - repetitions: The number of repetitions of the computation to execute.
+ (for performance evaluation)
+*/
+// Real single precision floating-point eigen values computation
+void Eigen(std::vector &a_matrix, std::vector &q_matrix,
+ std::vector &eigen_values_matrix, sycl::queue &q,
+ int matrix_count,
+ int repetitions) {
+ EigenImpl(a_matrix, q_matrix, eigen_values_matrix, q,
+ matrix_count, repetitions);
+}
+
+
+/*
+ Returns whether the given value is finite
+*/
+bool IsFinite(float val) { return std::isfinite(val); }
+
+template
+void QRD(std::vector input, std::vector &q, std::vector &r){
+
+ for (int i = 0; i> &A) {
+// [H,Q] = hessred(A)
+// %
+// % Compute the Hessenberg decomposition H = Q’*A*Q using
+// % Householder transformations.
+// %
+// function [H,Q] = hessred(A)
+// n = length(A);
+// Q = eye(n); % Orthogonal transform so far
+// H = A; % Transformed matrix so far
+
+// for j = 1:n-2
+
+// % -- Find W = I-2vv’ to put zeros below H(j+1,j)
+// u = H(j+1:end,j);
+// u(1) = u(1) + sign(u(1))*norm(u);
+// v = u/norm(u);
+
+// % -- H := WHW’, Q := QW
+// H(j+1:end,:) = H(j+1:end,:)-2*v*(v’*H(j+1:end,:));
+// H(:,j+1:end) = H(:,j+1:end)-(H(:,j+1:end)*(2*v))*v’;
+// Q(:,j+1:end) = Q(:,j+1:end)-(Q(:,j+1:end)*(2*v))*v’;
+
+// end
+
+// end
+ // TODO
+}
+
+
+int main(int argc, char *argv[]) {
+ constexpr size_t kRandomSeed = 1138;
+ constexpr size_t kRandomMin = 1;
+ constexpr size_t kRandomMax = 10;
+ constexpr size_t kSize = SIZE_V;
+ constexpr size_t kAMatrixSize = kSize * kSize;
+ constexpr size_t kQMatrixSize = kSize * kSize;
+ constexpr size_t kRMatrixSize = kSize;
+ constexpr size_t kQRMatrixSize = kQMatrixSize + kRMatrixSize;
+
+#if defined(FPGA_SIMULATOR)
+ std::cout << "Using 32x32 matrices for simulation to reduce runtime" << std::endl;
+#endif
+
+ constexpr bool kShift = true;
+
+ // Get the number of times we want to repeat the decomposition
+ // from the command line.
+// #if defined(FPGA_EMULATOR)
+// int repetitions = argc > 1 ? atoi(argv[1]) : 16;
+// #elif defined(FPGA_SIMULATOR)
+// int repetitions = argc > 1 ? atoi(argv[1]) : 1;
+// #else
+// int repetitions = argc > 1 ? atoi(argv[1]) : 819200;
+// #endif
+
+ int repetitions = 1;
+
+
+ if (repetitions < 1) {
+ std::cout << "Number of repetitions given is lower that 1." << std::endl;
+ std::cout << "The decomposition must occur at least 1 time." << std::endl;
+ std::cout << "Increase the number of repetitions (e.g. 16)." << std::endl;
+ return 1;
+ }
+
+ constexpr size_t kMatricesToDecompose = 1;
+// #if defined(FPGA_SIMULATOR)
+// constexpr size_t kMatricesToDecompose = 1;
+// #else
+// constexpr size_t kMatricesToDecompose = 8;
+// #endif
+
+ try {
+
+#if FPGA_SIMULATOR
+ auto selector = sycl::ext::intel::fpga_simulator_selector_v;
+#elif FPGA_HARDWARE
+ auto selector = sycl::ext::intel::fpga_selector_v;
+#else // #if FPGA_EMULATOR
+ sycl::ext::intel::fpga_emulator_selector selector;
+ // auto selector = sycl::ext::intel::fpga_emulator_selector_v;
+#endif
+
+ // Enable the queue profiling to time the execution
+ sycl::property_list
+ queue_properties{sycl::property::queue::enable_profiling()};
+ sycl::queue q = sycl::queue(selector,
+ fpga_tools::exception_handler,
+ queue_properties);
+
+ sycl::device device = q.get_device();
+
+ std::cout << "Running on device: "
+ << device.get_info().c_str()
+ << std::endl;
+
+ // Create vectors to hold all the input and output matrices
+ std::vector a_matrix;
+ std::vector q_matrix;
+ std::vector eigen_values_matrix;
+
+ a_matrix.resize(kAMatrixSize * kMatricesToDecompose);
+ q_matrix.resize(kQMatrixSize * kMatricesToDecompose);
+ eigen_values_matrix.resize(kRMatrixSize * kMatricesToDecompose);
+
+ std::cout << "Generating " << kMatricesToDecompose << " random real ";
+ std::cout << "matri" << (kMatricesToDecompose > 1 ? "ces" : "x")
+ << " of size "
+ << kSize << "x" << kSize << " " << std::endl;
+
+ // Generate the random symmetric matrix input matrices
+ srand(kRandomSeed);
+ float expected_eigen_values[kSize*kMatricesToDecompose];
+
+ int total_iterations = 0;
+ constexpr double kThresholdMatrixGeneration = 1e-4;
+
+ for(int matrix_index = 0; matrix_index < kMatricesToDecompose;
+ matrix_index++){
+
+ std::cout << "Generating matrix " << matrix_index << std::endl;
+ for (size_t row = 0; row < kSize; row++) {
+ for (size_t col = row; col < kSize; col++) {
+ float random_real;
+ if (col > (row+1)) {
+ random_real = 0;
+ }
+ else{
+ random_real = rand() % (kRandomMax - kRandomMin) + kRandomMin;
+ }
+ a_matrix[matrix_index * kAMatrixSize
+ + col * kSize + row] = random_real;
+ a_matrix[matrix_index * kAMatrixSize
+ + row * kSize + col] = random_real;
+ } // end of col
+ } // end of row
+
+
+ // #ifdef DEBUG
+ std::cout << "A MATRIX " << matrix_index << std::endl;
+ for (size_t row = 0; row < kSize; row++) {
+ for (size_t col = 0; col < kSize; col++) {
+ std::cout << a_matrix[matrix_index * kAMatrixSize
+ + col * kSize + row] << " ";
+ } // end of col
+ std::cout << std::endl;
+ } // end of row
+ // #endif
+
+ int iterations = 0;
+ bool cond = false;
+ std::vector q, r, rq;
+ q.resize(kSize*kSize);
+ r.resize(kSize*kSize);
+ rq.resize(kSize*kSize);
+
+ for(int k = 0; k< kSize*kSize; k++){
+ rq[k] = a_matrix[matrix_index * kAMatrixSize + k];
+ }
+
+ while (!cond){
+ for (int k = 0; k=1; row--){
+ bool row_is_zero = true;
+ for (int col=0; col=0){
+ // Compute the shift value
+ // Take the submatrix:
+ // [a b]
+ // [b c]
+ // and compute the shift such as
+ // mu = c - (sign(d)* b*b)/(abs(d) + sqrt(d*d + b*b))
+ // where d = (a - c)/2
+
+ double a = rq[shift_row + kSize*shift_row];
+ double b = rq[shift_row + kSize*(shift_row+1)];
+ double c = rq[(shift_row+1) + kSize*(shift_row+1)];
+
+ double d = (a - c) / 2;
+ double b_squared = b*b;
+ double d_squared = d*d;
+ double b_squared_signed = d<0 ? -b_squared : b_squared;
+ shift_value = c - b_squared_signed / (abs(d) + sqrt(d_squared + b_squared));
+ }
+
+ // Subtract the shift value from the diagonal of RQ
+ for(int row=0; row(rq, q, r);
+
+ // Compute RQ
+ for(int row=0; row(kSize*kSize*kSize*16)){
+ std::cout << "Number of iterations too high" << std::endl;
+ break;
+ }
+ }
+
+ if(iterations>kSize*kSize*kSize*16){
+ matrix_index--;
+ }
+ else{
+ total_iterations += iterations;
+ // std::cout << "expected eigen values after " << iterations << " iterations:" << std::endl;
+ for(int k=0; k 1 ? "ces " : "x ")
+ << repetitions << " times" << std::endl;
+
+ Eigen(a_matrix, q_matrix, eigen_values_matrix, q, kMatricesToDecompose, repetitions);
+
+ // For output post-processing (op)
+ float q_matrix_op[kSize][kSize];
+ float eigen_values_matrix_op[kSize];
+
+ // Floating-point error threshold value at which we decide that the design
+ // computed an incorrect value
+ constexpr float kErrorThreshold = 1e-3;
+
+ // Count the number of errors found for this matrix
+ size_t error_count = 0;
+
+ // Check Q and R matrices
+ std::cout << "Verifying results..." << std::endl;
+ for(int matrix_index = 0; matrix_index < kMatricesToDecompose;
+ matrix_index++){
+
+ // keep track of Q element index
+ size_t q_idx = 0;
+
+ // Read the R matrix from the output vector to the RMatrixOP matrix
+ for (size_t i = 0; i < kSize; i++) {
+ eigen_values_matrix_op[i] =
+ eigen_values_matrix[matrix_index * kRMatrixSize + i];
+ }
+
+ // Read the Q matrix from the output vector to the QMatrixOP matrix
+ for (size_t j = 0; j < kSize; j++) {
+ for (size_t i = 0; i < kSize; i++) {
+ q_matrix_op[i][j] = q_matrix[matrix_index*kQMatrixSize
+ + q_idx];
+ q_idx++;
+ }
+ }
+
+ // #ifdef DEBUG
+ // std::cout << "Eigen values MATRIX" << std::endl;
+ // for (size_t i = 0; i < kSize; i++) {
+ // std::cout << eigen_values_matrix_op[i] << " ";
+ // }
+ // std::cout << std::endl;
+
+ // // std::cout << "Q MATRIX" << std::endl;
+ // // for (size_t i = 0; i < kSize; i++) {
+ // // for (size_t j = 0; j < kSize; j++) {
+ // // std::cout << q_matrix_op[i][j] << " ";
+ // // }
+ // // std::cout << std::endl;
+ // // }
+ // #endif
+
+ for (size_t i = 0; i < kSize; i++) {
+
+ bool found_a_matching_eigen_value = fabs(fabs(eigen_values_matrix_op[i]) -fabs(expected_eigen_values[matrix_index * kSize + i])) <= kErrorThreshold;
+
+ // It may be that the eigen values are not in the same order.
+ if(!found_a_matching_eigen_value) {
+
+ for (size_t j = 0; j < kSize; j++) {
+ if(fabs(fabs(eigen_values_matrix_op[i]) - fabs(expected_eigen_values[matrix_index * kSize + j])) <= kErrorThreshold){
+ found_a_matching_eigen_value = true;
+ break;
+ }
+ }
+ }
+
+ if(!found_a_matching_eigen_value){
+ std::cout << "Eigen value computed " << eigen_values_matrix_op[i]
+ << " does not match any of the precomputed Eigen values ";
+ for (int i=0; i 0) {
+ std::cout << std::endl << "FAILED" << std::endl;
+ std::cout << std::endl
+ << "!!!!!!!!!!!!!! " << error_count << " errors" << std::endl;
+ return 1;
+ }
+
+ } // end of matrix_index
+
+
+ std::cout << std::endl << "PASSED" << std::endl;
+ return 0;
+
+ } catch (sycl::exception const &e) {
+ std::cerr << "Caught a synchronous SYCL exception: " << e.what()
+ << std::endl;
+ std::cerr << " If you are targeting an FPGA hardware, "
+ "ensure that your system is plugged to an FPGA board that is "
+ "set up correctly"
+ << std::endl;
+ std::cerr << " If you are targeting the FPGA emulator, compile with "
+ "-DFPGA_EMULATOR"
+ << std::endl;
+
+ std::terminate();
+ } catch (std::bad_alloc const &e) {
+ std::cerr << "Caught a memory allocation exception on the host: "
+ << e.what() << std::endl;
+ std::cerr << " You can reduce the memory requirement by reducing the "
+ "number of matrices generated. Specify a smaller number when "
+ "running the executable."
+ << std::endl;
+ std::cerr << " In this run, more than "
+ << ((kAMatrixSize + kQRMatrixSize) * 2 * kMatricesToDecompose
+ * sizeof(float)) / pow(2, 30)
+ << " GBs of memory was requested for the decomposition of a "
+ << "matrix of size " << kSize << " x " << kSize
+ << std::endl;
+ std::terminate();
+ }
+} // end of main
diff --git a/DirectProgramming/C++SYCL_FPGA/ReferenceDesigns/eigen/src/memory_transfers.hpp b/DirectProgramming/C++SYCL_FPGA/ReferenceDesigns/eigen/src/memory_transfers.hpp
new file mode 100644
index 0000000000..62f575b87f
--- /dev/null
+++ b/DirectProgramming/C++SYCL_FPGA/ReferenceDesigns/eigen/src/memory_transfers.hpp
@@ -0,0 +1,207 @@
+#ifndef __MEMORY_TRANSFERS_HPP__
+#define __MEMORY_TRANSFERS_HPP__
+
+#include "tuple.hpp"
+#include "constexpr_math.hpp"
+#include "unrolled_loop.hpp"
+
+/*
+ Read matrix_count matrices of type TT from DDR by bursts of num_elem_per_bank
+ elements, and write the matrices to the "MatrixPipe" pipe num_elem_per_bank by
+ num_elem_per_bank elements.
+ Repeat this operation "repetitions" times.
+*/
+template
+void MatrixReadFromDDRToPipe(
+ TT* matrix_ptr, // Input matrix pointer
+ int matrix_count,// Number of matrices to read from DDR
+ int repetitions // Number of times to write the same matrix to the pipe
+ ) {
+
+ // We may perform an incomplete memory read if the number of elements per row
+ // is not a multiple of the DDR burst size
+ constexpr bool kIncompleteBurst = rows%num_elem_per_bank != 0;
+ constexpr int kExtraIteration = kIncompleteBurst ? 1 : 0;
+ // Number of DDR burst reads of num_elem_per_bank elements required to read a
+ // full column
+ constexpr int kLoopIterPerColumn = rows / num_elem_per_bank + kExtraIteration;
+ // Number of DDR burst reads of num_elem_per_bank to read all the matrices
+ constexpr int kLoopIter = kLoopIterPerColumn * columns;
+ // Size in bits of the loop iterator over kLoopIter iterations
+ constexpr int kLoopIterBitSize = fpga_tools::BitsForMaxValue();
+ // Size of a full matrix
+ constexpr int kMatrixSize = rows * columns;
+
+#if defined (IS_BSP)
+ // When targeting a BSP, we instruct the compiler that this pointer
+ // lives on the device.
+ // Knowing this, the compiler won't generate hardware to
+ // potentially get data from the host.
+ sycl::device_ptr matrix_ptr_located(matrix_ptr);
+#else
+ // Device pointers are not supported when targeting an FPGA
+ // family/part
+ TT* matrix_ptr_located(matrix_ptr);
+#endif
+
+ // Repeatedly read matrix_count matrices from DDR and send them to the pipe
+ for (int repetition = 0; repetition < repetitions; repetition++){
+
+ for (int matrix_index = 0; matrix_index < matrix_count; matrix_index++){
+
+ // Keep track of the current element index in the matrix
+ // Only useful in the case of kIncompleteBurst
+ int load_index = 0;
+
+ [[intel::initiation_interval(1)]] // NO-FORMAT: Attribute
+ for (ac_int li = 0; li < kLoopIter; li++) {
+
+ bool last_burst_of_col;
+ if constexpr (kIncompleteBurst){
+ // Check if we are reading the last DDR burst of the current column
+ last_burst_of_col = (li % kLoopIterPerColumn)
+ == kLoopIterPerColumn - 1;
+ }
+
+ fpga_tools::NTuple ddr_read;
+
+ // Perform the DDR burst read of num_elem_per_bank elements
+ fpga_tools::UnrolledLoop([&](auto k) {
+
+ if constexpr (kIncompleteBurst){
+ // Check if the current read index is beyond the end of the current
+ // matrix column
+ bool out_of_bounds = last_burst_of_col &&
+ ((k % num_elem_per_bank) > ((rows - 1) % num_elem_per_bank));
+
+ // Only perform the DDR reads that are relevant (and don't access a
+ // memory address that may be beyond the matrix last address)
+ if (!out_of_bounds) {
+ ddr_read.template get() = matrix_ptr_located
+ [matrix_index * kMatrixSize + load_index + k];
+ }
+ }
+ else{
+ ddr_read.template get() = matrix_ptr_located
+ [matrix_index * kMatrixSize + (int)(li)*num_elem_per_bank + k];
+
+ }
+ });
+
+ if constexpr (kIncompleteBurst){
+ // Update the current element index in the input matrix according
+ // to the read size of the current iteration
+ load_index += last_burst_of_col ? rows % num_elem_per_bank :
+ num_elem_per_bank;
+ }
+
+ MatrixPipe::write(ddr_read);
+ } // end of li
+
+ } // end of matrix_index
+ } // end of repetition
+}
+
+/*
+ Write matrix_count matrices of type TT from a pipe, num_elem_per_bank by
+ num_elem_per_bank and write them to DDR by bursts of num_elem_per_bank
+ elements.
+ Repeat this operation "repetitions" times.
+*/
+template
+void MatrixReadPipeToDDR(
+ TT* matrix_ptr, // Output matrix pointer
+ int matrix_count,// Number of matrices to write to DDR
+ int repetitions // Number of times to read the same matrix from the pipe
+ ) {
+
+ // We may perform an incomplete memory write if the number of elements per row
+ // is not a multiple of the DDR burst size
+ constexpr bool kIncompleteBurst = rows%num_elem_per_bank != 0;
+ constexpr int kExtraIteration = kIncompleteBurst ? 1 : 0;
+ // Number of DDR burst of num_elem_per_bank required to write a full column
+ constexpr int kLoopIterPerColumn = rows / num_elem_per_bank + kExtraIteration;
+ // Number of DDR burst of num_elem_per_bank to write all the matrices
+ constexpr int kLoopIter = kLoopIterPerColumn * columns;
+ // Size in bits of the loop iterator over kLoopIter iterations
+ constexpr int kLoopIterBitSize = fpga_tools::BitsForMaxValue();
+ // Size of a full matrix
+ constexpr int kMatrixSize = rows * columns;
+
+#if defined (IS_BSP)
+ // When targeting a BSP, we instruct the compiler that this pointer
+ // lives on the device.
+ // Knowing this, the compiler won't generate hardware to
+ // potentially get data from the host.
+ sycl::device_ptr matrix_ptr_located(matrix_ptr);
+#else
+ // Device pointers are not supported when targeting an FPGA
+ // family/part
+ TT* matrix_ptr_located(matrix_ptr);
+#endif
+
+
+ // Repeatedly read matrix_count matrices from the pipe and write them to DDR
+ for (int repetition = 0; repetition < repetitions; repetition++){
+
+ for (int matrix_index = 0; matrix_index < matrix_count; matrix_index++){
+ // Keep track of the current element index in the output matrix
+ // Only useful in the case of kIncompleteBurst
+ int write_idx = 0;
+
+ [[intel::initiation_interval(1)]] // NO-FORMAT: Attribute
+ [[intel::ivdep]] // NO-FORMAT: Attribute
+ for (ac_int li = 0; li < kLoopIter; li++) {
+ fpga_tools::NTuple pipe_read =
+ MatrixPipe::read();
+
+ bool last_burst_of_col;
+ if constexpr (kIncompleteBurst){
+ // Check if we are writing the last DDR burst of the current column
+ last_burst_of_col =
+ (li % kLoopIterPerColumn) == kLoopIterPerColumn - 1;
+ }
+
+ fpga_tools::UnrolledLoop([&](auto k) {
+ if constexpr (kIncompleteBurst){
+ // Check if the current write index is beyond the end of the current
+ // matrix column
+ bool out_of_bounds = last_burst_of_col &&
+ (k > ((rows - 1) % num_elem_per_bank));
+
+ // Only perform the DDR writes that are relevant (and don't access a
+ // memory address that may be beyond the buffer last address)
+ if (!out_of_bounds) {
+ matrix_ptr_located[matrix_index * kMatrixSize + write_idx + k] =
+ pipe_read.template get();
+ }
+ }
+ else{
+ matrix_ptr_located[matrix_index * kMatrixSize
+ + int(li) * num_elem_per_bank + k] = pipe_read.template get();
+ }
+
+ });
+
+ if constexpr (kIncompleteBurst){
+ // Update the current element index in the write buffer according
+ // to the write size of the current iteration
+ write_idx += last_burst_of_col ? rows % num_elem_per_bank :
+ num_elem_per_bank;
+ }
+ } // end of li
+ } // end of matrix_index
+ } // end of repetition
+}
+
+#endif /* __MEMORY_TRANSFERS_HPP__ */
\ No newline at end of file
diff --git a/DirectProgramming/C++SYCL_FPGA/include/streaming_eigen.hpp b/DirectProgramming/C++SYCL_FPGA/include/streaming_eigen.hpp
new file mode 100755
index 0000000000..22b577d83f
--- /dev/null
+++ b/DirectProgramming/C++SYCL_FPGA/include/streaming_eigen.hpp
@@ -0,0 +1,503 @@
+#ifndef __STREAMING_QRD_HPP__
+#define __STREAMING_QRD_HPP__
+
+#include "constexpr_math.hpp"
+#include "tuple.hpp"
+#include "unrolled_loop.hpp"
+
+#ifdef __SYCL_DEVICE_ONLY__
+#define CL_CONSTANT __attribute__((opencl_constant))
+#else
+#define CL_CONSTANT
+#endif
+
+using namespace sycl;
+
+#define PRINTF(format, ...) \
+ { \
+ static const CL_CONSTANT char _format[] = format; \
+ ext::oneapi::experimental::printf(_format, ##__VA_ARGS__); \
+ }
+
+namespace fpga_linalg {
+
+/*
+ StreamingEigen - Computes the eigen values of a symmetric tridiagonal
+ matrix using the shifted QR iteration, where at each iteration:
+ - A is decomposed into Q*R using Givens rotations
+ - A is then replaced by the product R*Q
+ until the matrix converges towards a diagonal matrix whose diagonal
+ holds the eigen values.
+
+ The QR decomposition is derived from the OneAPI optimized version of the
+ "High performance QR Decomposition for FPGAs" FPGA'18 paper by Martin
+ Langhammer and Bogdan Pasca.
+
+ Each matrix (input and output) is represented column-wise (transposed).
+
+ The input and output matrices are consumed/produced from/to pipes.
+*/
+template
+struct StreamingEigen {
+ void operator()() const {
+ // Functional limitations
+ static_assert(k_size >= 4,
+ "only matrices of size 4x4 and over are supported");
+
+ // Type used to store the matrices in the compute loop
+ using column_tuple = fpga_tools::NTuple;
+
+ // Count the total number of iterations performed to compute the average
+ int iterations = 0;
+ int matrices_computed = 0;
+
+ // Compute Eigen values as long as matrices are given as inputs
+ while (1) {
+ // Three copies of the full matrix, so that each matrix has a single
+ // load and a single store.
+ // a_load is the initial matrix received from the pipe
+ // a_compute is used and modified during calculations
+ // q_result is a copy of a_compute and is used to send the final output
+
+ // Break memories up to store 4 complex numbers (32 bytes) per bank
+ constexpr short kBankwidth = pipe_size * sizeof(T);
+ constexpr unsigned short kNumBanks = k_size / pipe_size;
+
+ // When specifying numbanks for a memory, it must be a power of 2.
+ // Unused banks will be automatically optimized away.
+ constexpr short kNumBanksNextPow2 =
+ fpga_tools::Pow2(fpga_tools::CeilLog2(kNumBanks));
+
+ [[intel::numbanks(kNumBanksNextPow2)]] // NO-FORMAT: Attribute
+ [[intel::bankwidth(kBankwidth)]] // NO-FORMAT: Attribute
+ [[intel::private_copies(4)]] // NO-FORMAT: Attribute
+ [[intel::max_replicates(1)]] // NO-FORMAT: Attribute
+ column_tuple a_load[k_size],
+ a_compute[k_size], q_result[k_size];
+
+ // Contains the values of the upper-right part of R in a row by row
+ // fashion, starting by row 0
+ [[intel::private_copies(4)]] // NO-FORMAT: Attribute
+ T eigen_values[k_size];
+
+ // Copy a matrix from the pipe to a local memory
+ // Number of pipe reads of pipe_size required to read a full column
+ constexpr int kExtraIteration = (k_size % pipe_size) != 0 ? 1 : 0;
+ constexpr int kLoopIterPerColumn = k_size / pipe_size + kExtraIteration;
+ // Number of pipe reads of pipe_size to read all the matrices
+ constexpr int kLoopIter = kLoopIterPerColumn * k_size;
+ // Size in bits of the loop iterator over kLoopIter iterations
+ constexpr int kLoopIterBitSize =
+ fpga_tools::BitsForMaxValue();
+
+ [[intel::initiation_interval(1)]] // NO-FORMAT: Attribute
+ for (ac_int li = 0; li < kLoopIter; li++) {
+ fpga_tools::NTuple pipe_read = AIn::read();
+
+ int write_idx;
+ int a_col_index;
+ write_idx = li % kLoopIterPerColumn;
+ a_col_index = li / kLoopIterPerColumn;
+
+ fpga_tools::UnrolledLoop([&](auto k) {
+ fpga_tools::UnrolledLoop([&](auto t) {
+ if (write_idx == k) {
+ if constexpr (k * pipe_size + t < k_size) {
+ a_load[a_col_index].template get() =
+ pipe_read.template get();
+ }
+ }
+
+ // Delay data signals to create a vine-based data distribution
+ // to lower signal fanout.
+ pipe_read.template get() =
+ sycl::ext::intel::fpga_reg(pipe_read.template get());
+ });
+
+ write_idx = sycl::ext::intel::fpga_reg(write_idx);
+ });
+ }
+
+ // Compute the Eigen values
+
+ // store the entire matrix in a 3 x k_size table as the matrix is
+ // tridiagonal, and we'll need a fourth diagonal for R
+ T a_tri_diag[k_size][4];
+ for (int col = 0; col < 4; col++) {
+ fpga_tools::UnrolledLoop([&](auto k) {
+ if constexpr (k == 0) {
+ if (col == 0) {
+ a_tri_diag[k][col] = T{0};
+ } else {
+ a_tri_diag[k][col] = a_load[col + k - 1].template get();
+ }
+ } else {
+ if ((col + k) > k_size) {
+ a_tri_diag[k][col] = T{0};
+ }
+ a_tri_diag[k][col] = a_load[col + k - 1].template get();
+ }
+ });
+ }
+
+ PRINTF("a_tri_diag\n");
+ for(int row=0; row 1) {
+ PRINTF("========================================================\n");
+ PRINTF("rows_to_compute %d \n", rows_to_compute);
+ T rq[k_size][4];
+
+ for (int row = 0; row < k_size; row++) {
+ for (int col = 0; col < 4; col++) {
+ rq[row][col] = -1;
+ }
+ }
+
+ T givens_it_minus_1[2][2] = {{1, 0}, {0, 1}};
+ T givens_it_minus_2[2][2] = {{1, 0}, {0, 1}};
+
+ T a_result_rq_buffer[3][4];
+
+ for (int row = 0; row < 3; row++) {
+ for (int col = 0; col < 4; col++) {
+ a_result_rq_buffer[row][col] = -66;
+ }
+ }
+
+ T shift_value = T{0};
+ if constexpr (kShift) {
+ // Compute the shift value
+ // Take the submatrix:
+ // [a b]
+ // [b c]
+ // and compute the shift such as
+ // mu = c - (sign(d)* b*b)/(abs(d) + sqrt(d*d + b*b))
+ // where d = (a - c)/2
+ T a = rows_to_compute - 2 < 0 ? T{0}
+ : a_tri_diag[rows_to_compute - 2][1];
+ T b = rows_to_compute - 1 < 0 ? T{0}
+ : a_tri_diag[rows_to_compute - 1][0];
+ T c = rows_to_compute - 1 < 0 ? T{0}
+ : a_tri_diag[rows_to_compute - 1][1];
+
+ T d = (a - c) / 2;
+ T b_squared = b * b;
+ T d_squared = d * d;
+ T b_squared_signed = d < 0 ? -b_squared : b_squared;
+ shift_value =
+ c - b_squared_signed / (abs(d) + sqrt(d_squared + b_squared));
+
+ // Only taking 90% of the shift value as there is a high probability
+ // that the shift value matches one of the diagonal elements, which
+ // would create a big cancellation
+ // (as the shift value approximates an eigen value, and the eigen
+ // values are the values that will end up on the diagonal)
+ shift_value *= 0.9;
+
+ // Subtract the shift from the diagonal of RQ
+ // TODO: flag potential cancelations
+ for (int row = 0; row < rows_to_compute; row++) {
+ a_tri_diag[row][1] -= shift_value;
+ }
+ }
+ PRINTF("shift_value %f\n", shift_value);
+
+ PRINTF("a_tri_diag before QR\n");
+ for (int row = 0; row < k_size; row++) {
+ for (int col = 0; col < 4; col++) {
+ PRINTF("%f ", a_tri_diag[row][col]);
+ }
+ PRINTF("\n");
+ }
+
+ T last_rq_val = -55;
+
+ for (int row = 0; row < rows_to_compute + 1; row++) {
+ // PRINTF("-------------------------------------------------------\n");
+
+ T givens[2][2];
+
+ if (row < rows_to_compute) {
+ // Take the diagonal element and the one below it
+ T x = a_tri_diag[row][1];
+ T y = row + 1 > k_size - 1 ? T{0} : a_tri_diag[row + 1][0];
+
+ // Compute the Givens rotation matrix
+ T x_squared = x * x;
+ T y_squared = y * y;
+ T norm = sycl::sqrt(x_squared + y_squared);
+ T sign = x >= 0 ? 1 : -1;
+ T c = sign * x / norm;
+ T s = sign * -y / norm;
+
+ // The final Givens matrix must be (-1 0, 0 1)
+ // to get the correct sign for the final value of A
+
+ givens[0][0] = row == (k_size - 1) ? -1 : c;
+ givens[0][1] = row == (k_size - 1) ? 0 : -s;
+ givens[1][0] = row == (k_size - 1) ? 0 : s;
+ givens[1][1] = row == (k_size - 1) ? 1 : c;
+
+ /*
+ Compute the sub product givens*A
+ This only affects two rows of A
+ Here with the example of the first givens matrix
+ (a00 a01 a02 a03)
+ (a10 a11 a12 a13)
+ x (a20 a21 a22 a23)
+ (a30 a31 a32 a33)
+
+ (c -s 0 0) (a0 b0 c0 d0 )
+ (s c 0 0) (a1 b1 c1 d1 )
+ (0 0 1 0) (a20 a21 a22 a23)
+ (0 0 0 1) (a30 a31 a32 a33)
+
+ */
+ for (int j = 0; j < 4; j++) {
+ // Go through all the columns of A
+ // Because A is tridiagonal, there are only 3 columns, but we
+ // need one more for intermediate results
+
+ // Take the first two elements of the current column of A
+ // Continuing with the previous example: a00 and a10
+ T a0 = (j == 3) ? T{0} : a_tri_diag[row][j + 1];
+ T a1 = (row + 1) >= k_size ? T{0} : a_tri_diag[row + 1][j];
+
+ // Go through the two affected rows
+ for (int i = 0; i < 2; i++) {
+ T new_a_element = givens[i][0] * a0 + givens[i][1] * a1;
+ // Don't write beyond the last matrix row
+ if (row != (k_size - 1) || (i == 0)) {
+ a_tri_diag[row + i][j] = new_a_element;
+ }
+
+ if (i == 0) {
+ a_result_rq_buffer[2][j] = new_a_element;
+ }
+
+ } // end of i
+ } // end of j
+ } // end of if (row < k_size)
+
+ if (row > 0) {
+ int rq_col = row - 1;
+ T g_prod_col[3] = {
+ givens_it_minus_2[0][1] * givens_it_minus_1[0][0],
+ givens_it_minus_2[1][1] * givens_it_minus_1[0][0],
+ givens_it_minus_1[1][0]};
+
+ // PRINTF("g_prod_col: %f %f %f\n", g_prod_col[0], g_prod_col[1],
+ // g_prod_col[2]);
+
+ // PRINTF("RQ value at %d: %f \n", rq_col, last_rq_val);
+ T dot_prod_val[2];
+ T last_dp_val;
+ for (int rq_row = 0; rq_row < 2; rq_row++) {
+ T value = 0;
+ // PRINTF("value = 0");
+#pragma unroll
+ for (int k = 0; k < 3; k++) {
+ T g_prod_col_val =
+ (k + rq_row + 1) > 2 ? T{0} : g_prod_col[k + rq_row + 1];
+ T a_result_rq_buffer_val = a_result_rq_buffer[rq_row + 1][k];
+ value += a_result_rq_buffer_val * g_prod_col_val;
+ // PRINTF(" + (%f * %f)", a_result_rq_buffer_val, g_prod_col_val);
+ }
+ // PRINTF(" = %f\n", value);
+ dot_prod_val[rq_row] = value;
+ last_dp_val = value;
+ }
+
+ for (int row_rq = 0; row_rq < k_size; row_rq++) {
+ T rq_value;
+ if (row_rq == rq_col) {
+ rq_value = dot_prod_val[0];
+ } else if (row_rq == (rq_col + 1)) {
+ rq_value = dot_prod_val[1];
+ } else if (row_rq == (rq_col - 1)) {
+ rq_value = last_rq_val;
+ } else {
+ rq_value = 0;
+ }
+
+ int col_index = rq_col+1-row_rq;
+ if (col_index < 4 && col_index >= 0){
+ // PRINTF("Writing RQ %d %d = %f\n", row_rq, col_index, rq_value);
+ rq[row_rq][col_index] = rq_value;
+ }
+ }
+
+ last_rq_val = last_dp_val;
+ }
+
+ for (int row = 0; row < 2; row++) {
+ for (int col = 0; col < 4; col++) {
+ a_result_rq_buffer[row][col] = a_result_rq_buffer[row + 1][col];
+ }
+ }
+
+ // Store the previous Givens matrix for the computation of RQ
+ // Also transpose this matrix
+ givens_it_minus_2[0][0] = givens_it_minus_1[0][0];
+ givens_it_minus_2[0][1] = givens_it_minus_1[0][1];
+ givens_it_minus_2[1][0] = givens_it_minus_1[1][0];
+ givens_it_minus_2[1][1] = givens_it_minus_1[1][1];
+
+ givens_it_minus_1[0][0] = givens[0][0];
+ givens_it_minus_1[0][1] = -givens[0][1];
+ givens_it_minus_1[1][0] = -givens[1][0];
+ givens_it_minus_1[1][1] = givens[1][1];
+
+ } // end of row
+
+ // Copy RQ in A
+ for (int row = 0; row < rows_to_compute; row++) {
+ for (int col = 0; col < 4; col++) {
+ bool is_first_elem = (row == 0) && (col == 0);
+ bool is_last_elem = (row == k_size-1) && (col == 2);
+ bool is_last_col = col == 3;
+ if ( is_first_elem || is_last_elem || is_last_col ) {
+ a_tri_diag[row][col] = T{0};
+ } else {
+ a_tri_diag[row][col] = rq[row][col];
+ }
+ }
+ }
+
+ PRINTF("rq \n");
+ for(int row=0; row threshold){
+ // rows_to_compute++;
+ // }
+ // }
+
+ // bool reached = true;
+ // // // //
+ // // // PRINTF("===============================================================");
+ // // // PRINTF("checking... ");
+ // for (int row = 1; row < k_size; row++) {
+ // // PRINTF("%f ", fabs(rq[row][0]));
+ // reached &= sycl::fabs(rq[row][0]) < threshold;
+ // }
+ // // PRINTF("\n");
+ // cond = reached;
+ // cond = iteration==0;
+ iterations++;
+ // if(iterations==2){
+ // PRINTF("NEEEEEEEEXXXXXXXXXXXXXXXXXXXXTTTT\n");
+ // exit(0);
+ // }
+ // if(iteration>10000){
+ // PRINTF("TOO MANY ITERATIONS!!!\n");
+ // for (int row = 0; row < k_size; row++) {
+ // PRINTF("%f\n", sycl::fabs(rq[row][0]));
+ // }
+ // }
+ }
+
+ matrices_computed++;
+
+ PRINTF("Average number of iterations: %d\n",
+ iterations / matrices_computed);
+
+ // PRINTF("a_tri_diag\n");
+ // for (int row = 0; row < k_size; row++) {
+ // for (int col = 0; col < 4; col++) {
+ // PRINTF("%f ", a_tri_diag[row][col]);
+ // }
+ // PRINTF("\n");
+ // }
+
+ for (int row = 0; row < k_size; row++) {
+ eigen_values[row] = a_tri_diag[row][1];
+ }
+
+ // PRINTF("iteration %d\n", iteration);
+
+ [[intel::initiation_interval(1)]] // NO-FORMAT: Attribute
+ for (int r_idx = 0; r_idx < k_size; r_idx++) {
+ EValuesOut::write(eigen_values[r_idx]);
+ }
+
+ [[intel::initiation_interval(1)]] // NO-FORMAT: Attribute
+ for (ac_int li = 0; li < kLoopIter; li++) {
+ int column_iter = li % kLoopIterPerColumn;
+ bool get[kLoopIterPerColumn];
+ fpga_tools::UnrolledLoop([&](auto k) {
+ get[k] = column_iter == k;
+ column_iter = sycl::ext::intel::fpga_reg(column_iter);
+ });
+
+ fpga_tools::NTuple pipe_write;
+ fpga_tools::UnrolledLoop([&](auto t) {
+ fpga_tools::UnrolledLoop([&](auto k) {
+ if constexpr (t * pipe_size + k < k_size) {
+ pipe_write.template get() =
+ get[t] ? q_result[li / kLoopIterPerColumn]
+ .template get()
+ : sycl::ext::intel::fpga_reg(
+ pipe_write.template get());
+ }
+ });
+ });
+ QOut::write(pipe_write);
+ }
+
+ } // end of while(1)
+ } // end of operator
+}; // end of struct
+
+} // namespace fpga_linalg
+
+#endif /* __STREAMING_QRD_HPP__ */
\ No newline at end of file
diff --git a/DirectProgramming/C++SYCL_FPGA/include/streaming_qrd.hpp b/DirectProgramming/C++SYCL_FPGA/include/streaming_qrd.hpp
index 67f0ba9fce..6d03af465f 100644
--- a/DirectProgramming/C++SYCL_FPGA/include/streaming_qrd.hpp
+++ b/DirectProgramming/C++SYCL_FPGA/include/streaming_qrd.hpp
@@ -1,6 +1,7 @@
#ifndef __STREAMING_QRD_HPP__
#define __STREAMING_QRD_HPP__
+#include
#include "constexpr_math.hpp"
#include "tuple.hpp"
#include "unrolled_loop.hpp"