Skip to content
This repository was archived by the owner on Dec 4, 2025. It is now read-only.
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
57 changes: 31 additions & 26 deletions python/fastlaps.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,33 +7,37 @@

if __name__ == "__main__":
# Quick example for a 3x3 matrix
matrix = np.random.rand(4, 5)
for algo in ["lapjv", "hungarian"]:
print(f"\nAlgorithm: {algo}")
start = time.time()
fastlap_cost, fastlap_rows, fastlap_cols = fastlap.solve_lap(matrix, algo)
fastlap_end = time.time()
print(f"fastlap.{algo}: Time={fastlap_end - start:.8f}s")
print(
f"fastlap.{algo}: Cost={fastlap_cost}, Rows={fastlap_rows}, Cols={fastlap_cols}"
)
if algo == "hungarian":
cols = 3000
rows = 3000
matrix = np.random.rand(rows, cols)
for i in range(10):

for algo in ["lapjv", "hungarian"]:
print(f"\nAlgorithm: {algo}")
start = time.time()
scipy_rows, scipy_cols = linear_sum_assignment(matrix)
scipy_cost = matrix[scipy_rows, scipy_cols].sum()
scipy_end = time.time()
print(
f"scipy.optimize.linear_sum_assignment: Time={scipy_end - start:.8f}s"
)
print(
f"scipy.optimize.linear_sum_assignment: Cost={scipy_cost}, Rows={list(scipy_rows)}, Cols={list(scipy_cols)}"
)
if algo == "lapjv":
start = time.time()
lap_cost, lap_rows, lap_cols = lap.lapjv(matrix, extend_cost=True)
lap_end = time.time()
print(f"lap.lapjv: Time={lap_end - start:.8f}s")
print(f"lap.lapjv: Cost={lap_cost}, Rows={lap_rows}, Cols={lap_cols}")
fastlap_cost, fastlap_rows, fastlap_cols = fastlap.solve_lap(matrix, algo)
fastlap_end = time.time()
print(f"fastlap.{algo}: Time={fastlap_end - start:.8f}s")
# print(
# f"fastlap.{algo}: Cost={fastlap_cost}, Rows={list(fastlap_rows)}, Cols={list(fastlap_cols)}"
# )
if algo == "hungarian":
start = time.time()
scipy_rows, scipy_cols = linear_sum_assignment(matrix)
scipy_cost = matrix[scipy_rows, scipy_cols].sum()
scipy_end = time.time()
print(
f"scipy.optimize.linear_sum_assignment: Time={scipy_end - start:.8f}s"
)
print(
f"scipy.optimize.linear_sum_assignment: Cost={scipy_cost}, Rows={list(scipy_rows)}, Cols={list(scipy_cols)}"
)
if algo == "lapjv":
start = time.time()
lap_cost, lap_rows, lap_cols = lap.lapjv(matrix, extend_cost=True)
lap_end = time.time()
print(f"lap.lapjv: Time={lap_end - start:.8f}s")
# print(f"lap.lapjv: Cost={lap_cost}, Rows={lap_rows}, Cols={lap_cols}")

"""
First release:
Expand All @@ -49,4 +53,5 @@
fastlap.hungarian: Cost=0.7465856501551806, Rows=[2, 0, 1, 3], Cols=[1, 2, 0, 3, 18446744073709551615]
scipy.optimize.linear_sum_assignment: Time=0.00001287s
scipy.optimize.linear_sum_assignment: Cost=0.6229432588732741, Rows=[0, 1, 2, 3], Cols=[2, 0, 1, 4]

"""
113 changes: 113 additions & 0 deletions src/lap.rs
Original file line number Diff line number Diff line change
Expand Up @@ -228,3 +228,116 @@ pub fn hungarian(matrix: Vec<Vec<f64>>) -> (f64, Vec<usize>, Vec<usize>) {

(total_cost, row_assign, col_assign)
}

/// Solve the linear assignment problem with the shortest-augmenting-path
/// (Jonker–Volgenant style) algorithm in O(dim^3).
///
/// Accepts a dense, possibly rectangular cost matrix (all rows are assumed
/// to have `matrix[0].len()` columns — TODO confirm callers guarantee this)
/// and returns `(total_cost, row_assign, col_assign)`:
/// * `row_assign[i]` — column matched to row `i`, or `usize::MAX` if row `i`
///   is unmatched (only possible for rectangular inputs / padding),
/// * `col_assign[j]` — row matched to column `j`, or `usize::MAX` likewise,
/// * `total_cost` — sum of `matrix[i][j]` over real matched pairs only.
///
/// Rectangular inputs are padded to square with zero-cost dummy rows/columns;
/// a constant pad adds the same amount to every complete matching, so the
/// optimum on the real sub-matrix is preserved. Dummy matches are reported
/// as `usize::MAX`.
pub fn lapmod(matrix: Vec<Vec<f64>>) -> (f64, Vec<usize>, Vec<usize>) {
    let n = matrix.len();
    if n == 0 {
        return (0.0, vec![], vec![]);
    }
    let m = matrix[0].len();
    let dim = n.max(m);

    // Virtual zero-cost padding; avoids materialising a padded matrix and
    // avoids INFINITY entries (which would poison the dual arithmetic).
    let cost = |i: usize, j: usize| -> f64 {
        if i < n && j < m {
            matrix[i][j]
        } else {
            0.0
        }
    };

    // Dual potentials and matching. Column index `dim` is a virtual start
    // column used to seed each augmentation (classic e-maxx formulation).
    let mut u = vec![0.0_f64; dim]; // row potentials
    let mut v = vec![0.0_f64; dim + 1]; // column potentials (+ virtual column)
    let mut p = vec![usize::MAX; dim + 1]; // p[j] = row matched to column j
    let mut way = vec![0_usize; dim + 1]; // predecessor column on the tree

    for i in 0..dim {
        // Start a new augmentation for row `i` from the virtual column.
        p[dim] = i;
        let mut j0 = dim;
        let mut minv = vec![f64::INFINITY; dim + 1]; // min reduced cost to reach column j
        let mut used = vec![false; dim + 1]; // columns already in the tree

        // Dijkstra over reduced costs until an unmatched column is reached.
        loop {
            used[j0] = true;
            let i0 = p[j0]; // every used column is matched, so i0 is a valid row
            let mut delta = f64::INFINITY;
            let mut j1 = dim;
            for j in 0..dim {
                if !used[j] {
                    let cur = cost(i0, j) - u[i0] - v[j];
                    if cur < minv[j] {
                        minv[j] = cur;
                        way[j] = j0;
                    }
                    if minv[j] < delta {
                        delta = minv[j];
                        j1 = j;
                    }
                }
            }
            // Shift potentials so that reduced costs inside the tree stay zero
            // and slacks outside shrink by delta.
            for j in 0..=dim {
                if used[j] {
                    u[p[j]] += delta;
                    v[j] -= delta;
                } else {
                    minv[j] -= delta;
                }
            }
            j0 = j1;
            if p[j0] == usize::MAX {
                break; // reached a free column: augmenting path found
            }
        }

        // Augment: flip the matching along the alternating path back to the
        // virtual start column.
        while j0 != dim {
            let j1 = way[j0];
            p[j0] = p[j1];
            j0 = j1;
        }
    }

    // Translate the column-indexed matching into both directions.
    let mut row_assign = vec![usize::MAX; dim];
    let mut col_assign = vec![usize::MAX; dim];
    for j in 0..dim {
        let i = p[j]; // after dim augmentations every column is matched
        row_assign[i] = j;
        col_assign[j] = i;
    }

    // Total cost over real (unpadded) pairs only.
    let total_cost: f64 = (0..n)
        .filter(|&i| row_assign[i] < m)
        .map(|i| matrix[i][row_assign[i]])
        .sum();

    // Report dummy rows/columns and dummy partners as unmatched.
    for i in 0..dim {
        if i >= n || row_assign[i] >= m {
            row_assign[i] = usize::MAX;
        }
    }
    for j in 0..dim {
        if j >= m || col_assign[j] >= n {
            col_assign[j] = usize::MAX;
        }
    }

    (total_cost, row_assign, col_assign)
}
2 changes: 1 addition & 1 deletion src/lib.rs
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
use numpy::{PyArray2, PyArrayMethods};
use numpy::PyArray2;
use pyo3::prelude::*;

pub mod lap;
Expand Down
15 changes: 7 additions & 8 deletions src/matrix.rs
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,9 @@ use pyo3::prelude::*;
use sprs::CsMat;

/// Convert a dense NumPy array to Vec<Vec<f64>>
pub fn extract_dense_matrix<'py>(cost_matrix: &Bound<'py, PyArray2<f64>>) -> PyResult<Vec<Vec<f64>>> {
pub fn extract_dense_matrix<'py>(
cost_matrix: &Bound<'py, PyArray2<f64>>,
) -> PyResult<Vec<Vec<f64>>> {
let matrix: Vec<Vec<f64>> = cost_matrix
.readonly()
.as_array()
Expand All @@ -16,15 +18,12 @@ pub fn extract_dense_matrix<'py>(cost_matrix: &Bound<'py, PyArray2<f64>>) -> PyR
}

/// Convert a scipy.sparse.csr_matrix to Vec<Vec<f64>>
pub fn extract_sparse_matrix<'py>(cost_matrix: &Bound<'py, PyArray2<f64>>) -> PyResult<Vec<Vec<f64>>> {
pub fn extract_sparse_matrix<'py>(cost_matrix: &Bound<'py, PyAny>) -> PyResult<Vec<Vec<f64>>> {
let indptr: PyReadonlyArray1<usize> = cost_matrix.getattr("indptr")?.extract()?;
let indices: PyReadonlyArray1<usize> = cost_matrix.getattr("indices")?.extract()?;
let data: PyReadonlyArray1<f64> = cost_matrix.getattr("data")?.extract()?;


let shape: (usize, usize) = cost_matrix
.getattr("shape")?
.extract::<(usize, usize)>()?;

let shape: (usize, usize) = cost_matrix.getattr("shape")?.extract::<(usize, usize)>()?;

let csr = CsMat::new(
shape,
Expand Down Expand Up @@ -76,4 +75,4 @@ pub fn validate_matrix(matrix: Vec<Vec<f64>>) -> PyResult<Vec<Vec<f64>>> {
} else {
Ok(matrix)
}
}
}
Loading