From e9cbd2fd664beba0b27608fb1416ed2920fb03c9 Mon Sep 17 00:00:00 2001
From: Treemann
Date: Wed, 10 Apr 2024 18:10:38 +0800
Subject: [PATCH] Fix logging issue in llama.py

---
 lwm/llama.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/lwm/llama.py b/lwm/llama.py
index 8a8871c..0e957f2 100644
--- a/lwm/llama.py
+++ b/lwm/llama.py
@@ -571,10 +571,10 @@ def __call__(
             platform = xla_bridge.get_backend().platform
             if platform == "tpu":
-                logging.info(f"Using fused attention for {platform}")
+                logger.info(f"Using fused attention for {platform}")
                 ring_attention_fn = ring_flash_attention_tpu
             else:
-                logging.info(f"Fused attention is not yet supported for {platform}, using non-fused version")
+                logger.info(f"Fused attention is not yet supported for {platform}, using non-fused version")
                 ring_attention_fn = ring_attention # uses BPT attention
             ring_attention_sharded = shard_map(
                 partial(
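Note: the change swaps calls on the root logging module for calls on a named
logger object, so it assumes lwm/llama.py defines a module-level logger. A
minimal sketch of that assumed setup (the names and the demo block below are
illustrative, not the repo's exact code):

    import logging

    # Assumed module-level logger for lwm/llama.py: logger.info(...) routes
    # messages through a logger named after the module, so they carry the
    # module name and honor per-module level configuration, unlike calling
    # logging.info(...) on the root logger directly.
    logger = logging.getLogger(__name__)

    if __name__ == "__main__":
        logging.basicConfig(level=logging.INFO)
        # Example message matching the patched code path.
        logger.info("Using fused attention for tpu")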