From ff4573772d907a6655f80c90cc2d1038acfd1469 Mon Sep 17 00:00:00 2001 From: Redddy Date: Sun, 8 Mar 2026 01:10:29 +0000 Subject: [PATCH 1/4] Remove TODO in .cspell.json --- .cspell.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.cspell.json b/.cspell.json index 556432d69a4..a2856029c2c 100644 --- a/.cspell.json +++ b/.cspell.json @@ -22,7 +22,7 @@ "src/intrinsic/llvm.rs" ], "ignoreRegExpList": [ - "/(FIXME|NOTE|TODO)\\([^)]+\\)/", + "/(FIXME|NOTE)\\([^)]+\\)/", "__builtin_\\w*" ] } From fb86979255df283c5f0951be12cc4e8f7f06429c Mon Sep 17 00:00:00 2001 From: Redddy Date: Mon, 6 Apr 2026 01:27:10 +0000 Subject: [PATCH 2/4] Replace TODO to FIXME --- src/intrinsic/mod.rs | 2 +- tests/lang_tests.rs | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/intrinsic/mod.rs b/src/intrinsic/mod.rs index 36e5543b0c4..ca0430dd748 100644 --- a/src/intrinsic/mod.rs +++ b/src/intrinsic/mod.rs @@ -206,7 +206,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tc &args.iter().map(|arg| arg.immediate()).collect::>(), ) } - // TODO(antoyo): We can probably remove these and use the fallback intrinsic implementation. + // FIXME(antoyo): We can probably remove these and use the fallback intrinsic implementation. sym::minimumf32 | sym::minimumf64 | sym::maximumf32 | sym::maximumf64 => { let (ty, func_name) = match name { sym::minimumf32 => (self.cx.float_type, "fminimumf"), diff --git a/tests/lang_tests.rs b/tests/lang_tests.rs index 3d1d04661c8..6afd54e1c3f 100644 --- a/tests/lang_tests.rs +++ b/tests/lang_tests.rs @@ -19,7 +19,7 @@ fn compile_and_run_cmds( // Test command 2: run `tempdir/x`. if test_target.is_some() { let mut env_path = std::env::var("PATH").unwrap_or_default(); - // TODO(antoyo): find a better way to add the PATH necessary locally. + // FIXME(antoyo): find a better way to add the PATH necessary locally. 
env_path = format!("/opt/m68k-unknown-linux-gnu/bin:{}", env_path); compiler.env("PATH", env_path); @@ -112,7 +112,7 @@ fn build_test_runner( println!("=== {test_kind} tests ==="); - // TODO(antoyo): find a way to send this via a cli argument. + // FIXME(antoyo): find a way to send this via a cli argument. let test_target = std::env::var("CG_GCC_TEST_TARGET").ok(); let test_target_filter = test_target.clone(); From a44d990a4fae3f4e3758a18760d2b50f5e2e1bbd Mon Sep 17 00:00:00 2001 From: Redddy Date: Sat, 7 Mar 2026 10:47:36 +0000 Subject: [PATCH 3/4] Add TODO checker --- .github/workflows/ci.yml | 3 ++ build_system/src/main.rs | 7 +++- build_system/src/todo.rs | 69 ++++++++++++++++++++++++++++++++++++++++ 3 files changed, 78 insertions(+), 1 deletion(-) create mode 100644 build_system/src/todo.rs diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index fa9535a3729..2c297157a86 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -88,6 +88,9 @@ jobs: - name: Check formatting run: ./y.sh fmt --check + - name: Check todo + run: ./y.sh check-todo + - name: clippy run: | cargo clippy --all-targets -- -D warnings diff --git a/build_system/src/main.rs b/build_system/src/main.rs index ae975c94fff..150239cbbb4 100644 --- a/build_system/src/main.rs +++ b/build_system/src/main.rs @@ -12,6 +12,7 @@ mod prepare; mod rust_tools; mod rustc_info; mod test; +mod todo; mod utils; const BUILD_DIR: &str = "build"; @@ -45,7 +46,8 @@ Commands: clone-gcc : Clones the GCC compiler from a specified source. 
fmt : Runs rustfmt fuzz : Fuzzes `cg_gcc` using rustlantis - abi-test : Runs the abi-cafe test suite on the codegen, checking for ABI compatibility with LLVM" + abi-test : Runs the abi-cafe test suite on the codegen, checking for ABI compatibility with LLVM + check-todo : Checks todo in the project" ); } @@ -61,6 +63,7 @@ pub enum Command { Fmt, Fuzz, AbiTest, + CheckTodo, } fn main() { @@ -80,6 +83,7 @@ fn main() { Some("info") => Command::Info, Some("clone-gcc") => Command::CloneGcc, Some("abi-test") => Command::AbiTest, + Some("check-todo") => Command::CheckTodo, Some("fmt") => Command::Fmt, Some("fuzz") => Command::Fuzz, Some("--help") => { @@ -106,6 +110,7 @@ fn main() { Command::Fmt => fmt::run(), Command::Fuzz => fuzz::run(), Command::AbiTest => abi_test::run(), + Command::CheckTodo => todo::run(), } { eprintln!("Command failed to run: {e}"); process::exit(1); diff --git a/build_system/src/todo.rs b/build_system/src/todo.rs new file mode 100644 index 00000000000..b66efe0da70 --- /dev/null +++ b/build_system/src/todo.rs @@ -0,0 +1,69 @@ +use std::ffi::OsStr; +use std::fs; +use std::path::{Path, PathBuf}; +use std::process::Command; + +const EXTENSIONS: &[&str] = + &["rs", "py", "js", "sh", "c", "cpp", "h", "md", "css", "ftl", "toml", "yml", "yaml"]; + +fn has_supported_extension(path: &Path) -> bool { + path.extension().is_some_and(|ext| EXTENSIONS.iter().any(|e| ext == OsStr::new(e))) +} + +fn list_tracked_files() -> Result, String> { + let output = Command::new("git") + .args(["ls-files", "-z"]) + .output() + .map_err(|e| format!("Failed to run `git ls-files`: {e}"))?; + + if !output.status.success() { + let stderr = String::from_utf8_lossy(&output.stderr); + return Err(format!("`git ls-files` failed: {stderr}")); + } + + let mut files = Vec::new(); + for entry in output.stdout.split(|b| *b == 0) { + if entry.is_empty() { + continue; + } + let path = std::str::from_utf8(entry).unwrap(); + files.push(PathBuf::from(path)); + } + + Ok(files) +} + +pub(crate) 
fn run() -> Result<(), String> { + let files = list_tracked_files()?; + let mut error_count = 0; + // Avoid embedding the task marker in source so greps only find real occurrences. + let todo_marker = "todo".to_ascii_uppercase(); + + for file in files { + if !has_supported_extension(&file) { + continue; + } + + let bytes = fs::read(&file).unwrap(); + let contents = std::str::from_utf8(&bytes).unwrap(); + + for (i, line) in contents.split('\n').enumerate() { + let trimmed = line.trim(); + if trimmed.contains(&todo_marker) { + eprintln!( + "{}:{}: {} is used for tasks that should be done before merging a PR; if you want to leave a message in the codebase use FIXME", + file.display(), + i + 1, + todo_marker + ); + error_count += 1; + } + } + } + + if error_count == 0 { + return Ok(()); + } + + Err(format!("found {} {}(s)", error_count, todo_marker)) +} From 762cdb27d0a47d2f01499bc5168804bc2310de8b Mon Sep 17 00:00:00 2001 From: Redddy Date: Thu, 9 Apr 2026 19:51:51 +0900 Subject: [PATCH 4/4] Clarify FIXME comments --- .github/workflows/failures.yml | 6 +- .github/workflows/m68k.yml | 4 +- .github/workflows/stdarch.yml | 4 +- build_system/src/prepare.rs | 2 +- build_system/src/test.rs | 12 ++-- doc/debugging-libgccjit.md | 2 +- doc/subtree.md | 2 +- example/mini_core_hello_world.rs | 2 +- src/abi.rs | 4 +- src/allocator.rs | 4 +- src/asm.rs | 24 +++---- src/attributes.rs | 6 +- src/back/lto.rs | 6 +- src/back/write.rs | 12 ++-- src/base.rs | 8 +-- src/builder.rs | 106 +++++++++++++++---------------- src/context.rs | 26 ++++---- src/coverageinfo.rs | 2 +- src/debuginfo.rs | 12 ++-- src/declare.rs | 18 +++--- src/int.rs | 33 +++++----- src/intrinsic/llvm.rs | 4 +- src/intrinsic/mod.rs | 22 +++---- src/intrinsic/simd.rs | 36 +++++------ src/lib.rs | 12 ++-- src/mono_item.rs | 8 +-- src/type_.rs | 8 +-- src/type_of.rs | 4 +- tests/lang_tests_common.rs | 4 +- 29 files changed, 197 insertions(+), 196 deletions(-) diff --git a/.github/workflows/failures.yml 
b/.github/workflows/failures.yml index 2c9e4950706..49c49c7d3cd 100644 --- a/.github/workflows/failures.yml +++ b/.github/workflows/failures.yml @@ -1,4 +1,4 @@ -# FIXME: refactor to avoid duplication with the ci.yml file. +# FIXME: This workflow duplicates ci.yml. name: Failures on: @@ -94,7 +94,7 @@ jobs: run: ./y.sh prepare - name: Run tests - # FIXME: re-enable those tests for libgccjit 12. + # FIXME: libgccjit 12 still fails these tests. if: matrix.libgccjit_version.gcc != 'libgccjit12.so' id: tests run: | @@ -102,7 +102,7 @@ jobs: rg --text "test result" output_log >> $GITHUB_STEP_SUMMARY - name: Run failing ui pattern tests for ICE - # FIXME: re-enable those tests for libgccjit 12. + # FIXME: libgccjit 12 still fails these tests. if: matrix.libgccjit_version.gcc != 'libgccjit12.so' id: ui-tests run: | diff --git a/.github/workflows/m68k.yml b/.github/workflows/m68k.yml index 8ed3545ee7d..dabce20929a 100644 --- a/.github/workflows/m68k.yml +++ b/.github/workflows/m68k.yml @@ -1,4 +1,4 @@ -# FIXME: check if qemu-user-static-binfmt is needed (perhaps to run some tests since it probably calls exec). +# FIXME: It is still unclear whether qemu-user-static-binfmt is required for tests that call exec. name: m68k CI @@ -24,7 +24,7 @@ jobs: matrix: commands: [ "--std-tests", - # FIXME(antoyo): fix those on m68k. + # FIXME(antoyo): These tests are still broken on m68k. #"--test-libcore", #"--extended-rand-tests", #"--extended-regex-example-tests", diff --git a/.github/workflows/stdarch.yml b/.github/workflows/stdarch.yml index 66f30b147b4..a30573d4777 100644 --- a/.github/workflows/stdarch.yml +++ b/.github/workflows/stdarch.yml @@ -38,7 +38,7 @@ jobs: - name: Install packages run: sudo apt-get install ninja-build ripgrep - # FIXME: remove when we have binutils version 2.43 in the repo. + # FIXME: This binutils workaround is required until the repo ships binutils 2.43. 
- name: Install more recent binutils run: | echo "deb http://archive.ubuntu.com/ubuntu plucky main universe" | sudo tee /etc/apt/sources.list.d/plucky-copies.list @@ -96,7 +96,7 @@ jobs: if: ${{ matrix.cargo_runner }} run: | # FIXME: these tests fail when the sysroot is compiled with LTO because of a missing symbol in proc-macro. - # FIXME: remove --skip test_tile_ when it's implemented. + # FIXME: test_tile_ must stay skipped until the tile builtins are implemented. STDARCH_TEST_SKIP_FUNCTION="xsave,xsaveopt,xsave64,xsaveopt64" STDARCH_TEST_EVERYTHING=1 CHANNEL=release CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_RUNNER="${{ matrix.cargo_runner }}" TARGET=x86_64-unknown-linux-gnu CG_RUSTFLAGS="-Ainternal_features" ./y.sh cargo test --manifest-path build/build_sysroot/sysroot_src/library/stdarch/Cargo.toml -- --skip rtm --skip tbm --skip sse4a --skip test_tile_ # Summary job for the merge queue. diff --git a/build_system/src/prepare.rs b/build_system/src/prepare.rs index b62527f241f..81dbb3c5b79 100644 --- a/build_system/src/prepare.rs +++ b/build_system/src/prepare.rs @@ -148,7 +148,7 @@ fn prepare_libcore( Ok(()) } -// FIXME: remove when we can ignore warnings in rustdoc tests. +// FIXME: This workaround is still required because rustdoc tests cannot ignore warnings. fn prepare_rand() -> Result<(), String> { // Apply patch for the rand crate. let file_path = "patches/crates/0001-Remove-deny-warnings.patch"; diff --git a/build_system/src/test.rs b/build_system/src/test.rs index 3f02df83995..377a9b35d2f 100644 --- a/build_system/src/test.rs +++ b/build_system/src/test.rs @@ -286,7 +286,7 @@ fn build_sysroot(env: &Env, args: &TestArg) -> Result<(), String> { Ok(()) } -// FIXME(GuillaumeGomez): when rewriting in Rust, refactor with the code in tests/lang_tests_common.rs if possible. +// FIXME(GuillaumeGomez): This still duplicates tests/lang_tests_common.rs; keep them in sync until the Rust rewrite. 
fn maybe_run_command_in_vm( command: &[&dyn AsRef], env: &Env, @@ -708,16 +708,16 @@ fn test_projects(env: &Env, args: &TestArg) -> Result<(), String> { "https://github.com/BurntSushi/memchr", "https://github.com/dtolnay/itoa", "https://github.com/rust-lang/cfg-if", - //"https://github.com/rust-lang-nursery/lazy-static.rs", // FIXME: re-enable when the + //"https://github.com/rust-lang-nursery/lazy-static.rs", // FIXME: This stays disabled until the //failing test is fixed upstream. //"https://github.com/marshallpierce/rust-base64", // FIXME: one test is OOM-killed. - // FIXME: ignore the base64 test that is OOM-killed. + // FIXME: The OOM-killed base64 test still needs to be ignored. //"https://github.com/time-rs/time", // FIXME: one test fails (https://github.com/time-rs/time/issues/719). "https://github.com/rust-lang/log", "https://github.com/bitflags/bitflags", //"https://github.com/serde-rs/serde", // FIXME: one test fails. - //"https://github.com/rayon-rs/rayon", // FIXME: very slow, only run on master? - //"https://github.com/rust-lang/cargo", // FIXME: very slow, only run on master? + //"https://github.com/rayon-rs/rayon", // FIXME: This is too slow for regular CI runs; keep it for master-only runs if re-enabled. + //"https://github.com/rust-lang/cargo", // FIXME: This is too slow for regular CI runs; keep it for master-only runs if re-enabled. ]; let mut env = env.clone(); @@ -759,7 +759,7 @@ fn test_libcore(env: &Env, args: &TestArg) -> Result<(), String> { println!("[TEST] libcore"); let path = get_sysroot_dir().join("sysroot_src/library/coretests"); let _ = remove_dir_all(path.join("target")); - // FIXME(antoyo): run in release mode when we fix the failures. + // FIXME(antoyo): test_libcore still fails in release mode. 
run_cargo_command(&[&"test"], Some(&path), env, args)?; Ok(()) } diff --git a/doc/debugging-libgccjit.md b/doc/debugging-libgccjit.md index b2741b4134d..bfd4c45717b 100644 --- a/doc/debugging-libgccjit.md +++ b/doc/debugging-libgccjit.md @@ -71,4 +71,4 @@ Maybe by calling the following at the beginning of gdb: set substitute-path /usr/src/debug/gcc /path/to/gcc-repo/gcc ``` -FIXME(antoyo): but that's not what I remember I was doing. +FIXME(antoyo): These debugging notes are stale; this is not what I remember doing. diff --git a/doc/subtree.md b/doc/subtree.md index a81b6c9c74b..a9120a46c2c 100644 --- a/doc/subtree.md +++ b/doc/subtree.md @@ -47,6 +47,6 @@ git push PATH="$HOME/bin:$PATH" ~/bin/git-subtree push -P compiler/rustc_codegen_gcc/ ../rustc_codegen_gcc/ sync_branch_name ``` -FIXME: write a script that does the above. +FIXME: This subtree push flow is still manual. https://rust-lang.zulipchat.com/#narrow/stream/301329-t-devtools/topic/subtree.20madness/near/258877725 diff --git a/example/mini_core_hello_world.rs b/example/mini_core_hello_world.rs index ab841d51a7f..15140801946 100644 --- a/example/mini_core_hello_world.rs +++ b/example/mini_core_hello_world.rs @@ -275,7 +275,7 @@ fn main() { } } - // FIXME(antoyo): to make this work, support weak linkage. + // FIXME(antoyo): This example still depends on unsupported weak linkage. //unsafe { assert_eq!(ABC as usize, 0); } &mut (|| Some(0 as *const ())) as &mut dyn FnMut() -> Option<*const ()>; diff --git a/src/abi.rs b/src/abi.rs index 4d1274a63d1..7644d999a56 100644 --- a/src/abi.rs +++ b/src/abi.rs @@ -105,7 +105,7 @@ pub struct FnAbiGcc<'gcc> { } pub trait FnAbiGccExt<'gcc, 'tcx> { - // FIXME(antoyo): return a function pointer type instead? + // FIXME(antoyo): This should return a function pointer type instead of the current representation. 
fn gcc_type(&self, cx: &CodegenCx<'gcc, 'tcx>) -> FnAbiGcc<'gcc>; fn ptr_to_gcc_type(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>; #[cfg(feature = "master")] @@ -260,7 +260,7 @@ pub fn conv_to_fn_attribute<'gcc>(conv: CanonAbi, arch: &Arch) -> Option FnAttribute::NvptxKernel, arch => panic!("Arch {arch} does not support GpuKernel calling convention"), }, - // FIXME(antoyo): check if those AVR attributes are mapped correctly. + // FIXME(antoyo): The AVR attribute mapping is still unverified. CanonAbi::Interrupt(interrupt_kind) => match interrupt_kind { InterruptKind::Avr => FnAttribute::AvrSignal, InterruptKind::AvrNonBlocking => FnAttribute::AvrInterrupt, diff --git a/src/allocator.rs b/src/allocator.rs index 6b92174ddf1..295f12dda3b 100644 --- a/src/allocator.rs +++ b/src/allocator.rs @@ -98,7 +98,7 @@ fn create_wrapper_function( ))); if tcx.sess.must_emit_unwind_tables() { - // FIXME(antoyo): emit unwind tables. + // FIXME(antoyo): Unwind tables are still not emitted here. } let block = func.new_block("entry"); @@ -138,6 +138,6 @@ fn create_wrapper_function( block.end_with_void_return(None); } - // FIXME(@Commeownist): Check if we need to emit some extra debugging info in certain circumstances + // FIXME(@Commeownist): Some cases may still be missing extra debugging info here // as described in https://github.com/rust-lang/rust/commit/77a96ed5646f7c3ee8897693decc4626fe380643 } diff --git a/src/asm.rs b/src/asm.rs index 83bfe18ce7b..da1e15492dc 100644 --- a/src/asm.rs +++ b/src/asm.rs @@ -308,13 +308,13 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { } InlineAsmOperandRef::SymFn { instance } => { - // FIXME(@Amanieu): Additional mangling is needed on + // FIXME(@Amanieu): Additional mangling is still missing on // some targets to add a leading underscore (Mach-O) // or byte count suffixes (x86 Windows). 
constants_len += self.tcx.symbol_name(instance).name.len(); } InlineAsmOperandRef::SymStatic { def_id } => { - // FIXME(@Amanieu): Additional mangling is needed on + // FIXME(@Amanieu): Additional mangling is still missing on // some targets to add a leading underscore (Mach-O). constants_len += self.tcx.symbol_name(Instance::mono(self.tcx, def_id)).name.len(); @@ -440,7 +440,7 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { match *piece { InlineAsmTemplatePiece::String(ref string) => { for char in string.chars() { - // FIXME(antoyo): might also need to escape | if rustc doesn't do it. + // FIXME(antoyo): Escaping `|` may still be missing if rustc does not already do it. let escaped_char = match char { '%' => "%%", '{' => "%{", @@ -496,7 +496,7 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { } InlineAsmOperandRef::SymFn { instance } => { - // FIXME(@Amanieu): Additional mangling is needed on + // FIXME(@Amanieu): Additional mangling is still missing on // some targets to add a leading underscore (Mach-O) // or byte count suffixes (x86 Windows). let name = self.tcx.symbol_name(instance).name; @@ -504,7 +504,7 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { } InlineAsmOperandRef::SymStatic { def_id } => { - // FIXME(@Amanieu): Additional mangling is needed on + // FIXME(@Amanieu): Additional mangling is still missing on // some targets to add a leading underscore (Mach-O). let instance = Instance::mono(self.tcx, def_id); let name = self.tcx.symbol_name(instance).name; @@ -564,7 +564,7 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { InlineAsmArch::PowerPC | InlineAsmArch::PowerPC64 => { // "cc" is cr0 on powerpc. } - // FIXME(@Commeownist): I'm not 100% sure this one clobber is sufficient + // FIXME(@Commeownist): This clobber may still be insufficient // on all architectures. For instance, what about FP stack? 
_ => { extended_asm.add_clobber("cc"); @@ -578,7 +578,7 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { extended_asm.set_volatile_flag(true); } if !options.contains(InlineAsmOptions::NOSTACK) { - // FIXME(@Commeownist): figure out how to align stack + // FIXME(@Commeownist): The stack is still left unaligned here } if dest.is_none() && options.contains(InlineAsmOptions::NORETURN) { let builtin_unreachable = self.context.get_builtin_function("__builtin_unreachable"); @@ -647,7 +647,7 @@ fn explicit_reg_to_gcc(reg: InlineAsmReg) -> &'static str { // For explicit registers, we have to create a register variable: https://stackoverflow.com/a/31774784/389119 match reg { InlineAsmReg::X86(reg) => { - // FIXME(antoyo): add support for vector register. + // FIXME(antoyo): Vector registers are still unsupported. match reg.reg_class() { X86InlineAsmRegClass::reg_byte => { // GCC does not support the `b` suffix, so we just strip it @@ -908,7 +908,7 @@ impl<'gcc, 'tcx> AsmCodegenMethods<'tcx> for CodegenCx<'gcc, 'tcx> { GlobalAsmOperandRef::SymFn { instance } => { let function = get_fn(self, instance); self.add_used_function(function); - // FIXME(@Amanieu): Additional mangling is needed on + // FIXME(@Amanieu): Additional mangling is still missing on // some targets to add a leading underscore (Mach-O) // or byte count suffixes (x86 Windows). let name = self.tcx.symbol_name(instance).name; @@ -916,8 +916,8 @@ impl<'gcc, 'tcx> AsmCodegenMethods<'tcx> for CodegenCx<'gcc, 'tcx> { } GlobalAsmOperandRef::SymStatic { def_id } => { - // FIXME(antoyo): set the global variable as used. - // FIXME(@Amanieu): Additional mangling is needed on + // FIXME(antoyo): Global variables referenced from global_asm are still not marked as used. + // FIXME(@Amanieu): Additional mangling is still missing on // some targets to add a leading underscore (Mach-O). 
let instance = Instance::mono(self.tcx, def_id); let name = self.tcx.symbol_name(instance).name; @@ -944,7 +944,7 @@ impl<'gcc, 'tcx> AsmCodegenMethods<'tcx> for CodegenCx<'gcc, 'tcx> { } fn mangled_name(&self, instance: Instance<'tcx>) -> String { - // FIXME(@Amanieu): Additional mangling is needed on + // FIXME(@Amanieu): Additional mangling is still missing on // some targets to add a leading underscore (Mach-O) // or byte count suffixes (x86 Windows). self.tcx.symbol_name(instance).name.to_string() diff --git a/src/attributes.rs b/src/attributes.rs index ce1877b308e..a526f660b76 100644 --- a/src/attributes.rs +++ b/src/attributes.rs @@ -126,19 +126,19 @@ pub fn from_fn_attrs<'gcc, 'tcx>( .map(|features| features.name.as_str()) .flat_map(|feat| to_gcc_features(cx.tcx.sess, feat).into_iter()) .chain(codegen_fn_attrs.instruction_set.iter().map(|x| match *x { - InstructionSetAttr::ArmA32 => "-thumb-mode", // FIXME(antoyo): support removing feature. + InstructionSetAttr::ArmA32 => "-thumb-mode", // FIXME(antoyo): Feature removal is still unsupported. InstructionSetAttr::ArmT32 => "thumb-mode", })) .collect::>(); - // FIXME(antoyo): cg_llvm adds global features to each function so that LTO keep them. + // FIXME(antoyo): GCC may still require the per-function global features that cg_llvm adds for LTO. // Check if GCC requires the same. let mut global_features = cx.tcx.global_backend_features(()).iter().map(|s| s.as_str()); function_features.extend(&mut global_features); let target_features = function_features .iter() .filter_map(|feature| { - // FIXME(antoyo): support soft-float. + // FIXME(antoyo): soft-float is still ignored here. if feature.contains("soft-float") { return None; } diff --git a/src/back/lto.rs b/src/back/lto.rs index aee164cf322..a45ed2ebfcd 100644 --- a/src/back/lto.rs +++ b/src/back/lto.rs @@ -10,7 +10,7 @@ // Maybe that's because the combined object files contain the IR (true) and the final link // does not remove it? 
// -// FIXME(antoyo): for performance, check which optimizations the C++ frontend enables. +// FIXME(antoyo): LTO performance may still differ because we do not mirror the C++ frontend optimizations. // cSpell:disable // Fix these warnings: // /usr/bin/ld: warning: type of symbol `_RNvNvNvNtCs5JWOrf9uCus_5rayon11thread_pool19WORKER_THREAD_STATE7___getit5___KEY' changed from 1 to 6 in /tmp/ccKeUSiR.ltrans0.ltrans.o @@ -39,7 +39,7 @@ use crate::errors::LtoBitcodeFromRlib; use crate::{GccCodegenBackend, GccContext, LtoMode, to_gcc_opt_level}; struct LtoData { - // FIXME(antoyo): use symbols_below_threshold. + // FIXME(antoyo): symbols_below_threshold is still unused. //symbols_below_threshold: Vec, upstream_modules: Vec<(SerializedModule, CString)>, tmp_path: TempDir, @@ -173,7 +173,7 @@ fn fat_lto( .filter(|&(_, module)| module.kind == ModuleKind::Regular) .map(|(i, _module)| { //let cost = unsafe { llvm::LLVMRustModuleCost(module.module_llvm.llmod()) }; - // FIXME(antoyo): compute the cost of a module if GCC allows this. + // FIXME(antoyo): Module costs are still not computed even when GCC could use them. (0, i) }) .max(); diff --git a/src/back/write.rs b/src/back/write.rs index 64674423de2..4e0425808c8 100644 --- a/src/back/write.rs +++ b/src/back/write.rs @@ -44,7 +44,7 @@ pub(crate) fn codegen( let _timer = prof.generic_activity_with_arg("GCC_module_codegen_make_bitcode", &*module.name); - // FIXME(antoyo) + // FIXME(antoyo): unimplemented. /*if let Some(bitcode_filename) = bc_out.file_name() { cgcx.prof.artifact_size( "llvm_bitcode", @@ -68,14 +68,14 @@ pub(crate) fn codegen( let _timer = prof .generic_activity_with_arg("GCC_module_codegen_embed_bitcode", &*module.name); if lto_supported { - // FIXME(antoyo): maybe we should call embed_bitcode to have the proper iOS fixes? + // FIXME(antoyo): iOS-specific embed_bitcode fixes are still missing here. 
//embed_bitcode(cgcx, llcx, llmod, &config.bc_cmdline, data); context.add_command_line_option("-flto=auto"); context.add_command_line_option("-flto-partition=one"); context.add_command_line_option("-ffat-lto-objects"); } - // FIXME(antoyo): Send -plugin/usr/lib/gcc/x86_64-pc-linux-gnu/11.1.0/liblto_plugin.so to linker (this should be done when specifying the appropriate rustc cli argument). + // FIXME(antoyo): The linker plugin path is still not forwarded through the rustc CLI arguments. context .compile_to_file(OutputKind::ObjectFile, bc_out.to_str().expect("path to str")); } @@ -113,7 +113,7 @@ pub(crate) fn codegen( { println!("Dumping reproducer {}", module.name); let _ = fs::create_dir("/tmp/reproducers"); - // FIXME(antoyo): segfault in dump_reproducer_to_file() might be caused by + // FIXME(antoyo): The segfault in dump_reproducer_to_file() may still be caused by // transmuting an rvalue to an lvalue. // Segfault is actually in gcc::jit::reproducer::get_identifier_as_lvalue context.dump_reproducer_to_file(format!("/tmp/reproducers/{}.c", module.name)); @@ -135,7 +135,7 @@ pub(crate) fn codegen( // NOTE: without -fuse-linker-plugin, we get the following error: // lto1: internal compiler error: decompressed stream: Destination buffer is too small - // FIXME(antoyo): since we do not do LTO when the linker is invoked anymore, perhaps + // FIXME(antoyo): We may still be passing a stale linker-plugin flag even though LTO no longer runs at link time; // the following flag is not necessary anymore. 
context.add_driver_option("-fuse-linker-plugin"); } @@ -150,7 +150,7 @@ pub(crate) fn codegen( if fat_lto { let lto_path = format!("{}.lto", path); // cSpell:disable - // FIXME(antoyo): The LTO frontend generates the following warning: + // FIXME(antoyo): The LTO frontend still generates the following warning: // ../build_sysroot/sysroot_src/library/core/src/num/dec2flt/lemire.rs:150:15: warning: type of ‘_ZN4core3num7dec2flt5table17POWER_OF_FIVE_12817ha449a68fb31379e4E’ does not match original declaration [-Wlto-type-mismatch] // 150 | let (lo5, hi5) = POWER_OF_FIVE_128[index]; // | ^ diff --git a/src/base.rs b/src/base.rs index 19abc4337ea..d6a10aa90f1 100644 --- a/src/base.rs +++ b/src/base.rs @@ -51,7 +51,7 @@ pub fn global_linkage_to_gcc(linkage: Linkage) -> GlobalKind { Linkage::WeakAny => unimplemented!(), Linkage::WeakODR => unimplemented!(), Linkage::Internal => GlobalKind::Internal, - Linkage::ExternalWeak => GlobalKind::Imported, // FIXME(antoyo): should be weak linkage. + Linkage::ExternalWeak => GlobalKind::Imported, // FIXME(antoyo): ExternalWeak still maps to the wrong GCC linkage here. Linkage::Common => unimplemented!(), } } @@ -59,7 +59,7 @@ pub fn global_linkage_to_gcc(linkage: Linkage) -> GlobalKind { pub fn linkage_to_gcc(linkage: Linkage) -> FunctionType { match linkage { Linkage::External => FunctionType::Exported, - // FIXME(antoyo): set the attribute externally_visible. + // FIXME(antoyo): The externally_visible attribute is still not emitted. Linkage::AvailableExternally => FunctionType::Extern, Linkage::LinkOnceAny => unimplemented!(), Linkage::LinkOnceODR => unimplemented!(), @@ -210,7 +210,7 @@ pub fn compile_codegen_unit( context.set_allow_unreachable_blocks(true); { - // FIXME: to make it less error-prone (calling get_target_info() will add the flag + // FIXME: Calling get_target_info() here still adds the flag // -fsyntax-only), forbid the compilation when get_target_info() is called on a // context. 
let f16_type_supported = target_info.supports_target_dependent_type(CType::Float16); @@ -218,7 +218,7 @@ pub fn compile_codegen_unit( let f64_type_supported = target_info.supports_target_dependent_type(CType::Float64); let f128_type_supported = target_info.supports_target_dependent_type(CType::Float128); let u128_type_supported = target_info.supports_target_dependent_type(CType::UInt128t); - // FIXME: improve this to avoid passing that many arguments. + // FIXME: CodegenCx::new still needs too many arguments here. let mut cx = CodegenCx::new( &context, cgu, diff --git a/src/builder.rs b/src/builder.rs index 6add7f05c2a..53b8b2fc4b3 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -38,7 +38,7 @@ use crate::errors; use crate::intrinsic::llvm; use crate::type_of::LayoutGccExt; -// FIXME(antoyo) +// FIXME(antoyo): unimplemented. type Funclet = (); enum ExtremumOperation { @@ -75,7 +75,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { let func = self.current_func(); let load_ordering = match order { - // FIXME(antoyo): does this make sense? + // FIXME(antoyo): This ordering downgrade may still be wrong. AtomicOrdering::AcqRel | AtomicOrdering::Release => AtomicOrdering::Acquire, _ => order, }; @@ -284,8 +284,8 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { func_ptr, index ); - // FIXME(antoyo): perhaps use __builtin_convertvector for vector casting. - // FIXME: remove bitcast now that vector types can be compared? + // FIXME(antoyo): Vector casts still rely on bitcasts instead of __builtin_convertvector. + // FIXME: This bitcast workaround still remains even though vector types can now be compared. // ==> We use bitcast to avoid having to do many manual casts from e.g. __m256i to __v32qi (in // the case of _mm256_aesenc_epi128). 
self.bitcast(actual_val, expected_ty) @@ -399,7 +399,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { } else { #[cfg(not(feature = "master"))] if gcc_func.get_param_count() == 0 { - // FIXME(antoyo): As a temporary workaround for unsupported LLVM intrinsics. + // FIXME(antoyo): This path still relies on a temporary workaround for unsupported LLVM intrinsics. self.block.add_eval( self.location, self.cx.context.new_call_through_ptr(self.location, func_ptr, &[]), @@ -430,7 +430,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { // That's why we assign the result to a local. let return_type = self.context.new_type::(); let current_func = self.block.get_function(); - // FIXME(antoyo): return the new_call() directly? Since the overflow function has no side-effects. + // FIXME(antoyo): This still materializes a temporary even though the overflow call has no side effects. let result = current_func.new_local( self.location, return_type, @@ -610,7 +610,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { let current_block = self.block; self.block = try_block; - let call = self.call(typ, fn_attrs, None, func, args, None, instance); // FIXME(antoyo): use funclet here? + let call = self.call(typ, fn_attrs, None, func, args, None, instance); // FIXME(antoyo): This invoke path still ignores the funclet. self.block = current_block; let return_value = @@ -648,7 +648,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { let condition = self.context.new_rvalue_from_int(self.bool_type, 1); self.llbb().end_with_conditional(self.location, condition, then, catch); if let Some(_fn_abi) = fn_abi { - // FIXME(bjorn3): Apply function attributes + // FIXME(bjorn3): Function attributes are still not applied here } call_site } @@ -675,7 +675,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { self.assign_to_var(a + b) } - // FIXME(antoyo): should we also override the `unchecked_` versions? 
+ // FIXME(antoyo): The unchecked variants may still be using the wrong lowering. fn sub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { self.assign_to_var(self.gcc_sub(a, b)) } @@ -703,7 +703,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { } fn exactudiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { - // FIXME(antoyo): poison if not exact. + // FIXME(antoyo): Inexact division still does not produce poison here. let a_type = a.get_type().to_unsigned(self); let a = self.gcc_int_cast(a, a_type); let b_type = b.get_type().to_unsigned(self); @@ -716,8 +716,8 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { } fn exactsdiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { - // FIXME(antoyo): poison if not exact. - // FIXME(antoyo): rustc_codegen_ssa::mir::intrinsic uses different types for a and b but they + // FIXME(antoyo): Inexact division still does not produce poison here. + // FIXME(antoyo): rustc_codegen_ssa::mir::intrinsic still passes different types for a and b even though they // should be the same. 
let typ = a.get_type().to_signed(self); let b = self.gcc_int_cast(b, typ); @@ -737,7 +737,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { } fn frem(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { - // FIXME(antoyo): add check in libgccjit since using the binary operator % causes the following error: + // FIXME(antoyo): libgccjit still accepts `%` here even though it triggers the following internal error: // during RTL pass: expand // libgccjit.so: error: in expmed_mode_index, at expmed.h:240 // 0x7f0101d58dc6 expmed_mode_index @@ -766,7 +766,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { let a_type_unqualified = a_type.unqualified(); if a_type.is_compatible_with(self.cx.float_type) { let fmodf = self.context.get_builtin_function("fmodf"); - // FIXME(antoyo): this seems to produce the wrong result. + // FIXME(antoyo): This still seems to produce the wrong result. return self.context.new_call(self.location, fmodf, &[a, b]); } @@ -789,7 +789,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { return self.context.new_call(self.location, fmod, &[a, b]); } TypeKind::FP128 => { - // FIXME(antoyo): use get_simple_function_f128_2args. + // FIXME(antoyo): FP128 fmod still bypasses get_simple_function_f128_2args. let f128_type = self.type_f128(); let fmodf128 = self.context.new_function( None, @@ -837,7 +837,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { } fn ashr(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { - // FIXME(antoyo): check whether behavior is an arithmetic shift for >> . + // FIXME(antoyo): The `>>` behavior still needs to be verified as an arithmetic shift. // It seems to be if the value is signed. 
self.gcc_lshr(a, b) } @@ -937,7 +937,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { fn alloca(&mut self, size: Size, align: Align) -> RValue<'gcc> { let ty = self.cx.type_array(self.cx.type_i8(), size.bytes()).get_aligned(align.bytes()); - // FIXME(antoyo): It might be better to return a LValue, but fixing the rustc API is non-trivial. + // FIXME(antoyo): alloca still returns an RValue because the rustc API does not expose the needed form here. self.current_func() .new_local(self.location, ty, format!("stack_var_{}", self.next_value_counter())) .get_address(self.location) @@ -1006,8 +1006,8 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { order: AtomicOrdering, size: Size, ) -> RValue<'gcc> { - // FIXME(antoyo): use ty. - // FIXME(antoyo): handle alignment. + // FIXME(antoyo): This atomic load still ignores `ty`. + // FIXME(antoyo): Alignment is still ignored here. let atomic_load = self.context.get_builtin_function(format!("__atomic_load_{}", size.bytes())); let ordering = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc()); @@ -1128,11 +1128,11 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { } fn range_metadata(&mut self, _load: RValue<'gcc>, _range: WrappingRange) { - // FIXME(antoyo) + // FIXME(antoyo): unimplemented. } fn nonnull_metadata(&mut self, _load: RValue<'gcc>) { - // FIXME(antoyo) + // FIXME(antoyo): unimplemented. } fn store(&mut self, val: RValue<'gcc>, ptr: RValue<'gcc>, align: Align) -> RValue<'gcc> { @@ -1161,8 +1161,8 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { self.cx.context.new_cast(self.location, ptr, modified_destination_type.make_pointer()); let modified_destination = modified_ptr.dereference(self.location); self.llbb().add_assignment(self.location, modified_destination, val); - // FIXME(antoyo): handle `MemFlags::NONTEMPORAL`. - // NOTE: dummy value here since it's never used. 
FIXME(antoyo): API should not return a value here? + // FIXME(antoyo): `MemFlags::NONTEMPORAL` is still ignored. + // NOTE: dummy value here since it's never used. FIXME(antoyo): This API still returns a value even though none is needed. // When adding support for NONTEMPORAL, make sure to not just emit MOVNT on x86; see the // LLVM backend for details. self.cx.context.new_rvalue_zero(self.type_i32()) @@ -1175,7 +1175,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { order: AtomicOrdering, size: Size, ) { - // FIXME(antoyo): handle alignment. + // FIXME(antoyo): Alignment is still ignored here. let atomic_store = self.context.get_builtin_function(format!("__atomic_store_{}", size.bytes())); let ordering = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc()); @@ -1183,7 +1183,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { self.context.new_type::<()>().make_volatile().make_pointer(); let ptr = self.context.new_cast(self.location, ptr, volatile_const_void_ptr_type); - // FIXME(antoyo): fix libgccjit to allow comparing an integer type with an aligned integer type because + // FIXME(antoyo): libgccjit still cannot compare an integer type with an aligned integer type, so // the following cast is required to avoid this error: // gcc_jit_context_new_call: mismatching types for argument 2 of function "__atomic_store_4": assignment to param arg1 (type: int) from loadedValue3577 (type: unsigned int __attribute__((aligned(4)))) let int_type = atomic_store.get_param(1).to_rvalue().get_type(); @@ -1205,10 +1205,10 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { let ptr_type = ptr.get_type(); let mut pointee_type = ptr.get_type(); // NOTE: we cannot use array indexing here like in inbounds_gep because array indexing is - // always considered in bounds in GCC (FIXME(antoyo): to be verified). 
+ // always considered in bounds in GCC (FIXME(antoyo): this still needs to be verified). // So, we have to cast to a number. let mut result = self.context.new_bitcast(self.location, ptr, self.sizet_type); - // FIXME(antoyo): if there were more than 1 index, this code is probably wrong and would + // FIXME(antoyo): This is probably wrong for more than one index and would // require dereferencing the pointer. for index in indices { pointee_type = pointee_type.get_pointee().expect("pointee type"); @@ -1233,7 +1233,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { ) -> RValue<'gcc> { // NOTE: due to opaque pointers now being used, we need to cast here. let ptr = self.context.new_cast(self.location, ptr, typ.make_pointer()); - // NOTE: array indexing is always considered in bounds in GCC (FIXME(antoyo): to be verified). + // NOTE: array indexing is always considered in bounds in GCC (FIXME(antoyo): this still needs to be verified). let mut indices = indices.iter(); let index = indices.next().expect("first index in inbounds_gep"); let mut result = self.context.new_array_access(self.location, ptr, *index); @@ -1245,14 +1245,14 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { /* Casts */ fn trunc(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> { - // FIXME(antoyo): check that it indeed truncate the value. + // FIXME(antoyo): This cast still needs to be verified to truncate correctly. self.gcc_int_cast(value, dest_ty) } fn sext(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> { - // FIXME(antoyo): check that it indeed sign extend the value. + // FIXME(antoyo): This cast still needs to be verified to sign-extend correctly. if dest_ty.dyncast_vector().is_some() { - // FIXME(antoyo): nothing to do as it is only for LLVM? + // FIXME(antoyo): This LLVM-only case is still ignored here. 
return value; } self.context.new_cast(self.location, value, dest_ty) @@ -1275,7 +1275,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { } fn fptrunc(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> { - // FIXME(antoyo): make sure it truncates. + // FIXME(antoyo): This floating-point cast still needs to be verified to truncate. set_rvalue_location(self, self.context.new_cast(self.location, value, dest_ty)) } @@ -1405,7 +1405,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { let dst = self.pointercast(dst, self.type_i8p()); let src = self.pointercast(src, self.type_ptr_to(self.type_void())); let memcpy = self.context.get_builtin_function("memcpy"); - // FIXME(antoyo): handle aligns and is_volatile. + // FIXME(antoyo): Alignment and volatility are still ignored here. self.block.add_eval( self.location, self.context.new_call(self.location, memcpy, &[dst, src, size]), @@ -1428,7 +1428,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { let src = self.pointercast(src, self.type_ptr_to(self.type_void())); let memmove = self.context.get_builtin_function("memmove"); - // FIXME(antoyo): handle is_volatile. + // FIXME(antoyo): Volatility is still ignored here. self.block.add_eval( self.location, self.context.new_call(self.location, memmove, &[dst, src, size]), @@ -1447,7 +1447,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { let _is_volatile = flags.contains(MemFlags::VOLATILE); let ptr = self.pointercast(ptr, self.type_i8p()); let memset = self.context.get_builtin_function("memset"); - // FIXME(antoyo): handle align and is_volatile. + // FIXME(antoyo): Alignment and volatility are still ignored here. 
let fill_byte = self.context.new_cast(self.location, fill_byte, self.i32_type); let size = self.intcast(size, self.type_size_t(), false); self.block.add_eval( @@ -1516,7 +1516,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { } fn extract_value(&mut self, aggregate_value: RValue<'gcc>, idx: u64) -> RValue<'gcc> { - // FIXME(antoyo): it would be better if the API only called this on struct, not on arrays. + // FIXME(antoyo): This API still calls extract_value on arrays even though it should be struct-only. assert_eq!(idx as usize as u64, idx); let value_type = aggregate_value.get_type(); @@ -1543,7 +1543,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { value: RValue<'gcc>, idx: u64, ) -> RValue<'gcc> { - // FIXME(antoyo): it would be better if the API only called this on struct, not on arrays. + // FIXME(antoyo): This API still calls insert_value on arrays even though it should be struct-only. assert_eq!(idx as usize as u64, idx); let value_type = aggregate_value.get_type(); @@ -1599,7 +1599,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { let value1_type = self.u8_type.make_pointer(); let ptr = self.cx.context.new_cast(self.location, ptr, value1_type); let value1 = ptr; - let value2 = zero; // FIXME(antoyo): set the proper value here (the type of exception?). + let value2 = zero; // FIXME(antoyo): This landing-pad slot still uses a dummy exception value. 
(value1, value2) } @@ -1616,7 +1616,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { } fn filter_landing_pad(&mut self, pers_fn: Function<'gcc>) { - // FIXME(antoyo): generate the correct landing pad + // FIXME(antoyo): filter_landing_pad still reuses the wrong landing-pad shape self.cleanup_landing_pad(pers_fn); } @@ -1725,7 +1725,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { let void_ptr_type = self.context.new_type::<*mut ()>(); let volatile_void_ptr_type = void_ptr_type.make_volatile(); let dst = self.context.new_cast(self.location, dst, volatile_void_ptr_type); - // FIXME(antoyo): not sure why, but we have the wrong type here. + // FIXME(antoyo): This still ends up with the wrong type here. let new_src_type = atomic_function.get_param(1).to_rvalue().get_type(); let src = self.context.new_bitcast(self.location, src, new_src_type); let res = self.context.new_call(self.location, atomic_function, &[dst, src, order]); @@ -1747,15 +1747,15 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { fn set_invariant_load(&mut self, load: RValue<'gcc>) { // NOTE: Hack to consider vtable function pointer as non-global-variable function pointer. self.normal_function_addresses.borrow_mut().insert(load); - // FIXME(antoyo) + // FIXME(antoyo): unimplemented. } fn lifetime_start(&mut self, _ptr: RValue<'gcc>, _size: Size) { - // FIXME(antoyo) + // FIXME(antoyo): unimplemented. } fn lifetime_end(&mut self, _ptr: RValue<'gcc>, _size: Size) { - // FIXME(antoyo) + // FIXME(antoyo): unimplemented. } fn call( @@ -1768,10 +1768,10 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { funclet: Option<&Funclet>, _instance: Option>, ) -> RValue<'gcc> { - // FIXME(antoyo): remove when having a proper API. + // FIXME(antoyo): This transmute is still required because the libgccjit API does not expose functions in the needed form. 
let gcc_func = unsafe { std::mem::transmute::, Function<'gcc>>(func) }; let call = if self.functions.borrow().values().any(|value| *value == gcc_func) { - // FIXME(antoyo): remove when the API supports a different type for functions. + // FIXME(antoyo): This cast is still required because the API does not represent function values with a distinct type. let func: Function<'gcc> = self.cx.rvalue_as_function(func); self.function_call(func, args, funclet) } else { @@ -1779,7 +1779,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { self.function_ptr_call(typ, func, args, funclet) }; if let Some(_fn_abi) = fn_abi { - // FIXME(bjorn3): Apply function attributes + // FIXME(bjorn3): Function attributes are still not applied here } call } @@ -1799,7 +1799,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { } fn zext(&mut self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> { - // FIXME(antoyo): this does not zero-extend. + // FIXME(antoyo): This still does not zero-extend. self.gcc_int_cast(value, dest_typ) } @@ -1886,7 +1886,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { if signed { i128::MIN >> (128 - int_width) } else { 0 } } - // FIXME: rewrite using a generic function with . + // FIXME: This still duplicates float-specific code instead of using a generic helper. let compute_clamp_bounds_half = |signed: bool, int_width: u64| -> (u128, u128) { let rounded_min = ieee::Half::from_i128_r(int_min(signed, int_width), Round::TowardZero); @@ -2023,7 +2023,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { block.add_assignment(self.location, mask_var, mask); let mask = mask_var.to_rvalue(); - // FIXME(antoyo): use a recursive unqualified() here. + // FIXME(antoyo): This still needs repeated unqualified() calls because of the current vector API. 
let vector_type = v1.get_type().unqualified().dyncast_vector().expect("vector type"); let element_type = vector_type.get_element_type(); let vec_num_units = vector_type.get_num_units(); @@ -2387,7 +2387,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { #[cfg(feature = "master")] let (cond, element_type) = { - // FIXME(antoyo): dyncast_vector should not require a call to unqualified. + // FIXME(antoyo): Vector casts still require an unnecessary unqualified() call. let then_val_vector_type = then_val.get_type().unqualified().dyncast_vector().expect("vector type"); let then_val_element_type = then_val_vector_type.get_element_type(); @@ -2426,7 +2426,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { // NOTE: sometimes, the type of else_val can be different than the type of then_val in // libgccjit (vector of int vs vector of int32_t), but they should be the same for the AND // operation to work. - // FIXME: remove bitcast now that vector types can be compared? + // FIXME: This bitcast workaround still remains even though vector types can now be compared. let else_val = self.context.new_bitcast(self.location, else_val, then_val.get_type()); let else_vals = inverted_masks & else_val; @@ -2508,7 +2508,7 @@ impl ToGccComp for IntPredicate { impl ToGccComp for RealPredicate { fn to_gcc_comparison(&self) -> ComparisonOp { - // FIXME(antoyo): check that ordered vs non-ordered is respected. + // FIXME(antoyo): Ordered vs non-ordered handling still needs to be verified. match *self { RealPredicate::RealPredicateFalse => unreachable!(), RealPredicate::RealOEQ => ComparisonOp::Equals, @@ -2550,7 +2550,7 @@ impl ToGccOrdering for AtomicOrdering { use MemOrdering::*; let ordering = match self { - AtomicOrdering::Relaxed => __ATOMIC_RELAXED, // FIXME(antoyo): check if that's the same. + AtomicOrdering::Relaxed => __ATOMIC_RELAXED, // FIXME(antoyo): This mapping still needs to be verified. 
AtomicOrdering::Acquire => __ATOMIC_ACQUIRE, AtomicOrdering::Release => __ATOMIC_RELEASE, AtomicOrdering::AcqRel => __ATOMIC_ACQ_REL, diff --git a/src/context.rs b/src/context.rs index c34c615306a..0d7638343da 100644 --- a/src/context.rs +++ b/src/context.rs @@ -99,7 +99,7 @@ pub struct CodegenCx<'gcc, 'tcx> { pub vtables: RefCell, Option>), RValue<'gcc>>>, - // FIXME(antoyo): improve the SSA API to not require those. + // FIXME(antoyo): The SSA API still requires these bookkeeping maps. /// Mapping from function pointer type to indexes of on stack parameters. pub on_stack_params: RefCell, FxHashSet>>, /// Mapping from function to indexes of on stack parameters. @@ -109,7 +109,7 @@ pub struct CodegenCx<'gcc, 'tcx> { pub const_globals: RefCell, RValue<'gcc>>>, /// Map from the address of a global variable (rvalue) to the global variable itself (lvalue). - /// FIXME(antoyo): remove when the rustc API is fixed. + /// FIXME(antoyo): This map is still required because the rustc API does not expose the needed global lookup. pub global_lvalues: RefCell, LValue<'gcc>>>, /// Cache of constant strings, @@ -198,7 +198,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { let layout = tcx.layout_of(ParamEnv::reveal_all().and(tcx.types.u128)).unwrap(); let u128_align = layout.align.bytes();*/ - // FIXME(antoyo): re-enable the alignment when libgccjit fixed the issue in + // FIXME(antoyo): Alignment still has to stay disabled until libgccjit fixes the issue in // gcc_jit_context_new_array_constructor (it should not use reinterpret_cast). let i128_type = new_array_type(context, None, i64_type, 2)/*.get_aligned(i128_align)*/; let u128_type = new_array_type(context, None, u64_type, 2)/*.get_aligned(u128_align)*/; @@ -207,7 +207,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { let tls_model = to_gcc_tls_mode(tcx.sess.tls_model()); - // FIXME(antoyo): set alignment on those types as well. + // FIXME(antoyo): Alignment is still missing on these types as well. 
let float_type = context.new_type::(); let double_type = context.new_type::(); @@ -308,7 +308,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { #[cfg(feature = "master")] cleanup_blocks: Default::default(), }; - // FIXME(antoyo): instead of doing this, add SsizeT to libgccjit. + // FIXME(antoyo): libgccjit still lacks SsizeT, so this signed cast remains manual. cx.isize_type = usize_type.to_signed(&cx); cx } @@ -381,15 +381,15 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { impl<'gcc, 'tcx> BackendTypes for CodegenCx<'gcc, 'tcx> { type Function = Function<'gcc>; type BasicBlock = Block<'gcc>; - type Funclet = (); // FIXME(antoyo) + type Funclet = (); // FIXME(antoyo): unimplemented. type Value = RValue<'gcc>; type Type = Type<'gcc>; type FunctionSignature = Type<'gcc>; - type DIScope = (); // FIXME(antoyo) + type DIScope = (); // FIXME(antoyo): unimplemented. type DILocation = Location<'gcc>; - type DIVariable = (); // FIXME(antoyo) + type DIVariable = (); // FIXME(antoyo): unimplemented. } impl<'gcc, 'tcx> MiscCodegenMethods<'tcx> for CodegenCx<'gcc, 'tcx> { @@ -413,8 +413,8 @@ impl<'gcc, 'tcx> MiscCodegenMethods<'tcx> for CodegenCx<'gcc, 'tcx> { }; let ptr = func.get_address(None); - // FIXME(antoyo): don't do this twice: i.e. in declare_fn and here. - // FIXME(antoyo): the rustc API seems to call get_fn_addr() when not needed (e.g. for FFI). + // FIXME(antoyo): Function addresses are still recorded twice: in declare_fn and here. + // FIXME(antoyo): The rustc API still seems to call get_fn_addr() when not needed (e.g. for FFI). self.normal_function_addresses.borrow_mut().insert(ptr); self.function_address_names.borrow_mut().insert(ptr, func_name.to_string()); @@ -471,7 +471,7 @@ impl<'gcc, 'tcx> MiscCodegenMethods<'tcx> for CodegenCx<'gcc, 'tcx> { self.declare_func(name, self.type_i32(), &[], true) } }; - // FIXME(antoyo): apply target cpu attributes. + // FIXME(antoyo): Target CPU attributes are still not applied. 
self.eh_personality.set(Some(func)); func } @@ -481,11 +481,11 @@ impl<'gcc, 'tcx> MiscCodegenMethods<'tcx> for CodegenCx<'gcc, 'tcx> { } fn set_frame_pointer_type(&self, _llfn: Function<'gcc>) { - // FIXME(antoyo) + // FIXME(antoyo): unimplemented. } fn apply_target_cpu_attr(&self, _llfn: Function<'gcc>) { - // FIXME(antoyo) + // FIXME(antoyo): unimplemented. } fn declare_c_main(&self, fn_type: Self::Type) -> Option { diff --git a/src/coverageinfo.rs b/src/coverageinfo.rs index bbe9296661a..1423b81122d 100644 --- a/src/coverageinfo.rs +++ b/src/coverageinfo.rs @@ -6,6 +6,6 @@ use crate::builder::Builder; impl<'a, 'gcc, 'tcx> CoverageInfoBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { fn add_coverage(&mut self, _instance: Instance<'tcx>, _kind: &CoverageKind) { - // FIXME(antoyo) + // FIXME(antoyo): unimplemented. } } diff --git a/src/debuginfo.rs b/src/debuginfo.rs index cf938a3988c..5d066374031 100644 --- a/src/debuginfo.rs +++ b/src/debuginfo.rs @@ -48,7 +48,7 @@ impl<'a, 'gcc, 'tcx> DebugInfoBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { } fn insert_reference_to_gdb_debug_scripts_section_global(&mut self) { - // FIXME(antoyo): insert reference to gdb debug scripts section global. + // FIXME(antoyo): The gdb debug scripts section global is still never referenced. } /// FIXME(tempdragon): Currently, this function is not yet implemented. It seems that the @@ -153,7 +153,7 @@ fn make_mir_scope<'gcc, 'tcx>( // FIXME(eddyb) this doesn't account for the macro-related // `Span` fixups that `rustc_codegen_ssa::mir::debuginfo` does. - // FIXME(tempdragon): Add scope support and then revert to cg_llvm version of this closure + // FIXME(tempdragon): Scope support is still missing, so this closure still diverges from cg_llvm // NOTE: These variables passed () here. // Changed to comply to clippy. 
@@ -162,7 +162,7 @@ fn make_mir_scope<'gcc, 'tcx>( cx.dbg_loc(/* callsite_scope */ (), parent_scope.inlined_at, callsite_span) }); let p_inlined_at = parent_scope.inlined_at; - // FIXME(tempdragon): dbg_scope: Add support for scope extension here. + // FIXME(tempdragon): dbg_scope still lacks scope-extension support. inlined_at.or(p_inlined_at); debug_context.scopes[scope] = DebugScope { @@ -225,7 +225,7 @@ impl<'gcc, 'tcx> DebugInfoCodegenMethods<'tcx> for CodegenCx<'gcc, 'tcx> { _trait_ref: Option>, _vtable: Self::Value, ) { - // FIXME(antoyo) + // FIXME(antoyo): unimplemented. } fn create_function_debug_context( @@ -262,7 +262,7 @@ impl<'gcc, 'tcx> DebugInfoCodegenMethods<'tcx> for CodegenCx<'gcc, 'tcx> { _scope_metadata: Self::DIScope, _file: &SourceFile, ) -> Self::DIScope { - // FIXME(antoyo): implement. + // FIXME(antoyo): unimplemented. } fn debuginfo_finalize(&self) { @@ -285,7 +285,7 @@ impl<'gcc, 'tcx> DebugInfoCodegenMethods<'tcx> for CodegenCx<'gcc, 'tcx> { _fn_abi: &FnAbi<'tcx, Ty<'tcx>>, _maybe_definition_llfn: Option>, ) -> Self::DIScope { - // FIXME(antoyo): implement. + // FIXME(antoyo): unimplemented. } fn dbg_loc( diff --git a/src/declare.rs b/src/declare.rs index 4174eebcf7b..781fe9cc4f7 100644 --- a/src/declare.rs +++ b/src/declare.rs @@ -94,7 +94,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { #[cfg(feature = "master")] callconv: Option>, #[cfg(not(feature = "master"))] callconv: Option<()>, ) -> Function<'gcc> { - // FIXME(antoyo): use the fn_type parameter. + // FIXME(antoyo): `fn_type` is still ignored here. let const_string = self.context.new_type::().make_pointer().make_pointer(); let return_type = self.type_i32(); let variadic = false; @@ -142,7 +142,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { } pub fn get_declared_value(&self, name: &str) -> Option> { - // FIXME(antoyo): use a different field than globals, because this seems to return a function? + // FIXME(antoyo): This still uses `globals` even though it appears to return a function. 
self.globals.borrow().get(name).cloned() } } @@ -166,7 +166,7 @@ fn declare_raw_fn<'gcc>( let params: Vec<_> = param_types .iter() .enumerate() - .map(|(index, param)| cx.context.new_parameter(None, *param, format!("param{}", index))) // FIXME(antoyo): set name. + .map(|(index, param)| cx.context.new_parameter(None, *param, format!("param{}", index))) // FIXME(antoyo): These generated declarations still get placeholder names. .collect(); #[cfg(not(feature = "master"))] let name = &mangle_name(name); @@ -194,7 +194,7 @@ fn declare_raw_fn<'gcc>( .enumerate() .map(|(index, param)| { cx.context.new_parameter(None, *param, format!("param{}", index)) - }) // FIXME(antoyo): set name. + }) // FIXME(antoyo): These generated declarations still get placeholder names. .collect(); let gcc_func = cx.context.new_function( None, @@ -228,11 +228,11 @@ fn declare_raw_fn<'gcc>( func }; - // FIXME(antoyo): set function calling convention. - // FIXME(antoyo): set unnamed address. - // FIXME(antoyo): set no red zone function attribute. - // FIXME(antoyo): set attributes for optimisation. - // FIXME(antoyo): set attributes for non lazy bind. + // FIXME(antoyo): The function calling convention is still not emitted. + // FIXME(antoyo): unnamed_addr is still not emitted. + // FIXME(antoyo): The no-red-zone attribute is still not emitted. + // FIXME(antoyo): Optimization attributes are still not emitted. + // FIXME(antoyo): Non-lazy-bind attributes are still not emitted. // FIXME(antoyo): invalid cast. func diff --git a/src/int.rs b/src/int.rs index dfae4eceebe..e6a94083a2b 100644 --- a/src/int.rs +++ b/src/int.rs @@ -75,7 +75,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { let b_native = self.is_native_int_type(b_type); if a_native && b_native { // FIXME(antoyo): remove the casts when libgccjit can shift an unsigned number by a signed number. - // FIXME(antoyo): cast to unsigned to do a logical shift if that does not work. 
+ // FIXME(antoyo): Logical right shift may still require an explicit unsigned cast. if a_type.is_signed(self) != b_type.is_signed(self) { let b = self.context.new_cast(self.location, b, a_type); a >> b @@ -168,7 +168,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { if a_type != b_type { if a_type.is_vector() { // Vector types need to be bitcast. - // FIXME(antoyo): perhaps use __builtin_convertvector for vector casting. + // FIXME(antoyo): Vector casts still rely on bitcasts instead of __builtin_convertvector. b = self.context.new_bitcast(self.location, b, a_type); } else { b = self.context.new_cast(self.location, b, a_type); @@ -228,7 +228,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { if !a_type.is_compatible_with(b_type) { if a_type.is_vector() { // Vector types need to be bitcast. - // FIXME(antoyo): perhaps use __builtin_convertvector for vector casting. + // FIXME(antoyo): Vector casts still rely on bitcasts instead of __builtin_convertvector. b = self.context.new_bitcast(self.location, b, a_type); } else { b = self.context.new_cast(self.location, b, a_type); @@ -255,9 +255,9 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { } pub fn gcc_sdiv(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { - // FIXME(antoyo): check if the types are signed? + // FIXME(antoyo): Signed division still assumes the operand types are already signed. // 128-bit, signed: __divti3 - // FIXME(antoyo): convert the arguments to signed? + // FIXME(antoyo): Signed division still does not normalize the arguments to signed types. self.multiplicative_operation(BinaryOp::Divide, "div", true, a, b) } @@ -284,7 +284,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { _ => panic!("tried to get overflow intrinsic for op applied to non-int type"), }; - // FIXME(antoyo): remove duplication with intrinsic? + // FIXME(antoyo): This logic still duplicates the intrinsic implementation. 
let name = if self.is_native_int_type(lhs.get_type()) { match oop { OverflowOp::Add => "__builtin_add_overflow", @@ -306,7 +306,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { OverflowOp::Mul => match new_kind { Int(I32) => ("__mulosi4", 32), Int(I64) => ("__mulodi4", 64), - Int(I128) => ("__rust_i128_mulo", 128), // FIXME(antoyo): use __muloti4d instead? + Int(I128) => ("__rust_i128_mulo", 128), // FIXME(antoyo): The 128-bit overflow path may still be calling the wrong helper. Uint(U128) => ("__rust_u128_mulo", 128), _ => unreachable!(), }, @@ -317,7 +317,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { let intrinsic = self.context.get_builtin_function(name); let res = self .current_func() - // FIXME(antoyo): is it correct to use rhs type instead of the parameter typ? + // FIXME(antoyo): This local still uses `rhs` type even though the parameter type may differ. .new_local(self.location, rhs.get_type(), "binopResult") .get_address(self.location); let new_type = type_kind_to_gcc_type(new_kind); @@ -462,7 +462,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { lhs_high = self.context.new_cast(self.location, lhs_high, unsigned_type); rhs_high = self.context.new_cast(self.location, rhs_high, unsigned_type); } - // FIXME(antoyo): we probably need to handle signed comparison for unsigned + // FIXME(antoyo): Signed-comparison semantics may still be applied to unsigned // integers. _ => (), } @@ -556,7 +556,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { self.context.new_rvalue_one(self.int_type), ); } - // FIXME(antoyo): cast to u128 for unsigned comparison. See below. + // FIXME(antoyo): Unsigned 128-bit comparison still lacks the required cast; see below.
IntPredicate::IntUGT => (ComparisonOp::Equals, 2), IntPredicate::IntUGE => (ComparisonOp::GreaterThanEquals, 1), IntPredicate::IntULT => (ComparisonOp::Equals, 0), @@ -602,7 +602,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { rhs = self.context.new_cast(self.location, rhs, unsigned_type); } } - // FIXME(antoyo): we probably need to handle signed comparison for unsigned + // FIXME(antoyo): Signed-comparison semantics may still be applied to unsigned // integers. _ => (), } @@ -693,7 +693,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { b0_block.end_with_jump(self.location, after_block); // NOTE: cast low to its unsigned type in order to perform a logical right shift. - // FIXME(antoyo): adjust this ^ comment. + // FIXME(antoyo): The comment above no longer matches the code. let unsigned_type = native_int_type.to_unsigned(self.cx); let casted_low = self.context.new_cast(self.location, self.low(a), unsigned_type); let shift_value = self.context.new_cast(self.location, sixty_four - b, unsigned_type); @@ -732,7 +732,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { return self.concat_low_high_rvalues(arg_type, swapped_msb, swapped_lsb); } - // FIXME(antoyo): check if it's faster to use string literals and a + // FIXME(antoyo): This may still be slower than using string literals and a // match instead of format!. let bswap = self.cx.context.get_builtin_function(format!("__builtin_bswap{}", width)); // FIXME(antoyo): this cast should not be necessary. Remove @@ -862,12 +862,13 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { self.bitwise_operation(BinaryOp::BitwiseOr, a, b, loc) } - // FIXME(antoyo): can we use https://github.com/rust-lang/compiler-builtins/blob/master/src/int/mod.rs#L379 instead? + // FIXME(antoyo): This still carries a local integer-cast implementation instead of reusing compiler-builtins code.
+ // https://github.com/rust-lang/compiler-builtins/blob/1a99c2aa295bb2d507fa0e67a3b5eef64fba92a0/libm/src/math/support/int_traits.rs#L485 pub fn gcc_int_cast(&self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> { let value_type = value.get_type(); if self.is_native_int_type_or_bool(dest_typ) && self.is_native_int_type_or_bool(value_type) { - // FIXME: use self.location. + // FIXME: This cast still drops the current location information. self.context.new_cast(None, value, dest_typ) } else if self.is_native_int_type_or_bool(dest_typ) { self.context.new_cast(None, self.low(value), dest_typ) @@ -888,7 +889,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { // Since u128 and i128 are the only types that can be unsupported, we know the type of // value and the destination type have the same size, so a bitcast is fine. - // FIXME(antoyo): perhaps use __builtin_convertvector for vector casting. + // FIXME(antoyo): Vector casts still rely on bitcasts instead of __builtin_convertvector. self.context.new_bitcast(None, value, dest_typ) } } diff --git a/src/intrinsic/llvm.rs b/src/intrinsic/llvm.rs index d58697f1bf2..302dcf01dce 100644 --- a/src/intrinsic/llvm.rs +++ b/src/intrinsic/llvm.rs @@ -123,7 +123,7 @@ pub fn adjust_intrinsic_arguments<'a, 'b, 'gcc, 'tcx>( mut args: Cow<'b, [RValue<'gcc>]>, func_name: &str, ) -> Cow<'b, [RValue<'gcc>]> { - // FIXME: this might not be a good way to workaround the missing tile builtins. + // FIXME: Missing tile builtins are still handled with a workaround. 
if func_name == "__builtin_trap" { return vec![].into(); } @@ -1578,7 +1578,7 @@ pub fn intrinsic<'gcc, 'tcx>(name: &str, cx: &CodegenCx<'gcc, 'tcx>) -> Function "llvm.x86.avx512.uitofp.round.v8f32.v8i64" => "__builtin_ia32_cvtuqq2ps512_mask", "llvm.x86.avx512.uitofp.round.v4f32.v4i64" => "__builtin_ia32_cvtuqq2ps256_mask", - // FIXME: support the tile builtins: + // FIXME: Tile builtins are still unsupported: "llvm.x86.ldtilecfg" => "__builtin_trap", "llvm.x86.sttilecfg" => "__builtin_trap", "llvm.x86.tileloadd64" => "__builtin_trap", diff --git a/src/intrinsic/mod.rs b/src/intrinsic/mod.rs index ca0430dd748..c12870a2fb6 100644 --- a/src/intrinsic/mod.rs +++ b/src/intrinsic/mod.rs @@ -68,8 +68,8 @@ fn get_simple_intrinsic<'gcc, 'tcx>( sym::fmaf32 => "fmaf", sym::fmaf64 => "fma", // FIXME: calling `fma` from libc without FMA target feature uses expensive software emulation - sym::fmuladdf32 => "fmaf", // FIXME: use gcc intrinsic analogous to llvm.fmuladd.f32 - sym::fmuladdf64 => "fma", // FIXME: use gcc intrinsic analogous to llvm.fmuladd.f64 + sym::fmuladdf32 => "fmaf", // FIXME: This still uses libc `fmaf` instead of a GCC intrinsic analogous to llvm.fmuladd.f32 + sym::fmuladdf64 => "fma", // FIXME: This still uses libc `fma` instead of a GCC intrinsic analogous to llvm.fmuladd.f64 sym::fabsf32 => "fabsf", sym::fabsf64 => "fabs", sym::copysignf32 => "copysignf", @@ -206,7 +206,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tc &args.iter().map(|arg| arg.immediate()).collect::>(), ) } - // FIXME(antoyo): We can probably remove these and use the fallback intrinsic implementation. + // FIXME(antoyo): These special cases still duplicate functionality that the fallback intrinsic path should handle. 
sym::minimumf32 | sym::minimumf64 | sym::maximumf32 | sym::maximumf64 => { let (ty, func_name) = match name { sym::minimumf32 => (self.cx.float_type, "fminimumf"), @@ -348,7 +348,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tc sym::volatile_load | sym::unaligned_volatile_load => { let ptr = args[0].immediate(); let load = self.volatile_load(result.layout.gcc_type(self), ptr); - // FIXME(antoyo): set alignment. + // FIXME(antoyo): Alignment is still not preserved on this volatile load. if let BackendRepr::Scalar(scalar) = result.layout.backend_repr { self.to_immediate_scalar(load, scalar) } else { @@ -639,7 +639,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tc } fn assume(&mut self, value: Self::Value) { - // FIXME(antoyo): switch to assume when it exists. + // FIXME(antoyo): `assume` still falls back to a weaker workaround here. // Or use something like this: // #define __assume(cond) do { if (!(cond)) __builtin_unreachable(); } while (0) self.expect(value, true); @@ -665,7 +665,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tc } fn va_end(&mut self, _va_list: RValue<'gcc>) -> RValue<'gcc> { - // FIXME(antoyo): implement. + // FIXME(antoyo): unimplemented. self.context.new_rvalue_from_int(self.int_type, 0) } } @@ -887,7 +887,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { if width == 8 { step3 } else { self.gcc_bswap(step3, width) } } 128 => { - // FIXME(antoyo): find a more efficient implementation? + // FIXME(antoyo): This still uses an inefficient fallback implementation. let sixty_four = self.gcc_int(typ, 64); let right_shift = self.gcc_lshr(value, sixty_four); let high = self.gcc_int_cast(right_shift, self.u64_type); @@ -968,7 +968,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { builder.context.new_cast(builder.location, res, builder.u32_type) } - // FIXME(antoyo): use width? 
+ // FIXME(antoyo): This still hard-codes the result width instead of deriving it. let result_type = self.u32_type; let mut arg_type = arg.get_type(); let arg = if arg_type.is_signed(self.cx) { @@ -977,7 +977,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { } else { arg }; - // FIXME(antoyo): write a new function Type::is_compatible_with(&Type) and use it here + // FIXME(antoyo): Type compatibility is still approximated here instead of using a dedicated helper // instead of using is_uint(). if arg_type.is_uchar(self.cx) || arg_type.is_ushort(self.cx) || arg_type.is_uint(self.cx) { let builtin = if count_leading { "__builtin_clz" } else { "__builtin_ctz" }; @@ -1081,7 +1081,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { } fn pop_count(&mut self, value: RValue<'gcc>) -> RValue<'gcc> { - // FIXME(antoyo): use the optimized version with fewer operations. + // FIXME(antoyo): This still uses the slower popcount lowering. let result_type = self.u32_type; let arg_type = value.get_type(); let value_type = arg_type.to_unsigned(self.cx); @@ -1090,7 +1090,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { if arg_type.is_signed(self.cx) { self.gcc_int_cast(value, value_type) } else { value }; // only break apart 128-bit ints if they're not natively supported - // FIXME(antoyo): remove this if/when native 128-bit integers land in libgccjit + // FIXME(antoyo): This workaround is still required until libgccjit gains native 128-bit integers if value_type.is_u128(self.cx) && !self.cx.supports_128bit_integers { let sixty_four = self.gcc_int(value_type, 64); let right_shift = self.gcc_lshr(value, sixty_four); diff --git a/src/intrinsic/simd.rs b/src/intrinsic/simd.rs index 1263b2285a8..e68c41172a3 100644 --- a/src/intrinsic/simd.rs +++ b/src/intrinsic/simd.rs @@ -53,7 +53,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( }; } - // FIXME(antoyo): refactor with the above require_simd macro that was changed in cg_llvm. 
+ // FIXME(antoyo): This still duplicates the updated require_simd macro logic from cg_llvm. #[cfg(feature = "master")] macro_rules! require_simd2 { ($ty: expr, $variant:ident) => {{ @@ -473,14 +473,14 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( } ); - // FIXME(antoyo): For simd_insert, check if the index is a constant of the correct size. + // FIXME(antoyo): simd_insert still accepts indices without validating the constant size. let vector = args[0].immediate(); let index = args[1].immediate(); let value = args[2].immediate(); let variable = bx.current_func().new_local(None, vector.get_type(), "new_vector"); bx.llbb().add_assignment(None, variable, vector); let lvalue = bx.context.new_vector_access(None, variable.to_rvalue(), index); - // FIXME(antoyo): if simd_insert is constant, use BIT_REF. + // FIXME(antoyo): simd_insert still misses the BIT_REF fast path for constant indices. bx.llbb().add_assignment(None, lvalue, value); return Ok(variable.to_rvalue()); } @@ -491,7 +491,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( ret_ty == in_elem, InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty } ); - // FIXME(antoyo): For simd_extract, check if the index is a constant of the correct size. + // FIXME(antoyo): simd_extract still accepts indices without validating the constant size. let vector = args[0].immediate(); let index = args[1].immediate(); return Ok(bx.context.new_vector_access(None, vector, index).to_rvalue()); @@ -737,7 +737,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( // endian and MSB-first for big endian. let vector = args[0].immediate(); - // FIXME(antoyo): dyncast_vector should not require a call to unqualified. + // FIXME(antoyo): Vector casts still require an unnecessary unqualified() call. 
let vector_type = vector.get_type().unqualified().dyncast_vector().expect("vector type"); let elem_type = vector_type.get_element_type(); @@ -832,7 +832,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( let intr_name = match name { sym::simd_ceil => "ceil", - sym::simd_fabs => "fabs", // FIXME(antoyo): pand with 170141183420855150465331762880109871103 + sym::simd_fabs => "fabs", // FIXME(antoyo): simd_fabs still needs a proper pand mask with 170141183420855150465331762880109871103 sym::simd_fcos => "cos", sym::simd_fexp2 => "exp2", sym::simd_fexp => "exp", @@ -852,7 +852,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( let builtin_name = format!("{}{}", intr_name, elem_ty_str); let function = bx.context.get_builtin_function(builtin_name); - // FIXME(antoyo): add platform-specific behavior here for architectures that have these + // FIXME(antoyo): Architectures with dedicated instructions still do not get platform-specific lowering for these // intrinsics as instructions (for instance, gpus) let mut vector_elements = vec![]; for i in 0..in_len { @@ -1060,7 +1060,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( assert_eq!(underlying_ty, non_ptr(element_ty0)); // The element type of the third argument must be an integer type of any width: - // FIXME: also support unsigned integers. + // FIXME: Unsigned integer lanes are still unsupported here. let (_, element_ty2) = args[2].layout.ty.simd_size_and_type(bx.tcx()); match *element_ty2.kind() { ty::Int(_) => (), @@ -1175,7 +1175,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( assert_eq!(underlying_ty, non_ptr(element_ty0)); // The element type of the third argument must be a signed integer type of any width: - // FIXME: also support unsigned integers. + // FIXME: Unsigned integer lanes are still unsupported here. 
match *element_ty2.kind() { ty::Int(_) => (), _ => { @@ -1273,10 +1273,10 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( } (true, true) => { // Algorithm from: https://codereview.stackexchange.com/questions/115869/saturated-signed-addition - // FIXME(antoyo): improve using conditional operators if possible. - // FIXME(antoyo): dyncast_vector should not require a call to unqualified. + // FIXME(antoyo): This still misses the more efficient conditional-operator lowering. + // FIXME(antoyo): Vector casts still require an unnecessary unqualified() call. let arg_type = lhs.get_type().unqualified(); - // FIXME(antoyo): convert lhs and rhs to unsigned. + // FIXME(antoyo): lhs and rhs still use the wrong signedness here. let sum = lhs + rhs; let vector_type = arg_type.dyncast_vector().expect("vector type"); let unit = vector_type.get_num_units(); @@ -1308,13 +1308,13 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( res & cmp } (true, false) => { - // FIXME(antoyo): dyncast_vector should not require a call to unqualified. + // FIXME(antoyo): Vector casts still require an unnecessary unqualified() call. let arg_type = lhs.get_type().unqualified(); - // FIXME(antoyo): this uses the same algorithm from saturating add, but add the + // FIXME(antoyo): Saturating subtraction still reuses the add algorithm; it adds the // negative of the right operand. Find a proper subtraction algorithm. let rhs = bx.context.new_unary_op(None, UnaryOp::Minus, arg_type, rhs); - // FIXME(antoyo): convert lhs and rhs to unsigned. + // FIXME(antoyo): lhs and rhs still use the wrong signedness here. let sum = lhs + rhs; let vector_type = arg_type.dyncast_vector().expect("vector type"); let unit = vector_type.get_num_units(); @@ -1391,7 +1391,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( vector_reduce_fadd_reassoc, false, add, - 0.0 // FIXME: Use this argument. + 0.0 // FIXME: This reduction still ignores the supplied identity argument. 
); arith_red!( simd_reduce_mul_unordered: BinaryOp::Mult, @@ -1507,7 +1507,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( // those lanes whose `mask` bit is enabled. // The memory addresses corresponding to the “off” lanes are not accessed. - // FIXME: handle the alignment. + // FIXME: Alignment is still ignored here. // The element type of the "mask" argument must be a signed integer type of any width let mask_ty = in_ty; @@ -1595,7 +1595,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( // those lanes whose `mask` bit is enabled. // The memory addresses corresponding to the “off” lanes are not accessed. - // FIXME: handle the alignment. + // FIXME: Alignment is still ignored here. // The element type of the "mask" argument must be a signed integer type of any width let mask_ty = in_ty; diff --git a/src/lib.rs b/src/lib.rs index bf13f0b2127..f8fe5aabcf7 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,5 +1,5 @@ /* - * FIXME(antoyo): implement equality in libgccjit based on https://zpz.github.io/blog/overloading-equality-operator-in-cpp-class-hierarchy/ (for type equality?) + * FIXME(antoyo): libgccjit still lacks the equality support needed for type equality. * For Thin LTO, this might be helpful: // cspell:disable-next-line * In gcc 4.6 -fwhopr was removed and became default with -flto. The non-whopr path can still be executed via -flto-partition=none. @@ -8,9 +8,9 @@ * Maybe some missing optimizations enabled by rustc's LTO is in there: https://gcc.gnu.org/onlinedocs/gcc/Optimize-Options.html // cspell:disable-next-line * Like -fipa-icf (should be already enabled) and maybe -fdevirtualize-at-ltrans. - * FIXME: disable debug info always being emitted. Perhaps this slows down things? + * FIXME: Debug info is still emitted unconditionally. * - * FIXME(antoyo): remove the patches. + * FIXME(antoyo): These local patches are still required. 
*/ #![feature(rustc_private)] @@ -328,7 +328,7 @@ fn new_context<'gcc, 'tcx>(tcx: TyCtxt<'tcx>) -> Context<'gcc> { version, )); } - // FIXME(antoyo): check if this should only be added when using -Cforce-unwind-tables=n. + // FIXME(antoyo): `-fno-asynchronous-unwind-tables` may still be forced too broadly here. context.add_command_line_option("-fno-asynchronous-unwind-tables"); context } @@ -425,7 +425,7 @@ impl WriteBackendMethods for GccCodegenBackend { _opt_level: OptLevel, _features: &[String], ) -> TargetMachineFactoryFn { - // FIXME(antoyo): set opt level. + // FIXME(antoyo): The backend still ignores the requested optimization level. Arc::new(|_, _| ()) } @@ -531,7 +531,7 @@ fn target_config(sess: &Session, target_info: &LockedTargetInfo) -> TargetConfig sess, |feature| to_gcc_features(sess, feature), |feature| { - // FIXME: we disable Neon for now since we don't support the LLVM intrinsics for it. + // FIXME: Neon is still disabled because the required LLVM intrinsics are unsupported. if feature == "neon" { return false; } diff --git a/src/mono_item.rs b/src/mono_item.rs index 1429738a7e7..8519a55ee24 100644 --- a/src/mono_item.rs +++ b/src/mono_item.rs @@ -37,7 +37,7 @@ impl<'gcc, 'tcx> PreDefineCodegenMethods<'tcx> for CodegenCx<'gcc, 'tcx> { #[cfg(feature = "master")] global.add_attribute(VarAttribute::Visibility(base::visibility_to_gcc(visibility))); - // FIXME(antoyo): set linkage. + // FIXME(antoyo): Linkage is still not set here. self.instances.borrow_mut().insert(instance, global); } @@ -69,9 +69,9 @@ impl<'gcc, 'tcx> PreDefineCodegenMethods<'tcx> for CodegenCx<'gcc, 'tcx> { decl.add_attribute(FnAttribute::Visibility(base::visibility_to_gcc(visibility))); } - // FIXME(antoyo): call set_link_section() to allow initializing argc/argv. - // FIXME(antoyo): set unique comdat. - // FIXME(antoyo): use inline attribute from there in linkage.set() above. + // FIXME(antoyo): argc/argv initialization still lacks the required link section. 
+ // FIXME(antoyo): unique comdat is still not emitted. + // FIXME(antoyo): linkage.set() still ignores the inline attribute. self.functions.borrow_mut().insert(symbol_name.to_string(), decl); self.function_instances.borrow_mut().insert(instance, decl); diff --git a/src/type_.rs b/src/type_.rs index 5252f93a92e..0179407fdd2 100644 --- a/src/type_.rs +++ b/src/type_.rs @@ -66,7 +66,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { } pub fn type_ptr_to_ext(&self, ty: Type<'gcc>, _address_space: AddressSpace) -> Type<'gcc> { - // FIXME(antoyo): use address_space, perhaps with TYPE_ADDR_SPACE? + // FIXME(antoyo): address_space is still ignored for pointer types. ty.make_pointer() } @@ -213,7 +213,7 @@ impl<'gcc, 'tcx> BaseTypeCodegenMethods for CodegenCx<'gcc, 'tcx> { } else if typ == self.type_void() { TypeKind::Void } else { - // FIXME(antoyo): support other types. + // FIXME(antoyo): Other types are still unsupported here. unimplemented!(); } } @@ -239,7 +239,7 @@ impl<'gcc, 'tcx> BaseTypeCodegenMethods for CodegenCx<'gcc, 'tcx> { } else if typ == self.type_void() { TypeKind::Void } else { - // FIXME(antoyo): support other types. + // FIXME(antoyo): Other types are still unsupported here. unimplemented!(); } } @@ -288,7 +288,7 @@ impl<'gcc, 'tcx> BaseTypeCodegenMethods for CodegenCx<'gcc, 'tcx> { } else { panic!("Cannot get width of float type {:?}", typ); } - // FIXME(antoyo): support other sizes. + // FIXME(antoyo): Other sizes are still unsupported here. } fn int_width(&self, typ: Type<'gcc>) -> u64 { diff --git a/src/type_of.rs b/src/type_of.rs index 5b198eeaf01..c5fc561426d 100644 --- a/src/type_of.rs +++ b/src/type_of.rs @@ -221,7 +221,7 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> { let ty = match *self.ty.kind() { // NOTE: we cannot remove this match like in the LLVM codegen because the call // to fn_ptr_backend_type handle the on-stack attribute. - // FIXME(antoyo): find a less hackish way to handle the on-stack attribute. 
+ // FIXME(antoyo): The on-stack attribute is still handled with a hack here. ty::FnPtr(sig_tys, hdr) => cx .fn_ptr_backend_type(cx.fn_abi_of_fn_ptr(sig_tys.with(hdr), ty::List::empty())), _ => self.scalar_gcc_type_at(cx, scalar, Size::ZERO), @@ -320,7 +320,7 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> { // immediate, just like `bool` is typically `i8` in memory and only `i1` // when immediate. We need to load/store `bool` as `i8` to avoid // crippling LLVM optimizations or triggering other LLVM bugs with `i1`. - // FIXME(antoyo): this bugs certainly don't happen in this case since the bool type is used instead of i1. + // FIXME(antoyo): This workaround may be unnecessary here because bool uses `i8` instead of `i1`. if scalar.is_bool() { return cx.type_i1(); } diff --git a/tests/lang_tests_common.rs b/tests/lang_tests_common.rs index fb3e0f0d8f1..999ae651519 100644 --- a/tests/lang_tests_common.rs +++ b/tests/lang_tests_common.rs @@ -73,14 +73,14 @@ pub fn main_inner(profile: Profile) { path.to_str().expect("to_str"), ]); - // FIXME(antoyo): find a way to send this via a cli argument. + // FIXME(antoyo): This still has to be wired through the environment instead of a CLI argument. let test_target = std::env::var("CG_GCC_TEST_TARGET"); if let Ok(ref target) = test_target { compiler.args(["--target", target]); let linker = format!("{}-gcc", target); compiler.args(&[format!("-Clinker={}", linker)]); let mut env_path = std::env::var("PATH").unwrap_or_default(); - // FIXME(antoyo): find a better way to add the PATH necessary locally. + // FIXME(antoyo): Local PATH setup still relies on this ad hoc override. env_path = format!("/opt/m68k-unknown-linux-gnu/bin:{}", env_path); compiler.env("PATH", env_path); }