#!/usr/bin/env python3
# Copyright 2025 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Compiler Wrapper.

This is copied into the OSS-Fuzz container image and run there as part of the
instrumentation process.
"""

from collections.abc import Iterator, MutableSequence, Sequence
import contextlib
import dataclasses
import fcntl
import hashlib
import json
import os
from pathlib import Path  # pylint: disable=g-importing-member
import shlex
import shutil
import subprocess
import sys
import time
from typing import Any, Iterable, Set

import dwarf_info
import index_build

# Tool locations inside the OSS-Fuzz container image.
_LLVM_READELF_PATH = "/usr/local/bin/llvm-readelf"
_INDEXER_PATH = "/opt/indexer/indexer"
# JSON file (shipped next to the indexer binary) listing dependency paths to
# exclude when parsing a binary's linker dependency file.
_IGNORED_DEPS_PATH = os.path.join(
    os.path.dirname(_INDEXER_PATH), "ignored_deps.json"
)

# Compilation units under these paths are toolchain-internal and are skipped
# when reporting unmatched compilation units in the filter log.
_INTERNAL_PATHS = ("/src/llvm-project/",)

# When we notice a project using these flags,
# we should figure out how to handle them.
_DISALLOWED_CLANG_FLAGS = (
    "-fdebug-prefix-map=",
    "-ffile-prefix-map=",
)

# Chromium GN builds use these flags with a period to make paths relative to
# the out directory. This is OK.
_ALLOWED_CLANG_FLAGS_ONLY_WITH_PERIOD = (
    "-fdebug-compilation-dir=",
    "-ffile-compilation-dir=",
)

_IGNORED_FILES = (
    # This file seems to cause a crash in the indexer, as well as performance
    # issues.
    "simdutf.cpp",
)

# Auto-tuning knobs for the indexer: one merge queue per this many indexing
# threads, and a memory budget per indexing thread.
_INDEXER_THREADS_PER_MERGE_QUEUE = 16
_INDEXER_PER_THREAD_MEMORY = 2 * 1024**3  # 2 GiB

# CDB fragment files hold JSON objects separated (and terminated) by this
# delimiter; a file not ending with it is treated as still being written.
_CDB_FRAGMENT_DELIMITER = ",\n"

SRC = Path(os.getenv("SRC", "/src"))
# On OSS-Fuzz build infra, $OUT is not /out.
OUT = Path(os.getenv("OUT", "/out"))
# Root under which per-binary index directories and build-ID symlinks live.
INDEXES_PATH = Path(os.getenv("INDEXES_PATH", "/indexes"))
FUZZER_ENGINE = os.getenv("LIB_FUZZING_ENGINE", "/usr/lib/libFuzzingEngine.a")


def _get_available_memory() -> int:
  """Returns the available memory in bytes."""
  with open("/proc/meminfo", "r") as f:
    for line in f:
      if line.startswith("MemAvailable:"):
        return int(line.split()[1]) * 1024

  raise RuntimeError("Failed to get available memory")


def rewrite_argv0(argv: Sequence[str], clang_toolchain: str) -> list[str]:
  """Rewrite argv[0] to point to the real clang location.

  We do this because we've set PATH to our wrapper; the basename of argv[0]
  (e.g. clang vs clang++) is preserved, resolved inside the real toolchain.
  """
  real_compiler = os.path.join(
      clang_toolchain, "bin", os.path.basename(argv[0])
  )
  return [real_compiler, *argv[1:]]


def execute(argv: Sequence[str], clang_toolchain: str) -> None:
  """Replaces the current process with the real compiler (does not return)."""
  real_argv = rewrite_argv0(argv, clang_toolchain)
  print("About to execute...", real_argv)
  os.execv(real_argv[0], tuple(real_argv))


def run(argv: Sequence[str], clang_toolchain: str) -> None:
  """Runs the real compiler as a subprocess; exits on compiler failure."""
  real_argv = rewrite_argv0(argv, clang_toolchain)
  print("About to run...", real_argv)
  result = subprocess.run(real_argv, check=False)
  if result.returncode != 0:
    # Propagate the compiler's exit status to our caller (the build system).
    sys.exit(result.returncode)


def sha256(file: Path) -> str:
  """Returns the hex SHA-256 digest of the file's contents."""
  # We can't use hashlib.file_digest here because OSS-Fuzz is still on
  # Python 3.10.
  digest = hashlib.sha256()
  with open(file, "rb") as f:
    while chunk := f.read(4096):
      digest.update(chunk)
  return digest.hexdigest()


def get_flag_value(argv: Sequence[str], flag: str) -> str | None:
  for i in range(len(argv) - 1):
    if argv[i] == flag:
      return argv[i + 1]
    elif flag == "-o" and argv[i].startswith(flag):
      return argv[i][2:]
  return None


def remove_flag_if_present(argv: Iterable[str], flag: str) -> list[str]:
  """Returns a new list with every occurrence of `flag` removed."""
  kept = []
  for arg in argv:
    if arg != flag:
      kept.append(arg)
  return kept


def remove_flag_and_value(argv: list[str], flag: str) -> list[str]:
  """Removes the first occurrence of a flag and its value token.

  The value must be a separate token (`--a b`); the joined `--a=b` form is
  not supported. A flag appearing as the very last argument (and therefore
  having no value) is left in place.

  Args:
    argv: The argument list.
    flag: The flag to remove.

  Returns:
    A new list without the flag and its value, or `argv` unchanged if the
    flag is not present. (The return annotation previously claimed
    `list[str] | None`, but None is never returned.)
  """
  for i, arg in enumerate(argv[:-1]):
    if arg == flag:
      return argv[:i] + argv[i + 2 :]
  return argv


def parse_dependency_file(
    file_path: Path, output_file: Path, ignored_deps: frozenset[str]
) -> Sequence[str]:
  """Parses the dependency file generated by the linker.

  Args:
    file_path: Path to the Makefile-style dependency file written by the
      linker (`--dependency-file`).
    output_file: The binary the dependency file should describe; used to
      sanity-check the file's first line.
    ignored_deps: Resolved dependency paths to exclude from the result.

  Returns:
    The resolved paths of the non-system dependencies, in file order.

  Raises:
    RuntimeError: If the file is empty or its first line doesn't mention
      the output binary.
  """
  output_file = output_file.resolve()
  with file_path.open("r") as f:
    lines = [line.strip() for line in f]

  # Robustness: an empty file would otherwise raise a bare IndexError below.
  if not lines:
    raise RuntimeError(f"dependency file {file_path} is empty")

  # The first line should have the format "/path/to/file: \"
  # Make sure the binary name matches.
  if output_file.name != Path(lines[0].split(":")[0].strip()).name:
    raise RuntimeError(
        f"dependency file has invalid first line: {lines[0]}. "
        f"Expected to see {output_file.name}."
    )

  deps = []
  # We don't care about system-wide dependencies.
  ignored_dep_prefixes = ("/usr", "/clang", "/lib")
  for line in lines[1:]:
    if not line:
      # A blank line marks the end of the dependency list.
      break
    if line.endswith(" \\"):
      line = line[:-2]
    dep = os.path.realpath(line)
    if dep.startswith(ignored_dep_prefixes):
      continue
    if dep in ignored_deps:
      continue
    deps.append(dep)
  return deps


def files_by_creation_time(folder_path: Path) -> Sequence[Path]:
  """Returns the regular files in a directory, oldest (by ctime) first."""
  return sorted(
      (entry for entry in folder_path.iterdir() if entry.is_file()),
      key=os.path.getctime,
  )


def _wait_for_cdb_fragment(file: Path) -> Sequence[str]:
  """Returns the CDB fragment entries from the given file, waiting if needed.

  A fragment file that doesn't end with the delimiter is treated as still
  being written; we retry with exponential backoff before giving up.
  """
  num_retries = 3
  for attempt in range(num_retries + 1):
    data = file.read_text()
    if data.endswith(_CDB_FRAGMENT_DELIMITER):
      # Complete fragment: the trailing delimiter produces one empty final
      # entry after the split, which we drop.
      return data.split(_CDB_FRAGMENT_DELIMITER)[:-1]

    if attempt == num_retries:
      error = f"CDB fragment {file} is invalid even after retries: {data}"
      if "test.c" in file.name or "conftest.c" in file.name:
        # Some build systems seem to have a weird issue where the autotools
        # generated `test.c` or `conftest.c` for testing compilers doesn't
        # result in valid cdb fragments.
        print(f"WARNING: {error}", file=sys.stderr)
      else:
        raise RuntimeError(error)
    else:
      print(
          f"WARNING: CDB fragment {file} appears to be invalid: {data}, "
          f"sleeping for 2^{attempt+1} seconds before retrying.",
          file=sys.stderr,
      )
      time.sleep(2 ** (attempt + 1))

  return ()


def read_cdb_fragments(cdb_path: Path) -> Any:
  """Iterates through the CDB fragments to reconstruct the compile commands."""
  entries: list[str] = []
  for file in files_by_creation_time(cdb_path):
    # Don't read previously generated linker commands files, and skip
    # anything that isn't a JSON fragment.
    name = file.name
    if name.endswith("_linker_commands.json") or not name.endswith(".json"):
      continue
    entries.extend(_wait_for_cdb_fragment(file))

  # Each entry is a single JSON object; joining with the delimiter and
  # wrapping in brackets yields a valid JSON array.
  return json.loads("[" + _CDB_FRAGMENT_DELIMITER.join(entries) + "]")


def _index_dir_path(output_file: Path) -> Path:
  """Returns the path to the index directory for the given output binary."""
  # Mirror the binary's absolute path underneath INDEXES_PATH.
  resolved = (Path.cwd() / output_file).resolve()
  return INDEXES_PATH / resolved.relative_to("/")


def run_indexer(
    output_file: Path,
    build_id: str,
    linker_commands: dict[str, Any],
    allow_errors: bool = False,
) -> None:
  """Run the indexer over the compile commands for one linked binary.

  Args:
    output_file: The linked binary whose sources should be indexed.
    build_id: GNU build ID of the binary; keys the compile-commands
      directory and the per-build-ID index symlink.
    linker_commands: Link metadata; its "compile_commands" and
      "full_compile_commands" entries are written out for the indexer.
    allow_errors: If True, pass --ignore_indexing_errors to the indexer.

  Raises:
    RuntimeError: If the indexer subprocess exits with a non-zero status.
  """

  # Use a build-specific compile commands directory, since there could be
  # parallel linking happening at the same time.
  compile_commands_dir = INDEXES_PATH / f"compile_commands_{build_id}"
  try:
    compile_commands_dir.mkdir(exist_ok=False)
  except FileExistsError:
    # Somehow we've already seen this link command, don't try to redo the
    # indexing.
    # TODO: check if this is the safest behaviour.
    print(
        f"WARNING: Compile commands directory {compile_commands_dir} "
        "already created.",
        file=sys.stderr,
    )
    return

  # Indexes can be built incrementally, so use the same directory for each
  # output binary.

  index_dir = _index_dir_path(output_file)
  index_dir.mkdir(parents=True, exist_ok=True)

  # Symlink by build ID, because `index_build.py` relies on build IDs to match
  # the binaries (which may have moved around) to indexes.
  build_id_symlink = INDEXES_PATH / build_id
  if not build_id_symlink.exists():
    os.symlink(index_dir, build_id_symlink)

  if not linker_commands["compile_commands"]:
    # Nothing to index.
    return

  with (compile_commands_dir / "compile_commands.json").open("wt") as f:
    json.dump(linker_commands["compile_commands"], f, indent=2)

  with (compile_commands_dir / "full_compile_commands.json").open("wt") as f:
    json.dump(linker_commands["full_compile_commands"], f, indent=2)

  # Auto-tune the number of threads and merge queues according to the number
  # of cores and available memory.
  # Note: this might require further tuning -- this might not work well if there
  # are multiple binaries being linked/indexed at the same time.
  num_cores = len(os.sched_getaffinity(0))
  num_threads = max(
      1, min(_get_available_memory() // _INDEXER_PER_THREAD_MEMORY, num_cores)
  )
  merge_queues = max(1, num_threads // _INDEXER_THREADS_PER_MERGE_QUEUE)

  # TODO: b/447468859 - Use database_only once users are ready.
  cmd = [
      _INDEXER_PATH,
      "--build_dir",
      compile_commands_dir,
      "--index_dir",
      index_dir.as_posix(),
      "--source_dir",
      SRC.as_posix(),
      "--index_threads",
      str(num_threads),
      "--merge_queues",
      str(merge_queues),
  ]

  # An existing database means this is an incremental (delta) re-index.
  if (index_dir / "db.sqlite").exists():
    cmd.append("--delta")

  if allow_errors:
    cmd.append("--ignore_indexing_errors")

  result = subprocess.run(cmd, check=False, capture_output=True)
  if result.returncode != 0:
    raise RuntimeError(
        "Running indexer failed\n"
        f"stdout:\n```\n{result.stdout.decode()}\n```\n"
        f"stderr:\n```\n{result.stderr.decode()}\n```\n"
    )


def check_fuzzing_engine_and_fix_argv(argv: MutableSequence[str]) -> bool:
  """Check if this command is linking in a fuzzing engine.

  Mutates `argv` in place so that the correct fuzzing engine library
  (-lFuzzingEngine) is linked instead of clang's libFuzzer runtime.

  Returns:
    True if a fuzzing engine ends up being linked in.
  """
  # Also fix up incorrect link flags so we link in the correct fuzzing
  # engine.
  fuzzing_engine_in_argv = False
  idx = 0
  # Iterate over a snapshot since argv is mutated; `idx` tracks the current
  # argument's position within the mutated argv.
  for arg in argv[:]:
    if arg == "-fsanitize=fuzzer":
      argv[idx] = "-lFuzzingEngine"
      fuzzing_engine_in_argv = True
    elif arg == "-fsanitize=fuzzer-no-link":
      argv.remove("-fsanitize=fuzzer-no-link")
      # An element was removed, so compensate for the idx += 1 below.
      idx -= 1
    elif arg.startswith("-fsanitize="):
      # This could be -fsanitize=address,fuzzer.
      sanitize_vals = arg.split("=")[1].split(",")
      if "fuzzer" in sanitize_vals:
        sanitize_vals.remove("fuzzer")
        arg = "-fsanitize=" + ",".join(sanitize_vals)
        fuzzing_engine_in_argv = True
      elif "fuzzer-no-link" in sanitize_vals:
        sanitize_vals.remove("fuzzer-no-link")
        arg = "-fsanitize=" + ",".join(sanitize_vals)
      argv[idx] = arg

      # NOTE(review): fuzzing_engine_in_argv may have been set by an earlier
      # argument, in which case -lFuzzingEngine is inserted after this
      # -fsanitize= argument too -- confirm this is intended.
      if fuzzing_engine_in_argv:
        idx += 1
        argv.insert(idx, "-lFuzzingEngine")

    idx += 1

    # Substring check: matches -lFuzzingEngine itself as well as explicit
    # paths containing libFuzzingEngine.a.
    if "libFuzzingEngine.a" in arg or "-lFuzzingEngine" in arg:
      fuzzing_engine_in_argv = True

  return fuzzing_engine_in_argv


def _has_disallowed_clang_flags(argv: Sequence[str]) -> bool:
  """Checks if the command line arguments contain disallowed flags."""
  for arg in argv:
    # Flags we don't yet know how to handle (see _DISALLOWED_CLANG_FLAGS).
    if arg.startswith(_DISALLOWED_CLANG_FLAGS):
      return True
    # The compilation-dir flags are tolerated only in the "=." form used by
    # Chromium GN builds.
    if arg.startswith(_ALLOWED_CLANG_FLAGS_ONLY_WITH_PERIOD):
      if not arg.endswith("=."):
        return True
  return False


@dataclasses.dataclass(frozen=True)
class FilteredCompileCommands:
  """Result of matching compile commands against a binary's DWARF CUs."""

  # Compile commands whose source file appears as a compilation unit in the
  # final binary.
  filtered_compile_commands: Sequence[dict[str, str]]
  # Compilation-unit paths in the binary that no compile command matched.
  unused_cu_paths: Set[Path]
  # Source paths from compile commands that didn't end up in the binary.
  unused_cc_paths: Set[Path]


def _filter_compile_commands(
    elf_path: Path, compile_commands: Sequence[dict[str, str]]
) -> FilteredCompileCommands:
  """Extracts compile commands from the DWARF information of an ELF file.

  Only compile commands whose source file corresponds to a compilation unit
  present in the linked binary are kept.

  Args:
    elf_path: The path to the ELF file.
    compile_commands: The compile commands to filter.

  Returns:
    The filtered compile commands, plus the compilation units and compile
    commands that could not be matched.
  """
  compilation_units = dwarf_info.get_all_compilation_units(elf_path)
  # Idiom: set comprehension instead of set([...]).
  cu_paths = {Path(cu.compdir) / cu.name for cu in compilation_units}
  used_cu_paths = set()
  filtered_compile_commands = []
  unused_cc_paths = set()

  for compile_command in compile_commands:
    if (
        "-ffile-compilation-dir=." in compile_command["arguments"]
        or "-fdebug-compilation-dir=." in compile_command["arguments"]
    ):
      # Handle build systems that make their debug paths relative.
      directory = Path(".")
    else:
      directory = Path(compile_command["directory"])

    # `directory / file` is already a Path; no need to re-wrap.
    cc_path = directory / compile_command["file"]
    if cc_path in cu_paths and cc_path.name not in _IGNORED_FILES:
      filtered_compile_commands.append(compile_command)
      used_cu_paths.add(cc_path)
    else:
      unused_cc_paths.add(cc_path)

  return FilteredCompileCommands(
      filtered_compile_commands=filtered_compile_commands,
      unused_cu_paths=cu_paths - used_cu_paths,
      unused_cc_paths=unused_cc_paths,
  )


def _write_filter_log(
    filter_log_file: Path,
    filtered_compile_commands: FilteredCompileCommands,
) -> None:
  """Writes a log describing which compile commands / CUs went unmatched."""
  with open(filter_log_file, "wt") as log:
    log.write("The following files were not used in the final binary:\n")
    for path in sorted(filtered_compile_commands.unused_cc_paths):
      log.write(f"\t{path}\n")

    log.write(
        "The following compilation units were not matched with any compile"
        " commands:\n"
    )
    for path in sorted(filtered_compile_commands.unused_cu_paths):
      # Toolchain-internal compilation units are expected to be unmatched;
      # don't report them.
      if not path.as_posix().startswith(_INTERNAL_PATHS):
        log.write(f"\t{path}\n")


def expand_rsp_file(argv: Sequence[str]) -> list[str]:
  """Expands @file response-file arguments into their contained arguments.

  https://llvm.org/docs/CommandLine.html#response-files
  """
  expanded: list[str] = []
  for arg in argv:
    if not arg.startswith("@"):
      expanded.append(arg)
      continue
    # The remainder of the argument is a path to a file of shell-quoted
    # arguments.
    with open(arg[1:], "r") as rsp:
      expanded.extend(shlex.split(rsp.read()))

  return expanded


def force_optimization_flag(argv: Sequence[str]) -> list[str]:
  """Forces -O0 in the given argument list.

  Any optimization-level flag (anything starting with "-O") is rewritten
  to -O0; all other arguments pass through unchanged.
  """
  return ["-O0" if arg.startswith("-O") else arg for arg in argv]


def fix_coverage_flags(
    argv: Sequence[str], expected_coverage_flags: str
) -> list[str]:
  """Makes sure that the right coverage flags are set.

  Drops any project-supplied -fsanitize-coverage* flags and appends our
  expected coverage flags instead.
  """
  # Some projects use -fsanitize-coverage-allowlist/ignorelist to optimize
  # fuzzing feedback. For the indexer case, we would prefer to have all code
  # instrumented, so we remove these flags.
  # Some projects hardcode -fsanitize-coverage= options that cause conflicts
  # with our indexer / tracer options.
  dropped_prefixes = (
      "-fsanitize-coverage-allowlist=",
      "-fsanitize-coverage-ignorelist=",
      "-fsanitize-coverage=",
  )
  kept = [arg for arg in argv if not arg.startswith(dropped_prefixes)]
  kept.append(expected_coverage_flags)
  return kept


@contextlib.contextmanager
def _file_lock(lock_path: Path):
  """Context manager for acquiring an exclusive file lock."""
  fd = os.open(lock_path.as_posix(), os.O_CREAT | os.O_RDWR)
  fcntl.flock(fd, fcntl.LOCK_EX)

  try:
    yield
  finally:
    fcntl.flock(fd, fcntl.LOCK_UN)
    os.close(fd)


def merge_incremental_cdb(cdb_path: Path, merged_cdb_path: Path) -> None:
  """Merges new CDB fragments into the incremental CDB.

  Fragments from `cdb_path` are copied into `merged_cdb_path`; a new fragment
  replaces any existing fragment that describes the same output file.

  Args:
    cdb_path: Directory containing newly generated CDB fragments.
    merged_cdb_path: Directory holding the accumulated incremental CDB.
  """
  # Map of output file to the path of the file in the incremental CDB.
  # Use the output file path as the key for merging.
  existing_output_files: dict[Path, Path] = {}

  def load_cdbs(directory: Path) -> Iterator[tuple[Path, dict[str, Any]]]:
    # Yields (fragment file, parsed fragment) for every compile-command
    # fragment in `directory` that recorded an output file.
    for file in directory.iterdir():
      if file.suffix != ".json":
        continue

      if file.name.endswith("_linker_commands.json"):
        continue

      fragments_data = _wait_for_cdb_fragment(file)
      for fragment_data in fragments_data:
        fragment = json.loads(fragment_data)
        if "output" not in fragment:
          continue

        yield file, fragment

  # We could be running multiple linking steps in parallel, so serialize merges.
  with _file_lock(merged_cdb_path / ".lock"):
    # Load existing CDB fragments, and build the map of output file -> fragment.
    for file, fragment in load_cdbs(merged_cdb_path):
      output_path = Path(fragment["directory"]) / fragment["output"]
      existing_output_files[output_path] = file

    # Load new CDB fragments, replacing existing fragments for the same output
    # file.
    for file, fragment in load_cdbs(cdb_path):
      output_path = Path(fragment["directory"]) / fragment["output"]

      if output_path in existing_output_files:
        # Remove existing entry for the output file.
        os.unlink(existing_output_files[output_path])

      shutil.copy2(file, merged_cdb_path / file.name)


def main(argv: list[str]) -> None:
  """Compiler wrapper entry point.

  Forwards ordinary compile invocations to the real clang, and intercepts
  link invocations of fuzz targets to collect compile commands, link
  dependencies and DWARF info, then runs the indexer over the result.
  """
  compile_settings = index_build.read_compile_settings()
  argv = expand_rsp_file(argv)
  argv = remove_flag_if_present(argv, "-gline-tables-only")
  argv = force_optimization_flag(argv)
  argv = fix_coverage_flags(argv, compile_settings.coverage_flags)

  if _has_disallowed_clang_flags(argv):
    raise ValueError("Disallowed clang flags found, aborting.")

  # TODO: b/441872725 - Migrate more flags to be appended in the clang wrapper
  # instead.
  cdb_path = index_build.OUT / "cdb"
  argv.extend(("-gen-cdb-fragment-path", cdb_path.as_posix()))
  # Point clang at the pinned toolchain's headers and resource directory.
  argv.extend((
      "-isystem",
      (
          f"{compile_settings.clang_toolchain}/lib/clang/"
          f"{compile_settings.clang_version}"
      ),
      "-resource-dir",
      (
          f"{compile_settings.clang_toolchain}/lib/clang/"
          f"{compile_settings.clang_version}"
      ),
  ))

  if "-E" in argv:
    # Preprocessor-only invocation.
    modified_argv = remove_flag_and_value(argv, "-gen-cdb-fragment-path")
    # Note: execute() replaces this process and does not return.
    execute(modified_argv, compile_settings.clang_toolchain)

  fuzzing_engine_in_argv = check_fuzzing_engine_and_fix_argv(argv)
  # Optional allowlist of binary names to index, from the environment.
  indexer_targets: list[str] = [
      t for t in os.getenv("INDEXER_TARGETS", "").split(",") if t
  ]

  # If we are linking, collect the relevant flags and dependencies.
  output_file = get_flag_value(argv, "-o")
  if not output_file:
    execute(argv, compile_settings.clang_toolchain)  # Missing output file

  output_file = Path(output_file)

  if output_file.name.endswith(".o"):
    execute(argv, compile_settings.clang_toolchain)  # Not a real linker command

  if indexer_targets:
    if output_file.name not in indexer_targets:
      # Not a relevant linker command
      print(f"Not indexing as {output_file} is not in the allowlist")
      execute(argv, compile_settings.clang_toolchain)
  elif not fuzzing_engine_in_argv:
    # Not a fuzz target.
    execute(argv, compile_settings.clang_toolchain)

  print(f"Linking {argv}")

  # We can now run the linker and look at the output of some files.
  dependency_file = (cdb_path / output_file.name).with_suffix(".deps")
  why_extract_file = (cdb_path / output_file.name).with_suffix(".why_extract")
  argv.append("-fuse-ld=lld")
  argv.append(f"-Wl,--dependency-file={dependency_file}")
  argv.append(f"-Wl,--why-extract={why_extract_file}")
  argv.append("-Wl,--build-id")
  # We force lld, but it doesn't include this dir by default.
  argv.append("-L/usr/local/lib")
  argv.append("-Qunused-arguments")

  if compile_settings.coverage_flags == index_build.TRACING_COVERAGE_FLAGS:
    argv.append("/opt/indexer/coverage.o")

  # Run the real link; on failure this exits with the linker's status.
  run(argv, compile_settings.clang_toolchain)

  build_id = index_build.get_build_id(output_file)
  assert build_id is not None

  output_hash = sha256(output_file)

  with open(_IGNORED_DEPS_PATH) as f:
    ignored_deps = frozenset(json.load(f)["deps"])

  deps = parse_dependency_file(dependency_file, output_file, ignored_deps)
  obj_deps = [dep for dep in deps if dep.endswith(".o")]
  ar_deps = [dep for dep in deps if dep.endswith(".a") and dep != FUZZER_ENGINE]
  archive_deps = []
  # Expand static archives into their member object names.
  for archive in ar_deps:
    res = subprocess.run(["ar", "-t", archive], capture_output=True, check=True)
    archive_deps += [dep.decode() for dep in res.stdout.splitlines()]

  # Incremental index building relies on merging all new compilation fragments
  # since the initial indexing.
  cdb_fragments_dir = cdb_path
  if _index_dir_path(output_file).exists():
    merge_incremental_cdb(cdb_path, index_build.INCREMENTAL_CDB_PATH)
    cdb_fragments_dir = index_build.INCREMENTAL_CDB_PATH

  # We only care about the compile commands that emitted an output file.
  full_compile_commands = [
      cc for cc in read_cdb_fragments(cdb_fragments_dir) if "output" in cc
  ]

  # Discard compile commands that didn't end up in the final binary.
  filtered_compile_commands = _filter_compile_commands(
      output_file, full_compile_commands
  )

  linker_commands = {
      "output": output_file.as_posix(),
      "directory": os.getcwd(),
      "deps": obj_deps + archive_deps,
      "args": argv,
      "sha256": output_hash,
      "gnu_build_id": build_id,
      "compile_commands": filtered_compile_commands.filtered_compile_commands,
      "full_compile_commands": full_compile_commands,
  }

  filter_log_file = Path(cdb_path) / f"{build_id}_filter_log.txt"
  _write_filter_log(filter_log_file, filtered_compile_commands)

  if not os.getenv("INDEXER_BINARIES_ONLY"):
    # Custom toolchains are more likely to hit indexing errors; tolerate them.
    is_custom_toolchain = (
        compile_settings.clang_toolchain != index_build.DEFAULT_CLANG_TOOLCHAIN
    )

    run_indexer(
        output_file, build_id, linker_commands, allow_errors=is_custom_toolchain
    )

  linker_commands = json.dumps(linker_commands)
  commands_path = Path(cdb_path) / f"{build_id}_linker_commands.json"
  commands_path.write_text(linker_commands)


if __name__ == "__main__":
  # argv[0] is this wrapper's invocation name; rewrite_argv0 (via main)
  # maps it back to the real compiler binary.
  main(sys.argv)
