-# Almost identical to the top-level .clang-tidy, except that {Member,Parameter,Variable}Case use camelBack.
-Checks: '-*,clang-diagnostic-*,llvm-*,misc-*,-misc-unused-parameters,-misc-non-private-member-variables-in-classes,readability-identifier-naming'
+InheritParentConfig: true
CheckOptions:
- - key: readability-identifier-naming.ClassCase
- value: CamelCase
- - key: readability-identifier-naming.EnumCase
- value: CamelCase
- - key: readability-identifier-naming.FunctionCase
- value: camelBack
- key: readability-identifier-naming.MemberCase
value: camelBack
- key: readability-identifier-naming.ParameterCase
value: camelBack
- - key: readability-identifier-naming.UnionCase
- value: CamelCase
- key: readability-identifier-naming.VariableCase
value: camelBack
- - key: readability-identifier-naming.IgnoreMainLikeFunctions
- value: 1
# Check if lld is built as a standalone project.
if(CMAKE_SOURCE_DIR STREQUAL CMAKE_CURRENT_SOURCE_DIR)
project(lld)
- cmake_minimum_required(VERSION 3.4.3)
+ cmake_minimum_required(VERSION 3.13.4)
set(CMAKE_INCLUDE_CURRENT_DIR ON)
set(LLD_BUILT_STANDALONE TRUE)
set(LLVM_MAIN_SRC_DIR ${MAIN_SRC_DIR} CACHE PATH "Path to LLVM source tree")
file(TO_CMAKE_PATH ${LLVM_OBJ_ROOT} LLVM_BINARY_DIR)
+ file(TO_CMAKE_PATH ${LLVM_CMAKE_PATH} LLVM_CMAKE_PATH)
if(NOT EXISTS "${LLVM_CMAKE_PATH}/LLVMConfig.cmake")
message(FATAL_ERROR "LLVMConfig.cmake not found")
include(AddLLVM)
include(TableGen)
include(HandleLLVMOptions)
+ include(GetErrcMessages)
include(CheckAtomic)
if(LLVM_INCLUDE_TESTS)
- if(CMAKE_VERSION VERSION_LESS 3.12)
- include(FindPythonInterp)
- if(NOT PYTHONINTERP_FOUND)
- message(FATAL_ERROR
- "Unable to find Python interpreter, required for testing.
-
- Please install Python or specify the PYTHON_EXECUTABLE CMake variable.")
- endif()
-
- if(${PYTHON_VERSION_STRING} VERSION_LESS 2.7)
- message(FATAL_ERROR "Python 2.7 or newer is required")
- endif()
-
- add_executable(Python3::Interpeter IMPORTED)
- set_target_properties(Python3::Interpreter PROPERTIES
- IMPORTED_LOCATION ${PYTHON_EXECUTABLE})
- set(Python3_EXECUTABLE ${PYTHON_EXECUTABLE})
- else()
- find_package(Python3 COMPONENTS Interpreter)
- if(NOT Python3_Interpreter_FOUND)
- message(WARNING "Python3 not found, using python2 as a fallback")
- find_package(Python2 COMPONENTS Interpreter REQUIRED)
- if(Python2_VERSION VERSION_LESS 2.7)
- message(SEND_ERROR "Python 2.7 or newer is required")
- endif()
-
- # Treat python2 as python3
- add_executable(Python3::Interpreter IMPORTED)
- set_target_properties(Python3::Interpreter PROPERTIES
- IMPORTED_LOCATION ${Python2_EXECUTABLE})
- set(Python3_EXECUTABLE ${Python2_EXECUTABLE})
- endif()
- endif()
+ find_package(Python3 ${LLVM_MINIMUM_PYTHON_VERSION} REQUIRED
+ COMPONENTS Interpreter)
# Check prebuilt llvm/utils.
if(EXISTS ${LLVM_TOOLS_BINARY_DIR}/FileCheck${CMAKE_EXECUTABLE_SUFFIX}
endif()
set(LLVM_LIT_ARGS "${LIT_ARGS_DEFAULT}" CACHE STRING "Default options for lit")
+ get_errc_messages(LLVM_LIT_ERRC_MESSAGES)
+
# On Win32 hosts, provide an option to specify the path to the GnuWin32 tools.
if(WIN32 AND NOT CYGWIN)
set(LLVM_LIT_TOOLS_DIR "" CACHE PATH "Path to GnuWin32 tools")
set(LLVM_INCLUDE_TESTS OFF)
endif()
endif()
+
+ if(LLVM_HAVE_LIBXAR)
+ set(XAR_LIB xar)
+ endif()
endif()
set(LLD_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR})
option(LLD_BUILD_TOOLS
"Build the lld tools. If OFF, just generate build targets." ON)
+option(LLD_DEFAULT_LD_LLD_IS_MINGW
+ "Use MinGW as the default backend for ld.lld. If OFF, ELF will be used." OFF)
+if (LLD_DEFAULT_LD_LLD_IS_MINGW)
+ add_definitions("-DLLD_DEFAULT_LD_LLD_IS_MINGW=1")
+endif()
+
if (MSVC)
add_definitions(-wd4530) # Suppress 'warning C4530: C++ exception handler used, but unwind semantics are not enabled.'
add_definitions(-wd4062) # Suppress 'warning C4062: enumerator X in switch of enum Y is not handled' from system header.
DESTINATION include
FILES_MATCHING
PATTERN "*.h"
- PATTERN ".svn" EXCLUDE
)
endif()
N: Lang Hames, Nick Kledzik
E: lhames@gmail.com, kledzik@apple.com
-D: Mach-O backend
+D: Old Mach-O backend
N: Sam Clegg
E: sbc@chromium.org
D: WebAssembly backend (wasm/*)
+
+N: Jez Ng, Greg McGary, Shoaib Meenai
+E: jezng@fb.com, gkm@fb.com, smeenai@fb.com
+D: New Mach-O backend
tablegen(LLVM Options.inc -gen-opt-parser-defs)
add_public_tablegen_target(COFFOptionsTableGen)
-if(NOT LLD_BUILT_STANDALONE)
- set(tablegen_deps intrinsics_gen)
-endif()
-
add_lld_library(lldCOFF
+ CallGraphSort.cpp
Chunks.cpp
DebugTypes.cpp
DLL.cpp
DEPENDS
COFFOptionsTableGen
- ${tablegen_deps}
+ intrinsics_gen
)
--- /dev/null
+//===- CallGraphSort.cpp --------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// This is based on the ELF port; see ELF/CallGraphSort.cpp for details about
+/// the algorithm.
+///
+//===----------------------------------------------------------------------===//
+
+#include "CallGraphSort.h"
+#include "InputFiles.h"
+#include "SymbolTable.h"
+#include "Symbols.h"
+#include "lld/Common/ErrorHandler.h"
+
+#include <numeric>
+
+using namespace llvm;
+using namespace lld;
+using namespace lld::coff;
+
+namespace {
+struct Edge {
+ int from;
+ uint64_t weight;
+};
+
+struct Cluster {
+ Cluster(int sec, size_t s) : next(sec), prev(sec), size(s) {}
+
+ double getDensity() const {
+ if (size == 0)
+ return 0;
+ return double(weight) / double(size);
+ }
+
+ int next;
+ int prev;
+ uint64_t size;
+ uint64_t weight = 0;
+ uint64_t initialWeight = 0;
+ Edge bestPred = {-1, 0};
+};
+
+class CallGraphSort {
+public:
+ CallGraphSort();
+
+ DenseMap<const SectionChunk *, int> run();
+
+private:
+ std::vector<Cluster> clusters;
+ std::vector<const SectionChunk *> sections;
+};
+
+// Maximum factor by which the combined cluster density may be worse than the
+// original cluster's density for a merge to still be considered.
+constexpr int MAX_DENSITY_DEGRADATION = 8;
+
+// Maximum cluster size in bytes.
+constexpr uint64_t MAX_CLUSTER_SIZE = 1024 * 1024;
+} // end anonymous namespace
+
+using SectionPair = std::pair<const SectionChunk *, const SectionChunk *>;
+
+// Take the edge list in config->callGraphProfile, resolve symbol names to
+// Symbols, and generate a graph between SectionChunks with the provided
+// weights.
+CallGraphSort::CallGraphSort() {
+ MapVector<SectionPair, uint64_t> &profile = config->callGraphProfile;
+ DenseMap<const SectionChunk *, int> secToCluster;
+
+ auto getOrCreateNode = [&](const SectionChunk *isec) -> int {
+ auto res = secToCluster.try_emplace(isec, clusters.size());
+ if (res.second) {
+ sections.push_back(isec);
+ clusters.emplace_back(clusters.size(), isec->getSize());
+ }
+ return res.first->second;
+ };
+
+ // Create the graph.
+ for (std::pair<SectionPair, uint64_t> &c : profile) {
+ const auto *fromSec = cast<SectionChunk>(c.first.first->repl);
+ const auto *toSec = cast<SectionChunk>(c.first.second->repl);
+ uint64_t weight = c.second;
+
+ // Ignore edges between input sections belonging to different output
+ // sections. This is done because otherwise we would end up with clusters
+ // containing input sections that can't actually be placed adjacently in the
+ // output. This messes with the cluster size and density calculations. We
+ // would also end up moving input sections in other output sections without
+ // moving them closer to what calls them.
+ if (fromSec->getOutputSection() != toSec->getOutputSection())
+ continue;
+
+ int from = getOrCreateNode(fromSec);
+ int to = getOrCreateNode(toSec);
+
+ clusters[to].weight += weight;
+
+ if (from == to)
+ continue;
+
+ // Remember the best edge.
+ Cluster &toC = clusters[to];
+ if (toC.bestPred.from == -1 || toC.bestPred.weight < weight) {
+ toC.bestPred.from = from;
+ toC.bestPred.weight = weight;
+ }
+ }
+ for (Cluster &c : clusters)
+ c.initialWeight = c.weight;
+}
+
+// It's bad to merge clusters which would degrade the density too much.
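+// With MAX_DENSITY_DEGRADATION == 8, a merge is rejected when the combined
+// density would fall below 1/8th of cluster a's current density.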
+static bool isNewDensityBad(Cluster &a, Cluster &b) {
+ double newDensity = double(a.weight + b.weight) / double(a.size + b.size);
+ return newDensity < a.getDensity() / MAX_DENSITY_DEGRADATION;
+}
+
+// Find the leader of the cluster that v belongs to (clusters are represented
+// as equivalence classes). Path halving is applied along the way (simple to
+// implement) since it reduces the tree depth and thus the lookup cost.
+static int getLeader(std::vector<int> &leaders, int v) {
+ while (leaders[v] != v) {
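+    // Path halving: make v point to its grandparent before stepping up, which
+    // shortens the chain for future lookups.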
+ leaders[v] = leaders[leaders[v]];
+ v = leaders[v];
+ }
+ return v;
+}
+
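+// Splice the circular chunk list of "from" onto the tail of "into", accumulate
+// from's size and weight into "into", and zero out "from" so that it is
+// treated as empty from now on.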
+static void mergeClusters(std::vector<Cluster> &cs, Cluster &into, int intoIdx,
+ Cluster &from, int fromIdx) {
+ int tail1 = into.prev, tail2 = from.prev;
+ into.prev = tail2;
+ cs[tail2].next = intoIdx;
+ from.prev = tail1;
+ cs[tail1].next = fromIdx;
+ into.size += from.size;
+ into.weight += from.weight;
+ from.size = 0;
+ from.weight = 0;
+}
+
+// Group sections into clusters using the Call-Chain Clustering heuristic, then
+// sort the clusters by density.
+DenseMap<const SectionChunk *, int> CallGraphSort::run() {
+ std::vector<int> sorted(clusters.size());
+ std::vector<int> leaders(clusters.size());
+
+ std::iota(leaders.begin(), leaders.end(), 0);
+ std::iota(sorted.begin(), sorted.end(), 0);
+ llvm::stable_sort(sorted, [&](int a, int b) {
+ return clusters[a].getDensity() > clusters[b].getDensity();
+ });
+
+ for (int l : sorted) {
+    // The cluster index is the same as the index of its leader here because
+    // clusters[l] has not been merged into another cluster yet.
+ Cluster &c = clusters[l];
+
+ // Don't consider merging if the edge is unlikely.
+ if (c.bestPred.from == -1 || c.bestPred.weight * 10 <= c.initialWeight)
+ continue;
+
+ int predL = getLeader(leaders, c.bestPred.from);
+ if (l == predL)
+ continue;
+
+ Cluster *predC = &clusters[predL];
+ if (c.size + predC->size > MAX_CLUSTER_SIZE)
+ continue;
+
+ if (isNewDensityBad(*predC, c))
+ continue;
+
+ leaders[l] = predL;
+ mergeClusters(clusters, *predC, predL, c, l);
+ }
+
+ // Sort remaining non-empty clusters by density.
+ sorted.clear();
+ for (int i = 0, e = (int)clusters.size(); i != e; ++i)
+ if (clusters[i].size > 0)
+ sorted.push_back(i);
+ llvm::stable_sort(sorted, [&](int a, int b) {
+ return clusters[a].getDensity() > clusters[b].getDensity();
+ });
+
+ DenseMap<const SectionChunk *, int> orderMap;
+  // Sections will be sorted in increasing order of priority. Sections absent
+  // from the map get priority 0 and are placed after all ordered sections.
+ int curOrder = INT_MIN;
+ for (int leader : sorted) {
+ for (int i = leader;;) {
+ orderMap[sections[i]] = curOrder++;
+ i = clusters[i].next;
+ if (i == leader)
+ break;
+ }
+ }
+ if (!config->printSymbolOrder.empty()) {
+ std::error_code ec;
+ raw_fd_ostream os(config->printSymbolOrder, ec, sys::fs::OF_None);
+ if (ec) {
+ error("cannot open " + config->printSymbolOrder + ": " + ec.message());
+ return orderMap;
+ }
+    // Print the symbols ordered by C3, in order of increasing curOrder.
+    // Instead of sorting the entire orderMap, just repeat the loops above.
+ for (int leader : sorted)
+ for (int i = leader;;) {
+ const SectionChunk *sc = sections[i];
+
+      // Search all the symbols in the section's file and find the
+      // DefinedCOFF symbols whose names lie within this section.
+ for (Symbol *sym : sc->file->getSymbols())
+ if (auto *d = dyn_cast_or_null<DefinedCOFF>(sym))
+ // Filter out non-COMDAT symbols and section symbols.
+ if (d->isCOMDAT && !d->getCOFFSymbol().isSection() &&
+ sc == d->getChunk())
+ os << sym->getName() << "\n";
+ i = clusters[i].next;
+ if (i == leader)
+ break;
+ }
+ }
+
+ return orderMap;
+}
+
+// Sort sections by the profile data provided by /call-graph-ordering-file.
+//
+// This first builds a call graph based on the profile data, then merges
+// sections according to the C³ heuristic. All clusters are then sorted by a
+// density metric to further improve locality.
+DenseMap<const SectionChunk *, int> coff::computeCallGraphProfileOrder() {
+ return CallGraphSort().run();
+}
--- /dev/null
+//===- CallGraphSort.h ------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLD_COFF_CALL_GRAPH_SORT_H
+#define LLD_COFF_CALL_GRAPH_SORT_H
+
+#include "llvm/ADT/DenseMap.h"
+
+namespace lld {
+namespace coff {
+class SectionChunk;
+
+llvm::DenseMap<const SectionChunk *, int> computeCallGraphProfileOrder();
+} // namespace coff
+} // namespace lld
+
+#endif
SectionChunk::SectionChunk(ObjFile *f, const coff_section *h)
: Chunk(SectionKind), file(f), header(h), repl(this) {
// Initialize relocs.
- setRelocs(file->getCOFFObj()->getRelocations(header));
+ if (file)
+ setRelocs(file->getCOFFObj()->getRelocations(header));
// Initialize sectionName.
StringRef sectionName;
- if (Expected<StringRef> e = file->getCOFFObj()->getSectionName(header))
- sectionName = *e;
+ if (file) {
+ if (Expected<StringRef> e = file->getCOFFObj()->getSectionName(header))
+ sectionName = *e;
+ }
sectionNameData = sectionName.data();
sectionNameSize = sectionName.size();
// enabled, treat non-comdat sections as roots. Generally optimized object
// files will be built with -ffunction-sections or /Gy, so most things worth
// stripping will be in a comdat.
- live = !config->doGC || !isCOMDAT();
+ if (config)
+ live = !config->doGC || !isCOMDAT();
+ else
+ live = true;
}
// SectionChunk is one of the most frequently allocated classes, so it is
// Apply relocations.
size_t inputSize = getSize();
- for (size_t i = 0, e = relocsSize; i < e; i++) {
- const coff_relocation &rel = relocsData[i];
-
+ for (const coff_relocation &rel : getRelocs()) {
// Check for an invalid relocation offset. This check isn't perfect, because
// we don't have the relocation size, which is only known after checking the
// machine and relocation type. As a result, a relocation may overwrite the
continue;
}
- uint8_t *off = buf + rel.VirtualAddress;
+ applyRelocation(buf + rel.VirtualAddress, rel);
+ }
+}
- auto *sym =
- dyn_cast_or_null<Defined>(file->getSymbol(rel.SymbolTableIndex));
+void SectionChunk::applyRelocation(uint8_t *off,
+ const coff_relocation &rel) const {
+ auto *sym = dyn_cast_or_null<Defined>(file->getSymbol(rel.SymbolTableIndex));
- // Get the output section of the symbol for this relocation. The output
- // section is needed to compute SECREL and SECTION relocations used in debug
- // info.
- Chunk *c = sym ? sym->getChunk() : nullptr;
- OutputSection *os = c ? c->getOutputSection() : nullptr;
-
- // Skip the relocation if it refers to a discarded section, and diagnose it
- // as an error if appropriate. If a symbol was discarded early, it may be
- // null. If it was discarded late, the output section will be null, unless
- // it was an absolute or synthetic symbol.
- if (!sym ||
- (!os && !isa<DefinedAbsolute>(sym) && !isa<DefinedSynthetic>(sym))) {
- maybeReportRelocationToDiscarded(this, sym, rel);
- continue;
- }
+ // Get the output section of the symbol for this relocation. The output
+ // section is needed to compute SECREL and SECTION relocations used in debug
+ // info.
+ Chunk *c = sym ? sym->getChunk() : nullptr;
+ OutputSection *os = c ? c->getOutputSection() : nullptr;
- uint64_t s = sym->getRVA();
+ // Skip the relocation if it refers to a discarded section, and diagnose it
+ // as an error if appropriate. If a symbol was discarded early, it may be
+ // null. If it was discarded late, the output section will be null, unless
+ // it was an absolute or synthetic symbol.
+ if (!sym ||
+ (!os && !isa<DefinedAbsolute>(sym) && !isa<DefinedSynthetic>(sym))) {
+ maybeReportRelocationToDiscarded(this, sym, rel);
+ return;
+ }
- // Compute the RVA of the relocation for relative relocations.
- uint64_t p = rva + rel.VirtualAddress;
- switch (config->machine) {
- case AMD64:
- applyRelX64(off, rel.Type, os, s, p);
- break;
- case I386:
- applyRelX86(off, rel.Type, os, s, p);
- break;
- case ARMNT:
- applyRelARM(off, rel.Type, os, s, p);
- break;
- case ARM64:
- applyRelARM64(off, rel.Type, os, s, p);
+ uint64_t s = sym->getRVA();
+
+ // Compute the RVA of the relocation for relative relocations.
+ uint64_t p = rva + rel.VirtualAddress;
+ switch (config->machine) {
+ case AMD64:
+ applyRelX64(off, rel.Type, os, s, p);
+ break;
+ case I386:
+ applyRelX86(off, rel.Type, os, s, p);
+ break;
+ case ARMNT:
+ applyRelARM(off, rel.Type, os, s, p);
+ break;
+ case ARM64:
+ applyRelARM64(off, rel.Type, os, s, p);
+ break;
+ default:
+ llvm_unreachable("unknown machine type");
+ }
+}
+
+// Defend against unsorted relocations. This may be overly conservative.
+void SectionChunk::sortRelocations() {
+ auto cmpByVa = [](const coff_relocation &l, const coff_relocation &r) {
+ return l.VirtualAddress < r.VirtualAddress;
+ };
+ if (llvm::is_sorted(getRelocs(), cmpByVa))
+ return;
+ warn("some relocations in " + file->getName() + " are not sorted");
+ MutableArrayRef<coff_relocation> newRelocs(
+ bAlloc.Allocate<coff_relocation>(relocsSize), relocsSize);
+ memcpy(newRelocs.data(), relocsData, relocsSize * sizeof(coff_relocation));
+ llvm::sort(newRelocs, cmpByVa);
+ setRelocs(newRelocs);
+}
+
+// Similar to writeTo, but suitable for relocating a subsection of the overall
+// section.
+void SectionChunk::writeAndRelocateSubsection(ArrayRef<uint8_t> sec,
+ ArrayRef<uint8_t> subsec,
+ uint32_t &nextRelocIndex,
+ uint8_t *buf) const {
+ assert(!subsec.empty() && !sec.empty());
+ assert(sec.begin() <= subsec.begin() && subsec.end() <= sec.end() &&
+ "subsection is not part of this section");
+ size_t vaBegin = std::distance(sec.begin(), subsec.begin());
+ size_t vaEnd = std::distance(sec.begin(), subsec.end());
+ memcpy(buf, subsec.data(), subsec.size());
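+  // Relocations are assumed to be sorted by VirtualAddress (see
+  // sortRelocations), so nextRelocIndex acts as a cursor that the caller
+  // carries across successive subsection writes.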
+ for (; nextRelocIndex < relocsSize; ++nextRelocIndex) {
+ const coff_relocation &rel = relocsData[nextRelocIndex];
+ // Only apply relocations that apply to this subsection. These checks
+ // assume that all subsections completely contain their relocations.
+ // Relocations must not straddle the beginning or end of a subsection.
+ if (rel.VirtualAddress < vaBegin)
+ continue;
+ if (rel.VirtualAddress + 1 >= vaEnd)
break;
- default:
- llvm_unreachable("unknown machine type");
- }
+ applyRelocation(&buf[rel.VirtualAddress - vaBegin], rel);
}
}
void SectionChunk::addAssociative(SectionChunk *child) {
- // Insert this child at the head of the list.
+ // Insert the child section into the list of associated children. Keep the
+ // list ordered by section name so that ICF does not depend on section order.
assert(child->assocChildren == nullptr &&
"associated sections cannot have their own associated children");
- child->assocChildren = assocChildren;
- assocChildren = child;
+ SectionChunk *prev = this;
+ SectionChunk *next = assocChildren;
+ for (; next != nullptr; prev = next, next = next->assocChildren) {
+ if (next->getSectionName() <= child->getSectionName())
+ break;
+ }
+
+ // Insert child between prev and next.
+ assert(prev->assocChildren == next);
+ prev->assocChildren = child;
+ child->assocChildren = next;
}
static uint8_t getBaserelType(const coff_relocation &rel) {
case AMD64:
if (rel.Type == IMAGE_REL_AMD64_ADDR64)
return IMAGE_REL_BASED_DIR64;
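+    // 32-bit absolute addresses also need a base relocation so the loader can
+    // fix them up when the image is rebased.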
+ if (rel.Type == IMAGE_REL_AMD64_ADDR32)
+ return IMAGE_REL_BASED_HIGHLOW;
return IMAGE_REL_BASED_ABSOLUTE;
case I386:
if (rel.Type == IMAGE_REL_I386_DIR32)
// fixed by the loader if load-time relocation is needed.
// Only called when base relocation is enabled.
void SectionChunk::getBaserels(std::vector<Baserel> *res) {
- for (size_t i = 0, e = relocsSize; i < e; i++) {
- const coff_relocation &rel = relocsData[i];
+ for (const coff_relocation &rel : getRelocs()) {
uint8_t ty = getBaserelType(rel);
if (ty == IMAGE_REL_BASED_ABSOLUTE)
continue;
"RVA tables should be de-duplicated");
}
+void RVAFlagTableChunk::writeTo(uint8_t *buf) const {
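+  // Each table entry is 5 bytes: a 32-bit little-endian RVA followed by a
+  // one-byte flag. ulittle32_t is unaligned, so RVAFlag has no padding.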
+ struct RVAFlag {
+ ulittle32_t rva;
+ uint8_t flag;
+ };
+ auto flags =
+ makeMutableArrayRef(reinterpret_cast<RVAFlag *>(buf), syms.size());
+ for (auto t : zip(syms, flags)) {
+ const auto &sym = std::get<0>(t);
+ auto &flag = std::get<1>(t);
+ flag.rva = sym.inputChunk->getRVA() + sym.offset;
+ flag.flag = 0;
+ }
+ llvm::sort(flags,
+ [](const RVAFlag &a, const RVAFlag &b) { return a.rva < b.rva; });
+ assert(llvm::unique(flags, [](const RVAFlag &a,
+ const RVAFlag &b) { return a.rva == b.rva; }) ==
+ flags.end() &&
+ "RVA tables should be de-duplicated");
+}
+
// MinGW specific, for the "automatic import of variables from DLLs" feature.
size_t PseudoRelocTableChunk::getSize() const {
if (relocs.empty())
// can be stored with 32 bits.
uint32_t getRVA() const { return rva; }
void setRVA(uint64_t v) {
+ // This may truncate. The writer checks for overflow later.
rva = (uint32_t)v;
- assert(rva == v && "RVA truncated");
}
// Returns readable/writable/executable bits.
ArrayRef<uint8_t> getContents() const;
void writeTo(uint8_t *buf) const;
+ // Defend against unsorted relocations. This may be overly conservative.
+ void sortRelocations();
+
+ // Write and relocate a portion of the section. This is intended to be called
+ // in a loop. Relocations must be sorted first.
+ void writeAndRelocateSubsection(ArrayRef<uint8_t> sec,
+ ArrayRef<uint8_t> subsec,
+ uint32_t &nextRelocIndex, uint8_t *buf) const;
+
uint32_t getOutputCharacteristics() const {
return header->Characteristics & (permMask | typeMask);
}
}
void getBaserels(std::vector<Baserel> *res);
bool isCOMDAT() const;
+ void applyRelocation(uint8_t *off, const coff_relocation &rel) const;
void applyRelX64(uint8_t *off, uint16_t type, OutputSection *os, uint64_t s,
uint64_t p) const;
void applyRelX86(uint8_t *off, uint16_t type, OutputSection *os, uint64_t s,
// Allow iteration over the associated child chunks for this section.
llvm::iterator_range<AssociatedIterator> children() const {
- return llvm::make_range(AssociatedIterator(assocChildren),
- AssociatedIterator(nullptr));
+ // Associated sections do not have children. The assocChildren field is
+ // part of the parent's list of children.
+ bool isAssoc = selection == llvm::COFF::IMAGE_COMDAT_SELECT_ASSOCIATIVE;
+ return llvm::make_range(
+ AssociatedIterator(isAssoc ? nullptr : assocChildren),
+ AssociatedIterator(nullptr));
}
// The section ID this chunk belongs to in its Obj.
SymbolRVASet syms;
};
+// Table which contains symbol RVAs with flags. Used for /guard:ehcont.
+class RVAFlagTableChunk : public NonSectionChunk {
+public:
+ explicit RVAFlagTableChunk(SymbolRVASet s) : syms(std::move(s)) {}
+ size_t getSize() const override { return syms.size() * 5; }
+ void writeTo(uint8_t *buf) const override;
+
+private:
+ SymbolRVASet syms;
+};
+
// Windows-specific.
// This class represents a block in .reloc section.
// See the PE/COFF spec 5.6 for details.
#ifndef LLD_COFF_CONFIG_H
#define LLD_COFF_CONFIG_H
+#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Object/COFF.h"
class StringChunk;
class Symbol;
class InputFile;
+class SectionChunk;
// Short aliases.
static const auto AMD64 = llvm::COFF::IMAGE_FILE_MACHINE_AMD64;
Fixup = 0x4, /// Relocation Table
};
-enum class GuardCFLevel {
- Off,
- NoLongJmp, // Emit gfids but no longjmp tables
- Full, // Enable all protections.
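+// These values are bit flags that can be combined; Configuration::guardCF
+// below stores the bitwise OR of the enabled protections.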
+enum GuardCFLevel {
+ Off = 0x0,
+ CF = 0x1, /// Emit gfids tables
+ LongJmp = 0x2, /// Emit longjmp tables
+ EHCont = 0x4, /// Emit ehcont tables
+ All = 0x7 /// Enable all protections
+};
+
+enum class ICFLevel {
+ None,
+ Safe, // Safe ICF for all sections.
+ All, // Aggressive ICF for code, but safe ICF for data, similar to MSVC's
+ // behavior.
};
// Global configuration.
std::string importName;
bool demangle = true;
bool doGC = true;
- bool doICF = true;
+ ICFLevel doICF = ICFLevel::None;
bool tailMerge;
bool relocatable = true;
bool forceMultiple = false;
bool saveTemps = false;
// /guard:cf
- GuardCFLevel guardCF = GuardCFLevel::Off;
+ int guardCF = GuardCFLevel::Off;
// Used for SafeSEH.
bool safeSEH = false;
// Used for /opt:lldltocachepolicy=policy
llvm::CachePruningPolicy ltoCachePolicy;
+ // Used for /opt:[no]ltonewpassmanager
+ bool ltoNewPassManager = false;
+ // Used for /opt:[no]ltodebugpassmanager
+ bool ltoDebugPassManager = false;
+
// Used for /merge:from=to (e.g. /merge:.rdata=.text)
std::map<StringRef, StringRef> merge;
// Used for /lto-obj-path:
llvm::StringRef ltoObjPath;
+ // Used for /lto-cs-profile-generate:
+ bool ltoCSProfileGenerate = false;
+
+ // Used for /lto-cs-profile-path
+ llvm::StringRef ltoCSProfileFile;
+
+ // Used for /call-graph-ordering-file:
+ llvm::MapVector<std::pair<const SectionChunk *, const SectionChunk *>,
+ uint64_t>
+ callGraphProfile;
+ bool callGraphProfileSort = false;
+
+ // Used for /print-symbol-order:
+ StringRef printSymbolOrder;
+
uint64_t align = 4096;
uint64_t imageBase = -1;
uint64_t fileAlign = 512;
uint64_t heapCommit = 4096;
uint32_t majorImageVersion = 0;
uint32_t minorImageVersion = 0;
+ // If changing the default os/subsys version here, update the default in
+ // the MinGW driver accordingly.
uint32_t majorOSVersion = 6;
uint32_t minorOSVersion = 0;
+ uint32_t majorSubsystemVersion = 6;
+ uint32_t minorSubsystemVersion = 0;
uint32_t timestamp = 0;
uint32_t functionPadMin = 0;
bool dynamicBase = true;
bool warnLocallyDefinedImported = true;
bool warnDebugInfoUnusable = true;
bool warnLongSectionNames = true;
+ bool warnStdcallFixup = true;
bool incremental = true;
bool integrityCheck = false;
bool killAt = false;
bool thinLTOIndexOnly;
bool autoImport = false;
bool pseudoRelocs = false;
+ bool stdcallFixup = false;
};
extern Configuration *config;
#include "DLL.h"
#include "Chunks.h"
+#include "SymbolTable.h"
#include "llvm/Object/COFF.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Path.h"
return v;
}
-// Export table
// See Microsoft PE/COFF spec 4.3 for details.
// A chunk for the delay import descriptor table entry.
if (e.forwardChunk) {
write32le(p, e.forwardChunk->getRVA() | bit);
} else {
+ assert(cast<Defined>(e.sym)->getRVA() != 0 &&
+ "Exported symbol unmapped");
write32le(p, cast<Defined>(e.sym)->getRVA() | bit);
}
}
auto *c = make<HintNameChunk>(extName, 0);
names.push_back(make<LookupChunk>(c));
hintNames.push_back(c);
+      // Add a synthetic symbol for this load thunk, using the "__imp_load"
+ // prefix, in case this thunk needs to be added to the list of valid
+ // call targets for Control Flow Guard.
+ StringRef symName = saver.save("__imp_load_" + extName);
+ s->loadThunkSym =
+ cast<DefinedSynthetic>(symtab->addSynthetic(symName, t));
}
}
thunks.push_back(tm);
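+  // Add a synthetic symbol for this tail-merge thunk, in case it needs to be
+  // added to the list of valid call targets for Control Flow Guard.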
+ StringRef tmName =
+ saver.save("__tailMerge_" + syms[0]->getDLLName().lower());
+ symtab->addSynthetic(tmName, tm);
// Terminate with null values.
addresses.push_back(make<NullChunk>(8));
names.push_back(make<NullChunk>(8));
#include "Chunks.h"
#include "Driver.h"
#include "InputFiles.h"
+#include "PDB.h"
#include "TypeMerger.h"
#include "lld/Common/ErrorHandler.h"
#include "lld/Common/Memory.h"
+#include "lld/Common/Timer.h"
+#include "llvm/DebugInfo/CodeView/TypeIndexDiscovery.h"
#include "llvm/DebugInfo/CodeView/TypeRecord.h"
#include "llvm/DebugInfo/CodeView/TypeRecordHelpers.h"
#include "llvm/DebugInfo/CodeView/TypeStreamMerger.h"
#include "llvm/DebugInfo/PDB/Native/InfoStream.h"
#include "llvm/DebugInfo/PDB/Native/NativeSession.h"
#include "llvm/DebugInfo/PDB/Native/PDBFile.h"
+#include "llvm/DebugInfo/PDB/Native/TpiHashing.h"
#include "llvm/DebugInfo/PDB/Native/TpiStream.h"
+#include "llvm/Support/FormatVariadic.h"
+#include "llvm/Support/Parallel.h"
#include "llvm/Support/Path.h"
using namespace llvm;
using namespace lld::coff;
namespace {
+class TypeServerIpiSource;
+
// The TypeServerSource class represents a PDB type server, a file referenced by
// OBJ files compiled with MSVC /Zi. A single PDB can be shared by several OBJ
// files, therefore there must be only one instance per OBJ lot. The file path
auto expectedInfo = file.getPDBInfoStream();
if (!expectedInfo)
return;
- auto it = mappings.emplace(expectedInfo->getGuid(), this);
+ Guid = expectedInfo->getGuid();
+ auto it = mappings.emplace(Guid, this);
assert(it.second);
(void)it;
- tsIndexMap.isTypeServerMap = true;
}
- Expected<const CVIndexMap *> mergeDebugT(TypeMerger *m,
- CVIndexMap *indexMap) override;
+ Error mergeDebugT(TypeMerger *m) override;
+
+ void loadGHashes() override;
+ void remapTpiWithGHashes(GHashState *g) override;
+
bool isDependency() const override { return true; }
PDBInputFile *pdbInputFile = nullptr;
- CVIndexMap tsIndexMap;
+ // TpiSource for IPI stream.
+ TypeServerIpiSource *ipiSrc = nullptr;
+
+ // The PDB signature GUID.
+ codeview::GUID Guid;
static std::map<codeview::GUID, TypeServerSource *> mappings;
};
+// Companion to TypeServerSource. Stores the index map for the IPI stream in the
+// PDB. Modeling PDBs with two sources for TPI and IPI helps establish the
+// invariant of one type index space per source.
+class TypeServerIpiSource : public TpiSource {
+public:
+ explicit TypeServerIpiSource() : TpiSource(PDBIpi, nullptr) {}
+
+ friend class TypeServerSource;
+
+ // All of the TpiSource methods are no-ops. The parent TypeServerSource
+ // handles both TPI and IPI.
+ Error mergeDebugT(TypeMerger *m) override { return Error::success(); }
+ void loadGHashes() override {}
+ void remapTpiWithGHashes(GHashState *g) override {}
+ bool isDependency() const override { return true; }
+};
+
// This class represents the debug type stream of an OBJ file that depends on a
// PDB type server (see TypeServerSource).
class UseTypeServerSource : public TpiSource {
+ Expected<TypeServerSource *> getTypeServerSource();
+
public:
UseTypeServerSource(ObjFile *f, TypeServer2Record ts)
: TpiSource(UsingPDB, f), typeServerDependency(ts) {}
- Expected<const CVIndexMap *> mergeDebugT(TypeMerger *m,
- CVIndexMap *indexMap) override;
+ Error mergeDebugT(TypeMerger *m) override;
+
+ // No need to load ghashes from /Zi objects.
+ void loadGHashes() override {}
+ void remapTpiWithGHashes(GHashState *g) override;
// Information about the PDB type server dependency, that needs to be loaded
// in before merging this OBJ.
if (!it.second)
fatal("a PCH object with the same signature has already been provided (" +
toString(it.first->second->file) + " and " + toString(file) + ")");
- precompIndexMap.isPrecompiledTypeMap = true;
}
- Expected<const CVIndexMap *> mergeDebugT(TypeMerger *m,
- CVIndexMap *indexMap) override;
- bool isDependency() const override { return true; }
+ void loadGHashes() override;
- CVIndexMap precompIndexMap;
+ bool isDependency() const override { return true; }
static std::map<uint32_t, PrecompSource *> mappings;
};
UsePrecompSource(ObjFile *f, PrecompRecord precomp)
: TpiSource(UsingPCH, f), precompDependency(precomp) {}
- Expected<const CVIndexMap *> mergeDebugT(TypeMerger *m,
- CVIndexMap *indexMap) override;
+ Error mergeDebugT(TypeMerger *m) override;
+
+ void loadGHashes() override;
+ void remapTpiWithGHashes(GHashState *g) override;
+private:
+ Error mergeInPrecompHeaderObj();
+
+public:
// Information about the Precomp OBJ dependency, that needs to be loaded in
// before merging this OBJ.
PrecompRecord precompDependency;
};
} // namespace
-static std::vector<TpiSource *> gc;
+std::vector<TpiSource *> TpiSource::instances;
+ArrayRef<TpiSource *> TpiSource::dependencySources;
+ArrayRef<TpiSource *> TpiSource::objectSources;
-TpiSource::TpiSource(TpiKind k, ObjFile *f) : kind(k), file(f) {
- gc.push_back(this);
+TpiSource::TpiSource(TpiKind k, ObjFile *f)
+ : kind(k), tpiSrcIdx(instances.size()), file(f) {
+ instances.push_back(this);
}
// Vtable key method.
-TpiSource::~TpiSource() = default;
+TpiSource::~TpiSource() {
+ // Silence any assertions about unchecked errors.
+ consumeError(std::move(typeMergingError));
+}
+
+void TpiSource::sortDependencies() {
+ // Order dependencies first, but preserve the existing order.
+ std::vector<TpiSource *> deps;
+ std::vector<TpiSource *> objs;
+ for (TpiSource *s : instances)
+ (s->isDependency() ? deps : objs).push_back(s);
+ uint32_t numDeps = deps.size();
+ uint32_t numObjs = objs.size();
+ instances = std::move(deps);
+ instances.insert(instances.end(), objs.begin(), objs.end());
+ for (uint32_t i = 0, e = instances.size(); i < e; ++i)
+ instances[i]->tpiSrcIdx = i;
+ dependencySources = makeArrayRef(instances.data(), numDeps);
+ objectSources = makeArrayRef(instances.data() + numDeps, numObjs);
+}
TpiSource *lld::coff::makeTpiSource(ObjFile *file) {
return make<TpiSource>(TpiSource::Regular, file);
}
TpiSource *lld::coff::makeTypeServerSource(PDBInputFile *pdbInputFile) {
- return make<TypeServerSource>(pdbInputFile);
+ // Type server sources come in pairs: the TPI stream, and the IPI stream.
+ auto *tpiSource = make<TypeServerSource>(pdbInputFile);
+ if (pdbInputFile->session->getPDBFile().hasPDBIpiStream())
+ tpiSource->ipiSrc = make<TypeServerIpiSource>();
+ return tpiSource;
}
TpiSource *lld::coff::makeUseTypeServerSource(ObjFile *file,
return make<UsePrecompSource>(file, precomp);
}
-void TpiSource::forEachSource(llvm::function_ref<void(TpiSource *)> fn) {
- for_each(gc, fn);
-}
-
std::map<codeview::GUID, TypeServerSource *> TypeServerSource::mappings;
std::map<uint32_t, PrecompSource *> PrecompSource::mappings;
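+// Remap a single type or item index from this source's index space into the
+// destination PDB's index space. Returns false if the index is out of range,
+// which indicates a corrupt record.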
+bool TpiSource::remapTypeIndex(TypeIndex &ti, TiRefKind refKind) const {
+ if (ti.isSimple())
+ return true;
+
+ // This can be an item index or a type index. Choose the appropriate map.
+ ArrayRef<TypeIndex> tpiOrIpiMap =
+ (refKind == TiRefKind::IndexRef) ? ipiMap : tpiMap;
+ if (ti.toArrayIndex() >= tpiOrIpiMap.size())
+ return false;
+ ti = tpiOrIpiMap[ti.toArrayIndex()];
+ return true;
+}
+
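+// Remap, in place, every type index referenced by a single record, using the
+// list of index references previously discovered for it. Unresolvable indices
+// are replaced with the NotTranslated sentinel.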
+void TpiSource::remapRecord(MutableArrayRef<uint8_t> rec,
+ ArrayRef<TiReference> typeRefs) {
+ MutableArrayRef<uint8_t> contents = rec.drop_front(sizeof(RecordPrefix));
+ for (const TiReference &ref : typeRefs) {
+ unsigned byteSize = ref.Count * sizeof(TypeIndex);
+ if (contents.size() < ref.Offset + byteSize)
+ fatal("symbol record too short");
+
+ MutableArrayRef<TypeIndex> indices(
+ reinterpret_cast<TypeIndex *>(contents.data() + ref.Offset), ref.Count);
+ for (TypeIndex &ti : indices) {
+ if (!remapTypeIndex(ti, ref.Kind)) {
+ if (config->verbose) {
+ uint16_t kind =
+ reinterpret_cast<const RecordPrefix *>(rec.data())->RecordKind;
+ StringRef fname = file ? file->getName() : "<unknown PDB>";
+ log("failed to remap type index in record of kind 0x" +
+ utohexstr(kind) + " in " + fname + " with bad " +
+ (ref.Kind == TiRefKind::IndexRef ? "item" : "type") +
+ " index 0x" + utohexstr(ti.getIndex()));
+ }
+ ti = TypeIndex(SimpleTypeKind::NotTranslated);
+ continue;
+ }
+ }
+ }
+}
+
+void TpiSource::remapTypesInTypeRecord(MutableArrayRef<uint8_t> rec) {
+ // TODO: Handle errors similar to symbols.
+ SmallVector<TiReference, 32> typeRefs;
+ discoverTypeIndices(CVType(rec), typeRefs);
+ remapRecord(rec, typeRefs);
+}
+
+bool TpiSource::remapTypesInSymbolRecord(MutableArrayRef<uint8_t> rec) {
+ // Discover type index references in the record. Skip it if we don't
+ // know where they are.
+ SmallVector<TiReference, 32> typeRefs;
+ if (!discoverTypeIndicesInSymbol(rec, typeRefs))
+ return false;
+ remapRecord(rec, typeRefs);
+ return true;
+}
+
// A COFF .debug$H section is currently a clang extension. This function checks
// if a .debug$H section is in a format that we expect / understand, so that we
// can ignore any sections which are coincidentally also named .debug$H but do
static ArrayRef<GloballyHashedType>
getHashesFromDebugH(ArrayRef<uint8_t> debugH) {
assert(canUseDebugH(debugH));
-
debugH = debugH.drop_front(sizeof(object::debug_h_header));
uint32_t count = debugH.size() / sizeof(GloballyHashedType);
return {reinterpret_cast<const GloballyHashedType *>(debugH.data()), count};
}
// Merge .debug$T for a generic object file.
-Expected<const CVIndexMap *> TpiSource::mergeDebugT(TypeMerger *m,
- CVIndexMap *indexMap) {
+Error TpiSource::mergeDebugT(TypeMerger *m) {
+ assert(!config->debugGHashes &&
+ "use remapTpiWithGHashes when ghash is enabled");
+
CVTypeArray types;
BinaryStreamReader reader(file->debugTypes, support::little);
cantFail(reader.readArray(types, reader.getLength()));
// When dealing with PCH.OBJ, some indices were already merged.
- unsigned nbHeadIndices = indexMap->tpiMap.size();
-
- if (config->debugGHashes) {
- ArrayRef<GloballyHashedType> hashes;
- std::vector<GloballyHashedType> ownedHashes;
- if (Optional<ArrayRef<uint8_t>> debugH = getDebugH(file))
- hashes = getHashesFromDebugH(*debugH);
- else {
- ownedHashes = GloballyHashedType::hashTypes(types);
- hashes = ownedHashes;
- }
+ unsigned nbHeadIndices = indexMapStorage.size();
- if (auto err = mergeTypeAndIdRecords(m->globalIDTable, m->globalTypeTable,
- indexMap->tpiMap, types, hashes,
- file->pchSignature))
- fatal("codeview::mergeTypeAndIdRecords failed: " +
- toString(std::move(err)));
- } else {
- if (auto err =
- mergeTypeAndIdRecords(m->idTable, m->typeTable, indexMap->tpiMap,
- types, file->pchSignature))
- fatal("codeview::mergeTypeAndIdRecords failed: " +
- toString(std::move(err)));
- }
+ if (auto err = mergeTypeAndIdRecords(
+ m->idTable, m->typeTable, indexMapStorage, types, file->pchSignature))
+ fatal("codeview::mergeTypeAndIdRecords failed: " +
+ toString(std::move(err)));
+
+ // In an object, there is only one mapping for both types and items.
+ tpiMap = indexMapStorage;
+ ipiMap = indexMapStorage;
if (config->showSummary) {
+ nbTypeRecords = indexMapStorage.size() - nbHeadIndices;
+ nbTypeRecordsBytes = reader.getLength();
// Count how many times we saw each type record in our input. This
// calculation requires a second pass over the type records to classify each
// record as a type or index. This is slow, but this code executes when
m->ipiCounts.resize(m->getIDTable().size());
uint32_t srcIdx = nbHeadIndices;
for (CVType &ty : types) {
- TypeIndex dstIdx = indexMap->tpiMap[srcIdx++];
+ TypeIndex dstIdx = tpiMap[srcIdx++];
// Type merging may fail, so a complex source type may become the simple
// NotTranslated type, which cannot be used as an array index.
if (dstIdx.isSimple())
}
}
- return indexMap;
+ return Error::success();
}
// Merge types from a type server PDB.
-Expected<const CVIndexMap *> TypeServerSource::mergeDebugT(TypeMerger *m,
- CVIndexMap *) {
+Error TypeServerSource::mergeDebugT(TypeMerger *m) {
+ assert(!config->debugGHashes &&
+ "use remapTpiWithGHashes when ghash is enabled");
+
pdb::PDBFile &pdbFile = pdbInputFile->session->getPDBFile();
Expected<pdb::TpiStream &> expectedTpi = pdbFile.getPDBTpiStream();
if (auto e = expectedTpi.takeError())
maybeIpi = &*expectedIpi;
}
- if (config->debugGHashes) {
- // PDBs do not actually store global hashes, so when merging a type server
- // PDB we have to synthesize global hashes. To do this, we first synthesize
- // global hashes for the TPI stream, since it is independent, then we
- // synthesize hashes for the IPI stream, using the hashes for the TPI stream
- // as inputs.
- auto tpiHashes = GloballyHashedType::hashTypes(expectedTpi->typeArray());
- Optional<uint32_t> endPrecomp;
- // Merge TPI first, because the IPI stream will reference type indices.
- if (auto err =
- mergeTypeRecords(m->globalTypeTable, tsIndexMap.tpiMap,
- expectedTpi->typeArray(), tpiHashes, endPrecomp))
- fatal("codeview::mergeTypeRecords failed: " + toString(std::move(err)));
-
- // Merge IPI.
- if (maybeIpi) {
- auto ipiHashes =
- GloballyHashedType::hashIds(maybeIpi->typeArray(), tpiHashes);
- if (auto err = mergeIdRecords(m->globalIDTable, tsIndexMap.tpiMap,
- tsIndexMap.ipiMap, maybeIpi->typeArray(),
- ipiHashes))
- fatal("codeview::mergeIdRecords failed: " + toString(std::move(err)));
- }
- } else {
- // Merge TPI first, because the IPI stream will reference type indices.
- if (auto err = mergeTypeRecords(m->typeTable, tsIndexMap.tpiMap,
- expectedTpi->typeArray()))
- fatal("codeview::mergeTypeRecords failed: " + toString(std::move(err)));
-
- // Merge IPI.
- if (maybeIpi) {
- if (auto err = mergeIdRecords(m->idTable, tsIndexMap.tpiMap,
- tsIndexMap.ipiMap, maybeIpi->typeArray()))
- fatal("codeview::mergeIdRecords failed: " + toString(std::move(err)));
- }
+ // Merge TPI first, because the IPI stream will reference type indices.
+ if (auto err = mergeTypeRecords(m->typeTable, indexMapStorage,
+ expectedTpi->typeArray()))
+ fatal("codeview::mergeTypeRecords failed: " + toString(std::move(err)));
+ tpiMap = indexMapStorage;
+
+ // Merge IPI.
+ if (maybeIpi) {
+ if (auto err = mergeIdRecords(m->idTable, tpiMap, ipiSrc->indexMapStorage,
+ maybeIpi->typeArray()))
+ fatal("codeview::mergeIdRecords failed: " + toString(std::move(err)));
+ ipiMap = ipiSrc->indexMapStorage;
}
if (config->showSummary) {
+ nbTypeRecords = tpiMap.size() + ipiMap.size();
+ nbTypeRecordsBytes =
+ expectedTpi->typeArray().getUnderlyingStream().getLength() +
+ (maybeIpi ? maybeIpi->typeArray().getUnderlyingStream().getLength()
+ : 0);
+
// Count how many times we saw each type record in our input. If a
// destination type index is present in the source to destination type index
// map, that means we saw it once in the input. Add it to our histogram.
m->tpiCounts.resize(m->getTypeTable().size());
m->ipiCounts.resize(m->getIDTable().size());
- for (TypeIndex ti : tsIndexMap.tpiMap)
+ for (TypeIndex ti : tpiMap)
if (!ti.isSimple())
++m->tpiCounts[ti.toArrayIndex()];
- for (TypeIndex ti : tsIndexMap.ipiMap)
+ for (TypeIndex ti : ipiMap)
if (!ti.isSimple())
++m->ipiCounts[ti.toArrayIndex()];
}
- return &tsIndexMap;
+ return Error::success();
}
-Expected<const CVIndexMap *>
-UseTypeServerSource::mergeDebugT(TypeMerger *m, CVIndexMap *indexMap) {
+Expected<TypeServerSource *> UseTypeServerSource::getTypeServerSource() {
const codeview::GUID &tsId = typeServerDependency.getGuid();
StringRef tsPath = typeServerDependency.getName();
return createFileError(tsPath, std::move(*pdb->loadErr));
tsSrc = (TypeServerSource *)pdb->debugTypesObj;
+
+ // Just because a file with a matching name was found and it was an actual
+  // PDB file doesn't mean it matches. For it to match, the InfoStream's GUID
+ // must match the GUID specified in the TypeServer2 record.
+ if (tsSrc->Guid != tsId) {
+ return createFileError(tsPath,
+ make_error<pdb::PDBError>(
+ pdb::pdb_error_code::signature_out_of_date));
+ }
}
+ return tsSrc;
+}
- pdb::PDBFile &pdbSession = tsSrc->pdbInputFile->session->getPDBFile();
+Error UseTypeServerSource::mergeDebugT(TypeMerger *m) {
+ Expected<TypeServerSource *> tsSrc = getTypeServerSource();
+ if (!tsSrc)
+ return tsSrc.takeError();
+
+ pdb::PDBFile &pdbSession = (*tsSrc)->pdbInputFile->session->getPDBFile();
auto expectedInfo = pdbSession.getPDBInfoStream();
if (!expectedInfo)
- return &tsSrc->tsIndexMap;
-
- // Just because a file with a matching name was found and it was an actual
- // PDB file doesn't mean it matches. For it to match the InfoStream's GUID
- // must match the GUID specified in the TypeServer2 record.
- if (expectedInfo->getGuid() != typeServerDependency.getGuid())
- return createFileError(
- tsPath,
- make_error<pdb::PDBError>(pdb::pdb_error_code::signature_out_of_date));
+ return expectedInfo.takeError();
- return &tsSrc->tsIndexMap;
+ // Reuse the type index map of the type server.
+ tpiMap = (*tsSrc)->tpiMap;
+ ipiMap = (*tsSrc)->ipiMap;
+ return Error::success();
}
static bool equalsPath(StringRef path1, StringRef path2) {
#if defined(_WIN32)
- return path1.equals_lower(path2);
+ return path1.equals_insensitive(path2);
#else
return path1.equals(path2);
#endif
return nullptr;
}
-Expected<const CVIndexMap *> findPrecompMap(ObjFile *file, PrecompRecord &pr) {
+static PrecompSource *findPrecompSource(ObjFile *file, PrecompRecord &pr) {
// Cross-compile warning: given that Clang doesn't generate LF_PRECOMP
// records, we assume the OBJ comes from a Windows build of cl.exe. Thus,
// the paths embedded in the OBJs are in the Windows format.
SmallString<128> prFileName =
sys::path::filename(pr.getPrecompFilePath(), sys::path::Style::windows);
- PrecompSource *precomp;
auto it = PrecompSource::mappings.find(pr.getSignature());
if (it != PrecompSource::mappings.end()) {
- precomp = it->second;
- } else {
- // Lookup by name
- precomp = findObjByName(prFileName);
+ return it->second;
}
+ // Lookup by name
+ return findObjByName(prFileName);
+}
+
+static Expected<PrecompSource *> findPrecompMap(ObjFile *file,
+ PrecompRecord &pr) {
+ PrecompSource *precomp = findPrecompSource(file, pr);
if (!precomp)
return createFileError(
- prFileName,
+ pr.getPrecompFilePath(),
make_error<pdb::PDBError>(pdb::pdb_error_code::no_matching_pch));
if (pr.getSignature() != file->pchSignature)
toString(precomp->file),
make_error<pdb::PDBError>(pdb::pdb_error_code::no_matching_pch));
- return &precomp->precompIndexMap;
+ return precomp;
}
/// Merges a precompiled headers TPI map into the current TPI map. The
/// precompiled headers object will also be loaded and remapped in the
/// process.
-static Expected<const CVIndexMap *>
-mergeInPrecompHeaderObj(ObjFile *file, CVIndexMap *indexMap,
- PrecompRecord &precomp) {
- auto e = findPrecompMap(file, precomp);
+Error UsePrecompSource::mergeInPrecompHeaderObj() {
+ auto e = findPrecompMap(file, precompDependency);
if (!e)
return e.takeError();
- const CVIndexMap *precompIndexMap = *e;
- assert(precompIndexMap->isPrecompiledTypeMap);
+ PrecompSource *precompSrc = *e;
+ if (precompSrc->tpiMap.empty())
+ return Error::success();
- if (precompIndexMap->tpiMap.empty())
- return precompIndexMap;
-
- assert(precomp.getStartTypeIndex() == TypeIndex::FirstNonSimpleIndex);
- assert(precomp.getTypesCount() <= precompIndexMap->tpiMap.size());
+ assert(precompDependency.getStartTypeIndex() ==
+ TypeIndex::FirstNonSimpleIndex);
+ assert(precompDependency.getTypesCount() <= precompSrc->tpiMap.size());
// Use the previously remapped index map from the precompiled headers.
- indexMap->tpiMap.append(precompIndexMap->tpiMap.begin(),
- precompIndexMap->tpiMap.begin() +
- precomp.getTypesCount());
- return indexMap;
+ indexMapStorage.insert(indexMapStorage.begin(), precompSrc->tpiMap.begin(),
+ precompSrc->tpiMap.begin() +
+ precompDependency.getTypesCount());
+
+ return Error::success();
}
-Expected<const CVIndexMap *>
-UsePrecompSource::mergeDebugT(TypeMerger *m, CVIndexMap *indexMap) {
+Error UsePrecompSource::mergeDebugT(TypeMerger *m) {
// This object was compiled with /Yu, so process the corresponding
// precompiled headers object (/Yc) first. Some type indices in the current
// object are referencing data in the precompiled headers object, so we need
// both to be loaded.
- auto e = mergeInPrecompHeaderObj(file, indexMap, precompDependency);
- if (!e)
- return e.takeError();
-
- // Drop LF_PRECOMP record from the input stream, as it has been replaced
- // with the precompiled headers Type stream in the mergeInPrecompHeaderObj()
- // call above. Note that we can't just call Types.drop_front(), as we
- // explicitly want to rebase the stream.
- CVTypeArray types;
- BinaryStreamReader reader(file->debugTypes, support::little);
- cantFail(reader.readArray(types, reader.getLength()));
- auto firstType = types.begin();
- file->debugTypes = file->debugTypes.drop_front(firstType->RecordData.size());
+ if (Error e = mergeInPrecompHeaderObj())
+ return e;
- return TpiSource::mergeDebugT(m, indexMap);
-}
-
-Expected<const CVIndexMap *> PrecompSource::mergeDebugT(TypeMerger *m,
- CVIndexMap *) {
- // Note that we're not using the provided CVIndexMap. Instead, we use our
- // local one. Precompiled headers objects need to save the index map for
- // further reference by other objects which use the precompiled headers.
- return TpiSource::mergeDebugT(m, &precompIndexMap);
+ return TpiSource::mergeDebugT(m);
}
uint32_t TpiSource::countTypeServerPDBs() {
}
void TpiSource::clear() {
- gc.clear();
+ // Clean up any owned ghash allocations.
+ clearGHashes();
+ TpiSource::instances.clear();
TypeServerSource::mappings.clear();
PrecompSource::mappings.clear();
}
+
+//===----------------------------------------------------------------------===//
+// Parallel GHash type merging implementation.
+//===----------------------------------------------------------------------===//
+
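+// Load the global hashes for a plain object: reuse a prebuilt .debug$H section
+// if the compiler emitted one, otherwise hash the .debug$T records here.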
+void TpiSource::loadGHashes() {
+ if (Optional<ArrayRef<uint8_t>> debugH = getDebugH(file)) {
+ ghashes = getHashesFromDebugH(*debugH);
+ ownedGHashes = false;
+ } else {
+ CVTypeArray types;
+ BinaryStreamReader reader(file->debugTypes, support::little);
+ cantFail(reader.readArray(types, reader.getLength()));
+ assignGHashesFromVector(GloballyHashedType::hashTypes(types));
+ }
+
+ fillIsItemIndexFromDebugT();
+}
+
+// Copies ghashes from a vector into an array. These are long lived, so it's
+// worth the time to copy them into an appropriately sized allocation to reduce
+// memory usage.
+void TpiSource::assignGHashesFromVector(
+ std::vector<GloballyHashedType> &&hashVec) {
+ if (hashVec.empty())
+ return;
+ GloballyHashedType *hashes = new GloballyHashedType[hashVec.size()];
+ memcpy(hashes, hashVec.data(), hashVec.size() * sizeof(GloballyHashedType));
+ ghashes = makeArrayRef(hashes, hashVec.size());
+ ownedGHashes = true;
+}
+
+// Faster way to iterate type records. forEachTypeChecked is faster than
+// iterating CVTypeArray. It avoids virtual readBytes calls in inner loops.
+static void forEachTypeChecked(ArrayRef<uint8_t> types,
+ function_ref<void(const CVType &)> fn) {
+ checkError(
+ forEachCodeViewRecord<CVType>(types, [fn](const CVType &ty) -> Error {
+ fn(ty);
+ return Error::success();
+ }));
+}
+
+// Walk over file->debugTypes and fill in the isItemIndex bit vector.
+// TODO: Store this information in .debug$H so that we don't have to recompute
+// it. This recomputation is the main reason parallel ghashing with one thread
+// is slower than single-threaded ghashing.
+void TpiSource::fillIsItemIndexFromDebugT() {
+ uint32_t index = 0;
+ isItemIndex.resize(ghashes.size());
+ forEachTypeChecked(file->debugTypes, [&](const CVType &ty) {
+ if (isIdRecord(ty.kind()))
+ isItemIndex.set(index);
+ ++index;
+ });
+}
+
+void TpiSource::mergeTypeRecord(TypeIndex curIndex, CVType ty) {
+ // Decide if the merged type goes into TPI or IPI.
+ bool isItem = isIdRecord(ty.kind());
+ MergedInfo &merged = isItem ? mergedIpi : mergedTpi;
+
+ // Copy the type into our mutable buffer.
+ assert(ty.length() <= codeview::MaxRecordLength);
+ size_t offset = merged.recs.size();
+ size_t newSize = alignTo(ty.length(), 4);
+ merged.recs.resize(offset + newSize);
+ auto newRec = makeMutableArrayRef(&merged.recs[offset], newSize);
+ memcpy(newRec.data(), ty.data().data(), newSize);
+
+ // Fix up the record prefix and padding bytes if it required resizing.
+ if (newSize != ty.length()) {
+ reinterpret_cast<RecordPrefix *>(newRec.data())->RecordLen = newSize - 2;
+ for (size_t i = ty.length(); i < newSize; ++i)
+ newRec[i] = LF_PAD0 + (newSize - i);
+ }
+
+ // Remap the type indices in the new record.
+ remapTypesInTypeRecord(newRec);
+ uint32_t pdbHash = check(pdb::hashTypeRecord(CVType(newRec)));
+ merged.recSizes.push_back(static_cast<uint16_t>(newSize));
+ merged.recHashes.push_back(pdbHash);
+
+ // Retain a mapping from PDB function id to PDB function type. This mapping is
+ // used during symbol processing to rewrite S_GPROC32_ID symbols to S_GPROC32
+ // symbols.
+ if (ty.kind() == LF_FUNC_ID || ty.kind() == LF_MFUNC_ID) {
+ bool success = ty.length() >= 12;
+ TypeIndex funcId = curIndex;
+ if (success)
+ success &= remapTypeIndex(funcId, TiRefKind::IndexRef);
+ TypeIndex funcType =
+ *reinterpret_cast<const TypeIndex *>(&newRec.data()[8]);
+ if (success) {
+ funcIdToType.push_back({funcId, funcType});
+ } else {
+ StringRef fname = file ? file->getName() : "<unknown PDB>";
+ warn("corrupt LF_[M]FUNC_ID record 0x" + utohexstr(curIndex.getIndex()) +
+ " in " + fname);
+ }
+ }
+}
+
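+// Walk this source's type records and copy only those marked unique by global
+// hashing into mergedTpi/mergedIpi, remapping their indices along the way.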
+void TpiSource::mergeUniqueTypeRecords(ArrayRef<uint8_t> typeRecords,
+ TypeIndex beginIndex) {
+ // Re-sort the list of unique types by index.
+ if (kind == PDB)
+ assert(std::is_sorted(uniqueTypes.begin(), uniqueTypes.end()));
+ else
+ llvm::sort(uniqueTypes);
+
+ // Accumulate all the unique types into one buffer in mergedTypes.
+ uint32_t ghashIndex = 0;
+ auto nextUniqueIndex = uniqueTypes.begin();
+ assert(mergedTpi.recs.empty());
+ assert(mergedIpi.recs.empty());
+
+  // Compute the number of elements in advance to avoid std::vector resizes.
+ unsigned nbTpiRecs = 0;
+ unsigned nbIpiRecs = 0;
+ forEachTypeChecked(typeRecords, [&](const CVType &ty) {
+ if (nextUniqueIndex != uniqueTypes.end() &&
+ *nextUniqueIndex == ghashIndex) {
+ assert(ty.length() <= codeview::MaxRecordLength);
+ size_t newSize = alignTo(ty.length(), 4);
+ (isIdRecord(ty.kind()) ? nbIpiRecs : nbTpiRecs) += newSize;
+ ++nextUniqueIndex;
+ }
+ ++ghashIndex;
+ });
+ mergedTpi.recs.reserve(nbTpiRecs);
+ mergedIpi.recs.reserve(nbIpiRecs);
+
+ // Do the actual type merge.
+ ghashIndex = 0;
+ nextUniqueIndex = uniqueTypes.begin();
+ forEachTypeChecked(typeRecords, [&](const CVType &ty) {
+ if (nextUniqueIndex != uniqueTypes.end() &&
+ *nextUniqueIndex == ghashIndex) {
+ mergeTypeRecord(beginIndex + ghashIndex, ty);
+ ++nextUniqueIndex;
+ }
+ ++ghashIndex;
+ });
+ assert(nextUniqueIndex == uniqueTypes.end() &&
+ "failed to merge all desired records");
+ assert(uniqueTypes.size() ==
+ mergedTpi.recSizes.size() + mergedIpi.recSizes.size() &&
+ "missing desired record");
+}
+
+void TpiSource::remapTpiWithGHashes(GHashState *g) {
+ assert(config->debugGHashes && "ghashes must be enabled");
+ fillMapFromGHashes(g);
+ tpiMap = indexMapStorage;
+ ipiMap = indexMapStorage;
+ mergeUniqueTypeRecords(file->debugTypes);
+ // TODO: Free all unneeded ghash resources now that we have a full index map.
+
+ if (config->showSummary) {
+ nbTypeRecords = ghashes.size();
+ nbTypeRecordsBytes = file->debugTypes.size();
+ }
+}
+
+// PDBs do not actually store global hashes, so when merging a type server
+// PDB we have to synthesize global hashes. To do this, we first synthesize
+// global hashes for the TPI stream, since it is independent, then we
+// synthesize hashes for the IPI stream, using the hashes for the TPI stream
+// as inputs.
+void TypeServerSource::loadGHashes() {
+ // Don't hash twice.
+ if (!ghashes.empty())
+ return;
+ pdb::PDBFile &pdbFile = pdbInputFile->session->getPDBFile();
+
+ // Hash TPI stream.
+ Expected<pdb::TpiStream &> expectedTpi = pdbFile.getPDBTpiStream();
+ if (auto e = expectedTpi.takeError())
+ fatal("Type server does not have TPI stream: " + toString(std::move(e)));
+ assignGHashesFromVector(
+ GloballyHashedType::hashTypes(expectedTpi->typeArray()));
+ isItemIndex.resize(ghashes.size());
+
+ // Hash IPI stream, which depends on TPI ghashes.
+ if (!pdbFile.hasPDBIpiStream())
+ return;
+ Expected<pdb::TpiStream &> expectedIpi = pdbFile.getPDBIpiStream();
+ if (auto e = expectedIpi.takeError())
+ fatal("error retrieving IPI stream: " + toString(std::move(e)));
+ ipiSrc->assignGHashesFromVector(
+ GloballyHashedType::hashIds(expectedIpi->typeArray(), ghashes));
+
+ // The IPI stream isItemIndex bitvector should be all ones.
+ ipiSrc->isItemIndex.resize(ipiSrc->ghashes.size());
+ ipiSrc->isItemIndex.set(0, ipiSrc->ghashes.size());
+}
+
+// Flatten discontiguous PDB type arrays to bytes so that we can use
+// forEachTypeChecked instead of CVTypeArray iteration. Copying all types from
+// type servers is faster than iterating all object files compiled with /Z7 with
+// CVTypeArray, which has high overheads due to the virtual interface of
+// BinaryStream::readBytes.
+static ArrayRef<uint8_t> typeArrayToBytes(const CVTypeArray &types) {
+ BinaryStreamRef stream = types.getUnderlyingStream();
+ ArrayRef<uint8_t> debugTypes;
+ checkError(stream.readBytes(0, stream.getLength(), debugTypes));
+ return debugTypes;
+}
+
+// Merge types from a type server PDB.
+void TypeServerSource::remapTpiWithGHashes(GHashState *g) {
+ assert(config->debugGHashes && "ghashes must be enabled");
+
+ // IPI merging depends on TPI, so do TPI first, then do IPI. No need to
+ // propagate errors, those should've been handled during ghash loading.
+ pdb::PDBFile &pdbFile = pdbInputFile->session->getPDBFile();
+ pdb::TpiStream &tpi = check(pdbFile.getPDBTpiStream());
+ fillMapFromGHashes(g);
+ tpiMap = indexMapStorage;
+ mergeUniqueTypeRecords(typeArrayToBytes(tpi.typeArray()));
+ if (pdbFile.hasPDBIpiStream()) {
+ pdb::TpiStream &ipi = check(pdbFile.getPDBIpiStream());
+ ipiSrc->indexMapStorage.resize(ipiSrc->ghashes.size());
+ ipiSrc->fillMapFromGHashes(g);
+ ipiMap = ipiSrc->indexMapStorage;
+ ipiSrc->tpiMap = tpiMap;
+ ipiSrc->ipiMap = ipiMap;
+ ipiSrc->mergeUniqueTypeRecords(typeArrayToBytes(ipi.typeArray()));
+
+ if (config->showSummary) {
+ nbTypeRecords = ipiSrc->ghashes.size();
+ nbTypeRecordsBytes = ipi.typeArray().getUnderlyingStream().getLength();
+ }
+ }
+
+ if (config->showSummary) {
+ nbTypeRecords += ghashes.size();
+ nbTypeRecordsBytes += tpi.typeArray().getUnderlyingStream().getLength();
+ }
+}
+
+void UseTypeServerSource::remapTpiWithGHashes(GHashState *g) {
+ // No remapping to do with /Zi objects. Simply use the index map from the type
+ // server. Errors should have been reported earlier. Symbols from this object
+ // will be ignored.
+ Expected<TypeServerSource *> maybeTsSrc = getTypeServerSource();
+ if (!maybeTsSrc) {
+ typeMergingError =
+ joinErrors(std::move(typeMergingError), maybeTsSrc.takeError());
+ return;
+ }
+ TypeServerSource *tsSrc = *maybeTsSrc;
+ tpiMap = tsSrc->tpiMap;
+ ipiMap = tsSrc->ipiMap;
+}
+
+void PrecompSource::loadGHashes() {
+ if (getDebugH(file)) {
+ warn("ignoring .debug$H section; pch with ghash is not implemented");
+ }
+
+ uint32_t ghashIdx = 0;
+ std::vector<GloballyHashedType> hashVec;
+ forEachTypeChecked(file->debugTypes, [&](const CVType &ty) {
+ // Remember the index of the LF_ENDPRECOMP record so it can be excluded from
+ // the PDB. There must be an entry in the list of ghashes so that the type
+ // indexes of the following records in the /Yc PCH object line up.
+ if (ty.kind() == LF_ENDPRECOMP)
+ endPrecompGHashIdx = ghashIdx;
+
+ hashVec.push_back(GloballyHashedType::hashType(ty, hashVec, hashVec));
+ isItemIndex.push_back(isIdRecord(ty.kind()));
+ ++ghashIdx;
+ });
+ assignGHashesFromVector(std::move(hashVec));
+}
+
+void UsePrecompSource::loadGHashes() {
+ PrecompSource *pchSrc = findPrecompSource(file, precompDependency);
+ if (!pchSrc)
+ return;
+
+ // To compute ghashes of a /Yu object file, we need to build on the
+ // ghashes of the /Yc PCH object. After we are done hashing, discard the
+ // ghashes from the PCH source so we don't unnecessarily try to deduplicate
+ // them.
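+ // For example (illustrative count), if the PCH object contributed 100 type
+ // records, hashVec starts out with those 100 ghashes so that this object's
+ // records hash consistently; the first 100 entries are then erased below so
+ // only this object's own records receive ghashes.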
+ std::vector<GloballyHashedType> hashVec =
+ pchSrc->ghashes.take_front(precompDependency.getTypesCount());
+ forEachTypeChecked(file->debugTypes, [&](const CVType &ty) {
+ hashVec.push_back(GloballyHashedType::hashType(ty, hashVec, hashVec));
+ isItemIndex.push_back(isIdRecord(ty.kind()));
+ });
+ hashVec.erase(hashVec.begin(),
+ hashVec.begin() + precompDependency.getTypesCount());
+ assignGHashesFromVector(std::move(hashVec));
+}
+
+void UsePrecompSource::remapTpiWithGHashes(GHashState *g) {
+ fillMapFromGHashes(g);
+ // This object was compiled with /Yu, so process the corresponding
+ // precompiled headers object (/Yc) first. Some type indices in the current
+ // object are referencing data in the precompiled headers object, so we need
+ // both to be loaded.
+ if (Error e = mergeInPrecompHeaderObj()) {
+ typeMergingError = joinErrors(std::move(typeMergingError), std::move(e));
+ return;
+ }
+
+ tpiMap = indexMapStorage;
+ ipiMap = indexMapStorage;
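+ // This object's records logically follow the PCH object's records, so
+ // merging starts at startTypeIndex + typesCount instead of 0x1000. For
+ // example (illustrative values), a PCH covering 0x1000-0x1063 places this
+ // object's first record at TypeIndex 0x1064.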
+ mergeUniqueTypeRecords(file->debugTypes,
+ TypeIndex(precompDependency.getStartTypeIndex() +
+ precompDependency.getTypesCount()));
+ if (config->showSummary) {
+ nbTypeRecords = ghashes.size();
+ nbTypeRecordsBytes = file->debugTypes.size();
+ }
+}
+
+namespace {
+/// A concurrent hash table for global type hashing. It is based on this paper:
+/// Concurrent Hash Tables: Fast and General(?)!
+/// https://dl.acm.org/doi/10.1145/3309206
+///
+/// This hash table is meant to be used in two phases:
+/// 1. concurrent insertions
+/// 2. concurrent reads
+/// It does not support lookup, deletion, or rehashing. It uses linear probing.
+///
+/// The paper describes storing a key-value pair in two machine words.
+/// Generally, the values stored in this map are type indices, and we can use
+/// those values to recover the ghash key from a side table. This allows us to
+/// shrink the table entries further at the cost of some loads, and sidesteps
+/// the need for a 128 bit atomic compare-and-swap operation.
+///
+/// During insertion, a priority function is used to decide which insertion
+/// should be preferred. This ensures that the output is deterministic. For
+/// ghashing, lower tpiSrcIdx values (earlier inputs) are preferred.
+///
+class GHashCell;
+struct GHashTable {
+ GHashCell *table = nullptr;
+ uint32_t tableSize = 0;
+
+ GHashTable() = default;
+ ~GHashTable();
+
+ /// Initialize the table with the given size. Because the table cannot be
+ /// resized, the initial size of the table must be large enough to contain all
+ /// inputs, or insertion may not be able to find an empty cell.
+ void init(uint32_t newTableSize);
+
+ /// Insert the cell with the given ghash into the table. Return the insertion
+ /// position in the table. It is safe for the caller to store the insertion
+ /// position because the table cannot be resized.
+ uint32_t insert(GloballyHashedType ghash, GHashCell newCell);
+};
+
+/// A ghash table cell for deduplicating types from TpiSources.
+class GHashCell {
+ uint64_t data = 0;
+
+public:
+ GHashCell() = default;
+
+ // Construct data most to least significant so that sorting works well:
+ // - isItem
+ // - tpiSrcIdx
+ // - ghashIdx
+ // Add one to the tpiSrcIdx so that the 0th record from the 0th source has a
+ // non-zero representation.
+ GHashCell(bool isItem, uint32_t tpiSrcIdx, uint32_t ghashIdx)
+ : data((uint64_t(isItem) << 63U) | (uint64_t(tpiSrcIdx + 1) << 32ULL) |
+ ghashIdx) {
+ assert(tpiSrcIdx == getTpiSrcIdx() && "round trip failure");
+ assert(ghashIdx == getGHashIdx() && "round trip failure");
+ }
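+ // For example (hypothetical values), isItem = true, tpiSrcIdx = 2, and
+ // ghashIdx = 5 pack as 0x8000000300000005: bit 63 holds isItem, bits 32-62
+ // hold tpiSrcIdx + 1, and the low 32 bits hold ghashIdx. The accessors below
+ // undo the +1 bias to recover tpiSrcIdx = 2.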
+
+ explicit GHashCell(uint64_t data) : data(data) {}
+
+ // The empty cell is all zeros.
+ bool isEmpty() const { return data == 0ULL; }
+
+ /// Extract the tpiSrcIdx.
+ uint32_t getTpiSrcIdx() const {
+ return ((uint32_t)(data >> 32U) & 0x7FFFFFFF) - 1;
+ }
+
+ /// Extract the index into the ghash array of the TpiSource.
+ uint32_t getGHashIdx() const { return (uint32_t)data; }
+
+ bool isItem() const { return data & (1ULL << 63U); }
+
+ /// Get the ghash key for this cell.
+ GloballyHashedType getGHash() const {
+ return TpiSource::instances[getTpiSrcIdx()]->ghashes[getGHashIdx()];
+ }
+
+ /// The priority function for the cell. The data is stored such that lower
+ /// tpiSrcIdx and ghashIdx values are preferred, which means that type records
+ /// from earlier sources are more likely to prevail.
+ friend inline bool operator<(const GHashCell &l, const GHashCell &r) {
+ return l.data < r.data;
+ }
+};
+} // namespace
+
+namespace lld {
+namespace coff {
+/// This type is just a wrapper around GHashTable with external linkage so it
+/// can be used from a header.
+struct GHashState {
+ GHashTable table;
+};
+} // namespace coff
+} // namespace lld
+
+GHashTable::~GHashTable() { delete[] table; }
+
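+// The table is zero-initialized so that every cell starts out as the empty
+// sentinel recognized by GHashCell::isEmpty().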
+void GHashTable::init(uint32_t newTableSize) {
+ table = new GHashCell[newTableSize];
+ memset(table, 0, newTableSize * sizeof(GHashCell));
+ tableSize = newTableSize;
+}
+
+uint32_t GHashTable::insert(GloballyHashedType ghash, GHashCell newCell) {
+ assert(!newCell.isEmpty() && "cannot insert empty cell value");
+
+ // FIXME: The low bytes of SHA1 have low entropy for short records, which
+ // type records are. Swap the byte order for better entropy. A better ghash
+ // won't need this.
+ uint32_t startIdx =
+ ByteSwap_64(*reinterpret_cast<uint64_t *>(&ghash)) % tableSize;
+
+ // Do a linear probe starting at startIdx.
+ uint32_t idx = startIdx;
+ while (true) {
+ // Run a compare and swap loop. There are four cases:
+ // - cell is empty: CAS into place and return
+ // - cell has matching key, earlier priority: do nothing, return
+ // - cell has matching key, later priority: CAS into place and return
+ // - cell has non-matching key: hash collision, probe next cell
+ auto *cellPtr = reinterpret_cast<std::atomic<GHashCell> *>(&table[idx]);
+ GHashCell oldCell(cellPtr->load());
+ while (oldCell.isEmpty() || oldCell.getGHash() == ghash) {
+ // Check if there is an existing ghash entry with a higher priority
+ // (earlier ordering). If so, this is a duplicate, we are done.
+ if (!oldCell.isEmpty() && oldCell < newCell)
+ return idx;
+ // Either the cell is empty, or our value is higher priority. Try to
+ // compare and swap. If it succeeds, we are done.
+ if (cellPtr->compare_exchange_weak(oldCell, newCell))
+ return idx;
+ // If the CAS failed, check this cell again.
+ }
+
+ // Advance the probe. Wrap around to the beginning if we run off the end.
+ ++idx;
+ idx = idx == tableSize ? 0 : idx;
+ if (idx == startIdx) {
+ // If this becomes an issue, we could mark failure and rehash from the
+ // beginning with a bigger table. There is no difference between rehashing
+ // internally and starting over.
+ report_fatal_error("ghash table is full");
+ }
+ }
+ llvm_unreachable("left infloop");
+}
+
+TypeMerger::TypeMerger(llvm::BumpPtrAllocator &alloc)
+ : typeTable(alloc), idTable(alloc) {}
+
+TypeMerger::~TypeMerger() = default;
+
+void TypeMerger::mergeTypesWithGHash() {
+ // Load ghashes. Do type servers and PCH objects first.
+ {
+ ScopedTimer t1(loadGHashTimer);
+ parallelForEach(TpiSource::dependencySources,
+ [&](TpiSource *source) { source->loadGHashes(); });
+ parallelForEach(TpiSource::objectSources,
+ [&](TpiSource *source) { source->loadGHashes(); });
+ }
+
+ ScopedTimer t2(mergeGHashTimer);
+ GHashState ghashState;
+
+ // Estimate the size of hash table needed to deduplicate ghashes. This *must*
+ // be larger than the number of unique types, or hash table insertion may not
+ // be able to find a vacant slot. Summing the input types guarantees this, but
+ // it is a gross overestimate. The table size could be reduced to save memory,
+ // but it would require implementing rehashing, and this table is generally
+ // small compared to total memory usage, at eight bytes per input type record,
+ // and most input type records are larger than eight bytes.
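+ // As a rough, illustrative example, an input set totalling one million type
+ // records needs a one-million-cell table (8 MB at 8 bytes per cell), even if
+ // only a small fraction of those records turn out to be unique.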
+ size_t tableSize = 0;
+ for (TpiSource *source : TpiSource::instances)
+ tableSize += source->ghashes.size();
+
+ // Cap the table size so that we can use 32-bit cell indices. Type indices are
+ // also 32-bit, so this is an inherent PDB file format limit anyway.
+ tableSize =
+ std::min(size_t(INT32_MAX) - TypeIndex::FirstNonSimpleIndex, tableSize);
+ ghashState.table.init(static_cast<uint32_t>(tableSize));
+
+ // Insert ghashes in parallel. During concurrent insertion, we cannot observe
+ // the contents of the hash table cell, but we can remember the insertion
+ // position. Because the table does not rehash, the position will not change
+ // under insertion. After insertion is done, the value of the cell can be read
+ // to retrieve the final PDB type index.
+ parallelForEachN(0, TpiSource::instances.size(), [&](size_t tpiSrcIdx) {
+ TpiSource *source = TpiSource::instances[tpiSrcIdx];
+ source->indexMapStorage.resize(source->ghashes.size());
+ for (uint32_t i = 0, e = source->ghashes.size(); i < e; i++) {
+ if (source->shouldOmitFromPdb(i)) {
+ source->indexMapStorage[i] = TypeIndex(SimpleTypeKind::NotTranslated);
+ continue;
+ }
+ GloballyHashedType ghash = source->ghashes[i];
+ bool isItem = source->isItemIndex.test(i);
+ uint32_t cellIdx =
+ ghashState.table.insert(ghash, GHashCell(isItem, tpiSrcIdx, i));
+
+ // Store the ghash cell index as a type index in indexMapStorage. Later
+ // we will replace it with the PDB type index.
+ source->indexMapStorage[i] = TypeIndex::fromArrayIndex(cellIdx);
+ }
+ });
+
+ // Collect all non-empty cells and sort them. This will implicitly assign
+ // destination type indices, and partition the entries into type records and
+ // item records. It arranges types in this order:
+ // - type records
+ //   - source 0, type 0...
+ //   - source 1, type 0...
+ // - item records
+ //   - source 0, item 0...
+ //   - source 1, item 0...
+ std::vector<GHashCell> entries;
+ for (const GHashCell &cell :
+ makeArrayRef(ghashState.table.table, tableSize)) {
+ if (!cell.isEmpty())
+ entries.push_back(cell);
+ }
+ parallelSort(entries, std::less<GHashCell>());
+ log(formatv("ghash table load factor: {0:p} (size {1} / capacity {2})\n",
+ tableSize ? double(entries.size()) / tableSize : 0,
+ entries.size(), tableSize));
+
+ // Find out how many type and item indices there are.
+ auto mid =
+ std::lower_bound(entries.begin(), entries.end(), GHashCell(true, 0, 0));
+ assert((mid == entries.end() || mid->isItem()) &&
+ (mid == entries.begin() || !std::prev(mid)->isItem()) &&
+ "midpoint is not midpoint");
+ uint32_t numTypes = std::distance(entries.begin(), mid);
+ uint32_t numItems = std::distance(mid, entries.end());
+ log("Tpi record count: " + Twine(numTypes));
+ log("Ipi record count: " + Twine(numItems));
+
+ // Make a list of the "unique" type records to merge for each tpi source. Type
+ // merging will skip indices not on this list. Store the destination PDB type
+ // index for these unique types in the tpiMap for each source. The entries for
+ // non-unique types will be filled in prior to type merging.
+ for (uint32_t i = 0, e = entries.size(); i < e; ++i) {
+ auto &cell = entries[i];
+ uint32_t tpiSrcIdx = cell.getTpiSrcIdx();
+ TpiSource *source = TpiSource::instances[tpiSrcIdx];
+ source->uniqueTypes.push_back(cell.getGHashIdx());
+
+ // Update the ghash table to store the destination PDB type index in the
+ // table.
+ uint32_t pdbTypeIndex = i < numTypes ? i : i - numTypes;
+ uint32_t ghashCellIndex =
+ source->indexMapStorage[cell.getGHashIdx()].toArrayIndex();
+ ghashState.table.table[ghashCellIndex] =
+ GHashCell(cell.isItem(), cell.getTpiSrcIdx(), pdbTypeIndex);
+ }
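+ // At this point the cells reached through indexMapStorage hold final array
+ // indices. For example (illustrative values), with numTypes == 3, entries[0]
+ // through entries[2] become TPI array indices 0-2 (TypeIndex 0x1000-0x1002
+ // after fromArrayIndex) and entries[3] becomes IPI array index 0.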
+
+ // Remap all types. Dependency sources (type servers and PCH objects) must be
+ // remapped before the object files that use their maps; object files are then
+ // remapped in parallel.
+ for_each(TpiSource::dependencySources, [&](TpiSource *source) {
+ source->remapTpiWithGHashes(&ghashState);
+ });
+ parallelForEach(TpiSource::objectSources, [&](TpiSource *source) {
+ source->remapTpiWithGHashes(&ghashState);
+ });
+
+ // Build a global map from function ID to function type.
+ for (TpiSource *source : TpiSource::instances) {
+ for (auto idToType : source->funcIdToType)
+ funcIdToType.insert(idToType);
+ source->funcIdToType.clear();
+ }
+
+ TpiSource::clearGHashes();
+}
+
+/// Given the index into the ghash table for a particular type, return the type
+/// index for that type in the output PDB.
+static TypeIndex loadPdbTypeIndexFromCell(GHashState *g,
+ uint32_t ghashCellIdx) {
+ GHashCell cell = g->table.table[ghashCellIdx];
+ return TypeIndex::fromArrayIndex(cell.getGHashIdx());
+}
+
+// Fill in a TPI or IPI index map using ghashes. For each source type, use its
+// ghash to look up its final type index in the PDB, and store that in the map.
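+// Records that were marked for omission (shouldOmitFromPdb) were stored as the
+// simple index SimpleTypeKind::NotTranslated during insertion and are left
+// untouched here; every other entry still holds a ghash cell index.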
+void TpiSource::fillMapFromGHashes(GHashState *g) {
+ for (size_t i = 0, e = ghashes.size(); i < e; ++i) {
+ TypeIndex fakeCellIndex = indexMapStorage[i];
+ if (fakeCellIndex.isSimple())
+ indexMapStorage[i] = fakeCellIndex;
+ else
+ indexMapStorage[i] =
+ loadPdbTypeIndexFromCell(g, fakeCellIndex.toArrayIndex());
+ }
+}
+
+void TpiSource::clearGHashes() {
+ for (TpiSource *src : TpiSource::instances) {
+ if (src->ownedGHashes)
+ delete[] src->ghashes.data();
+ src->ghashes = {};
+ src->isItemIndex.clear();
+ src->uniqueTypes.clear();
+ }
+}
#ifndef LLD_COFF_DEBUGTYPES_H
#define LLD_COFF_DEBUGTYPES_H
+#include "lld/Common/LLVM.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/DebugInfo/CodeView/TypeIndexDiscovery.h"
+#include "llvm/DebugInfo/CodeView/TypeRecord.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/MemoryBuffer.h"
namespace llvm {
namespace codeview {
-class PrecompRecord;
-class TypeServer2Record;
+struct GloballyHashedType;
} // namespace codeview
namespace pdb {
class NativeSession;
+class TpiStream;
}
} // namespace llvm
namespace lld {
namespace coff {
+using llvm::codeview::GloballyHashedType;
+using llvm::codeview::TypeIndex;
+
class ObjFile;
class PDBInputFile;
-struct CVIndexMap;
class TypeMerger;
+struct GHashState;
class TpiSource {
public:
- enum TpiKind { Regular, PCH, UsingPCH, PDB, UsingPDB };
+ enum TpiKind : uint8_t { Regular, PCH, UsingPCH, PDB, PDBIpi, UsingPDB };
TpiSource(TpiKind k, ObjFile *f);
virtual ~TpiSource();
/// If the object does not use a type server PDB (compiled with /Z7), we merge
/// all the type and item records from the .debug$T stream and fill in the
/// caller-provided ObjectIndexMap.
- virtual llvm::Expected<const CVIndexMap *> mergeDebugT(TypeMerger *m,
- CVIndexMap *indexMap);
+ virtual Error mergeDebugT(TypeMerger *m);
+
+ /// Load global hashes, either by hashing types directly, or by loading them
+ /// from LLVM's .debug$H section.
+ virtual void loadGHashes();
+
+ /// Use global hashes to merge type information.
+ virtual void remapTpiWithGHashes(GHashState *g);
+
+ // Remap a type index in place.
+ bool remapTypeIndex(TypeIndex &ti, llvm::codeview::TiRefKind refKind) const;
+
+protected:
+ void remapRecord(MutableArrayRef<uint8_t> rec,
+ ArrayRef<llvm::codeview::TiReference> typeRefs);
+
+ void mergeTypeRecord(TypeIndex curIndex, llvm::codeview::CVType ty);
+
+ // Merge the type records listed in uniqueTypes. beginIndex is the TypeIndex
+ // of the first record in this source, typically 0x1000. When PCHs are
+ // involved, it may start higher.
+ void mergeUniqueTypeRecords(
+ ArrayRef<uint8_t> debugTypes,
+ TypeIndex beginIndex = TypeIndex(TypeIndex::FirstNonSimpleIndex));
+
+ // Use the ghash table to construct a map from source type index to
+ // destination PDB type index. Usable for either TPI or IPI.
+ void fillMapFromGHashes(GHashState *m);
+
+ // Copies ghashes from a vector into an array. These are long lived, so it's
+ // worth the time to copy them into an exactly sized allocation to reduce
+ // memory usage.
+ void assignGHashesFromVector(std::vector<GloballyHashedType> &&hashVec);
+
+ // Walk over file->debugTypes and fill in the isItemIndex bit vector.
+ void fillIsItemIndexFromDebugT();
+
+public:
+ bool remapTypesInSymbolRecord(MutableArrayRef<uint8_t> rec);
+
+ void remapTypesInTypeRecord(MutableArrayRef<uint8_t> rec);
+
/// Is this a dependent file that needs to be processed first, before other
/// OBJs?
virtual bool isDependency() const { return false; }
- static void forEachSource(llvm::function_ref<void(TpiSource *)> fn);
+ /// Returns true if this type record should be omitted from the PDB, even if
+ /// it is unique. This prevents a record from being added to the input ghash
+ /// table.
+ bool shouldOmitFromPdb(uint32_t ghashIdx) {
+ return ghashIdx == endPrecompGHashIdx;
+ }
+
+ /// All sources of type information in the program.
+ static std::vector<TpiSource *> instances;
+
+ /// Dependency type sources, such as type servers or PCH object files. These
+ /// must be processed before objects that rely on them. Set by
+ /// TpiSource::sortDependencies.
+ static ArrayRef<TpiSource *> dependencySources;
+
+ /// Object file sources. These must be processed after dependencySources.
+ static ArrayRef<TpiSource *> objectSources;
+
+ /// Sorts the dependencies and reassigns TpiSource indices.
+ static void sortDependencies();
static uint32_t countTypeServerPDBs();
static uint32_t countPrecompObjs();
+ /// Free heap allocated ghashes.
+ static void clearGHashes();
+
/// Clear global data structures for TpiSources.
static void clear();
const TpiKind kind;
+ bool ownedGHashes = true;
+ uint32_t tpiSrcIdx = 0;
+
+protected:
+ /// The ghash index (zero based, not 0x1000-based) of the LF_ENDPRECOMP record
+ /// in this object, if one exists. This is the all ones value otherwise. It is
+ /// recorded here so that it can be omitted from the final ghash table.
+ uint32_t endPrecompGHashIdx = ~0U;
+
+public:
ObjFile *file;
+
+ /// An error encountered during type merging, if any.
+ Error typeMergingError = Error::success();
+
+ // Storage for tpiMap or ipiMap, depending on the kind of source.
+ llvm::SmallVector<TypeIndex, 0> indexMapStorage;
+
+ // Source type index to PDB type index mapping for type and item records.
+ // These mappings will be the same for /Z7 objects, and distinct for /Zi
+ // objects.
+ llvm::ArrayRef<TypeIndex> tpiMap;
+ llvm::ArrayRef<TypeIndex> ipiMap;
+
+ /// Array of global type hashes, indexed by TypeIndex. May be calculated on
+ /// demand, or present in input object files.
+ llvm::ArrayRef<llvm::codeview::GloballyHashedType> ghashes;
+
+ /// When ghashing is used, record the mapping from LF_[M]FUNC_ID to function
+ /// type index here. Both indices are PDB indices, not object type indexes.
+ std::vector<std::pair<TypeIndex, TypeIndex>> funcIdToType;
+
+ /// Indicates if a type record is an item index or a type index.
+ llvm::BitVector isItemIndex;
+
+ /// A list of all "unique" type indices which must be merged into the final
+ /// PDB. GHash type deduplication produces this list, and it should be
+ /// considerably smaller than the input.
+ std::vector<uint32_t> uniqueTypes;
+
+ struct MergedInfo {
+ std::vector<uint8_t> recs;
+ std::vector<uint16_t> recSizes;
+ std::vector<uint32_t> recHashes;
+ };
+
+ MergedInfo mergedTpi;
+ MergedInfo mergedIpi;
+
+ uint64_t nbTypeRecords = 0;
+ uint64_t nbTypeRecordsBytes = 0;
};
TpiSource *makeTpiSource(ObjFile *file);
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/BinaryFormat/Magic.h"
+#include "llvm/Config/llvm-config.h"
#include "llvm/LTO/LTO.h"
#include "llvm/Object/ArchiveWriter.h"
#include "llvm/Object/COFFImportFile.h"
#include "llvm/Option/Arg.h"
#include "llvm/Option/ArgList.h"
#include "llvm/Option/Option.h"
+#include "llvm/Support/BinaryStreamReader.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/LEB128.h"
using namespace llvm;
using namespace llvm::object;
using namespace llvm::COFF;
-using llvm::sys::Process;
+using namespace llvm::sys;
namespace lld {
namespace coff {
lld::stdoutOS = &stdoutOS;
lld::stderrOS = &stderrOS;
+ errorHandler().cleanupCallback = []() {
+ TpiSource::clear();
+ freeArena();
+ ObjFile::instances.clear();
+ PDBInputFile::instances.clear();
+ ImportFile::instances.clear();
+ BitcodeFile::instances.clear();
+ memset(MergeChunk::instances, 0, sizeof(MergeChunk::instances));
+ OutputSection::clear();
+ };
+
errorHandler().logName = args::getFilenameWithoutExe(args[0]);
errorHandler().errorLimitExceededMsg =
"too many errors emitted, stopping now"
symtab = make<SymbolTable>();
driver = make<LinkerDriver>();
- driver->link(args);
+ driver->linkerMain(args);
// Call exit() if we can to avoid calling destructors.
if (canExitEarly)
exitLld(errorCount() ? 1 : 0);
- freeArena();
- ObjFile::instances.clear();
- ImportFile::instances.clear();
- BitcodeFile::instances.clear();
- memset(MergeChunk::instances, 0, sizeof(MergeChunk::instances));
- TpiSource::clear();
-
- return !errorCount();
+ bool ret = errorCount() == 0;
+ if (!canExitEarly)
+ errorHandler().reset();
+ return ret;
}
// Parse options of the form "old;new".
// Create a std::future that opens and maps a file using the best strategy for
// the host platform.
static std::future<MBErrPair> createFutureForFile(std::string path) {
-#if _WIN32
+#if _WIN64
// On Windows, file I/O is relatively slow so it is best to do this
- // asynchronously.
+ // asynchronously. But 32-bit has issues with potentially launching tons
+ // of threads.
auto strategy = std::launch::async;
#else
auto strategy = std::launch::deferred;
#endif
return std::async(strategy, [=]() {
- auto mbOrErr = MemoryBuffer::getFile(path,
- /*FileSize*/ -1,
- /*RequiresNullTerminator*/ false);
+ auto mbOrErr = MemoryBuffer::getFile(path, /*IsText=*/false,
+ /*RequiresNullTerminator=*/false);
if (!mbOrErr)
return MBErrPair{nullptr, mbOrErr.getError()};
return MBErrPair{std::move(*mbOrErr), std::error_code()};
error(filename + ": is not a native COFF file. Recompile without /GL");
break;
case file_magic::pecoff_executable:
- if (filename.endswith_lower(".dll")) {
+ if (config->mingw) {
+ symtab->addFile(make<DLLFile>(mbref));
+ break;
+ }
+ if (filename.endswith_insensitive(".dll")) {
error(filename + ": bad file type. Did you specify a DLL instead of an "
"import library?");
break;
case OPT_section:
parseSection(arg->getValue());
break;
- case OPT_subsystem:
+ case OPT_stack:
+ parseNumbers(arg->getValue(), &config->stackReserve,
+ &config->stackCommit);
+ break;
+ case OPT_subsystem: {
+ bool gotVersion = false;
parseSubsystem(arg->getValue(), &config->subsystem,
- &config->majorOSVersion, &config->minorOSVersion);
+ &config->majorSubsystemVersion,
+ &config->minorSubsystemVersion, &gotVersion);
+ if (gotVersion) {
+ config->majorOSVersion = config->majorSubsystemVersion;
+ config->minorOSVersion = config->minorSubsystemVersion;
+ }
break;
+ }
// Only add flags here that link.exe accepts in
// `#pragma comment(linker, "/flag")`-generated sections.
case OPT_editandcontinue:
return None;
}
- if (path.endswith_lower(".lib"))
+ if (path.endswith_insensitive(".lib"))
visitedLibs.insert(std::string(sys::path::filename(path)));
return path;
}
return config->dll ? 0x10000000 : 0x400000;
}
+static std::string rewritePath(StringRef s) {
+ if (fs::exists(s))
+ return relativeToRoot(s);
+ return std::string(s);
+}
+
+// Reconstructs command line arguments so that you can re-run
+// the same command with the same inputs. This is for --reproduce.
static std::string createResponseFile(const opt::InputArgList &args,
ArrayRef<StringRef> filePaths,
ArrayRef<StringRef> searchPaths) {
case OPT_manifestinput:
case OPT_manifestuac:
break;
+ case OPT_call_graph_ordering_file:
+ case OPT_deffile:
+ case OPT_natvis:
+ os << arg->getSpelling() << quote(rewritePath(arg->getValue())) << '\n';
+ break;
+ case OPT_order: {
+ StringRef orderFile = arg->getValue();
+ orderFile.consume_front("@");
+ os << arg->getSpelling() << '@' << quote(rewritePath(orderFile)) << '\n';
+ break;
+ }
+ case OPT_pdbstream: {
+ const std::pair<StringRef, StringRef> nameFile =
+ StringRef(arg->getValue()).split("=");
+ os << arg->getSpelling() << nameFile.first << '='
+ << quote(rewritePath(nameFile.second)) << '\n';
+ break;
+ }
case OPT_implib:
case OPT_pdb:
case OPT_pdbstripped:
return std::string(data.str());
}
-enum class DebugKind { Unknown, None, Full, FastLink, GHash, Dwarf, Symtab };
+enum class DebugKind {
+ Unknown,
+ None,
+ Full,
+ FastLink,
+ GHash,
+ NoGHash,
+ Dwarf,
+ Symtab
+};
static DebugKind parseDebugKind(const opt::InputArgList &args) {
auto *a = args.getLastArg(OPT_debug, OPT_debug_opt);
return DebugKind::Full;
DebugKind debug = StringSwitch<DebugKind>(a->getValue())
- .CaseLower("none", DebugKind::None)
- .CaseLower("full", DebugKind::Full)
- .CaseLower("fastlink", DebugKind::FastLink)
- // LLD extensions
- .CaseLower("ghash", DebugKind::GHash)
- .CaseLower("dwarf", DebugKind::Dwarf)
- .CaseLower("symtab", DebugKind::Symtab)
- .Default(DebugKind::Unknown);
+ .CaseLower("none", DebugKind::None)
+ .CaseLower("full", DebugKind::Full)
+ .CaseLower("fastlink", DebugKind::FastLink)
+ // LLD extensions
+ .CaseLower("ghash", DebugKind::GHash)
+ .CaseLower("noghash", DebugKind::NoGHash)
+ .CaseLower("dwarf", DebugKind::Dwarf)
+ .CaseLower("symtab", DebugKind::Symtab)
+ .Default(DebugKind::Unknown);
if (debug == DebugKind::FastLink) {
warn("/debug:fastlink unsupported; using /debug:full");
// If the import library already exists, replace it only if the contents
// have changed.
ErrorOr<std::unique_ptr<MemoryBuffer>> oldBuf = MemoryBuffer::getFile(
- path, /*FileSize*/ -1, /*RequiresNullTerminator*/ false);
+ path, /*IsText=*/false, /*RequiresNullTerminator=*/false);
if (!oldBuf) {
handleError(writeImportLibrary(libName, path, exports, config->machine,
config->mingw));
}
std::unique_ptr<MemoryBuffer> newBuf = check(MemoryBuffer::getFile(
- tmpName, /*FileSize*/ -1, /*RequiresNullTerminator*/ false));
+ tmpName, /*IsText=*/false, /*RequiresNullTerminator=*/false));
if ((*oldBuf)->getBuffer() != newBuf->getBuffer()) {
oldBuf->reset();
handleError(errorCodeToError(sys::fs::rename(tmpName, path)));
}
static void parseModuleDefs(StringRef path) {
- std::unique_ptr<MemoryBuffer> mb = CHECK(
- MemoryBuffer::getFile(path, -1, false, true), "could not open " + path);
+ std::unique_ptr<MemoryBuffer> mb =
+ CHECK(MemoryBuffer::getFile(path, /*IsText=*/false,
+ /*RequiresNullTerminator=*/false,
+ /*IsVolatile=*/true),
+ "could not open " + path);
COFFModuleDefinition m = check(parseCOFFModuleDefinition(
mb->getMemBufferRef(), config->machine, config->mingw));
+ // Include in /reproduce: output if applicable.
+ driver->takeBuffer(std::move(mb));
+
if (config->outputFile.empty())
config->outputFile = std::string(saver.save(m.OutputFile));
config->importName = std::string(saver.save(m.ImportName));
// Open a file.
StringRef path = arg.substr(1);
- std::unique_ptr<MemoryBuffer> mb = CHECK(
- MemoryBuffer::getFile(path, -1, false, true), "could not open " + path);
+ std::unique_ptr<MemoryBuffer> mb =
+ CHECK(MemoryBuffer::getFile(path, /*IsText=*/false,
+ /*RequiresNullTerminator=*/false,
+ /*IsVolatile=*/true),
+ "could not open " + path);
// Parse a file. An order file contains one symbol per line.
// All symbols that were not present in a given order file are
else
config->order[s] = INT_MIN + config->order.size();
}
+
+ // Include in /reproduce: output if applicable.
+ driver->takeBuffer(std::move(mb));
+}
+
+static void parseCallGraphFile(StringRef path) {
+ std::unique_ptr<MemoryBuffer> mb =
+ CHECK(MemoryBuffer::getFile(path, /*IsText=*/false,
+ /*RequiresNullTerminator=*/false,
+ /*IsVolatile=*/true),
+ "could not open " + path);
+
+ // Build a map from symbol name to section.
+ DenseMap<StringRef, Symbol *> map;
+ for (ObjFile *file : ObjFile::instances)
+ for (Symbol *sym : file->getSymbols())
+ if (sym)
+ map[sym->getName()] = sym;
+
+ auto findSection = [&](StringRef name) -> SectionChunk * {
+ Symbol *sym = map.lookup(name);
+ if (!sym) {
+ if (config->warnMissingOrderSymbol)
+ warn(path + ": no such symbol: " + name);
+ return nullptr;
+ }
+
+ if (DefinedCOFF *dr = dyn_cast_or_null<DefinedCOFF>(sym))
+ return dyn_cast_or_null<SectionChunk>(dr->getChunk());
+ return nullptr;
+ };
+
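+ // Each line is expected to have the form "<from-symbol> <to-symbol> <count>",
+ // e.g. (hypothetical symbols) "main foo 85".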
+ for (StringRef line : args::getLines(*mb)) {
+ SmallVector<StringRef, 3> fields;
+ line.split(fields, ' ');
+ uint64_t count;
+
+ if (fields.size() != 3 || !to_integer(fields[2], count)) {
+ error(path + ": parse error");
+ return;
+ }
+
+ if (SectionChunk *from = findSection(fields[0]))
+ if (SectionChunk *to = findSection(fields[1]))
+ config->callGraphProfile[{from, to}] += count;
+ }
+
+ // Include in /reproduce: output if applicable.
+ driver->takeBuffer(std::move(mb));
+}
+
+static void readCallGraphsFromObjectFiles() {
+ for (ObjFile *obj : ObjFile::instances) {
+ if (obj->callgraphSec) {
+ ArrayRef<uint8_t> contents;
+ cantFail(
+ obj->getCOFFObj()->getSectionContents(obj->callgraphSec, contents));
+ BinaryStreamReader reader(contents, support::little);
+ while (!reader.empty()) {
+ uint32_t fromIndex, toIndex;
+ uint64_t count;
+ if (Error err = reader.readInteger(fromIndex))
+ fatal(toString(obj) + ": Expected 32-bit integer");
+ if (Error err = reader.readInteger(toIndex))
+ fatal(toString(obj) + ": Expected 32-bit integer");
+ if (Error err = reader.readInteger(count))
+ fatal(toString(obj) + ": Expected 64-bit integer");
+ auto *fromSym = dyn_cast_or_null<Defined>(obj->getSymbol(fromIndex));
+ auto *toSym = dyn_cast_or_null<Defined>(obj->getSymbol(toIndex));
+ if (!fromSym || !toSym)
+ continue;
+ auto *from = dyn_cast_or_null<SectionChunk>(fromSym->getChunk());
+ auto *to = dyn_cast_or_null<SectionChunk>(toSym->getChunk());
+ if (from && to)
+ config->callGraphProfile[{from, to}] += count;
+ }
+ }
+ }
}
static void markAddrsig(Symbol *s) {
// text between first and second % as variable name.
buf.append(altPath.substr(cursor, firstMark - cursor));
StringRef var = altPath.substr(firstMark, secondMark - firstMark + 1);
- if (var.equals_lower("%_pdb%"))
+ if (var.equals_insensitive("%_pdb%"))
buf.append(pdbBasename);
- else if (var.equals_lower("%_ext%"))
+ else if (var.equals_insensitive("%_ext%"))
buf.append(binaryExtension);
else {
warn("only %_PDB% and %_EXT% supported in /pdbaltpath:, keeping " +
// -exclude-all-symbols option, so that lld-link behaves like link.exe rather
// than MinGW in the case that nothing is explicitly exported.
void LinkerDriver::maybeExportMinGWSymbols(const opt::InputArgList &args) {
- if (!config->dll)
- return;
-
if (!args.hasArg(OPT_export_all_symbols)) {
+ if (!config->dll)
+ return;
+
if (!config->exports.empty())
return;
if (args.hasArg(OPT_exclude_all_symbols))
if (!exporter.shouldExport(def))
return;
+ if (!def->isGCRoot) {
+ def->isGCRoot = true;
+ config->gcroot.push_back(def);
+ }
+
Export e;
e.name = def->getName();
e.sym = def;
if (Chunk *c = def->getChunk())
if (!(c->getOutputCharacteristics() & IMAGE_SCN_MEM_EXECUTE))
e.data = true;
+ s->isUsedInRegularObj = true;
config->exports.push_back(e);
});
}
return std::string(path);
}
+ // This is intentionally not guarded by OPT_lldignoreenv since writing
+ // a repro tar file doesn't affect the main output.
+ if (auto *path = getenv("LLD_REPRODUCE"))
+ return std::string(path);
+
return None;
}
-void LinkerDriver::link(ArrayRef<const char *> argsArr) {
+void LinkerDriver::linkerMain(ArrayRef<const char *> argsArr) {
ScopedTimer rootTimer(Timer::root());
// Needed for LTO.
// If the first command line argument is "/lib", link.exe acts like lib.exe.
// We call our own implementation of lib.exe that understands bitcode files.
- if (argsArr.size() > 1 && StringRef(argsArr[1]).equals_lower("/lib")) {
+ if (argsArr.size() > 1 &&
+ (StringRef(argsArr[1]).equals_insensitive("/lib") ||
+ StringRef(argsArr[1]).equals_insensitive("-lib"))) {
if (llvm::libDriverMain(argsArr.slice(1)) != 0)
fatal("lib failed");
return;
v.push_back("lld-link (LLVM option parsing)");
for (auto *arg : args.filtered(OPT_mllvm))
v.push_back(arg->getValue());
+ cl::ResetAllOptionOccurrences();
cl::ParseCommandLineOptions(v.size(), v.data());
// Handle /errorlimit early, because error() depends on it.
// because it doesn't start with "/", but we deliberately chose "--" to
// avoid conflict with /version and for compatibility with clang-cl.
if (args.hasArg(OPT_dash_dash_version)) {
- lld::outs() << getLLDVersion() << "\n";
+ message(getLLDVersion());
return;
}
// Handle /debug
DebugKind debug = parseDebugKind(args);
if (debug == DebugKind::Full || debug == DebugKind::Dwarf ||
- debug == DebugKind::GHash) {
+ debug == DebugKind::GHash || debug == DebugKind::NoGHash) {
config->debug = true;
config->incremental = true;
}
// Handle /pdb
bool shouldCreatePDB =
- (debug == DebugKind::Full || debug == DebugKind::GHash);
+ (debug == DebugKind::Full || debug == DebugKind::GHash ||
+ debug == DebugKind::NoGHash);
if (shouldCreatePDB) {
if (auto *arg = args.getLastArg(OPT_pdb))
config->pdbPath = arg->getValue();
// Handle /subsystem
if (auto *arg = args.getLastArg(OPT_subsystem))
- parseSubsystem(arg->getValue(), &config->subsystem, &config->majorOSVersion,
- &config->minorOSVersion);
+ parseSubsystem(arg->getValue(), &config->subsystem,
+ &config->majorSubsystemVersion,
+ &config->minorSubsystemVersion);
+
+ // Handle /osversion
+ if (auto *arg = args.getLastArg(OPT_osversion)) {
+ parseVersion(arg->getValue(), &config->majorOSVersion,
+ &config->minorOSVersion);
+ } else {
+ config->majorOSVersion = config->majorSubsystemVersion;
+ config->minorOSVersion = config->minorSubsystemVersion;
+ }
// Handle /timestamp
if (llvm::opt::Arg *arg = args.getLastArg(OPT_timestamp, OPT_repro)) {
// Handle /opt.
bool doGC = debug == DebugKind::None || args.hasArg(OPT_profile);
- unsigned icfLevel =
- args.hasArg(OPT_profile) ? 0 : 1; // 0: off, 1: limited, 2: on
+ Optional<ICFLevel> icfLevel = None;
+ if (args.hasArg(OPT_profile))
+ icfLevel = ICFLevel::None;
unsigned tailMerge = 1;
+ bool ltoNewPM = LLVM_ENABLE_NEW_PASS_MANAGER;
+ bool ltoDebugPM = false;
for (auto *arg : args.filtered(OPT_opt)) {
std::string str = StringRef(arg->getValue()).lower();
SmallVector<StringRef, 1> vec;
} else if (s == "noref") {
doGC = false;
} else if (s == "icf" || s.startswith("icf=")) {
- icfLevel = 2;
+ icfLevel = ICFLevel::All;
+ } else if (s == "safeicf") {
+ icfLevel = ICFLevel::Safe;
} else if (s == "noicf") {
- icfLevel = 0;
+ icfLevel = ICFLevel::None;
} else if (s == "lldtailmerge") {
tailMerge = 2;
} else if (s == "nolldtailmerge") {
tailMerge = 0;
+ } else if (s == "ltonewpassmanager") {
+ ltoNewPM = true;
+ } else if (s == "noltonewpassmanager") {
+ ltoNewPM = false;
+ } else if (s == "ltodebugpassmanager") {
+ ltoDebugPM = true;
+ } else if (s == "noltodebugpassmanager") {
+ ltoDebugPM = false;
} else if (s.startswith("lldlto=")) {
StringRef optLevel = s.substr(7);
if (optLevel.getAsInteger(10, config->ltoo) || config->ltoo > 3)
}
}
- // Limited ICF is enabled if GC is enabled and ICF was never mentioned
- // explicitly.
- // FIXME: LLD only implements "limited" ICF, i.e. it only merges identical
- // code. If the user passes /OPT:ICF explicitly, LLD should merge identical
- // comdat readonly data.
- if (icfLevel == 1 && !doGC)
- icfLevel = 0;
+ if (!icfLevel)
+ icfLevel = doGC ? ICFLevel::All : ICFLevel::None;
config->doGC = doGC;
- config->doICF = icfLevel > 0;
- config->tailMerge = (tailMerge == 1 && config->doICF) || tailMerge == 2;
+ config->doICF = icfLevel.getValue();
+ config->tailMerge =
+ (tailMerge == 1 && config->doICF != ICFLevel::None) || tailMerge == 2;
+ config->ltoNewPassManager = ltoNewPM;
+ config->ltoDebugPassManager = ltoDebugPM;
// Handle /lldsavetemps
if (args.hasArg(OPT_lldsavetemps))
config->thinLTOObjectSuffixReplace =
getOldNewOptions(args, OPT_thinlto_object_suffix_replace);
config->ltoObjPath = args.getLastArgValue(OPT_lto_obj_path);
+ config->ltoCSProfileGenerate = args.hasArg(OPT_lto_cs_profile_generate);
+ config->ltoCSProfileFile = args.getLastArgValue(OPT_lto_cs_profile_file);
// Handle miscellaneous boolean flags.
config->allowBind = args.hasFlag(OPT_allowbind, OPT_allowbind_no, true);
config->allowIsolation =
args.hasFlag(OPT_allowisolation, OPT_allowisolation_no, true);
config->incremental =
args.hasFlag(OPT_incremental, OPT_incremental_no,
- !config->doGC && !config->doICF && !args.hasArg(OPT_order) &&
- !args.hasArg(OPT_profile));
+ !config->doGC && config->doICF == ICFLevel::None &&
+ !args.hasArg(OPT_order) && !args.hasArg(OPT_profile));
config->integrityCheck =
args.hasFlag(OPT_integritycheck, OPT_integritycheck_no, false);
config->cetCompat = args.hasFlag(OPT_cetcompat, OPT_cetcompat_no, false);
config->terminalServerAware =
!config->dll && args.hasFlag(OPT_tsaware, OPT_tsaware_no, true);
config->debugDwarf = debug == DebugKind::Dwarf;
- config->debugGHashes = debug == DebugKind::GHash;
+ config->debugGHashes = debug == DebugKind::GHash || debug == DebugKind::Full;
config->debugSymtab = debug == DebugKind::Symtab;
config->autoImport =
args.hasFlag(OPT_auto_import, OPT_auto_import_no, config->mingw);
config->pseudoRelocs = args.hasFlag(
OPT_runtime_pseudo_reloc, OPT_runtime_pseudo_reloc_no, config->mingw);
-
- // Don't warn about long section names, such as .debug_info, for mingw or when
- // -debug:dwarf is requested.
+ config->callGraphProfileSort = args.hasFlag(
+ OPT_call_graph_profile_sort, OPT_call_graph_profile_sort_no, true);
+ config->stdcallFixup =
+ args.hasFlag(OPT_stdcall_fixup, OPT_stdcall_fixup_no, config->mingw);
+ config->warnStdcallFixup = !args.hasArg(OPT_stdcall_fixup);
+
+ // Don't warn about long section names, such as .debug_info, for mingw or
+ // when -debug:dwarf is requested.
if (config->mingw || config->debugDwarf)
config->warnLongSectionNames = false;
config->incremental = false;
}
- if (config->incremental && config->doICF) {
+ if (config->incremental && config->doICF != ICFLevel::None) {
warn("ignoring '/incremental' because ICF is enabled; use '/opt:noicf' to "
"disable");
config->incremental = false;
symtab->addAbsolute(mangle("__guard_longjmp_table"), 0);
// Needed for MSVC 2017 15.5 CRT.
symtab->addAbsolute(mangle("__enclave_config"), 0);
+ // Needed for MSVC 2019 16.8 CRT.
+ symtab->addAbsolute(mangle("__guard_eh_cont_count"), 0);
+ symtab->addAbsolute(mangle("__guard_eh_cont_table"), 0);
if (config->pseudoRelocs) {
symtab->addAbsolute(mangle("__RUNTIME_PSEUDO_RELOC_LIST__"), 0);
while (run());
}
- if (config->autoImport) {
+ // Create wrapped symbols for -wrap option.
+ std::vector<WrappedSymbol> wrapped = addWrappedSymbols(args);
+ // Load more object files that might be needed for wrapped symbols.
+ if (!wrapped.empty())
+ while (run());
+
+ if (config->autoImport || config->stdcallFixup) {
// MinGW specific.
// Load any further object files that might be needed for doing automatic
- // imports.
+ // imports, and do stdcall fixups.
//
// For cases with no automatically imported symbols, this iterates once
// over the symbol table and doesn't do anything.
// normal object file as well (although that won't be used for the
// actual autoimport later on). If this pass adds new undefined references,
// we won't iterate further to resolve them.
- symtab->loadMinGWAutomaticImports();
+ //
+ // If stdcall fixups only are needed for loading import entries from
+ // a DLL without import library, this also just needs running once.
+ // If it ends up pulling in more object files from static libraries,
+ // (and maybe doing more stdcall fixups along the way), this would need
+ // to loop these two calls.
+ symtab->loadMinGWSymbols();
run();
}
if (errorCount())
return;
+ config->hadExplicitExports = !config->exports.empty();
+ if (config->mingw) {
+ // In MinGW, all symbols are automatically exported if no symbols
+ // are chosen to be exported.
+ maybeExportMinGWSymbols(args);
+ }
+
// Do LTO by compiling bitcode input files to a set of native COFF files then
// link those files (unless -thinlto-index-only was given, in which case we
// resolve symbols and write indices, but don't generate native code or link).
// references to the symbols we use from them.
run();
+ // Apply symbol renames for -wrap.
+ if (!wrapped.empty())
+ wrapSymbols(wrapped);
+
// Resolve remaining undefined symbols and warn about imported locals.
symtab->resolveRemainingUndefines();
if (errorCount())
return;
- config->hadExplicitExports = !config->exports.empty();
if (config->mingw) {
- // In MinGW, all symbols are automatically exported if no symbols
- // are chosen to be exported.
- maybeExportMinGWSymbols(args);
-
// Make sure the crtend.o object is the last object file. This object
// file can contain terminating section chunks that need to be placed
// last. GNU ld processes files and static libraries explicitly in the
// Handle /order. We want to do this at this moment because we
// need a complete list of comdat sections to warn on nonexistent
// functions.
- if (auto *arg = args.getLastArg(OPT_order))
+ if (auto *arg = args.getLastArg(OPT_order)) {
+ if (args.hasArg(OPT_call_graph_ordering_file))
+ error("/order and /call-graph-order-file may not be used together");
parseOrderFile(arg->getValue());
+ config->callGraphProfileSort = false;
+ }
+
+ // Handle /call-graph-ordering-file and /call-graph-profile-sort (default on).
+ if (config->callGraphProfileSort) {
+ if (auto *arg = args.getLastArg(OPT_call_graph_ordering_file)) {
+ parseCallGraphFile(arg->getValue());
+ }
+ readCallGraphsFromObjectFiles();
+ }
+
+ // Handle /print-symbol-order.
+ if (auto *arg = args.getLastArg(OPT_print_symbol_order))
+ config->printSymbolOrder = arg->getValue();
// Identify unreferenced COMDAT sections.
- if (config->doGC)
+ if (config->doGC) {
+ if (config->mingw) {
+ // markLive doesn't traverse .eh_frame, but the personality function is
+ // only reached that way. The proper solution would be to parse and
+ // traverse the .eh_frame section, like the ELF linker does.
+ // For now, just manually try to retain the known possible personality
+ // functions. This doesn't bring in more object files, but only marks
+ // functions that already have been included to be retained.
+ for (const char *n : {"__gxx_personality_v0", "__gcc_personality_v0"}) {
+ Defined *d = dyn_cast_or_null<Defined>(symtab->findUnderscore(n));
+ if (d && !d->isGCRoot) {
+ d->isGCRoot = true;
+ config->gcroot.push_back(d);
+ }
+ }
+ }
+
markLive(symtab->getChunks());
+ }
// Needs to happen after the last call to addFile().
convertResources();
// Identify identical COMDAT sections to merge them.
- if (config->doICF) {
+ if (config->doICF != ICFLevel::None) {
findKeepUniqueSections();
- doICF(symtab->getChunks());
+ doICF(symtab->getChunks(), config->doICF);
}
// Write the result.
class LinkerDriver {
public:
- void link(llvm::ArrayRef<const char *> args);
+ void linkerMain(llvm::ArrayRef<const char *> args);
// Used by the resolver to parse .drectve section contents.
void parseDirectives(InputFile *file);
void enqueuePath(StringRef path, bool wholeArchive, bool lazy);
-private:
std::unique_ptr<llvm::TarWriter> tar; // for /linkrepro
- // Opens a file. Path has to be resolved already.
- MemoryBufferRef openFile(StringRef path);
-
+private:
// Searches a file from search paths.
Optional<StringRef> findFile(StringRef filename);
Optional<StringRef> findLib(StringRef filename);
// Parses a string in the form of "<subsystem>[,<integer>[.<integer>]]".
void parseSubsystem(StringRef arg, WindowsSubsystem *sys, uint32_t *major,
- uint32_t *minor);
+ uint32_t *minor, bool *gotVersion = nullptr);
void parseAlternateName(StringRef);
void parseMerge(StringRef);
MemoryBufferRef convertResToCOFF(ArrayRef<MemoryBufferRef> mbs,
ArrayRef<ObjFile *> objs);
-void runMSVCLinker(std::string rsp, ArrayRef<StringRef> objects);
-
// Create enum with OPT_xxx values for each option in Options.td
enum {
OPT_INVALID = 0,
#include "llvm/Support/Program.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/WindowsManifest/WindowsManifestMerger.h"
+#include <limits>
#include <memory>
using namespace llvm::COFF;
void parseVersion(StringRef arg, uint32_t *major, uint32_t *minor) {
StringRef s1, s2;
std::tie(s1, s2) = arg.split('.');
- if (s1.getAsInteger(0, *major))
+ if (s1.getAsInteger(10, *major))
fatal("invalid number: " + s1);
*minor = 0;
- if (!s2.empty() && s2.getAsInteger(0, *minor))
+ if (!s2.empty() && s2.getAsInteger(10, *minor))
fatal("invalid number: " + s2);
}
SmallVector<StringRef, 1> splitArgs;
fullArg.split(splitArgs, ",");
for (StringRef arg : splitArgs) {
- if (arg.equals_lower("no"))
+ if (arg.equals_insensitive("no"))
config->guardCF = GuardCFLevel::Off;
- else if (arg.equals_lower("nolongjmp"))
- config->guardCF = GuardCFLevel::NoLongJmp;
- else if (arg.equals_lower("cf") || arg.equals_lower("longjmp"))
- config->guardCF = GuardCFLevel::Full;
+ else if (arg.equals_insensitive("nolongjmp"))
+ config->guardCF &= ~GuardCFLevel::LongJmp;
+ else if (arg.equals_insensitive("noehcont"))
+ config->guardCF &= ~GuardCFLevel::EHCont;
+ else if (arg.equals_insensitive("cf"))
+ config->guardCF = GuardCFLevel::CF;
+ else if (arg.equals_insensitive("longjmp"))
+ config->guardCF |= GuardCFLevel::CF | GuardCFLevel::LongJmp;
+ else if (arg.equals_insensitive("ehcont"))
+ config->guardCF |= GuardCFLevel::CF | GuardCFLevel::EHCont;
else
fatal("invalid argument to /guard: " + arg);
}
// Parses a string in the form of "<subsystem>[,<integer>[.<integer>]]".
void parseSubsystem(StringRef arg, WindowsSubsystem *sys, uint32_t *major,
- uint32_t *minor) {
+ uint32_t *minor, bool *gotVersion) {
StringRef sysStr, ver;
std::tie(sysStr, ver) = arg.split(',');
std::string sysStrLower = sysStr.lower();
fatal("unknown subsystem: " + sysStr);
if (!ver.empty())
parseVersion(ver, major, minor);
+ if (gotVersion)
+ *gotVersion = !ver.empty();
}
// Parse a string of the form of "<from>=<to>".
// Parses a string in the form of "EMBED[,=<integer>]|NO".
// Results are directly written to Config.
void parseManifest(StringRef arg) {
- if (arg.equals_lower("no")) {
+ if (arg.equals_insensitive("no")) {
config->manifest = Configuration::No;
return;
}
- if (!arg.startswith_lower("embed"))
+ if (!arg.startswith_insensitive("embed"))
fatal("invalid option " + arg);
config->manifest = Configuration::Embed;
arg = arg.substr(strlen("embed"));
if (arg.empty())
return;
- if (!arg.startswith_lower(",id="))
+ if (!arg.startswith_insensitive(",id="))
fatal("invalid option " + arg);
arg = arg.substr(strlen(",id="));
if (arg.getAsInteger(0, config->manifestID))
// Parses a string in the form of "level=<string>|uiAccess=<string>|NO".
// Results are directly written to Config.
void parseManifestUAC(StringRef arg) {
- if (arg.equals_lower("no")) {
+ if (arg.equals_insensitive("no")) {
config->manifestUAC = false;
return;
}
arg = arg.ltrim();
if (arg.empty())
return;
- if (arg.startswith_lower("level=")) {
+ if (arg.startswith_insensitive("level=")) {
arg = arg.substr(strlen("level="));
std::tie(config->manifestLevel, arg) = arg.split(" ");
continue;
}
- if (arg.startswith_lower("uiaccess=")) {
+ if (arg.startswith_insensitive("uiaccess=")) {
arg = arg.substr(strlen("uiaccess="));
std::tie(config->manifestUIAccess, arg) = arg.split(" ");
continue;
do {
StringRef swaprun, newArg;
std::tie(swaprun, newArg) = arg.split(',');
- if (swaprun.equals_lower("cd"))
+ if (swaprun.equals_insensitive("cd"))
config->swaprunCD = true;
- else if (swaprun.equals_lower("net"))
+ else if (swaprun.equals_insensitive("net"))
config->swaprunNet = true;
else if (swaprun.empty())
error("/swaprun: missing argument");
// is called (you cannot remove an opened file on Windows.)
std::unique_ptr<MemoryBuffer> getMemoryBuffer() {
// IsVolatile=true forces MemoryBuffer to not use mmap().
- return CHECK(MemoryBuffer::getFile(path, /*FileSize=*/-1,
+ return CHECK(MemoryBuffer::getFile(path, /*IsText=*/false,
/*RequiresNullTerminator=*/false,
/*IsVolatile=*/true),
"could not open " + path);
// Create the default manifest file as a temporary file.
TemporaryFile Default("defaultxml", "manifest");
std::error_code ec;
- raw_fd_ostream os(Default.path, ec, sys::fs::OF_Text);
+ raw_fd_ostream os(Default.path, ec, sys::fs::OF_TextWithCRLF);
if (ec)
fatal("failed to open " + Default.path + ": " + ec.message());
os << defaultXml;
if (path == "")
path = config->outputFile + ".manifest";
std::error_code ec;
- raw_fd_ostream out(path, ec, sys::fs::OF_Text);
+ raw_fd_ostream out(path, ec, sys::fs::OF_TextWithCRLF);
if (ec)
fatal("failed to create manifest: " + ec.message());
out << createManifestXml();
while (!rest.empty()) {
StringRef tok;
std::tie(tok, rest) = rest.split(",");
- if (tok.equals_lower("noname")) {
+ if (tok.equals_insensitive("noname")) {
if (e.ordinal == 0)
goto err;
e.noname = true;
continue;
}
- if (tok.equals_lower("data")) {
+ if (tok.equals_insensitive("data")) {
e.data = true;
continue;
}
- if (tok.equals_lower("constant")) {
+ if (tok.equals_insensitive("constant")) {
e.constant = true;
continue;
}
- if (tok.equals_lower("private")) {
+ if (tok.equals_insensitive("private")) {
e.isPrivate = true;
continue;
}
void assignExportOrdinals() {
// Assign unique ordinals if default (= 0).
- uint16_t max = 0;
+ uint32_t max = 0;
for (Export &e : config->exports)
- max = std::max(max, e.ordinal);
+ max = std::max(max, (uint32_t)e.ordinal);
for (Export &e : config->exports)
if (e.ordinal == 0)
e.ordinal = ++max;
+ if (max > std::numeric_limits<uint16_t>::max())
+ fatal("too many exported symbols (max " +
+ Twine(std::numeric_limits<uint16_t>::max()) + ")");
}
// Parses a string in the form of "key=value" and check
handleColorDiagnostics(args);
- for (auto *arg : args.filtered(OPT_UNKNOWN)) {
+ for (opt::Arg *arg : args.filtered(OPT_UNKNOWN)) {
std::string nearest;
if (optTable.findNearest(arg->getAsString(args), nearest) > 1)
warn("ignoring unknown argument '" + arg->getAsString(args) + "'");
SmallVector<StringRef, 16> tokens;
cl::TokenizeWindowsCommandLineNoCopy(s, saver, tokens);
for (StringRef tok : tokens) {
- if (tok.startswith_lower("/export:") || tok.startswith_lower("-export:"))
+ if (tok.startswith_insensitive("/export:") ||
+ tok.startswith_insensitive("-export:"))
result.exports.push_back(tok.substr(strlen("/export:")));
- else if (tok.startswith_lower("/include:") ||
- tok.startswith_lower("-include:"))
+ else if (tok.startswith_insensitive("/include:") ||
+ tok.startswith_insensitive("-include:"))
result.includes.push_back(tok.substr(strlen("/include:")));
else {
- // Save non-null-terminated strings to make proper C strings.
- bool HasNul = tok.data()[tok.size()] == '\0';
+ // Copy substrings that are not valid C strings. The tokenizer may have
+ // already copied quoted arguments for us, so those do not need to be
+ // copied again.
+ bool HasNul = tok.end() != s.end() && tok.data()[tok.size()] == '\0';
rest.push_back(HasNul ? tok.data() : saver.save(tok).data());
}
}
}
void printHelp(const char *argv0) {
- optTable.PrintHelp(lld::outs(),
+ optTable.printHelp(lld::outs(),
(std::string(argv0) + " [options] file...").c_str(),
"LLVM Linker", false);
}
class ICF {
public:
+ ICF(ICFLevel icfLevel) : icfLevel(icfLevel){};
void run(ArrayRef<Chunk *> v);
private:
std::vector<SectionChunk *> chunks;
int cnt = 0;
std::atomic<bool> repeat = {false};
+ ICFLevel icfLevel = ICFLevel::All;
};
// Returns true if section S is subject of ICF.
if (!c->isCOMDAT() || !c->live || writable)
return false;
- // Code sections are eligible.
- if (c->getOutputCharacteristics() & llvm::COFF::IMAGE_SCN_MEM_EXECUTE)
+ // Under regular (not safe) ICF, all code sections are eligible.
+ if ((icfLevel == ICFLevel::All) &&
+ c->getOutputCharacteristics() & llvm::COFF::IMAGE_SCN_MEM_EXECUTE)
return true;
// .pdata and .xdata unwind info sections are eligible.
auto considerForICF = [](const SectionChunk &assoc) {
StringRef Name = assoc.getSectionName();
return !(Name.startswith(".debug") || Name == ".gfids$y" ||
- Name == ".gljmp$y");
+ Name == ".giats$y" || Name == ".gljmp$y");
};
auto ra = make_filter_range(a->children(), considerForICF);
auto rb = make_filter_range(b->children(), considerForICF);
}
// Entry point to ICF.
-void doICF(ArrayRef<Chunk *> chunks) { ICF().run(chunks); }
+void doICF(ArrayRef<Chunk *> chunks, ICFLevel icfLevel) {
+ ICF(icfLevel).run(chunks);
+}
} // namespace coff
} // namespace lld
#ifndef LLD_COFF_ICF_H
#define LLD_COFF_ICF_H
+#include "Config.h"
#include "lld/Common/LLVM.h"
#include "llvm/ADT/ArrayRef.h"
class Chunk;
-void doICF(ArrayRef<Chunk *> chunks);
+void doICF(ArrayRef<Chunk *> chunks, ICFLevel);
} // namespace coff
} // namespace lld
return nullptr;
}
+ if (name == ".llvm.call-graph-profile") {
+ callgraphSec = sec;
+ return nullptr;
+ }
+
// Object files may have DWARF debug info or MS CodeView debug info
// (or both).
//
debugChunks.push_back(c);
else if (name == ".gfids$y")
guardFidChunks.push_back(c);
+ else if (name == ".giats$y")
+ guardIATChunks.push_back(c);
else if (name == ".gljmp$y")
guardLJmpChunks.push_back(c);
+ else if (name == ".gehcont$y")
+ guardEHContChunks.push_back(c);
else if (name == ".sxdata")
sxDataChunks.push_back(c);
else if (config->tailMerge && sec->NumberOfRelocations == 0 &&
return symtab->addUndefined(name, this, sym.isWeakExternal());
}
-void ObjFile::handleComdatSelection(COFFSymbolRef sym, COMDATType &selection,
- bool &prevailing, DefinedRegular *leader) {
+static const coff_aux_section_definition *findSectionDef(COFFObjectFile *obj,
+ int32_t section) {
+ uint32_t numSymbols = obj->getNumberOfSymbols();
+ for (uint32_t i = 0; i < numSymbols; ++i) {
+ COFFSymbolRef sym = check(obj->getSymbol(i));
+ if (sym.getSectionNumber() != section)
+ continue;
+ if (const coff_aux_section_definition *def = sym.getSectionDefinition())
+ return def;
+ }
+ return nullptr;
+}
+
+void ObjFile::handleComdatSelection(
+ COFFSymbolRef sym, COMDATType &selection, bool &prevailing,
+ DefinedRegular *leader,
+ const llvm::object::coff_aux_section_definition *def) {
if (prevailing)
return;
// There's already an existing comdat for this symbol: `Leader`.
// symbol in `Sym` should be discarded, produce a duplicate symbol
// error, etc.
- SectionChunk *leaderChunk = nullptr;
- COMDATType leaderSelection = IMAGE_COMDAT_SELECT_ANY;
+ SectionChunk *leaderChunk = leader->getChunk();
+ COMDATType leaderSelection = leaderChunk->selection;
- if (leader->data) {
- leaderChunk = leader->getChunk();
- leaderSelection = leaderChunk->selection;
- } else {
- // FIXME: comdats from LTO files don't know their selection; treat them
- // as "any".
- selection = leaderSelection;
+ assert(leader->data && "Comdat leader without SectionChunk?");
+ if (isa<BitcodeFile>(leader->file)) {
+ // If the leader is only an LTO symbol, we don't know e.g. its final size
+ // yet, so we can't do the full strict comdat selection checking yet.
+ selection = leaderSelection = IMAGE_COMDAT_SELECT_ANY;
}
if ((selection == IMAGE_COMDAT_SELECT_ANY &&
break;
case IMAGE_COMDAT_SELECT_SAME_SIZE:
- if (leaderChunk->getSize() != getSection(sym)->SizeOfRawData)
- symtab->reportDuplicate(leader, this);
+ if (leaderChunk->getSize() != getSection(sym)->SizeOfRawData) {
+ if (!config->mingw) {
+ symtab->reportDuplicate(leader, this);
+ } else {
+ const coff_aux_section_definition *leaderDef = nullptr;
+ if (leaderChunk->file)
+ leaderDef = findSectionDef(leaderChunk->file->getCOFFObj(),
+ leaderChunk->getSectionNumber());
+ if (!leaderDef || leaderDef->Length != def->Length)
+ symtab->reportDuplicate(leader, this);
+ }
+ }
break;
case IMAGE_COMDAT_SELECT_EXACT_MATCH: {
COMDATType selection = (COMDATType)def->Selection;
if (leader->isCOMDAT)
- handleComdatSelection(sym, selection, prevailing, leader);
+ handleComdatSelection(sym, selection, prevailing, leader, def);
if (prevailing) {
SectionChunk *c = readSection(sectionNumber, def, getName());
else
data = getDebugSection(".debug$T");
- if (data.empty())
+ // Don't make a TpiSource for objects with no debug info. If the object has
+ // symbols but no types, make a plain, empty TpiSource anyway, because it
+ // simplifies adding the symbols later.
+ if (data.empty()) {
+ if (!debugChunks.empty())
+ debugTypesObj = makeTpiSource(this);
return;
+ }
// Get the first type record. It will indicate if this object uses a type
// server (/Zi) or a PCH file (/Yu).
PrecompRecord precomp = cantFail(
TypeDeserializer::deserializeAs<PrecompRecord>(firstType->data()));
debugTypesObj = makeUsePrecompSource(this, precomp);
+ // Drop the LF_PRECOMP record from the input stream.
+ debugTypes = debugTypes.drop_front(firstType->RecordData.size());
return;
}
return dwarf->getDILineInfo(offset, sectionIndex);
}
-static StringRef ltrim1(StringRef s, const char *chars) {
- if (!s.empty() && strchr(chars, s[0]))
- return s.substr(1);
- return s;
-}
-
void ImportFile::parse() {
const char *buf = mb.getBufferStart();
const auto *hdr = reinterpret_cast<const coff_import_header *>(buf);
BitcodeFile::~BitcodeFile() = default;
+namespace {
+// Convenience class for initializing a coff_section with specific flags.
+class FakeSection {
+public:
+ FakeSection(int c) { section.Characteristics = c; }
+
+ coff_section section;
+};
+
+// Convenience class for initializing a SectionChunk with specific flags.
+class FakeSectionChunk {
+public:
+ FakeSectionChunk(const coff_section *section) : chunk(nullptr, section) {
+ // Comdats from LTO files can't be fully treated as regular comdats
+ // at this point; we don't know what size or contents they are going to
+ // have, so we can't do proper checking of such aspects of them.
+ chunk.selection = IMAGE_COMDAT_SELECT_ANY;
+ }
+
+ SectionChunk chunk;
+};
+
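+// A single pair of fake chunks is shared by all bitcode symbols: defined LTO
+// symbols point at one of these until the LTO backend produces real machine
+// code for them.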
+FakeSection ltoTextSection(IMAGE_SCN_MEM_EXECUTE);
+FakeSection ltoDataSection(IMAGE_SCN_CNT_INITIALIZED_DATA);
+FakeSectionChunk ltoTextSectionChunk(&ltoTextSection.section);
+FakeSectionChunk ltoDataSectionChunk(&ltoDataSection.section);
+} // namespace
+
void BitcodeFile::parse() {
std::vector<std::pair<Symbol *, bool>> comdat(obj->getComdatTable().size());
for (size_t i = 0; i != obj->getComdatTable().size(); ++i)
- // FIXME: lto::InputFile doesn't keep enough data to do correct comdat
- // selection handling.
- comdat[i] = symtab->addComdat(this, saver.save(obj->getComdatTable()[i]));
+ // FIXME: Check nodeduplicate
+ comdat[i] =
+ symtab->addComdat(this, saver.save(obj->getComdatTable()[i].first));
for (const lto::InputFile::Symbol &objSym : obj->symbols()) {
StringRef symName = saver.save(objSym.getName());
int comdatIndex = objSym.getComdatIndex();
Symbol *sym;
+ SectionChunk *fakeSC = nullptr;
+ if (objSym.isExecutable())
+    fakeSC = &ltoTextSectionChunk.chunk;
+  else
+    fakeSC = &ltoDataSectionChunk.chunk;
if (objSym.isUndefined()) {
sym = symtab->addUndefined(symName, this, false);
} else if (objSym.isCommon()) {
Symbol *alias = symtab->addUndefined(saver.save(fallback));
checkAndSetWeakAlias(symtab, this, sym, alias);
} else if (comdatIndex != -1) {
- if (symName == obj->getComdatTable()[comdatIndex])
+ if (symName == obj->getComdatTable()[comdatIndex].first) {
sym = comdat[comdatIndex].first;
- else if (comdat[comdatIndex].second)
- sym = symtab->addRegular(this, symName);
- else
+ if (cast<DefinedRegular>(sym)->data == nullptr)
+ cast<DefinedRegular>(sym)->data = &fakeSC->repl;
+ } else if (comdat[comdatIndex].second) {
+ sym = symtab->addRegular(this, symName, nullptr, fakeSC);
+ } else {
sym = symtab->addUndefined(symName, this, false);
+ }
} else {
- sym = symtab->addRegular(this, symName);
+ sym = symtab->addRegular(this, symName, nullptr, fakeSC);
}
symbols.push_back(sym);
if (objSym.isUsed())
return (path + repl).str();
return std::string(path);
}
+
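+// Return true if the given RVA falls inside a code section of the image.
+// DLLFile::parse() uses this to decide whether an export should be treated
+// as code (importable by its plain name) or as data (only via __imp_).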
+static bool isRVACode(COFFObjectFile *coffObj, uint64_t rva, InputFile *file) {
+ for (size_t i = 1, e = coffObj->getNumberOfSections(); i <= e; i++) {
+ const coff_section *sec = CHECK(coffObj->getSection(i), file);
+ if (rva >= sec->VirtualAddress &&
+ rva <= sec->VirtualAddress + sec->VirtualSize) {
+ return (sec->Characteristics & COFF::IMAGE_SCN_CNT_CODE) != 0;
+ }
+ }
+ return false;
+}
+
+void DLLFile::parse() {
+ // Parse a memory buffer as a PE-COFF executable.
+ std::unique_ptr<Binary> bin = CHECK(createBinary(mb), this);
+
+ if (auto *obj = dyn_cast<COFFObjectFile>(bin.get())) {
+ bin.release();
+ coffObj.reset(obj);
+ } else {
+ error(toString(this) + " is not a COFF file");
+ return;
+ }
+
+ if (!coffObj->getPE32Header() && !coffObj->getPE32PlusHeader()) {
+ error(toString(this) + " is not a PE-COFF executable");
+ return;
+ }
+
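+  // Walk the export directory and register each named export as a lazy
+  // symbol; the corresponding short import member is only synthesized in
+  // makeImport() once the symbol is actually referenced.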
+ for (const auto &exp : coffObj->export_directories()) {
+ StringRef dllName, symbolName;
+ uint32_t exportRVA;
+ checkError(exp.getDllName(dllName));
+ checkError(exp.getSymbolName(symbolName));
+ checkError(exp.getExportRVA(exportRVA));
+
+ if (symbolName.empty())
+ continue;
+
+ bool code = isRVACode(coffObj.get(), exportRVA, this);
+
+ Symbol *s = make<Symbol>();
+ s->dllName = dllName;
+ s->symbolName = symbolName;
+ s->importType = code ? ImportType::IMPORT_CODE : ImportType::IMPORT_DATA;
+ s->nameType = ImportNameType::IMPORT_NAME;
+
+ if (coffObj->getMachine() == I386) {
+ s->symbolName = symbolName = saver.save("_" + symbolName);
+ s->nameType = ImportNameType::IMPORT_NAME_NOPREFIX;
+ }
+
+ StringRef impName = saver.save("__imp_" + symbolName);
+ symtab->addLazyDLLSymbol(this, s, impName);
+ if (code)
+ symtab->addLazyDLLSymbol(this, s, symbolName);
+ }
+}
+
+MachineTypes DLLFile::getMachineType() {
+ if (coffObj)
+ return static_cast<MachineTypes>(coffObj->getMachine());
+ return IMAGE_FILE_MACHINE_UNKNOWN;
+}
+
+void DLLFile::makeImport(DLLFile::Symbol *s) {
+ if (!seen.insert(s->symbolName).second)
+ return;
+
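+  // Synthesize an in-memory "short import" member (a coff_import_header
+  // followed by the NUL-terminated symbol and DLL names, the same layout an
+  // import library would contain) and feed it through the regular ImportFile
+  // code path.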
+ size_t impSize = s->dllName.size() + s->symbolName.size() + 2; // +2 for NULs
+ size_t size = sizeof(coff_import_header) + impSize;
+ char *buf = bAlloc.Allocate<char>(size);
+ memset(buf, 0, size);
+ char *p = buf;
+ auto *imp = reinterpret_cast<coff_import_header *>(p);
+ p += sizeof(*imp);
+ imp->Sig2 = 0xFFFF;
+ imp->Machine = coffObj->getMachine();
+ imp->SizeOfData = impSize;
+ imp->OrdinalHint = 0; // Only linking by name
+ imp->TypeInfo = (s->nameType << 2) | s->importType;
+
+ // Write symbol name and DLL name.
+ memcpy(p, s->symbolName.data(), s->symbolName.size());
+ p += s->symbolName.size() + 1;
+ memcpy(p, s->dllName.data(), s->dllName.size());
+ MemoryBufferRef mbref = MemoryBufferRef(StringRef(buf, size), s->dllName);
+ ImportFile *impFile = make<ImportFile>(mbref);
+ symtab->addFile(impFile);
+}
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/StringSet.h"
#include "llvm/BinaryFormat/Magic.h"
#include "llvm/Object/Archive.h"
#include "llvm/Object/COFF.h"
LazyObjectKind,
PDBKind,
ImportKind,
- BitcodeKind
+ BitcodeKind,
+ DLLKind
};
Kind kind() const { return fileKind; }
virtual ~InputFile() {}
ArrayRef<SectionChunk *> getDebugChunks() { return debugChunks; }
ArrayRef<SectionChunk *> getSXDataChunks() { return sxDataChunks; }
ArrayRef<SectionChunk *> getGuardFidChunks() { return guardFidChunks; }
+ ArrayRef<SectionChunk *> getGuardIATChunks() { return guardIATChunks; }
ArrayRef<SectionChunk *> getGuardLJmpChunks() { return guardLJmpChunks; }
+ ArrayRef<SectionChunk *> getGuardEHContChunks() { return guardEHContChunks; }
ArrayRef<Symbol *> getSymbols() { return symbols; }
+ MutableArrayRef<Symbol *> getMutableSymbols() { return symbols; }
+
ArrayRef<uint8_t> getDebugSection(StringRef secName);
// Returns a Symbol object for the symbolIndex'th symbol in the
bool hasSafeSEH() { return feat00Flags & 0x1; }
// True if this file was compiled with /guard:cf.
- bool hasGuardCF() { return feat00Flags & 0x800; }
+ bool hasGuardCF() { return feat00Flags & 0x4800; }
// Pointer to the PDB module descriptor builder. Various debug info records
// will reference object files by "module index", which is here. Things like
const coff_section *addrsigSec = nullptr;
+ const coff_section *callgraphSec = nullptr;
+
// When using Microsoft precompiled headers, this is the PCH's key.
// The same key is used by both the precompiled object, and objects using the
// precompiled object. Any difference indicates out-of-date objects.
// match the existing symbol and its selection. If either old or new
// symbol have selection IMAGE_COMDAT_SELECT_LARGEST, Sym might replace
// the existing leader. In that case, Prevailing is set to true.
- void handleComdatSelection(COFFSymbolRef sym,
- llvm::COFF::COMDATType &selection,
- bool &prevailing, DefinedRegular *leader);
+ void
+ handleComdatSelection(COFFSymbolRef sym, llvm::COFF::COMDATType &selection,
+ bool &prevailing, DefinedRegular *leader,
+ const llvm::object::coff_aux_section_definition *def);
llvm::Optional<Symbol *>
createDefined(COFFSymbolRef sym,
// 32-bit x86.
std::vector<SectionChunk *> sxDataChunks;
- // Chunks containing symbol table indices of address taken symbols and longjmp
- // targets. These are not linked into the final binary when /guard:cf is set.
+ // Chunks containing symbol table indices of address taken symbols, address
+ // taken IAT entries, longjmp and ehcont targets. These are not linked into
+ // the final binary when /guard:cf is set.
std::vector<SectionChunk *> guardFidChunks;
+ std::vector<SectionChunk *> guardIATChunks;
std::vector<SectionChunk *> guardLJmpChunks;
+ std::vector<SectionChunk *> guardEHContChunks;
// This vector contains a list of all symbols defined or referenced by this
// file. They are indexed such that you can get a Symbol by symbol
const coff_import_header *hdr;
Chunk *location = nullptr;
- // We want to eliminate dllimported symbols if no one actually refers them.
+ // We want to eliminate dllimported symbols if no one actually refers to them.
// These "Live" bits are used to keep track of which import library members
// are actually in use.
//
std::vector<Symbol *> symbols;
};
+// .dll file. MinGW only.
+class DLLFile : public InputFile {
+public:
+ explicit DLLFile(MemoryBufferRef m) : InputFile(DLLKind, m) {}
+ static bool classof(const InputFile *f) { return f->kind() == DLLKind; }
+ void parse() override;
+ MachineTypes getMachineType() override;
+
+ struct Symbol {
+ StringRef dllName;
+ StringRef symbolName;
+ llvm::COFF::ImportNameType nameType;
+ llvm::COFF::ImportType importType;
+ };
+
+ void makeImport(Symbol *s);
+
+private:
+ std::unique_ptr<COFFObjectFile> coffObj;
+ llvm::StringSet<> seen;
+};
+
inline bool isBitcode(MemoryBufferRef mb) {
return identify_magic(mb.getBuffer()) == llvm::file_magic::bitcode;
}
static lto::Config createConfig() {
lto::Config c;
c.Options = initTargetOptionsFromCodeGenFlags();
+ c.Options.EmitAddrsig = true;
// Always emit a section per function/datum with LTO. LLVM LTO should get most
// of the benefit of linker GC, but there are still opportunities for ICF.
c.MAttrs = getMAttrs();
c.CGOptLevel = args::getCGOptLevel(config->ltoo);
c.AlwaysEmitRegularLTOObj = !config->ltoObjPath.empty();
+ c.UseNewPM = config->ltoNewPassManager;
+ c.DebugPassManager = config->ltoDebugPassManager;
+ c.CSIRProfile = std::string(config->ltoCSProfileFile);
+ c.RunCSIRInstr = config->ltoCSProfileGenerate;
if (config->saveTemps)
checkError(c.addSaveTemps(std::string(config->outputFile) + ".",
r.VisibleToRegularObj = sym->isUsedInRegularObj;
if (r.Prevailing)
undefine(sym);
+
+ // We tell LTO to not apply interprocedural optimization for wrapped
+ // (with -wrap) symbols because otherwise LTO would inline them while
+ // their values are still not final.
+ r.LinkerRedefined = !sym->canInline;
}
checkError(ltoObj->add(std::move(f.obj), resols));
}
//===----------------------------------------------------------------------===//
#include "MinGW.h"
+#include "Driver.h"
+#include "InputFiles.h"
#include "SymbolTable.h"
#include "lld/Common/ErrorHandler.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseSet.h"
#include "llvm/Object/COFF.h"
+#include "llvm/Support/Parallel.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/raw_ostream.h"
}
bool AutoExporter::shouldExport(Defined *sym) const {
- if (!sym || !sym->isLive() || !sym->getChunk())
+ if (!sym || !sym->getChunk())
return false;
// Only allow the symbol kinds that make sense to export; in particular,
os << "\n";
}
}
+
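+// Prepend an underscore on 32-bit x86, where C symbol names carry an
+// underscore prefix; other machines use the name unchanged.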
+static StringRef mangle(Twine sym) {
+ assert(config->machine != IMAGE_FILE_MACHINE_UNKNOWN);
+ if (config->machine == I386)
+ return saver.save("_" + sym);
+ return saver.save(sym);
+}
+
+// Handles -wrap option.
+//
+// This function instantiates wrapper symbols. At this point, they seem
+// like they are not being used at all, so we explicitly set some flags so
+// that LTO won't eliminate them.
+std::vector<WrappedSymbol>
+lld::coff::addWrappedSymbols(opt::InputArgList &args) {
+ std::vector<WrappedSymbol> v;
+ DenseSet<StringRef> seen;
+
+ for (auto *arg : args.filtered(OPT_wrap)) {
+ StringRef name = arg->getValue();
+ if (!seen.insert(name).second)
+ continue;
+
+ Symbol *sym = symtab->findUnderscore(name);
+ if (!sym)
+ continue;
+
+ Symbol *real = symtab->addUndefined(mangle("__real_" + name));
+ Symbol *wrap = symtab->addUndefined(mangle("__wrap_" + name));
+ v.push_back({sym, real, wrap});
+
+    // These symbols may seem undefined initially, but don't let
+    // symtab->reportUnresolvable() bail out because of them; wrapSymbols()
+    // below sorts things out before the final check in
+    // symtab->resolveRemainingUndefines().
+ sym->deferUndefined = true;
+ real->deferUndefined = true;
+    // We want to tell LTO not to inline symbols that will be overwritten,
+    // because LTO doesn't know the final symbol contents after renaming.
+ real->canInline = false;
+ sym->canInline = false;
+
+ // Tell LTO not to eliminate these symbols.
+ sym->isUsedInRegularObj = true;
+ if (!isa<Undefined>(wrap))
+ wrap->isUsedInRegularObj = true;
+ }
+ return v;
+}
+
+// Do renaming for -wrap by updating pointers to symbols.
+//
+// When this function is executed, only InputFiles and symbol table
+// contain pointers to symbol objects. We visit them to replace pointers,
+// so that wrapped symbols are swapped as instructed by the command line.
+void lld::coff::wrapSymbols(ArrayRef<WrappedSymbol> wrapped) {
+ DenseMap<Symbol *, Symbol *> map;
+ for (const WrappedSymbol &w : wrapped) {
+ map[w.sym] = w.wrap;
+ map[w.real] = w.sym;
+ if (Defined *d = dyn_cast<Defined>(w.wrap)) {
+ Symbol *imp = symtab->find(("__imp_" + w.sym->getName()).str());
+ // Create a new defined local import for the wrap symbol. If
+ // no imp prefixed symbol existed, there's no need for it.
+ // (We can't easily distinguish whether any object file actually
+ // referenced it or not, though.)
+ if (imp) {
+ DefinedLocalImport *wrapimp = make<DefinedLocalImport>(
+ saver.save("__imp_" + w.wrap->getName()), d);
+ symtab->localImportChunks.push_back(wrapimp->getChunk());
+ map[imp] = wrapimp;
+ }
+ }
+ }
+
+ // Update pointers in input files.
+ parallelForEach(ObjFile::instances, [&](ObjFile *file) {
+ MutableArrayRef<Symbol *> syms = file->getMutableSymbols();
+ for (size_t i = 0, e = syms.size(); i != e; ++i)
+ if (Symbol *s = map.lookup(syms[i]))
+ syms[i] = s;
+ });
+}
#include "Config.h"
#include "Symbols.h"
#include "lld/Common/LLVM.h"
+#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringSet.h"
+#include "llvm/Option/ArgList.h"
+#include <vector>
namespace lld {
namespace coff {
void writeDefFile(StringRef name);
+// The -wrap option is a feature to rename symbols so that you can write
+// wrappers for existing functions. If you pass `-wrap:foo`, all
+// occurrences of symbol `foo` are resolved to `__wrap_foo` (so, you are
+// expected to write `__wrap_foo` function as a wrapper). The original
+// symbol becomes accessible as `__real_foo`, so you can call that from your
+// wrapper.
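+//
+// For example, a hypothetical wrapper linked with `-wrap:malloc` could look
+// like this:
+//
+//   extern "C" void *__real_malloc(size_t n);
+//   extern "C" void *__wrap_malloc(size_t n) {
+//     // ... instrument the allocation, then forward to the real allocator.
+//     return __real_malloc(n);
+//   }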
+//
+// This data structure is instantiated for each -wrap option.
+struct WrappedSymbol {
+ Symbol *sym;
+ Symbol *real;
+ Symbol *wrap;
+};
+
+std::vector<WrappedSymbol> addWrappedSymbols(llvm::opt::InputArgList &args);
+
+void wrapSymbols(ArrayRef<WrappedSymbol> wrapped);
+
} // namespace coff
} // namespace lld
class P<string name, string help> :
Joined<["/", "-", "/?", "-?"], name#":">, HelpText<help>;
+// Same as P<> above, but without help texts, for private undocumented
+// options.
+class P_priv<string name> :
+ Joined<["/", "-", "/?", "-?"], name#":">;
+
// Boolean flag which can be suffixed by ":no". Using it unsuffixed turns the
// flag on and using it suffixed by ":no" turns it off.
multiclass B<string name, string help_on, string help_off> {
def alternatename : P<"alternatename", "Define weak alias">;
def base : P<"base", "Base address of the program">;
def color_diagnostics: Flag<["--"], "color-diagnostics">,
- HelpText<"Use colors in diagnostics">;
+ HelpText<"Alias for --color-diagnostics=always">;
+def no_color_diagnostics: Flag<["--"], "no-color-diagnostics">,
+ HelpText<"Alias for --color-diagnostics=never">;
def color_diagnostics_eq: Joined<["--"], "color-diagnostics=">,
- HelpText<"Use colors in diagnostics; one of 'always', 'never', 'auto'">;
+ HelpText<"Use colors in diagnostics (default: auto)">,
+ MetaVarName<"[auto,always,never]">;
def defaultlib : P<"defaultlib", "Add the library to the list of input files">;
def delayload : P<"delayload", "Delay loaded DLL name">;
def entry : P<"entry", "Name of entry point symbol">;
def lib : F<"lib">,
HelpText<"Act like lib.exe; must be first argument if present">;
def libpath : P<"libpath", "Additional library search path">;
-def linkrepro : P<"linkrepro",
- "Dump linker invocation and input files for debugging">;
+def linkrepro : Joined<["/", "-", "/?", "-?"], "linkrepro:">,
+ MetaVarName<"directory">,
+ HelpText<"Write repro.tar containing inputs and command to reproduce link">;
def lldignoreenv : F<"lldignoreenv">,
HelpText<"Ignore environment variables like %LIB%">;
def lldltocache : P<"lldltocache",
def lldltocachepolicy : P<"lldltocachepolicy",
"Pruning policy for the ThinLTO cache">;
def lldsavetemps : F<"lldsavetemps">,
- HelpText<"Save temporary files instead of deleting them">;
+ HelpText<"Save intermediate LTO compilation results">;
def machine : P<"machine", "Specify target platform">;
def merge : P<"merge", "Combine sections">;
def mllvm : P<"mllvm", "Options to pass to LLVM">;
def order : P<"order", "Put functions in order">;
def out : P<"out", "Path to file to write output">;
def natvis : P<"natvis", "Path to natvis file to embed in the PDB">;
-def no_color_diagnostics: F<"no-color-diagnostics">,
- HelpText<"Do not use colors in diagnostics">;
def pdb : P<"pdb", "PDB file path">;
def pdbstripped : P<"pdbstripped", "Stripped PDB file path">;
def pdbaltpath : P<"pdbaltpath", "PDB file path to embed in the image">;
def profile : F<"profile">;
def repro : F<"Brepro">,
HelpText<"Use a hash of the executable as the PE header timestamp">;
-def reproduce : P<"reproduce",
- "Dump linker invocation and input files for debugging">;
+def reproduce : Joined<["/", "-", "/?", "-?"], "reproduce:">,
+ MetaVarName<"filename">,
+ HelpText<"Write tar file containing inputs and command to reproduce link">;
def swaprun : P<"swaprun",
"Comma-separated list of 'cd' or 'net'">;
def swaprun_cd : F<"swaprun:cd">, Alias<swaprun>, AliasArgs<["cd"]>,
defm auto_import : B_priv<"auto-import">;
defm runtime_pseudo_reloc : B_priv<"runtime-pseudo-reloc">;
def end_lib : F<"end-lib">,
- HelpText<"Ends group of objects treated as if they were in a library">;
+ HelpText<"End group of objects treated as if they were in a library">;
def exclude_all_symbols : F<"exclude-all-symbols">;
def export_all_symbols : F<"export-all-symbols">;
defm demangle : B<"demangle",
def kill_at : F<"kill-at">;
def lldmingw : F<"lldmingw">;
def noseh : F<"noseh">;
+def osversion : P_priv<"osversion">;
def output_def : Joined<["/", "-", "/?", "-?"], "output-def:">;
def pdb_source_path : P<"pdbsourcepath",
"Base path used to make relative source file path absolute in PDB">;
def rsp_quoting : Joined<["--"], "rsp-quoting=">,
HelpText<"Quoting style for response files, 'windows' (default) or 'posix'">;
def start_lib : F<"start-lib">,
- HelpText<"Starts group of objects treated as if they were in a library">;
+ HelpText<"Start group of objects treated as if they were in a library">;
+defm stdcall_fixup : B_priv<"stdcall-fixup">;
def thinlto_emit_imports_files :
F<"thinlto-emit-imports-files">,
HelpText<"Emit .imports files with -thinlto-index-only">;
def lto_obj_path : P<
"lto-obj-path",
"output native object for merged LTO unit to this path">;
+def lto_cs_profile_generate: F<"lto-cs-profile-generate">,
+ HelpText<"Perform context sensitive PGO instrumentation">;
+def lto_cs_profile_file : P<"lto-cs-profile-file",
+ "Context sensitive profile file path">;
def dash_dash_version : Flag<["--"], "version">,
- HelpText<"Print version information">;
+ HelpText<"Display the version number and exit">;
def threads
: P<"threads", "Number of threads. '1' disables multi-threading. By "
"default all available hardware threads are used">;
+def call_graph_ordering_file: P<
+ "call-graph-ordering-file",
+ "Layout sections to optimize the given callgraph">;
+defm call_graph_profile_sort: B<
+ "call-graph-profile-sort",
+ "Reorder sections with call graph profile (default)",
+ "Do not reorder sections with call graph profile">;
+def print_symbol_order: P<
+ "print-symbol-order",
+ "Print a symbol order specified by /call-graph-ordering-file and "
+ "/call-graph-profile-sort into the specified file">;
+def wrap : P_priv<"wrap">;
// Flags for debugging
def lldmap : F<"lldmap">;
using namespace lld::coff;
using llvm::object::coff_section;
+using llvm::pdb::StringTableFixup;
static ExitOnError exitOnErr;
static Timer totalPdbLinkTimer("PDB Emission (Cumulative)", Timer::root());
-
static Timer addObjectsTimer("Add Objects", totalPdbLinkTimer);
+Timer lld::coff::loadGHashTimer("Global Type Hashing", addObjectsTimer);
+Timer lld::coff::mergeGHashTimer("GHash Type Merging", addObjectsTimer);
static Timer typeMergingTimer("Type Merging", addObjectsTimer);
static Timer symbolMergingTimer("Symbol Merging", addObjectsTimer);
static Timer publicsLayoutTimer("Publics Stream Layout", totalPdbLinkTimer);
/// Link info for each import file in the symbol table into the PDB.
void addImportFilesToPDB(ArrayRef<OutputSection *> outputSections);
+ void createModuleDBI(ObjFile *file);
+
/// Link CodeView from a single object file into the target (output) PDB.
/// When a precompiled headers object is linked, its TPI map might be provided
/// externally.
void addDebug(TpiSource *source);
- const CVIndexMap *mergeTypeRecords(TpiSource *source, CVIndexMap *localMap);
-
- void addDebugSymbols(ObjFile *file, const CVIndexMap *indexMap);
-
- void mergeSymbolRecords(ObjFile *file, const CVIndexMap &indexMap,
- std::vector<ulittle32_t *> &stringTableRefs,
- BinaryStreamRef symData);
+ void addDebugSymbols(TpiSource *source);
+
+ // Analyze the symbol records to separate module symbols from global symbols,
+ // find string references, and calculate how large the symbol stream will be
+ // in the PDB.
+ void analyzeSymbolSubsection(SectionChunk *debugChunk,
+ uint32_t &moduleSymOffset,
+ uint32_t &nextRelocIndex,
+ std::vector<StringTableFixup> &stringTableFixups,
+ BinaryStreamRef symData);
+
+  // Write all module symbols from all live debug symbol subsections of the
+ // given object file into the given stream writer.
+ Error writeAllModuleSymbolRecords(ObjFile *file, BinaryStreamWriter &writer);
+
+ // Callback to copy and relocate debug symbols during PDB file writing.
+ static Error commitSymbolsForObject(void *ctx, void *obj,
+ BinaryStreamWriter &writer);
+
+ // Copy the symbol record, relocate it, and fix the alignment if necessary.
+ // Rewrite type indices in the record. Replace unrecognized symbol records
+ // with S_SKIP records.
+ void writeSymbolRecord(SectionChunk *debugChunk,
+ ArrayRef<uint8_t> sectionContents, CVSymbol sym,
+ size_t alignedSize, uint32_t &nextRelocIndex,
+ std::vector<uint8_t> &storage);
/// Add the section map and section contributions to the PDB.
void addSections(ArrayRef<OutputSection *> outputSections,
uint64_t globalSymbols = 0;
uint64_t moduleSymbols = 0;
uint64_t publicSymbols = 0;
+ uint64_t nbTypeRecords = 0;
+ uint64_t nbTypeRecordsBytes = 0;
+};
+
+/// Represents an unrelocated DEBUG_S_FRAMEDATA subsection.
+struct UnrelocatedFpoData {
+ SectionChunk *debugChunk = nullptr;
+ ArrayRef<uint8_t> subsecData;
+ uint32_t relocIndex = 0;
};
+/// The size of the magic bytes at the beginning of a symbol section or stream.
+enum : uint32_t { kSymbolStreamMagicSize = 4 };
+
class DebugSHandler {
PDBLinker &linker;
ObjFile &file;
/// The result of merging type indices.
- const CVIndexMap *indexMap;
+ TpiSource *source;
/// The DEBUG_S_STRINGTABLE subsection. These strings are referred to by
/// index from other records in the .debug$S section. All of these strings
/// contain string table references which need to be re-written, so we
/// collect them all here and re-write them after all subsections have been
/// discovered and processed.
- std::vector<DebugFrameDataSubsectionRef> newFpoFrames;
+ std::vector<UnrelocatedFpoData> frameDataSubsecs;
+
+ /// List of string table references in symbol records. Later they will be
+ /// applied to the symbols during PDB writing.
+ std::vector<StringTableFixup> stringTableFixups;
+
+ /// Sum of the size of all module symbol records across all .debug$S sections.
+ /// Includes record realignment and the size of the symbol stream magic
+ /// prefix.
+ uint32_t moduleStreamSize = kSymbolStreamMagicSize;
+
+ /// Next relocation index in the current .debug$S section. Resets every
+ /// handleDebugS call.
+ uint32_t nextRelocIndex = 0;
- /// Pointers to raw memory that we determine have string table references
- /// that need to be re-written. We first process all .debug$S subsections
- /// to ensure that we can handle subsections written in any order, building
- /// up this list as we go. At the end, we use the string table (which must
- /// have been discovered by now else it is an error) to re-write these
- /// references.
- std::vector<ulittle32_t *> stringTableReferences;
+ void advanceRelocIndex(SectionChunk *debugChunk, ArrayRef<uint8_t> subsec);
- void mergeInlineeLines(const DebugSubsectionRecord &inlineeLines);
+ void addUnrelocatedSubsection(SectionChunk *debugChunk,
+ const DebugSubsectionRecord &ss);
+
+ void addFrameDataSubsection(SectionChunk *debugChunk,
+ const DebugSubsectionRecord &ss);
+
+ void recordStringTableReferences(CVSymbol sym, uint32_t symOffset);
public:
- DebugSHandler(PDBLinker &linker, ObjFile &file, const CVIndexMap *indexMap)
- : linker(linker), file(file), indexMap(indexMap) {}
+ DebugSHandler(PDBLinker &linker, ObjFile &file, TpiSource *source)
+ : linker(linker), file(file), source(source) {}
- void handleDebugS(ArrayRef<uint8_t> relocatedDebugContents);
+ void handleDebugS(SectionChunk *debugChunk);
void finish();
};
});
}
-static bool remapTypeIndex(TypeIndex &ti, ArrayRef<TypeIndex> typeIndexMap) {
- if (ti.isSimple())
- return true;
- if (ti.toArrayIndex() >= typeIndexMap.size())
- return false;
- ti = typeIndexMap[ti.toArrayIndex()];
- return true;
-}
-
-static void remapTypesInSymbolRecord(ObjFile *file, SymbolKind symKind,
- MutableArrayRef<uint8_t> recordBytes,
- const CVIndexMap &indexMap,
- ArrayRef<TiReference> typeRefs) {
- MutableArrayRef<uint8_t> contents =
- recordBytes.drop_front(sizeof(RecordPrefix));
- for (const TiReference &ref : typeRefs) {
- unsigned byteSize = ref.Count * sizeof(TypeIndex);
- if (contents.size() < ref.Offset + byteSize)
- fatal("symbol record too short");
-
- // This can be an item index or a type index. Choose the appropriate map.
- ArrayRef<TypeIndex> typeOrItemMap = indexMap.tpiMap;
- bool isItemIndex = ref.Kind == TiRefKind::IndexRef;
- if (isItemIndex && indexMap.isTypeServerMap)
- typeOrItemMap = indexMap.ipiMap;
-
- MutableArrayRef<TypeIndex> tIs(
- reinterpret_cast<TypeIndex *>(contents.data() + ref.Offset), ref.Count);
- for (TypeIndex &ti : tIs) {
- if (!remapTypeIndex(ti, typeOrItemMap)) {
- log("ignoring symbol record of kind 0x" + utohexstr(symKind) + " in " +
- file->getName() + " with bad " + (isItemIndex ? "item" : "type") +
- " index 0x" + utohexstr(ti.getIndex()));
- ti = TypeIndex(SimpleTypeKind::NotTranslated);
- continue;
- }
- }
- }
-}
-
-static void
-recordStringTableReferenceAtOffset(MutableArrayRef<uint8_t> contents,
- uint32_t offset,
- std::vector<ulittle32_t *> &strTableRefs) {
- contents =
- contents.drop_front(offset).take_front(sizeof(support::ulittle32_t));
- ulittle32_t *index = reinterpret_cast<ulittle32_t *>(contents.data());
- strTableRefs.push_back(index);
+static void addGHashTypeInfo(pdb::PDBFileBuilder &builder) {
+ // Start the TPI or IPI stream header.
+ builder.getTpiBuilder().setVersionHeader(pdb::PdbTpiV80);
+ builder.getIpiBuilder().setVersionHeader(pdb::PdbTpiV80);
+ for_each(TpiSource::instances, [&](TpiSource *source) {
+ builder.getTpiBuilder().addTypeRecords(source->mergedTpi.recs,
+ source->mergedTpi.recSizes,
+ source->mergedTpi.recHashes);
+ builder.getIpiBuilder().addTypeRecords(source->mergedIpi.recs,
+ source->mergedIpi.recSizes,
+ source->mergedIpi.recHashes);
+ });
}
static void
-recordStringTableReferences(SymbolKind kind, MutableArrayRef<uint8_t> contents,
- std::vector<ulittle32_t *> &strTableRefs) {
+recordStringTableReferences(CVSymbol sym, uint32_t symOffset,
+ std::vector<StringTableFixup> &stringTableFixups) {
// For now we only handle S_FILESTATIC, but we may need the same logic for
// S_DEFRANGE and S_DEFRANGE_SUBFIELD. However, I cannot seem to generate any
// PDBs that contain these types of records, so because of the uncertainty
// they are omitted here until we can prove that it's necessary.
- switch (kind) {
- case SymbolKind::S_FILESTATIC:
+ switch (sym.kind()) {
+ case SymbolKind::S_FILESTATIC: {
// FileStaticSym::ModFileOffset
- recordStringTableReferenceAtOffset(contents, 8, strTableRefs);
+ uint32_t ref = *reinterpret_cast<const ulittle32_t *>(&sym.data()[8]);
+ stringTableFixups.push_back({ref, symOffset + 8});
break;
+ }
case SymbolKind::S_DEFRANGE:
case SymbolKind::S_DEFRANGE_SUBFIELD:
log("Not fixing up string table reference in S_DEFRANGE / "
/// MSVC translates S_PROC_ID_END to S_END, and S_[LG]PROC32_ID to S_[LG]PROC32
static void translateIdSymbols(MutableArrayRef<uint8_t> &recordData,
- TypeCollection &idTable) {
+ TypeMerger &tMerger, TpiSource *source) {
RecordPrefix *prefix = reinterpret_cast<RecordPrefix *>(recordData.data());
SymbolKind kind = symbolKind(recordData);
reinterpret_cast<TypeIndex *>(content.data() + refs[0].Offset);
// `ti` is the index of a FuncIdRecord or MemberFuncIdRecord which lives in
// the IPI stream, whose `FunctionType` member refers to the TPI stream.
- // Note that LF_FUNC_ID and LF_MEMFUNC_ID have the same record layout, and
+ // Note that LF_FUNC_ID and LF_MFUNC_ID have the same record layout, and
// in both cases we just need the second type index.
if (!ti->isSimple() && !ti->isNoneType()) {
- CVType funcIdData = idTable.getType(*ti);
- ArrayRef<uint8_t> tiBuf = funcIdData.data().slice(8, 4);
- assert(tiBuf.size() == 4 && "corrupt LF_[MEM]FUNC_ID record");
- *ti = *reinterpret_cast<const TypeIndex *>(tiBuf.data());
+ TypeIndex newType = TypeIndex(SimpleTypeKind::NotTranslated);
+ if (config->debugGHashes) {
+ auto idToType = tMerger.funcIdToType.find(*ti);
+ if (idToType != tMerger.funcIdToType.end())
+ newType = idToType->second;
+ } else {
+ if (tMerger.getIDTable().contains(*ti)) {
+ CVType funcIdData = tMerger.getIDTable().getType(*ti);
+ if (funcIdData.length() >= 8 && (funcIdData.kind() == LF_FUNC_ID ||
+ funcIdData.kind() == LF_MFUNC_ID)) {
+ newType = *reinterpret_cast<const TypeIndex *>(&funcIdData.data()[8]);
+ }
+ }
+ }
+ if (newType == TypeIndex(SimpleTypeKind::NotTranslated)) {
+ warn(formatv("procedure symbol record for `{0}` in {1} refers to PDB "
+ "item index {2:X} which is not a valid function ID record",
+ getSymbolName(CVSymbol(recordData)),
+ source->file->getName(), ti->getIndex()));
+ }
+ *ti = newType;
}
kind = (kind == SymbolKind::S_GPROC32_ID) ? SymbolKind::S_GPROC32
}
}
-/// Copy the symbol record. In a PDB, symbol records must be 4 byte aligned.
-/// The object file may not be aligned.
-static MutableArrayRef<uint8_t>
-copyAndAlignSymbol(const CVSymbol &sym, MutableArrayRef<uint8_t> &alignedMem) {
- size_t size = alignTo(sym.length(), alignOf(CodeViewContainer::Pdb));
- assert(size >= 4 && "record too short");
- assert(size <= MaxRecordLength && "record too long");
- assert(alignedMem.size() >= size && "didn't preallocate enough");
-
- // Copy the symbol record and zero out any padding bytes.
- MutableArrayRef<uint8_t> newData = alignedMem.take_front(size);
- alignedMem = alignedMem.drop_front(size);
- memcpy(newData.data(), sym.data().data(), sym.length());
- memset(newData.data() + sym.length(), 0, size - sym.length());
-
- // Update the record prefix length. It should point to the beginning of the
- // next record.
- auto *prefix = reinterpret_cast<RecordPrefix *>(newData.data());
- prefix->RecordLen = size - 2;
- return newData;
-}
-
+namespace {
struct ScopeRecord {
ulittle32_t ptrParent;
ulittle32_t ptrEnd;
};
+} // namespace
-struct SymbolScope {
- ScopeRecord *openingRecord;
- uint32_t scopeOffset;
-};
+/// Given a pointer to a symbol record that opens a scope, return a pointer to
+/// the scope fields.
+static ScopeRecord *getSymbolScopeFields(void *sym) {
+ return reinterpret_cast<ScopeRecord *>(reinterpret_cast<char *>(sym) +
+ sizeof(RecordPrefix));
+}
-static void scopeStackOpen(SmallVectorImpl<SymbolScope> &stack,
- uint32_t curOffset, CVSymbol &sym) {
- assert(symbolOpensScope(sym.kind()));
- SymbolScope s;
- s.scopeOffset = curOffset;
- s.openingRecord = const_cast<ScopeRecord *>(
- reinterpret_cast<const ScopeRecord *>(sym.content().data()));
- s.openingRecord->ptrParent = stack.empty() ? 0 : stack.back().scopeOffset;
- stack.push_back(s);
+// To open a scope, push the offset of the current symbol record onto the
+// stack.
+static void scopeStackOpen(SmallVectorImpl<uint32_t> &stack,
+ std::vector<uint8_t> &storage) {
+ stack.push_back(storage.size());
}
-static void scopeStackClose(SmallVectorImpl<SymbolScope> &stack,
- uint32_t curOffset, InputFile *file) {
+// To close a scope, update the record that opened the scope.
+static void scopeStackClose(SmallVectorImpl<uint32_t> &stack,
+ std::vector<uint8_t> &storage,
+ uint32_t storageBaseOffset, ObjFile *file) {
if (stack.empty()) {
warn("symbol scopes are not balanced in " + file->getName());
return;
}
- SymbolScope s = stack.pop_back_val();
- s.openingRecord->ptrEnd = curOffset;
+
+ // Update ptrEnd of the record that opened the scope to point to the
+ // current record, if we are writing into the module symbol stream.
+ uint32_t offOpen = stack.pop_back_val();
+ uint32_t offEnd = storageBaseOffset + storage.size();
+ uint32_t offParent = stack.empty() ? 0 : (stack.back() + storageBaseOffset);
+ ScopeRecord *scopeRec = getSymbolScopeFields(&(storage)[offOpen]);
+ scopeRec->ptrParent = offParent;
+ scopeRec->ptrEnd = offEnd;
}
-static bool symbolGoesInModuleStream(const CVSymbol &sym, bool isGlobalScope) {
+static bool symbolGoesInModuleStream(const CVSymbol &sym,
+ unsigned symbolScopeDepth) {
switch (sym.kind()) {
case SymbolKind::S_GDATA32:
case SymbolKind::S_CONSTANT:
return false;
// S_UDT records go in the module stream if it is not a global S_UDT.
case SymbolKind::S_UDT:
- return !isGlobalScope;
+ return symbolScopeDepth > 0;
// S_GDATA32 does not go in the module stream, but S_LDATA32 does.
case SymbolKind::S_LDATA32:
case SymbolKind::S_LTHREAD32:
}
static bool symbolGoesInGlobalsStream(const CVSymbol &sym,
- bool isFunctionScope) {
+ unsigned symbolScopeDepth) {
switch (sym.kind()) {
case SymbolKind::S_CONSTANT:
case SymbolKind::S_GDATA32:
case SymbolKind::S_GTHREAD32:
case SymbolKind::S_GPROC32:
case SymbolKind::S_LPROC32:
+ case SymbolKind::S_GPROC32_ID:
+ case SymbolKind::S_LPROC32_ID:
// We really should not be seeing S_PROCREF and S_LPROCREF in the first place
// since they are synthesized by the linker in response to S_GPROC32 and
// S_LPROC32, but if we do see them, copy them straight through.
case SymbolKind::S_UDT:
case SymbolKind::S_LDATA32:
case SymbolKind::S_LTHREAD32:
- return !isFunctionScope;
+ return symbolScopeDepth == 0;
default:
return false;
}
}
static void addGlobalSymbol(pdb::GSIStreamBuilder &builder, uint16_t modIndex,
- unsigned symOffset, const CVSymbol &sym) {
+ unsigned symOffset,
+ std::vector<uint8_t> &symStorage) {
+ CVSymbol sym(makeArrayRef(symStorage));
switch (sym.kind()) {
case SymbolKind::S_CONSTANT:
case SymbolKind::S_UDT:
case SymbolKind::S_LTHREAD32:
case SymbolKind::S_LDATA32:
case SymbolKind::S_PROCREF:
- case SymbolKind::S_LPROCREF:
- builder.addGlobalSymbol(sym);
+ case SymbolKind::S_LPROCREF: {
+ // sym is a temporary object, so we have to copy and reallocate the record
+ // to stabilize it.
+ uint8_t *mem = bAlloc.Allocate<uint8_t>(sym.length());
+ memcpy(mem, sym.data().data(), sym.length());
+ builder.addGlobalSymbol(CVSymbol(makeArrayRef(mem, sym.length())));
break;
+ }
case SymbolKind::S_GPROC32:
case SymbolKind::S_LPROC32: {
SymbolRecordKind k = SymbolRecordKind::ProcRefSym;
}
}
-void PDBLinker::mergeSymbolRecords(ObjFile *file, const CVIndexMap &indexMap,
- std::vector<ulittle32_t *> &stringTableRefs,
- BinaryStreamRef symData) {
- ArrayRef<uint8_t> symsBuffer;
- cantFail(symData.readBytes(0, symData.getLength(), symsBuffer));
- SmallVector<SymbolScope, 4> scopes;
-
- // Iterate every symbol to check if any need to be realigned, and if so, how
- // much space we need to allocate for them.
- bool needsRealignment = false;
- unsigned totalRealignedSize = 0;
- auto ec = forEachCodeViewRecord<CVSymbol>(
- symsBuffer, [&](CVSymbol sym) -> llvm::Error {
- unsigned realignedSize =
- alignTo(sym.length(), alignOf(CodeViewContainer::Pdb));
- needsRealignment |= realignedSize != sym.length();
- totalRealignedSize += realignedSize;
- return Error::success();
- });
-
- // If any of the symbol record lengths was corrupt, ignore them all, warn
- // about it, and move on.
- if (ec) {
- warn("corrupt symbol records in " + file->getName());
- consumeError(std::move(ec));
+// Check if the given symbol record was padded for alignment. If so, zero out
+// the padding bytes and update the record prefix with the new size.
+static void fixRecordAlignment(MutableArrayRef<uint8_t> recordBytes,
+ size_t oldSize) {
+ size_t alignedSize = recordBytes.size();
+ if (oldSize == alignedSize)
return;
- }
+ reinterpret_cast<RecordPrefix *>(recordBytes.data())->RecordLen =
+ alignedSize - 2;
+ memset(recordBytes.data() + oldSize, 0, alignedSize - oldSize);
+}
+
+// Replace any record with a skip record of the same size. This is useful when
+// we have reserved size for a symbol record, but type index remapping fails.
+static void replaceWithSkipRecord(MutableArrayRef<uint8_t> recordBytes) {
+ memset(recordBytes.data(), 0, recordBytes.size());
+ auto *prefix = reinterpret_cast<RecordPrefix *>(recordBytes.data());
+ prefix->RecordKind = SymbolKind::S_SKIP;
+ prefix->RecordLen = recordBytes.size() - 2;
+}
- // If any symbol needed realignment, allocate enough contiguous memory for
- // them all. Typically symbol subsections are small enough that this will not
- // cause fragmentation.
- MutableArrayRef<uint8_t> alignedSymbolMem;
- if (needsRealignment) {
- void *alignedData =
- bAlloc.Allocate(totalRealignedSize, alignOf(CodeViewContainer::Pdb));
- alignedSymbolMem = makeMutableArrayRef(
- reinterpret_cast<uint8_t *>(alignedData), totalRealignedSize);
+// Copy the symbol record, relocate it, and fix the alignment if necessary.
+// Rewrite type indices in the record. Replace unrecognized symbol records with
+// S_SKIP records.
+void PDBLinker::writeSymbolRecord(SectionChunk *debugChunk,
+ ArrayRef<uint8_t> sectionContents,
+ CVSymbol sym, size_t alignedSize,
+ uint32_t &nextRelocIndex,
+ std::vector<uint8_t> &storage) {
+ // Allocate space for the new record at the end of the storage.
+ storage.resize(storage.size() + alignedSize);
+ auto recordBytes = MutableArrayRef<uint8_t>(storage).take_back(alignedSize);
+
+ // Copy the symbol record and relocate it.
+ debugChunk->writeAndRelocateSubsection(sectionContents, sym.data(),
+ nextRelocIndex, recordBytes.data());
+ fixRecordAlignment(recordBytes, sym.length());
+
+ // Re-map all the type index references.
+ TpiSource *source = debugChunk->file->debugTypesObj;
+ if (!source->remapTypesInSymbolRecord(recordBytes)) {
+ log("ignoring unknown symbol record with kind 0x" + utohexstr(sym.kind()));
+ replaceWithSkipRecord(recordBytes);
}
- // Iterate again, this time doing the real work.
- unsigned curSymOffset = file->moduleDBI->getNextSymbolOffset();
- ArrayRef<uint8_t> bulkSymbols;
- cantFail(forEachCodeViewRecord<CVSymbol>(
- symsBuffer, [&](CVSymbol sym) -> llvm::Error {
- // Align the record if required.
- MutableArrayRef<uint8_t> recordBytes;
- if (needsRealignment) {
- recordBytes = copyAndAlignSymbol(sym, alignedSymbolMem);
- sym = CVSymbol(recordBytes);
- } else {
- // Otherwise, we can actually mutate the symbol directly, since we
- // copied it to apply relocations.
- recordBytes = makeMutableArrayRef(
- const_cast<uint8_t *>(sym.data().data()), sym.length());
- }
+ // An object file may have S_xxx_ID symbols, but these get converted to
+ // "real" symbols in a PDB.
+ translateIdSymbols(recordBytes, tMerger, source);
+}
- // Discover type index references in the record. Skip it if we don't
- // know where they are.
- SmallVector<TiReference, 32> typeRefs;
- if (!discoverTypeIndicesInSymbol(sym, typeRefs)) {
- log("ignoring unknown symbol record with kind 0x" +
- utohexstr(sym.kind()));
- return Error::success();
- }
+void PDBLinker::analyzeSymbolSubsection(
+ SectionChunk *debugChunk, uint32_t &moduleSymOffset,
+ uint32_t &nextRelocIndex, std::vector<StringTableFixup> &stringTableFixups,
+ BinaryStreamRef symData) {
+ ObjFile *file = debugChunk->file;
+ uint32_t moduleSymStart = moduleSymOffset;
- // Re-map all the type index references.
- remapTypesInSymbolRecord(file, sym.kind(), recordBytes, indexMap,
- typeRefs);
+ uint32_t scopeLevel = 0;
+ std::vector<uint8_t> storage;
+ ArrayRef<uint8_t> sectionContents = debugChunk->getContents();
- // An object file may have S_xxx_ID symbols, but these get converted to
- // "real" symbols in a PDB.
- translateIdSymbols(recordBytes, tMerger.getIDTable());
- sym = CVSymbol(recordBytes);
+ ArrayRef<uint8_t> symsBuffer;
+ cantFail(symData.readBytes(0, symData.getLength(), symsBuffer));
- // If this record refers to an offset in the object file's string table,
- // add that item to the global PDB string table and re-write the index.
- recordStringTableReferences(sym.kind(), recordBytes, stringTableRefs);
+ if (symsBuffer.empty())
+ warn("empty symbols subsection in " + file->getName());
- // Fill in "Parent" and "End" fields by maintaining a stack of scopes.
+ Error ec = forEachCodeViewRecord<CVSymbol>(
+ symsBuffer, [&](CVSymbol sym) -> llvm::Error {
+ // Track the current scope.
if (symbolOpensScope(sym.kind()))
- scopeStackOpen(scopes, curSymOffset, sym);
+ ++scopeLevel;
else if (symbolEndsScope(sym.kind()))
- scopeStackClose(scopes, curSymOffset, file);
+ --scopeLevel;
+
+ uint32_t alignedSize =
+ alignTo(sym.length(), alignOf(CodeViewContainer::Pdb));
- // Add the symbol to the globals stream if necessary. Do this before
- // adding the symbol to the module since we may need to get the next
- // symbol offset, and writing to the module's symbol stream will update
- // that offset.
- if (symbolGoesInGlobalsStream(sym, !scopes.empty())) {
+ // Copy global records. Some global records (mainly procedures)
+ // reference the current offset into the module stream.
+ if (symbolGoesInGlobalsStream(sym, scopeLevel)) {
+ storage.clear();
+ writeSymbolRecord(debugChunk, sectionContents, sym, alignedSize,
+ nextRelocIndex, storage);
addGlobalSymbol(builder.getGsiBuilder(),
- file->moduleDBI->getModuleIndex(), curSymOffset, sym);
+ file->moduleDBI->getModuleIndex(), moduleSymOffset,
+ storage);
++globalSymbols;
}
- if (symbolGoesInModuleStream(sym, scopes.empty())) {
- // Add symbols to the module in bulk. If this symbol is contiguous
- // with the previous run of symbols to add, combine the ranges. If
- // not, close the previous range of symbols and start a new one.
- if (sym.data().data() == bulkSymbols.end()) {
- bulkSymbols = makeArrayRef(bulkSymbols.data(),
- bulkSymbols.size() + sym.length());
- } else {
- file->moduleDBI->addSymbolsInBulk(bulkSymbols);
- bulkSymbols = recordBytes;
- }
- curSymOffset += sym.length();
+ // Update the module stream offset and record any string table index
+ // references. There are very few of these and they will be rewritten
+ // later during PDB writing.
+ if (symbolGoesInModuleStream(sym, scopeLevel)) {
+ recordStringTableReferences(sym, moduleSymOffset, stringTableFixups);
+ moduleSymOffset += alignedSize;
++moduleSymbols;
}
+
return Error::success();
- }));
+ });
- // Add any remaining symbols we've accumulated.
- file->moduleDBI->addSymbolsInBulk(bulkSymbols);
+ // If we encountered corrupt records, ignore the whole subsection. If we wrote
+ // any partial records, undo that. For globals, we just keep what we have and
+ // continue.
+ if (ec) {
+ warn("corrupt symbol records in " + file->getName());
+ moduleSymOffset = moduleSymStart;
+ consumeError(std::move(ec));
+ }
+}
+
+Error PDBLinker::writeAllModuleSymbolRecords(ObjFile *file,
+ BinaryStreamWriter &writer) {
+ std::vector<uint8_t> storage;
+ SmallVector<uint32_t, 4> scopes;
+
+ // Visit all live .debug$S sections a second time, and write them to the PDB.
+ for (SectionChunk *debugChunk : file->getDebugChunks()) {
+ if (!debugChunk->live || debugChunk->getSize() == 0 ||
+ debugChunk->getSectionName() != ".debug$S")
+ continue;
+
+ ArrayRef<uint8_t> sectionContents = debugChunk->getContents();
+ auto contents =
+ SectionChunk::consumeDebugMagic(sectionContents, ".debug$S");
+ DebugSubsectionArray subsections;
+ BinaryStreamReader reader(contents, support::little);
+ exitOnErr(reader.readArray(subsections, contents.size()));
+
+ uint32_t nextRelocIndex = 0;
+ for (const DebugSubsectionRecord &ss : subsections) {
+ if (ss.kind() != DebugSubsectionKind::Symbols)
+ continue;
+
+ uint32_t moduleSymStart = writer.getOffset();
+ scopes.clear();
+ storage.clear();
+ ArrayRef<uint8_t> symsBuffer;
+ BinaryStreamRef sr = ss.getRecordData();
+ cantFail(sr.readBytes(0, sr.getLength(), symsBuffer));
+ auto ec = forEachCodeViewRecord<CVSymbol>(
+ symsBuffer, [&](CVSymbol sym) -> llvm::Error {
+ // Track the current scope. Only update records in the postmerge
+ // pass.
+ if (symbolOpensScope(sym.kind()))
+ scopeStackOpen(scopes, storage);
+ else if (symbolEndsScope(sym.kind()))
+ scopeStackClose(scopes, storage, moduleSymStart, file);
+
+ // Copy, relocate, and rewrite each module symbol.
+ if (symbolGoesInModuleStream(sym, scopes.size())) {
+ uint32_t alignedSize =
+ alignTo(sym.length(), alignOf(CodeViewContainer::Pdb));
+ writeSymbolRecord(debugChunk, sectionContents, sym, alignedSize,
+ nextRelocIndex, storage);
+ }
+ return Error::success();
+ });
+
+ // If we encounter corrupt records in the second pass, ignore them. We
+ // already warned about them in the first analysis pass.
+ if (ec) {
+ consumeError(std::move(ec));
+ storage.clear();
+ }
+
+ // Writing bytes has a very high overhead, so write the entire subsection
+ // at once.
+ // TODO: Consider buffering symbols for the entire object file to reduce
+ // overhead even further.
+ if (Error e = writer.writeBytes(storage))
+ return e;
+ }
+ }
+
+ return Error::success();
+}
+
+Error PDBLinker::commitSymbolsForObject(void *ctx, void *obj,
+ BinaryStreamWriter &writer) {
+ return static_cast<PDBLinker *>(ctx)->writeAllModuleSymbolRecords(
+ static_cast<ObjFile *>(obj), writer);
}
static pdb::SectionContrib createSectionContrib(const Chunk *c, uint32_t modi) {
return pdbStrTable.insert(*expectedString);
}
-void DebugSHandler::handleDebugS(ArrayRef<uint8_t> relocatedDebugContents) {
- relocatedDebugContents =
- SectionChunk::consumeDebugMagic(relocatedDebugContents, ".debug$S");
-
+void DebugSHandler::handleDebugS(SectionChunk *debugChunk) {
+ // Note that we are processing the *unrelocated* section contents. They will
+ // be relocated later during PDB writing.
+ ArrayRef<uint8_t> contents = debugChunk->getContents();
+ contents = SectionChunk::consumeDebugMagic(contents, ".debug$S");
DebugSubsectionArray subsections;
- BinaryStreamReader reader(relocatedDebugContents, support::little);
- exitOnErr(reader.readArray(subsections, relocatedDebugContents.size()));
+ BinaryStreamReader reader(contents, support::little);
+ exitOnErr(reader.readArray(subsections, contents.size()));
+ debugChunk->sortRelocations();
- // If there is no index map, use an empty one.
- CVIndexMap tempIndexMap;
- if (!indexMap)
- indexMap = &tempIndexMap;
+ // Reset the relocation index, since this is a new section.
+ nextRelocIndex = 0;
for (const DebugSubsectionRecord &ss : subsections) {
// Ignore subsections with the 'ignore' bit. Some versions of the Visual C++
exitOnErr(checksums.initialize(ss.getRecordData()));
break;
case DebugSubsectionKind::Lines:
- // We can add the relocated line table directly to the PDB without
- // modification because the file checksum offsets will stay the same.
- file.moduleDBI->addDebugSubsection(ss);
- break;
case DebugSubsectionKind::InlineeLines:
- // The inlinee lines subsection also has file checksum table references
- // that can be used directly, but it contains function id references that
- // must be remapped.
- mergeInlineeLines(ss);
+ addUnrelocatedSubsection(debugChunk, ss);
break;
- case DebugSubsectionKind::FrameData: {
- // We need to re-write string table indices here, so save off all
- // frame data subsections until we've processed the entire list of
- // subsections so that we can be sure we have the string table.
- DebugFrameDataSubsectionRef fds;
- exitOnErr(fds.initialize(ss.getRecordData()));
- newFpoFrames.push_back(std::move(fds));
+ case DebugSubsectionKind::FrameData:
+ addFrameDataSubsection(debugChunk, ss);
break;
- }
- case DebugSubsectionKind::Symbols: {
- linker.mergeSymbolRecords(&file, *indexMap, stringTableReferences,
- ss.getRecordData());
+ case DebugSubsectionKind::Symbols:
+ linker.analyzeSymbolSubsection(debugChunk, moduleStreamSize,
+ nextRelocIndex, stringTableFixups,
+ ss.getRecordData());
break;
- }
case DebugSubsectionKind::CrossScopeImports:
case DebugSubsectionKind::CrossScopeExports:
}
}
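+// Advance nextRelocIndex to the first relocation at or past the start of the
+// given subsection, so relocation processing for this subsection can begin
+// from there.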
+void DebugSHandler::advanceRelocIndex(SectionChunk *sc,
+ ArrayRef<uint8_t> subsec) {
+ ptrdiff_t vaBegin = subsec.data() - sc->getContents().data();
+ assert(vaBegin > 0);
+ auto relocs = sc->getRelocs();
+ for (; nextRelocIndex < relocs.size(); ++nextRelocIndex) {
+ if (relocs[nextRelocIndex].VirtualAddress >= vaBegin)
+ break;
+ }
+}
+
+namespace {
+/// Wrapper class for unrelocated line and inlinee line subsections, which
+/// require only relocation and type index remapping to add to the PDB.
+class UnrelocatedDebugSubsection : public DebugSubsection {
+public:
+ UnrelocatedDebugSubsection(DebugSubsectionKind k, SectionChunk *debugChunk,
+ ArrayRef<uint8_t> subsec, uint32_t relocIndex)
+ : DebugSubsection(k), debugChunk(debugChunk), subsec(subsec),
+ relocIndex(relocIndex) {}
+
+ Error commit(BinaryStreamWriter &writer) const override;
+ uint32_t calculateSerializedSize() const override { return subsec.size(); }
+
+ SectionChunk *debugChunk;
+ ArrayRef<uint8_t> subsec;
+ uint32_t relocIndex;
+};
+} // namespace
+
+Error UnrelocatedDebugSubsection::commit(BinaryStreamWriter &writer) const {
+ std::vector<uint8_t> relocatedBytes(subsec.size());
+ uint32_t tmpRelocIndex = relocIndex;
+ debugChunk->writeAndRelocateSubsection(debugChunk->getContents(), subsec,
+ tmpRelocIndex, relocatedBytes.data());
+
+ // Remap type indices in inlinee line records in place. Skip the remapping if
+ // there is no type source info.
+ if (kind() == DebugSubsectionKind::InlineeLines &&
+ debugChunk->file->debugTypesObj) {
+ TpiSource *source = debugChunk->file->debugTypesObj;
+ DebugInlineeLinesSubsectionRef inlineeLines;
+ BinaryStreamReader storageReader(relocatedBytes, support::little);
+ exitOnErr(inlineeLines.initialize(storageReader));
+ for (const InlineeSourceLine &line : inlineeLines) {
+ TypeIndex &inlinee = *const_cast<TypeIndex *>(&line.Header->Inlinee);
+ if (!source->remapTypeIndex(inlinee, TiRefKind::IndexRef)) {
+ log("bad inlinee line record in " + debugChunk->file->getName() +
+ " with bad inlinee index 0x" + utohexstr(inlinee.getIndex()));
+ }
+ }
+ }
+
+ return writer.writeBytes(relocatedBytes);
+}
+
+void DebugSHandler::addUnrelocatedSubsection(SectionChunk *debugChunk,
+ const DebugSubsectionRecord &ss) {
+ ArrayRef<uint8_t> subsec;
+ BinaryStreamRef sr = ss.getRecordData();
+ cantFail(sr.readBytes(0, sr.getLength(), subsec));
+ advanceRelocIndex(debugChunk, subsec);
+ file.moduleDBI->addDebugSubsection(
+ std::make_shared<UnrelocatedDebugSubsection>(ss.kind(), debugChunk,
+ subsec, nextRelocIndex));
+}
+
+void DebugSHandler::addFrameDataSubsection(SectionChunk *debugChunk,
+ const DebugSubsectionRecord &ss) {
+ // We need to re-write string table indices here, so save off all
+ // frame data subsections until we've processed the entire list of
+ // subsections so that we can be sure we have the string table.
+ ArrayRef<uint8_t> subsec;
+ BinaryStreamRef sr = ss.getRecordData();
+ cantFail(sr.readBytes(0, sr.getLength(), subsec));
+ advanceRelocIndex(debugChunk, subsec);
+ frameDataSubsecs.push_back({debugChunk, subsec, nextRelocIndex});
+}
+
static Expected<StringRef>
getFileName(const DebugStringTableSubsectionRef &strings,
const DebugChecksumsSubsectionRef &checksums, uint32_t fileID) {
return strings.getString(offset);
}
-void DebugSHandler::mergeInlineeLines(
- const DebugSubsectionRecord &inlineeSubsection) {
- DebugInlineeLinesSubsectionRef inlineeLines;
- exitOnErr(inlineeLines.initialize(inlineeSubsection.getRecordData()));
-
- // Remap type indices in inlinee line records in place.
- for (const InlineeSourceLine &line : inlineeLines) {
- TypeIndex &inlinee = *const_cast<TypeIndex *>(&line.Header->Inlinee);
- ArrayRef<TypeIndex> typeOrItemMap =
- indexMap->isTypeServerMap ? indexMap->ipiMap : indexMap->tpiMap;
- if (!remapTypeIndex(inlinee, typeOrItemMap)) {
- log("bad inlinee line record in " + file.getName() +
- " with bad inlinee index 0x" + utohexstr(inlinee.getIndex()));
- }
- }
-
- // Add the modified inlinee line subsection directly.
- file.moduleDBI->addDebugSubsection(inlineeSubsection);
-}
-
void DebugSHandler::finish() {
pdb::DbiStreamBuilder &dbiBuilder = linker.builder.getDbiBuilder();
+ // If we found any symbol records for the module symbol stream, defer them.
+ if (moduleStreamSize > kSymbolStreamMagicSize)
+ file.moduleDBI->addUnmergedSymbols(&file, moduleStreamSize -
+ kSymbolStreamMagicSize);
+
// We should have seen all debug subsections across the entire object file now
// which means that if a StringTable subsection and Checksums subsection were
// present, now is the time to handle them.
fatal(".debug$S sections with a checksums subsection must also contain a "
"string table subsection");
- if (!stringTableReferences.empty())
+ if (!stringTableFixups.empty())
warn("No StringTable subsection was encountered, but there are string "
"table references");
return;
}
- // Rewrite string table indices in the Fpo Data and symbol records to refer to
- // the global PDB string table instead of the object file string table.
- for (DebugFrameDataSubsectionRef &fds : newFpoFrames) {
- const ulittle32_t *reloc = fds.getRelocPtr();
+ // Handle FPO data. Each subsection begins with a single image base
+ // relocation, which is then added to the RvaStart of each frame data record
+ // when it is added to the PDB. The string table indices for the FPO program
+ // must also be rewritten to use the PDB string table.
+ for (const UnrelocatedFpoData &subsec : frameDataSubsecs) {
+    // Relocate the first four bytes of the subsection and reinterpret them
+    // as a 32-bit integer.
+ SectionChunk *debugChunk = subsec.debugChunk;
+ ArrayRef<uint8_t> subsecData = subsec.subsecData;
+ uint32_t relocIndex = subsec.relocIndex;
+ auto unrelocatedRvaStart = subsecData.take_front(sizeof(uint32_t));
+ uint8_t relocatedRvaStart[sizeof(uint32_t)];
+ debugChunk->writeAndRelocateSubsection(debugChunk->getContents(),
+ unrelocatedRvaStart, relocIndex,
+ &relocatedRvaStart[0]);
+ uint32_t rvaStart;
+ memcpy(&rvaStart, &relocatedRvaStart[0], sizeof(uint32_t));
+
+ // Copy each frame data record, add in rvaStart, translate string table
+ // indices, and add the record to the PDB.
+ DebugFrameDataSubsectionRef fds;
+ BinaryStreamReader reader(subsecData, support::little);
+ exitOnErr(fds.initialize(reader));
for (codeview::FrameData fd : fds) {
- fd.RvaStart += *reloc;
+ fd.RvaStart += rvaStart;
fd.FrameFunc =
translateStringTableIndex(fd.FrameFunc, cvStrTab, linker.pdbStrTab);
dbiBuilder.addNewFpoData(fd);
}
}
- for (ulittle32_t *ref : stringTableReferences)
- *ref = translateStringTableIndex(*ref, cvStrTab, linker.pdbStrTab);
+ // Translate the fixups and pass them off to the module builder so they will
+ // be applied during writing.
+ for (StringTableFixup &ref : stringTableFixups) {
+ ref.StrTabOffset =
+ translateStringTableIndex(ref.StrTabOffset, cvStrTab, linker.pdbStrTab);
+ }
+ file.moduleDBI->setStringTableFixups(std::move(stringTableFixups));
// Make a new file checksum table that refers to offsets in the PDB-wide
// string table. Generally the string table subsection appears after the
warn(msg);
}
-const CVIndexMap *PDBLinker::mergeTypeRecords(TpiSource *source,
- CVIndexMap *localMap) {
- ScopedTimer t(typeMergingTimer);
- // Before we can process symbol substreams from .debug$S, we need to process
- // type information, file checksums, and the string table. Add type info to
- // the PDB first, so that we can get the map from object file type and item
- // indices to PDB type and item indices.
- Expected<const CVIndexMap *> r = source->mergeDebugT(&tMerger, localMap);
-
- // If the .debug$T sections fail to merge, assume there is no debug info.
- if (!r) {
- warnUnusable(source->file, r.takeError());
- return nullptr;
- }
- return *r;
-}
-
// Allocate memory for a .debug$S / .debug$F section and relocate it.
static ArrayRef<uint8_t> relocateDebugChunk(SectionChunk &debugChunk) {
uint8_t *buffer = bAlloc.Allocate<uint8_t>(debugChunk.getSize());
return makeArrayRef(buffer, debugChunk.getSize());
}
-void PDBLinker::addDebugSymbols(ObjFile *file, const CVIndexMap *indexMap) {
+void PDBLinker::addDebugSymbols(TpiSource *source) {
+ // If this TpiSource doesn't have an object file, it must be from a type
+ // server PDB. Type server PDBs do not contain symbols, so stop here.
+ if (!source->file)
+ return;
+
ScopedTimer t(symbolMergingTimer);
pdb::DbiStreamBuilder &dbiBuilder = builder.getDbiBuilder();
- DebugSHandler dsh(*this, *file, indexMap);
+ DebugSHandler dsh(*this, *source->file, source);
// Now do all live .debug$S and .debug$F sections.
- for (SectionChunk *debugChunk : file->getDebugChunks()) {
+ for (SectionChunk *debugChunk : source->file->getDebugChunks()) {
if (!debugChunk->live || debugChunk->getSize() == 0)
continue;
if (!isDebugS && !isDebugF)
continue;
- ArrayRef<uint8_t> relocatedDebugContents = relocateDebugChunk(*debugChunk);
-
if (isDebugS) {
- dsh.handleDebugS(relocatedDebugContents);
+ dsh.handleDebugS(debugChunk);
} else if (isDebugF) {
+ // Handle old FPO data .debug$F sections. These are relatively rare.
+ ArrayRef<uint8_t> relocatedDebugContents =
+ relocateDebugChunk(*debugChunk);
FixedStreamArray<object::FpoData> fpoRecords;
BinaryStreamReader reader(relocatedDebugContents, support::little);
uint32_t count = relocatedDebugContents.size() / sizeof(object::FpoData);
// path to the object into the PDB. If this is a plain object, we make its
// path absolute. If it's an object in an archive, we make the archive path
// absolute.
-static void createModuleDBI(pdb::PDBFileBuilder &builder, ObjFile *file) {
+void PDBLinker::createModuleDBI(ObjFile *file) {
pdb::DbiStreamBuilder &dbiBuilder = builder.getDbiBuilder();
SmallString<128> objName;
bool inArchive = !file->parentName.empty();
objName = inArchive ? file->parentName : file->getName();
pdbMakeAbsolute(objName);
- StringRef modName = inArchive ? file->getName() : StringRef(objName);
+ StringRef modName = inArchive ? file->getName() : objName.str();
file->moduleDBI = &exitOnErr(dbiBuilder.addModuleInfo(modName));
file->moduleDBI->setObjFileName(objName);
+ file->moduleDBI->setMergeSymbolsCallback(this, &commitSymbolsForObject);
ArrayRef<Chunk *> chunks = file->getChunks();
uint32_t modi = file->moduleDBI->getModuleIndex();
}
void PDBLinker::addDebug(TpiSource *source) {
- CVIndexMap localMap;
- const CVIndexMap *indexMap = mergeTypeRecords(source, &localMap);
+ // Before we can process symbol substreams from .debug$S, we need to process
+ // type information, file checksums, and the string table. Add type info to
+ // the PDB first, so that we can get the map from object file type and item
+ // indices to PDB type and item indices. If we are using ghashes, types have
+ // already been merged.
+ if (!config->debugGHashes) {
+ ScopedTimer t(typeMergingTimer);
+ if (Error e = source->mergeDebugT(&tMerger)) {
+ // If type merging failed, ignore the symbols.
+ warnUnusable(source->file, std::move(e));
+ return;
+ }
+ }
- if (source->kind == TpiSource::PDB)
- return; // No symbols in TypeServer PDBs
+ // If type merging failed, ignore the symbols.
+ Error typeError = std::move(source->typeMergingError);
+ if (typeError) {
+ warnUnusable(source->file, std::move(typeError));
+ return;
+ }
- addDebugSymbols(source->file, indexMap);
+ addDebugSymbols(source);
}
static pdb::BulkPublic createPublic(Defined *def) {
ScopedTimer t1(addObjectsTimer);
// Create module descriptors
- for_each(ObjFile::instances,
- [&](ObjFile *obj) { createModuleDBI(builder, obj); });
+ for_each(ObjFile::instances, [&](ObjFile *obj) { createModuleDBI(obj); });
- // Merge OBJs that do not have debug types
- for_each(ObjFile::instances, [&](ObjFile *obj) {
- if (obj->debugTypesObj)
- return;
- // Even if there're no types, still merge non-symbol .Debug$S and .Debug$F
- // sections
- addDebugSymbols(obj, nullptr);
- });
+ // Reorder dependency type sources to come first.
+ TpiSource::sortDependencies();
- // Merge dependencies
- TpiSource::forEachSource([&](TpiSource *source) {
- if (source->isDependency())
- addDebug(source);
- });
+ // Merge type information from input files using global type hashing.
+ if (config->debugGHashes)
+ tMerger.mergeTypesWithGHash();
- // Merge regular and dependent OBJs
- TpiSource::forEachSource([&](TpiSource *source) {
- if (!source->isDependency())
- addDebug(source);
- });
+ // Merge dependencies and then regular objects.
+ for_each(TpiSource::dependencySources,
+ [&](TpiSource *source) { addDebug(source); });
+ for_each(TpiSource::objectSources,
+ [&](TpiSource *source) { addDebug(source); });
builder.getStringTableBuilder().setStrings(pdbStrTab);
t1.stop();
// Construct TPI and IPI stream contents.
ScopedTimer t2(tpiStreamLayoutTimer);
- addTypeInfo(builder.getTpiBuilder(), tMerger.getTypeTable());
- addTypeInfo(builder.getIpiBuilder(), tMerger.getIDTable());
+ // Collect all the merged types.
+ if (config->debugGHashes) {
+ addGHashTypeInfo(builder);
+ } else {
+ addTypeInfo(builder.getTpiBuilder(), tMerger.getTypeTable());
+ addTypeInfo(builder.getIpiBuilder(), tMerger.getIDTable());
+ }
t2.stop();
+
+ if (config->showSummary) {
+ for_each(TpiSource::instances, [&](TpiSource *source) {
+ nbTypeRecords += source->nbTypeRecords;
+ nbTypeRecordsBytes += source->nbTypeRecordsBytes;
+ });
+ }
}
void PDBLinker::addPublicsToPDB() {
// Only emit external, defined, live symbols that have a chunk. Static,
// non-external symbols do not appear in the symbol table.
auto *def = dyn_cast<Defined>(s);
- if (def && def->isLive() && def->getChunk())
+ if (def && def->isLive() && def->getChunk()) {
+ // Don't emit a public symbol for coverage data symbols. LLVM code
+ // coverage (and PGO) create a __profd_ and __profc_ symbol for every
+ // function. C++ mangled names are long, and tend to dominate symbol size.
+ // Including these names triples the size of the public stream, which
+ // results in bloated PDB files. These symbols generally are not helpful
+ // for debugging, so suppress them.
+ StringRef name = def->getName();
+ if (name.data()[0] == '_' && name.data()[1] == '_') {
+ // Drop the '_' prefix for x86.
+ if (config->machine == I386)
+ name = name.drop_front(1);
+ if (name.startswith("__profd_") || name.startswith("__profc_") ||
+ name.startswith("__covrec_")) {
+ return;
+ }
+ }
publics.push_back(createPublic(def));
+ }
});
if (!publics.empty()) {
"Input OBJ files (expanded from all cmd-line inputs)");
print(TpiSource::countTypeServerPDBs(), "PDB type server dependencies");
print(TpiSource::countPrecompObjs(), "Precomp OBJ dependencies");
- print(tMerger.getTypeTable().size() + tMerger.getIDTable().size(),
- "Merged TPI records");
+ print(nbTypeRecords, "Input type records");
+ print(nbTypeRecordsBytes, "Input type records bytes");
+ print(builder.getTpiBuilder().getRecordCount(), "Merged TPI records");
+ print(builder.getIpiBuilder().getRecordCount(), "Merged IPI records");
print(pdbStrTab.size(), "Output PDB strings");
print(globalSymbols, "Global symbol records");
print(moduleSymbols, "Module symbol records");
}
};
- printLargeInputTypeRecs("TPI", tMerger.tpiCounts, tMerger.getTypeTable());
- printLargeInputTypeRecs("IPI", tMerger.ipiCounts, tMerger.getIDTable());
+ if (!config->debugGHashes) {
+ // FIXME: Reimplement for ghash.
+ printLargeInputTypeRecs("TPI", tMerger.tpiCounts, tMerger.getTypeTable());
+ printLargeInputTypeRecs("IPI", tMerger.ipiCounts, tMerger.getIDTable());
+ }
message(buffer);
}
warn("Cannot open input file: " + file);
continue;
}
- builder.addInjectedSource(file, std::move(*dataOrErr));
+ std::unique_ptr<MemoryBuffer> data = std::move(*dataOrErr);
+
+ // Can't use takeBuffer() here since addInjectedSource() takes ownership.
+ if (driver->tar)
+ driver->tar->append(relativeToRoot(data->getBufferIdentifier()),
+ data->getBuffer());
+
+ builder.addInjectedSource(file, std::move(data));
}
}
warn("Cannot open input file: " + file);
continue;
}
- exitOnErr(builder.addNamedStream(stream, (*dataOrErr)->getBuffer()));
+ std::unique_ptr<MemoryBuffer> data = std::move(*dataOrErr);
+ exitOnErr(builder.addNamedStream(stream, data->getBuffer()));
+ driver->takeBuffer(std::move(data));
}
}
mod->addSymbol(codeview::SymbolSerializer::writeOneSymbol(
cs, bAlloc, CodeViewContainer::Pdb));
- SmallVector<SymbolScope, 4> scopes;
CVSymbol newSym = codeview::SymbolSerializer::writeOneSymbol(
ts, bAlloc, CodeViewContainer::Pdb);
- scopeStackOpen(scopes, mod->getNextSymbolOffset(), newSym);
+
+ // Write ptrEnd for the S_THUNK32.
+ ScopeRecord *thunkSymScope =
+ getSymbolScopeFields(const_cast<uint8_t *>(newSym.data().data()));
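+  // getSymbolScopeFields() returns a pointer into the serialized S_THUNK32
+  // record so that its ptrEnd field can be patched with the offset of the
+  // matching S_END symbol below.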
mod->addSymbol(newSym);
newSym = codeview::SymbolSerializer::writeOneSymbol(es, bAlloc,
CodeViewContainer::Pdb);
- scopeStackClose(scopes, mod->getNextSymbolOffset(), file);
+ thunkSymScope->ptrEnd = mod->getNextSymbolOffset();
mod->addSymbol(newSym);
}
void PDBLinker::commit(codeview::GUID *guid) {
- ExitOnError exitOnErr((config->pdbPath + ": ").str());
- // Write to a file.
- exitOnErr(builder.commit(config->pdbPath, guid));
+ // Print an error and continue if PDB writing fails. This is done mainly so
+ // the user can see the output of /time and /summary, which is very helpful
+ // when trying to figure out why a PDB file is too large.
+ if (Error e = builder.commit(config->pdbPath, guid)) {
+ checkError(std::move(e));
+ error("failed to write PDB file " + Twine(config->pdbPath));
+ }
}
static uint32_t getSecrelReloc() {
}
namespace lld {
+class Timer;
+
namespace coff {
class OutputSection;
class SectionChunk;
llvm::Optional<std::pair<llvm::StringRef, uint32_t>>
getFileLineCodeView(const SectionChunk *c, uint32_t addr);
+
+extern Timer loadGHashTimer;
+extern Timer mergeGHashTimer;
+
} // namespace coff
} // namespace lld
namespace lld {
namespace coff {
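+// Remove a single leading character if it is in `chars`, e.g.
+// ltrim1("_foo@4", "_@") returns "foo@4".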
+StringRef ltrim1(StringRef s, const char *chars) {
+ if (!s.empty() && strchr(chars, s[0]))
+ return s.substr(1);
+ return s;
+}
+
static Timer ltoTimer("LTO", Timer::root());
SymbolTable *symtab;
case Symbol::Kind::LazyObjectKind:
cast<LazyObject>(s)->file->fetch();
break;
+ case Symbol::Kind::LazyDLLSymbolKind: {
+ auto *l = cast<LazyDLLSymbol>(s);
+ l->file->makeImport(l->sym);
+ break;
+ }
default:
llvm_unreachable(
"symbol passed to forceLazy is not a LazyArchive or LazyObject");
errorOrWarn(os.str());
}
-void SymbolTable::loadMinGWAutomaticImports() {
+void SymbolTable::loadMinGWSymbols() {
for (auto &i : symMap) {
Symbol *sym = i.second;
auto *undef = dyn_cast<Undefined>(sym);
StringRef name = undef->getName();
- if (name.startswith("__imp_"))
- continue;
- // If we have an undefined symbol, but we have a lazy symbol we could
- // load, load it.
- Symbol *l = find(("__imp_" + name).str());
- if (!l || l->pendingArchiveLoad || !l->isLazy())
- continue;
+ if (config->machine == I386 && config->stdcallFixup) {
+ // Check if we can resolve an undefined decorated symbol by finding
+      // the intended target as an undecorated symbol (only with a leading
+ // underscore).
+ StringRef origName = name;
+ StringRef baseName = name;
+ // Trim down stdcall/fastcall/vectorcall symbols to the base name.
+ baseName = ltrim1(baseName, "_@");
+ baseName = baseName.substr(0, baseName.find('@'));
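+      // e.g. the stdcall name "_foo@4" and the fastcall name "@foo@8" both
+      // become "foo" here, and are then looked up as the cdecl-style "_foo".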
+ // Add a leading underscore, as it would be in cdecl form.
+ std::string newName = ("_" + baseName).str();
+ Symbol *l;
+ if (newName != origName && (l = find(newName)) != nullptr) {
+        // If we found a symbol and it is lazy, load it.
+ if (l->isLazy() && !l->pendingArchiveLoad) {
+ log("Loading lazy " + l->getName() + " from " +
+ l->getFile()->getName() + " for stdcall fixup");
+ forceLazy(l);
+ }
+ // If it's lazy or already defined, hook it up as weak alias.
+ if (l->isLazy() || isa<Defined>(l)) {
+ if (config->warnStdcallFixup)
+ warn("Resolving " + origName + " by linking to " + newName);
+ else
+ log("Resolving " + origName + " by linking to " + newName);
+ undef->weakAlias = l;
+ continue;
+ }
+ }
+ }
+
+ if (config->autoImport) {
+ if (name.startswith("__imp_"))
+ continue;
+ // If we have an undefined symbol, but we have a lazy symbol we could
+ // load, load it.
+ Symbol *l = find(("__imp_" + name).str());
+ if (!l || l->pendingArchiveLoad || !l->isLazy())
+ continue;
- log("Loading lazy " + l->getName() + " from " + l->getFile()->getName() +
- " for automatic import");
- forceLazy(l);
+ log("Loading lazy " + l->getName() + " from " + l->getFile()->getName() +
+ " for automatic import");
+ forceLazy(l);
+ }
}
}
for (auto &i : symMap) {
Symbol *sym = i.second;
auto *undef = dyn_cast<Undefined>(sym);
- if (!undef)
+ if (!undef || sym->deferUndefined)
continue;
if (undef->getWeakAlias())
continue;
}
if (name.contains("_PchSym_"))
continue;
- if (config->mingw && impSymbol(name))
+ if (config->autoImport && impSymbol(name))
continue;
undefs.insert(sym);
}
sym = reinterpret_cast<Symbol *>(make<SymbolUnion>());
sym->isUsedInRegularObj = false;
sym->pendingArchiveLoad = false;
+ sym->canInline = true;
inserted = true;
}
return {sym, inserted};
f->fetch();
}
+void SymbolTable::addLazyDLLSymbol(DLLFile *f, DLLFile::Symbol *sym,
+ StringRef n) {
+ Symbol *s;
+ bool wasInserted;
+ std::tie(s, wasInserted) = insert(n);
+ if (wasInserted) {
+ replaceSymbol<LazyDLLSymbol>(s, f, sym, n);
+ return;
+ }
+ auto *u = dyn_cast<Undefined>(s);
+ if (!u || u->weakAlias || s->pendingArchiveLoad)
+ return;
+ s->pendingArchiveLoad = true;
+ f->makeImport(sym);
+}
+
static std::string getSourceLocationBitcode(BitcodeFile *file) {
std::string res("\n>>> defined at ");
StringRef source = file->obj->getSourceFileName();
// symbols and warn about imported local symbols.
void resolveRemainingUndefines();
- void loadMinGWAutomaticImports();
+ // Load lazy objects that are needed for MinGW automatic import and for
+ // doing stdcall fixups.
+ void loadMinGWSymbols();
bool handleMinGWAutomaticImport(Symbol *sym, StringRef name);
// Returns a list of chunks of selected symbols.
Symbol *addUndefined(StringRef name, InputFile *f, bool isWeakAlias);
void addLazyArchive(ArchiveFile *f, const Archive::Symbol &sym);
void addLazyObject(LazyObjFile *f, StringRef n);
+ void addLazyDLLSymbol(DLLFile *f, DLLFile::Symbol *sym, StringRef n);
Symbol *addAbsolute(StringRef n, COFFSymbolRef s);
Symbol *addRegular(InputFile *f, StringRef n,
const llvm::object::coff_symbol_generic *s = nullptr,
std::vector<std::string> getSymbolLocations(ObjFile *file, uint32_t symIndex);
+StringRef ltrim1(StringRef s, const char *chars);
+
} // namespace coff
} // namespace lld
return sym->file;
if (auto *sym = dyn_cast<LazyObject>(this))
return sym->file;
+ if (auto *sym = dyn_cast<LazyDLLSymbol>(this))
+ return sym->file;
return nullptr;
}
UndefinedKind,
LazyArchiveKind,
LazyObjectKind,
+ LazyDLLSymbolKind,
LastDefinedCOFFKind = DefinedCommonKind,
LastDefinedKind = DefinedSyntheticKind,
bool isLive() const;
bool isLazy() const {
- return symbolKind == LazyArchiveKind || symbolKind == LazyObjectKind;
+ return symbolKind == LazyArchiveKind || symbolKind == LazyObjectKind ||
+ symbolKind == LazyDLLSymbolKind;
}
private:
explicit Symbol(Kind k, StringRef n = "")
: symbolKind(k), isExternal(true), isCOMDAT(false),
writtenToSymtab(false), pendingArchiveLoad(false), isGCRoot(false),
- isRuntimePseudoReloc(false), nameSize(n.size()),
- nameData(n.empty() ? nullptr : n.data()) {}
+ isRuntimePseudoReloc(false), deferUndefined(false), canInline(true),
+ nameSize(n.size()), nameData(n.empty() ? nullptr : n.data()) {}
const unsigned symbolKind : 8;
unsigned isExternal : 1;
unsigned isRuntimePseudoReloc : 1;
+ // True if we want to allow this symbol to be undefined in the early
+ // undefined check pass in SymbolTable::reportUnresolvable(), as it
+ // might be fixed up later.
+ unsigned deferUndefined : 1;
+
+ // False if LTO shouldn't inline whatever this symbol points to. If a symbol
+ // is overwritten after LTO, LTO shouldn't inline the symbol because it
+ // doesn't know the final contents of the symbol.
+ unsigned canInline : 1;
+
protected:
// Symbol name length. Assume symbol lengths fit in a 32-bit integer.
uint32_t nameSize;
LazyObjFile *file;
};
+// MinGW only.
+class LazyDLLSymbol : public Symbol {
+public:
+ LazyDLLSymbol(DLLFile *f, DLLFile::Symbol *s, StringRef n)
+ : Symbol(LazyDLLSymbolKind, n), file(f), sym(s) {}
+ static bool classof(const Symbol *s) {
+ return s->kind() == LazyDLLSymbolKind;
+ }
+
+ DLLFile *file;
+ DLLFile::Symbol *sym;
+};
+
// Undefined symbols.
class Undefined : public Symbol {
public:
uint16_t getOrdinal() { return file->hdr->OrdinalHint; }
ImportFile *file;
+
+ // This is a pointer to the synthetic symbol associated with the load thunk
+ // for this symbol that will be called if the DLL is delay-loaded. This is
+ // needed for Control Flow Guard because if this DefinedImportData symbol is a
+ // valid call target, the corresponding load thunk must also be marked as a
+ // valid call target.
+ DefinedSynthetic *loadThunkSym = nullptr;
};
// This class represents a symbol for a jump table entry which jumps
return cast<DefinedRegular>(this)->getRVA();
case LazyArchiveKind:
case LazyObjectKind:
+ case LazyDLLSymbolKind:
case UndefinedKind:
llvm_unreachable("Cannot get the address for an undefined symbol.");
}
return cast<DefinedCommon>(this)->getChunk();
case LazyArchiveKind:
case LazyObjectKind:
+ case LazyDLLSymbolKind:
case UndefinedKind:
llvm_unreachable("Cannot get the chunk of an undefined symbol.");
}
alignas(DefinedImportThunk) char h[sizeof(DefinedImportThunk)];
alignas(DefinedLocalImport) char i[sizeof(DefinedLocalImport)];
alignas(LazyObject) char j[sizeof(LazyObject)];
+ alignas(LazyDLLSymbol) char k[sizeof(LazyDLLSymbol)];
};
template <typename T, typename... ArgT>
"SymbolUnion not aligned enough");
assert(static_cast<Symbol *>(static_cast<T *>(nullptr)) == nullptr &&
"Not a Symbol");
+ bool canInline = s->canInline;
new (s) T(std::forward<ArgT>(arg)...);
+ s->canInline = canInline;
}
} // namespace coff
#define LLD_COFF_TYPEMERGER_H
#include "Config.h"
-#include "llvm/DebugInfo/CodeView/GlobalTypeTableBuilder.h"
#include "llvm/DebugInfo/CodeView/MergingTypeTableBuilder.h"
+#include "llvm/DebugInfo/CodeView/TypeHashing.h"
#include "llvm/Support/Allocator.h"
+#include <atomic>
namespace lld {
namespace coff {
+using llvm::codeview::GloballyHashedType;
+using llvm::codeview::TypeIndex;
+
+struct GHashState;
+
class TypeMerger {
public:
- TypeMerger(llvm::BumpPtrAllocator &alloc)
- : typeTable(alloc), idTable(alloc), globalTypeTable(alloc),
- globalIDTable(alloc) {}
+ TypeMerger(llvm::BumpPtrAllocator &alloc);
+
+ ~TypeMerger();
/// Get the type table or the global type table if /DEBUG:GHASH is enabled.
inline llvm::codeview::TypeCollection &getTypeTable() {
- if (config->debugGHashes)
- return globalTypeTable;
+ assert(!config->debugGHashes);
return typeTable;
}
/// Get the ID table or the global ID table if /DEBUG:GHASH is enabled.
inline llvm::codeview::TypeCollection &getIDTable() {
- if (config->debugGHashes)
- return globalIDTable;
+ assert(!config->debugGHashes);
return idTable;
}
+ /// Use global hashes to eliminate duplicate types and identify unique type
+ /// indices in each TpiSource.
+ void mergeTypesWithGHash();
+
+ /// Map from PDB function id type indexes to PDB function type indexes.
+ /// Populated after mergeTypesWithGHash.
+ llvm::DenseMap<TypeIndex, TypeIndex> funcIdToType;
+
/// Type records that will go into the PDB TPI stream.
llvm::codeview::MergingTypeTableBuilder typeTable;
/// Item records that will go into the PDB IPI stream.
llvm::codeview::MergingTypeTableBuilder idTable;
- /// Type records that will go into the PDB TPI stream (for /DEBUG:GHASH)
- llvm::codeview::GlobalTypeTableBuilder globalTypeTable;
-
- /// Item records that will go into the PDB IPI stream (for /DEBUG:GHASH)
- llvm::codeview::GlobalTypeTableBuilder globalIDTable;
-
// When showSummary is enabled, these are histograms of TPI and IPI records
// keyed by type index.
SmallVector<uint32_t, 0> tpiCounts;
SmallVector<uint32_t, 0> ipiCounts;
};
-/// Map from type index and item index in a type server PDB to the
-/// corresponding index in the destination PDB.
-struct CVIndexMap {
- llvm::SmallVector<llvm::codeview::TypeIndex, 0> tpiMap;
- llvm::SmallVector<llvm::codeview::TypeIndex, 0> ipiMap;
- bool isTypeServerMap = false;
- bool isPrecompiledTypeMap = false;
-};
-
} // namespace coff
} // namespace lld
//===----------------------------------------------------------------------===//
#include "Writer.h"
+#include "CallGraphSort.h"
#include "Config.h"
#include "DLL.h"
#include "InputFiles.h"
return osidx == 0 ? nullptr : outputSections[osidx - 1];
}
+void OutputSection::clear() { outputSections.clear(); }
+
namespace {
class DebugDirectoryChunk : public NonSectionChunk {
void markSymbolsForRVATable(ObjFile *file,
ArrayRef<SectionChunk *> symIdxChunks,
SymbolRVASet &tableSymbols);
+ void getSymbolsFromSections(ObjFile *file,
+ ArrayRef<SectionChunk *> symIdxChunks,
+ std::vector<Symbol *> &symbols);
void maybeAddRVATable(SymbolRVASet tableSymbols, StringRef tableSym,
- StringRef countSym);
+ StringRef countSym, bool hasFlag=false);
void setSectionPermissions();
void writeSections();
void writeBuildId();
+ void sortSections();
void sortExceptionTable();
void sortCRTSectionChunks(std::vector<Chunk *> &chunks);
void addSyntheticIdata();
void fixPartialSectionChars(StringRef name, uint32_t chars);
bool fixGnuImportChunks();
+ void fixTlsAlignment();
PartialSection *createPartialSection(StringRef name, uint32_t outChars);
PartialSection *findPartialSection(StringRef name, uint32_t outChars);
DelayLoadContents delayIdata;
EdataContents edata;
bool setNoSEHCharacteristic = false;
+ uint32_t tlsAlignment = 0;
DebugDirectoryChunk *debugDirectory = nullptr;
std::vector<std::pair<COFF::DebugType, Chunk *>> debugRecords;
// If the verification above thought we needed thunks, we should have
// added some.
assert(addressesChanged);
+ (void)addressesChanged;
// Recalculate the layout for the whole image (and verify the ranges at
// the start of the next round).
void Writer::run() {
ScopedTimer t1(codeLayoutTimer);
- // First, clear the output sections from previous runs
- outputSections.clear();
-
createImportTables();
createSections();
- createMiscChunks();
appendImportThunks();
+ // Import thunks must be added before the Control Flow Guard tables are added.
+ createMiscChunks();
createExportTable();
mergeSections();
removeUnusedSections();
writeSections();
sortExceptionTable();
+  // Fix up the alignment in the TLS Directory's Characteristics field,
+  // if a specific alignment value is needed.
+ if (tlsAlignment)
+ fixTlsAlignment();
+
t1.stop();
if (!config->pdbPath.empty() && config->debug) {
name.startswith(".xdata$") || name.startswith(".eh_frame$");
}
+void Writer::sortSections() {
+ if (!config->callGraphProfile.empty()) {
+ DenseMap<const SectionChunk *, int> order = computeCallGraphProfileOrder();
+ for (auto it : order) {
+ if (DefinedRegular *sym = it.first->sym)
+ config->order[sym->getName()] = it.second;
+ }
+ }
+ if (!config->order.empty())
+ for (auto it : partialSections)
+ sortBySectionOrder(it.second->chunks);
+}
+
// Create output section objects and add them to OutputSections.
void Writer::createSections() {
// First, create the builtin sections.
StringRef name = c->getSectionName();
if (shouldStripSectionSuffix(sc, name))
name = name.split('$').first;
+
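+    // Remember the largest alignment of any .tls chunk; it is written into
+    // the TLS directory by fixTlsAlignment() once the image is laid out.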
+ if (name.startswith(".tls"))
+ tlsAlignment = std::max(tlsAlignment, c->getAlignment());
+
PartialSection *pSec = createPartialSection(name,
c->getOutputCharacteristics());
pSec->chunks.push_back(c);
if (hasIdata)
addSyntheticIdata();
- // Process an /order option.
- if (!config->order.empty())
- for (auto it : partialSections)
- sortBySectionOrder(it.second->chunks);
+ sortSections();
if (hasIdata)
locateImportTables();
}
if (config->cetCompat) {
- ExtendedDllCharacteristicsChunk *extendedDllChars =
- make<ExtendedDllCharacteristicsChunk>(
- IMAGE_DLL_CHARACTERISTICS_EX_CET_COMPAT);
- debugRecords.push_back(
- {COFF::IMAGE_DEBUG_TYPE_EX_DLLCHARACTERISTICS, extendedDllChars});
+ debugRecords.push_back({COFF::IMAGE_DEBUG_TYPE_EX_DLLCHARACTERISTICS,
+ make<ExtendedDllCharacteristicsChunk>(
+ IMAGE_DLL_CHARACTERISTICS_EX_CET_COMPAT)});
}
- if (debugRecords.size() > 0) {
- for (std::pair<COFF::DebugType, Chunk *> r : debugRecords)
- debugInfoSec->addChunk(r.second);
+ // Align and add each chunk referenced by the debug data directory.
+ for (std::pair<COFF::DebugType, Chunk *> r : debugRecords) {
+ r.second->setAlignment(4);
+ debugInfoSec->addChunk(r.second);
}
// Create SEH table. x86-only.
edataStart = edataSec->chunks.front();
edataEnd = edataSec->chunks.back();
}
+ // Warn on exported deleting destructor.
+ for (auto e : config->exports)
+ if (e.sym && e.sym->getName().startswith("??_G"))
+ warn("export of deleting dtor: " + toString(*e.sym));
}
void Writer::removeUnusedSections() {
pe->MinorImageVersion = config->minorImageVersion;
pe->MajorOperatingSystemVersion = config->majorOSVersion;
pe->MinorOperatingSystemVersion = config->minorOSVersion;
- pe->MajorSubsystemVersion = config->majorOSVersion;
- pe->MinorSubsystemVersion = config->minorOSVersion;
+ pe->MajorSubsystemVersion = config->majorSubsystemVersion;
+ pe->MinorSubsystemVersion = config->minorSubsystemVersion;
pe->Subsystem = config->subsystem;
pe->SizeOfImage = sizeOfImage;
pe->SizeOfHeaders = sizeOfHeaders;
break;
case Symbol::LazyArchiveKind:
case Symbol::LazyObjectKind:
+ case Symbol::LazyDLLSymbolKind:
case Symbol::UndefinedKind:
// Undefined symbols resolve to zero, so they don't have an RVA. Lazy
// symbols shouldn't have relocations.
// table.
void Writer::createGuardCFTables() {
SymbolRVASet addressTakenSyms;
+ SymbolRVASet giatsRVASet;
+ std::vector<Symbol *> giatsSymbols;
SymbolRVASet longJmpTargets;
+ SymbolRVASet ehContTargets;
for (ObjFile *file : ObjFile::instances) {
// If the object was compiled with /guard:cf, the address taken symbols
- // are in .gfids$y sections, and the longjmp targets are in .gljmp$y
- // sections. If the object was not compiled with /guard:cf, we assume there
- // were no setjmp targets, and that all code symbols with relocations are
- // possibly address-taken.
+ // are in .gfids$y sections, the longjmp targets are in .gljmp$y sections,
+ // and ehcont targets are in .gehcont$y sections. If the object was not
+ // compiled with /guard:cf, we assume there were no setjmp and ehcont
+ // targets, and that all code symbols with relocations are possibly
+ // address-taken.
if (file->hasGuardCF()) {
markSymbolsForRVATable(file, file->getGuardFidChunks(), addressTakenSyms);
+ markSymbolsForRVATable(file, file->getGuardIATChunks(), giatsRVASet);
+ getSymbolsFromSections(file, file->getGuardIATChunks(), giatsSymbols);
markSymbolsForRVATable(file, file->getGuardLJmpChunks(), longJmpTargets);
+ markSymbolsForRVATable(file, file->getGuardEHContChunks(), ehContTargets);
} else {
markSymbolsWithRelocations(file, addressTakenSyms);
}
for (Export &e : config->exports)
maybeAddAddressTakenFunction(addressTakenSyms, e.sym);
+ // For each entry in the .giats table, check if it has a corresponding load
+ // thunk (e.g. because the DLL that defines it will be delay-loaded) and, if
+ // so, add the load thunk to the address taken (.gfids) table.
+ for (Symbol *s : giatsSymbols) {
+ if (auto *di = dyn_cast<DefinedImportData>(s)) {
+ if (di->loadThunkSym)
+ addSymbolToRVASet(addressTakenSyms, di->loadThunkSym);
+ }
+ }
+
// Ensure sections referenced in the gfid table are 16-byte aligned.
for (const ChunkAndOffset &c : addressTakenSyms)
if (c.inputChunk->getAlignment() < 16)
maybeAddRVATable(std::move(addressTakenSyms), "__guard_fids_table",
"__guard_fids_count");
+ // Add the Guard Address Taken IAT Entry Table (.giats).
+ maybeAddRVATable(std::move(giatsRVASet), "__guard_iat_table",
+ "__guard_iat_count");
+
// Add the longjmp target table unless the user told us not to.
- if (config->guardCF == GuardCFLevel::Full)
+ if (config->guardCF & GuardCFLevel::LongJmp)
maybeAddRVATable(std::move(longJmpTargets), "__guard_longjmp_table",
"__guard_longjmp_count");
+ // Add the ehcont target table unless the user told us not to.
+ if (config->guardCF & GuardCFLevel::EHCont)
+ maybeAddRVATable(std::move(ehContTargets), "__guard_eh_cont_table",
+ "__guard_eh_cont_count", true);
+
// Set __guard_flags, which will be used in the load config to indicate that
// /guard:cf was enabled.
uint32_t guardFlags = uint32_t(coff_guard_flags::CFInstrumented) |
uint32_t(coff_guard_flags::HasFidTable);
- if (config->guardCF == GuardCFLevel::Full)
+ if (config->guardCF & GuardCFLevel::LongJmp)
guardFlags |= uint32_t(coff_guard_flags::HasLongJmpTable);
+ if (config->guardCF & GuardCFLevel::EHCont)
+ guardFlags |= uint32_t(coff_guard_flags::HasEHContTable);
Symbol *flagSym = symtab->findUnderscore("__guard_flags");
cast<DefinedAbsolute>(flagSym)->setVA(guardFlags);
}
// Take a list of input sections containing symbol table indices and add those
-// symbols to an RVA table. The challenge is that symbol RVAs are not known and
+// symbols to a vector. The challenge is that symbol RVAs are not known and
// depend on the table size, so we can't directly build a set of integers.
-void Writer::markSymbolsForRVATable(ObjFile *file,
+void Writer::getSymbolsFromSections(ObjFile *file,
ArrayRef<SectionChunk *> symIdxChunks,
- SymbolRVASet &tableSymbols) {
+ std::vector<Symbol *> &symbols) {
for (SectionChunk *c : symIdxChunks) {
// Skip sections discarded by linker GC. This comes up when a .gfids section
// is associated with something like a vtable and the vtable is discarded.
}
// Read each symbol table index and check if that symbol was included in the
- // final link. If so, add it to the table symbol set.
+ // final link. If so, add it to the vector of symbols.
ArrayRef<ulittle32_t> symIndices(
reinterpret_cast<const ulittle32_t *>(data.data()), data.size() / 4);
ArrayRef<Symbol *> objSymbols = file->getSymbols();
}
if (Symbol *s = objSymbols[symIndex]) {
if (s->isLive())
- addSymbolToRVASet(tableSymbols, cast<Defined>(s));
+ symbols.push_back(cast<Symbol>(s));
}
}
}
}
+// Take a list of input sections containing symbol table indices and add those
+// symbols to an RVA table.
+void Writer::markSymbolsForRVATable(ObjFile *file,
+ ArrayRef<SectionChunk *> symIdxChunks,
+ SymbolRVASet &tableSymbols) {
+ std::vector<Symbol *> syms;
+ getSymbolsFromSections(file, symIdxChunks, syms);
+
+ for (Symbol *s : syms)
+ addSymbolToRVASet(tableSymbols, cast<Defined>(s));
+}
+
// Replace the absolute table symbol with a synthetic symbol pointing to
// tableChunk so that we can emit base relocations for it and resolve section
// relative relocations.
void Writer::maybeAddRVATable(SymbolRVASet tableSymbols, StringRef tableSym,
- StringRef countSym) {
+ StringRef countSym, bool hasFlag) {
if (tableSymbols.empty())
return;
- RVATableChunk *tableChunk = make<RVATableChunk>(std::move(tableSymbols));
+ NonSectionChunk *tableChunk;
+ if (hasFlag)
+ tableChunk = make<RVAFlagTableChunk>(std::move(tableSymbols));
+ else
+ tableChunk = make<RVATableChunk>(std::move(tableSymbols));
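+  // A plain RVA table holds 4-byte RVA entries; a flag table appends a
+  // 1-byte flag to each RVA (5 bytes per entry), hence the divisor below.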
rdataSec->addChunk(tableChunk);
Symbol *t = symtab->findUnderscore(tableSym);
Symbol *c = symtab->findUnderscore(countSym);
replaceSymbol<DefinedSynthetic>(t, t->getName(), tableChunk);
- cast<DefinedAbsolute>(c)->setVA(tableChunk->getSize() / 4);
+ cast<DefinedAbsolute>(c)->setVA(tableChunk->getSize() / (hasFlag ? 5 : 4));
}
// MinGW specific. Gather all relocations that are imported from a DLL even
return it->second;
return nullptr;
}
+
+void Writer::fixTlsAlignment() {
+ Defined *tlsSym =
+ dyn_cast_or_null<Defined>(symtab->findUnderscore("_tls_used"));
+ if (!tlsSym)
+ return;
+
+ OutputSection *sec = tlsSym->getChunk()->getOutputSection();
+ assert(sec && tlsSym->getRVA() >= sec->getRVA() &&
+ "no output section for _tls_used");
+
+ uint8_t *secBuf = buffer->getBufferStart() + sec->getFileOff();
+ uint64_t tlsOffset = tlsSym->getRVA() - sec->getRVA();
+ uint64_t directorySize = config->is64()
+ ? sizeof(object::coff_tls_directory64)
+ : sizeof(object::coff_tls_directory32);
+
+ if (tlsOffset + directorySize > sec->getRawSize())
+ fatal("_tls_used sym is malformed");
+
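+  // setAlignment() stores the requested alignment in the TLS directory's
+  // Characteristics field.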
+ if (config->is64()) {
+ object::coff_tls_directory64 *tlsDir =
+ reinterpret_cast<object::coff_tls_directory64 *>(&secBuf[tlsOffset]);
+ tlsDir->setAlignment(tlsAlignment);
+ } else {
+ object::coff_tls_directory32 *tlsDir =
+ reinterpret_cast<object::coff_tls_directory32 *>(&secBuf[tlsOffset]);
+ tlsDir->setAlignment(tlsAlignment);
+ }
+}
void writeHeaderTo(uint8_t *buf);
void addContributingPartialSection(PartialSection *sec);
+ // Clear the output sections static container.
+ static void clear();
+
// Returns the size of this section in an executable memory image.
// This may be smaller than the raw size (the raw size is multiple
// of disk sector size, so there may be padding at end), or may be
return CodeGenOpt::Default;
}
-int64_t lld::args::getInteger(opt::InputArgList &args, unsigned key,
- int64_t Default) {
+static int64_t getInteger(opt::InputArgList &args, unsigned key,
+ int64_t Default, unsigned base) {
auto *a = args.getLastArg(key);
if (!a)
return Default;
int64_t v;
- if (to_integer(a->getValue(), v, 10))
+ StringRef s = a->getValue();
+ if (base == 16 && (s.startswith("0x") || s.startswith("0X")))
+ s = s.drop_front(2);
+ if (to_integer(s, v, base))
return v;
StringRef spelling = args.getArgString(a->getIndex());
return 0;
}
+int64_t lld::args::getInteger(opt::InputArgList &args, unsigned key,
+ int64_t Default) {
+ return ::getInteger(args, key, Default, 10);
+}
+
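+// Parse a base-16 value. An optional "0x"/"0X" prefix is accepted, so e.g.
+// "feed" and "0xfeed" both yield 0xfeed.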
+int64_t lld::args::getHex(opt::InputArgList &args, unsigned key,
+ int64_t Default) {
+ return ::getInteger(args, key, Default, 16);
+}
+
std::vector<StringRef> lld::args::getStrings(opt::InputArgList &args, int id) {
std::vector<StringRef> v;
for (auto *arg : args.filtered(id))
}
StringRef lld::args::getFilenameWithoutExe(StringRef path) {
- if (path.endswith_lower(".exe"))
+ if (path.endswith_insensitive(".exe"))
return sys::path::stem(path);
return sys::path::filename(path);
}
-if(NOT LLD_BUILT_STANDALONE)
- set(tablegen_deps intrinsics_gen)
-endif()
-
set(LLD_SYSTEM_LIBS ${LLVM_PTHREAD_LIB})
if(NOT HAVE_CXX_ATOMICS64_WITHOUT_LIB)
PROPERTIES GENERATED TRUE
HEADER_FILE_ONLY TRUE)
-set_property(SOURCE Version.cpp APPEND PROPERTY
- COMPILE_DEFINITIONS "HAVE_VCS_VERSION_INC")
-
add_lld_library(lldCommon
Args.cpp
DWARF.cpp
${LLD_SYSTEM_LIBS}
DEPENDS
- ${tablegen_deps}
+ intrinsics_gen
)
#include "llvm/ADT/Twine.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/DiagnosticPrinter.h"
+#include "llvm/Support/CrashRecoveryContext.h"
#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/Process.h"
+#include "llvm/Support/Program.h"
#include "llvm/Support/raw_ostream.h"
#include <mutex>
#include <regex>
-#if !defined(_MSC_VER) && !defined(__MINGW32__)
-#include <unistd.h>
-#endif
-
using namespace llvm;
using namespace lld;
raw_ostream *lld::stdoutOS;
raw_ostream *lld::stderrOS;
-raw_ostream &lld::outs() { return stdoutOS ? *stdoutOS : llvm::outs(); }
-raw_ostream &lld::errs() { return stderrOS ? *stderrOS : llvm::errs(); }
-
ErrorHandler &lld::errorHandler() {
static ErrorHandler handler;
return handler;
}
+raw_ostream &lld::outs() {
+ if (errorHandler().disableOutput)
+ return llvm::nulls();
+ return stdoutOS ? *stdoutOS : llvm::outs();
+}
+
+raw_ostream &lld::errs() {
+ if (errorHandler().disableOutput)
+ return llvm::nulls();
+ return stderrOS ? *stderrOS : llvm::errs();
+}
+
void lld::exitLld(int val) {
// Delete any temporary file, while keeping the memory mapping open.
if (errorHandler().outputBuffer)
errorHandler().outputBuffer->discard();
+  // Re-throw a possible signal or exception if it was caught by
+  // safeLldMain().
+ CrashRecoveryContext::throwIfCrash(val);
+
// Dealloc/destroy ManagedStatic variables before calling _exit().
// In an LTO build, allows us to get the output of -time-passes.
// Ensures that the thread pool for the parallel algorithms is stopped to
// avoid intermittent crashes on Windows when exiting.
- llvm_shutdown();
+ if (!CrashRecoveryContext::GetCurrent())
+ llvm_shutdown();
{
std::lock_guard<std::mutex> lock(mu);
lld::outs().flush();
lld::errs().flush();
}
- _exit(val);
+ // When running inside safeLldMain(), restore the control flow back to the
+  // CrashRecoveryContext. Otherwise simply use _exit(), meaning no cleanup,
+ // since we want to avoid further crashes on shutdown.
+ llvm::sys::Process::Exit(val, /*NoCleanup=*/true);
}
void lld::diagnosticHandler(const DiagnosticInfo &di) {
}
void ErrorHandler::log(const Twine &msg) {
- if (!verbose)
+ if (!verbose || disableOutput)
return;
std::lock_guard<std::mutex> lock(mu);
lld::errs() << logName << ": " << msg << "\n";
}
void ErrorHandler::message(const Twine &msg) {
+ if (disableOutput)
+ return;
std::lock_guard<std::mutex> lock(mu);
lld::outs() << msg << "\n";
lld::outs().flush();
exitLld(1);
}
+void ErrorHandler::error(const Twine &msg, ErrorTag tag,
+ ArrayRef<StringRef> args) {
+ if (errorHandlingScript.empty()) {
+ error(msg);
+ return;
+ }
+ SmallVector<StringRef, 4> scriptArgs;
+ scriptArgs.push_back(errorHandlingScript);
+ switch (tag) {
+ case ErrorTag::LibNotFound:
+ scriptArgs.push_back("missing-lib");
+ break;
+ case ErrorTag::SymbolNotFound:
+ scriptArgs.push_back("undefined-symbol");
+ break;
+ }
+ scriptArgs.insert(scriptArgs.end(), args.begin(), args.end());
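+  // e.g. for ErrorTag::LibNotFound with args = {"foo.lib"}, this runs:
+  //   <errorHandlingScript> missing-lib foo.lib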
+ int res = llvm::sys::ExecuteAndWait(errorHandlingScript, scriptArgs);
+ if (res == 0) {
+ return error(msg);
+ } else {
+ // Temporarily disable error limit to make sure the two calls to error(...)
+ // only count as one.
+ uint64_t currentErrorLimit = errorLimit;
+ errorLimit = 0;
+ error(msg);
+ errorLimit = currentErrorLimit;
+ --errorCount;
+
+ switch (res) {
+ case -1:
+ error("error handling script '" + errorHandlingScript +
+ "' failed to execute");
+ break;
+ case -2:
+ error("error handling script '" + errorHandlingScript +
+            "' crashed or timed out");
+ break;
+ default:
+ error("error handling script '" + errorHandlingScript +
+ "' exited with code " + Twine(res));
+ }
+ }
+}
+
void ErrorHandler::fatal(const Twine &msg) {
error(msg);
exitLld(1);
std::string k = std::string(arg.getSpelling());
if (arg.getNumValues() == 0)
return k;
- std::string v = quote(arg.getValue());
+ std::string v;
+ for (size_t i = 0; i < arg.getNumValues(); ++i) {
+ if (i > 0)
+ v.push_back(' ');
+ v += quote(arg.getValue(i));
+ }
if (arg.getOption().getRenderStyle() == opt::Option::RenderJoinedStyle)
return k + v;
return k + " " + v;
// Returns the demangled C++ symbol name for name.
std::string lld::demangleItanium(StringRef name) {
- // itaniumDemangle can be used to demangle strings other than symbol
- // names which do not necessarily start with "_Z". Name can be
- // either a C or C++ symbol. Don't call demangle if the name
- // does not look like a C++ symbol name to avoid getting unexpected
- // result for a C symbol that happens to match a mangled type name.
- if (!name.startswith("_Z"))
+ // demangleItanium() can be called for all symbols. Only demangle C++ symbols,
+  // to avoid an unexpected result for a C symbol that happens to match a
+ // mangled type name such as "Pi" (which would demangle to "int*").
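+  // Some targets prepend extra underscores to symbol names (Mach-O, for
+  // instance, adds a leading '_'), so also accept up to three extra leading
+  // underscores before the "_Z".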
+ if (!name.startswith("_Z") && !name.startswith("__Z") &&
+ !name.startswith("___Z") && !name.startswith("____Z"))
return std::string(name);
return demangle(std::string(name));
// Returns true if S is valid as a C language identifier.
bool lld::isValidCIdentifier(StringRef s) {
- return !s.empty() && (isAlpha(s[0]) || s[0] == '_') &&
- std::all_of(s.begin() + 1, s.end(),
- [](char c) { return c == '_' || isAlnum(c); });
+ return !s.empty() && !isDigit(s[0]) &&
+ llvm::all_of(s, [](char c) { return isAlnum(c) || c == '_'; });
}
// Write the contents of the a buffer to a file
static llvm::codegen::RegisterCodeGenFlags CGF;
llvm::TargetOptions lld::initTargetOptionsFromCodeGenFlags() {
- return llvm::codegen::InitTargetOptionsFromCodeGenFlags();
+ return llvm::codegen::InitTargetOptionsFromCodeGenFlags(llvm::Triple());
}
llvm::Optional<llvm::Reloc::Model> lld::getRelocModelFromCMModel() {
if (child->total > 0)
child->print(1, totalDuration);
- message(std::string(49, '-'));
+ message(std::string(50, '-'));
root().print(0, root().millis(), false);
}
SmallString<32> str;
llvm::raw_svector_ostream stream(str);
std::string s = std::string(depth * 2, ' ') + name + std::string(":");
- stream << format("%-30s%5d ms (%5.1f%%)", s.c_str(), (int)millis(), p);
+ stream << format("%-30s%7d ms (%5.1f%%)", s.c_str(), (int)millis(), p);
message(str);
#include "lld/Common/Version.h"
-#ifdef HAVE_VCS_VERSION_INC
#include "VCSVersion.inc"
-#endif
// Returns a version string, e.g.:
// lld 9.0.0 (https://github.com/llvm/llvm-project.git 9efdd7ac5e914d3c9fa1ef)
write32le(buf, read32le(patchee->data().begin() + patcheeOffset));
// Apply any relocation transferred from the original patchee section.
- // For a SyntheticSection Buf already has outSecOff added, but relocateAlloc
- // also adds outSecOff so we need to subtract to avoid double counting.
- this->relocateAlloc(buf - outSecOff, buf - outSecOff + getSize());
+ relocateAlloc(buf, buf + getSize());
// Return address is the next instruction after the one we have just copied.
uint64_t s = getLDSTAddr() + 4;
class Defined;
class InputSection;
-struct InputSectionDescription;
+class InputSectionDescription;
class OutputSection;
class Patch843419Section;
offset = target->getImplicitAddend(buf, R_ARM_THM_JUMP24);
else
offset = target->getImplicitAddend(buf, R_ARM_THM_CALL);
+ // A BLX instruction from Thumb to Arm may have an address that is
+  // not 4-byte aligned. As Arm instructions are always 4-byte aligned,
+  // the target address is calculated (from the Arm ARM) as:
+ // targetAddress = Align(PC, 4) + imm32
+ // where
+ // Align(x, y) = y * (x Div y)
+ // which corresponds to alignDown.
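+  // For example, Align(0x1006, 4) = 4 * (0x1006 Div 4) = 0x1004.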
+ if (isBLX(instr))
+ sourceAddr = alignDown(sourceAddr, 4);
return sourceAddr + offset + 4;
}
write32le(buf, 0xea000000);
else
write32le(buf, 0x9000f000);
- // If we have a relocation then apply it. For a SyntheticSection buf already
- // has outSecOff added, but relocateAlloc also adds outSecOff so we need to
- // subtract to avoid double counting.
+ // If we have a relocation then apply it.
if (!relocations.empty()) {
- relocateAlloc(buf - outSecOff, buf - outSecOff + getSize());
+ relocateAlloc(buf, buf + getSize());
return;
}
// We cannot use the instruction in the patchee section as this will have
// been altered to point to us!
uint64_t s = getThumbDestAddr(getBranchAddr(), instr);
- uint64_t p = getVA(4);
+ // A BLX changes the state of the branch in the patch to Arm state, which
+ // has a PC Bias of 8, whereas in all other cases the branch is in Thumb
+ // state with a PC Bias of 4.
+ uint64_t pcBias = isBLX(instr) ? 8 : 4;
+ uint64_t p = getVA(pcBias);
target->relocateNoSym(buf, isARM ? R_ARM_JUMP24 : R_ARM_THM_JUMP24, s - p);
}
class Defined;
class InputSection;
-struct InputSectionDescription;
+class InputSectionDescription;
class OutputSection;
class Patch657417Section;
RelExpr getRelExpr(RelType type, const Symbol &s,
const uint8_t *loc) const override;
RelType getDynRel(RelType type) const override;
+ int64_t getImplicitAddend(const uint8_t *buf, RelType type) const override;
void writeGotPlt(uint8_t *buf, const Symbol &s) const override;
void writePltHeader(uint8_t *buf) const override;
void writePlt(uint8_t *buf, const Symbol &sym,
bool usesOnlyLowPageBits(RelType type) const override;
void relocate(uint8_t *loc, const Relocation &rel,
uint64_t val) const override;
- RelExpr adjustRelaxExpr(RelType type, const uint8_t *data,
- RelExpr expr) const override;
+ RelExpr adjustTlsExpr(RelType type, RelExpr expr) const override;
void relaxTlsGdToLe(uint8_t *loc, const Relocation &rel,
uint64_t val) const override;
void relaxTlsGdToIe(uint8_t *loc, const Relocation &rel,
pltEntrySize = 16;
ipltEntrySize = 16;
defaultMaxPageSize = 65536;
+ gotBaseSymInGotPlt = false;
// Align to the 2 MiB page size (known as a superpage or huge page).
// FreeBSD automatically promotes 2 MiB-aligned allocations.
case R_AARCH64_TLSLE_MOVW_TPREL_G1:
case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
case R_AARCH64_TLSLE_MOVW_TPREL_G2:
- return R_TLS;
+ return R_TPREL;
case R_AARCH64_CALL26:
case R_AARCH64_CONDBR19:
case R_AARCH64_JUMP26:
case R_AARCH64_LD64_GOT_LO12_NC:
case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
return R_GOT;
+ case R_AARCH64_LD64_GOTPAGE_LO15:
+ return R_AARCH64_GOT_PAGE;
case R_AARCH64_ADR_GOT_PAGE:
case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
return R_AARCH64_GOT_PAGE_PC;
}
}
-RelExpr AArch64::adjustRelaxExpr(RelType type, const uint8_t *data,
- RelExpr expr) const {
+RelExpr AArch64::adjustTlsExpr(RelType type, RelExpr expr) const {
if (expr == R_RELAX_TLS_GD_TO_IE) {
if (type == R_AARCH64_TLSDESC_ADR_PAGE21)
return R_AARCH64_RELAX_TLS_GD_TO_IE_PAGE_PC;
return R_AARCH64_NONE;
}
+int64_t AArch64::getImplicitAddend(const uint8_t *buf, RelType type) const {
+ switch (type) {
+ case R_AARCH64_TLSDESC:
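+    // The addend lives in the second 64-bit word of the two-word TLS
+    // descriptor, mirroring the write in relocate().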
+ return read64(buf + 8);
+ default:
+ internalLinkerError(getErrorLocation(buf),
+ "cannot read addend for relocation " + toString(type));
+ return 0;
+ }
+}
+
void AArch64::writeGotPlt(uint8_t *buf, const Symbol &) const {
- write64le(buf, in.plt->getVA());
+ write64(buf, in.plt->getVA());
}
void AArch64::writePltHeader(uint8_t *buf) const {
case R_AARCH64_ABS16:
case R_AARCH64_PREL16:
checkIntUInt(loc, val, 16, rel);
- write16le(loc, val);
+ write16(loc, val);
break;
case R_AARCH64_ABS32:
case R_AARCH64_PREL32:
checkIntUInt(loc, val, 32, rel);
- write32le(loc, val);
+ write32(loc, val);
break;
case R_AARCH64_PLT32:
checkInt(loc, val, 32, rel);
- write32le(loc, val);
+ write32(loc, val);
break;
case R_AARCH64_ABS64:
case R_AARCH64_PREL64:
- write64le(loc, val);
+ write64(loc, val);
break;
case R_AARCH64_ADD_ABS_LO12_NC:
or32AArch64Imm(loc, val);
checkAlignment(loc, val, 16, rel);
or32AArch64Imm(loc, getBits(val, 4, 11));
break;
+ case R_AARCH64_LD64_GOTPAGE_LO15:
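+    // The immediate holds bits [14:3] of the offset of the GOT entry from
+    // the GOT page, so the value must be 8-byte aligned.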
+ checkAlignment(loc, val, 8, rel);
+ or32AArch64Imm(loc, getBits(val, 3, 14));
+ break;
case R_AARCH64_MOVW_UABS_G0:
checkUInt(loc, val, 16, rel);
LLVM_FALLTHROUGH;
case R_AARCH64_TLSDESC_ADD_LO12:
or32AArch64Imm(loc, val);
break;
+ case R_AARCH64_TLSDESC:
+ // For R_AARCH64_TLSDESC the addend is stored in the second 64-bit word.
+ write64(loc + 8, val);
+ break;
default:
llvm_unreachable("unknown relocation");
}
namespace {
class AMDGPU final : public TargetInfo {
+private:
+ uint32_t calcEFlagsV3() const;
+ uint32_t calcEFlagsV4() const;
+
public:
AMDGPU();
uint32_t calcEFlags() const override;
}
static uint32_t getEFlags(InputFile *file) {
- return cast<ObjFile<ELF64LE>>(file)->getObj().getHeader()->e_flags;
+ return cast<ObjFile<ELF64LE>>(file)->getObj().getHeader().e_flags;
}
-uint32_t AMDGPU::calcEFlags() const {
- assert(!objectFiles.empty());
+uint32_t AMDGPU::calcEFlagsV3() const {
uint32_t ret = getEFlags(objectFiles[0]);
// Verify that all input files have the same e_flags.
return ret;
}
+uint32_t AMDGPU::calcEFlagsV4() const {
+ uint32_t retMach = getEFlags(objectFiles[0]) & EF_AMDGPU_MACH;
+ uint32_t retXnack = getEFlags(objectFiles[0]) & EF_AMDGPU_FEATURE_XNACK_V4;
+ uint32_t retSramEcc =
+ getEFlags(objectFiles[0]) & EF_AMDGPU_FEATURE_SRAMECC_V4;
+
+ // Verify that all input files have compatible e_flags (same mach, all
+ // features in the same category are either ANY, ANY and ON, or ANY and OFF).
+ for (InputFile *f : makeArrayRef(objectFiles).slice(1)) {
+ if (retMach != (getEFlags(f) & EF_AMDGPU_MACH)) {
+ error("incompatible mach: " + toString(f));
+ return 0;
+ }
+
+ if (retXnack == EF_AMDGPU_FEATURE_XNACK_UNSUPPORTED_V4 ||
+ (retXnack != EF_AMDGPU_FEATURE_XNACK_ANY_V4 &&
+ (getEFlags(f) & EF_AMDGPU_FEATURE_XNACK_V4)
+ != EF_AMDGPU_FEATURE_XNACK_ANY_V4)) {
+ if (retXnack != (getEFlags(f) & EF_AMDGPU_FEATURE_XNACK_V4)) {
+ error("incompatible xnack: " + toString(f));
+ return 0;
+ }
+ } else {
+ if (retXnack == EF_AMDGPU_FEATURE_XNACK_ANY_V4)
+ retXnack = getEFlags(f) & EF_AMDGPU_FEATURE_XNACK_V4;
+ }
+
+ if (retSramEcc == EF_AMDGPU_FEATURE_SRAMECC_UNSUPPORTED_V4 ||
+ (retSramEcc != EF_AMDGPU_FEATURE_SRAMECC_ANY_V4 &&
+ (getEFlags(f) & EF_AMDGPU_FEATURE_SRAMECC_V4) !=
+ EF_AMDGPU_FEATURE_SRAMECC_ANY_V4)) {
+ if (retSramEcc != (getEFlags(f) & EF_AMDGPU_FEATURE_SRAMECC_V4)) {
+ error("incompatible sramecc: " + toString(f));
+ return 0;
+ }
+ } else {
+ if (retSramEcc == EF_AMDGPU_FEATURE_SRAMECC_ANY_V4)
+ retSramEcc = getEFlags(f) & EF_AMDGPU_FEATURE_SRAMECC_V4;
+ }
+ }
+
+ return retMach | retXnack | retSramEcc;
+}
+
+uint32_t AMDGPU::calcEFlags() const {
+ assert(!objectFiles.empty());
+
+ uint8_t abiVersion = cast<ObjFile<ELF64LE>>(objectFiles[0])->getObj()
+ .getHeader().e_ident[EI_ABIVERSION];
+ switch (abiVersion) {
+ case ELFABIVERSION_AMDGPU_HSA_V2:
+ case ELFABIVERSION_AMDGPU_HSA_V3:
+ return calcEFlagsV3();
+ case ELFABIVERSION_AMDGPU_HSA_V4:
+ return calcEFlagsV4();
+ default:
+ error("unknown abi version: " + Twine(abiVersion));
+ return 0;
+ }
+}
+
void AMDGPU::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
switch (rel.type) {
case R_AMDGPU_ABS32:
case R_AMDGPU_REL32_HI:
write32le(loc, val >> 32);
break;
+ case R_AMDGPU_REL16: {
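+    // The 16-bit immediate counts 4-byte words relative to the end of the
+    // current instruction, hence the -4 bias and the division by 4.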
+ int64_t simm = (static_cast<int64_t>(val) - 4) / 4;
+ checkInt(loc, simm, 16, rel);
+ write16le(loc, simm);
+ break;
+ }
default:
llvm_unreachable("unknown relocation");
}
case R_AMDGPU_REL32_LO:
case R_AMDGPU_REL32_HI:
case R_AMDGPU_REL64:
+ case R_AMDGPU_REL16:
return R_PC;
case R_AMDGPU_GOTPCREL:
case R_AMDGPU_GOTPCREL32_LO:
case R_ARM_NONE:
return R_NONE;
case R_ARM_TLS_LE32:
- return R_TLS;
+ return R_TPREL;
case R_ARM_V4BX:
// V4BX is just a marker to indicate there's a "bx rN" instruction at the
// given address. It can be used to implement a special linker mode which
bool ARM::needsThunk(RelExpr expr, RelType type, const InputFile *file,
uint64_t branchAddr, const Symbol &s,
- int64_t /*a*/) const {
+ int64_t a) const {
// If S is an undefined weak symbol and does not have a PLT entry then it
// will be resolved as a branch to the next instruction.
if (s.isUndefWeak() && !s.isInPlt())
LLVM_FALLTHROUGH;
case R_ARM_CALL: {
uint64_t dst = (expr == R_PLT_PC) ? s.getPltVA() : s.getVA();
- return !inBranchRange(type, branchAddr, dst);
+ return !inBranchRange(type, branchAddr, dst + a);
}
case R_ARM_THM_JUMP19:
case R_ARM_THM_JUMP24:
LLVM_FALLTHROUGH;
case R_ARM_THM_CALL: {
uint64_t dst = (expr == R_PLT_PC) ? s.getPltVA() : s.getVA();
- return !inBranchRange(type, branchAddr, dst);
+ return !inBranchRange(type, branchAddr, dst + a);
}
}
return false;
}
bool ARM::inBranchRange(RelType type, uint64_t src, uint64_t dst) const {
- uint64_t range;
- uint64_t instrSize;
+ if ((dst & 0x1) == 0)
+    // The destination is Arm. If the caller is Arm, Src is already 4-byte
+    // aligned; if the caller is Thumb (BLX), the bottom 2 bits of Src are
+    // cleared to ensure the destination is 4-byte aligned.
+ src &= ~0x3;
+ else
+ // Bit 0 == 1 denotes Thumb state, it is not part of the range.
+ dst &= ~0x1;
+ int64_t offset = dst - src;
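+  // Each branch encodes a signed, scaled immediate: Arm branches (PC24,
+  // PLT32, JUMP24, CALL) reach a signed 26-bit byte offset (+/-32 MiB),
+  // Thumb2 BL/B.W with J1/J2 encoding a 25-bit offset (+/-16 MiB), older
+  // Thumb encodings a 23-bit offset (+/-4 MiB), and the conditional
+  // THM_JUMP19 a 21-bit offset (+/-1 MiB).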
switch (type) {
case R_ARM_PC24:
case R_ARM_PLT32:
case R_ARM_JUMP24:
case R_ARM_CALL:
- range = 0x2000000;
- instrSize = 4;
- break;
+ return llvm::isInt<26>(offset);
case R_ARM_THM_JUMP19:
- range = 0x100000;
- instrSize = 2;
- break;
+ return llvm::isInt<21>(offset);
case R_ARM_THM_JUMP24:
case R_ARM_THM_CALL:
- range = config->armJ1J2BranchEncoding ? 0x1000000 : 0x400000;
- instrSize = 2;
- break;
+ return config->armJ1J2BranchEncoding ? llvm::isInt<25>(offset)
+ : llvm::isInt<23>(offset);
default:
return true;
}
- // PC at Src is 2 instructions ahead, immediate of branch is signed
- if (src > dst)
- range -= 2 * instrSize;
- else
- range += instrSize;
-
- if ((dst & 0x1) == 0)
- // Destination is ARM, if ARM caller then Src is already 4-byte aligned.
- // If Thumb Caller (BLX) the Src address has bottom 2 bits cleared to ensure
- // destination will be 4 byte aligned.
- src &= ~0x3;
- else
- // Bit 0 == 1 denotes Thumb state, it is not part of the range
- dst &= ~0x1;
-
- uint64_t distance = (src > dst) ? src - dst : dst - src;
- return distance <= range;
}
// Helper to produce message text when LLD detects that a CALL relocation to
int64_t ARM::getImplicitAddend(const uint8_t *buf, RelType type) const {
switch (type) {
default:
+ internalLinkerError(getErrorLocation(buf),
+ "cannot read addend for relocation " + toString(type));
return 0;
case R_ARM_ABS32:
case R_ARM_BASE_PREL:
+ case R_ARM_GLOB_DAT:
case R_ARM_GOTOFF32:
case R_ARM_GOT_BREL:
case R_ARM_GOT_PREL:
+ case R_ARM_IRELATIVE:
case R_ARM_REL32:
+ case R_ARM_RELATIVE:
+ case R_ARM_SBREL32:
case R_ARM_TARGET1:
case R_ARM_TARGET2:
+ case R_ARM_TLS_DTPMOD32:
+ case R_ARM_TLS_DTPOFF32:
case R_ARM_TLS_GD32:
- case R_ARM_TLS_LDM32:
- case R_ARM_TLS_LDO32:
case R_ARM_TLS_IE32:
+ case R_ARM_TLS_LDM32:
case R_ARM_TLS_LE32:
+ case R_ARM_TLS_LDO32:
+ case R_ARM_TLS_TPOFF32:
return SignExtend64<32>(read32le(buf));
case R_ARM_PREL31:
return SignExtend64<31>(read32le(buf));
uint64_t imm12 = read16le(buf + 2) & 0x0fff;
return u ? imm12 : -imm12;
}
+ case R_ARM_NONE:
+ case R_ARM_JUMP_SLOT:
+ // These relocations are defined as not having an implicit addend.
+ return 0;
}
}
//
//===----------------------------------------------------------------------===//
//
-// AVR is a Harvard-architecture 8-bit micrcontroller designed for small
+// AVR is a Harvard-architecture 8-bit microcontroller designed for small
// baremetal programs. All AVR-family processors have 32 8-bit registers.
// The tiniest AVR has 32 byte RAM and 1 KiB program memory, and the largest
// one supports up to 2^24 data address space and 2^22 code address space.
class AVR final : public TargetInfo {
public:
AVR();
+ uint32_t calcEFlags() const override;
RelExpr getRelExpr(RelType type, const Symbol &s,
const uint8_t *loc) const override;
void relocate(uint8_t *loc, const Relocation &rel,
RelExpr AVR::getRelExpr(RelType type, const Symbol &s,
const uint8_t *loc) const {
switch (type) {
+ case R_AVR_6:
+ case R_AVR_6_ADIW:
+ case R_AVR_8:
+ case R_AVR_16:
+ case R_AVR_16_PM:
+ case R_AVR_32:
+ case R_AVR_LDI:
+ case R_AVR_LO8_LDI:
+ case R_AVR_LO8_LDI_NEG:
+ case R_AVR_HI8_LDI:
+ case R_AVR_HI8_LDI_NEG:
+ case R_AVR_HH8_LDI_NEG:
+ case R_AVR_HH8_LDI:
+ case R_AVR_MS8_LDI_NEG:
+ case R_AVR_MS8_LDI:
+ case R_AVR_LO8_LDI_PM:
+ case R_AVR_LO8_LDI_PM_NEG:
+ case R_AVR_HI8_LDI_PM:
+ case R_AVR_HI8_LDI_PM_NEG:
+ case R_AVR_HH8_LDI_PM:
+ case R_AVR_HH8_LDI_PM_NEG:
+ case R_AVR_PORT5:
+ case R_AVR_PORT6:
+ case R_AVR_CALL:
+ return R_ABS;
case R_AVR_7_PCREL:
case R_AVR_13_PCREL:
return R_PC;
default:
- return R_ABS;
+ error(getErrorLocation(loc) + "unknown relocation (" + Twine(type) +
+ ") against symbol " + toString(s));
+ return R_NONE;
}
}
break;
}
default:
- error(getErrorLocation(loc) + "unrecognized relocation " +
- toString(rel.type));
+ llvm_unreachable("unknown relocation");
}
}
static AVR target;
return ⌖
}
+
+static uint32_t getEFlags(InputFile *file) {
+ return cast<ObjFile<ELF32LE>>(file)->getObj().getHeader().e_flags;
+}
+
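+// Merge the AVR-specific e_flags of the input objects: the target ISA
+// (EF_AVR_ARCH_*) must be the same for all inputs, and
+// EF_AVR_LINKRELAX_PREPARED is kept in the output only if every input sets it.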
+uint32_t AVR::calcEFlags() const {
+ assert(!objectFiles.empty());
+
+ uint32_t flags = getEFlags(objectFiles[0]);
+ bool hasLinkRelaxFlag = flags & EF_AVR_LINKRELAX_PREPARED;
+
+ for (InputFile *f : makeArrayRef(objectFiles).slice(1)) {
+ uint32_t objFlags = getEFlags(f);
+ if ((objFlags & EF_AVR_ARCH_MASK) != (flags & EF_AVR_ARCH_MASK))
+ error(toString(f) +
+ ": cannot link object files with incompatible target ISA");
+ if (!(objFlags & EF_AVR_LINKRELAX_PREPARED))
+ hasLinkRelaxFlag = false;
+ }
+
+ if (!hasLinkRelaxFlag)
+ flags &= ~EF_AVR_LINKRELAX_PREPARED;
+
+ return flags;
+}
// greatest revision in the list of inputs.
uint32_t ret = 0;
for (InputFile *f : objectFiles) {
- uint32_t eflags = cast<ObjFile<ELF32LE>>(f)->getObj().getHeader()->e_flags;
+ uint32_t eflags = cast<ObjFile<ELF32LE>>(f)->getObj().getHeader().e_flags;
if (eflags > ret)
ret = eflags;
}
case R_HEX_TPREL_32_6_X:
case R_HEX_TPREL_HI16:
case R_HEX_TPREL_LO16:
- return R_TLS;
+ return R_TPREL;
default:
error(getErrorLocation(loc) + "unknown relocation (" + Twine(type) +
") against symbol " + toString(s));
case R_MIPS_TLS_TPREL64:
case R_MICROMIPS_TLS_TPREL_HI16:
case R_MICROMIPS_TLS_TPREL_LO16:
- return R_TLS;
+ return R_TPREL;
case R_MIPS_PC32:
case R_MIPS_PC16:
case R_MIPS_PC19_S2:
if (!f)
return false;
// If current file has PIC code, LA25 stub is not required.
- if (f->getObj().getHeader()->e_flags & EF_MIPS_PIC)
+ if (f->getObj().getHeader().e_flags & EF_MIPS_PIC)
return false;
auto *d = dyn_cast<Defined>(&s);
// LA25 is required if target file has PIC code
const endianness e = ELFT::TargetEndianness;
switch (type) {
case R_MIPS_32:
+ case R_MIPS_REL32:
case R_MIPS_GPREL32:
case R_MIPS_TLS_DTPREL32:
+ case R_MIPS_TLS_DTPMOD32:
case R_MIPS_TLS_TPREL32:
return SignExtend64<32>(read32(buf));
case R_MIPS_26:
// we should use another expression for calculation:
// ((A << 2) | (P & 0xf0000000)) >> 2
return SignExtend64<28>(read32(buf) << 2);
+ case R_MIPS_CALL_HI16:
case R_MIPS_GOT16:
+ case R_MIPS_GOT_HI16:
case R_MIPS_HI16:
case R_MIPS_PCHI16:
return SignExtend64<16>(read32(buf)) << 16;
+ case R_MIPS_CALL16:
+ case R_MIPS_CALL_LO16:
+ case R_MIPS_GOT_LO16:
case R_MIPS_GPREL16:
case R_MIPS_LO16:
case R_MIPS_PCLO16:
case R_MIPS_TLS_DTPREL_HI16:
case R_MIPS_TLS_DTPREL_LO16:
+ case R_MIPS_TLS_GD:
+ case R_MIPS_TLS_GOTTPREL:
+ case R_MIPS_TLS_LDM:
case R_MIPS_TLS_TPREL_HI16:
case R_MIPS_TLS_TPREL_LO16:
return SignExtend64<16>(read32(buf));
case R_MICROMIPS_GOT16:
case R_MICROMIPS_HI16:
return SignExtend64<16>(readShuffle<e>(buf)) << 16;
+ case R_MICROMIPS_CALL16:
case R_MICROMIPS_GPREL16:
case R_MICROMIPS_LO16:
case R_MICROMIPS_TLS_DTPREL_HI16:
case R_MICROMIPS_TLS_DTPREL_LO16:
+ case R_MICROMIPS_TLS_GD:
+ case R_MICROMIPS_TLS_GOTTPREL:
+ case R_MICROMIPS_TLS_LDM:
case R_MICROMIPS_TLS_TPREL_HI16:
case R_MICROMIPS_TLS_TPREL_LO16:
return SignExtend64<16>(readShuffle<e>(buf));
return SignExtend64<25>(readShuffle<e>(buf) << 2);
case R_MICROMIPS_PC26_S1:
return SignExtend64<27>(readShuffle<e>(buf) << 1);
+ case R_MIPS_64:
+ case R_MIPS_TLS_DTPMOD64:
+ case R_MIPS_TLS_DTPREL64:
+ case R_MIPS_TLS_TPREL64:
+ case (R_MIPS_64 << 8) | R_MIPS_REL32:
+ return read64(buf);
+ case R_MIPS_COPY:
+ return config->is64 ? read64(buf) : read32(buf);
+ case R_MIPS_NONE:
+ case R_MIPS_JUMP_SLOT:
+ case R_MIPS_JALR:
+ // These relocations are defined as not having an implicit addend.
+ return 0;
default:
+ internalLinkerError(getErrorLocation(buf),
+ "cannot read addend for relocation " + toString(type));
return 0;
}
}
if (!file)
return false;
- return file->getObj().getHeader()->e_flags & EF_MIPS_PIC;
+ return file->getObj().getHeader().e_flags & EF_MIPS_PIC;
}
template <class ELFT> TargetInfo *elf::getMipsTargetInfo() {
template <class ELFT> uint32_t elf::calcMipsEFlags() {
std::vector<FileFlags> v;
for (InputFile *f : objectFiles)
- v.push_back({f, cast<ObjFile<ELFT>>(f)->getObj().getHeader()->e_flags});
+ v.push_back({f, cast<ObjFile<ELFT>>(f)->getObj().getHeader().e_flags});
if (v.empty()) {
// If we don't have any input files, we'll have to rely on the information
// we can derive from emulation information, since this at least gets us
template <class ELFT> static bool isN32Abi(const InputFile *f) {
if (auto *ef = dyn_cast<ELFFileBase>(f))
- return ef->template getObj<ELFT>().getHeader()->e_flags & EF_MIPS_ABI2;
+ return ef->template getObj<ELFT>().getHeader().e_flags & EF_MIPS_ABI2;
return false;
}
using namespace lld;
using namespace lld::elf;
-static uint64_t ppc64TocOffset = 0x8000;
-static uint64_t dynamicThreadPointerOffset = 0x8000;
+constexpr uint64_t ppc64TocOffset = 0x8000;
+constexpr uint64_t dynamicThreadPointerOffset = 0x8000;
// The instruction encoding of bits 21-30 from the ISA for the X-form and D-form
// instructions that can be used as part of the initial exec TLS sequence.
ADDI = 14
};
+constexpr uint32_t NOP = 0x60000000;
+
+enum class PPCLegacyInsn : uint32_t {
+ NOINSN = 0,
+ // Loads.
+ LBZ = 0x88000000,
+ LHZ = 0xa0000000,
+ LWZ = 0x80000000,
+ LHA = 0xa8000000,
+ LWA = 0xe8000002,
+ LD = 0xe8000000,
+  LFS = 0xc0000000,
+ LXSSP = 0xe4000003,
+ LFD = 0xc8000000,
+ LXSD = 0xe4000002,
+ LXV = 0xf4000001,
+ LXVP = 0x18000000,
+
+ // Stores.
+ STB = 0x98000000,
+ STH = 0xb0000000,
+ STW = 0x90000000,
+ STD = 0xf8000000,
+ STFS = 0xd0000000,
+ STXSSP = 0xf4000003,
+ STFD = 0xd8000000,
+ STXSD = 0xf4000002,
+ STXV = 0xf4000005,
+ STXVP = 0x18000001
+};
+enum class PPCPrefixedInsn : uint64_t {
+ NOINSN = 0,
+ PREFIX_MLS = 0x0610000000000000,
+ PREFIX_8LS = 0x0410000000000000,
+
+ // Loads.
+ PLBZ = PREFIX_MLS,
+ PLHZ = PREFIX_MLS,
+ PLWZ = PREFIX_MLS,
+ PLHA = PREFIX_MLS,
+ PLWA = PREFIX_8LS | 0xa4000000,
+ PLD = PREFIX_8LS | 0xe4000000,
+ PLFS = PREFIX_MLS,
+ PLXSSP = PREFIX_8LS | 0xac000000,
+ PLFD = PREFIX_MLS,
+ PLXSD = PREFIX_8LS | 0xa8000000,
+ PLXV = PREFIX_8LS | 0xc8000000,
+ PLXVP = PREFIX_8LS | 0xe8000000,
+
+ // Stores.
+ PSTB = PREFIX_MLS,
+ PSTH = PREFIX_MLS,
+ PSTW = PREFIX_MLS,
+ PSTD = PREFIX_8LS | 0xf4000000,
+ PSTFS = PREFIX_MLS,
+ PSTXSSP = PREFIX_8LS | 0xbc000000,
+ PSTFD = PREFIX_MLS,
+ PSTXSD = PREFIX_8LS | 0xb8000000,
+ PSTXV = PREFIX_8LS | 0xd8000000,
+ PSTXVP = PREFIX_8LS | 0xf8000000
+};
+static bool checkPPCLegacyInsn(uint32_t encoding) {
+ PPCLegacyInsn insn = static_cast<PPCLegacyInsn>(encoding);
+ if (insn == PPCLegacyInsn::NOINSN)
+ return false;
+#define PCREL_OPT(Legacy, PCRel, InsnMask) \
+ if (insn == PPCLegacyInsn::Legacy) \
+ return true;
+#include "PPCInsns.def"
+#undef PCREL_OPT
+ return false;
+}
+
+// Masks to apply to legacy instructions when converting them to prefixed,
+// pc-relative versions. For the most part, the primary opcode is shared
+// between the legacy instruction and the suffix of its prefixed version.
+// However, there are some instances where that isn't the case (DS-form and
+// DQ-form instructions).
+enum class LegacyToPrefixMask : uint64_t {
+ NOMASK = 0x0,
+ OPC_AND_RST = 0xffe00000, // Primary opc (0-5) and R[ST] (6-10).
+ ONLY_RST = 0x3e00000, // [RS]T (6-10).
+ ST_STX28_TO5 =
+ 0x8000000003e00000, // S/T (6-10) - The [S/T]X bit moves from 28 to 5.
+};
+
uint64_t elf::getPPC64TocBase() {
// The TOC consists of sections .got, .toc, .tocbss, .plt in that order. The
// TOC starts where the first of these sections starts. We always create a
return type == R_PPC64_TOC16 || type == R_PPC64_TOC16_DS;
}
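+// The prefixed instruction is always a 4-byte prefix followed by a 4-byte
+// instruction, so the prefix sits at the lower address regardless of
+// endianness. On little-endian hosts the two 4-byte halves therefore need to
+// be swapped before the full 8 bytes are written.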
+void elf::writePrefixedInstruction(uint8_t *loc, uint64_t insn) {
+ insn = config->isLE ? insn << 32 | insn >> 32 : insn;
+ write64(loc, insn);
+}
+
static bool addOptional(StringRef name, uint64_t value,
std::vector<Defined *> &defined) {
Symbol *sym = symtab->find(name);
int64_t a) const override;
uint32_t getThunkSectionSpacing() const override;
bool inBranchRange(RelType type, uint64_t src, uint64_t dst) const override;
- RelExpr adjustRelaxExpr(RelType type, const uint8_t *data,
- RelExpr expr) const override;
+ RelExpr adjustTlsExpr(RelType type, RelExpr expr) const override;
+ RelExpr adjustGotPcExpr(RelType type, int64_t addend,
+ const uint8_t *loc) const override;
void relaxGot(uint8_t *loc, const Relocation &rel,
uint64_t val) const override;
void relaxTlsGdToIe(uint8_t *loc, const Relocation &rel,
// document.
static uint16_t lo(uint64_t v) { return v; }
static uint16_t hi(uint64_t v) { return v >> 16; }
-static uint16_t ha(uint64_t v) { return (v + 0x8000) >> 16; }
+static uint64_t ha(uint64_t v) { return (v + 0x8000) >> 16; }
static uint16_t higher(uint64_t v) { return v >> 32; }
static uint16_t highera(uint64_t v) { return (v + 0x8000) >> 32; }
static uint16_t highest(uint64_t v) { return v >> 48; }
switch (getPrimaryOpCode(encoding)) {
default:
return false;
+ case 6: // Power10 paired loads/stores (lxvp, stxvp).
case 56:
// The only instruction with a primary opcode of 56 is `lq`.
return true;
}
}
+static bool isDSFormInstruction(PPCLegacyInsn insn) {
+ switch (insn) {
+ default:
+ return false;
+ case PPCLegacyInsn::LWA:
+ case PPCLegacyInsn::LD:
+ case PPCLegacyInsn::LXSD:
+ case PPCLegacyInsn::LXSSP:
+ case PPCLegacyInsn::STD:
+ case PPCLegacyInsn::STXSD:
+ case PPCLegacyInsn::STXSSP:
+ return true;
+ }
+}
+
+static PPCLegacyInsn getPPCLegacyInsn(uint32_t encoding) {
+ uint32_t opc = encoding & 0xfc000000;
+
+ // If the primary opcode is shared between multiple instructions, we need to
+ // fix it up to match the actual instruction we are after.
+ if ((opc == 0xe4000000 || opc == 0xe8000000 || opc == 0xf4000000 ||
+ opc == 0xf8000000) &&
+ !isDQFormInstruction(encoding))
+ opc = encoding & 0xfc000003;
+ else if (opc == 0xf4000000)
+ opc = encoding & 0xfc000007;
+ else if (opc == 0x18000000)
+ opc = encoding & 0xfc00000f;
+
+ // If the value is not one of the enumerators in PPCLegacyInsn, we want to
+ // return PPCLegacyInsn::NOINSN.
+ if (!checkPPCLegacyInsn(opc))
+ return PPCLegacyInsn::NOINSN;
+ return static_cast<PPCLegacyInsn>(opc);
+}
+
+static PPCPrefixedInsn getPCRelativeForm(PPCLegacyInsn insn) {
+ switch (insn) {
+#define PCREL_OPT(Legacy, PCRel, InsnMask) \
+ case PPCLegacyInsn::Legacy: \
+ return PPCPrefixedInsn::PCRel
+#include "PPCInsns.def"
+#undef PCREL_OPT
+ }
+ return PPCPrefixedInsn::NOINSN;
+}
+
+static LegacyToPrefixMask getInsnMask(PPCLegacyInsn insn) {
+ switch (insn) {
+#define PCREL_OPT(Legacy, PCRel, InsnMask) \
+ case PPCLegacyInsn::Legacy: \
+ return LegacyToPrefixMask::InsnMask
+#include "PPCInsns.def"
+#undef PCREL_OPT
+ }
+ return LegacyToPrefixMask::NOMASK;
+}
+static uint64_t getPCRelativeForm(uint32_t encoding) {
+ PPCLegacyInsn origInsn = getPPCLegacyInsn(encoding);
+ PPCPrefixedInsn pcrelInsn = getPCRelativeForm(origInsn);
+ if (pcrelInsn == PPCPrefixedInsn::NOINSN)
+ return UINT64_C(-1);
+ LegacyToPrefixMask origInsnMask = getInsnMask(origInsn);
+ uint64_t pcrelEncoding =
+ (uint64_t)pcrelInsn | (encoding & (uint64_t)origInsnMask);
+
+ // If the mask requires moving bit 28 to bit 5, do that now.
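+  // (ISA bits are numbered from the most significant bit: bit 28 is the 0x8
+  // bit of the word and bit 5 is the 0x04000000 bit, hence the shift by 23.)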
+ if (origInsnMask == LegacyToPrefixMask::ST_STX28_TO5)
+ pcrelEncoding |= (encoding & 0x8) << 23;
+ return pcrelEncoding;
+}
+
static bool isInstructionUpdateForm(uint32_t encoding) {
switch (getPrimaryOpCode(encoding)) {
default:
}
}
+// Compute the total displacement between the prefixed instruction that gets
+// to the start of the data and the load/store instruction that has the offset
+// into the data structure.
+// For example:
+// paddi 3, 0, 1000, 1
+// lwz 3, 20(3)
+// Should add up to 1020 for total displacement.
+static int64_t getTotalDisp(uint64_t prefixedInsn, uint32_t accessInsn) {
+ int64_t disp34 = llvm::SignExtend64(
+ ((prefixedInsn & 0x3ffff00000000) >> 16) | (prefixedInsn & 0xffff), 34);
+ int32_t disp16 = llvm::SignExtend32(accessInsn & 0xffff, 16);
+ // For DS and DQ form instructions, we need to mask out the XO bits.
+ if (isDQFormInstruction(accessInsn))
+ disp16 &= ~0xf;
+ else if (isDSFormInstruction(getPPCLegacyInsn(accessInsn)))
+ disp16 &= ~0x3;
+ return disp34 + disp16;
+}
+
// There are a number of places where we either want to read or write an
// instruction when handling a half16 relocation type. On big-endian the buffer
// pointer is pointing into the middle of the word we want to extract, and on
return read32(config->isLE ? loc : loc - 2);
}
-// The prefixed instruction is always a 4 byte prefix followed by a 4 byte
-// instruction. Therefore, the prefix is always in lower memory than the
-// instruction (regardless of endianness).
-// As a result, we need to shift the pieces around on little endian machines.
-static void writePrefixedInstruction(uint8_t *loc, uint64_t insn) {
- insn = config->isLE ? insn << 32 | insn >> 32 : insn;
- write64(loc, insn);
-}
-
static uint64_t readPrefixedInstruction(const uint8_t *loc) {
uint64_t fullInstr = read64(loc);
return config->isLE ? (fullInstr << 32 | fullInstr >> 32) : fullInstr;
static uint32_t getEFlags(InputFile *file) {
if (config->ekind == ELF64BEKind)
- return cast<ObjFile<ELF64BE>>(file)->getObj().getHeader()->e_flags;
- return cast<ObjFile<ELF64LE>>(file)->getObj().getHeader()->e_flags;
+ return cast<ObjFile<ELF64BE>>(file)->getObj().getHeader().e_flags;
+ return cast<ObjFile<ELF64LE>>(file)->getObj().getHeader().e_flags;
}
// This file implements v2 ABI. This function makes sure that all
relocateNoSym(loc, R_PPC64_TOC16_LO, val);
break;
}
+ case R_PPC64_GOT_PCREL34: {
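+    // For example, this relaxes
+    //   pld   3, sym@got@pcrel(0), 1
+    // to
+    //   paddi 3, 0, sym@pcrel, 1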
+ // Clear the first 8 bits of the prefix and the first 6 bits of the
+ // instruction (the primary opcode).
+ uint64_t insn = readPrefixedInstruction(loc);
+ if ((insn & 0xfc000000) != 0xe4000000)
+ error("expected a 'pld' for got-indirect to pc-relative relaxing");
+ insn &= ~0xff000000fc000000;
+
+    // Replace the cleared bits with the values for PADDI (0x600000038000000).
+ insn |= 0x600000038000000;
+ writePrefixedInstruction(loc, insn);
+ relocate(loc, rel, val);
+ break;
+ }
+ case R_PPC64_PCREL_OPT: {
+ // We can only relax this if the R_PPC64_GOT_PCREL34 at this offset can
+ // be relaxed. The eligibility for the relaxation needs to be determined
+ // on that relocation since this one does not relocate a symbol.
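+    // For example, once the paired R_PPC64_GOT_PCREL34 has been relaxed to a
+    // paddi, a sequence such as
+    //   paddi 3, 0, sym@pcrel, 1
+    //   lwz   3, 8(3)
+    // becomes
+    //   plwz  3, sym+8@pcrel(0), 1
+    //   nop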
+ uint64_t insn = readPrefixedInstruction(loc);
+ uint32_t accessInsn = read32(loc + rel.addend);
+ uint64_t pcRelInsn = getPCRelativeForm(accessInsn);
+
+ // This error is not necessary for correctness but is emitted for now
+ // to ensure we don't miss these opportunities in real code. It can be
+ // removed at a later date.
+ if (pcRelInsn == UINT64_C(-1)) {
+ errorOrWarn(
+ "unrecognized instruction for R_PPC64_PCREL_OPT relaxation: 0x" +
+ Twine::utohexstr(accessInsn));
+ break;
+ }
+
+ int64_t totalDisp = getTotalDisp(insn, accessInsn);
+ if (!isInt<34>(totalDisp))
+ break; // Displacement doesn't fit.
+ // Convert the PADDI to the prefixed version of accessInsn and convert
+ // accessInsn to a nop.
+ writePrefixedInstruction(loc, pcRelInsn |
+ ((totalDisp & 0x3ffff0000) << 16) |
+ (totalDisp & 0xffff));
+ write32(loc + rel.addend, NOP); // nop accessInsn.
+ break;
+ }
default:
llvm_unreachable("unexpected relocation type");
}
switch (rel.type) {
case R_PPC64_GOT_TLSGD16_HA:
- writeFromHalf16(loc, 0x60000000); // nop
+ writeFromHalf16(loc, NOP);
break;
case R_PPC64_GOT_TLSGD16:
case R_PPC64_GOT_TLSGD16_LO:
writeFromHalf16(loc, 0x3c6d0000); // addis r3, r13
relocateNoSym(loc, R_PPC64_TPREL16_HA, val);
break;
- case R_PPC64_TLSGD:
- write32(loc, 0x60000000); // nop
- write32(loc + 4, 0x38630000); // addi r3, r3
- // Since we are relocating a half16 type relocation and Loc + 4 points to
- // the start of an instruction we need to advance the buffer by an extra
- // 2 bytes on BE.
- relocateNoSym(loc + 4 + (config->ekind == ELF64BEKind ? 2 : 0),
- R_PPC64_TPREL16_LO, val);
+ case R_PPC64_GOT_TLSGD_PCREL34:
+ // Relax from paddi r3, 0, x@got@tlsgd@pcrel, 1 to
+ // paddi r3, r13, x@tprel, 0
+ writePrefixedInstruction(loc, 0x06000000386d0000);
+ relocateNoSym(loc, R_PPC64_TPREL34, val);
break;
+ case R_PPC64_TLSGD: {
+ // PC Relative Relaxation:
+ // Relax from bl __tls_get_addr@notoc(x@tlsgd) to
+ // nop
+ // TOC Relaxation:
+ // Relax from bl __tls_get_addr(x@tlsgd)
+ // nop
+ // to
+ // nop
+ // addi r3, r3, x@tprel@l
+ const uintptr_t locAsInt = reinterpret_cast<uintptr_t>(loc);
+ if (locAsInt % 4 == 0) {
+ write32(loc, NOP); // nop
+ write32(loc + 4, 0x38630000); // addi r3, r3
+ // Since we are relocating a half16 type relocation and Loc + 4 points to
+ // the start of an instruction we need to advance the buffer by an extra
+ // 2 bytes on BE.
+ relocateNoSym(loc + 4 + (config->ekind == ELF64BEKind ? 2 : 0),
+ R_PPC64_TPREL16_LO, val);
+ } else if (locAsInt % 4 == 1) {
+ write32(loc - 1, NOP);
+ } else {
+ errorOrWarn("R_PPC64_TLSGD has unexpected byte alignment");
+ }
+ break;
+ }
default:
llvm_unreachable("unsupported relocation for TLS GD to LE relaxation");
}
switch (rel.type) {
case R_PPC64_GOT_TLSLD16_HA:
- writeFromHalf16(loc, 0x60000000); // nop
+ writeFromHalf16(loc, NOP);
break;
case R_PPC64_GOT_TLSLD16_LO:
writeFromHalf16(loc, 0x3c6d0000); // addis r3, r13, 0
break;
- case R_PPC64_TLSLD:
- write32(loc, 0x60000000); // nop
- write32(loc + 4, 0x38631000); // addi r3, r3, 4096
+ case R_PPC64_GOT_TLSLD_PCREL34:
+ // Relax from paddi r3, 0, x1@got@tlsld@pcrel, 1 to
+ // paddi r3, r13, 0x1000, 0
+ writePrefixedInstruction(loc, 0x06000000386d1000);
break;
+ case R_PPC64_TLSLD: {
+ // PC Relative Relaxation:
+ // Relax from bl __tls_get_addr@notoc(x@tlsld)
+ // to
+ // nop
+ // TOC Relaxation:
+ // Relax from bl __tls_get_addr(x@tlsld)
+ // nop
+ // to
+ // nop
+ // addi r3, r3, 4096
+ const uintptr_t locAsInt = reinterpret_cast<uintptr_t>(loc);
+ if (locAsInt % 4 == 0) {
+ write32(loc, NOP);
+ write32(loc + 4, 0x38631000); // addi r3, r3, 4096
+ } else if (locAsInt % 4 == 1) {
+ write32(loc - 1, NOP);
+ } else {
+ errorOrWarn("R_PPC64_TLSLD has unexpected byte alignment");
+ }
+ break;
+ }
case R_PPC64_DTPREL16:
case R_PPC64_DTPREL16_HA:
case R_PPC64_DTPREL16_HI:
case R_PPC64_DTPREL16_DS:
case R_PPC64_DTPREL16_LO:
case R_PPC64_DTPREL16_LO_DS:
+ case R_PPC64_DTPREL34:
relocate(loc, rel, val);
break;
default:
unsigned offset = (config->ekind == ELF64BEKind) ? 2 : 0;
switch (rel.type) {
case R_PPC64_GOT_TPREL16_HA:
- write32(loc - offset, 0x60000000); // nop
+ write32(loc - offset, NOP);
break;
case R_PPC64_GOT_TPREL16_LO_DS:
case R_PPC64_GOT_TPREL16_DS: {
relocateNoSym(loc, R_PPC64_TPREL16_HA, val);
break;
}
+ case R_PPC64_GOT_TPREL_PCREL34: {
+ const uint64_t pldRT = readPrefixedInstruction(loc) & 0x0000000003e00000;
+ // paddi RT(from pld), r13, symbol@tprel, 0
+ writePrefixedInstruction(loc, 0x06000000380d0000 | pldRT);
+ relocateNoSym(loc, R_PPC64_TPREL34, val);
+ break;
+ }
case R_PPC64_TLS: {
- uint32_t primaryOp = getPrimaryOpCode(read32(loc));
- if (primaryOp != 31)
- error("unrecognized instruction for IE to LE R_PPC64_TLS");
- uint32_t secondaryOp = (read32(loc) & 0x000007FE) >> 1; // bits 21-30
- uint32_t dFormOp = getPPCDFormOp(secondaryOp);
- if (dFormOp == 0)
- error("unrecognized instruction for IE to LE R_PPC64_TLS");
- write32(loc, ((dFormOp << 26) | (read32(loc) & 0x03FFFFFF)));
- relocateNoSym(loc + offset, R_PPC64_TPREL16_LO, val);
+ const uintptr_t locAsInt = reinterpret_cast<uintptr_t>(loc);
+ if (locAsInt % 4 == 0) {
+ uint32_t primaryOp = getPrimaryOpCode(read32(loc));
+ if (primaryOp != 31)
+ error("unrecognized instruction for IE to LE R_PPC64_TLS");
+ uint32_t secondaryOp = (read32(loc) & 0x000007FE) >> 1; // bits 21-30
+ uint32_t dFormOp = getPPCDFormOp(secondaryOp);
+ if (dFormOp == 0)
+ error("unrecognized instruction for IE to LE R_PPC64_TLS");
+ write32(loc, ((dFormOp << 26) | (read32(loc) & 0x03FFFFFF)));
+ relocateNoSym(loc + offset, R_PPC64_TPREL16_LO, val);
+ } else if (locAsInt % 4 == 1) {
+ // If the offset is not 4 byte aligned then we have a PCRel type reloc.
+ // This version of the relocation is offset by one byte from the
+ // instruction it references.
+ uint32_t tlsInstr = read32(loc - 1);
+ uint32_t primaryOp = getPrimaryOpCode(tlsInstr);
+ if (primaryOp != 31)
+ errorOrWarn("unrecognized instruction for IE to LE R_PPC64_TLS");
+ uint32_t secondaryOp = (tlsInstr & 0x000007FE) >> 1; // bits 21-30
+ // The add is a special case and should be turned into a nop. The paddi
+ // that comes before it will already have computed the address of the
+ // symbol.
+ if (secondaryOp == 266) {
+ // Check if the add uses the same result register as the input register.
+ uint32_t rt = (tlsInstr & 0x03E00000) >> 21; // bits 6-10
+ uint32_t ra = (tlsInstr & 0x001F0000) >> 16; // bits 11-15
+ if (ra == rt) {
+ write32(loc - 1, NOP);
+ } else {
+ // mr rt, ra
+ write32(loc - 1, 0x7C000378 | (rt << 16) | (ra << 21) | (ra << 11));
+ }
+ } else {
+ uint32_t dFormOp = getPPCDFormOp(secondaryOp);
+ if (dFormOp == 0)
+ errorOrWarn("unrecognized instruction for IE to LE R_PPC64_TLS");
+ write32(loc - 1, ((dFormOp << 26) | (tlsInstr & 0x03FF0000)));
+ }
+ } else {
+ errorOrWarn("R_PPC64_TLS must be either 4 byte aligned or one byte "
+ "offset from 4 byte aligned");
+ }
break;
}
default:
case R_PPC64_ADDR16_DS:
case R_PPC64_ADDR16_HA:
case R_PPC64_ADDR16_HI:
+ case R_PPC64_ADDR16_HIGH:
case R_PPC64_ADDR16_HIGHER:
case R_PPC64_ADDR16_HIGHERA:
case R_PPC64_ADDR16_HIGHEST:
case R_PPC64_TOC16_LO:
return R_GOTREL;
case R_PPC64_GOT_PCREL34:
+ case R_PPC64_GOT_TPREL_PCREL34:
+ case R_PPC64_PCREL_OPT:
return R_GOT_PC;
case R_PPC64_TOC16_HA:
case R_PPC64_TOC16_LO_DS:
case R_PPC64_GOT_TLSGD16_HI:
case R_PPC64_GOT_TLSGD16_LO:
return R_TLSGD_GOT;
+ case R_PPC64_GOT_TLSGD_PCREL34:
+ return R_TLSGD_PC;
case R_PPC64_GOT_TLSLD16:
case R_PPC64_GOT_TLSLD16_HA:
case R_PPC64_GOT_TLSLD16_HI:
case R_PPC64_GOT_TLSLD16_LO:
return R_TLSLD_GOT;
+ case R_PPC64_GOT_TLSLD_PCREL34:
+ return R_TLSLD_PC;
case R_PPC64_GOT_TPREL16_HA:
case R_PPC64_GOT_TPREL16_LO_DS:
case R_PPC64_GOT_TPREL16_DS:
case R_PPC64_TPREL16_HIGHERA:
case R_PPC64_TPREL16_HIGHEST:
case R_PPC64_TPREL16_HIGHESTA:
- return R_TLS;
+ case R_PPC64_TPREL34:
+ return R_TPREL;
case R_PPC64_DTPREL16:
case R_PPC64_DTPREL16_DS:
case R_PPC64_DTPREL16_HA:
case R_PPC64_DTPREL16_LO:
case R_PPC64_DTPREL16_LO_DS:
case R_PPC64_DTPREL64:
+ case R_PPC64_DTPREL34:
return R_DTPREL;
case R_PPC64_TLSGD:
return R_TLSDESC_CALL;
case R_PPC64_REL16_HA:
case R_PPC64_TPREL16_HA:
if (config->tocOptimize && shouldTocOptimize && ha(val) == 0)
- writeFromHalf16(loc, 0x60000000);
- else
+ writeFromHalf16(loc, NOP);
+ else {
+ checkInt(loc, val + 0x8000, 32, rel);
write16(loc, ha(val));
+ }
break;
case R_PPC64_ADDR16_HI:
case R_PPC64_REL16_HI:
case R_PPC64_TPREL16_HI:
+ checkInt(loc, val, 32, rel);
+ write16(loc, hi(val));
+ break;
+ case R_PPC64_ADDR16_HIGH:
write16(loc, hi(val));
break;
case R_PPC64_ADDR16_HIGHER:
case R_PPC64_DTPREL64:
write64(loc, val - dynamicThreadPointerOffset);
break;
- case R_PPC64_PCREL34: {
+ case R_PPC64_DTPREL34:
+ // The Dynamic Thread Vector actually points 0x8000 bytes past the start
+ // of the TLS block. Therefore, in the case of R_PPC64_DTPREL34 we first
+    // need to subtract that value and then fall through to the general case.
+ val -= dynamicThreadPointerOffset;
+ LLVM_FALLTHROUGH;
+ case R_PPC64_PCREL34:
+ case R_PPC64_GOT_PCREL34:
+ case R_PPC64_GOT_TLSGD_PCREL34:
+ case R_PPC64_GOT_TLSLD_PCREL34:
+ case R_PPC64_GOT_TPREL_PCREL34:
+ case R_PPC64_TPREL34: {
const uint64_t si0Mask = 0x00000003ffff0000;
const uint64_t si1Mask = 0x000000000000ffff;
const uint64_t fullMask = 0x0003ffff0000ffff;
(val & si1Mask));
break;
}
- case R_PPC64_GOT_PCREL34: {
- const uint64_t si0Mask = 0x00000003ffff0000;
- const uint64_t si1Mask = 0x000000000000ffff;
- const uint64_t fullMask = 0x0003ffff0000ffff;
- checkInt(loc, val, 34, rel);
-
- uint64_t instr = readPrefixedInstruction(loc) & ~fullMask;
- writePrefixedInstruction(loc, instr | ((val & si0Mask) << 16) |
- (val & si1Mask));
+  // A PCREL_OPT relocation that we decided not to optimize; there is nothing
+  // to do here.
+ case R_PPC64_PCREL_OPT:
break;
- }
default:
llvm_unreachable("unknown relocation");
}
type != R_PPC64_REL24_NOTOC)
return false;
- // FIXME: Remove the fatal error once the call protocol is implemented.
- if (type == R_PPC64_REL24_NOTOC && s.isInPlt())
- fatal("unimplemented feature: external function call with the reltype"
- " R_PPC64_REL24_NOTOC");
-
// If a function is in the Plt it needs to be called with a call-stub.
if (s.isInPlt())
return true;
- // FIXME: Remove the fatal error once the call protocol is implemented.
- if (type == R_PPC64_REL24_NOTOC && (s.stOther >> 5) > 1)
- fatal("unimplemented feature: local function call with the reltype"
- " R_PPC64_REL24_NOTOC and the callee needs toc-pointer setup");
-
// This check looks at the st_other bits of the callee with relocation
// R_PPC64_REL14 or R_PPC64_REL24. If the value is 1, then the callee
// clobbers the TOC and we need an R2 save stub.
if (type != R_PPC64_REL24_NOTOC && (s.stOther >> 5) == 1)
return true;
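+  // For a call with R_PPC64_REL24_NOTOC, a callee whose st_other bits are
+  // greater than 1 expects the TOC pointer to be set up on entry, so a thunk
+  // is needed to perform that setup.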
+ if (type == R_PPC64_REL24_NOTOC && (s.stOther >> 5) > 1)
+ return true;
+
// If a symbol is a weak undefined and we are compiling an executable
// it doesn't need a range-extending thunk since it can't be called.
if (s.isUndefWeak() && !config->shared)
llvm_unreachable("unsupported relocation type used in branch");
}
-RelExpr PPC64::adjustRelaxExpr(RelType type, const uint8_t *data,
- RelExpr expr) const {
- if (expr == R_RELAX_TLS_GD_TO_IE)
+RelExpr PPC64::adjustTlsExpr(RelType type, RelExpr expr) const {
+ if (type != R_PPC64_GOT_TLSGD_PCREL34 && expr == R_RELAX_TLS_GD_TO_IE)
return R_RELAX_TLS_GD_TO_IE_GOT_OFF;
if (expr == R_RELAX_TLS_LD_TO_LE)
return R_RELAX_TLS_LD_TO_LE_ABS;
return expr;
}
+RelExpr PPC64::adjustGotPcExpr(RelType type, int64_t addend,
+ const uint8_t *loc) const {
+ if ((type == R_PPC64_GOT_PCREL34 || type == R_PPC64_PCREL_OPT) &&
+ config->pcRelOptimize) {
+ // It only makes sense to optimize pld since paddi means that the address
+ // of the object in the GOT is required rather than the object itself.
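+    // That is, "pld r, sym@got@pcrel" loads sym's address from its GOT entry
+    // and can become "paddi r, 0, sym@pcrel", while a paddi of the GOT entry's
+    // own address cannot be relaxed without changing the meaning.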
+ if ((readPrefixedInstruction(loc) & 0xfc000000) == 0xe4000000)
+ return R_PPC64_RELAX_GOT_PC;
+ }
+ return R_GOT_PC;
+}
+
// Reference: 3.7.4.1 of the 64-bit ELF V2 ABI supplement.
// The general dynamic code sequence for a global `x` uses 4 instructions.
// Instruction Relocation Symbol
relocateNoSym(loc, R_PPC64_GOT_TPREL16_LO_DS, val);
return;
}
- case R_PPC64_TLSGD:
- write32(loc, 0x60000000); // bl __tls_get_addr(sym@tlsgd) --> nop
- write32(loc + 4, 0x7c636A14); // nop --> add r3, r3, r13
+ case R_PPC64_GOT_TLSGD_PCREL34: {
+ // Relax from paddi r3, 0, sym@got@tlsgd@pcrel, 1 to
+ // pld r3, sym@got@tprel@pcrel
+ writePrefixedInstruction(loc, 0x04100000e4600000);
+ relocateNoSym(loc, R_PPC64_GOT_TPREL_PCREL34, val);
return;
+ }
+ case R_PPC64_TLSGD: {
+ // PC Relative Relaxation:
+ // Relax from bl __tls_get_addr@notoc(x@tlsgd) to
+ // nop
+ // TOC Relaxation:
+ // Relax from bl __tls_get_addr(x@tlsgd)
+ // nop
+ // to
+ // nop
+ // add r3, r3, r13
+ const uintptr_t locAsInt = reinterpret_cast<uintptr_t>(loc);
+ if (locAsInt % 4 == 0) {
+ write32(loc, NOP); // bl __tls_get_addr(sym@tlsgd) --> nop
+ write32(loc + 4, 0x7c636A14); // nop --> add r3, r3, r13
+ } else if (locAsInt % 4 == 1) {
+ // bl __tls_get_addr(sym@tlsgd) --> add r3, r3, r13
+ write32(loc - 1, 0x7c636a14);
+ } else {
+ errorOrWarn("R_PPC64_TLSGD has unexpected byte alignment");
+ }
+ return;
+ }
default:
llvm_unreachable("unsupported relocation for TLS GD to IE relaxation");
}
uint32_t secondInstr = read32(loc + 8);
if (!loImm && getPrimaryOpCode(secondInstr) == 14) {
loImm = secondInstr & 0xFFFF;
- } else if (secondInstr != 0x60000000) {
+ } else if (secondInstr != NOP) {
return false;
}
};
if (!checkRegOperands(firstInstr, 12, 1))
return false;
- if (secondInstr != 0x60000000 && !checkRegOperands(secondInstr, 12, 12))
+ if (secondInstr != NOP && !checkRegOperands(secondInstr, 12, 12))
return false;
int32_t stackFrameSize = (hiImm * 65536) + loImm;
if (hiImm) {
write32(loc + 4, 0x3D810000 | (uint16_t)hiImm);
// If the low immediate is zero the second instruction will be a nop.
- secondInstr = loImm ? 0x398C0000 | (uint16_t)loImm : 0x60000000;
+ secondInstr = loImm ? 0x398C0000 | (uint16_t)loImm : NOP;
write32(loc + 8, secondInstr);
} else {
// addi r12, r1, imm
write32(loc + 4, (0x39810000) | (uint16_t)loImm);
- write32(loc + 8, 0x60000000);
+ write32(loc + 8, NOP);
}
return true;
--- /dev/null
+#ifndef PCREL_OPT
+#error "Need to define function-style macro PCREL_OPT"
+#endif
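+// Each entry maps a legacy D/DS/DQ-form load or store to its prefixed,
+// PC-relative form together with the LegacyToPrefixMask applied when
+// converting the encoding (see getPCRelativeForm() and getInsnMask() in
+// PPC64.cpp).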
+PCREL_OPT(NOINSN, NOINSN, NOMASK);
+PCREL_OPT(LBZ, PLBZ, OPC_AND_RST);
+PCREL_OPT(LHZ, PLHZ, OPC_AND_RST);
+PCREL_OPT(LWZ, PLWZ, OPC_AND_RST);
+PCREL_OPT(LHA, PLHA, OPC_AND_RST);
+PCREL_OPT(LWA, PLWA, ONLY_RST);
+PCREL_OPT(LD, PLD, ONLY_RST);
+PCREL_OPT(LFS, PLFS, OPC_AND_RST);
+PCREL_OPT(LXSSP, PLXSSP, ONLY_RST);
+PCREL_OPT(LFD, PLFD, OPC_AND_RST);
+PCREL_OPT(LXSD, PLXSD, ONLY_RST);
+PCREL_OPT(LXV, PLXV, ST_STX28_TO5);
+PCREL_OPT(LXVP, PLXVP, OPC_AND_RST);
+
+PCREL_OPT(STB, PSTB, OPC_AND_RST);
+PCREL_OPT(STH, PSTH, OPC_AND_RST);
+PCREL_OPT(STW, PSTW, OPC_AND_RST);
+PCREL_OPT(STD, PSTD, ONLY_RST);
+PCREL_OPT(STFS, PSTFS, OPC_AND_RST);
+PCREL_OPT(STXSSP, PSTXSSP, ONLY_RST);
+PCREL_OPT(STFD, PSTFD, OPC_AND_RST);
+PCREL_OPT(STXSD, PSTXSD, ONLY_RST);
+PCREL_OPT(STXV, PSTXV, ST_STX28_TO5);
+PCREL_OPT(STXVP, PSTXVP, OPC_AND_RST);
public:
RISCV();
uint32_t calcEFlags() const override;
+ int64_t getImplicitAddend(const uint8_t *buf, RelType type) const override;
void writeGotHeader(uint8_t *buf) const override;
void writeGotPlt(uint8_t *buf, const Symbol &s) const override;
+ void writeIgotPlt(uint8_t *buf, const Symbol &s) const override;
void writePltHeader(uint8_t *buf) const override;
void writePlt(uint8_t *buf, const Symbol &sym,
uint64_t pltEntryAddr) const override;
static uint32_t getEFlags(InputFile *f) {
if (config->is64)
- return cast<ObjFile<ELF64LE>>(f)->getObj().getHeader()->e_flags;
- return cast<ObjFile<ELF32LE>>(f)->getObj().getHeader()->e_flags;
+ return cast<ObjFile<ELF64LE>>(f)->getObj().getHeader().e_flags;
+ return cast<ObjFile<ELF32LE>>(f)->getObj().getHeader().e_flags;
}
uint32_t RISCV::calcEFlags() const {
return target;
}
+int64_t RISCV::getImplicitAddend(const uint8_t *buf, RelType type) const {
+ switch (type) {
+ default:
+ internalLinkerError(getErrorLocation(buf),
+ "cannot read addend for relocation " + toString(type));
+ return 0;
+ case R_RISCV_32:
+ case R_RISCV_TLS_DTPMOD32:
+ case R_RISCV_TLS_DTPREL32:
+ return SignExtend64<32>(read32le(buf));
+ case R_RISCV_64:
+ return read64le(buf);
+ case R_RISCV_RELATIVE:
+ case R_RISCV_IRELATIVE:
+ return config->is64 ? read64le(buf) : read32le(buf);
+ case R_RISCV_NONE:
+ case R_RISCV_JUMP_SLOT:
+ // These relocations are defined as not having an implicit addend.
+ return 0;
+ }
+}
+
void RISCV::writeGotHeader(uint8_t *buf) const {
if (config->is64)
write64le(buf, mainPart->dynamic->getVA());
write32le(buf, in.plt->getVA());
}
+void RISCV::writeIgotPlt(uint8_t *buf, const Symbol &s) const {
+ if (config->writeAddends) {
+ if (config->is64)
+ write64le(buf, s.getVA());
+ else
+ write32le(buf, s.getVA());
+ }
+}
+
void RISCV::writePltHeader(uint8_t *buf) const {
// 1: auipc t2, %pcrel_hi(.got.plt)
// sub t1, t1, t3
case R_RISCV_TPREL_HI20:
case R_RISCV_TPREL_LO12_I:
case R_RISCV_TPREL_LO12_S:
- return R_TLS;
+ return R_TPREL;
case R_RISCV_RELAX:
case R_RISCV_TPREL_ADD:
return R_NONE;
return R_NONE;
case R_SPARC_TLS_LE_HIX22:
case R_SPARC_TLS_LE_LOX10:
- return R_TLS;
+ return R_TPREL;
default:
error(getErrorLocation(loc) + "unknown relocation (" + Twine(type) +
") against symbol " + toString(s));
void relocate(uint8_t *loc, const Relocation &rel,
uint64_t val) const override;
- RelExpr adjustRelaxExpr(RelType type, const uint8_t *data,
- RelExpr expr) const override;
+ RelExpr adjustTlsExpr(RelType type, RelExpr expr) const override;
void relaxTlsGdToIe(uint8_t *loc, const Relocation &rel,
uint64_t val) const override;
void relaxTlsGdToLe(uint8_t *loc, const Relocation &rel,
case R_386_GOTOFF:
return R_GOTPLTREL;
case R_386_TLS_LE:
- return R_TLS;
+ return R_TPREL;
case R_386_TLS_LE_32:
- return R_NEG_TLS;
+ return R_TPREL_NEG;
case R_386_NONE:
return R_NONE;
default:
}
}
-RelExpr X86::adjustRelaxExpr(RelType type, const uint8_t *data,
- RelExpr expr) const {
+RelExpr X86::adjustTlsExpr(RelType type, RelExpr expr) const {
switch (expr) {
default:
return expr;
case R_386_PC16:
return SignExtend64<16>(read16le(buf));
case R_386_32:
+ case R_386_GLOB_DAT:
case R_386_GOT32:
case R_386_GOT32X:
case R_386_GOTOFF:
case R_386_GOTPC:
+ case R_386_IRELATIVE:
case R_386_PC32:
case R_386_PLT32:
+ case R_386_RELATIVE:
+ case R_386_TLS_DTPMOD32:
+ case R_386_TLS_DTPOFF32:
case R_386_TLS_LDO_32:
+ case R_386_TLS_LDM:
+ case R_386_TLS_IE:
+ case R_386_TLS_IE_32:
case R_386_TLS_LE:
+ case R_386_TLS_LE_32:
+ case R_386_TLS_GD:
+ case R_386_TLS_GD_32:
+ case R_386_TLS_GOTIE:
+ case R_386_TLS_TPOFF:
+ case R_386_TLS_TPOFF32:
return SignExtend64<32>(read32le(buf));
+ case R_386_NONE:
+ case R_386_JUMP_SLOT:
+ // These relocations are defined as not having an implicit addend.
+ return 0;
default:
+ internalLinkerError(getErrorLocation(buf),
+ "cannot read addend for relocation " + toString(type));
return 0;
}
}
RelType getDynRel(RelType type) const override;
void writeGotPltHeader(uint8_t *buf) const override;
void writeGotPlt(uint8_t *buf, const Symbol &s) const override;
+ void writeIgotPlt(uint8_t *buf, const Symbol &s) const override;
void writePltHeader(uint8_t *buf) const override;
void writePlt(uint8_t *buf, const Symbol &sym,
uint64_t pltEntryAddr) const override;
void relocate(uint8_t *loc, const Relocation &rel,
uint64_t val) const override;
+ int64_t getImplicitAddend(const uint8_t *buf, RelType type) const override;
void applyJumpInstrMod(uint8_t *loc, JumpModType type,
unsigned size) const override;
- RelExpr adjustRelaxExpr(RelType type, const uint8_t *data,
- RelExpr expr) const override;
+ RelExpr adjustGotPcExpr(RelType type, int64_t addend,
+ const uint8_t *loc) const override;
void relaxGot(uint8_t *loc, const Relocation &rel,
uint64_t val) const override;
void relaxTlsGdToIe(uint8_t *loc, const Relocation &rel,
tlsGotRel = R_X86_64_TPOFF64;
tlsModuleIndexRel = R_X86_64_DTPMOD64;
tlsOffsetRel = R_X86_64_DTPOFF64;
+ gotEntrySize = 8;
pltHeaderSize = 16;
pltEntrySize = 16;
ipltEntrySize = 16;
case R_X86_64_DTPOFF64:
return R_DTPREL;
case R_X86_64_TPOFF32:
- return R_TLS;
+ return R_TPREL;
case R_X86_64_TLSDESC_CALL:
return R_TLSDESC_CALL;
case R_X86_64_TLSLD:
write64le(buf, s.getPltVA() + 6);
}
+void X86_64::writeIgotPlt(uint8_t *buf, const Symbol &s) const {
+  // An x86-64 entry is the address of the ifunc resolver function (for -z rel).
+ if (config->writeAddends)
+ write64le(buf, s.getVA());
+}
+
void X86_64::writePltHeader(uint8_t *buf) const {
const uint8_t pltData[] = {
0xff, 0x35, 0, 0, 0, 0, // pushq GOTPLT+8(%rip)
}
}
+int64_t X86_64::getImplicitAddend(const uint8_t *buf, RelType type) const {
+ switch (type) {
+ case R_X86_64_8:
+ case R_X86_64_PC8:
+ return SignExtend64<8>(*buf);
+ case R_X86_64_16:
+ case R_X86_64_PC16:
+ return SignExtend64<16>(read16le(buf));
+ case R_X86_64_32:
+ case R_X86_64_32S:
+ case R_X86_64_TPOFF32:
+ case R_X86_64_GOT32:
+ case R_X86_64_GOTPC32:
+ case R_X86_64_GOTPC32_TLSDESC:
+ case R_X86_64_GOTPCREL:
+ case R_X86_64_GOTPCRELX:
+ case R_X86_64_REX_GOTPCRELX:
+ case R_X86_64_PC32:
+ case R_X86_64_GOTTPOFF:
+ case R_X86_64_PLT32:
+ case R_X86_64_TLSGD:
+ case R_X86_64_TLSLD:
+ case R_X86_64_DTPOFF32:
+ case R_X86_64_SIZE32:
+ return SignExtend64<32>(read32le(buf));
+ case R_X86_64_64:
+ case R_X86_64_TPOFF64:
+ case R_X86_64_DTPOFF64:
+ case R_X86_64_DTPMOD64:
+ case R_X86_64_PC64:
+ case R_X86_64_SIZE64:
+ case R_X86_64_GLOB_DAT:
+ case R_X86_64_GOT64:
+ case R_X86_64_GOTOFF64:
+ case R_X86_64_GOTPC64:
+ case R_X86_64_IRELATIVE:
+ case R_X86_64_RELATIVE:
+ return read64le(buf);
+ case R_X86_64_JUMP_SLOT:
+ case R_X86_64_NONE:
+ // These relocations are defined as not having an implicit addend.
+ return 0;
+ default:
+ internalLinkerError(getErrorLocation(buf),
+ "cannot read addend for relocation " + toString(type));
+ return 0;
+ }
+}
+
void X86_64::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
switch (rel.type) {
case R_X86_64_8:
}
}
-RelExpr X86_64::adjustRelaxExpr(RelType type, const uint8_t *data,
- RelExpr relExpr) const {
- if (type != R_X86_64_GOTPCRELX && type != R_X86_64_REX_GOTPCRELX)
- return relExpr;
- const uint8_t op = data[-2];
- const uint8_t modRm = data[-1];
+RelExpr X86_64::adjustGotPcExpr(RelType type, int64_t addend,
+ const uint8_t *loc) const {
+ // Only R_X86_64_[REX_]GOTPCRELX can be relaxed. GNU as may emit GOTPCRELX
+ // with addend != -4. Such an instruction does not load the full GOT entry, so
+  // we cannot relax the relocation. E.g. movl x@GOTPCREL+4(%rip), %eax
+ // (addend=0) loads the high 32 bits of the GOT entry.
+ if ((type != R_X86_64_GOTPCRELX && type != R_X86_64_REX_GOTPCRELX) ||
+ addend != -4)
+ return R_GOT_PC;
+ const uint8_t op = loc[-2];
+ const uint8_t modRm = loc[-1];
// FIXME: When PIC is disabled and foo is defined locally in the
// lower 32 bit address space, memory operand in mov can be converted into
if (op == 0xff && (modRm == 0x15 || modRm == 0x25))
return R_RELAX_GOT_PC;
+ // We don't support test/binop instructions without a REX prefix.
+ if (type == R_X86_64_GOTPCRELX)
+ return R_GOT_PC;
+
// Relaxation of test, adc, add, and, cmp, or, sbb, sub, xor.
// If PIC then no relaxation is available.
- // We also don't relax test/binop instructions without REX byte,
- // they are 32bit operations and not common to have.
- assert(type == R_X86_64_REX_GOTPCRELX);
- return config->isPic ? relExpr : R_RELAX_GOT_PC_NOPIC;
+ return config->isPic ? R_GOT_PC : R_RELAX_GOT_PC_NOPIC;
}
// A subset of relaxations can only be applied for no-PIC. This method
write32le(loc, val);
}
-void X86_64::relaxGot(uint8_t *loc, const Relocation &, uint64_t val) const {
+void X86_64::relaxGot(uint8_t *loc, const Relocation &rel, uint64_t val) const {
+ checkInt(loc, val, 32, rel);
const uint8_t op = loc[-2];
const uint8_t modRm = loc[-1];
tablegen(LLVM Options.inc -gen-opt-parser-defs)
add_public_tablegen_target(ELFOptionsTableGen)
-if(NOT LLD_BUILT_STANDALONE)
- set(tablegen_deps intrinsics_gen)
-endif()
-
add_lld_library(lldELF
AArch64ErrataFix.cpp
Arch/AArch64.cpp
DEPENDS
ELFOptionsTableGen
- ${tablegen_deps}
+ intrinsics_gen
)
int next;
int prev;
- size_t size = 0;
+ uint64_t size;
uint64_t weight = 0;
uint64_t initialWeight = 0;
Edge bestPred = {-1, 0};
DenseMap<const InputSectionBase *, int> orderMap;
int curOrder = 1;
- for (int leader : sorted)
+ for (int leader : sorted) {
for (int i = leader;;) {
orderMap[sections[i]] = curOrder++;
i = clusters[i].next;
if (i == leader)
break;
}
-
+ }
if (!config->printSymbolOrder.empty()) {
std::error_code ec;
raw_fd_ostream os(config->printSymbolOrder, ec, sys::fs::OF_None);
namespace {
template <class RelTy> struct LLDRelocationResolver {
// In the ELF ABIs, S represents the value of the symbol in the relocation
- // entry. For Rela, the addend is stored as part of the relocation entry.
- static uint64_t resolve(object::RelocationRef ref, uint64_t s,
- uint64_t /* A */) {
- return s + ref.getRawDataRefImpl().p;
+ // entry. For Rela, the addend is stored as part of the relocation entry and
+ // is provided by the `findAux` method.
+  // In resolve() methods, the `type` and `offset` arguments are always 0,
+ // because we don't set an owning object for the `RelocationRef` instance that
+ // we create in `findAux()`.
+ static uint64_t resolve(uint64_t /*type*/, uint64_t /*offset*/, uint64_t s,
+ uint64_t /*locData*/, int64_t addend) {
+ return s + addend;
}
};
template <class ELFT> struct LLDRelocationResolver<Elf_Rel_Impl<ELFT, false>> {
- // For Rel, the addend A is supplied by the caller.
- static uint64_t resolve(object::RelocationRef /*Ref*/, uint64_t s,
- uint64_t a) {
- return s + a;
+ // For Rel, the addend is extracted from the relocated location and is
+ // supplied by the caller.
+ static uint64_t resolve(uint64_t /*type*/, uint64_t /*offset*/, uint64_t s,
+ uint64_t locData, int64_t /*addend*/) {
+ return s + locData;
}
};
} // namespace
class LinkerDriver {
public:
- void main(ArrayRef<const char *> args);
+ void linkerMain(ArrayRef<const char *> args);
void addFile(StringRef path, bool withLOption);
void addLibrary(StringRef name);
EhReader(InputSectionBase *s, ArrayRef<uint8_t> d) : isec(s), d(d) {}
size_t readEhRecordSize();
uint8_t getFdeEncoding();
+ bool hasLSDA();
private:
template <class P> void failOn(const P *loc, const Twine &msg) {
StringRef readString();
void skipLeb128();
void skipAugP();
+ StringRef getAugmentation();
InputSectionBase *isec;
ArrayRef<uint8_t> d;
return EhReader(p->sec, p->data()).getFdeEncoding();
}
-uint8_t EhReader::getFdeEncoding() {
+bool elf::hasLSDA(const EhSectionPiece &p) {
+ return EhReader(p.sec, p.data()).hasLSDA();
+}
+
+StringRef EhReader::getAugmentation() {
skipBytes(8);
int version = readByte();
if (version != 1 && version != 3)
readByte();
else
skipLeb128();
+ return aug;
+}
+uint8_t EhReader::getFdeEncoding() {
// We only care about an 'R' value, but other records may precede an 'R'
// record. Unfortunately records are not in TLV (type-length-value) format,
// so we need to teach the linker how to skip records for each type.
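+  // For example, with the common augmentation string "zPLR" we skip the 'z'
+  // augmentation data length, skip the 'P' personality pointer, read past the
+  // 'L' LSDA encoding byte, and finally return the 'R' pointer encoding.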
+ StringRef aug = getAugmentation();
for (char c : aug) {
if (c == 'R')
return readByte();
- if (c == 'z') {
+ if (c == 'z')
skipLeb128();
- continue;
- }
- if (c == 'P') {
- skipAugP();
- continue;
- }
- if (c == 'L') {
+ else if (c == 'L')
readByte();
- continue;
- }
- failOn(aug.data(), "unknown .eh_frame augmentation string: " + aug);
+ else if (c == 'P')
+ skipAugP();
+ else if (c != 'B' && c != 'S')
+ failOn(aug.data(), "unknown .eh_frame augmentation string: " + aug);
}
return DW_EH_PE_absptr;
}
+
+bool EhReader::hasLSDA() {
+ StringRef aug = getAugmentation();
+ for (char c : aug) {
+ if (c == 'L')
+ return true;
+ if (c == 'z')
+ skipLeb128();
+ else if (c == 'P')
+ skipAugP();
+ else if (c == 'R')
+ readByte();
+ else if (c != 'B' && c != 'S')
+ failOn(aug.data(), "unknown .eh_frame augmentation string: " + aug);
+ }
+ return false;
+}
size_t readEhRecordSize(InputSectionBase *s, size_t off);
uint8_t getFdeEncoding(EhSectionPiece *p);
+bool hasLSDA(const EhSectionPiece &p);
} // namespace elf
} // namespace lld
#include "ICF.h"
#include "Config.h"
+#include "EhFrame.h"
#include "LinkerScript.h"
#include "OutputSections.h"
#include "SymbolTable.h"
void run();
private:
- void segregate(size_t begin, size_t end, bool constant);
+ void segregate(size_t begin, size_t end, uint32_t eqClassBase, bool constant);
template <class RelTy>
bool constantEq(const InputSection *a, ArrayRef<RelTy> relsA,
// Split an equivalence class into smaller classes.
template <class ELFT>
-void ICF<ELFT>::segregate(size_t begin, size_t end, bool constant) {
+void ICF<ELFT>::segregate(size_t begin, size_t end, uint32_t eqClassBase,
+ bool constant) {
// This loop rearranges sections in [Begin, End) so that all sections
// that are equal in terms of equals{Constant,Variable} are contiguous
// in [Begin, End).
size_t mid = bound - sections.begin();
// Now we split [Begin, End) into [Begin, Mid) and [Mid, End) by
- // updating the sections in [Begin, Mid). We use Mid as an equivalence
- // class ID because every group ends with a unique index.
+ // updating the sections in [Begin, Mid). We use Mid as the basis for
+ // the equivalence class ID because every group ends with a unique index.
+ // Add this to eqClassBase to avoid equality with unique IDs.
for (size_t i = begin; i < mid; ++i)
- sections[i]->eqClass[next] = mid;
+ sections[i]->eqClass[next] = eqClassBase + mid;
// If we created a group, we need to iterate the main loop again.
if (mid != end)
continue;
auto *y = cast<InputSection>(db->section);
- // Ineligible sections are in the special equivalence class 0.
- // They can never be the same in terms of the equivalence class.
+  // Sections that are in the special equivalence class 0 can never be the
+ // same in terms of the equivalence class.
if (x->eqClass[current] == 0)
return false;
if (x->eqClass[current] != y->eqClass[current])
if (auto *relSec = dyn_cast_or_null<InputSection>(d->section))
hash += relSec->eqClass[cnt % 2];
}
- // Set MSB to 1 to avoid collisions with non-hash IDs.
+ // Set MSB to 1 to avoid collisions with unique IDs.
isec->eqClass[(cnt + 1) % 2] = hash | (1U << 31);
}
for (Symbol *sym : symtab->symbols())
sym->isPreemptible = computeIsPreemptible(*sym);
+ // Two text sections may have identical content and relocations but different
+ // LSDA, e.g. the two functions may have catch blocks of different types. If a
+ // text section is referenced by a .eh_frame FDE with LSDA, it is not
+ // eligible. This is implemented by iterating over CIE/FDE and setting
+ // eqClass[0] to the referenced text section from a live FDE.
+ //
+  // If two .gcc_except_table sections have identical semantics (usually
+  // identical content with PC-relative encoding), we will lose a folding
+  // opportunity.
+ uint32_t uniqueId = 0;
+ for (Partition &part : partitions)
+ part.ehFrame->iterateFDEWithLSDA<ELFT>(
+ [&](InputSection &s) { s.eqClass[0] = s.eqClass[1] = ++uniqueId; });
+
// Collect sections to merge.
for (InputSectionBase *sec : inputSections) {
auto *s = cast<InputSection>(sec);
- if (isEligible(s))
- sections.push_back(s);
+ if (s->eqClass[0] == 0) {
+ if (isEligible(s))
+ sections.push_back(s);
+ else
+ // Ineligible sections are assigned unique IDs, i.e. each section
+ // belongs to an equivalence class of its own.
+ s->eqClass[0] = s->eqClass[1] = ++uniqueId;
+ }
}
// Initially, we use hash values to partition sections.
- parallelForEach(
- sections, [&](InputSection *s) { s->eqClass[0] = xxHash64(s->data()); });
+ parallelForEach(sections, [&](InputSection *s) {
+ // Set MSB to 1 to avoid collisions with unique IDs.
+ s->eqClass[0] = xxHash64(s->data()) | (1U << 31);
+ });
+ // Perform 2 rounds of relocation hash propagation. 2 is an empirical value to
+  // reduce the average size of equivalence classes, so that segregate(), which
+  // has a large time complexity, will have less work to do.
for (unsigned cnt = 0; cnt != 2; ++cnt) {
parallelForEach(sections, [&](InputSection *s) {
if (s->areRelocsRela)
return a->eqClass[0] < b->eqClass[0];
});
- // Compare static contents and assign unique IDs for each static content.
- forEachClass([&](size_t begin, size_t end) { segregate(begin, end, true); });
+ // Compare static contents and assign unique equivalence class IDs for each
+ // static content. Use a base offset for these IDs to ensure no overlap with
+ // the unique IDs already assigned.
+ uint32_t eqClassBase = ++uniqueId;
+ forEachClass([&](size_t begin, size_t end) {
+ segregate(begin, end, eqClassBase, true);
+ });
// Split groups by comparing relocations until convergence is obtained.
do {
repeat = false;
- forEachClass(
- [&](size_t begin, size_t end) { segregate(begin, end, false); });
+ forEachClass([&](size_t begin, size_t end) {
+ segregate(begin, end, eqClassBase, false);
+ });
} while (repeat);
log("ICF needed " + Twine(cnt) + " iterations");
#include "llvm/Support/ARMBuildAttributes.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Path.h"
+#include "llvm/Support/RISCVAttributeParser.h"
#include "llvm/Support/TarWriter.h"
#include "llvm/Support/raw_ostream.h"
}
Optional<MemoryBufferRef> elf::readFile(StringRef path) {
+ llvm::TimeTraceScope timeScope("Load input files", path);
+
// The --chroot option changes our virtual root directory.
// This is useful when you are dealing with files created by --reproduce.
if (!config->chroot.empty() && path.startswith("/"))
path = saver.save(config->chroot + path);
log(path);
+ config->dependencyFiles.insert(llvm::CachedHashString(path));
- auto mbOrErr = MemoryBuffer::getFile(path, -1, false);
+ auto mbOrErr = MemoryBuffer::getFile(path, /*IsText=*/false,
+ /*RequiresNullTerminator=*/false);
if (auto ec = mbOrErr.getError()) {
error("cannot open " + path + ": " + ec.message());
return None;
}
}
+StringRef InputFile::getNameForScript() const {
+ if (archiveName.empty())
+ return getName();
+
+ if (nameForScriptCache.empty())
+ nameForScriptCache = (archiveName + Twine(':') + getName()).str();
+
+ return nameForScriptCache;
+}
+
template <class ELFT> DWARFCache *ObjFile<ELFT>::getDwarf() {
llvm::call_once(initDwarf, [this]() {
dwarf = std::make_unique<DWARFCache>(std::make_unique<DWARFContext>(
// Initialize trivial attributes.
const ELFFile<ELFT> &obj = getObj<ELFT>();
- emachine = obj.getHeader()->e_machine;
- osabi = obj.getHeader()->e_ident[llvm::ELF::EI_OSABI];
- abiVersion = obj.getHeader()->e_ident[llvm::ELF::EI_ABIVERSION];
+ emachine = obj.getHeader().e_machine;
+ osabi = obj.getHeader().e_ident[llvm::ELF::EI_OSABI];
+ abiVersion = obj.getHeader().e_ident[llvm::ELF::EI_ABIVERSION];
ArrayRef<Elf_Shdr> sections = CHECK(obj.sections(), this);
template <class ELFT>
uint32_t ObjFile<ELFT>::getSectionIndex(const Elf_Sym &sym) const {
return CHECK(
- this->getObj().getSectionIndex(&sym, getELFSyms<ELFT>(), shndxTable),
+ this->getObj().getSectionIndex(sym, getELFSyms<ELFT>(), shndxTable),
this);
}
const Elf_Shdr &sec = objSections[i];
if (sec.sh_type == ELF::SHT_LLVM_CALL_GRAPH_PROFILE)
- cgProfile =
- check(obj.template getSectionContentsAsArray<Elf_CGProfile>(&sec));
+ cgProfileSectionIndex = i;
// SHF_EXCLUDE'ed sections are discarded by the linker. However,
// if -r is given, we'll let the final link discard such sections.
if (sec.sh_link != 0)
this->addrsigSec = &sec;
else if (config->icf == ICFLevel::Safe)
- warn(toString(this) + ": --icf=safe is incompatible with object "
- "files created using objcopy or ld -r");
+ warn(toString(this) +
+ ": --icf=safe conservatively ignores "
+ "SHT_LLVM_ADDRSIG [index " +
+ Twine(i) +
+ "] with sh_link=0 "
+ "(likely created using objcopy or ld -r)");
}
this->sections[i] = &InputSection::discarded;
continue;
StringRef signature = getShtGroupSignature(objSections, sec);
this->sections[i] = &InputSection::discarded;
-
ArrayRef<Elf_Word> entries =
- CHECK(obj.template getSectionContentsAsArray<Elf_Word>(&sec), this);
+ CHECK(obj.template getSectionContentsAsArray<Elf_Word>(sec), this);
if (entries.empty())
fatal(toString(this) + ": empty SHT_GROUP");
- // The first word of a SHT_GROUP section contains flags. Currently,
- // the standard defines only "GRP_COMDAT" flag for the COMDAT group.
- // An group with the empty flag doesn't define anything; such sections
- // are just skipped.
- if (entries[0] == 0)
- continue;
-
- if (entries[0] != GRP_COMDAT)
+ Elf_Word flag = entries[0];
+ if (flag && flag != GRP_COMDAT)
fatal(toString(this) + ": unsupported SHT_GROUP format");
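+    // The first word of a SHT_GROUP section holds flags. GRP_COMDAT is the
+    // only flag defined by the ELF spec; a group without it does not take
+    // part in COMDAT deduplication and is always kept.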
- bool isNew =
- ignoreComdats ||
+ bool keepGroup =
+ (flag & GRP_COMDAT) == 0 || ignoreComdats ||
symtab->comdatGroups.try_emplace(CachedHashStringRef(signature), this)
.second;
- if (isNew) {
+ if (keepGroup) {
if (config->relocatable)
this->sections[i] = createInputSection(sec);
selectedGroups.push_back(entries);
if (sec.sh_type == SHT_REL || sec.sh_type == SHT_RELA)
this->sections[i] = createInputSection(sec);
- if (!(sec.sh_flags & SHF_LINK_ORDER))
+ // A SHF_LINK_ORDER section with sh_link=0 is handled as if it did not have
+ // the flag.
+ if (!(sec.sh_flags & SHF_LINK_ORDER) || !sec.sh_link)
continue;
- // .ARM.exidx sections have a reverse dependency on the InputSection they
- // have a SHF_LINK_ORDER dependency, this is identified by the sh_link.
InputSectionBase *linkSec = nullptr;
if (sec.sh_link < this->sections.size())
linkSec = this->sections[sec.sh_link];
if (!linkSec)
fatal(toString(this) + ": invalid sh_link index: " + Twine(sec.sh_link));
+ // A SHF_LINK_ORDER section is discarded if its linked-to section is
+ // discarded.
InputSection *isec = cast<InputSection>(this->sections[i]);
linkSec->dependentSections.push_back(isec);
if (!isa<InputSection>(linkSec))
// of zero or more type-length-value fields. We want to find a field of a
// certain type. It seems a bit too much to just store a 32-bit value; perhaps
// the ABI is unnecessarily complicated.
-template <class ELFT>
-static uint32_t readAndFeatures(ObjFile<ELFT> *obj, ArrayRef<uint8_t> data) {
+template <class ELFT> static uint32_t readAndFeatures(const InputSection &sec) {
using Elf_Nhdr = typename ELFT::Nhdr;
using Elf_Note = typename ELFT::Note;
uint32_t featuresSet = 0;
+ ArrayRef<uint8_t> data = sec.data();
+ auto reportFatal = [&](const uint8_t *place, const char *msg) {
+ fatal(toString(sec.file) + ":(" + sec.name + "+0x" +
+ Twine::utohexstr(place - sec.data().data()) + "): " + msg);
+ };
while (!data.empty()) {
// Read one NOTE record.
- if (data.size() < sizeof(Elf_Nhdr))
- fatal(toString(obj) + ": .note.gnu.property: section too short");
-
auto *nhdr = reinterpret_cast<const Elf_Nhdr *>(data.data());
- if (data.size() < nhdr->getSize())
- fatal(toString(obj) + ": .note.gnu.property: section too short");
+ if (data.size() < sizeof(Elf_Nhdr) || data.size() < nhdr->getSize())
+ reportFatal(data.data(), "data is too short");
Elf_Note note(*nhdr);
if (nhdr->n_type != NT_GNU_PROPERTY_TYPE_0 || note.getName() != "GNU") {
// Read a body of a NOTE record, which consists of type-length-value fields.
ArrayRef<uint8_t> desc = note.getDesc();
while (!desc.empty()) {
+ const uint8_t *place = desc.data();
if (desc.size() < 8)
- fatal(toString(obj) + ": .note.gnu.property: section too short");
-
- uint32_t type = read32le(desc.data());
- uint32_t size = read32le(desc.data() + 4);
+ reportFatal(place, "program property is too short");
+ uint32_t type = read32<ELFT::TargetEndianness>(desc.data());
+ uint32_t size = read32<ELFT::TargetEndianness>(desc.data() + 4);
+ desc = desc.slice(8);
+ if (desc.size() < size)
+ reportFatal(place, "program property is too short");
if (type == featureAndType) {
// We found a FEATURE_1_AND field. There may be more than one of these
// in a .note.gnu.property section, for a relocatable object we
// accumulate the bits set.
- featuresSet |= read32le(desc.data() + 8);
+ if (size < 4)
+ reportFatal(place, "FEATURE_1_AND entry is too short");
+ featuresSet |= read32<ELFT::TargetEndianness>(desc.data());
}
- // On 64-bit, a payload may be followed by a 4-byte padding to make its
- // size a multiple of 8.
- if (ELFT::Is64Bits)
- size = alignTo(size, 8);
-
- desc = desc.slice(size + 8); // +8 for Type and Size
+ // Padding is present in the note descriptor, if necessary.
+ desc = desc.slice(alignTo<(ELFT::Is64Bits ? 8 : 4)>(size));
}
// Go to next NOTE record to look for more FEATURE_1_AND descriptions.
InputSectionBase *ObjFile<ELFT>::createInputSection(const Elf_Shdr &sec) {
StringRef name = getSectionName(sec);
- switch (sec.sh_type) {
- case SHT_ARM_ATTRIBUTES: {
- if (config->emachine != EM_ARM)
- break;
+ if (config->emachine == EM_ARM && sec.sh_type == SHT_ARM_ATTRIBUTES) {
ARMAttributeParser attributes;
- ArrayRef<uint8_t> contents = check(this->getObj().getSectionContents(&sec));
+ ArrayRef<uint8_t> contents = check(this->getObj().getSectionContents(sec));
if (Error e = attributes.parse(contents, config->ekind == ELF32LEKind
? support::little
: support::big)) {
auto *isec = make<InputSection>(*this, sec, name);
warn(toString(isec) + ": " + llvm::toString(std::move(e)));
- break;
+ } else {
+ updateSupportedARMFeatures(attributes);
+ updateARMVFPArgs(attributes, this);
+
+ // FIXME: Retain the first attribute section we see. The eglibc ARM
+ // dynamic loaders require the presence of an attribute section for dlopen
+ // to work. In a full implementation we would merge all attribute
+ // sections.
+ if (in.attributes == nullptr) {
+ in.attributes = make<InputSection>(*this, sec, name);
+ return in.attributes;
+ }
+ return &InputSection::discarded;
}
- updateSupportedARMFeatures(attributes);
- updateARMVFPArgs(attributes, this);
-
- // FIXME: Retain the first attribute section we see. The eglibc ARM
- // dynamic loaders require the presence of an attribute section for dlopen
- // to work. In a full implementation we would merge all attribute sections.
- if (in.armAttributes == nullptr) {
- in.armAttributes = make<InputSection>(*this, sec, name);
- return in.armAttributes;
+ }
+
+ if (config->emachine == EM_RISCV && sec.sh_type == SHT_RISCV_ATTRIBUTES) {
+ RISCVAttributeParser attributes;
+ ArrayRef<uint8_t> contents = check(this->getObj().getSectionContents(sec));
+ if (Error e = attributes.parse(contents, support::little)) {
+ auto *isec = make<InputSection>(*this, sec, name);
+ warn(toString(isec) + ": " + llvm::toString(std::move(e)));
+ } else {
+ // FIXME: Validate arch tag contains C if and only if EF_RISCV_RVC is
+ // present.
+
+ // FIXME: Retain the first attribute section we see. Tools such as
+ // llvm-objdump make use of the attribute section to determine which
+ // standard extensions to enable. In a full implementation we would merge
+ // all attribute sections.
+ if (in.attributes == nullptr) {
+ in.attributes = make<InputSection>(*this, sec, name);
+ return in.attributes;
+ }
+ return &InputSection::discarded;
}
- return &InputSection::discarded;
}
+
+ switch (sec.sh_type) {
case SHT_LLVM_DEPENDENT_LIBRARIES: {
if (config->relocatable)
break;
ArrayRef<char> data =
- CHECK(this->getObj().template getSectionContentsAsArray<char>(&sec), this);
+ CHECK(this->getObj().template getSectionContentsAsArray<char>(sec), this);
if (!data.empty() && data.back() != '\0') {
error(toString(this) +
": corrupted dependent libraries section (unterminated string): " +
this->sections[sec.sh_info] = target;
}
- // This section contains relocation information.
- // If -r is given, we do not interpret or apply relocation
- // but just copy relocation sections to output.
- if (config->relocatable) {
- InputSection *relocSec = make<InputSection>(*this, sec, name);
- // We want to add a dependency to target, similar like we do for
- // -emit-relocs below. This is useful for the case when linker script
- // contains the "/DISCARD/". It is perhaps uncommon to use a script with
- // -r, but we faced it in the Linux kernel and have to handle such case
- // and not to crash.
- target->dependentSections.push_back(relocSec);
- return relocSec;
- }
-
if (target->firstRelocation)
fatal(toString(this) +
": multiple relocation sections to one section are not supported");
if (sec.sh_type == SHT_RELA) {
- ArrayRef<Elf_Rela> rels = CHECK(getObj().relas(&sec), this);
+ ArrayRef<Elf_Rela> rels = CHECK(getObj().relas(sec), this);
target->firstRelocation = rels.begin();
target->numRelocations = rels.size();
target->areRelocsRela = true;
} else {
- ArrayRef<Elf_Rel> rels = CHECK(getObj().rels(&sec), this);
+ ArrayRef<Elf_Rel> rels = CHECK(getObj().rels(sec), this);
target->firstRelocation = rels.begin();
target->numRelocations = rels.size();
target->areRelocsRela = false;
}
assert(isUInt<31>(target->numRelocations));
- // Relocation sections processed by the linker are usually removed
- // from the output, so returning `nullptr` for the normal case.
- // However, if -emit-relocs is given, we need to leave them in the output.
- // (Some post link analysis tools need this information.)
- if (config->emitRelocs) {
- InputSection *relocSec = make<InputSection>(*this, sec, name);
- // We will not emit relocation section if target was discarded.
- target->dependentSections.push_back(relocSec);
- return relocSec;
- }
- return nullptr;
+ // Relocation sections are usually removed from the output, so return
+ // `nullptr` for the normal case. However, if -r or --emit-relocs is
+ // specified, we need to copy them to the output. (Some post-link analysis
+ // tools use --emit-relocs to obtain this information.)
+ if (!config->relocatable && !config->emitRelocs)
+ return nullptr;
+ InputSection *relocSec = make<InputSection>(*this, sec, name);
+ // If the relocated section is discarded (due to /DISCARD/ or
+ // --gc-sections), the relocation section should be discarded as well.
+ target->dependentSections.push_back(relocSec);
+ return relocSec;
}
}
// .note.gnu.property containing a single AND'ed bitmap, we discard an input
// file's .note.gnu.property section.
if (name == ".note.gnu.property") {
- ArrayRef<uint8_t> contents = check(this->getObj().getSectionContents(&sec));
- this->andFeatures = readAndFeatures(this, contents);
+ this->andFeatures = readAndFeatures<ELFT>(InputSection(*this, sec, name));
return &InputSection::discarded;
}
template <class ELFT>
StringRef ObjFile<ELFT>::getSectionName(const Elf_Shdr &sec) {
- return CHECK(getObj().getSectionName(&sec, sectionStringTable), this);
+ return CHECK(getObj().getSectionName(sec, sectionStringTable), this);
}
// Initialize this->Symbols. this->Symbols is a parallel array as
}
// Symbol resolution of non-local symbols.
+ SmallVector<unsigned, 32> undefineds;
for (size_t i = firstGlobal, end = eSyms.size(); i != end; ++i) {
const Elf_Sym &eSym = eSyms[i];
uint8_t binding = eSym.getBinding();
// Handle global undefined symbols.
if (eSym.st_shndx == SHN_UNDEF) {
- this->symbols[i]->resolve(Undefined{this, name, binding, stOther, type});
- this->symbols[i]->referenced = true;
+ undefineds.push_back(i);
continue;
}
fatal(toString(this) + ": unexpected binding: " + Twine((int)binding));
}
+
+ // Undefined symbols (excluding those defined relative to non-prevailing
+ // sections) can trigger recursive fetch. Process defined symbols first so
+ // that the relative order between a defined symbol and an undefined symbol
+ // does not change the symbol resolution behavior. In addition, a set of
+ // interconnected symbols will all be resolved to the same file, instead of
+ // being resolved to different files.
+ for (unsigned i : undefineds) {
+ const Elf_Sym &eSym = eSyms[i];
+ StringRefZ name = this->stringTable.data() + eSym.st_name;
+ this->symbols[i]->resolve(Undefined{this, name, eSym.getBinding(),
+ eSym.st_other, eSym.getType()});
+ this->symbols[i]->referenced = true;
+ }
}
ArchiveFile::ArchiveFile(std::unique_ptr<Archive> &&file)
parseFile(file);
}
+// The handling of tentative definitions (COMMON symbols) in archives is murky.
+// A tentative definition will be promoted to a global definition if there are
+// no non-tentative definitions to dominate it. When we hold a tentative
+// definition of a symbol and are inspecting archive members for inclusion,
+// there are two ways we can proceed:
+//
+// 1) Consider the tentative definition a 'real' definition (i.e. promotion from
+// tentative to real definition has already happened) and not inspect
+// archive members for Global/Weak definitions to replace the tentative
+// definition. An archive member would only be included if it satisfies some
+// other undefined symbol. This is the behavior Gold uses.
+//
+// 2) Consider the tentative definition as still undefined (i.e. the promotion to
+// a real definition happens only after all symbol resolution is done).
+// The linker searches archive members for STB_GLOBAL definitions to
+// replace the tentative definition with. This is the behavior used by
+// GNU ld.
+//
+// The second behavior is inherited from SysVR4, which based it on the FORTRAN
+// COMMON BLOCK model. This behavior is needed for proper initialization in old
+// (pre F90) FORTRAN code that is packaged into an archive.
+//
+// The following functions search archive members for definitions to replace
+// tentative definitions (implementing behavior 2).
+static bool isBitcodeNonCommonDef(MemoryBufferRef mb, StringRef symName,
+ StringRef archiveName) {
+ IRSymtabFile symtabFile = check(readIRSymtab(mb));
+ for (const irsymtab::Reader::SymbolRef &sym :
+ symtabFile.TheReader.symbols()) {
+ if (sym.isGlobal() && sym.getName() == symName)
+ return !sym.isUndefined() && !sym.isWeak() && !sym.isCommon();
+ }
+ return false;
+}
+
+template <class ELFT>
+static bool isNonCommonDef(MemoryBufferRef mb, StringRef symName,
+ StringRef archiveName) {
+ ObjFile<ELFT> *obj = make<ObjFile<ELFT>>(mb, archiveName);
+ StringRef stringtable = obj->getStringTable();
+
+ for (auto sym : obj->template getGlobalELFSyms<ELFT>()) {
+ Expected<StringRef> name = sym.getName(stringtable);
+ if (name && name.get() == symName)
+ return sym.isDefined() && sym.getBinding() == STB_GLOBAL &&
+ !sym.isCommon();
+ }
+ return false;
+}
+
+static bool isNonCommonDef(MemoryBufferRef mb, StringRef symName,
+ StringRef archiveName) {
+ switch (getELFKind(mb, archiveName)) {
+ case ELF32LEKind:
+ return isNonCommonDef<ELF32LE>(mb, symName, archiveName);
+ case ELF32BEKind:
+ return isNonCommonDef<ELF32BE>(mb, symName, archiveName);
+ case ELF64LEKind:
+ return isNonCommonDef<ELF64LE>(mb, symName, archiveName);
+ case ELF64BEKind:
+ return isNonCommonDef<ELF64BE>(mb, symName, archiveName);
+ default:
+ llvm_unreachable("getELFKind");
+ }
+}
+
+bool ArchiveFile::shouldFetchForCommon(const Archive::Symbol &sym) {
+ Archive::Child c =
+ CHECK(sym.getMember(), toString(this) +
+ ": could not get the member for symbol " +
+ toELFString(sym));
+ MemoryBufferRef mb =
+ CHECK(c.getMemoryBufferRef(),
+ toString(this) +
+ ": could not get the buffer for the member defining symbol " +
+ toELFString(sym));
+
+ if (isBitcode(mb))
+ return isBitcodeNonCommonDef(mb, sym.getName(), getName());
+
+ return isNonCommonDef(mb, sym.getName(), getName());
+}
+
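As an illustration of behavior 2 implemented by shouldFetchForCommon above (a hypothetical scenario, not taken from a test):

  // main.o (C source built with -fcommon): `int counter;` is a tentative
  // definition and becomes a COMMON symbol.
  // libinit.a(init.o): `int counter = 42;` is a non-common STB_GLOBAL
  // definition.
  // With behavior 2 (GNU ld and the code above), init.o is fetched so that
  // `counter` resolves to the initialized definition; with behavior 1 (gold),
  // the member would only be fetched to satisfy some other undefined symbol.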
size_t ArchiveFile::getMemberCount() const {
size_t count = 0;
Error err = Error::success();
if (!sec)
return {};
std::vector<uint32_t> verneeds;
- ArrayRef<uint8_t> data = CHECK(obj.getSectionContents(sec), this);
+ ArrayRef<uint8_t> data = CHECK(obj.getSectionContents(*sec), this);
const uint8_t *verneedBuf = data.begin();
for (unsigned i = 0; i != sec->sh_info; ++i) {
if (verneedBuf + sizeof(typename ELFT::Verneed) > data.end())
continue;
case SHT_DYNAMIC:
dynamicTags =
- CHECK(obj.template getSectionContentsAsArray<Elf_Dyn>(&sec), this);
+ CHECK(obj.template getSectionContentsAsArray<Elf_Dyn>(sec), this);
break;
case SHT_GNU_versym:
versymSec = &sec;
std::vector<uint16_t> versyms(size, VER_NDX_GLOBAL);
if (versymSec) {
ArrayRef<Elf_Versym> versym =
- CHECK(obj.template getSectionContentsAsArray<Elf_Versym>(versymSec),
+ CHECK(obj.template getSectionContentsAsArray<Elf_Versym>(*versymSec),
this)
.slice(firstGlobal);
for (size_t i = 0; i < size; ++i)
Symbol *s = symtab->addSymbol(
Undefined{this, name, sym.getBinding(), sym.st_other, sym.getType()});
s->exportDynamic = true;
+ if (s->isUndefined() && !s->isWeak() &&
+ config->unresolvedSymbolsInShlib != UnresolvedPolicy::Ignore)
+ requiredSymbols.push_back(s);
continue;
}
return t.isArch64Bit() ? ELF64BEKind : ELF32BEKind;
}
-static uint8_t getBitcodeMachineKind(StringRef path, const Triple &t) {
+static uint16_t getBitcodeMachineKind(StringRef path, const Triple &t) {
switch (t.getArch()) {
case Triple::aarch64:
+ case Triple::aarch64_be:
return EM_AARCH64;
case Triple::amdgcn:
case Triple::r600:
case Triple::msp430:
return EM_MSP430;
case Triple::ppc:
+ case Triple::ppcle:
return EM_PPC;
case Triple::ppc64:
case Triple::ppc64le:
}
}
+static uint8_t getOsAbi(const Triple &t) {
+ switch (t.getOS()) {
+ case Triple::AMDHSA:
+ return ELF::ELFOSABI_AMDGPU_HSA;
+ case Triple::AMDPAL:
+ return ELF::ELFOSABI_AMDGPU_PAL;
+ case Triple::Mesa3D:
+ return ELF::ELFOSABI_AMDGPU_MESA3D;
+ default:
+ return ELF::ELFOSABI_NONE;
+ }
+}
+
BitcodeFile::BitcodeFile(MemoryBufferRef mb, StringRef archiveName,
uint64_t offsetInArchive)
: InputFile(BitcodeKind, mb) {
Triple t(obj->getTargetTriple());
ekind = getBitcodeELFKind(t);
emachine = getBitcodeMachineKind(mb.getBufferIdentifier(), t);
+ osabi = getOsAbi(t);
}
static uint8_t mapVisibility(GlobalValue::VisibilityTypes gvVisibility) {
template <class ELFT> void BitcodeFile::parse() {
std::vector<bool> keptComdats;
- for (StringRef s : obj->getComdatTable())
+ for (std::pair<StringRef, Comdat::SelectionKind> s : obj->getComdatTable()) {
keptComdats.push_back(
- symtab->comdatGroups.try_emplace(CachedHashStringRef(s), this).second);
+ s.second == Comdat::NoDeduplicate ||
+ symtab->comdatGroups.try_emplace(CachedHashStringRef(s.first), this)
+ .second);
+ }
for (const lto::InputFile::Symbol &objSym : obj->symbols())
symbols.push_back(createBitcodeSymbol<ELFT>(keptComdats, objSym, *this));
}
}
+bool LazyObjFile::shouldFetchForCommon(const StringRef &name) {
+ if (isBitcode(mb))
+ return isBitcodeNonCommonDef(mb, name, archiveName);
+
+ return isNonCommonDef(mb, name, archiveName);
+}
+
std::string elf::replaceThinLTOSuffix(StringRef path) {
StringRef suffix = config->thinLTOObjectSuffixReplace.first;
StringRef repl = config->thinLTOObjectSuffixReplace.second;
return symbols;
}
- // Filename of .a which contained this file. If this file was
- // not in an archive file, it is the empty string. We use this
- // string for creating error messages.
+ // Get filename to use for linker script processing.
+ StringRef getNameForScript() const;
+
+ // If not empty, this stores the name of the archive containing this file.
+ // We use this string for creating error messages.
std::string archiveName;
// If this is an architecture-specific file, the following members
// [.got, .got + 0xFFFC].
bool ppc64SmallCodeModelTocRelocs = false;
+ // True if the file has TLSGD/TLSLD GOT relocations without R_PPC64_TLSGD or
+ // R_PPC64_TLSLD. Disable TLS relaxation to avoid bad code generation.
+ bool ppc64DisableTLSRelax = false;
+
// groupId is used for --warn-backrefs which is an optional error
// checking feature. All files within the same --{start,end}-group or
// --{start,end}-lib get the same group ID. Otherwise, each file gets a new
private:
const Kind fileKind;
+
+ // Cache for getNameForScript().
+ mutable std::string nameForScriptCache;
};
class ELFFileBase : public InputFile {
// .o file.
template <class ELFT> class ObjFile : public ELFFileBase {
- using Elf_Rel = typename ELFT::Rel;
- using Elf_Rela = typename ELFT::Rela;
- using Elf_Sym = typename ELFT::Sym;
- using Elf_Shdr = typename ELFT::Shdr;
- using Elf_Word = typename ELFT::Word;
- using Elf_CGProfile = typename ELFT::CGProfile;
+ LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)
public:
static bool classof(const InputFile *f) { return f->kind() == ObjKind; }
// Pointer to this input file's .llvm_addrsig section, if it has one.
const Elf_Shdr *addrsigSec = nullptr;
- // SHT_LLVM_CALL_GRAPH_PROFILE table
- ArrayRef<Elf_CGProfile> cgProfile;
+ // SHT_LLVM_CALL_GRAPH_PROFILE section index.
+ uint32_t cgProfileSectionIndex = 0;
// Get cached DWARF information.
DWARFCache *getDwarf();
template <class ELFT> void parse();
void fetch();
+ // Check if a non-common symbol should be fetched to override a common
+ // definition.
+ bool shouldFetchForCommon(const StringRef &name);
+
bool fetched = false;
private:
// more than once.)
void fetch(const Archive::Symbol &sym);
+ // Check if a non-common symbol should be fetched to override a common
+ // definition.
+ bool shouldFetchForCommon(const Archive::Symbol &sym);
+
size_t getMemberCount() const;
size_t getFetchedMemberCount() const { return seen.size(); }
template <typename ELFT> void parse();
- // Used for --no-allow-shlib-undefined.
- bool allNeededIsKnown;
-
// Used for --as-needed
bool isNeeded;
+ // Non-weak undefined symbols which are not yet resolved when the SO is
+ // parsed. Only filled for `--no-allow-shlib-undefined`.
+ std::vector<Symbol *> requiredSymbols;
+
private:
template <typename ELFT>
std::vector<uint32_t> parseVerneed(const llvm::object::ELFFile<ELFT> &obj,
// this but instead this->Repl.
SectionBase *repl;
- unsigned sectionKind : 3;
+ uint8_t sectionKind : 3;
// The next two bit fields are only used by InputSectionBase, but we
// put them here so the struct packs better.
- unsigned bss : 1;
+ uint8_t bss : 1;
// Set for sections that should not be folded by ICF.
- unsigned keepUnique : 1;
+ uint8_t keepUnique : 1;
// The 1-indexed partition that this section is assigned to by the garbage
// collector, or 0 if this section is dead. Normally there is only one
// and shrinking a section.
unsigned bytesDropped = 0;
+ // Whether the section needs to be padded with a NOP filler due to
+ // deleteFallThruJmpInsn.
+ bool nopFiller = false;
+
void drop_back(uint64_t num) { bytesDropped += num; }
void push_back(uint64_t num) {
// The native ELF reloc data type is not very convenient to handle.
// So we convert ELF reloc records to our own records in Relocations.cpp.
// This vector contains such "cooked" relocations.
- std::vector<Relocation> relocations;
-
- // Indicates that this section needs to be padded with a NOP filler if set to
- // true.
- bool nopFiller = false;
+ SmallVector<Relocation, 0> relocations;
// These are modifiers to jump instructions that are necessary when basic
// block sections are enabled. Basic block sections creates opportunities to
// relax jump instructions at basic block boundaries after reordering the
// basic blocks.
- std::vector<JumpInstrMod> jumpInstrMods;
+ SmallVector<JumpInstrMod, 0> jumpInstrMods;
// A function compiled with -fsplit-stack calling a function
// compiled without -fsplit-stack needs its prologue adjusted. Find
}
protected:
+ template <typename ELFT>
void parseCompressedHeader();
void uncompress() const;
unsigned firstRelocation)
: inputOff(off), sec(sec), size(size), firstRelocation(firstRelocation) {}
- ArrayRef<uint8_t> data() {
+ ArrayRef<uint8_t> data() const {
return {sec->data().data() + this->inputOff, size};
}
template <class ELFT> void copyShtGroup(uint8_t *buf);
};
+#ifdef _WIN32
+static_assert(sizeof(InputSection) <= 192, "InputSection is too big");
+#else
+static_assert(sizeof(InputSection) <= 184, "InputSection is too big");
+#endif
+
inline bool isDebugSection(const InputSectionBase &sec) {
- return sec.name.startswith(".debug") || sec.name.startswith(".zdebug");
+ return (sec.flags & llvm::ELF::SHF_ALLOC) == 0 &&
+ (sec.name.startswith(".debug") || sec.name.startswith(".zdebug"));
}
// The list of all input sections.
return ret;
}
+// The merged bitcode after LTO is large. Try opening a file stream that
+// supports reading, seeking and writing. Such a file allows BitcodeWriter to
+// flush buffered data to reduce memory consumption. If this fails, open a file
+// stream that only supports writing.
+static std::unique_ptr<raw_fd_ostream> openLTOOutputFile(StringRef file) {
+ std::error_code ec;
+ std::unique_ptr<raw_fd_ostream> fs =
+ std::make_unique<raw_fd_stream>(file, ec);
+ if (!ec)
+ return fs;
+ return openFile(file);
+}
+
static std::string getThinLTOOutputFile(StringRef modulePath) {
return lto::getThinLTOOutputFile(
std::string(modulePath), std::string(config->thinLTOPrefixReplace.first),
c.Options.DataSections = true;
// Check if basic block sections must be used.
- // Allowed values for --lto-basicblock-sections are "all", "labels",
+ // Allowed values for --lto-basic-block-sections are "all", "labels",
// "<file name specifying basic block ids>", or none. This is the equivalent
// of -fbasic-block-sections= flag in clang.
if (!config->ltoBasicBlockSections.empty()) {
}
}
+ c.Options.PseudoProbeForProfiling = config->ltoPseudoProbeForProfiling;
c.Options.UniqueBasicBlockSectionNames =
config->ltoUniqueBasicBlockSectionNames;
c.RemarksFilename = std::string(config->optRemarksFilename);
c.RemarksPasses = std::string(config->optRemarksPasses);
c.RemarksWithHotness = config->optRemarksWithHotness;
+ c.RemarksHotnessThreshold = config->optRemarksHotnessThreshold;
c.RemarksFormat = std::string(config->optRemarksFormat);
c.SampleProfile = std::string(config->ltoSampleProfile);
if (config->emitLLVM) {
c.PostInternalizeModuleHook = [](size_t task, const Module &m) {
- if (std::unique_ptr<raw_fd_ostream> os = openFile(config->outputFile))
+ if (std::unique_ptr<raw_fd_ostream> os =
+ openLTOOutputFile(config->outputFile))
WriteBitcodeToFile(m, *os, false);
return false;
};
r.VisibleToRegularObj = config->relocatable || sym->isUsedInRegularObj ||
(r.Prevailing && sym->includeInDynsym()) ||
usedStartStop.count(objSym.getSectionName());
+ // Identify symbols exported dynamically, which therefore could be
+ // referenced by a shared library not visible to the linker.
+ r.ExportDynamic = sym->computeBinding() != STB_LOCAL &&
+ (sym->isExportDynamic(sym->kind(), sym->visibility) ||
+ sym->exportDynamic || sym->inDynamicList);
const auto *dr = dyn_cast<Defined>(sym);
r.FinalDefinitionInLinkageUnit =
(isExec || sym->visibility != STV_DEFAULT) && dr &&
namespace elf {
class Defined;
+class InputFile;
class InputSection;
class InputSectionBase;
class OutputSection;
// This struct represents one section match pattern in SECTIONS() command.
// It can optionally have negative match pattern for EXCLUDED_FILE command.
// Also it may be surrounded with SORT() command, so contains sorting rules.
-struct SectionPattern {
+class SectionPattern {
+ StringMatcher excludedFilePat;
+
+ // Cache of the most recent input argument and result of excludesFile().
+ mutable llvm::Optional<std::pair<const InputFile *, bool>> excludesFileCache;
+
+public:
SectionPattern(StringMatcher &&pat1, StringMatcher &&pat2)
: excludedFilePat(pat1), sectionPat(pat2),
sortOuter(SortSectionPolicy::Default),
sortInner(SortSectionPolicy::Default) {}
- StringMatcher excludedFilePat;
+ bool excludesFile(const InputFile *file) const;
+
StringMatcher sectionPat;
SortSectionPolicy sortOuter;
SortSectionPolicy sortInner;
};
-struct InputSectionDescription : BaseCommand {
+class InputSectionDescription : public BaseCommand {
+ SingleStringMatcher filePat;
+
+ // Cache of the most recent input argument and result of matchesFile().
+ mutable llvm::Optional<std::pair<const InputFile *, bool>> matchesFileCache;
+
+public:
InputSectionDescription(StringRef filePattern, uint64_t withFlags = 0,
uint64_t withoutFlags = 0)
: BaseCommand(InputSectionKind), filePat(filePattern),
return c->kind == InputSectionKind;
}
- SingleStringMatcher filePat;
+ bool matchesFile(const InputFile *file) const;
// Input sections that matches at least one of SectionPatterns
// will be associated with this InputSectionDescription.
};
struct InsertCommand {
- OutputSection *os;
+ std::vector<StringRef> names;
bool isAfter;
StringRef where;
};
// not be used outside of the scope of a call to the above functions.
struct AddressState {
AddressState();
- uint64_t threadBssOffset = 0;
OutputSection *outSec = nullptr;
MemoryRegion *memRegion = nullptr;
MemoryRegion *lmaRegion = nullptr;
uint64_t lmaOffset = 0;
+ uint64_t tbssAddr = 0;
};
llvm::DenseMap<StringRef, OutputSection *> nameToOutputSection;
// to be reordered.
std::vector<InsertCommand> insertCommands;
+ // OutputSections specified by OVERWRITE_SECTIONS.
+ std::vector<OutputSection *> overwriteSections;
+
// Sections that will be warned/errored by --orphan-handling.
std::vector<const InputSectionBase *> orphanSections;
};
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/Support/Parallel.h"
+#include "llvm/Support/TimeProfiler.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
if (config->mapFile.empty())
return;
+ llvm::TimeTraceScope timeScope("Write map file");
+
// Open a map file for writing.
std::error_code ec;
raw_fd_ostream os(config->mapFile, ec, sys::fs::OF_None);
void mark();
template <class RelTy>
- void resolveReloc(InputSectionBase &sec, RelTy &rel, bool isLSDA);
+ void resolveReloc(InputSectionBase &sec, RelTy &rel, bool fromFDE);
template <class RelTy>
void scanEhFrameSection(EhInputSection &eh, ArrayRef<RelTy> rels);
unsigned partition;
// A list of sections to visit.
- SmallVector<InputSection *, 256> queue;
+ SmallVector<InputSection *, 0> queue;
// There are normally few input sections whose names are valid C
// identifiers, so we just store a std::vector instead of a multimap.
template <class ELFT>
template <class RelTy>
void MarkLive<ELFT>::resolveReloc(InputSectionBase &sec, RelTy &rel,
- bool isLSDA) {
+ bool fromFDE) {
Symbol &sym = sec.getFile<ELFT>()->getRelocTargetSym(rel);
// If a symbol is referenced in a live section, it is used.
if (d->isSection())
offset += getAddend<ELFT>(sec, rel);
- if (!isLSDA || !(relSec->flags & SHF_EXECINSTR))
+ // fromFDE being true means this is referenced by an FDE in a .eh_frame
+ // piece. The relocation points to the described function or to an LSDA. We
+ // only need to keep the LSDA live, so ignore anything that points to
+ // executable sections. If the LSDA is in a section group or has the
+ // SHF_LINK_ORDER flag, we ignore the relocation as well because (a) if the
+ // associated text section is live, the LSDA will be retained due to section
+ // group/SHF_LINK_ORDER rules (b) if the associated text section should be
+ // discarded, marking the LSDA will unnecessarily retain the text section.
+ if (!(fromFDE && ((relSec->flags & (SHF_EXECINSTR | SHF_LINK_ORDER)) ||
+ relSec->nextInSectionGroup)))
enqueue(relSec, offset);
return;
}
continue;
}
- // This is a FDE. The relocations point to the described function or to
- // a LSDA. We only need to keep the LSDA alive, so ignore anything that
- // points to executable sections.
uint64_t pieceEnd = piece.inputOff + piece.size;
- for (size_t j = firstRelI, end2 = rels.size(); j < end2; ++j)
- if (rels[j].r_offset < pieceEnd)
- resolveReloc(eh, rels[j], true);
+ for (size_t j = firstRelI, end2 = rels.size();
+ j < end2 && rels[j].r_offset < pieceEnd; ++j)
+ resolveReloc(eh, rels[j], true);
}
}
scanEhFrameSection(*eh, eh->template rels<ELFT>());
}
+ if (sec->flags & SHF_GNU_RETAIN) {
+ enqueue(sec, 0);
+ continue;
+ }
if (sec->flags & SHF_LINK_ORDER)
continue;
if (isReserved(sec) || script->shouldKeep(sec)) {
enqueue(sec, 0);
- } else if (isValidCIdentifier(sec->name)) {
+ } else if ((!config->zStartStopGC || sec->name.startswith("__libc_")) &&
+ isValidCIdentifier(sec->name)) {
+ // As a workaround for glibc libc.a before 2.34
+ // (https://sourceware.org/PR27492), retain __libc_atexit and similar
+ // sections regardless of zStartStopGC.
cNamedSections[saver.save("__start_" + sec->name)].push_back(sec);
cNamedSections[saver.save("__stop_" + sec->name)].push_back(sec);
}
// Otherwise, do mark-sweep GC.
//
- // The -gc-sections option works only for SHF_ALLOC sections
- // (sections that are memory-mapped at runtime). So we can
- // unconditionally make non-SHF_ALLOC sections alive except
- // SHF_LINK_ORDER and SHT_REL/SHT_RELA sections.
+ // The -gc-sections option works only for SHF_ALLOC sections (sections that
+ // are memory-mapped at runtime). So we can unconditionally make non-SHF_ALLOC
+ // sections alive except SHF_LINK_ORDER, SHT_REL/SHT_RELA sections, and
+ // sections in a group.
//
// Usually, non-SHF_ALLOC sections are not removed even if they are
- // unreachable through relocations because reachability is not
- // a good signal whether they are garbage or not (e.g. there is
- // usually no section referring to a .comment section, but we
- // want to keep it.).
+ // unreachable through relocations because reachability is not a good signal
+ // whether they are garbage or not (e.g. there is usually no section referring
+ // to a .comment section, but we want to keep it.) When a non-SHF_ALLOC
+ // section is retained, we also retain sections dependent on it.
//
// Note on SHF_LINK_ORDER: Such sections contain metadata and they
// have a reverse dependency on the InputSection they are linked with.
bool isLinkOrder = (sec->flags & SHF_LINK_ORDER);
bool isRel = (sec->type == SHT_REL || sec->type == SHT_RELA);
- if (!isAlloc && !isLinkOrder && !isRel && !sec->nextInSectionGroup)
+ if (!isAlloc && !isLinkOrder && !isRel && !sec->nextInSectionGroup) {
sec->markLive();
+ for (InputSection *isec : sec->dependentSections)
+ isec->markLive();
+ }
}
// Follow the graph to mark all live sections.
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Parallel.h"
#include "llvm/Support/SHA1.h"
+#include "llvm/Support/TimeProfiler.h"
#include <regex>
+#include <unordered_set>
using namespace llvm;
using namespace llvm::dwarf;
!name.startswith(".debug_"))
return;
+ llvm::TimeTraceScope timeScope("Compress debug sections");
+
// Create a section header.
zDebugHeader.resize(sizeof(Elf_Chdr));
auto *hdr = reinterpret_cast<Elf_Chdr *>(zDebugHeader.data());
// provides signature of the section group.
ArrayRef<Symbol *> symbols = section->file->getSymbols();
os->info = in.symTab->getSymbolIndex(symbols[section->info]);
+
+ // Some group members may be combined or discarded, so we need to compute the
+ // new size. The content will be rewritten in InputSection::copyShtGroup.
+ std::unordered_set<uint32_t> seen;
+ ArrayRef<InputSectionBase *> sections = section->file->getSections();
+ for (const uint32_t &idx : section->getDataAs<uint32_t>().slice(1))
+ if (OutputSection *osec = sections[read32(&idx)]->getOutputSection())
+ seen.insert(osec->sectionIndex);
+ os->size = (1 + seen.size()) * sizeof(uint32_t);
}
void OutputSection::finalize() {
if (!config->copyRelocs || (type != SHT_RELA && type != SHT_REL))
return;
- if (isa<SyntheticSection>(first))
+ // Skip if 'first' is synthetic, i.e. not a section created by --emit-relocs.
+ // Normally 'type' is inherited from 'first', so 'first' should be non-null.
+ // However, if the output section is .rela.dyn, 'type' can be set by the empty
+ // synthetic .rela.plt, and 'first' can be null.
+ if (!first || isa<SyntheticSection>(first))
return;
link = in.symTab->getParent()->sectionIndex;
return std::regex_match(s.begin(), s.end(), re);
}
-// .ctors and .dtors are sorted by this priority from highest to lowest.
-//
-// 1. The section was contained in crtbegin (crtbegin contains
-// some sentinel value in its .ctors and .dtors so that the runtime
-// can find the beginning of the sections.)
+// .ctors and .dtors are sorted by this order:
//
-// 2. The section has an optional priority value in the form of ".ctors.N"
-// or ".dtors.N" where N is a number. Unlike .{init,fini}_array,
-// they are compared as string rather than number.
+// 1. .ctors/.dtors in crtbegin (which contains a sentinel value -1).
+// 2. The section is named ".ctors" or ".dtors" (priority: 65536).
+// 3. The section has an optional priority value in the form of ".ctors.N" or
+// ".dtors.N" where N is a number in the form of %05u (priority: 65535-N).
+// 4. .ctors/.dtors in crtend (which contains a sentinel value 0).
//
-// 3. The section is just ".ctors" or ".dtors".
-//
-// 4. The section was contained in crtend, which contains an end marker.
+// For 2 and 3, the sections are sorted by priority from high to low, e.g.
+// .ctors (65536), .ctors.00100 (65435), .ctors.00200 (65335). In GNU ld's
+// internal linker scripts, the sorting is by string comparison which can
+// achieve the same goal given the optional priority values are of the same
+// length.
//
// In an ideal world, we don't need this function because .init_array and
// .ctors are duplicate features (and .init_array is newer.) However, there
bool endB = isCrtend(b->file->getName());
if (endA != endB)
return endB;
- StringRef x = a->name;
- StringRef y = b->name;
- assert(x.startswith(".ctors") || x.startswith(".dtors"));
- assert(y.startswith(".ctors") || y.startswith(".dtors"));
- x = x.substr(6);
- y = y.substr(6);
- return x < y;
+ return getPriority(a->name) > getPriority(b->name);
}
// Sorts input sections by the special rules for .ctors and .dtors.
llvm::stable_sort(isd->sections, compCtors);
}
-// If an input string is in the form of "foo.N" where N is a number,
-// return N. Otherwise, returns 65536, which is one greater than the
-// lowest priority.
+// If an input string is in the form of "foo.N" where N is a number, returns N
+// (65535-N if .ctors.N or .dtors.N). Otherwise, returns 65536, which is one
+// greater than the lowest priority.
int elf::getPriority(StringRef s) {
size_t pos = s.rfind('.');
if (pos == StringRef::npos)
return 65536;
- int v;
- if (!to_integer(s.substr(pos + 1), v, 10))
- return 65536;
+ int v = 65536;
+ if (to_integer(s.substr(pos + 1), v, 10) &&
+ (pos == 6 && (s.startswith(".ctors") || s.startswith(".dtors"))))
+ v = 65535 - v;
return v;
}
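A few hypothetical input/output pairs for getPriority under the 65535-N rule above (illustration only, not test data):

  // getPriority(".ctors")            == 65536  (no numeric suffix)
  // getPriority(".ctors.00100")      == 65435  (65535 - 100)
  // getPriority(".dtors.00005")      == 65530  (65535 - 5)
  // getPriority(".init_array.00100") == 100    (plain N for other prefixes)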
return {0, 0, 0, 0};
}
+void OutputSection::checkDynRelAddends(const uint8_t *bufStart) {
+ assert(config->writeAddends && config->checkDynamicRelocs);
+ assert(type == SHT_REL || type == SHT_RELA);
+ std::vector<InputSection *> sections = getInputSections(this);
+ parallelForEachN(0, sections.size(), [&](size_t i) {
+ // When linking with -r or --emit-relocs we might also call this function
+ // for input .rel[a].<sec> sections which we simply pass through to the
+ // output. We skip over those and only look at the synthetic relocation
+ // sections created during linking.
+ const auto *sec = dyn_cast<RelocationBaseSection>(sections[i]);
+ if (!sec)
+ return;
+ for (const DynamicReloc &rel : sec->relocs) {
+ int64_t addend = rel.computeAddend();
+ const OutputSection *relOsec = rel.inputSec->getOutputSection();
+ assert(relOsec != nullptr && "missing output section for relocation");
+ const uint8_t *relocTarget =
+ bufStart + relOsec->offset + rel.inputSec->getOffset(rel.offsetInSec);
+ // For SHT_NOBITS the written addend is always zero.
+ int64_t writtenAddend =
+ relOsec->type == SHT_NOBITS
+ ? 0
+ : target->getImplicitAddend(relocTarget, rel.type);
+ if (addend != writtenAddend)
+ internalLinkerError(
+ getErrorLocation(relocTarget),
+ "wrote incorrect addend value 0x" + utohexstr(writtenAddend) +
+ " instead of 0x" + utohexstr(addend) +
+ " for dynamic relocation " + toString(rel.type) +
+ " at offset 0x" + utohexstr(rel.getOffset()) +
+ (rel.sym ? " against symbol " + toString(*rel.sym) : ""));
+ }
+ });
+}
+
template void OutputSection::writeHeaderTo<ELF32LE>(ELF32LE::Shdr *Shdr);
template void OutputSection::writeHeaderTo<ELF32BE>(ELF32BE::Shdr *Shdr);
template void OutputSection::writeHeaderTo<ELF64LE>(ELF64LE::Shdr *Shdr);
void finalize();
template <class ELFT> void writeTo(uint8_t *buf);
+ // Check that the addends for dynamic relocations were written correctly.
+ void checkDynRelAddends(const uint8_t *bufStart);
template <class ELFT> void maybeCompress();
void sort(llvm::function_ref<int(InputSectionBase *s)> order);
private:
// Used for implementation of --compress-debug-sections option.
std::vector<uint8_t> zDebugHeader;
- llvm::SmallVector<char, 1> compressedData;
+ llvm::SmallVector<char, 0> compressedData;
std::array<uint8_t, 4> getFiller();
};
static OutputSection *finiArray;
};
-} // namespace elf
-} // namespace lld
-
-namespace lld {
-namespace elf {
-
uint64_t getHeaderSize();
extern std::vector<OutputSection *> outputSections;
R_GOTPLT,
R_GOTPLTREL,
R_GOTREL,
- R_NEG_TLS,
R_NONE,
R_PC,
R_PLT,
R_RELAX_TLS_LD_TO_LE,
R_RELAX_TLS_LD_TO_LE_ABS,
R_SIZE,
- R_TLS,
+ R_TPREL,
+ R_TPREL_NEG,
R_TLSDESC,
R_TLSDESC_CALL,
R_TLSDESC_PC,
// of a relocation type, there are some relocations whose semantics are
// unique to a target. Such relocation are marked with R_<TARGET_NAME>.
R_AARCH64_GOT_PAGE_PC,
+ R_AARCH64_GOT_PAGE,
R_AARCH64_PAGE_PC,
R_AARCH64_RELAX_TLS_GD_TO_IE_PAGE_PC,
R_AARCH64_TLSDESC_PAGE,
R_PPC64_CALL_PLT,
R_PPC64_RELAX_TOC,
R_PPC64_TOCBASE,
+ R_PPC64_RELAX_GOT_PC,
R_RISCV_ADD,
R_RISCV_PC_INDIRECT,
};
class ThunkSection;
class Thunk;
-struct InputSectionDescription;
+class InputSectionDescription;
class ThunkCreator {
public:
void mergeThunks(ArrayRef<OutputSection *> outputSections);
ThunkSection *getISDThunkSec(OutputSection *os, InputSection *isec,
- InputSectionDescription *isd, uint32_t type,
- uint64_t src);
+ InputSectionDescription *isd,
+ const Relocation &rel, uint64_t src);
ThunkSection *getISThunkSec(InputSection *isec);
static inline int64_t getAddend(const typename ELFT::Rela &rel) {
return rel.r_addend;
}
+
+template <typename RelTy>
+ArrayRef<RelTy> sortRels(ArrayRef<RelTy> rels, SmallVector<RelTy, 0> &storage) {
+ auto cmp = [](const RelTy &a, const RelTy &b) {
+ return a.r_offset < b.r_offset;
+ };
+ if (!llvm::is_sorted(rels, cmp)) {
+ storage.assign(rels.begin(), rels.end());
+ llvm::stable_sort(storage, cmp);
+ rels = storage;
+ }
+ return rels;
+}
} // namespace elf
} // namespace lld
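A minimal sketch of how a caller might use sortRels (the function name below is hypothetical): the storage vector is touched only when the input is not already sorted by r_offset.

  template <class ELFT>
  static void scanInAscendingOffsetOrder(ArrayRef<typename ELFT::Rela> rels) {
    SmallVector<typename ELFT::Rela, 0> storage;
    for (const typename ELFT::Rela &r : sortRels(rels, storage))
      (void)r.r_offset; // offsets are now non-decreasing
  }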
return 1;
StringRef s = getCurrentMB().getBuffer();
StringRef tok = tokens[pos - 1];
- return s.substr(0, tok.data() - s.data()).count('\n') + 1;
+ const size_t tokOffset = tok.data() - s.data();
+
+ // For the first token, or when going backwards, start from the beginning of
+ // the buffer. If this token is after the previous token, start from the
+ // previous token.
+ size_t line = 1;
+ size_t start = 0;
+ if (lastLineNumberOffset > 0 && tokOffset >= lastLineNumberOffset) {
+ start = lastLineNumberOffset;
+ line = lastLineNumber;
+ }
+
+ line += s.substr(start, tokOffset - start).count('\n');
+
+ // Store the line number of this token for reuse.
+ lastLineNumberOffset = tokOffset;
+ lastLineNumber = line;
+
+ return line;
}
// Returns 0-based column number of the current token.
if (s.startswith("/*")) {
size_t e = s.find("*/", 2);
if (e == StringRef::npos) {
- error("unclosed comment in a linker script");
+ setError("unclosed comment in a linker script");
return "";
}
s = s.substr(e + 2);
void setError(const Twine &msg);
void tokenize(MemoryBufferRef mb);
- static StringRef skipSpace(StringRef s);
+ StringRef skipSpace(StringRef s);
bool atEOF();
StringRef next();
StringRef peek();
bool inExpr = false;
size_t pos = 0;
+ size_t lastLineNumber = 0;
+ size_t lastLineNumberOffset = 0;
+
protected:
MemoryBufferRef getCurrentMB();
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/ScopedPrinter.h"
+#include "llvm/Support/TimeProfiler.h"
#include <cassert>
#include <limits>
#include <vector>
void readOutput();
void readOutputArch();
void readOutputFormat();
+ void readOverwriteSections();
void readPhdrs();
void readRegionAlias();
void readSearchDir();
uint64_t withFlags,
uint64_t withoutFlags);
unsigned readPhdrType();
+ SortSectionPolicy peekSortKind();
SortSectionPolicy readSortKind();
SymbolAssignment *readProvideHidden(bool provide, bool hidden);
SymbolAssignment *readAssignment(StringRef tok);
readOutputArch();
} else if (tok == "OUTPUT_FORMAT") {
readOutputFormat();
+ } else if (tok == "OVERWRITE_SECTIONS") {
+ readOverwriteSections();
} else if (tok == "PHDRS") {
readPhdrs();
} else if (tok == "REGION_ALIAS") {
if (isUnderSysroot && s.startswith("/")) {
SmallString<128> pathData;
StringRef path = (config->sysroot + s).toStringRef(pathData);
- if (sys::fs::exists(path)) {
+ if (sys::fs::exists(path))
driver->addFile(saver.save(path), /*withLOption=*/false);
- return;
- }
+ else
+ setError("cannot find " + s + " inside " + config->sysroot);
+ return;
}
if (s.startswith("/")) {
.Case("elf32-x86-64", {ELF32LEKind, EM_X86_64})
.Case("elf64-aarch64", {ELF64LEKind, EM_AARCH64})
.Case("elf64-littleaarch64", {ELF64LEKind, EM_AARCH64})
+ .Case("elf64-bigaarch64", {ELF64BEKind, EM_AARCH64})
.Case("elf32-powerpc", {ELF32BEKind, EM_PPC})
+ .Case("elf32-powerpcle", {ELF32LEKind, EM_PPC})
.Case("elf64-powerpc", {ELF64BEKind, EM_PPC64})
.Case("elf64-powerpcle", {ELF64LEKind, EM_PPC64})
.Case("elf64-x86-64", {ELF64LEKind, EM_X86_64})
.Case("elf32-littleriscv", {ELF32LEKind, EM_RISCV})
.Case("elf64-littleriscv", {ELF64LEKind, EM_RISCV})
.Case("elf64-sparc", {ELF64BEKind, EM_SPARCV9})
+ .Case("elf32-msp430", {ELF32LEKind, EM_MSP430})
.Default({ELFNoneKind, EM_NONE});
}
-// Parse OUTPUT_FORMAT(bfdname) or OUTPUT_FORMAT(bfdname, big, little).
-// Currently we ignore big and little parameters.
+// Parse OUTPUT_FORMAT(bfdname) or OUTPUT_FORMAT(default, big, little). Choose
+// big if -EB is specified, little if -EL is specified, or default if neither is
+// specified.
void ScriptParser::readOutputFormat() {
expect("(");
+ StringRef s;
config->bfdname = unquote(next());
- StringRef s = config->bfdname;
+ if (!consume(")")) {
+ expect(",");
+ s = unquote(next());
+ if (config->optEB)
+ config->bfdname = s;
+ expect(",");
+ s = unquote(next());
+ if (config->optEL)
+ config->bfdname = s;
+ consume(")");
+ }
+ s = config->bfdname;
if (s.consume_back("-freebsd"))
config->osabi = ELFOSABI_FREEBSD;
setError("unknown output format name: " + config->bfdname);
if (s == "elf32-ntradlittlemips" || s == "elf32-ntradbigmips")
config->mipsN32Abi = true;
-
- if (consume(")"))
- return;
- expect(",");
- skip();
- expect(",");
- skip();
- expect(")");
+ if (config->emachine == EM_MSP430)
+ config->osabi = ELFOSABI_STANDALONE;
}
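For example, a script containing a hypothetical directive such as OUTPUT_FORMAT("elf64-littleaarch64", "elf64-bigaarch64", "elf64-littleaarch64") now selects the second BFD name under -EB, the third under -EL, and the first when neither flag is given.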
void ScriptParser::readPhdrs() {
return v;
}
+void ScriptParser::readOverwriteSections() {
+ expect("{");
+ while (!errorCount() && !consume("}"))
+ script->overwriteSections.push_back(readOutputSectionDescription(next()));
+}
+
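A hypothetical use of the new command, assuming it takes the same output section descriptions as SECTIONS: OVERWRITE_SECTIONS { .init_array : { *(.init_array) } } would describe an .init_array output section that takes precedence over a same-named section defined or created elsewhere.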
void ScriptParser::readSections() {
expect("{");
std::vector<BaseCommand *> v;
else if (!consume("BEFORE"))
setError("expected AFTER/BEFORE, but got '" + next() + "'");
StringRef where = next();
+ std::vector<StringRef> names;
for (BaseCommand *cmd : v)
if (auto *os = dyn_cast<OutputSection>(cmd))
- script->insertCommands.push_back({os, isAfter, where});
+ names.push_back(os->name);
+ if (!names.empty())
+ script->insertCommands.push_back({std::move(names), isAfter, where});
}
void ScriptParser::readTarget() {
return Matcher;
}
+SortSectionPolicy ScriptParser::peekSortKind() {
+ return StringSwitch<SortSectionPolicy>(peek())
+ .Cases("SORT", "SORT_BY_NAME", SortSectionPolicy::Name)
+ .Case("SORT_BY_ALIGNMENT", SortSectionPolicy::Alignment)
+ .Case("SORT_BY_INIT_PRIORITY", SortSectionPolicy::Priority)
+ .Case("SORT_NONE", SortSectionPolicy::None)
+ .Default(SortSectionPolicy::Default);
+}
+
SortSectionPolicy ScriptParser::readSortKind() {
- if (consume("SORT") || consume("SORT_BY_NAME"))
- return SortSectionPolicy::Name;
- if (consume("SORT_BY_ALIGNMENT"))
- return SortSectionPolicy::Alignment;
- if (consume("SORT_BY_INIT_PRIORITY"))
- return SortSectionPolicy::Priority;
- if (consume("SORT_NONE"))
- return SortSectionPolicy::None;
- return SortSectionPolicy::Default;
+ SortSectionPolicy ret = peekSortKind();
+ if (ret != SortSectionPolicy::Default)
+ skip();
+ return ret;
}
// Reads SECTIONS command contents in the following form:
}
StringMatcher SectionMatcher;
- while (!errorCount() && peek() != ")" && peek() != "EXCLUDE_FILE")
+ // Break if the next token is ), EXCLUDE_FILE, or SORT*.
+ while (!errorCount() && peek() != ")" && peek() != "EXCLUDE_FILE" &&
+ peekSortKind() == SortSectionPolicy::Default)
SectionMatcher.addPattern(unquote(next()));
if (!SectionMatcher.empty())
ret.push_back({std::move(excludeFilePat), std::move(SectionMatcher)});
+ else if (excludeFilePat.empty())
+ break;
else
setError("section pattern is expected");
}
}
SymbolAssignment *ScriptParser::readSymbolAssignment(StringRef name) {
+ name = unquote(name);
StringRef op = next();
assert(op == "=" || op == "+=");
Expr e = readExpr();
static Optional<uint64_t> parseInt(StringRef tok) {
// Hexadecimal
uint64_t val;
- if (tok.startswith_lower("0x")) {
+ if (tok.startswith_insensitive("0x")) {
if (!to_integer(tok.substr(2), val, 16))
return None;
return val;
}
- if (tok.endswith_lower("H")) {
+ if (tok.endswith_insensitive("H")) {
if (!to_integer(tok.drop_back(), val, 16))
return None;
return val;
}
// Decimal
- if (tok.endswith_lower("K")) {
+ if (tok.endswith_insensitive("K")) {
if (!to_integer(tok.drop_back(), val, 10))
return None;
return val * 1024;
}
- if (tok.endswith_lower("M")) {
+ if (tok.endswith_insensitive("M")) {
if (!to_integer(tok.drop_back(), val, 10))
return None;
return val * 1024 * 1024;
error(location + ": undefined section " + cmd->name);
}
+static bool isValidSymbolName(StringRef s) {
+ auto valid = [](char c) {
+ return isAlnum(c) || c == '$' || c == '.' || c == '_';
+ };
+ return !s.empty() && !isDigit(s[0]) && llvm::all_of(s, valid);
+}
+
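A few illustrative cases for isValidSymbolName (hypothetical inputs, not from a test):

  // isValidSymbolName("_start")  -> true
  // isValidSymbolName("foo$2.x") -> true   ('$', '.' and '_' are allowed)
  // isValidSymbolName("1abc")    -> false  (may not start with a digit)
  // isValidSymbolName("foo-bar") -> false  ('-' is not in the allowed set)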
Expr ScriptParser::readPrimary() {
if (peek() == "(")
return readParenExpr();
return [=] { return alignTo(script->getDot(), e().getValue()); };
}
if (tok == "DEFINED") {
- StringRef name = readParenLiteral();
- return [=] { return symtab->find(name) ? 1 : 0; };
+ StringRef name = unquote(readParenLiteral());
+ return [=] {
+ Symbol *b = symtab->find(name);
+ return (b && b->isDefined()) ? 1 : 0;
+ };
}
if (tok == "LENGTH") {
StringRef name = readParenLiteral();
return cmd->getLMA();
};
}
+ if (tok == "LOG2CEIL") {
+ expect("(");
+ Expr a = readExpr();
+ expect(")");
+ return [=] {
+ // LOG2CEIL(0) is defined to be 0.
+ return llvm::Log2_64_Ceil(std::max(a().getValue(), UINT64_C(1)));
+ };
+ }
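Some example values produced by the expression above (LOG2CEIL(0) is defined as 0 by clamping the argument to 1):

  // LOG2CEIL(0)       == 0
  // LOG2CEIL(1)       == 0
  // LOG2CEIL(2)       == 1
  // LOG2CEIL(5)       == 3
  // LOG2CEIL(0x10000) == 16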
if (tok == "MAX" || tok == "MIN") {
expect("(");
Expr a = readExpr();
return [=] { return *val; };
// Tok is a symbol name.
- if (!isValidCIdentifier(tok))
+ tok = unquote(tok);
+ if (!isValidSymbolName(tok))
setError("malformed number: " + tok);
script->referencedSymbols.push_back(tok);
return [=] { return script->getSymbolValue(tok, location); };
std::vector<SymbolVersion> globals;
std::tie(locals, globals) = readSymbols();
for (const SymbolVersion &pat : locals)
- config->versionDefinitions[VER_NDX_LOCAL].patterns.push_back(pat);
+ config->versionDefinitions[VER_NDX_LOCAL].localPatterns.push_back(pat);
for (const SymbolVersion &pat : globals)
- config->versionDefinitions[VER_NDX_GLOBAL].patterns.push_back(pat);
+ config->versionDefinitions[VER_NDX_GLOBAL].nonLocalPatterns.push_back(pat);
expect(";");
}
std::vector<SymbolVersion> locals;
std::vector<SymbolVersion> globals;
std::tie(locals, globals) = readSymbols();
- for (const SymbolVersion &pat : locals)
- config->versionDefinitions[VER_NDX_LOCAL].patterns.push_back(pat);
// Create a new version definition and add that to the global symbols.
VersionDefinition ver;
ver.name = verStr;
- ver.patterns = globals;
+ ver.nonLocalPatterns = std::move(globals);
+ ver.localPatterns = std::move(locals);
ver.id = config->versionDefinitions.size();
config->versionDefinitions.push_back(ver);
}
void elf::readLinkerScript(MemoryBufferRef mb) {
+ llvm::TimeTraceScope timeScope("Read linker script",
+ mb.getBufferIdentifier());
ScriptParser(mb).readLinkerScript();
}
void elf::readVersionScript(MemoryBufferRef mb) {
+ llvm::TimeTraceScope timeScope("Read version script",
+ mb.getBufferIdentifier());
ScriptParser(mb).readVersionScript();
}
void elf::readDynamicList(MemoryBufferRef mb) {
+ llvm::TimeTraceScope timeScope("Read dynamic list", mb.getBufferIdentifier());
ScriptParser(mb).readDynamicList();
}
void elf::readDefsym(StringRef name, MemoryBufferRef mb) {
+ llvm::TimeTraceScope timeScope("Read defsym input", name);
ScriptParser(mb).readDefsym(name);
}
if (real->exportDynamic)
sym->exportDynamic = true;
+ if (!real->isUsedInRegularObj && sym->isUndefined())
+ sym->isUsedInRegularObj = false;
// Now renaming is complete, and no one refers to real. We drop real from
// .symtab and .dynsym. If real is undefined, it is important that we don't
StringMap<std::vector<Symbol *>> &SymbolTable::getDemangledSyms() {
if (!demangledSyms) {
demangledSyms.emplace();
+ std::string demangled;
for (Symbol *sym : symVector)
- if (canBeVersioned(*sym))
- (*demangledSyms)[demangleItanium(sym->getName())].push_back(sym);
+ if (canBeVersioned(*sym)) {
+ StringRef name = sym->getName();
+ size_t pos = name.find('@');
+ if (pos == std::string::npos)
+ demangled = demangleItanium(name);
+ else if (pos + 1 == name.size() || name[pos + 1] == '@')
+ demangled = demangleItanium(name.substr(0, pos));
+ else
+ demangled =
+ (demangleItanium(name.substr(0, pos)) + name.substr(pos)).str();
+ (*demangledSyms)[demangled].push_back(sym);
+ }
}
return *demangledSyms;
}
return {};
}
-std::vector<Symbol *> SymbolTable::findAllByVersion(SymbolVersion ver) {
+std::vector<Symbol *> SymbolTable::findAllByVersion(SymbolVersion ver,
+ bool includeNonDefault) {
std::vector<Symbol *> res;
SingleStringMatcher m(ver.name);
+ auto check = [&](StringRef name) {
+ size_t pos = name.find('@');
+ if (!includeNonDefault)
+ return pos == StringRef::npos;
+ return !(pos + 1 < name.size() && name[pos + 1] == '@');
+ };
if (ver.isExternCpp) {
for (auto &p : getDemangledSyms())
if (m.match(p.first()))
- res.insert(res.end(), p.second.begin(), p.second.end());
+ for (Symbol *sym : p.second)
+ if (check(sym->getName()))
+ res.push_back(sym);
return res;
}
for (Symbol *sym : symVector)
- if (canBeVersioned(*sym) && m.match(sym->getName()))
+ if (canBeVersioned(*sym) && check(sym->getName()) &&
+ m.match(sym->getName()))
res.push_back(sym);
return res;
}
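To make the filtering above concrete (hypothetical names): with includeNonDefault false, only unversioned names pass; with it true, only default-version ("@@") names are dropped.

  // check("foo")     -> true in both modes
  // check("foo@v1")  -> false without includeNonDefault, true with it
  // check("foo@@v1") -> false in both modes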
for (SymbolVersion &ver : config->dynamicList) {
std::vector<Symbol *> syms;
if (ver.hasWildcard)
- syms = findAllByVersion(ver);
+ syms = findAllByVersion(ver, /*includeNonDefault=*/true);
else
syms = findByVersion(ver);
}
}
-// Set symbol versions to symbols. This function handles patterns
-// containing no wildcard characters.
-void SymbolTable::assignExactVersion(SymbolVersion ver, uint16_t versionId,
- StringRef versionName) {
- if (ver.hasWildcard)
- return;
-
+// Set symbol versions to symbols. This function handles patterns containing no
+// wildcard characters. Return false if no symbol definition matches ver.
+bool SymbolTable::assignExactVersion(SymbolVersion ver, uint16_t versionId,
+ StringRef versionName,
+ bool includeNonDefault) {
// Get a list of symbols which we need to assign the version to.
std::vector<Symbol *> syms = findByVersion(ver);
- if (syms.empty()) {
- if (!config->undefinedVersion)
- error("version script assignment of '" + versionName + "' to symbol '" +
- ver.name + "' failed: symbol not defined");
- return;
- }
auto getName = [](uint16_t ver) -> std::string {
if (ver == VER_NDX_LOCAL)
// Assign the version.
for (Symbol *sym : syms) {
- // Skip symbols containing version info because symbol versions
- // specified by symbol names take precedence over version scripts.
- // See parseSymbolVersion().
- if (sym->getName().contains('@'))
+ // For a non-local versionId, skip symbols containing version info because
+ // symbol versions specified by symbol names take precedence over version
+ // scripts. See parseSymbolVersion().
+ if (!includeNonDefault && versionId != VER_NDX_LOCAL &&
+ sym->getName().contains('@'))
continue;
// If the version has not been assigned, verdefIndex is -1. Use an arbitrary
warn("attempt to reassign symbol '" + ver.name + "' of " +
getName(sym->versionId) + " to " + getName(versionId));
}
+ return !syms.empty();
}
-void SymbolTable::assignWildcardVersion(SymbolVersion ver, uint16_t versionId) {
+void SymbolTable::assignWildcardVersion(SymbolVersion ver, uint16_t versionId,
+ bool includeNonDefault) {
// Exact matching takes precedence over fuzzy matching,
// so we set a version to a symbol only if no version has been assigned
// to the symbol. This behavior is compatible with GNU.
- for (Symbol *sym : findAllByVersion(ver))
+ for (Symbol *sym : findAllByVersion(ver, includeNonDefault))
if (sym->verdefIndex == UINT32_C(-1)) {
sym->verdefIndex = 0;
sym->versionId = versionId;
// script file, the script does not actually define any symbol version,
// but just specifies symbols visibilities.
void SymbolTable::scanVersionScript() {
+ SmallString<128> buf;
// First, we assign versions to exact matching symbols,
// i.e. version definitions not containing any glob meta-characters.
- for (VersionDefinition &v : config->versionDefinitions)
- for (SymbolVersion &pat : v.patterns)
- assignExactVersion(pat, v.id, v.name);
+ std::vector<Symbol *> syms;
+ for (VersionDefinition &v : config->versionDefinitions) {
+ auto assignExact = [&](SymbolVersion pat, uint16_t id, StringRef ver) {
+ bool found =
+ assignExactVersion(pat, id, ver, /*includeNonDefault=*/false);
+ buf.clear();
+ found |= assignExactVersion({(pat.name + "@" + v.name).toStringRef(buf),
+ pat.isExternCpp, /*hasWildCard=*/false},
+ id, ver, /*includeNonDefault=*/true);
+ if (!found && !config->undefinedVersion)
+ errorOrWarn("version script assignment of '" + ver + "' to symbol '" +
+ pat.name + "' failed: symbol not defined");
+ };
+ for (SymbolVersion &pat : v.nonLocalPatterns)
+ if (!pat.hasWildcard)
+ assignExact(pat, v.id, v.name);
+ for (SymbolVersion pat : v.localPatterns)
+ if (!pat.hasWildcard)
+ assignExact(pat, VER_NDX_LOCAL, "local");
+ }
// Next, assign versions to wildcards that are not "*". Note that because the
// last match takes precedence over previous matches, we iterate over the
// definitions in the reverse order.
- for (VersionDefinition &v : llvm::reverse(config->versionDefinitions))
- for (SymbolVersion &pat : v.patterns)
+ auto assignWildcard = [&](SymbolVersion pat, uint16_t id, StringRef ver) {
+ assignWildcardVersion(pat, id, /*includeNonDefault=*/false);
+ buf.clear();
+ assignWildcardVersion({(pat.name + "@" + ver).toStringRef(buf),
+ pat.isExternCpp, /*hasWildCard=*/true},
+ id,
+ /*includeNonDefault=*/true);
+ };
+ for (VersionDefinition &v : llvm::reverse(config->versionDefinitions)) {
+ for (SymbolVersion &pat : v.nonLocalPatterns)
if (pat.hasWildcard && pat.name != "*")
- assignWildcardVersion(pat, v.id);
+ assignWildcard(pat, v.id, v.name);
+ for (SymbolVersion &pat : v.localPatterns)
+ if (pat.hasWildcard && pat.name != "*")
+ assignWildcard(pat, VER_NDX_LOCAL, v.name);
+ }
// Then, assign versions to "*". In GNU linkers they have lower priority than
// other wildcards.
- for (VersionDefinition &v : config->versionDefinitions)
- for (SymbolVersion &pat : v.patterns)
+ for (VersionDefinition &v : config->versionDefinitions) {
+ for (SymbolVersion &pat : v.nonLocalPatterns)
if (pat.hasWildcard && pat.name == "*")
- assignWildcardVersion(pat, v.id);
+ assignWildcard(pat, v.id, v.name);
+ for (SymbolVersion &pat : v.localPatterns)
+ if (pat.hasWildcard && pat.name == "*")
+ assignWildcard(pat, VER_NDX_LOCAL, v.name);
+ }
// Symbol themselves might know their versions because symbols
// can contain versions in the form of <name>@<version>.
private:
std::vector<Symbol *> findByVersion(SymbolVersion ver);
- std::vector<Symbol *> findAllByVersion(SymbolVersion ver);
+ std::vector<Symbol *> findAllByVersion(SymbolVersion ver,
+ bool includeNonDefault);
llvm::StringMap<std::vector<Symbol *>> &getDemangledSyms();
- void assignExactVersion(SymbolVersion ver, uint16_t versionId,
- StringRef versionName);
- void assignWildcardVersion(SymbolVersion ver, uint16_t versionId);
+ bool assignExactVersion(SymbolVersion ver, uint16_t versionId,
+ StringRef versionName, bool includeNonDefault);
+ void assignWildcardVersion(SymbolVersion ver, uint16_t versionId,
+ bool includeNonDefault);
// The order the global symbols are in is not defined. We can use an arbitrary
// order, but it has to be reproducible. That is true even when cross linking.
#include "DWARF.h"
#include "EhFrame.h"
#include "InputSection.h"
+#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/MC/StringTableBuilder.h"
#include "llvm/Support/Endian.h"
std::vector<FdeData> getFdeData() const;
ArrayRef<CieRecord *> getCieRecords() const { return cieRecords; }
+ template <class ELFT>
+ void iterateFDEWithLSDA(llvm::function_ref<void(InputSection &)> fn);
private:
// This is used only when parsing EhInputSection. We keep it here to avoid
template <class ELFT, class RelTy>
void addRecords(EhInputSection *s, llvm::ArrayRef<RelTy> rels);
- template <class ELFT>
- void addSectionAux(EhInputSection *s);
+ template <class ELFT> void addSectionAux(EhInputSection *s);
+ template <class ELFT, class RelTy>
+ void iterateFDEWithLSDAAux(EhInputSection &sec, ArrayRef<RelTy> rels,
+ llvm::DenseSet<size_t> &ciesWithLSDA,
+ llvm::function_ref<void(InputSection &)> fn);
template <class ELFT, class RelTy>
CieRecord *addCie(EhSectionPiece &piece, ArrayRef<RelTy> rels);
template <class ELFT, class RelTy>
- bool isFdeLive(EhSectionPiece &piece, ArrayRef<RelTy> rels);
+ Defined *isFdeLive(EhSectionPiece &piece, ArrayRef<RelTy> rels);
uint64_t getFdePc(uint8_t *buf, size_t off, uint8_t enc) const;
class DynamicReloc {
public:
+ enum Kind {
+ /// The resulting dynamic relocation does not reference a symbol (#sym must
+ /// be nullptr) and uses #addend as the result of computeAddend().
+ AddendOnly,
+ /// The resulting dynamic relocation will not reference a symbol: #sym is
+ /// only used to compute the addend with InputSection::getRelocTargetVA().
+ /// Useful for various relative and TLS relocations (e.g. R_X86_64_TPOFF64).
+ AddendOnlyWithTargetVA,
+ /// The resulting dynamic relocation references symbol #sym from the dynamic
+ /// symbol table and uses #addend as the value of computeAddend().
+ AgainstSymbol,
+ /// The resulting dynamic relocation references symbol #sym from the dynamic
+ /// symbol table and uses InputSection::getRelocTargetVA() + #addend for the
+ /// final addend. It can be used for relocations that write the symbol VA as
+ /// the addend (e.g. R_MIPS_TLS_TPREL64) but still reference the symbol.
+ AgainstSymbolWithTargetVA,
+ /// This is used by the MIPS multi-GOT implementation. It relocates
+ /// addresses of 64kb pages that lie inside the output section.
+ MipsMultiGotPage,
+ };
+ /// This constructor records a relocation against a symbol.
DynamicReloc(RelType type, const InputSectionBase *inputSec,
- uint64_t offsetInSec, bool useSymVA, Symbol *sym, int64_t addend)
- : type(type), sym(sym), inputSec(inputSec), offsetInSec(offsetInSec),
- useSymVA(useSymVA), addend(addend), outputSec(nullptr) {}
- // This constructor records dynamic relocation settings used by MIPS
- // multi-GOT implementation. It's to relocate addresses of 64kb pages
- // lie inside the output section.
+ uint64_t offsetInSec, Kind kind, Symbol &sym, int64_t addend,
+ RelExpr expr)
+ : type(type), sym(&sym), inputSec(inputSec), offsetInSec(offsetInSec),
+ kind(kind), expr(expr), addend(addend) {}
+ /// This constructor records a relative relocation with no symbol.
+ DynamicReloc(RelType type, const InputSectionBase *inputSec,
+ uint64_t offsetInSec, int64_t addend = 0)
+ : type(type), sym(nullptr), inputSec(inputSec), offsetInSec(offsetInSec),
+ kind(AddendOnly), expr(R_ADDEND), addend(addend) {}
+ /// This constructor records dynamic relocation settings used by the MIPS
+ /// multi-GOT implementation.
DynamicReloc(RelType type, const InputSectionBase *inputSec,
uint64_t offsetInSec, const OutputSection *outputSec,
int64_t addend)
: type(type), sym(nullptr), inputSec(inputSec), offsetInSec(offsetInSec),
- useSymVA(false), addend(addend), outputSec(outputSec) {}
+ kind(MipsMultiGotPage), expr(R_ADDEND), addend(addend),
+ outputSec(outputSec) {}
uint64_t getOffset() const;
uint32_t getSymIndex(SymbolTableBaseSection *symTab) const;
+ bool needsDynSymIndex() const {
+ return kind == AgainstSymbol || kind == AgainstSymbolWithTargetVA;
+ }
- // Computes the addend of the dynamic relocation. Note that this is not the
- // same as the addend member variable as it also includes the symbol address
- // if useSymVA is true.
+  /// Computes the addend of the dynamic relocation. Note that this is not the
+  /// same as the #addend member variable, as it may also include the symbol
+  /// address, the address of the corresponding GOT entry, etc.
int64_t computeAddend() const;
RelType type;
-
Symbol *sym;
- const InputSectionBase *inputSec = nullptr;
+ const InputSectionBase *inputSec;
uint64_t offsetInSec;
- // If this member is true, the dynamic relocation will not be against the
- // symbol but will instead be a relative relocation that simply adds the
- // load address. This means we need to write the symbol virtual address
- // plus the original addend as the final relocation addend.
- bool useSymVA;
+
+private:
+ Kind kind;
+  // The kind of expression used to calculate the addend (required e.g. for
+ // relative GOT relocations).
+ RelExpr expr;
int64_t addend;
- const OutputSection *outputSec;
+ const OutputSection *outputSec = nullptr;
};
template <class ELFT> class DynamicSection final : public SyntheticSection {
- using Elf_Dyn = typename ELFT::Dyn;
- using Elf_Rel = typename ELFT::Rel;
- using Elf_Rela = typename ELFT::Rela;
- using Elf_Relr = typename ELFT::Relr;
- using Elf_Shdr = typename ELFT::Shdr;
- using Elf_Sym = typename ELFT::Sym;
+ LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)
// finalizeContents() fills this vector with the section contents.
std::vector<std::pair<int32_t, std::function<uint64_t()>>> entries;
public:
RelocationBaseSection(StringRef name, uint32_t type, int32_t dynamicTag,
int32_t sizeDynamicTag);
- void addReloc(RelType dynType, InputSectionBase *isec, uint64_t offsetInSec,
- Symbol *sym);
- // Add a dynamic relocation that might need an addend. This takes care of
- // writing the addend to the output section if needed.
- void addReloc(RelType dynType, InputSectionBase *inputSec,
- uint64_t offsetInSec, Symbol *sym, int64_t addend, RelExpr expr,
- RelType type);
+ /// Add a dynamic relocation without writing an addend to the output section.
+ /// This overload can be used if the addends are written directly instead of
+ /// using relocations on the input section (e.g. MipsGotSection::writeTo()).
void addReloc(const DynamicReloc &reloc);
+ /// Add a dynamic relocation against \p sym with an optional addend.
+ void addSymbolReloc(RelType dynType, InputSectionBase *isec,
+ uint64_t offsetInSec, Symbol &sym, int64_t addend = 0,
+ llvm::Optional<RelType> addendRelType = llvm::None);
+ /// Add a relative dynamic relocation that uses the target address of \p sym
+ /// (i.e. InputSection::getRelocTargetVA()) + \p addend as the addend.
+ void addRelativeReloc(RelType dynType, InputSectionBase *isec,
+ uint64_t offsetInSec, Symbol &sym, int64_t addend,
+ RelType addendRelType, RelExpr expr);
+ /// Add a dynamic relocation using the target address of \p sym as the addend
+ /// if \p sym is non-preemptible. Otherwise add a relocation against \p sym.
+ void addAddendOnlyRelocIfNonPreemptible(RelType dynType,
+ InputSectionBase *isec,
+ uint64_t offsetInSec, Symbol &sym,
+ RelType addendRelType);
+ void addReloc(DynamicReloc::Kind kind, RelType dynType,
+ InputSectionBase *inputSec, uint64_t offsetInSec, Symbol &sym,
+ int64_t addend, RelExpr expr, RelType addendRelType);
bool isNeeded() const override { return !relocs.empty(); }
size_t getSize() const override { return relocs.size() * this->entsize; }
size_t getRelativeRelocCount() const { return numRelativeRelocs; }
void finalizeContents() override;
+ static bool classof(const SectionBase *d) {
+ return SyntheticSection::classof(d) &&
+ (d->type == llvm::ELF::SHT_RELA || d->type == llvm::ELF::SHT_REL ||
+ d->type == llvm::ELF::SHT_RELR);
+ }
int32_t dynamicTag, sizeDynamicTag;
std::vector<DynamicReloc> relocs;
// Linker generated sections which can be used as inputs and are not specific to
// a partition.
struct InStruct {
- InputSection *armAttributes;
+ InputSection *attributes;
BssSection *bss;
BssSection *bssRelRo;
GotSection *got;
TargetInfo::~TargetInfo() {}
int64_t TargetInfo::getImplicitAddend(const uint8_t *buf, RelType type) const {
+ internalLinkerError(getErrorLocation(buf),
+ "cannot read addend for relocation " + toString(type));
return 0;
}
return true;
}
-RelExpr TargetInfo::adjustRelaxExpr(RelType type, const uint8_t *data,
- RelExpr expr) const {
+RelExpr TargetInfo::adjustTlsExpr(RelType type, RelExpr expr) const {
return expr;
}
+RelExpr TargetInfo::adjustGotPcExpr(RelType type, int64_t addend,
+ const uint8_t *data) const {
+ return R_GOT_PC;
+}
+
void TargetInfo::relaxGot(uint8_t *loc, const Relocation &rel,
uint64_t val) const {
llvm_unreachable("Should not have claimed to be relaxable");
RelType tlsGotRel;
RelType tlsModuleIndexRel;
RelType tlsOffsetRel;
+ unsigned gotEntrySize = config->wordsize;
unsigned pltEntrySize;
unsigned pltHeaderSize;
unsigned ipltEntrySize;
// non-split-stack callee this will return true. Otherwise returns false.
bool needsMoreStackNonSplit = true;
- virtual RelExpr adjustRelaxExpr(RelType type, const uint8_t *data,
- RelExpr expr) const;
+ virtual RelExpr adjustTlsExpr(RelType type, RelExpr expr) const;
+ virtual RelExpr adjustGotPcExpr(RelType type, int64_t addend,
+ const uint8_t *loc) const;
virtual void relaxGot(uint8_t *loc, const Relocation &rel,
uint64_t val) const;
virtual void relaxTlsGdToIe(uint8_t *loc, const Relocation &rel,
// the .toc section.
bool isPPC64SmallCodeModelTocReloc(RelType type);
+// Write a prefixed instruction, which is a 4-byte prefix followed by a 4-byte
+// instruction (regardless of endianness). Therefore, the prefix is always in
+// lower memory than the instruction.
+void writePrefixedInstruction(uint8_t *loc, uint64_t insn);
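+// A minimal sketch of what such a helper could look like (the prefix is the
+// upper 32 bits of insn and is written at the lower address):
+//   write32(loc, insn >> 32);             // prefix word
+//   write32(loc + 4, insn & 0xffffffff);  // instruction word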
+
void addPPC64SaveRestore();
uint64_t getPPC64TocBase();
uint64_t getAArch64Page(uint64_t expr);
void reportRangeError(uint8_t *loc, const Relocation &rel, const Twine &v,
int64_t min, uint64_t max);
+void reportRangeError(uint8_t *loc, int64_t v, int n, const Symbol &sym,
+ const Twine &msg);
// Make sure that V can be represented as an N bit signed integer.
inline void checkInt(uint8_t *loc, int64_t v, int n, const Relocation &rel) {
// if the target is in range, otherwise it creates a long thunk.
class ARMThunk : public Thunk {
public:
- ARMThunk(Symbol &dest) : Thunk(dest, 0) {}
+ ARMThunk(Symbol &dest, int64_t addend) : Thunk(dest, addend) {}
bool getMayUseShortThunk();
uint32_t size() override { return getMayUseShortThunk() ? 4 : sizeLong(); }
// which has a range of 16MB.
class ThumbThunk : public Thunk {
public:
- ThumbThunk(Symbol &dest) : Thunk(dest, 0) { alignment = 2; }
+ ThumbThunk(Symbol &dest, int64_t addend) : Thunk(dest, addend) {
+ alignment = 2;
+ }
bool getMayUseShortThunk();
uint32_t size() override { return getMayUseShortThunk() ? 4 : sizeLong(); }
// Source State, TargetState, Target Requirement, ABS or PI, Range
class ARMV7ABSLongThunk final : public ARMThunk {
public:
- ARMV7ABSLongThunk(Symbol &dest) : ARMThunk(dest) {}
+ ARMV7ABSLongThunk(Symbol &dest, int64_t addend) : ARMThunk(dest, addend) {}
uint32_t sizeLong() override { return 12; }
void writeLong(uint8_t *buf) override;
class ARMV7PILongThunk final : public ARMThunk {
public:
- ARMV7PILongThunk(Symbol &dest) : ARMThunk(dest) {}
+ ARMV7PILongThunk(Symbol &dest, int64_t addend) : ARMThunk(dest, addend) {}
uint32_t sizeLong() override { return 16; }
void writeLong(uint8_t *buf) override;
class ThumbV7ABSLongThunk final : public ThumbThunk {
public:
- ThumbV7ABSLongThunk(Symbol &dest) : ThumbThunk(dest) {}
+ ThumbV7ABSLongThunk(Symbol &dest, int64_t addend)
+ : ThumbThunk(dest, addend) {}
uint32_t sizeLong() override { return 10; }
void writeLong(uint8_t *buf) override;
class ThumbV7PILongThunk final : public ThumbThunk {
public:
- ThumbV7PILongThunk(Symbol &dest) : ThumbThunk(dest) {}
+ ThumbV7PILongThunk(Symbol &dest, int64_t addend) : ThumbThunk(dest, addend) {}
uint32_t sizeLong() override { return 12; }
void writeLong(uint8_t *buf) override;
// can result in a thunk
class ARMV5ABSLongThunk final : public ARMThunk {
public:
- ARMV5ABSLongThunk(Symbol &dest) : ARMThunk(dest) {}
+ ARMV5ABSLongThunk(Symbol &dest, int64_t addend) : ARMThunk(dest, addend) {}
uint32_t sizeLong() override { return 8; }
void writeLong(uint8_t *buf) override;
class ARMV5PILongThunk final : public ARMThunk {
public:
- ARMV5PILongThunk(Symbol &dest) : ARMThunk(dest) {}
+ ARMV5PILongThunk(Symbol &dest, int64_t addend) : ARMThunk(dest, addend) {}
uint32_t sizeLong() override { return 16; }
void writeLong(uint8_t *buf) override;
// Implementations of Thunks for Arm v6-M. Only Thumb instructions are permitted
class ThumbV6MABSLongThunk final : public ThumbThunk {
public:
- ThumbV6MABSLongThunk(Symbol &dest) : ThumbThunk(dest) {}
+ ThumbV6MABSLongThunk(Symbol &dest, int64_t addend)
+ : ThumbThunk(dest, addend) {}
uint32_t sizeLong() override { return 12; }
void writeLong(uint8_t *buf) override;
class ThumbV6MPILongThunk final : public ThumbThunk {
public:
- ThumbV6MPILongThunk(Symbol &dest) : ThumbThunk(dest) {}
+ ThumbV6MPILongThunk(Symbol &dest, int64_t addend)
+ : ThumbThunk(dest, addend) {}
uint32_t sizeLong() override { return 16; }
void writeLong(uint8_t *buf) override;
uint32_t size() override { return 20; }
void writeTo(uint8_t *buf) override;
void addSymbols(ThunkSection &isec) override;
+ bool isCompatibleWith(const InputSection &isec,
+ const Relocation &rel) const override;
};
// PPC64 R2 Save Stub
// 2) Tail calls the callee.
class PPC64R2SaveStub final : public Thunk {
public:
- PPC64R2SaveStub(Symbol &dest) : Thunk(dest, 0) {}
- uint32_t size() override { return 8; }
+ PPC64R2SaveStub(Symbol &dest, int64_t addend) : Thunk(dest, addend) {
+ alignment = 16;
+ }
+
+  // To prevent oscillations in layout when moving from short to long thunks,
+ // we make sure that once a thunk has been set to long it cannot go back.
+ bool getMayUseShortThunk() {
+ if (!mayUseShortThunk)
+ return false;
+ if (!isInt<26>(computeOffset())) {
+ mayUseShortThunk = false;
+ return false;
+ }
+ return true;
+ }
+ uint32_t size() override { return getMayUseShortThunk() ? 8 : 32; }
+ void writeTo(uint8_t *buf) override;
+ void addSymbols(ThunkSection &isec) override;
+ bool isCompatibleWith(const InputSection &isec,
+ const Relocation &rel) const override;
+
+private:
+ // Transitioning from long to short can create layout oscillations in
+ // certain corner cases which would prevent the layout from converging.
+ // This is similar to the handling for ARMThunk.
+ bool mayUseShortThunk = true;
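+  // The branch is the second instruction of the stub (after the std that
+  // saves r2), hence the +4 below.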
+ int64_t computeOffset() const {
+ return destination.getVA() - (getThunkTargetSym()->getVA() + 4);
+ }
+};
+
+// PPC64 R12 Setup Stub
+// When a caller that does not maintain a toc-pointer performs a local call to
+// a callee that requires a toc-pointer, this stub places the callee's global
+// entry point into r12 without saving R2.
+class PPC64R12SetupStub final : public Thunk {
+public:
+ PPC64R12SetupStub(Symbol &dest) : Thunk(dest, 0) { alignment = 16; }
+ uint32_t size() override { return 32; }
+ void writeTo(uint8_t *buf) override;
+ void addSymbols(ThunkSection &isec) override;
+ bool isCompatibleWith(const InputSection &isec,
+ const Relocation &rel) const override;
+};
+
+// PPC64 PC-relative PLT Stub
+// When a caller that does not maintain a toc-pointer performs an extern call,
+// this stub is needed for:
+// 1) Loading the target function's address from the procedure linkage table
+//    into r12 (for use by the target function's global entry point) and into
+//    the count register, using pc-relative instructions.
+// 2) Transferring control to the target function through an indirect branch.
+class PPC64PCRelPLTStub final : public Thunk {
+public:
+ PPC64PCRelPLTStub(Symbol &dest) : Thunk(dest, 0) { alignment = 16; }
+ uint32_t size() override { return 32; }
void writeTo(uint8_t *buf) override;
void addSymbols(ThunkSection &isec) override;
+ bool isCompatibleWith(const InputSection &isec,
+ const Relocation &rel) const override;
};
// A bl instruction uses a signed 24 bit offset, with an implicit 4 byte
// alignment. This gives a possible 26 bits of 'reach'. If the call offset is
-// larger then that we need to emit a long-branch thunk. The target address
+// larger than that we need to emit a long-branch thunk. The target address
// of the callee is stored in a table to be accessed TOC-relative. Since the
// call must be local (a non-local call will have a PltCallStub instead) the
// table stores the address of the callee's local entry point. For
// used.
class PPC64LongBranchThunk : public Thunk {
public:
- uint32_t size() override { return 16; }
+ uint32_t size() override { return 32; }
void writeTo(uint8_t *buf) override;
void addSymbols(ThunkSection &isec) override;
+ bool isCompatibleWith(const InputSection &isec,
+ const Relocation &rel) const override;
protected:
PPC64LongBranchThunk(Symbol &dest, int64_t addend) : Thunk(dest, addend) {}
assert(!dest.isPreemptible);
if (Optional<uint32_t> index =
in.ppc64LongBranchTarget->addEntry(&dest, addend)) {
- mainPart->relaDyn->addReloc(
- {target->relativeRel, in.ppc64LongBranchTarget, *index * UINT64_C(8),
- true, &dest,
- addend + getPPC64GlobalEntryToLocalEntryOffset(dest.stOther)});
+ mainPart->relaDyn->addRelativeReloc(
+ target->relativeRel, in.ppc64LongBranchTarget, *index * UINT64_C(8),
+ dest, addend + getPPC64GlobalEntryToLocalEntryOffset(dest.stOther),
+ target->symbolicRel, R_ABS);
}
}
};
}
};
+// A bl instruction uses a signed 24 bit offset, with an implicit 4 byte
+// alignment. This gives a possible 26 bits of 'reach'. If the caller and
+// callee do not use toc and the call offset is larger than 26 bits,
+// we need to emit a pc-rel based long-branch thunk. The target address of
+// the callee is computed with a PC-relative offset.
+class PPC64PCRelLongBranchThunk final : public Thunk {
+public:
+ PPC64PCRelLongBranchThunk(Symbol &dest, int64_t addend)
+ : Thunk(dest, addend) {
+ alignment = 16;
+ }
+ uint32_t size() override { return 32; }
+ void writeTo(uint8_t *buf) override;
+ void addSymbols(ThunkSection &isec) override;
+ bool isCompatibleWith(const InputSection &isec,
+ const Relocation &rel) const override;
+};
+
} // end anonymous namespace
Defined *Thunk::addSymbol(StringRef name, uint8_t type, uint64_t value,
s->file = destination.file;
}
+bool PPC64PltCallStub::isCompatibleWith(const InputSection &isec,
+ const Relocation &rel) const {
+ return rel.type == R_PPC64_REL24 || rel.type == R_PPC64_REL14;
+}
+
void PPC64R2SaveStub::writeTo(uint8_t *buf) {
- int64_t offset = destination.getVA() - (getThunkTargetSym()->getVA() + 4);
+ const int64_t offset = computeOffset();
+ write32(buf + 0, 0xf8410018); // std r2,24(r1)
// The branch offset needs to fit in 26 bits.
- if (!isInt<26>(offset))
- fatal("R2 save stub branch offset is too large: " + Twine(offset));
- write32(buf + 0, 0xf8410018); // std r2,24(r1)
- write32(buf + 4, 0x48000000 | (offset & 0x03fffffc)); // b <offset>
+ if (getMayUseShortThunk()) {
+ write32(buf + 4, 0x48000000 | (offset & 0x03fffffc)); // b <offset>
+ } else if (isInt<34>(offset)) {
+ int nextInstOffset;
+ if (!config->Power10Stub) {
+ uint64_t tocOffset = destination.getVA() - getPPC64TocBase();
+ if (tocOffset >> 16 > 0) {
+ const uint64_t addi = ADDI_R12_TO_R12_NO_DISP | (tocOffset & 0xffff);
+ const uint64_t addis = ADDIS_R12_TO_R2_NO_DISP | ((tocOffset >> 16) & 0xffff);
+ write32(buf + 4, addis); // addis r12, r2 , top of offset
+ write32(buf + 8, addi); // addi r12, r12, bottom of offset
+ nextInstOffset = 12;
+ } else {
+ const uint64_t addi = ADDI_R12_TO_R2_NO_DISP | (tocOffset & 0xffff);
+ write32(buf + 4, addi); // addi r12, r2, offset
+ nextInstOffset = 8;
+ }
+ } else {
+ const uint64_t paddi = PADDI_R12_NO_DISP |
+ (((offset >> 16) & 0x3ffff) << 32) |
+ (offset & 0xffff);
+ writePrefixedInstruction(buf + 4, paddi); // paddi r12, 0, func@pcrel, 1
+ nextInstOffset = 12;
+ }
+ write32(buf + nextInstOffset, MTCTR_R12); // mtctr r12
+ write32(buf + nextInstOffset + 4, BCTR); // bctr
+ } else {
+ in.ppc64LongBranchTarget->addEntry(&destination, addend);
+ const int64_t offsetFromTOC =
+ in.ppc64LongBranchTarget->getEntryVA(&destination, addend) -
+ getPPC64TocBase();
+ writePPC64LoadAndBranch(buf + 4, offsetFromTOC);
+ }
}
void PPC64R2SaveStub::addSymbols(ThunkSection &isec) {
s->needsTocRestore = true;
}
+bool PPC64R2SaveStub::isCompatibleWith(const InputSection &isec,
+ const Relocation &rel) const {
+ return rel.type == R_PPC64_REL24 || rel.type == R_PPC64_REL14;
+}
+
+void PPC64R12SetupStub::writeTo(uint8_t *buf) {
+ int64_t offset = destination.getVA() - getThunkTargetSym()->getVA();
+ if (!isInt<34>(offset))
+ reportRangeError(buf, offset, 34, destination, "R12 setup stub offset");
+
+ int nextInstOffset;
+ if (!config->Power10Stub) {
+ uint32_t off = destination.getVA(addend) - getThunkTargetSym()->getVA() - 8;
+ write32(buf + 0, 0x7c0802a6); // mflr r12
+ write32(buf + 4, 0x429f0005); // bcl 20,31,.+4
+ write32(buf + 8, 0x7d6802a6); // mflr r11
+ write32(buf + 12, 0x7d8803a6); // mtlr r12
+    write32(buf + 16, 0x3d8b0000 | computeHiBits(off)); // addis r12,r11,off@ha
+ write32(buf + 20, 0x398c0000 | (off & 0xffff)); // addi r12,r12,off@l
+ nextInstOffset = 24;
+ } else {
+ uint64_t paddi = PADDI_R12_NO_DISP | (((offset >> 16) & 0x3ffff) << 32) |
+ (offset & 0xffff);
+ writePrefixedInstruction(buf + 0, paddi); // paddi r12, 0, func@pcrel, 1
+ nextInstOffset = 8;
+ }
+ write32(buf + nextInstOffset, MTCTR_R12); // mtctr r12
+ write32(buf + nextInstOffset + 4, BCTR); // bctr
+}
+
+void PPC64R12SetupStub::addSymbols(ThunkSection &isec) {
+ addSymbol(saver.save("__gep_setup_" + destination.getName()), STT_FUNC, 0,
+ isec);
+}
+
+bool PPC64R12SetupStub::isCompatibleWith(const InputSection &isec,
+ const Relocation &rel) const {
+ return rel.type == R_PPC64_REL24_NOTOC;
+}
+
+void PPC64PCRelPLTStub::writeTo(uint8_t *buf) {
+ int nextInstOffset = 0;
+ int64_t offset = destination.getGotPltVA() - getThunkTargetSym()->getVA();
+
+ if (config->Power10Stub) {
+ if (!isInt<34>(offset))
+ reportRangeError(buf, offset, 34, destination,
+ "PC-relative PLT stub offset");
+ const uint64_t pld = PLD_R12_NO_DISP | (((offset >> 16) & 0x3ffff) << 32) |
+ (offset & 0xffff);
+ writePrefixedInstruction(buf + 0, pld); // pld r12, func@plt@pcrel
+ nextInstOffset = 8;
+ } else {
+ uint32_t off = destination.getVA(addend) - getThunkTargetSym()->getVA() - 8;
+ write32(buf + 0, 0x7c0802a6); // mflr r12
+ write32(buf + 4, 0x429f0005); // bcl 20,31,.+4
+ write32(buf + 8, 0x7d6802a6); // mflr r11
+ write32(buf + 12, 0x7d8803a6); // mtlr r12
+ write32(buf + 16, 0x3d8b0000 | computeHiBits(off)); // addis r12,r11,off@ha
+ write32(buf + 20, 0x398c0000 | (off & 0xffff)); // addi r12,r12,off@l
+ nextInstOffset = 24;
+ }
+ write32(buf + nextInstOffset, MTCTR_R12); // mtctr r12
+ write32(buf + nextInstOffset + 4, BCTR); // bctr
+}
+
+void PPC64PCRelPLTStub::addSymbols(ThunkSection &isec) {
+ addSymbol(saver.save("__plt_pcrel_" + destination.getName()), STT_FUNC, 0,
+ isec);
+}
+
+bool PPC64PCRelPLTStub::isCompatibleWith(const InputSection &isec,
+ const Relocation &rel) const {
+ return rel.type == R_PPC64_REL24_NOTOC;
+}
+
void PPC64LongBranchThunk::writeTo(uint8_t *buf) {
int64_t offset = in.ppc64LongBranchTarget->getEntryVA(&destination, addend) -
getPPC64TocBase();
isec);
}
+bool PPC64LongBranchThunk::isCompatibleWith(const InputSection &isec,
+ const Relocation &rel) const {
+ return rel.type == R_PPC64_REL24 || rel.type == R_PPC64_REL14;
+}
+
+void PPC64PCRelLongBranchThunk::writeTo(uint8_t *buf) {
+ int64_t offset = destination.getVA() - getThunkTargetSym()->getVA();
+ if (!isInt<34>(offset))
+ reportRangeError(buf, offset, 34, destination,
+ "PC-relative long branch stub offset");
+
+ int nextInstOffset;
+ if (!config->Power10Stub) {
+ uint32_t off = destination.getVA(addend) - getThunkTargetSym()->getVA() - 8;
+ write32(buf + 0, 0x7c0802a6); // mflr r12
+ write32(buf + 4, 0x429f0005); // bcl 20,31,.+4
+ write32(buf + 8, 0x7d6802a6); // mflr r11
+ write32(buf + 12, 0x7d8803a6); // mtlr r12
+ write32(buf + 16, 0x3d8b0000 | computeHiBits(off)); // addis r12,r11,off@ha
+ write32(buf + 20, 0x398c0000 | (off & 0xffff)); // addi r12,r12,off@l
+ nextInstOffset = 24;
+ } else {
+ uint64_t paddi = PADDI_R12_NO_DISP | (((offset >> 16) & 0x3ffff) << 32) |
+ (offset & 0xffff);
+ writePrefixedInstruction(buf + 0, paddi); // paddi r12, 0, func@pcrel, 1
+ nextInstOffset = 8;
+ }
+ write32(buf + nextInstOffset, MTCTR_R12); // mtctr r12
+ write32(buf + nextInstOffset + 4, BCTR); // bctr
+}
+
+void PPC64PCRelLongBranchThunk::addSymbols(ThunkSection &isec) {
+ addSymbol(saver.save("__long_branch_pcrel_" + destination.getName()),
+ STT_FUNC, 0, isec);
+}
+
+bool PPC64PCRelLongBranchThunk::isCompatibleWith(const InputSection &isec,
+ const Relocation &rel) const {
+ return rel.type == R_PPC64_REL24_NOTOC;
+}
+
Thunk::Thunk(Symbol &d, int64_t a) : destination(d), addend(a), offset(0) {}
Thunk::~Thunk() = default;
// - MOVT and MOVW instructions cannot be used
// - The only Thumb relocation that can generate a Thunk is a BL; this can
//   always be transformed into a BLX
-static Thunk *addThunkPreArmv7(RelType reloc, Symbol &s) {
+static Thunk *addThunkPreArmv7(RelType reloc, Symbol &s, int64_t a) {
switch (reloc) {
case R_ARM_PC24:
case R_ARM_PLT32:
case R_ARM_CALL:
case R_ARM_THM_CALL:
if (config->picThunk)
- return make<ARMV5PILongThunk>(s);
- return make<ARMV5ABSLongThunk>(s);
+ return make<ARMV5PILongThunk>(s, a);
+ return make<ARMV5ABSLongThunk>(s, a);
}
fatal("relocation " + toString(reloc) + " to " + toString(s) +
" not supported for Armv5 or Armv6 targets");
// - MOVT and MOVW instructions cannot be used.
// - Only a limited number of instructions can access registers r8 and above
// - No interworking support is needed (all Thumb).
-static Thunk *addThunkV6M(RelType reloc, Symbol &s) {
+static Thunk *addThunkV6M(RelType reloc, Symbol &s, int64_t a) {
switch (reloc) {
case R_ARM_THM_JUMP19:
case R_ARM_THM_JUMP24:
case R_ARM_THM_CALL:
if (config->isPic)
- return make<ThumbV6MPILongThunk>(s);
- return make<ThumbV6MABSLongThunk>(s);
+ return make<ThumbV6MPILongThunk>(s, a);
+ return make<ThumbV6MABSLongThunk>(s, a);
}
fatal("relocation " + toString(reloc) + " to " + toString(s) +
" not supported for Armv6-M targets");
}
// Creates a thunk for Thumb-ARM interworking or branch range extension.
-static Thunk *addThunkArm(RelType reloc, Symbol &s) {
+static Thunk *addThunkArm(RelType reloc, Symbol &s, int64_t a) {
// Decide which Thunk is needed based on:
// Available instruction set
// - An Arm Thunk can only be used if Arm state is available.
// architecture to flag.
if (!config->armHasMovtMovw) {
if (!config->armJ1J2BranchEncoding)
- return addThunkPreArmv7(reloc, s);
- return addThunkV6M(reloc, s);
+ return addThunkPreArmv7(reloc, s, a);
+ return addThunkV6M(reloc, s, a);
}
switch (reloc) {
case R_ARM_JUMP24:
case R_ARM_CALL:
if (config->picThunk)
- return make<ARMV7PILongThunk>(s);
- return make<ARMV7ABSLongThunk>(s);
+ return make<ARMV7PILongThunk>(s, a);
+ return make<ARMV7ABSLongThunk>(s, a);
case R_ARM_THM_JUMP19:
case R_ARM_THM_JUMP24:
case R_ARM_THM_CALL:
if (config->picThunk)
- return make<ThumbV7PILongThunk>(s);
- return make<ThumbV7ABSLongThunk>(s);
+ return make<ThumbV7PILongThunk>(s, a);
+ return make<ThumbV7ABSLongThunk>(s, a);
}
fatal("unrecognized relocation type");
}
}
static Thunk *addThunkPPC64(RelType type, Symbol &s, int64_t a) {
- assert((type == R_PPC64_REL14 || type == R_PPC64_REL24) &&
+ assert((type == R_PPC64_REL14 || type == R_PPC64_REL24 ||
+ type == R_PPC64_REL24_NOTOC) &&
"unexpected relocation type for thunk");
if (s.isInPlt())
- return make<PPC64PltCallStub>(s);
+ return type == R_PPC64_REL24_NOTOC ? (Thunk *)make<PPC64PCRelPLTStub>(s)
+ : (Thunk *)make<PPC64PltCallStub>(s);
// This check looks at the st_other bits of the callee. If the value is 1
- // then the callee clobbers the TOC and we need an R2 save stub.
- if ((s.stOther >> 5) == 1)
- return make<PPC64R2SaveStub>(s);
+ // then the callee clobbers the TOC and we need an R2 save stub when RelType
+ // is R_PPC64_REL14 or R_PPC64_REL24.
+ if ((type == R_PPC64_REL14 || type == R_PPC64_REL24) && (s.stOther >> 5) == 1)
+ return make<PPC64R2SaveStub>(s, a);
+
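+  // For a NOTOC call, a callee whose st_other bits indicate a separate local
+  // entry point expects its global entry address in r12, so it needs the R12
+  // setup stub; otherwise a plain pc-relative long-branch thunk suffices.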
+ if (type == R_PPC64_REL24_NOTOC)
+ return (s.stOther >> 5) > 1
+ ? (Thunk *)make<PPC64R12SetupStub>(s)
+ : (Thunk *)make<PPC64PCRelLongBranchThunk>(s, a);
if (config->picThunk)
return make<PPC64PILongBranchThunk>(s, a);
return addThunkAArch64(rel.type, s, a);
if (config->emachine == EM_ARM)
- return addThunkArm(rel.type, s);
+ return addThunkArm(rel.type, s, a);
if (config->emachine == EM_MIPS)
return addThunkMips(rel.type, s);
#ifndef LLD_ELF_THUNKS_H
#define LLD_ELF_THUNKS_H
+#include "llvm/ADT/SmallVector.h"
#include "Relocations.h"
namespace lld {
const InputFile *file, int64_t addend);
void writePPC64LoadAndBranch(uint8_t *buf, int64_t offset);
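+// Compute the upper 16 bits of an offset, adjusted for sign extension of the
+// lower half (the usual @ha computation): because 0x8000 is added first,
+// (computeHiBits(x) << 16) + (int16_t)(x & 0xffff) == x.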
+static inline uint16_t computeHiBits(uint32_t toCompute) {
+ return (toCompute + 0x8000) >> 16;
+}
+
} // namespace elf
} // namespace lld
--- /dev/null
+//===- ARM.cpp ------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "InputFiles.h"
+#include "Symbols.h"
+#include "SyntheticSections.h"
+#include "Target.h"
+
+#include "lld/Common/ErrorHandler.h"
+#include "llvm/ADT/Bitfields.h"
+#include "llvm/BinaryFormat/MachO.h"
+#include "llvm/Support/Endian.h"
+
+using namespace llvm;
+using namespace llvm::MachO;
+using namespace llvm::support::endian;
+using namespace lld;
+using namespace lld::macho;
+
+namespace {
+
+struct ARM : TargetInfo {
+ ARM(uint32_t cpuSubtype);
+
+ int64_t getEmbeddedAddend(MemoryBufferRef, uint64_t offset,
+ const relocation_info) const override;
+ void relocateOne(uint8_t *loc, const Reloc &, uint64_t va,
+ uint64_t pc) const override;
+
+ void writeStub(uint8_t *buf, const Symbol &) const override;
+ void writeStubHelperHeader(uint8_t *buf) const override;
+ void writeStubHelperEntry(uint8_t *buf, const DylibSymbol &,
+ uint64_t entryAddr) const override;
+
+ void relaxGotLoad(uint8_t *loc, uint8_t type) const override;
+ const RelocAttrs &getRelocAttrs(uint8_t type) const override;
+ uint64_t getPageSize() const override { return 4 * 1024; }
+};
+
+} // namespace
+
+const RelocAttrs &ARM::getRelocAttrs(uint8_t type) const {
+ static const std::array<RelocAttrs, 10> relocAttrsArray{{
+#define B(x) RelocAttrBits::x
+ {"VANILLA", /* FIXME populate this */ B(_0)},
+ {"PAIR", /* FIXME populate this */ B(_0)},
+ {"SECTDIFF", /* FIXME populate this */ B(_0)},
+ {"LOCAL_SECTDIFF", /* FIXME populate this */ B(_0)},
+ {"PB_LA_PTR", /* FIXME populate this */ B(_0)},
+ {"BR24", B(PCREL) | B(LOCAL) | B(EXTERN) | B(BRANCH) | B(BYTE4)},
+ {"BR22", B(PCREL) | B(LOCAL) | B(EXTERN) | B(BRANCH) | B(BYTE4)},
+ {"32BIT_BRANCH", /* FIXME populate this */ B(_0)},
+ {"HALF", /* FIXME populate this */ B(_0)},
+ {"HALF_SECTDIFF", /* FIXME populate this */ B(_0)},
+#undef B
+ }};
+ assert(type < relocAttrsArray.size() && "invalid relocation type");
+ if (type >= relocAttrsArray.size())
+ return invalidRelocAttrs;
+ return relocAttrsArray[type];
+}
+
+int64_t ARM::getEmbeddedAddend(MemoryBufferRef mb, uint64_t offset,
+ relocation_info rel) const {
+ // FIXME: implement this
+ return 0;
+}
+
+template <int N> using BitfieldFlag = Bitfield::Element<bool, N, 1>;
+
+// ARM BL encoding:
+//
+//   30      28        24                                           0
+// +---------+---------+----------------------------------------------+
+// |  cond   | 1 0 1 1 |                    imm24                     |
+// +---------+---------+----------------------------------------------+
+//
+// `cond` here varies depending on whether we have bleq, blne, etc.
+// `imm24` encodes a 26-bit pcrel offset -- last 2 bits are zero as ARM
+// functions are 4-byte-aligned.
+//
+// ARM BLX encoding:
+//
+//   30      28        24                                           0
+// +---------+---------+----------------------------------------------+
+// | 1 1 1 1 | 1 0 1 H |                    imm24                     |
+// +---------+---------+----------------------------------------------+
+//
+// Since Thumb functions are 2-byte-aligned, we need one extra bit to encode
+// the offset -- that is the H bit.
+//
+// BLX is always unconditional, so while we can convert directly from BLX to BL,
+// we need to insert a shim if a BL's target is a Thumb function.
+//
+// Helper aliases for decoding BL / BLX:
+using Cond = Bitfield::Element<uint32_t, 28, 4>;
+using Imm24 = Bitfield::Element<int32_t, 0, 24>;
+
+void ARM::relocateOne(uint8_t *loc, const Reloc &r, uint64_t value,
+ uint64_t pc) const {
+ switch (r.type) {
+ case ARM_RELOC_BR24: {
+ uint32_t base = read32le(loc);
+ bool isBlx = Bitfield::get<Cond>(base) == 0xf;
+ const Symbol *sym = r.referent.get<Symbol *>();
+ int32_t offset = value - (pc + 8);
+
+ if (auto *defined = dyn_cast<Defined>(sym)) {
+ if (!isBlx && defined->thumb) {
+ error("TODO: implement interworking shim");
+ return;
+ } else if (isBlx && !defined->thumb) {
+ Bitfield::set<Cond>(base, 0xe); // unconditional BL
+ Bitfield::set<BitfieldFlag<24>>(base, 1);
+ isBlx = false;
+ }
+ } else {
+ error("TODO: Implement ARM_RELOC_BR24 for dylib symbols");
+ return;
+ }
+
+ if (isBlx) {
+ assert((0x1 & value) == 0);
+ Bitfield::set<Imm24>(base, offset >> 2);
+ Bitfield::set<BitfieldFlag<24>>(base, (offset >> 1) & 1); // H bit
+ } else {
+ assert((0x3 & value) == 0);
+ Bitfield::set<Imm24>(base, offset >> 2);
+ }
+ write32le(loc, base);
+ break;
+ }
+ default:
+ fatal("unhandled relocation type");
+ }
+}
+
+void ARM::writeStub(uint8_t *buf, const Symbol &sym) const {
+ fatal("TODO: implement this");
+}
+
+void ARM::writeStubHelperHeader(uint8_t *buf) const {
+ fatal("TODO: implement this");
+}
+
+void ARM::writeStubHelperEntry(uint8_t *buf, const DylibSymbol &sym,
+ uint64_t entryAddr) const {
+ fatal("TODO: implement this");
+}
+
+void ARM::relaxGotLoad(uint8_t *loc, uint8_t type) const {
+ fatal("TODO: implement this");
+}
+
+ARM::ARM(uint32_t cpuSubtype) : TargetInfo(ILP32()) {
+ cpuType = CPU_TYPE_ARM;
+ this->cpuSubtype = cpuSubtype;
+
+ stubSize = 0 /* FIXME */;
+ stubHelperHeaderSize = 0 /* FIXME */;
+ stubHelperEntrySize = 0 /* FIXME */;
+}
+
+TargetInfo *macho::createARMTargetInfo(uint32_t cpuSubtype) {
+ static ARM t(cpuSubtype);
+ return &t;
+}
--- /dev/null
+//===- ARM64.cpp ----------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "Arch/ARM64Common.h"
+#include "InputFiles.h"
+#include "Symbols.h"
+#include "SyntheticSections.h"
+#include "Target.h"
+
+#include "lld/Common/ErrorHandler.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/BinaryFormat/MachO.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/MathExtras.h"
+
+using namespace llvm;
+using namespace llvm::MachO;
+using namespace llvm::support::endian;
+using namespace lld;
+using namespace lld::macho;
+
+namespace {
+
+struct ARM64 : ARM64Common {
+ ARM64();
+ void writeStub(uint8_t *buf, const Symbol &) const override;
+ void writeStubHelperHeader(uint8_t *buf) const override;
+ void writeStubHelperEntry(uint8_t *buf, const DylibSymbol &,
+ uint64_t entryAddr) const override;
+ const RelocAttrs &getRelocAttrs(uint8_t type) const override;
+ void populateThunk(InputSection *thunk, Symbol *funcSym) override;
+};
+
+} // namespace
+
+// Random notes on reloc types:
+// ADDEND always pairs with BRANCH26, PAGE21, or PAGEOFF12
+// POINTER_TO_GOT: ld64 supports a 4-byte pc-relative form as well as an 8-byte
+// absolute version of this relocation. The semantics of the absolute relocation
+// are weird -- it results in the value of the GOT slot being written, instead
+// of the address. Let's not support it unless we find a real-world use case.
+
+const RelocAttrs &ARM64::getRelocAttrs(uint8_t type) const {
+ static const std::array<RelocAttrs, 11> relocAttrsArray{{
+#define B(x) RelocAttrBits::x
+ {"UNSIGNED",
+ B(UNSIGNED) | B(ABSOLUTE) | B(EXTERN) | B(LOCAL) | B(BYTE4) | B(BYTE8)},
+ {"SUBTRACTOR", B(SUBTRAHEND) | B(EXTERN) | B(BYTE4) | B(BYTE8)},
+ {"BRANCH26", B(PCREL) | B(EXTERN) | B(BRANCH) | B(BYTE4)},
+ {"PAGE21", B(PCREL) | B(EXTERN) | B(BYTE4)},
+ {"PAGEOFF12", B(ABSOLUTE) | B(EXTERN) | B(BYTE4)},
+ {"GOT_LOAD_PAGE21", B(PCREL) | B(EXTERN) | B(GOT) | B(BYTE4)},
+ {"GOT_LOAD_PAGEOFF12",
+ B(ABSOLUTE) | B(EXTERN) | B(GOT) | B(LOAD) | B(BYTE4)},
+ {"POINTER_TO_GOT", B(PCREL) | B(EXTERN) | B(GOT) | B(POINTER) | B(BYTE4)},
+ {"TLVP_LOAD_PAGE21", B(PCREL) | B(EXTERN) | B(TLV) | B(BYTE4)},
+ {"TLVP_LOAD_PAGEOFF12",
+ B(ABSOLUTE) | B(EXTERN) | B(TLV) | B(LOAD) | B(BYTE4)},
+ {"ADDEND", B(ADDEND)},
+#undef B
+ }};
+ assert(type < relocAttrsArray.size() && "invalid relocation type");
+ if (type >= relocAttrsArray.size())
+ return invalidRelocAttrs;
+ return relocAttrsArray[type];
+}
+
+static constexpr uint32_t stubCode[] = {
+ 0x90000010, // 00: adrp x16, __la_symbol_ptr@page
+ 0xf9400210, // 04: ldr x16, [x16, __la_symbol_ptr@pageoff]
+ 0xd61f0200, // 08: br x16
+};
+
+void ARM64::writeStub(uint8_t *buf8, const Symbol &sym) const {
+ ::writeStub<LP64>(buf8, stubCode, sym);
+}
+
+static constexpr uint32_t stubHelperHeaderCode[] = {
+ 0x90000011, // 00: adrp x17, _dyld_private@page
+ 0x91000231, // 04: add x17, x17, _dyld_private@pageoff
+ 0xa9bf47f0, // 08: stp x16/x17, [sp, #-16]!
+ 0x90000010, // 0c: adrp x16, dyld_stub_binder@page
+ 0xf9400210, // 10: ldr x16, [x16, dyld_stub_binder@pageoff]
+ 0xd61f0200, // 14: br x16
+};
+
+void ARM64::writeStubHelperHeader(uint8_t *buf8) const {
+ ::writeStubHelperHeader<LP64>(buf8, stubHelperHeaderCode);
+}
+
+static constexpr uint32_t stubHelperEntryCode[] = {
+ 0x18000050, // 00: ldr w16, l0
+ 0x14000000, // 04: b stubHelperHeader
+ 0x00000000, // 08: l0: .long 0
+};
+
+void ARM64::writeStubHelperEntry(uint8_t *buf8, const DylibSymbol &sym,
+ uint64_t entryVA) const {
+ ::writeStubHelperEntry(buf8, stubHelperEntryCode, sym, entryVA);
+}
+
+// A thunk is the relaxed variation of stubCode. We don't need the
+// extra indirection through a lazy pointer because the target address
+// is known at link time.
+static constexpr uint32_t thunkCode[] = {
+ 0x90000010, // 00: adrp x16, <thunk.ptr>@page
+    0x91000210, // 04: add x16, x16, <thunk.ptr>@pageoff
+ 0xd61f0200, // 08: br x16
+};
+
+void ARM64::populateThunk(InputSection *thunk, Symbol *funcSym) {
+ thunk->align = 4;
+ thunk->data = {reinterpret_cast<const uint8_t *>(thunkCode),
+ sizeof(thunkCode)};
+ thunk->relocs.push_back({/*type=*/ARM64_RELOC_PAGEOFF12,
+ /*pcrel=*/false, /*length=*/2,
+ /*offset=*/4, /*addend=*/0,
+ /*referent=*/funcSym});
+ thunk->relocs.push_back({/*type=*/ARM64_RELOC_PAGE21,
+ /*pcrel=*/true, /*length=*/2,
+ /*offset=*/0, /*addend=*/0,
+ /*referent=*/funcSym});
+}
+
+ARM64::ARM64() : ARM64Common(LP64()) {
+ cpuType = CPU_TYPE_ARM64;
+ cpuSubtype = CPU_SUBTYPE_ARM64_ALL;
+
+ stubSize = sizeof(stubCode);
+ thunkSize = sizeof(thunkCode);
+ branchRange = maxIntN(28) - thunkSize;
+ stubHelperHeaderSize = sizeof(stubHelperHeaderCode);
+ stubHelperEntrySize = sizeof(stubHelperEntryCode);
+}
+
+TargetInfo *macho::createARM64TargetInfo() {
+ static ARM64 t;
+ return &t;
+}
--- /dev/null
+//===- ARM64Common.cpp ----------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "Arch/ARM64Common.h"
+
+#include "lld/Common/ErrorHandler.h"
+#include "llvm/Support/Endian.h"
+
+using namespace llvm::MachO;
+using namespace llvm::support::endian;
+using namespace lld;
+using namespace lld::macho;
+
+int64_t ARM64Common::getEmbeddedAddend(MemoryBufferRef mb, uint64_t offset,
+ const relocation_info rel) const {
+ if (rel.r_type != ARM64_RELOC_UNSIGNED &&
+ rel.r_type != ARM64_RELOC_SUBTRACTOR) {
+ // All other reloc types should use the ADDEND relocation to store their
+ // addends.
+ // TODO(gkm): extract embedded addend just so we can assert that it is 0
+ return 0;
+ }
+
+ const auto *buf = reinterpret_cast<const uint8_t *>(mb.getBufferStart());
+ const uint8_t *loc = buf + offset + rel.r_address;
+ switch (rel.r_length) {
+ case 2:
+ return static_cast<int32_t>(read32le(loc));
+ case 3:
+ return read64le(loc);
+ default:
+ llvm_unreachable("invalid r_length");
+ }
+}
+
+// For instruction relocations (load, store, add), the base
+// instruction is pre-populated in the text section. A pre-populated
+// instruction has opcode & register-operand bits set, with immediate
+// operands zeroed. We read it from text, OR-in the immediate
+// operands, then write-back the completed instruction.
+
+void ARM64Common::relocateOne(uint8_t *loc, const Reloc &r, uint64_t value,
+ uint64_t pc) const {
+ uint32_t base = ((r.length == 2) ? read32le(loc) : 0);
+ switch (r.type) {
+ case ARM64_RELOC_BRANCH26:
+ value = encodeBranch26(r, base, value - pc);
+ break;
+ case ARM64_RELOC_SUBTRACTOR:
+ case ARM64_RELOC_UNSIGNED:
+ if (r.length == 2)
+ checkInt(r, value, 32);
+ break;
+ case ARM64_RELOC_POINTER_TO_GOT:
+ if (r.pcrel)
+ value -= pc;
+ checkInt(r, value, 32);
+ break;
+ case ARM64_RELOC_PAGE21:
+ case ARM64_RELOC_GOT_LOAD_PAGE21:
+ case ARM64_RELOC_TLVP_LOAD_PAGE21: {
+ assert(r.pcrel);
+ value = encodePage21(r, base, pageBits(value) - pageBits(pc));
+ break;
+ }
+ case ARM64_RELOC_PAGEOFF12:
+ case ARM64_RELOC_GOT_LOAD_PAGEOFF12:
+ case ARM64_RELOC_TLVP_LOAD_PAGEOFF12:
+ assert(!r.pcrel);
+ value = encodePageOff12(base, value);
+ break;
+ default:
+ llvm_unreachable("unexpected relocation type");
+ }
+
+ switch (r.length) {
+ case 2:
+ write32le(loc, value);
+ break;
+ case 3:
+ write64le(loc, value);
+ break;
+ default:
+ llvm_unreachable("invalid r_length");
+ }
+}
+
+void ARM64Common::relaxGotLoad(uint8_t *loc, uint8_t type) const {
+ // The instruction format comments below are quoted from
+ // Arm® Architecture Reference Manual
+ // Armv8, for Armv8-A architecture profile
+ // ARM DDI 0487G.a (ID011921)
+ uint32_t instruction = read32le(loc);
+ // C6.2.132 LDR (immediate)
+ // This matches both the 64- and 32-bit variants:
+ // LDR <(X|W)t>, [<Xn|SP>{, #<pimm>}]
+ if ((instruction & 0xbfc00000) != 0xb9400000)
+ error(getRelocAttrs(type).name + " reloc requires LDR instruction");
+ assert(((instruction >> 10) & 0xfff) == 0 &&
+ "non-zero embedded LDR immediate");
+ // C6.2.4 ADD (immediate)
+ // ADD <Xd|SP>, <Xn|SP>, #<imm>{, <shift>}
+ instruction = ((instruction & 0x001fffff) | 0x91000000);
+ write32le(loc, instruction);
+}
--- /dev/null
+//===- ARM64Common.h --------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLD_MACHO_ARCH_ARM64COMMON_H
+#define LLD_MACHO_ARCH_ARM64COMMON_H
+
+#include "InputFiles.h"
+#include "Symbols.h"
+#include "SyntheticSections.h"
+#include "Target.h"
+
+#include "llvm/BinaryFormat/MachO.h"
+
+namespace lld {
+namespace macho {
+
+struct ARM64Common : TargetInfo {
+ template <class LP> ARM64Common(LP lp) : TargetInfo(lp) {}
+
+ int64_t getEmbeddedAddend(MemoryBufferRef, uint64_t offset,
+ const llvm::MachO::relocation_info) const override;
+ void relocateOne(uint8_t *loc, const Reloc &, uint64_t va,
+ uint64_t pc) const override;
+
+ void relaxGotLoad(uint8_t *loc, uint8_t type) const override;
+ uint64_t getPageSize() const override { return 16 * 1024; }
+};
+
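+// Extract the `width`-bit field starting at bit `right` of `value` and place
+// it at bit position `left` of the result.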
+inline uint64_t bitField(uint64_t value, int right, int width, int left) {
+ return ((value >> right) & ((1 << width) - 1)) << left;
+}
+
+//              25                                                 0
+// +-----------+---------------------------------------------------+
+// |           |                       imm26                       |
+// +-----------+---------------------------------------------------+
+
+inline uint64_t encodeBranch26(const Reloc &r, uint64_t base, uint64_t va) {
+ checkInt(r, va, 28);
+ // Since branch destinations are 4-byte aligned, the 2 least-
+ // significant bits are 0. They are right shifted off the end.
+ return (base | bitField(va, 2, 26, 0));
+}
+
+inline uint64_t encodeBranch26(SymbolDiagnostic d, uint64_t base, uint64_t va) {
+ checkInt(d, va, 28);
+ return (base | bitField(va, 2, 26, 0));
+}
+
+//  30 29          23                                            5
+// +-+---+---------+-------------------------------------+---------+
+// | |ilo|         |                immhi                |         |
+// +-+---+---------+-------------------------------------+---------+
+
+inline uint64_t encodePage21(const Reloc &r, uint64_t base, uint64_t va) {
+ checkInt(r, va, 35);
+ return (base | bitField(va, 12, 2, 29) | bitField(va, 14, 19, 5));
+}
+
+inline uint64_t encodePage21(SymbolDiagnostic d, uint64_t base, uint64_t va) {
+ checkInt(d, va, 35);
+ return (base | bitField(va, 12, 2, 29) | bitField(va, 14, 19, 5));
+}
+
+//                      21                      10
+// +-------------------+-----------------------+-------------------+
+// |                   |         imm12         |                   |
+// +-------------------+-----------------------+-------------------+
+
+inline uint64_t encodePageOff12(uint32_t base, uint64_t va) {
+ int scale = 0;
+ if ((base & 0x3b00'0000) == 0x3900'0000) { // load/store
+ scale = base >> 30;
+ if (scale == 0 && (base & 0x0480'0000) == 0x0480'0000) // 128-bit variant
+ scale = 4;
+ }
+
+ // TODO(gkm): extract embedded addend and warn if != 0
+ // uint64_t addend = ((base & 0x003FFC00) >> 10);
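+  // The imm12 field of a load/store is implicitly scaled by the access size,
+  // so the low `scale` bits of the offset are dropped when it is packed into
+  // bits [21:10].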
+ return (base | bitField(va, scale, 12 - scale, 10));
+}
+
+inline uint64_t pageBits(uint64_t address) {
+ const uint64_t pageMask = ~0xfffull;
+ return address & pageMask;
+}
+
+template <class LP>
+inline void writeStub(uint8_t *buf8, const uint32_t stubCode[3],
+ const macho::Symbol &sym) {
+ auto *buf32 = reinterpret_cast<uint32_t *>(buf8);
+ constexpr size_t stubCodeSize = 3 * sizeof(uint32_t);
+ uint64_t pcPageBits =
+ pageBits(in.stubs->addr + sym.stubsIndex * stubCodeSize);
+ uint64_t lazyPointerVA =
+ in.lazyPointers->addr + sym.stubsIndex * LP::wordSize;
+ buf32[0] = encodePage21({&sym, "stub"}, stubCode[0],
+ pageBits(lazyPointerVA) - pcPageBits);
+ buf32[1] = encodePageOff12(stubCode[1], lazyPointerVA);
+ buf32[2] = stubCode[2];
+}
+
+template <class LP>
+inline void writeStubHelperHeader(uint8_t *buf8,
+ const uint32_t stubHelperHeaderCode[6]) {
+ auto *buf32 = reinterpret_cast<uint32_t *>(buf8);
+ auto pcPageBits = [](int i) {
+ return pageBits(in.stubHelper->addr + i * sizeof(uint32_t));
+ };
+ uint64_t loaderVA = in.imageLoaderCache->getVA();
+ SymbolDiagnostic d = {nullptr, "stub header helper"};
+ buf32[0] = encodePage21(d, stubHelperHeaderCode[0],
+ pageBits(loaderVA) - pcPageBits(0));
+ buf32[1] = encodePageOff12(stubHelperHeaderCode[1], loaderVA);
+ buf32[2] = stubHelperHeaderCode[2];
+ uint64_t binderVA =
+ in.got->addr + in.stubHelper->stubBinder->gotIndex * LP::wordSize;
+ buf32[3] = encodePage21(d, stubHelperHeaderCode[3],
+ pageBits(binderVA) - pcPageBits(3));
+ buf32[4] = encodePageOff12(stubHelperHeaderCode[4], binderVA);
+ buf32[5] = stubHelperHeaderCode[5];
+}
+
+inline void writeStubHelperEntry(uint8_t *buf8,
+ const uint32_t stubHelperEntryCode[3],
+ const DylibSymbol &sym, uint64_t entryVA) {
+ auto *buf32 = reinterpret_cast<uint32_t *>(buf8);
+ auto pcVA = [entryVA](int i) { return entryVA + i * sizeof(uint32_t); };
+ uint64_t stubHelperHeaderVA = in.stubHelper->addr;
+ buf32[0] = stubHelperEntryCode[0];
+ buf32[1] = encodeBranch26({&sym, "stub helper"}, stubHelperEntryCode[1],
+ stubHelperHeaderVA - pcVA(1));
+ buf32[2] = sym.lazyBindOffset;
+}
+
+} // namespace macho
+} // namespace lld
+
+#endif
--- /dev/null
+//===- ARM64_32.cpp -------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "Arch/ARM64Common.h"
+#include "InputFiles.h"
+#include "Symbols.h"
+#include "SyntheticSections.h"
+#include "Target.h"
+
+#include "lld/Common/ErrorHandler.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/BinaryFormat/MachO.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/MathExtras.h"
+
+using namespace llvm::MachO;
+using namespace llvm::support::endian;
+using namespace lld;
+using namespace lld::macho;
+
+namespace {
+
+struct ARM64_32 : ARM64Common {
+ ARM64_32();
+ void writeStub(uint8_t *buf, const Symbol &) const override;
+ void writeStubHelperHeader(uint8_t *buf) const override;
+ void writeStubHelperEntry(uint8_t *buf, const DylibSymbol &,
+ uint64_t entryAddr) const override;
+ const RelocAttrs &getRelocAttrs(uint8_t type) const override;
+};
+
+} // namespace
+
+// These are very similar to ARM64's relocation attributes, except that we don't
+// have the BYTE8 flag set.
+const RelocAttrs &ARM64_32::getRelocAttrs(uint8_t type) const {
+ static const std::array<RelocAttrs, 11> relocAttrsArray{{
+#define B(x) RelocAttrBits::x
+ {"UNSIGNED", B(UNSIGNED) | B(ABSOLUTE) | B(EXTERN) | B(LOCAL) | B(BYTE4)},
+ {"SUBTRACTOR", B(SUBTRAHEND) | B(EXTERN) | B(BYTE4)},
+ {"BRANCH26", B(PCREL) | B(EXTERN) | B(BRANCH) | B(BYTE4)},
+ {"PAGE21", B(PCREL) | B(EXTERN) | B(BYTE4)},
+ {"PAGEOFF12", B(ABSOLUTE) | B(EXTERN) | B(BYTE4)},
+ {"GOT_LOAD_PAGE21", B(PCREL) | B(EXTERN) | B(GOT) | B(BYTE4)},
+ {"GOT_LOAD_PAGEOFF12",
+ B(ABSOLUTE) | B(EXTERN) | B(GOT) | B(LOAD) | B(BYTE4)},
+ {"POINTER_TO_GOT", B(PCREL) | B(EXTERN) | B(GOT) | B(POINTER) | B(BYTE4)},
+ {"TLVP_LOAD_PAGE21", B(PCREL) | B(EXTERN) | B(TLV) | B(BYTE4)},
+ {"TLVP_LOAD_PAGEOFF12",
+ B(ABSOLUTE) | B(EXTERN) | B(TLV) | B(LOAD) | B(BYTE4)},
+ {"ADDEND", B(ADDEND)},
+#undef B
+ }};
+ assert(type < relocAttrsArray.size() && "invalid relocation type");
+ if (type >= relocAttrsArray.size())
+ return invalidRelocAttrs;
+ return relocAttrsArray[type];
+}
+
+// The stub code is fairly similar to ARM64's, except that we load pointers into
+// 32-bit 'w' registers, instead of the 64-bit 'x' ones.
+
+static constexpr uint32_t stubCode[] = {
+ 0x90000010, // 00: adrp x16, __la_symbol_ptr@page
+ 0xb9400210, // 04: ldr w16, [x16, __la_symbol_ptr@pageoff]
+ 0xd61f0200, // 08: br x16
+};
+
+void ARM64_32::writeStub(uint8_t *buf8, const Symbol &sym) const {
+ ::writeStub<ILP32>(buf8, stubCode, sym);
+}
+
+static constexpr uint32_t stubHelperHeaderCode[] = {
+ 0x90000011, // 00: adrp x17, _dyld_private@page
+ 0x91000231, // 04: add x17, x17, _dyld_private@pageoff
+ 0xa9bf47f0, // 08: stp x16/x17, [sp, #-16]!
+ 0x90000010, // 0c: adrp x16, dyld_stub_binder@page
+ 0xb9400210, // 10: ldr w16, [x16, dyld_stub_binder@pageoff]
+ 0xd61f0200, // 14: br x16
+};
+
+void ARM64_32::writeStubHelperHeader(uint8_t *buf8) const {
+ ::writeStubHelperHeader<ILP32>(buf8, stubHelperHeaderCode);
+}
+
+static constexpr uint32_t stubHelperEntryCode[] = {
+ 0x18000050, // 00: ldr w16, l0
+ 0x14000000, // 04: b stubHelperHeader
+ 0x00000000, // 08: l0: .long 0
+};
+
+void ARM64_32::writeStubHelperEntry(uint8_t *buf8, const DylibSymbol &sym,
+ uint64_t entryVA) const {
+ ::writeStubHelperEntry(buf8, stubHelperEntryCode, sym, entryVA);
+}
+
+ARM64_32::ARM64_32() : ARM64Common(ILP32()) {
+ cpuType = CPU_TYPE_ARM64_32;
+ cpuSubtype = CPU_SUBTYPE_ARM64_V8;
+
+ stubSize = sizeof(stubCode);
+ stubHelperHeaderSize = sizeof(stubHelperHeaderCode);
+ stubHelperEntrySize = sizeof(stubHelperEntryCode);
+}
+
+TargetInfo *macho::createARM64_32TargetInfo() {
+ static ARM64_32 t;
+ return &t;
+}
struct X86_64 : TargetInfo {
X86_64();
- uint64_t getImplicitAddend(MemoryBufferRef, const section_64 &,
- const relocation_info &) const override;
- void relocateOne(uint8_t *loc, const Reloc &, uint64_t val) const override;
+ int64_t getEmbeddedAddend(MemoryBufferRef, uint64_t offset,
+ const relocation_info) const override;
+ void relocateOne(uint8_t *loc, const Reloc &, uint64_t va,
+ uint64_t relocVA) const override;
- void writeStub(uint8_t *buf, const DylibSymbol &) const override;
+ void writeStub(uint8_t *buf, const Symbol &) const override;
void writeStubHelperHeader(uint8_t *buf) const override;
void writeStubHelperEntry(uint8_t *buf, const DylibSymbol &,
uint64_t entryAddr) const override;
- void prepareSymbolRelocation(lld::macho::Symbol &, const InputSection *,
- const Reloc &) override;
- uint64_t getSymbolVA(const lld::macho::Symbol &, uint8_t type) const override;
+ void relaxGotLoad(uint8_t *loc, uint8_t type) const override;
+ const RelocAttrs &getRelocAttrs(uint8_t type) const override;
+ uint64_t getPageSize() const override { return 4 * 1024; }
};
} // namespace
-static std::string getErrorLocation(MemoryBufferRef mb, const section_64 &sec,
- const relocation_info &rel) {
- return ("invalid relocation at offset " + std::to_string(rel.r_address) +
- " of " + sec.segname + "," + sec.sectname + " in " +
- mb.getBufferIdentifier())
- .str();
+const RelocAttrs &X86_64::getRelocAttrs(uint8_t type) const {
+ static const std::array<RelocAttrs, 10> relocAttrsArray{{
+#define B(x) RelocAttrBits::x
+ {"UNSIGNED",
+ B(UNSIGNED) | B(ABSOLUTE) | B(EXTERN) | B(LOCAL) | B(BYTE4) | B(BYTE8)},
+ {"SIGNED", B(PCREL) | B(EXTERN) | B(LOCAL) | B(BYTE4)},
+ {"BRANCH", B(PCREL) | B(EXTERN) | B(BRANCH) | B(BYTE4)},
+ {"GOT_LOAD", B(PCREL) | B(EXTERN) | B(GOT) | B(LOAD) | B(BYTE4)},
+ {"GOT", B(PCREL) | B(EXTERN) | B(GOT) | B(POINTER) | B(BYTE4)},
+ {"SUBTRACTOR", B(SUBTRAHEND) | B(EXTERN) | B(BYTE4) | B(BYTE8)},
+ {"SIGNED_1", B(PCREL) | B(EXTERN) | B(LOCAL) | B(BYTE4)},
+ {"SIGNED_2", B(PCREL) | B(EXTERN) | B(LOCAL) | B(BYTE4)},
+ {"SIGNED_4", B(PCREL) | B(EXTERN) | B(LOCAL) | B(BYTE4)},
+ {"TLV", B(PCREL) | B(EXTERN) | B(TLV) | B(LOAD) | B(BYTE4)},
+#undef B
+ }};
+ assert(type < relocAttrsArray.size() && "invalid relocation type");
+ if (type >= relocAttrsArray.size())
+ return invalidRelocAttrs;
+ return relocAttrsArray[type];
}
-static void validateLength(MemoryBufferRef mb, const section_64 &sec,
- const relocation_info &rel,
- const std::vector<uint8_t> &validLengths) {
- if (std::find(validLengths.begin(), validLengths.end(), rel.r_length) !=
- validLengths.end())
- return;
-
- std::string msg = getErrorLocation(mb, sec, rel) + ": relocations of type " +
- std::to_string(rel.r_type) + " must have r_length of ";
- bool first = true;
- for (uint8_t length : validLengths) {
- if (!first)
- msg += " or ";
- first = false;
- msg += std::to_string(length);
- }
- fatal(msg);
-}
-
-uint64_t X86_64::getImplicitAddend(MemoryBufferRef mb, const section_64 &sec,
- const relocation_info &rel) const {
- auto *buf = reinterpret_cast<const uint8_t *>(mb.getBufferStart());
- const uint8_t *loc = buf + sec.offset + rel.r_address;
- switch (rel.r_type) {
- case X86_64_RELOC_BRANCH:
- // XXX: ld64 also supports r_length = 0 here but I'm not sure when such a
- // relocation will actually be generated.
- validateLength(mb, sec, rel, {2});
- break;
- case X86_64_RELOC_SIGNED:
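+// X86_64_RELOC_SIGNED_{1,2,4} are used when the relocated 32-bit field is
+// followed by 1, 2 or 4 bytes of immediate operand; those trailing bytes shift
+// the end-of-instruction address that the pc-relative value is measured from.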
+static int pcrelOffset(uint8_t type) {
+ switch (type) {
case X86_64_RELOC_SIGNED_1:
+ return 1;
case X86_64_RELOC_SIGNED_2:
+ return 2;
case X86_64_RELOC_SIGNED_4:
- case X86_64_RELOC_GOT_LOAD:
- case X86_64_RELOC_GOT:
- if (!rel.r_pcrel)
- fatal(getErrorLocation(mb, sec, rel) + ": relocations of type " +
- std::to_string(rel.r_type) + " must be pcrel");
- validateLength(mb, sec, rel, {2});
- break;
- case X86_64_RELOC_UNSIGNED:
- if (rel.r_pcrel)
- fatal(getErrorLocation(mb, sec, rel) + ": relocations of type " +
- std::to_string(rel.r_type) + " must not be pcrel");
- validateLength(mb, sec, rel, {2, 3});
- break;
+ return 4;
default:
- error("TODO: Unhandled relocation type " + std::to_string(rel.r_type));
return 0;
}
+}
+
+int64_t X86_64::getEmbeddedAddend(MemoryBufferRef mb, uint64_t offset,
+ relocation_info rel) const {
+ auto *buf = reinterpret_cast<const uint8_t *>(mb.getBufferStart());
+ const uint8_t *loc = buf + offset + rel.r_address;
switch (rel.r_length) {
- case 0:
- return *loc;
- case 1:
- return read16le(loc);
case 2:
- return read32le(loc);
+ return static_cast<int32_t>(read32le(loc)) + pcrelOffset(rel.r_type);
case 3:
- return read64le(loc);
+ return read64le(loc) + pcrelOffset(rel.r_type);
default:
llvm_unreachable("invalid r_length");
}
}
-void X86_64::relocateOne(uint8_t *loc, const Reloc &r, uint64_t val) const {
- switch (r.type) {
- case X86_64_RELOC_BRANCH:
- case X86_64_RELOC_SIGNED:
- case X86_64_RELOC_SIGNED_1:
- case X86_64_RELOC_SIGNED_2:
- case X86_64_RELOC_SIGNED_4:
- case X86_64_RELOC_GOT_LOAD:
- case X86_64_RELOC_GOT:
- // These types are only used for pc-relative relocations, so offset by 4
- // since the RIP has advanced by 4 at this point. This is only valid when
- // r_length = 2, which is enforced by validateLength().
- val -= 4;
- break;
- case X86_64_RELOC_UNSIGNED:
- break;
- default:
- llvm_unreachable(
- "getImplicitAddend should have flagged all unhandled relocation types");
+void X86_64::relocateOne(uint8_t *loc, const Reloc &r, uint64_t value,
+ uint64_t relocVA) const {
+ if (r.pcrel) {
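+    // Pc-relative values are relative to the end of the instruction: 4 bytes
+    // for the relocated field itself plus any trailing immediate bytes.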
+ uint64_t pc = relocVA + 4 + pcrelOffset(r.type);
+ value -= pc;
}
switch (r.length) {
- case 0:
- *loc = val;
- break;
- case 1:
- write16le(loc, val);
- break;
case 2:
- write32le(loc, val);
+ if (r.type == X86_64_RELOC_UNSIGNED)
+ checkUInt(r, value, 32);
+ else
+ checkInt(r, value, 32);
+ write32le(loc, value);
break;
case 3:
- write64le(loc, val);
+ write64le(loc, value);
break;
default:
llvm_unreachable("invalid r_length");
// bufAddr: The virtual address corresponding to buf[0].
// bufOff: The offset within buf of the next instruction.
// destAddr: The destination address that the current instruction references.
-static void writeRipRelative(uint8_t *buf, uint64_t bufAddr, uint64_t bufOff,
- uint64_t destAddr) {
+static void writeRipRelative(SymbolDiagnostic d, uint8_t *buf, uint64_t bufAddr,
+ uint64_t bufOff, uint64_t destAddr) {
uint64_t rip = bufAddr + bufOff;
+ checkInt(d, destAddr - rip, 32);
// For the instructions we care about, the RIP-relative address is always
// stored in the last 4 bytes of the instruction.
write32le(buf + bufOff - 4, destAddr - rip);
0xff, 0x25, 0, 0, 0, 0, // jmpq *__la_symbol_ptr(%rip)
};
-void X86_64::writeStub(uint8_t *buf, const DylibSymbol &sym) const {
+void X86_64::writeStub(uint8_t *buf, const Symbol &sym) const {
memcpy(buf, stub, 2); // just copy the two nonzero bytes
uint64_t stubAddr = in.stubs->addr + sym.stubsIndex * sizeof(stub);
- writeRipRelative(buf, stubAddr, sizeof(stub),
- in.lazyPointers->addr + sym.stubsIndex * WordSize);
+ writeRipRelative({&sym, "stub"}, buf, stubAddr, sizeof(stub),
+ in.lazyPointers->addr + sym.stubsIndex * LP64::wordSize);
}
static constexpr uint8_t stubHelperHeader[] = {
0x90, // 0xf: nop
};
-static constexpr uint8_t stubHelperEntry[] = {
- 0x68, 0, 0, 0, 0, // 0x0: pushq <bind offset>
- 0xe9, 0, 0, 0, 0, // 0x5: jmp <__stub_helper>
-};
-
void X86_64::writeStubHelperHeader(uint8_t *buf) const {
memcpy(buf, stubHelperHeader, sizeof(stubHelperHeader));
- writeRipRelative(buf, in.stubHelper->addr, 7, in.imageLoaderCache->getVA());
- writeRipRelative(buf, in.stubHelper->addr, 0xf,
+ SymbolDiagnostic d = {nullptr, "stub helper header"};
+ writeRipRelative(d, buf, in.stubHelper->addr, 7,
+ in.imageLoaderCache->getVA());
+ writeRipRelative(d, buf, in.stubHelper->addr, 0xf,
in.got->addr +
- in.stubHelper->stubBinder->gotIndex * WordSize);
+ in.stubHelper->stubBinder->gotIndex * LP64::wordSize);
}
+static constexpr uint8_t stubHelperEntry[] = {
+ 0x68, 0, 0, 0, 0, // 0x0: pushq <bind offset>
+ 0xe9, 0, 0, 0, 0, // 0x5: jmp <__stub_helper>
+};
+
void X86_64::writeStubHelperEntry(uint8_t *buf, const DylibSymbol &sym,
uint64_t entryAddr) const {
memcpy(buf, stubHelperEntry, sizeof(stubHelperEntry));
write32le(buf + 1, sym.lazyBindOffset);
- writeRipRelative(buf, entryAddr, sizeof(stubHelperEntry),
- in.stubHelper->addr);
+ writeRipRelative({&sym, "stub helper"}, buf, entryAddr,
+ sizeof(stubHelperEntry), in.stubHelper->addr);
}
-void X86_64::prepareSymbolRelocation(lld::macho::Symbol &sym,
- const InputSection *isec, const Reloc &r) {
- switch (r.type) {
- case X86_64_RELOC_GOT_LOAD:
- // TODO: implement mov -> lea relaxation for non-dynamic symbols
- case X86_64_RELOC_GOT:
- in.got->addEntry(sym);
- break;
- case X86_64_RELOC_BRANCH: {
- if (auto *dysym = dyn_cast<DylibSymbol>(&sym))
- in.stubs->addEntry(*dysym);
- break;
- }
- case X86_64_RELOC_UNSIGNED: {
- if (auto *dysym = dyn_cast<DylibSymbol>(&sym)) {
- if (r.length != 3) {
- error("X86_64_RELOC_UNSIGNED referencing the dynamic symbol " +
- dysym->getName() + " must have r_length = 3");
- return;
- }
- in.binding->addEntry(dysym, isec, r.offset, r.addend);
- }
- break;
- }
- case X86_64_RELOC_SIGNED:
- case X86_64_RELOC_SIGNED_1:
- case X86_64_RELOC_SIGNED_2:
- case X86_64_RELOC_SIGNED_4:
- break;
- case X86_64_RELOC_SUBTRACTOR:
- case X86_64_RELOC_TLV:
- fatal("TODO: handle relocation type " + std::to_string(r.type));
- break;
- default:
- llvm_unreachable("unexpected relocation type");
- }
-}
-
-uint64_t X86_64::getSymbolVA(const lld::macho::Symbol &sym,
- uint8_t type) const {
- switch (type) {
- case X86_64_RELOC_GOT_LOAD:
- case X86_64_RELOC_GOT:
- return in.got->addr + sym.gotIndex * WordSize;
- case X86_64_RELOC_BRANCH:
- if (auto *dysym = dyn_cast<DylibSymbol>(&sym))
- return in.stubs->addr + dysym->stubsIndex * sizeof(stub);
- return sym.getVA();
- case X86_64_RELOC_UNSIGNED:
- case X86_64_RELOC_SIGNED:
- case X86_64_RELOC_SIGNED_1:
- case X86_64_RELOC_SIGNED_2:
- case X86_64_RELOC_SIGNED_4:
- return sym.getVA();
- case X86_64_RELOC_SUBTRACTOR:
- case X86_64_RELOC_TLV:
- fatal("TODO: handle relocation type " + std::to_string(type));
- default:
- llvm_unreachable("Unexpected relocation type");
- }
+void X86_64::relaxGotLoad(uint8_t *loc, uint8_t type) const {
+ // Convert MOVQ to LEAQ
+ if (loc[-2] != 0x8b)
+ error(getRelocAttrs(type).name + " reloc requires MOVQ instruction");
+ loc[-2] = 0x8d;
}
-X86_64::X86_64() {
+X86_64::X86_64() : TargetInfo(LP64()) {
cpuType = CPU_TYPE_X86_64;
cpuSubtype = CPU_SUBTYPE_X86_64_ALL;
tablegen(LLVM Options.inc -gen-opt-parser-defs)
add_public_tablegen_target(MachOOptionsTableGen)
+include_directories(${LLVM_MAIN_SRC_DIR}/../libunwind/include)
+
add_lld_library(lldMachO2
+ Arch/ARM.cpp
+ Arch/ARM64.cpp
+ Arch/ARM64Common.cpp
+ Arch/ARM64_32.cpp
Arch/X86_64.cpp
+ ConcatOutputSection.cpp
Driver.cpp
+ DriverUtils.cpp
+ Dwarf.cpp
ExportTrie.cpp
+ ICF.cpp
InputFiles.cpp
InputSection.cpp
- MergedOutputSection.cpp
+ LTO.cpp
+ MapFile.cpp
+ MarkLive.cpp
+ ObjC.cpp
OutputSection.cpp
OutputSegment.cpp
+ Relocations.cpp
SymbolTable.cpp
Symbols.cpp
SyntheticSections.cpp
Target.cpp
+ UnwindInfoSection.cpp
Writer.cpp
LINK_COMPONENTS
${LLVM_TARGETS_TO_BUILD}
BinaryFormat
+ BitReader
Core
+ DebugInfoDWARF
+ LTO
+ MC
+ ObjCARCOpts
Object
Option
+ Passes
Support
TextAPI
LINK_LIBS
lldCommon
${LLVM_PTHREAD_LIB}
+ ${XAR_LIB}
DEPENDS
MachOOptionsTableGen
${tablegen_deps}
)
+
+if(LLVM_HAVE_LIBXAR)
+ target_link_libraries(lldMachO2 PRIVATE ${XAR_LIB})
+endif()
--- /dev/null
+//===- ConcatOutputSection.cpp --------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "ConcatOutputSection.h"
+#include "Config.h"
+#include "OutputSegment.h"
+#include "SymbolTable.h"
+#include "Symbols.h"
+#include "SyntheticSections.h"
+#include "Target.h"
+#include "lld/Common/ErrorHandler.h"
+#include "lld/Common/Memory.h"
+#include "llvm/BinaryFormat/MachO.h"
+#include "llvm/Support/ScopedPrinter.h"
+#include "llvm/Support/TimeProfiler.h"
+
+using namespace llvm;
+using namespace llvm::MachO;
+using namespace lld;
+using namespace lld::macho;
+
+MapVector<NamePair, ConcatOutputSection *> macho::concatOutputSections;
+
+void ConcatOutputSection::addInput(ConcatInputSection *input) {
+ assert(input->parent == this);
+ if (inputs.empty()) {
+ align = input->align;
+ flags = input->getFlags();
+ } else {
+ align = std::max(align, input->align);
+ finalizeFlags(input);
+ }
+ inputs.push_back(input);
+}
+
+// Branch-range extension can be implemented in two ways, either through ...
+//
+// (1) Branch islands: Single branch instructions (also of limited range),
+// that might be chained in multiple hops to reach the desired
+//     destination. On ARM64, as many as 16 branch islands are needed to hop
+//     between opposite ends of a 2 GiB program. ld64 uses branch islands
+//     exclusively, even when it needs excessive hops.
+//
+// (2) Thunks: Instruction(s) to load the destination address into a scratch
+// register, followed by a register-indirect branch. Thunks are
+// constructed to reach any arbitrary address, so need not be
+// chained. Although thunks need not be chained, a program might need
+// multiple thunks to the same destination distributed throughout a large
+// program so that all call sites can have one within range.
+//
+// The optimal approach is to mix islands for destinations within two hops,
+// and use thunks for destinations at greater distance. For now, we only
+// implement thunks. TODO: Add support for branch islands.
+//
+// Internally -- as expressed in LLD's data structures -- a
+// branch-range-extension thunk comprises ...
+//
+// (1) new Defined privateExtern symbol for the thunk named
+// <FUNCTION>.thunk.<SEQUENCE>, which references ...
+// (2) new InputSection, which contains ...
+// (3.1) new data for the instructions to load & branch to the far address +
+// (3.2) new Relocs on instructions to load the far address, which reference ...
+// (4.1) existing Defined extern symbol for the real function in __text, or
+// (4.2) existing DylibSymbol for the real function in a dylib
+//
+// Nearly-optimal thunk-placement algorithm features:
+//
+// * Single pass: O(n) on the number of call sites.
+//
+// * Accounts for the exact space overhead of thunks - no heuristics
+//
+// * Exploits the full range of call instructions - forward & backward
+//
+// Data:
+//
+// * DenseMap<Symbol *, ThunkInfo> thunkMap: Maps the function symbol
+// to its thunk bookkeeper.
+//
+// * struct ThunkInfo (bookkeeper): Call instructions have limited range, and
+// distant call sites might be unable to reach the same thunk, so multiple
+// thunks are necessary to serve all call sites in a very large program. A
+// thunkInfo stores state for all thunks associated with a particular
+// function: (a) thunk symbol, (b) input section containing stub code, and
+// (c) sequence number for the active thunk incarnation. When an old thunk
+// goes out of range, we increment the sequence number and create a new
+// thunk named <FUNCTION>.thunk.<SEQUENCE>.
+//
+// * A thunk incarnation comprises (a) private-extern Defined symbol pointing
+// to (b) an InputSection holding machine instructions (similar to a MachO
+// stub), and (c) Reloc(s) that reference the real function for fixing-up
+// the stub code.
+//
+// * std::vector<ConcatInputSection *> ConcatOutputSection::thunks: A vector
+//   parallel to the inputs vector. We store new thunks via cheap vector
+//   append, rather than costly insertion into the inputs vector.
+//
+// Control Flow:
+//
+// * During address assignment, ConcatOutputSection::finalize() examines call
+// sites by ascending address and creates thunks. When a function is beyond
+// the range of a call site, we need a thunk. Place it at the largest
+// available forward address from the call site. Call sites increase
+// monotonically and thunks are always placed as far forward as possible;
+// thus, we place thunks at monotonically increasing addresses. Once a thunk
+// is placed, it and all previous input-section addresses are final.
+//
+// * ConcatOutputSection::finalize() and ConcatOutputSection::writeTo() merge
+// the inputs and thunks vectors (both ordered by ascending address), which
+// is simple and cheap.
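+//
+// As an illustrative sketch (an assumption, not something specified by this
+// comment block), an ARM64 thunk body could be the three-instruction sequence
+//   adrp x16, <function>@page
+//   add  x16, x16, <function>@pageoff
+//   br   x16
+// which can reach any address at the cost of one extra indirect branch.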
+
+DenseMap<Symbol *, ThunkInfo> lld::macho::thunkMap;
+
+// Determine whether we need thunks, which depends on the target arch -- RISC
+// (i.e., ARM) generally does because it has limited-range branch/call
+// instructions, whereas CISC (i.e., x86) generally doesn't. RISC only needs
+// thunks for programs so large that branch source & destination addresses
+// might differ by more than the range of the branch instruction(s).
+bool ConcatOutputSection::needsThunks() const {
+ if (!target->usesThunks())
+ return false;
+ uint64_t isecAddr = addr;
+ for (InputSection *isec : inputs)
+ isecAddr = alignTo(isecAddr, isec->align) + isec->getSize();
+ if (isecAddr - addr + in.stubs->getSize() <= target->branchRange)
+ return false;
+ // Yes, this program is large enough to need thunks.
+ for (InputSection *isec : inputs) {
+ for (Reloc &r : isec->relocs) {
+ if (!target->hasAttr(r.type, RelocAttrBits::BRANCH))
+ continue;
+ auto *sym = r.referent.get<Symbol *>();
+ // Pre-populate the thunkMap and memoize call site counts for every
+ // InputSection and ThunkInfo. We do this for the benefit of
+ // ConcatOutputSection::estimateStubsInRangeVA()
+ ThunkInfo &thunkInfo = thunkMap[sym];
+ // Knowing ThunkInfo call site count will help us know whether or not we
+ // might need to create more for this referent at the time we are
+      // estimating distance to __stubs in estimateStubsInRangeVA().
+ ++thunkInfo.callSiteCount;
+ // Knowing InputSection call site count will help us avoid work on those
+ // that have no BRANCH relocs.
+ ++isec->callSiteCount;
+ }
+ }
+ return true;
+}
+
+// Since __stubs is placed after __text, we must estimate the address
+// beyond which stubs are within range of a simple forward branch.
+uint64_t ConcatOutputSection::estimateStubsInRangeVA(size_t callIdx) const {
+ uint64_t branchRange = target->branchRange;
+ size_t endIdx = inputs.size();
+ ConcatInputSection *isec = inputs[callIdx];
+ uint64_t isecVA = isec->getVA();
+ // Tally the non-stub functions which still have call sites
+ // remaining to process, which yields the maximum number
+ // of thunks we might yet place.
+ size_t maxPotentialThunks = 0;
+ for (auto &tp : thunkMap) {
+ ThunkInfo &ti = tp.second;
+ maxPotentialThunks +=
+ !tp.first->isInStubs() && ti.callSitesUsed < ti.callSiteCount;
+ }
+ // Tally the total size of input sections remaining to process.
+ uint64_t isecEnd = isec->getVA();
+ for (size_t i = callIdx; i < endIdx; i++) {
+ InputSection *isec = inputs[i];
+ isecEnd = alignTo(isecEnd, isec->align) + isec->getSize();
+ }
+ // Estimate the address after which call sites can safely call stubs
+ // directly rather than through intermediary thunks.
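+  // For example (hypothetical numbers): with isecEnd = 0x8000000, three
+  // potential 12-byte thunks, a 0x100-byte __stubs section and a 128 MiB
+  // (0x8000000) branch range, the threshold is 0x8000000 + 36 + 0x100 -
+  // 0x8000000 = 0x124; only call sites at or above 0x124 can reach __stubs
+  // with a direct forward branch.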
+ uint64_t stubsInRangeVA = isecEnd + maxPotentialThunks * target->thunkSize +
+ in.stubs->getSize() - branchRange;
+ log("thunks = " + std::to_string(thunkMap.size()) +
+ ", potential = " + std::to_string(maxPotentialThunks) +
+ ", stubs = " + std::to_string(in.stubs->getSize()) + ", isecVA = " +
+ to_hexString(isecVA) + ", threshold = " + to_hexString(stubsInRangeVA) +
+ ", isecEnd = " + to_hexString(isecEnd) +
+ ", tail = " + to_hexString(isecEnd - isecVA) +
+ ", slop = " + to_hexString(branchRange - (isecEnd - isecVA)));
+ return stubsInRangeVA;
+}
+
+void ConcatOutputSection::finalize() {
+ uint64_t isecAddr = addr;
+ uint64_t isecFileOff = fileOff;
+ auto finalizeOne = [&](ConcatInputSection *isec) {
+ isecAddr = alignTo(isecAddr, isec->align);
+ isecFileOff = alignTo(isecFileOff, isec->align);
+ isec->outSecOff = isecAddr - addr;
+ isec->isFinal = true;
+ isecAddr += isec->getSize();
+ isecFileOff += isec->getFileSize();
+ };
+
+ if (!needsThunks()) {
+ for (ConcatInputSection *isec : inputs)
+ finalizeOne(isec);
+ size = isecAddr - addr;
+ fileSize = isecFileOff - fileOff;
+ return;
+ }
+
+ uint64_t branchRange = target->branchRange;
+ uint64_t stubsInRangeVA = TargetInfo::outOfRangeVA;
+ size_t thunkSize = target->thunkSize;
+ size_t relocCount = 0;
+ size_t callSiteCount = 0;
+ size_t thunkCallCount = 0;
+ size_t thunkCount = 0;
+
+ // inputs[finalIdx] is for finalization (address-assignment)
+ size_t finalIdx = 0;
+ // Kick-off by ensuring that the first input section has an address
+ for (size_t callIdx = 0, endIdx = inputs.size(); callIdx < endIdx;
+ ++callIdx) {
+ if (finalIdx == callIdx)
+ finalizeOne(inputs[finalIdx++]);
+ ConcatInputSection *isec = inputs[callIdx];
+ assert(isec->isFinal);
+ uint64_t isecVA = isec->getVA();
+ // Assign addresses up-to the forward branch-range limit
+ while (finalIdx < endIdx &&
+ isecAddr + inputs[finalIdx]->getSize() < isecVA + branchRange)
+ finalizeOne(inputs[finalIdx++]);
+ if (isec->callSiteCount == 0)
+ continue;
+ if (finalIdx == endIdx && stubsInRangeVA == TargetInfo::outOfRangeVA) {
+ // When we have finalized all input sections, __stubs (destined
+ // to follow __text) comes within range of forward branches and
+ // we can estimate the threshold address after which we can
+ // reach any stub with a forward branch. Note that although it
+ // sits in the middle of a loop, this code executes only once.
+ // It is in the loop because we need to call it at the proper
+ // time: the earliest call site from which the end of __text
+ // (and start of __stubs) comes within range of a forward branch.
+ stubsInRangeVA = estimateStubsInRangeVA(callIdx);
+ }
+ // Process relocs by ascending address, i.e., ascending offset within isec
+ std::vector<Reloc> &relocs = isec->relocs;
+ // FIXME: This property does not hold for object files produced by ld64's
+ // `-r` mode.
+ assert(is_sorted(relocs,
+ [](Reloc &a, Reloc &b) { return a.offset > b.offset; }));
+ for (Reloc &r : reverse(relocs)) {
+ ++relocCount;
+ if (!target->hasAttr(r.type, RelocAttrBits::BRANCH))
+ continue;
+ ++callSiteCount;
+ // Calculate branch reachability boundaries
+ uint64_t callVA = isecVA + r.offset;
+ uint64_t lowVA = branchRange < callVA ? callVA - branchRange : 0;
+ uint64_t highVA = callVA + branchRange;
+ // Calculate our call referent address
+ auto *funcSym = r.referent.get<Symbol *>();
+ ThunkInfo &thunkInfo = thunkMap[funcSym];
+ // The referent is not reachable, so we need to use a thunk ...
+ if (funcSym->isInStubs() && callVA >= stubsInRangeVA) {
+ // ... Oh, wait! We are close enough to the end that __stubs
+ // are now within range of a simple forward branch.
+ continue;
+ }
+ uint64_t funcVA = funcSym->resolveBranchVA();
+ ++thunkInfo.callSitesUsed;
+ if (lowVA < funcVA && funcVA < highVA) {
+ // The referent is reachable with a simple call instruction.
+ continue;
+ }
+ ++thunkInfo.thunkCallCount;
+ ++thunkCallCount;
+ // If an existing thunk is reachable, use it ...
+ if (thunkInfo.sym) {
+ uint64_t thunkVA = thunkInfo.isec->getVA();
+ if (lowVA < thunkVA && thunkVA < highVA) {
+ r.referent = thunkInfo.sym;
+ continue;
+ }
+ }
+ // ... otherwise, create a new thunk
+ if (isecAddr > highVA) {
+ // When there is small-to-no margin between highVA and
+ // isecAddr and the distance between subsequent call sites is
+ // smaller than thunkSize, then a new thunk can go out of
+ // range. Fix by unfinalizing inputs[finalIdx] to reduce the
+ // distance between callVA and highVA, then shift some thunks
+ // to occupy address-space formerly occupied by the
+ // unfinalized inputs[finalIdx].
+ fatal(Twine(__FUNCTION__) + ": FIXME: thunk range overrun");
+ }
+ thunkInfo.isec =
+ make<ConcatInputSection>(isec->getSegName(), isec->getName());
+ thunkInfo.isec->parent = this;
+ StringRef thunkName = saver.save(funcSym->getName() + ".thunk." +
+ std::to_string(thunkInfo.sequence++));
+ r.referent = thunkInfo.sym = symtab->addDefined(
+ thunkName, /*file=*/nullptr, thunkInfo.isec, /*value=*/0,
+ /*size=*/thunkSize, /*isWeakDef=*/false, /*isPrivateExtern=*/true,
+ /*isThumb=*/false, /*isReferencedDynamically=*/false,
+ /*noDeadStrip=*/false);
+ target->populateThunk(thunkInfo.isec, funcSym);
+ finalizeOne(thunkInfo.isec);
+ thunks.push_back(thunkInfo.isec);
+ ++thunkCount;
+ }
+ }
+ size = isecAddr - addr;
+ fileSize = isecFileOff - fileOff;
+
+ log("thunks for " + parent->name + "," + name +
+ ": funcs = " + std::to_string(thunkMap.size()) +
+ ", relocs = " + std::to_string(relocCount) +
+ ", all calls = " + std::to_string(callSiteCount) +
+ ", thunk calls = " + std::to_string(thunkCallCount) +
+ ", thunks = " + std::to_string(thunkCount));
+}
+
+void ConcatOutputSection::writeTo(uint8_t *buf) const {
+ // Merge input sections from thunk & ordinary vectors
+ size_t i = 0, ie = inputs.size();
+ size_t t = 0, te = thunks.size();
+ while (i < ie || t < te) {
+ while (i < ie && (t == te || inputs[i]->getSize() == 0 ||
+ inputs[i]->outSecOff < thunks[t]->outSecOff)) {
+ inputs[i]->writeTo(buf + inputs[i]->outSecOff);
+ ++i;
+ }
+ while (t < te && (i == ie || thunks[t]->outSecOff < inputs[i]->outSecOff)) {
+ thunks[t]->writeTo(buf + thunks[t]->outSecOff);
+ ++t;
+ }
+ }
+}
+
+void ConcatOutputSection::finalizeFlags(InputSection *input) {
+ switch (sectionType(input->getFlags())) {
+ default /*type-unspec'ed*/:
+ // FIXME: Add additional logic here when supporting emitting obj files.
+ break;
+ case S_4BYTE_LITERALS:
+ case S_8BYTE_LITERALS:
+ case S_16BYTE_LITERALS:
+ case S_CSTRING_LITERALS:
+ case S_ZEROFILL:
+ case S_LAZY_SYMBOL_POINTERS:
+ case S_MOD_TERM_FUNC_POINTERS:
+ case S_THREAD_LOCAL_REGULAR:
+ case S_THREAD_LOCAL_ZEROFILL:
+ case S_THREAD_LOCAL_VARIABLES:
+ case S_THREAD_LOCAL_INIT_FUNCTION_POINTERS:
+ case S_THREAD_LOCAL_VARIABLE_POINTERS:
+ case S_NON_LAZY_SYMBOL_POINTERS:
+ case S_SYMBOL_STUBS:
+ flags |= input->getFlags();
+ break;
+ }
+}
+
+ConcatOutputSection *
+ConcatOutputSection::getOrCreateForInput(const InputSection *isec) {
+ NamePair names = maybeRenameSection({isec->getSegName(), isec->getName()});
+ ConcatOutputSection *&osec = concatOutputSections[names];
+ if (!osec)
+ osec = make<ConcatOutputSection>(names.second);
+ return osec;
+}
+
+NamePair macho::maybeRenameSection(NamePair key) {
+ auto newNames = config->sectionRenameMap.find(key);
+ if (newNames != config->sectionRenameMap.end())
+ return newNames->second;
+ return key;
+}
--- /dev/null
+//===- ConcatOutputSection.h ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLD_MACHO_CONCAT_OUTPUT_SECTION_H
+#define LLD_MACHO_CONCAT_OUTPUT_SECTION_H
+
+#include "InputSection.h"
+#include "OutputSection.h"
+#include "lld/Common/LLVM.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/MapVector.h"
+
+namespace lld {
+namespace macho {
+
+class Defined;
+
+// Linking multiple files will inevitably mean resolving sections in different
+// files that are labeled with the same segment and section name. This class
+// contains all such sections and writes the data from each section sequentially
+// in the final binary.
+class ConcatOutputSection final : public OutputSection {
+public:
+ explicit ConcatOutputSection(StringRef name)
+ : OutputSection(ConcatKind, name) {}
+
+ const ConcatInputSection *firstSection() const { return inputs.front(); }
+ const ConcatInputSection *lastSection() const { return inputs.back(); }
+ bool isNeeded() const override { return !inputs.empty(); }
+
+ // These accessors will only be valid after finalizing the section
+ uint64_t getSize() const override { return size; }
+ uint64_t getFileSize() const override { return fileSize; }
+
+ void addInput(ConcatInputSection *input);
+ void finalize() override;
+ bool needsThunks() const;
+ uint64_t estimateStubsInRangeVA(size_t callIdx) const;
+
+ void writeTo(uint8_t *buf) const override;
+
+ std::vector<ConcatInputSection *> inputs;
+ std::vector<ConcatInputSection *> thunks;
+
+ static bool classof(const OutputSection *sec) {
+ return sec->kind() == ConcatKind;
+ }
+
+ static ConcatOutputSection *getOrCreateForInput(const InputSection *);
+
+private:
+ void finalizeFlags(InputSection *input);
+
+ size_t size = 0;
+ uint64_t fileSize = 0;
+};
+
+// We maintain one ThunkInfo per real function.
+//
+// The "active thunk" is represented by the sym/isec pair that
+// turns-over during finalize(): as the call-site address advances,
+// the active thunk goes out of branch-range, and we create a new
+// thunk to take its place.
+//
+// The remaining members -- bools and counters -- apply to the
+// collection of thunks associated with the real function.
+
+struct ThunkInfo {
+ // These denote the active thunk:
+ Defined *sym = nullptr; // private-extern symbol for active thunk
+ ConcatInputSection *isec = nullptr; // input section for active thunk
+
+ // The following values are cumulative across all thunks on this function
+ uint32_t callSiteCount = 0; // how many calls to the real function?
+ uint32_t callSitesUsed = 0; // how many call sites processed so-far?
+ uint32_t thunkCallCount = 0; // how many call sites went to thunk?
+ uint8_t sequence = 0; // how many thunks created so-far?
+};
+
+NamePair maybeRenameSection(NamePair key);
+
+// Output sections are added to output segments in iteration order
+// of ConcatOutputSection, so must have deterministic iteration order.
+extern llvm::MapVector<NamePair, ConcatOutputSection *> concatOutputSections;
+
+extern llvm::DenseMap<Symbol *, ThunkInfo> thunkMap;
+
+} // namespace macho
+} // namespace lld
+
+#endif
#ifndef LLD_MACHO_CONFIG_H
#define LLD_MACHO_CONFIG_H
+#include "llvm/ADT/CachedHashString.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/StringSet.h"
#include "llvm/BinaryFormat/MachO.h"
-#include "llvm/TextAPI/MachO/Architecture.h"
+#include "llvm/Support/CachePruning.h"
+#include "llvm/Support/GlobPattern.h"
+#include "llvm/Support/VersionTuple.h"
+#include "llvm/TextAPI/Architecture.h"
+#include "llvm/TextAPI/Platform.h"
+#include "llvm/TextAPI/Target.h"
#include <vector>
class Symbol;
struct SymbolPriorityEntry;
+using NamePair = std::pair<llvm::StringRef, llvm::StringRef>;
+using SectionRenameMap = llvm::DenseMap<NamePair, NamePair>;
+using SegmentRenameMap = llvm::DenseMap<llvm::StringRef, llvm::StringRef>;
+
+struct PlatformInfo {
+ llvm::MachO::Target target;
+ llvm::VersionTuple minimum;
+ llvm::VersionTuple sdk;
+};
+
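+// Packs a VersionTuple into the nibble format used by Mach-O version load
+// commands; e.g. 10.14.6 encodes to 0x000A0E06 (the shift amounts below are
+// octal: 020 == 16, 010 == 8).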
+inline uint32_t encodeVersion(const llvm::VersionTuple &version) {
+ return ((version.getMajor() << 020) |
+ (version.getMinor().getValueOr(0) << 010) |
+ version.getSubminor().getValueOr(0));
+}
+
+enum class NamespaceKind {
+ twolevel,
+ flat,
+};
+
+enum class UndefinedSymbolTreatment {
+ unknown,
+ error,
+ warning,
+ suppress,
+ dynamic_lookup,
+};
+
+enum class ICFLevel {
+ unknown,
+ none,
+ safe,
+ all,
+};
+
+struct SectionAlign {
+ llvm::StringRef segName;
+ llvm::StringRef sectName;
+ uint32_t align;
+};
+
+struct SegmentProtection {
+ llvm::StringRef name;
+ uint32_t maxProt;
+ uint32_t initProt;
+};
+
+class SymbolPatterns {
+public:
+ // GlobPattern can also match literals,
+ // but we prefer the O(1) lookup of DenseSet.
+ llvm::DenseSet<llvm::CachedHashStringRef> literals;
+ std::vector<llvm::GlobPattern> globs;
+
+ bool empty() const { return literals.empty() && globs.empty(); }
+ void clear();
+ void insert(llvm::StringRef symbolName);
+ bool matchLiteral(llvm::StringRef symbolName) const;
+ bool matchGlob(llvm::StringRef symbolName) const;
+ bool match(llvm::StringRef symbolName) const;
+};
+
struct Configuration {
- Symbol *entry;
+ Symbol *entry = nullptr;
bool hasReexports = false;
- llvm::StringRef installName;
+ bool allLoad = false;
+ bool applicationExtension = false;
+ bool archMultiple = false;
+ bool exportDynamic = false;
+ bool forceLoadObjC = false;
+ bool forceLoadSwift = false;
+ bool staticLink = false;
+ bool implicitDylibs = false;
+ bool isPic = false;
+ bool headerPadMaxInstallNames = false;
+ bool ltoNewPassManager = LLVM_ENABLE_NEW_PASS_MANAGER;
+ bool markDeadStrippableDylib = false;
+ bool printDylibSearch = false;
+ bool printEachFile = false;
+ bool printWhyLoad = false;
+ bool searchDylibsFirst = false;
+ bool saveTemps = false;
+ bool adhocCodesign = false;
+ bool emitFunctionStarts = false;
+ bool emitBitcodeBundle = false;
+ bool emitDataInCodeInfo = false;
+ bool emitEncryptionInfo = false;
+ bool timeTraceEnabled = false;
+ bool dataConst = false;
+ bool dedupLiterals = true;
+ uint32_t headerPad;
+ uint32_t dylibCompatibilityVersion = 0;
+ uint32_t dylibCurrentVersion = 0;
+ uint32_t timeTraceGranularity = 500;
+ unsigned optimize;
+ std::string progName;
+
+ // For `clang -arch arm64 -arch x86_64`, clang will:
+ // 1. invoke the linker twice, to write one temporary output per arch
+ // 2. invoke `lipo` to merge the two outputs into a single file
+ // `outputFile` is the name of the temporary file the linker writes to.
+  // `finalOutput` is the name of the file lipo writes to after the link.
llvm::StringRef outputFile;
- llvm::MachO::Architecture arch;
+ llvm::StringRef finalOutput;
+
+ llvm::StringRef installName;
+ llvm::StringRef mapFile;
+ llvm::StringRef ltoObjPath;
+ llvm::StringRef thinLTOJobs;
+ llvm::StringRef umbrella;
+ uint32_t ltoo = 2;
+ llvm::CachePruningPolicy thinLTOCachePolicy;
+ llvm::StringRef thinLTOCacheDir;
+ bool deadStripDylibs = false;
+ bool demangle = false;
+ bool deadStrip = false;
+ PlatformInfo platformInfo;
+ NamespaceKind namespaceKind = NamespaceKind::twolevel;
+ UndefinedSymbolTreatment undefinedSymbolTreatment =
+ UndefinedSymbolTreatment::error;
+ ICFLevel icfLevel = ICFLevel::none;
llvm::MachO::HeaderFileType outputType;
+ std::vector<llvm::StringRef> systemLibraryRoots;
std::vector<llvm::StringRef> librarySearchPaths;
- // TODO: use the framework search paths
std::vector<llvm::StringRef> frameworkSearchPaths;
+ std::vector<llvm::StringRef> runtimePaths;
+ std::vector<std::string> astPaths;
+ std::vector<Symbol *> explicitUndefineds;
+ llvm::StringSet<> explicitDynamicLookups;
+ // There are typically few custom sectionAlignments or segmentProtections,
+ // so use a vector instead of a map.
+ std::vector<SectionAlign> sectionAlignments;
+ std::vector<SegmentProtection> segmentProtections;
+
llvm::DenseMap<llvm::StringRef, SymbolPriorityEntry> priorities;
+ SectionRenameMap sectionRenameMap;
+ SegmentRenameMap segmentRenameMap;
+
+ SymbolPatterns exportedSymbols;
+ SymbolPatterns unexportedSymbols;
+
+ bool zeroModTime = false;
+
+ llvm::MachO::Architecture arch() const { return platformInfo.target.Arch; }
+
+ llvm::MachO::PlatformKind platform() const {
+ return platformInfo.target.Platform;
+ }
};
// The symbol with the highest priority should be ordered first in the output
#include "Driver.h"
#include "Config.h"
+#include "ICF.h"
#include "InputFiles.h"
+#include "LTO.h"
+#include "MarkLive.h"
+#include "ObjC.h"
#include "OutputSection.h"
#include "OutputSegment.h"
#include "SymbolTable.h"
#include "Symbols.h"
+#include "SyntheticSections.h"
#include "Target.h"
+#include "UnwindInfoSection.h"
#include "Writer.h"
#include "lld/Common/Args.h"
#include "lld/Common/ErrorHandler.h"
#include "lld/Common/LLVM.h"
#include "lld/Common/Memory.h"
+#include "lld/Common/Reproduce.h"
#include "lld/Common/Version.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/BinaryFormat/Magic.h"
+#include "llvm/Config/llvm-config.h"
+#include "llvm/LTO/LTO.h"
#include "llvm/Object/Archive.h"
#include "llvm/Option/ArgList.h"
-#include "llvm/Option/Option.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Host.h"
#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/Parallel.h"
#include "llvm/Support/Path.h"
+#include "llvm/Support/TarWriter.h"
+#include "llvm/Support/TargetSelect.h"
+#include "llvm/Support/TimeProfiler.h"
+#include "llvm/TextAPI/PackedVersion.h"
+
+#include <algorithm>
using namespace llvm;
using namespace llvm::MachO;
-using namespace llvm::sys;
+using namespace llvm::object;
using namespace llvm::opt;
+using namespace llvm::sys;
using namespace lld;
using namespace lld::macho;
-Configuration *lld::macho::config;
-
-// Create prefix string literals used in Options.td
-#define PREFIX(NAME, VALUE) const char *NAME[] = VALUE;
-#include "Options.inc"
-#undef PREFIX
-
-// Create table mapping all options defined in Options.td
-static const opt::OptTable::Info optInfo[] = {
-#define OPTION(X1, X2, ID, KIND, GROUP, ALIAS, X7, X8, X9, X10, X11, X12) \
- {X1, X2, X10, X11, OPT_##ID, opt::Option::KIND##Class, \
- X9, X8, OPT_##GROUP, OPT_##ALIAS, X7, X12},
-#include "Options.inc"
-#undef OPTION
-};
-
-MachOOptTable::MachOOptTable() : OptTable(optInfo) {}
-
-opt::InputArgList MachOOptTable::parse(ArrayRef<const char *> argv) {
- // Make InputArgList from string vectors.
- unsigned missingIndex;
- unsigned missingCount;
- SmallVector<const char *, 256> vec(argv.data(), argv.data() + argv.size());
+Configuration *macho::config;
+DependencyTracker *macho::depTracker;
- opt::InputArgList args = ParseArgs(vec, missingIndex, missingCount);
-
- if (missingCount)
- error(Twine(args.getArgString(missingIndex)) + ": missing argument");
+static HeaderFileType getOutputType(const InputArgList &args) {
+ // TODO: -r, -dylinker, -preload...
+ Arg *outputArg = args.getLastArg(OPT_bundle, OPT_dylib, OPT_execute);
+ if (outputArg == nullptr)
+ return MH_EXECUTE;
- for (opt::Arg *arg : args.filtered(OPT_UNKNOWN))
- error("unknown argument: " + arg->getSpelling());
- return args;
+ switch (outputArg->getOption().getID()) {
+ case OPT_bundle:
+ return MH_BUNDLE;
+ case OPT_dylib:
+ return MH_DYLIB;
+ case OPT_execute:
+ return MH_EXECUTE;
+ default:
+ llvm_unreachable("internal error");
+ }
}
-void MachOOptTable::printHelp(const char *argv0, bool showHidden) const {
- PrintHelp(lld::outs(), (std::string(argv0) + " [options] file...").c_str(),
- "LLVM Linker", showHidden);
- lld::outs() << "\n";
+static Optional<StringRef> findLibrary(StringRef name) {
+ if (config->searchDylibsFirst) {
+ if (Optional<StringRef> path = findPathCombination(
+ "lib" + name, config->librarySearchPaths, {".tbd", ".dylib"}))
+ return path;
+ return findPathCombination("lib" + name, config->librarySearchPaths,
+ {".a"});
+ }
+ return findPathCombination("lib" + name, config->librarySearchPaths,
+ {".tbd", ".dylib", ".a"});
}
-static Optional<std::string> findLibrary(StringRef name) {
- std::string stub = (llvm::Twine("lib") + name + ".tbd").str();
- std::string shared = (llvm::Twine("lib") + name + ".dylib").str();
- std::string archive = (llvm::Twine("lib") + name + ".a").str();
- llvm::SmallString<260> location;
+static Optional<std::string> findFramework(StringRef name) {
+ SmallString<260> symlink;
+ StringRef suffix;
+ std::tie(name, suffix) = name.split(",");
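+  // e.g. (hypothetical input) "-framework Foo,_debug" yields name = "Foo" and
+  // suffix = "_debug"; after resolving the <dir>/Foo.framework/Foo symlink we
+  // first try the suffixed binary Foo_debug.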
+ for (StringRef dir : config->frameworkSearchPaths) {
+ symlink = dir;
+ path::append(symlink, name + ".framework", name);
- for (StringRef dir : config->librarySearchPaths) {
- for (StringRef library : {stub, shared, archive}) {
- location = dir;
- llvm::sys::path::append(location, library);
- if (fs::exists(location))
- return location.str().str();
+ if (!suffix.empty()) {
+ // NOTE: we must resolve the symlink before trying the suffixes, because
+ // there are no symlinks for the suffixed paths.
+ SmallString<260> location;
+ if (!fs::real_path(symlink, location)) {
+ // only append suffix if realpath() succeeds
+ Twine suffixed = location + suffix;
+ if (fs::exists(suffixed))
+ return suffixed.str();
+ }
+ // Suffix lookup failed, fall through to the no-suffix case.
}
- }
- return {};
-}
-static TargetInfo *createTargetInfo(opt::InputArgList &args) {
- StringRef arch = args.getLastArgValue(OPT_arch, "x86_64");
- config->arch = llvm::MachO::getArchitectureFromName(
- args.getLastArgValue(OPT_arch, arch));
- switch (config->arch) {
- case llvm::MachO::AK_x86_64:
- case llvm::MachO::AK_x86_64h:
- return createX86_64TargetInfo();
- default:
- fatal("missing or unsupported -arch " + arch);
+ if (Optional<std::string> path = resolveDylibPath(symlink))
+ return path;
}
+ return {};
}
-static bool isDirectory(StringRef option, StringRef path) {
+static bool warnIfNotDirectory(StringRef option, StringRef path) {
if (!fs::exists(path)) {
warn("directory not found for option -" + option + path);
return false;
return true;
}
-static void getSearchPaths(std::vector<StringRef> &paths, unsigned optionCode,
- opt::InputArgList &args,
- const SmallVector<StringRef, 2> &systemPaths) {
- StringRef optionLetter{(optionCode == OPT_F ? "F" : "L")};
- for (auto const &path : args::getStrings(args, optionCode)) {
- if (isDirectory(optionLetter, path))
+static std::vector<StringRef>
+getSearchPaths(unsigned optionCode, InputArgList &args,
+ const std::vector<StringRef> &roots,
+ const SmallVector<StringRef, 2> &systemPaths) {
+ std::vector<StringRef> paths;
+ StringRef optionLetter{optionCode == OPT_F ? "F" : "L"};
+ for (StringRef path : args::getStrings(args, optionCode)) {
+ // NOTE: only absolute paths are re-rooted to syslibroot(s)
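+      // e.g. (hypothetical): with `-syslibroot /sdk`, an absolute `-L/usr/lib`
+      // searches /sdk/usr/lib when that directory exists, falling back to the
+      // literal /usr/lib otherwise.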
+ bool found = false;
+ if (path::is_absolute(path, path::Style::posix)) {
+ for (StringRef root : roots) {
+ SmallString<261> buffer(root);
+ path::append(buffer, path);
+ // Do not warn about paths that are computed via the syslib roots
+ if (fs::is_directory(buffer)) {
+ paths.push_back(saver.save(buffer.str()));
+ found = true;
+ }
+ }
+ }
+ if (!found && warnIfNotDirectory(optionLetter, path))
paths.push_back(path);
}
- if (!args.hasArg(OPT_Z) && Triple(sys::getProcessTriple()).isOSDarwin()) {
- for (auto const &path : systemPaths) {
- if (isDirectory(optionLetter, path))
- paths.push_back(path);
+
+ // `-Z` suppresses the standard "system" search paths.
+ if (args.hasArg(OPT_Z))
+ return paths;
+
+ for (const StringRef &path : systemPaths) {
+ for (const StringRef &root : roots) {
+ SmallString<261> buffer(root);
+ path::append(buffer, path);
+ if (fs::is_directory(buffer))
+ paths.push_back(saver.save(buffer.str()));
}
}
+ return paths;
+}
+
+static std::vector<StringRef> getSystemLibraryRoots(InputArgList &args) {
+ std::vector<StringRef> roots;
+ for (const Arg *arg : args.filtered(OPT_syslibroot))
+ roots.push_back(arg->getValue());
+ // NOTE: the final `-syslibroot` being `/` will ignore all roots
+ if (roots.size() && roots.back() == "/")
+ roots.clear();
+ // NOTE: roots can never be empty - add an empty root to simplify the library
+ // and framework search path computation.
+ if (roots.empty())
+ roots.emplace_back("");
+ return roots;
+}
+
+static std::vector<StringRef>
+getLibrarySearchPaths(InputArgList &args, const std::vector<StringRef> &roots) {
+ return getSearchPaths(OPT_L, args, roots, {"/usr/lib", "/usr/local/lib"});
}
-static void getLibrarySearchPaths(std::vector<StringRef> &paths,
- opt::InputArgList &args) {
- getSearchPaths(paths, OPT_L, args, {"/usr/lib", "/usr/local/lib"});
+static std::vector<StringRef>
+getFrameworkSearchPaths(InputArgList &args,
+ const std::vector<StringRef> &roots) {
+ return getSearchPaths(OPT_F, args, roots,
+ {"/Library/Frameworks", "/System/Library/Frameworks"});
}
-static void getFrameworkSearchPaths(std::vector<StringRef> &paths,
- opt::InputArgList &args) {
- getSearchPaths(paths, OPT_F, args,
- {"/Library/Frameworks", "/System/Library/Frameworks"});
+static llvm::CachePruningPolicy getLTOCachePolicy(InputArgList &args) {
+ SmallString<128> ltoPolicy;
+  auto add = [&ltoPolicy](Twine val) {
+ if (!ltoPolicy.empty())
+ ltoPolicy += ":";
+ val.toVector(ltoPolicy);
+ };
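+  // e.g. (hypothetical flags) `-prune_interval_lto 60 -prune_after_lto 3600`
+  // yields the policy string "prune_interval=60s:prune_after=3600s".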
+ for (const Arg *arg :
+ args.filtered(OPT_thinlto_cache_policy, OPT_prune_interval_lto,
+ OPT_prune_after_lto, OPT_max_relative_cache_size_lto)) {
+ switch (arg->getOption().getID()) {
+ case OPT_thinlto_cache_policy: add(arg->getValue()); break;
+ case OPT_prune_interval_lto:
+ if (!strcmp("-1", arg->getValue()))
+ add("prune_interval=87600h"); // 10 years
+ else
+ add(Twine("prune_interval=") + arg->getValue() + "s");
+ break;
+ case OPT_prune_after_lto:
+ add(Twine("prune_after=") + arg->getValue() + "s");
+ break;
+ case OPT_max_relative_cache_size_lto:
+ add(Twine("cache_size=") + arg->getValue() + "%");
+ break;
+ }
+ }
+ return CHECK(parseCachePruningPolicy(ltoPolicy), "invalid LTO cache policy");
+}
+
+namespace {
+struct ArchiveMember {
+ MemoryBufferRef mbref;
+ uint32_t modTime;
+ uint64_t offsetInArchive;
+};
+} // namespace
+
+// Returns slices of MB by parsing MB as an archive file.
+// Each slice consists of a member file in the archive.
+static std::vector<ArchiveMember> getArchiveMembers(MemoryBufferRef mb) {
+ std::unique_ptr<Archive> file =
+ CHECK(Archive::create(mb),
+ mb.getBufferIdentifier() + ": failed to parse archive");
+ Archive *archive = file.get();
+ make<std::unique_ptr<Archive>>(std::move(file)); // take ownership
+
+ std::vector<ArchiveMember> v;
+ Error err = Error::success();
+
+ // Thin archives refer to .o files, so --reproduce needs the .o files too.
+ bool addToTar = archive->isThin() && tar;
+
+ for (const Archive::Child &c : archive->children(err)) {
+ MemoryBufferRef mbref =
+ CHECK(c.getMemoryBufferRef(),
+ mb.getBufferIdentifier() +
+ ": could not get the buffer for a child of the archive");
+ if (addToTar)
+ tar->append(relativeToRoot(check(c.getFullName())), mbref.getBuffer());
+ uint32_t modTime = toTimeT(
+ CHECK(c.getLastModified(), mb.getBufferIdentifier() +
+ ": could not get the modification "
+ "time for a child of the archive"));
+ v.push_back({mbref, modTime, c.getChildOffset()});
+ }
+ if (err)
+ fatal(mb.getBufferIdentifier() +
+ ": Archive::children failed: " + toString(std::move(err)));
+
+ return v;
}
-static void addFile(StringRef path) {
+static DenseMap<StringRef, ArchiveFile *> loadedArchives;
+
+static InputFile *addFile(StringRef path, bool forceLoadArchive,
+ bool isExplicit = true, bool isBundleLoader = false) {
Optional<MemoryBufferRef> buffer = readFile(path);
if (!buffer)
- return;
+ return nullptr;
MemoryBufferRef mbref = *buffer;
+ InputFile *newFile = nullptr;
- switch (identify_magic(mbref.getBuffer())) {
+ file_magic magic = identify_magic(mbref.getBuffer());
+ switch (magic) {
case file_magic::archive: {
+ // Avoid loading archives twice. If the archives are being force-loaded,
+ // loading them twice would create duplicate symbol errors. In the
+ // non-force-loading case, this is just a minor performance optimization.
+ // We don't take a reference to cachedFile here because the
+ // loadArchiveMember() call below may recursively call addFile() and
+ // invalidate this reference.
+ if (ArchiveFile *cachedFile = loadedArchives[path])
+ return cachedFile;
+
std::unique_ptr<object::Archive> file = CHECK(
object::Archive::create(mbref), path + ": failed to parse archive");
if (!file->isEmpty() && !file->hasSymbolTable())
error(path + ": archive has no index; run ranlib to add one");
- inputFiles.push_back(make<ArchiveFile>(std::move(file)));
+ if (config->allLoad || forceLoadArchive) {
+ if (Optional<MemoryBufferRef> buffer = readFile(path)) {
+ for (const ArchiveMember &member : getArchiveMembers(*buffer)) {
+ if (Optional<InputFile *> file = loadArchiveMember(
+ member.mbref, member.modTime, path, /*objCOnly=*/false,
+ member.offsetInArchive)) {
+ inputFiles.insert(*file);
+ printArchiveMemberLoad(
+ (forceLoadArchive ? "-force_load" : "-all_load"),
+ inputFiles.back());
+ }
+ }
+ }
+ } else if (config->forceLoadObjC) {
+ for (const object::Archive::Symbol &sym : file->symbols())
+ if (sym.getName().startswith(objc::klass))
+ symtab->addUndefined(sym.getName(), /*file=*/nullptr,
+ /*isWeakRef=*/false);
+
+ // TODO: no need to look for ObjC sections for a given archive member if
+ // we already found that it contains an ObjC symbol. We should also
+ // consider creating a LazyObjFile class in order to avoid double-loading
+ // these files here and below (as part of the ArchiveFile).
+ if (Optional<MemoryBufferRef> buffer = readFile(path)) {
+ for (const ArchiveMember &member : getArchiveMembers(*buffer)) {
+ if (Optional<InputFile *> file = loadArchiveMember(
+ member.mbref, member.modTime, path, /*objCOnly=*/true,
+ member.offsetInArchive)) {
+ inputFiles.insert(*file);
+ printArchiveMemberLoad("-ObjC", inputFiles.back());
+ }
+ }
+ }
+ }
+
+ newFile = loadedArchives[path] = make<ArchiveFile>(std::move(file));
break;
}
case file_magic::macho_object:
- inputFiles.push_back(make<ObjFile>(mbref));
+ newFile = make<ObjFile>(mbref, getModTime(path), "");
break;
case file_magic::macho_dynamically_linked_shared_lib:
- inputFiles.push_back(make<DylibFile>(mbref));
+ case file_magic::macho_dynamically_linked_shared_lib_stub:
+ case file_magic::tapi_file:
+ if (DylibFile *dylibFile = loadDylib(mbref)) {
+ if (isExplicit)
+ dylibFile->explicitlyLinked = true;
+ newFile = dylibFile;
+ }
break;
- case file_magic::tapi_file: {
- llvm::Expected<std::unique_ptr<llvm::MachO::InterfaceFile>> result =
- TextAPIReader::get(mbref);
- if (!result)
- return;
-
- inputFiles.push_back(make<DylibFile>(std::move(*result)));
+ case file_magic::bitcode:
+ newFile = make<BitcodeFile>(mbref, "", 0);
+ break;
+ case file_magic::macho_executable:
+ case file_magic::macho_bundle:
+ // We only allow executable and bundle type here if it is used
+ // as a bundle loader.
+ if (!isBundleLoader)
+ error(path + ": unhandled file type");
+ if (DylibFile *dylibFile = loadDylib(mbref, nullptr, isBundleLoader))
+ newFile = dylibFile;
break;
- }
default:
error(path + ": unhandled file type");
}
+ if (newFile && !isa<DylibFile>(newFile)) {
+ // printArchiveMemberLoad() prints both .a and .o names, so no need to
+ // print the .a name here.
+ if (config->printEachFile && magic != file_magic::archive)
+ message(toString(newFile));
+ inputFiles.insert(newFile);
+ }
+ return newFile;
+}
+
+static void addLibrary(StringRef name, bool isNeeded, bool isWeak,
+ bool isReexport, bool isExplicit, bool forceLoad) {
+ if (Optional<StringRef> path = findLibrary(name)) {
+ if (auto *dylibFile = dyn_cast_or_null<DylibFile>(
+ addFile(*path, forceLoad, isExplicit))) {
+ if (isNeeded)
+ dylibFile->forceNeeded = true;
+ if (isWeak)
+ dylibFile->forceWeakImport = true;
+ if (isReexport) {
+ config->hasReexports = true;
+ dylibFile->reexport = true;
+ }
+ }
+ return;
+ }
+ error("library not found for -l" + name);
+}
+
+static void addFramework(StringRef name, bool isNeeded, bool isWeak,
+ bool isReexport, bool isExplicit) {
+ if (Optional<std::string> path = findFramework(name)) {
+ if (auto *dylibFile = dyn_cast_or_null<DylibFile>(
+ addFile(*path, /*forceLoadArchive=*/false, isExplicit))) {
+ if (isNeeded)
+ dylibFile->forceNeeded = true;
+ if (isWeak)
+ dylibFile->forceWeakImport = true;
+ if (isReexport) {
+ config->hasReexports = true;
+ dylibFile->reexport = true;
+ }
+ }
+ return;
+ }
+ error("framework not found for -framework " + name);
+}
+
+// Parses LC_LINKER_OPTION contents, which can add additional command line
+// flags.
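+// For example (hypothetical payload): argc = 2 with data = "-framework\0Foo\0"
+// parses into {"-framework", "Foo"}, which makes the driver search for
+// Foo.framework just as if the flag had been passed on the command line.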
+void macho::parseLCLinkerOption(InputFile *f, unsigned argc, StringRef data) {
+ SmallVector<const char *, 4> argv;
+ size_t offset = 0;
+ for (unsigned i = 0; i < argc && offset < data.size(); ++i) {
+ argv.push_back(data.data() + offset);
+ offset += strlen(data.data() + offset) + 1;
+ }
+ if (argv.size() != argc || offset > data.size())
+ fatal(toString(f) + ": invalid LC_LINKER_OPTION");
+
+ MachOOptTable table;
+ unsigned missingIndex, missingCount;
+ InputArgList args = table.ParseArgs(argv, missingIndex, missingCount);
+ if (missingCount)
+ fatal(Twine(args.getArgString(missingIndex)) + ": missing argument");
+ for (const Arg *arg : args.filtered(OPT_UNKNOWN))
+ error("unknown argument: " + arg->getAsString(args));
+
+ for (const Arg *arg : args) {
+ switch (arg->getOption().getID()) {
+ case OPT_l: {
+ StringRef name = arg->getValue();
+ bool forceLoad =
+ config->forceLoadSwift ? name.startswith("swift") : false;
+ addLibrary(name, /*isNeeded=*/false, /*isWeak=*/false,
+ /*isReexport=*/false, /*isExplicit=*/false, forceLoad);
+ break;
+ }
+ case OPT_framework:
+ addFramework(arg->getValue(), /*isNeeded=*/false, /*isWeak=*/false,
+ /*isReexport=*/false, /*isExplicit=*/false);
+ break;
+ default:
+ error(arg->getSpelling() + " is not allowed in LC_LINKER_OPTION");
+ }
+ }
}
-static std::array<StringRef, 6> archNames{"arm", "arm64", "i386",
- "x86_64", "ppc", "ppc64"};
-static bool isArchString(StringRef s) {
- static DenseSet<StringRef> archNamesSet(archNames.begin(), archNames.end());
- return archNamesSet.find(s) != archNamesSet.end();
+static void addFileList(StringRef path) {
+ Optional<MemoryBufferRef> buffer = readFile(path);
+ if (!buffer)
+ return;
+ MemoryBufferRef mbref = *buffer;
+ for (StringRef path : args::getLines(mbref))
+ addFile(rerootPath(path), /*forceLoadArchive=*/false);
}
// An order file has one entry per line, in the following format:
//
-// <arch>:<object file>:<symbol name>
+// <cpu>:<object file>:<symbol name>
//
-// <arch> and <object file> are optional. If not specified, then that entry
-// matches any symbol of that name.
+// <cpu> and <object file> are optional. If not specified, then that entry
+// matches any symbol of that name. Parsing this format is not quite
+// straightforward because the symbol name itself can contain colons, so when
+// encountering a colon, we consider the preceding characters to decide if it
+// can be a valid CPU type or file path.
//
// If a symbol is matched by multiple entries, then it takes the lowest-ordered
// entry (the one nearest to the front of the list.)
//
// The file can also have line comments that start with '#'.
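+//
+// For illustration, hypothetical entries such as
+//   x86_64:foo.o:_main
+//   bar.o:_helper
+//   _start
+// are all valid; only the symbol name is mandatory.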
-void parseOrderFile(StringRef path) {
+static void parseOrderFile(StringRef path) {
Optional<MemoryBufferRef> buffer = readFile(path);
if (!buffer) {
error("Could not read order file at " + path);
MemoryBufferRef mbref = *buffer;
size_t priority = std::numeric_limits<size_t>::max();
- for (StringRef rest : args::getLines(mbref)) {
- StringRef arch, objectFile, symbol;
-
- std::array<StringRef, 3> fields;
- uint8_t fieldCount = 0;
- while (rest != "" && fieldCount < 3) {
- std::pair<StringRef, StringRef> p = getToken(rest, ": \t\n\v\f\r");
- StringRef tok = p.first;
- rest = p.second;
-
- // Check if we have a comment
- if (tok == "" || tok[0] == '#')
- break;
+ for (StringRef line : args::getLines(mbref)) {
+ StringRef objectFile, symbol;
+ line = line.take_until([](char c) { return c == '#'; }); // ignore comments
+ line = line.ltrim();
- fields[fieldCount++] = tok;
- }
+ CPUType cpuType = StringSwitch<CPUType>(line)
+ .StartsWith("i386:", CPU_TYPE_I386)
+ .StartsWith("x86_64:", CPU_TYPE_X86_64)
+ .StartsWith("arm:", CPU_TYPE_ARM)
+ .StartsWith("arm64:", CPU_TYPE_ARM64)
+ .StartsWith("ppc:", CPU_TYPE_POWERPC)
+ .StartsWith("ppc64:", CPU_TYPE_POWERPC64)
+ .Default(CPU_TYPE_ANY);
- switch (fieldCount) {
- case 3:
- arch = fields[0];
- objectFile = fields[1];
- symbol = fields[2];
- break;
- case 2:
- (isArchString(fields[0]) ? arch : objectFile) = fields[0];
- symbol = fields[1];
- break;
- case 1:
- symbol = fields[0];
- break;
- case 0:
- break;
- default:
- llvm_unreachable("too many fields in order file");
- }
+ if (cpuType != CPU_TYPE_ANY && cpuType != target->cpuType)
+ continue;
- if (!arch.empty()) {
- if (!isArchString(arch)) {
- error("invalid arch \"" + arch + "\" in order file: expected one of " +
- llvm::join(archNames, ", "));
- continue;
- }
+ // Drop the CPU type as well as the colon
+ if (cpuType != CPU_TYPE_ANY)
+ line = line.drop_until([](char c) { return c == ':'; }).drop_front();
- // TODO: Update when we extend support for other archs
- if (arch != "x86_64")
- continue;
- }
-
- if (!objectFile.empty() && !objectFile.endswith(".o")) {
- error("invalid object file name \"" + objectFile +
- "\" in order file: should end with .o");
- continue;
+ constexpr std::array<StringRef, 2> fileEnds = {".o:", ".o):"};
+ for (StringRef fileEnd : fileEnds) {
+ size_t pos = line.find(fileEnd);
+ if (pos != StringRef::npos) {
+ // Split the string around the colon
+ objectFile = line.take_front(pos + fileEnd.size() - 1);
+ line = line.drop_front(pos + fileEnd.size());
+ break;
+ }
}
+ symbol = line.trim();
if (!symbol.empty()) {
SymbolPriorityEntry &entry = config->priorities[symbol];
}
// We expect sub-library names of the form "libfoo", which will match a dylib
-// with a path of .*/libfoo.dylib.
-static bool markSubLibrary(StringRef searchName) {
+// with a path of .*/libfoo.{dylib, tbd}.
+// XXX ld64 seems to ignore the extension entirely when matching sub-libraries;
+// I'm not sure what the use case for that is.
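+// e.g. (hypothetical) `-sub_library libfoo` marks a dylib whose path ends in
+// libfoo.dylib or libfoo.tbd for re-export.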
+static bool markReexport(StringRef searchName, ArrayRef<StringRef> extensions) {
for (InputFile *file : inputFiles) {
if (auto *dylibFile = dyn_cast<DylibFile>(file)) {
StringRef filename = path::filename(dylibFile->getName());
- if (filename.consume_front(searchName) && filename == ".dylib") {
+ if (filename.consume_front(searchName) &&
+ (filename.empty() ||
+ find(extensions, filename) != extensions.end())) {
dylibFile->reexport = true;
return true;
}
return false;
}
-static void handlePlatformVersion(const opt::Arg *arg) {
- // TODO: implementation coming very soon ...
+// This function is called on startup. We need this for LTO since
+// LTO calls LLVM functions to compile bitcode files to native code.
+// Technically this can be delayed until we read bitcode files, but
+// we don't bother to do lazily because the initialization is fast.
+static void initLLVM() {
+ InitializeAllTargets();
+ InitializeAllTargetMCs();
+ InitializeAllAsmPrinters();
+ InitializeAllAsmParsers();
+}
+
+static void compileBitcodeFiles() {
+ // FIXME: Remove this once LTO.cpp honors config->exportDynamic.
+ if (config->exportDynamic)
+ for (InputFile *file : inputFiles)
+ if (isa<BitcodeFile>(file)) {
+ warn("the effect of -export_dynamic on LTO is not yet implemented");
+ break;
+ }
+
+ TimeTraceScope timeScope("LTO");
+ auto *lto = make<BitcodeCompiler>();
+ for (InputFile *file : inputFiles)
+ if (auto *bitcodeFile = dyn_cast<BitcodeFile>(file))
+ lto->add(*bitcodeFile);
+
+ for (ObjFile *file : lto->compile())
+ inputFiles.insert(file);
+}
+
+// Replaces common symbols with defined symbols residing in __common sections.
+// This function must be called after all symbol names are resolved (i.e. after
+// all InputFiles have been loaded.) As a result, later operations won't see
+// any CommonSymbols.
+static void replaceCommonSymbols() {
+ TimeTraceScope timeScope("Replace common symbols");
+ ConcatOutputSection *osec = nullptr;
+ for (Symbol *sym : symtab->getSymbols()) {
+ auto *common = dyn_cast<CommonSymbol>(sym);
+ if (common == nullptr)
+ continue;
+
+ // Casting to size_t will truncate large values on 32-bit architectures,
+ // but it's not really worth supporting the linking of 64-bit programs on
+ // 32-bit archs.
+ ArrayRef<uint8_t> data = {nullptr, static_cast<size_t>(common->size)};
+ auto *isec = make<ConcatInputSection>(
+ segment_names::data, section_names::common, common->getFile(), data,
+ common->align, S_ZEROFILL);
+ if (!osec)
+ osec = ConcatOutputSection::getOrCreateForInput(isec);
+ isec->parent = osec;
+ inputSections.push_back(isec);
+
+ // FIXME: CommonSymbol should store isReferencedDynamically, noDeadStrip
+ // and pass them on here.
+ replaceSymbol<Defined>(sym, sym->getName(), isec->getFile(), isec,
+ /*value=*/0,
+ /*size=*/0,
+ /*isWeakDef=*/false,
+ /*isExternal=*/true, common->privateExtern,
+ /*isThumb=*/false,
+ /*isReferencedDynamically=*/false,
+ /*noDeadStrip=*/false);
+ }
+}
+
+static void initializeSectionRenameMap() {
+ if (config->dataConst) {
+ SmallVector<StringRef> v{section_names::got,
+ section_names::authGot,
+ section_names::authPtr,
+ section_names::nonLazySymbolPtr,
+ section_names::const_,
+ section_names::cfString,
+ section_names::moduleInitFunc,
+ section_names::moduleTermFunc,
+ section_names::objcClassList,
+ section_names::objcNonLazyClassList,
+ section_names::objcCatList,
+ section_names::objcNonLazyCatList,
+ section_names::objcProtoList,
+ section_names::objcImageInfo};
+ for (StringRef s : v)
+ config->sectionRenameMap[{segment_names::data, s}] = {
+ segment_names::dataConst, s};
+ }
+ config->sectionRenameMap[{segment_names::text, section_names::staticInit}] = {
+ segment_names::text, section_names::text};
+ config->sectionRenameMap[{segment_names::import, section_names::pointers}] = {
+ config->dataConst ? segment_names::dataConst : segment_names::data,
+ section_names::nonLazySymbolPtr};
+}
+
+static inline char toLowerDash(char x) {
+ if (x >= 'A' && x <= 'Z')
+ return x - 'A' + 'a';
+ else if (x == ' ')
+ return '-';
+ return x;
+}
+
+static std::string lowerDash(StringRef s) {
+ return std::string(map_iterator(s.begin(), toLowerDash),
+ map_iterator(s.end(), toLowerDash));
+}
+
+// Has the side-effect of setting Config::platformInfo.
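+// e.g. `-platform_version macos 10.15 11.1` selects PlatformKind::macOS with
+// minimum = 10.15 and sdk = 11.1.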
+static PlatformKind parsePlatformVersion(const ArgList &args) {
+ const Arg *arg = args.getLastArg(OPT_platform_version);
+ if (!arg) {
+ error("must specify -platform_version");
+ return PlatformKind::unknown;
+ }
+
+ StringRef platformStr = arg->getValue(0);
+ StringRef minVersionStr = arg->getValue(1);
+ StringRef sdkVersionStr = arg->getValue(2);
+
+ // TODO(compnerd) see if we can generate this case list via XMACROS
+ PlatformKind platform =
+ StringSwitch<PlatformKind>(lowerDash(platformStr))
+ .Cases("macos", "1", PlatformKind::macOS)
+ .Cases("ios", "2", PlatformKind::iOS)
+ .Cases("tvos", "3", PlatformKind::tvOS)
+ .Cases("watchos", "4", PlatformKind::watchOS)
+ .Cases("bridgeos", "5", PlatformKind::bridgeOS)
+ .Cases("mac-catalyst", "6", PlatformKind::macCatalyst)
+ .Cases("ios-simulator", "7", PlatformKind::iOSSimulator)
+ .Cases("tvos-simulator", "8", PlatformKind::tvOSSimulator)
+ .Cases("watchos-simulator", "9", PlatformKind::watchOSSimulator)
+ .Cases("driverkit", "10", PlatformKind::driverKit)
+ .Default(PlatformKind::unknown);
+ if (platform == PlatformKind::unknown)
+ error(Twine("malformed platform: ") + platformStr);
+ // TODO: check validity of version strings, which varies by platform
+ // NOTE: ld64 accepts version strings with 5 components
+ // llvm::VersionTuple accepts no more than 4 components
+ // Has Apple ever published version strings with 5 components?
+ if (config->platformInfo.minimum.tryParse(minVersionStr))
+ error(Twine("malformed minimum version: ") + minVersionStr);
+ if (config->platformInfo.sdk.tryParse(sdkVersionStr))
+ error(Twine("malformed sdk version: ") + sdkVersionStr);
+ return platform;
+}
+
+// Has the side-effect of setting Config::target.
+static TargetInfo *createTargetInfo(InputArgList &args) {
+ StringRef archName = args.getLastArgValue(OPT_arch);
+ if (archName.empty())
+ fatal("must specify -arch");
+ PlatformKind platform = parsePlatformVersion(args);
+
+ config->platformInfo.target =
+ MachO::Target(getArchitectureFromName(archName), platform);
+
+ uint32_t cpuType;
+ uint32_t cpuSubtype;
+ std::tie(cpuType, cpuSubtype) = getCPUTypeFromArchitecture(config->arch());
+
+ switch (cpuType) {
+ case CPU_TYPE_X86_64:
+ return createX86_64TargetInfo();
+ case CPU_TYPE_ARM64:
+ return createARM64TargetInfo();
+ case CPU_TYPE_ARM64_32:
+ return createARM64_32TargetInfo();
+ case CPU_TYPE_ARM:
+ return createARMTargetInfo(cpuSubtype);
+ default:
+ fatal("missing or unsupported -arch " + archName);
+ }
}
-static void warnIfDeprecatedOption(const opt::Option &opt) {
+static UndefinedSymbolTreatment
+getUndefinedSymbolTreatment(const ArgList &args) {
+ StringRef treatmentStr = args.getLastArgValue(OPT_undefined);
+ auto treatment =
+ StringSwitch<UndefinedSymbolTreatment>(treatmentStr)
+ .Cases("error", "", UndefinedSymbolTreatment::error)
+ .Case("warning", UndefinedSymbolTreatment::warning)
+ .Case("suppress", UndefinedSymbolTreatment::suppress)
+ .Case("dynamic_lookup", UndefinedSymbolTreatment::dynamic_lookup)
+ .Default(UndefinedSymbolTreatment::unknown);
+ if (treatment == UndefinedSymbolTreatment::unknown) {
+ warn(Twine("unknown -undefined TREATMENT '") + treatmentStr +
+ "', defaulting to 'error'");
+ treatment = UndefinedSymbolTreatment::error;
+ } else if (config->namespaceKind == NamespaceKind::twolevel &&
+ (treatment == UndefinedSymbolTreatment::warning ||
+ treatment == UndefinedSymbolTreatment::suppress)) {
+ if (treatment == UndefinedSymbolTreatment::warning)
+ error("'-undefined warning' only valid with '-flat_namespace'");
+ else
+ error("'-undefined suppress' only valid with '-flat_namespace'");
+ treatment = UndefinedSymbolTreatment::error;
+ }
+ return treatment;
+}
+
+static ICFLevel getICFLevel(const ArgList &args) {
+ bool noDeduplicate = args.hasArg(OPT_no_deduplicate);
+ StringRef icfLevelStr = args.getLastArgValue(OPT_icf_eq);
+ auto icfLevel = StringSwitch<ICFLevel>(icfLevelStr)
+ .Cases("none", "", ICFLevel::none)
+ .Case("safe", ICFLevel::safe)
+ .Case("all", ICFLevel::all)
+ .Default(ICFLevel::unknown);
+ if (icfLevel == ICFLevel::unknown) {
+ warn(Twine("unknown --icf=OPTION `") + icfLevelStr +
+ "', defaulting to `none'");
+ icfLevel = ICFLevel::none;
+ } else if (icfLevel != ICFLevel::none && noDeduplicate) {
+ warn(Twine("`--icf=" + icfLevelStr +
+ "' conflicts with -no_deduplicate, setting to `none'"));
+ icfLevel = ICFLevel::none;
+ } else if (icfLevel == ICFLevel::safe) {
+ warn(Twine("`--icf=safe' is not yet implemented, reverting to `none'"));
+ icfLevel = ICFLevel::none;
+ }
+ return icfLevel;
+}
+
+static void warnIfDeprecatedOption(const Option &opt) {
if (!opt.getGroup().isValid())
return;
if (opt.getGroup().getID() == OPT_grp_deprecated) {
}
}
-static void warnIfUnimplementedOption(const opt::Option &opt) {
- if (!opt.getGroup().isValid())
+static void warnIfUnimplementedOption(const Option &opt) {
+ if (!opt.getGroup().isValid() || !opt.hasFlag(DriverFlag::HelpHidden))
return;
switch (opt.getGroup().getID()) {
case OPT_grp_deprecated:
}
}
-bool macho::link(llvm::ArrayRef<const char *> argsArr, bool canExitEarly,
+static const char *getReproduceOption(InputArgList &args) {
+ if (const Arg *arg = args.getLastArg(OPT_reproduce))
+ return arg->getValue();
+ return getenv("LLD_REPRODUCE");
+}
+
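+// Forward a single option (e.g. the value of -mcpu or -mllvm) to LLVM's
+// cl::opt machinery; `msg` provides context if LLVM rejects it.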
+static void parseClangOption(StringRef opt, const Twine &msg) {
+ std::string err;
+ raw_string_ostream os(err);
+
+ const char *argv[] = {"lld", opt.data()};
+ if (cl::ParseCommandLineOptions(2, argv, "", &os))
+ return;
+ os.flush();
+ error(msg + ": " + StringRef(err).trim());
+}
+
+static uint32_t parseDylibVersion(const ArgList &args, unsigned id) {
+ const Arg *arg = args.getLastArg(id);
+ if (!arg)
+ return 0;
+
+ if (config->outputType != MH_DYLIB) {
+ error(arg->getAsString(args) + ": only valid with -dylib");
+ return 0;
+ }
+
+ PackedVersion version;
+ if (!version.parse32(arg->getValue())) {
+ error(arg->getAsString(args) + ": malformed version");
+ return 0;
+ }
+
+ return version.rawValue();
+}
+
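+// Translate the r/w/x/- letters of a -segprot value into VM_PROT_* bits.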
+static uint32_t parseProtection(StringRef protStr) {
+ uint32_t prot = 0;
+ for (char c : protStr) {
+ switch (c) {
+ case 'r':
+ prot |= VM_PROT_READ;
+ break;
+ case 'w':
+ prot |= VM_PROT_WRITE;
+ break;
+ case 'x':
+ prot |= VM_PROT_EXECUTE;
+ break;
+ case '-':
+ break;
+ default:
+ error("unknown -segprot letter '" + Twine(c) + "' in " + protStr);
+ return 0;
+ }
+ }
+ return prot;
+}
+
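+// Parse all -sectalign arguments. The alignment value is hexadecimal (with or
+// without a leading 0x) and must be a power of two.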
+static std::vector<SectionAlign> parseSectAlign(const opt::InputArgList &args) {
+ std::vector<SectionAlign> sectAligns;
+ for (const Arg *arg : args.filtered(OPT_sectalign)) {
+ StringRef segName = arg->getValue(0);
+ StringRef sectName = arg->getValue(1);
+ StringRef alignStr = arg->getValue(2);
+ if (alignStr.startswith("0x") || alignStr.startswith("0X"))
+ alignStr = alignStr.drop_front(2);
+ uint32_t align;
+ if (alignStr.getAsInteger(16, align)) {
+ error("-sectalign: failed to parse '" + StringRef(arg->getValue(2)) +
+ "' as number");
+ continue;
+ }
+ if (!isPowerOf2_32(align)) {
+ error("-sectalign: '" + StringRef(arg->getValue(2)) +
+ "' (in base 16) not a power of two");
+ continue;
+ }
+ sectAligns.push_back({segName, sectName, align});
+ }
+ return sectAligns;
+}
+
+PlatformKind macho::removeSimulator(PlatformKind platform) {
+ switch (platform) {
+ case PlatformKind::iOSSimulator:
+ return PlatformKind::iOS;
+ case PlatformKind::tvOSSimulator:
+ return PlatformKind::tvOS;
+ case PlatformKind::watchOSSimulator:
+ return PlatformKind::watchOS;
+ default:
+ return platform;
+ }
+}
+
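+// Decide whether eligible __DATA sections should be placed in __DATA_CONST by
+// default, based on the platform's minimum deployment version and output type.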
+static bool dataConstDefault(const InputArgList &args) {
+ static const std::vector<std::pair<PlatformKind, VersionTuple>> minVersion = {
+ {PlatformKind::macOS, VersionTuple(10, 15)},
+ {PlatformKind::iOS, VersionTuple(13, 0)},
+ {PlatformKind::tvOS, VersionTuple(13, 0)},
+ {PlatformKind::watchOS, VersionTuple(6, 0)},
+ {PlatformKind::bridgeOS, VersionTuple(4, 0)}};
+ PlatformKind platform = removeSimulator(config->platformInfo.target.Platform);
+ auto it = llvm::find_if(minVersion,
+ [&](const auto &p) { return p.first == platform; });
+ if (it != minVersion.end())
+ if (config->platformInfo.minimum < it->second)
+ return false;
+
+ switch (config->outputType) {
+ case MH_EXECUTE:
+ return !args.hasArg(OPT_no_pie);
+ case MH_BUNDLE:
+ // FIXME: return false when -final_name ...
+ // has prefix "/System/Library/UserEventPlugins/"
+ // or matches "/usr/libexec/locationd" "/usr/libexec/terminusd"
+ return true;
+ case MH_DYLIB:
+ return true;
+ case MH_OBJECT:
+ return false;
+ default:
+ llvm_unreachable(
+ "unsupported output type for determining data-const default");
+ }
+ return false;
+}
+
+void SymbolPatterns::clear() {
+ literals.clear();
+ globs.clear();
+}
+
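+// Names without glob metacharacters are stored as literals; anything else is
+// compiled into a GlobPattern.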
+void SymbolPatterns::insert(StringRef symbolName) {
+ if (symbolName.find_first_of("*?[]") == StringRef::npos)
+ literals.insert(CachedHashStringRef(symbolName));
+ else if (Expected<GlobPattern> pattern = GlobPattern::create(symbolName))
+ globs.emplace_back(*pattern);
+ else
+ error("invalid symbol-name pattern: " + symbolName);
+}
+
+bool SymbolPatterns::matchLiteral(StringRef symbolName) const {
+ return literals.contains(CachedHashStringRef(symbolName));
+}
+
+bool SymbolPatterns::matchGlob(StringRef symbolName) const {
+ for (const GlobPattern &glob : globs)
+ if (glob.match(symbolName))
+ return true;
+ return false;
+}
+
+bool SymbolPatterns::match(StringRef symbolName) const {
+ return matchLiteral(symbolName) || matchGlob(symbolName);
+}
+
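+// Collect symbol-name patterns from both the single-symbol option and the
+// corresponding list-file option ('#' starts a comment; one pattern per line).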
+static void handleSymbolPatterns(InputArgList &args,
+ SymbolPatterns &symbolPatterns,
+ unsigned singleOptionCode,
+ unsigned listFileOptionCode) {
+ for (const Arg *arg : args.filtered(singleOptionCode))
+ symbolPatterns.insert(arg->getValue());
+ for (const Arg *arg : args.filtered(listFileOptionCode)) {
+ StringRef path = arg->getValue();
+ Optional<MemoryBufferRef> buffer = readFile(path);
+ if (!buffer) {
+ error("Could not read symbol file: " + path);
+ continue;
+ }
+ MemoryBufferRef mbref = *buffer;
+ for (StringRef line : args::getLines(mbref)) {
+ line = line.take_until([](char c) { return c == '#'; }).trim();
+ if (!line.empty())
+ symbolPatterns.insert(line);
+ }
+ }
+}
+
+void createFiles(const InputArgList &args) {
+ TimeTraceScope timeScope("Load input files");
+ // This loop should be reserved for options whose exact ordering matters.
+ // Other options should be handled via filtered() and/or getLastArg().
+ for (const Arg *arg : args) {
+ const Option &opt = arg->getOption();
+ warnIfDeprecatedOption(opt);
+ warnIfUnimplementedOption(opt);
+
+ switch (opt.getID()) {
+ case OPT_INPUT:
+ addFile(rerootPath(arg->getValue()), /*forceLoadArchive=*/false);
+ break;
+ case OPT_needed_library:
+ if (auto *dylibFile = dyn_cast_or_null<DylibFile>(
+ addFile(rerootPath(arg->getValue()), false)))
+ dylibFile->forceNeeded = true;
+ break;
+ case OPT_reexport_library:
+ if (auto *dylibFile = dyn_cast_or_null<DylibFile>(addFile(
+ rerootPath(arg->getValue()), /*forceLoadArchive=*/false))) {
+ config->hasReexports = true;
+ dylibFile->reexport = true;
+ }
+ break;
+ case OPT_weak_library:
+ if (auto *dylibFile = dyn_cast_or_null<DylibFile>(
+ addFile(rerootPath(arg->getValue()), /*forceLoadArchive=*/false)))
+ dylibFile->forceWeakImport = true;
+ break;
+ case OPT_filelist:
+ addFileList(arg->getValue());
+ break;
+ case OPT_force_load:
+ addFile(rerootPath(arg->getValue()), /*forceLoadArchive=*/true);
+ break;
+ case OPT_l:
+ case OPT_needed_l:
+ case OPT_reexport_l:
+ case OPT_weak_l:
+ addLibrary(arg->getValue(), opt.getID() == OPT_needed_l,
+ opt.getID() == OPT_weak_l, opt.getID() == OPT_reexport_l,
+ /*isExplicit=*/true, /*forceLoad=*/false);
+ break;
+ case OPT_framework:
+ case OPT_needed_framework:
+ case OPT_reexport_framework:
+ case OPT_weak_framework:
+ addFramework(arg->getValue(), opt.getID() == OPT_needed_framework,
+ opt.getID() == OPT_weak_framework,
+ opt.getID() == OPT_reexport_framework, /*isExplicit=*/true);
+ break;
+ default:
+ break;
+ }
+ }
+}
+
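+// Assign each input section to an output section: concat sections get (or
+// create) their ConcatOutputSection, compact-unwind sections are fed to the
+// unwind-info section, and literal sections go to the cstring / word-literal
+// synthetic sections.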
+static void gatherInputSections() {
+ TimeTraceScope timeScope("Gathering input sections");
+ int inputOrder = 0;
+ for (const InputFile *file : inputFiles) {
+ for (const SubsectionMap &map : file->subsections) {
+ ConcatOutputSection *osec = nullptr;
+ for (const SubsectionEntry &entry : map) {
+ if (auto *isec = dyn_cast<ConcatInputSection>(entry.isec)) {
+ if (isec->isCoalescedWeak())
+ continue;
+ if (isec->getSegName() == segment_names::ld) {
+ assert(isec->getName() == section_names::compactUnwind);
+ in.unwindInfo->addInput(isec);
+ continue;
+ }
+ isec->outSecOff = inputOrder++;
+ if (!osec)
+ osec = ConcatOutputSection::getOrCreateForInput(isec);
+ isec->parent = osec;
+ inputSections.push_back(isec);
+ } else if (auto *isec = dyn_cast<CStringInputSection>(entry.isec)) {
+ if (in.cStringSection->inputOrder == UnspecifiedInputOrder)
+ in.cStringSection->inputOrder = inputOrder++;
+ in.cStringSection->addInput(isec);
+ } else if (auto *isec = dyn_cast<WordLiteralInputSection>(entry.isec)) {
+ if (in.wordLiteralSection->inputOrder == UnspecifiedInputOrder)
+ in.wordLiteralSection->inputOrder = inputOrder++;
+ in.wordLiteralSection->addInput(isec);
+ } else {
+ llvm_unreachable("unexpected input section kind");
+ }
+ }
+ }
+ }
+ assert(inputOrder <= UnspecifiedInputOrder);
+}
+
+static void foldIdenticalLiterals() {
+ // We always create a cStringSection, regardless of whether dedupLiterals is
+ // true. If it isn't, we simply create a non-deduplicating CStringSection.
+ // Either way, we must unconditionally finalize it here.
+ in.cStringSection->finalizeContents();
+ if (in.wordLiteralSection)
+ in.wordLiteralSection->finalizeContents();
+}
+
+static void referenceStubBinder() {
+ bool needsStubHelper = config->outputType == MH_DYLIB ||
+ config->outputType == MH_EXECUTE ||
+ config->outputType == MH_BUNDLE;
+ if (!needsStubHelper || !symtab->find("dyld_stub_binder"))
+ return;
+
+  // dyld_stub_binder is used by dyld to resolve lazy bindings. This code adds
+  // an opportunistic reference to dyld_stub_binder if it happens to exist.
+ // dyld_stub_binder is in libSystem.dylib, which is usually linked in. This
+ // isn't needed for correctness, but the presence of that symbol suppresses
+ // "no symbols" diagnostics from `nm`.
+ // StubHelperSection::setup() adds a reference and errors out if
+ // dyld_stub_binder doesn't exist in case it is actually needed.
+ symtab->addUndefined("dyld_stub_binder", /*file=*/nullptr, /*isWeak=*/false);
+}
+
+bool macho::link(ArrayRef<const char *> argsArr, bool canExitEarly,
raw_ostream &stdoutOS, raw_ostream &stderrOS) {
lld::stdoutOS = &stdoutOS;
lld::stderrOS = &stderrOS;
+ errorHandler().cleanupCallback = []() { freeArena(); };
+
+ errorHandler().logName = args::getFilenameWithoutExe(argsArr[0]);
stderrOS.enable_colors(stderrOS.has_colors());
- // TODO: Set up error handler properly, e.g. the errorLimitExceededMsg
MachOOptTable parser;
- opt::InputArgList args = parser.parse(argsArr.slice(1));
+ InputArgList args = parser.parse(argsArr.slice(1));
+
+ errorHandler().errorLimitExceededMsg =
+ "too many errors emitted, stopping now "
+ "(use --error-limit=0 to see all errors)";
+ errorHandler().errorLimit = args::getInteger(args, OPT_error_limit_eq, 20);
+ errorHandler().verbose = args.hasArg(OPT_verbose);
if (args.hasArg(OPT_help_hidden)) {
parser.printHelp(argsArr[0], /*showHidden=*/true);
return true;
- } else if (args.hasArg(OPT_help)) {
+ }
+ if (args.hasArg(OPT_help)) {
parser.printHelp(argsArr[0], /*showHidden=*/false);
return true;
}
+ if (args.hasArg(OPT_version)) {
+ message(getLLDVersion());
+ return true;
+ }
config = make<Configuration>();
symtab = make<SymbolTable>();
target = createTargetInfo(args);
+ depTracker =
+ make<DependencyTracker>(args.getLastArgValue(OPT_dependency_info));
+
+ // Must be set before any InputSections and Symbols are created.
+ config->deadStrip = args.hasArg(OPT_dead_strip);
+
+ config->systemLibraryRoots = getSystemLibraryRoots(args);
+ if (const char *path = getReproduceOption(args)) {
+ // Note that --reproduce is a debug option so you can ignore it
+ // if you are trying to understand the whole picture of the code.
+ Expected<std::unique_ptr<TarWriter>> errOrWriter =
+ TarWriter::create(path, path::stem(path));
+ if (errOrWriter) {
+ tar = std::move(*errOrWriter);
+ tar->append("response.txt", createResponseFile(args));
+ tar->append("version.txt", getLLDVersion() + "\n");
+ } else {
+ error("--reproduce: " + toString(errOrWriter.takeError()));
+ }
+ }
+
+ if (auto *arg = args.getLastArg(OPT_threads_eq)) {
+ StringRef v(arg->getValue());
+ unsigned threads = 0;
+ if (!llvm::to_integer(v, threads, 0) || threads == 0)
+ error(arg->getSpelling() + ": expected a positive integer, but got '" +
+ arg->getValue() + "'");
+ parallel::strategy = hardware_concurrency(threads);
+ config->thinLTOJobs = v;
+ }
+ if (auto *arg = args.getLastArg(OPT_thinlto_jobs_eq))
+ config->thinLTOJobs = arg->getValue();
+ if (!get_threadpool_strategy(config->thinLTOJobs))
+ error("--thinlto-jobs: invalid job count: " + config->thinLTOJobs);
+
+ for (const Arg *arg : args.filtered(OPT_u)) {
+ config->explicitUndefineds.push_back(symtab->addUndefined(
+ arg->getValue(), /*file=*/nullptr, /*isWeakRef=*/false));
+ }
- config->entry = symtab->addUndefined(args.getLastArgValue(OPT_e, "_main"));
+ for (const Arg *arg : args.filtered(OPT_U))
+ config->explicitDynamicLookups.insert(arg->getValue());
+
+ config->mapFile = args.getLastArgValue(OPT_map);
+ config->optimize = args::getInteger(args, OPT_O, 1);
config->outputFile = args.getLastArgValue(OPT_o, "a.out");
- config->installName =
- args.getLastArgValue(OPT_install_name, config->outputFile);
- getLibrarySearchPaths(config->librarySearchPaths, args);
- getFrameworkSearchPaths(config->frameworkSearchPaths, args);
- config->outputType = args.hasArg(OPT_dylib) ? MH_DYLIB : MH_EXECUTE;
+ config->finalOutput =
+ args.getLastArgValue(OPT_final_output, config->outputFile);
+ config->astPaths = args.getAllArgValues(OPT_add_ast_path);
+ config->headerPad = args::getHex(args, OPT_headerpad, /*Default=*/32);
+ config->headerPadMaxInstallNames =
+ args.hasArg(OPT_headerpad_max_install_names);
+ config->printDylibSearch =
+ args.hasArg(OPT_print_dylib_search) || getenv("RC_TRACE_DYLIB_SEARCHING");
+ config->printEachFile = args.hasArg(OPT_t);
+ config->printWhyLoad = args.hasArg(OPT_why_load);
+ config->outputType = getOutputType(args);
+ if (const Arg *arg = args.getLastArg(OPT_bundle_loader)) {
+ if (config->outputType != MH_BUNDLE)
+ error("-bundle_loader can only be used with MachO bundle output");
+ addFile(arg->getValue(), /*forceLoadArchive=*/false, /*isExplicit=*/false,
+ /*isBundleLoader=*/true);
+ }
+ if (const Arg *arg = args.getLastArg(OPT_umbrella)) {
+ if (config->outputType != MH_DYLIB)
+ warn("-umbrella used, but not creating dylib");
+ config->umbrella = arg->getValue();
+ }
+ config->ltoObjPath = args.getLastArgValue(OPT_object_path_lto);
+ config->ltoNewPassManager =
+ args.hasFlag(OPT_no_lto_legacy_pass_manager, OPT_lto_legacy_pass_manager,
+ LLVM_ENABLE_NEW_PASS_MANAGER);
+ config->ltoo = args::getInteger(args, OPT_lto_O, 2);
+ if (config->ltoo > 3)
+ error("--lto-O: invalid optimization level: " + Twine(config->ltoo));
+ config->thinLTOCacheDir = args.getLastArgValue(OPT_cache_path_lto);
+ config->thinLTOCachePolicy = getLTOCachePolicy(args);
+ config->runtimePaths = args::getStrings(args, OPT_rpath);
+ config->allLoad = args.hasArg(OPT_all_load);
+ config->archMultiple = args.hasArg(OPT_arch_multiple);
+ config->applicationExtension = args.hasFlag(
+ OPT_application_extension, OPT_no_application_extension, false);
+ config->exportDynamic = args.hasArg(OPT_export_dynamic);
+ config->forceLoadObjC = args.hasArg(OPT_ObjC);
+ config->forceLoadSwift = args.hasArg(OPT_force_load_swift_libs);
+ config->deadStripDylibs = args.hasArg(OPT_dead_strip_dylibs);
+ config->demangle = args.hasArg(OPT_demangle);
+ config->implicitDylibs = !args.hasArg(OPT_no_implicit_dylibs);
+ config->emitFunctionStarts =
+ args.hasFlag(OPT_function_starts, OPT_no_function_starts, true);
+ config->emitBitcodeBundle = args.hasArg(OPT_bitcode_bundle);
+ config->emitDataInCodeInfo =
+ args.hasFlag(OPT_data_in_code_info, OPT_no_data_in_code_info, true);
+ config->icfLevel = getICFLevel(args);
+ config->dedupLiterals = args.hasArg(OPT_deduplicate_literals) ||
+ config->icfLevel != ICFLevel::none;
+
+ // FIXME: Add a commandline flag for this too.
+ config->zeroModTime = getenv("ZERO_AR_DATE");
+
+ std::array<PlatformKind, 3> encryptablePlatforms{
+ PlatformKind::iOS, PlatformKind::watchOS, PlatformKind::tvOS};
+ config->emitEncryptionInfo =
+ args.hasFlag(OPT_encryptable, OPT_no_encryption,
+ is_contained(encryptablePlatforms, config->platform()));
+
+#ifndef LLVM_HAVE_LIBXAR
+ if (config->emitBitcodeBundle)
+ error("-bitcode_bundle unsupported because LLD wasn't built with libxar");
+#endif
+
+ if (const Arg *arg = args.getLastArg(OPT_install_name)) {
+ if (config->outputType != MH_DYLIB)
+ warn(arg->getAsString(args) + ": ignored, only has effect with -dylib");
+ else
+ config->installName = arg->getValue();
+ } else if (config->outputType == MH_DYLIB) {
+ config->installName = config->finalOutput;
+ }
+
+ if (args.hasArg(OPT_mark_dead_strippable_dylib)) {
+ if (config->outputType != MH_DYLIB)
+ warn("-mark_dead_strippable_dylib: ignored, only has effect with -dylib");
+ else
+ config->markDeadStrippableDylib = true;
+ }
+
+ if (const Arg *arg = args.getLastArg(OPT_static, OPT_dynamic))
+ config->staticLink = (arg->getOption().getID() == OPT_static);
+
+ if (const Arg *arg =
+ args.getLastArg(OPT_flat_namespace, OPT_twolevel_namespace))
+ config->namespaceKind = arg->getOption().getID() == OPT_twolevel_namespace
+ ? NamespaceKind::twolevel
+ : NamespaceKind::flat;
+
+ config->undefinedSymbolTreatment = getUndefinedSymbolTreatment(args);
+
+ if (config->outputType == MH_EXECUTE)
+ config->entry = symtab->addUndefined(args.getLastArgValue(OPT_e, "_main"),
+ /*file=*/nullptr,
+ /*isWeakRef=*/false);
+
+ config->librarySearchPaths =
+ getLibrarySearchPaths(args, config->systemLibraryRoots);
+ config->frameworkSearchPaths =
+ getFrameworkSearchPaths(args, config->systemLibraryRoots);
+ if (const Arg *arg =
+ args.getLastArg(OPT_search_paths_first, OPT_search_dylibs_first))
+ config->searchDylibsFirst =
+ arg->getOption().getID() == OPT_search_dylibs_first;
+
+ config->dylibCompatibilityVersion =
+ parseDylibVersion(args, OPT_compatibility_version);
+ config->dylibCurrentVersion = parseDylibVersion(args, OPT_current_version);
+
+ config->dataConst =
+ args.hasFlag(OPT_data_const, OPT_no_data_const, dataConstDefault(args));
+ // Populate config->sectionRenameMap with builtin default renames.
+ // Options -rename_section and -rename_segment are able to override.
+ initializeSectionRenameMap();
+ // Reject every special character except '.' and '$'
+ // TODO(gkm): verify that this is the proper set of invalid chars
+ StringRef invalidNameChars("!\"#%&'()*+,-/:;<=>?@[\\]^`{|}~");
+ auto validName = [invalidNameChars](StringRef s) {
+ if (s.find_first_of(invalidNameChars) != StringRef::npos)
+ error("invalid name for segment or section: " + s);
+ return s;
+ };
+ for (const Arg *arg : args.filtered(OPT_rename_section)) {
+ config->sectionRenameMap[{validName(arg->getValue(0)),
+ validName(arg->getValue(1))}] = {
+ validName(arg->getValue(2)), validName(arg->getValue(3))};
+ }
+ for (const Arg *arg : args.filtered(OPT_rename_segment)) {
+ config->segmentRenameMap[validName(arg->getValue(0))] =
+ validName(arg->getValue(1));
+ }
+
+ config->sectionAlignments = parseSectAlign(args);
+
+ for (const Arg *arg : args.filtered(OPT_segprot)) {
+ StringRef segName = arg->getValue(0);
+ uint32_t maxProt = parseProtection(arg->getValue(1));
+ uint32_t initProt = parseProtection(arg->getValue(2));
+ if (maxProt != initProt && config->arch() != AK_i386)
+ error("invalid argument '" + arg->getAsString(args) +
+ "': max and init must be the same for non-i386 archs");
+ if (segName == segment_names::linkEdit)
+ error("-segprot cannot be used to change __LINKEDIT's protections");
+ config->segmentProtections.push_back({segName, maxProt, initProt});
+ }
+
+ handleSymbolPatterns(args, config->exportedSymbols, OPT_exported_symbol,
+ OPT_exported_symbols_list);
+ handleSymbolPatterns(args, config->unexportedSymbols, OPT_unexported_symbol,
+ OPT_unexported_symbols_list);
+ if (!config->exportedSymbols.empty() && !config->unexportedSymbols.empty()) {
+ error("cannot use both -exported_symbol* and -unexported_symbol* options\n"
+ ">>> ignoring unexports");
+ config->unexportedSymbols.clear();
+ }
+ // Explicitly-exported literal symbols must be defined, but might
+ // languish in an archive if unreferenced elsewhere. Light a fire
+ // under those lazy symbols!
+ for (const CachedHashStringRef &cachedName : config->exportedSymbols.literals)
+ symtab->addUndefined(cachedName.val(), /*file=*/nullptr,
+ /*isWeakRef=*/false);
+
+ config->saveTemps = args.hasArg(OPT_save_temps);
+
+ config->adhocCodesign = args.hasFlag(
+ OPT_adhoc_codesign, OPT_no_adhoc_codesign,
+ (config->arch() == AK_arm64 || config->arch() == AK_arm64e) &&
+ config->platform() == PlatformKind::macOS);
if (args.hasArg(OPT_v)) {
message(getLLDVersion());
message(StringRef("Library search paths:") +
- (config->librarySearchPaths.size()
- ? "\n\t" + llvm::join(config->librarySearchPaths, "\n\t")
- : ""));
+ (config->librarySearchPaths.empty()
+ ? ""
+ : "\n\t" + join(config->librarySearchPaths, "\n\t")));
message(StringRef("Framework search paths:") +
- (config->frameworkSearchPaths.size()
- ? "\n\t" + llvm::join(config->frameworkSearchPaths, "\n\t")
- : ""));
- freeArena();
- return !errorCount();
+ (config->frameworkSearchPaths.empty()
+ ? ""
+ : "\n\t" + join(config->frameworkSearchPaths, "\n\t")));
}
- for (const auto &arg : args) {
- const auto &opt = arg->getOption();
- warnIfDeprecatedOption(opt);
- switch (arg->getOption().getID()) {
- case OPT_INPUT:
- addFile(arg->getValue());
- break;
- case OPT_l: {
- StringRef name = arg->getValue();
- if (Optional<std::string> path = findLibrary(name)) {
- addFile(*path);
- break;
- }
- error("library not found for -l" + name);
- break;
- }
- case OPT_platform_version:
- handlePlatformVersion(arg);
- break;
- case OPT_o:
- case OPT_dylib:
- case OPT_e:
- case OPT_L:
- case OPT_Z:
- case OPT_arch:
- // handled elsewhere
- break;
- default:
- warnIfUnimplementedOption(opt);
- break;
+ config->progName = argsArr[0];
+
+ config->timeTraceEnabled = args.hasArg(
+ OPT_time_trace, OPT_time_trace_granularity_eq, OPT_time_trace_file_eq);
+ config->timeTraceGranularity =
+ args::getInteger(args, OPT_time_trace_granularity_eq, 500);
+
+ // Initialize time trace profiler.
+ if (config->timeTraceEnabled)
+ timeTraceProfilerInitialize(config->timeTraceGranularity, config->progName);
+
+ {
+ TimeTraceScope timeScope("ExecuteLinker");
+
+ initLLVM(); // must be run before any call to addFile()
+ createFiles(args);
+
+ config->isPic = config->outputType == MH_DYLIB ||
+ config->outputType == MH_BUNDLE ||
+ (config->outputType == MH_EXECUTE &&
+ args.hasFlag(OPT_pie, OPT_no_pie, true));
+
+ // Now that all dylibs have been loaded, search for those that should be
+ // re-exported.
+ {
+ auto reexportHandler = [](const Arg *arg,
+ const std::vector<StringRef> &extensions) {
+ config->hasReexports = true;
+ StringRef searchName = arg->getValue();
+ if (!markReexport(searchName, extensions))
+ error(arg->getSpelling() + " " + searchName +
+ " does not match a supplied dylib");
+ };
+ std::vector<StringRef> extensions = {".tbd"};
+ for (const Arg *arg : args.filtered(OPT_sub_umbrella))
+ reexportHandler(arg, extensions);
+
+ extensions.push_back(".dylib");
+ for (const Arg *arg : args.filtered(OPT_sub_library))
+ reexportHandler(arg, extensions);
}
- }
- // Now that all dylibs have been loaded, search for those that should be
- // re-exported.
- for (opt::Arg *arg : args.filtered(OPT_sub_library)) {
- config->hasReexports = true;
- StringRef searchName = arg->getValue();
- if (!markSubLibrary(searchName))
- error("-sub_library " + searchName + " does not match a supplied dylib");
- }
+ // Parse LTO options.
+ if (const Arg *arg = args.getLastArg(OPT_mcpu))
+ parseClangOption(saver.save("-mcpu=" + StringRef(arg->getValue())),
+ arg->getSpelling());
- StringRef orderFile = args.getLastArgValue(OPT_order_file);
- if (!orderFile.empty())
- parseOrderFile(orderFile);
+ for (const Arg *arg : args.filtered(OPT_mllvm))
+ parseClangOption(arg->getValue(), arg->getSpelling());
- if (config->outputType == MH_EXECUTE && !isa<Defined>(config->entry)) {
- error("undefined symbol: " + config->entry->getName());
- return false;
- }
+ compileBitcodeFiles();
+ replaceCommonSymbols();
- createSyntheticSections();
+ StringRef orderFile = args.getLastArgValue(OPT_order_file);
+ if (!orderFile.empty())
+ parseOrderFile(orderFile);
- // Initialize InputSections.
- for (InputFile *file : inputFiles) {
- for (SubsectionMap &map : file->subsections) {
- for (auto &p : map) {
- InputSection *isec = p.second;
- inputSections.push_back(isec);
+ referenceStubBinder();
+
+ // FIXME: should terminate the link early based on errors encountered so
+ // far?
+
+ createSyntheticSections();
+ createSyntheticSymbols();
+
+ if (!config->exportedSymbols.empty()) {
+ for (Symbol *sym : symtab->getSymbols()) {
+ if (auto *defined = dyn_cast<Defined>(sym)) {
+ StringRef symbolName = defined->getName();
+ if (config->exportedSymbols.match(symbolName)) {
+ if (defined->privateExtern) {
+ error("cannot export hidden symbol " + symbolName +
+ "\n>>> defined in " + toString(defined->getFile()));
+ }
+ } else {
+ defined->privateExtern = true;
+ }
+ }
}
+ } else if (!config->unexportedSymbols.empty()) {
+ for (Symbol *sym : symtab->getSymbols())
+ if (auto *defined = dyn_cast<Defined>(sym))
+ if (config->unexportedSymbols.match(defined->getName()))
+ defined->privateExtern = true;
}
+
+ for (const Arg *arg : args.filtered(OPT_sectcreate)) {
+ StringRef segName = arg->getValue(0);
+ StringRef sectName = arg->getValue(1);
+ StringRef fileName = arg->getValue(2);
+ Optional<MemoryBufferRef> buffer = readFile(fileName);
+ if (buffer)
+ inputFiles.insert(make<OpaqueFile>(*buffer, segName, sectName));
+ }
+
+ gatherInputSections();
+
+ if (config->deadStrip)
+ markLive();
+
+ // ICF assumes that all literals have been folded already, so we must run
+ // foldIdenticalLiterals before foldIdenticalSections.
+ foldIdenticalLiterals();
+ if (config->icfLevel != ICFLevel::none)
+ foldIdenticalSections();
+
+ // Write to an output file.
+ if (target->wordSize == 8)
+ writeResult<LP64>();
+ else
+ writeResult<ILP32>();
+
+ depTracker->write(getLLDVersion(), inputFiles, config->outputFile);
}
- // Write to an output file.
- writeResult();
+ if (config->timeTraceEnabled) {
+ if (auto E = timeTraceProfilerWrite(
+ args.getLastArgValue(OPT_time_trace_file_eq).str(),
+ config->outputFile)) {
+ handleAllErrors(std::move(E),
+ [&](const StringError &SE) { error(SE.getMessage()); });
+ }
+
+ timeTraceProfilerCleanup();
+ }
if (canExitEarly)
exitLld(errorCount() ? 1 : 0);
- freeArena();
return !errorCount();
}
#define LLD_MACHO_DRIVER_H
#include "lld/Common/LLVM.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/Option/OptTable.h"
+#include "llvm/Support/MemoryBuffer.h"
+
+#include <set>
+#include <type_traits>
+
+namespace llvm {
+namespace MachO {
+class InterfaceFile;
+enum class PlatformKind : unsigned;
+} // namespace MachO
+} // namespace llvm
namespace lld {
namespace macho {
+class DylibFile;
+class InputFile;
+
class MachOOptTable : public llvm::opt::OptTable {
public:
MachOOptTable();
#undef OPTION
};
+void parseLCLinkerOption(InputFile *, unsigned argc, StringRef data);
+
+std::string createResponseFile(const llvm::opt::InputArgList &args);
+
+// Check for both libfoo.dylib and libfoo.tbd (in that order).
+llvm::Optional<std::string> resolveDylibPath(llvm::StringRef path);
+
+DylibFile *loadDylib(llvm::MemoryBufferRef mbref, DylibFile *umbrella = nullptr,
+ bool isBundleLoader = false);
+
+// Search for all possible combinations of `{root}/{name}.{extension}`.
+// If \p extensions are not specified, then just search for `{root}/{name}`.
+llvm::Optional<llvm::StringRef>
+findPathCombination(const llvm::Twine &name,
+ const std::vector<llvm::StringRef> &roots,
+ ArrayRef<llvm::StringRef> extensions = {""});
+
+// If -syslibroot is specified, absolute paths to non-object files may be
+// rerooted.
+llvm::StringRef rerootPath(llvm::StringRef path);
+
+llvm::Optional<InputFile *> loadArchiveMember(MemoryBufferRef, uint32_t modTime,
+ StringRef archiveName,
+ bool objCOnly,
+ uint64_t offsetInArchive);
+
+uint32_t getModTime(llvm::StringRef path);
+
+void printArchiveMemberLoad(StringRef reason, const InputFile *);
+
+// Map simulator platforms to their underlying device platform.
+llvm::MachO::PlatformKind removeSimulator(llvm::MachO::PlatformKind platform);
+
+// Helper class to export dependency info.
+class DependencyTracker {
+public:
+ explicit DependencyTracker(llvm::StringRef path);
+
+ // Adds the given path to the set of not-found files.
+ inline void logFileNotFound(const Twine &path) {
+ if (active)
+ notFounds.insert(path.str());
+ }
+
+  // Writes the dependencies to the specified path. The content is sorted by
+  // op code, and then alphabetically within each section.
+ void write(llvm::StringRef version,
+ const llvm::SetVector<InputFile *> &inputs,
+ llvm::StringRef output);
+
+private:
+ enum DepOpCode : uint8_t {
+ // Denotes the linker version.
+ Version = 0x00,
+ // Denotes the input files.
+ Input = 0x10,
+    // Denotes input files that could not be found.
+ NotFound = 0x11,
+ // Denotes the output files.
+ Output = 0x40,
+ };
+
+ const llvm::StringRef path;
+ bool active;
+
+ // The paths need to be alphabetically ordered.
+ // We need to own the paths because some of them are temporarily
+ // constructed.
+ std::set<std::string> notFounds;
+};
+
+extern DependencyTracker *depTracker;
+
} // namespace macho
} // namespace lld
--- /dev/null
+//===- DriverUtils.cpp ----------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "Config.h"
+#include "Driver.h"
+#include "InputFiles.h"
+#include "ObjC.h"
+#include "Target.h"
+
+#include "lld/Common/Args.h"
+#include "lld/Common/ErrorHandler.h"
+#include "lld/Common/Memory.h"
+#include "lld/Common/Reproduce.h"
+#include "llvm/ADT/CachedHashString.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/Bitcode/BitcodeReader.h"
+#include "llvm/LTO/LTO.h"
+#include "llvm/Option/Arg.h"
+#include "llvm/Option/ArgList.h"
+#include "llvm/Option/Option.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Path.h"
+#include "llvm/TextAPI/InterfaceFile.h"
+#include "llvm/TextAPI/TextAPIReader.h"
+
+using namespace llvm;
+using namespace llvm::MachO;
+using namespace llvm::opt;
+using namespace llvm::sys;
+using namespace lld;
+using namespace lld::macho;
+
+// Create prefix string literals used in Options.td
+#define PREFIX(NAME, VALUE) const char *NAME[] = VALUE;
+#include "Options.inc"
+#undef PREFIX
+
+// Create table mapping all options defined in Options.td
+static const OptTable::Info optInfo[] = {
+#define OPTION(X1, X2, ID, KIND, GROUP, ALIAS, X7, X8, X9, X10, X11, X12) \
+ {X1, X2, X10, X11, OPT_##ID, Option::KIND##Class, \
+ X9, X8, OPT_##GROUP, OPT_##ALIAS, X7, X12},
+#include "Options.inc"
+#undef OPTION
+};
+
+MachOOptTable::MachOOptTable() : OptTable(optInfo) {}
+
+// Set color diagnostics according to --color-diagnostics={auto,always,never}
+// or --no-color-diagnostics flags.
+static void handleColorDiagnostics(InputArgList &args) {
+ const Arg *arg =
+ args.getLastArg(OPT_color_diagnostics, OPT_color_diagnostics_eq,
+ OPT_no_color_diagnostics);
+ if (!arg)
+ return;
+ if (arg->getOption().getID() == OPT_color_diagnostics) {
+ lld::errs().enable_colors(true);
+ } else if (arg->getOption().getID() == OPT_no_color_diagnostics) {
+ lld::errs().enable_colors(false);
+ } else {
+ StringRef s = arg->getValue();
+ if (s == "always")
+ lld::errs().enable_colors(true);
+ else if (s == "never")
+ lld::errs().enable_colors(false);
+ else if (s != "auto")
+ error("unknown option: --color-diagnostics=" + s);
+ }
+}
+
+InputArgList MachOOptTable::parse(ArrayRef<const char *> argv) {
+ // Make InputArgList from string vectors.
+ unsigned missingIndex;
+ unsigned missingCount;
+ SmallVector<const char *, 256> vec(argv.data(), argv.data() + argv.size());
+
+ // Expand response files (arguments in the form of @<filename>)
+ // and then parse the argument again.
+ cl::ExpandResponseFiles(saver, cl::TokenizeGNUCommandLine, vec);
+ InputArgList args = ParseArgs(vec, missingIndex, missingCount);
+
+ // Handle -fatal_warnings early since it converts missing argument warnings
+ // to errors.
+ errorHandler().fatalWarnings = args.hasArg(OPT_fatal_warnings);
+
+ if (missingCount)
+ error(Twine(args.getArgString(missingIndex)) + ": missing argument");
+
+ handleColorDiagnostics(args);
+
+ for (const Arg *arg : args.filtered(OPT_UNKNOWN)) {
+ std::string nearest;
+ if (findNearest(arg->getAsString(args), nearest) > 1)
+ error("unknown argument '" + arg->getAsString(args) + "'");
+ else
+ error("unknown argument '" + arg->getAsString(args) +
+ "', did you mean '" + nearest + "'");
+ }
+ return args;
+}
+
+void MachOOptTable::printHelp(const char *argv0, bool showHidden) const {
+ OptTable::printHelp(lld::outs(),
+ (std::string(argv0) + " [options] file...").c_str(),
+ "LLVM Linker", showHidden);
+ lld::outs() << "\n";
+}
+
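+// For --reproduce: if the path exists, rewrite it relative to the reproduce
+// archive root so that it can be resolved from inside the archive.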
+static std::string rewritePath(StringRef s) {
+ if (fs::exists(s))
+ return relativeToRoot(s);
+ return std::string(s);
+}
+
+static std::string rewriteInputPath(StringRef s) {
+ // Don't bother rewriting "absolute" paths that are actually under the
+ // syslibroot; simply rewriting the syslibroot is sufficient.
+ if (rerootPath(s) == s && fs::exists(s))
+ return relativeToRoot(s);
+ return std::string(s);
+}
+
+// Reconstructs command-line arguments so that you can re-run
+// the same command with the same inputs. This is for --reproduce.
+std::string macho::createResponseFile(const InputArgList &args) {
+ SmallString<0> data;
+ raw_svector_ostream os(data);
+
+ // Copy the command line to the output while rewriting paths.
+ for (const Arg *arg : args) {
+ switch (arg->getOption().getID()) {
+ case OPT_reproduce:
+ break;
+ case OPT_INPUT:
+ os << quote(rewriteInputPath(arg->getValue())) << "\n";
+ break;
+ case OPT_o:
+ os << "-o " << quote(path::filename(arg->getValue())) << "\n";
+ break;
+ case OPT_filelist:
+ if (Optional<MemoryBufferRef> buffer = readFile(arg->getValue()))
+ for (StringRef path : args::getLines(*buffer))
+ os << quote(rewriteInputPath(path)) << "\n";
+ break;
+ case OPT_force_load:
+ case OPT_weak_library:
+ os << arg->getSpelling() << " "
+ << quote(rewriteInputPath(arg->getValue())) << "\n";
+ break;
+ case OPT_F:
+ case OPT_L:
+ case OPT_bundle_loader:
+ case OPT_exported_symbols_list:
+ case OPT_order_file:
+ case OPT_rpath:
+ case OPT_syslibroot:
+ case OPT_unexported_symbols_list:
+ os << arg->getSpelling() << " " << quote(rewritePath(arg->getValue()))
+ << "\n";
+ break;
+ case OPT_sectcreate:
+ os << arg->getSpelling() << " " << quote(arg->getValue(0)) << " "
+ << quote(arg->getValue(1)) << " "
+ << quote(rewritePath(arg->getValue(2))) << "\n";
+ break;
+ default:
+ os << toString(*arg) << "\n";
+ }
+ }
+ return std::string(data.str());
+}
+
+static void searchedDylib(const Twine &path, bool found) {
+ if (config->printDylibSearch)
+ message("searched " + path + (found ? ", found " : ", not found"));
+ if (!found)
+ depTracker->logFileNotFound(path);
+}
+
+Optional<std::string> macho::resolveDylibPath(StringRef dylibPath) {
+ // TODO: if a tbd and dylib are both present, we should check to make sure
+ // they are consistent.
+ bool dylibExists = fs::exists(dylibPath);
+ searchedDylib(dylibPath, dylibExists);
+ if (dylibExists)
+ return std::string(dylibPath);
+
+ SmallString<261> tbdPath = dylibPath;
+ path::replace_extension(tbdPath, ".tbd");
+ bool tbdExists = fs::exists(tbdPath);
+ searchedDylib(tbdPath, tbdExists);
+ if (tbdExists)
+ return std::string(tbdPath);
+ return {};
+}
+
+// It's not uncommon to have multiple attempts to load a single dylib,
+// especially if it's a commonly re-exported core library.
+static DenseMap<CachedHashStringRef, DylibFile *> loadedDylibs;
+
+DylibFile *macho::loadDylib(MemoryBufferRef mbref, DylibFile *umbrella,
+ bool isBundleLoader) {
+ CachedHashStringRef path(mbref.getBufferIdentifier());
+ DylibFile *&file = loadedDylibs[path];
+ if (file)
+ return file;
+
+ DylibFile *newFile;
+ file_magic magic = identify_magic(mbref.getBuffer());
+ if (magic == file_magic::tapi_file) {
+ Expected<std::unique_ptr<InterfaceFile>> result = TextAPIReader::get(mbref);
+ if (!result) {
+ error("could not load TAPI file at " + mbref.getBufferIdentifier() +
+ ": " + toString(result.takeError()));
+ return nullptr;
+ }
+ file = make<DylibFile>(**result, umbrella, isBundleLoader);
+
+ // parseReexports() can recursively call loadDylib(). That's fine since
+ // we wrote the DylibFile we just loaded to the loadDylib cache via the
+    // `file` reference. But the recursive load can grow loadedDylibs, so the
+ // `file` reference might become invalid after parseReexports() -- so copy
+ // the pointer it refers to before continuing.
+ newFile = file;
+ if (newFile->exportingFile)
+ newFile->parseReexports(**result);
+ } else {
+ assert(magic == file_magic::macho_dynamically_linked_shared_lib ||
+ magic == file_magic::macho_dynamically_linked_shared_lib_stub ||
+ magic == file_magic::macho_executable ||
+ magic == file_magic::macho_bundle);
+ file = make<DylibFile>(mbref, umbrella, isBundleLoader);
+
+ // parseLoadCommands() can also recursively call loadDylib(). See comment
+ // in previous block for why this means we must copy `file` here.
+ newFile = file;
+ if (newFile->exportingFile)
+ newFile->parseLoadCommands(mbref);
+ }
+ return newFile;
+}
+
+Optional<StringRef>
+macho::findPathCombination(const Twine &name,
+ const std::vector<StringRef> &roots,
+ ArrayRef<StringRef> extensions) {
+ SmallString<261> base;
+ for (StringRef dir : roots) {
+ base = dir;
+ path::append(base, name);
+ for (StringRef ext : extensions) {
+ Twine location = base + ext;
+ bool exists = fs::exists(location);
+ searchedDylib(location, exists);
+ if (exists)
+ return saver.save(location.str());
+ }
+ }
+ return {};
+}
+
+StringRef macho::rerootPath(StringRef path) {
+ if (!path::is_absolute(path, path::Style::posix) || path.endswith(".o"))
+ return path;
+
+ if (Optional<StringRef> rerootedPath =
+ findPathCombination(path, config->systemLibraryRoots))
+ return *rerootedPath;
+
+ return path;
+}
+
+Optional<InputFile *> macho::loadArchiveMember(MemoryBufferRef mb,
+ uint32_t modTime,
+ StringRef archiveName,
+ bool objCOnly,
+ uint64_t offsetInArchive) {
+ if (config->zeroModTime)
+ modTime = 0;
+
+ switch (identify_magic(mb.getBuffer())) {
+ case file_magic::macho_object:
+ if (!objCOnly || hasObjCSection(mb))
+ return make<ObjFile>(mb, modTime, archiveName);
+ return None;
+ case file_magic::bitcode:
+ if (!objCOnly || check(isBitcodeContainingObjCCategory(mb)))
+ return make<BitcodeFile>(mb, archiveName, offsetInArchive);
+ return None;
+ default:
+ error(archiveName + ": archive member " + mb.getBufferIdentifier() +
+ " has unhandled file type");
+ return None;
+ }
+}
+
+uint32_t macho::getModTime(StringRef path) {
+ if (config->zeroModTime)
+ return 0;
+
+ fs::file_status stat;
+ if (!fs::status(path, stat))
+ if (fs::exists(stat))
+ return toTimeT(stat.getLastModificationTime());
+
+ warn("failed to get modification time of " + path);
+ return 0;
+}
+
+void macho::printArchiveMemberLoad(StringRef reason, const InputFile *f) {
+ if (config->printEachFile)
+ message(toString(f));
+ if (config->printWhyLoad)
+ message(reason + " forced load of " + toString(f));
+}
+
+macho::DependencyTracker::DependencyTracker(StringRef path)
+ : path(path), active(!path.empty()) {
+ if (active && fs::exists(path) && !fs::can_write(path)) {
+ warn("Ignoring dependency_info option since specified path is not "
+ "writeable.");
+ active = false;
+ }
+}
+
+void macho::DependencyTracker::write(StringRef version,
+ const SetVector<InputFile *> &inputs,
+ StringRef output) {
+ if (!active)
+ return;
+
+ std::error_code ec;
+ raw_fd_ostream os(path, ec, fs::OF_None);
+ if (ec) {
+ warn("Error writing dependency info to file");
+ return;
+ }
+
+ auto addDep = [&os](DepOpCode opcode, const StringRef &path) {
+ // XXX: Even though DepOpCode's underlying type is uint8_t,
+ // this cast is still needed because Clang older than 10.x has a bug,
+ // where it doesn't know to cast the enum to its underlying type.
+ // Hence `<< DepOpCode` is ambiguous to it.
+ os << static_cast<uint8_t>(opcode);
+ os << path;
+ os << '\0';
+ };
+
+ addDep(DepOpCode::Version, version);
+
+  // Sort the inputs by name.
+ std::vector<StringRef> inputNames;
+ inputNames.reserve(inputs.size());
+ for (InputFile *f : inputs)
+ inputNames.push_back(f->getName());
+ llvm::sort(inputNames);
+
+ for (const StringRef &in : inputNames)
+ addDep(DepOpCode::Input, in);
+
+ for (const std::string &f : notFounds)
+ addDep(DepOpCode::NotFound, f);
+
+ addDep(DepOpCode::Output, output);
+}
--- /dev/null
+//===- DWARF.cpp ----------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "Dwarf.h"
+#include "InputFiles.h"
+#include "InputSection.h"
+#include "OutputSegment.h"
+
+#include <memory>
+
+using namespace lld;
+using namespace lld::macho;
+using namespace llvm;
+
+std::unique_ptr<DwarfObject> DwarfObject::create(ObjFile *obj) {
+ auto dObj = std::make_unique<DwarfObject>();
+ bool hasDwarfInfo = false;
+ // LLD only needs to extract the source file path from the debug info, so we
+ // initialize DwarfObject with just the sections necessary to get that path.
+ // The debugger will locate the debug info via the object file paths that we
+ // emit in our STABS symbols, so we don't need to process & emit them
+ // ourselves.
+ for (const InputSection *isec : obj->debugSections) {
+ if (StringRef *s =
+ StringSwitch<StringRef *>(isec->getName())
+ .Case(section_names::debugInfo, &dObj->infoSection.Data)
+ .Case(section_names::debugAbbrev, &dObj->abbrevSection)
+ .Case(section_names::debugStr, &dObj->strSection)
+ .Default(nullptr)) {
+ *s = toStringRef(isec->data);
+ hasDwarfInfo = true;
+ }
+ }
+
+ if (hasDwarfInfo)
+ return dObj;
+ return nullptr;
+}
--- /dev/null
+//===- DWARF.h -----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===-------------------------------------------------------------------===//
+
+#ifndef LLD_MACHO_DWARF_H
+#define LLD_MACHO_DWARF_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/DebugInfo/DWARF/DWARFObject.h"
+
+namespace lld {
+namespace macho {
+
+class ObjFile;
+
+// Implements the interface between LLVM's DWARF-parsing utilities and LLD's
+// InputSection structures.
+class DwarfObject final : public llvm::DWARFObject {
+public:
+ bool isLittleEndian() const override { return true; }
+
+ llvm::Optional<llvm::RelocAddrEntry> find(const llvm::DWARFSection &sec,
+ uint64_t pos) const override {
+ // TODO: implement this
+ return llvm::None;
+ }
+
+ void forEachInfoSections(
+ llvm::function_ref<void(const llvm::DWARFSection &)> f) const override {
+ f(infoSection);
+ }
+
+ llvm::StringRef getAbbrevSection() const override { return abbrevSection; }
+ llvm::StringRef getStrSection() const override { return strSection; }
+
+ // Returns an instance of DwarfObject if the given object file has the
+ // relevant DWARF debug sections.
+ static std::unique_ptr<DwarfObject> create(ObjFile *);
+
+private:
+ llvm::DWARFSection infoSection;
+ llvm::StringRef abbrevSection;
+ llvm::StringRef strSection;
+};
+
+} // namespace macho
+} // namespace lld
+
+#endif
#include "llvm/Support/LEB128.h"
using namespace llvm;
-using namespace llvm::MachO;
using namespace lld;
using namespace lld::macho;
struct ExportInfo {
uint64_t address;
- // TODO: Add proper support for re-exports & stub-and-resolver flags.
+ uint8_t flags = 0;
+ ExportInfo(const Symbol &sym, uint64_t imageBase)
+ : address(sym.getVA() - imageBase) {
+ using namespace llvm::MachO;
+ // Set the symbol type.
+ if (sym.isWeakDef())
+ flags |= EXPORT_SYMBOL_FLAGS_WEAK_DEFINITION;
+ // TODO: Add proper support for re-exports & stub-and-resolver flags.
+
+ // Set the symbol kind.
+ if (sym.isTlv()) {
+ flags |= EXPORT_SYMBOL_FLAGS_KIND_THREAD_LOCAL;
+ } else if (auto *defined = dyn_cast<Defined>(&sym)) {
+ if (defined->isAbsolute())
+ flags |= EXPORT_SYMBOL_FLAGS_KIND_ABSOLUTE;
+ }
+ }
};
} // namespace
// node.
size_t nodeSize;
if (info) {
- uint64_t flags = 0;
uint32_t terminalSize =
- getULEB128Size(flags) + getULEB128Size(info->address);
+ getULEB128Size(info->flags) + getULEB128Size(info->address);
// Overall node size so far is the uleb128 size of the length of the symbol
// info + the symbol info itself.
nodeSize = terminalSize + getULEB128Size(terminalSize);
}
// Compute size of all child edges.
++nodeSize; // Byte for number of children.
- for (Edge &edge : edges) {
+ for (const Edge &edge : edges) {
nodeSize += edge.substring.size() + 1 // String length.
+ getULEB128Size(edge.child->offset); // Offset len.
}
buf += offset;
if (info) {
// TrieNodes with Symbol info: size, flags address
- uint64_t flags = 0; // TODO: emit proper flags
uint32_t terminalSize =
- getULEB128Size(flags) + getULEB128Size(info->address);
+ getULEB128Size(info->flags) + getULEB128Size(info->address);
buf += encodeULEB128(terminalSize, buf);
- buf += encodeULEB128(flags, buf);
+ buf += encodeULEB128(info->flags, buf);
buf += encodeULEB128(info->address, buf);
} else {
// TrieNode with no Symbol info.
if (isTerminal) {
assert(j - i == 1); // no duplicate symbols
- node->info = {pivotSymbol->getVA()};
+ node->info = ExportInfo(*pivotSymbol, imageBase);
} else {
// This is the tail-call-optimized version of the following:
// sortAndBuild(vec.slice(i, j - i), node, lastPos, pos + 1);
class TrieBuilder {
public:
+ void setImageBase(uint64_t addr) { imageBase = addr; }
void addSymbol(const Symbol &sym) { exported.push_back(&sym); }
// Returns the size in bytes of the serialized trie.
size_t build();
void sortAndBuild(llvm::MutableArrayRef<const Symbol *> vec, TrieNode *node,
size_t lastPos, size_t pos);
+ uint64_t imageBase = 0;
std::vector<const Symbol *> exported;
std::vector<TrieNode *> nodes;
};
--- /dev/null
+//===- ICF.cpp ------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "ICF.h"
+#include "ConcatOutputSection.h"
+#include "InputSection.h"
+#include "Symbols.h"
+#include "UnwindInfoSection.h"
+
+#include "llvm/Support/Parallel.h"
+#include "llvm/Support/TimeProfiler.h"
+
+#include <atomic>
+
+using namespace llvm;
+using namespace lld;
+using namespace lld::macho;
+
+class ICF {
+public:
+ ICF(std::vector<ConcatInputSection *> &inputs);
+
+ void run();
+ void segregate(size_t begin, size_t end,
+ std::function<bool(const ConcatInputSection *,
+ const ConcatInputSection *)>
+ equals);
+ size_t findBoundary(size_t begin, size_t end);
+ void forEachClassRange(size_t begin, size_t end,
+ std::function<void(size_t, size_t)> func);
+ void forEachClass(std::function<void(size_t, size_t)> func);
+
+ // ICF needs a copy of the inputs vector because its equivalence-class
+ // segregation algorithm destroys the proper sequence.
+ std::vector<ConcatInputSection *> icfInputs;
+};
+
+ICF::ICF(std::vector<ConcatInputSection *> &inputs) {
+ icfInputs.assign(inputs.begin(), inputs.end());
+}
+
+// ICF = Identical Code Folding
+//
+// We only fold __TEXT,__text, so this is really "code" folding, and not
+// "COMDAT" folding. String and scalar constant literals are deduplicated
+// elsewhere.
+//
+// Summary of segments & sections:
+//
+// The __TEXT segment is readonly at the MMU. Some sections are already
+// deduplicated elsewhere (__TEXT,__cstring & __TEXT,__literal*) and some are
+// synthetic and inherently free of duplicates (__TEXT,__stubs &
+// __TEXT,__unwind_info). Note that we don't yet run ICF on __TEXT,__const,
+// because doing so induces many test failures.
+//
+// The __LINKEDIT segment is readonly at the MMU, yet entirely synthetic, and
+// thus ineligible for ICF.
+//
+// The __DATA_CONST segment is read/write at the MMU, but is logically const to
+// the application after dyld applies fixups to pointer data. We currently
+// fold only the __DATA_CONST,__cfstring section.
+//
+// The __DATA segment is read/write at the MMU, and as application-writeable
+// data, none of its sections are eligible for ICF.
+//
+// Please see the large block comment in lld/ELF/ICF.cpp for an explanation
+// of the segregation algorithm.
+//
+// FIXME(gkm): implement keep-unique attributes
+// FIXME(gkm): implement address-significance tables for MachO object files
+
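+// icfPass parity selects which of the two icfEqClass slots is read as the
+// current equivalence class (the other slot receives the next round's classes);
+// icfRepeat records that segregation split a class and another round is needed.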
+static unsigned icfPass = 0;
+static std::atomic<bool> icfRepeat{false};
+
+// Compare "non-moving" parts of two ConcatInputSections, namely everything
+// except references to other ConcatInputSections.
+static bool equalsConstant(const ConcatInputSection *ia,
+ const ConcatInputSection *ib) {
+ // We can only fold within the same OutputSection.
+ if (ia->parent != ib->parent)
+ return false;
+ if (ia->data.size() != ib->data.size())
+ return false;
+ if (ia->data != ib->data)
+ return false;
+ if (ia->relocs.size() != ib->relocs.size())
+ return false;
+ auto f = [](const Reloc &ra, const Reloc &rb) {
+ if (ra.type != rb.type)
+ return false;
+ if (ra.pcrel != rb.pcrel)
+ return false;
+ if (ra.length != rb.length)
+ return false;
+ if (ra.offset != rb.offset)
+ return false;
+ if (ra.addend != rb.addend)
+ return false;
+ if (ra.referent.is<Symbol *>() != rb.referent.is<Symbol *>())
+ return false;
+
+ InputSection *isecA, *isecB;
+ if (ra.referent.is<Symbol *>()) {
+ const auto *sa = ra.referent.get<Symbol *>();
+ const auto *sb = rb.referent.get<Symbol *>();
+ if (sa->kind() != sb->kind())
+ return false;
+ if (isa<Defined>(sa)) {
+ const auto *da = cast<Defined>(sa);
+ const auto *db = cast<Defined>(sb);
+ if (da->isec && db->isec) {
+ isecA = da->isec;
+ isecB = db->isec;
+ } else {
+ assert(da->isAbsolute() && db->isAbsolute());
+ return da->value == db->value;
+ }
+ } else {
+ assert(isa<DylibSymbol>(sa));
+ return sa == sb;
+ }
+ } else {
+ isecA = ra.referent.get<InputSection *>();
+ isecB = rb.referent.get<InputSection *>();
+ }
+
+ if (isecA->parent != isecB->parent)
+ return false;
+ // Sections with identical parents should be of the same kind.
+ assert(isecA->kind() == isecB->kind());
+ // We will compare ConcatInputSection contents in equalsVariable.
+ if (isa<ConcatInputSection>(isecA))
+ return true;
+ // Else we have two literal sections. References to them are equal iff their
+ // offsets in the output section are equal.
+ return isecA->getOffset(ra.addend) == isecB->getOffset(rb.addend);
+ };
+ return std::equal(ia->relocs.begin(), ia->relocs.end(), ib->relocs.begin(),
+ f);
+}
+
+// Compare the "moving" parts of two ConcatInputSections -- i.e. everything not
+// handled by equalsConstant().
+static bool equalsVariable(const ConcatInputSection *ia,
+ const ConcatInputSection *ib) {
+ assert(ia->relocs.size() == ib->relocs.size());
+ auto f = [](const Reloc &ra, const Reloc &rb) {
+ // We already filtered out mismatching values/addends in equalsConstant.
+ if (ra.referent == rb.referent)
+ return true;
+ const ConcatInputSection *isecA, *isecB;
+ if (ra.referent.is<Symbol *>()) {
+ // Matching DylibSymbols are already filtered out by the
+ // identical-referent check above. Non-matching DylibSymbols were filtered
+ // out in equalsConstant(). So we can safely cast to Defined here.
+ const auto *da = cast<Defined>(ra.referent.get<Symbol *>());
+ const auto *db = cast<Defined>(rb.referent.get<Symbol *>());
+ if (da->isAbsolute())
+ return true;
+ isecA = dyn_cast<ConcatInputSection>(da->isec);
+ if (!isecA)
+ return true; // literal sections were checked in equalsConstant.
+ isecB = cast<ConcatInputSection>(db->isec);
+ } else {
+ const auto *sa = ra.referent.get<InputSection *>();
+ const auto *sb = rb.referent.get<InputSection *>();
+ isecA = dyn_cast<ConcatInputSection>(sa);
+ if (!isecA)
+ return true;
+ isecB = cast<ConcatInputSection>(sb);
+ }
+ return isecA->icfEqClass[icfPass % 2] == isecB->icfEqClass[icfPass % 2];
+ };
+ return std::equal(ia->relocs.begin(), ia->relocs.end(), ib->relocs.begin(),
+ f);
+}
+
+// Find the first InputSection after BEGIN whose equivalence class differs
+size_t ICF::findBoundary(size_t begin, size_t end) {
+ uint64_t beginHash = icfInputs[begin]->icfEqClass[icfPass % 2];
+ for (size_t i = begin + 1; i < end; ++i)
+ if (beginHash != icfInputs[i]->icfEqClass[icfPass % 2])
+ return i;
+ return end;
+}
+
+// Invoke FUNC on subranges with matching equivalence class
+void ICF::forEachClassRange(size_t begin, size_t end,
+ std::function<void(size_t, size_t)> func) {
+ while (begin < end) {
+ size_t mid = findBoundary(begin, end);
+ func(begin, mid);
+ begin = mid;
+ }
+}
+
+// Split icfInputs into shards, then parallelize invocation of FUNC on subranges
+// with matching equivalence class
+void ICF::forEachClass(std::function<void(size_t, size_t)> func) {
+ // Only use threads when the benefits outweigh the overhead.
+ const size_t threadingThreshold = 1024;
+ if (icfInputs.size() < threadingThreshold) {
+ forEachClassRange(0, icfInputs.size(), func);
+ ++icfPass;
+ return;
+ }
+
+ // Shard into non-overlapping intervals, and call FUNC in parallel. The
+ // sharding must be completed before any calls to FUNC are made so that FUNC
+ // can modify the InputSection in its shard without causing data races.
+ const size_t shards = 256;
+ size_t step = icfInputs.size() / shards;
+ size_t boundaries[shards + 1];
+ boundaries[0] = 0;
+ boundaries[shards] = icfInputs.size();
+ parallelForEachN(1, shards, [&](size_t i) {
+ boundaries[i] = findBoundary((i - 1) * step, icfInputs.size());
+ });
+ parallelForEachN(1, shards + 1, [&](size_t i) {
+ if (boundaries[i - 1] < boundaries[i]) {
+ forEachClassRange(boundaries[i - 1], boundaries[i], func);
+ }
+ });
+ ++icfPass;
+}
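
For reference, here is a small standalone sketch (not part of the patch; the class values and shard count below are invented) of the boundary-snapping idea used by forEachClass(): each shard boundary is advanced to the next equivalence-class edge, so no class straddles two shards and each shard can be processed in parallel without races.

#include <cstdint>
#include <cstdio>
#include <vector>

// Return the first index after `begin` whose class differs from classes[begin].
static size_t findBoundary(const std::vector<uint64_t> &classes, size_t begin,
                           size_t end) {
  for (size_t i = begin + 1; i < end; ++i)
    if (classes[i] != classes[begin])
      return i;
  return end;
}

int main() {
  std::vector<uint64_t> classes = {7, 7, 7, 9, 9, 4, 4, 4, 4};
  const size_t shards = 3;
  size_t step = classes.size() / shards;
  std::vector<size_t> bounds(shards + 1);
  bounds[0] = 0;
  bounds[shards] = classes.size();
  for (size_t i = 1; i < shards; ++i)
    bounds[i] = findBoundary(classes, (i - 1) * step, classes.size());
  for (size_t b : bounds)
    printf("%zu ", b); // prints: 0 3 5 9 -- every class lies inside one shard
  printf("\n");
  return 0;
}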
+
+void ICF::run() {
+ // Into each origin-section hash, combine all reloc referent section hashes.
+ for (icfPass = 0; icfPass < 2; ++icfPass) {
+ parallelForEach(icfInputs, [&](ConcatInputSection *isec) {
+ uint64_t hash = isec->icfEqClass[icfPass % 2];
+ for (const Reloc &r : isec->relocs) {
+ if (auto *sym = r.referent.dyn_cast<Symbol *>()) {
+ if (auto *dylibSym = dyn_cast<DylibSymbol>(sym))
+ hash += dylibSym->stubsHelperIndex;
+ else if (auto *defined = dyn_cast<Defined>(sym)) {
+ if (defined->isec) {
+ if (auto isec = dyn_cast<ConcatInputSection>(defined->isec))
+ hash += defined->value + isec->icfEqClass[icfPass % 2];
+ else
+ hash += defined->isec->kind() +
+ defined->isec->getOffset(defined->value);
+ } else {
+ hash += defined->value;
+ }
+ } else
+ llvm_unreachable("foldIdenticalSections symbol kind");
+ }
+ }
+ // Set MSB to 1 to avoid collisions with non-hashed classes.
+ isec->icfEqClass[(icfPass + 1) % 2] = hash | (1ull << 63);
+ });
+ }
+
+ llvm::stable_sort(
+ icfInputs, [](const ConcatInputSection *a, const ConcatInputSection *b) {
+ return a->icfEqClass[0] < b->icfEqClass[0];
+ });
+ forEachClass(
+ [&](size_t begin, size_t end) { segregate(begin, end, equalsConstant); });
+
+ // Split equivalence groups by comparing relocations until convergence
+ do {
+ icfRepeat = false;
+ forEachClass([&](size_t begin, size_t end) {
+ segregate(begin, end, equalsVariable);
+ });
+ } while (icfRepeat);
+ log("ICF needed " + Twine(icfPass) + " iterations");
+
+ // Fold sections within equivalence classes
+ forEachClass([&](size_t begin, size_t end) {
+ if (end - begin < 2)
+ return;
+ ConcatInputSection *beginIsec = icfInputs[begin];
+ for (size_t i = begin + 1; i < end; ++i)
+ beginIsec->foldIdentical(icfInputs[i]);
+ });
+}
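
A minimal sketch of the double buffering visible in the hashing loop above (the "refinement" arithmetic is invented purely for illustration): each pass reads the value produced by the previous pass from one slot of a two-element array and writes into the other, mirroring icfEqClass[icfPass % 2] versus icfEqClass[(icfPass + 1) % 2], so a pass never overwrites data it still needs to read.

#include <cstdint>
#include <cstdio>

int main() {
  uint64_t eqClass[2] = {42, 0}; // slot 0 holds the initial content hash
  for (unsigned pass = 0; pass < 4; ++pass) {
    // Stand-in for "combine referent hashes into the origin hash".
    uint64_t refined = eqClass[pass % 2] * 31 + 7;
    eqClass[(pass + 1) % 2] = refined;
    printf("pass %u: read slot %u, wrote slot %u, value %llu\n", pass,
           pass % 2, (pass + 1) % 2, (unsigned long long)refined);
  }
  return 0;
}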
+
+// Split an equivalence class into smaller classes.
+void ICF::segregate(
+ size_t begin, size_t end,
+ std::function<bool(const ConcatInputSection *, const ConcatInputSection *)>
+ equals) {
+ while (begin < end) {
+ // Divide [begin, end) into two. Let mid be the start index of the
+ // second group.
+ auto bound = std::stable_partition(icfInputs.begin() + begin + 1,
+ icfInputs.begin() + end,
+ [&](ConcatInputSection *isec) {
+ return equals(icfInputs[begin], isec);
+ });
+ size_t mid = bound - icfInputs.begin();
+
+ // Split [begin, end) into [begin, mid) and [mid, end). We use mid as an
+ // equivalence class ID because every group ends with a unique index.
+ for (size_t i = begin; i < mid; ++i)
+ icfInputs[i]->icfEqClass[(icfPass + 1) % 2] = mid;
+
+ // If we created a group, we need to iterate the main loop again.
+ if (mid != end)
+ icfRepeat = true;
+
+ begin = mid;
+ }
+}
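
A toy illustration of the partition-and-relabel step performed by segregate() (plain integer equality stands in for equalsConstant/equalsVariable): each iteration peels off the run equal to the first element and stamps it with mid as its new class ID.

#include <algorithm>
#include <cstdio>
#include <vector>

int main() {
  std::vector<int> vals = {3, 5, 3, 5, 8};
  std::vector<size_t> eqClass(vals.size());
  size_t begin = 0, end = vals.size();
  while (begin < end) {
    auto bound =
        std::stable_partition(vals.begin() + begin + 1, vals.begin() + end,
                              [&](int v) { return v == vals[begin]; });
    size_t mid = bound - vals.begin();
    for (size_t i = begin; i < mid; ++i)
      eqClass[i] = mid;
    begin = mid;
  }
  for (size_t i = 0; i < vals.size(); ++i)
    printf("%d -> class %zu\n", vals[i], eqClass[i]);
  // After pass 1: {3,3} get class 2; then {5,5} get class 4; then {8} gets 5.
  return 0;
}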
+
+template <class Ptr>
+DenseSet<const InputSection *> findFunctionsWithUnwindInfo() {
+ DenseSet<const InputSection *> result;
+ for (ConcatInputSection *isec : in.unwindInfo->getInputs()) {
+ for (size_t i = 0; i < isec->relocs.size(); ++i) {
+ Reloc &r = isec->relocs[i];
+ assert(target->hasAttr(r.type, RelocAttrBits::UNSIGNED));
+ if (r.offset % sizeof(CompactUnwindEntry<Ptr>) !=
+ offsetof(CompactUnwindEntry<Ptr>, functionAddress))
+ continue;
+ result.insert(r.referent.get<InputSection *>());
+ }
+ }
+ return result;
+}
+
+void macho::foldIdenticalSections() {
+ TimeTraceScope timeScope("Fold Identical Code Sections");
+ // The ICF equivalence-class segregation algorithm relies on pre-computed
+ // hashes of InputSection::data for the ConcatOutputSection::inputs and all
+ // sections referenced by their relocs. We could recursively traverse the
+ // relocs to find every referenced InputSection, but that precludes easy
+ // parallelization. Therefore, we hash every InputSection here where we have
+ // them all accessible as simple vectors.
+
+ // ICF can't fold functions with unwind info
+ DenseSet<const InputSection *> functionsWithUnwindInfo =
+ target->wordSize == 8 ? findFunctionsWithUnwindInfo<uint64_t>()
+ : findFunctionsWithUnwindInfo<uint32_t>();
+
+ // If an InputSection is ineligible for ICF, we give it a unique ID to force
+ // it into an unfoldable singleton equivalence class. Begin the unique-ID
+ // space at inputSections.size(), so that it will never intersect with
+ // equivalence-class IDs which begin at 0. Since hashes & unique IDs never
+ // coexist with equivalence-class IDs, this is not necessary, but might help
+ // someone keep the numbers straight in case we ever need to debug
+ // ICF::segregate().
+ std::vector<ConcatInputSection *> hashable;
+ uint64_t icfUniqueID = inputSections.size();
+ for (ConcatInputSection *isec : inputSections) {
+ // FIXME: consider non-code __text sections as hashable?
+ bool isHashable = (isCodeSection(isec) || isCfStringSection(isec)) &&
+ !isec->shouldOmitFromOutput() &&
+ !functionsWithUnwindInfo.contains(isec) &&
+ isec->isHashableForICF();
+ if (isHashable)
+ hashable.push_back(isec);
+ else
+ isec->icfEqClass[0] = ++icfUniqueID;
+ }
+ parallelForEach(hashable,
+ [](ConcatInputSection *isec) { isec->hashForICF(); });
+ // Now that every input section is either hashed or marked as unique, run the
+ // segregation algorithm to detect foldable subsections.
+ ICF(hashable).run();
+}
--- /dev/null
+//===- ICF.h ----------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLD_MACHO_ICF_H
+#define LLD_MACHO_ICF_H
+
+#include "lld/Common/LLVM.h"
+#include <vector>
+
+namespace lld {
+namespace macho {
+
+void foldIdenticalSections();
+
+} // namespace macho
+} // namespace lld
+
+#endif
#include "InputFiles.h"
#include "Config.h"
+#include "Driver.h"
+#include "Dwarf.h"
#include "ExportTrie.h"
#include "InputSection.h"
#include "MachOStructs.h"
+#include "ObjC.h"
#include "OutputSection.h"
+#include "OutputSegment.h"
#include "SymbolTable.h"
#include "Symbols.h"
+#include "SyntheticSections.h"
#include "Target.h"
+#include "lld/Common/DWARF.h"
#include "lld/Common/ErrorHandler.h"
#include "lld/Common/Memory.h"
+#include "lld/Common/Reproduce.h"
+#include "llvm/ADT/iterator.h"
#include "llvm/BinaryFormat/MachO.h"
+#include "llvm/LTO/LTO.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
+#include "llvm/Support/TarWriter.h"
+#include "llvm/TextAPI/Architecture.h"
+#include "llvm/TextAPI/InterfaceFile.h"
using namespace llvm;
using namespace llvm::MachO;
using namespace lld;
using namespace lld::macho;
-std::vector<InputFile *> macho::inputFiles;
+// Returns "<internal>", "foo.a(bar.o)", or "baz.o".
+std::string lld::toString(const InputFile *f) {
+ if (!f)
+ return "<internal>";
+
+ // Multiple dylibs can be defined in one .tbd file.
+ if (auto dylibFile = dyn_cast<DylibFile>(f))
+ if (f->getName().endswith(".tbd"))
+ return (f->getName() + "(" + dylibFile->installName + ")").str();
+
+ if (f->archiveName.empty())
+ return std::string(f->getName());
+ return (f->archiveName + "(" + path::filename(f->getName()) + ")").str();
+}
+
+SetVector<InputFile *> macho::inputFiles;
+std::unique_ptr<TarWriter> macho::tar;
+int InputFile::idCount = 0;
+
+static VersionTuple decodeVersion(uint32_t version) {
+ unsigned major = version >> 16;
+ unsigned minor = (version >> 8) & 0xffu;
+ unsigned subMinor = version & 0xffu;
+ return VersionTuple(major, minor, subMinor);
+}
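
A worked example of the byte layout decoded above, written as a standalone helper rather than returning the linker's VersionTuple: the 32-bit value 0x000A0F06 unpacks to 10.15.6.

#include <cassert>
#include <cstdint>
#include <cstdio>

struct Version {
  unsigned major, minor, subMinor;
};

static Version decode(uint32_t v) {
  // Top 16 bits: major; next 8 bits: minor; low 8 bits: sub-minor.
  return {v >> 16, (v >> 8) & 0xffu, v & 0xffu};
}

int main() {
  Version v = decode(0x000A0F06);
  assert(v.major == 10 && v.minor == 15 && v.subMinor == 6);
  printf("%u.%u.%u\n", v.major, v.minor, v.subMinor);
  return 0;
}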
+
+static std::vector<PlatformInfo> getPlatformInfos(const InputFile *input) {
+ if (!isa<ObjFile>(input) && !isa<DylibFile>(input))
+ return {};
+
+ const char *hdr = input->mb.getBufferStart();
+
+ std::vector<PlatformInfo> platformInfos;
+ for (auto *cmd : findCommands<build_version_command>(hdr, LC_BUILD_VERSION)) {
+ PlatformInfo info;
+ info.target.Platform = static_cast<PlatformKind>(cmd->platform);
+ info.minimum = decodeVersion(cmd->minos);
+ platformInfos.emplace_back(std::move(info));
+ }
+ for (auto *cmd : findCommands<version_min_command>(
+ hdr, LC_VERSION_MIN_MACOSX, LC_VERSION_MIN_IPHONEOS,
+ LC_VERSION_MIN_TVOS, LC_VERSION_MIN_WATCHOS)) {
+ PlatformInfo info;
+ switch (cmd->cmd) {
+ case LC_VERSION_MIN_MACOSX:
+ info.target.Platform = PlatformKind::macOS;
+ break;
+ case LC_VERSION_MIN_IPHONEOS:
+ info.target.Platform = PlatformKind::iOS;
+ break;
+ case LC_VERSION_MIN_TVOS:
+ info.target.Platform = PlatformKind::tvOS;
+ break;
+ case LC_VERSION_MIN_WATCHOS:
+ info.target.Platform = PlatformKind::watchOS;
+ break;
+ }
+ info.minimum = decodeVersion(cmd->version);
+ platformInfos.emplace_back(std::move(info));
+ }
+
+ return platformInfos;
+}
+
+static bool checkCompatibility(const InputFile *input) {
+ std::vector<PlatformInfo> platformInfos = getPlatformInfos(input);
+ if (platformInfos.empty())
+ return true;
+
+ auto it = find_if(platformInfos, [&](const PlatformInfo &info) {
+ return removeSimulator(info.target.Platform) ==
+ removeSimulator(config->platform());
+ });
+ if (it == platformInfos.end()) {
+ std::string platformNames;
+ raw_string_ostream os(platformNames);
+ interleave(
+ platformInfos, os,
+ [&](const PlatformInfo &info) {
+ os << getPlatformName(info.target.Platform);
+ },
+ "/");
+ error(toString(input) + " has platform " + platformNames +
+ Twine(", which is different from target platform ") +
+ getPlatformName(config->platform()));
+ return false;
+ }
+
+ if (it->minimum > config->platformInfo.minimum)
+ warn(toString(input) + " has version " + it->minimum.getAsString() +
+ ", which is newer than target minimum of " +
+ config->platformInfo.minimum.getAsString());
+
+ return true;
+}
// Open a given file path and return it as a memory-mapped file.
Optional<MemoryBufferRef> macho::readFile(StringRef path) {
- // Open a file.
- auto mbOrErr = MemoryBuffer::getFile(path);
- if (auto ec = mbOrErr.getError()) {
+ ErrorOr<std::unique_ptr<MemoryBuffer>> mbOrErr = MemoryBuffer::getFile(path);
+ if (std::error_code ec = mbOrErr.getError()) {
error("cannot open " + path + ": " + ec.message());
return None;
}
// If this is a regular non-fat file, return it.
const char *buf = mbref.getBufferStart();
- auto *hdr = reinterpret_cast<const MachO::fat_header *>(buf);
- if (read32be(&hdr->magic) != MachO::FAT_MAGIC)
+ const auto *hdr = reinterpret_cast<const fat_header *>(buf);
+ if (mbref.getBufferSize() < sizeof(uint32_t) ||
+ read32be(&hdr->magic) != FAT_MAGIC) {
+ if (tar)
+ tar->append(relativeToRoot(path), mbref.getBuffer());
return mbref;
+ }
- // Object files and archive files may be fat files, which contains
- // multiple real files for different CPU ISAs. Here, we search for a
- // file that matches with the current link target and returns it as
- // a MemoryBufferRef.
- auto *arch = reinterpret_cast<const MachO::fat_arch *>(buf + sizeof(*hdr));
+ // Object files and archive files may be fat files, which contain multiple
+ // real files for different CPU ISAs. Here, we search for a file that matches
+ // the current link target and return it as a MemoryBufferRef.
+ const auto *arch = reinterpret_cast<const fat_arch *>(buf + sizeof(*hdr));
for (uint32_t i = 0, n = read32be(&hdr->nfat_arch); i < n; ++i) {
if (reinterpret_cast<const char *>(arch + i + 1) >
return None;
}
- if (read32be(&arch[i].cputype) != target->cpuType ||
+ if (read32be(&arch[i].cputype) != static_cast<uint32_t>(target->cpuType) ||
read32be(&arch[i].cpusubtype) != target->cpuSubtype)
continue;
uint32_t size = read32be(&arch[i].size);
if (offset + size > mbref.getBufferSize())
error(path + ": slice extends beyond end of file");
+ if (tar)
+ tar->append(relativeToRoot(path), mbref.getBuffer());
return MemoryBufferRef(StringRef(buf + offset, size), path.copy(bAlloc));
}
return None;
}
-static const load_command *findCommand(const mach_header_64 *hdr,
- uint32_t type) {
- const uint8_t *p =
- reinterpret_cast<const uint8_t *>(hdr) + sizeof(mach_header_64);
+InputFile::InputFile(Kind kind, const InterfaceFile &interface)
+ : id(idCount++), fileKind(kind), name(saver.save(interface.getPath())) {}
- for (uint32_t i = 0, n = hdr->ncmds; i < n; ++i) {
- auto *cmd = reinterpret_cast<const load_command *>(p);
- if (cmd->cmd == type)
- return cmd;
- p += cmd->cmdsize;
- }
- return nullptr;
-}
-
-void InputFile::parseSections(ArrayRef<section_64> sections) {
+template <class Section>
+void ObjFile::parseSections(ArrayRef<Section> sections) {
subsections.reserve(sections.size());
auto *buf = reinterpret_cast<const uint8_t *>(mb.getBufferStart());
- for (const section_64 &sec : sections) {
- InputSection *isec = make<InputSection>();
- isec->file = this;
- isec->name = StringRef(sec.sectname, strnlen(sec.sectname, 16));
- isec->segname = StringRef(sec.segname, strnlen(sec.segname, 16));
- isec->data = {isZeroFill(sec.flags) ? nullptr : buf + sec.offset,
- static_cast<size_t>(sec.size)};
- if (sec.align >= 32)
- error("alignment " + std::to_string(sec.align) + " of section " +
- isec->name + " is too large");
- else
- isec->align = 1 << sec.align;
- isec->flags = sec.flags;
- subsections.push_back({{0, isec}});
+ for (const Section &sec : sections) {
+ StringRef name =
+ StringRef(sec.sectname, strnlen(sec.sectname, sizeof(sec.sectname)));
+ StringRef segname =
+ StringRef(sec.segname, strnlen(sec.segname, sizeof(sec.segname)));
+ ArrayRef<uint8_t> data = {isZeroFill(sec.flags) ? nullptr
+ : buf + sec.offset,
+ static_cast<size_t>(sec.size)};
+ if (sec.align >= 32) {
+ error("alignment " + std::to_string(sec.align) + " of section " + name +
+ " is too large");
+ subsections.push_back({});
+ continue;
+ }
+ uint32_t align = 1 << sec.align;
+ uint32_t flags = sec.flags;
+
+ if (sectionType(sec.flags) == S_CSTRING_LITERALS ||
+ (config->dedupLiterals && isWordLiteralSection(sec.flags))) {
+ if (sec.nreloc && config->dedupLiterals)
+ fatal(toString(this) + " contains relocations in " + sec.segname + "," +
+ sec.sectname +
+ ", so LLD cannot deduplicate literals. Try re-running without "
+ "--deduplicate-literals.");
+
+ InputSection *isec;
+ if (sectionType(sec.flags) == S_CSTRING_LITERALS) {
+ isec =
+ make<CStringInputSection>(segname, name, this, data, align, flags);
+ // FIXME: parallelize this?
+ cast<CStringInputSection>(isec)->splitIntoPieces();
+ } else {
+ isec = make<WordLiteralInputSection>(segname, name, this, data, align,
+ flags);
+ }
+ subsections.push_back({{0, isec}});
+ } else if (config->icfLevel != ICFLevel::none &&
+ (name == section_names::cfString &&
+ segname == segment_names::data)) {
+ uint64_t literalSize = target->wordSize == 8 ? 32 : 16;
+ subsections.push_back({});
+ SubsectionMap &subsecMap = subsections.back();
+ for (uint64_t off = 0; off < data.size(); off += literalSize)
+ subsecMap.push_back(
+ {off, make<ConcatInputSection>(segname, name, this,
+ data.slice(off, literalSize), align,
+ flags)});
+ } else {
+ auto *isec =
+ make<ConcatInputSection>(segname, name, this, data, align, flags);
+ if (!(isDebugSection(isec->getFlags()) &&
+ isec->getSegName() == segment_names::dwarf)) {
+ subsections.push_back({{0, isec}});
+ } else {
+ // Instead of emitting DWARF sections, we emit STABS symbols to the
+ // object files that contain them. We filter them out early to avoid
+ // parsing their relocations unnecessarily. But we must still push an
+ // empty map to ensure the indices line up for the remaining sections.
+ subsections.push_back({});
+ debugSections.push_back(isec);
+ }
+ }
}
}
// same location as an offset relative to the start of the containing
// subsection.
static InputSection *findContainingSubsection(SubsectionMap &map,
- uint32_t *offset) {
- auto it = std::prev(map.upper_bound(*offset));
- *offset -= it->first;
- return it->second;
+ uint64_t *offset) {
+ auto it = std::prev(llvm::upper_bound(
+ map, *offset, [](uint64_t value, SubsectionEntry subsecEntry) {
+ return value < subsecEntry.offset;
+ }));
+ *offset -= it->offset;
+ return it->isec;
+}
+
+template <class Section>
+static bool validateRelocationInfo(InputFile *file, const Section &sec,
+ relocation_info rel) {
+ const RelocAttrs &relocAttrs = target->getRelocAttrs(rel.r_type);
+ bool valid = true;
+ auto message = [relocAttrs, file, sec, rel, &valid](const Twine &diagnostic) {
+ valid = false;
+ return (relocAttrs.name + " relocation " + diagnostic + " at offset " +
+ std::to_string(rel.r_address) + " of " + sec.segname + "," +
+ sec.sectname + " in " + toString(file))
+ .str();
+ };
+
+ if (!relocAttrs.hasAttr(RelocAttrBits::LOCAL) && !rel.r_extern)
+ error(message("must be extern"));
+ if (relocAttrs.hasAttr(RelocAttrBits::PCREL) != rel.r_pcrel)
+ error(message(Twine("must ") + (rel.r_pcrel ? "not " : "") +
+ "be PC-relative"));
+ if (isThreadLocalVariables(sec.flags) &&
+ !relocAttrs.hasAttr(RelocAttrBits::UNSIGNED))
+ error(message("not allowed in thread-local section, must be UNSIGNED"));
+ if (rel.r_length < 2 || rel.r_length > 3 ||
+ !relocAttrs.hasAttr(static_cast<RelocAttrBits>(1 << rel.r_length))) {
+ static SmallVector<StringRef, 4> widths{"0", "4", "8", "4 or 8"};
+ error(message("has width " + std::to_string(1 << rel.r_length) +
+ " bytes, but must be " +
+ widths[(static_cast<int>(relocAttrs.bits) >> 2) & 3] +
+ " bytes"));
+ }
+ return valid;
}
-void InputFile::parseRelocations(const section_64 &sec,
- SubsectionMap &subsecMap) {
+template <class Section>
+void ObjFile::parseRelocations(ArrayRef<Section> sectionHeaders,
+ const Section &sec, SubsectionMap &subsecMap) {
auto *buf = reinterpret_cast<const uint8_t *>(mb.getBufferStart());
- ArrayRef<any_relocation_info> relInfos(
- reinterpret_cast<const any_relocation_info *>(buf + sec.reloff),
- sec.nreloc);
+ ArrayRef<relocation_info> relInfos(
+ reinterpret_cast<const relocation_info *>(buf + sec.reloff), sec.nreloc);
- for (const any_relocation_info &anyRel : relInfos) {
- if (anyRel.r_word0 & R_SCATTERED)
- fatal("TODO: Scattered relocations not supported");
+ auto subsecIt = subsecMap.rbegin();
+ for (size_t i = 0; i < relInfos.size(); i++) {
+ // Paired relocations serve as Mach-O's method for attaching a
+ // supplemental datum to a primary relocation record. ELF does not
+ // need them because the *_RELOC_RELA records contain the extra
+ // addend field, vs. *_RELOC_REL which omit the addend.
+ //
+ // The {X86_64,ARM64}_RELOC_SUBTRACTOR record holds the subtrahend,
+ // and the paired *_RELOC_UNSIGNED record holds the minuend. The
+ // datum for each is a symbolic address. The result is the offset
+ // between two addresses.
+ //
+ // The ARM64_RELOC_ADDEND record holds the addend, and the paired
+ // ARM64_RELOC_BRANCH26 or ARM64_RELOC_PAGE21/PAGEOFF12 holds the
+ // base symbolic address.
+ //
+ // Note: X86 does not use *_RELOC_ADDEND because it can embed an
+ // addend into the instruction stream. On X86, a relocatable address
+ // field always occupies an entire contiguous sequence of byte(s),
+ // so there is no need to merge opcode bits with address
+ // bits. Therefore, it's easy and convenient to store addends in the
+ // instruction-stream bytes that would otherwise contain zeroes. By
+ // contrast, RISC ISAs such as ARM64 mix opcode bits with
+ // address bits so that bitwise arithmetic is necessary to extract
+ // and insert them. Storing addends in the instruction stream is
+ // possible, but inconvenient and more costly at link time.
- auto rel = reinterpret_cast<const relocation_info &>(anyRel);
+ int64_t pairedAddend = 0;
+ relocation_info relInfo = relInfos[i];
+ if (target->hasAttr(relInfo.r_type, RelocAttrBits::ADDEND)) {
+ pairedAddend = SignExtend64<24>(relInfo.r_symbolnum);
+ relInfo = relInfos[++i];
+ }
+ assert(i < relInfos.size());
+ if (!validateRelocationInfo(this, sec, relInfo))
+ continue;
+ if (relInfo.r_address & R_SCATTERED)
+ fatal("TODO: Scattered relocations not supported");
+ bool isSubtrahend =
+ target->hasAttr(relInfo.r_type, RelocAttrBits::SUBTRAHEND);
+ int64_t embeddedAddend = target->getEmbeddedAddend(mb, sec.offset, relInfo);
+ assert(!(embeddedAddend && pairedAddend));
+ int64_t totalAddend = pairedAddend + embeddedAddend;
Reloc r;
- r.type = rel.r_type;
- r.pcrel = rel.r_pcrel;
- r.length = rel.r_length;
- uint64_t rawAddend = target->getImplicitAddend(mb, sec, rel);
-
- if (rel.r_extern) {
- r.target = symbols[rel.r_symbolnum];
- r.addend = rawAddend;
+ r.type = relInfo.r_type;
+ r.pcrel = relInfo.r_pcrel;
+ r.length = relInfo.r_length;
+ r.offset = relInfo.r_address;
+ if (relInfo.r_extern) {
+ r.referent = symbols[relInfo.r_symbolnum];
+ r.addend = isSubtrahend ? 0 : totalAddend;
} else {
- if (rel.r_symbolnum == 0 || rel.r_symbolnum > subsections.size())
- fatal("invalid section index in relocation for offset " +
- std::to_string(r.offset) + " in section " + sec.sectname +
- " of " + getName());
-
- SubsectionMap &targetSubsecMap = subsections[rel.r_symbolnum - 1];
- const section_64 &targetSec = sectionHeaders[rel.r_symbolnum - 1];
- uint32_t targetOffset;
- if (rel.r_pcrel) {
+ assert(!isSubtrahend);
+ const Section &referentSec = sectionHeaders[relInfo.r_symbolnum - 1];
+ uint64_t referentOffset;
+ if (relInfo.r_pcrel) {
// The implicit addend for pcrel section relocations is the pcrel offset
// in terms of the addresses in the input file. Here we adjust it so
- // that it describes the offset from the start of the target section.
- // TODO: The offset of 4 is probably not right for ARM64, nor for
- // relocations with r_length != 2.
- targetOffset =
- sec.addr + rel.r_address + 4 + rawAddend - targetSec.addr;
+ // that it describes the offset from the start of the referent section.
+ // FIXME This logic was written around x86_64 behavior -- ARM64 doesn't
+ // have pcrel section relocations. We may want to factor this out into
+ // the arch-specific .cpp file.
+ assert(target->hasAttr(r.type, RelocAttrBits::BYTE4));
+ referentOffset =
+ sec.addr + relInfo.r_address + 4 + totalAddend - referentSec.addr;
} else {
// The addend for a non-pcrel relocation is its absolute address.
- targetOffset = rawAddend - targetSec.addr;
+ referentOffset = totalAddend - referentSec.addr;
}
- r.target = findContainingSubsection(targetSubsecMap, &targetOffset);
- r.addend = targetOffset;
+ SubsectionMap &referentSubsecMap = subsections[relInfo.r_symbolnum - 1];
+ r.referent = findContainingSubsection(referentSubsecMap, &referentOffset);
+ r.addend = referentOffset;
}
- r.offset = rel.r_address;
- InputSection *subsec = findContainingSubsection(subsecMap, &r.offset);
+ // Find the subsection that this relocation belongs to.
+ // Though not required by the Mach-O format, clang and gcc seem to emit
+ // relocations in order, so let's take advantage of it. However, ld64 emits
+ // unsorted relocations (in `-r` mode), so we have a fallback for that
+ // uncommon case.
+ InputSection *subsec;
+ while (subsecIt != subsecMap.rend() && subsecIt->offset > r.offset)
+ ++subsecIt;
+ if (subsecIt == subsecMap.rend() ||
+ subsecIt->offset + subsecIt->isec->getSize() <= r.offset) {
+ subsec = findContainingSubsection(subsecMap, &r.offset);
+ // Now that we know the relocs are unsorted, avoid trying the 'fast path'
+ // for the other relocations.
+ subsecIt = subsecMap.rend();
+ } else {
+ subsec = subsecIt->isec;
+ r.offset -= subsecIt->offset;
+ }
subsec->relocs.push_back(r);
+
+ if (isSubtrahend) {
+ relocation_info minuendInfo = relInfos[++i];
+ // SUBTRACTOR relocations should always be followed by an UNSIGNED one
+ // attached to the same address.
+ assert(target->hasAttr(minuendInfo.r_type, RelocAttrBits::UNSIGNED) &&
+ relInfo.r_address == minuendInfo.r_address);
+ Reloc p;
+ p.type = minuendInfo.r_type;
+ if (minuendInfo.r_extern) {
+ p.referent = symbols[minuendInfo.r_symbolnum];
+ p.addend = totalAddend;
+ } else {
+ uint64_t referentOffset =
+ totalAddend - sectionHeaders[minuendInfo.r_symbolnum - 1].addr;
+ SubsectionMap &referentSubsecMap =
+ subsections[minuendInfo.r_symbolnum - 1];
+ p.referent =
+ findContainingSubsection(referentSubsecMap, &referentOffset);
+ p.addend = referentOffset;
+ }
+ subsec->relocs.push_back(p);
+ }
}
}
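
To make the ARM64_RELOC_ADDEND pairing described in the comment inside parseRelocations() concrete, here is a hedged standalone sketch (hand-rolled sign extension instead of llvm::SignExtend64): the ADDEND record carries a 24-bit signed value in r_symbolnum that applies to the immediately following BRANCH26/PAGE21/PAGEOFF12 record.

#include <cassert>
#include <cstdint>

// Sign-extend the low 24 bits of `v` to 64 bits.
static int64_t signExtend24(uint32_t v) {
  return static_cast<int64_t>(static_cast<int32_t>(v << 8)) >> 8;
}

int main() {
  assert(signExtend24(0x000010) == 16);      // small positive addend
  assert(signExtend24(0xFFFFF0) == -16);     // negative addend
  assert(signExtend24(0x7FFFFF) == 8388607); // largest positive 24-bit value
  return 0;
}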
-void InputFile::parseSymbols(ArrayRef<structs::nlist_64> nList,
- const char *strtab, bool subsectionsViaSymbols) {
- // resize(), not reserve(), because we are going to create N_ALT_ENTRY symbols
- // out-of-sequence.
- symbols.resize(nList.size());
- std::vector<size_t> altEntrySymIdxs;
+template <class NList>
+static macho::Symbol *createDefined(const NList &sym, StringRef name,
+ InputSection *isec, uint64_t value,
+ uint64_t size) {
+ // Symbol scope is determined by sym.n_type & (N_EXT | N_PEXT):
+ // N_EXT: Global symbols. These go in the symbol table during the link,
+ // and also in the export table of the output so that the dynamic
+ // linker sees them.
+ // N_EXT | N_PEXT: Linkage unit (think: dylib) scoped. These go in the
+ // symbol table during the link so that duplicates are
+ // either reported (for non-weak symbols) or merged
+ // (for weak symbols), but they do not go in the export
+ // table of the output.
+ // N_PEXT: llvm-mc does not emit these, but `ld -r` (wherein ld64 emits
+ // object files) may produce them. LLD does not yet support -r.
+ // These are translation-unit scoped, identical to the `0` case.
+ // 0: Translation-unit scoped. These are not in the symbol table during
+ // link, and not in the export table of the output either.
+ bool isWeakDefCanBeHidden =
+ (sym.n_desc & (N_WEAK_DEF | N_WEAK_REF)) == (N_WEAK_DEF | N_WEAK_REF);
- auto createDefined = [&](const structs::nlist_64 &sym, InputSection *isec,
- uint32_t value) -> Symbol * {
- StringRef name = strtab + sym.n_strx;
- if (sym.n_type & N_EXT)
- // Global defined symbol
- return symtab->addDefined(name, isec, value);
- else
- // Local defined symbol
- return make<Defined>(name, isec, value);
- };
+ if (sym.n_type & N_EXT) {
+ bool isPrivateExtern = sym.n_type & N_PEXT;
+ // lld's behavior for merging symbols is slightly different from ld64:
+ // ld64 picks the winning symbol based on several criteria (see
+ // pickBetweenRegularAtoms() in ld64's SymbolTable.cpp), while lld
+ // just merges metadata and keeps the contents of the first symbol
+ // with that name (see SymbolTable::addDefined). For:
+ // * inline function F in a TU built with -fvisibility-inlines-hidden
+ // * and inline function F in another TU built without that flag
+ // ld64 will pick the one from the file built without
+ // -fvisibility-inlines-hidden.
+ // lld will instead pick the one listed first on the link command line and
+ // give it visibility as if the function was built without
+ // -fvisibility-inlines-hidden.
+ // If both functions have the same contents, this will have the same
+ // behavior. If not, it won't, but the input had an ODR violation in
+ // that case.
+ //
+ // Similarly, merging a symbol
+ // that's isPrivateExtern and not isWeakDefCanBeHidden with one
+ // that's not isPrivateExtern but isWeakDefCanBeHidden technically
+ // should produce one
+ // that's not isPrivateExtern but isWeakDefCanBeHidden. That matters
+ // with ld64's semantics, because it means the non-private-extern
+ // definition will continue to take priority if more private extern
+ // definitions are encountered. With lld's semantics there's no observable
+ // difference between a symbol that's isWeakDefCanBeHidden or one that's
+ // privateExtern -- neither makes it into the dynamic symbol table. So just
+ // promote isWeakDefCanBeHidden to isPrivateExtern here.
+ if (isWeakDefCanBeHidden)
+ isPrivateExtern = true;
- for (size_t i = 0, n = nList.size(); i < n; ++i) {
- const structs::nlist_64 &sym = nList[i];
+ return symtab->addDefined(
+ name, isec->getFile(), isec, value, size, sym.n_desc & N_WEAK_DEF,
+ isPrivateExtern, sym.n_desc & N_ARM_THUMB_DEF,
+ sym.n_desc & REFERENCED_DYNAMICALLY, sym.n_desc & N_NO_DEAD_STRIP);
+ }
- // Undefined symbol
- if (!sym.n_sect) {
- StringRef name = strtab + sym.n_strx;
- symbols[i] = symtab->addUndefined(name);
- continue;
- }
+ assert(!isWeakDefCanBeHidden &&
+ "weak_def_can_be_hidden on already-hidden symbol?");
+ return make<Defined>(
+ name, isec->getFile(), isec, value, size, sym.n_desc & N_WEAK_DEF,
+ /*isExternal=*/false, /*isPrivateExtern=*/false,
+ sym.n_desc & N_ARM_THUMB_DEF, sym.n_desc & REFERENCED_DYNAMICALLY,
+ sym.n_desc & N_NO_DEAD_STRIP);
+}
+
+// Absolute symbols are defined symbols that do not have an associated
+// InputSection. They cannot be weak.
+template <class NList>
+static macho::Symbol *createAbsolute(const NList &sym, InputFile *file,
+ StringRef name) {
+ if (sym.n_type & N_EXT) {
+ return symtab->addDefined(
+ name, file, nullptr, sym.n_value, /*size=*/0,
+ /*isWeakDef=*/false, sym.n_type & N_PEXT, sym.n_desc & N_ARM_THUMB_DEF,
+ /*isReferencedDynamically=*/false, sym.n_desc & N_NO_DEAD_STRIP);
+ }
+ return make<Defined>(name, file, nullptr, sym.n_value, /*size=*/0,
+ /*isWeakDef=*/false,
+ /*isExternal=*/false, /*isPrivateExtern=*/false,
+ sym.n_desc & N_ARM_THUMB_DEF,
+ /*isReferencedDynamically=*/false,
+ sym.n_desc & N_NO_DEAD_STRIP);
+}
+
+template <class NList>
+macho::Symbol *ObjFile::parseNonSectionSymbol(const NList &sym,
+ StringRef name) {
+ uint8_t type = sym.n_type & N_TYPE;
+ switch (type) {
+ case N_UNDF:
+ return sym.n_value == 0
+ ? symtab->addUndefined(name, this, sym.n_desc & N_WEAK_REF)
+ : symtab->addCommon(name, this, sym.n_value,
+ 1 << GET_COMM_ALIGN(sym.n_desc),
+ sym.n_type & N_PEXT);
+ case N_ABS:
+ return createAbsolute(sym, this, name);
+ case N_PBUD:
+ case N_INDR:
+ error("TODO: support symbols of type " + std::to_string(type));
+ return nullptr;
+ case N_SECT:
+ llvm_unreachable(
+ "N_SECT symbols should not be passed to parseNonSectionSymbol");
+ default:
+ llvm_unreachable("invalid symbol type");
+ }
+}
+
+template <class NList>
+static bool isUndef(const NList &sym) {
+ return (sym.n_type & N_TYPE) == N_UNDF && sym.n_value == 0;
+}
- const section_64 &sec = sectionHeaders[sym.n_sect - 1];
- SubsectionMap &subsecMap = subsections[sym.n_sect - 1];
- uint64_t offset = sym.n_value - sec.addr;
+template <class LP>
+void ObjFile::parseSymbols(ArrayRef<typename LP::section> sectionHeaders,
+ ArrayRef<typename LP::nlist> nList,
+ const char *strtab, bool subsectionsViaSymbols) {
+ using NList = typename LP::nlist;
- // If the input file does not use subsections-via-symbols, all symbols can
- // use the same subsection. Otherwise, we must split the sections along
- // symbol boundaries.
- if (!subsectionsViaSymbols) {
- symbols[i] = createDefined(sym, subsecMap[0], offset);
+ // Groups indices of the symbols by the sections that contain them.
+ std::vector<std::vector<uint32_t>> symbolsBySection(subsections.size());
+ symbols.resize(nList.size());
+ SmallVector<unsigned, 32> undefineds;
+ for (uint32_t i = 0; i < nList.size(); ++i) {
+ const NList &sym = nList[i];
+
+ // Ignore debug symbols for now.
+ // FIXME: may need special handling.
+ if (sym.n_type & N_STAB)
continue;
+
+ StringRef name = strtab + sym.n_strx;
+ if ((sym.n_type & N_TYPE) == N_SECT) {
+ SubsectionMap &subsecMap = subsections[sym.n_sect - 1];
+ // parseSections() may have chosen not to parse this section.
+ if (subsecMap.empty())
+ continue;
+ symbolsBySection[sym.n_sect - 1].push_back(i);
+ } else if (isUndef(sym)) {
+ undefineds.push_back(i);
+ } else {
+ symbols[i] = parseNonSectionSymbol(sym, name);
}
+ }
- // nList entries aren't necessarily arranged in address order. Therefore,
- // we can't create alt-entry symbols at this point because a later symbol
- // may split its section, which may affect which subsection the alt-entry
- // symbol is assigned to. So we need to handle them in a second pass below.
- if (sym.n_desc & N_ALT_ENTRY) {
- altEntrySymIdxs.push_back(i);
+ for (size_t i = 0; i < subsections.size(); ++i) {
+ SubsectionMap &subsecMap = subsections[i];
+ if (subsecMap.empty())
continue;
- }
- // Find the subsection corresponding to the greatest section offset that is
- // <= that of the current symbol. The subsection that we find either needs
- // to be used directly or split in two.
- uint32_t firstSize = offset;
- InputSection *firstIsec = findContainingSubsection(subsecMap, &firstSize);
+ std::vector<uint32_t> &symbolIndices = symbolsBySection[i];
+ uint64_t sectionAddr = sectionHeaders[i].addr;
+ uint32_t sectionAlign = 1u << sectionHeaders[i].align;
- if (firstSize == 0) {
- // Alias of an existing symbol, or the first symbol in the section. These
- // are handled by reusing the existing section.
- symbols[i] = createDefined(sym, firstIsec, 0);
+ InputSection *isec = subsecMap.back().isec;
+ // __cfstring has already been split into subsections during
+ // parseSections(), so we simply need to match Symbols to the corresponding
+ // subsection here.
+ if (config->icfLevel != ICFLevel::none && isCfStringSection(isec)) {
+ for (size_t j = 0; j < symbolIndices.size(); ++j) {
+ uint32_t symIndex = symbolIndices[j];
+ const NList &sym = nList[symIndex];
+ StringRef name = strtab + sym.n_strx;
+ uint64_t symbolOffset = sym.n_value - sectionAddr;
+ InputSection *isec = findContainingSubsection(subsecMap, &symbolOffset);
+ if (symbolOffset != 0) {
+ error(toString(this) + ": __cfstring contains symbol " + name +
+ " at misaligned offset");
+ continue;
+ }
+ symbols[symIndex] = createDefined(sym, name, isec, 0, isec->getSize());
+ }
continue;
}
- // We saw a symbol definition at a new offset. Split the section into two
- // subsections. The new symbol uses the second subsection.
- auto *secondIsec = make<InputSection>(*firstIsec);
- secondIsec->data = firstIsec->data.slice(firstSize);
- firstIsec->data = firstIsec->data.slice(0, firstSize);
- // TODO: ld64 appears to preserve the original alignment as well as each
- // subsection's offset from the last aligned address. We should consider
- // emulating that behavior.
- secondIsec->align = MinAlign(firstIsec->align, offset);
+ // Calculate symbol sizes and create subsections by splitting the sections
+ // along symbol boundaries.
+ // We populate subsecMap by repeatedly splitting the last (highest address)
+ // subsection.
+ llvm::stable_sort(symbolIndices, [&](uint32_t lhs, uint32_t rhs) {
+ return nList[lhs].n_value < nList[rhs].n_value;
+ });
+ SubsectionEntry subsecEntry = subsecMap.back();
+ for (size_t j = 0; j < symbolIndices.size(); ++j) {
+ uint32_t symIndex = symbolIndices[j];
+ const NList &sym = nList[symIndex];
+ StringRef name = strtab + sym.n_strx;
+ InputSection *isec = subsecEntry.isec;
+
+ uint64_t subsecAddr = sectionAddr + subsecEntry.offset;
+ size_t symbolOffset = sym.n_value - subsecAddr;
+ uint64_t symbolSize =
+ j + 1 < symbolIndices.size()
+ ? nList[symbolIndices[j + 1]].n_value - sym.n_value
+ : isec->data.size() - symbolOffset;
+ // There are 4 cases where we do not need to create a new subsection:
+ // 1. If the input file does not use subsections-via-symbols.
+ // 2. Multiple symbols at the same address only induce one subsection.
+ // (The symbolOffset == 0 check covers both this case and the first
+ // loop iteration.)
+ // 3. Alternative entry points do not induce new subsections.
+ // 4. If we have a literal section (e.g. __cstring and __literal4).
+ if (!subsectionsViaSymbols || symbolOffset == 0 ||
+ sym.n_desc & N_ALT_ENTRY || !isa<ConcatInputSection>(isec)) {
+ symbols[symIndex] =
+ createDefined(sym, name, isec, symbolOffset, symbolSize);
+ continue;
+ }
+ auto *concatIsec = cast<ConcatInputSection>(isec);
+
+ auto *nextIsec = make<ConcatInputSection>(*concatIsec);
+ nextIsec->numRefs = 0;
+ nextIsec->wasCoalesced = false;
+ if (isZeroFill(isec->getFlags())) {
+ // Zero-fill sections have NULL data.data() but non-zero data.size().
+ nextIsec->data = {nullptr, isec->data.size() - symbolOffset};
+ isec->data = {nullptr, symbolOffset};
+ } else {
+ nextIsec->data = isec->data.slice(symbolOffset);
+ isec->data = isec->data.slice(0, symbolOffset);
+ }
- subsecMap[offset] = secondIsec;
- // By construction, the symbol will be at offset zero in the new section.
- symbols[i] = createDefined(sym, secondIsec, 0);
+ // By construction, the symbol will be at offset zero in the new
+ // subsection.
+ symbols[symIndex] =
+ createDefined(sym, name, nextIsec, /*value=*/0, symbolSize);
+ // TODO: ld64 appears to preserve the original alignment as well as each
+ // subsection's offset from the last aligned address. We should consider
+ // emulating that behavior.
+ nextIsec->align = MinAlign(sectionAlign, sym.n_value);
+ subsecMap.push_back({sym.n_value - sectionAddr, nextIsec});
+ subsecEntry = subsecMap.back();
+ }
}
- for (size_t idx : altEntrySymIdxs) {
- const structs::nlist_64 &sym = nList[idx];
- SubsectionMap &subsecMap = subsections[sym.n_sect - 1];
- uint32_t off = sym.n_value - sectionHeaders[sym.n_sect - 1].addr;
- InputSection *subsec = findContainingSubsection(subsecMap, &off);
- symbols[idx] = createDefined(sym, subsec, off);
+ // Undefined symbols can trigger recursive fetch from Archives due to
+ // LazySymbols. Process defined symbols first so that the relative order
+ // between a defined symbol and an undefined symbol does not change the
+ // symbol resolution behavior. In addition, a set of interconnected symbols
+ // will all be resolved to the same file, instead of being resolved to
+ // different files.
+ for (unsigned i : undefineds) {
+ const NList &sym = nList[i];
+ StringRef name = strtab + sym.n_strx;
+ symbols[i] = parseNonSectionSymbol(sym, name);
}
}
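
A simplified sketch of the symbol-size computation used while splitting along symbol boundaries above (a sorted vector of offsets stands in for the real nlist entries and subsections): each symbol's piece runs from its own offset to the next symbol's offset, and the last piece runs to the end of the section.

#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
  // A 32-byte section with (sorted) symbols at section offsets 0, 8, and 20.
  std::vector<uint64_t> symbolOffsets = {0, 8, 20};
  uint64_t sectionSize = 32;
  for (size_t i = 0; i < symbolOffsets.size(); ++i) {
    uint64_t start = symbolOffsets[i];
    uint64_t end =
        i + 1 < symbolOffsets.size() ? symbolOffsets[i + 1] : sectionSize;
    printf("subsection at offset %llu, size %llu\n",
           (unsigned long long)start, (unsigned long long)(end - start));
  }
  // Prints sizes 8, 12, and 12.
  return 0;
}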
-ObjFile::ObjFile(MemoryBufferRef mb) : InputFile(ObjKind, mb) {
+OpaqueFile::OpaqueFile(MemoryBufferRef mb, StringRef segName,
+ StringRef sectName)
+ : InputFile(OpaqueKind, mb) {
+ const auto *buf = reinterpret_cast<const uint8_t *>(mb.getBufferStart());
+ ArrayRef<uint8_t> data = {buf, mb.getBufferSize()};
+ ConcatInputSection *isec =
+ make<ConcatInputSection>(segName.take_front(16), sectName.take_front(16),
+ /*file=*/this, data);
+ isec->live = true;
+ subsections.push_back({{0, isec}});
+}
+
+ObjFile::ObjFile(MemoryBufferRef mb, uint32_t modTime, StringRef archiveName)
+ : InputFile(ObjKind, mb), modTime(modTime) {
+ this->archiveName = std::string(archiveName);
+ if (target->wordSize == 8)
+ parse<LP64>();
+ else
+ parse<ILP32>();
+}
+
+template <class LP> void ObjFile::parse() {
+ using Header = typename LP::mach_header;
+ using SegmentCommand = typename LP::segment_command;
+ using Section = typename LP::section;
+ using NList = typename LP::nlist;
+
auto *buf = reinterpret_cast<const uint8_t *>(mb.getBufferStart());
- auto *hdr = reinterpret_cast<const mach_header_64 *>(mb.getBufferStart());
+ auto *hdr = reinterpret_cast<const Header *>(mb.getBufferStart());
+
+ Architecture arch = getArchitectureFromCpuType(hdr->cputype, hdr->cpusubtype);
+ if (arch != config->arch()) {
+ error(toString(this) + " has architecture " + getArchitectureName(arch) +
+ " which is incompatible with target architecture " +
+ getArchitectureName(config->arch()));
+ return;
+ }
+
+ if (!checkCompatibility(this))
+ return;
- if (const load_command *cmd = findCommand(hdr, LC_SEGMENT_64)) {
- auto *c = reinterpret_cast<const segment_command_64 *>(cmd);
- sectionHeaders = ArrayRef<section_64>{
- reinterpret_cast<const section_64 *>(c + 1), c->nsects};
+ for (auto *cmd : findCommands<linker_option_command>(hdr, LC_LINKER_OPTION)) {
+ StringRef data{reinterpret_cast<const char *>(cmd + 1),
+ cmd->cmdsize - sizeof(linker_option_command)};
+ parseLCLinkerOption(this, cmd->count, data);
+ }
+
+ ArrayRef<Section> sectionHeaders;
+ if (const load_command *cmd = findCommand(hdr, LP::segmentLCType)) {
+ auto *c = reinterpret_cast<const SegmentCommand *>(cmd);
+ sectionHeaders =
+ ArrayRef<Section>{reinterpret_cast<const Section *>(c + 1), c->nsects};
parseSections(sectionHeaders);
}
// TODO: Error on missing LC_SYMTAB?
if (const load_command *cmd = findCommand(hdr, LC_SYMTAB)) {
auto *c = reinterpret_cast<const symtab_command *>(cmd);
- ArrayRef<structs::nlist_64> nList(
- reinterpret_cast<const structs::nlist_64 *>(buf + c->symoff), c->nsyms);
+ ArrayRef<NList> nList(reinterpret_cast<const NList *>(buf + c->symoff),
+ c->nsyms);
const char *strtab = reinterpret_cast<const char *>(buf) + c->stroff;
bool subsectionsViaSymbols = hdr->flags & MH_SUBSECTIONS_VIA_SYMBOLS;
- parseSymbols(nList, strtab, subsectionsViaSymbols);
+ parseSymbols<LP>(sectionHeaders, nList, strtab, subsectionsViaSymbols);
}
// The relocations may refer to the symbols, so we parse them after we have
// parsed all the symbols.
for (size_t i = 0, n = subsections.size(); i < n; ++i)
- parseRelocations(sectionHeaders[i], subsections[i]);
+ if (!subsections[i].empty())
+ parseRelocations(sectionHeaders, sectionHeaders[i], subsections[i]);
+
+ parseDebugInfo();
+ if (config->emitDataInCodeInfo)
+ parseDataInCode();
+}
+
+void ObjFile::parseDebugInfo() {
+ std::unique_ptr<DwarfObject> dObj = DwarfObject::create(this);
+ if (!dObj)
+ return;
+
+ auto *ctx = make<DWARFContext>(
+ std::move(dObj), "",
+ [&](Error err) {
+ warn(toString(this) + ": " + toString(std::move(err)));
+ },
+ [&](Error warning) {
+ warn(toString(this) + ": " + toString(std::move(warning)));
+ });
+
+ // TODO: Since object files can contain a lot of DWARF info, we should verify
+ // that we are parsing just the info we need
+ const DWARFContext::compile_unit_range &units = ctx->compile_units();
+ // FIXME: There can be more than one compile unit per object file. See
+ // PR48637.
+ auto it = units.begin();
+ compileUnit = it->get();
+}
+
+void ObjFile::parseDataInCode() {
+ const auto *buf = reinterpret_cast<const uint8_t *>(mb.getBufferStart());
+ const load_command *cmd = findCommand(buf, LC_DATA_IN_CODE);
+ if (!cmd)
+ return;
+ const auto *c = reinterpret_cast<const linkedit_data_command *>(cmd);
+ dataInCodeEntries = {
+ reinterpret_cast<const data_in_code_entry *>(buf + c->dataoff),
+ c->datasize / sizeof(data_in_code_entry)};
+ assert(is_sorted(dataInCodeEntries, [](const data_in_code_entry &lhs,
+ const data_in_code_entry &rhs) {
+ return lhs.offset < rhs.offset;
+ }));
+}
+
+// The path can point to either a dylib or a .tbd file.
+static DylibFile *loadDylib(StringRef path, DylibFile *umbrella) {
+ Optional<MemoryBufferRef> mbref = readFile(path);
+ if (!mbref) {
+ error("could not read dylib file at " + path);
+ return nullptr;
+ }
+ return loadDylib(*mbref, umbrella);
}
-DylibFile::DylibFile(MemoryBufferRef mb, DylibFile *umbrella)
- : InputFile(DylibKind, mb) {
+// TBD files are parsed into a series of TAPI documents (InterfaceFiles), with
+// the first document storing child pointers to the rest of them. When we are
+// processing a given TBD file, we store that top-level document in
+// currentTopLevelTapi. When processing re-exports, we search its children for
+// potentially matching documents in the same TBD file. Note that the children
+// themselves don't point to further documents, i.e. this is a two-level tree.
+//
+// Re-exports can either refer to on-disk files, or to documents within .tbd
+// files.
+static DylibFile *findDylib(StringRef path, DylibFile *umbrella,
+ const InterfaceFile *currentTopLevelTapi) {
+ // Search order:
+ // 1. Install name basename in -F / -L directories.
+ {
+ StringRef stem = path::stem(path);
+ SmallString<128> frameworkName;
+ path::append(frameworkName, path::Style::posix, stem + ".framework", stem);
+ bool isFramework = path.endswith(frameworkName);
+ if (isFramework) {
+ for (StringRef dir : config->frameworkSearchPaths) {
+ SmallString<128> candidate = dir;
+ path::append(candidate, frameworkName);
+ if (Optional<std::string> dylibPath = resolveDylibPath(candidate))
+ return loadDylib(*dylibPath, umbrella);
+ }
+ } else if (Optional<StringRef> dylibPath = findPathCombination(
+ stem, config->librarySearchPaths, {".tbd", ".dylib"}))
+ return loadDylib(*dylibPath, umbrella);
+ }
+
+ // 2. As absolute path.
+ if (path::is_absolute(path, path::Style::posix))
+ for (StringRef root : config->systemLibraryRoots)
+ if (Optional<std::string> dylibPath =
+ resolveDylibPath((root + path).str()))
+ return loadDylib(*dylibPath, umbrella);
+
+ // 3. As relative path.
+
+ // TODO: Handle -dylib_file
+
+ // Replace @executable_path, @loader_path, @rpath prefixes in install name.
+ SmallString<128> newPath;
+ if (config->outputType == MH_EXECUTE &&
+ path.consume_front("@executable_path/")) {
+ // ld64 allows overriding this with the undocumented flag -executable_path.
+ // lld doesn't currently implement that flag.
+ // FIXME: Consider using finalOutput instead of outputFile.
+ path::append(newPath, path::parent_path(config->outputFile), path);
+ path = newPath;
+ } else if (path.consume_front("@loader_path/")) {
+ fs::real_path(umbrella->getName(), newPath);
+ path::remove_filename(newPath);
+ path::append(newPath, path);
+ path = newPath;
+ } else if (path.startswith("@rpath/")) {
+ for (StringRef rpath : umbrella->rpaths) {
+ newPath.clear();
+ if (rpath.consume_front("@loader_path/")) {
+ fs::real_path(umbrella->getName(), newPath);
+ path::remove_filename(newPath);
+ }
+ path::append(newPath, rpath, path.drop_front(strlen("@rpath/")));
+ if (Optional<std::string> dylibPath = resolveDylibPath(newPath))
+ return loadDylib(*dylibPath, umbrella);
+ }
+ }
+
+ // FIXME: Should this be further up?
+ if (currentTopLevelTapi) {
+ for (InterfaceFile &child :
+ make_pointee_range(currentTopLevelTapi->documents())) {
+ assert(child.documents().empty());
+ if (path == child.getInstallName()) {
+ auto file = make<DylibFile>(child, umbrella);
+ file->parseReexports(child);
+ return file;
+ }
+ }
+ }
+
+ if (Optional<std::string> dylibPath = resolveDylibPath(path))
+ return loadDylib(*dylibPath, umbrella);
+
+ return nullptr;
+}
+
+// If a re-exported dylib is public (lives in /usr/lib or
+// /System/Library/Frameworks), then it is considered implicitly linked: we
+// should bind to its symbols directly instead of via the re-exporting umbrella
+// library.
+static bool isImplicitlyLinked(StringRef path) {
+ if (!config->implicitDylibs)
+ return false;
+
+ if (path::parent_path(path) == "/usr/lib")
+ return true;
+
+ // Match /System/Library/Frameworks/$FOO.framework/**/$FOO
+ if (path.consume_front("/System/Library/Frameworks/")) {
+ StringRef frameworkName = path.take_until([](char c) { return c == '.'; });
+ return path::filename(path) == frameworkName;
+ }
+
+ return false;
+}
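
A hedged standalone check mirroring the framework-path pattern described above (the helper name and test paths are illustrative, not lld's API): a dylib under /System/Library/Frameworks is treated as implicitly linked when the framework name matches the binary's file name.

#include <cassert>
#include <string>

static bool matchesFramework(std::string path) {
  const std::string prefix = "/System/Library/Frameworks/";
  if (path.compare(0, prefix.size(), prefix) != 0)
    return false;
  path.erase(0, prefix.size());
  // Framework name is everything before the first '.'; compare it against the
  // final path component.
  std::string framework = path.substr(0, path.find('.'));
  std::string filename = path.substr(path.find_last_of('/') + 1);
  return filename == framework;
}

int main() {
  assert(matchesFramework(
      "/System/Library/Frameworks/Foo.framework/Versions/A/Foo"));
  assert(!matchesFramework(
      "/System/Library/Frameworks/Foo.framework/Versions/A/Bar"));
  return 0;
}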
+
+static void loadReexport(StringRef path, DylibFile *umbrella,
+ const InterfaceFile *currentTopLevelTapi) {
+ DylibFile *reexport = findDylib(path, umbrella, currentTopLevelTapi);
+ if (!reexport)
+ error("unable to locate re-export with install name " + path);
+}
+
+DylibFile::DylibFile(MemoryBufferRef mb, DylibFile *umbrella,
+ bool isBundleLoader)
+ : InputFile(DylibKind, mb), refState(RefState::Unreferenced),
+ isBundleLoader(isBundleLoader) {
+ assert(!isBundleLoader || !umbrella);
if (umbrella == nullptr)
umbrella = this;
+ this->umbrella = umbrella;
auto *buf = reinterpret_cast<const uint8_t *>(mb.getBufferStart());
- auto *hdr = reinterpret_cast<const mach_header_64 *>(mb.getBufferStart());
+ auto *hdr = reinterpret_cast<const mach_header *>(mb.getBufferStart());
- // Initialize dylibName.
+ // Initialize installName.
if (const load_command *cmd = findCommand(hdr, LC_ID_DYLIB)) {
auto *c = reinterpret_cast<const dylib_command *>(cmd);
- dylibName = reinterpret_cast<const char *>(cmd) + read32le(&c->dylib.name);
- } else {
- error("dylib " + getName() + " missing LC_ID_DYLIB load command");
+ currentVersion = read32le(&c->dylib.current_version);
+ compatibilityVersion = read32le(&c->dylib.compatibility_version);
+ installName =
+ reinterpret_cast<const char *>(cmd) + read32le(&c->dylib.name);
+ } else if (!isBundleLoader) {
+ // macho_executable and macho_bundle don't have LC_ID_DYLIB,
+ // so it's OK.
+ error("dylib " + toString(this) + " missing LC_ID_DYLIB load command");
return;
}
+ if (config->printEachFile)
+ message(toString(this));
+ inputFiles.insert(this);
+
+ deadStrippable = hdr->flags & MH_DEAD_STRIPPABLE_DYLIB;
+
+ if (!checkCompatibility(this))
+ return;
+
+ checkAppExtensionSafety(hdr->flags & MH_APP_EXTENSION_SAFE);
+
+ for (auto *cmd : findCommands<rpath_command>(hdr, LC_RPATH)) {
+ StringRef rpath{reinterpret_cast<const char *>(cmd) + cmd->path};
+ rpaths.push_back(rpath);
+ }
+
// Initialize symbols.
+ exportingFile = isImplicitlyLinked(installName) ? this : this->umbrella;
if (const load_command *cmd = findCommand(hdr, LC_DYLD_INFO_ONLY)) {
auto *c = reinterpret_cast<const dyld_info_command *>(cmd);
parseTrie(buf + c->export_off, c->export_size,
[&](const Twine &name, uint64_t flags) {
- symbols.push_back(symtab->addDylib(saver.save(name), umbrella));
+ StringRef savedName = saver.save(name);
+ if (handleLDSymbol(savedName))
+ return;
+ bool isWeakDef = flags & EXPORT_SYMBOL_FLAGS_WEAK_DEFINITION;
+ bool isTlv = flags & EXPORT_SYMBOL_FLAGS_KIND_THREAD_LOCAL;
+ symbols.push_back(symtab->addDylib(savedName, exportingFile,
+ isWeakDef, isTlv));
});
} else {
- error("LC_DYLD_INFO_ONLY not found in " + getName());
+ error("LC_DYLD_INFO_ONLY not found in " + toString(this));
return;
}
+}
- if (hdr->flags & MH_NO_REEXPORTED_DYLIBS)
- return;
-
- const uint8_t *p =
- reinterpret_cast<const uint8_t *>(hdr) + sizeof(mach_header_64);
+void DylibFile::parseLoadCommands(MemoryBufferRef mb) {
+ auto *hdr = reinterpret_cast<const mach_header *>(mb.getBufferStart());
+ const uint8_t *p = reinterpret_cast<const uint8_t *>(mb.getBufferStart()) +
+ target->headerSize;
for (uint32_t i = 0, n = hdr->ncmds; i < n; ++i) {
auto *cmd = reinterpret_cast<const load_command *>(p);
p += cmd->cmdsize;
- if (cmd->cmd != LC_REEXPORT_DYLIB)
- continue;
- auto *c = reinterpret_cast<const dylib_command *>(cmd);
- StringRef reexportPath =
- reinterpret_cast<const char *>(c) + read32le(&c->dylib.name);
- // TODO: Expand @loader_path, @executable_path etc in reexportPath
- Optional<MemoryBufferRef> buffer = readFile(reexportPath);
- if (!buffer) {
- error("unable to read re-exported dylib at " + reexportPath);
- return;
+ if (!(hdr->flags & MH_NO_REEXPORTED_DYLIBS) &&
+ cmd->cmd == LC_REEXPORT_DYLIB) {
+ const auto *c = reinterpret_cast<const dylib_command *>(cmd);
+ StringRef reexportPath =
+ reinterpret_cast<const char *>(c) + read32le(&c->dylib.name);
+ loadReexport(reexportPath, exportingFile, nullptr);
+ }
+
+ // FIXME: What about LC_LOAD_UPWARD_DYLIB, LC_LAZY_LOAD_DYLIB,
+ // LC_LOAD_WEAK_DYLIB, LC_REEXPORT_DYLIB (..are reexports from dylibs with
+ // MH_NO_REEXPORTED_DYLIBS loaded for -flat_namespace)?
+ if (config->namespaceKind == NamespaceKind::flat &&
+ cmd->cmd == LC_LOAD_DYLIB) {
+ const auto *c = reinterpret_cast<const dylib_command *>(cmd);
+ StringRef dylibPath =
+ reinterpret_cast<const char *>(c) + read32le(&c->dylib.name);
+ DylibFile *dylib = findDylib(dylibPath, umbrella, nullptr);
+ if (!dylib)
+ error(Twine("unable to locate library '") + dylibPath +
+ "' loaded from '" + toString(this) + "' for -flat_namespace");
}
- reexported.push_back(make<DylibFile>(*buffer, umbrella));
}
}
-DylibFile::DylibFile(std::shared_ptr<llvm::MachO::InterfaceFile> interface,
- DylibFile *umbrella)
- : InputFile(DylibKind, MemoryBufferRef()) {
+// Some versions of Xcode ship with .tbd files that don't have the right
+// platform settings.
+static constexpr std::array<StringRef, 3> skipPlatformChecks{
+ "/usr/lib/system/libsystem_kernel.dylib",
+ "/usr/lib/system/libsystem_platform.dylib",
+ "/usr/lib/system/libsystem_pthread.dylib"};
+
+DylibFile::DylibFile(const InterfaceFile &interface, DylibFile *umbrella,
+ bool isBundleLoader)
+ : InputFile(DylibKind, interface), refState(RefState::Unreferenced),
+ isBundleLoader(isBundleLoader) {
+ // FIXME: Add test for the missing TBD code path.
+
if (umbrella == nullptr)
umbrella = this;
+ this->umbrella = umbrella;
+
+ installName = saver.save(interface.getInstallName());
+ compatibilityVersion = interface.getCompatibilityVersion().rawValue();
+ currentVersion = interface.getCurrentVersion().rawValue();
- dylibName = saver.save(interface->getInstallName());
+ if (config->printEachFile)
+ message(toString(this));
+ inputFiles.insert(this);
+
+ if (!is_contained(skipPlatformChecks, installName) &&
+ !is_contained(interface.targets(), config->platformInfo.target)) {
+ error(toString(this) + " is incompatible with " +
+ std::string(config->platformInfo.target));
+ return;
+ }
+
+ checkAppExtensionSafety(interface.isApplicationExtensionSafe());
+
+ exportingFile = isImplicitlyLinked(installName) ? this : umbrella;
+ auto addSymbol = [&](const Twine &name) -> void {
+ symbols.push_back(symtab->addDylib(saver.save(name), exportingFile,
+ /*isWeakDef=*/false,
+ /*isTlv=*/false));
+ };
// TODO(compnerd) filter out symbols based on the target platform
- for (const auto symbol : interface->symbols())
- if (symbol->getArchitectures().has(config->arch))
- symbols.push_back(
- symtab->addDylib(saver.save(symbol->getName()), umbrella));
- // TODO(compnerd) properly represent the hierarchy of the documents as it is
- // in theory possible to have re-exported dylibs from re-exported dylibs which
- // should be parent'ed to the child.
- for (auto document : interface->documents())
- reexported.push_back(make<DylibFile>(document, umbrella));
-}
-
-ArchiveFile::ArchiveFile(std::unique_ptr<llvm::object::Archive> &&f)
+ // TODO: handle weak defs, thread locals
+ for (const auto *symbol : interface.symbols()) {
+ if (!symbol->getArchitectures().has(config->arch()))
+ continue;
+
+ if (handleLDSymbol(symbol->getName()))
+ continue;
+
+ switch (symbol->getKind()) {
+ case SymbolKind::GlobalSymbol:
+ addSymbol(symbol->getName());
+ break;
+ case SymbolKind::ObjectiveCClass:
+ // XXX ld64 only creates these symbols when -ObjC is passed in. We may
+ // want to emulate that.
+ addSymbol(objc::klass + symbol->getName());
+ addSymbol(objc::metaclass + symbol->getName());
+ break;
+ case SymbolKind::ObjectiveCClassEHType:
+ addSymbol(objc::ehtype + symbol->getName());
+ break;
+ case SymbolKind::ObjectiveCInstanceVariable:
+ addSymbol(objc::ivar + symbol->getName());
+ break;
+ }
+ }
+}
+
+void DylibFile::parseReexports(const InterfaceFile &interface) {
+ const InterfaceFile *topLevel =
+ interface.getParent() == nullptr ? &interface : interface.getParent();
+ for (InterfaceFileRef intfRef : interface.reexportedLibraries()) {
+ InterfaceFile::const_target_range targets = intfRef.targets();
+ if (is_contained(skipPlatformChecks, intfRef.getInstallName()) ||
+ is_contained(targets, config->platformInfo.target))
+ loadReexport(intfRef.getInstallName(), exportingFile, topLevel);
+ }
+}
+
+// $ld$ symbols modify the properties/behavior of the library for specific
+// target versions (e.g. they can change its install name or compatibility
+// version, or hide/add symbols).
+bool DylibFile::handleLDSymbol(StringRef originalName) {
+ if (!originalName.startswith("$ld$"))
+ return false;
+
+ StringRef action;
+ StringRef name;
+ std::tie(action, name) = originalName.drop_front(strlen("$ld$")).split('$');
+ if (action == "previous")
+ handleLDPreviousSymbol(name, originalName);
+ else if (action == "install_name")
+ handleLDInstallNameSymbol(name, originalName);
+ return true;
+}
+
+void DylibFile::handleLDPreviousSymbol(StringRef name, StringRef originalName) {
+ // originalName: $ld$ previous $ <installname> $ <compatversion> $
+ // <platformstr> $ <startversion> $ <endversion> $ <symbol-name> $
+ StringRef installName;
+ StringRef compatVersion;
+ StringRef platformStr;
+ StringRef startVersion;
+ StringRef endVersion;
+ StringRef symbolName;
+ StringRef rest;
+
+ std::tie(installName, name) = name.split('$');
+ std::tie(compatVersion, name) = name.split('$');
+ std::tie(platformStr, name) = name.split('$');
+ std::tie(startVersion, name) = name.split('$');
+ std::tie(endVersion, name) = name.split('$');
+ std::tie(symbolName, rest) = name.split('$');
+ // TODO: ld64 contains some logic for non-empty symbolName as well.
+ if (!symbolName.empty())
+ return;
+ unsigned platform;
+ if (platformStr.getAsInteger(10, platform) ||
+ platform != static_cast<unsigned>(config->platform()))
+ return;
+
+ VersionTuple start;
+ if (start.tryParse(startVersion)) {
+ warn("failed to parse start version, symbol '" + originalName +
+ "' ignored");
+ return;
+ }
+ VersionTuple end;
+ if (end.tryParse(endVersion)) {
+ warn("failed to parse end version, symbol '" + originalName + "' ignored");
+ return;
+ }
+ if (config->platformInfo.minimum < start ||
+ config->platformInfo.minimum >= end)
+ return;
+
+ this->installName = saver.save(installName);
+
+ if (!compatVersion.empty()) {
+ VersionTuple cVersion;
+ if (cVersion.tryParse(compatVersion)) {
+ warn("failed to parse compatibility version, symbol '" + originalName +
+ "' ignored");
+ return;
+ }
+ compatibilityVersion = encodeVersion(cVersion);
+ }
+}
+
+void DylibFile::handleLDInstallNameSymbol(StringRef name,
+ StringRef originalName) {
+ // originalName: $ld$ install_name $ os<version> $ install_name
+ StringRef condition, installName;
+ std::tie(condition, installName) = name.split('$');
+ VersionTuple version;
+ if (!condition.consume_front("os") || version.tryParse(condition))
+ warn("failed to parse os version, symbol '" + originalName + "' ignored");
+ else if (version == config->platformInfo.minimum)
+ this->installName = saver.save(installName);
+}
+
+void DylibFile::checkAppExtensionSafety(bool dylibIsAppExtensionSafe) const {
+ if (config->applicationExtension && !dylibIsAppExtensionSafe)
+ warn("using '-application_extension' with unsafe dylib: " + toString(this));
+}
+
+ArchiveFile::ArchiveFile(std::unique_ptr<object::Archive> &&f)
: InputFile(ArchiveKind, f->getMemoryBufferRef()), file(std::move(f)) {
for (const object::Archive::Symbol &sym : file->symbols())
symtab->addLazy(sym.getName(), this, sym);
object::Archive::Child c =
CHECK(sym.getMember(), toString(this) +
": could not get the member for symbol " +
- sym.getName());
+ toMachOString(sym));
if (!seen.insert(c.getChildOffset()).second)
return;
CHECK(c.getMemoryBufferRef(),
toString(this) +
": could not get the buffer for the member defining symbol " +
- sym.getName());
- auto file = make<ObjFile>(mb);
- symbols.insert(symbols.end(), file->symbols.begin(), file->symbols.end());
- subsections.insert(subsections.end(), file->subsections.begin(),
- file->subsections.end());
+ toMachOString(sym));
+
+ if (tar && c.getParent()->isThin())
+ tar->append(relativeToRoot(CHECK(c.getFullName(), this)), mb.getBuffer());
+
+ uint32_t modTime = toTimeT(
+ CHECK(c.getLastModified(), toString(this) +
+ ": could not get the modification time "
+ "for the member defining symbol " +
+ toMachOString(sym)));
+
+ // `sym` is owned by a LazySym, which will be replace<>()d by make<ObjFile>
+ // and become invalid after that call. Copy it to the stack so we can refer
+ // to it later.
+ const object::Archive::Symbol symCopy = sym;
+
+ if (Optional<InputFile *> file = loadArchiveMember(
+ mb, modTime, getName(), /*objCOnly=*/false, c.getChildOffset())) {
+ inputFiles.insert(*file);
+ // ld64 doesn't demangle sym here even with -demangle.
+ // Match that: intentionally don't call toMachOString().
+ printArchiveMemberLoad(symCopy.getName(), *file);
+ }
}
-// Returns "<internal>" or "baz.o".
-std::string lld::toString(const InputFile *file) {
- return file ? std::string(file->getName()) : "<internal>";
+static macho::Symbol *createBitcodeSymbol(const lto::InputFile::Symbol &objSym,
+ BitcodeFile &file) {
+ StringRef name = saver.save(objSym.getName());
+
+ // TODO: support weak references
+ if (objSym.isUndefined())
+ return symtab->addUndefined(name, &file, /*isWeakRef=*/false);
+
+ assert(!objSym.isCommon() && "TODO: support common symbols in LTO");
+
+ // TODO: Write a test demonstrating why computing isPrivateExtern before
+ // LTO compilation is important.
+ bool isPrivateExtern = false;
+ switch (objSym.getVisibility()) {
+ case GlobalValue::HiddenVisibility:
+ isPrivateExtern = true;
+ break;
+ case GlobalValue::ProtectedVisibility:
+ error(name + " has protected visibility, which is not supported by Mach-O");
+ break;
+ case GlobalValue::DefaultVisibility:
+ break;
+ }
+
+ return symtab->addDefined(name, &file, /*isec=*/nullptr, /*value=*/0,
+ /*size=*/0, objSym.isWeak(), isPrivateExtern,
+ /*isThumb=*/false,
+ /*isReferencedDynamically=*/false,
+ /*noDeadStrip=*/false);
+}
+
+BitcodeFile::BitcodeFile(MemoryBufferRef mb, StringRef archiveName,
+ uint64_t offsetInArchive)
+ : InputFile(BitcodeKind, mb) {
+ std::string path = mb.getBufferIdentifier().str();
+ // ThinLTO assumes that all MemoryBufferRefs given to it have a unique
+ // name. If two members with the same name are provided, this causes a
+ // collision and ThinLTO can't proceed.
+ // So, we append the archive name to disambiguate two members with the same
+ // name from multiple different archives, and offset within the archive to
+ // disambiguate two members of the same name from a single archive.
+ MemoryBufferRef mbref(
+ mb.getBuffer(),
+ saver.save(archiveName.empty() ? path
+ : archiveName + sys::path::filename(path) +
+ utostr(offsetInArchive)));
+
+ obj = check(lto::InputFile::create(mbref));
+
+ // Convert LTO Symbols to LLD Symbols in order to perform resolution. The
+ // "winning" symbol will then be marked as Prevailing at LTO compilation
+ // time.
+ for (const lto::InputFile::Symbol &objSym : obj->symbols())
+ symbols.push_back(createBitcodeSymbol(objSym, *this));
}
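
An illustrative sketch of the identifier scheme described in the comment above (archive name + member file name + offset within the archive). The helper and the archive/member names are hypothetical; the real constructor uses llvm::sys::path::filename and utostr rather than hand-rolled string handling:

#include <cstdint>
#include <iostream>
#include <string>

// Mirrors the disambiguation scheme in the BitcodeFile constructor above:
// archive name + member file name + offset, so same-named members from
// different archives (or from one archive) get distinct buffer identifiers.
static std::string bufferId(const std::string &archiveName,
                            const std::string &memberPath,
                            uint64_t offsetInArchive) {
  if (archiveName.empty())
    return memberPath;
  size_t slash = memberPath.find_last_of('/');
  std::string filename =
      slash == std::string::npos ? memberPath : memberPath.substr(slash + 1);
  return archiveName + filename + std::to_string(offsetInArchive);
}

int main() {
  // Hypothetical archives, each containing a member named "foo.o".
  std::cout << bufferId("liba.a", "foo.o", 128) << '\n'; // liba.afoo.o128
  std::cout << bufferId("libb.a", "foo.o", 640) << '\n'; // libb.afoo.o640
}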
+
+template void ObjFile::parse<LP64>();
#define LLD_MACHO_INPUT_FILES_H
#include "MachOStructs.h"
+#include "Target.h"
#include "lld/Common/LLVM.h"
+#include "lld/Common/Memory.h"
#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/SetVector.h"
#include "llvm/BinaryFormat/MachO.h"
+#include "llvm/DebugInfo/DWARF/DWARFUnit.h"
#include "llvm/Object/Archive.h"
#include "llvm/Support/MemoryBuffer.h"
-#include "llvm/TextAPI/MachO/InterfaceFile.h"
-#include "llvm/TextAPI/MachO/TextAPIReader.h"
+#include "llvm/TextAPI/TextAPIReader.h"
-#include <map>
#include <vector>
+namespace llvm {
+namespace lto {
+class InputFile;
+} // namespace lto
+namespace MachO {
+class InterfaceFile;
+} // namespace MachO
+class TarWriter;
+} // namespace llvm
+
namespace lld {
namespace macho {
-class InputSection;
+struct PlatformInfo;
+class ConcatInputSection;
class Symbol;
struct Reloc;
+enum class RefState : uint8_t;
+
+// If --reproduce option is given, all input files are written
+// to this tar archive.
+extern std::unique_ptr<llvm::TarWriter> tar;
// If .subsections_via_symbols is set, each InputSection will be split along
-// symbol boundaries. The keys of a SubsectionMap represent the offsets of
-// each subsection from the start of the original pre-split InputSection.
-using SubsectionMap = std::map<uint32_t, InputSection *>;
+// symbol boundaries. The field offset represents the offset of the subsection
+// from the start of the original pre-split InputSection.
+struct SubsectionEntry {
+ uint64_t offset;
+ InputSection *isec;
+};
+using SubsectionMap = std::vector<SubsectionEntry>;
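
A possible lookup over this representation, assuming the entries are kept sorted by ascending offset (as they are when produced from symbols sorted by address); the helper name is hypothetical and not part of the patch:

#include <algorithm>
#include <cstdint>
#include <vector>

struct InputSection; // opaque in this sketch
struct SubsectionEntry { uint64_t offset; InputSection *isec; };
using SubsectionMap = std::vector<SubsectionEntry>;

// Hypothetical helper: return the entry whose offset is the greatest value
// less than or equal to `off`, i.e. the subsection covering that offset.
static const SubsectionEntry *findSubsection(const SubsectionMap &map,
                                             uint64_t off) {
  auto it = std::upper_bound(
      map.begin(), map.end(), off,
      [](uint64_t o, const SubsectionEntry &e) { return o < e.offset; });
  return it == map.begin() ? nullptr : &*(it - 1);
}

int main() {
  SubsectionMap map = {{0, nullptr}, {16, nullptr}, {64, nullptr}};
  const SubsectionEntry *e = findSubsection(map, 20);
  return e && e->offset == 16 ? 0 : 1;
}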
class InputFile {
public:
enum Kind {
ObjKind,
+ OpaqueKind,
DylibKind,
ArchiveKind,
+ BitcodeKind,
};
virtual ~InputFile() = default;
Kind kind() const { return fileKind; }
- StringRef getName() const { return mb.getBufferIdentifier(); }
+ StringRef getName() const { return name; }
MemoryBufferRef mb;
+
std::vector<Symbol *> symbols;
- ArrayRef<llvm::MachO::section_64> sectionHeaders;
std::vector<SubsectionMap> subsections;
+ // Provides an easy way to sort InputFiles deterministically.
+ const int id;
-protected:
- InputFile(Kind kind, MemoryBufferRef mb) : mb(mb), fileKind(kind) {}
-
- void parseSections(ArrayRef<llvm::MachO::section_64>);
+ // If not empty, this stores the name of the archive containing this file.
+ // We use this string for creating error messages.
+ std::string archiveName;
- void parseSymbols(ArrayRef<lld::structs::nlist_64> nList, const char *strtab,
- bool subsectionsViaSymbols);
+protected:
+ InputFile(Kind kind, MemoryBufferRef mb)
+ : mb(mb), id(idCount++), fileKind(kind), name(mb.getBufferIdentifier()) {}
- void parseRelocations(const llvm::MachO::section_64 &, SubsectionMap &);
+ InputFile(Kind, const llvm::MachO::InterfaceFile &);
private:
const Kind fileKind;
+ const StringRef name;
+
+ static int idCount;
};
// .o file
-class ObjFile : public InputFile {
+class ObjFile final : public InputFile {
public:
- explicit ObjFile(MemoryBufferRef mb);
+ ObjFile(MemoryBufferRef mb, uint32_t modTime, StringRef archiveName);
static bool classof(const InputFile *f) { return f->kind() == ObjKind; }
+
+ llvm::DWARFUnit *compileUnit = nullptr;
+ const uint32_t modTime;
+ std::vector<ConcatInputSection *> debugSections;
+ ArrayRef<llvm::MachO::data_in_code_entry> dataInCodeEntries;
+
+private:
+ template <class LP> void parse();
+ template <class Section> void parseSections(ArrayRef<Section>);
+ template <class LP>
+ void parseSymbols(ArrayRef<typename LP::section> sectionHeaders,
+ ArrayRef<typename LP::nlist> nList, const char *strtab,
+ bool subsectionsViaSymbols);
+ template <class NList>
+ Symbol *parseNonSectionSymbol(const NList &sym, StringRef name);
+ template <class Section>
+ void parseRelocations(ArrayRef<Section> sectionHeaders, const Section &,
+ SubsectionMap &);
+ void parseDebugInfo();
+ void parseDataInCode();
};
-// .dylib file
-class DylibFile : public InputFile {
+// command-line -sectcreate file
+class OpaqueFile final : public InputFile {
public:
- explicit DylibFile(std::shared_ptr<llvm::MachO::InterfaceFile> interface,
- DylibFile *umbrella = nullptr);
+ OpaqueFile(MemoryBufferRef mb, StringRef segName, StringRef sectName);
+ static bool classof(const InputFile *f) { return f->kind() == OpaqueKind; }
+};
+// .dylib or .tbd file
+class DylibFile final : public InputFile {
+public:
// Mach-O dylibs can re-export other dylibs as sub-libraries, meaning that the
// symbols in those sub-libraries will be available under the umbrella
// library's namespace. Those sub-libraries can also have their own
// the root dylib to ensure symbols in the child library are correctly bound
// to the root. On the other hand, if a dylib is being directly loaded
// (through an -lfoo flag), then `umbrella` should be a nullptr.
- explicit DylibFile(MemoryBufferRef mb, DylibFile *umbrella = nullptr);
+ explicit DylibFile(MemoryBufferRef mb, DylibFile *umbrella,
+ bool isBundleLoader = false);
+ explicit DylibFile(const llvm::MachO::InterfaceFile &interface,
+ DylibFile *umbrella = nullptr,
+ bool isBundleLoader = false);
+
+ void parseLoadCommands(MemoryBufferRef mb);
+ void parseReexports(const llvm::MachO::InterfaceFile &interface);
static bool classof(const InputFile *f) { return f->kind() == DylibKind; }
- StringRef dylibName;
- uint64_t ordinal = 0; // Ordinal numbering starts from 1, so 0 is a sentinel
+ StringRef installName;
+ DylibFile *exportingFile = nullptr;
+ DylibFile *umbrella;
+ SmallVector<StringRef, 2> rpaths;
+ uint32_t compatibilityVersion = 0;
+ uint32_t currentVersion = 0;
+ int64_t ordinal = 0; // Ordinal numbering starts from 1, so 0 is a sentinel
+ RefState refState;
bool reexport = false;
- std::vector<DylibFile *> reexported;
+ bool forceNeeded = false;
+ bool forceWeakImport = false;
+ bool deadStrippable = false;
+ bool explicitlyLinked = false;
+
+ unsigned numReferencedSymbols = 0;
+
+ bool isReferenced() const { return numReferencedSymbols > 0; }
+
+ // An executable can be used as a bundle loader that will load the output
+ // file being linked, and that contains symbols referenced, but not
+ // implemented in the bundle. When used like this, it is very similar
+  // to a Dylib, so we reuse the same class to represent it.
+ bool isBundleLoader;
+
+private:
+ bool handleLDSymbol(StringRef originalName);
+ void handleLDPreviousSymbol(StringRef name, StringRef originalName);
+ void handleLDInstallNameSymbol(StringRef name, StringRef originalName);
+ void checkAppExtensionSafety(bool dylibIsAppExtensionSafe) const;
};
// .a file
-class ArchiveFile : public InputFile {
+class ArchiveFile final : public InputFile {
public:
explicit ArchiveFile(std::unique_ptr<llvm::object::Archive> &&file);
static bool classof(const InputFile *f) { return f->kind() == ArchiveKind; }
llvm::DenseSet<uint64_t> seen;
};
-extern std::vector<InputFile *> inputFiles;
+class BitcodeFile final : public InputFile {
+public:
+ explicit BitcodeFile(MemoryBufferRef mb, StringRef archiveName,
+ uint64_t offsetInArchive);
+ static bool classof(const InputFile *f) { return f->kind() == BitcodeKind; }
+
+ std::unique_ptr<llvm::lto::InputFile> obj;
+};
+
+extern llvm::SetVector<InputFile *> inputFiles;
llvm::Optional<MemoryBufferRef> readFile(StringRef path);
+namespace detail {
+
+template <class CommandType, class... Types>
+std::vector<const CommandType *>
+findCommands(const void *anyHdr, size_t maxCommands, Types... types) {
+ std::vector<const CommandType *> cmds;
+ std::initializer_list<uint32_t> typesList{types...};
+ const auto *hdr = reinterpret_cast<const llvm::MachO::mach_header *>(anyHdr);
+ const uint8_t *p =
+ reinterpret_cast<const uint8_t *>(hdr) + target->headerSize;
+ for (uint32_t i = 0, n = hdr->ncmds; i < n; ++i) {
+ auto *cmd = reinterpret_cast<const CommandType *>(p);
+ if (llvm::is_contained(typesList, cmd->cmd)) {
+ cmds.push_back(cmd);
+ if (cmds.size() == maxCommands)
+ return cmds;
+ }
+ p += cmd->cmdsize;
+ }
+ return cmds;
+}
+
+} // namespace detail
+
+// anyHdr should be a pointer to either mach_header or mach_header_64
+template <class CommandType = llvm::MachO::load_command, class... Types>
+const CommandType *findCommand(const void *anyHdr, Types... types) {
+ std::vector<const CommandType *> cmds =
+ detail::findCommands<CommandType>(anyHdr, 1, types...);
+ return cmds.size() ? cmds[0] : nullptr;
+}
+
+template <class CommandType = llvm::MachO::load_command, class... Types>
+std::vector<const CommandType *> findCommands(const void *anyHdr,
+ Types... types) {
+ return detail::findCommands<CommandType>(anyHdr, 0, types...);
+}
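
A simplified, self-contained analog of the walk detail::findCommands performs above, using a plain {cmd, cmdsize} record stream instead of the real Mach-O header types; all names here are illustrative only:

#include <cstdint>
#include <vector>

// Walk a packed stream of variable-size records, each starting with
// {cmd, cmdsize}, and collect the ones whose cmd matches.
struct FakeCmd { uint32_t cmd; uint32_t cmdsize; };

static std::vector<const FakeCmd *> findFakeCommands(const uint8_t *p,
                                                     uint32_t ncmds,
                                                     uint32_t wanted) {
  std::vector<const FakeCmd *> out;
  for (uint32_t i = 0; i < ncmds; ++i) {
    auto *c = reinterpret_cast<const FakeCmd *>(p);
    if (c->cmd == wanted)
      out.push_back(c);
    p += c->cmdsize; // cmdsize covers the record header plus its payload
  }
  return out;
}

int main() {
  // Two 8-byte records: cmd=1 followed by cmd=2.
  FakeCmd cmds[2] = {{1, sizeof(FakeCmd)}, {2, sizeof(FakeCmd)}};
  auto found = findFakeCommands(reinterpret_cast<const uint8_t *>(cmds), 2, 2);
  return found.size() == 1 ? 0 : 1;
}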
+
} // namespace macho
std::string toString(const macho::InputFile *file);
//===----------------------------------------------------------------------===//
#include "InputSection.h"
+#include "ConcatOutputSection.h"
+#include "Config.h"
+#include "InputFiles.h"
#include "OutputSegment.h"
#include "Symbols.h"
+#include "SyntheticSections.h"
#include "Target.h"
+#include "UnwindInfoSection.h"
+#include "Writer.h"
#include "lld/Common/Memory.h"
#include "llvm/Support/Endian.h"
+#include "llvm/Support/xxhash.h"
using namespace llvm;
using namespace llvm::MachO;
using namespace lld;
using namespace lld::macho;
-std::vector<InputSection *> macho::inputSections;
+std::vector<ConcatInputSection *> macho::inputSections;
-uint64_t InputSection::getFileOffset() const {
- return parent->fileOff + outSecFileOff;
+uint64_t InputSection::getFileSize() const {
+ return isZeroFill(getFlags()) ? 0 : getSize();
}
-uint64_t InputSection::getVA() const { return parent->addr + outSecOff; }
+uint64_t InputSection::getVA(uint64_t off) const {
+ return parent->addr + getOffset(off);
+}
+
+static uint64_t resolveSymbolVA(const Symbol *sym, uint8_t type) {
+ const RelocAttrs &relocAttrs = target->getRelocAttrs(type);
+ if (relocAttrs.hasAttr(RelocAttrBits::BRANCH))
+ return sym->resolveBranchVA();
+ if (relocAttrs.hasAttr(RelocAttrBits::GOT))
+ return sym->resolveGotVA();
+ if (relocAttrs.hasAttr(RelocAttrBits::TLV))
+ return sym->resolveTlvVA();
+ return sym->getVA();
+}
+
+// ICF needs to hash any section that might potentially be duplicated so
+// that it can match on content rather than identity.
+bool ConcatInputSection::isHashableForICF() const {
+ switch (sectionType(getFlags())) {
+ case S_REGULAR:
+ return true;
+ case S_CSTRING_LITERALS:
+ case S_4BYTE_LITERALS:
+ case S_8BYTE_LITERALS:
+ case S_16BYTE_LITERALS:
+ case S_LITERAL_POINTERS:
+ llvm_unreachable("found unexpected literal type in ConcatInputSection");
+ case S_ZEROFILL:
+ case S_GB_ZEROFILL:
+ case S_NON_LAZY_SYMBOL_POINTERS:
+ case S_LAZY_SYMBOL_POINTERS:
+ case S_SYMBOL_STUBS:
+ case S_MOD_INIT_FUNC_POINTERS:
+ case S_MOD_TERM_FUNC_POINTERS:
+ case S_COALESCED:
+ case S_INTERPOSING:
+ case S_DTRACE_DOF:
+ case S_LAZY_DYLIB_SYMBOL_POINTERS:
+ case S_THREAD_LOCAL_REGULAR:
+ case S_THREAD_LOCAL_ZEROFILL:
+ case S_THREAD_LOCAL_VARIABLES:
+ case S_THREAD_LOCAL_VARIABLE_POINTERS:
+ case S_THREAD_LOCAL_INIT_FUNCTION_POINTERS:
+ return false;
+ default:
+ llvm_unreachable("Section type");
+ }
+}
+
+void ConcatInputSection::hashForICF() {
+ assert(data.data()); // zeroFill section data has nullptr with non-zero size
+ assert(icfEqClass[0] == 0); // don't overwrite a unique ID!
+ // Turn-on the top bit to guarantee that valid hashes have no collisions
+ // with the small-integer unique IDs for ICF-ineligible sections
+ icfEqClass[0] = xxHash64(data) | (1ull << 63);
+}
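
A small sketch of why OR-ing in the top bit above avoids collisions: content hashes land in the upper half of the 64-bit space, while the small sequential unique IDs handed to ICF-ineligible sections stay in the lower half. The values are made up:

#include <cassert>
#include <cstdint>

int main() {
  uint64_t someContentHash = 42;  // pretend xxHash64 returned this
  uint64_t eqClassFromHash = someContentHash | (1ull << 63);
  uint64_t eqClassUniqueId = 42;  // small-integer unique ID for another section
  assert(eqClassFromHash != eqClassUniqueId);
  assert(eqClassFromHash >> 63 == 1); // hashes: top bit always set
  assert(eqClassUniqueId >> 63 == 0); // unique IDs: top bit always clear
  return 0;
}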
+
+void ConcatInputSection::foldIdentical(ConcatInputSection *copy) {
+ align = std::max(align, copy->align);
+ copy->live = false;
+ copy->wasCoalesced = true;
+ numRefs += copy->numRefs;
+ copy->numRefs = 0;
+ copy->replacement = this;
+}
+
+void ConcatInputSection::writeTo(uint8_t *buf) {
+ assert(!shouldOmitFromOutput());
-void InputSection::writeTo(uint8_t *buf) {
if (getFileSize() == 0)
return;
memcpy(buf, data.data(), data.size());
- for (Reloc &r : relocs) {
- uint64_t va = 0;
- if (auto *s = r.target.dyn_cast<Symbol *>())
- va = target->getSymbolVA(*s, r.type);
- else if (auto *isec = r.target.dyn_cast<InputSection *>())
- va = isec->getVA();
-
- uint64_t val = va + r.addend;
- if (r.pcrel)
- val -= getVA() + r.offset;
- target->relocateOne(buf + r.offset, r, val);
+ for (size_t i = 0; i < relocs.size(); i++) {
+ const Reloc &r = relocs[i];
+ uint8_t *loc = buf + r.offset;
+ uint64_t referentVA = 0;
+ if (target->hasAttr(r.type, RelocAttrBits::SUBTRAHEND)) {
+ const Symbol *fromSym = r.referent.get<Symbol *>();
+ const Reloc &minuend = relocs[++i];
+ uint64_t minuendVA;
+ if (const Symbol *toSym = minuend.referent.dyn_cast<Symbol *>())
+ minuendVA = toSym->getVA() + minuend.addend;
+ else {
+ auto *referentIsec = minuend.referent.get<InputSection *>();
+ assert(!::shouldOmitFromOutput(referentIsec));
+ minuendVA = referentIsec->getVA(minuend.addend);
+ }
+ referentVA = minuendVA - fromSym->getVA();
+ } else if (auto *referentSym = r.referent.dyn_cast<Symbol *>()) {
+ if (target->hasAttr(r.type, RelocAttrBits::LOAD) &&
+ !referentSym->isInGot())
+ target->relaxGotLoad(loc, r.type);
+ referentVA = resolveSymbolVA(referentSym, r.type) + r.addend;
+
+ if (isThreadLocalVariables(getFlags())) {
+ // References from thread-local variable sections are treated as offsets
+ // relative to the start of the thread-local data memory area, which
+ // is initialized via copying all the TLV data sections (which are all
+ // contiguous).
+ if (isa<Defined>(referentSym))
+ referentVA -= firstTLVDataSection->addr;
+ }
+ } else if (auto *referentIsec = r.referent.dyn_cast<InputSection *>()) {
+ assert(!::shouldOmitFromOutput(referentIsec));
+ referentVA = referentIsec->getVA(r.addend);
+ }
+ target->relocateOne(loc, r, referentVA, getVA() + r.offset);
+ }
+}
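
A worked example, with hypothetical addresses, of the value stored for a SUBTRAHEND pair in writeTo() above: the minuend's address (plus its addend) minus the subtrahend symbol's address:

#include <cassert>
#include <cstdint>

int main() {
  uint64_t fromSymVA = 0x100001000; // subtrahend symbol (hypothetical)
  uint64_t toSymVA = 0x100002000;   // minuend symbol (hypothetical)
  int64_t addend = 8;
  uint64_t referentVA = (toSymVA + addend) - fromSymVA;
  assert(referentVA == 0x1008); // signed distance written into the section
  return 0;
}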
+
+void CStringInputSection::splitIntoPieces() {
+ size_t off = 0;
+ StringRef s = toStringRef(data);
+ while (!s.empty()) {
+ size_t end = s.find(0);
+ if (end == StringRef::npos)
+ fatal(toString(this) + ": string is not null terminated");
+ size_t size = end + 1;
+ uint32_t hash = config->dedupLiterals ? xxHash64(s.substr(0, size)) : 0;
+ pieces.emplace_back(off, hash);
+ s = s.substr(size);
+ off += size;
+ }
+}
+
+StringPiece &CStringInputSection::getStringPiece(uint64_t off) {
+ if (off >= data.size())
+ fatal(toString(this) + ": offset is outside the section");
+
+ auto it =
+ partition_point(pieces, [=](StringPiece p) { return p.inSecOff <= off; });
+ return it[-1];
+}
+
+const StringPiece &CStringInputSection::getStringPiece(uint64_t off) const {
+ return const_cast<CStringInputSection *>(this)->getStringPiece(off);
+}
+
+uint64_t CStringInputSection::getOffset(uint64_t off) const {
+ const StringPiece &piece = getStringPiece(off);
+ uint64_t addend = off - piece.inSecOff;
+ return piece.outSecOff + addend;
+}
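
A worked example of the offset translation done by CStringInputSection::getOffset above, using made-up piece offsets:

#include <cassert>
#include <cstdint>

int main() {
  uint64_t pieceInSecOff = 32;  // piece starts 32 bytes into the input section
  uint64_t pieceOutSecOff = 80; // after dedup/merge it landed at output offset 80
  uint64_t off = 37;            // a reference 5 bytes into that string
  uint64_t addend = off - pieceInSecOff;     // 5
  uint64_t outOff = pieceOutSecOff + addend; // 85
  assert(outOff == 85);
  return 0;
}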
+
+WordLiteralInputSection::WordLiteralInputSection(StringRef segname,
+ StringRef name,
+ InputFile *file,
+ ArrayRef<uint8_t> data,
+ uint32_t align, uint32_t flags)
+ : InputSection(WordLiteralKind, segname, name, file, data, align, flags) {
+ switch (sectionType(flags)) {
+ case S_4BYTE_LITERALS:
+ power2LiteralSize = 2;
+ break;
+ case S_8BYTE_LITERALS:
+ power2LiteralSize = 3;
+ break;
+ case S_16BYTE_LITERALS:
+ power2LiteralSize = 4;
+ break;
+ default:
+ llvm_unreachable("invalid literal section type");
+ }
+
+ live.resize(data.size() >> power2LiteralSize, !config->deadStrip);
+}
+
+uint64_t WordLiteralInputSection::getOffset(uint64_t off) const {
+ auto *osec = cast<WordLiteralSection>(parent);
+ const uint8_t *buf = data.data();
+ switch (sectionType(getFlags())) {
+ case S_4BYTE_LITERALS:
+ return osec->getLiteral4Offset(buf + off);
+ case S_8BYTE_LITERALS:
+ return osec->getLiteral8Offset(buf + off);
+ case S_16BYTE_LITERALS:
+ return osec->getLiteral16Offset(buf + off);
+ default:
+ llvm_unreachable("invalid literal section type");
}
}
+
+bool macho::isCodeSection(const InputSection *isec) {
+ uint32_t type = sectionType(isec->getFlags());
+ if (type != S_REGULAR && type != S_COALESCED)
+ return false;
+
+ uint32_t attr = isec->getFlags() & SECTION_ATTRIBUTES_USR;
+ if (attr == S_ATTR_PURE_INSTRUCTIONS)
+ return true;
+
+ if (isec->getSegName() == segment_names::text)
+ return StringSwitch<bool>(isec->getName())
+ .Cases(section_names::textCoalNt, section_names::staticInit, true)
+ .Default(false);
+
+ return false;
+}
+
+bool macho::isCfStringSection(const InputSection *isec) {
+ return isec->getName() == section_names::cfString &&
+ isec->getSegName() == segment_names::data;
+}
+
+std::string lld::toString(const InputSection *isec) {
+ return (toString(isec->getFile()) + ":(" + isec->getName() + ")").str();
+}
#ifndef LLD_MACHO_INPUT_SECTION_H
#define LLD_MACHO_INPUT_SECTION_H
+#include "Config.h"
+#include "Relocations.h"
+
#include "lld/Common/LLVM.h"
+#include "lld/Common/Memory.h"
#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/PointerUnion.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/CachedHashString.h"
#include "llvm/BinaryFormat/MachO.h"
namespace lld {
namespace macho {
class InputFile;
-class InputSection;
class OutputSection;
-class Symbol;
-
-struct Reloc {
- uint8_t type;
- bool pcrel;
- uint8_t length;
- // The offset from the start of the subsection that this relocation belongs
- // to.
- uint32_t offset;
- // Adding this offset to the address of the target symbol or subsection gives
- // the destination that this relocation refers to.
- uint64_t addend;
- llvm::PointerUnion<Symbol *, InputSection *> target;
-};
-
-inline bool isZeroFill(uint8_t flags) {
- return (flags & llvm::MachO::SECTION_TYPE) == llvm::MachO::S_ZEROFILL;
-}
+class Defined;
class InputSection {
public:
+ enum Kind {
+ ConcatKind,
+ CStringLiteralKind,
+ WordLiteralKind,
+ };
+
+ Kind kind() const { return shared->sectionKind; }
virtual ~InputSection() = default;
virtual uint64_t getSize() const { return data.size(); }
- virtual uint64_t getFileSize() const {
- return isZeroFill(flags) ? 0 : getSize();
- }
- uint64_t getFileOffset() const;
- uint64_t getVA() const;
-
- virtual void writeTo(uint8_t *buf);
-
- InputFile *file = nullptr;
- StringRef name;
- StringRef segname;
+ InputFile *getFile() const { return shared->file; }
+ StringRef getName() const { return shared->name; }
+ StringRef getSegName() const { return shared->segname; }
+ uint32_t getFlags() const { return shared->flags; }
+ uint64_t getFileSize() const;
+ // Translates \p off -- an offset relative to this InputSection -- into an
+ // offset from the beginning of its parent OutputSection.
+ virtual uint64_t getOffset(uint64_t off) const = 0;
+  // The virtual address of the data at \p off within this section.
+ uint64_t getVA(uint64_t off) const;
+ // Whether the data at \p off in this InputSection is live.
+ virtual bool isLive(uint64_t off) const = 0;
+ virtual void markLive(uint64_t off) = 0;
+ virtual InputSection *canonical() { return this; }
OutputSection *parent = nullptr;
- uint64_t outSecOff = 0;
- uint64_t outSecFileOff = 0;
uint32_t align = 1;
- uint32_t flags = 0;
+ uint32_t callSiteCount : 31;
+ // is address assigned?
+ uint32_t isFinal : 1;
ArrayRef<uint8_t> data;
std::vector<Reloc> relocs;
+
+protected:
+ // The fields in this struct are immutable. Since we create a lot of
+ // InputSections with identical values for them (due to
+ // .subsections_via_symbols), factoring them out into a shared struct reduces
+ // memory consumption and makes copying cheaper.
+ struct Shared {
+ InputFile *file;
+ StringRef name;
+ StringRef segname;
+ uint32_t flags;
+ Kind sectionKind;
+ Shared(InputFile *file, StringRef name, StringRef segname, uint32_t flags,
+ Kind kind)
+ : file(file), name(name), segname(segname), flags(flags),
+ sectionKind(kind) {}
+ };
+
+ InputSection(Kind kind, StringRef segname, StringRef name)
+ : callSiteCount(0), isFinal(false),
+ shared(make<Shared>(nullptr, name, segname, 0, kind)) {}
+
+ InputSection(Kind kind, StringRef segname, StringRef name, InputFile *file,
+ ArrayRef<uint8_t> data, uint32_t align, uint32_t flags)
+ : align(align), callSiteCount(0), isFinal(false), data(data),
+ shared(make<Shared>(file, name, segname, flags, kind)) {}
+
+ const Shared *const shared;
};
-extern std::vector<InputSection *> inputSections;
+// ConcatInputSections are combined into (Concat)OutputSections through simple
+// concatenation, in contrast with literal sections which may have their
+// contents merged before output.
+class ConcatInputSection final : public InputSection {
+public:
+ ConcatInputSection(StringRef segname, StringRef name)
+ : InputSection(ConcatKind, segname, name) {}
+
+ ConcatInputSection(StringRef segname, StringRef name, InputFile *file,
+ ArrayRef<uint8_t> data, uint32_t align = 1,
+ uint32_t flags = 0)
+ : InputSection(ConcatKind, segname, name, file, data, align, flags) {}
+
+ uint64_t getOffset(uint64_t off) const override { return outSecOff + off; }
+ uint64_t getVA() const { return InputSection::getVA(0); }
+ // ConcatInputSections are entirely live or dead, so the offset is irrelevant.
+ bool isLive(uint64_t off) const override { return live; }
+ void markLive(uint64_t off) override { live = true; }
+ bool isCoalescedWeak() const { return wasCoalesced && numRefs == 0; }
+ bool shouldOmitFromOutput() const { return !live || isCoalescedWeak(); }
+ bool isHashableForICF() const;
+ void hashForICF();
+ void writeTo(uint8_t *buf);
+
+ void foldIdentical(ConcatInputSection *redundant);
+ InputSection *canonical() override {
+ return replacement ? replacement : this;
+ }
+
+ static bool classof(const InputSection *isec) {
+ return isec->kind() == ConcatKind;
+ }
+
+ // Points to the surviving section after this one is folded by ICF
+ InputSection *replacement = nullptr;
+ // Equivalence-class ID for ICF
+ uint64_t icfEqClass[2] = {0, 0};
+
+ // With subsections_via_symbols, most symbols have their own InputSection,
+ // and for weak symbols (e.g. from inline functions), only the
+ // InputSection from one translation unit will make it to the output,
+ // while all copies in other translation units are coalesced into the
+ // first and not copied to the output.
+ bool wasCoalesced = false;
+ bool live = !config->deadStrip;
+ // How many symbols refer to this InputSection.
+ uint32_t numRefs = 0;
+ // This variable has two usages. Initially, it represents the input order.
+ // After assignAddresses is called, it represents the offset from the
+ // beginning of the output section this section was assigned to.
+ uint64_t outSecOff = 0;
+};
+
+// Verify ConcatInputSection's size on 64-bit builds.
+static_assert(sizeof(int) != 8 || sizeof(ConcatInputSection) == 112,
+ "Try to minimize ConcatInputSection's size, we create many "
+ "instances of it");
+
+// Helper functions to make it easy to sprinkle asserts.
+
+inline bool shouldOmitFromOutput(InputSection *isec) {
+ return isa<ConcatInputSection>(isec) &&
+ cast<ConcatInputSection>(isec)->shouldOmitFromOutput();
+}
+
+inline bool isCoalescedWeak(InputSection *isec) {
+ return isa<ConcatInputSection>(isec) &&
+ cast<ConcatInputSection>(isec)->isCoalescedWeak();
+}
+
+// We allocate a lot of these and binary search on them, so they should be as
+// compact as possible. Hence the use of 31 rather than 64 bits for the hash.
+struct StringPiece {
+ // Offset from the start of the containing input section.
+ uint32_t inSecOff;
+ uint32_t live : 1;
+ // Only set if deduplicating literals
+ uint32_t hash : 31;
+ // Offset from the start of the containing output section.
+ uint64_t outSecOff = 0;
+
+ StringPiece(uint64_t off, uint32_t hash)
+ : inSecOff(off), live(!config->deadStrip), hash(hash) {}
+};
+
+static_assert(sizeof(StringPiece) == 16, "StringPiece is too big!");
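
A sketch of the packing the static_assert above checks, assuming a typical 64-bit ABI: 4 bytes for inSecOff, 4 bytes for the live/hash bitfield pair, and 8 bytes for outSecOff:

#include <cstdint>

// Mirrors the StringPiece layout above; 4 + 4 + 8 = 16 bytes on common ABIs.
struct PieceLayout {
  uint32_t inSecOff;
  uint32_t live : 1;
  uint32_t hash : 31;
  uint64_t outSecOff;
};
static_assert(sizeof(PieceLayout) == 16, "expected 4 + 4 + 8 bytes");

int main() { return 0; }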
+
+// CStringInputSections are composed of multiple null-terminated string
+// literals, which we represent using StringPieces. These literals can be
+// deduplicated and tail-merged, so translating offsets between the input and
+// output sections is more complicated.
+//
+// NOTE: One significant difference between LLD and ld64 is that we merge all
+// cstring literals, even those referenced directly by non-private symbols.
+// ld64 is more conservative and does not do that. This was mostly done for
+// implementation simplicity; if we find programs that need the more
+// conservative behavior we can certainly implement that.
+class CStringInputSection final : public InputSection {
+public:
+ CStringInputSection(StringRef segname, StringRef name, InputFile *file,
+ ArrayRef<uint8_t> data, uint32_t align, uint32_t flags)
+ : InputSection(CStringLiteralKind, segname, name, file, data, align,
+ flags) {}
+ uint64_t getOffset(uint64_t off) const override;
+ bool isLive(uint64_t off) const override { return getStringPiece(off).live; }
+ void markLive(uint64_t off) override { getStringPiece(off).live = true; }
+ // Find the StringPiece that contains this offset.
+ StringPiece &getStringPiece(uint64_t off);
+ const StringPiece &getStringPiece(uint64_t off) const;
+ // Split at each null byte.
+ void splitIntoPieces();
+
+ LLVM_ATTRIBUTE_ALWAYS_INLINE
+ StringRef getStringRef(size_t i) const {
+ size_t begin = pieces[i].inSecOff;
+ size_t end =
+ (pieces.size() - 1 == i) ? data.size() : pieces[i + 1].inSecOff;
+ return toStringRef(data.slice(begin, end - begin));
+ }
+
+  // Returns the i'th piece as a CachedHashStringRef. This function is very hot
+  // when string merging is enabled, so we want to inline it.
+ LLVM_ATTRIBUTE_ALWAYS_INLINE
+ llvm::CachedHashStringRef getCachedHashStringRef(size_t i) const {
+ assert(config->dedupLiterals);
+ return {getStringRef(i), pieces[i].hash};
+ }
+
+ static bool classof(const InputSection *isec) {
+ return isec->kind() == CStringLiteralKind;
+ }
+
+ std::vector<StringPiece> pieces;
+};
+
+class WordLiteralInputSection final : public InputSection {
+public:
+ WordLiteralInputSection(StringRef segname, StringRef name, InputFile *file,
+ ArrayRef<uint8_t> data, uint32_t align,
+ uint32_t flags);
+ uint64_t getOffset(uint64_t off) const override;
+ bool isLive(uint64_t off) const override {
+ return live[off >> power2LiteralSize];
+ }
+ void markLive(uint64_t off) override { live[off >> power2LiteralSize] = 1; }
+
+ static bool classof(const InputSection *isec) {
+ return isec->kind() == WordLiteralKind;
+ }
+
+private:
+ unsigned power2LiteralSize;
+ // The liveness of data[off] is tracked by live[off >> power2LiteralSize].
+ llvm::BitVector live;
+};
+
+inline uint8_t sectionType(uint32_t flags) {
+ return flags & llvm::MachO::SECTION_TYPE;
+}
+
+inline bool isZeroFill(uint32_t flags) {
+ return llvm::MachO::isVirtualSection(sectionType(flags));
+}
+
+inline bool isThreadLocalVariables(uint32_t flags) {
+ return sectionType(flags) == llvm::MachO::S_THREAD_LOCAL_VARIABLES;
+}
+
+// These sections contain the data for initializing thread-local variables.
+inline bool isThreadLocalData(uint32_t flags) {
+ return sectionType(flags) == llvm::MachO::S_THREAD_LOCAL_REGULAR ||
+ sectionType(flags) == llvm::MachO::S_THREAD_LOCAL_ZEROFILL;
+}
+
+inline bool isDebugSection(uint32_t flags) {
+ return (flags & llvm::MachO::SECTION_ATTRIBUTES_USR) ==
+ llvm::MachO::S_ATTR_DEBUG;
+}
+
+inline bool isWordLiteralSection(uint32_t flags) {
+ return sectionType(flags) == llvm::MachO::S_4BYTE_LITERALS ||
+ sectionType(flags) == llvm::MachO::S_8BYTE_LITERALS ||
+ sectionType(flags) == llvm::MachO::S_16BYTE_LITERALS;
+}
+
+bool isCodeSection(const InputSection *);
+
+bool isCfStringSection(const InputSection *);
+
+extern std::vector<ConcatInputSection *> inputSections;
+
+namespace section_names {
+
+constexpr const char authGot[] = "__auth_got";
+constexpr const char authPtr[] = "__auth_ptr";
+constexpr const char binding[] = "__binding";
+constexpr const char bitcodeBundle[] = "__bundle";
+constexpr const char cString[] = "__cstring";
+constexpr const char cfString[] = "__cfstring";
+constexpr const char codeSignature[] = "__code_signature";
+constexpr const char common[] = "__common";
+constexpr const char compactUnwind[] = "__compact_unwind";
+constexpr const char data[] = "__data";
+constexpr const char debugAbbrev[] = "__debug_abbrev";
+constexpr const char debugInfo[] = "__debug_info";
+constexpr const char debugStr[] = "__debug_str";
+constexpr const char ehFrame[] = "__eh_frame";
+constexpr const char export_[] = "__export";
+constexpr const char dataInCode[] = "__data_in_code";
+constexpr const char functionStarts[] = "__func_starts";
+constexpr const char got[] = "__got";
+constexpr const char header[] = "__mach_header";
+constexpr const char indirectSymbolTable[] = "__ind_sym_tab";
+constexpr const char const_[] = "__const";
+constexpr const char lazySymbolPtr[] = "__la_symbol_ptr";
+constexpr const char lazyBinding[] = "__lazy_binding";
+constexpr const char literals[] = "__literals";
+constexpr const char moduleInitFunc[] = "__mod_init_func";
+constexpr const char moduleTermFunc[] = "__mod_term_func";
+constexpr const char nonLazySymbolPtr[] = "__nl_symbol_ptr";
+constexpr const char objcCatList[] = "__objc_catlist";
+constexpr const char objcClassList[] = "__objc_classlist";
+constexpr const char objcConst[] = "__objc_const";
+constexpr const char objcImageInfo[] = "__objc_imageinfo";
+constexpr const char objcNonLazyCatList[] = "__objc_nlcatlist";
+constexpr const char objcNonLazyClassList[] = "__objc_nlclslist";
+constexpr const char objcProtoList[] = "__objc_protolist";
+constexpr const char pageZero[] = "__pagezero";
+constexpr const char pointers[] = "__pointers";
+constexpr const char rebase[] = "__rebase";
+constexpr const char staticInit[] = "__StaticInit";
+constexpr const char stringTable[] = "__string_table";
+constexpr const char stubHelper[] = "__stub_helper";
+constexpr const char stubs[] = "__stubs";
+constexpr const char swift[] = "__swift";
+constexpr const char symbolTable[] = "__symbol_table";
+constexpr const char textCoalNt[] = "__textcoal_nt";
+constexpr const char text[] = "__text";
+constexpr const char threadPtrs[] = "__thread_ptrs";
+constexpr const char threadVars[] = "__thread_vars";
+constexpr const char unwindInfo[] = "__unwind_info";
+constexpr const char weakBinding[] = "__weak_binding";
+constexpr const char zeroFill[] = "__zerofill";
+
+} // namespace section_names
} // namespace macho
+
+std::string toString(const macho::InputSection *);
+
} // namespace lld
#endif
--- /dev/null
+//===- LTO.cpp ------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "LTO.h"
+#include "Config.h"
+#include "Driver.h"
+#include "InputFiles.h"
+#include "Symbols.h"
+#include "Target.h"
+
+#include "lld/Common/Args.h"
+#include "lld/Common/ErrorHandler.h"
+#include "lld/Common/Strings.h"
+#include "lld/Common/TargetOptionsCommandFlags.h"
+#include "llvm/LTO/Caching.h"
+#include "llvm/LTO/Config.h"
+#include "llvm/LTO/LTO.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Transforms/ObjCARC.h"
+
+using namespace lld;
+using namespace lld::macho;
+using namespace llvm;
+using namespace llvm::MachO;
+using namespace llvm::sys;
+
+static lto::Config createConfig() {
+ lto::Config c;
+ c.Options = initTargetOptionsFromCodeGenFlags();
+ c.CodeModel = getCodeModelFromCMModel();
+ c.CPU = getCPUStr();
+ c.MAttrs = getMAttrs();
+ c.UseNewPM = config->ltoNewPassManager;
+ c.PreCodeGenPassesHook = [](legacy::PassManager &pm) {
+ pm.add(createObjCARCContractPass());
+ };
+ c.TimeTraceEnabled = config->timeTraceEnabled;
+ c.TimeTraceGranularity = config->timeTraceGranularity;
+ c.OptLevel = config->ltoo;
+ c.CGOptLevel = args::getCGOptLevel(config->ltoo);
+ if (config->saveTemps)
+ checkError(c.addSaveTemps(config->outputFile.str() + ".",
+ /*UseInputModulePath=*/true));
+ return c;
+}
+
+BitcodeCompiler::BitcodeCompiler() {
+ lto::ThinBackend backend = lto::createInProcessThinBackend(
+ heavyweight_hardware_concurrency(config->thinLTOJobs));
+ ltoObj = std::make_unique<lto::LTO>(createConfig(), backend);
+}
+
+void BitcodeCompiler::add(BitcodeFile &f) {
+ ArrayRef<lto::InputFile::Symbol> objSyms = f.obj->symbols();
+ std::vector<lto::SymbolResolution> resols;
+ resols.reserve(objSyms.size());
+
+ // Provide a resolution to the LTO API for each symbol.
+ auto symIt = f.symbols.begin();
+ for (const lto::InputFile::Symbol &objSym : objSyms) {
+ resols.emplace_back();
+ lto::SymbolResolution &r = resols.back();
+ Symbol *sym = *symIt++;
+
+ // Ideally we shouldn't check for SF_Undefined but currently IRObjectFile
+    // reports two symbols for each symbol defined in module-level ASM. Without
+    // this check, lld flags an undefined in IR with a definition in ASM as
+    // prevailing.
+ // Once IRObjectFile is fixed to report only one symbol this hack can
+ // be removed.
+ r.Prevailing = !objSym.isUndefined() && sym->getFile() == &f;
+
+ // FIXME: What about other output types? And we can probably be less
+ // restrictive with -flat_namespace, but it's an infrequent use case.
+ // FIXME: Honor config->exportDynamic.
+ r.VisibleToRegularObj = config->outputType != MH_EXECUTE ||
+ config->namespaceKind == NamespaceKind::flat ||
+ sym->isUsedInRegularObj;
+
+ // Un-define the symbol so that we don't get duplicate symbol errors when we
+ // load the ObjFile emitted by LTO compilation.
+ if (r.Prevailing)
+ replaceSymbol<Undefined>(sym, sym->getName(), sym->getFile(),
+ RefState::Strong);
+
+ // TODO: set the other resolution configs properly
+ }
+ checkError(ltoObj->add(std::move(f.obj), resols));
+}
+
+// Merge all the bitcode files we have seen, codegen the result
+// and return the resulting ObjectFile(s).
+std::vector<ObjFile *> BitcodeCompiler::compile() {
+ unsigned maxTasks = ltoObj->getMaxTasks();
+ buf.resize(maxTasks);
+ files.resize(maxTasks);
+
+ // The -cache_path_lto option specifies the path to a directory in which
+ // to cache native object files for ThinLTO incremental builds. If a path was
+ // specified, configure LTO to use it as the cache directory.
+ lto::NativeObjectCache cache;
+ if (!config->thinLTOCacheDir.empty())
+ cache = check(
+ lto::localCache(config->thinLTOCacheDir,
+ [&](size_t task, std::unique_ptr<MemoryBuffer> mb) {
+ files[task] = std::move(mb);
+ }));
+
+ checkError(ltoObj->run(
+ [&](size_t task) {
+ return std::make_unique<lto::NativeObjectStream>(
+ std::make_unique<raw_svector_ostream>(buf[task]));
+ },
+ cache));
+
+ if (!config->thinLTOCacheDir.empty())
+ pruneCache(config->thinLTOCacheDir, config->thinLTOCachePolicy);
+
+ if (config->saveTemps) {
+ if (!buf[0].empty())
+ saveBuffer(buf[0], config->outputFile + ".lto.o");
+ for (unsigned i = 1; i != maxTasks; ++i)
+ saveBuffer(buf[i], config->outputFile + Twine(i) + ".lto.o");
+ }
+
+ if (!config->ltoObjPath.empty())
+ fs::create_directories(config->ltoObjPath);
+
+ std::vector<ObjFile *> ret;
+ for (unsigned i = 0; i != maxTasks; ++i) {
+ if (buf[i].empty())
+ continue;
+ SmallString<261> filePath("/tmp/lto.tmp");
+ uint32_t modTime = 0;
+ if (!config->ltoObjPath.empty()) {
+ filePath = config->ltoObjPath;
+ path::append(filePath, Twine(i) + "." +
+ getArchitectureName(config->arch()) +
+ ".lto.o");
+ saveBuffer(buf[i], filePath);
+ modTime = getModTime(filePath);
+ }
+ ret.push_back(make<ObjFile>(
+ MemoryBufferRef(buf[i], saver.save(filePath.str())), modTime, ""));
+ }
+ for (std::unique_ptr<MemoryBuffer> &file : files)
+ if (file)
+ ret.push_back(make<ObjFile>(*file, 0, ""));
+ return ret;
+}
--- /dev/null
+//===- LTO.h ----------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLD_MACHO_LTO_H
+#define LLD_MACHO_LTO_H
+
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include <memory>
+#include <vector>
+
+namespace llvm {
+namespace lto {
+class LTO;
+} // namespace lto
+} // namespace llvm
+
+namespace lld {
+namespace macho {
+
+class BitcodeFile;
+class ObjFile;
+
+class BitcodeCompiler {
+public:
+ BitcodeCompiler();
+
+ void add(BitcodeFile &f);
+ std::vector<ObjFile *> compile();
+
+private:
+ std::unique_ptr<llvm::lto::LTO> ltoObj;
+ std::vector<llvm::SmallString<0>> buf;
+ std::vector<std::unique_ptr<llvm::MemoryBuffer>> files;
+};
+
+} // namespace macho
+} // namespace lld
+
+#endif
llvm::support::ulittle64_t n_value;
};
+struct nlist {
+ llvm::support::ulittle32_t n_strx;
+ uint8_t n_type;
+ uint8_t n_sect;
+ llvm::support::ulittle16_t n_desc;
+ llvm::support::ulittle32_t n_value;
+};
+
+struct entry_point_command {
+ llvm::support::ulittle32_t cmd;
+ llvm::support::ulittle32_t cmdsize;
+ llvm::support::ulittle64_t entryoff;
+ llvm::support::ulittle64_t stacksize;
+};
+
} // namespace structs
} // namespace lld
--- /dev/null
+//===- MapFile.cpp --------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the -map option. It lists, in order and
+// hierarchically, the output file, architecture, input files, output
+// sections, and symbols:
+//
+// # Path: test
+// # Arch: x86_64
+// # Object files:
+// [ 0] linker synthesized
+// [ 1] a.o
+// # Sections:
+// # Address Size Segment Section
+// 0x1000005C0 0x0000004C __TEXT __text
+// # Symbols:
+// # Address File Name
+// 0x1000005C0 [ 1] _main
+//
+//===----------------------------------------------------------------------===//
+
+#include "MapFile.h"
+#include "Config.h"
+#include "InputFiles.h"
+#include "InputSection.h"
+#include "OutputSection.h"
+#include "OutputSegment.h"
+#include "Symbols.h"
+#include "Target.h"
+#include "llvm/Support/Parallel.h"
+#include "llvm/Support/TimeProfiler.h"
+
+using namespace llvm;
+using namespace llvm::sys;
+using namespace lld;
+using namespace lld::macho;
+
+using SymbolMapTy = DenseMap<const InputSection *, SmallVector<Defined *, 4>>;
+
+// Returns a map from sections to their symbols.
+static SymbolMapTy getSectionSyms(ArrayRef<Defined *> syms) {
+ SymbolMapTy ret;
+ for (Defined *dr : syms)
+ ret[dr->isec].push_back(dr);
+
+ // Sort symbols by address. We want to print out symbols in the order they
+ // appear in the output file rather than the order they appeared in the input
+ // files.
+ for (auto &it : ret)
+ parallelSort(
+ it.second.begin(), it.second.end(), [](Defined *a, Defined *b) {
+ return a->getVA() != b->getVA() ? a->getVA() < b->getVA()
+ : a->getName() < b->getName();
+ });
+ return ret;
+}
+
+// Returns a list of all symbols that we want to print out.
+static std::vector<Defined *> getSymbols() {
+ std::vector<Defined *> v;
+ for (InputFile *file : inputFiles)
+ if (isa<ObjFile>(file))
+ for (Symbol *sym : file->symbols)
+ if (auto *d = dyn_cast_or_null<Defined>(sym))
+ if (d->isLive() && d->isec && d->getFile() == file) {
+ assert(!shouldOmitFromOutput(d->isec));
+ v.push_back(d);
+ }
+ return v;
+}
+
+// Construct a map from symbols to their stringified representations.
+// Demangling symbols (which is what toString() does) is slow, so
+// we do that in batch using parallel-for.
+static DenseMap<Symbol *, std::string>
+getSymbolStrings(ArrayRef<Defined *> syms) {
+ std::vector<std::string> str(syms.size());
+ parallelForEachN(0, syms.size(), [&](size_t i) {
+ raw_string_ostream os(str[i]);
+ os << toString(*syms[i]);
+ });
+
+ DenseMap<Symbol *, std::string> ret;
+ for (size_t i = 0, e = syms.size(); i < e; ++i)
+ ret[syms[i]] = std::move(str[i]);
+ return ret;
+}
+
+void macho::writeMapFile() {
+ if (config->mapFile.empty())
+ return;
+
+ TimeTraceScope timeScope("Write map file");
+
+ // Open a map file for writing.
+ std::error_code ec;
+ raw_fd_ostream os(config->mapFile, ec, sys::fs::OF_None);
+ if (ec) {
+ error("cannot open " + config->mapFile + ": " + ec.message());
+ return;
+ }
+
+ // Dump output path.
+ os << format("# Path: %s\n", config->outputFile.str().c_str());
+
+ // Dump output architecture.
+ os << format("# Arch: %s\n",
+ getArchitectureName(config->arch()).str().c_str());
+
+ // Dump table of object files.
+ os << "# Object files:\n";
+ os << format("[%3u] %s\n", 0, (const char *)"linker synthesized");
+ uint32_t fileIndex = 1;
+ DenseMap<lld::macho::InputFile *, uint32_t> readerToFileOrdinal;
+ for (InputFile *file : inputFiles) {
+ if (isa<ObjFile>(file)) {
+ os << format("[%3u] %s\n", fileIndex, file->getName().str().c_str());
+ readerToFileOrdinal[file] = fileIndex++;
+ }
+ }
+
+ // Collect symbol info that we want to print out.
+ std::vector<Defined *> syms = getSymbols();
+ SymbolMapTy sectionSyms = getSectionSyms(syms);
+ DenseMap<Symbol *, std::string> symStr = getSymbolStrings(syms);
+
+ // Dump table of sections
+ os << "# Sections:\n";
+ os << "# Address\tSize \tSegment\tSection\n";
+ for (OutputSegment *seg : outputSegments)
+ for (OutputSection *osec : seg->getSections()) {
+ if (osec->isHidden())
+ continue;
+
+ os << format("0x%08llX\t0x%08llX\t%s\t%s\n", osec->addr, osec->getSize(),
+ seg->name.str().c_str(), osec->name.str().c_str());
+ }
+
+ // Dump table of symbols
+ os << "# Symbols:\n";
+ os << "# Address\t File Name\n";
+ for (InputSection *isec : inputSections) {
+ auto symsIt = sectionSyms.find(isec);
+ assert(!shouldOmitFromOutput(isec) || (symsIt == sectionSyms.end()));
+ if (symsIt == sectionSyms.end())
+ continue;
+ for (Symbol *sym : symsIt->second) {
+ os << format("0x%08llX\t[%3u] %s\n", sym->getVA(),
+ readerToFileOrdinal[sym->getFile()], symStr[sym].c_str());
+ }
+ }
+
+ // TODO: when we implement -dead_strip, we should dump dead stripped symbols
+}
--- /dev/null
+//===- MapFile.h ------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLD_MACHO_MAPFILE_H
+#define LLD_MACHO_MAPFILE_H
+
+namespace lld {
+namespace macho {
+void writeMapFile();
+} // namespace macho
+} // namespace lld
+
+#endif
--- /dev/null
+//===- MarkLive.cpp -------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "MarkLive.h"
+#include "Config.h"
+#include "OutputSegment.h"
+#include "SymbolTable.h"
+#include "Symbols.h"
+#include "UnwindInfoSection.h"
+#include "mach-o/compact_unwind_encoding.h"
+#include "llvm/Support/TimeProfiler.h"
+
+namespace lld {
+namespace macho {
+
+using namespace llvm;
+using namespace llvm::MachO;
+
+// Set live bit on for each reachable chunk. Unmarked (unreachable)
+// InputSections will be ignored by Writer, so they will be excluded
+// from the final output.
+void markLive() {
+ TimeTraceScope timeScope("markLive");
+
+ // We build up a worklist of sections which have been marked as live. We only
+ // push into the worklist when we discover an unmarked section, and we mark
+ // as we push, so sections never appear twice in the list.
+ // Literal sections cannot contain references to other sections, so we only
+ // store ConcatInputSections in our worklist.
+ SmallVector<ConcatInputSection *, 256> worklist;
+
+ auto enqueue = [&](InputSection *isec, uint64_t off) {
+ if (isec->isLive(off))
+ return;
+ isec->markLive(off);
+ if (auto s = dyn_cast<ConcatInputSection>(isec)) {
+ assert(!s->isCoalescedWeak());
+ worklist.push_back(s);
+ }
+ };
+
+ auto addSym = [&](Symbol *s) {
+ s->used = true;
+ if (auto *d = dyn_cast<Defined>(s))
+ if (d->isec)
+ enqueue(d->isec, d->value);
+ };
+
+ // Add GC roots.
+ if (config->entry)
+ addSym(config->entry);
+ for (Symbol *sym : symtab->getSymbols()) {
+ if (auto *defined = dyn_cast<Defined>(sym)) {
+ // -exported_symbol(s_list)
+ if (!config->exportedSymbols.empty() &&
+ config->exportedSymbols.match(defined->getName())) {
+ // FIXME: Instead of doing this here, maybe the Driver code doing
+ // the matching should add them to explicitUndefineds? Then the
+ // explicitUndefineds code below would handle this automatically.
+ assert(!defined->privateExtern &&
+ "should have been rejected by driver");
+ addSym(defined);
+ continue;
+ }
+
+ // public symbols explicitly marked .no_dead_strip
+ if (defined->referencedDynamically || defined->noDeadStrip) {
+ addSym(defined);
+ continue;
+ }
+
+ // FIXME: When we implement these flags, make symbols from them GC roots:
+ // * -reexported_symbol(s_list)
+ // * -alias(-list)
+ // * -init
+
+ // In dylibs and bundles and in executables with -export_dynamic,
+ // all external functions are GC roots.
+ bool externsAreRoots =
+ config->outputType != MH_EXECUTE || config->exportDynamic;
+ if (externsAreRoots && !defined->privateExtern) {
+ addSym(defined);
+ continue;
+ }
+ }
+ }
+ // -u symbols
+ for (Symbol *sym : config->explicitUndefineds)
+ if (auto *defined = dyn_cast<Defined>(sym))
+ addSym(defined);
+ // local symbols explicitly marked .no_dead_strip
+ for (const InputFile *file : inputFiles)
+ if (auto *objFile = dyn_cast<ObjFile>(file))
+ for (Symbol *sym : objFile->symbols)
+ if (auto *defined = dyn_cast_or_null<Defined>(sym))
+ if (!defined->isExternal() && defined->noDeadStrip)
+ addSym(defined);
+ if (auto *stubBinder =
+ dyn_cast_or_null<DylibSymbol>(symtab->find("dyld_stub_binder")))
+ addSym(stubBinder);
+ for (ConcatInputSection *isec : inputSections) {
+ // Sections marked no_dead_strip
+ if (isec->getFlags() & S_ATTR_NO_DEAD_STRIP) {
+ enqueue(isec, 0);
+ continue;
+ }
+
+ // mod_init_funcs, mod_term_funcs sections
+ if (sectionType(isec->getFlags()) == S_MOD_INIT_FUNC_POINTERS ||
+ sectionType(isec->getFlags()) == S_MOD_TERM_FUNC_POINTERS) {
+ enqueue(isec, 0);
+ continue;
+ }
+ }
+
+ // Dead strip runs before UnwindInfoSection handling so we need to keep
+ // __LD,__compact_unwind alive here.
+ // But that section contains absolute references to __TEXT,__text and
+ // keeps most code alive due to that. So we can't just enqueue() the
+ // section: We must skip the relocations for the functionAddress
+ // in each CompactUnwindEntry.
+ // See also scanEhFrameSection() in lld/ELF/MarkLive.cpp.
+ for (ConcatInputSection *isec : in.unwindInfo->getInputs()) {
+ isec->live = true;
+ const int compactUnwindEntrySize =
+ target->wordSize == 8 ? sizeof(CompactUnwindEntry<uint64_t>)
+ : sizeof(CompactUnwindEntry<uint32_t>);
+ for (const Reloc &r : isec->relocs) {
+ // This is the relocation for the address of the function itself.
+ // Ignore it, else these would keep everything alive.
+ if (r.offset % compactUnwindEntrySize == 0)
+ continue;
+
+ if (auto *s = r.referent.dyn_cast<Symbol *>())
+ addSym(s);
+ else
+ enqueue(r.referent.get<InputSection *>(), r.addend);
+ }
+ }
+
+ do {
+ // Mark things reachable from GC roots as live.
+ while (!worklist.empty()) {
+ ConcatInputSection *s = worklist.pop_back_val();
+ assert(s->live && "We mark as live when pushing onto the worklist!");
+
+ // Mark all symbols listed in the relocation table for this section.
+ for (const Reloc &r : s->relocs) {
+ if (auto *s = r.referent.dyn_cast<Symbol *>())
+ addSym(s);
+ else
+ enqueue(r.referent.get<InputSection *>(), r.addend);
+ }
+ }
+
+ // S_ATTR_LIVE_SUPPORT sections are live if they point _to_ a live section.
+ // Process them in a second pass.
+ for (ConcatInputSection *isec : inputSections) {
+ // FIXME: Check if copying all S_ATTR_LIVE_SUPPORT sections into a
+ // separate vector and only walking that here is faster.
+ if (!(isec->getFlags() & S_ATTR_LIVE_SUPPORT) || isec->live)
+ continue;
+
+ for (const Reloc &r : isec->relocs) {
+ bool referentLive;
+ if (auto *s = r.referent.dyn_cast<Symbol *>())
+ referentLive = s->isLive();
+ else
+ referentLive = r.referent.get<InputSection *>()->isLive(r.addend);
+ if (referentLive)
+ enqueue(isec, 0);
+ }
+ }
+
+ // S_ATTR_LIVE_SUPPORT could have marked additional sections live,
+ // which in turn could mark additional S_ATTR_LIVE_SUPPORT sections live.
+ // Iterate. In practice, the second iteration won't mark additional
+ // S_ATTR_LIVE_SUPPORT sections live.
+ } while (!worklist.empty());
+}
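
A simplified, self-contained analog of the worklist discipline described at the top of markLive(): nodes are marked the moment they are pushed, so nothing is enqueued twice even in the presence of cycles. The Node type here is hypothetical:

#include <cstddef>
#include <vector>

struct Node {
  bool live = false;
  std::vector<size_t> refs; // indices of nodes this one references
};

static void markLiveFrom(std::vector<Node> &nodes, size_t root) {
  std::vector<size_t> worklist;
  auto enqueue = [&](size_t i) {
    if (nodes[i].live)
      return;
    nodes[i].live = true; // mark on push, so each node is enqueued at most once
    worklist.push_back(i);
  };
  enqueue(root);
  while (!worklist.empty()) {
    size_t i = worklist.back();
    worklist.pop_back();
    for (size_t ref : nodes[i].refs)
      enqueue(ref);
  }
}

int main() {
  std::vector<Node> nodes(4);
  nodes[0].refs = {1};
  nodes[1].refs = {0, 2}; // cycle back to 0 is harmless
  markLiveFrom(nodes, 0);
  return (nodes[0].live && nodes[1].live && nodes[2].live && !nodes[3].live)
             ? 0 : 1;
}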
+
+} // namespace macho
+} // namespace lld
--- /dev/null
+//===- MarkLive.h -----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLD_MACHO_MARKLIVE_H
+#define LLD_MACHO_MARKLIVE_H
+
+namespace lld {
+namespace macho {
+
+void markLive();
+
+} // namespace macho
+} // namespace lld
+
+#endif // LLD_MACHO_MARKLIVE_H
--- /dev/null
+//===- ObjC.cpp -----------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "ObjC.h"
+#include "InputFiles.h"
+#include "InputSection.h"
+#include "OutputSegment.h"
+#include "Target.h"
+
+#include "llvm/BinaryFormat/MachO.h"
+
+using namespace llvm;
+using namespace llvm::MachO;
+using namespace lld;
+using namespace lld::macho;
+
+template <class LP> static bool hasObjCSection(MemoryBufferRef mb) {
+ using Section = typename LP::section;
+
+ auto *hdr =
+ reinterpret_cast<const typename LP::mach_header *>(mb.getBufferStart());
+ if (hdr->magic != LP::magic)
+ return false;
+
+ if (const auto *c =
+ findCommand<typename LP::segment_command>(hdr, LP::segmentLCType)) {
+ auto sectionHeaders =
+ ArrayRef<Section>{reinterpret_cast<const Section *>(c + 1), c->nsects};
+ for (const Section &sec : sectionHeaders) {
+ StringRef sectname(sec.sectname,
+ strnlen(sec.sectname, sizeof(sec.sectname)));
+ StringRef segname(sec.segname, strnlen(sec.segname, sizeof(sec.segname)));
+ if ((segname == segment_names::data &&
+ sectname == section_names::objcCatList) ||
+ (segname == segment_names::text &&
+ sectname == section_names::swift)) {
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+bool macho::hasObjCSection(MemoryBufferRef mb) {
+ if (target->wordSize == 8)
+ return ::hasObjCSection<LP64>(mb);
+ else
+ return ::hasObjCSection<ILP32>(mb);
+}
--- /dev/null
+//===- ObjC.h ---------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLD_MACHO_OBJC_H
+#define LLD_MACHO_OBJC_H
+
+#include "llvm/Support/MemoryBuffer.h"
+
+namespace lld {
+namespace macho {
+
+namespace objc {
+
+constexpr const char klass[] = "_OBJC_CLASS_$_";
+constexpr const char metaclass[] = "_OBJC_METACLASS_$_";
+constexpr const char ehtype[] = "_OBJC_EHTYPE_$_";
+constexpr const char ivar[] = "_OBJC_IVAR_$_";
+
+} // namespace objc
+
+bool hasObjCSection(llvm::MemoryBufferRef);
+
+} // namespace macho
+} // namespace lld
+
+#endif
include "llvm/Option/OptParser.td"
-def help : Flag<["-", "--"], "help">;
+// Flags that lld/MachO understands but ld64 doesn't. These take
+// '--' instead of '-' and use dashes instead of underscores, so
+// they don't collide with the ld64 compat options.
+def grp_lld : OptionGroup<"kind">, HelpText<"LLD-SPECIFIC">;
+
+def help : Flag<["-", "--"], "help">,
+ Group<grp_lld>;
def help_hidden : Flag<["--"], "help-hidden">,
- HelpText<"Display help for hidden options">;
+ HelpText<"Display help for hidden options">,
+ Group<grp_lld>;
+def verbose : Flag<["--"], "verbose">,
+ Group<grp_lld>;
+def error_limit_eq : Joined<["--"], "error-limit=">,
+ HelpText<"Maximum number of errors to print before exiting (default: 20)">,
+ Group<grp_lld>;
+def color_diagnostics: Flag<["--"], "color-diagnostics">,
+ HelpText<"Alias for --color-diagnostics=always">,
+ Group<grp_lld>;
+def no_color_diagnostics: Flag<["--"], "no-color-diagnostics">,
+ HelpText<"Alias for --color-diagnostics=never">,
+ Group<grp_lld>;
+def color_diagnostics_eq: Joined<["--"], "color-diagnostics=">,
+ HelpText<"Use colors in diagnostics (default: auto)">,
+ MetaVarName<"[auto,always,never]">,
+ Group<grp_lld>;
+def threads_eq : Joined<["--"], "threads=">,
+ HelpText<"Number of threads. '1' disables multi-threading. By default all available hardware threads are used">,
+ Group<grp_lld>;
+def thinlto_jobs_eq : Joined<["--"], "thinlto-jobs=">,
+  HelpText<"Number of ThinLTO jobs. Defaults to --threads=">,
+ Group<grp_lld>;
+def reproduce: Separate<["--"], "reproduce">,
+ Group<grp_lld>;
+def reproduce_eq: Joined<["--"], "reproduce=">,
+ Alias<!cast<Separate>(reproduce)>,
+ HelpText<"Write tar file containing inputs and command to reproduce link">,
+ Group<grp_lld>;
+def version: Flag<["--"], "version">,
+ HelpText<"Display the version number and exit">,
+ Group<grp_lld>;
+def lto_legacy_pass_manager: Flag<["--"], "lto-legacy-pass-manager">,
+ HelpText<"Use the legacy pass manager in LLVM">,
+ Group<grp_lld>;
+def no_lto_legacy_pass_manager : Flag<["--"], "no-lto-legacy-pass-manager">,
+ HelpText<"Use the new pass manager in LLVM">,
+ Group<grp_lld>;
+def time_trace: Flag<["--"], "time-trace">, HelpText<"Record time trace">,
+ Group<grp_lld>;
+def time_trace_granularity_eq: Joined<["--"], "time-trace-granularity=">,
+ HelpText<"Minimum time granularity (in microseconds) traced by time profiler">,
+ Group<grp_lld>;
+def time_trace_file_eq: Joined<["--"], "time-trace-file=">,
+ HelpText<"Specify time trace output file">,
+ Group<grp_lld>;
+def deduplicate_literals: Flag<["--"], "deduplicate-literals">,
+ HelpText<"Enable literal deduplication. This is implied by --icf={safe,all}">,
+ Group<grp_lld>;
+def print_dylib_search: Flag<["--"], "print-dylib-search">,
+ HelpText<"Print which paths lld searched when trying to find dylibs">,
+ Group<grp_lld>;
+def icf_eq: Joined<["--"], "icf=">,
+ HelpText<"Set level for identical code folding (default: none)">,
+ MetaVarName<"[none,safe,all]">,
+ Group<grp_lld>;
+def lto_O: Joined<["--"], "lto-O">,
+ HelpText<"Set optimization level for LTO (default: 2)">,
+ MetaVarName<"<opt-level>">,
+ Group<grp_lld>;
+def thinlto_cache_policy: Joined<["--"], "thinlto-cache-policy=">,
+ HelpText<"Pruning policy for the ThinLTO cache">,
+ Group<grp_lld>;
+def O : JoinedOrSeparate<["-"], "O">,
+ HelpText<"Optimize output file size">;
// This is a complete Options.td compiled from Apple's ld(1) manpage
// dated 2018-03-07 and cross checked with ld64 source code in repo
def grp_kind : OptionGroup<"kind">, HelpText<"OUTPUT KIND">;
def execute : Flag<["-"], "execute">,
- HelpText<"Produce a main executable (default)">,
- Flags<[HelpHidden]>,
- Group<grp_kind>;
+ HelpText<"Produce a main executable (default)">,
+ Group<grp_kind>;
def dylib : Flag<["-"], "dylib">,
- HelpText<"Produce a shared library">,
- Group<grp_kind>;
+ HelpText<"Produce a shared library">,
+ Group<grp_kind>;
def bundle : Flag<["-"], "bundle">,
- HelpText<"Produce a bundle">,
- Flags<[HelpHidden]>,
- Group<grp_kind>;
+ HelpText<"Produce a bundle">,
+ Group<grp_kind>;
def r : Flag<["-"], "r">,
- HelpText<"Merge multiple object files into one, retaining relocations">,
- Flags<[HelpHidden]>,
- Group<grp_kind>;
+ HelpText<"Merge multiple object files into one, retaining relocations">,
+ Flags<[HelpHidden]>,
+ Group<grp_kind>;
def dylinker : Flag<["-"], "dylinker">,
- HelpText<"Produce a dylinker only used when building dyld">,
- Flags<[HelpHidden]>,
- Group<grp_kind>;
+  HelpText<"Produce a dylinker; only used when building dyld">,
+ Flags<[HelpHidden]>,
+ Group<grp_kind>;
def dynamic : Flag<["-"], "dynamic">,
- HelpText<"Link dynamically (default)">,
- Flags<[HelpHidden]>,
- Group<grp_kind>;
+ HelpText<"Link dynamically (default)">,
+ Group<grp_kind>;
def static : Flag<["-"], "static">,
- HelpText<"Link statically">,
- Flags<[HelpHidden]>,
- Group<grp_kind>;
+ HelpText<"Link statically">,
+ Flags<[HelpHidden]>,
+ Group<grp_kind>;
def preload : Flag<["-"], "preload">,
- HelpText<"Produce an unsegmented binary for embedded systems">,
- Flags<[HelpHidden]>,
- Group<grp_kind>;
+ HelpText<"Produce an unsegmented binary for embedded systems">,
+ Flags<[HelpHidden]>,
+ Group<grp_kind>;
def arch : Separate<["-"], "arch">,
- MetaVarName<"<arch_name>">,
- HelpText<"The architecture (e.g. ppc, ppc64, i386, x86_64)">,
- Group<grp_kind>;
+ MetaVarName<"<arch_name>">,
+ HelpText<"The architecture (e.g. ppc, ppc64, i386, x86_64)">,
+ Group<grp_kind>;
def o : Separate<["-"], "o">,
- MetaVarName<"<path>">,
- HelpText<"The name of the output file (default: `a.out')">,
- Group<grp_kind>;
+ MetaVarName<"<path>">,
+ HelpText<"The name of the output file (default: `a.out')">,
+ Group<grp_kind>;
def grp_libs : OptionGroup<"libs">, HelpText<"LIBRARIES">;
def l : Joined<["-"], "l">,
- MetaVarName<"<name>">,
- HelpText<"Search for lib<name>.dylib or lib<name>.a on the library search path">,
- Group<grp_libs>;
+ MetaVarName<"<name>">,
+ HelpText<"Search for lib<name>.dylib or lib<name>.a on the library search path">,
+ Group<grp_libs>;
def weak_l : Joined<["-"], "weak-l">,
- MetaVarName<"<name>">,
- HelpText<"Like -l<name>, but mark library and its references as weak imports">,
- Flags<[HelpHidden]>,
- Group<grp_libs>;
+ MetaVarName<"<name>">,
+ HelpText<"Like -l<name>, but mark library and its references as weak imports">,
+ Group<grp_libs>;
def weak_library : Separate<["-"], "weak_library">,
- MetaVarName<"<path>">,
- HelpText<"Like bare <path>, but mark library and its references as weak imports">,
- Flags<[HelpHidden]>,
- Group<grp_libs>;
+ MetaVarName<"<path>">,
+ HelpText<"Like bare <path>, but mark library and its references as weak imports">,
+ Group<grp_libs>;
+def needed_l : Joined<["-"], "needed-l">,
+ MetaVarName<"<name>">,
+ HelpText<"Like -l<name>, but link library even if its symbols are not used and -dead_strip_dylibs is active">,
+ Group<grp_libs>;
+def needed_library : Separate<["-"], "needed_library">,
+ MetaVarName<"<path>">,
+ HelpText<"Like bare <path>, but link library even if its symbols are not used and -dead_strip_dylibs is active">,
+ Group<grp_libs>;
def reexport_l : Joined<["-"], "reexport-l">,
- MetaVarName<"<name>">,
- HelpText<"Like -l<name>, but export all symbols of <name> from newly created library">,
- Flags<[HelpHidden]>,
- Group<grp_libs>;
+ MetaVarName<"<name>">,
+ HelpText<"Like -l<name>, but export all symbols of <name> from newly created library">,
+ Group<grp_libs>;
def reexport_library : Separate<["-"], "reexport_library">,
- MetaVarName<"<path>">,
- HelpText<"Like bare <path>, but export all symbols of <path> from newly created library">,
- Flags<[HelpHidden]>,
- Group<grp_libs>;
+ MetaVarName<"<path>">,
+ HelpText<"Like bare <path>, but export all symbols of <path> from newly created library">,
+ Group<grp_libs>;
def upward_l : Joined<["-"], "upward-l">,
- MetaVarName<"<name>">,
- HelpText<"Like -l<name>, but specify dylib as an upward dependency">,
- Flags<[HelpHidden]>,
- Group<grp_libs>;
+ MetaVarName<"<name>">,
+ HelpText<"Like -l<name>, but specify dylib as an upward dependency">,
+ Flags<[HelpHidden]>,
+ Group<grp_libs>;
def upward_library : Separate<["-"], "upward_library">,
- MetaVarName<"<path>">,
- HelpText<"Like bare <path>, but specify dylib as an upward dependency">,
- Flags<[HelpHidden]>,
- Group<grp_libs>;
+ MetaVarName<"<path>">,
+ HelpText<"Like bare <path>, but specify dylib as an upward dependency">,
+ Flags<[HelpHidden]>,
+ Group<grp_libs>;
def L : JoinedOrSeparate<["-"], "L">,
- MetaVarName<"<dir>">,
- HelpText<"Add dir to the library search path">,
- Group<grp_libs>;
+ MetaVarName<"<dir>">,
+ HelpText<"Add dir to the library search path">,
+ Group<grp_libs>;
def Z : Flag<["-"], "Z">,
- HelpText<"Remove standard directories from the library and framework search paths">,
- Group<grp_libs>;
+ HelpText<"Remove standard directories from the library and framework search paths">,
+ Group<grp_libs>;
def syslibroot : Separate<["-"], "syslibroot">,
- MetaVarName<"<rootdir>">,
- HelpText<"Prepend <rootdir> to all library and framework search paths">,
- Flags<[HelpHidden]>,
- Group<grp_libs>;
+ MetaVarName<"<rootdir>">,
+ HelpText<"Prepend <rootdir> to all library and framework search paths">,
+ Group<grp_libs>;
def search_paths_first : Flag<["-"], "search_paths_first">,
- HelpText<"Search for lib<name>.dylib and lib<name>.a at each step in traversing search path (default for Xcode 4 and later)">,
- Flags<[HelpHidden]>,
- Group<grp_libs>;
+ HelpText<"Search for lib<name>.dylib and lib<name>.a at each step in traversing search path (default for Xcode 4 and later)">,
+ Group<grp_libs>;
def search_dylibs_first : Flag<["-"], "search_dylibs_first">,
- HelpText<"Search for lib<name>.dylib on first pass, then for lib<name>.a on second pass through search path (default for Xcode 3 and earlier)">,
- Flags<[HelpHidden]>,
- Group<grp_libs>;
+ HelpText<"Search for lib<name>.dylib on first pass, then for lib<name>.a on second pass through search path (default for Xcode 3 and earlier)">,
+ Group<grp_libs>;
def framework : Separate<["-"], "framework">,
- MetaVarName<"<name>">,
- HelpText<"Search for <name>.framework/<name> on the framework search path">,
- Flags<[HelpHidden]>,
- Group<grp_libs>;
+ MetaVarName<"<name>">,
+ HelpText<"Search for <name>.framework/<name> on the framework search path">,
+ Group<grp_libs>;
def weak_framework : Separate<["-"], "weak_framework">,
- MetaVarName<"<name>">,
- HelpText<"Like -framework <name>, but mark framework and its references as weak imports">,
- Flags<[HelpHidden]>,
- Group<grp_libs>;
+ MetaVarName<"<name>">,
+ HelpText<"Like -framework <name>, but mark framework and its references as weak imports">,
+ Group<grp_libs>;
+def needed_framework : Separate<["-"], "needed_framework">,
+ MetaVarName<"<name>">,
+ HelpText<"Like -framework <name>, but link <name> even if none of its symbols are used and -dead_strip_dylibs is active">,
+ Group<grp_libs>;
def reexport_framework : Separate<["-"], "reexport_framework">,
- MetaVarName<"<name>">,
- HelpText<"Like -framework <name>, but export all symbols of <name> from the newly created library">,
- Flags<[HelpHidden]>,
- Group<grp_libs>;
+ MetaVarName<"<name>">,
+ HelpText<"Like -framework <name>, but export all symbols of <name> from the newly created library">,
+ Group<grp_libs>;
def upward_framework : Separate<["-"], "upward_framework">,
- MetaVarName<"<name>">,
- HelpText<"Like -framework <name>, but specify the framework as an upward dependency">,
- Flags<[HelpHidden]>,
- Group<grp_libs>;
+ MetaVarName<"<name>">,
+ HelpText<"Like -framework <name>, but specify the framework as an upward dependency">,
+ Flags<[HelpHidden]>,
+ Group<grp_libs>;
def F : JoinedOrSeparate<["-"], "F">,
- MetaVarName<"<dir>">,
- HelpText<"Add dir to the framework search path">,
- Flags<[HelpHidden]>,
- Group<grp_libs>;
+ MetaVarName<"<dir>">,
+ HelpText<"Add dir to the framework search path">,
+ Group<grp_libs>;
def all_load : Flag<["-"], "all_load">,
- HelpText<"Load all members of all static archive libraries">,
- Flags<[HelpHidden]>,
- Group<grp_libs>;
+ HelpText<"Load all members of all static archive libraries">,
+ Group<grp_libs>;
def ObjC : Flag<["-"], "ObjC">,
- HelpText<"Load all members of static archives that are an Objective-C class or category.">,
- Flags<[HelpHidden]>,
- Group<grp_libs>;
+ HelpText<"Load all members of static archives that are an Objective-C class or category.">,
+ Group<grp_libs>;
def force_load : Separate<["-"], "force_load">,
- MetaVarName<"<path>">,
- HelpText<"Load all members static archive library at <path>">,
- Flags<[HelpHidden]>,
- Group<grp_libs>;
+ MetaVarName<"<path>">,
+  HelpText<"Load all members of the static archive library at <path>">,
+ Group<grp_libs>;
+def force_load_swift_libs : Flag<["-"], "force_load_swift_libs">,
+ HelpText<"Apply -force_load to libraries listed in LC_LINKER_OPTIONS whose names start with 'swift'">,
+ Group<grp_libs>;
def grp_content : OptionGroup<"content">, HelpText<"ADDITIONAL CONTENT">;
def sectcreate : MultiArg<["-"], "sectcreate", 3>,
- MetaVarName<"<segment> <section> <file>">,
- HelpText<"Create <section> in <segment> from the contents of <file>">,
- Flags<[HelpHidden]>,
- Group<grp_content>;
+ MetaVarName<"<segment> <section> <file>">,
+ HelpText<"Create <section> in <segment> from the contents of <file>">,
+ Group<grp_content>;
def segcreate : MultiArg<["-"], "segcreate", 3>,
- MetaVarName<"<segment> <section> <file>">,
- Alias<sectcreate>,
- HelpText<"Alias for -sectcreate">,
- Flags<[HelpHidden]>,
- Group<grp_content>;
+ MetaVarName<"<segment> <section> <file>">,
+ Alias<sectcreate>,
+ HelpText<"Alias for -sectcreate">,
+ Group<grp_content>;
def filelist : Separate<["-"], "filelist">,
- MetaVarName<"<file>">,
- HelpText<"Read names of files to link from <file>">,
- Flags<[HelpHidden]>,
- Group<grp_content>;
+ MetaVarName<"<file>">,
+ HelpText<"Read names of files to link from <file>">,
+ Group<grp_content>;
def dtrace : Separate<["-"], "dtrace">,
- MetaVarName<"<script>">,
- HelpText<"Enable DTrace static probes according to declarations in <script>">,
- Flags<[HelpHidden]>,
- Group<grp_content>;
+ MetaVarName<"<script>">,
+ HelpText<"Enable DTrace static probes according to declarations in <script>">,
+ Flags<[HelpHidden]>,
+ Group<grp_content>;
def grp_opts : OptionGroup<"opts">, HelpText<"OPTIMIZATIONS">;
def dead_strip : Flag<["-"], "dead_strip">,
- HelpText<"Remove unreachable functions and data">,
- Flags<[HelpHidden]>,
- Group<grp_opts>;
+ HelpText<"Remove unreachable functions and data">,
+ Group<grp_opts>;
def order_file : Separate<["-"], "order_file">,
- MetaVarName<"<file>">,
- HelpText<"Layout functions and data according to specification in <file>">,
- Flags<[HelpHidden]>,
- Group<grp_opts>;
-def sectorder : MultiArg<["-"], "sectorder", 3>,
- MetaVarName<"<segname> <sectname> <orderfile>">,
- HelpText<"Replaced by more general -order_file option">,
- Flags<[HelpHidden]>,
- Group<grp_opts>;
+ MetaVarName<"<file>">,
+  HelpText<"Lay out functions and data according to the specification in <file>">,
+ Group<grp_opts>;
def no_order_inits : Flag<["-"], "no_order_inits">,
- HelpText<"Disable default reordering of initializer and terminator functions">,
- Flags<[HelpHidden]>,
- Group<grp_opts>;
+ HelpText<"Disable default reordering of initializer and terminator functions">,
+ Flags<[HelpHidden]>,
+ Group<grp_opts>;
def no_order_data : Flag<["-"], "no_order_data">,
- HelpText<"Disable default reordering of global data accessed at launch time">,
- Flags<[HelpHidden]>,
- Group<grp_opts>;
-def platform_version : MultiArg<["-"], "platform_version", 3>,
- MetaVarName<"<platform> <min_version> <sdk_version>">,
- HelpText<"Platform (e.g., macos, ios, tvos, watchos, bridgeos, mac-catalyst, ios-sim, tvos-sim, watchos-sim, driverkit) and version numbers">,
- Flags<[HelpHidden]>,
- Group<grp_opts>;
-def macos_version_min : Separate<["-"], "macos_version_min">,
- MetaVarName<"<version>">,
- HelpText<"Oldest macOS version for which linked output is useable">,
- Flags<[HelpHidden]>,
- Group<grp_opts>;
-def macosx_version_min : Separate<["-"], "macosx_version_min">,
- MetaVarName<"<version>">,
- Alias<macos_version_min>,
- HelpText<"Alias for -macos_version_min">,
- Flags<[HelpHidden]>,
- Group<grp_opts>;
-def ios_version_min : Separate<["-"], "ios_version_min">,
- MetaVarName<"<version>">,
- HelpText<"Oldest iOS version for which linked output is useable">,
- Flags<[HelpHidden]>,
- Group<grp_opts>;
-def iphoneos_version_min : Separate<["-"], "iphoneos_version_min">,
- MetaVarName<"<version>">,
- Alias<ios_version_min>,
- HelpText<"Alias for -ios_version_min">,
- Flags<[HelpHidden]>,
- Group<grp_opts>;
+ HelpText<"Disable default reordering of global data accessed at launch time">,
+ Flags<[HelpHidden]>,
+ Group<grp_opts>;
def image_base : Separate<["-"], "image_base">,
- MetaVarName<"<address>">,
- HelpText<"Preferred hex load address for a dylib or bundle.">,
- Flags<[HelpHidden]>,
- Group<grp_opts>;
+ MetaVarName<"<address>">,
+ HelpText<"Preferred hex load address for a dylib or bundle.">,
+ Flags<[HelpHidden]>,
+ Group<grp_opts>;
def seg1addr : Separate<["-"], "seg1addr">,
- MetaVarName<"<address>">,
- Alias<image_base>,
- HelpText<"Alias for -image_base">,
- Flags<[HelpHidden]>,
- Group<grp_opts>;
+ MetaVarName<"<address>">,
+ Alias<image_base>,
+ HelpText<"Alias for -image_base">,
+ Flags<[HelpHidden]>,
+ Group<grp_opts>;
def no_implicit_dylibs : Flag<["-"], "no_implicit_dylibs">,
- HelpText<"Do not optimize public dylib transitive symbol references">,
- Flags<[HelpHidden]>,
- Group<grp_opts>;
+ HelpText<"Do not optimize public dylib transitive symbol references">,
+ Group<grp_opts>;
def exported_symbols_order : Separate<["-"], "exported_symbols_order">,
- MetaVarName<"<file>">,
- HelpText<"Specify frequently-used symbols in <file> to optimize symbol exports">,
- Flags<[HelpHidden]>,
- Group<grp_opts>;
+ MetaVarName<"<file>">,
+ HelpText<"Specify frequently-used symbols in <file> to optimize symbol exports">,
+ Flags<[HelpHidden]>,
+ Group<grp_opts>;
def no_zero_fill_sections : Flag<["-"], "no_zero_fill_sections">,
- HelpText<"Explicitly store zeroed data in the final image">,
- Flags<[HelpHidden]>,
- Group<grp_opts>;
+ HelpText<"Explicitly store zeroed data in the final image">,
+ Flags<[HelpHidden]>,
+ Group<grp_opts>;
def merge_zero_fill_sections : Flag<["-"], "merge_zero_fill_sections">,
- HelpText<"Merge all zeroed data into the __zerofill section">,
- Flags<[HelpHidden]>,
- Group<grp_opts>;
+ HelpText<"Merge all zeroed data into the __zerofill section">,
+ Flags<[HelpHidden]>,
+ Group<grp_opts>;
def no_branch_islands : Flag<["-"], "no_branch_islands">,
- HelpText<"Disable infra for branches beyond the maximum branch distance.">,
- Flags<[HelpHidden]>,
- Group<grp_opts>;
+ HelpText<"Disable infra for branches beyond the maximum branch distance.">,
+ Flags<[HelpHidden]>,
+ Group<grp_opts>;
+def no_deduplicate : Flag<["-"], "no_deduplicate">,
+  HelpText<"Disable code deduplication (synonym for `--icf=none')">,
+ Group<grp_opts>;
+
+def grp_version : OptionGroup<"version">, HelpText<"VERSION TARGETING">;
+
+def platform_version : MultiArg<["-"], "platform_version", 3>,
+ MetaVarName<"<platform> <min_version> <sdk_version>">,
+ HelpText<"Platform (e.g., macos, ios, tvos, watchos, bridgeos, mac-catalyst, ios-sim, tvos-sim, watchos-sim, driverkit) and version numbers">,
+ Group<grp_version>;
+def sdk_version : Separate<["-"], "sdk_version">,
+ HelpText<"This option is undocumented in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_version>;
+def macos_version_min : Separate<["-"], "macos_version_min">,
+ MetaVarName<"<version>">,
+ HelpText<"Oldest macOS version for which linked output is usable">,
+ Group<grp_version>;
+def macosx_version_min : Separate<["-"], "macosx_version_min">,
+ MetaVarName<"<version>">,
+ Alias<macos_version_min>,
+ HelpText<"Alias for -macos_version_min">,
+ Flags<[HelpHidden]>,
+ Group<grp_version>;
+def ios_version_min : Separate<["-"], "ios_version_min">,
+ MetaVarName<"<version>">,
+ HelpText<"Oldest iOS version for which linked output is usable">,
+ Flags<[HelpHidden]>,
+ Group<grp_version>;
+def ios_simulator_version_min : Separate<["-"], "ios_simulator_version_min">,
+ MetaVarName<"<version>">,
+ HelpText<"Oldest iOS simulator version for which linked output is usable">,
+ Flags<[HelpHidden]>,
+ Group<grp_version>;
+def iphoneos_version_min : Separate<["-"], "iphoneos_version_min">,
+ MetaVarName<"<version>">,
+ Alias<ios_version_min>,
+ HelpText<"Alias for -ios_version_min">,
+ Flags<[HelpHidden]>,
+ Group<grp_version>;
+def maccatalyst_version_min : Separate<["-"], "maccatalyst_version_min">,
+ MetaVarName<"<version>">,
+ HelpText<"Oldest MacCatalyst version for which linked output is usable">,
+ Flags<[HelpHidden]>,
+ Group<grp_version>;
+def iosmac_version_min : Separate<["-"], "iosmac_version_min">,
+ MetaVarName<"<version>">,
+ Alias<maccatalyst_version_min>,
+ HelpText<"Alias for -maccatalyst_version_min">,
+ Flags<[HelpHidden]>,
+ Group<grp_version>;
+def uikitformac_version_min : Separate<["-"], "uikitformac_version_min">,
+ MetaVarName<"<version>">,
+ Alias<maccatalyst_version_min>,
+ HelpText<"Alias for -maccatalyst_version_min">,
+ Flags<[HelpHidden]>,
+ Group<grp_version>;
+def tvos_version_min : Separate<["-"], "tvos_version_min">,
+ MetaVarName<"<version>">,
+ HelpText<"Oldest tvOS version for which linked output is usable">,
+ Flags<[HelpHidden]>,
+ Group<grp_version>;
+def watchos_version_min : Separate<["-"], "watchos_version_min">,
+ MetaVarName<"<version>">,
+ HelpText<"Oldest watchOS version for which linked output is usable">,
+ Flags<[HelpHidden]>,
+ Group<grp_version>;
+def bridgeos_version_min : Separate<["-"], "bridgeos_version_min">,
+ MetaVarName<"<version>">,
+ HelpText<"Oldest bridgeOS version for which linked output is usable">,
+ Flags<[HelpHidden]>,
+ Group<grp_version>;
+def driverkit_version_min : Separate<["-"], "driverkit_version_min">,
+ MetaVarName<"<version>">,
+ HelpText<"Oldest DriverKit version for which linked output is usable">,
+ Flags<[HelpHidden]>,
+ Group<grp_version>;
def grp_dylib : OptionGroup<"dylib">, HelpText<"DYNAMIC LIBRARIES (DYLIB)">;
def install_name : Separate<["-"], "install_name">,
- MetaVarName<"<name>">,
- HelpText<"Set an internal install path in a dylib">,
- Group<grp_dylib>;
+ MetaVarName<"<name>">,
+ HelpText<"Set an internal install path in a dylib">,
+ Group<grp_dylib>;
def dylib_install_name : Separate<["-"], "dylib_install_name">,
- MetaVarName<"<name>">,
- Alias<install_name>,
- HelpText<"Alias for -install_name">,
- Flags<[HelpHidden]>,
- Group<grp_dylib>;
+ MetaVarName<"<name>">,
+ Alias<install_name>,
+ HelpText<"Alias for -install_name">,
+ Group<grp_dylib>;
def dylinker_install_name : Separate<["-"], "dylinker_install_name">,
- MetaVarName<"<name>">,
- Alias<install_name>,
- HelpText<"Alias for -install_name">,
- Flags<[HelpHidden]>,
- Group<grp_dylib>;
+ MetaVarName<"<name>">,
+ Alias<install_name>,
+ HelpText<"Alias for -install_name">,
+ Group<grp_dylib>;
def mark_dead_strippable_dylib : Flag<["-"], "mark_dead_strippable_dylib">,
- HelpText<"Clients can discard this dylib if it is unreferenced">,
- Flags<[HelpHidden]>,
- Group<grp_dylib>;
+ HelpText<"Mark output dylib as dead-strippable: When a client links against it but does not use any of its symbols, the dylib will not be added to the client's list of needed dylibs">,
+ Group<grp_dylib>;
def compatibility_version : Separate<["-"], "compatibility_version">,
- MetaVarName<"<version>">,
- HelpText<"Compatibility <version> of this library">,
- Flags<[HelpHidden]>,
- Group<grp_dylib>;
+ MetaVarName<"<version>">,
+ HelpText<"Compatibility <version> of this library">,
+ Group<grp_dylib>;
def dylib_compatibility_version : Separate<["-"], "dylib_compatibility_version">,
- MetaVarName<"<version>">,
- Alias<compatibility_version>,
- HelpText<"Alias for -compatibility_version">,
- Flags<[HelpHidden]>,
- Group<grp_dylib>;
+ MetaVarName<"<version>">,
+ Alias<compatibility_version>,
+ HelpText<"Alias for -compatibility_version">,
+ Flags<[HelpHidden]>,
+ Group<grp_dylib>;
def current_version : Separate<["-"], "current_version">,
- MetaVarName<"<version>">,
- HelpText<"Current <version> of this library">,
- Flags<[HelpHidden]>,
- Group<grp_dylib>;
+ MetaVarName<"<version>">,
+ HelpText<"Current <version> of this library">,
+ Group<grp_dylib>;
def dylib_current_version : Separate<["-"], "dylib_current_version">,
- MetaVarName<"<version>">,
- Alias<current_version>,
- HelpText<"Alias for -current_version">,
- Flags<[HelpHidden]>,
- Group<grp_dylib>;
+ MetaVarName<"<version>">,
+ Alias<current_version>,
+ HelpText<"Alias for -current_version">,
+ Flags<[HelpHidden]>,
+ Group<grp_dylib>;
def grp_main : OptionGroup<"main">, HelpText<"MAIN EXECUTABLE">;
def pie : Flag<["-"], "pie">,
- HelpText<"Build a position independent executable (default for macOS 10.7 and later)">,
- Flags<[HelpHidden]>,
- Group<grp_main>;
+ HelpText<"Build a position independent executable (default)">,
+ Group<grp_main>;
def no_pie : Flag<["-"], "no_pie">,
- HelpText<"Do not build a position independent executable (default for macOS 10.6 and earlier)">,
- Flags<[HelpHidden]>,
- Group<grp_main>;
+ HelpText<"Do not build a position independent executable">,
+ Group<grp_main>;
def pagezero_size : Separate<["-"], "pagezero_size">,
- MetaVarName<"<size>">,
- HelpText<"Size of unreadable segment at address zero is hex <size> (default is 4KB on 32-bit and 4GB on 64-bit)">,
- Flags<[HelpHidden]>,
- Group<grp_main>;
+ MetaVarName<"<size>">,
+ HelpText<"Size of unreadable segment at address zero is hex <size> (default is 4KB on 32-bit and 4GB on 64-bit)">,
+ Flags<[HelpHidden]>,
+ Group<grp_main>;
def stack_size : Separate<["-"], "stack_size">,
- MetaVarName<"<size>">,
- HelpText<"Maximum hex stack size for the main thread in a program. (default is 8MB)">,
- Flags<[HelpHidden]>,
- Group<grp_main>;
+ MetaVarName<"<size>">,
+  HelpText<"Maximum hex stack size for the main thread in a program (default is 8MB)">,
+ Flags<[HelpHidden]>,
+ Group<grp_main>;
def allow_stack_execute : Flag<["-"], "allow_stack_execute">,
- HelpText<"Mark stack segment as executable">,
- Flags<[HelpHidden]>,
- Group<grp_main>;
+ HelpText<"Mark stack segment as executable">,
+ Flags<[HelpHidden]>,
+ Group<grp_main>;
def export_dynamic : Flag<["-"], "export_dynamic">,
- HelpText<"Preserve all global symbols during LTO">,
- Flags<[HelpHidden]>,
- Group<grp_main>;
+ HelpText<"Preserve all global symbols during LTO and when dead-stripping executables">,
+ Group<grp_main>;
def grp_bundle : OptionGroup<"bundle">, HelpText<"CREATING A BUNDLE">;
def bundle_loader : Separate<["-"], "bundle_loader">,
- MetaVarName<"<executable>">,
- HelpText<"Resolve undefined symbols from <executable>">,
- Flags<[HelpHidden]>,
- Group<grp_bundle>;
+ MetaVarName<"<executable>">,
+ HelpText<"Resolve undefined symbols from <executable>">,
+ Group<grp_bundle>;
def grp_object : OptionGroup<"object">, HelpText<"CREATING AN OBJECT FILE">;
def keep_private_externs : Flag<["-"], "keep_private_externs">,
- HelpText<"Do not convert private external symbols to static symbols">,
- Flags<[HelpHidden]>,
- Group<grp_object>;
+ HelpText<"Do not convert private external symbols to static symbols (only valid with -r)">,
+ Flags<[HelpHidden]>,
+ Group<grp_object>;
def d : Flag<["-"], "d">,
- HelpText<"Force tentative into real definitions for common symbols">,
- Flags<[HelpHidden]>,
- Group<grp_object>;
+  HelpText<"Force tentative definitions of common symbols into real definitions">,
+ Flags<[HelpHidden]>,
+ Group<grp_object>;
def grp_resolve : OptionGroup<"resolve">, HelpText<"SYMBOL RESOLUTION">;
-def exported_symbols_list : Separate<["-"], "exported_symbols_list">,
- MetaVarName<"<file>">,
- HelpText<"Symbols specified in <file> remain global, while others become private externs">,
- Flags<[HelpHidden]>,
- Group<grp_resolve>;
def exported_symbol : Separate<["-"], "exported_symbol">,
- MetaVarName<"<symbol>">,
- HelpText<"<symbol> remains global, while others become private externs">,
- Flags<[HelpHidden]>,
- Group<grp_resolve>;
-def unexported_symbols_list : Separate<["-"], "unexported_symbols_list">,
- MetaVarName<"<file>">,
- HelpText<"Global symbols specified in <file> become private externs">,
- Flags<[HelpHidden]>,
- Group<grp_resolve>;
+ MetaVarName<"<symbol>">,
+ HelpText<"<symbol> remains global, while others become private externs">,
+ Group<grp_resolve>;
+def exported_symbols_list : Separate<["-"], "exported_symbols_list">,
+ MetaVarName<"<file>">,
+ HelpText<"Symbols specified in <file> remain global, while others become private externs">,
+ Group<grp_resolve>;
def unexported_symbol : Separate<["-"], "unexported_symbol">,
- MetaVarName<"<symbol>">,
- HelpText<"Global <symbol> becomes private extern">,
- Flags<[HelpHidden]>,
- Group<grp_resolve>;
+ MetaVarName<"<symbol>">,
+ HelpText<"Global <symbol> becomes private extern">,
+ Group<grp_resolve>;
+def unexported_symbols_list : Separate<["-"], "unexported_symbols_list">,
+ MetaVarName<"<file>">,
+ HelpText<"Global symbols specified in <file> become private externs">,
+ Group<grp_resolve>;
def reexported_symbols_list : Separate<["-"], "reexported_symbols_list">,
- MetaVarName<"<file>">,
- HelpText<"Symbols from dependent dylibs specified in <file> are reexported by this dylib">,
- Flags<[HelpHidden]>,
- Group<grp_resolve>;
+ MetaVarName<"<file>">,
+ HelpText<"Symbols from dependent dylibs specified in <file> are reexported by this dylib">,
+ Flags<[HelpHidden]>,
+ Group<grp_resolve>;
def alias : MultiArg<["-"], "alias", 2>,
- MetaVarName<"<symbol_name> <alternate_name>">,
- HelpText<"Create a symbol alias with default global visibility">,
- Flags<[HelpHidden]>,
- Group<grp_resolve>;
+ MetaVarName<"<symbol_name> <alternate_name>">,
+ HelpText<"Create a symbol alias with default global visibility">,
+ Flags<[HelpHidden]>,
+ Group<grp_resolve>;
def alias_list : Separate<["-"], "alias_list">,
- MetaVarName<"<file>">,
- HelpText<"Create symbol aliases specified in <file>">,
- Flags<[HelpHidden]>,
- Group<grp_resolve>;
+ MetaVarName<"<file>">,
+ HelpText<"Create symbol aliases specified in <file>">,
+ Flags<[HelpHidden]>,
+ Group<grp_resolve>;
def flat_namespace : Flag<["-"], "flat_namespace">,
- HelpText<"Resolve symbols from all dylibs, both direct & transitive. Do not record source libraries: dyld must re-search at runtime and use the first definition found">,
- Flags<[HelpHidden]>,
- Group<grp_resolve>;
+ HelpText<"Resolve symbols from all dylibs, both direct and transitive. Do not record source libraries: dyld must re-search at runtime and use the first definition found">,
+ Group<grp_resolve>;
+def twolevel_namespace : Flag<["-"], "twolevel_namespace">,
+ HelpText<"Make dyld look up symbols by (dylib,name) pairs (default)">,
+ Group<grp_resolve>;
def u : Separate<["-"], "u">,
- MetaVarName<"<symbol>">,
- HelpText<"Require that <symbol> be defined for the link to succeed">,
- Flags<[HelpHidden]>,
- Group<grp_resolve>;
+ MetaVarName<"<symbol>">,
+ HelpText<"Require that <symbol> be defined for the link to succeed">,
+ Group<grp_resolve>;
def U : Separate<["-"], "U">,
- MetaVarName<"<symbol>">,
- HelpText<"Allow <symbol> to have no definition">,
- Flags<[HelpHidden]>,
- Group<grp_resolve>;
+ MetaVarName<"<symbol>">,
+ HelpText<"Allow <symbol> to have no definition">,
+ Group<grp_resolve>;
def undefined : Separate<["-"], "undefined">,
- MetaVarName<"<treatment>">,
- HelpText<"Handle undefined symbols according to <treatment>: error, warning, suppress, or dynamic_lookup (default is error)">,
- Flags<[HelpHidden]>,
- Group<grp_resolve>;
+ MetaVarName<"<treatment>">,
+ HelpText<"Handle undefined symbols according to <treatment>: error, warning, suppress, or dynamic_lookup (default is error)">,
+ Group<grp_resolve>;
def rpath : Separate<["-"], "rpath">,
- MetaVarName<"<path>">,
- HelpText<"Add <path> to dyld search list for dylibs with load path prefix `@rpath/'">,
- Flags<[HelpHidden]>,
- Group<grp_resolve>;
+ MetaVarName<"<path>">,
+ HelpText<"Add <path> to dyld search list for dylibs with load path prefix `@rpath/'">,
+ Group<grp_resolve>;
def commons : Separate<["-"], "commons">,
- MetaVarName<"<treatment>">,
- HelpText<"Resolve tentative definitions in dylibs according to <treatment>: ignore_dylibs, use_dylibs, error (default is ignore_dylibs)">,
- Flags<[HelpHidden]>,
- Group<grp_resolve>;
+ MetaVarName<"<treatment>">,
+ HelpText<"Resolve tentative definitions in dylibs according to <treatment>: ignore_dylibs, use_dylibs, error (default is ignore_dylibs)">,
+ Flags<[HelpHidden]>,
+ Group<grp_resolve>;
def grp_introspect : OptionGroup<"introspect">, HelpText<"INTROSPECTING THE LINKER">;
def why_load : Flag<["-"], "why_load">,
- HelpText<"Log the symbol that compels loading of each object file from a static library">,
- Flags<[HelpHidden]>,
- Group<grp_introspect>;
+ HelpText<"Log why each object file is loaded from a static library">,
+ Group<grp_introspect>;
def whyload : Flag<["-"], "whyload">,
- Alias<why_load>,
- HelpText<"Alias for -why_load">,
- Flags<[HelpHidden]>,
- Group<grp_introspect>;
+ Alias<why_load>,
+ HelpText<"Alias for -why_load">,
+ Group<grp_introspect>;
def why_live : Separate<["-"], "why_live">,
- MetaVarName<"<symbol>">,
- HelpText<"Log a chain of references to <symbol>, for use with -dead_strip">,
- Flags<[HelpHidden]>,
- Group<grp_introspect>;
+ MetaVarName<"<symbol>">,
+ HelpText<"Log a chain of references to <symbol>, for use with -dead_strip">,
+ Flags<[HelpHidden]>,
+ Group<grp_introspect>;
def print_statistics : Flag<["-"], "print_statistics">,
- HelpText<"Log the linker's memory and CPU usage">,
- Flags<[HelpHidden]>,
- Group<grp_introspect>;
+ HelpText<"Log the linker's memory and CPU usage">,
+ Flags<[HelpHidden]>,
+ Group<grp_introspect>;
def t : Flag<["-"], "t">,
- HelpText<"Log every file the linker loads: object, archive, and dylib">,
- Flags<[HelpHidden]>,
- Group<grp_introspect>;
+ HelpText<"Log every file the linker loads: object, archive, and dylib">,
+ Group<grp_introspect>;
def whatsloaded : Flag<["-"], "whatsloaded">,
- HelpText<"Logs only the object files the linker loads">,
- Flags<[HelpHidden]>,
- Group<grp_introspect>;
+ HelpText<"Logs only the object files the linker loads">,
+ Flags<[HelpHidden]>,
+ Group<grp_introspect>;
def order_file_statistics : Flag<["-"], "order_file_statistics">,
- HelpText<"Logs information about -order_file">,
- Flags<[HelpHidden]>,
- Group<grp_introspect>;
+ HelpText<"Logs information about -order_file">,
+ Flags<[HelpHidden]>,
+ Group<grp_introspect>;
def map : Separate<["-"], "map">,
- MetaVarName<"<path>">,
- HelpText<"Writes all symbols and their addresses to <path>">,
- Flags<[HelpHidden]>,
- Group<grp_introspect>;
+ MetaVarName<"<path>">,
+ HelpText<"Writes all symbols and their addresses to <path>">,
+ Group<grp_introspect>;
+def dependency_info : Separate<["-"], "dependency_info">,
+ MetaVarName<"<path>">,
+ HelpText<"Dump dependency info">,
+ Group<grp_introspect>;
+def save_temps : Flag<["-"], "save-temps">,
+ HelpText<"Save intermediate LTO compilation results">,
+ Group<grp_introspect>;
-def grp_symtab : OptionGroup<"symtab">, HelpText<"SYMBOL TABLE OPTIMIZATIONS">;
+def grp_symtab : OptionGroup<"symtab">, HelpText<"SYMBOL TABLE">;
def S : Flag<["-"], "S">,
- HelpText<"Strip debug information (STABS or DWARF) from the output">,
- Flags<[HelpHidden]>,
- Group<grp_symtab>;
+ HelpText<"Strip debug information (STABS or DWARF) from the output">,
+ Flags<[HelpHidden]>,
+ Group<grp_symtab>;
def x : Flag<["-"], "x">,
- HelpText<"Exclude non-global symbols from the output symbol table">,
- Flags<[HelpHidden]>,
- Group<grp_symtab>;
+ HelpText<"Exclude non-global symbols from the output symbol table">,
+ Flags<[HelpHidden]>,
+ Group<grp_symtab>;
def non_global_symbols_strip_list : Separate<["-"], "non_global_symbols_strip_list">,
- MetaVarName<"<path>">,
- HelpText<"Specify in <path> the non-global symbols that should be removed from the output symbol table">,
- Flags<[HelpHidden]>,
- Group<grp_symtab>;
+ MetaVarName<"<path>">,
+ HelpText<"Specify in <path> the non-global symbols that should be removed from the output symbol table">,
+ Flags<[HelpHidden]>,
+ Group<grp_symtab>;
def non_global_symbols_no_strip_list : Separate<["-"], "non_global_symbols_no_strip_list">,
- MetaVarName<"<path>">,
- HelpText<"Specify in <path> the non-global symbols that should remain in the output symbol table">,
- Flags<[HelpHidden]>,
- Group<grp_symtab>;
+ MetaVarName<"<path>">,
+ HelpText<"Specify in <path> the non-global symbols that should remain in the output symbol table">,
+ Flags<[HelpHidden]>,
+ Group<grp_symtab>;
def oso_prefix : Separate<["-"], "oso_prefix">,
- MetaVarName<"<path>">,
- HelpText<"Remove the prefix <path> from OSO symbols in the debug map">,
- Flags<[HelpHidden]>,
- Group<grp_symtab>;
+ MetaVarName<"<path>">,
+ HelpText<"Remove the prefix <path> from OSO symbols in the debug map">,
+ Flags<[HelpHidden]>,
+ Group<grp_symtab>;
+def add_ast_path : Separate<["-"], "add_ast_path">,
+ MetaVarName<"<path>">,
+ HelpText<"AST paths will be emitted as STABS">,
+ Group<grp_symtab>;
def grp_bitcode : OptionGroup<"bitcode">, HelpText<"BITCODE BUILD FLOW">;
def bitcode_bundle : Flag<["-"], "bitcode_bundle">,
- HelpText<"Generate an embedded bitcode bundle in the __LLVM,__bundle section of the output">,
- Flags<[HelpHidden]>,
- Group<grp_bitcode>;
+ HelpText<"Generate an embedded bitcode bundle in the __LLVM,__bundle section of the output">,
+ Group<grp_bitcode>;
def bitcode_hide_symbols : Flag<["-"], "bitcode_hide_symbols">,
- HelpText<"With -bitcode_bundle, hide all non-exported symbols from output bitcode bundle.">,
- Flags<[HelpHidden]>,
- Group<grp_bitcode>;
+ HelpText<"With -bitcode_bundle, hide all non-exported symbols from output bitcode bundle.">,
+ Flags<[HelpHidden]>,
+ Group<grp_bitcode>;
def bitcode_symbol_map : Separate<["-"], "bitcode_symbol_map">,
- MetaVarName<"<path>">,
- HelpText<"Write the bitcode symbol reverse mapping to file <path>, or if a directory, to <path>/UUID.bcsymbolmap">,
- Flags<[HelpHidden]>,
- Group<grp_bitcode>;
+ MetaVarName<"<path>">,
+ HelpText<"Write the bitcode symbol reverse mapping to file <path>, or if a directory, to <path>/UUID.bcsymbolmap">,
+ Flags<[HelpHidden]>,
+ Group<grp_bitcode>;
def grp_rare : OptionGroup<"rare">, HelpText<"RARELY USED">;
def v : Flag<["-"], "v">,
- HelpText<"Print the linker version">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ HelpText<"Print the linker version and search paths in addition to linking">,
+ Group<grp_rare>;
+def adhoc_codesign : Flag<["-"], "adhoc_codesign">,
+ HelpText<"Write an ad-hoc code signature to the output file (default for arm64 binaries)">,
+ Group<grp_rare>;
+def no_adhoc_codesign : Flag<["-"], "no_adhoc_codesign">,
+ HelpText<"Do not write an ad-hoc code signature to the output file (default for x86_64 binaries)">,
+ Group<grp_rare>;
def version_details : Flag<["-"], "version_details">,
- HelpText<"Print the linker version in JSON form">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ HelpText<"Print the linker version in JSON form">,
+ Flags<[HelpHidden]>,
+ Group<grp_rare>;
def no_weak_imports : Flag<["-"], "no_weak_imports">,
- HelpText<"Fail if any symbols are weak imports, allowed to be NULL at runtime">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
-def no_deduplicate : Flag<["-"], "no_deduplicate">,
- HelpText<"Omit the deduplication pass">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ HelpText<"Fail if any symbols are weak imports, allowed to be NULL at runtime">,
+ Flags<[HelpHidden]>,
+ Group<grp_rare>;
def verbose_deduplicate : Flag<["-"], "verbose_deduplicate">,
- HelpText<"Print function names eliminated by deduplication and the total size of code savings">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ HelpText<"Print function names eliminated by deduplication and the total size of code savings">,
+ Flags<[HelpHidden]>,
+ Group<grp_rare>;
def no_inits : Flag<["-"], "no_inits">,
- HelpText<"Fail if the output contains static initializers">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ HelpText<"Fail if the output contains static initializers">,
+ Flags<[HelpHidden]>,
+ Group<grp_rare>;
def no_warn_inits : Flag<["-"], "no_warn_inits">,
- HelpText<"Suppress warnings for static initializers in the output">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ HelpText<"Suppress warnings for static initializers in the output">,
+ Flags<[HelpHidden]>,
+ Group<grp_rare>;
def debug_variant : Flag<["-"], "debug_variant">,
- HelpText<"Suppress warnings germane to binaries shipping to customers">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ HelpText<"Suppress warnings germane to binaries shipping to customers">,
+ Flags<[HelpHidden]>,
+ Group<grp_rare>;
def unaligned_pointers : Separate<["-"], "unaligned_pointers">,
- MetaVarName<"<treatment>">,
- HelpText<"Handle unaligned pointers in __DATA segments according to <treatment>: warning, error, or suppress (default for arm64e is error, otherwise suppress)">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ MetaVarName<"<treatment>">,
+ HelpText<"Handle unaligned pointers in __DATA segments according to <treatment>: warning, error, or suppress (default for arm64e is error, otherwise suppress)">,
+ Flags<[HelpHidden]>,
+ Group<grp_rare>;
def dirty_data_list : Separate<["-"], "dirty_data_list">,
- MetaVarName<"<path>">,
- HelpText<"Specify data symbols in <path> destined for the __DATA_DIRTY segment">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ MetaVarName<"<path>">,
+ HelpText<"Specify data symbols in <path> destined for the __DATA_DIRTY segment">,
+ Flags<[HelpHidden]>,
+ Group<grp_rare>;
def max_default_common_align : Separate<["-"], "max_default_common_align">,
- MetaVarName<"<boundary>">,
- HelpText<"Reduce maximum alignment for common symbols to a hex power-of-2 <boundary>">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ MetaVarName<"<boundary>">,
+ HelpText<"Reduce maximum alignment for common symbols to a hex power-of-2 <boundary>">,
+ Flags<[HelpHidden]>,
+ Group<grp_rare>;
def move_to_rw_segment : MultiArg<["-"], "move_to_rw_segment", 2>,
- MetaVarName<"<segment> <path>">,
- HelpText<"Move data symbols listed in <path> to another <segment>">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ MetaVarName<"<segment> <path>">,
+ HelpText<"Move data symbols listed in <path> to another <segment>">,
+ Flags<[HelpHidden]>,
+ Group<grp_rare>;
def move_to_ro_segment : MultiArg<["-"], "move_to_ro_segment", 2>,
- MetaVarName<"<segment> <path>">,
- HelpText<"Move code symbols listed in <path> to another <segment>">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ MetaVarName<"<segment> <path>">,
+ HelpText<"Move code symbols listed in <path> to another <segment>">,
+ Flags<[HelpHidden]>,
+ Group<grp_rare>;
def rename_section : MultiArg<["-"], "rename_section", 4>,
- MetaVarName<"<from_segment> <from_section> <to_segment> <to_section>">,
- HelpText<"Rename <from_segment>/<from_section> as <to_segment>/<to_section>">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ MetaVarName<"<from_segment> <from_section> <to_segment> <to_section>">,
+ HelpText<"Rename <from_segment>/<from_section> as <to_segment>/<to_section>">,
+ Group<grp_rare>;
def rename_segment : MultiArg<["-"], "rename_segment", 2>,
- MetaVarName<"<from_segment> <to_segment>">,
- HelpText<"Rename <from_segment> as <to_segment>">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ MetaVarName<"<from_segment> <to_segment>">,
+ HelpText<"Rename <from_segment> as <to_segment>">,
+ Group<grp_rare>;
def trace_symbol_layout : Flag<["-"], "trace_symbol_layout">,
- HelpText<"Show where and why symbols move, as specified by -move_to_ro_segment, -move_to_rw_segment, -rename_section, and -rename_segment">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ HelpText<"Show where and why symbols move, as specified by -move_to_ro_segment, -move_to_rw_segment, -rename_section, and -rename_segment">,
+ Flags<[HelpHidden]>,
+ Group<grp_rare>;
+def data_const : Flag<["-"], "data_const">,
+ HelpText<"Force migration of readonly data into __DATA_CONST segment">,
+ Group<grp_rare>;
+def no_data_const : Flag<["-"], "no_data_const">,
+ HelpText<"Block migration of readonly data away from __DATA segment">,
+ Group<grp_rare>;
+def text_exec : Flag<["-"], "text_exec">,
+  HelpText<"Rename segment __TEXT to __TEXT_EXEC for sections __text and __stubs">,
+ Flags<[HelpHidden]>,
+ Group<grp_rare>;
def section_order : MultiArg<["-"], "section_order", 2>,
- MetaVarName<"<segment> <sections>">,
- HelpText<"With -preload, specify layout sequence of colon-separated <sections> in <segment>">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ MetaVarName<"<segment> <sections>">,
+ HelpText<"With -preload, specify layout sequence of colon-separated <sections> in <segment>">,
+ Flags<[HelpHidden]>,
+ Group<grp_rare>;
def segment_order : Separate<["-"], "segment_order">,
- MetaVarName<"<colon_separated_segment_list>">,
- HelpText<"With -preload, specify layout sequence of colon-separated <segments>">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ MetaVarName<"<colon_separated_segment_list>">,
+ HelpText<"With -preload, specify layout sequence of colon-separated <segments>">,
+ Flags<[HelpHidden]>,
+ Group<grp_rare>;
def allow_heap_execute : Flag<["-"], "allow_heap_execute">,
- HelpText<"On i386, allow any page to execute code">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ HelpText<"On i386, allow any page to execute code">,
+ Flags<[HelpHidden]>,
+ Group<grp_rare>;
def application_extension : Flag<["-"], "application_extension">,
- HelpText<"Designate the linker output as safe for use in an application extension">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ HelpText<"Mark output as safe for use in an application extension, and validate that linked dylibs are safe">,
+ Group<grp_rare>;
def no_application_extension : Flag<["-"], "no_application_extension">,
- HelpText<"Designate the linker output as unsafe for use in an application extension">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ HelpText<"Disable application extension functionality (default)">,
+ Group<grp_rare>;
def fatal_warnings : Flag<["-"], "fatal_warnings">,
- HelpText<"Escalate warnings as errors">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ HelpText<"Treat warnings as errors">,
+ Group<grp_rare>;
def no_eh_labels : Flag<["-"], "no_eh_labels">,
- HelpText<"In -r mode, suppress .eh labels in the __eh_frame section">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ HelpText<"In -r mode, suppress .eh labels in the __eh_frame section">,
+ Flags<[HelpHidden]>,
+ Group<grp_rare>;
def warn_compact_unwind : Flag<["-"], "warn_compact_unwind">,
- HelpText<"Warn for each FDE that cannot compact into the __unwind_info section and must remain in the __eh_frame section">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ HelpText<"Warn for each FDE that cannot compact into the __unwind_info section and must remain in the __eh_frame section">,
+ Flags<[HelpHidden]>,
+ Group<grp_rare>;
def warn_weak_exports : Flag<["-"], "warn_weak_exports">,
- HelpText<"Warn if the linked image contains weak external symbols">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ HelpText<"Warn if the linked image contains weak external symbols">,
+ Flags<[HelpHidden]>,
+ Group<grp_rare>;
def no_weak_exports : Flag<["-"], "no_weak_exports">,
- HelpText<"Fail if the linked image contains weak external symbols">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ HelpText<"Fail if the linked image contains weak external symbols">,
+ Flags<[HelpHidden]>,
+ Group<grp_rare>;
def objc_gc_compaction : Flag<["-"], "objc_gc_compaction">,
- HelpText<"Mark the Objective-C image as compatible with compacting garbage collection">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ HelpText<"Mark the Objective-C image as compatible with compacting garbage collection">,
+ Flags<[HelpHidden]>,
+ Group<grp_rare>;
def objc_gc : Flag<["-"], "objc_gc">,
- HelpText<"Verify that all code was compiled with -fobjc-gc or -fobjc-gc-only">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ HelpText<"Verify that all code was compiled with -fobjc-gc or -fobjc-gc-only">,
+ Flags<[HelpHidden]>,
+ Group<grp_rare>;
def objc_gc_only : Flag<["-"], "objc_gc_only">,
- HelpText<"Verify that all code was compiled with -fobjc-gc-only">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ HelpText<"Verify that all code was compiled with -fobjc-gc-only">,
+ Flags<[HelpHidden]>,
+ Group<grp_rare>;
def dead_strip_dylibs : Flag<["-"], "dead_strip_dylibs">,
- HelpText<"Remove dylibs that are unreachable by the entry point or exported symbols">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ HelpText<"Remove dylibs that are unreachable by the entry point or exported symbols">,
+ Group<grp_rare>;
def allow_sub_type_mismatches : Flag<["-"], "allow_sub_type_mismatches">,
- HelpText<"Permit mixing objects compiled for different ARM CPU subtypes">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ HelpText<"Permit mixing objects compiled for different ARM CPU subtypes">,
+ Flags<[HelpHidden]>,
+ Group<grp_rare>;
def no_uuid : Flag<["-"], "no_uuid">,
- HelpText<"Do not generate the LC_UUID load command">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ HelpText<"Do not generate the LC_UUID load command">,
+ Flags<[HelpHidden]>,
+ Group<grp_rare>;
def root_safe : Flag<["-"], "root_safe">,
- HelpText<"Set the MH_ROOT_SAFE bit in the mach-o header">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ HelpText<"Set the MH_ROOT_SAFE bit in the mach-o header">,
+ Flags<[HelpHidden]>,
+ Group<grp_rare>;
def setuid_safe : Flag<["-"], "setuid_safe">,
- HelpText<"Set the MH_SETUID_SAFE bit in the mach-o header">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ HelpText<"Set the MH_SETUID_SAFE bit in the mach-o header">,
+ Flags<[HelpHidden]>,
+ Group<grp_rare>;
def interposable : Flag<["-"], "interposable">,
- HelpText<"Indirects access to all to exported symbols in a dylib">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+  HelpText<"Indirects access to all exported symbols in a dylib">,
+ Flags<[HelpHidden]>,
+ Group<grp_rare>;
def multi_module : Flag<["-"], "multi_module">,
- Alias<interposable>,
- HelpText<"Alias for -interposable">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ Alias<interposable>,
+ HelpText<"Alias for -interposable">,
+ Flags<[HelpHidden]>,
+ Group<grp_rare>;
def init : Separate<["-"], "init">,
- MetaVarName<"<symbol>">,
- HelpText<"Run <symbol> as the first initializer in a dylib">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ MetaVarName<"<symbol>">,
+ HelpText<"Run <symbol> as the first initializer in a dylib">,
+ Flags<[HelpHidden]>,
+ Group<grp_rare>;
def sub_library : Separate<["-"], "sub_library">,
- MetaVarName<"<name>">,
- HelpText<"Re-export the dylib as <name>">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ MetaVarName<"<name>">,
+ HelpText<"Re-export the dylib as <name>">,
+ Group<grp_rare>;
def sub_umbrella : Separate<["-"], "sub_umbrella">,
- MetaVarName<"<name>">,
- HelpText<"Re-export the framework as <name>">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ MetaVarName<"<name>">,
+ HelpText<"Re-export the framework as <name>">,
+ Group<grp_rare>;
def allowable_client : Separate<["-"], "allowable_client">,
- MetaVarName<"<name>">,
- HelpText<"Specify <name> of a dylib or framework that is allowed to link to this dylib">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ MetaVarName<"<name>">,
+ HelpText<"Specify <name> of a dylib or framework that is allowed to link to this dylib">,
+ Flags<[HelpHidden]>,
+ Group<grp_rare>;
def client_name : Separate<["-"], "client_name">,
- MetaVarName<"<name>">,
- HelpText<"Specifies a <name> this client should match with the -allowable_client <name> in a dependent dylib">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ MetaVarName<"<name>">,
+ HelpText<"Specifies a <name> this client should match with the -allowable_client <name> in a dependent dylib">,
+ Flags<[HelpHidden]>,
+ Group<grp_rare>;
def umbrella : Separate<["-"], "umbrella">,
- MetaVarName<"<<name>>">,
- HelpText<"Re-export this dylib through the umbrella framework <name>a">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ MetaVarName<"<name>">,
+ HelpText<"Re-export this dylib through the umbrella framework <name>">,
+ Group<grp_rare>;
def headerpad : Separate<["-"], "headerpad">,
- MetaVarName<"<size>">,
- HelpText<"Allocate hex <size> extra space for future expansion of the load commands via install_name_tool">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ MetaVarName<"<size>">,
+ HelpText<"Allocate hex <size> extra space for future expansion of the load commands via install_name_tool (default is 0x20)">,
+ Group<grp_rare>;
def headerpad_max_install_names : Flag<["-"], "headerpad_max_install_names">,
- HelpText<"Allocate extra space so all load-command paths can expand to MAXPATHLEN via install_name_tool">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ HelpText<"Allocate extra space so all load-command paths can expand to MAXPATHLEN via install_name_tool">,
+ Group<grp_rare>;
def bind_at_load : Flag<["-"], "bind_at_load">,
- HelpText<"Tell dyld to bind all symbols at load time, rather than lazily">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ HelpText<"Tell dyld to bind all symbols at load time, rather than lazily">,
+ Flags<[HelpHidden]>,
+ Group<grp_rare>;
def force_flat_namespace : Flag<["-"], "force_flat_namespace">,
- HelpText<"Tell dyld to use a flat namespace on this executable and all its dependent dylibs & bundles">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ HelpText<"Tell dyld to use a flat namespace on this executable and all its dependent dylibs & bundles">,
+ Flags<[HelpHidden]>,
+ Group<grp_rare>;
def segalign : Separate<["-"], "segalign">,
- MetaVarName<"<boundary>">,
- HelpText<"Align all segments to hex power-of-2 <boundary>">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ MetaVarName<"<boundary>">,
+ HelpText<"Align all segments to hex power-of-2 <boundary>">,
+ Flags<[HelpHidden]>,
+ Group<grp_rare>;
def sectalign : MultiArg<["-"], "sectalign", 3>,
- MetaVarName<"<segment> <section> <boundary>">,
- HelpText<"Align <section> within <segment> to hex power-of-2 <boundary>">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ MetaVarName<"<segment> <section> <boundary>">,
+ HelpText<"Align <section> within <segment> to hex power-of-2 <boundary>">,
+ Group<grp_rare>;
def stack_addr : Separate<["-"], "stack_addr">,
- MetaVarName<"<address>">,
- HelpText<"Initialize stack pointer to hex <address> rounded to a page boundary">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ MetaVarName<"<address>">,
+ HelpText<"Initialize stack pointer to hex <address> rounded to a page boundary">,
+ Flags<[HelpHidden]>,
+ Group<grp_rare>;
def segprot : MultiArg<["-"], "segprot", 3>,
- MetaVarName<"<segment> <max> <init>">,
- HelpText<"Specifies the <max> and <init> virtual memory protection of <segment> as r/w/x/-seg_addr_table path Specify hex base addresses and dylib install names on successive lines in <path>. This option is obsolete">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ MetaVarName<"<segment> <max> <init>">,
+  HelpText<"Specifies the <max> and <init> virtual memory protection of <segment> as any combination of r, w, x, and -">,
+ Group<grp_rare>;
def segs_read_write_addr : Separate<["-"], "segs_read_write_addr">,
- MetaVarName<"<address>">,
- HelpText<"This option is obsolete">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ MetaVarName<"<address>">,
+ HelpText<"This option is obsolete">,
+ Flags<[HelpHidden]>,
+ Group<grp_rare>;
def segs_read_only_addr : Separate<["-"], "segs_read_only_addr">,
- MetaVarName<"<address>">,
- HelpText<"This option is obsolete">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ MetaVarName<"<address>">,
+ HelpText<"This option is obsolete">,
+ Flags<[HelpHidden]>,
+ Group<grp_rare>;
def segaddr : MultiArg<["-"], "segaddr", 2>,
- MetaVarName<"<segment> <address>">,
- HelpText<"Specify the starting hex <address> at a 4KiB page boundary for <segment>">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ MetaVarName<"<segment> <address>">,
+ HelpText<"Specify the starting hex <address> at a 4KiB page boundary for <segment>">,
+ Flags<[HelpHidden]>,
+ Group<grp_rare>;
def seg_page_size : MultiArg<["-"], "seg_page_size", 2>,
- MetaVarName<"<segment> <size>">,
- HelpText<"Specifies the page <size> for <segment>. Segment size will be a multiple of its page size">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ MetaVarName<"<segment> <size>">,
+ HelpText<"Specifies the page <size> for <segment>. Segment size will be a multiple of its page size">,
+ Flags<[HelpHidden]>,
+ Group<grp_rare>;
def dylib_file : Separate<["-"], "dylib_file">,
- MetaVarName<"<install_path:current_path>">,
- HelpText<"Specify <current_path> as different from where a dylib normally resides at <install_path>">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
-def prebind : Flag<["-"], "prebind">,
- HelpText<"This option is obsolete">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ MetaVarName<"<install_path:current_path>">,
+ HelpText<"Specify <current_path> as different from where a dylib normally resides at <install_path>">,
+ Flags<[HelpHidden]>,
+ Group<grp_rare>;
def weak_reference_mismatches : Separate<["-"], "weak_reference_mismatches">,
- MetaVarName<"<treatment>">,
- HelpText<"Resolve symbol imports of conflicting weakness according to <treatment> as weak, non-weak, or error (default is non-weak)">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ MetaVarName<"<treatment>">,
+ HelpText<"Resolve symbol imports of conflicting weakness according to <treatment> as weak, non-weak, or error (default is non-weak)">,
+ Flags<[HelpHidden]>,
+ Group<grp_rare>;
def read_only_relocs : Separate<["-"], "read_only_relocs">,
- MetaVarName<"<treatment>">,
- HelpText<"Handle relocations that modify read-only pages according to <treatment> of warning, error, or suppress (i.e., allow)">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ MetaVarName<"<treatment>">,
+ HelpText<"Handle relocations that modify read-only pages according to <treatment> of warning, error, or suppress (i.e., allow)">,
+ Flags<[HelpHidden]>,
+ Group<grp_rare>;
def force_cpusubtype_ALL : Flag<["-"], "force_cpusubtype_ALL">,
- HelpText<"Mark binary as runnable on any PowerPC, ignoring any PowerPC cpu requirements encoded in the object files">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ HelpText<"Mark binary as runnable on any PowerPC, ignoring any PowerPC cpu requirements encoded in the object files">,
+ Flags<[HelpHidden]>,
+ Group<grp_rare>;
def no_arch_warnings : Flag<["-"], "no_arch_warnings">,
- HelpText<"Suppresses warnings about inputs whose architecture does not match the -arch option">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ HelpText<"Suppresses warnings about inputs whose architecture does not match the -arch option">,
+ Flags<[HelpHidden]>,
+ Group<grp_rare>;
def arch_errors_fatal : Flag<["-"], "arch_errors_fatal">,
- HelpText<"Escalate to errors any warnings about inputs whose architecture does not match the -arch option">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ HelpText<"Escalate to errors any warnings about inputs whose architecture does not match the -arch option">,
+ Flags<[HelpHidden]>,
+ Group<grp_rare>;
def e : Separate<["-"], "e">,
- MetaVarName<"<symbol>">,
- HelpText<"Make <symbol> the entry point of an executable (default is \"start\" from crt1.o)">,
- Group<grp_rare>;
+ MetaVarName<"<symbol>">,
+ HelpText<"Make <symbol> the entry point of an executable (default is \"start\" from crt1.o)">,
+ Group<grp_rare>;
def w : Flag<["-"], "w">,
- HelpText<"Suppress all warnings">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ HelpText<"Suppress all warnings">,
+ Flags<[HelpHidden]>,
+ Group<grp_rare>;
def final_output : Separate<["-"], "final_output">,
- MetaVarName<"<name>">,
- HelpText<"Specify the dylib install name if -install_name is not used--used by compiler driver for multiple -arch arguments">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ MetaVarName<"<name>">,
+ HelpText<"Specify dylib install name if -install_name is not used; used by compiler driver for multiple -arch arguments">,
+ Group<grp_rare>;
def arch_multiple : Flag<["-"], "arch_multiple">,
- HelpText<"Augment error and warning messages with the architecture name">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
-def twolevel_namespace_hints : Flag<["-"], "twolevel_namespace_hints">,
- HelpText<"This option is obsolete">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ HelpText<"Augment error and warning messages with the architecture name">,
+ Group<grp_rare>;
def dot : Separate<["-"], "dot">,
- MetaVarName<"<path>">,
- HelpText<"Write a graph of symbol dependencies to <path> as a .dot file viewable with GraphViz">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ MetaVarName<"<path>">,
+ HelpText<"Write a graph of symbol dependencies to <path> as a .dot file viewable with GraphViz">,
+ Flags<[HelpHidden]>,
+ Group<grp_rare>;
def keep_relocs : Flag<["-"], "keep_relocs">,
- HelpText<"Retain section-based relocation records in the output, which are ignored at runtime by dyld">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ HelpText<"Retain section-based relocation records in the output, which are ignored at runtime by dyld">,
+ Flags<[HelpHidden]>,
+ Group<grp_rare>;
def warn_stabs : Flag<["-"], "warn_stabs">,
- HelpText<"Warn when bad stab symbols inside a BINCL/EINCL prevent optimization">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ HelpText<"Warn when bad stab symbols inside a BINCL/EINCL prevent optimization">,
+ Flags<[HelpHidden]>,
+ Group<grp_rare>;
def warn_commons : Flag<["-"], "warn_commons">,
- HelpText<"Warn when a tentative definition in an object file matches an external symbol in a dylib, which often means \"extern\" is missing from a variable declaration in a header file">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ HelpText<"Warn when a tentative definition in an object file matches an external symbol in a dylib, which often means \"extern\" is missing from a variable declaration in a header file">,
+ Flags<[HelpHidden]>,
+ Group<grp_rare>;
def read_only_stubs : Flag<["-"], "read_only_stubs">,
- HelpText<"On i386, make the __IMPORT segment of a final linked image read-only">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
-def slow_stubs : Flag<["-"], "slow_stubs">,
- HelpText<"This option is obsolete">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ HelpText<"On i386, make the __IMPORT segment of a final linked image read-only">,
+ Flags<[HelpHidden]>,
+ Group<grp_rare>;
def interposable_list : Separate<["-"], "interposable_list">,
- MetaVarName<"<path>">,
- HelpText<"Access global symbols listed in <path> indirectly">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ MetaVarName<"<path>">,
+ HelpText<"Access global symbols listed in <path> indirectly">,
+ Flags<[HelpHidden]>,
+ Group<grp_rare>;
def no_function_starts : Flag<["-"], "no_function_starts">,
- HelpText<"Do not creates a compressed table of function start addresses">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ HelpText<"Do not create table of function start addresses">,
+ Group<grp_rare>;
def no_objc_category_merging : Flag<["-"], "no_objc_category_merging">,
- HelpText<"Do not merge Objective-C categories into their classes">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ HelpText<"Do not merge Objective-C categories into their classes">,
+ Flags<[HelpHidden]>,
+ Group<grp_rare>;
def object_path_lto : Separate<["-"], "object_path_lto">,
- MetaVarName<"<path>">,
- HelpText<"Retain any temporary mach-o file in <path> that would otherwise be deleted during LTO">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
-def lto_library : Separate<["-"], "lto_library">,
- MetaVarName<"<path>">,
- HelpText<"Override the default ../lib/libLTO.dylib as <path>">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ MetaVarName<"<path>">,
+ HelpText<"Retain any temporary mach-o file in <path> that would otherwise be deleted during LTO">,
+ Group<grp_rare>;
def cache_path_lto : Separate<["-"], "cache_path_lto">,
- MetaVarName<"<path>">,
- HelpText<"Use <path> as a directory for the incremental LTO cache">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ MetaVarName<"<path>">,
+ HelpText<"Use <path> as a directory for the incremental LTO cache">,
+ Group<grp_rare>;
def prune_interval_lto : Separate<["-"], "prune_interval_lto">,
- MetaVarName<"<seconds>">,
- HelpText<"Prune the incremental LTO cache after <seconds> (-1 disables pruning)">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ MetaVarName<"<seconds>">,
+ HelpText<"Prune the incremental LTO cache after <seconds> (-1 disables pruning)">,
+ Group<grp_rare>;
def prune_after_lto : Separate<["-"], "prune_after_lto">,
- MetaVarName<"<seconds>">,
- HelpText<"Remove LTO cache entries after <seconds>">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ MetaVarName<"<seconds>">,
+ HelpText<"Remove LTO cache entries after <seconds>">,
+ Group<grp_rare>;
def max_relative_cache_size_lto : Separate<["-"], "max_relative_cache_size_lto">,
- MetaVarName<"<percent>">,
- HelpText<"Limit the incremental LTO cache growth to <percent> of free disk, space">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ MetaVarName<"<percent>">,
+ HelpText<"Limit the incremental LTO cache growth to <percent> of free disk, space">,
+ Group<grp_rare>;
def page_align_data_atoms : Flag<["-"], "page_align_data_atoms">,
- HelpText<"Distribute global variables on separate pages so page used/dirty status can guide creation of an order file to cluster commonly used/dirty globals">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ HelpText<"Distribute global variables on separate pages so page used/dirty status can guide creation of an order file to cluster commonly used/dirty globals">,
+ Flags<[HelpHidden]>,
+ Group<grp_rare>;
def not_for_dyld_shared_cache : Flag<["-"], "not_for_dyld_shared_cache">,
- HelpText<"Prevent system dylibs from being placed into the dylib shared cache">,
- Flags<[HelpHidden]>,
- Group<grp_rare>;
+ HelpText<"Prevent system dylibs from being placed into the dylib shared cache">,
+ Flags<[HelpHidden]>,
+ Group<grp_rare>;
+def mllvm : Separate<["-"], "mllvm">,
+ HelpText<"Options to pass to LLVM">,
+ Group<grp_rare>;
+def mcpu : Separate<["-"], "mcpu">,
+ HelpText<"Processor family target for LTO code generation">,
+ Group<grp_rare>;
def grp_deprecated : OptionGroup<"deprecated">, HelpText<"DEPRECATED">;
def lazy_framework : Separate<["-"], "lazy_framework">,
- MetaVarName<"<name>">,
- HelpText<"This option is deprecated and is now an alias for -framework.">,
- Flags<[HelpHidden]>,
- Group<grp_deprecated>;
+ MetaVarName<"<name>">,
+ HelpText<"This option is deprecated and is now an alias for -framework.">,
+ Flags<[HelpHidden]>,
+ Group<grp_deprecated>;
def lazy_library : Separate<["-"], "lazy_library">,
- MetaVarName<"<path>">,
- HelpText<"This option is deprecated and is now an alias for regular linking">,
- Flags<[HelpHidden]>,
- Group<grp_deprecated>;
+ MetaVarName<"<path>">,
+ HelpText<"This option is deprecated and is now an alias for regular linking">,
+ Flags<[HelpHidden]>,
+ Group<grp_deprecated>;
def lazy_l : Joined<["-"], "lazy-l">,
- MetaVarName<"<name>">,
- HelpText<"This option is deprecated and is now an alias for -l<path>.">,
- Flags<[HelpHidden]>,
- Group<grp_deprecated>;
+ MetaVarName<"<name>">,
+ HelpText<"This option is deprecated and is now an alias for -l<path>.">,
+ Flags<[HelpHidden]>,
+ Group<grp_deprecated>;
def single_module : Flag<["-"], "single_module">,
- HelpText<"Unnecessary option: this is already the default">,
- Flags<[HelpHidden]>,
- Group<grp_deprecated>;
+ HelpText<"Unnecessary option: this is already the default">,
+ Flags<[HelpHidden]>,
+ Group<grp_deprecated>;
def no_dead_strip_inits_and_terms : Flag<["-"], "no_dead_strip_inits_and_terms">,
- HelpText<"Unnecessary option: initialization and termination are roots of the dead strip graph, so never dead stripped">,
- Flags<[HelpHidden]>,
- Group<grp_deprecated>;
+ HelpText<"Unnecessary option: initialization and termination are roots of the dead strip graph, so never dead stripped">,
+ Flags<[HelpHidden]>,
+ Group<grp_deprecated>;
def noall_load : Flag<["-"], "noall_load">,
- HelpText<"Unnecessary option: this is already the default">,
- Flags<[HelpHidden]>,
- Group<grp_deprecated>;
+ HelpText<"Unnecessary option: this is already the default">,
+ Flags<[HelpHidden]>,
+ Group<grp_deprecated>;
def grp_obsolete : OptionGroup<"obsolete">, HelpText<"OBSOLETE">;
+def sectorder : MultiArg<["-"], "sectorder", 3>,
+ MetaVarName<"<segname> <sectname> <orderfile>">,
+ HelpText<"Obsolete. Replaced by more general -order_file option">,
+ Group<grp_obsolete>;
+def lto_library : Separate<["-"], "lto_library">,
+ MetaVarName<"<path>">,
+ HelpText<"Obsolete. LLD supports LTO directly, without using an external dylib.">,
+ Group<grp_obsolete>;
def y : Joined<["-"], "y">,
- MetaVarName<"<symbol>">,
- HelpText<"This option is obsolete in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_obsolete>;
+ MetaVarName<"<symbol>">,
+ HelpText<"This option is obsolete in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_obsolete>;
def sectobjectsymbols : MultiArg<["-"], "sectobjectsymbols", 2>,
- MetaVarName<"<segname> <sectname>">,
- HelpText<"This option is obsolete in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_obsolete>;
+ MetaVarName<"<segname> <sectname>">,
+ HelpText<"This option is obsolete in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_obsolete>;
def nofixprebinding : Flag<["-"], "nofixprebinding">,
- HelpText<"This option is obsolete in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_obsolete>;
+ HelpText<"This option is obsolete in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_obsolete>;
def noprebind_all_twolevel_modules : Flag<["-"], "noprebind_all_twolevel_modules">,
- HelpText<"This option is obsolete in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_obsolete>;
+ HelpText<"This option is obsolete in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_obsolete>;
def prebind_all_twolevel_modules : Flag<["-"], "prebind_all_twolevel_modules">,
- HelpText<"This option is obsolete in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_obsolete>;
+ HelpText<"This option is obsolete in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_obsolete>;
def prebind_allow_overlap : Flag<["-"], "prebind_allow_overlap">,
- HelpText<"This option is obsolete in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_obsolete>;
+ HelpText<"This option is obsolete in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_obsolete>;
def noprebind : Flag<["-"], "noprebind">,
- HelpText<"This option is obsolete in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_obsolete>;
+ HelpText<"This option is obsolete in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_obsolete>;
def sect_diff_relocs : Separate<["-"], "sect_diff_relocs">,
- MetaVarName<"<treatment>">,
- HelpText<"This option is obsolete in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_obsolete>;
+ MetaVarName<"<treatment>">,
+ HelpText<"This option is obsolete in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_obsolete>;
def A : Separate<["-"], "A">,
- MetaVarName<"<basefile>">,
- HelpText<"This option is obsolete in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_obsolete>;
+ MetaVarName<"<basefile>">,
+ HelpText<"This option is obsolete in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_obsolete>;
def b : Flag<["-"], "b">,
- HelpText<"This option is obsolete in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_obsolete>;
+ HelpText<"This option is obsolete in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_obsolete>;
def Sn : Flag<["-"], "Sn">,
- HelpText<"This option is obsolete in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_obsolete>;
+ HelpText<"This option is obsolete in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_obsolete>;
def Si : Flag<["-"], "Si">,
- HelpText<"This option is obsolete in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_obsolete>;
+ HelpText<"This option is obsolete in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_obsolete>;
def Sp : Flag<["-"], "Sp">,
- HelpText<"This option is obsolete in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_obsolete>;
+ HelpText<"This option is obsolete in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_obsolete>;
def X : Flag<["-"], "X">,
- HelpText<"This option is obsolete in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_obsolete>;
+ HelpText<"This option is obsolete in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_obsolete>;
def s : Flag<["-"], "s">,
- HelpText<"This option is obsolete in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_obsolete>;
+ HelpText<"This option is obsolete in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_obsolete>;
def m : Flag<["-"], "m">,
- HelpText<"This option is obsolete in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_obsolete>;
+ HelpText<"This option is obsolete in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_obsolete>;
def Y : Separate<["-"], "Y">,
- MetaVarName<"<number>">,
- HelpText<"This option is obsolete in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_obsolete>;
+ MetaVarName<"<number>">,
+ HelpText<"This option is obsolete in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_obsolete>;
def nomultidefs : Flag<["-"], "nomultidefs">,
- HelpText<"This option is obsolete in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_obsolete>;
+ HelpText<"This option is obsolete in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_obsolete>;
def multiply_defined_unused : Separate<["-"], "multiply_defined_unused">,
- MetaVarName<"<treatment>">,
- HelpText<"This option is obsolete in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_obsolete>;
+ MetaVarName<"<treatment>">,
+ HelpText<"This option is obsolete in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_obsolete>;
def multiply_defined : Separate<["-"], "multiply_defined">,
- MetaVarName<"<treatment>">,
- HelpText<"This option is obsolete in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_obsolete>;
+ MetaVarName<"<treatment>">,
+ HelpText<"This option is obsolete in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_obsolete>;
def private_bundle : Flag<["-"], "private_bundle">,
- HelpText<"This option is obsolete in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_obsolete>;
+ HelpText<"This option is obsolete in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_obsolete>;
def seg_addr_table_filename : Separate<["-"], "seg_addr_table_filename">,
- MetaVarName<"<path>">,
- HelpText<"This option is obsolete in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_obsolete>;
+ MetaVarName<"<path>">,
+ HelpText<"This option is obsolete in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_obsolete>;
def sectorder_detail : Flag<["-"], "sectorder_detail">,
- HelpText<"This option is obsolete in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_obsolete>;
+ HelpText<"This option is obsolete in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_obsolete>;
def no_compact_linkedit : Flag<["-"], "no_compact_linkedit">,
- HelpText<"This option is obsolete in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_obsolete>;
+ HelpText<"This option is obsolete in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_obsolete>;
def dependent_dr_info : Flag<["-"], "dependent_dr_info">,
- HelpText<"This option is obsolete in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_obsolete>;
+ HelpText<"This option is obsolete in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_obsolete>;
def no_dependent_dr_info : Flag<["-"], "no_dependent_dr_info">,
- HelpText<"This option is obsolete in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_obsolete>;
+ HelpText<"This option is obsolete in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_obsolete>;
def seglinkedit : Flag<["-"], "seglinkedit">,
- HelpText<"This option is obsolete in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_obsolete>;
+ HelpText<"This option is obsolete in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_obsolete>;
def noseglinkedit : Flag<["-"], "noseglinkedit">,
- HelpText<"This option is obsolete in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_obsolete>;
+ HelpText<"This option is obsolete in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_obsolete>;
def fvmlib : Flag<["-"], "fvmlib">,
- HelpText<"This option is obsolete in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_obsolete>;
+ HelpText<"This option is obsolete in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_obsolete>;
def run_init_lazily : Flag<["-"], "run_init_lazily">,
- HelpText<"This option is obsolete in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_obsolete>;
+ HelpText<"This option is obsolete in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_obsolete>;
+def prebind : Flag<["-"], "prebind">,
+ HelpText<"This option is obsolete in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_obsolete>;
+def twolevel_namespace_hints : Flag<["-"], "twolevel_namespace_hints">,
+ HelpText<"This option is obsolete in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_obsolete>;
+def slow_stubs : Flag<["-"], "slow_stubs">,
+ HelpText<"This option is obsolete in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_obsolete>;
def grp_undocumented : OptionGroup<"undocumented">, HelpText<"UNDOCUMENTED">;
-def add_ast_path : Flag<["-"], "add_ast_path">,
- HelpText<"This option is undocumented in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_undocumented>;
def add_linker_option : Flag<["-"], "add_linker_option">,
- HelpText<"This option is undocumented in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_undocumented>;
+ HelpText<"This option is undocumented in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_undocumented>;
def add_source_version : Flag<["-"], "add_source_version">,
- HelpText<"This option is undocumented in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_undocumented>;
+ HelpText<"This option is undocumented in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_undocumented>;
def no_source_version : Flag<["-"], "no_source_version">,
- HelpText<"This option is undocumented in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_undocumented>;
+ HelpText<"This option is undocumented in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_undocumented>;
def add_split_seg_info : Flag<["-"], "add_split_seg_info">,
- HelpText<"This option is undocumented in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_undocumented>;
+ HelpText<"This option is undocumented in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_undocumented>;
def allow_dead_duplicates : Flag<["-"], "allow_dead_duplicates">,
- HelpText<"This option is undocumented in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_undocumented>;
+ HelpText<"This option is undocumented in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_undocumented>;
def allow_simulator_linking_to_macosx_dylibs : Flag<["-"], "allow_simulator_linking_to_macosx_dylibs">,
- HelpText<"This option is undocumented in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_undocumented>;
+ HelpText<"This option is undocumented in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_undocumented>;
def bitcode_process_mode : Flag<["-"], "bitcode_process_mode">,
- HelpText<"This option is undocumented in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_undocumented>;
+ HelpText<"This option is undocumented in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_undocumented>;
def bitcode_verify : Flag<["-"], "bitcode_verify">,
- HelpText<"This option is undocumented in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_undocumented>;
+ HelpText<"This option is undocumented in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_undocumented>;
def classic_linker : Flag<["-"], "classic_linker">,
- HelpText<"This option is undocumented in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_undocumented>;
-def data_const : Flag<["-"], "data_const">,
- HelpText<"This option is undocumented in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_undocumented>;
-def no_data_const : Flag<["-"], "no_data_const">,
- HelpText<"This option is undocumented in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_undocumented>;
+ HelpText<"This option is undocumented in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_undocumented>;
def data_in_code_info : Flag<["-"], "data_in_code_info">,
- HelpText<"This option is undocumented in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_undocumented>;
+ HelpText<"Emit data-in-code information (default)">,
+ Group<grp_undocumented>;
def no_data_in_code_info : Flag<["-"], "no_data_in_code_info">,
- HelpText<"This option is undocumented in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_undocumented>;
+ HelpText<"Do not emit data-in-code information">,
+ Group<grp_undocumented>;
def debug_snapshot : Flag<["-"], "debug_snapshot">,
- HelpText<"This option is undocumented in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_undocumented>;
+ HelpText<"This option is undocumented in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_undocumented>;
def demangle : Flag<["-"], "demangle">,
- HelpText<"This option is undocumented in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_undocumented>;
-def dependency_info : Flag<["-"], "dependency_info">,
- HelpText<"This option is undocumented in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_undocumented>;
+ HelpText<"Demangle symbol names in diagnostics">;
def dyld_env : Flag<["-"], "dyld_env">,
- HelpText<"This option is undocumented in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_undocumented>;
+ HelpText<"This option is undocumented in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_undocumented>;
def encryptable : Flag<["-"], "encryptable">,
- HelpText<"This option is undocumented in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_undocumented>;
+ HelpText<"Generate the LC_ENCRYPTION_INFO load command">,
+ Group<grp_undocumented>;
+def no_encryption : Flag<["-"], "no_encryption">,
+ HelpText<"Do not generate the LC_ENCRYPTION_INFO load command">,
+ Group<grp_undocumented>;
def executable_path : Flag<["-"], "executable_path">,
- HelpText<"This option is undocumented in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_undocumented>;
+ HelpText<"This option is undocumented in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_undocumented>;
def fixup_chains : Flag<["-"], "fixup_chains">,
- HelpText<"This option is undocumented in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_undocumented>;
+ HelpText<"This option is undocumented in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_undocumented>;
def fixup_chains_section : Flag<["-"], "fixup_chains_section">,
- HelpText<"This option is undocumented in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_undocumented>;
+ HelpText<"This option is undocumented in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_undocumented>;
def flto_codegen_only : Flag<["-"], "flto-codegen-only">,
- HelpText<"This option is undocumented in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_undocumented>;
-def force_load_swift_libs : Flag<["-"], "force_load_swift_libs">,
- HelpText<"This option is undocumented in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_undocumented>;
+ HelpText<"This option is undocumented in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_undocumented>;
def force_symbol_not_weak : Flag<["-"], "force_symbol_not_weak">,
- HelpText<"This option is undocumented in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_undocumented>;
+ HelpText<"This option is undocumented in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_undocumented>;
def force_symbols_coalesce_list : Flag<["-"], "force_symbols_coalesce_list">,
- HelpText<"This option is undocumented in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_undocumented>;
+ HelpText<"This option is undocumented in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_undocumented>;
def force_symbols_not_weak_list : Flag<["-"], "force_symbols_not_weak_list">,
- HelpText<"This option is undocumented in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_undocumented>;
+ HelpText<"This option is undocumented in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_undocumented>;
def force_symbols_weak_list : Flag<["-"], "force_symbols_weak_list">,
- HelpText<"This option is undocumented in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_undocumented>;
+ HelpText<"This option is undocumented in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_undocumented>;
def force_symbol_weak : Flag<["-"], "force_symbol_weak">,
- HelpText<"This option is undocumented in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_undocumented>;
+ HelpText<"This option is undocumented in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_undocumented>;
def function_starts : Flag<["-"], "function_starts">,
- HelpText<"This option is undocumented in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_undocumented>;
+ HelpText<"Create table of function start addresses (default)">,
+ Group<grp_undocumented>;
def i : Flag<["-"], "i">,
- HelpText<"This option is undocumented in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_undocumented>;
+ HelpText<"This option is undocumented in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_undocumented>;
def ignore_auto_link : Flag<["-"], "ignore_auto_link">,
- HelpText<"This option is undocumented in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_undocumented>;
+ HelpText<"This option is undocumented in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_undocumented>;
def ignore_optimization_hints : Flag<["-"], "ignore_optimization_hints">,
- HelpText<"This option is undocumented in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_undocumented>;
+ HelpText<"This option is undocumented in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_undocumented>;
def init_offsets : Flag<["-"], "init_offsets">,
- HelpText<"This option is undocumented in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_undocumented>;
+ HelpText<"This option is undocumented in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_undocumented>;
def keep_dwarf_unwind : Flag<["-"], "keep_dwarf_unwind">,
- HelpText<"This option is undocumented in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_undocumented>;
+ HelpText<"This option is undocumented in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_undocumented>;
def no_keep_dwarf_unwind : Flag<["-"], "no_keep_dwarf_unwind">,
- HelpText<"This option is undocumented in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_undocumented>;
+ HelpText<"This option is undocumented in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_undocumented>;
def kext : Flag<["-"], "kext">,
- HelpText<"This option is undocumented in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_undocumented>;
+ HelpText<"This option is undocumented in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_undocumented>;
def kext_objects_dir : Flag<["-"], "kext_objects_dir">,
- HelpText<"This option is undocumented in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_undocumented>;
+ HelpText<"This option is undocumented in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_undocumented>;
def no_kext_objects : Flag<["-"], "no_kext_objects">,
- HelpText<"This option is undocumented in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_undocumented>;
+ HelpText<"This option is undocumented in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_undocumented>;
def kexts_use_stubs : Flag<["-"], "kexts_use_stubs">,
- HelpText<"This option is undocumented in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_undocumented>;
-def maccatalyst_version_min : Flag<["-"], "maccatalyst_version_min">,
- HelpText<"This option is undocumented in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_undocumented>;
-def iosmac_version_min : Flag<["-"], "iosmac_version_min">,
- Alias<maccatalyst_version_min>,
- HelpText<"Alias for -maccatalyst_version_min">,
- Flags<[HelpHidden]>,
- Group<grp_undocumented>;
-def uikitformac_version_min : Flag<["-"], "uikitformac_version_min">,
- Alias<maccatalyst_version_min>,
- HelpText<"Alias for -maccatalyst_version_min">,
- Flags<[HelpHidden]>,
- Group<grp_undocumented>;
-def mcpu : Flag<["-"], "mcpu">,
- HelpText<"This option is undocumented in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_undocumented>;
-def mllvm : Flag<["-"], "mllvm">,
- HelpText<"This option is undocumented in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_undocumented>;
+ HelpText<"This option is undocumented in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_undocumented>;
def no_compact_unwind : Flag<["-"], "no_compact_unwind">,
- HelpText<"This option is undocumented in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_undocumented>;
+ HelpText<"This option is undocumented in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_undocumented>;
def no_dtrace_dof : Flag<["-"], "no_dtrace_dof">,
- HelpText<"This option is undocumented in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_undocumented>;
-def no_encryption : Flag<["-"], "no_encryption">,
- HelpText<"This option is undocumented in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_undocumented>;
+ HelpText<"This option is undocumented in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_undocumented>;
def no_new_main : Flag<["-"], "no_new_main">,
- HelpText<"This option is undocumented in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_undocumented>;
-def objc_abi_version : Flag<["-"], "objc_abi_version">,
- HelpText<"This option is undocumented in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_undocumented>;
+ HelpText<"This option is undocumented in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_undocumented>;
+def objc_abi_version : Separate<["-"], "objc_abi_version">,
+ HelpText<"This option is undocumented in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_undocumented>;
def pause : Flag<["-"], "pause">,
- HelpText<"This option is undocumented in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_undocumented>;
+ HelpText<"This option is undocumented in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_undocumented>;
def random_uuid : Flag<["-"], "random_uuid">,
- HelpText<"This option is undocumented in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_undocumented>;
-def save_temps : Flag<["-"], "save-temps">,
- HelpText<"This option is undocumented in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_undocumented>;
-def sdk_version : Flag<["-"], "sdk_version">,
- HelpText<"This option is undocumented in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_undocumented>;
+ HelpText<"This option is undocumented in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_undocumented>;
def simulator_support : Flag<["-"], "simulator_support">,
- HelpText<"This option is undocumented in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_undocumented>;
+ HelpText<"This option is undocumented in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_undocumented>;
def snapshot_dir : Flag<["-"], "snapshot_dir">,
- HelpText<"This option is undocumented in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_undocumented>;
+ HelpText<"This option is undocumented in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_undocumented>;
def source_version : Flag<["-"], "source_version">,
- HelpText<"This option is undocumented in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_undocumented>;
-def text_exec : Flag<["-"], "text_exec">,
- HelpText<"This option is undocumented in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_undocumented>;
+ HelpText<"This option is undocumented in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_undocumented>;
def threaded_starts_section : Flag<["-"], "threaded_starts_section">,
- HelpText<"This option is undocumented in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_undocumented>;
-def twolevel_namespace : Flag<["-"], "twolevel_namespace">,
- HelpText<"This option is undocumented in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_undocumented>;
+ HelpText<"This option is undocumented in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_undocumented>;
def verbose_optimization_hints : Flag<["-"], "verbose_optimization_hints">,
- HelpText<"This option is undocumented in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_undocumented>;
+ HelpText<"This option is undocumented in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_undocumented>;
def version_load_command : Flag<["-"], "version_load_command">,
- HelpText<"This option is undocumented in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_undocumented>;
+ HelpText<"This option is undocumented in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_undocumented>;
def grp_ignored : OptionGroup<"ignored">, HelpText<"IGNORED">;
def M : Flag<["-"], "M">,
- HelpText<"This option is ignored in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_ignored>;
+ HelpText<"This option is ignored in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_ignored>;
def new_linker : Flag<["-"], "new_linker">,
- HelpText<"This option is ignored in ld64">,
- Flags<[HelpHidden]>,
- Group<grp_ignored>;
+ HelpText<"This option is ignored in ld64">,
+ Flags<[HelpHidden]>,
+ Group<grp_ignored>;
using namespace lld::macho;
uint64_t OutputSection::getSegmentOffset() const {
- return addr - parent->firstSection()->addr;
+ return addr - parent->addr;
+}
+
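+// Give each registered section-start/end boundary symbol the section's final
+// start or end address.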
+void OutputSection::assignAddressesToStartEndSymbols() {
+ for (Defined *d : sectionStartSymbols)
+ d->value = addr;
+ for (Defined *d : sectionEndSymbols)
+ d->value = addr + getSize();
}
#ifndef LLD_MACHO_OUTPUT_SECTION_H
#define LLD_MACHO_OUTPUT_SECTION_H
+#include "Symbols.h"
#include "lld/Common/LLVM.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/TinyPtrVector.h"
+
+#include <limits>
namespace lld {
namespace macho {
+class Defined;
class InputSection;
class OutputSegment;
+// The default order value for OutputSections that are not constructed from
+// InputSections (i.e. SyntheticSections). We make it less than INT_MAX in order
+// not to conflict with the ordering of zerofill sections, which must always be
+// placed at the end of their segment.
+constexpr int UnspecifiedInputOrder = std::numeric_limits<int>::max() - 1024;
+
// Output sections represent the finalized sections present within the final
// linked executable. They can represent special sections (like the symbol
// table), or represent coalesced sections from the various inputs given to the
class OutputSection {
public:
enum Kind {
- MergedKind,
+ ConcatKind,
SyntheticKind,
};
// Unneeded sections are omitted entirely (header and body).
virtual bool isNeeded() const { return true; }
- // Specifically finalizes addresses and section size, not content.
virtual void finalize() {
// TODO investigate refactoring synthetic section finalization logic into
// overrides of this function.
virtual void writeTo(uint8_t *buf) const = 0;
+ void assignAddressesToStartEndSymbols();
+
StringRef name;
+ llvm::TinyPtrVector<Defined *> sectionStartSymbols;
+ llvm::TinyPtrVector<Defined *> sectionEndSymbols;
OutputSegment *parent = nullptr;
+ // For output sections that don't have explicit ordering requirements, their
+ // output order should be based on the order of the input sections they
+ // contain.
+ int inputOrder = UnspecifiedInputOrder;
uint32_t index = 0;
uint64_t addr = 0;
uint64_t fileOff = 0;
uint32_t align = 1;
uint32_t flags = 0;
+ uint32_t reserved1 = 0;
+ uint32_t reserved2 = 0;
private:
Kind sectionKind;
//===----------------------------------------------------------------------===//
#include "OutputSegment.h"
+#include "ConcatOutputSection.h"
#include "InputSection.h"
-#include "MergedOutputSection.h"
+#include "Symbols.h"
#include "SyntheticSections.h"
#include "lld/Common/ErrorHandler.h"
#include "lld/Common/Memory.h"
+#include "llvm/ADT/StringSwitch.h"
#include "llvm/BinaryFormat/MachO.h"
using namespace llvm;
using namespace lld::macho;
static uint32_t initProt(StringRef name) {
+ auto it = find_if(
+ config->segmentProtections,
+ [&](const SegmentProtection &segprot) { return segprot.name == name; });
+ if (it != config->segmentProtections.end())
+ return it->initProt;
+
if (name == segment_names::text)
return VM_PROT_READ | VM_PROT_EXECUTE;
if (name == segment_names::pageZero)
}
static uint32_t maxProt(StringRef name) {
- if (name == segment_names::pageZero)
- return 0;
- return VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
+ assert(config->arch() != AK_i386 &&
+ "TODO: i386 has different maxProt requirements");
+ return initProt(name);
}
size_t OutputSegment::numNonHiddenSections() const {
size_t count = 0;
- for (const OutputSection *osec : sections) {
+ for (const OutputSection *osec : sections)
count += (!osec->isHidden() ? 1 : 0);
- }
return count;
}
void OutputSegment::addOutputSection(OutputSection *osec) {
+ inputOrder = std::min(inputOrder, osec->inputOrder);
+
osec->parent = this;
sections.push_back(osec);
+
+ for (const SectionAlign §Align : config->sectionAlignments)
+ if (sectAlign.segName == name && sectAlign.sectName == osec->name)
+ osec->align = sectAlign.align;
+}
+
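+// Returns a "less than" comparator that orders elements by the key computed
+// by `ord`.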
+template <typename T, typename F> static auto compareByOrder(F ord) {
+ return [=](T a, T b) { return ord(a) < ord(b); };
+}
+
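+// Well-known segments get a fixed position; any other segment keeps the order
+// in which its first section was added.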
+static int segmentOrder(OutputSegment *seg) {
+ return StringSwitch<int>(seg->name)
+ .Case(segment_names::pageZero, -4)
+ .Case(segment_names::text, -3)
+ .Case(segment_names::dataConst, -2)
+ .Case(segment_names::data, -1)
+ .Case(segment_names::llvm, std::numeric_limits<int>::max() - 1)
+ // Make sure __LINKEDIT is the last segment (i.e. all its hidden
+ // sections must be ordered after other sections).
+ .Case(segment_names::linkEdit, std::numeric_limits<int>::max())
+ .Default(seg->inputOrder);
+}
+
+static int sectionOrder(OutputSection *osec) {
+ StringRef segname = osec->parent->name;
+ // Sections are uniquely identified by their segment + section name.
+ if (segname == segment_names::text) {
+ return StringSwitch<int>(osec->name)
+ .Case(section_names::header, -4)
+ .Case(section_names::text, -3)
+ .Case(section_names::stubs, -2)
+ .Case(section_names::stubHelper, -1)
+ .Case(section_names::unwindInfo, std::numeric_limits<int>::max() - 1)
+ .Case(section_names::ehFrame, std::numeric_limits<int>::max())
+ .Default(osec->inputOrder);
+ } else if (segname == segment_names::data ||
+ segname == segment_names::dataConst) {
+ // For each thread spawned, dyld will initialize its TLVs by copying the
+ // address range from the start of the first thread-local data section to
+ // the end of the last one. We therefore arrange these sections contiguously
+ // to minimize the amount of memory used. Additionally, since zerofill
+ // sections must be at the end of their segments, and since TLV data
+ // sections can be zerofills, we end up putting all TLV data sections at the
+ // end of the segment.
+ switch (sectionType(osec->flags)) {
+ case S_THREAD_LOCAL_VARIABLE_POINTERS:
+ return std::numeric_limits<int>::max() - 3;
+ case S_THREAD_LOCAL_REGULAR:
+ return std::numeric_limits<int>::max() - 2;
+ case S_THREAD_LOCAL_ZEROFILL:
+ return std::numeric_limits<int>::max() - 1;
+ case S_ZEROFILL:
+ return std::numeric_limits<int>::max();
+ default:
+ return StringSwitch<int>(osec->name)
+ .Case(section_names::got, -3)
+ .Case(section_names::lazySymbolPtr, -2)
+ .Case(section_names::const_, -1)
+ .Default(osec->inputOrder);
+ }
+ } else if (segname == segment_names::linkEdit) {
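+ // Fixed layout for the __LINKEDIT contents; the code signature must cover
+ // everything that precedes it, so it is always placed last.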
+ return StringSwitch<int>(osec->name)
+ .Case(section_names::rebase, -10)
+ .Case(section_names::binding, -9)
+ .Case(section_names::weakBinding, -8)
+ .Case(section_names::lazyBinding, -7)
+ .Case(section_names::export_, -6)
+ .Case(section_names::functionStarts, -5)
+ .Case(section_names::dataInCode, -4)
+ .Case(section_names::symbolTable, -3)
+ .Case(section_names::indirectSymbolTable, -2)
+ .Case(section_names::stringTable, -1)
+ .Case(section_names::codeSignature, std::numeric_limits<int>::max())
+ .Default(osec->inputOrder);
+ }
+ // ZeroFill sections must always be at the end of their segments:
+ // dyld checks if a segment's file size is smaller than its in-memory
+ // size to detect if a segment has zerofill sections, and if so it maps
+ // the missing tail as zerofill.
+ if (sectionType(osec->flags) == S_ZEROFILL)
+ return std::numeric_limits<int>::max();
+ return osec->inputOrder;
+}
+
+void OutputSegment::sortOutputSections() {
+ // Must be stable_sort() to keep special sections such as
+ // S_THREAD_LOCAL_REGULAR in input order.
+ llvm::stable_sort(sections, compareByOrder<OutputSection *>(sectionOrder));
+}
+
+void OutputSegment::assignAddressesToStartEndSymbols() {
+ for (Defined *d : segmentStartSymbols)
+ d->value = addr;
+ for (Defined *d : segmentEndSymbols)
+ d->value = addr + vmSize;
}
-static llvm::DenseMap<StringRef, OutputSegment *> nameToOutputSegment;
+void macho::sortOutputSegments() {
+ llvm::stable_sort(outputSegments,
+ compareByOrder<OutputSegment *>(segmentOrder));
+}
+
+static DenseMap<StringRef, OutputSegment *> nameToOutputSegment;
std::vector<OutputSegment *> macho::outputSegments;
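+// Apply any segment renames from the config so that a renamed segment shares
+// the OutputSegment of its new name.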
+static StringRef maybeRenameSegment(StringRef name) {
+ auto newName = config->segmentRenameMap.find(name);
+ if (newName != config->segmentRenameMap.end())
+ return newName->second;
+ return name;
+}
+
OutputSegment *macho::getOrCreateOutputSegment(StringRef name) {
+ name = maybeRenameSegment(name);
+
OutputSegment *&segRef = nameToOutputSegment[name];
- if (segRef != nullptr)
+ if (segRef)
return segRef;
segRef = make<OutputSegment>();
#define LLD_MACHO_OUTPUT_SEGMENT_H
#include "OutputSection.h"
+#include "Symbols.h"
#include "lld/Common/LLVM.h"
+#include "llvm/ADT/TinyPtrVector.h"
+
+#include <limits>
+#include <vector>
namespace lld {
namespace macho {
namespace segment_names {
-constexpr const char pageZero[] = "__PAGEZERO";
-constexpr const char text[] = "__TEXT";
+constexpr const char dataConst[] = "__DATA_CONST";
+constexpr const char dataDirty[] = "__DATA_DIRTY";
constexpr const char data[] = "__DATA";
+constexpr const char dwarf[] = "__DWARF";
+constexpr const char import[] = "__IMPORT";
+constexpr const char ld[] = "__LD"; // output only with -r
constexpr const char linkEdit[] = "__LINKEDIT";
-constexpr const char dataConst[] = "__DATA_CONST";
+constexpr const char llvm[] = "__LLVM";
+constexpr const char pageZero[] = "__PAGEZERO";
+constexpr const char textExec[] = "__TEXT_EXEC";
+constexpr const char text[] = "__TEXT";
} // namespace segment_names
class OutputSegment {
public:
- const OutputSection *firstSection() const { return sections.front(); }
- const OutputSection *lastSection() const { return sections.back(); }
-
void addOutputSection(OutputSection *os);
- void sortOutputSections(
- llvm::function_ref<bool(OutputSection *, OutputSection *)> comparator) {
- llvm::stable_sort(sections, comparator);
- }
+ void sortOutputSections();
+ void assignAddressesToStartEndSymbols();
const std::vector<OutputSection *> &getSections() const { return sections; }
size_t numNonHiddenSections() const;
uint64_t fileOff = 0;
+ uint64_t fileSize = 0;
+ uint64_t addr = 0;
+ uint64_t vmSize = 0;
+ int inputOrder = UnspecifiedInputOrder;
StringRef name;
uint32_t maxProt = 0;
uint32_t initProt = 0;
uint8_t index;
+ llvm::TinyPtrVector<Defined *> segmentStartSymbols;
+ llvm::TinyPtrVector<Defined *> segmentEndSymbols;
+
private:
std::vector<OutputSection *> sections;
};
extern std::vector<OutputSegment *> outputSegments;
+void sortOutputSegments();
+
OutputSegment *getOrCreateOutputSegment(StringRef name);
} // namespace macho
--- /dev/null
+//===- Relocations.cpp ----------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "Relocations.h"
+#include "Symbols.h"
+#include "SyntheticSections.h"
+#include "Target.h"
+
+#include "lld/Common/ErrorHandler.h"
+
+using namespace llvm;
+using namespace lld;
+using namespace lld::macho;
+
+bool macho::validateSymbolRelocation(const Symbol *sym,
+ const InputSection *isec, const Reloc &r) {
+ const RelocAttrs &relocAttrs = target->getRelocAttrs(r.type);
+ bool valid = true;
+ auto message = [relocAttrs, sym, isec, &valid](const Twine &diagnostic) {
+ valid = false;
+ return (relocAttrs.name + " relocation " + diagnostic + " for `" +
+ sym->getName() + "' in " + toString(isec))
+ .str();
+ };
+
+ if (relocAttrs.hasAttr(RelocAttrBits::TLV) != sym->isTlv())
+ error(message(Twine("requires that variable ") +
+ (sym->isTlv() ? "not " : "") + "be thread-local"));
+
+ return valid;
+}
+
+void macho::reportRangeError(const Reloc &r, const Twine &v, uint8_t bits,
+ int64_t min, uint64_t max) {
+ std::string hint;
+ if (auto *sym = r.referent.dyn_cast<Symbol *>())
+ hint = "; references " + toString(*sym);
+ // TODO: get location of reloc using something like LLD-ELF's getErrorPlace()
+ error("relocation " + target->getRelocAttrs(r.type).name +
+ " is out of range: " + v + " is not in [" + Twine(min) + ", " +
+ Twine(max) + "]" + hint);
+}
+
+void macho::reportRangeError(SymbolDiagnostic d, const Twine &v, uint8_t bits,
+ int64_t min, uint64_t max) {
+ std::string hint;
+ if (d.symbol)
+ hint = "; references " + toString(*d.symbol);
+ error(d.reason + " is out of range: " + v + " is not in [" + Twine(min) +
+ ", " + Twine(max) + "]" + hint);
+}
+
+const RelocAttrs macho::invalidRelocAttrs{"INVALID", RelocAttrBits::_0};
--- /dev/null
+//===- Relocations.h --------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLD_MACHO_RELOCATIONS_H
+#define LLD_MACHO_RELOCATIONS_H
+
+#include "llvm/ADT/BitmaskEnum.h"
+#include "llvm/ADT/PointerUnion.h"
+#include "llvm/BinaryFormat/MachO.h"
+#include "llvm/Support/Endian.h"
+
+#include <cstddef>
+#include <cstdint>
+
+namespace lld {
+namespace macho {
+LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE();
+
+class Symbol;
+class InputSection;
+
+enum class RelocAttrBits {
+ _0 = 0, // invalid
+ PCREL = 1 << 0, // Value is PC-relative offset
+ ABSOLUTE = 1 << 1, // Value is an absolute address or fixed offset
+ BYTE4 = 1 << 2, // 4 byte datum
+ BYTE8 = 1 << 3, // 8 byte datum
+ EXTERN = 1 << 4, // Can have an external symbol
+ LOCAL = 1 << 5, // Can have a local symbol
+ ADDEND = 1 << 6, // *_ADDEND paired prefix reloc
+ SUBTRAHEND = 1 << 7, // *_SUBTRACTOR paired prefix reloc
+ BRANCH = 1 << 8, // Value is branch target
+ GOT = 1 << 9, // References a symbol in the Global Offset Table
+ TLV = 1 << 10, // References a thread-local symbol
+ LOAD = 1 << 11, // Relaxable indirect load
+ POINTER = 1 << 12, // Non-relaxable indirect load (pointer is taken)
+ UNSIGNED = 1 << 13, // *_UNSIGNED relocs
+ LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue*/ (1 << 14) - 1),
+};
+// Note: SUBTRACTOR always pairs with UNSIGNED (a delta between two symbols).
+
+struct RelocAttrs {
+ llvm::StringRef name;
+ RelocAttrBits bits;
+ bool hasAttr(RelocAttrBits b) const { return (bits & b) == b; }
+};
+
+struct Reloc {
+ uint8_t type = llvm::MachO::GENERIC_RELOC_INVALID;
+ bool pcrel = false;
+ uint8_t length = 0;
+ // The offset from the start of the subsection that this relocation belongs
+ // to.
+ uint64_t offset = 0;
+ // Adding this offset to the address of the referent symbol or subsection
+ // gives the destination that this relocation refers to.
+ int64_t addend = 0;
+ llvm::PointerUnion<Symbol *, InputSection *> referent = nullptr;
+};
+
+bool validateSymbolRelocation(const Symbol *, const InputSection *,
+ const Reloc &);
+
+/*
+ * v: The value the relocation is attempting to encode
+ * bits: The number of bits actually available to encode this relocation
+ */
+void reportRangeError(const Reloc &, const llvm::Twine &v, uint8_t bits,
+ int64_t min, uint64_t max);
+
+struct SymbolDiagnostic {
+ const Symbol *symbol;
+ llvm::StringRef reason;
+};
+
+void reportRangeError(SymbolDiagnostic, const llvm::Twine &v, uint8_t bits,
+ int64_t min, uint64_t max);
+
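+// Reports a range error if `v` does not fit in a signed `bits`-bit field.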
+template <typename Diagnostic>
+inline void checkInt(Diagnostic d, int64_t v, int bits) {
+ if (v != llvm::SignExtend64(v, bits))
+ reportRangeError(d, llvm::Twine(v), bits, llvm::minIntN(bits),
+ llvm::maxIntN(bits));
+}
+
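+// Reports a range error if `v` does not fit in an unsigned `bits`-bit field.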
+template <typename Diagnostic>
+inline void checkUInt(Diagnostic d, uint64_t v, int bits) {
+ if ((v >> bits) != 0)
+ reportRangeError(d, llvm::Twine(v), bits, 0, llvm::maxUIntN(bits));
+}
+
+inline void writeAddress(uint8_t *loc, uint64_t addr, uint8_t length) {
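+ // `length` is the Mach-O r_length field: log2 of the width in bytes, so 2
+ // selects a 32-bit write and 3 selects a 64-bit write.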
+ switch (length) {
+ case 2:
+ llvm::support::endian::write32le(loc, addr);
+ break;
+ case 3:
+ llvm::support::endian::write64le(loc, addr);
+ break;
+ default:
+ llvm_unreachable("invalid r_length");
+ }
+}
+
+extern const RelocAttrs invalidRelocAttrs;
+
+} // namespace macho
+} // namespace lld
+
+#endif
//===----------------------------------------------------------------------===//
#include "SymbolTable.h"
+#include "ConcatOutputSection.h"
+#include "Config.h"
#include "InputFiles.h"
#include "Symbols.h"
+#include "SyntheticSections.h"
#include "lld/Common/ErrorHandler.h"
#include "lld/Common/Memory.h"
using namespace lld;
using namespace lld::macho;
-Symbol *SymbolTable::find(StringRef name) {
- auto it = symMap.find(llvm::CachedHashStringRef(name));
+Symbol *SymbolTable::find(CachedHashStringRef cachedName) {
+ auto it = symMap.find(cachedName);
if (it == symMap.end())
return nullptr;
return symVector[it->second];
}
-std::pair<Symbol *, bool> SymbolTable::insert(StringRef name) {
+std::pair<Symbol *, bool> SymbolTable::insert(StringRef name,
+ const InputFile *file) {
auto p = symMap.insert({CachedHashStringRef(name), (int)symVector.size()});
- // Name already present in the symbol table.
- if (!p.second)
- return {symVector[p.first->second], false};
+ Symbol *sym;
+ if (!p.second) {
+ // Name already present in the symbol table.
+ sym = symVector[p.first->second];
+ } else {
+ // Name is a new symbol.
+ sym = reinterpret_cast<Symbol *>(make<SymbolUnion>());
+ symVector.push_back(sym);
+ }
- // Name is a new symbol.
- Symbol *sym = reinterpret_cast<Symbol *>(make<SymbolUnion>());
- symVector.push_back(sym);
- return {sym, true};
+ sym->isUsedInRegularObj |= !file || isa<ObjFile>(file);
+ return {sym, p.second};
}
-Symbol *SymbolTable::addDefined(StringRef name, InputSection *isec,
- uint32_t value) {
+Defined *SymbolTable::addDefined(StringRef name, InputFile *file,
+ InputSection *isec, uint64_t value,
+ uint64_t size, bool isWeakDef,
+ bool isPrivateExtern, bool isThumb,
+ bool isReferencedDynamically,
+ bool noDeadStrip) {
Symbol *s;
bool wasInserted;
- std::tie(s, wasInserted) = insert(name);
+ bool overridesWeakDef = false;
+ std::tie(s, wasInserted) = insert(name, file);
- if (!wasInserted && isa<Defined>(s))
- error("duplicate symbol: " + name);
+ assert(!isWeakDef || (isa<BitcodeFile>(file) && !isec) ||
+ (isa<ObjFile>(file) && file == isec->getFile()));
- replaceSymbol<Defined>(s, name, isec, value);
- return s;
+ if (!wasInserted) {
+ if (auto *defined = dyn_cast<Defined>(s)) {
+ if (isWeakDef) {
+ if (defined->isWeakDef()) {
+ // Both the old and the new symbol are weak (e.g. an inline function in two
+ // TUs): if one of them isn't private extern, the merged symbol isn't either.
+ defined->privateExtern &= isPrivateExtern;
+ defined->referencedDynamically |= isReferencedDynamically;
+ defined->noDeadStrip |= noDeadStrip;
+
+ // FIXME: Handle this for bitcode files.
+ // FIXME: We currently only do this if both symbols are weak.
+ // We could do this if either is weak (but getting the
+ // case where !isWeakDef && defined->isWeakDef() right
+ // requires some care and testing).
+ if (auto concatIsec = dyn_cast_or_null<ConcatInputSection>(isec))
+ concatIsec->wasCoalesced = true;
+ }
+
+ return defined;
+ }
+ if (!defined->isWeakDef())
+ error("duplicate symbol: " + name + "\n>>> defined in " +
+ toString(defined->getFile()) + "\n>>> defined in " +
+ toString(file));
+ } else if (auto *dysym = dyn_cast<DylibSymbol>(s)) {
+ overridesWeakDef = !isWeakDef && dysym->isWeakDef();
+ dysym->unreference();
+ }
+ // Defined symbols take priority over other types of symbols, so in case
+ // of a name conflict, we fall through to the replaceSymbol() call below.
+ }
+
+ Defined *defined = replaceSymbol<Defined>(
+ s, name, file, isec, value, size, isWeakDef, /*isExternal=*/true,
+ isPrivateExtern, isThumb, isReferencedDynamically, noDeadStrip);
+ defined->overridesWeakDef = overridesWeakDef;
+ return defined;
}
-Symbol *SymbolTable::addUndefined(StringRef name) {
+Symbol *SymbolTable::addUndefined(StringRef name, InputFile *file,
+ bool isWeakRef) {
Symbol *s;
bool wasInserted;
- std::tie(s, wasInserted) = insert(name);
+ std::tie(s, wasInserted) = insert(name, file);
+
+ RefState refState = isWeakRef ? RefState::Weak : RefState::Strong;
if (wasInserted)
- replaceSymbol<Undefined>(s, name);
- else if (LazySymbol *lazy = dyn_cast<LazySymbol>(s))
+ replaceSymbol<Undefined>(s, name, file, refState);
+ else if (auto *lazy = dyn_cast<LazySymbol>(s))
lazy->fetchArchiveMember();
+ else if (auto *dynsym = dyn_cast<DylibSymbol>(s))
+ dynsym->reference(refState);
+ else if (auto *undefined = dyn_cast<Undefined>(s))
+ undefined->refState = std::max(undefined->refState, refState);
+ return s;
+}
+
+Symbol *SymbolTable::addCommon(StringRef name, InputFile *file, uint64_t size,
+ uint32_t align, bool isPrivateExtern) {
+ Symbol *s;
+ bool wasInserted;
+ std::tie(s, wasInserted) = insert(name, file);
+
+ if (!wasInserted) {
+ if (auto *common = dyn_cast<CommonSymbol>(s)) {
+ if (size < common->size)
+ return s;
+ } else if (isa<Defined>(s)) {
+ return s;
+ }
+ // Common symbols take priority over all non-Defined symbols, so in case of
+ // a name conflict, we fall through to the replaceSymbol() call below.
+ }
+
+ replaceSymbol<CommonSymbol>(s, name, file, size, align, isPrivateExtern);
return s;
}
-Symbol *SymbolTable::addDylib(StringRef name, DylibFile *file) {
+Symbol *SymbolTable::addDylib(StringRef name, DylibFile *file, bool isWeakDef,
+ bool isTlv) {
Symbol *s;
bool wasInserted;
- std::tie(s, wasInserted) = insert(name);
+ std::tie(s, wasInserted) = insert(name, file);
+
+ RefState refState = RefState::Unreferenced;
+ if (!wasInserted) {
+ if (auto *defined = dyn_cast<Defined>(s)) {
+ if (isWeakDef && !defined->isWeakDef())
+ defined->overridesWeakDef = true;
+ } else if (auto *undefined = dyn_cast<Undefined>(s)) {
+ refState = undefined->refState;
+ } else if (auto *dysym = dyn_cast<DylibSymbol>(s)) {
+ refState = dysym->getRefState();
+ }
+ }
+
+ bool isDynamicLookup = file == nullptr;
+ if (wasInserted || isa<Undefined>(s) ||
+ (isa<DylibSymbol>(s) &&
+ ((!isWeakDef && s->isWeakDef()) ||
+ (!isDynamicLookup && cast<DylibSymbol>(s)->isDynamicLookup())))) {
+ if (auto *dynsym = dyn_cast<DylibSymbol>(s))
+ dynsym->unreference();
+ replaceSymbol<DylibSymbol>(s, file, name, isWeakDef, refState, isTlv);
+ }
- if (wasInserted || isa<Undefined>(s))
- replaceSymbol<DylibSymbol>(s, file, name);
return s;
}
+Symbol *SymbolTable::addDynamicLookup(StringRef name) {
+ return addDylib(name, /*file=*/nullptr, /*isWeakDef=*/false, /*isTlv=*/false);
+}
+
Symbol *SymbolTable::addLazy(StringRef name, ArchiveFile *file,
- const llvm::object::Archive::Symbol &sym) {
+ const object::Archive::Symbol &sym) {
Symbol *s;
bool wasInserted;
- std::tie(s, wasInserted) = insert(name);
+ std::tie(s, wasInserted) = insert(name, file);
if (wasInserted)
replaceSymbol<LazySymbol>(s, file, sym);
- else if (isa<Undefined>(s))
+ else if (isa<Undefined>(s) || (isa<DylibSymbol>(s) && s->isWeakDef()))
file->fetch(sym);
return s;
}
+Defined *SymbolTable::addSynthetic(StringRef name, InputSection *isec,
+ uint64_t value, bool isPrivateExtern,
+ bool includeInSymtab,
+ bool referencedDynamically) {
+ Defined *s = addDefined(name, nullptr, isec, value, /*size=*/0,
+ /*isWeakDef=*/false, isPrivateExtern,
+ /*isThumb=*/false, referencedDynamically,
+ /*noDeadStrip=*/false);
+ s->includeInSymtab = includeInSymtab;
+ return s;
+}
+
+enum class Boundary {
+ Start,
+ End,
+};
+
+static Defined *createBoundarySymbol(const Undefined &sym) {
+ return symtab->addSynthetic(
+ sym.getName(), /*isec=*/nullptr, /*value=*/-1, /*isPrivateExtern=*/true,
+ /*includeInSymtab=*/false, /*referencedDynamically=*/false);
+}
+
+static void handleSectionBoundarySymbol(const Undefined &sym, StringRef segSect,
+ Boundary which) {
+ StringRef segName, sectName;
+ std::tie(segName, sectName) = segSect.split('$');
+
+ // Attach the symbol to any InputSection that will end up in the right
+ // OutputSection -- it doesn't matter which one we pick.
+  // Don't bother looking through inputSections for a matching
+  // ConcatInputSection -- we need to create a ConcatInputSection for
+  // non-existent sections anyway, and that codepath works even if we
+  // already have a ConcatInputSection with the right name.
+
+ OutputSection *osec = nullptr;
+ // This looks for __TEXT,__cstring etc.
+ for (SyntheticSection *ssec : syntheticSections)
+ if (ssec->segname == segName && ssec->name == sectName) {
+ osec = ssec->isec->parent;
+ break;
+ }
+
+ if (!osec) {
+ ConcatInputSection *isec = make<ConcatInputSection>(segName, sectName);
+
+ // This runs after markLive() and is only called for Undefineds that are
+ // live. Marking the isec live ensures an OutputSection is created that the
+ // start/end symbol can refer to.
+ assert(sym.isLive());
+ isec->live = true;
+
+  // This runs after gatherInputSections(), so we need to explicitly set the
+  // parent and add the section to inputSections.
+ osec = isec->parent = ConcatOutputSection::getOrCreateForInput(isec);
+ inputSections.push_back(isec);
+ }
+
+ if (which == Boundary::Start)
+ osec->sectionStartSymbols.push_back(createBoundarySymbol(sym));
+ else
+ osec->sectionEndSymbols.push_back(createBoundarySymbol(sym));
+}
+
+static void handleSegmentBoundarySymbol(const Undefined &sym, StringRef segName,
+ Boundary which) {
+ OutputSegment *seg = getOrCreateOutputSegment(segName);
+ if (which == Boundary::Start)
+ seg->segmentStartSymbols.push_back(createBoundarySymbol(sym));
+ else
+ seg->segmentEndSymbols.push_back(createBoundarySymbol(sym));
+}
+
+void lld::macho::treatUndefinedSymbol(const Undefined &sym, StringRef source) {
+ // Handle start/end symbols.
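+  // These follow ld64's convention: section$start$<seg>$<sect>,
+  // section$end$<seg>$<sect>, segment$start$<seg> and segment$end$<seg>,
+  // e.g. section$start$__DATA$__mysection (the example name is illustrative).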
+ StringRef name = sym.getName();
+ if (name.consume_front("section$start$"))
+ return handleSectionBoundarySymbol(sym, name, Boundary::Start);
+ if (name.consume_front("section$end$"))
+ return handleSectionBoundarySymbol(sym, name, Boundary::End);
+ if (name.consume_front("segment$start$"))
+ return handleSegmentBoundarySymbol(sym, name, Boundary::Start);
+ if (name.consume_front("segment$end$"))
+ return handleSegmentBoundarySymbol(sym, name, Boundary::End);
+
+ // Handle -U.
+ if (config->explicitDynamicLookups.count(sym.getName())) {
+ symtab->addDynamicLookup(sym.getName());
+ return;
+ }
+
+ // Handle -undefined.
+ auto message = [source, &sym]() {
+ std::string message = "undefined symbol";
+ if (config->archMultiple)
+ message += (" for arch " + getArchitectureName(config->arch())).str();
+ message += ": " + toString(sym);
+ if (!source.empty())
+ message += "\n>>> referenced by " + source.str();
+ else
+ message += "\n>>> referenced by " + toString(sym.getFile());
+ return message;
+ };
+ switch (config->undefinedSymbolTreatment) {
+ case UndefinedSymbolTreatment::error:
+ error(message());
+ break;
+ case UndefinedSymbolTreatment::warning:
+ warn(message());
+ LLVM_FALLTHROUGH;
+ case UndefinedSymbolTreatment::dynamic_lookup:
+ case UndefinedSymbolTreatment::suppress:
+ symtab->addDynamicLookup(sym.getName());
+ break;
+ case UndefinedSymbolTreatment::unknown:
+ llvm_unreachable("unknown -undefined TREATMENT");
+ }
+}
+
SymbolTable *macho::symtab;
#ifndef LLD_MACHO_SYMBOL_TABLE_H
#define LLD_MACHO_SYMBOL_TABLE_H
+#include "Symbols.h"
+
#include "lld/Common/LLVM.h"
#include "llvm/ADT/CachedHashString.h"
#include "llvm/ADT/DenseMap.h"
class ArchiveFile;
class DylibFile;
+class InputFile;
+class ObjFile;
class InputSection;
+class MachHeaderSection;
class Symbol;
+class Defined;
+class Undefined;
+/*
+ * Note that the SymbolTable handles name collisions by calling
+ * replaceSymbol(), which does an in-place update of the Symbol via `placement
+ * new`. Therefore, there is no need to update any relocations that hold
+ * pointers to the "old" Symbol -- they will automatically point to the new one.
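+ *
+ * For example, an Undefined inserted while reading one input file may later be
+ * replaced in place by a Defined from another file; any relocation that stored
+ * the Symbol pointer beforehand will then refer to the Defined automatically.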
+ */
class SymbolTable {
public:
- Symbol *addDefined(StringRef name, InputSection *isec, uint32_t value);
+ Defined *addDefined(StringRef name, InputFile *, InputSection *,
+ uint64_t value, uint64_t size, bool isWeakDef,
+ bool isPrivateExtern, bool isThumb,
+ bool isReferencedDynamically, bool noDeadStrip);
+
+ Symbol *addUndefined(StringRef name, InputFile *, bool isWeakRef);
- Symbol *addUndefined(StringRef name);
+ Symbol *addCommon(StringRef name, InputFile *, uint64_t size, uint32_t align,
+ bool isPrivateExtern);
- Symbol *addDylib(StringRef name, DylibFile *file);
+ Symbol *addDylib(StringRef name, DylibFile *file, bool isWeakDef, bool isTlv);
+ Symbol *addDynamicLookup(StringRef name);
Symbol *addLazy(StringRef name, ArchiveFile *file,
const llvm::object::Archive::Symbol &sym);
+ Defined *addSynthetic(StringRef name, InputSection *, uint64_t value,
+ bool isPrivateExtern, bool includeInSymtab,
+ bool referencedDynamically);
+
ArrayRef<Symbol *> getSymbols() const { return symVector; }
- Symbol *find(StringRef name);
+ Symbol *find(llvm::CachedHashStringRef name);
+ Symbol *find(StringRef name) { return find(llvm::CachedHashStringRef(name)); }
private:
- std::pair<Symbol *, bool> insert(StringRef name);
+ std::pair<Symbol *, bool> insert(StringRef name, const InputFile *);
llvm::DenseMap<llvm::CachedHashStringRef, int> symMap;
std::vector<Symbol *> symVector;
};
+void treatUndefinedSymbol(const Undefined &, StringRef source = "");
+
extern SymbolTable *symtab;
} // namespace macho
#include "Symbols.h"
#include "InputFiles.h"
+#include "SyntheticSections.h"
using namespace llvm;
using namespace lld;
using namespace lld::macho;
-void LazySymbol::fetchArchiveMember() { file->fetch(sym); }
-
// Returns a symbol for an error message.
-std::string lld::toString(const Symbol &sym) {
- if (Optional<std::string> s = demangleItanium(sym.getName()))
- return *s;
- return std::string(sym.getName());
+static std::string demangle(StringRef symName) {
+ if (config->demangle)
+ return demangleItanium(symName);
+ return std::string(symName);
+}
+
+std::string lld::toString(const Symbol &sym) { return demangle(sym.getName()); }
+
+std::string lld::toMachOString(const object::Archive::Symbol &b) {
+ return demangle(b.getName());
+}
+
+uint64_t Symbol::getStubVA() const { return in.stubs->getVA(stubsIndex); }
+uint64_t Symbol::getGotVA() const { return in.got->getVA(gotIndex); }
+uint64_t Symbol::getTlvVA() const { return in.tlvPointers->getVA(gotIndex); }
+
+bool Symbol::isLive() const {
+ if (isa<DylibSymbol>(this) || isa<Undefined>(this))
+ return used;
+
+ if (auto *d = dyn_cast<Defined>(this)) {
+ // Non-absolute symbols might be alive because their section is
+ // no_dead_strip or live_support. In that case, the section will know
+ // that it's live but `used` might be false. Non-absolute symbols always
+ // have to use the section's `live` bit as source of truth.
+ if (d->isAbsolute())
+ return used;
+ return d->isec->canonical()->isLive(d->value);
+ }
+
+ assert(!isa<CommonSymbol>(this) &&
+ "replaceCommonSymbols() runs before dead code stripping, and isLive() "
+ "should only be called after dead code stripping");
+
+ // Assume any other kind of symbol is live.
+ return true;
}
+
+uint64_t Defined::getVA() const {
+ assert(isLive() && "this should only be called for live symbols");
+
+ if (isAbsolute())
+ return value;
+
+ if (!isec->canonical()->isFinal) {
+ // A target arch that does not use thunks ought never ask for
+ // the address of a function that has not yet been finalized.
+ assert(target->usesThunks());
+
+    // ConcatOutputSection::finalize() may ask for the address of a
+    // function before that address has been assigned. The thunking algorithm
+    // knows that unfinalized functions will be out of range, so it is
+    // expedient to return a contrived out-of-range address.
+ return TargetInfo::outOfRangeVA;
+ }
+ return isec->canonical()->getVA(value);
+}
+
+uint64_t DylibSymbol::getVA() const {
+ return isInStubs() ? getStubVA() : Symbol::getVA();
+}
+
+void LazySymbol::fetchArchiveMember() { getFile()->fetch(sym); }
#ifndef LLD_MACHO_SYMBOLS_H
#define LLD_MACHO_SYMBOLS_H
+#include "InputFiles.h"
#include "InputSection.h"
#include "Target.h"
#include "lld/Common/ErrorHandler.h"
#include "lld/Common/Strings.h"
#include "llvm/Object/Archive.h"
+#include "llvm/Support/MathExtras.h"
namespace lld {
namespace macho {
class InputSection;
-class DylibFile;
-class ArchiveFile;
+class MachHeaderSection;
struct StringRefZ {
StringRefZ(const char *s) : data(s), size(-1) {}
enum Kind {
DefinedKind,
UndefinedKind,
+ CommonKind,
DylibKind,
LazyKind,
};
- Kind kind() const { return static_cast<Kind>(symbolKind); }
+ virtual ~Symbol() {}
- StringRef getName() const { return {name.data, name.size}; }
+ Kind kind() const { return symbolKind; }
- uint64_t getVA() const;
+ StringRef getName() const {
+ if (nameSize == (uint32_t)-1)
+ nameSize = strlen(nameData);
+ return {nameData, nameSize};
+ }
- uint64_t getFileOffset() const;
+ bool isLive() const;
+ virtual uint64_t getVA() const { return 0; }
+
+ virtual bool isWeakDef() const { llvm_unreachable("cannot be weak def"); }
+
+ // Only undefined or dylib symbols can be weak references. A weak reference
+ // need not be satisfied at runtime, e.g. due to the symbol not being
+ // available on a given target platform.
+ virtual bool isWeakRef() const { llvm_unreachable("cannot be a weak ref"); }
+
+ virtual bool isTlv() const { llvm_unreachable("cannot be TLV"); }
+
+ // Whether this symbol is in the GOT or TLVPointer sections.
+ bool isInGot() const { return gotIndex != UINT32_MAX; }
+
+ // Whether this symbol is in the StubsSection.
+ bool isInStubs() const { return stubsIndex != UINT32_MAX; }
+
+ uint64_t getStubVA() const;
+ uint64_t getGotVA() const;
+ uint64_t getTlvVA() const;
+ uint64_t resolveBranchVA() const {
+ assert(isa<Defined>(this) || isa<DylibSymbol>(this));
+ return isInStubs() ? getStubVA() : getVA();
+ }
+ uint64_t resolveGotVA() const { return isInGot() ? getGotVA() : getVA(); }
+ uint64_t resolveTlvVA() const { return isInGot() ? getTlvVA() : getVA(); }
+
+ // The index of this symbol in the GOT or the TLVPointer section, depending
+ // on whether it is a thread-local. A given symbol cannot be referenced by
+ // both these sections at once.
uint32_t gotIndex = UINT32_MAX;
+ uint32_t stubsIndex = UINT32_MAX;
+
+ uint32_t symtabIndex = UINT32_MAX;
+
+ InputFile *getFile() const { return file; }
+
protected:
- Symbol(Kind k, StringRefZ name) : symbolKind(k), name(name) {}
+ Symbol(Kind k, StringRefZ name, InputFile *file)
+ : symbolKind(k), nameData(name.data), nameSize(name.size), file(file),
+ isUsedInRegularObj(!file || isa<ObjFile>(file)),
+ used(!config->deadStrip) {}
Kind symbolKind;
- StringRefZ name;
+ const char *nameData;
+ mutable uint32_t nameSize;
+ InputFile *file;
+
+public:
+ // True if this symbol was referenced by a regular (non-bitcode) object.
+ bool isUsedInRegularObj : 1;
+
+ // True if an undefined or dylib symbol is used from a live section.
+ bool used : 1;
};
class Defined : public Symbol {
public:
- Defined(StringRefZ name, InputSection *isec, uint32_t value)
- : Symbol(DefinedKind, name), isec(isec), value(value) {}
+ Defined(StringRefZ name, InputFile *file, InputSection *isec, uint64_t value,
+ uint64_t size, bool isWeakDef, bool isExternal, bool isPrivateExtern,
+ bool isThumb, bool isReferencedDynamically, bool noDeadStrip)
+ : Symbol(DefinedKind, name, file), isec(isec), value(value), size(size),
+ overridesWeakDef(false), privateExtern(isPrivateExtern),
+ includeInSymtab(true), thumb(isThumb),
+ referencedDynamically(isReferencedDynamically),
+ noDeadStrip(noDeadStrip), weakDef(isWeakDef), external(isExternal) {
+ if (auto concatIsec = dyn_cast_or_null<ConcatInputSection>(isec))
+ concatIsec->numRefs++;
+ }
+
+ bool isWeakDef() const override { return weakDef; }
+ bool isExternalWeakDef() const {
+ return isWeakDef() && isExternal() && !privateExtern;
+ }
+ bool isTlv() const override {
+ return !isAbsolute() && isThreadLocalVariables(isec->getFlags());
+ }
+
+ bool isExternal() const { return external; }
+ bool isAbsolute() const { return isec == nullptr; }
+
+ uint64_t getVA() const override;
+
+ static bool classof(const Symbol *s) { return s->kind() == DefinedKind; }
InputSection *isec;
- uint32_t value;
+ // Contains the offset from the containing subsection. Note that this is
+ // different from nlist::n_value, which is the absolute address of the symbol.
+ uint64_t value;
+ // size is only calculated for regular (non-bitcode) symbols.
+ uint64_t size;
+
+ bool overridesWeakDef : 1;
+  // Whether this symbol is private extern, i.e. excluded from the output
+  // binary's export trie.
+ bool privateExtern : 1;
+ // Whether this symbol should appear in the output symbol table.
+ bool includeInSymtab : 1;
+ // Only relevant when compiling for Thumb-supporting arm32 archs.
+ bool thumb : 1;
+ // Symbols marked referencedDynamically won't be removed from the output's
+ // symbol table by tools like strip. In theory, this could be set on arbitrary
+ // symbols in input object files. In practice, it's used solely for the
+ // synthetic __mh_execute_header symbol.
+ // This is information for the static linker, and it's also written to the
+ // output file's symbol table for tools running later (such as `strip`).
+ bool referencedDynamically : 1;
+ // Set on symbols that should not be removed by dead code stripping.
+ // Set for example on `__attribute__((used))` globals, or on some Objective-C
+ // metadata. This is information only for the static linker and not written
+ // to the output.
+ bool noDeadStrip : 1;
- static bool classof(const Symbol *s) { return s->kind() == DefinedKind; }
+private:
+ const bool weakDef : 1;
+ const bool external : 1;
};
+// This enum does double-duty: as a symbol property, it indicates whether & how
+// a dylib symbol is referenced. As a DylibFile property, it indicates the kind
+// of referenced symbols contained within the file. If there are both weak
+// and strong references to the same file, we will count the file as
+// strongly-referenced.
+enum class RefState : uint8_t { Unreferenced = 0, Weak = 1, Strong = 2 };
+
class Undefined : public Symbol {
public:
- Undefined(StringRefZ name) : Symbol(UndefinedKind, name) {}
+ Undefined(StringRefZ name, InputFile *file, RefState refState)
+ : Symbol(UndefinedKind, name, file), refState(refState) {
+ assert(refState != RefState::Unreferenced);
+ }
+
+ bool isWeakRef() const override { return refState == RefState::Weak; }
static bool classof(const Symbol *s) { return s->kind() == UndefinedKind; }
+
+ RefState refState : 2;
+};
+
+// On Unix, it is traditionally allowed to write variable definitions without
+// initialization expressions (such as "int foo;") to header files. These are
+// called tentative definitions.
+//
+// Using tentative definitions is usually considered a bad practice; you should
+// write only declarations (such as "extern int foo;") to header files.
+// Nevertheless, the linker and the compiler have to do something to support
+// bad code by allowing duplicate definitions for this particular case.
+//
+// The compiler creates common symbols when it sees tentative definitions.
+// (You can suppress this behavior and let the compiler create a regular
+// defined symbol by passing -fno-common. -fno-common is the default in clang
+// as of LLVM 11.0.) When linking the final binary, if there are remaining
+// common symbols after name resolution is complete, the linker converts them
+// to regular defined symbols in a __common section.
+class CommonSymbol : public Symbol {
+public:
+ CommonSymbol(StringRefZ name, InputFile *file, uint64_t size, uint32_t align,
+ bool isPrivateExtern)
+ : Symbol(CommonKind, name, file), size(size),
+ align(align != 1 ? align : llvm::PowerOf2Ceil(size)),
+ privateExtern(isPrivateExtern) {
+ // TODO: cap maximum alignment
+ }
+
+ static bool classof(const Symbol *s) { return s->kind() == CommonKind; }
+
+ const uint64_t size;
+ const uint32_t align;
+ const bool privateExtern;
};
class DylibSymbol : public Symbol {
public:
- DylibSymbol(DylibFile *file, StringRefZ name)
- : Symbol(DylibKind, name), file(file) {}
+ DylibSymbol(DylibFile *file, StringRefZ name, bool isWeakDef,
+ RefState refState, bool isTlv)
+ : Symbol(DylibKind, name, file), refState(refState), weakDef(isWeakDef),
+ tlv(isTlv) {
+ if (file && refState > RefState::Unreferenced)
+ file->numReferencedSymbols++;
+ }
+
+ uint64_t getVA() const override;
+ bool isWeakDef() const override { return weakDef; }
+ bool isWeakRef() const override { return refState == RefState::Weak; }
+ bool isReferenced() const { return refState != RefState::Unreferenced; }
+ bool isTlv() const override { return tlv; }
+ bool isDynamicLookup() const { return file == nullptr; }
+ bool hasStubsHelper() const { return stubsHelperIndex != UINT32_MAX; }
+
+ DylibFile *getFile() const {
+ assert(!isDynamicLookup());
+ return cast<DylibFile>(file);
+ }
static bool classof(const Symbol *s) { return s->kind() == DylibKind; }
- DylibFile *file;
- uint32_t stubsIndex = UINT32_MAX;
+ uint32_t stubsHelperIndex = UINT32_MAX;
uint32_t lazyBindOffset = UINT32_MAX;
+
+ RefState getRefState() const { return refState; }
+
+ void reference(RefState newState) {
+ assert(newState > RefState::Unreferenced);
+ if (refState == RefState::Unreferenced && file)
+ getFile()->numReferencedSymbols++;
+ refState = std::max(refState, newState);
+ }
+
+ void unreference() {
+ // dynamic_lookup symbols have no file.
+ if (refState > RefState::Unreferenced && file) {
+ assert(getFile()->numReferencedSymbols > 0);
+ getFile()->numReferencedSymbols--;
+ }
+ }
+
+private:
+ RefState refState : 2;
+ const bool weakDef : 1;
+ const bool tlv : 1;
};
class LazySymbol : public Symbol {
public:
LazySymbol(ArchiveFile *file, const llvm::object::Archive::Symbol &sym)
- : Symbol(LazyKind, sym.getName()), file(file), sym(sym) {}
-
- static bool classof(const Symbol *s) { return s->kind() == LazyKind; }
+ : Symbol(LazyKind, sym.getName(), file), sym(sym) {}
+ ArchiveFile *getFile() const { return cast<ArchiveFile>(file); }
void fetchArchiveMember();
+ static bool classof(const Symbol *s) { return s->kind() == LazyKind; }
+
private:
- ArchiveFile *file;
const llvm::object::Archive::Symbol sym;
};
-inline uint64_t Symbol::getVA() const {
- if (auto *d = dyn_cast<Defined>(this))
- return d->isec->getVA() + d->value;
- return 0;
-}
-
-inline uint64_t Symbol::getFileOffset() const {
- if (auto *d = dyn_cast<Defined>(this))
- return d->isec->getFileOffset() + d->value;
- llvm_unreachable("attempt to get an offset from an undefined symbol");
-}
-
union SymbolUnion {
alignas(Defined) char a[sizeof(Defined)];
alignas(Undefined) char b[sizeof(Undefined)];
- alignas(DylibSymbol) char c[sizeof(DylibSymbol)];
- alignas(LazySymbol) char d[sizeof(LazySymbol)];
+ alignas(CommonSymbol) char c[sizeof(CommonSymbol)];
+ alignas(DylibSymbol) char d[sizeof(DylibSymbol)];
+ alignas(LazySymbol) char e[sizeof(LazySymbol)];
};
template <typename T, typename... ArgT>
-void replaceSymbol(Symbol *s, ArgT &&... arg) {
+T *replaceSymbol(Symbol *s, ArgT &&...arg) {
static_assert(sizeof(T) <= sizeof(SymbolUnion), "SymbolUnion too small");
static_assert(alignof(T) <= alignof(SymbolUnion),
"SymbolUnion not aligned enough");
assert(static_cast<Symbol *>(static_cast<T *>(nullptr)) == nullptr &&
"Not a Symbol");
- new (s) T(std::forward<ArgT>(arg)...);
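+  // Preserve these flags across the placement-new replacement below: they
+  // record references made before the replacement and must survive it.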
+ bool isUsedInRegularObj = s->isUsedInRegularObj;
+ bool used = s->used;
+ T *sym = new (s) T(std::forward<ArgT>(arg)...);
+ sym->isUsedInRegularObj |= isUsedInRegularObj;
+ sym->used |= used;
+ return sym;
}
} // namespace macho
std::string toString(const macho::Symbol &);
+std::string toMachOString(const llvm::object::Archive::Symbol &);
+
} // namespace lld
#endif
//===----------------------------------------------------------------------===//
#include "SyntheticSections.h"
+#include "ConcatOutputSection.h"
#include "Config.h"
#include "ExportTrie.h"
#include "InputFiles.h"
#include "MachOStructs.h"
-#include "MergedOutputSection.h"
#include "OutputSegment.h"
#include "SymbolTable.h"
#include "Symbols.h"
-#include "Writer.h"
#include "lld/Common/ErrorHandler.h"
#include "lld/Common/Memory.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Config/llvm-config.h"
#include "llvm/Support/EndianStream.h"
+#include "llvm/Support/FileSystem.h"
#include "llvm/Support/LEB128.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/SHA256.h"
+
+#if defined(__APPLE__)
+#include <sys/mman.h>
+#endif
+
+#ifdef LLVM_HAVE_LIBXAR
+#include <fcntl.h>
+#include <xar/xar.h>
+#endif
using namespace llvm;
+using namespace llvm::MachO;
using namespace llvm::support;
using namespace llvm::support::endian;
using namespace lld;
std::vector<SyntheticSection *> macho::syntheticSections;
SyntheticSection::SyntheticSection(const char *segname, const char *name)
- : OutputSection(SyntheticKind, name), segname(segname) {
+ : OutputSection(SyntheticKind, name) {
+ std::tie(this->segname, this->name) = maybeRenameSection({segname, name});
+ isec = make<ConcatInputSection>(segname, name);
+ isec->parent = this;
syntheticSections.push_back(this);
}
// dyld3's MachOLoaded::getSlide() assumes that the __TEXT segment starts
// from the beginning of the file (i.e. the header).
MachHeaderSection::MachHeaderSection()
- : SyntheticSection(segment_names::text, section_names::header) {}
+ : SyntheticSection(segment_names::text, section_names::header) {
+  // XXX: This is a hack. (See D97007)
+  // We set the index to 1 to pretend that this section is the text
+  // section.
+ index = 1;
+ isec->isFinal = true;
+}
void MachHeaderSection::addLoadCommand(LoadCommand *lc) {
loadCommands.push_back(lc);
}
uint64_t MachHeaderSection::getSize() const {
- return sizeof(MachO::mach_header_64) + sizeOfCmds;
+ uint64_t size = target->headerSize + sizeOfCmds + config->headerPad;
+ // If we are emitting an encryptable binary, our load commands must have a
+ // separate (non-encrypted) page to themselves.
+ if (config->emitEncryptionInfo)
+ size = alignTo(size, target->getPageSize());
+ return size;
+}
+
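+// Dynamically-linked x86_64 executables targeting macOS 10.5 or later also get
+// the CPU_SUBTYPE_LIB64 capability bit, matching ld64's behavior.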
+static uint32_t cpuSubtype() {
+ uint32_t subtype = target->cpuSubtype;
+
+ if (config->outputType == MH_EXECUTE && !config->staticLink &&
+ target->cpuSubtype == CPU_SUBTYPE_X86_64_ALL &&
+ config->platform() == PlatformKind::macOS &&
+ config->platformInfo.minimum >= VersionTuple(10, 5))
+ subtype |= CPU_SUBTYPE_LIB64;
+
+ return subtype;
}
void MachHeaderSection::writeTo(uint8_t *buf) const {
- auto *hdr = reinterpret_cast<MachO::mach_header_64 *>(buf);
- hdr->magic = MachO::MH_MAGIC_64;
- hdr->cputype = MachO::CPU_TYPE_X86_64;
- hdr->cpusubtype = MachO::CPU_SUBTYPE_X86_64_ALL | MachO::CPU_SUBTYPE_LIB64;
+ auto *hdr = reinterpret_cast<mach_header *>(buf);
+ hdr->magic = target->magic;
+ hdr->cputype = target->cpuType;
+ hdr->cpusubtype = cpuSubtype();
hdr->filetype = config->outputType;
hdr->ncmds = loadCommands.size();
hdr->sizeofcmds = sizeOfCmds;
- hdr->flags = MachO::MH_NOUNDEFS | MachO::MH_DYLDLINK | MachO::MH_TWOLEVEL;
- if (config->outputType == MachO::MH_DYLIB && !config->hasReexports)
- hdr->flags |= MachO::MH_NO_REEXPORTED_DYLIBS;
+ hdr->flags = MH_DYLDLINK;
+
+ if (config->namespaceKind == NamespaceKind::twolevel)
+ hdr->flags |= MH_NOUNDEFS | MH_TWOLEVEL;
+
+ if (config->outputType == MH_DYLIB && !config->hasReexports)
+ hdr->flags |= MH_NO_REEXPORTED_DYLIBS;
- uint8_t *p = reinterpret_cast<uint8_t *>(hdr + 1);
- for (LoadCommand *lc : loadCommands) {
+ if (config->markDeadStrippableDylib)
+ hdr->flags |= MH_DEAD_STRIPPABLE_DYLIB;
+
+ if (config->outputType == MH_EXECUTE && config->isPic)
+ hdr->flags |= MH_PIE;
+
+ if (config->outputType == MH_DYLIB && config->applicationExtension)
+ hdr->flags |= MH_APP_EXTENSION_SAFE;
+
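+  // MH_WEAK_DEFINES marks an image that contains weak definitions;
+  // MH_BINDS_TO_WEAK marks an image that binds against weak definitions.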
+ if (in.exports->hasWeakSymbol || in.weakBinding->hasNonWeakDefinition())
+ hdr->flags |= MH_WEAK_DEFINES;
+
+ if (in.exports->hasWeakSymbol || in.weakBinding->hasEntry())
+ hdr->flags |= MH_BINDS_TO_WEAK;
+
+ for (const OutputSegment *seg : outputSegments) {
+ for (const OutputSection *osec : seg->getSections()) {
+ if (isThreadLocalVariables(osec->flags)) {
+ hdr->flags |= MH_HAS_TLV_DESCRIPTORS;
+ break;
+ }
+ }
+ }
+
+ uint8_t *p = reinterpret_cast<uint8_t *>(hdr) + target->headerSize;
+ for (const LoadCommand *lc : loadCommands) {
lc->writeTo(p);
p += lc->getSize();
}
PageZeroSection::PageZeroSection()
: SyntheticSection(segment_names::pageZero, section_names::pageZero) {}
-GotSection::GotSection()
- : SyntheticSection(segment_names::dataConst, section_names::got) {
- align = 8;
- flags = MachO::S_NON_LAZY_SYMBOL_POINTERS;
+RebaseSection::RebaseSection()
+ : LinkEditSection(segment_names::linkEdit, section_names::rebase) {}
+
+namespace {
+struct Rebase {
+ OutputSegment *segment = nullptr;
+ uint64_t offset = 0;
+ uint64_t consecutiveCount = 0;
+};
+} // namespace
+
+// Rebase opcodes allow us to describe a contiguous sequence of rebase locations
+// using a single DO_REBASE opcode. To take advantage of this, we delay emitting
+// `DO_REBASE` until we have reached the end of a contiguous sequence.
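+//
+// For example (illustrative), three pointers in three consecutive word-sized
+// slots of the same segment are emitted as one SET_SEGMENT_AND_OFFSET_ULEB
+// followed by a single DO_REBASE_IMM_TIMES with a count of 3, rather than
+// three separate DO_REBASE opcodes.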
+static void encodeDoRebase(Rebase &rebase, raw_svector_ostream &os) {
+ assert(rebase.consecutiveCount != 0);
+ if (rebase.consecutiveCount <= REBASE_IMMEDIATE_MASK) {
+ os << static_cast<uint8_t>(REBASE_OPCODE_DO_REBASE_IMM_TIMES |
+ rebase.consecutiveCount);
+ } else {
+ os << static_cast<uint8_t>(REBASE_OPCODE_DO_REBASE_ULEB_TIMES);
+ encodeULEB128(rebase.consecutiveCount, os);
+ }
+ rebase.consecutiveCount = 0;
+}
+
+static void encodeRebase(const OutputSection *osec, uint64_t outSecOff,
+ Rebase &lastRebase, raw_svector_ostream &os) {
+ OutputSegment *seg = osec->parent;
+ uint64_t offset = osec->getSegmentOffset() + outSecOff;
+ if (lastRebase.segment != seg || lastRebase.offset != offset) {
+ if (lastRebase.consecutiveCount != 0)
+ encodeDoRebase(lastRebase, os);
+
+ if (lastRebase.segment != seg) {
+ os << static_cast<uint8_t>(REBASE_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB |
+ seg->index);
+ encodeULEB128(offset, os);
+ lastRebase.segment = seg;
+ lastRebase.offset = offset;
+ } else {
+ assert(lastRebase.offset != offset);
+ os << static_cast<uint8_t>(REBASE_OPCODE_ADD_ADDR_ULEB);
+ encodeULEB128(offset - lastRebase.offset, os);
+ lastRebase.offset = offset;
+ }
+ }
+ ++lastRebase.consecutiveCount;
+  // DO_REBASE causes dyld to both perform the rebase and increment the offset
+ lastRebase.offset += target->wordSize;
+}
- // TODO: section_64::reserved1 should be an index into the indirect symbol
- // table, which we do not currently emit
+void RebaseSection::finalizeContents() {
+ if (locations.empty())
+ return;
+
+ raw_svector_ostream os{contents};
+ Rebase lastRebase;
+
+ os << static_cast<uint8_t>(REBASE_OPCODE_SET_TYPE_IMM | REBASE_TYPE_POINTER);
+
+ llvm::sort(locations, [](const Location &a, const Location &b) {
+ return a.isec->getVA(a.offset) < b.isec->getVA(b.offset);
+ });
+ for (const Location &loc : locations)
+ encodeRebase(loc.isec->parent, loc.isec->getOffset(loc.offset), lastRebase,
+ os);
+ if (lastRebase.consecutiveCount != 0)
+ encodeDoRebase(lastRebase, os);
+
+ os << static_cast<uint8_t>(REBASE_OPCODE_DONE);
+}
+
+void RebaseSection::writeTo(uint8_t *buf) const {
+ memcpy(buf, contents.data(), contents.size());
+}
+
+NonLazyPointerSectionBase::NonLazyPointerSectionBase(const char *segname,
+ const char *name)
+ : SyntheticSection(segname, name) {
+ align = target->wordSize;
}
-void GotSection::addEntry(Symbol &sym) {
- if (entries.insert(&sym)) {
- sym.gotIndex = entries.size() - 1;
+void macho::addNonLazyBindingEntries(const Symbol *sym,
+ const InputSection *isec, uint64_t offset,
+ int64_t addend) {
+ if (const auto *dysym = dyn_cast<DylibSymbol>(sym)) {
+ in.binding->addEntry(dysym, isec, offset, addend);
+ if (dysym->isWeakDef())
+ in.weakBinding->addEntry(sym, isec, offset, addend);
+ } else if (const auto *defined = dyn_cast<Defined>(sym)) {
+ in.rebase->addEntry(isec, offset);
+ if (defined->isExternalWeakDef())
+ in.weakBinding->addEntry(sym, isec, offset, addend);
+ } else {
+ // Undefined symbols are filtered out in scanRelocations(); we should never
+ // get here
+ llvm_unreachable("cannot bind to an undefined symbol");
}
}
-void GotSection::writeTo(uint8_t *buf) const {
+void NonLazyPointerSectionBase::addEntry(Symbol *sym) {
+ if (entries.insert(sym)) {
+ assert(!sym->isInGot());
+ sym->gotIndex = entries.size() - 1;
+
+ addNonLazyBindingEntries(sym, isec, sym->gotIndex * target->wordSize);
+ }
+}
+
+void NonLazyPointerSectionBase::writeTo(uint8_t *buf) const {
for (size_t i = 0, n = entries.size(); i < n; ++i)
if (auto *defined = dyn_cast<Defined>(entries[i]))
- write64le(&buf[i * WordSize], defined->getVA());
+ write64le(&buf[i * target->wordSize], defined->getVA());
}
-BindingSection::BindingSection()
- : SyntheticSection(segment_names::linkEdit, section_names::binding) {}
+GotSection::GotSection()
+ : NonLazyPointerSectionBase(segment_names::dataConst, section_names::got) {
+ flags = S_NON_LAZY_SYMBOL_POINTERS;
+}
-bool BindingSection::isNeeded() const {
- return bindings.size() != 0 || in.got->isNeeded();
+TlvPointerSection::TlvPointerSection()
+ : NonLazyPointerSectionBase(segment_names::data,
+ section_names::threadPtrs) {
+ flags = S_THREAD_LOCAL_VARIABLE_POINTERS;
}
+BindingSection::BindingSection()
+ : LinkEditSection(segment_names::linkEdit, section_names::binding) {}
+
namespace {
struct Binding {
OutputSegment *segment = nullptr;
uint64_t offset = 0;
int64_t addend = 0;
- uint8_t ordinal = 0;
+};
+struct BindIR {
+ // Default value of 0xF0 is not valid opcode and should make the program
+ // scream instead of accidentally writing "valid" values.
+ uint8_t opcode = 0xF0;
+ uint64_t data = 0;
+ uint64_t consecutiveCount = 0;
};
} // namespace
-// Encode a sequence of opcodes that tell dyld to write the address of dysym +
+// Encode a sequence of opcodes that tell dyld to write the address of symbol +
// addend at osec->addr + outSecOff.
//
// The bind opcode "interpreter" remembers the values of each binding field, so
// we only need to encode the differences between bindings. Hence the use of
// lastBinding.
-static void encodeBinding(const DylibSymbol &dysym, const OutputSection *osec,
- uint64_t outSecOff, int64_t addend,
- Binding &lastBinding, raw_svector_ostream &os) {
- using namespace llvm::MachO;
+static void encodeBinding(const OutputSection *osec, uint64_t outSecOff,
+ int64_t addend, Binding &lastBinding,
+ std::vector<BindIR> &opcodes) {
OutputSegment *seg = osec->parent;
uint64_t offset = osec->getSegmentOffset() + outSecOff;
if (lastBinding.segment != seg) {
- os << static_cast<uint8_t>(BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB |
- seg->index);
- encodeULEB128(offset, os);
+ opcodes.push_back(
+ {static_cast<uint8_t>(BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB |
+ seg->index),
+ offset});
lastBinding.segment = seg;
lastBinding.offset = offset;
} else if (lastBinding.offset != offset) {
- assert(lastBinding.offset <= offset);
- os << static_cast<uint8_t>(BIND_OPCODE_ADD_ADDR_ULEB);
- encodeULEB128(offset - lastBinding.offset, os);
+ opcodes.push_back({BIND_OPCODE_ADD_ADDR_ULEB, offset - lastBinding.offset});
lastBinding.offset = offset;
}
- if (lastBinding.ordinal != dysym.file->ordinal) {
- if (dysym.file->ordinal <= BIND_IMMEDIATE_MASK) {
- os << static_cast<uint8_t>(BIND_OPCODE_SET_DYLIB_ORDINAL_IMM |
- dysym.file->ordinal);
+ if (lastBinding.addend != addend) {
+ opcodes.push_back(
+ {BIND_OPCODE_SET_ADDEND_SLEB, static_cast<uint64_t>(addend)});
+ lastBinding.addend = addend;
+ }
+
+ opcodes.push_back({BIND_OPCODE_DO_BIND, 0});
+ // DO_BIND causes dyld to both perform the binding and increment the offset
+ lastBinding.offset += target->wordSize;
+}
+
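+// As an illustrative example, two bindings to the same symbol at offsets 0x0
+// and 0x10 of a segment (with 8-byte pointers) come out of encodeBinding() as
+//   SET_SEGMENT_AND_OFFSET_ULEB(seg, 0x0), DO_BIND,
+//   ADD_ADDR_ULEB(0x8), DO_BIND
+// Pass 1 below folds the DO_BIND/ADD_ADDR_ULEB pair into
+// DO_BIND_ADD_ADDR_ULEB(0x8), and pass 3 rewrites that into
+// DO_BIND_ADD_ADDR_IMM_SCALED(1), since 0x8 is exactly one word.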
+static void optimizeOpcodes(std::vector<BindIR> &opcodes) {
+ // Pass 1: Combine bind/add pairs
+ size_t i;
+ int pWrite = 0;
+ for (i = 1; i < opcodes.size(); ++i, ++pWrite) {
+ if ((opcodes[i].opcode == BIND_OPCODE_ADD_ADDR_ULEB) &&
+ (opcodes[i - 1].opcode == BIND_OPCODE_DO_BIND)) {
+ opcodes[pWrite].opcode = BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB;
+ opcodes[pWrite].data = opcodes[i].data;
+ ++i;
} else {
- error("TODO: Support larger dylib symbol ordinals");
- return;
+ opcodes[pWrite] = opcodes[i - 1];
}
- lastBinding.ordinal = dysym.file->ordinal;
}
+ if (i == opcodes.size())
+ opcodes[pWrite] = opcodes[i - 1];
+ opcodes.resize(pWrite + 1);
- if (lastBinding.addend != addend) {
- os << static_cast<uint8_t>(BIND_OPCODE_SET_ADDEND_SLEB);
- encodeSLEB128(addend, os);
- lastBinding.addend = addend;
+ // Pass 2: Compress two or more bind_add opcodes
+ pWrite = 0;
+ for (i = 1; i < opcodes.size(); ++i, ++pWrite) {
+ if ((opcodes[i].opcode == BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB) &&
+ (opcodes[i - 1].opcode == BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB) &&
+ (opcodes[i].data == opcodes[i - 1].data)) {
+ opcodes[pWrite].opcode = BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB;
+ opcodes[pWrite].consecutiveCount = 2;
+ opcodes[pWrite].data = opcodes[i].data;
+ ++i;
+ while (i < opcodes.size() &&
+ (opcodes[i].opcode == BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB) &&
+ (opcodes[i].data == opcodes[i - 1].data)) {
+ opcodes[pWrite].consecutiveCount++;
+ ++i;
+ }
+ } else {
+ opcodes[pWrite] = opcodes[i - 1];
+ }
}
+ if (i == opcodes.size())
+ opcodes[pWrite] = opcodes[i - 1];
+ opcodes.resize(pWrite + 1);
- os << static_cast<uint8_t>(BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM)
- << dysym.getName() << '\0'
- << static_cast<uint8_t>(BIND_OPCODE_SET_TYPE_IMM | BIND_TYPE_POINTER)
- << static_cast<uint8_t>(BIND_OPCODE_DO_BIND);
- // DO_BIND causes dyld to both perform the binding and increment the offset
- lastBinding.offset += WordSize;
+ // Pass 3: Use immediate encodings
+ // Every binding is the size of one pointer. If the next binding is a
+ // multiple of wordSize away that is within BIND_IMMEDIATE_MASK, the
+ // opcode can be scaled by wordSize into a single byte and dyld will
+ // expand it to the correct address.
+ for (auto &p : opcodes) {
+    // It's unclear why the check needs to be less than BIND_IMMEDIATE_MASK,
+    // but ld64 currently does this. It may be a bug, but for now we mirror
+    // the same behavior to avoid mysterious mismatches.
+ if ((p.opcode == BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB) &&
+ ((p.data / target->wordSize) < BIND_IMMEDIATE_MASK) &&
+ ((p.data % target->wordSize) == 0)) {
+ p.opcode = BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED;
+ p.data /= target->wordSize;
+ }
+ }
+}
+
+static void flushOpcodes(const BindIR &op, raw_svector_ostream &os) {
+ uint8_t opcode = op.opcode & BIND_OPCODE_MASK;
+ switch (opcode) {
+ case BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB:
+ case BIND_OPCODE_ADD_ADDR_ULEB:
+ case BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB:
+ os << op.opcode;
+ encodeULEB128(op.data, os);
+ break;
+ case BIND_OPCODE_SET_ADDEND_SLEB:
+ os << op.opcode;
+ encodeSLEB128(static_cast<int64_t>(op.data), os);
+ break;
+ case BIND_OPCODE_DO_BIND:
+ os << op.opcode;
+ break;
+ case BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB:
+ os << op.opcode;
+ encodeULEB128(op.consecutiveCount, os);
+ encodeULEB128(op.data, os);
+ break;
+ case BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED:
+ os << static_cast<uint8_t>(op.opcode | op.data);
+ break;
+ default:
+ llvm_unreachable("cannot bind to an unrecognized symbol");
+ }
+}
+
+// Non-weak bindings need to have their dylib ordinal encoded as well.
+static int16_t ordinalForDylibSymbol(const DylibSymbol &dysym) {
+ if (config->namespaceKind == NamespaceKind::flat || dysym.isDynamicLookup())
+ return static_cast<int16_t>(BIND_SPECIAL_DYLIB_FLAT_LOOKUP);
+ assert(dysym.getFile()->isReferenced());
+ return dysym.getFile()->ordinal;
+}
+
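+// Ordinals <= 0 are the special BIND_SPECIAL_DYLIB_* values (e.g. flat
+// lookup); small positive ordinals fit into the opcode's immediate field,
+// while larger ones require a trailing ULEB128.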
+static void encodeDylibOrdinal(int16_t ordinal, raw_svector_ostream &os) {
+ if (ordinal <= 0) {
+ os << static_cast<uint8_t>(BIND_OPCODE_SET_DYLIB_SPECIAL_IMM |
+ (ordinal & BIND_IMMEDIATE_MASK));
+ } else if (ordinal <= BIND_IMMEDIATE_MASK) {
+ os << static_cast<uint8_t>(BIND_OPCODE_SET_DYLIB_ORDINAL_IMM | ordinal);
+ } else {
+ os << static_cast<uint8_t>(BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB);
+ encodeULEB128(ordinal, os);
+ }
+}
+
+static void encodeWeakOverride(const Defined *defined,
+ raw_svector_ostream &os) {
+ os << static_cast<uint8_t>(BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM |
+ BIND_SYMBOL_FLAGS_NON_WEAK_DEFINITION)
+ << defined->getName() << '\0';
+}
+
+// Organize the bindings so we can encode them with fewer opcodes.
+//
+// First, all bindings for a given symbol should be grouped together.
+// BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM is the largest opcode (since it
+// has an associated symbol string), so we only want to emit it once per symbol.
+//
+// Within each group, we sort the bindings by address. Since bindings are
+// delta-encoded, sorting them allows for a more compact result. Note that
+// sorting by address alone ensures that bindings for the same segment / section
+// are located together, minimizing the number of times we have to emit
+// BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB.
+//
+// Finally, we sort the symbols by the address of their first binding, again
+// to facilitate the delta-encoding process.
+template <class Sym>
+std::vector<std::pair<const Sym *, std::vector<BindingEntry>>>
+sortBindings(const BindingsMap<const Sym *> &bindingsMap) {
+ std::vector<std::pair<const Sym *, std::vector<BindingEntry>>> bindingsVec(
+ bindingsMap.begin(), bindingsMap.end());
+ for (auto &p : bindingsVec) {
+ std::vector<BindingEntry> &bindings = p.second;
+ llvm::sort(bindings, [](const BindingEntry &a, const BindingEntry &b) {
+ return a.target.getVA() < b.target.getVA();
+ });
+ }
+ llvm::sort(bindingsVec, [](const auto &a, const auto &b) {
+ return a.second[0].target.getVA() < b.second[0].target.getVA();
+ });
+ return bindingsVec;
}
// Emit bind opcodes, which are a stream of byte-sized opcodes that dyld
void BindingSection::finalizeContents() {
raw_svector_ostream os{contents};
Binding lastBinding;
- bool didEncode = false;
- size_t gotIdx = 0;
- for (const Symbol *sym : in.got->getEntries()) {
- if (const auto *dysym = dyn_cast<DylibSymbol>(sym)) {
- didEncode = true;
- encodeBinding(*dysym, in.got, gotIdx * WordSize, 0, lastBinding, os);
+ int16_t lastOrdinal = 0;
+
+ for (auto &p : sortBindings(bindingsMap)) {
+ const DylibSymbol *sym = p.first;
+ std::vector<BindingEntry> &bindings = p.second;
+ uint8_t flags = BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM;
+ if (sym->isWeakRef())
+ flags |= BIND_SYMBOL_FLAGS_WEAK_IMPORT;
+ os << flags << sym->getName() << '\0'
+ << static_cast<uint8_t>(BIND_OPCODE_SET_TYPE_IMM | BIND_TYPE_POINTER);
+ int16_t ordinal = ordinalForDylibSymbol(*sym);
+ if (ordinal != lastOrdinal) {
+ encodeDylibOrdinal(ordinal, os);
+ lastOrdinal = ordinal;
}
- ++gotIdx;
- }
-
- // Sorting the relocations by segment and address allows us to encode them
- // more compactly.
- llvm::sort(bindings, [](const BindingEntry &a, const BindingEntry &b) {
- OutputSegment *segA = a.isec->parent->parent;
- OutputSegment *segB = b.isec->parent->parent;
- if (segA != segB)
- return segA->fileOff < segB->fileOff;
- OutputSection *osecA = a.isec->parent;
- OutputSection *osecB = b.isec->parent;
- if (osecA != osecB)
- return osecA->addr < osecB->addr;
- if (a.isec != b.isec)
- return a.isec->outSecOff < b.isec->outSecOff;
- return a.offset < b.offset;
- });
- for (const BindingEntry &b : bindings) {
- didEncode = true;
- encodeBinding(*b.dysym, b.isec->parent, b.isec->outSecOff + b.offset,
- b.addend, lastBinding, os);
+ std::vector<BindIR> opcodes;
+ for (const BindingEntry &b : bindings)
+ encodeBinding(b.target.isec->parent,
+ b.target.isec->getOffset(b.target.offset), b.addend,
+ lastBinding, opcodes);
+ if (config->optimize > 1)
+ optimizeOpcodes(opcodes);
+ for (const auto &op : opcodes)
+ flushOpcodes(op, os);
}
- if (didEncode)
- os << static_cast<uint8_t>(MachO::BIND_OPCODE_DONE);
+ if (!bindingsMap.empty())
+ os << static_cast<uint8_t>(BIND_OPCODE_DONE);
}
void BindingSection::writeTo(uint8_t *buf) const {
memcpy(buf, contents.data(), contents.size());
}
+WeakBindingSection::WeakBindingSection()
+ : LinkEditSection(segment_names::linkEdit, section_names::weakBinding) {}
+
+void WeakBindingSection::finalizeContents() {
+ raw_svector_ostream os{contents};
+ Binding lastBinding;
+
+ for (const Defined *defined : definitions)
+ encodeWeakOverride(defined, os);
+
+ for (auto &p : sortBindings(bindingsMap)) {
+ const Symbol *sym = p.first;
+ std::vector<BindingEntry> &bindings = p.second;
+ os << static_cast<uint8_t>(BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM)
+ << sym->getName() << '\0'
+ << static_cast<uint8_t>(BIND_OPCODE_SET_TYPE_IMM | BIND_TYPE_POINTER);
+ std::vector<BindIR> opcodes;
+ for (const BindingEntry &b : bindings)
+ encodeBinding(b.target.isec->parent,
+ b.target.isec->getOffset(b.target.offset), b.addend,
+ lastBinding, opcodes);
+ if (config->optimize > 1)
+ optimizeOpcodes(opcodes);
+ for (const auto &op : opcodes)
+ flushOpcodes(op, os);
+ }
+ if (!bindingsMap.empty() || !definitions.empty())
+ os << static_cast<uint8_t>(BIND_OPCODE_DONE);
+}
+
+void WeakBindingSection::writeTo(uint8_t *buf) const {
+ memcpy(buf, contents.data(), contents.size());
+}
+
StubsSection::StubsSection()
- : SyntheticSection(segment_names::text, "__stubs") {}
+ : SyntheticSection(segment_names::text, section_names::stubs) {
+ flags = S_SYMBOL_STUBS | S_ATTR_SOME_INSTRUCTIONS | S_ATTR_PURE_INSTRUCTIONS;
+ // The stubs section comprises machine instructions, which are aligned to
+ // 4 bytes on the archs we care about.
+ align = 4;
+ reserved2 = target->stubSize;
+}
uint64_t StubsSection::getSize() const {
return entries.size() * target->stubSize;
void StubsSection::writeTo(uint8_t *buf) const {
size_t off = 0;
- for (const DylibSymbol *sym : in.stubs->getEntries()) {
+ for (const Symbol *sym : entries) {
target->writeStub(buf + off, *sym);
off += target->stubSize;
}
}
-void StubsSection::addEntry(DylibSymbol &sym) {
- if (entries.insert(&sym))
- sym.stubsIndex = entries.size() - 1;
+void StubsSection::finalize() { isFinal = true; }
+
+bool StubsSection::addEntry(Symbol *sym) {
+ bool inserted = entries.insert(sym);
+ if (inserted)
+ sym->stubsIndex = entries.size() - 1;
+ return inserted;
}
StubHelperSection::StubHelperSection()
- : SyntheticSection(segment_names::text, "__stub_helper") {}
+ : SyntheticSection(segment_names::text, section_names::stubHelper) {
+ flags = S_ATTR_SOME_INSTRUCTIONS | S_ATTR_PURE_INSTRUCTIONS;
+ align = 4; // This section comprises machine instructions
+}
uint64_t StubHelperSection::getSize() const {
return target->stubHelperHeaderSize +
- in.stubs->getEntries().size() * target->stubHelperEntrySize;
+ in.lazyBinding->getEntries().size() * target->stubHelperEntrySize;
}
-bool StubHelperSection::isNeeded() const {
- return !in.stubs->getEntries().empty();
-}
+bool StubHelperSection::isNeeded() const { return in.lazyBinding->isNeeded(); }
void StubHelperSection::writeTo(uint8_t *buf) const {
target->writeStubHelperHeader(buf);
size_t off = target->stubHelperHeaderSize;
- for (const DylibSymbol *sym : in.stubs->getEntries()) {
+ for (const DylibSymbol *sym : in.lazyBinding->getEntries()) {
target->writeStubHelperEntry(buf + off, *sym, addr + off);
off += target->stubHelperEntrySize;
}
}
void StubHelperSection::setup() {
- stubBinder = dyn_cast_or_null<DylibSymbol>(symtab->find("dyld_stub_binder"));
- if (stubBinder == nullptr) {
- error("symbol dyld_stub_binder not found (normally in libSystem.dylib). "
- "Needed to perform lazy binding.");
+ Symbol *binder = symtab->addUndefined("dyld_stub_binder", /*file=*/nullptr,
+ /*isWeakRef=*/false);
+ if (auto *undefined = dyn_cast<Undefined>(binder))
+ treatUndefinedSymbol(*undefined,
+ "lazy binding (normally in libSystem.dylib)");
+
+ // treatUndefinedSymbol() can replace binder with a DylibSymbol; re-check.
+ stubBinder = dyn_cast_or_null<DylibSymbol>(binder);
+ if (stubBinder == nullptr)
return;
- }
- in.got->addEntry(*stubBinder);
- inputSections.push_back(in.imageLoaderCache);
- symtab->addDefined("__dyld_private", in.imageLoaderCache, 0);
-}
+ in.got->addEntry(stubBinder);
-ImageLoaderCacheSection::ImageLoaderCacheSection() {
- segname = segment_names::data;
- name = "__data";
- uint8_t *arr = bAlloc.Allocate<uint8_t>(WordSize);
- memset(arr, 0, WordSize);
- data = {arr, WordSize};
+ in.imageLoaderCache->parent =
+ ConcatOutputSection::getOrCreateForInput(in.imageLoaderCache);
+ inputSections.push_back(in.imageLoaderCache);
+ // Since this isn't in the symbol table or in any input file, the noDeadStrip
+ // argument doesn't matter. It's kept alive by ImageLoaderCacheSection()
+ // setting `live` to true on the backing InputSection.
+ dyldPrivate =
+ make<Defined>("__dyld_private", nullptr, in.imageLoaderCache, 0, 0,
+ /*isWeakDef=*/false,
+ /*isExternal=*/false, /*isPrivateExtern=*/false,
+ /*isThumb=*/false, /*isReferencedDynamically=*/false,
+ /*noDeadStrip=*/false);
}
LazyPointerSection::LazyPointerSection()
- : SyntheticSection(segment_names::data, "__la_symbol_ptr") {
- align = 8;
- flags = MachO::S_LAZY_SYMBOL_POINTERS;
+ : SyntheticSection(segment_names::data, section_names::lazySymbolPtr) {
+ align = target->wordSize;
+ flags = S_LAZY_SYMBOL_POINTERS;
}
uint64_t LazyPointerSection::getSize() const {
- return in.stubs->getEntries().size() * WordSize;
+ return in.stubs->getEntries().size() * target->wordSize;
}
bool LazyPointerSection::isNeeded() const {
void LazyPointerSection::writeTo(uint8_t *buf) const {
size_t off = 0;
- for (const DylibSymbol *sym : in.stubs->getEntries()) {
- uint64_t stubHelperOffset = target->stubHelperHeaderSize +
- sym->stubsIndex * target->stubHelperEntrySize;
- write64le(buf + off, in.stubHelper->addr + stubHelperOffset);
- off += WordSize;
+ for (const Symbol *sym : in.stubs->getEntries()) {
+ if (const auto *dysym = dyn_cast<DylibSymbol>(sym)) {
+ if (dysym->hasStubsHelper()) {
+ uint64_t stubHelperOffset =
+ target->stubHelperHeaderSize +
+ dysym->stubsHelperIndex * target->stubHelperEntrySize;
+ write64le(buf + off, in.stubHelper->addr + stubHelperOffset);
+ }
+ } else {
+ write64le(buf + off, sym->getVA());
+ }
+ off += target->wordSize;
}
}
LazyBindingSection::LazyBindingSection()
- : SyntheticSection(segment_names::linkEdit, section_names::lazyBinding) {}
-
-bool LazyBindingSection::isNeeded() const { return in.stubs->isNeeded(); }
+ : LinkEditSection(segment_names::linkEdit, section_names::lazyBinding) {}
void LazyBindingSection::finalizeContents() {
// TODO: Just precompute output size here instead of writing to a temporary
// buffer
- for (DylibSymbol *sym : in.stubs->getEntries())
+ for (DylibSymbol *sym : entries)
sym->lazyBindOffset = encode(*sym);
}
memcpy(buf, contents.data(), contents.size());
}
+void LazyBindingSection::addEntry(DylibSymbol *dysym) {
+ if (entries.insert(dysym)) {
+ dysym->stubsHelperIndex = entries.size() - 1;
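+    // The lazy pointer initially holds the (absolute) address of its stub
+    // helper entry, so it needs a rebase entry in case the image slides.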
+ in.rebase->addEntry(in.lazyPointers->isec,
+ dysym->stubsIndex * target->wordSize);
+ }
+}
+
// Unlike the non-lazy binding section, the bind opcodes in this section aren't
// interpreted all at once. Rather, dyld will start interpreting opcodes at a
// given offset, typically only binding a single symbol before it finds a
uint32_t LazyBindingSection::encode(const DylibSymbol &sym) {
uint32_t opstreamOffset = contents.size();
OutputSegment *dataSeg = in.lazyPointers->parent;
- os << static_cast<uint8_t>(MachO::BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB |
+ os << static_cast<uint8_t>(BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB |
dataSeg->index);
- uint64_t offset = in.lazyPointers->addr - dataSeg->firstSection()->addr +
- sym.stubsIndex * WordSize;
+ uint64_t offset = in.lazyPointers->addr - dataSeg->addr +
+ sym.stubsIndex * target->wordSize;
encodeULEB128(offset, os);
- if (sym.file->ordinal <= MachO::BIND_IMMEDIATE_MASK)
- os << static_cast<uint8_t>(MachO::BIND_OPCODE_SET_DYLIB_ORDINAL_IMM |
- sym.file->ordinal);
- else
- fatal("TODO: Support larger dylib symbol ordinals");
-
- os << static_cast<uint8_t>(MachO::BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM)
- << sym.getName() << '\0'
- << static_cast<uint8_t>(MachO::BIND_OPCODE_DO_BIND)
- << static_cast<uint8_t>(MachO::BIND_OPCODE_DONE);
+ encodeDylibOrdinal(ordinalForDylibSymbol(sym), os);
+
+ uint8_t flags = BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM;
+ if (sym.isWeakRef())
+ flags |= BIND_SYMBOL_FLAGS_WEAK_IMPORT;
+
+ os << flags << sym.getName() << '\0'
+ << static_cast<uint8_t>(BIND_OPCODE_DO_BIND)
+ << static_cast<uint8_t>(BIND_OPCODE_DONE);
return opstreamOffset;
}
ExportSection::ExportSection()
- : SyntheticSection(segment_names::linkEdit, section_names::export_) {}
+ : LinkEditSection(segment_names::linkEdit, section_names::export_) {}
void ExportSection::finalizeContents() {
- // TODO: We should check symbol visibility.
- for (const Symbol *sym : symtab->getSymbols())
- if (auto *defined = dyn_cast<Defined>(sym))
+ trieBuilder.setImageBase(in.header->addr);
+ for (const Symbol *sym : symtab->getSymbols()) {
+ if (const auto *defined = dyn_cast<Defined>(sym)) {
+ if (defined->privateExtern || !defined->isLive())
+ continue;
trieBuilder.addSymbol(*defined);
+ hasWeakSymbol = hasWeakSymbol || sym->isWeakDef();
+ }
+ }
size = trieBuilder.build();
}
void ExportSection::writeTo(uint8_t *buf) const { trieBuilder.writeTo(buf); }
+DataInCodeSection::DataInCodeSection()
+ : LinkEditSection(segment_names::linkEdit, section_names::dataInCode) {}
+
+template <class LP>
+static std::vector<MachO::data_in_code_entry> collectDataInCodeEntries() {
+ using SegmentCommand = typename LP::segment_command;
+ using Section = typename LP::section;
+
+ std::vector<MachO::data_in_code_entry> dataInCodeEntries;
+ for (const InputFile *inputFile : inputFiles) {
+ if (!isa<ObjFile>(inputFile))
+ continue;
+ const ObjFile *objFile = cast<ObjFile>(inputFile);
+ const auto *c = reinterpret_cast<const SegmentCommand *>(
+ findCommand(objFile->mb.getBufferStart(), LP::segmentLCType));
+ if (!c)
+ continue;
+ ArrayRef<Section> sections{reinterpret_cast<const Section *>(c + 1),
+ c->nsects};
+
+ ArrayRef<MachO::data_in_code_entry> entries = objFile->dataInCodeEntries;
+ if (entries.empty())
+ continue;
+ // For each code subsection find 'data in code' entries residing in it.
+ // Compute the new offset values as
+ // <offset within subsection> + <subsection address> - <__TEXT address>.
+ for (size_t i = 0, n = sections.size(); i < n; ++i) {
+ const SubsectionMap &subsecMap = objFile->subsections[i];
+ for (const SubsectionEntry &subsecEntry : subsecMap) {
+ const InputSection *isec = subsecEntry.isec;
+ if (!isCodeSection(isec))
+ continue;
+ if (cast<ConcatInputSection>(isec)->shouldOmitFromOutput())
+ continue;
+ const uint64_t beginAddr = sections[i].addr + subsecEntry.offset;
+ auto it = llvm::lower_bound(
+ entries, beginAddr,
+ [](const MachO::data_in_code_entry &entry, uint64_t addr) {
+ return entry.offset < addr;
+ });
+ const uint64_t endAddr = beginAddr + isec->getFileSize();
+ for (const auto end = entries.end();
+ it != end && it->offset + it->length <= endAddr; ++it)
+ dataInCodeEntries.push_back(
+ {static_cast<uint32_t>(isec->getVA(it->offset - beginAddr) -
+ in.header->addr),
+ it->length, it->kind});
+ }
+ }
+ }
+ return dataInCodeEntries;
+}
+
+void DataInCodeSection::finalizeContents() {
+ entries = target->wordSize == 8 ? collectDataInCodeEntries<LP64>()
+ : collectDataInCodeEntries<ILP32>();
+}
+
+void DataInCodeSection::writeTo(uint8_t *buf) const {
+ if (!entries.empty())
+ memcpy(buf, entries.data(), getRawSize());
+}
+
+FunctionStartsSection::FunctionStartsSection()
+ : LinkEditSection(segment_names::linkEdit, section_names::functionStarts) {}
+
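+// The function-starts data is a ULEB128-encoded list of address deltas: the
+// first delta is taken from the Mach-O header address, each subsequent one
+// from the previous function. For example (illustrative addresses), functions
+// at offsets 0x1000 and 0x1030 from the header encode as 0x1000 then 0x30.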
+void FunctionStartsSection::finalizeContents() {
+ raw_svector_ostream os{contents};
+ std::vector<uint64_t> addrs;
+ for (const Symbol *sym : symtab->getSymbols()) {
+ if (const auto *defined = dyn_cast<Defined>(sym)) {
+ if (!defined->isec || !isCodeSection(defined->isec) || !defined->isLive())
+ continue;
+ if (const auto *concatIsec = dyn_cast<ConcatInputSection>(defined->isec))
+ if (concatIsec->shouldOmitFromOutput())
+ continue;
+      // TODO: Add support for Thumb; in that case,
+      // the lowest bit of nextAddr needs to be set to 1.
+ addrs.push_back(defined->getVA());
+ }
+ }
+ llvm::sort(addrs);
+ uint64_t addr = in.header->addr;
+ for (uint64_t nextAddr : addrs) {
+ uint64_t delta = nextAddr - addr;
+ if (delta == 0)
+ continue;
+ encodeULEB128(delta, os);
+ addr = nextAddr;
+ }
+ os << '\0';
+}
+
+void FunctionStartsSection::writeTo(uint8_t *buf) const {
+ memcpy(buf, contents.data(), contents.size());
+}
+
SymtabSection::SymtabSection(StringTableSection &stringTableSection)
- : SyntheticSection(segment_names::linkEdit, section_names::symbolTable),
- stringTableSection(stringTableSection) {
- // TODO: When we introduce the SyntheticSections superclass, we should make
- // all synthetic sections aligned to WordSize by default.
- align = WordSize;
+ : LinkEditSection(segment_names::linkEdit, section_names::symbolTable),
+ stringTableSection(stringTableSection) {}
+
+void SymtabSection::emitBeginSourceStab(DWARFUnit *compileUnit) {
+ StabsEntry stab(N_SO);
+ SmallString<261> dir(compileUnit->getCompilationDir());
+ StringRef sep = sys::path::get_separator();
+ // We don't use `path::append` here because we want an empty `dir` to result
+ // in an absolute path. `append` would give us a relative path for that case.
+ if (!dir.endswith(sep))
+ dir += sep;
+ stab.strx = stringTableSection.addString(
+ saver.save(dir + compileUnit->getUnitDIE().getShortName()));
+ stabs.emplace_back(std::move(stab));
+}
+
+void SymtabSection::emitEndSourceStab() {
+ StabsEntry stab(N_SO);
+ stab.sect = 1;
+ stabs.emplace_back(std::move(stab));
}
-uint64_t SymtabSection::getSize() const {
- return symbols.size() * sizeof(structs::nlist_64);
+void SymtabSection::emitObjectFileStab(ObjFile *file) {
+ StabsEntry stab(N_OSO);
+ stab.sect = target->cpuSubtype;
+ SmallString<261> path(!file->archiveName.empty() ? file->archiveName
+ : file->getName());
+ std::error_code ec = sys::fs::make_absolute(path);
+ if (ec)
+ fatal("failed to get absolute path for " + path);
+
+ if (!file->archiveName.empty())
+ path.append({"(", file->getName(), ")"});
+
+ stab.strx = stringTableSection.addString(saver.save(path.str()));
+ stab.desc = 1;
+ stab.value = file->modTime;
+ stabs.emplace_back(std::move(stab));
+}
+
+void SymtabSection::emitEndFunStab(Defined *defined) {
+ StabsEntry stab(N_FUN);
+ stab.value = defined->size;
+ stabs.emplace_back(std::move(stab));
+}
+
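+// For each object file with debug info, the stabs stream emitted below is
+// roughly: an N_SO stab naming the source file, an N_OSO stab naming the
+// object file (with its modification time), per-symbol N_FUN/N_GSYM/N_STSYM
+// stabs (functions get a trailing N_FUN carrying the size), and a closing
+// N_SO.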
+void SymtabSection::emitStabs() {
+ for (const std::string &s : config->astPaths) {
+ StabsEntry astStab(N_AST);
+ astStab.strx = stringTableSection.addString(s);
+ stabs.emplace_back(std::move(astStab));
+ }
+
+ std::vector<Defined *> symbolsNeedingStabs;
+ for (const SymtabEntry &entry :
+ concat<SymtabEntry>(localSymbols, externalSymbols)) {
+ Symbol *sym = entry.sym;
+ assert(sym->isLive() &&
+ "dead symbols should not be in localSymbols, externalSymbols");
+ if (auto *defined = dyn_cast<Defined>(sym)) {
+ if (defined->isAbsolute())
+ continue;
+ InputSection *isec = defined->isec;
+ ObjFile *file = dyn_cast_or_null<ObjFile>(isec->getFile());
+ if (!file || !file->compileUnit)
+ continue;
+ symbolsNeedingStabs.push_back(defined);
+ }
+ }
+
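+  // Group the symbols by object file (stable_sort preserves their relative
+  // order within each file) so that the begin/end N_SO stabs and the N_OSO
+  // stab only need to be emitted once per file.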
+ llvm::stable_sort(symbolsNeedingStabs, [&](Defined *a, Defined *b) {
+ return a->isec->getFile()->id < b->isec->getFile()->id;
+ });
+
+ // Emit STABS symbols so that dsymutil and/or the debugger can map address
+ // regions in the final binary to the source and object files from which they
+ // originated.
+ InputFile *lastFile = nullptr;
+ for (Defined *defined : symbolsNeedingStabs) {
+ InputSection *isec = defined->isec;
+ ObjFile *file = cast<ObjFile>(isec->getFile());
+
+ if (lastFile == nullptr || lastFile != file) {
+ if (lastFile != nullptr)
+ emitEndSourceStab();
+ lastFile = file;
+
+ emitBeginSourceStab(file->compileUnit);
+ emitObjectFileStab(file);
+ }
+
+ StabsEntry symStab;
+ symStab.sect = defined->isec->canonical()->parent->index;
+ symStab.strx = stringTableSection.addString(defined->getName());
+ symStab.value = defined->getVA();
+
+ if (isCodeSection(isec)) {
+ symStab.type = N_FUN;
+ stabs.emplace_back(std::move(symStab));
+ emitEndFunStab(defined);
+ } else {
+ symStab.type = defined->isExternal() ? N_GSYM : N_STSYM;
+ stabs.emplace_back(std::move(symStab));
+ }
+ }
+
+ if (!stabs.empty())
+ emitEndSourceStab();
}
void SymtabSection::finalizeContents() {
- // TODO support other symbol types
- for (Symbol *sym : symtab->getSymbols())
- if (isa<Defined>(sym))
- symbols.push_back({sym, stringTableSection.addString(sym->getName())});
+ auto addSymbol = [&](std::vector<SymtabEntry> &symbols, Symbol *sym) {
+ uint32_t strx = stringTableSection.addString(sym->getName());
+ symbols.push_back({sym, strx});
+ };
+
+ // Local symbols aren't in the SymbolTable, so we walk the list of object
+ // files to gather them.
+ for (const InputFile *file : inputFiles) {
+ if (auto *objFile = dyn_cast<ObjFile>(file)) {
+ for (Symbol *sym : objFile->symbols) {
+ if (auto *defined = dyn_cast_or_null<Defined>(sym)) {
+ if (!defined->isExternal() && defined->isLive()) {
+ StringRef name = defined->getName();
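+            // Names beginning with 'l' or 'L' are reserved for
+            // temporary/private labels, so they are not copied into the
+            // output symbol table.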
+ if (!name.startswith("l") && !name.startswith("L"))
+ addSymbol(localSymbols, sym);
+ }
+ }
+ }
+ }
+ }
+
+ // __dyld_private is a local symbol too. It's linker-created and doesn't
+ // exist in any object file.
+ if (Defined *dyldPrivate = in.stubHelper->dyldPrivate)
+ addSymbol(localSymbols, dyldPrivate);
+
+ for (Symbol *sym : symtab->getSymbols()) {
+ if (!sym->isLive())
+ continue;
+ if (auto *defined = dyn_cast<Defined>(sym)) {
+ if (!defined->includeInSymtab)
+ continue;
+ assert(defined->isExternal());
+ if (defined->privateExtern)
+ addSymbol(localSymbols, defined);
+ else
+ addSymbol(externalSymbols, defined);
+ } else if (auto *dysym = dyn_cast<DylibSymbol>(sym)) {
+ if (dysym->isReferenced())
+ addSymbol(undefinedSymbols, sym);
+ }
+ }
+
+ emitStabs();
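+  // The stabs occupy the first stabs.size() slots of the symbol table, so
+  // the indices of the "real" symbols start right after them.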
+ uint32_t symtabIndex = stabs.size();
+ for (const SymtabEntry &entry :
+ concat<SymtabEntry>(localSymbols, externalSymbols, undefinedSymbols)) {
+ entry.sym->symtabIndex = symtabIndex++;
+ }
+}
+
+uint32_t SymtabSection::getNumSymbols() const {
+ return stabs.size() + localSymbols.size() + externalSymbols.size() +
+ undefinedSymbols.size();
+}
+
+// This serves to hide (type-erase) the template parameter from SymtabSection.
+template <class LP> class SymtabSectionImpl final : public SymtabSection {
+public:
+ SymtabSectionImpl(StringTableSection &stringTableSection)
+ : SymtabSection(stringTableSection) {}
+ uint64_t getRawSize() const override;
+ void writeTo(uint8_t *buf) const override;
+};
+
+template <class LP> uint64_t SymtabSectionImpl<LP>::getRawSize() const {
+ return getNumSymbols() * sizeof(typename LP::nlist);
}
-void SymtabSection::writeTo(uint8_t *buf) const {
- auto *nList = reinterpret_cast<structs::nlist_64 *>(buf);
- for (const SymtabEntry &entry : symbols) {
+template <class LP> void SymtabSectionImpl<LP>::writeTo(uint8_t *buf) const {
+ auto *nList = reinterpret_cast<typename LP::nlist *>(buf);
+ // Emit the stabs entries before the "real" symbols. We cannot emit them
+ // after as that would render Symbol::symtabIndex inaccurate.
+ for (const StabsEntry &entry : stabs) {
nList->n_strx = entry.strx;
- // TODO support other symbol types
- // TODO populate n_desc
+ nList->n_type = entry.type;
+ nList->n_sect = entry.sect;
+ nList->n_desc = entry.desc;
+ nList->n_value = entry.value;
+ ++nList;
+ }
+
+ for (const SymtabEntry &entry : concat<const SymtabEntry>(
+ localSymbols, externalSymbols, undefinedSymbols)) {
+ nList->n_strx = entry.strx;
+ // TODO populate n_desc with more flags
if (auto *defined = dyn_cast<Defined>(entry.sym)) {
- nList->n_type = MachO::N_EXT | MachO::N_SECT;
- nList->n_sect = defined->isec->parent->index;
- // For the N_SECT symbol type, n_value is the address of the symbol
- nList->n_value = defined->value + defined->isec->getVA();
+ uint8_t scope = 0;
+ if (defined->privateExtern) {
+ // Private external -- dylib scoped symbol.
+ // Promote to non-external at link time.
+ scope = N_PEXT;
+ } else if (defined->isExternal()) {
+ // Normal global symbol.
+ scope = N_EXT;
+ } else {
+ // TU-local symbol from localSymbols.
+ scope = 0;
+ }
+
+ if (defined->isAbsolute()) {
+ nList->n_type = scope | N_ABS;
+ nList->n_sect = NO_SECT;
+ nList->n_value = defined->value;
+ } else {
+ nList->n_type = scope | N_SECT;
+ nList->n_sect = defined->isec->canonical()->parent->index;
+ // For the N_SECT symbol type, n_value is the address of the symbol
+ nList->n_value = defined->getVA();
+ }
+ nList->n_desc |= defined->thumb ? N_ARM_THUMB_DEF : 0;
+ nList->n_desc |= defined->isExternalWeakDef() ? N_WEAK_DEF : 0;
+ nList->n_desc |=
+ defined->referencedDynamically ? REFERENCED_DYNAMICALLY : 0;
+ } else if (auto *dysym = dyn_cast<DylibSymbol>(entry.sym)) {
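+      // Mach-O's two-level namespace records, for each undefined symbol, the
+      // ordinal of the dylib it should be looked up in; SET_LIBRARY_ORDINAL
+      // packs that ordinal into the high byte of n_desc.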
+ uint16_t n_desc = nList->n_desc;
+ int16_t ordinal = ordinalForDylibSymbol(*dysym);
+ if (ordinal == BIND_SPECIAL_DYLIB_FLAT_LOOKUP)
+ SET_LIBRARY_ORDINAL(n_desc, DYNAMIC_LOOKUP_ORDINAL);
+ else if (ordinal == BIND_SPECIAL_DYLIB_MAIN_EXECUTABLE)
+ SET_LIBRARY_ORDINAL(n_desc, EXECUTABLE_ORDINAL);
+ else {
+ assert(ordinal > 0);
+ SET_LIBRARY_ORDINAL(n_desc, static_cast<uint8_t>(ordinal));
+ }
+
+ nList->n_type = N_EXT;
+ n_desc |= dysym->isWeakDef() ? N_WEAK_DEF : 0;
+ n_desc |= dysym->isWeakRef() ? N_WEAK_REF : 0;
+ nList->n_desc = n_desc;
}
++nList;
}
}
+template <class LP>
+SymtabSection *
+macho::makeSymtabSection(StringTableSection &stringTableSection) {
+ return make<SymtabSectionImpl<LP>>(stringTableSection);
+}
+
+IndirectSymtabSection::IndirectSymtabSection()
+ : LinkEditSection(segment_names::linkEdit,
+ section_names::indirectSymbolTable) {}
+
+uint32_t IndirectSymtabSection::getNumSymbols() const {
+ return in.got->getEntries().size() + in.tlvPointers->getEntries().size() +
+ 2 * in.stubs->getEntries().size();
+}
+
+bool IndirectSymtabSection::isNeeded() const {
+ return in.got->isNeeded() || in.tlvPointers->isNeeded() ||
+ in.stubs->isNeeded();
+}
+
+void IndirectSymtabSection::finalizeContents() {
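+  // Record each section's starting offset into the indirect symbol table in
+  // its reserved1 header field. The sub-arrays are laid out in the order
+  // GOT, TLV pointers, stubs, lazy pointers.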
+ uint32_t off = 0;
+ in.got->reserved1 = off;
+ off += in.got->getEntries().size();
+ in.tlvPointers->reserved1 = off;
+ off += in.tlvPointers->getEntries().size();
+ in.stubs->reserved1 = off;
+ off += in.stubs->getEntries().size();
+ in.lazyPointers->reserved1 = off;
+}
+
+static uint32_t indirectValue(const Symbol *sym) {
+ return sym->symtabIndex != UINT32_MAX ? sym->symtabIndex
+ : INDIRECT_SYMBOL_LOCAL;
+}
+
+void IndirectSymtabSection::writeTo(uint8_t *buf) const {
+ uint32_t off = 0;
+ for (const Symbol *sym : in.got->getEntries()) {
+ write32le(buf + off * sizeof(uint32_t), indirectValue(sym));
+ ++off;
+ }
+ for (const Symbol *sym : in.tlvPointers->getEntries()) {
+ write32le(buf + off * sizeof(uint32_t), indirectValue(sym));
+ ++off;
+ }
+ for (const Symbol *sym : in.stubs->getEntries()) {
+ write32le(buf + off * sizeof(uint32_t), indirectValue(sym));
+ ++off;
+ }
+  // There is a 1:1 correspondence between stubs and LazyPointerSection
+  // entries, so we could give __stubs and __la_symbol_ptr the same reserved1
+  // (the offset into the indirect symbol table) and have them refer to the
+  // same range of offsets. However, that confuses `strip`, so we write the
+  // stubs' symbol table offsets a second time.
+ for (const Symbol *sym : in.stubs->getEntries()) {
+ write32le(buf + off * sizeof(uint32_t), indirectValue(sym));
+ ++off;
+ }
+}
+
StringTableSection::StringTableSection()
- : SyntheticSection(segment_names::linkEdit, section_names::stringTable) {}
+ : LinkEditSection(segment_names::linkEdit, section_names::stringTable) {}
uint32_t StringTableSection::addString(StringRef str) {
uint32_t strx = size;
- strings.push_back(str);
+ strings.push_back(str); // TODO: consider deduplicating strings
size += str.size() + 1; // account for null terminator
return strx;
}
off += str.size() + 1; // account for null terminator
}
}
+
+static_assert((CodeSignatureSection::blobHeadersSize % 8) == 0, "");
+static_assert((CodeSignatureSection::fixedHeadersSize % 8) == 0, "");
+
+CodeSignatureSection::CodeSignatureSection()
+ : LinkEditSection(segment_names::linkEdit, section_names::codeSignature) {
+ align = 16; // required by libstuff
+ // FIXME: Consider using finalOutput instead of outputFile.
+ fileName = config->outputFile;
+ size_t slashIndex = fileName.rfind("/");
+ if (slashIndex != std::string::npos)
+ fileName = fileName.drop_front(slashIndex + 1);
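+  // The code directory embeds the output's base name as its identifier; the
+  // headers are padded so that the hashes that follow them stay 16-byte
+  // aligned.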
+ allHeadersSize = alignTo<16>(fixedHeadersSize + fileName.size() + 1);
+ fileNamePad = allHeadersSize - fixedHeadersSize - fileName.size();
+}
+
+uint32_t CodeSignatureSection::getBlockCount() const {
+ return (fileOff + blockSize - 1) / blockSize;
+}
+
+uint64_t CodeSignatureSection::getRawSize() const {
+ return allHeadersSize + getBlockCount() * hashSize;
+}
+
+void CodeSignatureSection::writeHashes(uint8_t *buf) const {
+ uint8_t *code = buf;
+ uint8_t *codeEnd = buf + fileOff;
+ uint8_t *hashes = codeEnd + allHeadersSize;
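+  // Hash each blockSize-sized (4 KiB) page of the output file with SHA-256
+  // and store the digests after the signature's header area (allHeadersSize
+  // bytes in).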
+ while (code < codeEnd) {
+ StringRef block(reinterpret_cast<char *>(code),
+ std::min(codeEnd - code, static_cast<ssize_t>(blockSize)));
+ SHA256 hasher;
+ hasher.update(block);
+ StringRef hash = hasher.final();
+ assert(hash.size() == hashSize);
+ memcpy(hashes, hash.data(), hashSize);
+ code += blockSize;
+ hashes += hashSize;
+ }
+#if defined(__APPLE__)
+  // This is a macOS-specific work-around and makes no sense for any
+ // other host OS. See https://openradar.appspot.com/FB8914231
+ //
+ // The macOS kernel maintains a signature-verification cache to
+ // quickly validate applications at time of execve(2). The trouble
+  // is that the kernel creates the cache entry at the time of the
+ // mmap(2) call, before we have a chance to write either the code to
+ // sign or the signature header+hashes. The fix is to invalidate
+ // all cached data associated with the output file, thus discarding
+ // the bogus prematurely-cached signature.
+ msync(buf, fileOff + getSize(), MS_INVALIDATE);
+#endif
+}
+
+void CodeSignatureSection::writeTo(uint8_t *buf) const {
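+  // The ad-hoc signature is a CS_SuperBlob that contains a single
+  // CS_CodeDirectory blob, followed by the identifier (the file name) and
+  // then the page hashes emitted by writeHashes().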
+ uint32_t signatureSize = static_cast<uint32_t>(getSize());
+ auto *superBlob = reinterpret_cast<CS_SuperBlob *>(buf);
+ write32be(&superBlob->magic, CSMAGIC_EMBEDDED_SIGNATURE);
+ write32be(&superBlob->length, signatureSize);
+ write32be(&superBlob->count, 1);
+ auto *blobIndex = reinterpret_cast<CS_BlobIndex *>(&superBlob[1]);
+ write32be(&blobIndex->type, CSSLOT_CODEDIRECTORY);
+ write32be(&blobIndex->offset, blobHeadersSize);
+ auto *codeDirectory =
+ reinterpret_cast<CS_CodeDirectory *>(buf + blobHeadersSize);
+ write32be(&codeDirectory->magic, CSMAGIC_CODEDIRECTORY);
+ write32be(&codeDirectory->length, signatureSize - blobHeadersSize);
+ write32be(&codeDirectory->version, CS_SUPPORTSEXECSEG);
+ write32be(&codeDirectory->flags, CS_ADHOC | CS_LINKER_SIGNED);
+ write32be(&codeDirectory->hashOffset,
+ sizeof(CS_CodeDirectory) + fileName.size() + fileNamePad);
+ write32be(&codeDirectory->identOffset, sizeof(CS_CodeDirectory));
+ codeDirectory->nSpecialSlots = 0;
+ write32be(&codeDirectory->nCodeSlots, getBlockCount());
+ write32be(&codeDirectory->codeLimit, fileOff);
+ codeDirectory->hashSize = static_cast<uint8_t>(hashSize);
+ codeDirectory->hashType = kSecCodeSignatureHashSHA256;
+ codeDirectory->platform = 0;
+ codeDirectory->pageSize = blockSizeShift;
+ codeDirectory->spare2 = 0;
+ codeDirectory->scatterOffset = 0;
+ codeDirectory->teamOffset = 0;
+ codeDirectory->spare3 = 0;
+ codeDirectory->codeLimit64 = 0;
+ OutputSegment *textSeg = getOrCreateOutputSegment(segment_names::text);
+ write64be(&codeDirectory->execSegBase, textSeg->fileOff);
+ write64be(&codeDirectory->execSegLimit, textSeg->fileSize);
+ write64be(&codeDirectory->execSegFlags,
+ config->outputType == MH_EXECUTE ? CS_EXECSEG_MAIN_BINARY : 0);
+ auto *id = reinterpret_cast<char *>(&codeDirectory[1]);
+ memcpy(id, fileName.begin(), fileName.size());
+ memset(id + fileName.size(), 0, fileNamePad);
+}
+
+BitcodeBundleSection::BitcodeBundleSection()
+ : SyntheticSection(segment_names::llvm, section_names::bitcodeBundle) {}
+
+class ErrorCodeWrapper {
+public:
+ explicit ErrorCodeWrapper(std::error_code ec) : errorCode(ec.value()) {}
+ explicit ErrorCodeWrapper(int ec) : errorCode(ec) {}
+ operator int() const { return errorCode; }
+
+private:
+ int errorCode;
+};
+
+#define CHECK_EC(exp) \
+ do { \
+ ErrorCodeWrapper ec(exp); \
+ if (ec) \
+ fatal(Twine("operation failed with error code ") + Twine(ec) + ": " + \
+ #exp); \
+ } while (0);
+
+void BitcodeBundleSection::finalize() {
+#ifdef LLVM_HAVE_LIBXAR
+ using namespace llvm::sys::fs;
+ CHECK_EC(createTemporaryFile("bitcode-bundle", "xar", xarPath));
+
+ xar_t xar(xar_open(xarPath.data(), O_RDWR));
+ if (!xar)
+ fatal("failed to open XAR temporary file at " + xarPath);
+ CHECK_EC(xar_opt_set(xar, XAR_OPT_COMPRESSION, XAR_OPT_VAL_NONE));
+ // FIXME: add more data to XAR
+ CHECK_EC(xar_close(xar));
+
+ file_size(xarPath, xarSize);
+#endif // defined(LLVM_HAVE_LIBXAR)
+}
+
+void BitcodeBundleSection::writeTo(uint8_t *buf) const {
+ using namespace llvm::sys::fs;
+ file_t handle =
+ CHECK(openNativeFile(xarPath, CD_OpenExisting, FA_Read, OF_None),
+ "failed to open XAR file");
+ std::error_code ec;
+ mapped_file_region xarMap(handle, mapped_file_region::mapmode::readonly,
+ xarSize, 0, ec);
+ if (ec)
+ fatal("failed to map XAR file");
+ memcpy(buf, xarMap.const_data(), xarSize);
+
+ closeFile(handle);
+ remove(xarPath);
+}
+
+CStringSection::CStringSection()
+ : SyntheticSection(segment_names::text, section_names::cString) {
+ flags = S_CSTRING_LITERALS;
+}
+
+void CStringSection::addInput(CStringInputSection *isec) {
+ isec->parent = this;
+ inputs.push_back(isec);
+ if (isec->align > align)
+ align = isec->align;
+}
+
+void CStringSection::writeTo(uint8_t *buf) const {
+ for (const CStringInputSection *isec : inputs) {
+ for (size_t i = 0, e = isec->pieces.size(); i != e; ++i) {
+ if (!isec->pieces[i].live)
+ continue;
+ StringRef string = isec->getStringRef(i);
+ memcpy(buf + isec->pieces[i].outSecOff, string.data(), string.size());
+ }
+ }
+}
+
+void CStringSection::finalizeContents() {
+ uint64_t offset = 0;
+ for (CStringInputSection *isec : inputs) {
+ for (size_t i = 0, e = isec->pieces.size(); i != e; ++i) {
+ if (!isec->pieces[i].live)
+ continue;
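+      // MinAlign returns the largest power of two that divides both the
+      // piece's input offset and the section alignment; aligning the output
+      // offset to it keeps each string at least as aligned as it was in its
+      // input.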
+ uint32_t pieceAlign = MinAlign(isec->pieces[i].inSecOff, align);
+ offset = alignTo(offset, pieceAlign);
+ isec->pieces[i].outSecOff = offset;
+ isec->isFinal = true;
+ StringRef string = isec->getStringRef(i);
+ offset += string.size();
+ }
+ }
+ size = offset;
+}
+// Mergeable cstring literals are found under the __TEXT,__cstring section. In
+// contrast to ELF, which puts strings that need different alignments into
+// different sections, clang's Mach-O backend puts them all in one section.
+// Strings that need to be aligned have the .p2align directive emitted before
+// them, which simply translates into zero padding in the object file.
+//
+// I *think* ld64 extracts the desired per-string alignment from this data by
+// preserving each string's offset from the last section-aligned address. I'm
+// not entirely certain since it doesn't seem consistent about doing this, and
+// in fact doesn't seem to be correct in general: we can in fact can induce ld64
+// to produce a crashing binary just by linking in an additional object file
+// that only contains a duplicate cstring at a different alignment. See PR50563
+// for details.
+//
+// On x86_64, the cstrings we've seen so far that require special alignment are
+// all accessed by SIMD operations -- x86_64 requires SIMD accesses to be
+// 16-byte-aligned. arm64 also seems to require 16-byte alignment in some cases
+// (PR50791), but I haven't tracked down the root cause. So for now, I'm just
+// aligning all strings to 16 bytes. This is indeed wasteful, but
+// implementation-wise it's simpler than preserving per-string
+// alignment+offsets. It also avoids the aforementioned crash after
+// deduplication of differently-aligned strings. Finally, the overhead is not
+// huge: using 16-byte alignment (vs no alignment) is only a 0.5% size overhead
+// when linking chromium_framework on x86_64.
+DeduplicatedCStringSection::DeduplicatedCStringSection()
+ : builder(StringTableBuilder::RAW, /*Alignment=*/16) {}
+
+void DeduplicatedCStringSection::finalizeContents() {
+ // Add all string pieces to the string table builder to create section
+ // contents.
+ for (const CStringInputSection *isec : inputs)
+ for (size_t i = 0, e = isec->pieces.size(); i != e; ++i)
+ if (isec->pieces[i].live)
+ builder.add(isec->getCachedHashStringRef(i));
+
+ // Fix the string table content. After this, the contents will never change.
+ builder.finalizeInOrder();
+
+ // finalize() fixed tail-optimized strings, so we can now get
+ // offsets of strings. Get an offset for each string and save it
+ // to a corresponding SectionPiece for easy access.
+ for (CStringInputSection *isec : inputs) {
+ for (size_t i = 0, e = isec->pieces.size(); i != e; ++i) {
+ if (!isec->pieces[i].live)
+ continue;
+ isec->pieces[i].outSecOff =
+ builder.getOffset(isec->getCachedHashStringRef(i));
+ isec->isFinal = true;
+ }
+ }
+}
+
+// This section is actually emitted as __TEXT,__const by ld64, but clang may
+// emit input sections of that name, and LLD doesn't currently support mixing
+// synthetic and concat-type OutputSections. To work around this, I've given
+// our merged-literals section a different name.
+WordLiteralSection::WordLiteralSection()
+ : SyntheticSection(segment_names::text, section_names::literals) {
+ align = 16;
+}
+
+void WordLiteralSection::addInput(WordLiteralInputSection *isec) {
+ isec->parent = this;
+ inputs.push_back(isec);
+}
+
+void WordLiteralSection::finalizeContents() {
+ for (WordLiteralInputSection *isec : inputs) {
+ // We do all processing of the InputSection here, so it will be effectively
+ // finalized.
+ isec->isFinal = true;
+ const uint8_t *buf = isec->data.data();
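+    // Deduplicate the literals: emplace() only inserts the first occurrence
+    // of a given value, and the map's size at that point becomes the
+    // literal's index within the output section.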
+ switch (sectionType(isec->getFlags())) {
+ case S_4BYTE_LITERALS: {
+ for (size_t off = 0, e = isec->data.size(); off < e; off += 4) {
+ if (!isec->isLive(off))
+ continue;
+ uint32_t value = *reinterpret_cast<const uint32_t *>(buf + off);
+ literal4Map.emplace(value, literal4Map.size());
+ }
+ break;
+ }
+ case S_8BYTE_LITERALS: {
+ for (size_t off = 0, e = isec->data.size(); off < e; off += 8) {
+ if (!isec->isLive(off))
+ continue;
+ uint64_t value = *reinterpret_cast<const uint64_t *>(buf + off);
+ literal8Map.emplace(value, literal8Map.size());
+ }
+ break;
+ }
+ case S_16BYTE_LITERALS: {
+ for (size_t off = 0, e = isec->data.size(); off < e; off += 16) {
+ if (!isec->isLive(off))
+ continue;
+ UInt128 value = *reinterpret_cast<const UInt128 *>(buf + off);
+ literal16Map.emplace(value, literal16Map.size());
+ }
+ break;
+ }
+ default:
+ llvm_unreachable("invalid literal section type");
+ }
+ }
+}
+
+void WordLiteralSection::writeTo(uint8_t *buf) const {
+ // Note that we don't attempt to do any endianness conversion in addInput(),
+ // so we don't do it here either -- just write out the original value,
+ // byte-for-byte.
+ for (const auto &p : literal16Map)
+ memcpy(buf + p.second * 16, &p.first, 16);
+ buf += literal16Map.size() * 16;
+
+ for (const auto &p : literal8Map)
+ memcpy(buf + p.second * 8, &p.first, 8);
+ buf += literal8Map.size() * 8;
+
+ for (const auto &p : literal4Map)
+ memcpy(buf + p.second * 4, &p.first, 4);
+}
+
+void macho::createSyntheticSymbols() {
+ auto addHeaderSymbol = [](const char *name) {
+ symtab->addSynthetic(name, in.header->isec, /*value=*/0,
+ /*privateExtern=*/true, /*includeInSymtab=*/false,
+ /*referencedDynamically=*/false);
+ };
+
+ switch (config->outputType) {
+ // FIXME: Assign the right address value for these symbols
+ // (rather than 0). But we need to do that after assignAddresses().
+ case MH_EXECUTE:
+ // If linking PIE, __mh_execute_header is a defined symbol in
+    // __TEXT,__text.
+ // Otherwise, it's an absolute symbol.
+ if (config->isPic)
+ symtab->addSynthetic("__mh_execute_header", in.header->isec, /*value=*/0,
+ /*privateExtern=*/false, /*includeInSymtab=*/true,
+ /*referencedDynamically=*/true);
+ else
+ symtab->addSynthetic("__mh_execute_header", /*isec=*/nullptr, /*value=*/0,
+ /*privateExtern=*/false, /*includeInSymtab=*/true,
+ /*referencedDynamically=*/true);
+ break;
+
+ // The following symbols are N_SECT symbols, even though the header is not
+  // part of any section, and they are private to the bundle/dylib/object
+  // they belong to.
+ case MH_BUNDLE:
+ addHeaderSymbol("__mh_bundle_header");
+ break;
+ case MH_DYLIB:
+ addHeaderSymbol("__mh_dylib_header");
+ break;
+ case MH_DYLINKER:
+ addHeaderSymbol("__mh_dylinker_header");
+ break;
+ case MH_OBJECT:
+ addHeaderSymbol("__mh_object_header");
+ break;
+ default:
+ llvm_unreachable("unexpected outputType");
+ break;
+ }
+
+ // The Itanium C++ ABI requires dylibs to pass a pointer to __cxa_atexit
+ // which does e.g. cleanup of static global variables. The ABI document
+ // says that the pointer can point to any address in one of the dylib's
+ // segments, but in practice ld64 seems to set it to point to the header,
+ // so that's what's implemented here.
+ addHeaderSymbol("___dso_handle");
+}
+
+template SymtabSection *macho::makeSymtabSection<LP64>(StringTableSection &);
+template SymtabSection *macho::makeSymtabSection<ILP32>(StringTableSection &);
#include "ExportTrie.h"
#include "InputSection.h"
#include "OutputSection.h"
+#include "OutputSegment.h"
#include "Target.h"
+#include "Writer.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/SetVector.h"
+#include "llvm/MC/StringTableBuilder.h"
+#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
-namespace lld {
-namespace macho {
-
-namespace section_names {
+#include <unordered_map>
-constexpr const char pageZero[] = "__pagezero";
-constexpr const char header[] = "__mach_header";
-constexpr const char binding[] = "__binding";
-constexpr const char lazyBinding[] = "__lazy_binding";
-constexpr const char export_[] = "__export";
-constexpr const char symbolTable[] = "__symbol_table";
-constexpr const char stringTable[] = "__string_table";
-constexpr const char got[] = "__got";
+namespace llvm {
+class DWARFUnit;
+} // namespace llvm
-} // namespace section_names
+namespace lld {
+namespace macho {
+class Defined;
class DylibSymbol;
class LoadCommand;
+class ObjFile;
+class UnwindInfoSection;
class SyntheticSection : public OutputSection {
public:
return sec->kind() == SyntheticKind;
}
- const StringRef segname;
+ StringRef segname;
+ // This fake InputSection makes it easier for us to write code that applies
+ // generically to both user inputs and synthetics.
+ InputSection *isec;
+};
+
+// All sections in __LINKEDIT should inherit from this.
+class LinkEditSection : public SyntheticSection {
+public:
+ LinkEditSection(const char *segname, const char *name)
+ : SyntheticSection(segname, name) {
+ align = target->wordSize;
+ }
+
+ virtual void finalizeContents() {}
+
+ // Sections in __LINKEDIT are special: their offsets are recorded in the
+ // load commands like LC_DYLD_INFO_ONLY and LC_SYMTAB, instead of in section
+ // headers.
+ bool isHidden() const override final { return true; }
+
+ virtual uint64_t getRawSize() const = 0;
+
+ // codesign (or more specifically libstuff) checks that each section in
+ // __LINKEDIT ends where the next one starts -- no gaps are permitted. We
+  // therefore align every section's start and end points to the word size.
+ //
+ // NOTE: This assumes that the extra bytes required for alignment can be
+ // zero-valued bytes.
+ uint64_t getSize() const override final {
+ return llvm::alignTo(getRawSize(), align);
+ }
};
// The header of the Mach-O file, which must have a file offset of zero.
-class MachHeaderSection : public SyntheticSection {
+class MachHeaderSection final : public SyntheticSection {
public:
MachHeaderSection();
- void addLoadCommand(LoadCommand *);
bool isHidden() const override { return true; }
uint64_t getSize() const override;
void writeTo(uint8_t *buf) const override;
-private:
+ void addLoadCommand(LoadCommand *);
+
+protected:
std::vector<LoadCommand *> loadCommands;
uint32_t sizeOfCmds = 0;
};
// A hidden section that exists solely for the purpose of creating the
// __PAGEZERO segment, which is used to catch null pointer dereferences.
-class PageZeroSection : public SyntheticSection {
+class PageZeroSection final : public SyntheticSection {
public:
PageZeroSection();
bool isHidden() const override { return true; }
- uint64_t getSize() const override { return PageZeroSize; }
+ uint64_t getSize() const override { return target->pageZeroSize; }
uint64_t getFileSize() const override { return 0; }
void writeTo(uint8_t *buf) const override {}
};
-// This section will be populated by dyld with addresses to non-lazily-loaded
-// dylib symbols.
-class GotSection : public SyntheticSection {
+// This is the base class for the GOT and TLVPointer sections, which are nearly
+// functionally identical -- they will both be populated by dyld with addresses
+// to non-lazily-loaded dylib symbols. The main difference is that the
+// TLVPointerSection stores references to thread-local variables.
+class NonLazyPointerSectionBase : public SyntheticSection {
+public:
+ NonLazyPointerSectionBase(const char *segname, const char *name);
+ const llvm::SetVector<const Symbol *> &getEntries() const { return entries; }
+ bool isNeeded() const override { return !entries.empty(); }
+ uint64_t getSize() const override {
+ return entries.size() * target->wordSize;
+ }
+ void writeTo(uint8_t *buf) const override;
+ void addEntry(Symbol *sym);
+ uint64_t getVA(uint32_t gotIndex) const {
+ return addr + gotIndex * target->wordSize;
+ }
+
+private:
+ llvm::SetVector<const Symbol *> entries;
+};
+
+class GotSection final : public NonLazyPointerSectionBase {
public:
GotSection();
+};
- const llvm::SetVector<const Symbol *> &getEntries() const { return entries; }
+class TlvPointerSection final : public NonLazyPointerSectionBase {
+public:
+ TlvPointerSection();
+};
- bool isNeeded() const override { return !entries.empty(); }
+struct Location {
+ const InputSection *isec;
+ uint64_t offset;
- uint64_t getSize() const override { return entries.size() * WordSize; }
+ Location(const InputSection *isec, uint64_t offset)
+ : isec(isec), offset(offset) {}
+ uint64_t getVA() const { return isec->getVA(offset); }
+};
+// Stores rebase opcodes, which tell dyld where absolute addresses have been
+// encoded in the binary. If the binary is not loaded at its preferred address,
+// dyld has to rebase these addresses by adding an offset to them.
+class RebaseSection final : public LinkEditSection {
+public:
+ RebaseSection();
+ void finalizeContents() override;
+ uint64_t getRawSize() const override { return contents.size(); }
+ bool isNeeded() const override { return !locations.empty(); }
void writeTo(uint8_t *buf) const override;
- void addEntry(Symbol &sym);
+ void addEntry(const InputSection *isec, uint64_t offset) {
+ if (config->isPic)
+ locations.push_back({isec, offset});
+ }
private:
- llvm::SetVector<const Symbol *> entries;
+ std::vector<Location> locations;
+ SmallVector<char, 128> contents;
};
struct BindingEntry {
- const DylibSymbol *dysym;
- const InputSection *isec;
- uint64_t offset;
int64_t addend;
- BindingEntry(const DylibSymbol *dysym, const InputSection *isec,
- uint64_t offset, int64_t addend)
- : dysym(dysym), isec(isec), offset(offset), addend(addend) {}
+ Location target;
+ BindingEntry(int64_t addend, Location target)
+ : addend(addend), target(std::move(target)) {}
};
+template <class Sym>
+using BindingsMap = llvm::DenseMap<Sym, std::vector<BindingEntry>>;
+
// Stores bind opcodes for telling dyld which symbols to load non-lazily.
-class BindingSection : public SyntheticSection {
+class BindingSection final : public LinkEditSection {
public:
BindingSection();
- void finalizeContents();
- uint64_t getSize() const override { return contents.size(); }
- // Like other sections in __LINKEDIT, the binding section is special: its
- // offsets are recorded in the LC_DYLD_INFO_ONLY load command, instead of in
- // section headers.
- bool isHidden() const override { return true; }
- bool isNeeded() const override;
+ void finalizeContents() override;
+ uint64_t getRawSize() const override { return contents.size(); }
+ bool isNeeded() const override { return !bindingsMap.empty(); }
void writeTo(uint8_t *buf) const override;
void addEntry(const DylibSymbol *dysym, const InputSection *isec,
- uint64_t offset, int64_t addend) {
- bindings.emplace_back(dysym, isec, offset, addend);
+ uint64_t offset, int64_t addend = 0) {
+ bindingsMap[dysym].emplace_back(addend, Location(isec, offset));
}
private:
- std::vector<BindingEntry> bindings;
+ BindingsMap<const DylibSymbol *> bindingsMap;
+ SmallVector<char, 128> contents;
+};
+
+// Stores bind opcodes for telling dyld which weak symbols need coalescing.
+// There are two types of entries in this section:
+//
+// 1) Non-weak definitions: This is a symbol definition that weak symbols in
+// other dylibs should coalesce to.
+//
+// 2) Weak bindings: These tell dyld that a given symbol reference should
+// coalesce to a non-weak definition if one is found. Note that unlike the
+// entries in the BindingSection, the bindings here only refer to these
+// symbols by name, but do not specify which dylib to load them from.
+class WeakBindingSection final : public LinkEditSection {
+public:
+ WeakBindingSection();
+ void finalizeContents() override;
+ uint64_t getRawSize() const override { return contents.size(); }
+ bool isNeeded() const override {
+ return !bindingsMap.empty() || !definitions.empty();
+ }
+
+ void writeTo(uint8_t *buf) const override;
+
+ void addEntry(const Symbol *symbol, const InputSection *isec, uint64_t offset,
+ int64_t addend = 0) {
+ bindingsMap[symbol].emplace_back(addend, Location(isec, offset));
+ }
+
+ bool hasEntry() const { return !bindingsMap.empty(); }
+
+ void addNonWeakDefinition(const Defined *defined) {
+ definitions.emplace_back(defined);
+ }
+
+ bool hasNonWeakDefinition() const { return !definitions.empty(); }
+
+private:
+ BindingsMap<const Symbol *> bindingsMap;
+ std::vector<const Defined *> definitions;
SmallVector<char, 128> contents;
};
// The following sections implement lazy symbol binding -- very similar to the
// PLT mechanism in ELF.
//
-// ELF's .plt section is broken up into two sections in Mach-O: StubsSection and
-// StubHelperSection. Calls to functions in dylibs will end up calling into
+// ELF's .plt section is broken up into two sections in Mach-O: StubsSection
+// and StubHelperSection. Calls to functions in dylibs will end up calling into
// StubsSection, which contains indirect jumps to addresses stored in the
// LazyPointerSection (the counterpart to ELF's .plt.got).
//
-// Initially, the LazyPointerSection contains addresses that point into one of
-// the entry points in the middle of the StubHelperSection. The code in
+// We will first describe how non-weak symbols are handled.
+//
+// At program start, the LazyPointerSection contains addresses that point into
+// one of the entry points in the middle of the StubHelperSection. The code in
// StubHelperSection will push on the stack an offset into the
// LazyBindingSection. The push is followed by a jump to the beginning of the
// StubHelperSection (similar to PLT0), which then calls into dyld_stub_binder.
// the GOT.
//
// The stub binder will look up the bind opcodes in the LazyBindingSection at
-// the given offset. The bind opcodes will tell the binder to update the address
-// in the LazyPointerSection to point to the symbol, so that subsequent calls
-// don't have to redo the symbol resolution. The binder will then jump to the
-// resolved symbol.
-
-class StubsSection : public SyntheticSection {
+// the given offset. The bind opcodes will tell the binder to update the
+// address in the LazyPointerSection to point to the symbol, so that subsequent
+// calls don't have to redo the symbol resolution. The binder will then jump to
+// the resolved symbol.
+//
+// With weak symbols, the situation is slightly different. Since there is no
+// "weak lazy" lookup, function calls to weak symbols are always non-lazily
+// bound. We emit both regular non-lazy bindings as well as weak bindings, in
+// order that the weak bindings may overwrite the non-lazy bindings if an
+// appropriate symbol is found at runtime. However, the bound addresses will
+// still be written (non-lazily) into the LazyPointerSection.
+
+class StubsSection final : public SyntheticSection {
public:
StubsSection();
uint64_t getSize() const override;
bool isNeeded() const override { return !entries.empty(); }
+ void finalize() override;
void writeTo(uint8_t *buf) const override;
+ const llvm::SetVector<Symbol *> &getEntries() const { return entries; }
+ // Returns whether the symbol was added. Note that every stubs entry will
+ // have a corresponding entry in the LazyPointerSection.
+ bool addEntry(Symbol *);
+ uint64_t getVA(uint32_t stubsIndex) const {
+ assert(isFinal || target->usesThunks());
+    // ConcatOutputSection::finalize() may ask for the address of a stub
+    // before an address has been assigned to it. Before __stubs is
+    // finalized, return a contrived out-of-range address.
+ return isFinal ? addr + stubsIndex * target->stubSize
+ : TargetInfo::outOfRangeVA;
+ }
- const llvm::SetVector<DylibSymbol *> &getEntries() const { return entries; }
-
- void addEntry(DylibSymbol &sym);
+ bool isFinal = false; // is address assigned?
private:
- llvm::SetVector<DylibSymbol *> entries;
+ llvm::SetVector<Symbol *> entries;
};
-class StubHelperSection : public SyntheticSection {
+class StubHelperSection final : public SyntheticSection {
public:
StubHelperSection();
uint64_t getSize() const override;
void setup();
DylibSymbol *stubBinder = nullptr;
+ Defined *dyldPrivate = nullptr;
};
-// This section contains space for just a single word, and will be used by dyld
-// to cache an address to the image loader it uses. Note that unlike the other
-// synthetic sections, which are OutputSections, the ImageLoaderCacheSection is
-// an InputSection that gets merged into the __data OutputSection.
-class ImageLoaderCacheSection : public InputSection {
-public:
- ImageLoaderCacheSection();
- uint64_t getSize() const override { return WordSize; }
-};
-
-class LazyPointerSection : public SyntheticSection {
+// Note that this section may also be targeted by non-lazy bindings. In
+// particular, this happens when branch relocations target weak symbols.
+class LazyPointerSection final : public SyntheticSection {
public:
LazyPointerSection();
uint64_t getSize() const override;
void writeTo(uint8_t *buf) const override;
};
-class LazyBindingSection : public SyntheticSection {
+class LazyBindingSection final : public LinkEditSection {
public:
LazyBindingSection();
- void finalizeContents();
- uint64_t getSize() const override { return contents.size(); }
- uint32_t encode(const DylibSymbol &);
- // Like other sections in __LINKEDIT, the lazy binding section is special: its
- // offsets are recorded in the LC_DYLD_INFO_ONLY load command, instead of in
- // section headers.
- bool isHidden() const override { return true; }
- bool isNeeded() const override;
+ void finalizeContents() override;
+ uint64_t getRawSize() const override { return contents.size(); }
+ bool isNeeded() const override { return !entries.empty(); }
void writeTo(uint8_t *buf) const override;
+  // Note that every entry here will be referenced by a corresponding entry in
+ // the StubHelperSection.
+ void addEntry(DylibSymbol *dysym);
+ const llvm::SetVector<DylibSymbol *> &getEntries() const { return entries; }
private:
+ uint32_t encode(const DylibSymbol &);
+
+ llvm::SetVector<DylibSymbol *> entries;
SmallVector<char, 128> contents;
llvm::raw_svector_ostream os{contents};
};
// Stores a trie that describes the set of exported symbols.
-class ExportSection : public SyntheticSection {
+class ExportSection final : public LinkEditSection {
public:
ExportSection();
- void finalizeContents();
- uint64_t getSize() const override { return size; }
- // Like other sections in __LINKEDIT, the export section is special: its
- // offsets are recorded in the LC_DYLD_INFO_ONLY load command, instead of in
- // section headers.
- bool isHidden() const override { return true; }
+ void finalizeContents() override;
+ uint64_t getRawSize() const override { return size; }
void writeTo(uint8_t *buf) const override;
+ bool hasWeakSymbol = false;
+
private:
TrieBuilder trieBuilder;
size_t size = 0;
};
+// Stores 'data in code' entries that describe the locations of
+// data regions inside code sections.
+class DataInCodeSection final : public LinkEditSection {
+public:
+ DataInCodeSection();
+ void finalizeContents() override;
+ uint64_t getRawSize() const override {
+ return sizeof(llvm::MachO::data_in_code_entry) * entries.size();
+ }
+ void writeTo(uint8_t *buf) const override;
+
+private:
+ std::vector<llvm::MachO::data_in_code_entry> entries;
+};
+
+// Stores ULEB128 delta encoded addresses of functions.
+class FunctionStartsSection final : public LinkEditSection {
+public:
+ FunctionStartsSection();
+ void finalizeContents() override;
+ uint64_t getRawSize() const override { return contents.size(); }
+ void writeTo(uint8_t *buf) const override;
+
+private:
+ SmallVector<char, 128> contents;
+};
+
// Stores the strings referenced by the symbol table.
-class StringTableSection : public SyntheticSection {
+class StringTableSection final : public LinkEditSection {
public:
StringTableSection();
// Returns the start offset of the added string.
uint32_t addString(StringRef);
- uint64_t getSize() const override { return size; }
- // Like other sections in __LINKEDIT, the string table section is special: its
- // offsets are recorded in the LC_SYMTAB load command, instead of in section
- // headers.
- bool isHidden() const override { return true; }
+ uint64_t getRawSize() const override { return size; }
void writeTo(uint8_t *buf) const override;
+ static constexpr size_t emptyStringIndex = 1;
+
private:
- // An n_strx value of 0 always indicates the empty string, so we must locate
- // our non-empty string values at positive offsets in the string table.
- // Therefore we insert a dummy value at position zero.
- std::vector<StringRef> strings{"\0"};
- size_t size = 1;
+ // ld64 emits string tables which start with a space and a zero byte. We
+ // match its behavior here since some tools depend on it.
+ // Consequently, the empty string will be at index 1, not zero.
+ std::vector<StringRef> strings{" "};
+ size_t size = 2;
};
struct SymtabEntry {
size_t strx;
};
-class SymtabSection : public SyntheticSection {
+struct StabsEntry {
+ uint8_t type = 0;
+ uint32_t strx = StringTableSection::emptyStringIndex;
+ uint8_t sect = 0;
+ uint16_t desc = 0;
+ uint64_t value = 0;
+
+ StabsEntry() = default;
+ explicit StabsEntry(uint8_t type) : type(type) {}
+};
+
+// Symbols of the same type must be laid out contiguously: we choose to emit
+// all local symbols first, then external symbols, and finally undefined
+// symbols. For each symbol type, the LC_DYSYMTAB load command will record the
+// range (start index and total number) of those symbols in the symbol table.
+class SymtabSection : public LinkEditSection {
public:
+ void finalizeContents() override;
+ uint32_t getNumSymbols() const;
+ uint32_t getNumLocalSymbols() const {
+ return stabs.size() + localSymbols.size();
+ }
+ uint32_t getNumExternalSymbols() const { return externalSymbols.size(); }
+ uint32_t getNumUndefinedSymbols() const { return undefinedSymbols.size(); }
+
+private:
+ void emitBeginSourceStab(llvm::DWARFUnit *compileUnit);
+ void emitEndSourceStab();
+ void emitObjectFileStab(ObjFile *);
+ void emitEndFunStab(Defined *);
+ void emitStabs();
+
+protected:
SymtabSection(StringTableSection &);
+
+ StringTableSection &stringTableSection;
+ // STABS symbols are always local symbols, but we represent them with special
+ // entries because they may use fields like n_sect and n_desc differently.
+ std::vector<StabsEntry> stabs;
+ std::vector<SymtabEntry> localSymbols;
+ std::vector<SymtabEntry> externalSymbols;
+ std::vector<SymtabEntry> undefinedSymbols;
+};
+
+template <class LP> SymtabSection *makeSymtabSection(StringTableSection &);
+
+// The indirect symbol table is a list of 32-bit integers that serve as indices
+// into the (actual) symbol table. The indirect symbol table is a
+// concatenation of several sub-arrays of indices, each sub-array belonging to
+// a separate section. The starting offset of each sub-array is stored in the
+// reserved1 header field of the respective section.
+//
+// These sub-arrays provide symbol information for sections that store
+// contiguous sequences of symbol references. These references can be pointers
+// (e.g. those in the GOT and TLVP sections) or assembly sequences (e.g.
+// function stubs).
+class IndirectSymtabSection final : public LinkEditSection {
+public:
+ IndirectSymtabSection();
+ void finalizeContents() override;
+ uint32_t getNumSymbols() const;
+ uint64_t getRawSize() const override {
+ return getNumSymbols() * sizeof(uint32_t);
+ }
+ bool isNeeded() const override;
+ void writeTo(uint8_t *buf) const override;
+};
+
+// The code signature comes at the very end of the linked output file.
+class CodeSignatureSection final : public LinkEditSection {
+public:
+ static constexpr uint8_t blockSizeShift = 12;
+ static constexpr size_t blockSize = (1 << blockSizeShift); // 4 KiB
+ static constexpr size_t hashSize = 256 / 8;
+ static constexpr size_t blobHeadersSize = llvm::alignTo<8>(
+ sizeof(llvm::MachO::CS_SuperBlob) + sizeof(llvm::MachO::CS_BlobIndex));
+ static constexpr uint32_t fixedHeadersSize =
+ blobHeadersSize + sizeof(llvm::MachO::CS_CodeDirectory);
+
+ uint32_t fileNamePad = 0;
+ uint32_t allHeadersSize = 0;
+ StringRef fileName;
+
+ CodeSignatureSection();
+ uint64_t getRawSize() const override;
+ bool isNeeded() const override { return true; }
+ void writeTo(uint8_t *buf) const override;
+ uint32_t getBlockCount() const;
+ void writeHashes(uint8_t *buf) const;
+};
+
+class BitcodeBundleSection final : public SyntheticSection {
+public:
+ BitcodeBundleSection();
+ uint64_t getSize() const override { return xarSize; }
+ void finalize() override;
+ void writeTo(uint8_t *buf) const override;
+
+private:
+ llvm::SmallString<261> xarPath;
+ uint64_t xarSize;
+};
+
+class CStringSection : public SyntheticSection {
+public:
+ CStringSection();
+ void addInput(CStringInputSection *);
+ uint64_t getSize() const override { return size; }
+ virtual void finalizeContents();
+ bool isNeeded() const override { return !inputs.empty(); }
+ void writeTo(uint8_t *buf) const override;
+
+ std::vector<CStringInputSection *> inputs;
+
+private:
+ uint64_t size;
+};
+
+class DeduplicatedCStringSection final : public CStringSection {
+public:
+ DeduplicatedCStringSection();
+ uint64_t getSize() const override { return builder.getSize(); }
+ void finalizeContents() override;
+ void writeTo(uint8_t *buf) const override { builder.write(buf); }
+
+private:
+ llvm::StringTableBuilder builder;
+};
+
+/*
+ * This section contains deduplicated literal values. The 16-byte values are
+ * laid out first, followed by the 8- and then the 4-byte ones.
+ */
+class WordLiteralSection final : public SyntheticSection {
+public:
+ using UInt128 = std::pair<uint64_t, uint64_t>;
+ // I don't think the standard guarantees the size of a pair, so let's make
+ // sure it's exact -- that way we can construct it via `mmap`.
+ static_assert(sizeof(UInt128) == 16, "");
+
+ WordLiteralSection();
+ void addInput(WordLiteralInputSection *);
void finalizeContents();
- size_t getNumSymbols() const { return symbols.size(); }
- uint64_t getSize() const override;
- // Like other sections in __LINKEDIT, the symtab section is special: its
- // offsets are recorded in the LC_SYMTAB load command, instead of in section
- // headers.
- bool isHidden() const override { return true; }
void writeTo(uint8_t *buf) const override;
+ uint64_t getSize() const override {
+ return literal16Map.size() * 16 + literal8Map.size() * 8 +
+ literal4Map.size() * 4;
+ }
+
+ bool isNeeded() const override {
+ return !literal16Map.empty() || !literal4Map.empty() ||
+ !literal8Map.empty();
+ }
+
+ uint64_t getLiteral16Offset(const uint8_t *buf) const {
+ return literal16Map.at(*reinterpret_cast<const UInt128 *>(buf)) * 16;
+ }
+
+ uint64_t getLiteral8Offset(const uint8_t *buf) const {
+ return literal16Map.size() * 16 +
+ literal8Map.at(*reinterpret_cast<const uint64_t *>(buf)) * 8;
+ }
+
+ uint64_t getLiteral4Offset(const uint8_t *buf) const {
+ return literal16Map.size() * 16 + literal8Map.size() * 8 +
+ literal4Map.at(*reinterpret_cast<const uint32_t *>(buf)) * 4;
+ }
+
private:
- StringTableSection &stringTableSection;
- std::vector<SymtabEntry> symbols;
+ std::vector<WordLiteralInputSection *> inputs;
+
+ template <class T> struct Hasher {
+ llvm::hash_code operator()(T v) const { return llvm::hash_value(v); }
+ };
+ // We're using unordered_map instead of DenseMap here because we need to
+ // support all possible integer values -- there are no suitable tombstone
+ // values for DenseMap.
+ std::unordered_map<UInt128, uint64_t, Hasher<UInt128>> literal16Map;
+ std::unordered_map<uint64_t, uint64_t> literal8Map;
+ std::unordered_map<uint32_t, uint64_t> literal4Map;
};
struct InStruct {
+ MachHeaderSection *header = nullptr;
+ CStringSection *cStringSection = nullptr;
+ WordLiteralSection *wordLiteralSection = nullptr;
+ RebaseSection *rebase = nullptr;
BindingSection *binding = nullptr;
+ WeakBindingSection *weakBinding = nullptr;
+ LazyBindingSection *lazyBinding = nullptr;
+ ExportSection *exports = nullptr;
GotSection *got = nullptr;
+ TlvPointerSection *tlvPointers = nullptr;
LazyPointerSection *lazyPointers = nullptr;
StubsSection *stubs = nullptr;
StubHelperSection *stubHelper = nullptr;
- ImageLoaderCacheSection *imageLoaderCache = nullptr;
+ UnwindInfoSection *unwindInfo = nullptr;
+ ConcatInputSection *imageLoaderCache = nullptr;
};
extern InStruct in;
extern std::vector<SyntheticSection *> syntheticSections;
+void createSyntheticSymbols();
+
} // namespace macho
} // namespace lld
#ifndef LLD_MACHO_TARGET_H
#define LLD_MACHO_TARGET_H
+#include "MachOStructs.h"
+#include "Relocations.h"
+
+#include "llvm/ADT/BitmaskEnum.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/Support/MemoryBuffer.h"
namespace lld {
namespace macho {
+LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE();
class Symbol;
+class Defined;
class DylibSymbol;
class InputSection;
-struct Reloc;
-
-enum : uint64_t {
- // We are currently only supporting 64-bit targets since macOS and iOS are
- // deprecating 32-bit apps.
- WordSize = 8,
- PageSize = 4096,
- PageZeroSize = 1ull << 32, // XXX should be 4096 for 32-bit targets
- MaxAlignmentPowerOf2 = 32,
-};
class TargetInfo {
public:
+ template <class LP> TargetInfo(LP) {
+ // Having these values available in TargetInfo allows us to access them
+ // without having to resort to templates.
+ magic = LP::magic;
+ pageZeroSize = LP::pageZeroSize;
+ headerSize = sizeof(typename LP::mach_header);
+ wordSize = LP::wordSize;
+ }
+
virtual ~TargetInfo() = default;
// Validate the relocation structure and get its addend.
- virtual uint64_t
- getImplicitAddend(llvm::MemoryBufferRef, const llvm::MachO::section_64 &,
- const llvm::MachO::relocation_info &) const = 0;
- virtual void relocateOne(uint8_t *loc, const Reloc &, uint64_t val) const = 0;
+ virtual int64_t
+ getEmbeddedAddend(llvm::MemoryBufferRef, uint64_t offset,
+ const llvm::MachO::relocation_info) const = 0;
+ virtual void relocateOne(uint8_t *loc, const Reloc &, uint64_t va,
+ uint64_t relocVA) const = 0;
// Write code for lazy binding. See the comments on StubsSection for more
// details.
- virtual void writeStub(uint8_t *buf, const DylibSymbol &) const = 0;
+ virtual void writeStub(uint8_t *buf, const Symbol &) const = 0;
virtual void writeStubHelperHeader(uint8_t *buf) const = 0;
virtual void writeStubHelperEntry(uint8_t *buf, const DylibSymbol &,
uint64_t entryAddr) const = 0;
// Symbols may be referenced via either the GOT or the stubs section,
// depending on the relocation type. prepareSymbolRelocation() will set up the
- // GOT/stubs entries, and getSymbolVA() will return the addresses of those
- // entries.
- virtual void prepareSymbolRelocation(Symbol &, const InputSection *,
- const Reloc &) = 0;
- virtual uint64_t getSymbolVA(const Symbol &, uint8_t type) const = 0;
+ // GOT/stubs entries, and resolveSymbolVA() will return the addresses of those
+ // entries. resolveSymbolVA() may also relax the target instructions to save
+ // on a level of address indirection.
+ virtual void relaxGotLoad(uint8_t *loc, uint8_t type) const = 0;
+
+ virtual const RelocAttrs &getRelocAttrs(uint8_t type) const = 0;
+
+ virtual uint64_t getPageSize() const = 0;
+
+ virtual void populateThunk(InputSection *thunk, Symbol *funcSym) {
+ llvm_unreachable("target does not use thunks");
+ }
+
+ bool hasAttr(uint8_t type, RelocAttrBits bit) const {
+ return getRelocAttrs(type).hasAttr(bit);
+ }
+
+ bool usesThunks() const { return thunkSize > 0; }
- uint32_t cpuType;
+ uint32_t magic;
+ llvm::MachO::CPUType cpuType;
uint32_t cpuSubtype;
+ uint64_t pageZeroSize;
+ size_t headerSize;
size_t stubSize;
size_t stubHelperHeaderSize;
size_t stubHelperEntrySize;
+ size_t wordSize;
+
+ size_t thunkSize = 0;
+ uint64_t branchRange = 0;
+
+ // We contrive this value as sufficiently far from any valid address that it
+ // will always be out-of-range for any architecture. UINT64_MAX is not a
+ // good choice because it is (a) only 1 away from wrapping to 0, and (b) the
+ // tombstone value for DenseMap<> and caused weird assertions for me.
+ static constexpr uint64_t outOfRangeVA = 0xfull << 60;
};
TargetInfo *createX86_64TargetInfo();
+TargetInfo *createARM64TargetInfo();
+TargetInfo *createARM64_32TargetInfo();
+TargetInfo *createARMTargetInfo(uint32_t cpuSubtype);
+
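+// LP64 and ILP32 collect the layout differences between 64-bit and 32-bit
+// Mach-O (header, nlist, segment and section types plus their associated
+// constants) so that size-generic code can be written once as a template
+// over this parameter, e.g. makeSymtabSection<LP>().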
+struct LP64 {
+ using mach_header = llvm::MachO::mach_header_64;
+ using nlist = structs::nlist_64;
+ using segment_command = llvm::MachO::segment_command_64;
+ using section = llvm::MachO::section_64;
+ using encryption_info_command = llvm::MachO::encryption_info_command_64;
+
+ static constexpr uint32_t magic = llvm::MachO::MH_MAGIC_64;
+ static constexpr uint32_t segmentLCType = llvm::MachO::LC_SEGMENT_64;
+ static constexpr uint32_t encryptionInfoLCType =
+ llvm::MachO::LC_ENCRYPTION_INFO_64;
+
+ static constexpr uint64_t pageZeroSize = 1ull << 32;
+ static constexpr size_t wordSize = 8;
+};
+
+struct ILP32 {
+ using mach_header = llvm::MachO::mach_header;
+ using nlist = structs::nlist;
+ using segment_command = llvm::MachO::segment_command;
+ using section = llvm::MachO::section;
+ using encryption_info_command = llvm::MachO::encryption_info_command;
+
+ static constexpr uint32_t magic = llvm::MachO::MH_MAGIC;
+ static constexpr uint32_t segmentLCType = llvm::MachO::LC_SEGMENT;
+ static constexpr uint32_t encryptionInfoLCType =
+ llvm::MachO::LC_ENCRYPTION_INFO;
+
+ static constexpr uint64_t pageZeroSize = 1ull << 12;
+ static constexpr size_t wordSize = 4;
+};
extern TargetInfo *target;
--- /dev/null
+//===- UnwindInfoSection.cpp ----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "UnwindInfoSection.h"
+#include "ConcatOutputSection.h"
+#include "Config.h"
+#include "InputSection.h"
+#include "OutputSection.h"
+#include "OutputSegment.h"
+#include "SymbolTable.h"
+#include "Symbols.h"
+#include "SyntheticSections.h"
+#include "Target.h"
+
+#include "lld/Common/ErrorHandler.h"
+#include "lld/Common/Memory.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/BinaryFormat/MachO.h"
+
+using namespace llvm;
+using namespace llvm::MachO;
+using namespace lld;
+using namespace lld::macho;
+
+#define COMMON_ENCODINGS_MAX 127
+#define COMPACT_ENCODINGS_MAX 256
+
+#define SECOND_LEVEL_PAGE_BYTES 4096
+#define SECOND_LEVEL_PAGE_WORDS (SECOND_LEVEL_PAGE_BYTES / sizeof(uint32_t))
+#define REGULAR_SECOND_LEVEL_ENTRIES_MAX \
+ ((SECOND_LEVEL_PAGE_BYTES - \
+ sizeof(unwind_info_regular_second_level_page_header)) / \
+ sizeof(unwind_info_regular_second_level_entry))
+#define COMPRESSED_SECOND_LEVEL_ENTRIES_MAX \
+ ((SECOND_LEVEL_PAGE_BYTES - \
+ sizeof(unwind_info_compressed_second_level_page_header)) / \
+ sizeof(uint32_t))
+
+#define COMPRESSED_ENTRY_FUNC_OFFSET_BITS 24
+#define COMPRESSED_ENTRY_FUNC_OFFSET_MASK \
+ UNWIND_INFO_COMPRESSED_ENTRY_FUNC_OFFSET(~0)
+
+// Compact Unwind format is a Mach-O evolution of DWARF Unwind that
+// optimizes space and exception-time lookup. Most DWARF unwind
+// entries can be replaced with Compact Unwind entries, but the ones
+// that cannot are retained in DWARF form.
+//
+// This comment will address macro-level organization of the pre-link
+// and post-link compact unwind tables. For micro-level organization
+// pertaining to the bitfield layout of the 32-bit compact unwind
+// entries, see libunwind/include/mach-o/compact_unwind_encoding.h
+//
+// Important clarifying factoids:
+//
+// * __LD,__compact_unwind is the compact unwind format for compiler
+// output and linker input. It is never a final output. It could be
+// an intermediate output with the `-r` option which retains relocs.
+//
+// * __TEXT,__unwind_info is the compact unwind format for final
+// linker output. It is never an input.
+//
+// * __TEXT,__eh_frame is the DWARF format for both linker input and output.
+//
+// * __TEXT,__unwind_info entries are divided into 4 KiB pages (2nd
+// level) by ascending address, and the pages are referenced by an
+// index (1st level) in the section header.
+//
+// * Following the headers in __TEXT,__unwind_info, the bulk of the
+// section contains a vector of compact unwind entries
+// `{functionOffset, encoding}` sorted by ascending `functionOffset`.
+// Adjacent entries with the same encoding can be folded to great
+// advantage, achieving a 3-order-of-magnitude reduction in the
+// number of entries.
+//
+// * The __TEXT,__unwind_info format can accommodate up to 127 unique
+// encodings for the space-efficient compressed format. In practice,
+// fewer than a dozen unique encodings are used by C++ programs of
+// all sizes. Therefore, we don't even bother implementing the regular
+// non-compressed format. Time will tell if anyone in the field ever
+// overflows the 127-encodings limit.
+//
+// Refer to the definition of unwind_info_section_header in
+// compact_unwind_encoding.h for an overview of the format we are encoding
+// here.
+
+// TODO(gkm): prune __eh_frame entries superseded by __unwind_info, PR50410
+// TODO(gkm): how do we align the 2nd-level pages?
+
+using EncodingMap = DenseMap<compact_unwind_encoding_t, size_t>;
+
+struct SecondLevelPage {
+ uint32_t kind;
+ size_t entryIndex;
+ size_t entryCount;
+ size_t byteCount;
+ std::vector<compact_unwind_encoding_t> localEncodings;
+ EncodingMap localEncodingIndexes;
+};
+
+template <class Ptr>
+class UnwindInfoSectionImpl final : public UnwindInfoSection {
+public:
+ void prepareRelocations(ConcatInputSection *) override;
+ void addInput(ConcatInputSection *) override;
+ void finalize() override;
+ void writeTo(uint8_t *buf) const override;
+
+private:
+ std::vector<std::pair<compact_unwind_encoding_t, size_t>> commonEncodings;
+ EncodingMap commonEncodingIndexes;
+ // Indices of personality functions within the GOT.
+ std::vector<uint32_t> personalities;
+ SmallDenseMap<std::pair<InputSection *, uint64_t /* addend */>, Symbol *>
+ personalityTable;
+ std::vector<unwind_info_section_header_lsda_index_entry> lsdaEntries;
+ // Map of function offset (from the image base) to an index within the LSDA
+ // array.
+ DenseMap<uint32_t, uint32_t> functionToLsdaIndex;
+ std::vector<CompactUnwindEntry<Ptr>> cuVector;
+ std::vector<CompactUnwindEntry<Ptr> *> cuPtrVector;
+ std::vector<SecondLevelPage> secondLevelPages;
+ uint64_t level2PagesOffset = 0;
+};
+
+UnwindInfoSection::UnwindInfoSection()
+ : SyntheticSection(segment_names::text, section_names::unwindInfo) {
+ align = 4;
+ compactUnwindSection =
+ make<ConcatOutputSection>(section_names::compactUnwind);
+}
+
+void UnwindInfoSection::prepareRelocations() {
+ for (ConcatInputSection *isec : compactUnwindSection->inputs)
+ prepareRelocations(isec);
+}
+
+template <class Ptr>
+void UnwindInfoSectionImpl<Ptr>::addInput(ConcatInputSection *isec) {
+ assert(isec->getSegName() == segment_names::ld &&
+ isec->getName() == section_names::compactUnwind);
+ isec->parent = compactUnwindSection;
+ compactUnwindSection->addInput(isec);
+}
+
+// Compact unwind relocations have different semantics, so we handle them in a
+// separate code path from regular relocations. First, we do not wish to add
+// rebase opcodes for __LD,__compact_unwind, because that section doesn't
+// actually end up in the final binary. Second, personality pointers always
+// reside in the GOT and must be treated specially.
+template <class Ptr>
+void UnwindInfoSectionImpl<Ptr>::prepareRelocations(ConcatInputSection *isec) {
+ assert(!isec->shouldOmitFromOutput() &&
+ "__compact_unwind section should not be omitted");
+
+ // FIXME: Make this skip relocations for CompactUnwindEntries that
+ // point to dead-stripped functions. That might save some amount of
+ // work. But since there are usually just a few personality functions,
+ // each referenced from many places and thus likely still live, it
+ // wouldn't reduce the number of GOT entries.
+ for (size_t i = 0; i < isec->relocs.size(); ++i) {
+ Reloc &r = isec->relocs[i];
+ assert(target->hasAttr(r.type, RelocAttrBits::UNSIGNED));
+
+ if (r.offset % sizeof(CompactUnwindEntry<Ptr>) == 0) {
+ InputSection *referentIsec;
+ if (auto *isec = r.referent.dyn_cast<InputSection *>())
+ referentIsec = isec;
+ else
+ referentIsec = cast<Defined>(r.referent.dyn_cast<Symbol *>())->isec;
+
+ if (!cast<ConcatInputSection>(referentIsec)->shouldOmitFromOutput())
+ allEntriesAreOmitted = false;
+ continue;
+ }
+
+ if (r.offset % sizeof(CompactUnwindEntry<Ptr>) !=
+ offsetof(CompactUnwindEntry<Ptr>, personality))
+ continue;
+
+ if (auto *s = r.referent.dyn_cast<Symbol *>()) {
+ if (auto *undefined = dyn_cast<Undefined>(s)) {
+ treatUndefinedSymbol(*undefined);
+ // treatUndefinedSymbol() can replace s with a DylibSymbol; re-check.
+ if (isa<Undefined>(s))
+ continue;
+ }
+ if (auto *defined = dyn_cast<Defined>(s)) {
+ // Check if we have created a synthetic symbol at the same address.
+ Symbol *&personality =
+ personalityTable[{defined->isec, defined->value}];
+ if (personality == nullptr) {
+ personality = defined;
+ in.got->addEntry(defined);
+ } else if (personality != defined) {
+ r.referent = personality;
+ }
+ continue;
+ }
+ assert(isa<DylibSymbol>(s));
+ in.got->addEntry(s);
+ continue;
+ }
+
+ if (auto *referentIsec = r.referent.dyn_cast<InputSection *>()) {
+ assert(!isCoalescedWeak(referentIsec));
+ // Personality functions can be referenced via section relocations
+ // if they live in the same object file. Create placeholder synthetic
+ // symbols for them in the GOT.
+ Symbol *&s = personalityTable[{referentIsec, r.addend}];
+ if (s == nullptr) {
+ // This runs after dead stripping, so the noDeadStrip argument does not
+ // matter.
+ s = make<Defined>("<internal>", /*file=*/nullptr, referentIsec,
+ r.addend, /*size=*/0, /*isWeakDef=*/false,
+ /*isExternal=*/false, /*isPrivateExtern=*/false,
+ /*isThumb=*/false, /*isReferencedDynamically=*/false,
+ /*noDeadStrip=*/false);
+ in.got->addEntry(s);
+ }
+ r.referent = s;
+ r.addend = 0;
+ }
+ }
+}
+
+// Unwind info lives in __DATA, and finalization of __TEXT will occur before
+// finalization of __DATA. Moreover, the finalization of unwind info depends on
+// the exact addresses that it references. So it is safe for compact unwind to
+// reference addresses in __TEXT, but not addresses in any other segment.
+static ConcatInputSection *checkTextSegment(InputSection *isec) {
+ if (isec->getSegName() != segment_names::text)
+ error("compact unwind references address in " + toString(isec) +
+ " which is not in segment __TEXT");
+ // __text should always be a ConcatInputSection.
+ return cast<ConcatInputSection>(isec);
+}
+
+template <class Ptr>
+constexpr Ptr TombstoneValue = std::numeric_limits<Ptr>::max();
+
+// We need to apply the relocations to the pre-link compact unwind section
+// before converting it to post-link form. There should only be absolute
+// relocations here: since we are not emitting the pre-link CU section, there
+// is no source address to make a relative location meaningful.
+template <class Ptr>
+static void
+relocateCompactUnwind(ConcatOutputSection *compactUnwindSection,
+ std::vector<CompactUnwindEntry<Ptr>> &cuVector) {
+ for (const ConcatInputSection *isec : compactUnwindSection->inputs) {
+ assert(isec->parent == compactUnwindSection);
+
+ uint8_t *buf =
+ reinterpret_cast<uint8_t *>(cuVector.data()) + isec->outSecOff;
+ memcpy(buf, isec->data.data(), isec->data.size());
+
+ for (const Reloc &r : isec->relocs) {
+ uint64_t referentVA = TombstoneValue<Ptr>;
+ if (auto *referentSym = r.referent.dyn_cast<Symbol *>()) {
+ if (!isa<Undefined>(referentSym)) {
+ if (auto *defined = dyn_cast<Defined>(referentSym))
+ checkTextSegment(defined->isec);
+ // At this point in the link, we may not yet know the final address of
+ // the GOT, so we just encode the index. We make it a 1-based index so
+ // that we can distinguish the null pointer case.
+ referentVA = referentSym->gotIndex + 1;
+ }
+ } else {
+ auto *referentIsec = r.referent.get<InputSection *>();
+ ConcatInputSection *concatIsec = checkTextSegment(referentIsec);
+ if (!concatIsec->shouldOmitFromOutput())
+ referentVA = referentIsec->getVA(r.addend);
+ }
+ writeAddress(buf + r.offset, referentVA, r.length);
+ }
+ }
+}
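+
+// Note: after relocateCompactUnwind(), a CompactUnwindEntry's personality
+// field holds a 1-based GOT index rather than an address; writeTo() later
+// converts that index back into the image-relative address of the
+// corresponding GOT slot when emitting the personalities array.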
+
+// There should only be a handful of unique personality pointers, so we can
+// encode them as 2-bit indices into a small array.
+template <class Ptr>
+static void
+encodePersonalities(const std::vector<CompactUnwindEntry<Ptr> *> &cuPtrVector,
+ std::vector<uint32_t> &personalities) {
+ for (CompactUnwindEntry<Ptr> *cu : cuPtrVector) {
+ if (cu->personality == 0)
+ continue;
+ // Linear search is fast enough for a small array.
+ auto it = find(personalities, cu->personality);
+ uint32_t personalityIndex; // 1-based index
+ if (it != personalities.end()) {
+ personalityIndex = std::distance(personalities.begin(), it) + 1;
+ } else {
+ personalities.push_back(cu->personality);
+ personalityIndex = personalities.size();
+ }
+ cu->encoding |=
+ personalityIndex << countTrailingZeros(
+ static_cast<compact_unwind_encoding_t>(UNWIND_PERSONALITY_MASK));
+ }
+ if (personalities.size() > 3)
+ error("too many personalities (" + std::to_string(personalities.size()) +
+ ") for compact unwind to encode");
+}
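+
+// For example: UNWIND_PERSONALITY_MASK is 0x30000000, so the shift above is
+// countTrailingZeros(0x30000000) == 28 and the 1-based personality index
+// (1..3) lands in bits 28-29 of the encoding; index 0 means "no personality",
+// which is why at most 3 personalities can be encoded.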
+
+// __unwind_info stores unwind data for address ranges. If several
+// adjacent functions have the same unwind encoding, LSDA, and personality
+// function, they share one unwind entry. For this to work, functions without
+// unwind info need explicit "no unwind info" unwind entries -- otherwise the
+// unwinder would assume they share the unwind info of the closest preceding
+// function in the image that does have unwind info.
+template <class Ptr>
+static void addEntriesForFunctionsWithoutUnwindInfo(
+ std::vector<CompactUnwindEntry<Ptr>> &cuVector) {
+ DenseSet<Ptr> hasUnwindInfo;
+ for (CompactUnwindEntry<Ptr> &cuEntry : cuVector)
+ if (cuEntry.functionAddress != TombstoneValue<Ptr>)
+ hasUnwindInfo.insert(cuEntry.functionAddress);
+
+ // Add explicit "has no unwind info" entries for all global and local symbols
+ // without unwind info.
+ auto markNoUnwindInfo = [&cuVector, &hasUnwindInfo](const Defined *d) {
+ if (d->isLive() && d->isec && isCodeSection(d->isec)) {
+ Ptr ptr = d->getVA();
+ if (!hasUnwindInfo.count(ptr))
+ cuVector.push_back({ptr, 0, 0, 0, 0});
+ }
+ };
+ for (Symbol *sym : symtab->getSymbols())
+ if (auto *d = dyn_cast<Defined>(sym))
+ markNoUnwindInfo(d);
+ for (const InputFile *file : inputFiles)
+ if (auto *objFile = dyn_cast<ObjFile>(file))
+ for (Symbol *sym : objFile->symbols)
+ if (auto *d = dyn_cast_or_null<Defined>(sym))
+ if (!d->isExternal())
+ markNoUnwindInfo(d);
+}
+
+static bool canFoldEncoding(compact_unwind_encoding_t encoding) {
+ // From compact_unwind_encoding.h:
+ // UNWIND_X86_64_MODE_STACK_IND:
+ // A "frameless" (RBP not used as frame pointer) function large constant
+ // stack size. This case is like the previous, except the stack size is too
+ // large to encode in the compact unwind encoding. Instead it requires that
+ // the function contains "subq $nnnnnnnn,RSP" in its prolog. The compact
+ // encoding contains the offset to the nnnnnnnn value in the function in
+ // UNWIND_X86_64_FRAMELESS_STACK_SIZE.
+ // Since this means the unwinder has to look at the `subq` in the function
+ // of the unwind info's unwind address, two functions that have identical
+ // unwind info can't be folded if it's using this encoding since both
+ // entries need unique addresses.
+ static_assert(UNWIND_X86_64_MODE_MASK == UNWIND_X86_MODE_MASK, "");
+ static_assert(UNWIND_X86_64_MODE_STACK_IND == UNWIND_X86_MODE_STACK_IND, "");
+ if ((target->cpuType == CPU_TYPE_X86_64 || target->cpuType == CPU_TYPE_X86) &&
+ (encoding & UNWIND_X86_64_MODE_MASK) == UNWIND_X86_64_MODE_STACK_IND) {
+ // FIXME: Consider passing in the two function addresses and getting
+ // their two stack sizes off the `subq` and only returning false if they're
+ // actually different.
+ return false;
+ }
+ return true;
+}
+
+// Scan the __LD,__compact_unwind entries and compute the space needs of
+// __TEXT,__unwind_info and __TEXT,__eh_frame.
+template <class Ptr> void UnwindInfoSectionImpl<Ptr>::finalize() {
+ if (compactUnwindSection == nullptr)
+ return;
+
+ // At this point, the address space for __TEXT,__text has been
+ // assigned, so we can relocate the __LD,__compact_unwind entries
+ // into a temporary buffer. Relocation is necessary in order to sort
+ // the CU entries by function address. Sorting is necessary so that
+ // we can fold adjacent CU entries with identical
+ // encoding+personality+lsda. Folding is necessary because it reduces
+ // the number of CU entries by as much as 3 orders of magnitude!
+ compactUnwindSection->finalize();
+ assert(compactUnwindSection->getSize() % sizeof(CompactUnwindEntry<Ptr>) ==
+ 0);
+ size_t cuCount =
+ compactUnwindSection->getSize() / sizeof(CompactUnwindEntry<Ptr>);
+ cuVector.resize(cuCount);
+ relocateCompactUnwind(compactUnwindSection, cuVector);
+
+ addEntriesForFunctionsWithoutUnwindInfo(cuVector);
+
+ // Rather than sort & fold the 32-byte entries directly, we create a
+ // vector of pointers to entries and sort & fold that instead.
+ cuPtrVector.reserve(cuVector.size());
+ for (CompactUnwindEntry<Ptr> &cuEntry : cuVector)
+ cuPtrVector.emplace_back(&cuEntry);
+ llvm::sort(cuPtrVector, [](const CompactUnwindEntry<Ptr> *a,
+ const CompactUnwindEntry<Ptr> *b) {
+ return a->functionAddress < b->functionAddress;
+ });
+
+ // Dead-stripped functions get a functionAddress of TombstoneValue in
+ // relocateCompactUnwind(). Filter them out here.
+ // FIXME: This doesn't yet collect associated data like LSDAs kept
+ // alive only by a now-removed CompactUnwindEntry or other comdat-like
+ // data (`kindNoneGroupSubordinate*` in ld64).
+ CompactUnwindEntry<Ptr> tombstone;
+ tombstone.functionAddress = TombstoneValue<Ptr>;
+ cuPtrVector.erase(
+ std::lower_bound(cuPtrVector.begin(), cuPtrVector.end(), &tombstone,
+ [](const CompactUnwindEntry<Ptr> *a,
+ const CompactUnwindEntry<Ptr> *b) {
+ return a->functionAddress < b->functionAddress;
+ }),
+ cuPtrVector.end());
+
+ // If there are no entries left after adding explicit "no unwind info"
+ // entries and removing entries for dead-stripped functions, don't write
+ // an __unwind_info section at all.
+ assert(allEntriesAreOmitted == cuPtrVector.empty());
+ if (cuPtrVector.empty())
+ return;
+
+ // Fold adjacent entries with matching encoding+personality+lsda
+ // We use three iterators on the same cuPtrVector to fold in-situ:
+ // (1) `foldBegin` is the first of a potential sequence of matching entries
+ // (2) `foldEnd` is the first non-matching entry after `foldBegin`.
+ // The semi-open interval [ foldBegin .. foldEnd ) contains a range of
+ // entries that can be folded into a single entry and written to ...
+ // (3) `foldWrite`
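+ // For example, three adjacent entries {0x1000, enc}, {0x1100, enc},
+ // {0x1300, enc} with identical encoding, personality, and LSDA fold into
+ // the single surviving entry {0x1000, enc}; the next unfolded entry's
+ // functionOffset implicitly ends the folded range.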
+ auto foldWrite = cuPtrVector.begin();
+ for (auto foldBegin = cuPtrVector.begin(); foldBegin < cuPtrVector.end();) {
+ auto foldEnd = foldBegin;
+ while (++foldEnd < cuPtrVector.end() &&
+ (*foldBegin)->encoding == (*foldEnd)->encoding &&
+ (*foldBegin)->personality == (*foldEnd)->personality &&
+ (*foldBegin)->lsda == (*foldEnd)->lsda &&
+ canFoldEncoding((*foldEnd)->encoding))
+ ;
+ *foldWrite++ = *foldBegin;
+ foldBegin = foldEnd;
+ }
+ cuPtrVector.erase(foldWrite, cuPtrVector.end());
+
+ encodePersonalities(cuPtrVector, personalities);
+
+ // Count frequencies of the folded encodings
+ EncodingMap encodingFrequencies;
+ for (const CompactUnwindEntry<Ptr> *cuPtrEntry : cuPtrVector)
+ encodingFrequencies[cuPtrEntry->encoding]++;
+
+ // Make a vector of encodings, sorted by descending frequency
+ for (const auto &frequency : encodingFrequencies)
+ commonEncodings.emplace_back(frequency);
+ llvm::sort(commonEncodings,
+ [](const std::pair<compact_unwind_encoding_t, size_t> &a,
+ const std::pair<compact_unwind_encoding_t, size_t> &b) {
+ if (a.second == b.second)
+ // When frequencies match, secondarily sort on encoding
+ // to maintain parity with validate-unwind-info.py
+ return a.first > b.first;
+ return a.second > b.second;
+ });
+
+ // Truncate the vector to 127 elements.
+ // Common encoding indexes are limited to 0..126, while encoding
+ // indexes 127..255 are local to each second-level page
+ if (commonEncodings.size() > COMMON_ENCODINGS_MAX)
+ commonEncodings.resize(COMMON_ENCODINGS_MAX);
+
+ // Create a map from encoding to common-encoding-table index
+ for (size_t i = 0; i < commonEncodings.size(); i++)
+ commonEncodingIndexes[commonEncodings[i].first] = i;
+
+ // Split folded encodings into pages, where each page is limited by ...
+ // (a) 4 KiB capacity
+ // (b) 24-bit difference between first & final function address
+ // (c) 8-bit compact-encoding-table index,
+ // for which 0..126 references the global common-encodings table,
+ // and 127..255 references a local per-second-level-page table.
+ // First we try the compressed format and determine how many entries fit.
+ // If more entries would fit in the regular format, we use that instead.
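+ // As a rough budget: SECOND_LEVEL_PAGE_WORDS is 4096 / 4 = 1024 words. A
+ // compressed page spends a few words on its header, one word per entry,
+ // and one extra word per page-local encoding; a regular page spends two
+ // words per entry, so it holds at most 511 entries but places no limit on
+ // distinct encodings and no 24-bit restriction on function offsets.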
+ for (size_t i = 0; i < cuPtrVector.size();) {
+ secondLevelPages.emplace_back();
+ SecondLevelPage &page = secondLevelPages.back();
+ page.entryIndex = i;
+ uintptr_t functionAddressMax =
+ cuPtrVector[i]->functionAddress + COMPRESSED_ENTRY_FUNC_OFFSET_MASK;
+ size_t n = commonEncodings.size();
+ size_t wordsRemaining =
+ SECOND_LEVEL_PAGE_WORDS -
+ sizeof(unwind_info_compressed_second_level_page_header) /
+ sizeof(uint32_t);
+ while (wordsRemaining >= 1 && i < cuPtrVector.size()) {
+ const CompactUnwindEntry<Ptr> *cuPtr = cuPtrVector[i];
+ if (cuPtr->functionAddress >= functionAddressMax) {
+ break;
+ } else if (commonEncodingIndexes.count(cuPtr->encoding) ||
+ page.localEncodingIndexes.count(cuPtr->encoding)) {
+ i++;
+ wordsRemaining--;
+ } else if (wordsRemaining >= 2 && n < COMPACT_ENCODINGS_MAX) {
+ page.localEncodings.emplace_back(cuPtr->encoding);
+ page.localEncodingIndexes[cuPtr->encoding] = n++;
+ i++;
+ wordsRemaining -= 2;
+ } else {
+ break;
+ }
+ }
+ page.entryCount = i - page.entryIndex;
+
+ // If this is not the final page, see if it's possible to fit more
+ // entries by using the regular format. This can happen when there
+ // are many unique encodings and we saturated the local encoding
+ // table early.
+ if (i < cuPtrVector.size() &&
+ page.entryCount < REGULAR_SECOND_LEVEL_ENTRIES_MAX) {
+ page.kind = UNWIND_SECOND_LEVEL_REGULAR;
+ page.entryCount = std::min(REGULAR_SECOND_LEVEL_ENTRIES_MAX,
+ cuPtrVector.size() - page.entryIndex);
+ i = page.entryIndex + page.entryCount;
+ } else {
+ page.kind = UNWIND_SECOND_LEVEL_COMPRESSED;
+ }
+ }
+
+ for (const CompactUnwindEntry<Ptr> *cu : cuPtrVector) {
+ uint32_t functionOffset = cu->functionAddress - in.header->addr;
+ functionToLsdaIndex[functionOffset] = lsdaEntries.size();
+ if (cu->lsda != 0)
+ lsdaEntries.push_back(
+ {functionOffset, static_cast<uint32_t>(cu->lsda - in.header->addr)});
+ }
+
+ // compute size of __TEXT,__unwind_info section
+ level2PagesOffset =
+ sizeof(unwind_info_section_header) +
+ commonEncodings.size() * sizeof(uint32_t) +
+ personalities.size() * sizeof(uint32_t) +
+ // The extra second-level-page entry is for the sentinel
+ (secondLevelPages.size() + 1) *
+ sizeof(unwind_info_section_header_index_entry) +
+ lsdaEntries.size() * sizeof(unwind_info_section_header_lsda_index_entry);
+ unwindInfoSize =
+ level2PagesOffset + secondLevelPages.size() * SECOND_LEVEL_PAGE_BYTES;
+}
+
+// All inputs are relocated and output addresses are known, so write!
+
+template <class Ptr>
+void UnwindInfoSectionImpl<Ptr>::writeTo(uint8_t *buf) const {
+ assert(!cuPtrVector.empty() && "call only if there is unwind info");
+
+ // section header
+ auto *uip = reinterpret_cast<unwind_info_section_header *>(buf);
+ uip->version = 1;
+ uip->commonEncodingsArraySectionOffset = sizeof(unwind_info_section_header);
+ uip->commonEncodingsArrayCount = commonEncodings.size();
+ uip->personalityArraySectionOffset =
+ uip->commonEncodingsArraySectionOffset +
+ (uip->commonEncodingsArrayCount * sizeof(uint32_t));
+ uip->personalityArrayCount = personalities.size();
+ uip->indexSectionOffset = uip->personalityArraySectionOffset +
+ (uip->personalityArrayCount * sizeof(uint32_t));
+ uip->indexCount = secondLevelPages.size() + 1;
+
+ // Common encodings
+ auto *i32p = reinterpret_cast<uint32_t *>(&uip[1]);
+ for (const auto &encoding : commonEncodings)
+ *i32p++ = encoding.first;
+
+ // Personalities
+ for (const uint32_t &personality : personalities)
+ *i32p++ =
+ in.got->addr + (personality - 1) * target->wordSize - in.header->addr;
+
+ // Level-1 index
+ uint32_t lsdaOffset =
+ uip->indexSectionOffset +
+ uip->indexCount * sizeof(unwind_info_section_header_index_entry);
+ uint64_t l2PagesOffset = level2PagesOffset;
+ auto *iep = reinterpret_cast<unwind_info_section_header_index_entry *>(i32p);
+ for (const SecondLevelPage &page : secondLevelPages) {
+ iep->functionOffset =
+ cuPtrVector[page.entryIndex]->functionAddress - in.header->addr;
+ iep->secondLevelPagesSectionOffset = l2PagesOffset;
+ iep->lsdaIndexArraySectionOffset =
+ lsdaOffset + functionToLsdaIndex.lookup(iep->functionOffset) *
+ sizeof(unwind_info_section_header_lsda_index_entry);
+ iep++;
+ l2PagesOffset += SECOND_LEVEL_PAGE_BYTES;
+ }
+ // Level-1 sentinel
+ const CompactUnwindEntry<Ptr> &cuEnd = *cuPtrVector.back();
+ assert(cuEnd.functionAddress != TombstoneValue<Ptr>);
+ iep->functionOffset =
+ cuEnd.functionAddress - in.header->addr + cuEnd.functionLength;
+ iep->secondLevelPagesSectionOffset = 0;
+ iep->lsdaIndexArraySectionOffset =
+ lsdaOffset +
+ lsdaEntries.size() * sizeof(unwind_info_section_header_lsda_index_entry);
+ iep++;
+
+ // LSDAs
+ size_t lsdaBytes =
+ lsdaEntries.size() * sizeof(unwind_info_section_header_lsda_index_entry);
+ if (lsdaBytes > 0)
+ memcpy(iep, lsdaEntries.data(), lsdaBytes);
+
+ // Level-2 pages
+ auto *pp = reinterpret_cast<uint32_t *>(reinterpret_cast<uint8_t *>(iep) +
+ lsdaBytes);
+ for (const SecondLevelPage &page : secondLevelPages) {
+ if (page.kind == UNWIND_SECOND_LEVEL_COMPRESSED) {
+ uintptr_t functionAddressBase =
+ cuPtrVector[page.entryIndex]->functionAddress;
+ auto *p2p =
+ reinterpret_cast<unwind_info_compressed_second_level_page_header *>(
+ pp);
+ p2p->kind = page.kind;
+ p2p->entryPageOffset =
+ sizeof(unwind_info_compressed_second_level_page_header);
+ p2p->entryCount = page.entryCount;
+ p2p->encodingsPageOffset =
+ p2p->entryPageOffset + p2p->entryCount * sizeof(uint32_t);
+ p2p->encodingsCount = page.localEncodings.size();
+ auto *ep = reinterpret_cast<uint32_t *>(&p2p[1]);
+ for (size_t i = 0; i < page.entryCount; i++) {
+ const CompactUnwindEntry<Ptr> *cuep = cuPtrVector[page.entryIndex + i];
+ auto it = commonEncodingIndexes.find(cuep->encoding);
+ if (it == commonEncodingIndexes.end())
+ it = page.localEncodingIndexes.find(cuep->encoding);
+ *ep++ = (it->second << COMPRESSED_ENTRY_FUNC_OFFSET_BITS) |
+ (cuep->functionAddress - functionAddressBase);
+ }
+ if (page.localEncodings.size() != 0)
+ memcpy(ep, page.localEncodings.data(),
+ page.localEncodings.size() * sizeof(uint32_t));
+ } else {
+ auto *p2p =
+ reinterpret_cast<unwind_info_regular_second_level_page_header *>(pp);
+ p2p->kind = page.kind;
+ p2p->entryPageOffset =
+ sizeof(unwind_info_regular_second_level_page_header);
+ p2p->entryCount = page.entryCount;
+ auto *ep = reinterpret_cast<uint32_t *>(&p2p[1]);
+ for (size_t i = 0; i < page.entryCount; i++) {
+ const CompactUnwindEntry<Ptr> *cuep = cuPtrVector[page.entryIndex + i];
+ *ep++ = cuep->functionAddress;
+ *ep++ = cuep->encoding;
+ }
+ }
+ pp += SECOND_LEVEL_PAGE_WORDS;
+ }
+}
+
+UnwindInfoSection *macho::makeUnwindInfoSection() {
+ if (target->wordSize == 8)
+ return make<UnwindInfoSectionImpl<uint64_t>>();
+ else
+ return make<UnwindInfoSectionImpl<uint32_t>>();
+}
--- /dev/null
+//===- UnwindInfoSection.h ------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLD_MACHO_UNWIND_INFO_H
+#define LLD_MACHO_UNWIND_INFO_H
+
+#include "ConcatOutputSection.h"
+#include "SyntheticSections.h"
+
+#include "mach-o/compact_unwind_encoding.h"
+
+namespace lld {
+namespace macho {
+
+template <class Ptr> struct CompactUnwindEntry {
+ Ptr functionAddress;
+ uint32_t functionLength;
+ compact_unwind_encoding_t encoding;
+ Ptr personality;
+ Ptr lsda;
+};
+
+class UnwindInfoSection : public SyntheticSection {
+public:
+ bool isNeeded() const override {
+ return !compactUnwindSection->inputs.empty() && !allEntriesAreOmitted;
+ }
+ uint64_t getSize() const override { return unwindInfoSize; }
+ virtual void addInput(ConcatInputSection *) = 0;
+ std::vector<ConcatInputSection *> getInputs() {
+ return compactUnwindSection->inputs;
+ }
+ void prepareRelocations();
+
+protected:
+ UnwindInfoSection();
+ virtual void prepareRelocations(ConcatInputSection *) = 0;
+
+ ConcatOutputSection *compactUnwindSection;
+ uint64_t unwindInfoSize = 0;
+ bool allEntriesAreOmitted = true;
+};
+
+UnwindInfoSection *makeUnwindInfoSection();
+
+} // namespace macho
+} // namespace lld
+
+#endif
//===----------------------------------------------------------------------===//
#include "Writer.h"
+#include "ConcatOutputSection.h"
#include "Config.h"
#include "InputFiles.h"
#include "InputSection.h"
-#include "MergedOutputSection.h"
+#include "MapFile.h"
#include "OutputSection.h"
#include "OutputSegment.h"
#include "SymbolTable.h"
#include "Symbols.h"
#include "SyntheticSections.h"
#include "Target.h"
+#include "UnwindInfoSection.h"
+#include "lld/Common/Arrays.h"
#include "lld/Common/ErrorHandler.h"
#include "lld/Common/Memory.h"
#include "llvm/BinaryFormat/MachO.h"
+#include "llvm/Config/llvm-config.h"
#include "llvm/Support/LEB128.h"
#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/Parallel.h"
#include "llvm/Support/Path.h"
+#include "llvm/Support/TimeProfiler.h"
+#include "llvm/Support/xxhash.h"
+
+#include <algorithm>
using namespace llvm;
using namespace llvm::MachO;
+using namespace llvm::sys;
using namespace lld;
using namespace lld::macho;
namespace {
-class LCLinkEdit;
-class LCDyldInfo;
-class LCSymtab;
+class LCUuid;
class Writer {
public:
Writer() : buffer(errorHandler().outputBuffer) {}
+ void treatSpecialUndefineds();
void scanRelocations();
- void createOutputSections();
- void createLoadCommands();
+ void scanSymbols();
+ template <class LP> void createOutputSections();
+ template <class LP> void createLoadCommands();
+ void finalizeAddresses();
+ void finalizeLinkEditSegment();
void assignAddresses(OutputSegment *);
- void createSymtabContents();
void openFile();
void writeSections();
+ void writeUuid();
+ void writeCodeSignature();
+ void writeOutputFile();
- void run();
+ template <class LP> void run();
std::unique_ptr<FileOutputBuffer> &buffer;
uint64_t addr = 0;
uint64_t fileOff = 0;
- MachHeaderSection *headerSection = nullptr;
- LazyBindingSection *lazyBindingSection = nullptr;
- ExportSection *exportSection = nullptr;
+ MachHeaderSection *header = nullptr;
StringTableSection *stringTableSection = nullptr;
SymtabSection *symtabSection = nullptr;
+ IndirectSymtabSection *indirectSymtabSection = nullptr;
+ CodeSignatureSection *codeSignatureSection = nullptr;
+ DataInCodeSection *dataInCodeSection = nullptr;
+ FunctionStartsSection *functionStartsSection = nullptr;
+
+ LCUuid *uuidCommand = nullptr;
+ OutputSegment *linkEditSegment = nullptr;
};
// LC_DYLD_INFO_ONLY stores the offsets of symbol import/export information.
-class LCDyldInfo : public LoadCommand {
+class LCDyldInfo final : public LoadCommand {
public:
- LCDyldInfo(BindingSection *bindingSection,
+ LCDyldInfo(RebaseSection *rebaseSection, BindingSection *bindingSection,
+ WeakBindingSection *weakBindingSection,
LazyBindingSection *lazyBindingSection,
ExportSection *exportSection)
- : bindingSection(bindingSection), lazyBindingSection(lazyBindingSection),
- exportSection(exportSection) {}
+ : rebaseSection(rebaseSection), bindingSection(bindingSection),
+ weakBindingSection(weakBindingSection),
+ lazyBindingSection(lazyBindingSection), exportSection(exportSection) {}
uint32_t getSize() const override { return sizeof(dyld_info_command); }
auto *c = reinterpret_cast<dyld_info_command *>(buf);
c->cmd = LC_DYLD_INFO_ONLY;
c->cmdsize = getSize();
+ if (rebaseSection->isNeeded()) {
+ c->rebase_off = rebaseSection->fileOff;
+ c->rebase_size = rebaseSection->getFileSize();
+ }
if (bindingSection->isNeeded()) {
c->bind_off = bindingSection->fileOff;
c->bind_size = bindingSection->getFileSize();
}
+ if (weakBindingSection->isNeeded()) {
+ c->weak_bind_off = weakBindingSection->fileOff;
+ c->weak_bind_size = weakBindingSection->getFileSize();
+ }
if (lazyBindingSection->isNeeded()) {
c->lazy_bind_off = lazyBindingSection->fileOff;
c->lazy_bind_size = lazyBindingSection->getFileSize();
}
}
+ RebaseSection *rebaseSection;
BindingSection *bindingSection;
+ WeakBindingSection *weakBindingSection;
LazyBindingSection *lazyBindingSection;
ExportSection *exportSection;
};
-class LCDysymtab : public LoadCommand {
+class LCSubFramework final : public LoadCommand {
+public:
+ LCSubFramework(StringRef umbrella) : umbrella(umbrella) {}
+
+ uint32_t getSize() const override {
+ return alignTo(sizeof(sub_framework_command) + umbrella.size() + 1,
+ target->wordSize);
+ }
+
+ void writeTo(uint8_t *buf) const override {
+ auto *c = reinterpret_cast<sub_framework_command *>(buf);
+ buf += sizeof(sub_framework_command);
+
+ c->cmd = LC_SUB_FRAMEWORK;
+ c->cmdsize = getSize();
+ c->umbrella = sizeof(sub_framework_command);
+
+ memcpy(buf, umbrella.data(), umbrella.size());
+ buf[umbrella.size()] = '\0';
+ }
+
+private:
+ const StringRef umbrella;
+};
+
+class LCFunctionStarts final : public LoadCommand {
+public:
+ explicit LCFunctionStarts(FunctionStartsSection *functionStartsSection)
+ : functionStartsSection(functionStartsSection) {}
+
+ uint32_t getSize() const override { return sizeof(linkedit_data_command); }
+
+ void writeTo(uint8_t *buf) const override {
+ auto *c = reinterpret_cast<linkedit_data_command *>(buf);
+ c->cmd = LC_FUNCTION_STARTS;
+ c->cmdsize = getSize();
+ c->dataoff = functionStartsSection->fileOff;
+ c->datasize = functionStartsSection->getFileSize();
+ }
+
+private:
+ FunctionStartsSection *functionStartsSection;
+};
+
+class LCDataInCode final : public LoadCommand {
+public:
+ explicit LCDataInCode(DataInCodeSection *dataInCodeSection)
+ : dataInCodeSection(dataInCodeSection) {}
+
+ uint32_t getSize() const override { return sizeof(linkedit_data_command); }
+
+ void writeTo(uint8_t *buf) const override {
+ auto *c = reinterpret_cast<linkedit_data_command *>(buf);
+ c->cmd = LC_DATA_IN_CODE;
+ c->cmdsize = getSize();
+ c->dataoff = dataInCodeSection->fileOff;
+ c->datasize = dataInCodeSection->getFileSize();
+ }
+
+private:
+ DataInCodeSection *dataInCodeSection;
+};
+
+class LCDysymtab final : public LoadCommand {
public:
+ LCDysymtab(SymtabSection *symtabSection,
+ IndirectSymtabSection *indirectSymtabSection)
+ : symtabSection(symtabSection),
+ indirectSymtabSection(indirectSymtabSection) {}
+
uint32_t getSize() const override { return sizeof(dysymtab_command); }
void writeTo(uint8_t *buf) const override {
auto *c = reinterpret_cast<dysymtab_command *>(buf);
c->cmd = LC_DYSYMTAB;
c->cmdsize = getSize();
+
+ c->ilocalsym = 0;
+ c->iextdefsym = c->nlocalsym = symtabSection->getNumLocalSymbols();
+ c->nextdefsym = symtabSection->getNumExternalSymbols();
+ c->iundefsym = c->iextdefsym + c->nextdefsym;
+ c->nundefsym = symtabSection->getNumUndefinedSymbols();
+
+ c->indirectsymoff = indirectSymtabSection->fileOff;
+ c->nindirectsyms = indirectSymtabSection->getNumSymbols();
}
+
+ SymtabSection *symtabSection;
+ IndirectSymtabSection *indirectSymtabSection;
};
-class LCSegment : public LoadCommand {
+template <class LP> class LCSegment final : public LoadCommand {
public:
LCSegment(StringRef name, OutputSegment *seg) : name(name), seg(seg) {}
uint32_t getSize() const override {
- return sizeof(segment_command_64) +
- seg->numNonHiddenSections() * sizeof(section_64);
+ return sizeof(typename LP::segment_command) +
+ seg->numNonHiddenSections() * sizeof(typename LP::section);
}
void writeTo(uint8_t *buf) const override {
- auto *c = reinterpret_cast<segment_command_64 *>(buf);
- buf += sizeof(segment_command_64);
+ using SegmentCommand = typename LP::segment_command;
+ using Section = typename LP::section;
- c->cmd = LC_SEGMENT_64;
+ auto *c = reinterpret_cast<SegmentCommand *>(buf);
+ buf += sizeof(SegmentCommand);
+
+ c->cmd = LP::segmentLCType;
c->cmdsize = getSize();
memcpy(c->segname, name.data(), name.size());
c->fileoff = seg->fileOff;
c->maxprot = seg->maxProt;
c->initprot = seg->initProt;
- if (seg->getSections().empty())
- return;
-
- c->vmaddr = seg->firstSection()->addr;
- c->vmsize =
- seg->lastSection()->addr + seg->lastSection()->getSize() - c->vmaddr;
+ c->vmaddr = seg->addr;
+ c->vmsize = seg->vmSize;
+ c->filesize = seg->fileSize;
c->nsects = seg->numNonHiddenSections();
- for (OutputSection *osec : seg->getSections()) {
- c->filesize += osec->getFileSize();
-
+ for (const OutputSection *osec : seg->getSections()) {
if (osec->isHidden())
continue;
- auto *sectHdr = reinterpret_cast<section_64 *>(buf);
- buf += sizeof(section_64);
+ auto *sectHdr = reinterpret_cast<Section *>(buf);
+ buf += sizeof(Section);
memcpy(sectHdr->sectname, osec->name.data(), osec->name.size());
memcpy(sectHdr->segname, name.data(), name.size());
sectHdr->align = Log2_32(osec->align);
sectHdr->flags = osec->flags;
sectHdr->size = osec->getSize();
+ sectHdr->reserved1 = osec->reserved1;
+ sectHdr->reserved2 = osec->reserved2;
}
}
OutputSegment *seg;
};
-class LCMain : public LoadCommand {
- uint32_t getSize() const override { return sizeof(entry_point_command); }
+class LCMain final : public LoadCommand {
+ uint32_t getSize() const override {
+ return sizeof(structs::entry_point_command);
+ }
void writeTo(uint8_t *buf) const override {
- auto *c = reinterpret_cast<entry_point_command *>(buf);
+ auto *c = reinterpret_cast<structs::entry_point_command *>(buf);
c->cmd = LC_MAIN;
c->cmdsize = getSize();
- c->entryoff = config->entry->getFileOffset();
+
+ if (config->entry->isInStubs())
+ c->entryoff =
+ in.stubs->fileOff + config->entry->stubsIndex * target->stubSize;
+ else
+ c->entryoff = config->entry->getVA() - in.header->addr;
+
c->stacksize = 0;
}
};
-class LCSymtab : public LoadCommand {
+class LCSymtab final : public LoadCommand {
public:
LCSymtab(SymtabSection *symtabSection, StringTableSection *stringTableSection)
: symtabSection(symtabSection), stringTableSection(stringTableSection) {}
// * LC_LOAD_DYLIB
// * LC_ID_DYLIB
// * LC_REEXPORT_DYLIB
-class LCDylib : public LoadCommand {
+class LCDylib final : public LoadCommand {
public:
- LCDylib(LoadCommandType type, StringRef path) : type(type), path(path) {}
+ LCDylib(LoadCommandType type, StringRef path,
+ uint32_t compatibilityVersion = 0, uint32_t currentVersion = 0)
+ : type(type), path(path), compatibilityVersion(compatibilityVersion),
+ currentVersion(currentVersion) {
+ instanceCount++;
+ }
uint32_t getSize() const override {
return alignTo(sizeof(dylib_command) + path.size() + 1, 8);
c->cmd = type;
c->cmdsize = getSize();
c->dylib.name = sizeof(dylib_command);
+ c->dylib.timestamp = 0;
+ c->dylib.compatibility_version = compatibilityVersion;
+ c->dylib.current_version = currentVersion;
memcpy(buf, path.data(), path.size());
buf[path.size()] = '\0';
}
+ static uint32_t getInstanceCount() { return instanceCount; }
+
private:
LoadCommandType type;
StringRef path;
+ uint32_t compatibilityVersion;
+ uint32_t currentVersion;
+ static uint32_t instanceCount;
};
-class LCLoadDylinker : public LoadCommand {
+uint32_t LCDylib::instanceCount = 0;
+
+class LCLoadDylinker final : public LoadCommand {
public:
uint32_t getSize() const override {
return alignTo(sizeof(dylinker_command) + path.size() + 1, 8);
// different location.
const StringRef path = "/usr/lib/dyld";
};
+
+class LCRPath final : public LoadCommand {
+public:
+ explicit LCRPath(StringRef path) : path(path) {}
+
+ uint32_t getSize() const override {
+ return alignTo(sizeof(rpath_command) + path.size() + 1, target->wordSize);
+ }
+
+ void writeTo(uint8_t *buf) const override {
+ auto *c = reinterpret_cast<rpath_command *>(buf);
+ buf += sizeof(rpath_command);
+
+ c->cmd = LC_RPATH;
+ c->cmdsize = getSize();
+ c->path = sizeof(rpath_command);
+
+ memcpy(buf, path.data(), path.size());
+ buf[path.size()] = '\0';
+ }
+
+private:
+ StringRef path;
+};
+
+class LCMinVersion final : public LoadCommand {
+public:
+ explicit LCMinVersion(const PlatformInfo &platformInfo)
+ : platformInfo(platformInfo) {}
+
+ uint32_t getSize() const override { return sizeof(version_min_command); }
+
+ void writeTo(uint8_t *buf) const override {
+ auto *c = reinterpret_cast<version_min_command *>(buf);
+ switch (platformInfo.target.Platform) {
+ case PlatformKind::macOS:
+ c->cmd = LC_VERSION_MIN_MACOSX;
+ break;
+ case PlatformKind::iOS:
+ case PlatformKind::iOSSimulator:
+ c->cmd = LC_VERSION_MIN_IPHONEOS;
+ break;
+ case PlatformKind::tvOS:
+ case PlatformKind::tvOSSimulator:
+ c->cmd = LC_VERSION_MIN_TVOS;
+ break;
+ case PlatformKind::watchOS:
+ case PlatformKind::watchOSSimulator:
+ c->cmd = LC_VERSION_MIN_WATCHOS;
+ break;
+ default:
+ llvm_unreachable("invalid platform");
+ break;
+ }
+ c->cmdsize = getSize();
+ c->version = encodeVersion(platformInfo.minimum);
+ c->sdk = encodeVersion(platformInfo.sdk);
+ }
+
+private:
+ const PlatformInfo &platformInfo;
+};
+
+class LCBuildVersion final : public LoadCommand {
+public:
+ explicit LCBuildVersion(const PlatformInfo &platformInfo)
+ : platformInfo(platformInfo) {}
+
+ const int ntools = 1;
+
+ uint32_t getSize() const override {
+ return sizeof(build_version_command) + ntools * sizeof(build_tool_version);
+ }
+
+ void writeTo(uint8_t *buf) const override {
+ auto *c = reinterpret_cast<build_version_command *>(buf);
+ c->cmd = LC_BUILD_VERSION;
+ c->cmdsize = getSize();
+ c->platform = static_cast<uint32_t>(platformInfo.target.Platform);
+ c->minos = encodeVersion(platformInfo.minimum);
+ c->sdk = encodeVersion(platformInfo.sdk);
+ c->ntools = ntools;
+ auto *t = reinterpret_cast<build_tool_version *>(&c[1]);
+ t->tool = TOOL_LD;
+ t->version = encodeVersion(VersionTuple(
+ LLVM_VERSION_MAJOR, LLVM_VERSION_MINOR, LLVM_VERSION_PATCH));
+ }
+
+private:
+ const PlatformInfo &platformInfo;
+};
+
+// Stores a unique identifier for the output file, derived from a hash of its
+// contents. In order to hash the contents, we must first write them, but
+// LC_UUID itself must be part of the written contents in order for all the
+// offsets to be calculated correctly. We resolve this circular paradox by
+// first writing an LC_UUID with an all-zero UUID, then updating the UUID with
+// its real value later.
+class LCUuid final : public LoadCommand {
+public:
+ uint32_t getSize() const override { return sizeof(uuid_command); }
+
+ void writeTo(uint8_t *buf) const override {
+ auto *c = reinterpret_cast<uuid_command *>(buf);
+ c->cmd = LC_UUID;
+ c->cmdsize = getSize();
+ uuidBuf = c->uuid;
+ }
+
+ void writeUuid(uint64_t digest) const {
+ // xxhash only gives us 8 bytes, so put some fixed data in the other half.
+ static_assert(sizeof(uuid_command::uuid) == 16, "unexpected uuid size");
+ memcpy(uuidBuf, "LLD\xa1UU1D", 8);
+ memcpy(uuidBuf + 8, &digest, 8);
+
+ // RFC 4122 conformance. We need to fix 4 bits in byte 6 and 2 bits in
+ // byte 8. Byte 6 is already fine due to the fixed data we put in. We don't
+ // want to lose bits of the digest in byte 8, so swap that with a byte of
+ // fixed data that happens to have the right bits set.
+ std::swap(uuidBuf[3], uuidBuf[8]);
+
+ // Claim that this is an MD5-based hash. It isn't, but this signals that
+ // this is not a time-based and not a random hash. MD5 seems like the least
+ // bad lie we can put here.
+ assert((uuidBuf[6] & 0xf0) == 0x30 && "See RFC 4122 Sections 4.2.2, 4.1.3");
+ assert((uuidBuf[8] & 0xc0) == 0x80 && "See RFC 4122 Section 4.2.2");
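+
+ // Concretely: after the memcpy above, byte 6 is '1' == 0x31, whose high
+ // nibble 0x3 is the RFC 4122 version field for an MD5-derived UUID; after
+ // the swap, byte 8 is 0xa1, whose top two bits are 0b10, the required
+ // RFC 4122 variant.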
+ }
+
+ mutable uint8_t *uuidBuf;
+};
+
+template <class LP> class LCEncryptionInfo final : public LoadCommand {
+public:
+ uint32_t getSize() const override {
+ return sizeof(typename LP::encryption_info_command);
+ }
+
+ void writeTo(uint8_t *buf) const override {
+ using EncryptionInfo = typename LP::encryption_info_command;
+ auto *c = reinterpret_cast<EncryptionInfo *>(buf);
+ buf += sizeof(EncryptionInfo);
+ c->cmd = LP::encryptionInfoLCType;
+ c->cmdsize = getSize();
+ c->cryptoff = in.header->getSize();
+ auto it = find_if(outputSegments, [](const OutputSegment *seg) {
+ return seg->name == segment_names::text;
+ });
+ assert(it != outputSegments.end());
+ c->cryptsize = (*it)->fileSize - c->cryptoff;
+ }
+};
+
+class LCCodeSignature final : public LoadCommand {
+public:
+ LCCodeSignature(CodeSignatureSection *section) : section(section) {}
+
+ uint32_t getSize() const override { return sizeof(linkedit_data_command); }
+
+ void writeTo(uint8_t *buf) const override {
+ auto *c = reinterpret_cast<linkedit_data_command *>(buf);
+ c->cmd = LC_CODE_SIGNATURE;
+ c->cmdsize = getSize();
+ c->dataoff = static_cast<uint32_t>(section->fileOff);
+ c->datasize = section->getSize();
+ }
+
+ CodeSignatureSection *section;
+};
+
} // namespace
+void Writer::treatSpecialUndefineds() {
+ if (config->entry)
+ if (auto *undefined = dyn_cast<Undefined>(config->entry))
+ treatUndefinedSymbol(*undefined, "the entry point");
+
+ // FIXME: This prints symbols that are undefined both in input files and
+ // via -u flag twice.
+ for (const Symbol *sym : config->explicitUndefineds) {
+ if (const auto *undefined = dyn_cast<Undefined>(sym))
+ treatUndefinedSymbol(*undefined, "-u");
+ }
+ // Literal exported-symbol names must be defined, but glob
+ // patterns need not match.
+ for (const CachedHashStringRef &cachedName :
+ config->exportedSymbols.literals) {
+ if (const Symbol *sym = symtab->find(cachedName))
+ if (const auto *undefined = dyn_cast<Undefined>(sym))
+ treatUndefinedSymbol(*undefined, "-exported_symbol(s_list)");
+ }
+}
+
+// Add stubs and bindings where necessary (e.g. if the symbol is a
+// DylibSymbol).
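+// For example, a branch to a DylibSymbol gets a stub plus either a lazy
+// binding or, for a weak definition, regular and weak bindings on its lazy
+// pointer; a branch to an external weak Defined gets a stub plus a rebase
+// and a weak binding.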
+static void prepareBranchTarget(Symbol *sym) {
+ if (auto *dysym = dyn_cast<DylibSymbol>(sym)) {
+ if (in.stubs->addEntry(dysym)) {
+ if (sym->isWeakDef()) {
+ in.binding->addEntry(dysym, in.lazyPointers->isec,
+ sym->stubsIndex * target->wordSize);
+ in.weakBinding->addEntry(sym, in.lazyPointers->isec,
+ sym->stubsIndex * target->wordSize);
+ } else {
+ in.lazyBinding->addEntry(dysym);
+ }
+ }
+ } else if (auto *defined = dyn_cast<Defined>(sym)) {
+ if (defined->isExternalWeakDef()) {
+ if (in.stubs->addEntry(sym)) {
+ in.rebase->addEntry(in.lazyPointers->isec,
+ sym->stubsIndex * target->wordSize);
+ in.weakBinding->addEntry(sym, in.lazyPointers->isec,
+ sym->stubsIndex * target->wordSize);
+ }
+ }
+ } else {
+ llvm_unreachable("invalid branch target symbol type");
+ }
+}
+
+// Can a symbol's address only be resolved at runtime?
+static bool needsBinding(const Symbol *sym) {
+ if (isa<DylibSymbol>(sym))
+ return true;
+ if (const auto *defined = dyn_cast<Defined>(sym))
+ return defined->isExternalWeakDef();
+ return false;
+}
+
+static void prepareSymbolRelocation(Symbol *sym, const InputSection *isec,
+ const Reloc &r) {
+ assert(sym->isLive());
+ const RelocAttrs &relocAttrs = target->getRelocAttrs(r.type);
+
+ if (relocAttrs.hasAttr(RelocAttrBits::BRANCH)) {
+ prepareBranchTarget(sym);
+ } else if (relocAttrs.hasAttr(RelocAttrBits::GOT)) {
+ if (relocAttrs.hasAttr(RelocAttrBits::POINTER) || needsBinding(sym))
+ in.got->addEntry(sym);
+ } else if (relocAttrs.hasAttr(RelocAttrBits::TLV)) {
+ if (needsBinding(sym))
+ in.tlvPointers->addEntry(sym);
+ } else if (relocAttrs.hasAttr(RelocAttrBits::UNSIGNED)) {
+ // References from thread-local variable sections are treated as offsets
+ // relative to the start of the referent section, and therefore have no
+ // need of rebase opcodes.
+ if (!(isThreadLocalVariables(isec->getFlags()) && isa<Defined>(sym)))
+ addNonLazyBindingEntries(sym, isec, r.offset, r.addend);
+ }
+}
+
void Writer::scanRelocations() {
- for (InputSection *isec : inputSections) {
- for (Reloc &r : isec->relocs) {
- if (auto *s = r.target.dyn_cast<lld::macho::Symbol *>()) {
- if (isa<Undefined>(s))
- error("undefined symbol " + s->getName() + ", referenced from " +
- sys::path::filename(isec->file->getName()));
- else
- target->prepareSymbolRelocation(*s, isec, r);
+ TimeTraceScope timeScope("Scan relocations");
+
+ // This can't use a for-each loop: It calls treatUndefinedSymbol(), which can
+ // add to inputSections, which invalidates inputSections's iterators.
+ for (size_t i = 0; i < inputSections.size(); ++i) {
+ ConcatInputSection *isec = inputSections[i];
+
+ if (isec->shouldOmitFromOutput())
+ continue;
+
+ for (auto it = isec->relocs.begin(); it != isec->relocs.end(); ++it) {
+ Reloc &r = *it;
+ if (target->hasAttr(r.type, RelocAttrBits::SUBTRAHEND)) {
+ // Skip over the following UNSIGNED relocation -- it's just there as the
+ // minuend, and doesn't have the usual UNSIGNED semantics. We don't want
+ // to emit rebase opcodes for it.
+ it++;
+ continue;
}
+ if (auto *sym = r.referent.dyn_cast<Symbol *>()) {
+ if (auto *undefined = dyn_cast<Undefined>(sym))
+ treatUndefinedSymbol(*undefined);
+ // treatUndefinedSymbol() can replace sym with a DylibSymbol; re-check.
+ if (!isa<Undefined>(sym) && validateSymbolRelocation(sym, isec, r))
+ prepareSymbolRelocation(sym, isec, r);
+ } else {
+ // Canonicalize the referent so that later accesses in Writer won't
+ // have to worry about it. Perhaps we should do this for Defined::isec
+ // too...
+ auto *referentIsec = r.referent.get<InputSection *>();
+ r.referent = referentIsec->canonical();
+ if (!r.pcrel)
+ in.rebase->addEntry(isec, r.offset);
+ }
+ }
+ }
+
+ in.unwindInfo->prepareRelocations();
+}
+
+void Writer::scanSymbols() {
+ TimeTraceScope timeScope("Scan symbols");
+ for (const Symbol *sym : symtab->getSymbols()) {
+ if (const auto *defined = dyn_cast<Defined>(sym)) {
+ if (defined->overridesWeakDef && defined->isLive())
+ in.weakBinding->addNonWeakDefinition(defined);
+ } else if (const auto *dysym = dyn_cast<DylibSymbol>(sym)) {
+ // This branch intentionally doesn't check isLive().
+ if (dysym->isDynamicLookup())
+ continue;
+ dysym->getFile()->refState =
+ std::max(dysym->getFile()->refState, dysym->getRefState());
}
}
}
-void Writer::createLoadCommands() {
- headerSection->addLoadCommand(
- make<LCDyldInfo>(in.binding, lazyBindingSection, exportSection));
- headerSection->addLoadCommand(
- make<LCSymtab>(symtabSection, stringTableSection));
- headerSection->addLoadCommand(make<LCDysymtab>());
+// TODO: ld64 enforces the old load commands in a few other cases.
+static bool useLCBuildVersion(const PlatformInfo &platformInfo) {
+ static const std::vector<std::pair<PlatformKind, VersionTuple>> minVersion = {
+ {PlatformKind::macOS, VersionTuple(10, 14)},
+ {PlatformKind::iOS, VersionTuple(12, 0)},
+ {PlatformKind::iOSSimulator, VersionTuple(13, 0)},
+ {PlatformKind::tvOS, VersionTuple(12, 0)},
+ {PlatformKind::tvOSSimulator, VersionTuple(13, 0)},
+ {PlatformKind::watchOS, VersionTuple(5, 0)},
+ {PlatformKind::watchOSSimulator, VersionTuple(6, 0)}};
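+ // e.g. a macOS minimum version of 10.13 falls below the 10.14 threshold
+ // above, so we emit LC_VERSION_MIN_MACOSX; a minimum of 10.14 or newer
+ // gets LC_BUILD_VERSION.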
+ auto it = llvm::find_if(minVersion, [&](const auto &p) {
+ return p.first == platformInfo.target.Platform;
+ });
+ return it == minVersion.end() ? true : platformInfo.minimum >= it->second;
+}
+
+template <class LP> void Writer::createLoadCommands() {
+ uint8_t segIndex = 0;
+ for (OutputSegment *seg : outputSegments) {
+ in.header->addLoadCommand(make<LCSegment<LP>>(seg->name, seg));
+ seg->index = segIndex++;
+ }
+
+ in.header->addLoadCommand(make<LCDyldInfo>(
+ in.rebase, in.binding, in.weakBinding, in.lazyBinding, in.exports));
+ in.header->addLoadCommand(make<LCSymtab>(symtabSection, stringTableSection));
+ in.header->addLoadCommand(
+ make<LCDysymtab>(symtabSection, indirectSymtabSection));
+ if (!config->umbrella.empty())
+ in.header->addLoadCommand(make<LCSubFramework>(config->umbrella));
+ if (config->emitEncryptionInfo)
+ in.header->addLoadCommand(make<LCEncryptionInfo<LP>>());
+ for (StringRef path : config->runtimePaths)
+ in.header->addLoadCommand(make<LCRPath>(path));
switch (config->outputType) {
case MH_EXECUTE:
- headerSection->addLoadCommand(make<LCMain>());
- headerSection->addLoadCommand(make<LCLoadDylinker>());
+ in.header->addLoadCommand(make<LCLoadDylinker>());
break;
case MH_DYLIB:
- headerSection->addLoadCommand(
- make<LCDylib>(LC_ID_DYLIB, config->installName));
+ in.header->addLoadCommand(make<LCDylib>(LC_ID_DYLIB, config->installName,
+ config->dylibCompatibilityVersion,
+ config->dylibCurrentVersion));
+ break;
+ case MH_BUNDLE:
break;
default:
llvm_unreachable("unhandled output file type");
}
- uint8_t segIndex = 0;
- for (OutputSegment *seg : outputSegments) {
- headerSection->addLoadCommand(make<LCSegment>(seg->name, seg));
- seg->index = segIndex++;
- }
+ uuidCommand = make<LCUuid>();
+ in.header->addLoadCommand(uuidCommand);
+
+ if (useLCBuildVersion(config->platformInfo))
+ in.header->addLoadCommand(make<LCBuildVersion>(config->platformInfo));
+ else
+ in.header->addLoadCommand(make<LCMinVersion>(config->platformInfo));
+
+ // This is down here to match ld64's load command order.
+ if (config->outputType == MH_EXECUTE)
+ in.header->addLoadCommand(make<LCMain>());
- uint64_t dylibOrdinal = 1;
+ int64_t dylibOrdinal = 1;
+ DenseMap<StringRef, int64_t> ordinalForInstallName;
for (InputFile *file : inputFiles) {
if (auto *dylibFile = dyn_cast<DylibFile>(file)) {
- headerSection->addLoadCommand(
- make<LCDylib>(LC_LOAD_DYLIB, dylibFile->dylibName));
- dylibFile->ordinal = dylibOrdinal++;
+ if (dylibFile->isBundleLoader) {
+ dylibFile->ordinal = BIND_SPECIAL_DYLIB_MAIN_EXECUTABLE;
+ // Shortcut since bundle-loader does not re-export the symbols.
+
+ dylibFile->reexport = false;
+ continue;
+ }
+
+ // Don't emit load commands for a dylib that is not referenced if:
+ // - it was added implicitly (via a reexport, an LC_LOAD_DYLINKER --
+ // if it's on the linker command line, it's explicit)
+ // - or it's marked MH_DEAD_STRIPPABLE_DYLIB
+ // - or the flag -dead_strip_dylibs is used
+ // FIXME: `isReferenced()` is currently computed before dead code
+ // stripping, so references from dead code keep a dylib alive. This
+ // matches ld64, but it's something we should do better.
+ if (!dylibFile->isReferenced() && !dylibFile->forceNeeded &&
+ (!dylibFile->explicitlyLinked || dylibFile->deadStrippable ||
+ config->deadStripDylibs))
+ continue;
+
+ // Several DylibFiles can have the same installName. Only emit a single
+ // load command for that installName and give all these DylibFiles the
+ // same ordinal.
+ // This can happen in several cases:
+ // - a new framework could change its installName to an older
+ // framework name via an $ld$ symbol depending on platform_version
+ // - symlinks (for example, libpthread.tbd is a symlink to libSystem.tbd;
+ // Foo.framework/Foo.tbd is usually a symlink to
+ // Foo.framework/Versions/Current/Foo.tbd, where
+ // Foo.framework/Versions/Current is usually a symlink to
+ // Foo.framework/Versions/A)
+ // - a framework can be linked both explicitly on the linker
+ // command line and implicitly as a reexport from a different
+ // framework. The re-export will usually point to the tbd file
+ // in Foo.framework/Versions/A/Foo.tbd, while the explicit link will
+ // usually find Foo.framework/Foo.tbd. These are usually symlinks,
+ // but in a --reproduce archive they will be identical but distinct
+ // files.
+ // In the first case, *semantically distinct* DylibFiles will have the
+ // same installName.
+ int64_t &ordinal = ordinalForInstallName[dylibFile->installName];
+ if (ordinal) {
+ dylibFile->ordinal = ordinal;
+ continue;
+ }
+
+ ordinal = dylibFile->ordinal = dylibOrdinal++;
+ LoadCommandType lcType =
+ dylibFile->forceWeakImport || dylibFile->refState == RefState::Weak
+ ? LC_LOAD_WEAK_DYLIB
+ : LC_LOAD_DYLIB;
+ in.header->addLoadCommand(make<LCDylib>(lcType, dylibFile->installName,
+ dylibFile->compatibilityVersion,
+ dylibFile->currentVersion));
if (dylibFile->reexport)
- headerSection->addLoadCommand(
- make<LCDylib>(LC_REEXPORT_DYLIB, dylibFile->dylibName));
+ in.header->addLoadCommand(
+ make<LCDylib>(LC_REEXPORT_DYLIB, dylibFile->installName));
}
}
+
+ if (functionStartsSection)
+ in.header->addLoadCommand(make<LCFunctionStarts>(functionStartsSection));
+ if (dataInCodeSection)
+ in.header->addLoadCommand(make<LCDataInCode>(dataInCodeSection));
+ if (codeSignatureSection)
+ in.header->addLoadCommand(make<LCCodeSignature>(codeSignatureSection));
+
+ const uint32_t MACOS_MAXPATHLEN = 1024;
+ config->headerPad = std::max(
+ config->headerPad, (config->headerPadMaxInstallNames
+ ? LCDylib::getInstanceCount() * MACOS_MAXPATHLEN
+ : 0));
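+ // e.g. with -headerpad_max_install_names and three LCDylib load commands,
+ // headerPad grows to at least 3 * 1024 bytes, leaving room for tools like
+ // install_name_tool to rewrite install names in place later.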
}
static size_t getSymbolPriority(const SymbolPriorityEntry &entry,
- const InputFile &file) {
- return std::max(entry.objectFiles.lookup(sys::path::filename(file.getName())),
- entry.anyObjectFile);
+ const InputFile *f) {
+ // We don't use toString(InputFile *) here because it returns the full path
+ // for object files, and we only want the basename.
+ StringRef filename;
+ if (f->archiveName.empty())
+ filename = path::filename(f->getName());
+ else
+ filename = saver.save(path::filename(f->archiveName) + "(" +
+ path::filename(f->getName()) + ")");
+ return std::max(entry.objectFiles.lookup(filename), entry.anyObjectFile);
}
// Each section gets assigned the priority of the highest-priority symbol it
return sectionPriorities;
auto addSym = [&](Defined &sym) {
+ if (sym.isAbsolute())
+ return;
+
auto it = config->priorities.find(sym.getName());
if (it == config->priorities.end())
return;
SymbolPriorityEntry &entry = it->second;
size_t &priority = sectionPriorities[sym.isec];
- priority = std::max(priority, getSymbolPriority(entry, *sym.isec->file));
+ priority =
+ std::max(priority, getSymbolPriority(entry, sym.isec->getFile()));
};
// TODO: Make sure this handles weak symbols correctly.
- for (InputFile *file : inputFiles)
- if (isa<ObjFile>(file) || isa<ArchiveFile>(file))
- for (lld::macho::Symbol *sym : file->symbols)
- if (auto *d = dyn_cast<Defined>(sym))
+ for (const InputFile *file : inputFiles) {
+ if (isa<ObjFile>(file))
+ for (Symbol *sym : file->symbols)
+ if (auto *d = dyn_cast_or_null<Defined>(sym))
addSym(*d);
+ }
return sectionPriorities;
}
-static int segmentOrder(OutputSegment *seg) {
- return StringSwitch<int>(seg->name)
- .Case(segment_names::pageZero, -2)
- .Case(segment_names::text, -1)
- // Make sure __LINKEDIT is the last segment (i.e. all its hidden
- // sections must be ordered after other sections).
- .Case(segment_names::linkEdit, std::numeric_limits<int>::max())
- .Default(0);
-}
-
-static int sectionOrder(OutputSection *osec) {
- StringRef segname = osec->parent->name;
- // Sections are uniquely identified by their segment + section name.
- if (segname == segment_names::text) {
- if (osec->name == section_names::header)
- return -1;
- } else if (segname == segment_names::linkEdit) {
- return StringSwitch<int>(osec->name)
- .Case(section_names::binding, -4)
- .Case(section_names::export_, -3)
- .Case(section_names::symbolTable, -2)
- .Case(section_names::stringTable, -1)
- .Default(0);
- }
- // ZeroFill sections must always be the at the end of their segments,
- // otherwise subsequent sections may get overwritten with zeroes at runtime.
- if (isZeroFill(osec->flags))
- return std::numeric_limits<int>::max();
- return 0;
-}
-
-template <typename T, typename F>
-static std::function<bool(T, T)> compareByOrder(F ord) {
- return [=](T a, T b) { return ord(a) < ord(b); };
-}
-
// Sorting only can happen once all outputs have been collected. Here we sort
// segments, output sections within each segment, and input sections within each
// output segment.
static void sortSegmentsAndSections() {
- llvm::stable_sort(outputSegments,
- compareByOrder<OutputSegment *>(segmentOrder));
+ TimeTraceScope timeScope("Sort segments and sections");
+ sortOutputSegments();
DenseMap<const InputSection *, size_t> isecPriorities =
buildInputSectionPriorities();
uint32_t sectionIndex = 0;
for (OutputSegment *seg : outputSegments) {
- seg->sortOutputSections(compareByOrder<OutputSection *>(sectionOrder));
- for (auto *osec : seg->getSections()) {
+ seg->sortOutputSections();
+ for (OutputSection *osec : seg->getSections()) {
// Now that the output sections are sorted, assign the final
// output section indices.
if (!osec->isHidden())
osec->index = ++sectionIndex;
+ if (!firstTLVDataSection && isThreadLocalData(osec->flags))
+ firstTLVDataSection = osec;
if (!isecPriorities.empty()) {
- if (auto *merged = dyn_cast<MergedOutputSection>(osec)) {
+ if (auto *merged = dyn_cast<ConcatOutputSection>(osec)) {
llvm::stable_sort(merged->inputs,
[&](InputSection *a, InputSection *b) {
return isecPriorities[a] > isecPriorities[b];
}
}
-void Writer::createOutputSections() {
+template <class LP> void Writer::createOutputSections() {
+ TimeTraceScope timeScope("Create output sections");
// First, create hidden sections
- headerSection = make<MachHeaderSection>();
- lazyBindingSection = make<LazyBindingSection>();
stringTableSection = make<StringTableSection>();
- symtabSection = make<SymtabSection>(*stringTableSection);
- exportSection = make<ExportSection>();
+ symtabSection = makeSymtabSection<LP>(*stringTableSection);
+ indirectSymtabSection = make<IndirectSymtabSection>();
+ if (config->adhocCodesign)
+ codeSignatureSection = make<CodeSignatureSection>();
+ if (config->emitDataInCodeInfo)
+ dataInCodeSection = make<DataInCodeSection>();
+ if (config->emitFunctionStarts)
+ functionStartsSection = make<FunctionStartsSection>();
+ if (config->emitBitcodeBundle)
+ make<BitcodeBundleSection>();
switch (config->outputType) {
case MH_EXECUTE:
make<PageZeroSection>();
break;
case MH_DYLIB:
+ case MH_BUNDLE:
break;
default:
llvm_unreachable("unhandled output file type");
}
- // Then merge input sections into output sections.
- MapVector<std::pair<StringRef, StringRef>, MergedOutputSection *>
- mergedOutputSections;
- for (InputSection *isec : inputSections) {
- MergedOutputSection *&osec =
- mergedOutputSections[{isec->segname, isec->name}];
- if (osec == nullptr)
- osec = make<MergedOutputSection>(isec->name);
- osec->mergeInput(isec);
+ // Then add input sections to output sections.
+ for (ConcatInputSection *isec : inputSections) {
+ if (isec->shouldOmitFromOutput())
+ continue;
+ ConcatOutputSection *osec = cast<ConcatOutputSection>(isec->parent);
+ osec->addInput(isec);
+ osec->inputOrder =
+ std::min(osec->inputOrder, static_cast<int>(isec->outSecOff));
}
- for (const auto &it : mergedOutputSections) {
+ // Once all the inputs are added, we can finalize the output section
+ // properties and create the corresponding output segments.
+ for (const auto &it : concatOutputSections) {
StringRef segname = it.first.first;
- MergedOutputSection *osec = it.second;
- getOrCreateOutputSegment(segname)->addOutputSection(osec);
+ ConcatOutputSection *osec = it.second;
+ assert(segname != segment_names::ld);
+ if (osec->isNeeded())
+ getOrCreateOutputSegment(segname)->addOutputSection(osec);
}
for (SyntheticSection *ssec : syntheticSections) {
- auto it = mergedOutputSections.find({ssec->segname, ssec->name});
- if (it == mergedOutputSections.end()) {
- if (ssec->isNeeded())
+ auto it = concatOutputSections.find({ssec->segname, ssec->name});
+ if (ssec->isNeeded()) {
+ if (it == concatOutputSections.end()) {
getOrCreateOutputSegment(ssec->segname)->addOutputSection(ssec);
- } else {
- error("section from " + it->second->firstSection()->file->getName() +
- " conflicts with synthetic section " + ssec->segname + "," +
- ssec->name);
+ } else {
+ fatal("section from " +
+ toString(it->second->firstSection()->getFile()) +
+ " conflicts with synthetic section " + ssec->segname + "," +
+ ssec->name);
+ }
}
}
+
+ // dyld requires __LINKEDIT segment to always exist (even if empty).
+ linkEditSegment = getOrCreateOutputSegment(segment_names::linkEdit);
+}
+
+void Writer::finalizeAddresses() {
+ TimeTraceScope timeScope("Finalize addresses");
+ uint64_t pageSize = target->getPageSize();
+ // Ensure that segments (and the sections they contain) are allocated
+ // addresses in ascending order, which dyld requires.
+ //
+ // Note that at this point, __LINKEDIT sections are empty, but we need to
+ // determine addresses of other segments/sections before generating its
+ // contents.
+ for (OutputSegment *seg : outputSegments) {
+ if (seg == linkEditSegment)
+ continue;
+ seg->addr = addr;
+ assignAddresses(seg);
+ // codesign / libstuff checks for segment ordering by verifying that
+ // `fileOff + fileSize == next segment fileOff`. So we call alignTo() before
+ // (instead of after) computing fileSize to ensure that the segments are
+ // contiguous. We handle addr / vmSize similarly for the same reason.
+ fileOff = alignTo(fileOff, pageSize);
+ addr = alignTo(addr, pageSize);
+ seg->vmSize = addr - seg->addr;
+ seg->fileSize = fileOff - seg->fileOff;
+ seg->assignAddressesToStartEndSymbols();
+ }
+}
+
+void Writer::finalizeLinkEditSegment() {
+ TimeTraceScope timeScope("Finalize __LINKEDIT segment");
+ // Fill __LINKEDIT contents.
+ std::vector<LinkEditSection *> linkEditSections{
+ in.rebase,
+ in.binding,
+ in.weakBinding,
+ in.lazyBinding,
+ in.exports,
+ symtabSection,
+ indirectSymtabSection,
+ dataInCodeSection,
+ functionStartsSection,
+ };
+ parallelForEach(linkEditSections, [](LinkEditSection *osec) {
+ if (osec)
+ osec->finalizeContents();
+ });
+
+ // Now that __LINKEDIT is filled out, do a proper calculation of its
+ // addresses and offsets.
+ linkEditSegment->addr = addr;
+ assignAddresses(linkEditSegment);
+ // No need to page-align fileOff / addr here since this is the last segment.
+ linkEditSegment->vmSize = addr - linkEditSegment->addr;
+ linkEditSegment->fileSize = fileOff - linkEditSegment->fileOff;
}
void Writer::assignAddresses(OutputSegment *seg) {
- addr = alignTo(addr, PageSize);
- fileOff = alignTo(fileOff, PageSize);
seg->fileOff = fileOff;
- for (auto *osec : seg->getSections()) {
+ for (OutputSection *osec : seg->getSections()) {
+ if (!osec->isNeeded())
+ continue;
addr = alignTo(addr, osec->align);
fileOff = alignTo(fileOff, osec->align);
osec->addr = addr;
osec->fileOff = isZeroFill(osec->flags) ? 0 : fileOff;
osec->finalize();
+ osec->assignAddressesToStartEndSymbols();
addr += osec->getSize();
fileOff += osec->getFileSize();
void Writer::writeSections() {
uint8_t *buf = buffer->getBufferStart();
- for (OutputSegment *seg : outputSegments)
- for (OutputSection *osec : seg->getSections())
+ for (const OutputSegment *seg : outputSegments)
+ for (const OutputSection *osec : seg->getSections())
osec->writeTo(buf + osec->fileOff);
}
-void Writer::run() {
- // dyld requires __LINKEDIT segment to always exist (even if empty).
- OutputSegment *linkEditSegment =
- getOrCreateOutputSegment(segment_names::linkEdit);
-
- scanRelocations();
- if (in.stubHelper->isNeeded())
- in.stubHelper->setup();
-
- // Sort and assign sections to their respective segments. No more sections nor
- // segments may be created after these methods run.
- createOutputSections();
- sortSegmentsAndSections();
-
- createLoadCommands();
-
- // Ensure that segments (and the sections they contain) are allocated
- // addresses in ascending order, which dyld requires.
- //
- // Note that at this point, __LINKEDIT sections are empty, but we need to
- // determine addresses of other segments/sections before generating its
- // contents.
- for (OutputSegment *seg : outputSegments)
- if (seg != linkEditSegment)
- assignAddresses(seg);
-
- // Fill __LINKEDIT contents.
- in.binding->finalizeContents();
- lazyBindingSection->finalizeContents();
- exportSection->finalizeContents();
- symtabSection->finalizeContents();
+// In order to utilize multiple cores, we first split the buffer into chunks,
+// compute a hash for each chunk, and then compute a hash value of the hash
+// values.
+void Writer::writeUuid() {
+ TimeTraceScope timeScope("Computing UUID");
+ ArrayRef<uint8_t> data{buffer->getBufferStart(), buffer->getBufferEnd()};
+ unsigned chunkCount = parallel::strategy.compute_thread_count() * 10;
+ // Round-up integer division
+ size_t chunkSize = (data.size() + chunkCount - 1) / chunkCount;
+ std::vector<ArrayRef<uint8_t>> chunks = split(data, chunkSize);
+ std::vector<uint64_t> hashes(chunks.size());
+ parallelForEachN(0, chunks.size(),
+ [&](size_t i) { hashes[i] = xxHash64(chunks[i]); });
+ uint64_t digest = xxHash64({reinterpret_cast<uint8_t *>(hashes.data()),
+ hashes.size() * sizeof(uint64_t)});
+ uuidCommand->writeUuid(digest);
+}
- // Now that __LINKEDIT is filled out, do a proper calculation of its
- // addresses and offsets.
- assignAddresses(linkEditSegment);
+void Writer::writeCodeSignature() {
+ if (codeSignatureSection)
+ codeSignatureSection->writeHashes(buffer->getBufferStart());
+}
+void Writer::writeOutputFile() {
+ TimeTraceScope timeScope("Write output file");
openFile();
if (errorCount())
return;
-
writeSections();
+ writeUuid();
+ writeCodeSignature();
if (auto e = buffer->commit())
error("failed to write to the output file: " + toString(std::move(e)));
}
-void macho::writeResult() { Writer().run(); }
+template <class LP> void Writer::run() {
+ treatSpecialUndefineds();
+ if (config->entry && !isa<Undefined>(config->entry))
+ prepareBranchTarget(config->entry);
+ scanRelocations();
+ if (in.stubHelper->isNeeded())
+ in.stubHelper->setup();
+ scanSymbols();
+ createOutputSections<LP>();
+ // After this point, we create no new segments; HOWEVER, we might
+ // yet create branch-range extension thunks for architectures whose
+ // hardware call instructions have limited range, e.g., ARM(64).
+ // The thunks are created as InputSections interspersed among
+  // the ordinary __TEXT,__text InputSections.
+ sortSegmentsAndSections();
+ createLoadCommands<LP>();
+ finalizeAddresses();
+ finalizeLinkEditSegment();
+ writeMapFile();
+ writeOutputFile();
+}
+
+template <class LP> void macho::writeResult() { Writer().run<LP>(); }
void macho::createSyntheticSections() {
+ in.header = make<MachHeaderSection>();
+ if (config->dedupLiterals) {
+ in.cStringSection = make<DeduplicatedCStringSection>();
+ } else {
+ in.cStringSection = make<CStringSection>();
+ }
+ in.wordLiteralSection =
+ config->dedupLiterals ? make<WordLiteralSection>() : nullptr;
+ in.rebase = make<RebaseSection>();
in.binding = make<BindingSection>();
+ in.weakBinding = make<WeakBindingSection>();
+ in.lazyBinding = make<LazyBindingSection>();
+ in.exports = make<ExportSection>();
in.got = make<GotSection>();
+ in.tlvPointers = make<TlvPointerSection>();
in.lazyPointers = make<LazyPointerSection>();
in.stubs = make<StubsSection>();
in.stubHelper = make<StubHelperSection>();
- in.imageLoaderCache = make<ImageLoaderCacheSection>();
+ in.unwindInfo = makeUnwindInfoSection();
+
+ // This section contains space for just a single word, and will be used by
+ // dyld to cache an address to the image loader it uses.
+ uint8_t *arr = bAlloc.Allocate<uint8_t>(target->wordSize);
+ memset(arr, 0, target->wordSize);
+ in.imageLoaderCache = make<ConcatInputSection>(
+ segment_names::data, section_names::data, /*file=*/nullptr,
+ ArrayRef<uint8_t>{arr, target->wordSize},
+ /*align=*/target->wordSize, /*flags=*/S_REGULAR);
+ // References from dyld are not visible to us, so ensure this section is
+ // always treated as live.
+ in.imageLoaderCache->live = true;
}
+
+OutputSection *macho::firstTLVDataSection = nullptr;
+
+template void macho::writeResult<LP64>();
+template void macho::writeResult<ILP32>();
namespace lld {
namespace macho {
+class OutputSection;
+class InputSection;
+class Symbol;
+
class LoadCommand {
public:
virtual ~LoadCommand() = default;
virtual void writeTo(uint8_t *buf) const = 0;
};
-void writeResult();
+template <class LP> void writeResult();
void createSyntheticSections();
+// Add bindings for symbols that need weak or non-lazy bindings.
+void addNonLazyBindingEntries(const Symbol *, const InputSection *,
+ uint64_t offset, int64_t addend = 0);
+
+extern OutputSection *firstTLVDataSection;
+
} // namespace macho
} // namespace lld
tablegen(LLVM Options.inc -gen-opt-parser-defs)
add_public_tablegen_target(MinGWOptionsTableGen)
-if(NOT LLD_BUILT_STANDALONE)
- set(tablegen_deps intrinsics_gen)
-endif()
-
add_lld_library(lldMinGW
Driver.cpp
DEPENDS
MinGWOptionsTableGen
- ${tablegen_deps}
+ intrinsics_gen
)
} // namespace
static void printHelp(const char *argv0) {
- MinGWOptTable().PrintHelp(
+ MinGWOptTable().printHelp(
lld::outs(), (std::string(argv0) + " [options] file...").c_str(), "lld",
false /*ShowHidden*/, true /*ShowAllAliases*/);
lld::outs() << "\n";
if (!bStatic) {
if (Optional<std::string> s = findFile(dir, name + ".lib"))
return *s;
- if (Optional<std::string> s = findFile(dir, "lib" + name + ".dll")) {
- error("lld doesn't support linking directly against " + *s +
- ", use an import library");
- return "";
- }
- if (Optional<std::string> s = findFile(dir, name + ".dll")) {
- error("lld doesn't support linking directly against " + *s +
- ", use an import library");
- return "";
- }
+ if (Optional<std::string> s = findFile(dir, "lib" + name + ".dll"))
+ return *s;
+ if (Optional<std::string> s = findFile(dir, name + ".dll"))
+ return *s;
}
}
error("unable to find library -l" + name);
if (args.hasArg(OPT_major_os_version, OPT_minor_os_version,
OPT_major_subsystem_version, OPT_minor_subsystem_version)) {
- auto *majOSVer = args.getLastArg(OPT_major_os_version);
- auto *minOSVer = args.getLastArg(OPT_minor_os_version);
- auto *majSubSysVer = args.getLastArg(OPT_major_subsystem_version);
- auto *minSubSysVer = args.getLastArg(OPT_minor_subsystem_version);
- if (majOSVer && majSubSysVer &&
- StringRef(majOSVer->getValue()) != StringRef(majSubSysVer->getValue()))
- warn("--major-os-version and --major-subsystem-version set to differing "
- "versions, not supported");
- if (minOSVer && minSubSysVer &&
- StringRef(minOSVer->getValue()) != StringRef(minSubSysVer->getValue()))
- warn("--minor-os-version and --minor-subsystem-version set to differing "
- "versions, not supported");
+ StringRef majOSVer = args.getLastArgValue(OPT_major_os_version, "6");
+ StringRef minOSVer = args.getLastArgValue(OPT_minor_os_version, "0");
+ StringRef majSubSysVer = "6";
+ StringRef minSubSysVer = "0";
+ StringRef subSysName = "default";
+ StringRef subSysVer;
+ // Iterate over --{major,minor}-subsystem-version and --subsystem, and pick
+ // the version number components from the last one of them that specifies
+ // a version.
+ for (auto *a : args.filtered(OPT_major_subsystem_version,
+ OPT_minor_subsystem_version, OPT_subs)) {
+ switch (a->getOption().getID()) {
+ case OPT_major_subsystem_version:
+ majSubSysVer = a->getValue();
+ break;
+ case OPT_minor_subsystem_version:
+ minSubSysVer = a->getValue();
+ break;
+ case OPT_subs:
+ std::tie(subSysName, subSysVer) = StringRef(a->getValue()).split(':');
+ if (!subSysVer.empty()) {
+ if (subSysVer.contains('.'))
+ std::tie(majSubSysVer, minSubSysVer) = subSysVer.split('.');
+ else
+ majSubSysVer = subSysVer;
+ }
+ break;
+ }
+ }
+ add("-osversion:" + majOSVer + "." + minOSVer);
+ add("-subsystem:" + subSysName + "," + majSubSysVer + "." + minSubSysVer);
+ } else if (args.hasArg(OPT_subs)) {
StringRef subSys = args.getLastArgValue(OPT_subs, "default");
- StringRef major = majOSVer ? majOSVer->getValue()
- : majSubSysVer ? majSubSysVer->getValue() : "6";
- StringRef minor = minOSVer ? minOSVer->getValue()
- : minSubSysVer ? minSubSysVer->getValue() : "";
- StringRef sep = minor.empty() ? "" : ".";
- add("-subsystem:" + subSys + "," + major + sep + minor);
- } else if (auto *a = args.getLastArg(OPT_subs)) {
- add("-subsystem:" + StringRef(a->getValue()));
+ StringRef subSysName, subSysVer;
+ std::tie(subSysName, subSysVer) = subSys.split(':');
+ StringRef sep = subSysVer.empty() ? "" : ",";
+ add("-subsystem:" + subSysName + sep + subSysVer);
}
if (auto *a = args.getLastArg(OPT_out_implib))
add("-debug:dwarf");
}
+ if (args.hasFlag(OPT_fatal_warnings, OPT_no_fatal_warnings, false))
+ add("-WX");
+ else
+ add("-WX:no");
+
+ if (args.hasFlag(OPT_enable_stdcall_fixup, OPT_disable_stdcall_fixup, false))
+ add("-stdcall-fixup");
+ else if (args.hasArg(OPT_disable_stdcall_fixup))
+ add("-stdcall-fixup:no");
+
if (args.hasArg(OPT_shared))
add("-dll");
if (args.hasArg(OPT_verbose))
add("-kill-at");
if (args.hasArg(OPT_appcontainer))
add("-appcontainer");
- if (args.hasArg(OPT_no_seh))
+ if (args.hasFlag(OPT_no_seh, OPT_disable_no_seh, false))
add("-noseh");
if (args.getLastArgValue(OPT_m) != "thumb2pe" &&
- args.getLastArgValue(OPT_m) != "arm64pe" && !args.hasArg(OPT_dynamicbase))
+ args.getLastArgValue(OPT_m) != "arm64pe" &&
+ args.hasFlag(OPT_disable_dynamicbase, OPT_dynamicbase, false))
add("-dynamicbase:no");
+ if (args.hasFlag(OPT_disable_high_entropy_va, OPT_high_entropy_va, false))
+ add("-highentropyva:no");
+ if (args.hasFlag(OPT_disable_nxcompat, OPT_nxcompat, false))
+ add("-nxcompat:no");
+ if (args.hasFlag(OPT_disable_tsaware, OPT_tsaware, false))
+ add("-tsaware:no");
if (args.hasFlag(OPT_no_insert_timestamp, OPT_insert_timestamp, false))
add("-timestamp:0");
else
add("-opt:noref");
+ if (args.hasFlag(OPT_demangle, OPT_no_demangle, true))
+ add("-demangle");
+ else
+ add("-demangle:no");
+
if (args.hasFlag(OPT_enable_auto_import, OPT_disable_auto_import, true))
add("-auto-import");
else
else
add("-runtime-pseudo-reloc:no");
+ if (args.hasFlag(OPT_allow_multiple_definition,
+ OPT_no_allow_multiple_definition, false))
+ add("-force:multiple");
+
if (auto *a = args.getLastArg(OPT_icf)) {
StringRef s = a->getValue();
if (s == "all")
add("-includeoptional:" + StringRef(a->getValue()));
for (auto *a : args.filtered(OPT_delayload))
add("-delayload:" + StringRef(a->getValue()));
+ for (auto *a : args.filtered(OPT_wrap))
+ add("-wrap:" + StringRef(a->getValue()));
std::vector<StringRef> searchPaths;
for (auto *a : args.filtered(OPT_L)) {
for (auto *a : args) {
switch (a->getOption().getID()) {
case OPT_INPUT:
- if (StringRef(a->getValue()).endswith_lower(".def"))
+ if (StringRef(a->getValue()).endswith_insensitive(".def"))
add("-def:" + StringRef(a->getValue()));
else
add(prefix + StringRef(a->getValue()));
return false;
if (args.hasArg(OPT_verbose) || args.hasArg(OPT__HASH_HASH_HASH))
- lld::outs() << llvm::join(linkArgs, " ") << "\n";
+ lld::errs() << llvm::join(linkArgs, " ") << "\n";
if (args.hasArg(OPT__HASH_HASH_HASH))
return true;
std::vector<const char *> vec;
for (const std::string &s : linkArgs)
vec.push_back(s.c_str());
- return coff::link(vec, true, stdoutOS, stderrOS);
+ // Pass the actual binary name, to make error messages be printed with
+ // the right prefix.
+ vec[0] = argsArr[0];
+ return coff::link(vec, canExitEarly, stdoutOS, stderrOS);
}
HelpText<help>;
}
+multiclass EqNoHelp<string name> {
+ def NAME: Separate<["--", "-"], name>;
+ def NAME # _eq: Joined<["--", "-"], name # "=">, Alias<!cast<Separate>(NAME)>;
+}
+
+multiclass B<string name, string help1, string help2> {
+ def NAME: Flag<["--", "-"], name>, HelpText<help1>;
+ def no_ # NAME: Flag<["--", "-"], "no-" # name>, HelpText<help2>;
+}
+
+multiclass B_disable<string name, string help1, string help2> {
+ def NAME: Flag<["--", "-"], name>, HelpText<help1>;
+ def disable_ # NAME: Flag<["--", "-"], "disable-" # name>, HelpText<help2>;
+}
+
def L: JoinedOrSeparate<["-"], "L">, MetaVarName<"<dir>">,
HelpText<"Add a directory to the library search path">;
+defm allow_multiple_definition: B<"allow-multiple-definition",
+ "Allow multiple definitions",
+ "Do not allow multiple definitions (default)">;
def Bdynamic: F<"Bdynamic">, HelpText<"Link against shared libraries">;
def Bstatic: F<"Bstatic">, HelpText<"Do not link against shared libraries">;
+defm demangle: B<"demangle",
+ "Demangle symbol names (default)",
+ "Do not demangle symbol names">;
def disable_auto_import: F<"disable-auto-import">,
HelpText<"Don't automatically import data symbols from other DLLs without dllimport">;
def disable_runtime_pseudo_reloc: F<"disable-runtime-pseudo-reloc">,
HelpText<"Don't do automatic imports that require runtime fixups">;
-def dynamicbase: F<"dynamicbase">, HelpText<"Enable ASLR">;
+def disable_stdcall_fixup: F<"disable-stdcall-fixup">,
+ HelpText<"Don't resolve stdcall/fastcall/vectorcall to undecorated symbols">;
+defm dynamicbase: B_disable<"dynamicbase", "Enable ASLR", "Disable ASLR">;
def enable_auto_import: F<"enable-auto-import">,
HelpText<"Automatically import data symbols from other DLLs where needed">;
def enable_runtime_pseudo_reloc: F<"enable-runtime-pseudo-reloc">,
HelpText<"Allow automatic imports that require runtime fixups">;
+def enable_stdcall_fixup: F<"enable-stdcall-fixup">,
+ HelpText<"Resolve stdcall/fastcall/vectorcall to undecorated symbols without warnings">;
defm entry: Eq<"entry", "Name of entry point symbol">, MetaVarName<"<entry>">;
def exclude_all_symbols: F<"exclude-all-symbols">,
HelpText<"Don't automatically export any symbols">;
def export_all_symbols: F<"export-all-symbols">,
HelpText<"Export all symbols even if a def file or dllexport attributes are used">;
+defm fatal_warnings: B<"fatal-warnings",
+ "Treat warnings as errors",
+ "Do not treat warnings as errors (default)">;
defm file_alignment: Eq<"file-alignment", "Set file alignment">;
-def gc_sections: F<"gc-sections">, HelpText<"Remove unused sections">;
+defm gc_sections: B<"gc-sections",
+ "Remove unused sections",
+ "Don't remove unused sections">;
def help: F<"help">, HelpText<"Print option help">;
-def icf: J<"icf=">, HelpText<"Identical code folding">;
-def image_base: S<"image-base">, HelpText<"Base address of the program">;
-def insert_timestamp: F<"insert-timestamp">,
- HelpText<"Include PE header timestamp">;
+defm high_entropy_va: B_disable<"high-entropy-va",
+ "Set the 'high entropy VA' flag", "Don't set the 'high entropy VA' flag">;
+defm icf: Eq<"icf", "Identical code folding">;
+defm image_base: Eq<"image-base", "Base address of the program">;
+defm insert_timestamp: B<"insert-timestamp",
+ "Include PE header timestamp",
+ "Don't include PE header timestamp">;
def kill_at: F<"kill-at">, HelpText<"Remove @n from exported symbols">;
def l: JoinedOrSeparate<["-"], "l">, MetaVarName<"<libName>">,
HelpText<"Root name of library to use">;
"Set the OS and subsystem minor version">;
defm minor_subsystem_version: EqLong<"minor-subsystem-version",
"Set the OS and subsystem minor version">;
-def no_insert_timestamp: F<"no-insert-timestamp">,
- HelpText<"Don't include PE header timestamp">;
-def no_seh: F<"no-seh">, HelpText<"Set the 'no SEH' flag in the executable">;
-def no_whole_archive: F<"no-whole-archive">,
- HelpText<"No longer include all object files for following archives">;
+defm no_seh: B_disable<"no-seh",
+ "Set the 'no SEH' flag in the executable", "Don't set the 'no SEH' flag">;
+defm nxcompat: B_disable<"nxcompat",
+ "Set the 'nxcompat' flag in the executable", "Don't set the 'nxcompat' flag">;
def large_address_aware: Flag<["--"], "large-address-aware">,
HelpText<"Enable large addresses">;
-def no_gc_sections: F<"no-gc-sections">, HelpText<"Don't remove unused sections">;
def o: JoinedOrSeparate<["-"], "o">, MetaVarName<"<path>">,
HelpText<"Path to file to write output">;
defm out_implib: Eq<"out-implib", "Import library name">;
defm section_alignment: Eq<"section-alignment", "Set section alignment">;
def shared: F<"shared">, HelpText<"Build a shared object">;
defm subs: Eq<"subsystem", "Specify subsystem">;
-def stack: S<"stack">;
+defm stack: Eq<"stack", "Set size of the initial stack">;
def strip_all: F<"strip-all">,
HelpText<"Omit all symbol information from the output binary">;
def strip_debug: F<"strip-debug">,
HelpText<"Omit all debug information, but keep symbol information">;
-defm reproduce: Eq<"reproduce", "Write a tar file containing input files and command line options to reproduce link">;
+defm reproduce: Eq<"reproduce",
+ "Write a tar file containing input files and command line options to reproduce link">;
+defm require_defined: Eq<"require-defined",
+ "Force symbol to be added to symbol table as an undefined one">;
+defm tsaware: B_disable<"tsaware",
+ "Set the 'Terminal Server aware' flag", "Don't set the 'Terminal Server aware' flag">;
defm undefined: Eq<"undefined", "Include symbol in the link, if available">;
-def whole_archive: F<"whole-archive">,
- HelpText<"Include all object files for following archives">;
+defm whole_archive: B<"whole-archive",
+ "Include all object files for following archives",
+ "No longer include all object files for following archives">;
def v: Flag<["-"], "v">, HelpText<"Display the version number">;
def verbose: F<"verbose">, HelpText<"Verbose mode">;
def version: F<"version">, HelpText<"Display the version number and exit">;
-defm require_defined: Eq<"require-defined",
- "Force symbol to be added to symbol table as an undefined one">;
+defm wrap: Eq<"wrap", "Use wrapper functions for symbol">,
+ MetaVarName<"<symbol>">;
// LLD specific options
def _HASH_HASH_HASH : Flag<["-"], "###">,
HelpText<"Print (but do not run) the commands to run for this compilation">;
def appcontainer: F<"appcontainer">, HelpText<"Set the appcontainer flag in the executable">;
defm delayload: Eq<"delayload", "DLL to load only on demand">;
-def mllvm: S<"mllvm">;
+defm mllvm: EqNoHelp<"mllvm">;
defm pdb: Eq<"pdb", "Output PDB debug info file, chosen implicitly if the argument is empty">;
defm thinlto_cache_dir: EqLong<"thinlto-cache-dir",
"Path to ThinLTO cached object file directory">;
-def Xlink : J<"Xlink=">, MetaVarName<"<arg>">,
- HelpText<"Pass <arg> to the COFF linker">;
+defm Xlink : Eq<"Xlink", "Pass <arg> to the COFF linker">, MetaVarName<"<arg>">;
// Alias
+def alias_Bdynamic_call_shared: Flag<["-"], "call_shared">, Alias<Bdynamic>;
+def alias_Bdynamic_dy: Flag<["-"], "dy">, Alias<Bdynamic>;
+def alias_Bstatic_dn: Flag<["-"], "dn">, Alias<Bstatic>;
+def alias_Bstatic_non_shared: Flag<["-"], "non_shared">, Alias<Bstatic>;
+def alias_Bstatic_static: Flag<["-"], "static">, Alias<Bstatic>;
def alias_entry_e: JoinedOrSeparate<["-"], "e">, Alias<entry>;
+def alias_no_dynamicbase: F<"no-dynamicbase">, Alias<disable_dynamicbase>;
def alias_strip_s: Flag<["-"], "s">, Alias<strip_all>;
def alias_strip_S: Flag<["-"], "S">, Alias<strip_debug>;
def alias_undefined_u: JoinedOrSeparate<["-"], "u">, Alias<undefined>;
def: F<"enable-auto-image-base">;
def: F<"end-group">;
def: Flag<["--"], "full-shutdown">;
-def: F<"high-entropy-va">;
-def: S<"major-image-version">;
-def: S<"minor-image-version">;
-def: F<"nxcompat">;
+defm: EqNoHelp<"major-image-version">;
+defm: EqNoHelp<"minor-image-version">;
+def: F<"no-undefined">;
def: F<"pic-executable">;
-def: S<"plugin">;
-def: J<"plugin=">;
-def: S<"plugin-opt">;
-def: J<"plugin-opt=">;
-def: J<"sysroot">;
+defm: EqNoHelp<"plugin">;
+defm: EqNoHelp<"plugin-opt">;
+defm: EqNoHelp<"sysroot">;
def: F<"start-group">;
-def: F<"tsaware">;
It is hosted at https://s3-us-west-2.amazonaws.com/linker-tests/lld-speed-test.tar.xz
-The current sha256 is 10eec685463d5a8bbf08d77f4ca96282161d396c65bd97dc99dbde644a31610f.
+The current sha256 is `10eec685463d5a8bbf08d77f4ca96282161d396c65bd97dc99dbde644a31610f`.
+include(LLVMDistributionSupport)
+
macro(add_lld_library name)
cmake_parse_arguments(ARG
"SHARED"
set_target_properties(${name} PROPERTIES FOLDER "lld libraries")
if (NOT LLVM_INSTALL_TOOLCHAIN_ONLY)
- if(${name} IN_LIST LLVM_DISTRIBUTION_COMPONENTS OR
- NOT LLVM_DISTRIBUTION_COMPONENTS)
- set(export_to_lldtargets EXPORT LLDTargets)
- set_property(GLOBAL PROPERTY LLD_HAS_EXPORTS True)
- endif()
-
+ get_target_export_arg(${name} LLD export_to_lldtargets)
install(TARGETS ${name}
COMPONENT ${name}
${export_to_lldtargets}
add_lld_executable(${name} ${ARGN})
if (LLD_BUILD_TOOLS)
- if(${name} IN_LIST LLVM_DISTRIBUTION_COMPONENTS OR
- NOT LLVM_DISTRIBUTION_COMPONENTS)
- set(export_to_lldtargets EXPORT LLDTargets)
- set_property(GLOBAL PROPERTY LLD_HAS_EXPORTS True)
- endif()
-
+ get_target_export_arg(${name} LLD export_to_lldtargets)
install(TARGETS ${name}
${export_to_lldtargets}
RUNTIME DESTINATION bin
# Generate LLDConfig.cmake for the build tree.
set(LLD_CONFIG_CMAKE_DIR "${lld_cmake_builddir}")
set(LLD_CONFIG_LLVM_CMAKE_DIR "${llvm_cmake_builddir}")
-set(LLD_CONFIG_EXPORTS_FILE "${lld_cmake_builddir}/LLDTargets.cmake")
+set(LLD_CONFIG_INCLUDE_EXPORTS "include(\"${lld_cmake_builddir}/LLDTargets.cmake\")")
set(LLD_CONFIG_INCLUDE_DIRS
"${LLD_SOURCE_DIR}/include"
"${LLD_BINARY_DIR}/include"
@ONLY)
set(LLD_CONFIG_CMAKE_DIR)
set(LLD_CONFIG_LLVM_CMAKE_DIR)
-set(LLD_CONFIG_EXPORTS_FILE)
# Generate LLDConfig.cmake for the install tree.
set(LLD_CONFIG_CODE "
endforeach(p)
set(LLD_CONFIG_CMAKE_DIR "\${LLD_INSTALL_PREFIX}/${LLD_INSTALL_PACKAGE_DIR}")
set(LLD_CONFIG_LLVM_CMAKE_DIR "\${LLD_INSTALL_PREFIX}/${LLVM_INSTALL_PACKAGE_DIR}")
-set(LLD_CONFIG_EXPORTS_FILE "\${LLD_CMAKE_DIR}/LLDTargets.cmake")
+get_config_exports_includes(LLD LLD_CONFIG_INCLUDE_EXPORTS)
set(LLD_CONFIG_INCLUDE_DIRS "\${LLD_INSTALL_PREFIX}/include")
configure_file(
${CMAKE_CURRENT_SOURCE_DIR}/LLDConfig.cmake.in
@ONLY)
set(LLD_CONFIG_CODE)
set(LLD_CONFIG_CMAKE_DIR)
-set(LLD_CONFIG_EXPORTS_FILE)
if (NOT LLVM_INSTALL_TOOLCHAIN_ONLY)
- get_property(lld_has_exports GLOBAL PROPERTY LLD_HAS_EXPORTS)
- if(lld_has_exports)
- install(EXPORT LLDTargets DESTINATION ${LLD_INSTALL_PACKAGE_DIR}
- COMPONENT lld-cmake-exports)
- endif()
+ install_distribution_exports(LLD)
install(FILES
${CMAKE_CURRENT_BINARY_DIR}/CMakeFiles/LLDConfig.cmake
set(LLD_INCLUDE_DIRS "@LLD_CONFIG_INCLUDE_DIRS@")
# Provide all our library targets to users.
-include("@LLD_CONFIG_EXPORTS_FILE@")
+@LLD_CONFIG_INCLUDE_EXPORTS@
The ``st_size`` field is set to 0.
+SECTIONS command
+~~~~~~~~~~~~~~~~
+
+A ``SECTIONS`` command looks like:
+
+::
+
+ SECTIONS {
+ section-command
+ section-command
+ ...
+ } [INSERT [AFTER|BEFORE] anchor_section;]
+
+Each section-command can be a symbol assignment, an output section description,
+or an overlay description.
+
+When the ``INSERT`` keyword is present, the ``SECTIONS`` command describes some
+output sections which should be inserted after or before the specified anchor
+section. The insertion occurs after input sections have been mapped to output
+sections but before orphan sections have been processed.
+
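+For example, the following script (the section names are purely illustrative)
+places an output section ``.init.data`` right after ``.data`` while leaving all
+other placement decisions to the rules described below:
+
+::
+
+  SECTIONS {
+    .init.data : { KEEP(*(.init.data)) }
+  } INSERT AFTER .data;
+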
+In the case where no linker script has been provided or every ``SECTIONS``
+command is followed by ``INSERT``, LLD applies built-in rules which are similar
+to GNU ld's internal linker scripts.
+
+- Align the first section in a ``PT_LOAD`` segment according to ``-z noseparate-code``,
+ ``-z separate-code``, or ``-z separate-loadable-segments``
+- Define ``__bss_start``, ``end``, ``_end``, ``etext``, ``_etext``, ``edata``, ``_edata``
+- Sort ``.ctors.*``/``.dtors.*``/``.init_array.*``/``.fini_array.*`` and PowerPC64 specific ``.toc``
+- Place input ``.text.*`` into output ``.text``, and handle certain variants
+  (``.text.hot.``, ``.text.unknown.``, ``.text.unlikely.``, etc.) in the presence of
+ ``-z keep-text-section-prefix``.
+
Output section description
~~~~~~~~~~~~~~~~~~~~~~~~~~
section have the same memory regions, the difference between the LMA and the
VMA is computed to be the same as the previous difference.
- Otherwise, the LMA is set to the VMA.
+
+Overwrite sections
+~~~~~~~~~~~~~~~~~~
+
+An ``OVERWRITE_SECTIONS`` command looks like:
+
+::
+
+ OVERWRITE_SECTIONS {
+ output-section-description
+ output-section-description
+ ...
+ }
+
+Unlike a ``SECTIONS`` command, ``OVERWRITE_SECTIONS`` does not specify a
+section order or suppress the built-in rules.
+
+If an output section described here also appears in a ``SECTIONS`` command,
+the ``OVERWRITE_SECTIONS`` description wins; otherwise, the output section
+will be added somewhere following the usual orphan section placement rules.
+
+If an output section described here also appears in an ``INSERT
+[AFTER|BEFORE]`` command, the description in the ``OVERWRITE_SECTIONS``
+command is used while the insert command still applies (possibly after orphan
+section placement). It is recommended to leave the braces empty (i.e.
+``section : {}``) in the insert command, because its description will be
+ignored anyway.
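+
+As a purely illustrative sketch, the following command fixes the internal
+layout of an output section ``.foo`` (a made-up name) while leaving its
+placement, and that of every other output section, to a ``SECTIONS`` command or
+to the built-in rules:
+
+::
+
+  OVERWRITE_SECTIONS {
+    .foo : { KEEP(*(.foo.init)) *(.foo) }
+  }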
--- /dev/null
+--warn-backrefs
+===============
+
+``--warn-backrefs`` gives a warning when an undefined symbol reference is
+resolved by a definition in an archive to the left of it on the command line.
+
+A linker such as GNU ld makes a single pass over the input files from left to
+right, maintaining the set of undefined symbol references from the files loaded
+so far. When encountering an archive or an object file surrounded by
+``--start-lib`` and ``--end-lib``, that archive will be searched for resolving
+symbol definitions; this may result in input files being loaded, updating the
+set of undefined symbol references. When all resolving definitions have been
+loaded from the archive, the linker moves on to the next file and will not
+return to it. This means that an input file to the right of an archive cannot
+have an undefined symbol resolved by an archive to the left of it. For example:
+
+ ld def.a ref.o
+
+will result in an ``undefined reference`` error. If there are no cyclic
+references, the archives can be ordered in such a way that there are no
+backward references. If there are cyclic references then the ``--start-group``
+and ``--end-group`` options can be used, or the same archive can be placed on
+the command line twice.
+
+LLD remembers the symbol table of archives that it has previously seen, so if
+there is a reference from an input file to the right of an archive, LLD will
+still search that archive for resolving any undefined references. This means
+that an archive only needs to be included once on the command line and the
+``--start-group`` and ``--end-group`` options are redundant.
+
+A consequence of the differing archive searching semantics is that the same
+linker command line can result in different outcomes. A link may succeed with
+LLD but fail with GNU ld, or, even worse, both links may succeed while having
+selected different objects from different archives that both define the same
+symbols.
+
+The ``--warn-backrefs`` option provides information that helps identify cases
+where the archive selection of LLD and GNU ld may differ.
+
+ | % ld.lld --warn-backrefs ... -lB -lA
+ | ld.lld: warning: backward reference detected: system in A.a(a.o) refers to B.a(b.o)
+
+ | % ld.lld --warn-backrefs ... --start-lib B/b.o --end-lib --start-lib A/a.o --end-lib
+ | ld.lld: warning: backward reference detected: system in A/a.o refers to B/b.o
+
+ # To suppress the warning, you can specify --warn-backrefs-exclude=<glob> to match B/b.o or B.a(b.o)
+
+The ``--warn-backrefs`` option can also provide a check to enforce a
+topological order of archives, which can be useful to detect layering
+violations (albeit unable to catch all cases). There are two cases where GNU ld
+will result in an ``undefined reference`` error:
+
+* If adding the dependency does not form a cycle: conceptually ``A`` is a
+  higher-level library while ``B`` is at a lower level. When you are developing an
+ application ``P`` which depends on ``A``, but does not directly depend on
+ ``B``, your link may fail surprisingly with ``undefined symbol:
+ symbol_defined_in_B`` if the used/linked part of ``A`` happens to need some
+ components of ``B``. It is inappropriate for ``P`` to add a dependency on
+ ``B`` since ``P`` does not use ``B`` directly.
+* If adding the dependency forms a cycle, e.g. ``B->C->A ~> B``. ``A``
+ is supposed to be at the lowest level while ``B`` is supposed to be at the
+ highest level. When you are developing ``C_test`` testing ``C``, your link may
+ fail surprisingly with ``undefined symbol`` if there is somehow a dependency on
+ some components of ``B``. You could fix the issue by adding the missing
+ dependency (``B``), however, then every test (``A_test``, ``B_test``,
+ ``C_test``) will link against every library. This breaks the motivation
+ of splitting ``B``, ``C`` and ``A`` into separate libraries and makes binaries
+ unnecessarily large. Moreover, the layering violation makes lower-level
+ libraries (e.g. ``A``) vulnerable to changes to higher-level libraries (e.g.
+ ``B``, ``C``).
+
+Resolution:
+
+* Add a dependency from ``A`` to ``B``.
+* The reference may be unintended and can be removed.
+* The dependency may be intentionally omitted because there are multiple
+ libraries like ``B``. Consider linking ``B`` with object semantics by
+ surrounding it with ``--whole-archive`` and ``--no-whole-archive``.
+* In the case of a circular dependency, sometimes merging the libraries is the
+  best option.
+
+There are two cases like a library sandwich where GNU ld will select a
+different object.
+
+* ``A.a B A2.so``: ``A.a`` may be used as an interceptor (e.g. it provides some
+ optimized libc functions and ``A2`` is libc). ``B`` does not need to know
+  about ``A.a``, and ``A.a`` may be pulled into the link by another part of
+  the program. For linker portability, consider ``--whole-archive`` and
+ ``--no-whole-archive``.
+
+* ``A.a B A2.a``: similar to the above case but ``--warn-backrefs`` does not
+ flag the problem, because ``A2.a`` may be a replicate of ``A.a``, which is
+ redundant but benign. In some cases ``A.a`` and ``B`` should be surrounded by
+ a pair of ``--start-group`` and ``--end-group``. This is especially common
+ among system libraries (e.g. ``-lc __isnanl references -lm``, ``-lc
+ _IO_funlockfile references -lpthread``, ``-lc __gcc_personality_v0 references
+ -lgcc_eh``, and ``-lpthread _Unwind_GetCFA references -lunwind``).
+
+ In C++, this is likely an ODR violation. We probably need a dedicated option
+ for ODR detection.
========================
-lld 11.0.0 Release Notes
+lld 13.0.0 Release Notes
========================
.. contents::
:local:
+.. warning::
+ These are in-progress notes for the upcoming LLVM 13.0.0 release.
+ Release notes for previous releases can be found on
+ `the Download Page <https://releases.llvm.org/download.html>`_.
+
Introduction
============
-This document contains the release notes for the lld linker, release 11.0.0.
+This document contains the release notes for the lld linker, release 13.0.0.
Here we describe the status of lld, including major improvements
from the previous release. All lld releases may be downloaded
from the `LLVM releases web site <https://llvm.org/releases/>`_.
ELF Improvements
----------------
-* ``--lto-emit-asm`` is added to emit assembly output for debugging purposes.
- (`D77231 <https://reviews.llvm.org/D77231>`_)
-* ``--lto-whole-program-visibility`` is added to specify that classes have hidden LTO visibility in LTO and ThinLTO links of source files compiled with ``-fwhole-program-vtables``. See `LTOVisibility <https://clang.llvm.org/docs/LTOVisibility.html>`_ for details.
- (`D71913 <https://reviews.llvm.org/D71913>`_)
-* ``--print-archive-stats=`` is added to print the number of members and the number of fetched members for each archive.
- The feature is similar to GNU gold's ``--print-symbol-counts=``.
- (`D78983 <https://reviews.llvm.org/D78983>`_)
-* ``--shuffle-sections=`` is added to introduce randomization in the output to help reduce measurement bias and detect static initialization order fiasco.
- (`D74791 <https://reviews.llvm.org/D74791>`_)
- (`D74887 <https://reviews.llvm.org/D74887>`_)
-* ``--time-trace`` is added. It records a time trace file that can be viewed in
- chrome://tracing. The file can be specified with ``--time-trace-file``.
- Trace granularity can be specified with ``--time-trace-granularity``.
- (`D71060 <https://reviews.llvm.org/D71060>`_)
-* ``--thinlto-single-module`` is added to compile a subset of modules in ThinLTO for debugging purposes.
- (`D80406 <https://reviews.llvm.org/D80406>`_)
-* ``--unique`` is added to create separate output sections for orphan sections.
- (`D75536 <https://reviews.llvm.org/D75536>`_)
-* ``--warn-backrefs`` has been improved to emulate GNU ld's archive semantics.
- If a link passes with warnings from ``--warn-backrefs``, it almost assuredly
- means that the link will fail with GNU ld, or the symbol will get different
- resolutions in GNU ld and LLD. ``--warn-backrefs-exclude=`` is added to
- exclude known issues.
- (`D77522 <https://reviews.llvm.org/D77522>`_)
- (`D77630 <https://reviews.llvm.org/D77630>`_)
- (`D77512 <https://reviews.llvm.org/D77512>`_)
-* ``--no-relax`` is accepted but ignored. The Linux kernel's RISC-V port uses this option.
- (`D81359 <https://reviews.llvm.org/D81359>`_)
-* ``--rosegment`` (default) is added to complement ``--no-rosegment``.
- GNU gold from 2.35 onwards support both options.
-* ``--threads=N`` is added. The default uses all threads.
- (`D76885 <https://reviews.llvm.org/D76885>`_)
-* ``--wrap`` has better compatibility with GNU ld.
-* ``-z dead-reloc-in-nonalloc=<section_glob>=<value>`` is added to resolve an absolute relocation
- referencing a discarded symbol.
- (`D83264 <https://reviews.llvm.org/D83264>`_)
-* Changed tombstone values to (``.debug_ranges``/``.debug_loc``) 1 and (other ``.debug_*``) 0.
- A tombstone value is the computed value of a relocation referencing a discarded symbol (``--gc-sections``, ICF or ``/DISCARD/``).
- (`D84825 <https://reviews.llvm.org/D84825>`_)
- In the future many .debug_* may switch to 0xffffffff/0xffffffffffffffff as the tombstone value.
-* ``-z keep-text-section-prefix`` moves ``.text.unknown.*`` input sections to ``.text.unknown``.
-* ``-z rel`` and ``-z rela`` are added to select the REL/RELA format for dynamic relocations.
- The default is target specific and typically matches the form used in relocatable objects.
-* ``-z start-stop-visibility={default,protected,internal,hidden}`` is added.
- GNU ld/gold from 2.35 onwards support this option.
- (`D55682 <https://reviews.llvm.org/D55682>`_)
-* When ``-r`` or ``--emit-relocs`` is specified, the GNU ld compatible
- ``--discard-all`` and ``--discard-locals`` semantics are implemented.
- (`D77807 <https://reviews.llvm.org/D77807>`_)
-* ``--emit-relocs --strip-debug`` can now be used together.
- (`D74375 <https://reviews.llvm.org/D74375>`_)
-* ``--gdb-index`` supports DWARF v5.
- (`D79061 <https://reviews.llvm.org/D79061>`_)
- (`D85579 <https://reviews.llvm.org/D85579>`_)
-* ``-r`` allows SHT_X86_64_UNWIND to be merged into SHT_PROGBITS.
- This allows clang/GCC produced object files to be mixed together.
- (`D85785 <https://reviews.llvm.org/D85785>`_)
-* Better linker script support related to output section alignments and LMA regions.
- (`D74286 <https://reviews.llvm.org/D75724>`_)
- (`D74297 <https://reviews.llvm.org/D75724>`_)
- (`D75724 <https://reviews.llvm.org/D75724>`_)
- (`D81986 <https://reviews.llvm.org/D81986>`_)
-* In a input section description, the filename can be specified in double quotes.
- ``archive:file`` syntax is added.
- (`D72517 <https://reviews.llvm.org/D72517>`_)
- (`D75100 <https://reviews.llvm.org/D75100>`_)
-* Linker script specified empty ``(.init|.preinit|.fini)_array`` are allowed with RELRO.
- (`D76915 <https://reviews.llvm.org/D76915>`_)
-* ``INSERT AFTER`` and ``INSERT BEFORE`` work for orphan sections now.
- (`D74375 <https://reviews.llvm.org/D74375>`_)
-* ``INPUT_SECTION_FLAGS`` is supported in linker scripts.
- (`D72745 <https://reviews.llvm.org/D72745>`_)
-* ``DF_1_PIE`` is set for position-independent executables.
- (`D80872 <https://reviews.llvm.org/D80872>`_)
-* For a symbol assignment ``alias = aliasee;``, ``alias`` inherits the ``aliasee``'s symbol type.
- (`D86263 <https://reviews.llvm.org/D86263>`_)
-* ``SHT_GNU_verneed`` in shared objects are parsed, and versioned undefined symbols in shared objects are respected.
- (`D80059 <https://reviews.llvm.org/D80059>`_)
-* SHF_LINK_ORDER and non-SHF_LINK_ORDER sections can be mixed along as the SHF_LINK_ORDER components are contiguous.
- (`D77007 <https://reviews.llvm.org/D77007>`_)
-* An out-of-range relocation diagnostic mentions the referenced symbol now.
- (`D73518 <https://reviews.llvm.org/D73518>`_)
-* AArch64: ``R_AARCH64_PLT32`` is supported.
- (`D81184 <https://reviews.llvm.org/D81184>`_)
-* ARM: SBREL type relocations are supported.
- (`D74375 <https://reviews.llvm.org/D74375>`_)
-* ARM: ``R_ARM_ALU_PC_G0``, ``R_ARM_LDR_PC_G0``, ``R_ARM_THUMB_PC8`` and ``R_ARM_THUMB__PC12`` are supported.
- (`D75349 <https://reviews.llvm.org/D75349>`_)
- (`D77200 <https://reviews.llvm.org/D77200>`_)
-* ARM: various improvements to .ARM.exidx: ``/DISCARD/`` support for a subset, out-of-range handling, support for non monotonic section order.
- (`PR44824 <https://llvm.org/PR44824>`_)
-* AVR: many relocation types are supported.
- (`D78741 <https://reviews.llvm.org/D78741>`_)
-* Hexagon: General Dynamic and some other relocation types are supported.
-* PPC: Canonical PLT and range extension thunks with addends are supported.
- (`D73399 <https://reviews.llvm.org/D73399>`_)
- (`D73424 <https://reviews.llvm.org/D73424>`_)
- (`D75394 <https://reviews.llvm.org/D75394>`_)
-* PPC and PPC64: copy relocations.
- (`D73255 <https://reviews.llvm.org/D73255>`_)
-* PPC64: ``_savegpr[01]_{14..31}`` and ``_restgpr[01]_{14..31}`` can be synthesized.
- (`D79977 <https://reviews.llvm.org/D79977>`_)
-* PPC64: ``R_PPC64_GOT_PCREL34`` and ``R_PPC64_REL24_NOTOC`` are supported. r2 save stub is supported.
- (`D81948 <https://reviews.llvm.org/D81948>`_)
- (`D82950 <https://reviews.llvm.org/D82950>`_)
- (`D82816 <https://reviews.llvm.org/D82816>`_)
-* RISC-V: ``R_RISCV_IRELATIVE`` is supported.
- (`D74022 <https://reviews.llvm.org/D74022>`_)
-* RISC-V: ``R_RISCV_ALIGN`` is errored because GNU ld style linker relaxation is not supported.
- (`D71820 <https://reviews.llvm.org/D71820>`_)
-* SPARCv9: more relocation types are supported.
- (`D77672 <https://reviews.llvm.org/D77672>`_)
+* ``-z start-stop-gc`` is now supported and becomes the default.
+ (`D96914 <https://reviews.llvm.org/D96914>`_)
+ (`rG6d2d3bd0 <https://reviews.llvm.org/rG6d2d3bd0a61f5fc7fd9f61f48bc30e9ca77cc619>`_)
+* ``--shuffle-sections=<seed>`` has been changed to ``--shuffle-sections=<section-glob>=<seed>``.
+ If seed is -1, the matched input sections are reversed.
+ (`D98445 <https://reviews.llvm.org/D98445>`_)
+ (`D98679 <https://reviews.llvm.org/D98679>`_)
+* ``-Bsymbolic -Bsymbolic-functions`` has been changed to behave the same as ``-Bsymbolic-functions``. This matches GNU ld.
+ (`D102461 <https://reviews.llvm.org/D102461>`_)
+* ``-Bno-symbolic`` has been added.
+ (`D102461 <https://reviews.llvm.org/D102461>`_)
+* A new linker script command ``OVERWRITE_SECTIONS`` has been added.
+ (`D103303 <https://reviews.llvm.org/D103303>`_)
+* ``-Bsymbolic-non-weak-functions`` has been added as a ``STB_GLOBAL`` subset of ``-Bsymbolic-functions``.
+ (`D102570 <https://reviews.llvm.org/D102570>`_)
+* ``--no-allow-shlib-undefined`` has been improved to catch more cases.
+ (`D101996 <https://reviews.llvm.org/D101996>`_)
+* ``__rela_iplt_start`` is no longer defined for -pie/-shared.
+ This makes GCC/Clang ``-static-pie`` built executables work.
+ (`rG8cb78e99 <https://reviews.llvm.org/rf8cb78e99aae9aa3f89f7bfe667db2c5b767f21f>`_)
+* IRELATIVE/TLSDESC relocations now support ``-z rel``.
+ (`D100544 <https://reviews.llvm.org/D100544>`_)
+* Section groups with a zero flag are now supported.
+ This is used by ``comdat nodeduplicate`` in LLVM IR.
+ (`D96636 <https://reviews.llvm.org/D96636>`_)
+ (`D106228 <https://reviews.llvm.org/D106228>`_)
+* Defined symbols are now resolved before undefined symbols to stabilize the
+  behavior of archive member extraction.
+ (`D95985 <https://reviews.llvm.org/D95985>`_)
+* ``STB_WEAK`` symbols are now preferred over COMMON symbols as a fix to a ``--fortran-common`` regression.
+ (`D105945 <https://reviews.llvm.org/D105945>`_)
+* Absolute relocations referencing undef weak now produce dynamic relocations for -pie, matching GOT-generating relocations.
+ (`D105164 <https://reviews.llvm.org/D105164>`_)
+* Exported symbols are now communicated to the LTO library so as to make LTO
+ based whole program devirtualization (``-flto=thin -fwhole-program-vtables``)
+ work with shared objects.
+ (`D91583 <https://reviews.llvm.org/D91583>`_)
+* Whole program devirtualization now respects ``local:`` version nodes in a version script.
+ (`D98220 <https://reviews.llvm.org/D98220>`_)
+ (`D98686 <https://reviews.llvm.org/D98686>`_)
+* ``local:`` version nodes in a version script now apply to non-default version symbols.
+ (`D107234 <https://reviews.llvm.org/D107234>`_)
+* If an object file defines both ``foo`` and ``foo@v1``, now only ``foo@v1`` will be in the output.
+ (`D107235 <https://reviews.llvm.org/D107235>`_)
+* Copy relocations on non-default version symbols are now supported.
+ (`D107535 <https://reviews.llvm.org/D107535>`_)
+
+Linker script changes:
+
+* ``.``, ``$``, and double quotes can now be used in symbol names in expressions.
+ (`D98306 <https://reviews.llvm.org/D98306>`_)
+ (`rGe7a7ad13 <https://reviews.llvm.org/rGe7a7ad134fe182aad190cb3ebc441164470e92f5>`_)
+* Fixed value of ``.`` in the output section description of ``.tbss``.
+ (`D107288 <https://reviews.llvm.org/D107288>`_)
+* ``NOLOAD`` sections can now be placed in a ``PT_LOAD`` program header.
+ (`D103815 <https://reviews.llvm.org/D103815>`_)
+* ``OUTPUT_FORMAT(default, big, little)`` now consults ``-EL`` and ``-EB``.
+ (`D96214 <https://reviews.llvm.org/D96214>`_)
+* The ``OVERWRITE_SECTIONS`` command has been added.
+ (`D103303 <https://reviews.llvm.org/D103303>`_)
+* The section order within an ``INSERT AFTER`` command is now preserved.
+ (`D105158 <https://reviews.llvm.org/D105158>`_)
+
+Architecture specific changes:
+
+* aarch64_be is now supported.
+ (`D96188 <https://reviews.llvm.org/D96188>`_)
+* The AMDGPU port now supports ``--amdhsa-code-object-version=4`` object files.
+ (`D95811 <https://reviews.llvm.org/D95811>`_)
+* The ARM port now accounts for PC biases in range extension thunk creation.
+ (`D97550 <https://reviews.llvm.org/D97550>`_)
+* The AVR port now computes ``e_flags``.
+ (`D99754 <https://reviews.llvm.org/D99754>`_)
+* The Mips port now omits unneeded dynamic relocations for PIE non-preemptible TLS.
+ (`D101382 <https://reviews.llvm.org/D101382>`_)
+* The PowerPC port now supports ``--power10-stubs=no`` to omit Power10 instructions from call stubs.
+ (`D94625 <https://reviews.llvm.org/D94625>`_)
+* Fixed a thunk creation bug in the PowerPC port when TOC/NOTOC calls are mixed.
+ (`D101837 <https://reviews.llvm.org/D101837>`_)
+* The RISC-V port now resolves undefined weak relocations to the current location if not using PLT.
+ (`D103001 <https://reviews.llvm.org/D103001>`_)
+* ``R_386_GOTOFF`` relocations from .debug_info are now allowed to be compatible with GCC.
+ (`D95994 <https://reviews.llvm.org/D95994>`_)
+* ``gotEntrySize`` has been added to improve support for the ILP32 ABI of x86-64.
+ (`D102569 <https://reviews.llvm.org/D102569>`_)
Breaking changes
----------------
-* One-dash form of some long option (``--thinlto-*``, ``--lto-*``, ``--shuffle-sections=``)
- are no longer supported.
- (`D79371 <https://reviews.llvm.org/D79371>`_)
-* ``--export-dynamic-symbol`` no longer implies ``-u``.
- The new behavior matches GNU ld from binutils 2.35 onwards.
- (`D80487 <https://reviews.llvm.org/D80487>`_)
-* ARM: the default max page size was increased from 4096 to 65536.
- This increases compatibility with systems where a non standard page
- size was configured. This also is inline with GNU ld defaults.
- (`D77330 <https://reviews.llvm.org/D77330>`_)
-* ARM: for non-STT_FUNC symbols, Thumb interworking thunks are not added and BL/BLX are not substituted.
- (`D73474 <https://reviews.llvm.org/D73474>`_)
- (`D73542 <https://reviews.llvm.org/D73542>`_)
-* AArch64: ``--force-bti`` is renamed to ``-z force-bti`. ``--pac-plt`` is renamed to ``-z pac-plt``.
- This change is compatibile with GNU ld.
-* A readonly ``PT_LOAD`` is created in the presence of a ``SECTIONS`` command.
- The new behavior is consistent with the longstanding behavior in the absence of a SECTIONS command.
-* Orphan section names like ``.rodata.foo`` and ``.text.foo`` are not grouped into ``.rodata`` and ``.text`` in the presence of a ``SECTIONS`` command.
- The new behavior matches GNU ld.
- (`D75225 <https://reviews.llvm.org/D75225>`_)
-* ``--no-threads`` is removed. Use ``--threads=1`` instead. ``--threads`` (no-op) is removed.
+* ``--shuffle-sections=<seed>`` has been changed to ``--shuffle-sections=<section-glob>=<seed>``.
+ Specify ``*`` as ``<section-glob>`` to get the previous behavior.
COFF Improvements
-----------------
-* Fixed exporting symbols whose names contain a period (``.``), which was
- a regression in lld 7.
+* Avoid thread exhaustion when running on 32 bit Windows.
+ (`D105506 <https://reviews.llvm.org/D105506>`_)
+
+* Improve terminating the process on Windows while a thread pool might be
+ running. (`D102944 <https://reviews.llvm.org/D102944>`_)
MinGW Improvements
------------------
-* Implemented new options for disabling auto import and runtime pseudo
- relocations (``--disable-auto-import`` and
- ``--disable-runtime-pseudo-reloc``), the ``--no-seh`` flag and options
- for selecting file and section alignment (``--file-alignment`` and
- ``--section-alignment``).
+* Support for linking directly against a DLL without using an import library
+ has been added. (`D104530 <https://reviews.llvm.org/D104530>`_ and
+ `D104531 <https://reviews.llvm.org/D104531>`_)
+
+* Fix linking with ``--export-all-symbols`` in combination with
+ ``-function-sections``. (`D101522 <https://reviews.llvm.org/D101522>`_ and
+ `D101615 <https://reviews.llvm.org/D101615>`_)
+
+* Fix automatic export of symbols from LTO objects.
+ (`D101569 <https://reviews.llvm.org/D101569>`_)
+
+* Accept more spellings of some options.
+ (`D107237 <https://reviews.llvm.org/D107237>`_ and
+ `D107253 <https://reviews.llvm.org/D107253>`_)
+
+Mach-O Improvements
+-------------------
+
+The Mach-O backend is now able to link several large, real-world programs,
+though we are still working out the kinks.
+
+* arm64 is now supported as a target. (`D88629 <https://reviews.llvm.org/D88629>`_)
+* arm64_32 is now supported as a target. (`D99822 <https://reviews.llvm.org/D99822>`_)
+* Branch-range-extension thunks are now supported. (`D100818 <https://reviews.llvm.org/D100818>`_)
+* ``-dead_strip`` is now supported. (`D103324 <https://reviews.llvm.org/D103324>`_)
+* Support for identical code folding (``--icf=all``) has been added.
+ (`D103292 <https://reviews.llvm.org/D103292>`_)
+* Support for special ``$start`` and ``$end`` symbols for segment & sections has been
+ added. (`D106767 <https://reviews.llvm.org/D106767>`_, `D106629 <https://reviews.llvm.org/D106629>`_)
+* ``$ld$previous`` symbols are now supported. (`D103505 <https://reviews.llvm.org/D103505>`_)
+* ``$ld$install_name`` symbols are now supported. (`D103746 <https://reviews.llvm.org/D103746>`_)
+* ``__mh_*_header`` symbols are now supported. (`D97007 <https://reviews.llvm.org/D97007>`_)
+* LC_CODE_SIGNATURE is now supported. (`D96164 <https://reviews.llvm.org/D96164>`_)
+* LC_FUNCTION_STARTS is now supported. (`D97260 <https://reviews.llvm.org/D97260>`_)
+* LC_DATA_IN_CODE is now supported. (`D103006 <https://reviews.llvm.org/D103006>`_)
+* Bind opcodes are more compactly encoded. (`D106128 <https://reviews.llvm.org/D106128>`_,
+ `D105075 <https://reviews.llvm.org/D105075>`_)
+* LTO cache support has been added. (`D105922 <https://reviews.llvm.org/D105922>`_)
+* ``-application_extension`` is now supported. (`D105818 <https://reviews.llvm.org/D105818>`_)
+* ``-export_dynamic`` is now partially supported. (`D105482 <https://reviews.llvm.org/D105482>`_)
+* ``-arch_multiple`` is now supported. (`D105450 <https://reviews.llvm.org/D105450>`_)
+* ``-final_output`` is now supported. (`D105449 <https://reviews.llvm.org/D105449>`_)
+* ``-umbrella`` is now supported. (`D105448 <https://reviews.llvm.org/D105448>`_)
+* ``--print-dylib-search`` is now supported. (`D103985 <https://reviews.llvm.org/D103985>`_)
+* ``-force_load_swift_libs`` is now supported. (`D103709 <https://reviews.llvm.org/D103709>`_)
+* ``-reexport_framework``, ``-reexport_library``, ``-reexport-l`` are now supported.
+ (`D103497 <https://reviews.llvm.org/D103497>`_)
+* ``.weak_def_can_be_hidden`` is now supported. (`D101080 <https://reviews.llvm.org/D101080>`_)
+* ``-add_ast_path`` is now supported. (`D100076 <https://reviews.llvm.org/D100076>`_)
+* ``-segprot`` is now supported. (`D99389 <https://reviews.llvm.org/D99389>`_)
+* ``-dependency_info`` is now partially supported. (`D98559 <https://reviews.llvm.org/D98559>`_)
+* ``--time-trace`` is now supported. (`D98419 <https://reviews.llvm.org/D98419>`_)
+* ``-mark_dead_strippable_dylib`` is now supported. (`D98262 <https://reviews.llvm.org/D98262>`_)
+* ``-[un]exported_symbol[s_list]`` is now supported. (`D98223 <https://reviews.llvm.org/D98223>`_)
+* ``-flat_namespace`` is now supported. (`D97641 <https://reviews.llvm.org/D97641>`_)
+* ``-rename_section`` and ``-rename_segment`` are now supported. (`D97600 <https://reviews.llvm.org/D97600>`_)
+* ``-bundle_loader`` is now supported. (`D95913 <https://reviews.llvm.org/D95913>`_)
+* ``-map`` is now partially supported. (`D98323 <https://reviews.llvm.org/D98323>`_)
+
+There were numerous other bug-fixes as well.
+
+WebAssembly Improvements
+------------------------
+
Export all symbols (normally combined with --no-gc-sections)
+  Note that this will not export linker-generated mutable globals unless
+  the resulting binary already includes the 'mutable-globals' feature,
+  since that would otherwise create an invalid binary.
+
.. option:: --export-dynamic
When building an executable, export any non-hidden symbols. By default only
.. option:: --allow-undefined
- Allow undefined symbols in linked binary.
+  Allow undefined symbols in the linked binary. This is the legacy
+  flag which corresponds to ``--unresolved-symbols=ignore`` +
+ ``--import-undefined``.
+
+.. option:: --unresolved-symbols=<method>
+
+  This is a more full-featured version of ``--allow-undefined``.
+  The semantics of the different methods are as follows:
+
+ report-all:
+
+ Report all unresolved symbols. This is the default. Normally the linker
+ will generate an error message for each reported unresolved symbol but the
+ option ``--warn-unresolved-symbols`` can change this to a warning.
+
+ ignore-all:
+
+ Resolve all undefined symbols to zero. For data and function addresses
+ this is trivial. For direct function calls, the linker will generate a
+ trapping stub function in place of the undefined function.
.. option:: --import-memory
Import memory from the environment.
+.. option:: --import-undefined
+
+ Generate WebAssembly imports for undefined symbols, where possible. For
+ example, for function symbols this is always possible, but in general this
+ is not possible for undefined data symbols. Undefined data symbols will
+ still be reported as normal (in accordance with ``--unresolved-symbols``).
+
.. option:: --initial-memory=<value>
Initial size of the linear memory. Default: static data size.
in turn can be set using ``__attribute__((export_name))`` clang attribute.
In addition, symbols can be exported via the linker command line using
-``--export``.
+``--export`` (which will error if the symbol is not found) or
+``--export-if-defined`` (which will not).
Finally, just like with native ELF linker the ``--export-dynamic`` flag can be
used to export symbols in the executable which are marked as
# built documents.
#
# The short version.
-version = '11'
+version = '13'
# The full version, including alpha/beta/rc tags.
-release = '11'
+release = '13'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
--- /dev/null
+=====================
+Error Handling Script
+=====================
+
+LLD provides the ability to hook into some error handling routines through a
+user-provided script specified with ``--error-handling-script=<path to the script>``
+when certain errors are encountered. This document specifies the requirements of
+such a script.
+
+Generic Requirements
+====================
+
+The script is expected to be available in the ``PATH`` or to be provided using a
+full path. It must be executable. It is executed in the same environment as the
+parent process.
+
+Arguments
+=========
+
+LLD calls the error handling script using the following arguments::
+
+ error-handling-script <tag> <tag-specific-arguments...>
+
+The following tags are supported:
+
+- ``missing-lib``: indicates that LLD failed to find a library. The library name
+ is specified as the second argument, e.g. ``error-handling-script missing-lib
+ mylib``
+
+- ``undefined-symbol``: indicates that the given symbol is marked as undefined. The
+ unmangled symbol name is specified as the second argument, e.g.
+ ``error-handling-script undefined-symbol mysymbol``
+
+Return Value
+============
+
+Upon success, the script is expected to return 0. A non-zero value is
+interpreted as an error and reported to the user. In both cases, LLD still
+reports the original error.
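A minimal handler might look like the sketch below. It is written in C++ only for
consistency with the rest of this patch (any executable works), and the diagnostics
it prints are purely illustrative; it simply follows the argument and return-value
convention described above::

  // Receives the tag as argv[1] and the tag-specific argument as argv[2].
  #include <cstdio>
  #include <cstring>

  int main(int argc, char **argv) {
    if (argc < 3)
      return 1; // non-zero is reported by LLD as a generic error
    if (std::strcmp(argv[1], "missing-lib") == 0)
      std::fprintf(stderr, "note: consider installing lib%s\n", argv[2]);
    else if (std::strcmp(argv[1], "undefined-symbol") == 0)
      std::fprintf(stderr, "note: no definition found for %s\n", argv[2]);
    return 0; // success; LLD still reports the original error either way
  }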
WebAssembly
windows_support
missingkeyfunction
+ error_handling_script
Partitions
ReleaseNotes
ELF/linker_script
+ ELF/warn_backrefs
Link against shared libraries.
.It Fl -Bstatic , Fl -static , Fl -dn
Do not link against shared libraries.
-.It Fl -Bsymbolic
-Bind defined symbols locally.
-.It Fl -Bsymbolic-functions
-Bind defined function symbols locally.
+.It Fl Bno-symbolic
+Don't bind default visibility defined symbols locally for
+.Fl shared
+(default).
+.It Fl Bsymbolic
+Bind default visibility defined symbols locally for
+.Fl shared.
+Also set the
+.Dv DF_SYMBOLIC
+flag.
+.It Fl Bsymbolic-functions
+Bind default visibility defined function symbols locally for
+.Fl shared.
+.It Fl Bsymbolic-non-weak-functions
+Bind default visibility defined STB_GLOBAL function symbols locally for
+.Fl shared.
.It Fl -build-id Ns = Ns Ar value
Generate a build ID note.
.Ar value
(shared object) References to matched non-local STV_DEFAULT symbols shouldn't be bound to definitions within the shared object. Implies
.Cm -Bsymbolic
but does not set DF_SYMBOLIC
+.It Fl -EB
+Select the big-endian format in the OUTPUT_FORMAT command.
+.It Fl -EL
+Select the little-endian format in the OUTPUT_FORMAT command.
.It Fl -eh-frame-hdr
Request creation of
.Li .eh_frame_hdr
A value of zero indicates that there is no limit.
.It Fl -error-unresolved-symbols
Report unresolved symbols as errors.
+.It Fl -error-handling-script Ns = Ns Ar script_path
+Call script
+.Ar script_path
+upon some error, with
+.Ar tag
+as the first argument, and an extra parameter as the second argument. The script is
+expected to return 0 on success. Any other value is considered a generic error.
+.Ar tag
+may be
+.Cm missing-lib
+followed by the name of the missing library, or
+.Cm undefined-symbol
+followed by the name of the undefined symbol.
.It Fl -execute-only
Mark executable sections unreadable.
This option is currently only supported on AArch64.
Inhibit output of an
.Li .interp
section.
+.It Fl -no-fortran-common
+Do not search archive members for definitions to override COMMON symbols.
.It Fl -no-gc-sections
Disable garbage collection of unused sections.
.It Fl -no-gnu-unique
.It Fl -shared , Fl -Bsharable
Build a shared object.
.It Fl -shuffle-sections Ns = Ns Ar seed
-Shuffle input sections using the given seed. If 0, use a random seed.
+Shuffle matched sections using the given seed before mapping them to the output sections.
+If -1, reverse the section order. If 0, use a random seed.
.It Fl -soname Ns = Ns Ar value , Fl h Ar value
Set
.Dv DT_SONAME
in an archive.
.It Fl -strip-all , Fl s
Strip all symbols.
+Implies
+.Fl -strip-debug .
.It Fl -strip-debug , Fl S
Strip debugging information.
.It Fl -symbol-ordering-file Ns = Ns Ar file
.It Fl -whole-archive
Force load of all members in a static library.
.It Fl -wrap Ns = Ns Ar symbol
-Use wrapper functions for symbol.
+Redirect
+.Ar symbol
+references to
+.Ar __wrap_symbol
+and
+.Ar __real_symbol
+references to
+.Ar symbol.
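As a reference, a minimal sketch of the wrapping convention follows; the symbol
names __wrap_malloc/__real_malloc are the standard convention for --wrap=malloc,
but the function body is illustrative only.

  // Compiled into a program that is linked with --wrap=malloc.
  #include <cstddef>
  extern "C" void *__real_malloc(size_t size); // resolves to the original malloc
  extern "C" void *__wrap_malloc(size_t size) { // receives redirected malloc calls
    // Instrumentation could go here before forwarding to the real allocator.
    return __real_malloc(size);
  }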
.It Fl z Ar option
Linker option extensions.
.Bl -tag -width indent -compact
.Dv PT_GNU_STACK
program segment.
.Pp
+.It Cm start-stop-gc
+Don't let __start_/__stop_ references retain the associated C identifier name sections (default).
+.Pp
+.It Cm nostart-stop-gc
+Let __start_/__stop_ references retain the associated C identifier name sections.
+.Pp
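For reference, a "C identifier name section" and the __start_/__stop_ references
mentioned above look like the following sketch; the section name and values are
illustrative, not taken from this patch.

  // The linker synthesizes __start_mylist/__stop_mylist for section "mylist".
  __attribute__((section("mylist"), used)) static int entry = 42;
  extern "C" int __start_mylist[], __stop_mylist[]; // defined by the linker
  int sumList() {
    int sum = 0;
    for (const int *p = __start_mylist; p != __stop_mylist; ++p)
      sum += *p; // with -z start-stop-gc these references alone do not keep
                 // "mylist" input sections alive under --gc-sections
    return sum;
  }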
.It Cm text
Do not allow relocations against read-only segments.
This is the default.
int64_t getInteger(llvm::opt::InputArgList &args, unsigned key,
int64_t Default);
+int64_t getHex(llvm::opt::InputArgList &args, unsigned key, int64_t Default);
+
std::vector<StringRef> getStrings(llvm::opt::InputArgList &args, int id);
uint64_t getZOptionValue(llvm::opt::InputArgList &args, int id, StringRef key,
--- /dev/null
+//===- Arrays.h ------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLD_ARRAYS_H
+#define LLD_ARRAYS_H
+
+#include "llvm/ADT/ArrayRef.h"
+
+#include <vector>
+
+namespace lld {
+// Split a uint8_t array into chunks of at most chunkSize bytes each.
+inline std::vector<llvm::ArrayRef<uint8_t>> split(llvm::ArrayRef<uint8_t> arr,
+ size_t chunkSize) {
+ std::vector<llvm::ArrayRef<uint8_t>> ret;
+ while (arr.size() > chunkSize) {
+ ret.push_back(arr.take_front(chunkSize));
+ arr = arr.drop_front(chunkSize);
+ }
+ if (!arr.empty())
+ ret.push_back(arr);
+ return ret;
+}
+
+} // namespace lld
+
+#endif
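A hedged usage sketch for split() above (assuming the header is installed as
lld/Common/Arrays.h): chopping ten bytes into 4-byte chunks yields pieces of
sizes 4, 4 and 2.

  #include "lld/Common/Arrays.h"
  #include <cassert>

  void splitExample() {
    uint8_t bytes[10] = {};
    std::vector<llvm::ArrayRef<uint8_t>> pieces =
        lld::split(llvm::ArrayRef<uint8_t>(bytes, sizeof(bytes)), 4);
    assert(pieces.size() == 3); // the final piece keeps the 2-byte remainder
  }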
#include "llvm/Support/raw_ostream.h"
namespace lld {
+struct SafeReturn {
+ int ret;
+ bool canRunAgain;
+};
+
+// Generic entry point when using LLD as a library, safe for re-entry, supports
+// crash recovery. Returns a general completion code and a boolean telling
+// whether it can be called again. In some cases, a crash could corrupt memory
+// and re-entry would not be possible anymore. Use exitLld() in that case to
+// properly exit your application and avoid intermittent crashes on exit caused
+// by cleanup.
+SafeReturn safeLldMain(int argc, const char **argv, llvm::raw_ostream &stdoutOS,
+ llvm::raw_ostream &stderrOS);
+
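A sketch of driving this entry point from a host application; the header
locations and the argument list are assumptions for illustration, not taken
from this patch.

  #include "lld/Common/Driver.h"       // assumed to declare safeLldMain
  #include "lld/Common/ErrorHandler.h" // assumed to declare exitLld
  #include "llvm/Support/raw_ostream.h"

  int runLink() {
    const char *argv[] = {"ld.lld", "main.o", "-o", "a.out"};
    lld::SafeReturn r = lld::safeLldMain(4, argv, llvm::outs(), llvm::errs());
    if (!r.canRunAgain)
      lld::exitLld(r.ret); // memory may be corrupt after a crash; do not re-enter
    return r.ret;
  }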
namespace coff {
bool link(llvm::ArrayRef<const char *> args, bool canExitEarly,
llvm::raw_ostream &stdoutOS, llvm::raw_ostream &stderrOS);
llvm::raw_ostream &outs();
llvm::raw_ostream &errs();
+enum class ErrorTag { LibNotFound, SymbolNotFound };
+
class ErrorHandler {
public:
uint64_t errorCount = 0;
uint64_t errorLimit = 20;
StringRef errorLimitExceededMsg = "too many errors emitted, stopping now";
+ StringRef errorHandlingScript;
StringRef logName = "lld";
bool exitEarly = true;
bool fatalWarnings = false;
bool verbose = false;
bool vsDiagnostics = false;
+ bool disableOutput = false;
+ std::function<void()> cleanupCallback;
void error(const Twine &msg);
- LLVM_ATTRIBUTE_NORETURN void fatal(const Twine &msg);
+ void error(const Twine &msg, ErrorTag tag, ArrayRef<StringRef> args);
+ [[noreturn]] void fatal(const Twine &msg);
void log(const Twine &msg);
void message(const Twine &msg);
void warn(const Twine &msg);
+ void reset() {
+ if (cleanupCallback)
+ cleanupCallback();
+ *this = ErrorHandler();
+ }
+
std::unique_ptr<llvm::FileOutputBuffer> outputBuffer;
private:
ErrorHandler &errorHandler();
inline void error(const Twine &msg) { errorHandler().error(msg); }
-inline LLVM_ATTRIBUTE_NORETURN void fatal(const Twine &msg) {
- errorHandler().fatal(msg);
+inline void error(const Twine &msg, ErrorTag tag, ArrayRef<StringRef> args) {
+ errorHandler().error(msg, tag, args);
}
+[[noreturn]] inline void fatal(const Twine &msg) { errorHandler().fatal(msg); }
inline void log(const Twine &msg) { errorHandler().log(msg); }
inline void message(const Twine &msg) { errorHandler().message(msg); }
inline void warn(const Twine &msg) { errorHandler().warn(msg); }
inline uint64_t errorCount() { return errorHandler().errorCount; }
-LLVM_ATTRIBUTE_NORETURN void exitLld(int val);
+[[noreturn]] void exitLld(int val);
void diagnosticHandler(const llvm::DiagnosticInfo &di);
void checkError(Error e);
return std::move(*e);
}
+// Don't move from Expected wrappers around references.
+template <class T> T &check(Expected<T &> e) {
+ if (!e)
+ fatal(llvm::toString(e.takeError()));
+ return *e;
+}
+
template <class T>
T check2(ErrorOr<T> e, llvm::function_ref<std::string()> prefix) {
if (auto ec = e.getError())
} // namespace object
namespace wasm {
-struct WasmEvent;
-struct WasmEventType;
+struct WasmTag;
+struct WasmTagType;
struct WasmFunction;
struct WasmGlobal;
struct WasmGlobalType;
+struct WasmInitExpr;
+struct WasmLimits;
struct WasmRelocation;
struct WasmSignature;
+struct WasmTable;
+struct WasmTableType;
} // namespace wasm
} // namespace llvm
using llvm::object::WasmSection;
using llvm::object::WasmSegment;
using llvm::object::WasmSymbol;
-using llvm::wasm::WasmEvent;
-using llvm::wasm::WasmEventType;
using llvm::wasm::WasmFunction;
using llvm::wasm::WasmGlobal;
using llvm::wasm::WasmGlobalType;
+using llvm::wasm::WasmInitExpr;
+using llvm::wasm::WasmLimits;
using llvm::wasm::WasmRelocation;
using llvm::wasm::WasmSignature;
+using llvm::wasm::WasmTable;
+using llvm::wasm::WasmTableType;
+using llvm::wasm::WasmTag;
+using llvm::wasm::WasmTagType;
} // end namespace lld.
namespace std {
// glob pattern in the sense of GlobPattern.
class SingleStringMatcher {
public:
- // Create a StringPattern from Pattern to be matched exactly irregardless
+ // Create a StringPattern from Pattern to be matched exactly regardless
// of globbing characters if ExactMatch is true.
SingleStringMatcher(llvm::StringRef Pattern);
// Match s against this pattern, exactly if ExactMatch is true.
bool match(llvm::StringRef s) const;
+ // Returns true for pattern "*" which will match all inputs.
+ bool isTrivialMatchAll() const {
+ return !ExactMatch && GlobPatternMatcher.isTrivialMatchAll();
+ }
+
private:
- // Whether to do an exact match irregardless of the presence of wildcard
- // character.
+ // Whether to do an exact match regardless of wildcard characters.
bool ExactMatch;
// GlobPattern object if not doing an exact match.
// Add a new pattern to the existing ones to match against.
void addPattern(SingleStringMatcher Matcher) { patterns.push_back(Matcher); }
- bool empty() { return patterns.empty(); }
+ bool empty() const { return patterns.empty(); }
// Match s against the patterns.
bool match(llvm::StringRef s) const;
//
//===----------------------------------------------------------------------===//
+#ifndef LLD_COMMON_TARGETOPTIONSCOMMANDFLAGS_H
+#define LLD_COMMON_TARGETOPTIONSCOMMANDFLAGS_H
+
#include "llvm/ADT/Optional.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Target/TargetOptions.h"
std::string getCPUStr();
std::vector<std::string> getMAttrs();
}
+
+#endif
#include <chrono>
#include <map>
#include <memory>
+#include <vector>
namespace lld {
-if(NOT LLD_BUILT_STANDALONE)
- set(tablegen_deps intrinsics_gen)
-endif()
-
add_lld_library(lldCore
DefinedAtom.cpp
Error.cpp
${LLVM_PTHREAD_LIB}
DEPENDS
- ${tablegen_deps}
+ intrinsics_gen
)
/// Parse number assuming it is base 16, but allow 0x prefix.
static bool parseNumberBase16(StringRef numStr, uint64_t &baseAddress) {
- if (numStr.startswith_lower("0x"))
+ if (numStr.startswith_insensitive("0x"))
numStr = numStr.drop_front(2);
return numStr.getAsInteger(16, baseAddress);
}
for (unsigned i = 0; i != numArgs; ++i)
args[i + 1] = ctx.llvmOptions()[i];
args[numArgs + 1] = nullptr;
+ llvm::cl::ResetAllOptionOccurrences();
llvm::cl::ParseCommandLineOptions(numArgs + 1, args);
}
}
!parsedArgs.getLastArg(OPT_test_file_usage)) {
// If no -arch and no options at all, print usage message.
if (parsedArgs.size() == 0) {
- table.PrintHelp(llvm::outs(),
+ table.printHelp(llvm::outs(),
(std::string(args[0]) + " [options] file...").c_str(),
"LLVM Linker", false);
} else {
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/Support/Format.h"
-#include "llvm/TextAPI/MachO/InterfaceFile.h"
-#include "llvm/TextAPI/MachO/TextAPIReader.h"
+#include "llvm/TextAPI/InterfaceFile.h"
+#include "llvm/TextAPI/TextAPIReader.h"
#include <unordered_map>
namespace lld {
uint32_t Util::fileFlags() {
  // FIXME: these need to be determined at runtime.
if (_ctx.outputMachOType() == MH_OBJECT) {
- return _subsectionsViaSymbols ? MH_SUBSECTIONS_VIA_SYMBOLS : 0;
+ return _subsectionsViaSymbols ? (uint32_t)MH_SUBSECTIONS_VIA_SYMBOLS : 0;
} else {
uint32_t flags = MH_DYLDLINK;
if (!_ctx.useFlatNamespace())
// inspection" code if possible.
static uint64_t getCUAbbrevOffset(llvm::DataExtractor abbrevData,
uint64_t abbrCode) {
- uint64_t curCode;
uint64_t offset = 0;
- while ((curCode = abbrevData.getULEB128(&offset)) != abbrCode) {
+ while (abbrevData.getULEB128(&offset) != abbrCode) {
// Tag
abbrevData.getULEB128(&offset);
// DW_CHILDREN
io.mapOptional("exports", file.exportInfo);
io.mapOptional("dataInCode", file.dataInCode);
}
- static StringRef validate(IO &io, NormalizedFile &file) {
- return StringRef();
- }
+ static std::string validate(IO &io, NormalizedFile &file) { return {}; }
};
} // namespace llvm
llvm::Error writeFile(const lld::File &file, StringRef outPath) override {
// Create stream to path.
std::error_code ec;
- llvm::raw_fd_ostream out(outPath, ec, llvm::sys::fs::OF_Text);
+ llvm::raw_fd_ostream out(outPath, ec, llvm::sys::fs::OF_TextWithCRLF);
if (ec)
return llvm::errorCodeToError(ec);
RUNTIME DESTINATION bin)
if(NOT LLD_SYMLINKS_TO_CREATE)
- set(LLD_SYMLINKS_TO_CREATE lld-link ld.lld ld64.lld wasm-ld)
+ set(LLD_SYMLINKS_TO_CREATE
+ lld-link ld.lld ld64.lld ld64.lld.darwinnew ld64.lld.darwinold wasm-ld)
endif()
foreach(link ${LLD_SYMLINKS_TO_CREATE})
tablegen(LLVM Options.inc -gen-opt-parser-defs)
add_public_tablegen_target(WasmOptionsTableGen)
-if(NOT LLD_BUILT_STANDALONE)
- set(tablegen_deps intrinsics_gen)
-endif()
-
add_lld_library(lldWasm
Driver.cpp
InputChunks.cpp
InputFiles.cpp
LTO.cpp
+ MapFile.cpp
MarkLive.cpp
OutputSections.cpp
+ OutputSegment.cpp
Relocations.cpp
SymbolTable.cpp
Symbols.cpp
DEPENDS
WasmOptionsTableGen
- ${tablegen_deps}
+ intrinsics_gen
)
namespace lld {
namespace wasm {
+// For --unresolved-symbols.
+enum class UnresolvedPolicy { ReportError, Warn, Ignore };
+
// This struct contains the global configuration for the linker.
// Most fields are direct mapping from the command line options
// and such fields have the same name as the corresponding options.
// Most fields are initialized by the driver.
struct Configuration {
- bool allowUndefined;
+ bool bsymbolic;
bool checkFeatures;
bool compressRelocations;
bool demangle;
bool importMemory;
bool sharedMemory;
bool importTable;
- bool is64;
+ bool importUndefined;
+ llvm::Optional<bool> is64;
bool mergeDataSegments;
bool pie;
bool printGcSections;
unsigned ltoo;
unsigned optimize;
llvm::StringRef thinLTOJobs;
+ bool ltoNewPassManager;
+ bool ltoDebugPassManager;
+ UnresolvedPolicy unresolvedSymbols;
llvm::StringRef entry;
+ llvm::StringRef mapFile;
llvm::StringRef outputFile;
llvm::StringRef thinLTOCacheDir;
llvm::StringSet<> allowUndefinedSymbols;
llvm::StringSet<> exportedSymbols;
+ std::vector<llvm::StringRef> requiredExports;
std::vector<llvm::StringRef> searchPaths;
llvm::CachePruningPolicy thinLTOCachePolicy;
llvm::Optional<std::vector<std::string>> features;
// True if we are creating position-independent code.
bool isPic;
+ // True if we have an MVP input that uses __indirect_function_table and which
+ // requires it to be allocated to table number 0.
+ bool legacyFunctionTable = false;
+
// The table offset at which to place function addresses. We reserve zero
// for the null function pointer. This gets set to 1 for executables and 0
  // for shared libraries (since they are always added to a dynamic offset at
#include "lld/Common/Driver.h"
#include "Config.h"
#include "InputChunks.h"
-#include "InputGlobal.h"
+#include "InputElement.h"
#include "MarkLive.h"
#include "SymbolTable.h"
#include "Writer.h"
#include "lld/Common/Strings.h"
#include "lld/Common/Version.h"
#include "llvm/ADT/Twine.h"
+#include "llvm/Config/llvm-config.h"
#include "llvm/Object/Wasm.h"
#include "llvm/Option/Arg.h"
#include "llvm/Option/ArgList.h"
class LinkerDriver {
public:
- void link(ArrayRef<const char *> argsArr);
+ void linkerMain(ArrayRef<const char *> argsArr);
private:
void createFiles(opt::InputArgList &args);
lld::stdoutOS = &stdoutOS;
lld::stderrOS = &stderrOS;
+ errorHandler().cleanupCallback = []() { freeArena(); };
+
errorHandler().logName = args::getFilenameWithoutExe(args[0]);
errorHandler().errorLimitExceededMsg =
"too many errors emitted, stopping now (use "
symtab = make<SymbolTable>();
initLLVM();
- LinkerDriver().link(args);
+ LinkerDriver().linkerMain(args);
// Exit immediately if we don't need to return to the caller.
// This saves time because the overhead of calling destructors
if (canExitEarly)
exitLld(errorCount() ? 1 : 0);
- freeArena();
return !errorCount();
}
// Handle -whole-archive.
if (inWholeArchive) {
- for (MemoryBufferRef &m : getArchiveMembers(mbref))
- files.push_back(createObjectFile(m, path));
+ for (MemoryBufferRef &m : getArchiveMembers(mbref)) {
+ auto *object = createObjectFile(m, path);
+ // Mark object as live; object members are normally not
+ // live by default but -whole-archive is designed to treat
+ // them as such.
+ object->markLive();
+ files.push_back(object);
+ }
+
return;
}
return arg->getValue();
}
+// Determines what we should do if there are remaining unresolved
+// symbols after the name resolution.
+static UnresolvedPolicy getUnresolvedSymbolPolicy(opt::InputArgList &args) {
+ UnresolvedPolicy errorOrWarn = args.hasFlag(OPT_error_unresolved_symbols,
+ OPT_warn_unresolved_symbols, true)
+ ? UnresolvedPolicy::ReportError
+ : UnresolvedPolicy::Warn;
+
+ if (auto *arg = args.getLastArg(OPT_unresolved_symbols)) {
+ StringRef s = arg->getValue();
+ if (s == "ignore-all")
+ return UnresolvedPolicy::Ignore;
+ if (s == "report-all")
+ return errorOrWarn;
+ error("unknown --unresolved-symbols value: " + s);
+ }
+
+ return errorOrWarn;
+}
+
// Initializes Config members by the command line options.
static void readConfigs(opt::InputArgList &args) {
- config->allowUndefined = args.hasArg(OPT_allow_undefined);
+ config->bsymbolic = args.hasArg(OPT_Bsymbolic);
config->checkFeatures =
args.hasFlag(OPT_check_features, OPT_no_check_features, true);
config->compressRelocations = args.hasArg(OPT_compress_relocations);
config->importMemory = args.hasArg(OPT_import_memory);
config->sharedMemory = args.hasArg(OPT_shared_memory);
config->importTable = args.hasArg(OPT_import_table);
+ config->importUndefined = args.hasArg(OPT_import_undefined);
config->ltoo = args::getInteger(args, OPT_lto_O, 2);
config->ltoPartitions = args::getInteger(args, OPT_lto_partitions, 1);
- config->optimize = args::getInteger(args, OPT_O, 0);
+ config->ltoNewPassManager =
+ args.hasFlag(OPT_no_lto_legacy_pass_manager, OPT_lto_legacy_pass_manager,
+ LLVM_ENABLE_NEW_PASS_MANAGER);
+ config->ltoDebugPassManager = args.hasArg(OPT_lto_debug_pass_manager);
+ config->mapFile = args.getLastArgValue(OPT_Map);
+ config->optimize = args::getInteger(args, OPT_O, 1);
config->outputFile = args.getLastArgValue(OPT_o);
config->relocatable = args.hasArg(OPT_relocatable);
config->gcSections =
config->thinLTOCachePolicy = CHECK(
parseCachePruningPolicy(args.getLastArgValue(OPT_thinlto_cache_policy)),
"--thinlto-cache-policy: invalid cache policy");
+ config->unresolvedSymbols = getUnresolvedSymbolPolicy(args);
errorHandler().verbose = args.hasArg(OPT_verbose);
LLVM_DEBUG(errorHandler().verbose = true);
args.hasFlag(OPT_export_dynamic, OPT_no_export_dynamic, config->shared);
// Parse wasm32/64.
- config->is64 = false;
if (auto *arg = args.getLastArg(OPT_m)) {
StringRef s = arg->getValue();
if (s == "wasm32")
for (StringRef s : arg->getValues())
config->features->push_back(std::string(s));
}
+
+ // Legacy --allow-undefined flag which is equivalent to
+  // --unresolved-symbols=ignore + --import-undefined
+ if (args.hasArg(OPT_allow_undefined)) {
+ config->importUndefined = true;
+ config->unresolvedSymbols = UnresolvedPolicy::Ignore;
+ }
+
+ if (args.hasArg(OPT_print_map))
+ config->mapFile = "-";
}
// Some Config members do not directly correspond to any particular
config->importTable = true;
}
+ if (config->relocatable) {
+ if (config->exportTable)
+ error("--relocatable is incompatible with --export-table");
+ if (config->growableTable)
+ error("--relocatable is incompatible with --growable-table");
+ // Ignore any --import-table, as it's redundant.
+ config->importTable = true;
+ }
+
if (config->shared) {
config->importMemory = true;
- config->allowUndefined = true;
+ config->importUndefined = true;
+ config->unresolvedSymbols = UnresolvedPolicy::Ignore;
}
}
warn("creating PIEs, with -pie, is not yet stable");
}
}
+
+ if (config->bsymbolic && !config->shared) {
+ warn("-Bsymbolic is only meaningful when combined with -shared");
+ }
}
// Force Sym to be entered in the output. Used for -u or equivalent.
return sym;
}
-static GlobalSymbol *createGlobalVariable(StringRef name, bool isMutable,
- int value) {
+static InputGlobal *createGlobal(StringRef name, bool isMutable) {
llvm::wasm::WasmGlobal wasmGlobal;
- if (config->is64) {
- wasmGlobal.Type = {WASM_TYPE_I64, isMutable};
- wasmGlobal.InitExpr.Value.Int64 = value;
- wasmGlobal.InitExpr.Opcode = WASM_OPCODE_I64_CONST;
- } else {
- wasmGlobal.Type = {WASM_TYPE_I32, isMutable};
- wasmGlobal.InitExpr.Value.Int32 = value;
- wasmGlobal.InitExpr.Opcode = WASM_OPCODE_I32_CONST;
- }
+ bool is64 = config->is64.getValueOr(false);
+ wasmGlobal.Type = {uint8_t(is64 ? WASM_TYPE_I64 : WASM_TYPE_I32), isMutable};
+ wasmGlobal.InitExpr = intConst(0, is64);
wasmGlobal.SymbolName = name;
- return symtab->addSyntheticGlobal(name, WASM_SYMBOL_VISIBILITY_HIDDEN,
- make<InputGlobal>(wasmGlobal, nullptr));
+ return make<InputGlobal>(wasmGlobal, nullptr);
+}
+
+static GlobalSymbol *createGlobalVariable(StringRef name, bool isMutable) {
+ InputGlobal *g = createGlobal(name, isMutable);
+ return symtab->addSyntheticGlobal(name, WASM_SYMBOL_VISIBILITY_HIDDEN, g);
+}
+
+static GlobalSymbol *createOptionalGlobal(StringRef name, bool isMutable) {
+ InputGlobal *g = createGlobal(name, isMutable);
+ return symtab->addOptionalGlobalSymbol(name, g);
}
// Create ABI-defined synthetic symbols
"__wasm_call_ctors", WASM_SYMBOL_VISIBILITY_HIDDEN,
make<SyntheticFunction>(nullSignature, "__wasm_call_ctors"));
- if (config->isPic) {
- // For PIC code we create a synthetic function __wasm_apply_relocs which
- // is called from __wasm_call_ctors before the user-level constructors.
- WasmSym::applyRelocs = symtab->addSyntheticFunction(
- "__wasm_apply_relocs", WASM_SYMBOL_VISIBILITY_HIDDEN,
- make<SyntheticFunction>(nullSignature, "__wasm_apply_relocs"));
- }
-
+ bool is64 = config->is64.getValueOr(false);
if (config->isPic) {
- WasmSym::stackPointer = createUndefinedGlobal(
- "__stack_pointer",
- config->is64 ? &mutableGlobalTypeI64 : &mutableGlobalTypeI32);
+ WasmSym::stackPointer =
+ createUndefinedGlobal("__stack_pointer", config->is64.getValueOr(false)
+ ? &mutableGlobalTypeI64
+ : &mutableGlobalTypeI32);
// For PIC code, we import two global variables (__memory_base and
// __table_base) from the environment and use these as the offset at
// which to load our static data and function table.
// See:
// https://github.com/WebAssembly/tool-conventions/blob/master/DynamicLinking.md
- WasmSym::memoryBase = createUndefinedGlobal(
- "__memory_base", config->is64 ? &globalTypeI64 : &globalTypeI32);
- WasmSym::tableBase = createUndefinedGlobal("__table_base", &globalTypeI32);
+ auto *globalType = is64 ? &globalTypeI64 : &globalTypeI32;
+ WasmSym::memoryBase = createUndefinedGlobal("__memory_base", globalType);
+ WasmSym::tableBase = createUndefinedGlobal("__table_base", globalType);
WasmSym::memoryBase->markLive();
WasmSym::tableBase->markLive();
+ if (is64) {
+ WasmSym::tableBase32 =
+ createUndefinedGlobal("__table_base32", &globalTypeI32);
+ WasmSym::tableBase32->markLive();
+ } else {
+ WasmSym::tableBase32 = nullptr;
+ }
} else {
// For non-PIC code
- WasmSym::stackPointer = createGlobalVariable("__stack_pointer", true, 0);
+ WasmSym::stackPointer = createGlobalVariable("__stack_pointer", true);
WasmSym::stackPointer->markLive();
}
- if (config->sharedMemory && !config->shared) {
- // Passive segments are used to avoid memory being reinitialized on each
- // thread's instantiation. These passive segments are initialized and
- // dropped in __wasm_init_memory, which is registered as the start function
- WasmSym::initMemory = symtab->addSyntheticFunction(
- "__wasm_init_memory", WASM_SYMBOL_VISIBILITY_HIDDEN,
- make<SyntheticFunction>(nullSignature, "__wasm_init_memory"));
- WasmSym::initMemoryFlag = symtab->addSyntheticDataSymbol(
- "__wasm_init_memory_flag", WASM_SYMBOL_VISIBILITY_HIDDEN);
- assert(WasmSym::initMemoryFlag);
- WasmSym::tlsBase = createGlobalVariable("__tls_base", true, 0);
- WasmSym::tlsSize = createGlobalVariable("__tls_size", false, 0);
- WasmSym::tlsAlign = createGlobalVariable("__tls_align", false, 1);
+ if (config->sharedMemory && !config->relocatable) {
+ WasmSym::tlsBase = createGlobalVariable("__tls_base", true);
+ WasmSym::tlsSize = createGlobalVariable("__tls_size", false);
+ WasmSym::tlsAlign = createGlobalVariable("__tls_align", false);
WasmSym::initTLS = symtab->addSyntheticFunction(
"__wasm_init_tls", WASM_SYMBOL_VISIBILITY_HIDDEN,
- make<SyntheticFunction>(config->is64 ? i64ArgSignature
- : i32ArgSignature,
- "__wasm_init_tls"));
+ make<SyntheticFunction>(
+ is64 ? i64ArgSignature : i32ArgSignature,
+ "__wasm_init_tls"));
}
}
WasmSym::heapBase = symtab->addOptionalDataSymbol("__heap_base");
WasmSym::definedMemoryBase = symtab->addOptionalDataSymbol("__memory_base");
WasmSym::definedTableBase = symtab->addOptionalDataSymbol("__table_base");
+ if (config->is64.getValueOr(false))
+ WasmSym::definedTableBase32 =
+ symtab->addOptionalDataSymbol("__table_base32");
}
+
+ // For non-shared memory programs we still need to define __tls_base since we
+ // allow object files built with TLS to be linked into single threaded
+  // programs, and such object files can contain references to this symbol.
+ //
+ // However, in this case __tls_base is immutable and points directly to the
+ // start of the `.tdata` static segment.
+ //
+ // __tls_size and __tls_align are not needed in this case since they are only
+ // needed for __wasm_init_tls (which we do not create in this case).
+ if (!config->sharedMemory)
+ WasmSym::tlsBase = createOptionalGlobal("__tls_base", false);
}
// Reconstructs command line arguments so that you can re-run
symtab->wrap(w.sym, w.real, w.wrap);
}
-void LinkerDriver::link(ArrayRef<const char *> argsArr) {
+static void splitSections() {
+ // splitIntoPieces needs to be called on each MergeInputChunk
+ // before calling finalizeContents().
+ LLVM_DEBUG(llvm::dbgs() << "splitSections\n");
+ parallelForEach(symtab->objectFiles, [](ObjFile *file) {
+ for (InputChunk *seg : file->segments) {
+ if (auto *s = dyn_cast<MergeInputChunk>(seg))
+ s->splitIntoPieces();
+ }
+ for (InputChunk *sec : file->customSections) {
+ if (auto *s = dyn_cast<MergeInputChunk>(sec))
+ s->splitIntoPieces();
+ }
+ });
+}
+
+void LinkerDriver::linkerMain(ArrayRef<const char *> argsArr) {
WasmOptTable parser;
opt::InputArgList args = parser.parse(argsArr.slice(1));
// Handle --help
if (args.hasArg(OPT_help)) {
- parser.PrintHelp(lld::outs(),
+ parser.printHelp(lld::outs(),
(std::string(argsArr[0]) + " [options] file...").c_str(),
"LLVM Linker", false);
return;
v.push_back("wasm-ld (LLVM option parsing)");
for (auto *arg : args.filtered(OPT_mllvm))
v.push_back(arg->getValue());
+ cl::ResetAllOptionOccurrences();
cl::ParseCommandLineOptions(v.size(), v.data());
errorHandler().errorLimit = args::getInteger(args, OPT_error_limit, 20);
// find that it failed because there was a mistake in their command-line.
if (auto e = tryCreateFile(config->outputFile))
error("cannot open output file " + config->outputFile + ": " + e.message());
- // TODO(sbc): add check for map file too once we add support for that.
+ if (auto e = tryCreateFile(config->mapFile))
+ error("cannot open map file " + config->mapFile + ": " + e.message());
if (errorCount())
return;
for (auto *arg : args.filtered(OPT_trace_symbol))
symtab->trace(arg->getValue());
- for (auto *arg : args.filtered(OPT_export))
+ for (auto *arg : args.filtered(OPT_export_if_defined))
config->exportedSymbols.insert(arg->getValue());
+ for (auto *arg : args.filtered(OPT_export)) {
+ config->exportedSymbols.insert(arg->getValue());
+ config->requiredExports.push_back(arg->getValue());
+ }
+
createSyntheticSymbols();
// Add all files to the symbol table. This will add almost all
// Handle the `--export <sym>` options
// This works like --undefined but also exports the symbol if its found
- for (auto *arg : args.filtered(OPT_export))
- handleUndefined(arg->getValue());
+ for (auto &iter : config->exportedSymbols)
+ handleUndefined(iter.first());
Symbol *entrySym = nullptr;
if (!config->relocatable && !config->entry.empty()) {
config->entry);
}
+ // If the user code defines a `__wasm_call_dtors` function, remember it so
+ // that we can call it from the command export wrappers. Unlike
+ // `__wasm_call_ctors` which we synthesize, `__wasm_call_dtors` is defined
+ // by libc/etc., because destructors are registered dynamically with
+ // `__cxa_atexit` and friends.
+ if (!config->relocatable && !config->shared &&
+ !WasmSym::callCtors->isUsedInRegularObj &&
+ WasmSym::callCtors->getName() != config->entry &&
+ !config->exportedSymbols.count(WasmSym::callCtors->getName())) {
+ if (Symbol *callDtors = handleUndefined("__wasm_call_dtors")) {
+ if (auto *callDtorsFunc = dyn_cast<DefinedFunction>(callDtors)) {
+ if (callDtorsFunc->signature &&
+ (!callDtorsFunc->signature->Params.empty() ||
+ !callDtorsFunc->signature->Returns.empty())) {
+ error("__wasm_call_dtors must have no argument or return values");
+ }
+ WasmSym::callDtors = callDtorsFunc;
+ } else {
+ error("__wasm_call_dtors must be a function");
+ }
+ }
+ }
+
createOptionalSymbols();
if (errorCount())
if (!wrapped.empty())
wrapSymbols(wrapped);
- for (auto *arg : args.filtered(OPT_export)) {
- Symbol *sym = symtab->find(arg->getValue());
+ for (auto &iter : config->exportedSymbols) {
+ Symbol *sym = symtab->find(iter.first());
if (sym && sym->isDefined())
sym->forceExport = true;
- else if (!config->allowUndefined)
- error(Twine("symbol exported via --export not found: ") +
- arg->getValue());
}
- if (!config->relocatable) {
+ if (!config->relocatable && !config->isPic) {
// Add synthetic dummies for weak undefined functions. Must happen
// after LTO otherwise functions may not yet have signatures.
symtab->handleWeakUndefines();
if (errorCount())
return;
+ // Split WASM_SEG_FLAG_STRINGS sections into pieces in preparation for garbage
+ // collection.
+ splitSections();
+
// Do size optimizations: garbage collection
markLive();
+ // Provide the indirect function table if needed.
+ WasmSym::indirectFunctionTable =
+ symtab->resolveIndirectFunctionTable(/*required =*/false);
+
+ if (errorCount())
+ return;
+
// Write the result to the file.
writeResult();
}
#include "lld/Common/ErrorHandler.h"
#include "lld/Common/LLVM.h"
#include "llvm/Support/LEB128.h"
+#include "llvm/Support/xxhash.h"
#define DEBUG_TYPE "lld"
case R_WASM_MEMORY_ADDR_SLEB64:
case R_WASM_MEMORY_ADDR_REL_SLEB64:
case R_WASM_MEMORY_ADDR_I64:
+ case R_WASM_TABLE_INDEX_SLEB64:
+ case R_WASM_TABLE_INDEX_I64:
+ case R_WASM_FUNCTION_OFFSET_I64:
+ case R_WASM_TABLE_INDEX_REL_SLEB64:
+ case R_WASM_MEMORY_ADDR_TLS_SLEB64:
return true;
default:
return false;
return file->getWasmObj()->linkingData().Comdats[index];
}
-void InputChunk::verifyRelocTargets() const {
- for (const WasmRelocation &rel : relocations) {
- uint64_t existingValue;
- unsigned bytesRead = 0;
- auto offset = rel.Offset - getInputSectionOffset();
- const uint8_t *loc = data().data() + offset;
- switch (rel.Type) {
- case R_WASM_TYPE_INDEX_LEB:
- case R_WASM_FUNCTION_INDEX_LEB:
- case R_WASM_GLOBAL_INDEX_LEB:
- case R_WASM_EVENT_INDEX_LEB:
- case R_WASM_MEMORY_ADDR_LEB:
- case R_WASM_MEMORY_ADDR_LEB64:
- existingValue = decodeULEB128(loc, &bytesRead);
- break;
- case R_WASM_TABLE_INDEX_SLEB:
- case R_WASM_TABLE_INDEX_REL_SLEB:
- case R_WASM_MEMORY_ADDR_SLEB:
- case R_WASM_MEMORY_ADDR_SLEB64:
- case R_WASM_MEMORY_ADDR_REL_SLEB:
- case R_WASM_MEMORY_ADDR_REL_SLEB64:
- existingValue = static_cast<uint64_t>(decodeSLEB128(loc, &bytesRead));
- break;
- case R_WASM_TABLE_INDEX_I32:
- case R_WASM_MEMORY_ADDR_I32:
- case R_WASM_FUNCTION_OFFSET_I32:
- case R_WASM_SECTION_OFFSET_I32:
- case R_WASM_GLOBAL_INDEX_I32:
- existingValue = read32le(loc);
- break;
- case R_WASM_MEMORY_ADDR_I64:
- existingValue = read64le(loc);
- break;
- default:
- llvm_unreachable("unknown relocation type");
- }
+uint32_t InputChunk::getSize() const {
+ if (const auto *ms = dyn_cast<SyntheticMergedChunk>(this))
+ return ms->builder.getSize();
- if (bytesRead && bytesRead != 5)
- warn("expected LEB at relocation site be 5-byte padded");
-
- if (rel.Type != R_WASM_GLOBAL_INDEX_LEB &&
- rel.Type != R_WASM_GLOBAL_INDEX_I32) {
- auto expectedValue = file->calcExpectedValue(rel);
- if (expectedValue != existingValue)
- warn("unexpected existing value for " + relocTypeToString(rel.Type) +
- ": existing=" + Twine(existingValue) +
- " expected=" + Twine(expectedValue));
+ if (const auto *f = dyn_cast<InputFunction>(this)) {
+ if (config->compressRelocations && f->file) {
+ return f->getCompressedSize();
}
}
+
+ return data().size();
+}
+
+uint32_t InputChunk::getInputSize() const {
+ if (const auto *f = dyn_cast<InputFunction>(this))
+ return f->function->Size;
+ return getSize();
}
// Copy this input chunk to an mmap'ed output file and apply relocations.
void InputChunk::writeTo(uint8_t *buf) const {
+ if (const auto *f = dyn_cast<InputFunction>(this)) {
+ if (file && config->compressRelocations)
+ return f->writeCompressed(buf);
+ } else if (const auto *ms = dyn_cast<SyntheticMergedChunk>(this)) {
+ ms->builder.write(buf + outSecOff);
+ // Apply relocations
+ ms->relocate(buf + outSecOff);
+ return;
+ }
+
// Copy contents
- memcpy(buf + outputOffset, data().data(), data().size());
+ memcpy(buf + outSecOff, data().data(), data().size());
// Apply relocations
+ relocate(buf + outSecOff);
+}
+
+void InputChunk::relocate(uint8_t *buf) const {
if (relocations.empty())
return;
-#ifndef NDEBUG
- verifyRelocTargets();
-#endif
-
LLVM_DEBUG(dbgs() << "applying relocations: " << toString(this)
<< " count=" << relocations.size() << "\n");
- int32_t off = outputOffset - getInputSectionOffset();
+ int32_t inputSectionOffset = getInputSectionOffset();
+ uint64_t tombstone = getTombstone();
for (const WasmRelocation &rel : relocations) {
- uint8_t *loc = buf + rel.Offset + off;
- auto value = file->calcNewValue(rel);
+ uint8_t *loc = buf + rel.Offset - inputSectionOffset;
LLVM_DEBUG(dbgs() << "apply reloc: type=" << relocTypeToString(rel.Type));
if (rel.Type != R_WASM_TYPE_INDEX_LEB)
LLVM_DEBUG(dbgs() << " sym=" << file->getSymbols()[rel.Index]->getName());
LLVM_DEBUG(dbgs() << " addend=" << rel.Addend << " index=" << rel.Index
- << " value=" << value << " offset=" << rel.Offset
- << "\n");
+ << " offset=" << rel.Offset << "\n");
+ auto value = file->calcNewValue(rel, tombstone, this);
switch (rel.Type) {
case R_WASM_TYPE_INDEX_LEB:
case R_WASM_FUNCTION_INDEX_LEB:
case R_WASM_GLOBAL_INDEX_LEB:
- case R_WASM_EVENT_INDEX_LEB:
+ case R_WASM_TAG_INDEX_LEB:
case R_WASM_MEMORY_ADDR_LEB:
+ case R_WASM_TABLE_NUMBER_LEB:
encodeULEB128(value, loc, 5);
break;
case R_WASM_MEMORY_ADDR_LEB64:
case R_WASM_TABLE_INDEX_REL_SLEB:
case R_WASM_MEMORY_ADDR_SLEB:
case R_WASM_MEMORY_ADDR_REL_SLEB:
+ case R_WASM_MEMORY_ADDR_TLS_SLEB:
encodeSLEB128(static_cast<int32_t>(value), loc, 5);
break;
+ case R_WASM_TABLE_INDEX_SLEB64:
+ case R_WASM_TABLE_INDEX_REL_SLEB64:
case R_WASM_MEMORY_ADDR_SLEB64:
case R_WASM_MEMORY_ADDR_REL_SLEB64:
+ case R_WASM_MEMORY_ADDR_TLS_SLEB64:
encodeSLEB128(static_cast<int64_t>(value), loc, 10);
break;
case R_WASM_TABLE_INDEX_I32:
case R_WASM_FUNCTION_OFFSET_I32:
case R_WASM_SECTION_OFFSET_I32:
case R_WASM_GLOBAL_INDEX_I32:
+ case R_WASM_MEMORY_ADDR_LOCREL_I32:
write32le(loc, value);
break;
+ case R_WASM_TABLE_INDEX_I64:
case R_WASM_MEMORY_ADDR_I64:
+ case R_WASM_FUNCTION_OFFSET_I64:
write64le(loc, value);
break;
default:
if (relocations.empty())
return;
- int32_t off = outputOffset - getInputSectionOffset();
+ int32_t off = outSecOff - getInputSectionOffset();
LLVM_DEBUG(dbgs() << "writeRelocations: " << file->getName()
<< " offset=" << Twine(off) << "\n");
}
}
+uint64_t InputChunk::getTombstone() const {
+ if (const auto *s = dyn_cast<InputSection>(this)) {
+ return s->tombstoneValue;
+ }
+
+ return 0;
+}
+
void InputFunction::setFunctionIndex(uint32_t index) {
LLVM_DEBUG(dbgs() << "InputFunction::setFunctionIndex: " << getName()
<< " -> " << index << "\n");
case R_WASM_TYPE_INDEX_LEB:
case R_WASM_FUNCTION_INDEX_LEB:
case R_WASM_GLOBAL_INDEX_LEB:
- case R_WASM_EVENT_INDEX_LEB:
+ case R_WASM_TAG_INDEX_LEB:
case R_WASM_MEMORY_ADDR_LEB:
case R_WASM_MEMORY_ADDR_LEB64:
+ case R_WASM_TABLE_NUMBER_LEB:
return encodeULEB128(value, buf);
case R_WASM_TABLE_INDEX_SLEB:
+ case R_WASM_TABLE_INDEX_SLEB64:
case R_WASM_MEMORY_ADDR_SLEB:
case R_WASM_MEMORY_ADDR_SLEB64:
return encodeSLEB128(static_cast<int64_t>(value), buf);
case R_WASM_TYPE_INDEX_LEB:
case R_WASM_FUNCTION_INDEX_LEB:
case R_WASM_GLOBAL_INDEX_LEB:
- case R_WASM_EVENT_INDEX_LEB:
+ case R_WASM_TAG_INDEX_LEB:
case R_WASM_MEMORY_ADDR_LEB:
+ case R_WASM_TABLE_NUMBER_LEB:
case R_WASM_TABLE_INDEX_SLEB:
case R_WASM_MEMORY_ADDR_SLEB:
return 5;
+ case R_WASM_TABLE_INDEX_SLEB64:
case R_WASM_MEMORY_ADDR_LEB64:
case R_WASM_MEMORY_ADDR_SLEB64:
return 10;
uint32_t start = getInputSectionOffset();
uint32_t end = start + function->Size;
+ uint64_t tombstone = getTombstone();
+
uint32_t lastRelocEnd = start + functionSizeLength;
for (const WasmRelocation &rel : relocations) {
LLVM_DEBUG(dbgs() << " region: " << (rel.Offset - lastRelocEnd) << "\n");
compressedFuncSize += rel.Offset - lastRelocEnd;
- compressedFuncSize += getRelocWidth(rel, file->calcNewValue(rel));
+ compressedFuncSize +=
+ getRelocWidth(rel, file->calcNewValue(rel, tombstone, this));
lastRelocEnd = rel.Offset + getRelocWidthPadded(rel);
}
LLVM_DEBUG(dbgs() << " final region: " << (end - lastRelocEnd) << "\n");
// Override the default writeTo method so that we can (optionally) write the
// compressed version of the function.
-void InputFunction::writeTo(uint8_t *buf) const {
- if (!file || !config->compressRelocations)
- return InputChunk::writeTo(buf);
-
- buf += outputOffset;
+void InputFunction::writeCompressed(uint8_t *buf) const {
+ buf += outSecOff;
uint8_t *orig = buf;
(void)orig;
const uint8_t *secStart = file->codeSection->Content.data();
const uint8_t *funcStart = secStart + getInputSectionOffset();
const uint8_t *end = funcStart + function->Size;
+ uint64_t tombstone = getTombstone();
uint32_t count;
decodeULEB128(funcStart, &count);
funcStart += count;
LLVM_DEBUG(dbgs() << " write chunk: " << chunkSize << "\n");
memcpy(buf, lastRelocEnd, chunkSize);
buf += chunkSize;
- buf += writeCompressedReloc(buf, rel, file->calcNewValue(rel));
+ buf += writeCompressedReloc(buf, rel,
+ file->calcNewValue(rel, tombstone, this));
lastRelocEnd = secStart + rel.Offset + getRelocWidthPadded(rel);
}
LLVM_DEBUG(dbgs() << " total: " << (buf + chunkSize - orig) << "\n");
}
+uint64_t InputChunk::getChunkOffset(uint64_t offset) const {
+ if (const auto *ms = dyn_cast<MergeInputChunk>(this)) {
+ LLVM_DEBUG(dbgs() << "getChunkOffset(merged): " << getName() << "\n");
+ LLVM_DEBUG(dbgs() << "offset: " << offset << "\n");
+ LLVM_DEBUG(dbgs() << "parentOffset: " << ms->getParentOffset(offset)
+ << "\n");
+ assert(ms->parent);
+ return ms->parent->getChunkOffset(ms->getParentOffset(offset));
+ }
+ return outputSegmentOffset + offset;
+}
+
+uint64_t InputChunk::getOffset(uint64_t offset) const {
+ return outSecOff + getChunkOffset(offset);
+}
+
+uint64_t InputChunk::getVA(uint64_t offset) const {
+ return (outputSeg ? outputSeg->startVA : 0) + getChunkOffset(offset);
+}
+
// Generate code to apply relocations to the data section at runtime.
// This is only called when generating shared libraries (PIC) where addresses are
// not known at static link time.
-void InputSegment::generateRelocationCode(raw_ostream &os) const {
+void InputChunk::generateRelocationCode(raw_ostream &os) const {
LLVM_DEBUG(dbgs() << "generating runtime relocations: " << getName()
<< " count=" << relocations.size() << "\n");
- unsigned opcode_ptr_const =
- config->is64 ? WASM_OPCODE_I64_CONST : WASM_OPCODE_I32_CONST;
- unsigned opcode_ptr_add =
- config->is64 ? WASM_OPCODE_I64_ADD : WASM_OPCODE_I32_ADD;
+ bool is64 = config->is64.getValueOr(false);
+ unsigned opcode_ptr_const = is64 ? WASM_OPCODE_I64_CONST
+ : WASM_OPCODE_I32_CONST;
+ unsigned opcode_ptr_add = is64 ? WASM_OPCODE_I64_ADD
+ : WASM_OPCODE_I32_ADD;
+ uint64_t tombstone = getTombstone();
// TODO(sbc): Encode the relocations in the data section and write a loop
// here to apply them.
- uint32_t segmentVA = outputSeg->startVA + outputSegmentOffset;
for (const WasmRelocation &rel : relocations) {
- uint64_t offset = rel.Offset - getInputSectionOffset();
- uint64_t outputOffset = segmentVA + offset;
+ uint64_t offset = getVA(rel.Offset) - getInputSectionOffset();
LLVM_DEBUG(dbgs() << "gen reloc: type=" << relocTypeToString(rel.Type)
<< " addend=" << rel.Addend << " index=" << rel.Index
- << " output offset=" << outputOffset << "\n");
+ << " output offset=" << offset << "\n");
// Get __memory_base
writeU8(os, WASM_OPCODE_GLOBAL_GET, "GLOBAL_GET");
// Add the offset of the relocation
writeU8(os, opcode_ptr_const, "CONST");
- writeSleb128(os, outputOffset, "offset");
+ writeSleb128(os, offset, "offset");
writeU8(os, opcode_ptr_add, "ADD");
bool is64 = relocIs64(rel.Type);
}
} else {
const GlobalSymbol* baseSymbol = WasmSym::memoryBase;
- if (rel.Type == R_WASM_TABLE_INDEX_I32)
+ if (rel.Type == R_WASM_TABLE_INDEX_I32 ||
+ rel.Type == R_WASM_TABLE_INDEX_I64)
baseSymbol = WasmSym::tableBase;
writeU8(os, WASM_OPCODE_GLOBAL_GET, "GLOBAL_GET");
writeUleb128(os, baseSymbol->getGlobalIndex(), "base");
writeU8(os, opcode_reloc_const, "CONST");
- writeSleb128(os, file->calcNewValue(rel), "offset");
+ writeSleb128(os, file->calcNewValue(rel, tombstone, this), "offset");
writeU8(os, opcode_reloc_add, "ADD");
}
}
}
+// Split WASM_SEG_FLAG_STRINGS section. Such a section is a sequence of
+// null-terminated strings.
+void MergeInputChunk::splitStrings(ArrayRef<uint8_t> data) {
+ LLVM_DEBUG(llvm::dbgs() << "splitStrings\n");
+ size_t off = 0;
+ StringRef s = toStringRef(data);
+
+ while (!s.empty()) {
+ size_t end = s.find(0);
+ if (end == StringRef::npos)
+ fatal(toString(this) + ": string is not null terminated");
+ size_t size = end + 1;
+
+ pieces.emplace_back(off, xxHash64(s.substr(0, size)), true);
+ s = s.substr(size);
+ off += size;
+ }
+}
+
+// This function is called after we obtain a complete list of input sections
+// that need to be linked. It is responsible for splitting section contents
+// into small chunks for further processing.
+//
+// Note that this function is called from parallelForEach. This must be
+// thread-safe (i.e. no memory allocation from the pools).
+void MergeInputChunk::splitIntoPieces() {
+ assert(pieces.empty());
+ // As of now we only support WASM_SEG_FLAG_STRINGS but in the future we
+ // could add other types of splitting (see ELF's splitIntoPieces).
+ assert(flags & WASM_SEG_FLAG_STRINGS);
+ splitStrings(data());
+}
+
+SectionPiece *MergeInputChunk::getSectionPiece(uint64_t offset) {
+ if (this->data().size() <= offset)
+ fatal(toString(this) + ": offset is outside the section");
+
+ // If Offset is not at beginning of a section piece, it is not in the map.
+ // In that case we need to do a binary search of the original section piece
+ // vector.
+ auto it = partition_point(
+ pieces, [=](SectionPiece p) { return p.inputOff <= offset; });
+ return &it[-1];
+}
+
+// Returns the offset in an output section for a given input offset.
+// Because the contents of a mergeable section are not contiguous in the
+// output, it is not just an addition to a base output offset.
+uint64_t MergeInputChunk::getParentOffset(uint64_t offset) const {
+ // If Offset is not at beginning of a section piece, it is not in the map.
+ // In that case we need to search from the original section piece vector.
+ const SectionPiece *piece = getSectionPiece(offset);
+ uint64_t addend = offset - piece->inputOff;
+ return piece->outputOff + addend;
+}
+
+void SyntheticMergedChunk::finalizeContents() {
+ // Add all string pieces to the string table builder to create section
+ // contents.
+ for (MergeInputChunk *sec : chunks)
+ for (size_t i = 0, e = sec->pieces.size(); i != e; ++i)
+ if (sec->pieces[i].live)
+ builder.add(sec->getData(i));
+
+ // Fix the string table content. After this, the contents will never change.
+ builder.finalize();
+
+ // finalize() fixed tail-optimized strings, so we can now get
+ // offsets of strings. Get an offset for each string and save it
+ // to a corresponding SectionPiece for easy access.
+ for (MergeInputChunk *sec : chunks)
+ for (size_t i = 0, e = sec->pieces.size(); i != e; ++i)
+ if (sec->pieces[i].live)
+ sec->pieces[i].outputOff = builder.getOffset(sec->getData(i));
+}
+
+uint64_t InputSection::getTombstoneForSection(StringRef name) {
+ // When a function is not live we need to update relocations referring to it.
+ // If they occur in DWARF debug symbols, we want to change the pc of the
+ // function to -1 to avoid overlapping with a valid range. However for the
+ // debug_ranges and debug_loc sections that would conflict with the existing
+ // meaning of -1 so we use -2.
+  // Returning 0 means there is no tombstone value for this section, and
+  // relocations will just use the addend.
+ if (!name.startswith(".debug_"))
+ return 0;
+ if (name.equals(".debug_ranges") || name.equals(".debug_loc"))
+ return UINT64_C(-2);
+ return UINT64_C(-1);
+}
+
} // namespace wasm
} // namespace lld
#include "InputFiles.h"
#include "lld/Common/ErrorHandler.h"
#include "lld/Common/LLVM.h"
+#include "llvm/ADT/CachedHashString.h"
+#include "llvm/MC/StringTableBuilder.h"
#include "llvm/Object/Wasm.h"
namespace lld {
class InputChunk {
public:
- enum Kind { DataSegment, Function, SyntheticFunction, Section };
+ enum Kind {
+ DataSegment,
+ Merge,
+ MergedChunk,
+ Function,
+ SyntheticFunction,
+ Section,
+ };
- Kind kind() const { return sectionKind; }
+ StringRef name;
+ StringRef debugName;
- virtual uint32_t getSize() const { return data().size(); }
- virtual uint32_t getInputSize() const { return getSize(); };
+ StringRef getName() const { return name; }
+ StringRef getDebugName() const { return debugName; }
+ Kind kind() const { return (Kind)sectionKind; }
- virtual void writeTo(uint8_t *sectionStart) const;
+ uint32_t getSize() const;
+ uint32_t getInputSize() const;
+
+ void writeTo(uint8_t *buf) const;
+ void relocate(uint8_t *buf) const;
ArrayRef<WasmRelocation> getRelocations() const { return relocations; }
void setRelocations(ArrayRef<WasmRelocation> rs) { relocations = rs; }
- virtual StringRef getName() const = 0;
- virtual StringRef getDebugName() const = 0;
- virtual uint32_t getComdat() const = 0;
+ // Translate an offset into the input chunk to an offset in the output
+ // section.
+ uint64_t getOffset(uint64_t offset) const;
+ // Translate an offset into the input chunk into an offset into the output
+  // chunk. For data segments (InputSegment) this will return an offset into
+  // the output segment. For MergeInputChunk, this will return an offset into
+  // the parent merged chunk. For other chunk types this is a no-op and we just
+  // return the unmodified offset.
+ uint64_t getChunkOffset(uint64_t offset) const;
+ uint64_t getVA(uint64_t offset = 0) const;
+
+ uint32_t getComdat() const { return comdat; }
StringRef getComdatName() const;
- virtual uint32_t getInputSectionOffset() const = 0;
+ uint32_t getInputSectionOffset() const { return inputSectionOffset; }
size_t getNumRelocations() const { return relocations.size(); }
void writeRelocations(llvm::raw_ostream &os) const;
+ void generateRelocationCode(raw_ostream &os) const;
+
+ bool isTLS() const {
+ // Older object files don't include WASM_SEG_FLAG_TLS and instead
+ // relied on the naming convention.
+ return flags & llvm::wasm::WASM_SEG_FLAG_TLS || name.startswith(".tdata") ||
+ name.startswith(".tbss");
+ }
ObjFile *file;
- int32_t outputOffset = 0;
+ OutputSection *outputSec = nullptr;
+ uint32_t comdat = UINT32_MAX;
+ uint32_t inputSectionOffset = 0;
+ uint32_t alignment;
+ uint32_t flags;
+
+ // Only applies to data segments.
+ uint32_t outputSegmentOffset = 0;
+ const OutputSegment *outputSeg = nullptr;
+
+ // After assignAddresses is called, this represents the offset from
+ // the beginning of the output section this chunk was assigned to.
+ int32_t outSecOff = 0;
+
+ uint8_t sectionKind : 3;
// Signals that the section is part of the output. The garbage collector,
// and COMDAT handling can set a sections' Live bit.
unsigned discarded : 1;
protected:
- InputChunk(ObjFile *f, Kind k)
- : file(f), live(!config->gcSections), discarded(false), sectionKind(k) {}
- virtual ~InputChunk() = default;
- virtual ArrayRef<uint8_t> data() const = 0;
-
- // Verifies the existing data at relocation targets matches our expectations.
- // This is performed only debug builds as an extra sanity check.
- void verifyRelocTargets() const;
+ InputChunk(ObjFile *f, Kind k, StringRef name, uint32_t alignment = 0,
+ uint32_t flags = 0)
+ : name(name), file(f), alignment(alignment), flags(flags), sectionKind(k),
+ live(!config->gcSections), discarded(false) {}
+ ArrayRef<uint8_t> data() const { return rawData; }
+ uint64_t getTombstone() const;
ArrayRef<WasmRelocation> relocations;
- Kind sectionKind;
+ ArrayRef<uint8_t> rawData;
};
// Represents a WebAssembly data segment which can be included as part of
class InputSegment : public InputChunk {
public:
InputSegment(const WasmSegment &seg, ObjFile *f)
- : InputChunk(f, InputChunk::DataSegment), segment(seg) {}
+ : InputChunk(f, InputChunk::DataSegment, seg.Data.Name,
+ seg.Data.Alignment, seg.Data.LinkingFlags),
+ segment(seg) {
+ rawData = segment.Data.Content;
+ comdat = segment.Data.Comdat;
+ inputSectionOffset = segment.SectionOffset;
+ }
static bool classof(const InputChunk *c) { return c->kind() == DataSegment; }
- void generateRelocationCode(raw_ostream &os) const;
+protected:
+ const WasmSegment &segment;
+};
+
+class SyntheticMergedChunk;
+
+// Merge segment handling copied from lld/ELF/InputSection.h. Keep in sync
+// where possible.
+
+// SectionPiece represents a piece of splittable segment contents.
+// We allocate a lot of these and binary search on them. This means that they
+// have to be as compact as possible, which is why we don't store the size (can
+// be found by looking at the next one).
+struct SectionPiece {
+ SectionPiece(size_t off, uint32_t hash, bool live)
+ : inputOff(off), live(live || !config->gcSections), hash(hash >> 1) {}
- uint32_t getAlignment() const { return segment.Data.Alignment; }
- StringRef getName() const override { return segment.Data.Name; }
- StringRef getDebugName() const override { return StringRef(); }
- uint32_t getComdat() const override { return segment.Data.Comdat; }
- uint32_t getInputSectionOffset() const override {
- return segment.SectionOffset;
+ uint32_t inputOff;
+ uint32_t live : 1;
+ uint32_t hash : 31;
+ uint64_t outputOff = 0;
+};
+
+static_assert(sizeof(SectionPiece) == 16, "SectionPiece is too big");
+
+// This corresponds to segments marked as WASM_SEG_FLAG_STRINGS.
+class MergeInputChunk : public InputChunk {
+public:
+ MergeInputChunk(const WasmSegment &seg, ObjFile *f)
+ : InputChunk(f, Merge, seg.Data.Name, seg.Data.Alignment,
+ seg.Data.LinkingFlags) {
+ rawData = seg.Data.Content;
+ comdat = seg.Data.Comdat;
+ inputSectionOffset = seg.SectionOffset;
}
- const OutputSegment *outputSeg = nullptr;
- int32_t outputSegmentOffset = 0;
+ MergeInputChunk(const WasmSection &s, ObjFile *f)
+ : InputChunk(f, Merge, s.Name, 0, llvm::wasm::WASM_SEG_FLAG_STRINGS) {
+ assert(s.Type == llvm::wasm::WASM_SEC_CUSTOM);
+ comdat = s.Comdat;
+ rawData = s.Content;
+ }
-protected:
- ArrayRef<uint8_t> data() const override { return segment.Data.Content; }
+ static bool classof(const InputChunk *s) { return s->kind() == Merge; }
+ void splitIntoPieces();
+
+ // Translate an offset in the input section to an offset in the parent
+  // SyntheticMergedChunk.
+ uint64_t getParentOffset(uint64_t offset) const;
+
+ // Splittable sections are handled as a sequence of data
+ // rather than a single large blob of data.
+ std::vector<SectionPiece> pieces;
+
+ // Returns I'th piece's data. This function is very hot when
+ // string merging is enabled, so we want to inline.
+ LLVM_ATTRIBUTE_ALWAYS_INLINE
+ llvm::CachedHashStringRef getData(size_t i) const {
+ size_t begin = pieces[i].inputOff;
+ size_t end =
+ (pieces.size() - 1 == i) ? data().size() : pieces[i + 1].inputOff;
+ return {toStringRef(data().slice(begin, end - begin)), pieces[i].hash};
+ }
- const WasmSegment &segment;
+ // Returns the SectionPiece at a given input section offset.
+ SectionPiece *getSectionPiece(uint64_t offset);
+ const SectionPiece *getSectionPiece(uint64_t offset) const {
+ return const_cast<MergeInputChunk *>(this)->getSectionPiece(offset);
+ }
+
+ SyntheticMergedChunk *parent = nullptr;
+
+private:
+ void splitStrings(ArrayRef<uint8_t> a);
+};
+
+// SyntheticMergedChunk is a class that allows us to put mergeable
+// sections with different attributes in a single output section. To do that we
+// put them into SyntheticMergedChunk synthetic input sections which are
+// attached to regular output sections.
+class SyntheticMergedChunk : public InputChunk {
+public:
+ SyntheticMergedChunk(StringRef name, uint32_t alignment, uint32_t flags)
+ : InputChunk(nullptr, InputChunk::MergedChunk, name, alignment, flags),
+ builder(llvm::StringTableBuilder::RAW, 1ULL << alignment) {}
+
+ static bool classof(const InputChunk *c) {
+ return c->kind() == InputChunk::MergedChunk;
+ }
+
+ void addMergeChunk(MergeInputChunk *ms) {
+ comdat = ms->getComdat();
+ ms->parent = this;
+ chunks.push_back(ms);
+ }
+
+ void finalizeContents();
+
+ llvm::StringTableBuilder builder;
+
+protected:
+ std::vector<MergeInputChunk *> chunks;
};
// Represents a single wasm function within an input file. These are
class InputFunction : public InputChunk {
public:
InputFunction(const WasmSignature &s, const WasmFunction *func, ObjFile *f)
- : InputChunk(f, InputChunk::Function), signature(s), function(func) {}
+ : InputChunk(f, InputChunk::Function, func->SymbolName), signature(s),
+ function(func), exportName(func && func->ExportName.hasValue()
+ ? (*func->ExportName).str()
+ : llvm::Optional<std::string>()) {
+ inputSectionOffset = function->CodeSectionOffset;
+ rawData =
+ file->codeSection->Content.slice(inputSectionOffset, function->Size);
+ debugName = function->DebugName;
+ comdat = function->Comdat;
+ }
+
+ InputFunction(StringRef name, const WasmSignature &s)
+ : InputChunk(nullptr, InputChunk::Function, name), signature(s) {}
static bool classof(const InputChunk *c) {
return c->kind() == InputChunk::Function ||
c->kind() == InputChunk::SyntheticFunction;
}
- void writeTo(uint8_t *sectionStart) const override;
- StringRef getName() const override { return function->SymbolName; }
- StringRef getDebugName() const override { return function->DebugName; }
llvm::Optional<StringRef> getExportName() const {
- return function ? function->ExportName : llvm::Optional<StringRef>();
+ return exportName.hasValue() ? llvm::Optional<StringRef>(*exportName)
+ : llvm::Optional<StringRef>();
}
- uint32_t getComdat() const override { return function->Comdat; }
+ void setExportName(std::string exportName) { this->exportName = exportName; }
uint32_t getFunctionInputOffset() const { return getInputSectionOffset(); }
uint32_t getFunctionCodeOffset() const { return function->CodeOffset; }
- uint32_t getSize() const override {
- if (config->compressRelocations && file) {
- assert(compressedSize);
- return compressedSize;
- }
- return data().size();
- }
- uint32_t getInputSize() const override { return function->Size; }
uint32_t getFunctionIndex() const { return functionIndex.getValue(); }
bool hasFunctionIndex() const { return functionIndex.hasValue(); }
void setFunctionIndex(uint32_t index);
- uint32_t getInputSectionOffset() const override {
- return function->CodeSectionOffset;
- }
uint32_t getTableIndex() const { return tableIndex.getValue(); }
bool hasTableIndex() const { return tableIndex.hasValue(); }
void setTableIndex(uint32_t index);
+ void writeCompressed(uint8_t *buf) const;
// The size of a given input function can depend on the values of the
// LEB relocations within it. This finalizeContents method is called after
const WasmSignature &signature;
-protected:
- ArrayRef<uint8_t> data() const override {
- assert(!config->compressRelocations);
- return file->codeSection->Content.slice(getInputSectionOffset(),
- function->Size);
+ uint32_t getCompressedSize() const {
+ assert(compressedSize);
+ return compressedSize;
}
const WasmFunction *function;
+
+protected:
+ llvm::Optional<std::string> exportName;
llvm::Optional<uint32_t> functionIndex;
llvm::Optional<uint32_t> tableIndex;
uint32_t compressedFuncSize = 0;
public:
SyntheticFunction(const WasmSignature &s, StringRef name,
StringRef debugName = {})
- : InputFunction(s, nullptr, nullptr), name(name), debugName(debugName) {
+ : InputFunction(name, s) {
sectionKind = InputChunk::SyntheticFunction;
+ this->debugName = debugName;
}
static bool classof(const InputChunk *c) {
return c->kind() == InputChunk::SyntheticFunction;
}
- StringRef getName() const override { return name; }
- StringRef getDebugName() const override { return debugName; }
- uint32_t getComdat() const override { return UINT32_MAX; }
-
- void setBody(ArrayRef<uint8_t> body_) { body = body_; }
-
-protected:
- ArrayRef<uint8_t> data() const override { return body; }
-
- StringRef name;
- StringRef debugName;
- ArrayRef<uint8_t> body;
+ void setBody(ArrayRef<uint8_t> body) { rawData = body; }
};
// Represents a single Wasm Section within an input file.
class InputSection : public InputChunk {
public:
InputSection(const WasmSection &s, ObjFile *f)
- : InputChunk(f, InputChunk::Section), section(s) {
+ : InputChunk(f, InputChunk::Section, s.Name),
+ tombstoneValue(getTombstoneForSection(s.Name)), section(s) {
assert(section.Type == llvm::wasm::WASM_SEC_CUSTOM);
+ comdat = section.Comdat;
+ rawData = section.Content;
}
- StringRef getName() const override { return section.Name; }
- StringRef getDebugName() const override { return StringRef(); }
- uint32_t getComdat() const override { return UINT32_MAX; }
+ static bool classof(const InputChunk *c) {
+ return c->kind() == InputChunk::Section;
+ }
- OutputSection *outputSec = nullptr;
+ const uint64_t tombstoneValue;
protected:
- ArrayRef<uint8_t> data() const override { return section.Content; }
-
- // Offset within the input section. This is only zero since this chunk
- // type represents an entire input section, not part of one.
- uint32_t getInputSectionOffset() const override { return 0; }
-
+ static uint64_t getTombstoneForSection(StringRef name);
   const WasmSection &section;
};
--- /dev/null
+//===- InputElement.h ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLD_WASM_INPUT_ELEMENT_H
+#define LLD_WASM_INPUT_ELEMENT_H
+
+#include "Config.h"
+#include "InputFiles.h"
+#include "WriterUtils.h"
+#include "lld/Common/LLVM.h"
+#include "llvm/Object/Wasm.h"
+
+namespace lld {
+namespace wasm {
+
+// Represents a single element (Global, Tag, Table, etc) within an input
+// file.
+class InputElement {
+protected:
+ InputElement(StringRef name, ObjFile *f)
+ : file(f), live(!config->gcSections), name(name) {}
+
+public:
+ StringRef getName() const { return name; }
+ uint32_t getAssignedIndex() const { return assignedIndex.getValue(); }
+ bool hasAssignedIndex() const { return assignedIndex.hasValue(); }
+ void assignIndex(uint32_t index) {
+ assert(!hasAssignedIndex());
+ assignedIndex = index;
+ }
+
+ ObjFile *file;
+ bool live = false;
+
+protected:
+ StringRef name;
+ llvm::Optional<uint32_t> assignedIndex;
+};
+
+inline WasmInitExpr intConst(uint64_t value, bool is64) {
+ WasmInitExpr ie;
+ if (is64) {
+ ie.Opcode = llvm::wasm::WASM_OPCODE_I64_CONST;
+ ie.Value.Int64 = static_cast<int64_t>(value);
+ } else {
+ ie.Opcode = llvm::wasm::WASM_OPCODE_I32_CONST;
+ ie.Value.Int32 = static_cast<int32_t>(value);
+ }
+ return ie;
+}
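// Illustrative usage (not part of the patch): intConst() hides the
// wasm32/wasm64 distinction when building constant init expressions, as
// InputGlobal::setPointerValue does below.
inline WasmInitExpr examplePointerInit(uint64_t va, bool is64) {
  // Emits `i32.const va` for wasm32 outputs and `i64.const va` for wasm64.
  return intConst(va, is64);
}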
+
+class InputGlobal : public InputElement {
+public:
+ InputGlobal(const WasmGlobal &g, ObjFile *f)
+ : InputElement(g.SymbolName, f), type(g.Type), initExpr(g.InitExpr) {}
+
+ const WasmGlobalType &getType() const { return type; }
+ const WasmInitExpr &getInitExpr() const { return initExpr; }
+
+ void setPointerValue(uint64_t value) {
+ initExpr = intConst(value, config->is64.getValueOr(false));
+ }
+
+private:
+ WasmGlobalType type;
+ WasmInitExpr initExpr;
+};
+
+class InputTag : public InputElement {
+public:
+ InputTag(const WasmSignature &s, const WasmTag &t, ObjFile *f)
+ : InputElement(t.SymbolName, f), signature(s), type(t.Type) {}
+
+ const WasmTagType &getType() const { return type; }
+
+ const WasmSignature &signature;
+
+private:
+ WasmTagType type;
+};
+
+class InputTable : public InputElement {
+public:
+ InputTable(const WasmTable &t, ObjFile *f)
+ : InputElement(t.SymbolName, f), type(t.Type) {}
+
+ const WasmTableType &getType() const { return type; }
+ void setLimits(const WasmLimits &limits) { type.Limits = limits; }
+
+private:
+ WasmTableType type;
+};
+
+} // namespace wasm
+
+inline std::string toString(const wasm::InputElement *d) {
+ return (toString(d->file) + ":(" + d->getName() + ")").str();
+}
+
+} // namespace lld
+
+#endif // LLD_WASM_INPUT_ELEMENT_H
#include "InputFiles.h"
#include "Config.h"
#include "InputChunks.h"
-#include "InputEvent.h"
-#include "InputGlobal.h"
+#include "InputElement.h"
+#include "OutputSegment.h"
#include "SymbolTable.h"
#include "lld/Common/ErrorHandler.h"
#include "lld/Common/Memory.h"
}
namespace wasm {
+
+void InputFile::checkArch(Triple::ArchType arch) const {
+ bool is64 = arch == Triple::wasm64;
+ if (is64 && !config->is64.hasValue()) {
+ fatal(toString(this) +
+ ": must specify -mwasm64 to process wasm64 object files");
+ } else if (config->is64.getValueOr(false) != is64) {
+ fatal(toString(this) +
+ ": wasm32 object file can't be linked in wasm64 mode");
+ }
+}
+
std::unique_ptr<llvm::TarWriter> tar;
Optional<MemoryBufferRef> readFile(StringRef path) {
return mbref;
}
-InputFile *createObjectFile(MemoryBufferRef mb,
- StringRef archiveName) {
+InputFile *createObjectFile(MemoryBufferRef mb, StringRef archiveName) {
file_magic magic = identify_magic(mb.getBuffer());
if (magic == file_magic::wasm_object) {
std::unique_ptr<Binary> bin =
"\n Symbols : " + Twine(symbols.size()) +
"\n Function Imports : " + Twine(wasmObj->getNumImportedFunctions()) +
"\n Global Imports : " + Twine(wasmObj->getNumImportedGlobals()) +
- "\n Event Imports : " + Twine(wasmObj->getNumImportedEvents()));
+ "\n Tag Imports : " + Twine(wasmObj->getNumImportedTags()) +
+ "\n Table Imports : " + Twine(wasmObj->getNumImportedTables()));
}
// Relocations contain either symbol or type indices. This function takes a
case R_WASM_MEMORY_ADDR_REL_SLEB64:
case R_WASM_MEMORY_ADDR_I32:
case R_WASM_MEMORY_ADDR_I64:
+ case R_WASM_MEMORY_ADDR_TLS_SLEB:
+ case R_WASM_MEMORY_ADDR_TLS_SLEB64:
case R_WASM_FUNCTION_OFFSET_I32:
+ case R_WASM_FUNCTION_OFFSET_I64:
+ case R_WASM_MEMORY_ADDR_LOCREL_I32:
return reloc.Addend;
case R_WASM_SECTION_OFFSET_I32:
- return getSectionSymbol(reloc.Index)->section->outputOffset + reloc.Addend;
+ return getSectionSymbol(reloc.Index)->section->getOffset(reloc.Addend);
default:
llvm_unreachable("unexpected relocation type");
}
}
-// Calculate the value we expect to find at the relocation location.
-// This is used as a sanity check before applying a relocation to a given
-// location. It is useful for catching bugs in the compiler and linker.
-uint64_t ObjFile::calcExpectedValue(const WasmRelocation &reloc) const {
- switch (reloc.Type) {
- case R_WASM_TABLE_INDEX_I32:
- case R_WASM_TABLE_INDEX_SLEB: {
- const WasmSymbol &sym = wasmObj->syms()[reloc.Index];
- return tableEntries[sym.Info.ElementIndex];
- }
- case R_WASM_TABLE_INDEX_REL_SLEB: {
- const WasmSymbol &sym = wasmObj->syms()[reloc.Index];
- return tableEntriesRel[sym.Info.ElementIndex];
- }
- case R_WASM_MEMORY_ADDR_LEB:
- case R_WASM_MEMORY_ADDR_LEB64:
- case R_WASM_MEMORY_ADDR_SLEB:
- case R_WASM_MEMORY_ADDR_SLEB64:
- case R_WASM_MEMORY_ADDR_REL_SLEB:
- case R_WASM_MEMORY_ADDR_REL_SLEB64:
- case R_WASM_MEMORY_ADDR_I32:
- case R_WASM_MEMORY_ADDR_I64: {
- const WasmSymbol &sym = wasmObj->syms()[reloc.Index];
- if (sym.isUndefined())
- return 0;
- const WasmSegment &segment =
- wasmObj->dataSegments()[sym.Info.DataRef.Segment];
- if (segment.Data.Offset.Opcode == WASM_OPCODE_I32_CONST)
- return segment.Data.Offset.Value.Int32 + sym.Info.DataRef.Offset +
- reloc.Addend;
- else if (segment.Data.Offset.Opcode == WASM_OPCODE_I64_CONST)
- return segment.Data.Offset.Value.Int64 + sym.Info.DataRef.Offset +
- reloc.Addend;
- else
- llvm_unreachable("unknown init expr opcode");
- }
- case R_WASM_FUNCTION_OFFSET_I32: {
- const WasmSymbol &sym = wasmObj->syms()[reloc.Index];
- InputFunction *f =
- functions[sym.Info.ElementIndex - wasmObj->getNumImportedFunctions()];
- return f->getFunctionInputOffset() + f->getFunctionCodeOffset() +
- reloc.Addend;
- }
- case R_WASM_SECTION_OFFSET_I32:
- return reloc.Addend;
- case R_WASM_TYPE_INDEX_LEB:
- return reloc.Index;
- case R_WASM_FUNCTION_INDEX_LEB:
- case R_WASM_GLOBAL_INDEX_LEB:
- case R_WASM_GLOBAL_INDEX_I32:
- case R_WASM_EVENT_INDEX_LEB: {
- const WasmSymbol &sym = wasmObj->syms()[reloc.Index];
- return sym.Info.ElementIndex;
- }
- default:
- llvm_unreachable("unknown relocation type");
- }
-}
-
// Translate from the relocation's index into the final linked output value.
-uint64_t ObjFile::calcNewValue(const WasmRelocation &reloc) const {
+uint64_t ObjFile::calcNewValue(const WasmRelocation &reloc, uint64_t tombstone,
+ const InputChunk *chunk) const {
const Symbol* sym = nullptr;
if (reloc.Type != R_WASM_TYPE_INDEX_LEB) {
sym = symbols[reloc.Index];
// We can end up with relocations against non-live symbols. For example
- // in debug sections. We return reloc.Addend because always returning zero
- // causes the generation of spurious range-list terminators in the
- // .debug_ranges section.
- if ((isa<FunctionSymbol>(sym) || isa<DataSymbol>(sym)) && !sym->isLive())
- return reloc.Addend;
+ // in debug sections. We return a tombstone value in debug symbol sections
+ // so this will not produce a valid range conflicting with ranges of actual
+ // code. In other sections we return reloc.Addend.
+
+ if (!isa<SectionSymbol>(sym) && !sym->isLive())
+ return tombstone ? tombstone : reloc.Addend;
}
switch (reloc.Type) {
case R_WASM_TABLE_INDEX_I32:
+ case R_WASM_TABLE_INDEX_I64:
case R_WASM_TABLE_INDEX_SLEB:
- case R_WASM_TABLE_INDEX_REL_SLEB: {
+ case R_WASM_TABLE_INDEX_SLEB64:
+ case R_WASM_TABLE_INDEX_REL_SLEB:
+ case R_WASM_TABLE_INDEX_REL_SLEB64: {
if (!getFunctionSymbol(reloc.Index)->hasTableIndex())
return 0;
uint32_t index = getFunctionSymbol(reloc.Index)->getTableIndex();
- if (reloc.Type == R_WASM_TABLE_INDEX_REL_SLEB)
+ if (reloc.Type == R_WASM_TABLE_INDEX_REL_SLEB ||
+ reloc.Type == R_WASM_TABLE_INDEX_REL_SLEB64)
index -= config->tableBase;
return index;
-
}
case R_WASM_MEMORY_ADDR_LEB:
case R_WASM_MEMORY_ADDR_LEB64:
case R_WASM_MEMORY_ADDR_REL_SLEB64:
case R_WASM_MEMORY_ADDR_I32:
case R_WASM_MEMORY_ADDR_I64:
+ case R_WASM_MEMORY_ADDR_LOCREL_I32: {
if (isa<UndefinedData>(sym) || sym->isUndefWeak())
return 0;
- return cast<DefinedData>(sym)->getVirtualAddress() + reloc.Addend;
+ auto D = cast<DefinedData>(sym);
+ // Treat non-TLS relocation against symbols that live in the TLS segment
+  // like TLS relocations. This behaviour exists to support older object
+ // files created before we introduced TLS relocations.
+ // TODO(sbc): Remove this legacy behaviour one day. This will break
+ // backward compat with old object files built with `-fPIC`.
+ if (D->segment && D->segment->outputSeg->isTLS())
+ return D->getOutputSegmentOffset() + reloc.Addend;
+
+ uint64_t value = D->getVA() + reloc.Addend;
+ if (reloc.Type == R_WASM_MEMORY_ADDR_LOCREL_I32) {
+ const auto *segment = cast<InputSegment>(chunk);
+ uint64_t p = segment->outputSeg->startVA + segment->outputSegmentOffset +
+ reloc.Offset - segment->getInputSectionOffset();
+ value -= p;
+ }
+ return value;
+ }
+ case R_WASM_MEMORY_ADDR_TLS_SLEB:
+ case R_WASM_MEMORY_ADDR_TLS_SLEB64:
+ if (isa<UndefinedData>(sym) || sym->isUndefWeak())
+ return 0;
+ // TLS relocations are relative to the start of the TLS output segment
+ return cast<DefinedData>(sym)->getOutputSegmentOffset() + reloc.Addend;
case R_WASM_TYPE_INDEX_LEB:
return typeMap[reloc.Index];
case R_WASM_FUNCTION_INDEX_LEB:
if (auto gs = dyn_cast<GlobalSymbol>(sym))
return gs->getGlobalIndex();
return sym->getGOTIndex();
- case R_WASM_EVENT_INDEX_LEB:
- return getEventSymbol(reloc.Index)->getEventIndex();
- case R_WASM_FUNCTION_OFFSET_I32: {
+ case R_WASM_TAG_INDEX_LEB:
+ return getTagSymbol(reloc.Index)->getTagIndex();
+ case R_WASM_FUNCTION_OFFSET_I32:
+ case R_WASM_FUNCTION_OFFSET_I64: {
auto *f = cast<DefinedFunction>(sym);
- return f->function->outputOffset +
- (f->function->getFunctionCodeOffset() + reloc.Addend);
+ return f->function->getOffset(f->function->getFunctionCodeOffset() +
+ reloc.Addend);
}
case R_WASM_SECTION_OFFSET_I32:
- return getSectionSymbol(reloc.Index)->section->outputOffset + reloc.Addend;
+ return getSectionSymbol(reloc.Index)->section->getOffset(reloc.Addend);
+ case R_WASM_TABLE_NUMBER_LEB:
+ return getTableSymbol(reloc.Index)->getTableNumber();
default:
llvm_unreachable("unknown relocation type");
}
}
}
+// An object file can have two approaches to tables. With the reference-types
+// feature enabled, input files that define or use tables declare the tables
+// using symbols, and record each use with a relocation. This way when the
+// linker combines inputs, it can collate the tables used by the inputs,
+// assigning them distinct table numbers, and renumber all the uses as
+// appropriate. At the same time, the linker has special logic to build the
+// indirect function table if it is needed.
+//
+// However, MVP object files (those that target WebAssembly 1.0, the "minimum
+// viable product" version of WebAssembly) neither write table symbols nor
+// record relocations. These files can have at most one table, the indirect
+// function table, used by call_indirect, which is the address space for
+// function pointers. If this table is present, it is always an import. If we
+// have a file with a table import but no table symbols, it is an MVP object
+// file. synthesizeMVPIndirectFunctionTableSymbolIfNeeded serves as a shim when
+// loading these input files, defining the missing symbol to allow the indirect
+// function table to be built.
+//
+// As indirect function table usage in MVP objects cannot be relocated,
+// the linker must ensure that this table gets assigned index zero.
+void ObjFile::addLegacyIndirectFunctionTableIfNeeded(
+ uint32_t tableSymbolCount) {
+ uint32_t tableCount = wasmObj->getNumImportedTables() + tables.size();
+
+ // If there are symbols for all tables, then all is good.
+ if (tableCount == tableSymbolCount)
+ return;
+
+ // It's possible for an input to define tables and also use the indirect
+ // function table, but forget to compile with -mattr=+reference-types.
+ // For these newer files, we require symbols for all tables, and
+ // relocations for all of their uses.
+ if (tableSymbolCount != 0) {
+ error(toString(this) +
+ ": expected one symbol table entry for each of the " +
+ Twine(tableCount) + " table(s) present, but got " +
+ Twine(tableSymbolCount) + " symbol(s) instead.");
+ return;
+ }
+
+ // An MVP object file can have up to one table import, for the indirect
+ // function table, but will have no table definitions.
+ if (tables.size()) {
+ error(toString(this) +
+ ": unexpected table definition(s) without corresponding "
+ "symbol-table entries.");
+ return;
+ }
+
+ // An MVP object file can have only one table import.
+ if (tableCount != 1) {
+ error(toString(this) +
+ ": multiple table imports, but no corresponding symbol-table "
+ "entries.");
+ return;
+ }
+
+ const WasmImport *tableImport = nullptr;
+ for (const auto &import : wasmObj->imports()) {
+ if (import.Kind == WASM_EXTERNAL_TABLE) {
+ assert(!tableImport);
+ tableImport = &import;
+ }
+ }
+ assert(tableImport);
+
+ // We can only synthesize a symtab entry for the indirect function table; if
+ // it has an unexpected name or type, assume that it's not actually the
+ // indirect function table.
+ if (tableImport->Field != functionTableName ||
+ tableImport->Table.ElemType != uint8_t(ValType::FUNCREF)) {
+ error(toString(this) + ": table import " + Twine(tableImport->Field) +
+ " is missing a symbol table entry.");
+ return;
+ }
+
+ auto *info = make<WasmSymbolInfo>();
+ info->Name = tableImport->Field;
+ info->Kind = WASM_SYMBOL_TYPE_TABLE;
+ info->ImportModule = tableImport->Module;
+ info->ImportName = tableImport->Field;
+ info->Flags = WASM_SYMBOL_UNDEFINED;
+ info->Flags |= WASM_SYMBOL_NO_STRIP;
+ info->ElementIndex = 0;
+ LLVM_DEBUG(dbgs() << "Synthesizing symbol for table import: " << info->Name
+ << "\n");
+ const WasmGlobalType *globalType = nullptr;
+ const WasmTagType *tagType = nullptr;
+ const WasmSignature *signature = nullptr;
+ auto *wasmSym = make<WasmSymbol>(*info, globalType, &tableImport->Table,
+ tagType, signature);
+ Symbol *sym = createUndefined(*wasmSym, false);
+ // We're only sure it's a TableSymbol if the createUndefined succeeded.
+ if (errorCount())
+ return;
+ symbols.push_back(sym);
+ // Because there are no TABLE_NUMBER relocs, we can't compute accurate
+ // liveness info; instead, just mark the symbol as always live.
+ sym->markLive();
+
+ // We assume that this compilation unit has unrelocatable references to
+ // this table.
+ config->legacyFunctionTable = true;
+}
+
+static bool shouldMerge(const WasmSection &sec) {
+ if (config->optimize == 0)
+ return false;
+ // Sadly we don't have section attributes yet for custom sections, so we
+ // currently go by the name alone.
+ // TODO(sbc): Add ability for wasm sections to carry flags so we don't
+ // need to use names here.
+ // For now, keep in sync with uses of wasm::WASM_SEG_FLAG_STRINGS in
+ // MCObjectFileInfo::initWasmMCObjectFileInfo which creates these custom
+ // sections.
+ return sec.Name == ".debug_str" || sec.Name == ".debug_str.dwo" ||
+ sec.Name == ".debug_line_str";
+}
+
+static bool shouldMerge(const WasmSegment &seg) {
+ // As of now we only support merging strings, and only with single byte
+ // alignment (2^0).
+ if (!(seg.Data.LinkingFlags & WASM_SEG_FLAG_STRINGS) ||
+ (seg.Data.Alignment != 0))
+ return false;
+
+ // On a regular link we don't merge sections if -O0 (default is -O1). This
+ // sometimes makes the linker significantly faster, although the output will
+ // be bigger.
+ if (config->optimize == 0)
+ return false;
+
+  // A mergeable section with size 0 is useless because it doesn't have
+ // any data to merge. A mergeable string section with size 0 can be
+ // argued as invalid because it doesn't end with a null character.
+ // We'll avoid a mess by handling them as if they were non-mergeable.
+ if (seg.Data.Content.size() == 0)
+ return false;
+
+ return true;
+}
+
void ObjFile::parse(bool ignoreComdats) {
// Parse a memory buffer as a wasm file.
LLVM_DEBUG(dbgs() << "Parsing object: " << toString(this) << "\n");
bin.release();
wasmObj.reset(obj);
+ checkArch(obj->getArch());
+
// Build up a map of function indices to table indices for use when
// verifying the existing table index relocations
uint32_t totalFunctions =
}
}
+ ArrayRef<StringRef> comdats = wasmObj->linkingData().Comdats;
+ for (StringRef comdat : comdats) {
+ bool isNew = ignoreComdats || symtab->addComdat(comdat);
+ keptComdats.push_back(isNew);
+ }
+
uint32_t sectionIndex = 0;
// Bool for each symbol, true if called directly. This allows us to implement
assert(!dataSection);
    dataSection = &section;
} else if (section.Type == WASM_SEC_CUSTOM) {
- customSections.emplace_back(make<InputSection>(section, this));
+ InputChunk *customSec;
+ if (shouldMerge(section))
+ customSec = make<MergeInputChunk>(section, this);
+ else
+ customSec = make<InputSection>(section, this);
+ customSec->discarded = isExcludedByComdat(customSec);
+ customSections.emplace_back(customSec);
customSections.back()->setRelocations(section.Relocations);
customSectionsByIndex[sectionIndex] = customSections.back();
}
typeMap.resize(getWasmObj()->types().size());
typeIsUsed.resize(getWasmObj()->types().size(), false);
- ArrayRef<StringRef> comdats = wasmObj->linkingData().Comdats;
- for (StringRef comdat : comdats) {
- bool isNew = ignoreComdats || symtab->addComdat(comdat);
- keptComdats.push_back(isNew);
- }
// Populate `Segments`.
for (const WasmSegment &s : wasmObj->dataSegments()) {
- auto* seg = make<InputSegment>(s, this);
+ InputChunk *seg;
+ if (shouldMerge(s)) {
+ seg = make<MergeInputChunk>(s, this);
+ } else
+ seg = make<InputSegment>(s, this);
seg->discarded = isExcludedByComdat(seg);
+
segments.emplace_back(seg);
}
setRelocs(segments, dataSection);
}
setRelocs(functions, codeSection);
+ // Populate `Tables`.
+ for (const WasmTable &t : wasmObj->tables())
+ tables.emplace_back(make<InputTable>(t, this));
+
// Populate `Globals`.
for (const WasmGlobal &g : wasmObj->globals())
globals.emplace_back(make<InputGlobal>(g, this));
- // Populate `Events`.
- for (const WasmEvent &e : wasmObj->events())
- events.emplace_back(make<InputEvent>(types[e.Type.SigIndex], e, this));
+ // Populate `Tags`.
+ for (const WasmTag &t : wasmObj->tags())
+ tags.emplace_back(make<InputTag>(types[t.Type.SigIndex], t, this));
// Populate `Symbols` based on the symbols in the object.
symbols.reserve(wasmObj->getNumberOfSymbols());
+ uint32_t tableSymbolCount = 0;
for (const SymbolRef &sym : wasmObj->symbols()) {
const WasmSymbol &wasmSym = wasmObj->getWasmSymbol(sym.getRawDataRefImpl());
+ if (wasmSym.isTypeTable())
+ tableSymbolCount++;
if (wasmSym.isDefined()) {
// createDefined may fail if the symbol is comdat excluded in which case
// we fall back to creating an undefined symbol
size_t idx = symbols.size();
symbols.push_back(createUndefined(wasmSym, isCalledDirectly[idx]));
}
+
+ addLegacyIndirectFunctionTableIfNeeded(tableSymbolCount);
}
bool ObjFile::isExcludedByComdat(InputChunk *chunk) const {
return cast<GlobalSymbol>(symbols[index]);
}
-EventSymbol *ObjFile::getEventSymbol(uint32_t index) const {
- return cast<EventSymbol>(symbols[index]);
+TagSymbol *ObjFile::getTagSymbol(uint32_t index) const {
+ return cast<TagSymbol>(symbols[index]);
+}
+
+TableSymbol *ObjFile::getTableSymbol(uint32_t index) const {
+ return cast<TableSymbol>(symbols[index]);
}
SectionSymbol *ObjFile::getSectionSymbol(uint32_t index) const {
return symtab->addDefinedFunction(name, flags, this, func);
}
case WASM_SYMBOL_TYPE_DATA: {
- InputSegment *seg = segments[sym.Info.DataRef.Segment];
+ InputChunk *seg = segments[sym.Info.DataRef.Segment];
auto offset = sym.Info.DataRef.Offset;
auto size = sym.Info.DataRef.Size;
if (sym.isBindingLocal())
return symtab->addDefinedGlobal(name, flags, this, global);
}
case WASM_SYMBOL_TYPE_SECTION: {
- InputSection *section = customSectionsByIndex[sym.Info.ElementIndex];
+ InputChunk *section = customSectionsByIndex[sym.Info.ElementIndex];
assert(sym.isBindingLocal());
+ // Need to return null if discarded here? data and func only do that when
+ // binding is not local.
+ if (section->discarded)
+ return nullptr;
return make<SectionSymbol>(flags, section, this);
}
- case WASM_SYMBOL_TYPE_EVENT: {
- InputEvent *event =
- events[sym.Info.ElementIndex - wasmObj->getNumImportedEvents()];
+ case WASM_SYMBOL_TYPE_TAG: {
+ InputTag *tag = tags[sym.Info.ElementIndex - wasmObj->getNumImportedTags()];
+ if (sym.isBindingLocal())
+ return make<DefinedTag>(name, flags, this, tag);
+ return symtab->addDefinedTag(name, flags, this, tag);
+ }
+ case WASM_SYMBOL_TYPE_TABLE: {
+ InputTable *table =
+ tables[sym.Info.ElementIndex - wasmObj->getNumImportedTables()];
if (sym.isBindingLocal())
- return make<DefinedEvent>(name, flags, this, event);
- return symtab->addDefinedEvent(name, flags, this, event);
+ return make<DefinedTable>(name, flags, this, table);
+ return symtab->addDefinedTable(name, flags, this, table);
}
}
llvm_unreachable("unknown symbol kind");
return symtab->addUndefinedGlobal(name, sym.Info.ImportName,
sym.Info.ImportModule, flags, this,
sym.GlobalType);
+ case WASM_SYMBOL_TYPE_TABLE:
+ if (sym.isBindingLocal())
+ return make<UndefinedTable>(name, sym.Info.ImportName,
+ sym.Info.ImportModule, flags, this,
+ sym.TableType);
+ return symtab->addUndefinedTable(name, sym.Info.ImportName,
+ sym.Info.ImportModule, flags, this,
+ sym.TableType);
case WASM_SYMBOL_TYPE_SECTION:
llvm_unreachable("section symbols cannot be undefined");
}
obj = check(lto::InputFile::create(MemoryBufferRef(
mb.getBuffer(), saver.save(archiveName + mb.getBufferIdentifier()))));
Triple t(obj->getTargetTriple());
- if (t.getArch() != Triple::wasm32) {
- error(toString(this) + ": machine type must be wasm32");
+ if (!t.isWasm()) {
+ error(toString(this) + ": machine type must be wasm32 or wasm64");
return;
}
+ checkArch(t.getArch());
std::vector<bool> keptComdats;
- for (StringRef s : obj->getComdatTable())
- keptComdats.push_back(symtab->addComdat(s));
+ // TODO Support nodeduplicate https://bugs.llvm.org/show_bug.cgi?id=50531
+ for (std::pair<StringRef, Comdat::SelectionKind> s : obj->getComdatTable())
+ keptComdats.push_back(symtab->addComdat(s.first));
for (const lto::InputFile::Symbol &objSym : obj->symbols())
symbols.push_back(createBitcodeSymbol(keptComdats, objSym, *this));
#include "lld/Common/LLVM.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/Triple.h"
#include "llvm/LTO/LTO.h"
#include "llvm/Object/Archive.h"
#include "llvm/Object/Wasm.h"
class InputFunction;
class InputSegment;
class InputGlobal;
-class InputEvent;
+class InputTag;
+class InputTable;
class InputSection;
// If --reproduce option is given, all input files are written
MutableArrayRef<Symbol *> getMutableSymbols() { return symbols; }
+ // An InputFile is considered live if any of the symbols defined by it
+ // are live.
+ void markLive() { live = true; }
+ bool isLive() const { return live; }
+
protected:
- InputFile(Kind k, MemoryBufferRef m) : mb(m), fileKind(k) {}
+ InputFile(Kind k, MemoryBufferRef m)
+ : mb(m), fileKind(k), live(!config->gcSections) {}
+
+ void checkArch(llvm::Triple::ArchType arch) const;
+
MemoryBufferRef mb;
// List of all symbols referenced or defined by this file.
private:
const Kind fileKind;
+ bool live;
};
// .a file (ar archive)
explicit ObjFile(MemoryBufferRef m, StringRef archiveName)
: InputFile(ObjectKind, m) {
this->archiveName = std::string(archiveName);
+
+ // If this isn't part of an archive, it's eagerly linked, so mark it live.
+ if (archiveName.empty())
+ markLive();
}
static bool classof(const InputFile *f) { return f->kind() == ObjectKind; }
void dumpInfo() const;
uint32_t calcNewIndex(const WasmRelocation &reloc) const;
- uint64_t calcNewValue(const WasmRelocation &reloc) const;
+ uint64_t calcNewValue(const WasmRelocation &reloc, uint64_t tombstone,
+ const InputChunk *chunk) const;
uint64_t calcNewAddend(const WasmRelocation &reloc) const;
- uint64_t calcExpectedValue(const WasmRelocation &reloc) const;
Symbol *getSymbol(const WasmRelocation &reloc) const {
return symbols[reloc.Index];
};
std::vector<uint32_t> tableEntries;
std::vector<uint32_t> tableEntriesRel;
std::vector<bool> keptComdats;
- std::vector<InputSegment *> segments;
+ std::vector<InputChunk *> segments;
std::vector<InputFunction *> functions;
std::vector<InputGlobal *> globals;
- std::vector<InputEvent *> events;
- std::vector<InputSection *> customSections;
- llvm::DenseMap<uint32_t, InputSection *> customSectionsByIndex;
+ std::vector<InputTag *> tags;
+ std::vector<InputTable *> tables;
+ std::vector<InputChunk *> customSections;
+ llvm::DenseMap<uint32_t, InputChunk *> customSectionsByIndex;
Symbol *getSymbol(uint32_t index) const { return symbols[index]; }
FunctionSymbol *getFunctionSymbol(uint32_t index) const;
DataSymbol *getDataSymbol(uint32_t index) const;
GlobalSymbol *getGlobalSymbol(uint32_t index) const;
SectionSymbol *getSectionSymbol(uint32_t index) const;
- EventSymbol *getEventSymbol(uint32_t index) const;
+ TagSymbol *getTagSymbol(uint32_t index) const;
+ TableSymbol *getTableSymbol(uint32_t index) const;
private:
Symbol *createDefined(const WasmSymbol &sym);
Symbol *createUndefined(const WasmSymbol &sym, bool isCalledDirectly);
bool isExcludedByComdat(InputChunk *chunk) const;
+ void addLegacyIndirectFunctionTableIfNeeded(uint32_t tableSymbolCount);
std::unique_ptr<WasmObjectFile> wasmObj;
};
explicit BitcodeFile(MemoryBufferRef m, StringRef archiveName)
: InputFile(BitcodeKind, m) {
this->archiveName = std::string(archiveName);
+
+ // If this isn't part of an archive, it's eagerly linked, so mark it live.
+ if (archiveName.empty())
+ markLive();
}
static bool classof(const InputFile *f) { return f->kind() == BitcodeKind; }
c.OptLevel = config->ltoo;
c.MAttrs = getMAttrs();
c.CGOptLevel = args::getCGOptLevel(config->ltoo);
+ c.UseNewPM = config->ltoNewPassManager;
+ c.DebugPassManager = config->ltoDebugPassManager;
if (config->relocatable)
c.RelocModel = None;
--- /dev/null
+//===- MapFile.cpp --------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the -Map option. It lists, in order and
+// hierarchically, the output sections, input sections, input files and
+// symbols:
+//
+// Addr Off Size Out In Symbol
+// - 00000015 10 .text
+// - 0000000e 10 test.o:(.text)
+// - 00000000 5 local
+// - 00000000 5 f(int)
+//
+//===----------------------------------------------------------------------===//
+
+#include "MapFile.h"
+#include "InputElement.h"
+#include "InputFiles.h"
+#include "OutputSections.h"
+#include "OutputSegment.h"
+#include "SymbolTable.h"
+#include "Symbols.h"
+#include "SyntheticSections.h"
+#include "lld/Common/Strings.h"
+#include "llvm/ADT/MapVector.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/Support/Parallel.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+using namespace llvm::object;
+using namespace lld;
+using namespace lld::wasm;
+
+using SymbolMapTy = DenseMap<const InputChunk *, SmallVector<Symbol *, 4>>;
+
+// Print out the first three columns of a line.
+static void writeHeader(raw_ostream &os, int64_t vma, uint64_t lma,
+ uint64_t size) {
+  // Not all entries in the map have a virtual memory address (e.g. functions)
+ if (vma == -1)
+ os << format(" - %8llx %8llx ", lma, size);
+ else
+ os << format("%8llx %8llx %8llx ", vma, lma, size);
+}
+
+// Returns a list of all symbols that we want to print out.
+static std::vector<Symbol *> getSymbols() {
+ std::vector<Symbol *> v;
+ for (InputFile *file : symtab->objectFiles)
+ for (Symbol *b : file->getSymbols())
+ if (auto *dr = dyn_cast<Symbol>(b))
+ if ((!isa<SectionSymbol>(dr)) && dr->isLive() &&
+ (dr->getFile() == file))
+ v.push_back(dr);
+ return v;
+}
+
+// Returns a map from sections to their symbols.
+static SymbolMapTy getSectionSyms(ArrayRef<Symbol *> syms) {
+ SymbolMapTy ret;
+ for (Symbol *dr : syms)
+ ret[dr->getChunk()].push_back(dr);
+ return ret;
+}
+
+// Construct a map from symbols to their stringified representations.
+// Demangling symbols (which is what toString() does) is slow, so
+// we do that in batch using parallel-for.
+static DenseMap<Symbol *, std::string>
+getSymbolStrings(ArrayRef<Symbol *> syms) {
+ std::vector<std::string> str(syms.size());
+ parallelForEachN(0, syms.size(), [&](size_t i) {
+ raw_string_ostream os(str[i]);
+ auto *chunk = syms[i]->getChunk();
+ if (chunk == nullptr)
+ return;
+ uint64_t fileOffset = chunk->outputSec != nullptr
+ ? chunk->outputSec->getOffset() + chunk->outSecOff
+ : 0;
+ uint64_t vma = -1;
+ uint64_t size = 0;
+ if (auto *DD = dyn_cast<DefinedData>(syms[i])) {
+ vma = DD->getVA();
+ size = DD->getSize();
+ fileOffset += DD->value;
+ }
+ if (auto *DF = dyn_cast<DefinedFunction>(syms[i])) {
+ size = DF->function->getSize();
+ }
+ writeHeader(os, vma, fileOffset, size);
+ os.indent(16) << toString(*syms[i]);
+ });
+
+ DenseMap<Symbol *, std::string> ret;
+ for (size_t i = 0, e = syms.size(); i < e; ++i)
+ ret[syms[i]] = std::move(str[i]);
+ return ret;
+}
+
+void lld::wasm::writeMapFile(ArrayRef<OutputSection *> outputSections) {
+ if (config->mapFile.empty())
+ return;
+
+ // Open a map file for writing.
+ std::error_code ec;
+ raw_fd_ostream os(config->mapFile, ec, sys::fs::OF_None);
+ if (ec) {
+ error("cannot open " + config->mapFile + ": " + ec.message());
+ return;
+ }
+
+ // Collect symbol info that we want to print out.
+ std::vector<Symbol *> syms = getSymbols();
+ SymbolMapTy sectionSyms = getSectionSyms(syms);
+ DenseMap<Symbol *, std::string> symStr = getSymbolStrings(syms);
+
+ // Print out the header line.
+ os << " Addr Off Size Out In Symbol\n";
+
+ for (OutputSection *osec : outputSections) {
+ writeHeader(os, -1, osec->getOffset(), osec->getSize());
+ os << toString(*osec) << '\n';
+ if (auto *code = dyn_cast<CodeSection>(osec)) {
+ for (auto *chunk : code->functions) {
+ writeHeader(os, -1, chunk->outputSec->getOffset() + chunk->outSecOff,
+ chunk->getSize());
+ os.indent(8) << toString(chunk) << '\n';
+ for (Symbol *sym : sectionSyms[chunk])
+ os << symStr[sym] << '\n';
+ }
+ } else if (auto *data = dyn_cast<DataSection>(osec)) {
+ for (auto *oseg : data->segments) {
+ writeHeader(os, oseg->startVA, data->getOffset() + oseg->sectionOffset,
+ oseg->size);
+ os << oseg->name << '\n';
+ for (auto *chunk : oseg->inputSegments) {
+ uint64_t offset =
+ chunk->outputSec != nullptr
+ ? chunk->outputSec->getOffset() + chunk->outSecOff
+ : 0;
+ writeHeader(os, chunk->getVA(), offset, chunk->getSize());
+ os.indent(8) << toString(chunk) << '\n';
+ for (Symbol *sym : sectionSyms[chunk])
+ os << symStr[sym] << '\n';
+ }
+ }
+ } else if (auto *globals = dyn_cast<GlobalSection>(osec)) {
+ for (auto *global : globals->inputGlobals) {
+ writeHeader(os, global->getAssignedIndex(), 0, 0);
+ os.indent(8) << global->getName() << '\n';
+ }
+ }
+ // TODO: other section/symbol types
+ }
+}
--- /dev/null
+//===- MapFile.h ------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLD_WASM_MAPFILE_H
+#define LLD_WASM_MAPFILE_H
+
+#include "llvm/ADT/ArrayRef.h"
+
+namespace lld {
+namespace wasm {
+class OutputSection;
+void writeMapFile(llvm::ArrayRef<OutputSection *> outputSections);
+} // namespace wasm
+} // namespace lld
+
+#endif
#include "MarkLive.h"
#include "Config.h"
#include "InputChunks.h"
-#include "InputEvent.h"
-#include "InputGlobal.h"
+#include "InputElement.h"
#include "SymbolTable.h"
#include "Symbols.h"
private:
void enqueue(Symbol *sym);
- void markSymbol(Symbol *sym);
+ void enqueueInitFunctions(const ObjFile *sym);
void mark();
+ bool isCallCtorsLive();
// A list of chunks to visit.
SmallVector<InputChunk *, 256> queue;
if (!sym || sym->isLive())
return;
LLVM_DEBUG(dbgs() << "markLive: " << sym->getName() << "\n");
+
+ InputFile *file = sym->getFile();
+ bool needInitFunctions = file && !file->isLive() && sym->isDefined();
+
sym->markLive();
+
+ // Mark ctor functions in the object that defines this symbol live.
+ // The ctor functions are all referenced by the synthetic callCtors
+ // function. However, this function does not contain relocations so we
+ // have to manually mark the ctors as live.
+ if (needInitFunctions)
+ enqueueInitFunctions(cast<ObjFile>(file));
+
if (InputChunk *chunk = sym->getChunk())
queue.push_back(chunk);
+}
- // The ctor functions are all referenced by the synthetic callCtors
- // function. However, this function does not contain relocations so we
- // have to manually mark the ctors as live if callCtors itself is live.
- if (sym == WasmSym::callCtors) {
- if (config->isPic)
- enqueue(WasmSym::applyRelocs);
- for (const ObjFile *obj : symtab->objectFiles) {
- const WasmLinkingData &l = obj->getWasmObj()->linkingData();
- for (const WasmInitFunc &f : l.InitFunctions) {
- auto* initSym = obj->getFunctionSymbol(f.Symbol);
- if (!initSym->isDiscarded())
- enqueue(initSym);
- }
- }
+// The ctor functions are all referenced by the synthetic callCtors
+// function. However, this function does not contain relocations so we
+// have to manually mark the ctors as live.
+void MarkLive::enqueueInitFunctions(const ObjFile *obj) {
+ const WasmLinkingData &l = obj->getWasmObj()->linkingData();
+ for (const WasmInitFunc &f : l.InitFunctions) {
+ auto *initSym = obj->getFunctionSymbol(f.Symbol);
+ if (!initSym->isDiscarded())
+ enqueue(initSym);
}
}
if (sym->isNoStrip() || sym->isExported())
enqueue(sym);
- // For relocatable output, we need to preserve all the ctor functions
- if (config->relocatable) {
- for (const ObjFile *obj : symtab->objectFiles) {
- const WasmLinkingData &l = obj->getWasmObj()->linkingData();
- for (const WasmInitFunc &f : l.InitFunctions)
- enqueue(obj->getFunctionSymbol(f.Symbol));
- }
- }
-
- if (config->isPic)
- enqueue(WasmSym::callCtors);
+ if (WasmSym::callDtors)
+ enqueue(WasmSym::callDtors);
- if (config->sharedMemory && !config->shared)
- enqueue(WasmSym::initMemory);
+ // Enqueue constructors in objects explicitly live from the command-line.
+ for (const ObjFile *obj : symtab->objectFiles)
+ if (obj->isLive())
+ enqueueInitFunctions(obj);
mark();
+
+ // If we have any non-discarded init functions, mark `__wasm_call_ctors` as
+ // live so that we assign it an index and call it.
+ if (isCallCtorsLive())
+ WasmSym::callCtors->markLive();
}
void MarkLive::mark() {
// functions used for weak-undefined symbols have this behaviour (compare
// equal to null pointer, only reachable via direct call).
if (reloc.Type == R_WASM_TABLE_INDEX_SLEB ||
- reloc.Type == R_WASM_TABLE_INDEX_I32) {
+ reloc.Type == R_WASM_TABLE_INDEX_SLEB64 ||
+ reloc.Type == R_WASM_TABLE_INDEX_I32 ||
+ reloc.Type == R_WASM_TABLE_INDEX_I64) {
auto *funcSym = cast<FunctionSymbol>(sym);
- if (funcSym->hasTableIndex() && funcSym->getTableIndex() == 0)
+ if (funcSym->isStub)
continue;
}
for (InputGlobal *g : obj->globals)
if (!g->live)
message("removing unused section " + toString(g));
- for (InputEvent *e : obj->events)
- if (!e->live)
- message("removing unused section " + toString(e));
+ for (InputTag *t : obj->tags)
+ if (!t->live)
+ message("removing unused section " + toString(t));
+ for (InputTable *t : obj->tables)
+ if (!t->live)
+ message("removing unused section " + toString(t));
}
for (InputChunk *c : symtab->syntheticFunctions)
if (!c->live)
for (InputGlobal *g : symtab->syntheticGlobals)
if (!g->live)
message("removing unused section " + toString(g));
+ for (InputTable *t : symtab->syntheticTables)
+ if (!t->live)
+ message("removing unused section " + toString(t));
}
}
+bool MarkLive::isCallCtorsLive() {
+  // In a relocatable link, we don't call `__wasm_call_ctors`.
+ if (config->relocatable)
+ return false;
+
+ // In Emscripten-style PIC, we call `__wasm_call_ctors` which calls
+ // `__wasm_apply_data_relocs`.
+ if (config->isPic)
+ return true;
+
+ // If there are any init functions, mark `__wasm_call_ctors` live so that
+ // it can call them.
+ for (const ObjFile *file : symtab->objectFiles) {
+ const WasmLinkingData &l = file->getWasmObj()->linkingData();
+ for (const WasmInitFunc &f : l.InitFunctions) {
+ auto *sym = file->getFunctionSymbol(f.Symbol);
+ if (!sym->isDiscarded() && sym->isLive())
+ return true;
+ }
+ }
+
+ return false;
+}
+
} // namespace wasm
} // namespace lld
def no_ # NAME: Flag<["--", "-"], "no-" # name>, HelpText<help2>;
}
+multiclass BB<string name, string help1, string help2> {
+ def NAME: Flag<["--"], name>, HelpText<help1>;
+ def no_ # NAME: Flag<["--"], "no-" # name>, HelpText<help2>;
+}
+
// The following flags are shared with the ELF linker
-def color_diagnostics: F<"color-diagnostics">,
- HelpText<"Use colors in diagnostics">;
+def Bsymbolic: F<"Bsymbolic">, HelpText<"Bind defined symbols locally">;
+defm color_diagnostics: B<"color-diagnostics",
+ "Alias for --color-diagnostics=always",
+ "Alias for --color-diagnostics=never">;
def color_diagnostics_eq: J<"color-diagnostics=">,
- HelpText<"Use colors in diagnostics; one of 'always', 'never', 'auto'">;
+ HelpText<"Use colors in diagnostics (default: auto)">,
+ MetaVarName<"[auto,always,never]">;
def compress_relocations: F<"compress-relocations">,
HelpText<"Compress the relocation targets in the code section.">;
def emit_relocs: F<"emit-relocs">, HelpText<"Generate relocations in output">;
+def error_unresolved_symbols: F<"error-unresolved-symbols">,
+ HelpText<"Report unresolved symbols as errors">;
+
defm export_dynamic: B<"export-dynamic",
"Put symbols in the dynamic symbol table",
"Do not put symbols in the dynamic symbol table (default)">;
def mllvm: S<"mllvm">, HelpText<"Options to pass to LLVM">;
-def no_color_diagnostics: F<"no-color-diagnostics">,
- HelpText<"Do not use colors in diagnostics">;
+defm Map: Eq<"Map", "Print a link map to the specified file">;
def no_fatal_warnings: F<"no-fatal-warnings">;
"List removed unused sections",
"Do not list removed unused sections">;
+def print_map: F<"print-map">,
+ HelpText<"Print a link map to the standard output">;
+
def relocatable: F<"relocatable">, HelpText<"Create relocatable object file">;
defm reproduce: Eq<"reproduce", "Dump linker invocation and input files for debugging">;
defm undefined: Eq<"undefined", "Force undefined symbol during linking">;
+defm unresolved_symbols:
+ Eq<"unresolved-symbols", "Determine how to handle unresolved symbols">;
+
def v: Flag<["-"], "v">, HelpText<"Display the version number">;
def verbose: F<"verbose">, HelpText<"Verbose mode">;
def version: F<"version">, HelpText<"Display the version number and exit">;
-def z: JoinedOrSeparate<["-"], "z">, MetaVarName<"<option>">,
- HelpText<"Linker option extensions">;
+def warn_unresolved_symbols: F<"warn-unresolved-symbols">,
+ HelpText<"Report unresolved symbols as warnings">;
defm wrap: Eq<"wrap", "Use wrapper functions for symbol">,
MetaVarName<"<symbol>=<symbol>">;
+def z: JoinedOrSeparate<["-"], "z">, MetaVarName<"<option>">,
+ HelpText<"Linker option extensions">;
+
// The following flags are unique to wasm
def allow_undefined: F<"allow-undefined">,
- HelpText<"Allow undefined symbols in linked binary">;
+ HelpText<"Allow undefined symbols in linked binary. This options is equivelant "
+ "to --import-undefined and --unresolved-symbols=ignore-all">;
+
+def import_undefined: F<"import-undefined">,
+ HelpText<"Turn undefined symbols into imports where possible">;
def allow_undefined_file: J<"allow-undefined-file=">,
HelpText<"Allow symbols listed in <file> to be undefined in linked binary">;
defm export: Eq<"export", "Force a symbol to be exported">;
+defm export_if_defined: Eq<"export-if-defined",
+ "Force a symbol to be exported, if it is defined in the input">;
+
def export_all: F<"export-all">,
HelpText<"Export all symbols (normally combined with --no-gc-sections)">;
def: J<"entry=">, Alias<entry>;
def: Flag<["-"], "E">, Alias<export_dynamic>, HelpText<"Alias for --export-dynamic">;
def: Flag<["-"], "i">, Alias<initial_memory>;
+def: Flag<["-"], "M">, Alias<print_map>, HelpText<"Alias for --print-map">;
def: Flag<["-"], "r">, Alias<relocatable>;
def: Flag<["-"], "s">, Alias<strip_all>, HelpText<"Alias for --strip-all">;
def: Flag<["-"], "S">, Alias<strip_debug>, HelpText<"Alias for --strip-debug">;
def lto_partitions: J<"lto-partitions=">,
HelpText<"Number of LTO codegen partitions">;
def disable_verify: F<"disable-verify">;
-def save_temps: F<"save-temps">;
+def save_temps: F<"save-temps">, HelpText<"Save intermediate LTO compilation results">;
def thinlto_cache_dir: J<"thinlto-cache-dir=">,
HelpText<"Path to ThinLTO cached object file directory">;
defm thinlto_cache_policy: Eq<"thinlto-cache-policy", "Pruning policy for the ThinLTO cache">;
def thinlto_jobs: J<"thinlto-jobs=">,
HelpText<"Number of ThinLTO jobs. Default to --threads=">;
+defm lto_legacy_pass_manager: BB<"lto-legacy-pass-manager", "Use legacy pass manager", "Use new pass manager">;
+def lto_debug_pass_manager: F<"lto-debug-pass-manager">,
+ HelpText<"Debug new pass manager">;
// Experimental PIC mode.
def experimental_pic: F<"experimental-pic">,
#include "OutputSections.h"
#include "InputChunks.h"
+#include "InputElement.h"
#include "InputFiles.h"
#include "OutputSegment.h"
#include "WriterUtils.h"
#include "lld/Common/ErrorHandler.h"
+#include "lld/Common/Memory.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/LEB128.h"
#include "llvm/Support/Parallel.h"
return "MEMORY";
case WASM_SEC_GLOBAL:
return "GLOBAL";
- case WASM_SEC_EVENT:
- return "EVENT";
+ case WASM_SEC_TAG:
+ return "TAG";
case WASM_SEC_EXPORT:
return "EXPORT";
case WASM_SEC_START:
bodySize = codeSectionHeader.size();
for (InputFunction *func : functions) {
- func->outputOffset = bodySize;
+ func->outputSec = this;
+ func->outSecOff = bodySize;
func->calculateSize();
+ // All functions should have a non-empty body at this point
+ assert(func->getSize());
bodySize += func->getSize();
}
std::count_if(segments.begin(), segments.end(),
[](OutputSegment *segment) { return !segment->isBss; });
+#ifndef NDEBUG
+ unsigned activeCount = std::count_if(
+ segments.begin(), segments.end(), [](OutputSegment *segment) {
+ return (segment->initFlags & WASM_DATA_SEGMENT_IS_PASSIVE) == 0;
+ });
+#endif
+
+ assert((config->sharedMemory || !config->isPic || activeCount <= 1) &&
+ "output segments should have been combined by now");
+
writeUleb128(os, segmentCount, "data segment count");
os.flush();
bodySize = dataSectionHeader.size();
- assert((!config->isPic || segments.size() <= 1) &&
- "Currenly only a single data segment is supported in PIC mode");
-
for (OutputSegment *segment : segments) {
if (segment->isBss)
continue;
raw_string_ostream os(segment->header);
writeUleb128(os, segment->initFlags, "init flags");
- if (segment->initFlags & WASM_SEGMENT_HAS_MEMINDEX)
+ if (segment->initFlags & WASM_DATA_SEGMENT_HAS_MEMINDEX)
writeUleb128(os, 0, "memory index");
- if ((segment->initFlags & WASM_SEGMENT_IS_PASSIVE) == 0) {
+ if ((segment->initFlags & WASM_DATA_SEGMENT_IS_PASSIVE) == 0) {
WasmInitExpr initExpr;
if (config->isPic) {
initExpr.Opcode = WASM_OPCODE_GLOBAL_GET;
initExpr.Value.Global = WasmSym::memoryBase->getGlobalIndex();
} else {
- initExpr.Opcode = WASM_OPCODE_I32_CONST;
- initExpr.Value.Int32 = segment->startVA;
+ initExpr = intConst(segment->startVA, config->is64.getValueOr(false));
}
writeInitExpr(os, initExpr);
}
log("Data segment: size=" + Twine(segment->size) + ", startVA=" +
Twine::utohexstr(segment->startVA) + ", name=" + segment->name);
- for (InputSegment *inputSeg : segment->inputSegments)
- inputSeg->outputOffset = segment->sectionOffset + segment->header.size() +
- inputSeg->outputSegmentOffset;
+ for (InputChunk *inputSeg : segment->inputSegments) {
+ inputSeg->outputSec = this;
+ inputSeg->outSecOff = segment->sectionOffset + segment->header.size() +
+ inputSeg->outputSegmentOffset;
+ }
}
createHeader(bodySize);
return false;
}
+// Lots of duplication here with OutputSegment::finalizeInputSegments
+void CustomSection::finalizeInputSections() {
+ SyntheticMergedChunk *mergedSection = nullptr;
+ std::vector<InputChunk *> newSections;
+
+ for (InputChunk *s : inputSections) {
+ s->outputSec = this;
+ MergeInputChunk *ms = dyn_cast<MergeInputChunk>(s);
+ if (!ms) {
+ newSections.push_back(s);
+ continue;
+ }
+
+ if (!mergedSection) {
+ mergedSection =
+ make<SyntheticMergedChunk>(name, 0, WASM_SEG_FLAG_STRINGS);
+ newSections.push_back(mergedSection);
+ mergedSection->outputSec = this;
+ }
+ mergedSection->addMergeChunk(ms);
+ }
+
+ if (!mergedSection)
+ return;
+
+ mergedSection->finalizeContents();
+ inputSections = newSections;
+}
+
void CustomSection::finalizeContents() {
+ finalizeInputSections();
+
raw_string_ostream os(nameData);
encodeULEB128(name.size(), os);
os << name;
os.flush();
- for (InputSection *section : inputSections) {
- section->outputOffset = payloadSize;
- section->outputSec = this;
+ for (InputChunk *section : inputSections) {
+ assert(!section->discarded);
+ section->outSecOff = payloadSize;
payloadSize += section->getSize();
}
buf += nameData.size();
// Write custom sections payload
- for (const InputSection *section : inputSections)
+ for (const InputChunk *section : inputSections)
section->writeTo(buf);
}
uint32_t CustomSection::getNumRelocations() const {
uint32_t count = 0;
- for (const InputSection *inputSect : inputSections)
+ for (const InputChunk *inputSect : inputSections)
count += inputSect->getNumRelocations();
return count;
}
void CustomSection::writeRelocations(raw_ostream &os) const {
- for (const InputSection *s : inputSections)
+ for (const InputChunk *s : inputSections)
s->writeRelocations(os);
}
void createHeader(size_t bodySize);
virtual bool isNeeded() const { return true; }
virtual size_t getSize() const = 0;
+ virtual size_t getOffset() { return offset; }
virtual void writeTo(uint8_t *buf) = 0;
virtual void finalizeContents() = 0;
virtual uint32_t getNumRelocations() const { return 0; }
explicit CodeSection(ArrayRef<InputFunction *> functions)
: OutputSection(llvm::wasm::WASM_SEC_CODE), functions(functions) {}
+ static bool classof(const OutputSection *sec) {
+ return sec->type == llvm::wasm::WASM_SEC_CODE;
+ }
+
size_t getSize() const override { return header.size() + bodySize; }
void writeTo(uint8_t *buf) override;
uint32_t getNumRelocations() const override;
bool isNeeded() const override { return functions.size() > 0; }
void finalizeContents() override;
-protected:
ArrayRef<InputFunction *> functions;
+
+protected:
std::string codeSectionHeader;
size_t bodySize = 0;
};
explicit DataSection(ArrayRef<OutputSegment *> segments)
: OutputSection(llvm::wasm::WASM_SEC_DATA), segments(segments) {}
+ static bool classof(const OutputSection *sec) {
+ return sec->type == llvm::wasm::WASM_SEC_DATA;
+ }
+
size_t getSize() const override { return header.size() + bodySize; }
void writeTo(uint8_t *buf) override;
uint32_t getNumRelocations() const override;
bool isNeeded() const override;
void finalizeContents() override;
-protected:
ArrayRef<OutputSegment *> segments;
+
+protected:
std::string dataSectionHeader;
size_t bodySize = 0;
};
// separately and are instead synthesized by the linker.
class CustomSection : public OutputSection {
public:
- CustomSection(std::string name, ArrayRef<InputSection *> inputSections)
+ CustomSection(std::string name, ArrayRef<InputChunk *> inputSections)
: OutputSection(llvm::wasm::WASM_SEC_CUSTOM, name),
inputSections(inputSections) {}
+
+ static bool classof(const OutputSection *sec) {
+ return sec->type == llvm::wasm::WASM_SEC_CUSTOM;
+ }
+
size_t getSize() const override {
return header.size() + nameData.size() + payloadSize;
}
void finalizeContents() override;
protected:
+ void finalizeInputSections();
size_t payloadSize = 0;
- ArrayRef<InputSection *> inputSections;
+ std::vector<InputChunk *> inputSections;
std::string nameData;
};
--- /dev/null
+//===- OutputSegment.h -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "OutputSegment.h"
+#include "InputChunks.h"
+#include "lld/Common/Memory.h"
+
+#define DEBUG_TYPE "lld"
+
+using namespace llvm;
+using namespace llvm::wasm;
+
+namespace lld {
+
+namespace wasm {
+
+void OutputSegment::addInputSegment(InputChunk *inSeg) {
+ alignment = std::max(alignment, inSeg->alignment);
+ inputSegments.push_back(inSeg);
+ size = llvm::alignTo(size, 1ULL << inSeg->alignment);
+ LLVM_DEBUG(dbgs() << "addInputSegment: " << inSeg->getName()
+ << " oname=" << name << " size=" << inSeg->getSize()
+ << " align=" << inSeg->alignment << " at:" << size << "\n");
+ inSeg->outputSeg = this;
+ inSeg->outputSegmentOffset = size;
+ size += inSeg->getSize();
+}
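// Worked example (illustrative): `alignment` is stored as a log2 value, so
// inSeg->alignment == 3 requests 8-byte alignment. If the segment currently
// holds 5 bytes, the new chunk is placed at
//   llvm::alignTo(5, 1ULL << 3) == 8
// i.e. outputSegmentOffset becomes 8, and `size` then grows by the chunk size.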
+
+// This function scans over the input segments.
+//
+// It removes MergeInputChunks from the input section array and adds
+// new synthetic sections at the location of the first input section
+// that it replaces. It then finalizes each synthetic section in order
+// to compute an output offset for each piece of each input section.
+void OutputSegment::finalizeInputSegments() {
+ LLVM_DEBUG(llvm::dbgs() << "finalizeInputSegments: " << name << "\n");
+ std::vector<SyntheticMergedChunk *> mergedSegments;
+ std::vector<InputChunk *> newSegments;
+ for (InputChunk *s : inputSegments) {
+ MergeInputChunk *ms = dyn_cast<MergeInputChunk>(s);
+ if (!ms) {
+ newSegments.push_back(s);
+ continue;
+ }
+
+    // A segment should not make it here unless it's alive
+ assert(ms->live);
+
+ auto i = llvm::find_if(mergedSegments, [=](SyntheticMergedChunk *seg) {
+ return seg->flags == ms->flags && seg->alignment == ms->alignment;
+ });
+ if (i == mergedSegments.end()) {
+ LLVM_DEBUG(llvm::dbgs() << "new merge segment: " << name
+ << " alignment=" << ms->alignment << "\n");
+ auto *syn = make<SyntheticMergedChunk>(name, ms->alignment, ms->flags);
+ syn->outputSeg = this;
+ mergedSegments.push_back(syn);
+ i = std::prev(mergedSegments.end());
+ newSegments.push_back(syn);
+ } else {
+ LLVM_DEBUG(llvm::dbgs() << "adding to merge segment: " << name << "\n");
+ }
+ (*i)->addMergeChunk(ms);
+ }
+
+ for (auto *ms : mergedSegments)
+ ms->finalizeContents();
+
+ inputSegments = newSegments;
+ size = 0;
+ for (InputChunk *seg : inputSegments) {
+ size = llvm::alignTo(size, 1ULL << seg->alignment);
+ LLVM_DEBUG(llvm::dbgs() << "outputSegmentOffset set: " << seg->getName()
+ << " -> " << size << "\n");
+ seg->outputSegmentOffset = size;
+ size += seg->getSize();
+ }
+}
+
+} // namespace wasm
+} // namespace lld
public:
OutputSegment(StringRef n) : name(n) {}
- void addInputSegment(InputSegment *inSeg) {
- alignment = std::max(alignment, inSeg->getAlignment());
- inputSegments.push_back(inSeg);
- size = llvm::alignTo(size, 1ULL << inSeg->getAlignment());
- inSeg->outputSeg = this;
- inSeg->outputSegmentOffset = size;
- size += inSeg->getSize();
- }
+ void addInputSegment(InputChunk *inSeg);
+ void finalizeInputSegments();
+
+ bool isTLS() const { return name == ".tdata"; }
StringRef name;
bool isBss = false;
uint32_t index = 0;
+ uint32_t linkingFlags = 0;
uint32_t initFlags = 0;
uint32_t sectionOffset = 0;
uint32_t alignment = 0;
- uint32_t startVA = 0;
- std::vector<InputSegment *> inputSegments;
+ uint64_t startVA = 0;
+ std::vector<InputChunk *> inputSegments;
// Sum of the sizes of all the input segments.
uint32_t size = 0;
#include "Relocations.h"
#include "InputChunks.h"
+#include "OutputSegment.h"
+#include "SymbolTable.h"
#include "SyntheticSections.h"
using namespace llvm;
namespace lld {
namespace wasm {
+
static bool requiresGOTAccess(const Symbol *sym) {
- return config->isPic && !sym->isHidden() && !sym->isLocal();
+ if (!config->isPic)
+ return false;
+ if (sym->isHidden() || sym->isLocal())
+ return false;
+ // With `-Bsymbolic` (or when building an executable) we don't need to use
+ // the GOT for symbols that are defined within the current module.
+ if (sym->isDefined() && (!config->shared || config->bsymbolic))
+ return false;
+ return true;
}
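+
+// For example (illustrative): when linking with -shared and without
+// -Bsymbolic, taking the address of a non-hidden defined function still goes
+// through the GOT, since the definition may be interposed at runtime; with
+// -Bsymbolic, or when linking an executable, the local definition is used
+// directly and no GOT entry is needed.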
static bool allowUndefined(const Symbol* sym) {
- // Undefined functions with explicit import name are allowed to be undefined
- // at link time.
- if (auto *F = dyn_cast<UndefinedFunction>(sym))
- if (F->importName)
+ // Undefined functions and globals with explicit import name are allowed to be
+ // undefined at link time.
+ if (auto *f = dyn_cast<UndefinedFunction>(sym))
+ if (f->importName || config->importUndefined)
+ return true;
+ if (auto *g = dyn_cast<UndefinedGlobal>(sym))
+ if (g->importName)
return true;
- return (config->allowUndefined ||
- config->allowUndefinedSymbols.count(sym->getName()) != 0);
+ return config->allowUndefinedSymbols.count(sym->getName()) != 0;
}
-static void reportUndefined(const Symbol* sym) {
- assert(sym->isUndefined());
- assert(!sym->isWeak());
- if (!allowUndefined(sym))
- error(toString(sym->getFile()) + ": undefined symbol: " + toString(*sym));
+static void reportUndefined(Symbol *sym) {
+ if (!allowUndefined(sym)) {
+ switch (config->unresolvedSymbols) {
+ case UnresolvedPolicy::ReportError:
+ error(toString(sym->getFile()) + ": undefined symbol: " + toString(*sym));
+ break;
+ case UnresolvedPolicy::Warn:
+ warn(toString(sym->getFile()) + ": undefined symbol: " + toString(*sym));
+ break;
+ case UnresolvedPolicy::Ignore:
+ LLVM_DEBUG(dbgs() << "ignoring undefined symbol: " + toString(*sym) +
+ "\n");
+ if (!config->importUndefined) {
+ if (auto *f = dyn_cast<UndefinedFunction>(sym)) {
+ if (!f->stubFunction) {
+ f->stubFunction = symtab->createUndefinedStub(*f->getSignature());
+ f->stubFunction->markLive();
+ // Mark the function itself as a stub which prevents it from being
+ // assigned a table entry.
+ f->isStub = true;
+ }
+ }
+ }
+ break;
+ }
+ }
}
static void addGOTEntry(Symbol *sym) {
- // In PIC mode a GOT entry is an imported global that the dynamic linker
- // will assign.
- // In non-PIC mode (i.e. when code compiled as fPIC is linked into a static
- // binary) we create an internal wasm global with a fixed value that takes the
- // place of th GOT entry and effectivly acts as an i32 const. This can
- // potentially be optimized away at runtime or with a post-link tool.
- // TODO(sbc): Linker relaxation might also be able to optimize this away.
- if (config->isPic)
+ if (requiresGOTAccess(sym))
out.importSec->addGOTEntry(sym);
else
- out.globalSec->addStaticGOTEntry(sym);
+ out.globalSec->addInternalGOTEntry(sym);
}
void scanRelocations(InputChunk *chunk) {
switch (reloc.Type) {
case R_WASM_TABLE_INDEX_I32:
+ case R_WASM_TABLE_INDEX_I64:
case R_WASM_TABLE_INDEX_SLEB:
+ case R_WASM_TABLE_INDEX_SLEB64:
case R_WASM_TABLE_INDEX_REL_SLEB:
+ case R_WASM_TABLE_INDEX_REL_SLEB64:
if (requiresGOTAccess(sym))
break;
out.elemSec->addEntry(cast<FunctionSymbol>(sym));
if (!isa<GlobalSymbol>(sym))
addGOTEntry(sym);
break;
+ case R_WASM_MEMORY_ADDR_TLS_SLEB:
+ case R_WASM_MEMORY_ADDR_TLS_SLEB64:
+ // In single-threaded builds, TLS is lowered away and TLS data can be
+ // merged with normal data, so TLS relocations are allowed in non-TLS
+ // segments.
+ if (config->sharedMemory) {
+ if (auto *D = dyn_cast<DefinedData>(sym)) {
+ if (!D->segment->outputSeg->isTLS()) {
+ error(toString(file) + ": relocation " +
+ relocTypeToString(reloc.Type) + " cannot be used against `" +
+ toString(*sym) +
+ "` in non-TLS section: " + D->segment->outputSeg->name);
+ }
+ }
+ }
+ break;
}
if (config->isPic) {
switch (reloc.Type) {
case R_WASM_TABLE_INDEX_SLEB:
+ case R_WASM_TABLE_INDEX_SLEB64:
case R_WASM_MEMORY_ADDR_SLEB:
case R_WASM_MEMORY_ADDR_LEB:
case R_WASM_MEMORY_ADDR_SLEB64:
" cannot be used against symbol " + toString(*sym) +
"; recompile with -fPIC");
break;
+ case R_WASM_MEMORY_ADDR_TLS_SLEB:
+ case R_WASM_MEMORY_ADDR_TLS_SLEB64:
+ if (!sym->isDefined()) {
+ error(toString(file) +
+ ": TLS symbol is undefined, but TLS symbols cannot yet be "
+ "imported: `" +
+ toString(*sym) + "`");
+ }
+ break;
case R_WASM_TABLE_INDEX_I32:
+ case R_WASM_TABLE_INDEX_I64:
case R_WASM_MEMORY_ADDR_I32:
case R_WASM_MEMORY_ADDR_I64:
// These relocation types are only present in the data section and
addGOTEntry(sym);
break;
}
- } else {
+ } else if (sym->isUndefined() && !config->relocatable && !sym->isWeak()) {
// Report undefined symbols
- if (sym->isUndefined() && !config->relocatable && !sym->isWeak())
- reportUndefined(sym);
+ reportUndefined(sym);
}
-
}
}
#include "SymbolTable.h"
#include "Config.h"
#include "InputChunks.h"
-#include "InputEvent.h"
-#include "InputGlobal.h"
+#include "InputElement.h"
#include "WriterUtils.h"
#include "lld/Common/ErrorHandler.h"
#include "lld/Common/Memory.h"
}
}
-static void checkEventType(const Symbol *existing, const InputFile *file,
- const WasmEventType *newType,
- const WasmSignature *newSig) {
- auto existingEvent = dyn_cast<EventSymbol>(existing);
- if (!isa<EventSymbol>(existing)) {
- reportTypeError(existing, file, WASM_SYMBOL_TYPE_EVENT);
+static void checkTagType(const Symbol *existing, const InputFile *file,
+ const WasmTagType *newType,
+ const WasmSignature *newSig) {
+ const auto *existingTag = dyn_cast<TagSymbol>(existing);
+ if (!isa<TagSymbol>(existing)) {
+ reportTypeError(existing, file, WASM_SYMBOL_TYPE_TAG);
return;
}
- const WasmEventType *oldType = cast<EventSymbol>(existing)->getEventType();
- const WasmSignature *oldSig = existingEvent->signature;
+ const WasmTagType *oldType = cast<TagSymbol>(existing)->getTagType();
+ const WasmSignature *oldSig = existingTag->signature;
if (newType->Attribute != oldType->Attribute)
- error("Event type mismatch: " + existing->getName() + "\n>>> defined as " +
+ error("Tag type mismatch: " + existing->getName() + "\n>>> defined as " +
toString(*oldType) + " in " + toString(existing->getFile()) +
"\n>>> defined as " + toString(*newType) + " in " + toString(file));
if (*newSig != *oldSig)
- warn("Event signature mismatch: " + existing->getName() +
+ warn("Tag signature mismatch: " + existing->getName() +
"\n>>> defined as " + toString(*oldSig) + " in " +
toString(existing->getFile()) + "\n>>> defined as " +
toString(*newSig) + " in " + toString(file));
}
+static void checkTableType(const Symbol *existing, const InputFile *file,
+ const WasmTableType *newType) {
+ if (!isa<TableSymbol>(existing)) {
+ reportTypeError(existing, file, WASM_SYMBOL_TYPE_TABLE);
+ return;
+ }
+
+ const WasmTableType *oldType = cast<TableSymbol>(existing)->getTableType();
+ if (newType->ElemType != oldType->ElemType) {
+ error("Table type mismatch: " + existing->getName() + "\n>>> defined as " +
+ toString(*oldType) + " in " + toString(existing->getFile()) +
+ "\n>>> defined as " + toString(*newType) + " in " + toString(file));
+ }
+ // FIXME: No assertions currently on the limits.
+}
+
static void checkDataType(const Symbol *existing, const InputFile *file) {
if (!isa<DataSymbol>(existing))
reportTypeError(existing, file, WASM_SYMBOL_TYPE_DATA);
flags, nullptr, function);
}
-// Adds an optional, linker generated, data symbols. The symbol will only be
+// Adds an optional, linker generated, data symbol. The symbol will only be
// added if there is an undefined reference to it, or if it is explicitly
// exported via the --export flag. Otherwise we don't add the symbol and return
// nullptr.
DefinedData *SymbolTable::addOptionalDataSymbol(StringRef name,
- uint32_t value) {
+ uint64_t value) {
Symbol *s = find(name);
if (!s && (config->exportAll || config->exportedSymbols.count(name) != 0))
s = insertName(name).first;
return nullptr;
LLVM_DEBUG(dbgs() << "addOptionalDataSymbol: " << name << "\n");
auto *rtn = replaceSymbol<DefinedData>(s, name, WASM_SYMBOL_VISIBILITY_HIDDEN);
- rtn->setVirtualAddress(value);
+ rtn->setVA(value);
rtn->referenced = true;
return rtn;
}
nullptr, global);
}
+DefinedGlobal *SymbolTable::addOptionalGlobalSymbol(StringRef name,
+ InputGlobal *global) {
+ LLVM_DEBUG(dbgs() << "addOptionalGlobalSymbol: " << name << " -> " << global
+ << "\n");
+ Symbol *s = find(name);
+ if (!s || s->isDefined())
+ return nullptr;
+ syntheticGlobals.emplace_back(global);
+ return replaceSymbol<DefinedGlobal>(s, name, WASM_SYMBOL_VISIBILITY_HIDDEN,
+ nullptr, global);
+}
+
+DefinedTable *SymbolTable::addSyntheticTable(StringRef name, uint32_t flags,
+ InputTable *table) {
+ LLVM_DEBUG(dbgs() << "addSyntheticTable: " << name << " -> " << table
+ << "\n");
+ Symbol *s = find(name);
+ assert(!s || s->isUndefined());
+ if (!s)
+ s = insertName(name).first;
+ syntheticTables.emplace_back(table);
+ return replaceSymbol<DefinedTable>(s, name, flags, nullptr, table);
+}
+
static bool shouldReplace(const Symbol *existing, InputFile *newFile,
uint32_t newFlags) {
// If existing symbol is undefined, replace it.
}
Symbol *SymbolTable::addDefinedData(StringRef name, uint32_t flags,
- InputFile *file, InputSegment *segment,
+ InputFile *file, InputChunk *segment,
uint64_t address, uint64_t size) {
LLVM_DEBUG(dbgs() << "addDefinedData:" << name << " addr:" << address
<< "\n");
return s;
}
-Symbol *SymbolTable::addDefinedEvent(StringRef name, uint32_t flags,
- InputFile *file, InputEvent *event) {
- LLVM_DEBUG(dbgs() << "addDefinedEvent:" << name << "\n");
+Symbol *SymbolTable::addDefinedTag(StringRef name, uint32_t flags,
+ InputFile *file, InputTag *tag) {
+ LLVM_DEBUG(dbgs() << "addDefinedTag:" << name << "\n");
Symbol *s;
bool wasInserted;
std::tie(s, wasInserted) = insert(name, file);
auto replaceSym = [&]() {
- replaceSymbol<DefinedEvent>(s, name, flags, file, event);
+ replaceSymbol<DefinedTag>(s, name, flags, file, tag);
};
if (wasInserted || s->isLazy()) {
return s;
}
- checkEventType(s, file, &event->getType(), &event->signature);
+ checkTagType(s, file, &tag->getType(), &tag->signature);
+
+ if (shouldReplace(s, file, flags))
+ replaceSym();
+ return s;
+}
+
+Symbol *SymbolTable::addDefinedTable(StringRef name, uint32_t flags,
+ InputFile *file, InputTable *table) {
+ LLVM_DEBUG(dbgs() << "addDefinedTable:" << name << "\n");
+
+ Symbol *s;
+ bool wasInserted;
+ std::tie(s, wasInserted) = insert(name, file);
+
+ auto replaceSym = [&]() {
+ replaceSymbol<DefinedTable>(s, name, flags, file, table);
+ };
+
+ if (wasInserted || s->isLazy()) {
+ replaceSym();
+ return s;
+ }
+
+ checkTableType(s, file, &table->getType());
if (shouldReplace(s, file, flags))
replaceSym();
file, sig, isCalledDirectly);
};
- if (wasInserted)
+ if (wasInserted) {
replaceSym();
- else if (auto *lazy = dyn_cast<LazySymbol>(s))
- lazy->fetch();
- else {
+ } else if (auto *lazy = dyn_cast<LazySymbol>(s)) {
+ if ((flags & WASM_SYMBOL_BINDING_MASK) == WASM_SYMBOL_BINDING_WEAK) {
+ lazy->setWeak();
+ lazy->signature = sig;
+ } else {
+ lazy->fetch();
+ }
+ } else {
auto existingFunction = dyn_cast<FunctionSymbol>(s);
if (!existingFunction) {
reportTypeError(s, file, WASM_SYMBOL_TYPE_FUNCTION);
if (s->traced)
printTraceSymbolUndefined(name, file);
- if (wasInserted)
+ if (wasInserted) {
replaceSymbol<UndefinedData>(s, name, flags, file);
- else if (auto *lazy = dyn_cast<LazySymbol>(s))
- lazy->fetch();
- else if (s->isDefined())
+ } else if (auto *lazy = dyn_cast<LazySymbol>(s)) {
+ if ((flags & WASM_SYMBOL_BINDING_MASK) == WASM_SYMBOL_BINDING_WEAK)
+ lazy->setWeak();
+ else
+ lazy->fetch();
+ } else if (s->isDefined()) {
checkDataType(s, file);
+ }
return s;
}
return s;
}
+Symbol *SymbolTable::addUndefinedTable(StringRef name,
+ Optional<StringRef> importName,
+ Optional<StringRef> importModule,
+ uint32_t flags, InputFile *file,
+ const WasmTableType *type) {
+ LLVM_DEBUG(dbgs() << "addUndefinedTable: " << name << "\n");
+ assert(flags & WASM_SYMBOL_UNDEFINED);
+
+ Symbol *s;
+ bool wasInserted;
+ std::tie(s, wasInserted) = insert(name, file);
+ if (s->traced)
+ printTraceSymbolUndefined(name, file);
+
+ if (wasInserted)
+ replaceSymbol<UndefinedTable>(s, name, importName, importModule, flags,
+ file, type);
+ else if (auto *lazy = dyn_cast<LazySymbol>(s))
+ lazy->fetch();
+ else if (s->isDefined())
+ checkTableType(s, file, type);
+ return s;
+}
+
+TableSymbol *SymbolTable::createUndefinedIndirectFunctionTable(StringRef name) {
+ WasmLimits limits{0, 0, 0}; // Set by the writer.
+ WasmTableType *type = make<WasmTableType>();
+ type->ElemType = uint8_t(ValType::FUNCREF);
+ type->Limits = limits;
+ StringRef module(defaultModule);
+ uint32_t flags = config->exportTable ? 0 : WASM_SYMBOL_VISIBILITY_HIDDEN;
+ flags |= WASM_SYMBOL_UNDEFINED;
+ Symbol *sym = addUndefinedTable(name, name, module, flags, nullptr, type);
+ sym->markLive();
+ sym->forceExport = config->exportTable;
+ return cast<TableSymbol>(sym);
+}
+
+TableSymbol *SymbolTable::createDefinedIndirectFunctionTable(StringRef name) {
+ const uint32_t invalidIndex = -1;
+ WasmLimits limits{0, 0, 0}; // Set by the writer.
+ WasmTableType type{uint8_t(ValType::FUNCREF), limits};
+ WasmTable desc{invalidIndex, type, name};
+ InputTable *table = make<InputTable>(desc, nullptr);
+ uint32_t flags = config->exportTable ? 0 : WASM_SYMBOL_VISIBILITY_HIDDEN;
+ TableSymbol *sym = addSyntheticTable(name, flags, table);
+ sym->markLive();
+ sym->forceExport = config->exportTable;
+ return sym;
+}
+
+// Whether or not we need an indirect function table is usually a function of
+// whether an input declares a need for it. However sometimes it's possible for
+// no input to need the indirect function table, but then a late
+// addInternalGOTEntry causes a function to be allocated an address. In that
+// case we synthesize a definition at the last minute.
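+//
+// A rough summary of the cases handled below (illustrative, not exhaustive):
+//  - with --import-table: reuse an existing undefined table symbol, or
+//    synthesize an undefined (imported) table when one is required;
+//  - otherwise: synthesize a defined table when the symbol is already live,
+//    when --export-table is given, or when `required` is set;
+//  - otherwise: no indirect function table is emitted at all.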
+TableSymbol *SymbolTable::resolveIndirectFunctionTable(bool required) {
+ Symbol *existing = find(functionTableName);
+ if (existing) {
+ if (!isa<TableSymbol>(existing)) {
+ error(Twine("reserved symbol must be of type table: `") +
+ functionTableName + "`");
+ return nullptr;
+ }
+ if (existing->isDefined()) {
+ error(Twine("reserved symbol must not be defined in input files: `") +
+ functionTableName + "`");
+ return nullptr;
+ }
+ }
+
+ if (config->importTable) {
+ if (existing)
+ return cast<TableSymbol>(existing);
+ if (required)
+ return createUndefinedIndirectFunctionTable(functionTableName);
+ } else if ((existing && existing->isLive()) || config->exportTable ||
+ required) {
+ // A defined table is required, either because the user requested an exported
+ // table or because the table symbol is already live. The existing table is
+ // guaranteed to be undefined due to the check above.
+ return createDefinedIndirectFunctionTable(functionTableName);
+ }
+
+ // An indirect function table will only be present in the symbol table if
+ // needed by a reloc; if we get here, we don't need one.
+ return nullptr;
+}
+
void SymbolTable::addLazy(ArchiveFile *file, const Archive::Symbol *sym) {
LLVM_DEBUG(dbgs() << "addLazy: " << sym->getName() << "\n");
StringRef name = sym->getName();
// to be exported outside the object file.
replaceSymbol<DefinedFunction>(sym, debugName, WASM_SYMBOL_BINDING_LOCAL,
nullptr, func);
+ // Ensure the stub function doesn't get a table entry. Its address
+ // should always compare equal to the null pointer.
+ sym->isStub = true;
return func;
}
+void SymbolTable::replaceWithUndefined(Symbol *sym) {
+ // Add a synthetic dummy for weak undefined functions. These dummies will
+ // be GC'd if not used as the target of any "call" instructions.
+ StringRef debugName = saver.save("undefined_weak:" + toString(*sym));
+ replaceWithUnreachable(sym, *sym->getSignature(), debugName);
+ // Hide our dummy to prevent export.
+ sym->setHidden(true);
+}
+
// For weak undefined functions, there may be "call" instructions that reference
// the symbol. In this case, we need to synthesise a dummy/stub function that
// will abort at runtime, so that relocations can still provide an operand to
// the call instruction that passes Wasm validation.
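+//
+// Illustrative example (hypothetical source):
+//   __attribute__((weak)) void foo();
+//   if (foo) foo();
+// The `foo != nullptr` test still evaluates to false because the stub never
+// receives a table entry (its address is 0), while direct call sites are
+// relocated to target the synthesized unreachable body so the module still
+// validates.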
void SymbolTable::handleWeakUndefines() {
for (Symbol *sym : getSymbols()) {
- if (!sym->isUndefWeak())
- continue;
-
- const WasmSignature *sig = sym->getSignature();
- if (!sig) {
- // It is possible for undefined functions not to have a signature (eg. if
- // added via "--undefined"), but weak undefined ones do have a signature.
- // Lazy symbols may not be functions and therefore Sig can still be null
- // in some circumstance.
- assert(!isa<FunctionSymbol>(sym));
- continue;
+ if (sym->isUndefWeak()) {
+ if (sym->getSignature()) {
+ replaceWithUndefined(sym);
+ } else {
+ // It is possible for undefined functions not to have a signature (e.g.
+ // if added via "--undefined"), but weak undefined ones do have a
+ // signature. Lazy symbols may not be functions and therefore their
+ // signature can still be null in some circumstances.
+ assert(!isa<FunctionSymbol>(sym));
+ }
}
-
- // Add a synthetic dummy for weak undefined functions. These dummies will
- // be GC'd if not used as the target of any "call" instructions.
- StringRef debugName = saver.save("undefined:" + toString(*sym));
- InputFunction* func = replaceWithUnreachable(sym, *sig, debugName);
- // Ensure it compares equal to the null pointer, and so that table relocs
- // don't pull in the stub body (only call-operand relocs should do that).
- func->setTableIndex(0);
- // Hide our dummy to prevent export.
- sym->setHidden(true);
}
}
+DefinedFunction *SymbolTable::createUndefinedStub(const WasmSignature &sig) {
+ if (stubFunctions.count(sig))
+ return stubFunctions[sig];
+ LLVM_DEBUG(dbgs() << "createUndefinedStub: " << toString(sig) << "\n");
+ auto *sym = reinterpret_cast<DefinedFunction *>(make<SymbolUnion>());
+ sym->isUsedInRegularObj = true;
+ sym->canInline = true;
+ sym->traced = false;
+ sym->forceExport = false;
+ sym->signature = &sig;
+ replaceSymbol<DefinedFunction>(
+ sym, "undefined_stub", WASM_SYMBOL_VISIBILITY_HIDDEN, nullptr, nullptr);
+ replaceWithUnreachable(sym, sig, "undefined_stub");
+ stubFunctions[sig] = sym;
+ return sym;
+}
+
static void reportFunctionSignatureMismatch(StringRef symName,
FunctionSymbol *a,
FunctionSymbol *b, bool isError) {
#include "llvm/ADT/CachedHashString.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/Optional.h"
+#include "llvm/BinaryFormat/WasmTraits.h"
namespace lld {
namespace wasm {
Symbol *addDefinedFunction(StringRef name, uint32_t flags, InputFile *file,
InputFunction *function);
Symbol *addDefinedData(StringRef name, uint32_t flags, InputFile *file,
- InputSegment *segment, uint64_t address,
- uint64_t size);
+ InputChunk *segment, uint64_t address, uint64_t size);
Symbol *addDefinedGlobal(StringRef name, uint32_t flags, InputFile *file,
InputGlobal *g);
- Symbol *addDefinedEvent(StringRef name, uint32_t flags, InputFile *file,
- InputEvent *e);
+ Symbol *addDefinedTag(StringRef name, uint32_t flags, InputFile *file,
+ InputTag *t);
+ Symbol *addDefinedTable(StringRef name, uint32_t flags, InputFile *file,
+ InputTable *t);
Symbol *addUndefinedFunction(StringRef name,
llvm::Optional<StringRef> importName,
llvm::Optional<StringRef> importModule,
uint32_t flags, InputFile *file,
const WasmGlobalType *type);
+ Symbol *addUndefinedTable(StringRef name,
+ llvm::Optional<StringRef> importName,
+ llvm::Optional<StringRef> importModule,
+ uint32_t flags, InputFile *file,
+ const WasmTableType *type);
+
+ TableSymbol *resolveIndirectFunctionTable(bool required);
void addLazy(ArchiveFile *f, const llvm::object::Archive::Symbol *sym);
InputGlobal *global);
DefinedFunction *addSyntheticFunction(StringRef name, uint32_t flags,
InputFunction *function);
- DefinedData *addOptionalDataSymbol(StringRef name, uint32_t value = 0);
+ DefinedData *addOptionalDataSymbol(StringRef name, uint64_t value = 0);
+ DefinedGlobal *addOptionalGlobalSymbol(StringRef name, InputGlobal *global);
+ DefinedTable *addSyntheticTable(StringRef name, uint32_t flags,
+ InputTable *table);
void handleSymbolVariants();
void handleWeakUndefines();
+ DefinedFunction *createUndefinedStub(const WasmSignature &sig);
std::vector<ObjFile *> objectFiles;
std::vector<SharedFile *> sharedFiles;
std::vector<BitcodeFile *> bitcodeFiles;
std::vector<InputFunction *> syntheticFunctions;
std::vector<InputGlobal *> syntheticGlobals;
+ std::vector<InputTable *> syntheticTables;
private:
std::pair<Symbol *, bool> insert(StringRef name, const InputFile *file);
const InputFile *file, Symbol **out);
InputFunction *replaceWithUnreachable(Symbol *sym, const WasmSignature &sig,
StringRef debugName);
+ void replaceWithUndefined(Symbol *sym);
+
+ TableSymbol *createDefinedIndirectFunctionTable(StringRef name);
+ TableSymbol *createUndefinedIndirectFunctionTable(StringRef name);
// Maps symbol names to index into the symVector. -1 means that the symbol
// is not yet in the vector but it should have tracing enabled if it is
// For certain symbol types, e.g. function symbols, we allow for multiple
// variants of the same symbol with different signatures.
llvm::DenseMap<llvm::CachedHashStringRef, std::vector<Symbol *>> symVariants;
+ llvm::DenseMap<WasmSignature, DefinedFunction *> stubFunctions;
// Comdat groups define "link once" sections. If two comdat groups have the
// same name, only one of them is linked, and the other is ignored. This set
#include "Symbols.h"
#include "Config.h"
#include "InputChunks.h"
-#include "InputEvent.h"
+#include "InputElement.h"
#include "InputFiles.h"
-#include "InputGlobal.h"
#include "OutputSections.h"
#include "OutputSegment.h"
#include "lld/Common/ErrorHandler.h"
+#include "lld/Common/Memory.h"
#include "lld/Common/Strings.h"
#define DEBUG_TYPE "lld"
return "DefinedData";
case wasm::Symbol::DefinedGlobalKind:
return "DefinedGlobal";
- case wasm::Symbol::DefinedEventKind:
- return "DefinedEvent";
+ case wasm::Symbol::DefinedTableKind:
+ return "DefinedTable";
+ case wasm::Symbol::DefinedTagKind:
+ return "DefinedTag";
case wasm::Symbol::UndefinedFunctionKind:
return "UndefinedFunction";
case wasm::Symbol::UndefinedDataKind:
return "UndefinedData";
case wasm::Symbol::UndefinedGlobalKind:
return "UndefinedGlobal";
+ case wasm::Symbol::UndefinedTableKind:
+ return "UndefinedTable";
case wasm::Symbol::LazyKind:
return "LazyKind";
case wasm::Symbol::SectionKind:
namespace wasm {
DefinedFunction *WasmSym::callCtors;
+DefinedFunction *WasmSym::callDtors;
DefinedFunction *WasmSym::initMemory;
-DefinedFunction *WasmSym::applyRelocs;
+DefinedFunction *WasmSym::applyDataRelocs;
+DefinedFunction *WasmSym::applyGlobalRelocs;
DefinedFunction *WasmSym::initTLS;
+DefinedFunction *WasmSym::startFunction;
DefinedData *WasmSym::dsoHandle;
DefinedData *WasmSym::dataEnd;
DefinedData *WasmSym::globalBase;
GlobalSymbol *WasmSym::tlsAlign;
UndefinedGlobal *WasmSym::tableBase;
DefinedData *WasmSym::definedTableBase;
+UndefinedGlobal *WasmSym::tableBase32;
+DefinedData *WasmSym::definedTableBase32;
UndefinedGlobal *WasmSym::memoryBase;
DefinedData *WasmSym::definedMemoryBase;
+TableSymbol *WasmSym::indirectFunctionTable;
WasmSymbolType Symbol::getWasmType() const {
if (isa<FunctionSymbol>(this))
return WASM_SYMBOL_TYPE_DATA;
if (isa<GlobalSymbol>(this))
return WASM_SYMBOL_TYPE_GLOBAL;
- if (isa<EventSymbol>(this))
- return WASM_SYMBOL_TYPE_EVENT;
+ if (isa<TagSymbol>(this))
+ return WASM_SYMBOL_TYPE_TAG;
+ if (isa<TableSymbol>(this))
+ return WASM_SYMBOL_TYPE_TABLE;
if (isa<SectionSymbol>(this) || isa<OutputSectionSymbol>(this))
return WASM_SYMBOL_TYPE_SECTION;
llvm_unreachable("invalid symbol kind");
InputChunk *Symbol::getChunk() const {
if (auto *f = dyn_cast<DefinedFunction>(this))
return f->function;
+ if (auto *f = dyn_cast<UndefinedFunction>(this))
+ if (f->stubFunction)
+ return f->stubFunction->function;
if (auto *d = dyn_cast<DefinedData>(this))
return d->segment;
return nullptr;
bool Symbol::isLive() const {
if (auto *g = dyn_cast<DefinedGlobal>(this))
return g->global->live;
- if (auto *e = dyn_cast<DefinedEvent>(this))
- return e->event->live;
+ if (auto *t = dyn_cast<DefinedTag>(this))
+ return t->tag->live;
+ if (auto *t = dyn_cast<DefinedTable>(this))
+ return t->table->live;
if (InputChunk *c = getChunk())
return c->live;
return referenced;
void Symbol::markLive() {
assert(!isDiscarded());
+ referenced = true;
+ if (file != nullptr && isDefined())
+ file->markLive();
if (auto *g = dyn_cast<DefinedGlobal>(this))
g->global->live = true;
- if (auto *e = dyn_cast<DefinedEvent>(this))
- e->event->live = true;
- if (InputChunk *c = getChunk())
+ if (auto *t = dyn_cast<DefinedTag>(this))
+ t->tag->live = true;
+ if (auto *t = dyn_cast<DefinedTable>(this))
+ t->table->live = true;
+ if (InputChunk *c = getChunk()) {
+ // Usually, a whole chunk is marked as live or dead, but in mergeable
+ // (splittable) sections, each piece of data has an independent liveness bit.
+ // So we explicitly tell it which offset is in use.
+ if (auto *d = dyn_cast<DefinedData>(this)) {
+ if (auto *ms = dyn_cast<MergeInputChunk>(c)) {
+ ms->getSectionPiece(d->value)->live = true;
+ }
+ }
c->live = true;
- referenced = true;
+ }
}
uint32_t Symbol::getOutputSymbolIndex() const {
if (!isDefined() || isLocal())
return false;
- if (forceExport || config->exportAll)
+ if (config->exportAll || (config->exportDynamic && !isHidden()))
return true;
- if (config->exportDynamic && !isHidden())
- return true;
+ return isExportedExplicit();
+}
- return flags & WASM_SYMBOL_EXPORTED;
+bool Symbol::isExportedExplicit() const {
+ return forceExport || flags & WASM_SYMBOL_EXPORTED;
}
bool Symbol::isNoStrip() const {
uint32_t FunctionSymbol::getFunctionIndex() const {
if (auto *f = dyn_cast<DefinedFunction>(this))
return f->function->getFunctionIndex();
+ if (const auto *u = dyn_cast<UndefinedFunction>(this)) {
+ if (u->stubFunction) {
+ return u->stubFunction->getFunctionIndex();
+ }
+ }
assert(functionIndex != INVALID_INDEX);
return functionIndex;
}
function ? &function->signature : nullptr),
function(function) {}
-uint64_t DefinedData::getVirtualAddress() const {
- LLVM_DEBUG(dbgs() << "getVirtualAddress: " << getName() << "\n");
- if (segment) {
- // For thread local data, the symbol location is relative to the start of
- // the .tdata section, since they are used as offsets from __tls_base.
- // Hence, we do not add in segment->outputSeg->startVA.
- if (segment->outputSeg->name == ".tdata")
- return segment->outputSegmentOffset + offset;
- return segment->outputSeg->startVA + segment->outputSegmentOffset + offset;
- }
- return offset;
+uint64_t DefinedData::getVA() const {
+ LLVM_DEBUG(dbgs() << "getVA: " << getName() << "\n");
+ if (segment)
+ return segment->getVA(value);
+ return value;
}
-void DefinedData::setVirtualAddress(uint64_t value) {
- LLVM_DEBUG(dbgs() << "setVirtualAddress " << name << " -> " << value << "\n");
+void DefinedData::setVA(uint64_t value_) {
+ LLVM_DEBUG(dbgs() << "setVA " << name << " -> " << value_ << "\n");
assert(!segment);
- offset = value;
+ value = value_;
}
uint64_t DefinedData::getOutputSegmentOffset() const {
LLVM_DEBUG(dbgs() << "getOutputSegmentOffset: " << getName() << "\n");
- return segment->outputSegmentOffset + offset;
+ return segment->getChunkOffset(value);
}
uint64_t DefinedData::getOutputSegmentIndex() const {
uint32_t GlobalSymbol::getGlobalIndex() const {
if (auto *f = dyn_cast<DefinedGlobal>(this))
- return f->global->getGlobalIndex();
+ return f->global->getAssignedIndex();
assert(globalIndex != INVALID_INDEX);
return globalIndex;
}
bool GlobalSymbol::hasGlobalIndex() const {
if (auto *f = dyn_cast<DefinedGlobal>(this))
- return f->global->hasGlobalIndex();
+ return f->global->hasAssignedIndex();
return globalIndex != INVALID_INDEX;
}
global ? &global->getType() : nullptr),
global(global) {}
-uint32_t EventSymbol::getEventIndex() const {
- if (auto *f = dyn_cast<DefinedEvent>(this))
- return f->event->getEventIndex();
- assert(eventIndex != INVALID_INDEX);
- return eventIndex;
+uint32_t TagSymbol::getTagIndex() const {
+ if (auto *f = dyn_cast<DefinedTag>(this))
+ return f->tag->getAssignedIndex();
+ assert(tagIndex != INVALID_INDEX);
+ return tagIndex;
}
-void EventSymbol::setEventIndex(uint32_t index) {
- LLVM_DEBUG(dbgs() << "setEventIndex " << name << " -> " << index << "\n");
- assert(eventIndex == INVALID_INDEX);
- eventIndex = index;
+void TagSymbol::setTagIndex(uint32_t index) {
+ LLVM_DEBUG(dbgs() << "setTagIndex " << name << " -> " << index << "\n");
+ assert(tagIndex == INVALID_INDEX);
+ tagIndex = index;
}
-bool EventSymbol::hasEventIndex() const {
- if (auto *f = dyn_cast<DefinedEvent>(this))
- return f->event->hasEventIndex();
- return eventIndex != INVALID_INDEX;
+bool TagSymbol::hasTagIndex() const {
+ if (auto *f = dyn_cast<DefinedTag>(this))
+ return f->tag->hasAssignedIndex();
+ return tagIndex != INVALID_INDEX;
}
-DefinedEvent::DefinedEvent(StringRef name, uint32_t flags, InputFile *file,
- InputEvent *event)
- : EventSymbol(name, DefinedEventKind, flags, file,
- event ? &event->getType() : nullptr,
- event ? &event->signature : nullptr),
- event(event) {}
+DefinedTag::DefinedTag(StringRef name, uint32_t flags, InputFile *file,
+ InputTag *tag)
+ : TagSymbol(name, DefinedTagKind, flags, file,
+ tag ? &tag->getType() : nullptr,
+ tag ? &tag->signature : nullptr),
+ tag(tag) {}
+
+void TableSymbol::setLimits(const WasmLimits &limits) {
+ if (auto *t = dyn_cast<DefinedTable>(this))
+ t->table->setLimits(limits);
+ auto *newType = make<WasmTableType>(*tableType);
+ newType->Limits = limits;
+ tableType = newType;
+}
+
+uint32_t TableSymbol::getTableNumber() const {
+ if (const auto *t = dyn_cast<DefinedTable>(this))
+ return t->table->getAssignedIndex();
+ assert(tableNumber != INVALID_INDEX);
+ return tableNumber;
+}
+
+void TableSymbol::setTableNumber(uint32_t number) {
+ if (const auto *t = dyn_cast<DefinedTable>(this))
+ return t->table->assignIndex(number);
+ LLVM_DEBUG(dbgs() << "setTableNumber " << name << " -> " << number << "\n");
+ assert(tableNumber == INVALID_INDEX);
+ tableNumber = number;
+}
+
+bool TableSymbol::hasTableNumber() const {
+ if (const auto *t = dyn_cast<DefinedTable>(this))
+ return t->table->hasAssignedIndex();
+ return tableNumber != INVALID_INDEX;
+}
+
+DefinedTable::DefinedTable(StringRef name, uint32_t flags, InputFile *file,
+ InputTable *table)
+ : TableSymbol(name, DefinedTableKind, flags, file,
+ table ? &table->getType() : nullptr),
+ table(table) {}
const OutputSectionSymbol *SectionSymbol::getOutputSectionSymbol() const {
assert(section->outputSec && section->outputSec->sectionSym);
void LazySymbol::fetch() { cast<ArchiveFile>(file)->addMember(&archiveSymbol); }
+void LazySymbol::setWeak() {
+ flags = (flags & ~WASM_SYMBOL_BINDING_MASK) | WASM_SYMBOL_BINDING_WEAK;
+}
+
MemoryBufferRef LazySymbol::getMemberBuffer() {
Archive::Child c =
CHECK(archiveSymbol.getMember(),
class InputSegment;
class InputFunction;
class InputGlobal;
-class InputEvent;
+class InputTag;
class InputSection;
+class InputTable;
class OutputSection;
#define INVALID_INDEX UINT32_MAX
DefinedFunctionKind,
DefinedDataKind,
DefinedGlobalKind,
- DefinedEventKind,
+ DefinedTagKind,
+ DefinedTableKind,
SectionKind,
OutputSectionKind,
UndefinedFunctionKind,
UndefinedDataKind,
UndefinedGlobalKind,
+ UndefinedTableKind,
LazyKind,
};
bool isUndefined() const {
return symbolKind == UndefinedFunctionKind ||
- symbolKind == UndefinedDataKind || symbolKind == UndefinedGlobalKind;
+ symbolKind == UndefinedDataKind ||
+ symbolKind == UndefinedGlobalKind ||
+ symbolKind == UndefinedTableKind;
}
bool isLazy() const { return symbolKind == LazyKind; }
WasmSymbolType getWasmType() const;
bool isExported() const;
+ bool isExportedExplicit() const;
// Indicates that the symbol is used in an __attribute__((used)) directive
// or similar.
Symbol(StringRef name, Kind k, uint32_t flags, InputFile *f)
: name(name), file(f), symbolKind(k), referenced(!config->gcSections),
requiresGOT(false), isUsedInRegularObj(false), forceExport(false),
- canInline(false), traced(false), flags(flags) {}
+ canInline(false), traced(false), isStub(false), flags(flags) {}
StringRef name;
InputFile *file;
// True if this symbol is specified by --trace-symbol option.
bool traced : 1;
+ // True if this symbol is a linker-synthesized stub function (traps when
+ // called) and should otherwise be treated as missing/undefined. See
+ // SymbolTable::replaceWithUndefined.
+ // These stubs never appear in the table and any table index relocations
+ // against them will produce address 0 (the table index representing
+ // the null function pointer).
+ bool isStub : 1;
+
uint32_t flags;
+
+ llvm::Optional<StringRef> importName;
+ llvm::Optional<StringRef> importModule;
};
class FunctionSymbol : public Symbol {
const WasmSignature *type = nullptr,
bool isCalledDirectly = true)
: FunctionSymbol(name, UndefinedFunctionKind, flags, file, type),
- importName(importName), importModule(importModule),
- isCalledDirectly(isCalledDirectly) {}
+ isCalledDirectly(isCalledDirectly) {
+ this->importName = importName;
+ this->importModule = importModule;
+ }
static bool classof(const Symbol *s) {
return s->kind() == UndefinedFunctionKind;
}
- llvm::Optional<StringRef> importName;
- llvm::Optional<StringRef> importModule;
+ DefinedFunction *stubFunction = nullptr;
bool isCalledDirectly;
};
class SectionSymbol : public Symbol {
public:
- SectionSymbol(uint32_t flags, const InputSection *s, InputFile *f = nullptr)
+ SectionSymbol(uint32_t flags, const InputChunk *s, InputFile *f = nullptr)
: Symbol("", SectionKind, flags, f), section(s) {}
static bool classof(const Symbol *s) { return s->kind() == SectionKind; }
const OutputSectionSymbol *getOutputSectionSymbol() const;
- const InputSection *section;
+ const InputChunk *section;
};
class DataSymbol : public Symbol {
class DefinedData : public DataSymbol {
public:
// Constructor for regular data symbols originating from input files.
- DefinedData(StringRef name, uint32_t flags, InputFile *f,
- InputSegment *segment, uint64_t offset, uint64_t size)
+ DefinedData(StringRef name, uint32_t flags, InputFile *f, InputChunk *segment,
+ uint64_t value, uint64_t size)
: DataSymbol(name, DefinedDataKind, flags, f), segment(segment),
- offset(offset), size(size) {}
+ value(value), size(size) {}
// Constructor for linker synthetic data symbols.
DefinedData(StringRef name, uint32_t flags)
static bool classof(const Symbol *s) { return s->kind() == DefinedDataKind; }
// Returns the output virtual address of a defined data symbol.
- uint64_t getVirtualAddress() const;
- void setVirtualAddress(uint64_t va);
+ uint64_t getVA() const;
+ void setVA(uint64_t va);
// Returns the offset of a defined data symbol within its OutputSegment.
uint64_t getOutputSegmentOffset() const;
uint64_t getOutputSegmentIndex() const;
uint64_t getSize() const { return size; }
- InputSegment *segment = nullptr;
+ InputChunk *segment = nullptr;
+ uint64_t value = 0;
protected:
- uint64_t offset = 0;
uint64_t size = 0;
};
llvm::Optional<StringRef> importModule, uint32_t flags,
InputFile *file = nullptr,
const WasmGlobalType *type = nullptr)
- : GlobalSymbol(name, UndefinedGlobalKind, flags, file, type),
- importName(importName), importModule(importModule) {}
+ : GlobalSymbol(name, UndefinedGlobalKind, flags, file, type) {
+ this->importName = importName;
+ this->importModule = importModule;
+ }
static bool classof(const Symbol *s) {
return s->kind() == UndefinedGlobalKind;
}
+};
+
+class TableSymbol : public Symbol {
+public:
+ static bool classof(const Symbol *s) {
+ return s->kind() == DefinedTableKind || s->kind() == UndefinedTableKind;
+ }
- llvm::Optional<StringRef> importName;
- llvm::Optional<StringRef> importModule;
+ const WasmTableType *getTableType() const { return tableType; }
+ void setLimits(const WasmLimits &limits);
+
+ // Get/set the table number
+ uint32_t getTableNumber() const;
+ void setTableNumber(uint32_t number);
+ bool hasTableNumber() const;
+
+protected:
+ TableSymbol(StringRef name, Kind k, uint32_t flags, InputFile *f,
+ const WasmTableType *type)
+ : Symbol(name, k, flags, f), tableType(type) {}
+
+ const WasmTableType *tableType;
+ uint32_t tableNumber = INVALID_INDEX;
+};
+
+class DefinedTable : public TableSymbol {
+public:
+ DefinedTable(StringRef name, uint32_t flags, InputFile *file,
+ InputTable *table);
+
+ static bool classof(const Symbol *s) { return s->kind() == DefinedTableKind; }
+
+ InputTable *table;
+};
+
+class UndefinedTable : public TableSymbol {
+public:
+ UndefinedTable(StringRef name, llvm::Optional<StringRef> importName,
+ llvm::Optional<StringRef> importModule, uint32_t flags,
+ InputFile *file, const WasmTableType *type)
+ : TableSymbol(name, UndefinedTableKind, flags, file, type) {
+ this->importName = importName;
+ this->importModule = importModule;
+ }
+
+ static bool classof(const Symbol *s) {
+ return s->kind() == UndefinedTableKind;
+ }
};
-// Wasm events are features that suspend the current execution and transfer the
-// control flow to a corresponding handler. Currently the only supported event
-// kind is exceptions.
+// A tag is a general format to distinguish typed entities. Each tag has an
+// attribute and a type. Currently the attribute can only specify that the tag
+// is for an exception tag.
//
-// Event tags are values to distinguish different events. For exceptions, they
-// can be used to distinguish different language's exceptions, i.e., all C++
-// exceptions have the same tag. Wasm can generate code capable of doing
-// different handling actions based on the tag of caught exceptions.
+// In exception handling, tags are used to distinguish different kinds of
+// exceptions. For example, they can be used to distinguish different language's
+// exceptions, e.g., all C++ exceptions have the same tag and Java exceptions
+// would have a distinct tag. Wasm can filter the exceptions it catches based on
+// their tag.
//
-// A single EventSymbol object represents a single tag. C++ exception event
-// symbol is a weak symbol generated in every object file in which exceptions
-// are used, and has name '__cpp_exception' for linking.
-class EventSymbol : public Symbol {
+// A single TagSymbol object represents a single tag. The C++ exception symbol
+// is a weak symbol generated in every object file in which exceptions are used,
+// and is named '__cpp_exception' for linking.
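+//
+// For example (illustrative): C++ exception handling uses the single
+// '__cpp_exception' tag, whose signature carries one i32 parameter (a pointer
+// to the thrown exception object in linear memory), so catch clauses can
+// filter caught exceptions by that tag.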
+class TagSymbol : public Symbol {
public:
- static bool classof(const Symbol *s) { return s->kind() == DefinedEventKind; }
+ static bool classof(const Symbol *s) { return s->kind() == DefinedTagKind; }
- const WasmEventType *getEventType() const { return eventType; }
+ const WasmTagType *getTagType() const { return tagType; }
- // Get/set the event index
- uint32_t getEventIndex() const;
- void setEventIndex(uint32_t index);
- bool hasEventIndex() const;
+ // Get/set the tag index
+ uint32_t getTagIndex() const;
+ void setTagIndex(uint32_t index);
+ bool hasTagIndex() const;
const WasmSignature *signature;
protected:
- EventSymbol(StringRef name, Kind k, uint32_t flags, InputFile *f,
- const WasmEventType *eventType, const WasmSignature *sig)
- : Symbol(name, k, flags, f), signature(sig), eventType(eventType) {}
+ TagSymbol(StringRef name, Kind k, uint32_t flags, InputFile *f,
+ const WasmTagType *tagType, const WasmSignature *sig)
+ : Symbol(name, k, flags, f), signature(sig), tagType(tagType) {}
- const WasmEventType *eventType;
- uint32_t eventIndex = INVALID_INDEX;
+ const WasmTagType *tagType;
+ uint32_t tagIndex = INVALID_INDEX;
};
-class DefinedEvent : public EventSymbol {
+class DefinedTag : public TagSymbol {
public:
- DefinedEvent(StringRef name, uint32_t flags, InputFile *file,
- InputEvent *event);
+ DefinedTag(StringRef name, uint32_t flags, InputFile *file, InputTag *tag);
- static bool classof(const Symbol *s) { return s->kind() == DefinedEventKind; }
+ static bool classof(const Symbol *s) { return s->kind() == DefinedTagKind; }
- InputEvent *event;
+ InputTag *tag;
};
// LazySymbol represents a symbol that is not yet in the link, but we know where
static bool classof(const Symbol *s) { return s->kind() == LazyKind; }
void fetch();
+ void setWeak();
MemoryBufferRef getMemberBuffer();
// Lazy symbols can have a signature because they can replace an
// Function that directly calls all ctors in priority order.
static DefinedFunction *callCtors;
- // __wasm_apply_relocs
+ // __wasm_call_dtors
+ // Function that calls the libc/etc. cleanup function.
+ static DefinedFunction *callDtors;
+
+ // __wasm_apply_data_relocs
// Function that applies relocations to data segment post-instantiation.
- static DefinedFunction *applyRelocs;
+ static DefinedFunction *applyDataRelocs;
+
+ // __wasm_apply_global_relocs
+ // Function that applies relocations to wasm globals post-instantiation.
+ // Unlike __wasm_apply_data_relocs this needs to run on every thread.
+ static DefinedFunction *applyGlobalRelocs;
// __wasm_init_tls
// Function that allocates thread-local storage and initializes it.
static DefinedFunction *initTLS;
+ // Pointer to the function that is to be used in the start section.
+ // (normally an alias of initMemory, or applyGlobalRelocs).
+ static DefinedFunction *startFunction;
+
// __dso_handle
// Symbol used in calls to __cxa_atexit to determine current DLL
static DefinedData *dsoHandle;
// Used in PIC code for offset of indirect function table
static UndefinedGlobal *tableBase;
static DefinedData *definedTableBase;
+ // 32-bit copy in wasm64 to work around init expr limitations.
+ // These can potentially be removed again once we have
+ // https://github.com/WebAssembly/extended-const
+ static UndefinedGlobal *tableBase32;
+ static DefinedData *definedTableBase32;
// __memory_base
// Used in PIC code for offset of global data
static UndefinedGlobal *memoryBase;
static DefinedData *definedMemoryBase;
+
+ // __indirect_function_table
+ // Used as an address space for function pointers, with each function that is
+ // used as a function pointer being allocated a slot.
+ static TableSymbol *indirectFunctionTable;
};
// A buffer class that is large enough to hold any Symbol-derived
alignas(DefinedFunction) char a[sizeof(DefinedFunction)];
alignas(DefinedData) char b[sizeof(DefinedData)];
alignas(DefinedGlobal) char c[sizeof(DefinedGlobal)];
- alignas(DefinedEvent) char d[sizeof(DefinedEvent)];
- alignas(LazySymbol) char e[sizeof(LazySymbol)];
- alignas(UndefinedFunction) char f[sizeof(UndefinedFunction)];
- alignas(UndefinedData) char g[sizeof(UndefinedData)];
- alignas(UndefinedGlobal) char h[sizeof(UndefinedGlobal)];
- alignas(SectionSymbol) char i[sizeof(SectionSymbol)];
+ alignas(DefinedTag) char d[sizeof(DefinedTag)];
+ alignas(DefinedTable) char e[sizeof(DefinedTable)];
+ alignas(LazySymbol) char f[sizeof(LazySymbol)];
+ alignas(UndefinedFunction) char g[sizeof(UndefinedFunction)];
+ alignas(UndefinedData) char h[sizeof(UndefinedData)];
+ alignas(UndefinedGlobal) char i[sizeof(UndefinedGlobal)];
+ alignas(UndefinedTable) char j[sizeof(UndefinedTable)];
+ alignas(SectionSymbol) char k[sizeof(SectionSymbol)];
};
// It is important to keep the size of SymbolUnion small for performance and
// memory usage reasons. 96 bytes is a soft limit based on the size of
// UndefinedFunction on a 64-bit system.
-static_assert(sizeof(SymbolUnion) <= 112, "SymbolUnion too large");
+static_assert(sizeof(SymbolUnion) <= 120, "SymbolUnion too large");
void printTraceSymbol(Symbol *sym);
void printTraceSymbolUndefined(StringRef name, const InputFile* file);
#include "SyntheticSections.h"
#include "InputChunks.h"
-#include "InputEvent.h"
-#include "InputGlobal.h"
+#include "InputElement.h"
#include "OutputSegment.h"
#include "SymbolTable.h"
#include "llvm/Support/Path.h"
uint32_t numImports = importedSymbols.size() + gotSymbols.size();
if (config->importMemory)
++numImports;
- if (config->importTable)
- ++numImports;
return numImports;
}
void ImportSection::addImport(Symbol *sym) {
assert(!isSealed);
- importedSymbols.emplace_back(sym);
- if (auto *f = dyn_cast<FunctionSymbol>(sym))
- f->setFunctionIndex(numImportedFunctions++);
- else if (auto *g = dyn_cast<GlobalSymbol>(sym))
- g->setGlobalIndex(numImportedGlobals++);
- else
- cast<EventSymbol>(sym)->setEventIndex(numImportedEvents++);
+ StringRef module = sym->importModule.getValueOr(defaultModule);
+ StringRef name = sym->importName.getValueOr(sym->getName());
+ if (auto *f = dyn_cast<FunctionSymbol>(sym)) {
+ ImportKey<WasmSignature> key(*(f->getSignature()), module, name);
+ auto entry = importedFunctions.try_emplace(key, numImportedFunctions);
+ if (entry.second) {
+ importedSymbols.emplace_back(sym);
+ f->setFunctionIndex(numImportedFunctions++);
+ } else {
+ f->setFunctionIndex(entry.first->second);
+ }
+ } else if (auto *g = dyn_cast<GlobalSymbol>(sym)) {
+ ImportKey<WasmGlobalType> key(*(g->getGlobalType()), module, name);
+ auto entry = importedGlobals.try_emplace(key, numImportedGlobals);
+ if (entry.second) {
+ importedSymbols.emplace_back(sym);
+ g->setGlobalIndex(numImportedGlobals++);
+ } else {
+ g->setGlobalIndex(entry.first->second);
+ }
+ } else if (auto *t = dyn_cast<TagSymbol>(sym)) {
+ // NB: There's currently only one possible kind of tag, and no
+ // `UndefinedTag`, so we don't bother de-duplicating tag imports.
+ importedSymbols.emplace_back(sym);
+ t->setTagIndex(numImportedTags++);
+ } else {
+ assert(TableSymbol::classof(sym));
+ auto *table = cast<TableSymbol>(sym);
+ ImportKey<WasmTableType> key(*(table->getTableType()), module, name);
+ auto entry = importedTables.try_emplace(key, numImportedTables);
+ if (entry.second) {
+ importedSymbols.emplace_back(sym);
+ table->setTableNumber(numImportedTables++);
+ } else {
+ table->setTableNumber(entry.first->second);
+ }
+ }
}
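+
+// Illustrative consequence of the de-duplication above (hypothetical symbols):
+// if two object files both import (env, foo) with signature (i32) -> i32,
+// only one WasmImport is emitted and both symbols receive the same function
+// index; importing foo again with a different signature produces a separate
+// entry, because the type is part of the key.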
void ImportSection::writeBody() {
writeUleb128(os, getNumImports(), "import count");
+ bool is64 = config->is64.getValueOr(false);
+
if (config->importMemory) {
WasmImport import;
import.Module = defaultModule;
import.Field = "memory";
import.Kind = WASM_EXTERNAL_MEMORY;
import.Memory.Flags = 0;
- import.Memory.Initial = out.memorySec->numMemoryPages;
+ import.Memory.Minimum = out.memorySec->numMemoryPages;
if (out.memorySec->maxMemoryPages != 0 || config->sharedMemory) {
import.Memory.Flags |= WASM_LIMITS_FLAG_HAS_MAX;
import.Memory.Maximum = out.memorySec->maxMemoryPages;
}
if (config->sharedMemory)
import.Memory.Flags |= WASM_LIMITS_FLAG_IS_SHARED;
- if (config->is64)
+ if (is64)
import.Memory.Flags |= WASM_LIMITS_FLAG_IS_64;
writeImport(os, import);
}
- if (config->importTable) {
- uint32_t tableSize = config->tableBase + out.elemSec->numEntries();
- WasmImport import;
- import.Module = defaultModule;
- import.Field = functionTableName;
- import.Kind = WASM_EXTERNAL_TABLE;
- import.Table.ElemType = WASM_TYPE_FUNCREF;
- import.Table.Limits = {0, tableSize, 0};
- writeImport(os, import);
- }
-
for (const Symbol *sym : importedSymbols) {
WasmImport import;
- if (auto *f = dyn_cast<UndefinedFunction>(sym)) {
- import.Field = f->importName ? *f->importName : sym->getName();
- import.Module = f->importModule ? *f->importModule : defaultModule;
- } else if (auto *g = dyn_cast<UndefinedGlobal>(sym)) {
- import.Field = g->importName ? *g->importName : sym->getName();
- import.Module = g->importModule ? *g->importModule : defaultModule;
- } else {
- import.Field = sym->getName();
- import.Module = defaultModule;
- }
+ import.Field = sym->importName.getValueOr(sym->getName());
+ import.Module = sym->importModule.getValueOr(defaultModule);
if (auto *functionSym = dyn_cast<FunctionSymbol>(sym)) {
import.Kind = WASM_EXTERNAL_FUNCTION;
} else if (auto *globalSym = dyn_cast<GlobalSymbol>(sym)) {
import.Kind = WASM_EXTERNAL_GLOBAL;
import.Global = *globalSym->getGlobalType();
+ } else if (auto *tagSym = dyn_cast<TagSymbol>(sym)) {
+ import.Kind = WASM_EXTERNAL_TAG;
+ import.Tag.Attribute = tagSym->getTagType()->Attribute;
+ import.Tag.SigIndex = out.typeSec->lookupType(*tagSym->signature);
} else {
- auto *eventSym = cast<EventSymbol>(sym);
- import.Kind = WASM_EXTERNAL_EVENT;
- import.Event.Attribute = eventSym->getEventType()->Attribute;
- import.Event.SigIndex = out.typeSec->lookupType(*eventSym->signature);
+ auto *tableSym = cast<TableSymbol>(sym);
+ import.Kind = WASM_EXTERNAL_TABLE;
+ import.Table = *tableSym->getTableType();
}
writeImport(os, import);
}
for (const Symbol *sym : gotSymbols) {
WasmImport import;
import.Kind = WASM_EXTERNAL_GLOBAL;
- import.Global = {WASM_TYPE_I32, true};
+ auto ptrType = is64 ? WASM_TYPE_I64 : WASM_TYPE_I32;
+ import.Global = {static_cast<uint8_t>(ptrType), true};
if (isa<DataSymbol>(sym))
import.Module = "GOT.mem";
else
}
void TableSection::writeBody() {
- uint32_t tableSize = config->tableBase + out.elemSec->numEntries();
-
raw_ostream &os = bodyOutputStream;
- writeUleb128(os, 1, "table count");
- WasmLimits limits;
- if (config->growableTable)
- limits = {0, tableSize, 0};
- else
- limits = {WASM_LIMITS_FLAG_HAS_MAX, tableSize, tableSize};
- writeTableType(os, WasmTable{WASM_TYPE_FUNCREF, limits});
+
+ writeUleb128(os, inputTables.size(), "table count");
+ for (const InputTable *table : inputTables)
+ writeTableType(os, table->getType());
+}
+
+void TableSection::addTable(InputTable *table) {
+ if (!table->live)
+ return;
+ // Some inputs require that the indirect function table be assigned to table
+ // number 0.
+ if (config->legacyFunctionTable &&
+ isa<DefinedTable>(WasmSym::indirectFunctionTable) &&
+ cast<DefinedTable>(WasmSym::indirectFunctionTable)->table == table) {
+ if (out.importSec->getNumImportedTables()) {
+ // Alack! Some other input imported a table, meaning that we are unable
+ // to assign table number 0 to the indirect function table.
+ for (const auto *culprit : out.importSec->importedSymbols) {
+ if (isa<UndefinedTable>(culprit)) {
+ error("object file not built with 'reference-types' feature "
+ "conflicts with import of table " +
+ culprit->getName() + " by file " +
+ toString(culprit->getFile()));
+ return;
+ }
+ }
+ llvm_unreachable("failed to find conflicting table import");
+ }
+ inputTables.insert(inputTables.begin(), table);
+ return;
+ }
+ inputTables.push_back(table);
+}
+
+void TableSection::assignIndexes() {
+ uint32_t tableNumber = out.importSec->getNumImportedTables();
+ for (InputTable *t : inputTables)
+ t->assignIndex(tableNumber++);
}
void MemorySection::writeBody() {
flags |= WASM_LIMITS_FLAG_HAS_MAX;
if (config->sharedMemory)
flags |= WASM_LIMITS_FLAG_IS_SHARED;
- if (config->is64)
+ if (config->is64.getValueOr(false))
flags |= WASM_LIMITS_FLAG_IS_64;
writeUleb128(os, flags, "memory limits flags");
writeUleb128(os, numMemoryPages, "initial pages");
writeUleb128(os, maxMemoryPages, "max pages");
}
-void EventSection::writeBody() {
+void TagSection::writeBody() {
raw_ostream &os = bodyOutputStream;
- writeUleb128(os, inputEvents.size(), "event count");
- for (InputEvent *e : inputEvents) {
- e->event.Type.SigIndex = out.typeSec->lookupType(e->signature);
- writeEvent(os, e->event);
+ writeUleb128(os, inputTags.size(), "tag count");
+ for (InputTag *t : inputTags) {
+ WasmTagType type = t->getType();
+ type.SigIndex = out.typeSec->lookupType(t->signature);
+ writeTagType(os, type);
}
}
-void EventSection::addEvent(InputEvent *event) {
- if (!event->live)
+void TagSection::addTag(InputTag *tag) {
+ if (!tag->live)
return;
- uint32_t eventIndex =
- out.importSec->getNumImportedEvents() + inputEvents.size();
- LLVM_DEBUG(dbgs() << "addEvent: " << eventIndex << "\n");
- event->setEventIndex(eventIndex);
- inputEvents.push_back(event);
+ uint32_t tagIndex = out.importSec->getNumImportedTags() + inputTags.size();
+ LLVM_DEBUG(dbgs() << "addTag: " << tagIndex << "\n");
+ tag->assignIndex(tagIndex);
+ inputTags.push_back(tag);
}
void GlobalSection::assignIndexes() {
uint32_t globalIndex = out.importSec->getNumImportedGlobals();
for (InputGlobal *g : inputGlobals)
- g->setGlobalIndex(globalIndex++);
- for (Symbol *sym : staticGotSymbols)
+ g->assignIndex(globalIndex++);
+ for (Symbol *sym : internalGotSymbols)
sym->setGOTIndex(globalIndex++);
isSealed = true;
}
-void GlobalSection::addStaticGOTEntry(Symbol *sym) {
+static void ensureIndirectFunctionTable() {
+ if (!WasmSym::indirectFunctionTable)
+ WasmSym::indirectFunctionTable =
+ symtab->resolveIndirectFunctionTable(/*required =*/true);
+}
+
+void GlobalSection::addInternalGOTEntry(Symbol *sym) {
assert(!isSealed);
if (sym->requiresGOT)
return;
- LLVM_DEBUG(dbgs() << "addStaticGOTEntry: " << sym->getName() << " "
+ LLVM_DEBUG(dbgs() << "addInternalGOTEntry: " << sym->getName() << " "
<< toString(sym->kind()) << "\n");
sym->requiresGOT = true;
- if (auto *F = dyn_cast<FunctionSymbol>(sym))
+ if (auto *F = dyn_cast<FunctionSymbol>(sym)) {
+ ensureIndirectFunctionTable();
out.elemSec->addEntry(F);
- staticGotSymbols.push_back(sym);
+ }
+ internalGotSymbols.push_back(sym);
+}
+
+void GlobalSection::generateRelocationCode(raw_ostream &os) const {
+ bool is64 = config->is64.getValueOr(false);
+ unsigned opcode_ptr_const = is64 ? WASM_OPCODE_I64_CONST
+ : WASM_OPCODE_I32_CONST;
+ unsigned opcode_ptr_add = is64 ? WASM_OPCODE_I64_ADD
+ : WASM_OPCODE_I32_ADD;
+
+ for (const Symbol *sym : internalGotSymbols) {
+ if (auto *d = dyn_cast<DefinedData>(sym)) {
+ // Get __memory_base
+ writeU8(os, WASM_OPCODE_GLOBAL_GET, "GLOBAL_GET");
+ writeUleb128(os, WasmSym::memoryBase->getGlobalIndex(), "__memory_base");
+
+ // Add the virtual address of the data symbol
+ writeU8(os, opcode_ptr_const, "CONST");
+ writeSleb128(os, d->getVA(), "offset");
+ } else if (auto *f = dyn_cast<FunctionSymbol>(sym)) {
+ if (f->isStub)
+ continue;
+ // Get __table_base
+ writeU8(os, WASM_OPCODE_GLOBAL_GET, "GLOBAL_GET");
+ writeUleb128(os, WasmSym::tableBase->getGlobalIndex(), "__table_base");
+
+ // Add the table index to __table_base
+ writeU8(os, opcode_ptr_const, "CONST");
+ writeSleb128(os, f->getTableIndex(), "offset");
+ } else {
+ assert(isa<UndefinedData>(sym));
+ continue;
+ }
+ writeU8(os, opcode_ptr_add, "ADD");
+ writeU8(os, WASM_OPCODE_GLOBAL_SET, "GLOBAL_SET");
+ writeUleb128(os, sym->getGOTIndex(), "got_entry");
+ }
}
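+
+// Sketch of the code emitted above for one internally-defined data symbol
+// (wasm32; names are illustrative):
+//   global.get $__memory_base
+//   i32.const  <VA of the symbol>
+//   i32.add
+//   global.set $<its GOT entry>
+// Function symbols use $__table_base and the symbol's table index instead.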
void GlobalSection::writeBody() {
raw_ostream &os = bodyOutputStream;
writeUleb128(os, numGlobals(), "global count");
- for (InputGlobal *g : inputGlobals)
- writeGlobal(os, g->global);
- // TODO(wvo): when do these need I64_CONST?
- for (const Symbol *sym : staticGotSymbols) {
- WasmGlobal global;
- global.Type = {WASM_TYPE_I32, false};
- global.InitExpr.Opcode = WASM_OPCODE_I32_CONST;
+ for (InputGlobal *g : inputGlobals) {
+ writeGlobalType(os, g->getType());
+ writeInitExpr(os, g->getInitExpr());
+ }
+ bool is64 = config->is64.getValueOr(false);
+ uint8_t itype = is64 ? WASM_TYPE_I64 : WASM_TYPE_I32;
+ for (const Symbol *sym : internalGotSymbols) {
+ // In the case of dynamic linking, internal GOT entries
+ // need to be mutable since they get updated to the correct
+ // runtime value during `__wasm_apply_global_relocs`.
+ bool mutable_ = config->isPic && !sym->isStub;
+ WasmGlobalType type{itype, mutable_};
+ WasmInitExpr initExpr;
if (auto *d = dyn_cast<DefinedData>(sym))
- global.InitExpr.Value.Int32 = d->getVirtualAddress();
+ initExpr = intConst(d->getVA(), is64);
else if (auto *f = dyn_cast<FunctionSymbol>(sym))
- global.InitExpr.Value.Int32 = f->getTableIndex();
+ initExpr = intConst(f->isStub ? 0 : f->getTableIndex(), is64);
else {
assert(isa<UndefinedData>(sym));
- global.InitExpr.Value.Int32 = 0;
+ initExpr = intConst(0, is64);
}
- writeGlobal(os, global);
+ writeGlobalType(os, type);
+ writeInitExpr(os, initExpr);
}
for (const DefinedData *sym : dataAddressGlobals) {
- WasmGlobal global;
- global.Type = {WASM_TYPE_I32, false};
- global.InitExpr.Opcode = WASM_OPCODE_I32_CONST;
- global.InitExpr.Value.Int32 = sym->getVirtualAddress();
- writeGlobal(os, global);
+ WasmGlobalType type{itype, false};
+ writeGlobalType(os, type);
+ writeInitExpr(os, intConst(sym->getVA(), is64));
}
}
}
bool StartSection::isNeeded() const {
- return !config->relocatable && hasInitializedSegments && config->sharedMemory;
+ return WasmSym::startFunction != nullptr;
}
void StartSection::writeBody() {
raw_ostream &os = bodyOutputStream;
- writeUleb128(os, WasmSym::initMemory->getFunctionIndex(), "function index");
+ writeUleb128(os, WasmSym::startFunction->getFunctionIndex(),
+ "function index");
}
void ElemSection::addEntry(FunctionSymbol *sym) {
- if (sym->hasTableIndex())
+ // Don't add stub functions to the wasm table. The address of all stub
+ // functions should be zero and they should not appear in the table. They
+ // only exist so that calls to missing functions can validate.
+ if (sym->hasTableIndex() || sym->isStub)
return;
sym->setTableIndex(config->tableBase + indirectFunctions.size());
indirectFunctions.emplace_back(sym);
void ElemSection::writeBody() {
raw_ostream &os = bodyOutputStream;
+ assert(WasmSym::indirectFunctionTable);
writeUleb128(os, 1, "segment count");
- writeUleb128(os, 0, "table index");
+ uint32_t tableNumber = WasmSym::indirectFunctionTable->getTableNumber();
+ uint32_t flags = 0;
+ if (tableNumber)
+ flags |= WASM_ELEM_SEGMENT_HAS_TABLE_NUMBER;
+ writeUleb128(os, flags, "elem segment flags");
+ if (flags & WASM_ELEM_SEGMENT_HAS_TABLE_NUMBER)
+ writeUleb128(os, tableNumber, "table number");
+
WasmInitExpr initExpr;
if (config->isPic) {
initExpr.Opcode = WASM_OPCODE_GLOBAL_GET;
- initExpr.Value.Global = WasmSym::tableBase->getGlobalIndex();
+ initExpr.Value.Global =
+ (config->is64.getValueOr(false) ? WasmSym::tableBase32
+ : WasmSym::tableBase)
+ ->getGlobalIndex();
} else {
initExpr.Opcode = WASM_OPCODE_I32_CONST;
initExpr.Value.Int32 = config->tableBase;
}
writeInitExpr(os, initExpr);
- writeUleb128(os, indirectFunctions.size(), "elem count");
+ if (flags & WASM_ELEM_SEGMENT_MASK_HAS_ELEM_KIND) {
+ // We only write active function table initializers, for which the elem kind
+ // is specified to be written as 0x00 and interpreted to mean "funcref".
+ const uint8_t elemKind = 0;
+ writeU8(os, elemKind, "elem kind");
+ }
+
+ writeUleb128(os, indirectFunctions.size(), "elem count");
uint32_t tableIndex = config->tableBase;
for (const FunctionSymbol *sym : indirectFunctions) {
assert(sym->getTableIndex() == tableIndex);
writeUleb128(sub.os, g->getGlobalIndex(), "index");
if (sym->isDefined() || (flags & WASM_SYMBOL_EXPLICIT_NAME) != 0)
writeStr(sub.os, sym->getName(), "sym name");
- } else if (auto *e = dyn_cast<EventSymbol>(sym)) {
- writeUleb128(sub.os, e->getEventIndex(), "index");
+ } else if (auto *t = dyn_cast<TagSymbol>(sym)) {
+ writeUleb128(sub.os, t->getTagIndex(), "index");
+ if (sym->isDefined() || (flags & WASM_SYMBOL_EXPLICIT_NAME) != 0)
+ writeStr(sub.os, sym->getName(), "sym name");
+ } else if (auto *t = dyn_cast<TableSymbol>(sym)) {
+ writeUleb128(sub.os, t->getTableNumber(), "table number");
if (sym->isDefined() || (flags & WASM_SYMBOL_EXPLICIT_NAME) != 0)
writeStr(sub.os, sym->getName(), "sym name");
} else if (isa<DataSymbol>(sym)) {
for (const OutputSegment *s : dataSegments) {
writeStr(sub.os, s->name, "segment name");
writeUleb128(sub.os, s->alignment, "alignment");
- writeUleb128(sub.os, 0, "flags");
+ writeUleb128(sub.os, s->linkingFlags, "flags");
}
sub.writeTo(os);
}
continue;
StringRef comdat = inputSegments[0]->getComdatName();
#ifndef NDEBUG
- for (const InputSegment *isec : inputSegments)
+ for (const InputChunk *isec : inputSegments)
assert(isec->getComdatName() == comdat);
#endif
if (!comdat.empty())
symtabEntries.emplace_back(sym);
}
-unsigned NameSection::numNames() const {
+unsigned NameSection::numNamedFunctions() const {
unsigned numNames = out.importSec->getNumImportedFunctions();
+
for (const InputFunction *f : out.functionSec->inputFunctions)
if (!f->getName().empty() || !f->getDebugName().empty())
++numNames;
return numNames;
}
+unsigned NameSection::numNamedGlobals() const {
+ unsigned numNames = out.importSec->getNumImportedGlobals();
+
+ for (const InputGlobal *g : out.globalSec->inputGlobals)
+ if (!g->getName().empty())
+ ++numNames;
+
+ numNames += out.globalSec->internalGotSymbols.size();
+ return numNames;
+}
+
+unsigned NameSection::numNamedDataSegments() const {
+ unsigned numNames = 0;
+
+ for (const OutputSegment *s : segments)
+ if (!s->name.empty() && !s->isBss)
+ ++numNames;
+
+ return numNames;
+}
+
// Create the custom "name" section containing debug symbol names.
void NameSection::writeBody() {
- SubSection sub(WASM_NAMES_FUNCTION);
- writeUleb128(sub.os, numNames(), "name count");
-
- // Names must appear in function index order. As it happens importedSymbols
- // and inputFunctions are numbered in order with imported functions coming
- // first.
- for (const Symbol *s : out.importSec->importedSymbols) {
- if (auto *f = dyn_cast<FunctionSymbol>(s)) {
- writeUleb128(sub.os, f->getFunctionIndex(), "func index");
- writeStr(sub.os, toString(*s), "symbol name");
+ unsigned count = numNamedFunctions();
+ if (count) {
+ SubSection sub(WASM_NAMES_FUNCTION);
+ writeUleb128(sub.os, count, "name count");
+
+ // Function names appear in function index order. As it happens
+ // importedSymbols and inputFunctions are numbered in order with imported
+ // functions coming first.
+ for (const Symbol *s : out.importSec->importedSymbols) {
+ if (auto *f = dyn_cast<FunctionSymbol>(s)) {
+ writeUleb128(sub.os, f->getFunctionIndex(), "func index");
+ writeStr(sub.os, toString(*s), "symbol name");
+ }
}
+ for (const InputFunction *f : out.functionSec->inputFunctions) {
+ if (!f->getName().empty()) {
+ writeUleb128(sub.os, f->getFunctionIndex(), "func index");
+ if (!f->getDebugName().empty()) {
+ writeStr(sub.os, f->getDebugName(), "symbol name");
+ } else {
+ writeStr(sub.os, maybeDemangleSymbol(f->getName()), "symbol name");
+ }
+ }
+ }
+ sub.writeTo(bodyOutputStream);
}
- for (const InputFunction *f : out.functionSec->inputFunctions) {
- if (!f->getName().empty()) {
- writeUleb128(sub.os, f->getFunctionIndex(), "func index");
- if (!f->getDebugName().empty()) {
- writeStr(sub.os, f->getDebugName(), "symbol name");
- } else {
- writeStr(sub.os, maybeDemangleSymbol(f->getName()), "symbol name");
+
+ count = numNamedGlobals();
+ if (count) {
+ SubSection sub(WASM_NAMES_GLOBAL);
+ writeUleb128(sub.os, count, "name count");
+
+ for (const Symbol *s : out.importSec->importedSymbols) {
+ if (auto *g = dyn_cast<GlobalSymbol>(s)) {
+ writeUleb128(sub.os, g->getGlobalIndex(), "global index");
+ writeStr(sub.os, toString(*s), "symbol name");
}
}
+ for (const Symbol *s : out.importSec->gotSymbols) {
+ writeUleb128(sub.os, s->getGOTIndex(), "global index");
+ writeStr(sub.os, toString(*s), "symbol name");
+ }
+ for (const InputGlobal *g : out.globalSec->inputGlobals) {
+ if (!g->getName().empty()) {
+ writeUleb128(sub.os, g->getAssignedIndex(), "global index");
+ writeStr(sub.os, maybeDemangleSymbol(g->getName()), "symbol name");
+ }
+ }
+ for (Symbol *s : out.globalSec->internalGotSymbols) {
+ writeUleb128(sub.os, s->getGOTIndex(), "global index");
+ if (isa<FunctionSymbol>(s))
+ writeStr(sub.os, "GOT.func.internal." + toString(*s), "symbol name");
+ else
+ writeStr(sub.os, "GOT.data.internal." + toString(*s), "symbol name");
+ }
+
+ sub.writeTo(bodyOutputStream);
}
- sub.writeTo(bodyOutputStream);
+ count = numNamedDataSegments();
+ if (count) {
+ SubSection sub(WASM_NAMES_DATA_SEGMENT);
+ writeUleb128(sub.os, count, "name count");
+
+ for (OutputSegment *s : segments) {
+ if (!s->name.empty() && !s->isBss) {
+ writeUleb128(sub.os, s->index, "segment index");
+ writeStr(sub.os, s->name, "segment name");
+ }
+ }
+
+ sub.writeTo(bodyOutputStream);
+ }
}
void ProducersSection::addInfo(const WasmProducerInfo &info) {
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringMap.h"
-#include "llvm/Object/WasmTraits.h"
+#include "llvm/BinaryFormat/WasmTraits.h"
#define DEBUG_TYPE "lld"
llvm::DenseMap<WasmSignature, int32_t> typeIndices;
};
+/**
+ * A key for some kind of imported entity of type `T`.
+ *
+ * Used when de-duplicating imports.
+ */
+template <typename T> struct ImportKey {
+public:
+ enum class State { Plain, Empty, Tombstone };
+
+public:
+ T type;
+ llvm::Optional<StringRef> importModule;
+ llvm::Optional<StringRef> importName;
+ State state;
+
+public:
+ ImportKey(T type) : type(type), state(State::Plain) {}
+ ImportKey(T type, State state) : type(type), state(state) {}
+ ImportKey(T type, llvm::Optional<StringRef> importModule,
+ llvm::Optional<StringRef> importName)
+ : type(type), importModule(importModule), importName(importName),
+ state(State::Plain) {}
+};
+
+template <typename T>
+inline bool operator==(const ImportKey<T> &lhs, const ImportKey<T> &rhs) {
+ return lhs.state == rhs.state && lhs.importModule == rhs.importModule &&
+ lhs.importName == rhs.importName && lhs.type == rhs.type;
+}
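+
+// For instance, two undefined functions that both import (say) module "env"
+// and field "foo" with the same signature compare equal under this key, which
+// is what allows the import section to de-duplicate them into one import.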
+
+} // namespace wasm
+} // namespace lld
+
+// `ImportKey<T>` can be used as a key in a `DenseMap` if `T` can be used as a
+// key in a `DenseMap`.
+namespace llvm {
+template <typename T> struct DenseMapInfo<lld::wasm::ImportKey<T>> {
+ static lld::wasm::ImportKey<T> getEmptyKey() {
+ typename lld::wasm::ImportKey<T> key(llvm::DenseMapInfo<T>::getEmptyKey());
+ key.state = lld::wasm::ImportKey<T>::State::Empty;
+ return key;
+ }
+ static lld::wasm::ImportKey<T> getTombstoneKey() {
+ typename lld::wasm::ImportKey<T> key(llvm::DenseMapInfo<T>::getEmptyKey());
+ key.state = lld::wasm::ImportKey<T>::State::Tombstone;
+ return key;
+ }
+ static unsigned getHashValue(const lld::wasm::ImportKey<T> &key) {
+ uintptr_t hash = hash_value(key.importModule);
+ hash = hash_combine(hash, key.importName);
+ hash = hash_combine(hash, llvm::DenseMapInfo<T>::getHashValue(key.type));
+ hash = hash_combine(hash, key.state);
+ return hash;
+ }
+ static bool isEqual(const lld::wasm::ImportKey<T> &lhs,
+ const lld::wasm::ImportKey<T> &rhs) {
+ return lhs == rhs;
+ }
+};
+} // end namespace llvm
+
+namespace lld {
+namespace wasm {
+
class ImportSection : public SyntheticSection {
public:
ImportSection() : SyntheticSection(llvm::wasm::WASM_SEC_IMPORT) {}
assert(isSealed);
return numImportedFunctions;
}
- uint32_t getNumImportedEvents() const {
+ uint32_t getNumImportedTags() const {
assert(isSealed);
- return numImportedEvents;
+ return numImportedTags;
+ }
+ uint32_t getNumImportedTables() const {
+ assert(isSealed);
+ return numImportedTables;
}
std::vector<const Symbol *> importedSymbols;
+ std::vector<const Symbol *> gotSymbols;
protected:
bool isSealed = false;
unsigned numImportedGlobals = 0;
unsigned numImportedFunctions = 0;
- unsigned numImportedEvents = 0;
- std::vector<const Symbol *> gotSymbols;
+ unsigned numImportedTags = 0;
+ unsigned numImportedTables = 0;
+ llvm::DenseMap<ImportKey<WasmGlobalType>, uint32_t> importedGlobals;
+ llvm::DenseMap<ImportKey<WasmSignature>, uint32_t> importedFunctions;
+ llvm::DenseMap<ImportKey<WasmTableType>, uint32_t> importedTables;
};
class FunctionSection : public SyntheticSection {
public:
TableSection() : SyntheticSection(llvm::wasm::WASM_SEC_TABLE) {}
- bool isNeeded() const override {
- // Always output a table section (or table import), even if there are no
- // indirect calls. There are two reasons for this:
- // 1. For executables it is useful to have an empty table slot at 0
- // which can be filled with a null function call handler.
- // 2. If we don't do this, any program that contains a call_indirect but
- // no address-taken function will fail at validation time since it is
- // a validation error to include a call_indirect instruction if there
- // is not table.
- return !config->importTable;
- }
-
+ bool isNeeded() const override { return inputTables.size() > 0; };
+ void assignIndexes() override;
void writeBody() override;
+ void addTable(InputTable *table);
+
+ std::vector<InputTable *> inputTables;
};
class MemorySection : public SyntheticSection {
uint64_t maxMemoryPages = 0;
};
-// The event section contains a list of declared wasm events associated with the
-// module. Currently the only supported event kind is exceptions. A single event
-// entry represents a single event with an event tag. All C++ exceptions are
-// represented by a single event. An event entry in this section contains
-// information on what kind of event it is (e.g. exception) and the type of
-// values contained in a single event object. (In wasm, an event can contain
-// multiple values of primitive types. But for C++ exceptions, we just throw a
-// pointer which is an i32 value (for wasm32 architecture), so the signature of
-// C++ exception is (i32)->(void), because all event types are assumed to have
-// void return type to share WasmSignature with functions.)
-class EventSection : public SyntheticSection {
+// The tag section contains a list of declared wasm tags associated with the
+// module. Currently the only supported tag kind is exceptions. All C++
+// exceptions are represented by a single tag. A tag entry in this section
+// contains information on what kind of tag it is (e.g. exception) and the type
+// of values associated with the tag. (In Wasm, a tag can contain multiple
+// values of primitive types. But for C++ exceptions, we just throw a pointer
+// which is an i32 value (for wasm32 architecture), so the signature of C++
+// exception is (i32)->(void), because all exception tag types are assumed to
+// have void return type to share WasmSignature with functions.)
+class TagSection : public SyntheticSection {
public:
- EventSection() : SyntheticSection(llvm::wasm::WASM_SEC_EVENT) {}
+ TagSection() : SyntheticSection(llvm::wasm::WASM_SEC_TAG) {}
void writeBody() override;
- bool isNeeded() const override { return inputEvents.size() > 0; }
- void addEvent(InputEvent *event);
+ bool isNeeded() const override { return inputTags.size() > 0; }
+ void addTag(InputTag *tag);
- std::vector<InputEvent *> inputEvents;
+ std::vector<InputTag *> inputTags;
};
class GlobalSection : public SyntheticSection {
public:
GlobalSection() : SyntheticSection(llvm::wasm::WASM_SEC_GLOBAL) {}
+
+ static bool classof(const OutputSection *sec) {
+ return sec->type == llvm::wasm::WASM_SEC_GLOBAL;
+ }
+
uint32_t numGlobals() const {
assert(isSealed);
return inputGlobals.size() + dataAddressGlobals.size() +
- staticGotSymbols.size();
+ internalGotSymbols.size();
}
bool isNeeded() const override { return numGlobals() > 0; }
void assignIndexes() override;
void writeBody() override;
void addGlobal(InputGlobal *global);
- void addDataAddressGlobal(DefinedData *global);
- void addStaticGOTEntry(Symbol *sym);
+
+ // Add an internal GOT entry global that corresponds to the given symbol.
+ // Normally GOT entries are imported and assigned by the external dynamic
+ // linker. However, when linking PIC code statically or when linking with
+ // -Bsymbolic we can internalize GOT entries by declaring globals that hold
+ // symbol addresses.
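+ //
+ // For example, the internal GOT entry for a defined data symbol `foo` ends
+ // up as a module-local global roughly like
+ //   (global $GOT.data.internal.foo (mut i32) (i32.const <address of foo>))
+ // (mutable only in PIC output, where __wasm_apply_global_relocs patches it).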
+ //
+ // For the static linking case these internal globals can be completely
+ // eliminated by a post-link optimizer such as wasm-opt.
+ //
+ // TODO(sbc): Another approach to optimizing these away could be to use
+ // specific relocation types combined with linker relaxation which could
+ // transform a `global.get` to an `i32.const`.
+ void addInternalGOTEntry(Symbol *sym);
+ bool needsRelocations() { return internalGotSymbols.size(); }
+ void generateRelocationCode(raw_ostream &os) const;
std::vector<const DefinedData *> dataAddressGlobals;
+ std::vector<InputGlobal *> inputGlobals;
+ std::vector<Symbol *> internalGotSymbols;
protected:
bool isSealed = false;
- std::vector<InputGlobal *> inputGlobals;
- std::vector<Symbol *> staticGotSymbols;
};
class ExportSection : public SyntheticSection {
void writeBody() override;
std::vector<llvm::wasm::WasmExport> exports;
+ std::vector<const Symbol *> exportedSymbols;
};
class StartSection : public SyntheticSection {
public:
- StartSection(bool hasInitializedSegments)
- : SyntheticSection(llvm::wasm::WASM_SEC_START),
- hasInitializedSegments(hasInitializedSegments) {}
+ StartSection() : SyntheticSection(llvm::wasm::WASM_SEC_START) {}
bool isNeeded() const override;
void writeBody() override;
-
-protected:
- bool hasInitializedSegments;
};
class ElemSection : public SyntheticSection {
// Create the custom "name" section containing debug symbol names.
class NameSection : public SyntheticSection {
public:
- NameSection() : SyntheticSection(llvm::wasm::WASM_SEC_CUSTOM, "name") {}
- bool isNeeded() const override {
- return !config->stripDebug && !config->stripAll && numNames() > 0;
- }
+ NameSection(ArrayRef<OutputSegment *> segments)
+ : SyntheticSection(llvm::wasm::WASM_SEC_CUSTOM, "name"),
+ segments(segments) {}
+ bool isNeeded() const override { return !config->stripAll && numNames() > 0; }
void writeBody() override;
- unsigned numNames() const;
+ unsigned numNames() const { return numNamedGlobals() + numNamedFunctions(); }
+ unsigned numNamedGlobals() const;
+ unsigned numNamedFunctions() const;
+ unsigned numNamedDataSegments() const;
+
+protected:
+ ArrayRef<OutputSegment *> segments;
};
class ProducersSection : public SyntheticSection {
TableSection *tableSec;
MemorySection *memorySec;
GlobalSection *globalSec;
- EventSection *eventSec;
+ TagSection *tagSec;
ExportSection *exportSec;
StartSection *startSec;
ElemSection *elemSec;
#include "Writer.h"
#include "Config.h"
#include "InputChunks.h"
-#include "InputEvent.h"
-#include "InputGlobal.h"
+#include "InputElement.h"
+#include "MapFile.h"
#include "OutputSections.h"
#include "OutputSegment.h"
#include "Relocations.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/BinaryFormat/Wasm.h"
-#include "llvm/Object/WasmTraits.h"
+#include "llvm/BinaryFormat/WasmTraits.h"
#include "llvm/Support/FileOutputBuffer.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/FormatVariadic.h"
namespace lld {
namespace wasm {
static constexpr int stackAlignment = 16;
+static constexpr int heapAlignment = 16;
namespace {
bool needsPassiveInitialization(const OutputSegment *segment);
bool hasPassiveInitializedSegments();
+ void createSyntheticInitFunctions();
void createInitMemoryFunction();
- void createApplyRelocationsFunction();
+ void createStartFunction();
+ void createApplyDataRelocationsFunction();
+ void createApplyGlobalRelocationsFunction();
void createCallCtorsFunction();
void createInitTLSFunction();
+ void createCommandExportWrappers();
+ void createCommandExportWrapper(uint32_t functionIndex, DefinedFunction *f);
void assignIndexes();
void populateSymtab();
void calculateCustomSections();
void calculateTypes();
void createOutputSegments();
+ OutputSegment *createOutputSegment(StringRef name);
+ void combineOutputSegments();
void layoutMemory();
void createHeader();
void createCustomSections();
void createSyntheticSections();
+ void createSyntheticSectionsPostLayout();
void finalizeSections();
// Custom sections
uint64_t fileSize = 0;
std::vector<WasmInitEntry> initFunctions;
- llvm::StringMap<std::vector<InputSection *>> customSectionMapping;
+ llvm::StringMap<std::vector<InputChunk *>> customSectionMapping;
+
+ // Stable storage for command export wrapper function name strings.
+ std::list<std::string> commandExportWrapperNames;
// Elements that are used to construct the final output
std::string header;
log("calculateCustomSections");
bool stripDebug = config->stripDebug || config->stripAll;
for (ObjFile *file : symtab->objectFiles) {
- for (InputSection *section : file->customSections) {
+ for (InputChunk *section : file->customSections) {
+ // Exclude COMDAT sections that are not selected for inclusion
+ if (section->discarded)
+ continue;
StringRef name = section->getName();
// These custom sections are known the linker and synthesized rather than
// blindly copied.
});
}
+static void setGlobalPtr(DefinedGlobal *g, uint64_t memoryPtr) {
+ g->global->setPointerValue(memoryPtr);
+}
+
// Fix the memory layout of the output binary. This assigns memory offsets
// to each of the input data sections as well as the explicit stack region.
// The default memory layout is as follows, from low to high.
log("mem: stack size = " + Twine(config->zStackSize));
log("mem: stack base = " + Twine(memoryPtr));
memoryPtr += config->zStackSize;
- auto *sp = cast<DefinedGlobal>(WasmSym::stackPointer);
- switch (sp->global->global.InitExpr.Opcode) {
- case WASM_OPCODE_I32_CONST:
- sp->global->global.InitExpr.Value.Int32 = memoryPtr;
- break;
- case WASM_OPCODE_I64_CONST:
- sp->global->global.InitExpr.Value.Int64 = memoryPtr;
- break;
- default:
- llvm_unreachable("init expr must be i32/i64.const");
- }
+ setGlobalPtr(cast<DefinedGlobal>(WasmSym::stackPointer), memoryPtr);
log("mem: stack top = " + Twine(memoryPtr));
};
}
if (WasmSym::globalBase)
- WasmSym::globalBase->setVirtualAddress(memoryPtr);
+ WasmSym::globalBase->setVA(memoryPtr);
uint64_t dataStart = memoryPtr;
// Arbitrarily set __dso_handle handle to point to the start of the data
// segments.
if (WasmSym::dsoHandle)
- WasmSym::dsoHandle->setVirtualAddress(dataStart);
+ WasmSym::dsoHandle->setVA(dataStart);
out.dylinkSec->memAlign = 0;
for (OutputSegment *seg : segments) {
seg->startVA = memoryPtr;
log(formatv("mem: {0,-15} offset={1,-8} size={2,-8} align={3}", seg->name,
memoryPtr, seg->size, seg->alignment));
- memoryPtr += seg->size;
- if (WasmSym::tlsSize && seg->name == ".tdata") {
- auto *tlsSize = cast<DefinedGlobal>(WasmSym::tlsSize);
- assert(tlsSize->global->global.InitExpr.Opcode == WASM_OPCODE_I32_CONST);
- tlsSize->global->global.InitExpr.Value.Int32 = seg->size;
+ if (!config->relocatable && seg->isTLS()) {
+ if (config->sharedMemory) {
+ auto *tlsSize = cast<DefinedGlobal>(WasmSym::tlsSize);
+ setGlobalPtr(tlsSize, seg->size);
- auto *tlsAlign = cast<DefinedGlobal>(WasmSym::tlsAlign);
- assert(tlsAlign->global->global.InitExpr.Opcode == WASM_OPCODE_I32_CONST);
- tlsAlign->global->global.InitExpr.Value.Int32 = int64_t{1}
- << seg->alignment;
+ auto *tlsAlign = cast<DefinedGlobal>(WasmSym::tlsAlign);
+ setGlobalPtr(tlsAlign, int64_t{1} << seg->alignment);
+ } else {
+ auto *tlsBase = cast<DefinedGlobal>(WasmSym::tlsBase);
+ setGlobalPtr(tlsBase, memoryPtr);
+ }
}
+
+ memoryPtr += seg->size;
}
// Make space for the memory initialization flag
- if (WasmSym::initMemoryFlag) {
+ if (config->sharedMemory && hasPassiveInitializedSegments()) {
memoryPtr = alignTo(memoryPtr, 4);
- WasmSym::initMemoryFlag->setVirtualAddress(memoryPtr);
+ WasmSym::initMemoryFlag = symtab->addSyntheticDataSymbol(
+ "__wasm_init_memory_flag", WASM_SYMBOL_VISIBILITY_HIDDEN);
+ WasmSym::initMemoryFlag->markLive();
+ WasmSym::initMemoryFlag->setVA(memoryPtr);
log(formatv("mem: {0,-15} offset={1,-8} size={2,-8} align={3}",
"__wasm_init_memory_flag", memoryPtr, 4, 4));
memoryPtr += 4;
}
if (WasmSym::dataEnd)
- WasmSym::dataEnd->setVirtualAddress(memoryPtr);
+ WasmSym::dataEnd->setVA(memoryPtr);
- log("mem: static data = " + Twine(memoryPtr - dataStart));
-
- if (config->shared) {
- out.dylinkSec->memSize = memoryPtr;
- return;
- }
+ uint64_t staticDataSize = memoryPtr - dataStart;
+ log("mem: static data = " + Twine(staticDataSize));
+ if (config->isPic)
+ out.dylinkSec->memSize = staticDataSize;
if (!config->stackFirst)
placeStack();
- // Set `__heap_base` to directly follow the end of the stack or global data.
- // The fact that this comes last means that a malloc/brk implementation
- // can grow the heap at runtime.
- log("mem: heap base = " + Twine(memoryPtr));
- if (WasmSym::heapBase)
- WasmSym::heapBase->setVirtualAddress(memoryPtr);
+ if (WasmSym::heapBase) {
+ // Set `__heap_base` to follow the end of the stack or global data. The
+ // fact that this comes last means that a malloc/brk implementation can
+ // grow the heap at runtime.
+ // We'll align the heap base here because memory allocators might expect
+ // __heap_base to be aligned already.
+ memoryPtr = alignTo(memoryPtr, heapAlignment);
+ log("mem: heap base = " + Twine(memoryPtr));
+ WasmSym::heapBase->setVA(memoryPtr);
+ }
- uint64_t maxMemorySetting = 1ULL << (config->is64 ? 48 : 32);
+ uint64_t maxMemorySetting = 1ULL
+ << (config->is64.getValueOr(false) ? 48 : 32);
if (config->initialMemory != 0) {
if (config->initialMemory != alignTo(config->initialMemory, WasmPageSize))
Twine(maxMemorySetting));
memoryPtr = config->initialMemory;
}
- out.dylinkSec->memSize = memoryPtr;
out.memorySec->numMemoryPages =
alignTo(memoryPtr, WasmPageSize) / WasmPageSize;
log("mem: total pages = " + Twine(out.memorySec->numMemoryPages));
- // Check max if explicitly supplied or required by shared memory
- if (config->maxMemory != 0 || config->sharedMemory) {
+ if (config->maxMemory != 0) {
if (config->maxMemory != alignTo(config->maxMemory, WasmPageSize))
error("maximum memory must be " + Twine(WasmPageSize) + "-byte aligned");
if (memoryPtr > config->maxMemory)
if (config->maxMemory > maxMemorySetting)
error("maximum memory too large, cannot be greater than " +
Twine(maxMemorySetting));
- out.memorySec->maxMemoryPages = config->maxMemory / WasmPageSize;
+ }
+
+ // Check max if explicitly supplied or required by shared memory
+ if (config->maxMemory != 0 || config->sharedMemory) {
+ uint64_t max = config->maxMemory;
+ if (max == 0) {
+ // If no maxMemory config was supplied but we are building with
+ // shared memory, we need to pick a sensible upper limit.
+ if (config->isPic)
+ max = maxMemorySetting;
+ else
+ max = alignTo(memoryPtr, WasmPageSize);
+ }
+ out.memorySec->maxMemoryPages = max / WasmPageSize;
log("mem: max pages = " + Twine(out.memorySec->maxMemoryPages));
}
}
if (!isValidCIdentifier(name))
return;
LLVM_DEBUG(dbgs() << "addStartStopSymbols: " << name << "\n");
- uint32_t start = seg->startVA;
- uint32_t stop = start + seg->size;
+ uint64_t start = seg->startVA;
+ uint64_t stop = start + seg->size;
symtab->addOptionalDataSymbol(saver.save("__start_" + name), start);
symtab->addOptionalDataSymbol(saver.save("__stop_" + name), stop);
}
addSection(out.functionSec);
addSection(out.tableSec);
addSection(out.memorySec);
- addSection(out.eventSec);
+ addSection(out.tagSec);
addSection(out.globalSec);
addSection(out.exportSec);
addSection(out.startSec);
}
// Find TLS data segments
- auto isTLS = [](InputSegment *segment) {
- StringRef name = segment->getName();
- return segment->live &&
- (name.startswith(".tdata") || name.startswith(".tbss"));
+ auto isTLS = [](InputChunk *segment) {
+ return segment->live && segment->isTLS();
};
tlsUsed = tlsUsed ||
std::any_of(file->segments.begin(), file->segments.end(), isTLS);
for (const auto &key : used.keys())
allowed.insert(std::string(key));
- if (!config->relocatable && allowed.count("atomics") &&
- !config->sharedMemory) {
- if (inferFeatures)
- error(Twine("'atomics' feature is used by ") + used["atomics"] +
- ", so --shared-memory must be used");
- else
- error("'atomics' feature is used, so --shared-memory must be used");
- }
-
if (!config->checkFeatures)
return;
+ if (!config->relocatable && allowed.count("mutable-globals") == 0) {
+ for (const Symbol *sym : out.importSec->importedSymbols) {
+ if (auto *global = dyn_cast<GlobalSymbol>(sym)) {
+ if (global->getGlobalType()->Mutable) {
+ error(Twine("mutable global imported but 'mutable-globals' feature "
+ "not present in inputs: `") +
+ toString(*sym) + "`. Use --no-check-features to suppress.");
+ }
+ }
+ }
+ for (const Symbol *sym : out.exportSec->exportedSymbols) {
+ if (isa<GlobalSymbol>(sym)) {
+ error(Twine("mutable global exported but 'mutable-globals' feature "
+ "not present in inputs: `") +
+ toString(*sym) + "`. Use --no-check-features to suppress.");
+ }
+ }
+ }
+
if (config->sharedMemory) {
if (disallowed.count("shared-mem"))
error("--shared-memory is disallowed by " + disallowed["shared-mem"] +
}
}
+static bool shouldImport(Symbol *sym) {
+ if (!sym->isUndefined())
+ return false;
+ if (sym->isWeak() && !config->relocatable && !config->isPic)
+ return false;
+ if (!sym->isLive())
+ return false;
+ if (!sym->isUsedInRegularObj)
+ return false;
+
+ // We don't generate imports for data symbols. They however can be imported
+ // as GOT entries.
+ if (isa<DataSymbol>(sym))
+ return false;
+
+ if (config->isPic || config->relocatable || config->importUndefined)
+ return true;
+ if (config->allowUndefinedSymbols.count(sym->getName()) != 0)
+ return true;
+
+ return sym->importName.hasValue();
+}
+
void Writer::calculateImports() {
+ // Some inputs require that the indirect function table be assigned to table
+ // number 0, so if it is present and is an import, allocate it before any
+ // other tables.
+ if (WasmSym::indirectFunctionTable &&
+ shouldImport(WasmSym::indirectFunctionTable))
+ out.importSec->addImport(WasmSym::indirectFunctionTable);
+
for (Symbol *sym : symtab->getSymbols()) {
- if (!sym->isUndefined())
+ if (!shouldImport(sym))
continue;
- if (sym->isWeak() && !config->relocatable)
+ if (sym == WasmSym::indirectFunctionTable)
continue;
- if (!sym->isLive())
- continue;
- if (!sym->isUsedInRegularObj)
- continue;
- // We don't generate imports for data symbols. They however can be imported
- // as GOT entries.
- if (isa<DataSymbol>(sym))
- continue;
-
LLVM_DEBUG(dbgs() << "import: " << sym->getName() << "\n");
out.importSec->addImport(sym);
}
out.exportSec->exports.push_back(
WasmExport{"memory", WASM_EXTERNAL_MEMORY, 0});
- if (!config->relocatable && config->exportTable)
- out.exportSec->exports.push_back(
- WasmExport{functionTableName, WASM_EXTERNAL_TABLE, 0});
-
unsigned globalIndex =
out.importSec->getNumImportedGlobals() + out.globalSec->numGlobals();
}
export_ = {name, WASM_EXTERNAL_FUNCTION, f->getFunctionIndex()};
} else if (auto *g = dyn_cast<DefinedGlobal>(sym)) {
- // TODO(sbc): Remove this check once to mutable global proposal is
- // implement in all major browsers.
- // See: https://github.com/WebAssembly/mutable-global
- if (g->getGlobalType()->Mutable) {
- // Only __stack_pointer and __tls_base should ever be create as mutable.
- assert(g == WasmSym::stackPointer || g == WasmSym::tlsBase);
+ if (g->getGlobalType()->Mutable && !g->getFile() && !g->forceExport) {
+ // Avoid exporting mutable globals that are linker synthesized (e.g.
+ // __stack_pointer or __tls_base) unless they are explicitly exported
+ // from the command line.
+ // Without this check `--export-all` would cause any program using the
+ // stack pointer to export a mutable global even if none of the input
+ // files were built with the `mutable-globals` feature.
continue;
}
export_ = {name, WASM_EXTERNAL_GLOBAL, g->getGlobalIndex()};
- } else if (auto *e = dyn_cast<DefinedEvent>(sym)) {
- export_ = {name, WASM_EXTERNAL_EVENT, e->getEventIndex()};
- } else {
- auto *d = cast<DefinedData>(sym);
+ } else if (auto *t = dyn_cast<DefinedTag>(sym)) {
+ export_ = {name, WASM_EXTERNAL_TAG, t->getTagIndex()};
+ } else if (auto *d = dyn_cast<DefinedData>(sym)) {
+ if (d->segment && d->segment->isTLS()) {
+ // We can't currently export TLS data symbols.
+ if (sym->isExportedExplicit())
+ error("TLS symbols cannot yet be exported: `" + toString(*sym) + "`");
+ continue;
+ }
out.globalSec->dataAddressGlobals.push_back(d);
export_ = {name, WASM_EXTERNAL_GLOBAL, globalIndex++};
+ } else {
+ auto *t = cast<DefinedTable>(sym);
+ export_ = {name, WASM_EXTERNAL_TABLE, t->getTableNumber()};
}
LLVM_DEBUG(dbgs() << "Export: " << name << "\n");
out.exportSec->exports.push_back(export_);
+ out.exportSec->exportedSymbols.push_back(sym);
}
}
// 1. Any signature used in the TYPE relocation
// 2. The signatures of all imported functions
// 3. The signatures of all defined functions
- // 4. The signatures of all imported events
- // 5. The signatures of all defined events
+ // 4. The signatures of all imported tags
+ // 5. The signatures of all defined tags
for (ObjFile *file : symtab->objectFiles) {
ArrayRef<WasmSignature> types = file->getWasmObj()->types();
for (const Symbol *sym : out.importSec->importedSymbols) {
if (auto *f = dyn_cast<FunctionSymbol>(sym))
out.typeSec->registerType(*f->signature);
- else if (auto *e = dyn_cast<EventSymbol>(sym))
- out.typeSec->registerType(*e->signature);
+ else if (auto *t = dyn_cast<TagSymbol>(sym))
+ out.typeSec->registerType(*t->signature);
}
for (const InputFunction *f : out.functionSec->inputFunctions)
out.typeSec->registerType(f->signature);
- for (const InputEvent *e : out.eventSec->inputEvents)
- out.typeSec->registerType(e->signature);
+ for (const InputTag *t : out.tagSec->inputTags)
+ out.typeSec->registerType(t->signature);
+}
+
+// In a command-style link, create a wrapper for each exported symbol
+// which calls the constructors and destructors.
+void Writer::createCommandExportWrappers() {
+ // This logic doesn't currently support Emscripten-style PIC mode.
+ assert(!config->isPic);
+
+ // If there are no ctors and there's no libc `__wasm_call_dtors` to
+ // call, don't wrap the exports.
+ if (initFunctions.empty() && WasmSym::callDtors == nullptr)
+ return;
+
+ std::vector<DefinedFunction *> toWrap;
+
+ for (Symbol *sym : symtab->getSymbols())
+ if (sym->isExported())
+ if (auto *f = dyn_cast<DefinedFunction>(sym))
+ toWrap.push_back(f);
+
+ for (auto *f : toWrap) {
+ auto funcNameStr = (f->getName() + ".command_export").str();
+ commandExportWrapperNames.push_back(funcNameStr);
+ const std::string &funcName = commandExportWrapperNames.back();
+
+ auto func = make<SyntheticFunction>(*f->getSignature(), funcName);
+ if (f->function->getExportName().hasValue())
+ func->setExportName(f->function->getExportName()->str());
+ else
+ func->setExportName(f->getName().str());
+
+ DefinedFunction *def =
+ symtab->addSyntheticFunction(funcName, f->flags, func);
+ def->markLive();
+
+ def->flags |= WASM_SYMBOL_EXPORTED;
+ def->flags &= ~WASM_SYMBOL_VISIBILITY_HIDDEN;
+ def->forceExport = f->forceExport;
+
+ f->flags |= WASM_SYMBOL_VISIBILITY_HIDDEN;
+ f->flags &= ~WASM_SYMBOL_EXPORTED;
+ f->forceExport = false;
+
+ out.functionSec->addFunction(func);
+
+ createCommandExportWrapper(f->getFunctionIndex(), def);
+ }
+}
+
+static void finalizeIndirectFunctionTable() {
+ if (!WasmSym::indirectFunctionTable)
+ return;
+
+ if (shouldImport(WasmSym::indirectFunctionTable) &&
+ !WasmSym::indirectFunctionTable->hasTableNumber()) {
+ // Processing -Bsymbolic relocations resulted in a late requirement that the
+ // indirect function table be present, and we are running in --import-table
+ // mode. Add the table now to the imports section. Otherwise it will be
+ // added to the tables section later in assignIndexes.
+ out.importSec->addImport(WasmSym::indirectFunctionTable);
+ }
+
+ uint32_t tableSize = config->tableBase + out.elemSec->numEntries();
+ WasmLimits limits = {0, tableSize, 0};
+ if (WasmSym::indirectFunctionTable->isDefined() && !config->growableTable) {
+ limits.Flags |= WASM_LIMITS_FLAG_HAS_MAX;
+ limits.Maximum = limits.Minimum;
+ }
+ WasmSym::indirectFunctionTable->setLimits(limits);
}
static void scanRelocations() {
}
for (ObjFile *file : symtab->objectFiles) {
- LLVM_DEBUG(dbgs() << "Events: " << file->getName() << "\n");
- for (InputEvent *event : file->events)
- out.eventSec->addEvent(event);
+ LLVM_DEBUG(dbgs() << "Tags: " << file->getName() << "\n");
+ for (InputTag *tag : file->tags)
+ out.tagSec->addTag(tag);
}
+ for (ObjFile *file : symtab->objectFiles) {
+ LLVM_DEBUG(dbgs() << "Tables: " << file->getName() << "\n");
+ for (InputTable *table : file->tables)
+ out.tableSec->addTable(table);
+ }
+
+ for (InputTable *table : symtab->syntheticTables)
+ out.tableSec->addTable(table);
+
out.globalSec->assignIndexes();
+ out.tableSec->assignIndexes();
}
-static StringRef getOutputDataSegmentName(StringRef name) {
- // With PIC code we currently only support a single data segment since
- // we only have a single __memory_base to use as our base address.
- if (config->isPic)
- return ".data";
- // We only support one thread-local segment, so we must merge the segments
- // despite --no-merge-data-segments.
- // We also need to merge .tbss into .tdata so they share the same offsets.
- if (name.startswith(".tdata") || name.startswith(".tbss"))
+static StringRef getOutputDataSegmentName(const InputChunk &seg) {
+ // We always merge .tbss and .tdata into a single TLS segment so all TLS
+ // symbols are relative to a single __tls_base.
+ if (seg.isTLS())
return ".tdata";
+ StringRef name = seg.getName();
if (!config->mergeDataSegments)
return name;
if (name.startswith(".text."))
return name;
}
+OutputSegment *Writer::createOutputSegment(StringRef name) {
+ LLVM_DEBUG(dbgs() << "new segment: " << name << "\n");
+ OutputSegment *s = make<OutputSegment>(name);
+ if (config->sharedMemory)
+ s->initFlags = WASM_DATA_SEGMENT_IS_PASSIVE;
+ // Exported memories are guaranteed to be zero-initialized, so no need
+ // to emit data segments for bss sections.
+ // TODO: consider initializing bss sections with memory.fill
+ // instructions when memory is imported and bulk-memory is available.
+ if (!config->importMemory && !config->relocatable && name.startswith(".bss"))
+ s->isBss = true;
+ segments.push_back(s);
+ return s;
+}
+
void Writer::createOutputSegments() {
for (ObjFile *file : symtab->objectFiles) {
- for (InputSegment *segment : file->segments) {
+ for (InputChunk *segment : file->segments) {
if (!segment->live)
continue;
- StringRef name = getOutputDataSegmentName(segment->getName());
- OutputSegment *&s = segmentMap[name];
- if (s == nullptr) {
- LLVM_DEBUG(dbgs() << "new segment: " << name << "\n");
- s = make<OutputSegment>(name);
- if (config->sharedMemory || name == ".tdata")
- s->initFlags = WASM_SEGMENT_IS_PASSIVE;
- // Exported memories are guaranteed to be zero-initialized, so no need
- // to emit data segments for bss sections.
- // TODO: consider initializing bss sections with memory.fill
- // instructions when memory is imported and bulk-memory is available.
- if (!config->importMemory && !config->relocatable &&
- name.startswith(".bss"))
- s->isBss = true;
- segments.push_back(s);
+ StringRef name = getOutputDataSegmentName(*segment);
+ OutputSegment *s = nullptr;
+ // When running in relocatable mode we can't merge segments that are part
+ // of comdat groups since the ultimate linker needs to be able to exclude or
+ // include them individually.
+ if (config->relocatable && !segment->getComdatName().empty()) {
+ s = createOutputSegment(name);
+ } else {
+ if (segmentMap.count(name) == 0)
+ segmentMap[name] = createOutputSegment(name);
+ s = segmentMap[name];
}
s->addInputSegment(segment);
- LLVM_DEBUG(dbgs() << "added data: " << name << ": " << s->size << "\n");
}
}
[](const OutputSegment *a, const OutputSegment *b) {
auto order = [](StringRef name) {
return StringSwitch<int>(name)
- .StartsWith(".rodata", 0)
- .StartsWith(".data", 1)
- .StartsWith(".tdata", 2)
+ .StartsWith(".tdata", 0)
+ .StartsWith(".rodata", 1)
+ .StartsWith(".data", 2)
.StartsWith(".bss", 4)
.Default(3);
};
for (size_t i = 0; i < segments.size(); ++i)
segments[i]->index = i;
+
+ // Merge MergeInputSections into a single MergeSyntheticSection.
+ LLVM_DEBUG(dbgs() << "-- finalize input segments\n");
+ for (OutputSegment *seg : segments)
+ seg->finalizeInputSegments();
+}
+
+void Writer::combineOutputSegments() {
+ // With PIC code we currently only support a single active data segment since
+ // we only have a single __memory_base to use as our base address. This pass
+ // combines all data segments into a single .data segment.
+ // This restriction can be relaxed once we have extended constant
+ // expressions available:
+ // https://github.com/WebAssembly/extended-const
+ assert(config->isPic && !config->sharedMemory);
+ if (segments.size() <= 1)
+ return;
+ OutputSegment *combined = make<OutputSegment>(".data");
+ combined->startVA = segments[0]->startVA;
+ for (OutputSegment *s : segments) {
+ bool first = true;
+ for (InputChunk *inSeg : s->inputSegments) {
+ if (first)
+ inSeg->alignment = std::max(inSeg->alignment, s->alignment);
+ first = false;
+#ifndef NDEBUG
+ uint64_t oldVA = inSeg->getVA();
+#endif
+ combined->addInputSegment(inSeg);
+#ifndef NDEBUG
+ uint64_t newVA = inSeg->getVA();
+ LLVM_DEBUG(dbgs() << "added input segment. name=" << inSeg->getName()
+ << " oldVA=" << oldVA << " newVA=" << newVA << "\n");
+ assert(oldVA == newVA);
+#endif
+ }
+ }
+
+ segments = {combined};
}
static void createFunction(DefinedFunction *func, StringRef bodyContent) {
}
bool Writer::needsPassiveInitialization(const OutputSegment *segment) {
- return segment->initFlags & WASM_SEGMENT_IS_PASSIVE &&
- segment->name != ".tdata" && !segment->isBss;
+ return segment->initFlags & WASM_DATA_SEGMENT_IS_PASSIVE &&
+ !segment->isTLS() && !segment->isBss;
}
bool Writer::hasPassiveInitializedSegments() {
}) != segments.end();
}
+void Writer::createSyntheticInitFunctions() {
+ if (config->relocatable)
+ return;
+
+ static WasmSignature nullSignature = {{}, {}};
+
+ // Passive segments are used to avoid memory being reinitialized on each
+ // thread's instantiation. These passive segments are initialized and
+ // dropped in __wasm_init_memory, which is registered as the start function
+ if (config->sharedMemory && hasPassiveInitializedSegments()) {
+ WasmSym::initMemory = symtab->addSyntheticFunction(
+ "__wasm_init_memory", WASM_SYMBOL_VISIBILITY_HIDDEN,
+ make<SyntheticFunction>(nullSignature, "__wasm_init_memory"));
+ WasmSym::initMemory->markLive();
+ }
+
+ if (config->isPic) {
+ // For PIC code we create synthetic functions that apply relocations.
+ // These get called from __wasm_call_ctors before the user-level
+ // constructors.
+ WasmSym::applyDataRelocs = symtab->addSyntheticFunction(
+ "__wasm_apply_data_relocs", WASM_SYMBOL_VISIBILITY_HIDDEN,
+ make<SyntheticFunction>(nullSignature, "__wasm_apply_data_relocs"));
+ WasmSym::applyDataRelocs->markLive();
+
+ if (out.globalSec->needsRelocations()) {
+ WasmSym::applyGlobalRelocs = symtab->addSyntheticFunction(
+ "__wasm_apply_global_relocs", WASM_SYMBOL_VISIBILITY_HIDDEN,
+ make<SyntheticFunction>(nullSignature, "__wasm_apply_global_relocs"));
+ WasmSym::applyGlobalRelocs->markLive();
+ }
+ }
+
+ if (WasmSym::applyGlobalRelocs && WasmSym::initMemory) {
+ WasmSym::startFunction = symtab->addSyntheticFunction(
+ "__wasm_start", WASM_SYMBOL_VISIBILITY_HIDDEN,
+ make<SyntheticFunction>(nullSignature, "__wasm_start"));
+ WasmSym::startFunction->markLive();
+ }
+}
+
void Writer::createInitMemoryFunction() {
LLVM_DEBUG(dbgs() << "createInitMemoryFunction\n");
+ assert(WasmSym::initMemory);
assert(WasmSym::initMemoryFlag);
- uint32_t flagAddress = WasmSym::initMemoryFlag->getVirtualAddress();
+ assert(hasPassiveInitializedSegments());
+ uint64_t flagAddress = WasmSym::initMemoryFlag->getVA();
+ bool is64 = config->is64.getValueOr(false);
std::string bodyContent;
{
raw_string_ostream os(bodyContent);
- writeUleb128(os, 0, "num locals");
+ // Initialize memory in a thread-safe manner. The thread that successfully
+ // increments the flag from 0 to 1 is responsible for performing the memory
+ // initialization. Other threads sleep on the flag until the first thread
+ // finishes initializing memory, increments the flag to 2, and wakes all the
+ // other threads. Once the flag has been set to 2,
+ // subsequently started threads will skip the sleep. All threads
+ // unconditionally drop their passive data segments once memory has been
+ // initialized. The generated code is as follows:
+ //
+ // (func $__wasm_init_memory
+ // (if
+ // (i32.atomic.rmw.cmpxchg align=2 offset=0
+ // (i32.const $__init_memory_flag)
+ // (i32.const 0)
+ // (i32.const 1)
+ // )
+ // (then
+ // (drop
+ // (i32.atomic.wait align=2 offset=0
+ // (i32.const $__init_memory_flag)
+ // (i32.const 1)
+ // (i32.const -1)
+ // )
+ // )
+ // )
+ // (else
+ // ( ... initialize data segments ... )
+ // (i32.atomic.store align=2 offset=0
+ // (i32.const $__init_memory_flag)
+ // (i32.const 2)
+ // )
+ // (drop
+ // (i32.atomic.notify align=2 offset=0
+ // (i32.const $__init_memory_flag)
+ // (i32.const -1u)
+ // )
+ // )
+ // )
+ // )
+ // ( ... drop data segments ... )
+ // )
+ //
+ // When we are building with PIC, calculate the flag location using:
+ //
+ // (global.get $__memory_base)
+ // (i32.const $__init_memory_flag)
+ // (i32.const 1)
+
+ // With PIC code we cache the flag address in local 0
+ if (config->isPic) {
+ writeUleb128(os, 1, "num local decls");
+ writeUleb128(os, 1, "local count");
+ writeU8(os, is64 ? WASM_TYPE_I64 : WASM_TYPE_I32, "address type");
+ writeU8(os, WASM_OPCODE_GLOBAL_GET, "GLOBAL_GET");
+ writeUleb128(os, WasmSym::memoryBase->getGlobalIndex(), "memory_base");
+ writePtrConst(os, flagAddress, is64, "flag address");
+ writeU8(os, is64 ? WASM_OPCODE_I64_ADD : WASM_OPCODE_I32_ADD, "add");
+ writeU8(os, WASM_OPCODE_LOCAL_SET, "local.set");
+ writeUleb128(os, 0, "local 0");
+ } else {
+ writeUleb128(os, 0, "num locals");
+ }
+
+ auto writeGetFlagAddress = [&]() {
+ if (config->isPic) {
+ writeU8(os, WASM_OPCODE_LOCAL_GET, "local.get");
+ writeUleb128(os, 0, "local 0");
+ } else {
+ writePtrConst(os, flagAddress, is64, "flag address");
+ }
+ };
- if (hasPassiveInitializedSegments()) {
- // Initialize memory in a thread-safe manner. The thread that successfully
- // increments the flag from 0 to 1 is is responsible for performing the
- // memory initialization. Other threads go sleep on the flag until the
- // first thread finishing initializing memory, increments the flag to 2,
- // and wakes all the other threads. Once the flag has been set to 2,
- // subsequently started threads will skip the sleep. All threads
- // unconditionally drop their passive data segments once memory has been
- // initialized. The generated code is as follows:
- //
- // (func $__wasm_init_memory
- // (if
- // (i32.atomic.rmw.cmpxchg align=2 offset=0
- // (i32.const $__init_memory_flag)
- // (i32.const 0)
- // (i32.const 1)
- // )
- // (then
- // (drop
- // (i32.atomic.wait align=2 offset=0
- // (i32.const $__init_memory_flag)
- // (i32.const 1)
- // (i32.const -1)
- // )
- // )
- // )
- // (else
- // ( ... initialize data segments ... )
- // (i32.atomic.store align=2 offset=0
- // (i32.const $__init_memory_flag)
- // (i32.const 2)
- // )
- // (drop
- // (i32.atomic.notify align=2 offset=0
- // (i32.const $__init_memory_flag)
- // (i32.const -1u)
- // )
- // )
- // )
- // )
- // ( ... drop data segments ... )
- // )
-
- // Atomically check whether this is the main thread.
- writeI32Const(os, flagAddress, "flag address");
- writeI32Const(os, 0, "expected flag value");
- writeI32Const(os, 1, "flag value");
- writeU8(os, WASM_OPCODE_ATOMICS_PREFIX, "atomics prefix");
- writeUleb128(os, WASM_OPCODE_I32_RMW_CMPXCHG, "i32.atomic.rmw.cmpxchg");
- writeMemArg(os, 2, 0);
- writeU8(os, WASM_OPCODE_IF, "IF");
- writeU8(os, WASM_TYPE_NORESULT, "blocktype");
-
- // Did not increment 0, so wait for main thread to initialize memory
- writeI32Const(os, flagAddress, "flag address");
- writeI32Const(os, 1, "expected flag value");
- writeI64Const(os, -1, "timeout");
- writeU8(os, WASM_OPCODE_ATOMICS_PREFIX, "atomics prefix");
- writeUleb128(os, WASM_OPCODE_I32_ATOMIC_WAIT, "i32.atomic.wait");
- writeMemArg(os, 2, 0);
- writeU8(os, WASM_OPCODE_DROP, "drop");
-
- writeU8(os, WASM_OPCODE_ELSE, "ELSE");
-
- // Did increment 0, so conditionally initialize passive data segments
- for (const OutputSegment *s : segments) {
- if (needsPassiveInitialization(s)) {
- // destination address
- writeI32Const(os, s->startVA, "destination address");
- // source segment offset
- writeI32Const(os, 0, "segment offset");
- // memory region size
- writeI32Const(os, s->size, "memory region size");
- // memory.init instruction
- writeU8(os, WASM_OPCODE_MISC_PREFIX, "bulk-memory prefix");
- writeUleb128(os, WASM_OPCODE_MEMORY_INIT, "memory.init");
- writeUleb128(os, s->index, "segment index immediate");
- writeU8(os, 0, "memory index immediate");
+ // Atomically check whether this is the main thread.
+ writeGetFlagAddress();
+ writeI32Const(os, 0, "expected flag value");
+ writeI32Const(os, 1, "flag value");
+ writeU8(os, WASM_OPCODE_ATOMICS_PREFIX, "atomics prefix");
+ writeUleb128(os, WASM_OPCODE_I32_RMW_CMPXCHG, "i32.atomic.rmw.cmpxchg");
+ writeMemArg(os, 2, 0);
+ writeU8(os, WASM_OPCODE_IF, "IF");
+ writeU8(os, WASM_TYPE_NORESULT, "blocktype");
+
+ // Did not increment 0, so wait for main thread to initialize memory
+ writeGetFlagAddress();
+ writeI32Const(os, 1, "expected flag value");
+ writeI64Const(os, -1, "timeout");
+
+ writeU8(os, WASM_OPCODE_ATOMICS_PREFIX, "atomics prefix");
+ writeUleb128(os, WASM_OPCODE_I32_ATOMIC_WAIT, "i32.atomic.wait");
+ writeMemArg(os, 2, 0);
+ writeU8(os, WASM_OPCODE_DROP, "drop");
+
+ writeU8(os, WASM_OPCODE_ELSE, "ELSE");
+
+ // Did increment 0, so conditionally initialize passive data segments
+ for (const OutputSegment *s : segments) {
+ if (needsPassiveInitialization(s)) {
+ // destination address
+ writePtrConst(os, s->startVA, is64, "destination address");
+ if (config->isPic) {
+ writeU8(os, WASM_OPCODE_GLOBAL_GET, "GLOBAL_GET");
+ writeUleb128(os, WasmSym::memoryBase->getGlobalIndex(),
+ "memory_base");
+ writeU8(os, is64 ? WASM_OPCODE_I64_ADD : WASM_OPCODE_I32_ADD,
+ "i32.add");
}
+ // source segment offset
+ writeI32Const(os, 0, "segment offset");
+ // memory region size
+ writeI32Const(os, s->size, "memory region size");
+ // memory.init instruction
+ writeU8(os, WASM_OPCODE_MISC_PREFIX, "bulk-memory prefix");
+ writeUleb128(os, WASM_OPCODE_MEMORY_INIT, "memory.init");
+ writeUleb128(os, s->index, "segment index immediate");
+ writeU8(os, 0, "memory index immediate");
}
+ }
- // Set flag to 2 to mark end of initialization
- writeI32Const(os, flagAddress, "flag address");
- writeI32Const(os, 2, "flag value");
- writeU8(os, WASM_OPCODE_ATOMICS_PREFIX, "atomics prefix");
- writeUleb128(os, WASM_OPCODE_I32_ATOMIC_STORE, "i32.atomic.store");
- writeMemArg(os, 2, 0);
-
- // Notify any waiters that memory initialization is complete
- writeI32Const(os, flagAddress, "flag address");
- writeI32Const(os, -1, "number of waiters");
- writeU8(os, WASM_OPCODE_ATOMICS_PREFIX, "atomics prefix");
- writeUleb128(os, WASM_OPCODE_ATOMIC_NOTIFY, "atomic.notify");
- writeMemArg(os, 2, 0);
- writeU8(os, WASM_OPCODE_DROP, "drop");
+ // Set flag to 2 to mark end of initialization
+ writeGetFlagAddress();
+ writeI32Const(os, 2, "flag value");
+ writeU8(os, WASM_OPCODE_ATOMICS_PREFIX, "atomics prefix");
+ writeUleb128(os, WASM_OPCODE_I32_ATOMIC_STORE, "i32.atomic.store");
+ writeMemArg(os, 2, 0);
+
+ // Notify any waiters that memory initialization is complete
+ writeGetFlagAddress();
+ writeI32Const(os, -1, "number of waiters");
+ writeU8(os, WASM_OPCODE_ATOMICS_PREFIX, "atomics prefix");
+ writeUleb128(os, WASM_OPCODE_ATOMIC_NOTIFY, "atomic.notify");
+ writeMemArg(os, 2, 0);
+ writeU8(os, WASM_OPCODE_DROP, "drop");
- writeU8(os, WASM_OPCODE_END, "END");
+ writeU8(os, WASM_OPCODE_END, "END");
- // Unconditionally drop passive data segments
- for (const OutputSegment *s : segments) {
- if (needsPassiveInitialization(s)) {
- // data.drop instruction
- writeU8(os, WASM_OPCODE_MISC_PREFIX, "bulk-memory prefix");
- writeUleb128(os, WASM_OPCODE_DATA_DROP, "data.drop");
- writeUleb128(os, s->index, "segment index immediate");
- }
+ // Unconditionally drop passive data segments
+ for (const OutputSegment *s : segments) {
+ if (needsPassiveInitialization(s)) {
+ // data.drop instruction
+ writeU8(os, WASM_OPCODE_MISC_PREFIX, "bulk-memory prefix");
+ writeUleb128(os, WASM_OPCODE_DATA_DROP, "data.drop");
+ writeUleb128(os, s->index, "segment index immediate");
}
}
writeU8(os, WASM_OPCODE_END, "END");
createFunction(WasmSym::initMemory, bodyContent);
}
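+
+// `__wasm_start` is only created when both `__wasm_init_memory` and
+// `__wasm_apply_global_relocs` are present; it simply calls one after the
+// other. When only one of them exists, that function becomes the start
+// function directly.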
+void Writer::createStartFunction() {
+ if (WasmSym::startFunction) {
+ std::string bodyContent;
+ {
+ raw_string_ostream os(bodyContent);
+ writeUleb128(os, 0, "num locals");
+ writeU8(os, WASM_OPCODE_CALL, "CALL");
+ writeUleb128(os, WasmSym::initMemory->getFunctionIndex(),
+ "function index");
+ writeU8(os, WASM_OPCODE_CALL, "CALL");
+ writeUleb128(os, WasmSym::applyGlobalRelocs->getFunctionIndex(),
+ "function index");
+ writeU8(os, WASM_OPCODE_END, "END");
+ }
+ createFunction(WasmSym::startFunction, bodyContent);
+ } else if (WasmSym::initMemory) {
+ WasmSym::startFunction = WasmSym::initMemory;
+ } else if (WasmSym::applyGlobalRelocs) {
+ WasmSym::startFunction = WasmSym::applyGlobalRelocs;
+ }
+}
+
// For -shared (PIC) output, we create a synthetic function which will
// apply any relocations to the data segments on startup. This function is
-// called __wasm_apply_relocs and is added at the beginning of __wasm_call_ctors
-// before any of the constructors run.
-void Writer::createApplyRelocationsFunction() {
- LLVM_DEBUG(dbgs() << "createApplyRelocationsFunction\n");
+// called `__wasm_apply_data_relocs` and is added at the beginning of
+// `__wasm_call_ctors` before any of the constructors run.
+void Writer::createApplyDataRelocationsFunction() {
+ LLVM_DEBUG(dbgs() << "createApplyDataRelocationsFunction\n");
// First write the body's contents to a string.
std::string bodyContent;
{
raw_string_ostream os(bodyContent);
writeUleb128(os, 0, "num locals");
for (const OutputSegment *seg : segments)
- for (const InputSegment *inSeg : seg->inputSegments)
+ for (const InputChunk *inSeg : seg->inputSegments)
inSeg->generateRelocationCode(os);
+
+ writeU8(os, WASM_OPCODE_END, "END");
+ }
+
+ createFunction(WasmSym::applyDataRelocs, bodyContent);
+}
+
+// Similar to createApplyDataRelocationsFunction but generates relocation code
+// for WebAssembly globals. Because these globals are not shared between
+// threads, these relocations need to run on every thread.
+void Writer::createApplyGlobalRelocationsFunction() {
+ // First write the body's contents to a string.
+ std::string bodyContent;
+ {
+ raw_string_ostream os(bodyContent);
+ writeUleb128(os, 0, "num locals");
+ out.globalSec->generateRelocationCode(os);
writeU8(os, WASM_OPCODE_END, "END");
}
- createFunction(WasmSym::applyRelocs, bodyContent);
+ createFunction(WasmSym::applyGlobalRelocs, bodyContent);
}
// Create synthetic "__wasm_call_ctors" function based on ctor functions
// in input object.
void Writer::createCallCtorsFunction() {
- if (!WasmSym::callCtors->isLive())
+ // If __wasm_call_ctors isn't referenced, there aren't any ctors, and we
+ // aren't calling `__wasm_apply_data_relocs` for Emscripten-style PIC, don't
+ // define the `__wasm_call_ctors` function.
+ if (!WasmSym::callCtors->isLive() && !WasmSym::applyDataRelocs &&
+ initFunctions.empty())
return;
// First write the body's contents to a string.
raw_string_ostream os(bodyContent);
writeUleb128(os, 0, "num locals");
- if (config->isPic) {
+ if (WasmSym::applyDataRelocs) {
writeU8(os, WASM_OPCODE_CALL, "CALL");
- writeUleb128(os, WasmSym::applyRelocs->getFunctionIndex(),
+ writeUleb128(os, WasmSym::applyDataRelocs->getFunctionIndex(),
"function index");
}
writeU8(os, WASM_OPCODE_DROP, "DROP");
}
}
+
writeU8(os, WASM_OPCODE_END, "END");
}
createFunction(WasmSym::callCtors, bodyContent);
}
-void Writer::createInitTLSFunction() {
- if (!WasmSym::initTLS->isLive())
- return;
+// Create a wrapper around a function export which calls the
+// static constructors and destructors.
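+//
+// The generated wrapper body is roughly (names are illustrative):
+//   (func $foo.command_export (param ...) (result ...)
+//     (call $__wasm_call_ctors)
+//     (call $foo (local.get 0) ...)
+//     (call $__wasm_call_dtors))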
+void Writer::createCommandExportWrapper(uint32_t functionIndex,
+ DefinedFunction *f) {
+ // First write the body's contents to a string.
+ std::string bodyContent;
+ {
+ raw_string_ostream os(bodyContent);
+ writeUleb128(os, 0, "num locals");
+
+ // Call `__wasm_call_ctors` which calls the static constructors (and
+ // applies any runtime relocations in Emscripten-style PIC mode).
+ if (WasmSym::callCtors->isLive()) {
+ writeU8(os, WASM_OPCODE_CALL, "CALL");
+ writeUleb128(os, WasmSym::callCtors->getFunctionIndex(),
+ "function index");
+ }
+
+ // Call the user's code, leaving any return values on the operand stack.
+ for (size_t i = 0; i < f->signature->Params.size(); ++i) {
+ writeU8(os, WASM_OPCODE_LOCAL_GET, "local.get");
+ writeUleb128(os, i, "local index");
+ }
+ writeU8(os, WASM_OPCODE_CALL, "CALL");
+ writeUleb128(os, functionIndex, "function index");
+
+ // Call the function that calls the destructors.
+ if (DefinedFunction *callDtors = WasmSym::callDtors) {
+ writeU8(os, WASM_OPCODE_CALL, "CALL");
+ writeUleb128(os, callDtors->getFunctionIndex(), "function index");
+ }
+
+ // End the function, returning the return values from the user's code.
+ writeU8(os, WASM_OPCODE_END, "END");
+ }
+ createFunction(f, bodyContent);
+}
+
+void Writer::createInitTLSFunction() {
std::string bodyContent;
{
raw_string_ostream os(bodyContent);
writeU8(os, WASM_OPCODE_GLOBAL_SET, "global.set");
writeUleb128(os, WasmSym::tlsBase->getGlobalIndex(), "global index");
+ // FIXME(wvo): this local needs to be I64 in wasm64, or we need an extend op.
writeU8(os, WASM_OPCODE_LOCAL_GET, "local.get");
writeUleb128(os, 0, "local index");
for (const WasmInitFunc &f : l.InitFunctions) {
FunctionSymbol *sym = file->getFunctionSymbol(f.Symbol);
// comdat exclusions can cause init functions be discarded.
- if (sym->isDiscarded())
+ if (sym->isDiscarded() || !sym->isLive())
continue;
- assert(sym->isLive());
if (sym->signature->Params.size() != 0)
error("constructor functions cannot take arguments: " + toString(*sym));
LLVM_DEBUG(dbgs() << "initFunctions: " << toString(*sym) << "\n");
out.functionSec = make<FunctionSection>();
out.tableSec = make<TableSection>();
out.memorySec = make<MemorySection>();
- out.eventSec = make<EventSection>();
+ out.tagSec = make<TagSection>();
out.globalSec = make<GlobalSection>();
out.exportSec = make<ExportSection>();
- out.startSec = make<StartSection>(hasPassiveInitializedSegments());
+ out.startSec = make<StartSection>();
out.elemSec = make<ElemSection>();
- out.dataCountSec = make<DataCountSection>(segments);
- out.linkingSec = make<LinkingSection>(initFunctions, segments);
- out.nameSec = make<NameSection>();
out.producersSec = make<ProducersSection>();
out.targetFeaturesSec = make<TargetFeaturesSection>();
}
+void Writer::createSyntheticSectionsPostLayout() {
+ out.dataCountSec = make<DataCountSection>(segments);
+ out.linkingSec = make<LinkingSection>(initFunctions, segments);
+ out.nameSec = make<NameSection>(segments);
+}
+
void Writer::run() {
if (config->relocatable || config->isPic)
config->globalBase = 0;
if (!config->isPic) {
config->tableBase = 1;
if (WasmSym::definedTableBase)
- WasmSym::definedTableBase->setVirtualAddress(config->tableBase);
+ WasmSym::definedTableBase->setVA(config->tableBase);
+ if (WasmSym::definedTableBase32)
+ WasmSym::definedTableBase32->setVA(config->tableBase);
}
log("-- createOutputSegments");
createOutputSegments();
log("-- createSyntheticSections");
createSyntheticSections();
- log("-- populateProducers");
- populateProducers();
- log("-- populateTargetFeatures");
- populateTargetFeatures();
- log("-- calculateImports");
- calculateImports();
log("-- layoutMemory");
layoutMemory();
if (!config->relocatable) {
// Create linker synthesized __start_SECNAME/__stop_SECNAME symbols
// This has to be done after memory layout is performed.
- for (const OutputSegment *seg : segments)
+ for (const OutputSegment *seg : segments) {
addStartStopSymbols(seg);
+ }
+ }
+
+ for (auto &pair : config->exportedSymbols) {
+ Symbol *sym = symtab->find(pair.first());
+ if (sym && sym->isDefined())
+ sym->forceExport = true;
+ }
+
+  // Delay reporting errors about explicit exports until after
+  // addStartStopSymbols, which can create optional symbols.
+ for (auto &name : config->requiredExports) {
+ Symbol *sym = symtab->find(name);
+ if (!sym || !sym->isDefined()) {
+ if (config->unresolvedSymbols == UnresolvedPolicy::ReportError)
+ error(Twine("symbol exported via --export not found: ") + name);
+ if (config->unresolvedSymbols == UnresolvedPolicy::Warn)
+ warn(Twine("symbol exported via --export not found: ") + name);
+ }
+ }
+
+ if (config->isPic && !config->sharedMemory) {
+    // In shared-memory mode, all data segments are passive and initialized
+    // via __wasm_init_memory.
+ log("-- combineOutputSegments");
+ combineOutputSegments();
}
+ log("-- createSyntheticSectionsPostLayout");
+ createSyntheticSectionsPostLayout();
+ log("-- populateProducers");
+ populateProducers();
+ log("-- calculateImports");
+ calculateImports();
log("-- scanRelocations");
scanRelocations();
+ log("-- finalizeIndirectFunctionTable");
+ finalizeIndirectFunctionTable();
+ log("-- createSyntheticInitFunctions");
+ createSyntheticInitFunctions();
log("-- assignIndexes");
assignIndexes();
log("-- calculateInitFunctions");
if (!config->relocatable) {
// Create linker synthesized functions
- if (config->sharedMemory)
+ if (WasmSym::applyDataRelocs)
+ createApplyDataRelocationsFunction();
+ if (WasmSym::applyGlobalRelocs)
+ createApplyGlobalRelocationsFunction();
+ if (WasmSym::initMemory)
createInitMemoryFunction();
- if (config->isPic)
- createApplyRelocationsFunction();
+ createStartFunction();
+
createCallCtorsFunction();
+
+ // Create export wrappers for commands if needed.
+ //
+    // If `__wasm_call_ctors` is referenced, either by a call in one of the
+    // input objects or by an explicit export on the command line, we assume
+    // ctors and dtors are already taken care of.
+ if (!config->relocatable && !config->isPic &&
+ !WasmSym::callCtors->isUsedInRegularObj &&
+ !WasmSym::callCtors->isExported()) {
+ log("-- createCommandExportWrappers");
+ createCommandExportWrappers();
+ }
}
- if (!config->relocatable && config->sharedMemory && !config->shared)
+ if (WasmSym::initTLS && WasmSym::initTLS->isLive())
createInitTLSFunction();
if (errorCount())
calculateCustomSections();
log("-- populateSymtab");
populateSymtab();
+ log("-- populateTargetFeatures");
+ populateTargetFeatures();
log("-- addSections");
addSections();
if (errorHandler().verbose) {
log("Defined Functions: " + Twine(out.functionSec->inputFunctions.size()));
log("Defined Globals : " + Twine(out.globalSec->numGlobals()));
- log("Defined Events : " + Twine(out.eventSec->inputEvents.size()));
+ log("Defined Tags : " + Twine(out.tagSec->inputTags.size()));
+ log("Defined Tables : " + Twine(out.tableSec->inputTables.size()));
log("Function Imports : " +
Twine(out.importSec->getNumImportedFunctions()));
log("Global Imports : " + Twine(out.importSec->getNumImportedGlobals()));
- log("Event Imports : " + Twine(out.importSec->getNumImportedEvents()));
+ log("Tag Imports : " + Twine(out.importSec->getNumImportedTags()));
+ log("Table Imports : " + Twine(out.importSec->getNumImportedTables()));
for (ObjFile *file : symtab->objectFiles)
file->dumpInfo();
}
log("-- finalizeSections");
finalizeSections();
+ log("-- writeMapFile");
+ writeMapFile(outputSections);
+
log("-- openFile");
openFile();
if (errorCount())
return "f64";
case ValType::V128:
return "v128";
- case ValType::EXNREF:
- return "exnref";
+ case ValType::FUNCREF:
+ return "funcref";
case ValType::EXTERNREF:
return "externref";
}
toString(static_cast<ValType>(type.Type));
}
-std::string toString(const WasmEventType &type) {
- if (type.Attribute == WASM_EVENT_ATTRIBUTE_EXCEPTION)
+std::string toString(const WasmTagType &type) {
+ if (type.Attribute == WASM_TAG_ATTRIBUTE_EXCEPTION)
return "exception";
return "unknown";
}
+static std::string toString(const llvm::wasm::WasmLimits &limits) {
+ std::string ret;
+ ret += "flags=0x" + std::to_string(limits.Flags);
+ ret += "; min=" + std::to_string(limits.Minimum);
+ if (limits.Flags & WASM_LIMITS_FLAG_HAS_MAX)
+ ret += "; max=" + std::to_string(limits.Maximum);
+ return ret;
+}
+
+std::string toString(const WasmTableType &type) {
+ return "type=" + toString(static_cast<ValType>(type.ElemType)) +
+ "; limits=[" + toString(type.Limits) + "]";
+}
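
Editor-added example, derived from the code above (not part of the patch):

// A funcref table with limits min=1 and max=1 renders as
//   "type=funcref; limits=[flags=0x1; min=1; max=1]"
// since WASM_LIMITS_FLAG_HAS_MAX is the only limits flag set.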
+
namespace wasm {
void debugWrite(uint64_t offset, const Twine &msg) {
LLVM_DEBUG(dbgs() << format(" | %08lld: ", offset) << msg << "\n");
writeValueType(os, paramType, "param type");
}
writeUleb128(os, sig.Returns.size(), "result Count");
- if (sig.Returns.size()) {
- writeValueType(os, sig.Returns[0], "result type");
+ for (ValType returnType : sig.Returns) {
+ writeValueType(os, returnType, "result type");
}
}
writeSleb128(os, number, msg);
}
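
Editor-added aside (not part of the patch): the writeUleb128/writeSleb128 helpers used throughout these hunks serialize immediates as LEB128 varints, the variable-length integer encoding WebAssembly uses for integer fields; the real helpers defer to LLVM's LEB128 support rather than the sketch below. A minimal, self-contained illustration of the unsigned form:

#include <cstdint>
#include <vector>

// Sketch of unsigned LEB128: 7 payload bits per byte, with the high bit set
// on every byte except the last.
static std::vector<uint8_t> encodeUleb128Sketch(uint64_t value) {
  std::vector<uint8_t> bytes;
  do {
    uint8_t byte = value & 0x7f;
    value >>= 7;
    if (value != 0)
      byte |= 0x80; // continuation bit: more bytes follow
    bytes.push_back(byte);
  } while (value != 0);
  return bytes;
}

// Example: 624485 encodes to {0xe5, 0x8e, 0x26}; a "num locals" count of 0
// encodes to the single byte {0x00}.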
+void writePtrConst(raw_ostream &os, int64_t number, bool is64,
+ const Twine &msg) {
+ if (is64)
+ writeI64Const(os, number, msg);
+ else
+ writeI32Const(os, static_cast<int32_t>(number), msg);
+}
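
A hypothetical usage sketch of the new helper (editor-added; the variable names are illustrative and not taken from the patch):

// Emit a pointer-sized constant: i64.const when targeting wasm64, otherwise
// i32.const, each followed by an SLEB128-encoded immediate.
bool is64 = false;            // wasm32 in this example
uint64_t segmentVA = 0x400;   // illustrative address
writePtrConst(os, segmentVA, is64, "segment start");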
+
void writeMemArg(raw_ostream &os, uint32_t alignment, uint64_t offset) {
writeUleb128(os, alignment, "alignment");
writeUleb128(os, offset, "offset");
void writeLimits(raw_ostream &os, const WasmLimits &limits) {
writeU8(os, limits.Flags, "limits flags");
- writeUleb128(os, limits.Initial, "limits initial");
+ writeUleb128(os, limits.Minimum, "limits min");
if (limits.Flags & WASM_LIMITS_FLAG_HAS_MAX)
writeUleb128(os, limits.Maximum, "limits max");
}
writeU8(os, type.Mutable, "global mutable");
}
-void writeGlobal(raw_ostream &os, const WasmGlobal &global) {
- writeGlobalType(os, global.Type);
- writeInitExpr(os, global.InitExpr);
-}
-
-void writeEventType(raw_ostream &os, const WasmEventType &type) {
- writeUleb128(os, type.Attribute, "event attribute");
+void writeTagType(raw_ostream &os, const WasmTagType &type) {
+ writeUleb128(os, type.Attribute, "tag attribute");
writeUleb128(os, type.SigIndex, "sig index");
}
-void writeEvent(raw_ostream &os, const WasmEvent &event) {
- writeEventType(os, event.Type);
+void writeTag(raw_ostream &os, const WasmTag &tag) {
+ writeTagType(os, tag.Type);
}
-void writeTableType(raw_ostream &os, const llvm::wasm::WasmTable &type) {
- writeU8(os, WASM_TYPE_FUNCREF, "table type");
+void writeTableType(raw_ostream &os, const WasmTableType &type) {
+ writeValueType(os, ValType(type.ElemType), "table type");
writeLimits(os, type.Limits);
}
case WASM_EXTERNAL_GLOBAL:
writeGlobalType(os, import.Global);
break;
- case WASM_EXTERNAL_EVENT:
- writeEventType(os, import.Event);
+ case WASM_EXTERNAL_TAG:
+ writeTagType(os, import.Tag);
break;
case WASM_EXTERNAL_MEMORY:
writeLimits(os, import.Memory);
case WASM_EXTERNAL_GLOBAL:
writeUleb128(os, export_.Index, "global index");
break;
- case WASM_EXTERNAL_EVENT:
- writeUleb128(os, export_.Index, "event index");
+ case WASM_EXTERNAL_TAG:
+ writeUleb128(os, export_.Index, "tag index");
break;
case WASM_EXTERNAL_MEMORY:
writeUleb128(os, export_.Index, "memory index");
void writeI64Const(raw_ostream &os, int64_t number, const Twine &msg);
+void writePtrConst(raw_ostream &os, int64_t number, bool is64,
+ const Twine &msg);
+
void writeMemArg(raw_ostream &os, uint32_t alignment, uint64_t offset);
void writeInitExpr(raw_ostream &os, const llvm::wasm::WasmInitExpr &initExpr);
void writeGlobalType(raw_ostream &os, const llvm::wasm::WasmGlobalType &type);
-void writeGlobal(raw_ostream &os, const llvm::wasm::WasmGlobal &global);
-
-void writeEventType(raw_ostream &os, const llvm::wasm::WasmEventType &type);
+void writeTagType(raw_ostream &os, const llvm::wasm::WasmTagType &type);
-void writeEvent(raw_ostream &os, const llvm::wasm::WasmEvent &event);
+void writeTag(raw_ostream &os, const llvm::wasm::WasmTag &tag);
-void writeTableType(raw_ostream &os, const llvm::wasm::WasmTable &type);
+void writeTableType(raw_ostream &os, const llvm::wasm::WasmTableType &type);
void writeImport(raw_ostream &os, const llvm::wasm::WasmImport &import);
std::string toString(llvm::wasm::ValType type);
std::string toString(const llvm::wasm::WasmSignature &sig);
std::string toString(const llvm::wasm::WasmGlobalType &type);
-std::string toString(const llvm::wasm::WasmEventType &type);
+std::string toString(const llvm::wasm::WasmTagType &type);
+std::string toString(const llvm::wasm::WasmTableType &type);
} // namespace lld