update libcxxabi to llvm 16

Andrew Kelley 2023-01-26 13:23:18 -07:00
parent 1eaf180dd0
commit e41b58ddc3
6 changed files with 140 additions and 52 deletions


@@ -14,6 +14,10 @@
 #include <stdio.h>
 #include <sys/debug.h>

+#if !__has_cpp_attribute(clang::optnone)
+#error This file requires clang::optnone attribute support
+#endif
+
 /*
   The legacy IBM xlC and xlclang++ compilers use the state table for EH
   instead of the range table. Destructors, or addresses of the possible catch
@@ -183,10 +187,6 @@ enum FSMMagic : uint32_t {
   number3 = 0x1cedbeef // State table generated by xlclang++ compiler.
 };

-constexpr uint32_t REG_EXCP_OBJ = 14; // Register to pass the address of the exception
-                                      // object from the personality to xlclang++
-                                      // compiled code.
-
 constexpr size_t dtorArgument = 0x02; // Flag to destructor indicating to free
                                       // virtual bases, don't delete object.
@@ -555,8 +555,16 @@ __xlcxx_personality_v0(int version, _Unwind_Action actions, uint64_t exceptionCl
   if (actions & _UA_CLEANUP_PHASE) {
     // Phase 2 cleanup:
     if (results.reason == _URC_HANDLER_FOUND) {
+      // Store the address of unwind_exception in the stack field
+      // reserved for compilers (SP + 3 * sizeof(uintptr_t)) in the stack of
+      // the caller of the function containing the landing pad (within the link
+      // area for the call to the latter) for __xlc_exception_handle()
+      // to retrieve when it is called by the landing pad.
+      uintptr_t *currentSP = reinterpret_cast<uintptr_t*>(_Unwind_GetGR(context, 1));
+      uintptr_t *callersSP = reinterpret_cast<uintptr_t*>(currentSP[0]);
+      callersSP[3] = reinterpret_cast<uintptr_t>(unwind_exception);
+      _LIBCXXABI_TRACE_STATETAB("Handshake: set unwind_exception=%p in stack=%p\n", reinterpret_cast<void*>(unwind_exception), reinterpret_cast<void*>(callersSP));
       // Jump to the handler.
-      _Unwind_SetGR(context, REG_EXCP_OBJ, reinterpret_cast<uintptr_t>(unwind_exception));
       _Unwind_SetIP(context, results.landingPad);
       return _URC_INSTALL_CONTEXT;
     }
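
Aside: the handshake above leans on the AIX/PowerPC stack layout, where every frame begins with a linkage area whose fourth word is reserved for compiler use; that is why the personality writes to callersSP[3]. The struct below is an illustrative sketch of the 64-bit linkage area, with informal field names, and is not code from this commit.

#include <cstdint>

// Sketch of the 64-bit AIX linkage area at the start of every stack frame
// (the frame's SP points here). Indices are 8-byte words.
struct LinkArea64 {
  uintptr_t back_chain;        // [0] caller's stack pointer (back chain)
  uintptr_t saved_cr;          // [1] condition-register save slot
  uintptr_t saved_lr;          // [2] link-register save slot
  uintptr_t reserved_compiler; // [3] reserved for compilers <- exception object stored here
  uintptr_t reserved_binder;   // [4] reserved for binders
  uintptr_t saved_toc;         // [5] TOC pointer save slot
};

// The personality routine above effectively does:
//   reinterpret_cast<LinkArea64*>(callersSP)->reserved_compiler =
//       reinterpret_cast<uintptr_t>(unwind_exception);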
@@ -633,12 +641,38 @@ _LIBCXXABI_FUNC_VIS void __xlc_throw_badexception() {
   __cxa_throw(newexception, const_cast<std::type_info*>(&typeid(std::bad_exception)), 0);
 }

+// force_a_stackframe
+// This function is called by __xlc_exception_handle() to ensure a stack frame
+// is created for __xlc_exception_handle().
+__attribute__((noinline, optnone))
+static void force_a_stackframe() {}
+
 // __xlc_exception_handle
 // This function is for xlclang++. It returns the address of the exception
-// object set in gpr14 by the personality routine for xlclang++ compiled code.
+// object stored in the reserved field in the stack of the caller of the
+// function that calls __xlc_exception_handle() (within the link area for the
+// call to the latter). The address is stored by the personality routine for
+// xlclang++ compiled code. The implementation of __xlc_exception_handle()
+// assumes a stack frame is created for it. The following ensures this
+// assumption holds true: 1) a call to force_a_stackframe() is made inside
+// __xlc_exception_handle() to make it non-leaf; and 2) optimizations are
+// disabled for this function with attribute 'optnone'. Note: this function
+// may not work as expected if these are changed.
+__attribute__((optnone))
 _LIBCXXABI_FUNC_VIS uintptr_t __xlc_exception_handle() {
-  uintptr_t exceptionObject;
-  asm("mr %0, 14" : "=r"(exceptionObject));
+  // Make a call to force_a_stackframe() so that the compiler creates a stack
+  // frame for this function.
+  force_a_stackframe();
+  // Get the SP of this function, i.e., __xlc_exception_handle().
+  uintptr_t *lastStack;
+  asm("mr %0, 1" : "=r"(lastStack));
+  // Get the SP of the caller of __xlc_exception_handle().
+  uintptr_t *callerStack = reinterpret_cast<uintptr_t*>(lastStack[0]);
+  // Get the SP of the caller of the caller.
+  uintptr_t *callerStack2 = reinterpret_cast<uintptr_t*>(callerStack[0]);
+  uintptr_t exceptionObject = callerStack2[3];
+  _LIBCXXABI_TRACE_STATETAB("Handshake: exceptionObject=%p from stack=%p\n", reinterpret_cast<void*>(exceptionObject), reinterpret_cast<void*>(callerStack2));
   return exceptionObject;
 }
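
To see how the two halves of the handshake line up, here is a self-contained model of the frames involved when a landing pad calls __xlc_exception_handle(). Only the back-chain word and the word-3 slot are modeled; the frame and variable names are invented for the example, and nothing here is ABI-accurate beyond that.

#include <cassert>
#include <cstdint>

// Minimal frame model: word 0 is the back chain to the caller's frame,
// word 3 is the compiler-reserved slot used by the handshake.
struct Frame { uintptr_t words[6]; };

int main() {
  Frame caller{}, handlerFrame{}, handleFrame{};
  // Back chains: __xlc_exception_handle -> landing-pad function -> its caller.
  handleFrame.words[0]  = reinterpret_cast<uintptr_t>(&handlerFrame);
  handlerFrame.words[0] = reinterpret_cast<uintptr_t>(&caller);

  // Personality side: from the landing-pad function's SP, follow one back
  // chain to its caller's frame and store the exception object in word 3.
  uintptr_t fakeException = 0xdeadbeef;
  auto *callersSP = reinterpret_cast<uintptr_t *>(handlerFrame.words[0]);
  callersSP[3] = fakeException;

  // __xlc_exception_handle side: from its own SP, follow two back chains
  // (stepping over the landing-pad function's frame) and read word 3 back.
  auto *sp = reinterpret_cast<uintptr_t *>(&handleFrame);
  auto *callerStack  = reinterpret_cast<uintptr_t *>(sp[0]);
  auto *callerStack2 = reinterpret_cast<uintptr_t *>(callerStack[0]);
  assert(callerStack2[3] == fakeException);
}

This also shows why force_a_stackframe() and optnone matter: if __xlc_exception_handle() were optimized into a leaf with no frame of its own, its SP would not point at a frame whose back chain leads to the landing-pad function, and the two-hop walk would read the wrong slot.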


@@ -386,15 +386,12 @@ __cxa_demangle(const char *MangledName, char *Buf, size_t *N, int *Status) {
   int InternalStatus = demangle_success;
   Demangler Parser(MangledName, MangledName + std::strlen(MangledName));
-  OutputBuffer O;

   Node *AST = Parser.parse();

   if (AST == nullptr)
     InternalStatus = demangle_invalid_mangled_name;
-  else if (!initializeOutputBuffer(Buf, N, O, 1024))
-    InternalStatus = demangle_memory_alloc_failure;
   else {
+    OutputBuffer O(Buf, N);
     assert(Parser.ForwardTemplateRefs.empty());
     AST->print(O);
     O += '\0';
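
The caller-visible contract of __cxa_demangle is unchanged by this refactor: Buf may be a malloc'd buffer whose size is passed through N, or null, in which case the demangler allocates one itself. For reference, a typical call looks like this (the mangled name is just an example):

#include <cxxabi.h>
#include <cstdio>
#include <cstdlib>

int main() {
  int status = 0;
  // Passing a null buffer lets the demangler allocate one with malloc;
  // the caller is responsible for freeing it.
  char *demangled = abi::__cxa_demangle("_ZNSt6vectorIiSaIiEE9push_backERKi",
                                        nullptr, nullptr, &status);
  if (status == 0 && demangled) {
    // e.g. std::vector<int, std::allocator<int> >::push_back(int const&)
    std::printf("%s\n", demangled);
    std::free(demangled);
  }
}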


@@ -5,6 +5,7 @@
 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 //
 //===----------------------------------------------------------------------===//

 #ifndef LIBCXXABI_SRC_INCLUDE_CXA_GUARD_IMPL_H
 #define LIBCXXABI_SRC_INCLUDE_CXA_GUARD_IMPL_H
@@ -54,9 +55,12 @@
 # endif
 #endif

+#include <__threading_support>
+#include <cstdint>
+#include <cstring>
 #include <limits.h>
 #include <stdlib.h>
-#include <__threading_support>

 #ifndef _LIBCXXABI_HAS_NO_THREADS
 # if defined(__ELF__) && defined(_LIBCXXABI_LINK_PTHREAD_LIB)
 #  pragma comment(lib, "pthread")


@@ -26,6 +26,7 @@
 #include <cstdlib>
 #include <cstring>
 #include <limits>
+#include <new>
 #include <utility>

 DEMANGLE_NAMESPACE_BEGIN
@@ -369,6 +370,10 @@ public:
   VendorExtQualType(const Node *Ty_, StringView Ext_, const Node *TA_)
       : Node(KVendorExtQualType), Ty(Ty_), Ext(Ext_), TA(TA_) {}

+  const Node *getTy() const { return Ty; }
+  StringView getExt() const { return Ext; }
+  const Node *getTA() const { return TA; }
+
   template <typename Fn> void match(Fn F) const { F(Ty, Ext, TA); }

   void printLeft(OutputBuffer &OB) const override {
@@ -417,6 +422,9 @@ public:
              Child_->ArrayCache, Child_->FunctionCache),
         Quals(Quals_), Child(Child_) {}

+  Qualifiers getQuals() const { return Quals; }
+  const Node *getChild() const { return Child; }
+
   template<typename Fn> void match(Fn F) const { F(Child, Quals); }

   bool hasRHSComponentSlow(OutputBuffer &OB) const override {
@@ -585,6 +593,8 @@ public:
       : Node(KPointerType, Pointee_->RHSComponentCache),
         Pointee(Pointee_) {}

+  const Node *getPointee() const { return Pointee; }
+
   template<typename Fn> void match(Fn F) const { F(Pointee); }

   bool hasRHSComponentSlow(OutputBuffer &OB) const override {
@@ -1070,6 +1080,9 @@ public:
   VectorType(const Node *BaseType_, const Node *Dimension_)
       : Node(KVectorType), BaseType(BaseType_), Dimension(Dimension_) {}

+  const Node *getBaseType() const { return BaseType; }
+  const Node *getDimension() const { return Dimension; }
+
   template<typename Fn> void match(Fn F) const { F(BaseType, Dimension); }

   void printLeft(OutputBuffer &OB) const override {
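
These hunks all follow one pattern: demangler AST node classes that previously exposed their fields only through match() now also grow plain getters, presumably so consumers of the AST can inspect a node without supplying a callback. A self-contained sketch of the pattern, with names invented for the example:

#include <iostream>

// Toy node mirroring the demangler pattern: private fields, direct getters,
// and a match() that forwards every field to an arbitrary callable.
class VectorTypeNode {
  const char *BaseType;
  int Dimension;

public:
  VectorTypeNode(const char *BaseType_, int Dimension_)
      : BaseType(BaseType_), Dimension(Dimension_) {}

  // New-style direct accessors.
  const char *getBaseType() const { return BaseType; }
  int getDimension() const { return Dimension; }

  // match() hands every field to F; callers destructure the node generically.
  template <typename Fn> void match(Fn F) const { F(BaseType, Dimension); }
};

int main() {
  VectorTypeNode N("int", 4);
  N.match([](const char *Base, int Dim) {
    std::cout << Base << " x " << Dim << "\n";
  });
  std::cout << N.getBaseType() << " / " << N.getDimension() << "\n";
}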
@@ -3019,14 +3032,21 @@ AbstractManglingParser<Derived, Alloc>::parseOperatorEncoding() {
   if (numLeft() < 2)
     return nullptr;

-  auto Op = std::lower_bound(
-      &Ops[0], &Ops[NumOps], First,
-      [](const OperatorInfo &Op_, const char *Enc_) { return Op_ < Enc_; });
-  if (Op == &Ops[NumOps] || *Op != First)
+  // We can't use lower_bound as that can link to symbols in the C++ library,
+  // and this must remain independant of that.
+  size_t lower = 0u, upper = NumOps - 1; // Inclusive bounds.
+  while (upper != lower) {
+    size_t middle = (upper + lower) / 2;
+    if (Ops[middle] < First)
+      lower = middle + 1;
+    else
+      upper = middle;
+  }
+  if (Ops[lower] != First)
     return nullptr;

   First += 2;
-  return Op;
+  return &Ops[lower];
 }

 // <operator-name> ::= See parseOperatorEncoding()
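
The replacement loop is a hand-rolled lower_bound: it keeps an inclusive [lower, upper] range and narrows it until one candidate remains, so the demangler no longer pulls in library symbols through std::lower_bound. A standalone sketch of the same search over a plain sorted array, checked against the standard algorithm where the two agree:

#include <algorithm>
#include <cassert>
#include <cstddef>

// Same shape as the loop in parseOperatorEncoding(): inclusive bounds,
// shrink until lower == upper, then test whether the candidate matches.
static std::size_t manualLowerBound(const int *A, std::size_t N, int Key) {
  std::size_t lower = 0, upper = N - 1; // assumes N > 0
  while (upper != lower) {
    std::size_t middle = (upper + lower) / 2;
    if (A[middle] < Key)
      lower = middle + 1;
    else
      upper = middle;
  }
  return lower;
}

int main() {
  const int A[] = {2, 4, 4, 7, 9};
  const std::size_t N = sizeof(A) / sizeof(A[0]);
  for (int key = 0; key <= 10; ++key) {
    std::size_t i = manualLowerBound(A, N, key);
    const int *ref = std::lower_bound(A, A + N, key);
    // The manual loop never moves past the last element, so it matches
    // std::lower_bound whenever the key is <= the largest entry; in the
    // parser the final equality test (Ops[lower] != First) covers the rest.
    if (key <= A[N - 1])
      assert(A + i == ref);
  }
}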
@@ -5099,7 +5119,7 @@ template <>
 struct FloatData<long double>
 {
 #if defined(__mips__) && defined(__mips_n64) || defined(__aarch64__) || \
-    defined(__wasm__) || defined(__riscv)
+    defined(__wasm__) || defined(__riscv) || defined(__loongarch__)
     static const size_t mangled_size = 32;
 #elif defined(__arm__) || defined(__mips__) || defined(__hexagon__)
     static const size_t mangled_size = 16;
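
mangled_size is the number of hex digits used when a long double literal is mangled, i.e. two characters per byte of the value representation (the x87 80-bit format counts only its ten value bytes, hence 20 on x86). Adding LoongArch to the 32-character branch therefore amounts to saying its long double is a 128-bit quad-precision type. A quick sanity check one could compile on such a target; this is a hypothetical check, not part of the commit:

#include <cstddef>

// Two hex characters encode each byte of the value representation.
template <typename T>
constexpr std::size_t expected_mangled_size = 2 * sizeof(T);

#if defined(__loongarch__)
// Assumption: LoongArch long double is a 128-bit type with no padding bytes,
// so the mangled literal is 32 hex digits, matching the branch above.
static_assert(expected_mangled_size<long double> == 32,
              "expected quad-precision long double");
#endif

int main() {}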


@@ -69,7 +69,9 @@ class OutputBuffer {
 public:
   OutputBuffer(char *StartBuf, size_t Size)
-      : Buffer(StartBuf), CurrentPosition(0), BufferCapacity(Size) {}
+      : Buffer(StartBuf), BufferCapacity(Size) {}
+  OutputBuffer(char *StartBuf, size_t *SizePtr)
+      : OutputBuffer(StartBuf, StartBuf ? *SizePtr : 0) {}
   OutputBuffer() = default;

   // Non-copyable
   OutputBuffer(const OutputBuffer &) = delete;
@@ -77,12 +79,6 @@ public:

   operator StringView() const { return StringView(Buffer, CurrentPosition); }

-  void reset(char *Buffer_, size_t BufferCapacity_) {
-    CurrentPosition = 0;
-    Buffer = Buffer_;
-    BufferCapacity = BufferCapacity_;
-  }
-
   /// If a ParameterPackExpansion (or similar type) is encountered, the offset
   /// into the pack that we're currently printing.
   unsigned CurrentPackIndex = std::numeric_limits<unsigned>::max();
@@ -198,21 +194,6 @@ public:
   ScopedOverride &operator=(const ScopedOverride &) = delete;
 };

-inline bool initializeOutputBuffer(char *Buf, size_t *N, OutputBuffer &OB,
-                                   size_t InitSize) {
-  size_t BufferSize;
-  if (Buf == nullptr) {
-    Buf = static_cast<char *>(std::malloc(InitSize));
-    if (Buf == nullptr)
-      return false;
-    BufferSize = InitSize;
-  } else
-    BufferSize = *N;
-
-  OB.reset(Buf, BufferSize);
-  return true;
-}
-
 DEMANGLE_NAMESPACE_END

 #endif
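
Taken together, these OutputBuffer hunks remove the eagerly-allocating initializeOutputBuffer()/reset() pair: a caller-supplied buffer (or none at all) is now handed straight to a constructor, and when no buffer is supplied the capacity starts at zero and the buffer is grown on demand as output is appended. The following is a simplified sketch of that shape; it illustrates the idea and is not the class's real growth code.

#include <cstdio>
#include <cstdlib>
#include <cstring>

// Cut-down model of the new OutputBuffer contract: construct directly from an
// optional caller buffer, grow lazily with realloc when appending.
class MiniOutputBuffer {
  char *Buffer = nullptr;
  size_t CurrentPosition = 0;
  size_t BufferCapacity = 0;

  void grow(size_t Needed) {
    if (Needed <= BufferCapacity)
      return;
    size_t NewCap = BufferCapacity ? BufferCapacity * 2 : 128;
    if (NewCap < Needed)
      NewCap = Needed;
    Buffer = static_cast<char *>(std::realloc(Buffer, NewCap));
    BufferCapacity = NewCap;
  }

public:
  MiniOutputBuffer(char *StartBuf, size_t Size)
      : Buffer(StartBuf), BufferCapacity(Size) {}
  // Mirrors the new (char*, size_t*) constructor: a null buffer means
  // "start empty and allocate on demand".
  MiniOutputBuffer(char *StartBuf, size_t *SizePtr)
      : MiniOutputBuffer(StartBuf, StartBuf ? *SizePtr : 0) {}

  MiniOutputBuffer &operator+=(char C) {
    grow(CurrentPosition + 1);
    Buffer[CurrentPosition++] = C;
    return *this;
  }
  const char *data() const { return Buffer; }
};

int main() {
  MiniOutputBuffer OB(nullptr, static_cast<size_t *>(nullptr)); // no preallocated buffer
  const char msg[] = "ok";
  for (size_t i = 0; i < sizeof(msg); ++i) // includes the trailing '\0'
    OB += msg[i];
  std::puts(OB.data());
}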


@@ -15,6 +15,7 @@
 #endif
 #endif

+#include <assert.h>
 #include <stdlib.h> // for malloc, calloc, free
 #include <string.h> // for memset
 #include <new> // for std::__libcpp_aligned_{alloc,free}
@@ -63,11 +64,28 @@ char heap[HEAP_SIZE] __attribute__((aligned));

 typedef unsigned short heap_offset;
 typedef unsigned short heap_size;

+// On both 64 and 32 bit targets heap_node should have the following properties
+// Size: 4
+// Alignment: 2
 struct heap_node {
   heap_offset next_node; // offset into heap
   heap_size len;         // size in units of "sizeof(heap_node)"
 };

+// All pointers returned by fallback_malloc must be at least aligned
+// as RequiredAligned. Note that RequiredAlignment can be greater than
+// alignof(std::max_align_t) on 64 bit systems compiling 32 bit code.
+struct FallbackMaxAlignType {
+} __attribute__((aligned));
+const size_t RequiredAlignment = alignof(FallbackMaxAlignType);
+
+static_assert(alignof(FallbackMaxAlignType) % sizeof(heap_node) == 0,
+              "The required alignment must be evenly divisible by the sizeof(heap_node)");
+
+// The number of heap_node's that can fit in a chunk of memory with the size
+// of the RequiredAlignment. On 64 bit targets NodesPerAlignment should be 4.
+const size_t NodesPerAlignment = alignof(FallbackMaxAlignType) / sizeof(heap_node);
+
 static const heap_node* list_end =
     (heap_node*)(&heap[HEAP_SIZE]); // one past the end of the heap
 static heap_node* freelist = NULL;
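
On a typical 64-bit target this works out to a 4-byte heap_node, a bare __attribute__((aligned)) of 16 bytes, and therefore NodesPerAlignment == 4, as the comments claim. A small standalone check along those lines; the constants mirror the diff, and the concrete numbers are assumptions about the target rather than guarantees:

#include <cstddef>

typedef unsigned short heap_offset;
typedef unsigned short heap_size;

struct heap_node {
  heap_offset next_node;
  heap_size len;
};

// An empty struct with the bare `aligned` attribute is padded to the largest
// useful alignment for the target (commonly 16 on 64-bit targets).
struct FallbackMaxAlignType {
} __attribute__((aligned));

const size_t RequiredAlignment = alignof(FallbackMaxAlignType);
const size_t NodesPerAlignment = RequiredAlignment / sizeof(heap_node);

static_assert(sizeof(heap_node) == 4, "two unsigned shorts");
static_assert(alignof(heap_node) == 2, "alignment of unsigned short");
static_assert(RequiredAlignment % sizeof(heap_node) == 0,
              "alignment must be a whole number of nodes");

// Commonly 16 / 4 == 4; the real code computes it instead of hard-coding 4.
int main() { return static_cast<int>(NodesPerAlignment) == 4 ? 0 : 1; }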
@@ -82,10 +100,23 @@ heap_offset offset_from_node(const heap_node* ptr) {
                                   sizeof(heap_node));
 }

+// Return a pointer to the first address, 'A', in `heap` that can actually be
+// used to represent a heap_node. 'A' must be aligned so that
+// '(A + sizeof(heap_node)) % RequiredAlignment == 0'. On 64 bit systems this
+// address should be 12 bytes after the first 16 byte boundary.
+heap_node* getFirstAlignedNodeInHeap() {
+  heap_node* node = (heap_node*)heap;
+  const size_t alignNBytesAfterBoundary = RequiredAlignment - sizeof(heap_node);
+  size_t boundaryOffset = reinterpret_cast<size_t>(node) % RequiredAlignment;
+  size_t requiredOffset = alignNBytesAfterBoundary - boundaryOffset;
+  size_t NElemOffset = requiredOffset / sizeof(heap_node);
+  return node + NElemOffset;
+}
+
 void init_heap() {
-  freelist = (heap_node*)heap;
+  freelist = getFirstAlignedNodeInHeap();
   freelist->next_node = offset_from_node(list_end);
-  freelist->len = HEAP_SIZE / sizeof(heap_node);
+  freelist->len = static_cast<heap_size>(list_end - freelist);
 }

 // How big a chunk we allocate
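
The arithmetic in getFirstAlignedNodeInHeap() only has to establish the invariant that node + 1, the address actually handed out, lands on a RequiredAlignment boundary. Since `heap` itself is declared __attribute__((aligned)), boundaryOffset is 0 in practice and the function simply skips RequiredAlignment - sizeof(heap_node) bytes, i.e. 12 bytes on a 64-bit target. A worked example with those assumed numbers:

#include <cassert>
#include <cstddef>
#include <cstdint>

int main() {
  // Assumed 64-bit numbers: 4-byte nodes, 16-byte required alignment,
  // and a heap base that is itself 16-byte aligned (as `heap` is).
  const std::size_t sizeof_heap_node = 4;
  const std::size_t RequiredAlignment = 16;
  const std::uintptr_t heapBase = 0x10000;

  const std::size_t alignNBytesAfterBoundary = RequiredAlignment - sizeof_heap_node; // 12
  const std::size_t boundaryOffset = heapBase % RequiredAlignment;                   // 0
  const std::size_t requiredOffset = alignNBytesAfterBoundary - boundaryOffset;      // 12
  const std::size_t NElemOffset = requiredOffset / sizeof_heap_node;                 // 3 nodes

  const std::uintptr_t firstNode = heapBase + NElemOffset * sizeof_heap_node;        // 0x1000c
  // The header occupies [0x1000c, 0x10010); the user pointer node + 1 sits
  // exactly on the next 16-byte boundary.
  assert((firstNode + sizeof_heap_node) % RequiredAlignment == 0);
}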
@@ -109,23 +140,44 @@ void* fallback_malloc(size_t len) {

   for (p = freelist, prev = 0; p && p != list_end;
        prev = p, p = node_from_offset(p->next_node)) {
-    if (p->len > nelems) { // chunk is larger, shorten, and return the tail
-      heap_node* q;
+    // Check the invariant that all heap_nodes pointers 'p' are aligned
+    // so that 'p + 1' has an alignment of at least RequiredAlignment
+    assert(reinterpret_cast<size_t>(p + 1) % RequiredAlignment == 0);

-      p->len = static_cast<heap_size>(p->len - nelems);
-      q = p + p->len;
-      q->next_node = 0;
-      q->len = static_cast<heap_size>(nelems);
-      return (void*)(q + 1);
+    // Calculate the number of extra padding elements needed in order
+    // to split 'p' and create a properly aligned heap_node from the tail
+    // of 'p'. We calculate aligned_nelems such that 'p->len - aligned_nelems'
+    // will be a multiple of NodesPerAlignment.
+    size_t aligned_nelems = nelems;
+    if (p->len > nelems) {
+      heap_size remaining_len = static_cast<heap_size>(p->len - nelems);
+      aligned_nelems += remaining_len % NodesPerAlignment;
     }

-    if (p->len == nelems) { // exact size match
+    // chunk is larger and we can create a properly aligned heap_node
+    // from the tail. In this case we shorten 'p' and return the tail.
+    if (p->len > aligned_nelems) {
+      heap_node* q;
+      p->len = static_cast<heap_size>(p->len - aligned_nelems);
+      q = p + p->len;
+      q->next_node = 0;
+      q->len = static_cast<heap_size>(aligned_nelems);
+      void* ptr = q + 1;
+      assert(reinterpret_cast<size_t>(ptr) % RequiredAlignment == 0);
+      return ptr;
+    }
+
+    // The chunk is the exact size or the chunk is larger but not large
+    // enough to split due to alignment constraints.
+    if (p->len >= nelems) {
       if (prev == 0)
         freelist = node_from_offset(p->next_node);
       else
         prev->next_node = p->next_node;
       p->next_node = 0;
-      return (void*)(p + 1);
+      void* ptr = p + 1;
+      assert(reinterpret_cast<size_t>(ptr) % RequiredAlignment == 0);
+      return ptr;
     }
   }
   return NULL; // couldn't find a spot big enough
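
The aligned_nelems adjustment is what keeps the invariant alive across splits: by padding the request so that what remains at the head of the chunk is a whole number of NodesPerAlignment groups, the tail node q, and therefore q + 1, stays on a RequiredAlignment boundary. A worked example of just that arithmetic, using the assumed 64-bit value NodesPerAlignment == 4:

#include <cassert>
#include <cstddef>

int main() {
  const std::size_t NodesPerAlignment = 4; // assumed 64-bit value

  // A free chunk of 10 nodes whose user pointer (p + 1) is aligned,
  // and a request that rounds up to 3 nodes (header + payload).
  std::size_t p_len = 10;
  std::size_t nelems = 3;

  // Pad the request so the head keeps a multiple of NodesPerAlignment nodes.
  std::size_t aligned_nelems = nelems;
  if (p_len > nelems) {
    std::size_t remaining_len = p_len - nelems;          // 7
    aligned_nelems += remaining_len % NodesPerAlignment; // 3 + (7 % 4) = 6
  }

  assert(aligned_nelems == 6);
  assert(p_len > aligned_nelems); // large enough to split

  // After the split the head shrinks to 4 nodes (a multiple of 4), so the
  // tail q starts a whole number of alignment groups after p, and q + 1
  // lands on the same RequiredAlignment boundary that p + 1 did.
  std::size_t new_p_len = p_len - aligned_nelems; // 4
  assert(new_p_len % NodesPerAlignment == 0);
}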