set(LLVM_VERSION_MINOR 0)
endif()
if(NOT DEFINED LLVM_VERSION_PATCH)
- set(LLVM_VERSION_PATCH 0)
+ set(LLVM_VERSION_PATCH 1)
endif()
if(NOT DEFINED LLVM_VERSION_SUFFIX)
set(LLVM_VERSION_SUFFIX "")
option(LLVM_APPEND_VC_REV
"Embed the version control system revision id in LLVM" ON)
-if( LLVM_APPEND_VC_REV )
- add_version_info_from_vcs(PACKAGE_VERSION)
-endif()
-
set(PACKAGE_NAME LLVM)
set(PACKAGE_STRING "${PACKAGE_NAME} ${PACKAGE_VERSION}")
set(PACKAGE_BUGREPORT "http://llvm.org/bugs/")
**LLVM_APPEND_VC_REV**:BOOL
Embed version control revision info (svn revision number or Git revision id).
- This is used among other things in the LLVM version string (stored in the
- PACKAGE_VERSION macro). For this to work cmake must be invoked before the
- build. Defaults to ON.
+ The version info is provided by the ``LLVM_REVISION`` macro in
+ ``llvm/include/llvm/Support/VCSRevision.h``. Developers using git who don't
+ need revision info can disable this option to avoid re-linking most binaries
+ after a branch switch. Defaults to ON.
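As an illustrative sketch (not part of the CMake documentation itself): code built
against LLVM can test for the ``LLVM_REVISION`` macro, which ``VCSRevision.h``
defines only when revision info was captured at configure time. The helper name
``printRevision`` below is hypothetical.

.. code-block:: c++

   #include "llvm/Support/VCSRevision.h"
   #include "llvm/Support/raw_ostream.h"

   static void printRevision() {
   #ifdef LLVM_REVISION
     llvm::outs() << "LLVM revision: " << LLVM_REVISION << "\n";
   #else
     // LLVM_APPEND_VC_REV was OFF, or no VCS info was available at configure time.
     llvm::outs() << "LLVM revision: unknown\n";
   #endif
   }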
**LLVM_ENABLE_THREADS**:BOOL
Build with threads support, if available. Defaults to ON.
auto GTI = gep_type_begin(PointeeType, Operands);
Type *TargetType;
+
+ // Handle the case where the GEP instruction has a single operand,
+ // the basis, in which case TargetType is a nullptr.
+ if (Operands.empty())
+ return !BaseGV ? TTI::TCC_Free : TTI::TCC_Basic;
+
for (auto I = Operands.begin(); I != Operands.end(); ++I, ++GTI) {
TargetType = GTI.getIndexedType();
// We assume that the cost of Scalar GEP with constant index and the
return getReservedRegs().test(PhysReg);
}
+ /// Returns true when the given register unit is considered reserved.
+ ///
+ /// Register units are considered reserved when for at least one of their
+ /// root registers, the root register and all super registers are reserved.
+ /// This currently iterates the register hierarchy and may be slower than
+ /// expected.
+ bool isReservedRegUnit(unsigned Unit) const;
+
/// isAllocatable - Returns true when PhysReg belongs to an allocatable
/// register class and it hasn't been reserved.
///
/// module is modified.
bool UpgradeModuleFlags(Module &M);
+ void UpgradeSectionAttributes(Module &M);
+
/// If the given TBAA tag uses the scalar TBAA format, create a new node
/// corresponding to the upgrade to the struct-path aware TBAA format.
/// Otherwise return the \p TBAANode itself.
Adapters.reserve(ParamCount);
}
+ formatv_object_base(formatv_object_base const &rhs) = delete;
+
+ formatv_object_base(formatv_object_base &&rhs)
+ : Fmt(std::move(rhs.Fmt)),
+ Adapters(), // Adapters are initialized by formatv_object
+ Replacements(std::move(rhs.Replacements)) {
+ Adapters.reserve(rhs.Adapters.size());
+ };
+
void format(raw_ostream &S) const {
for (auto &R : Replacements) {
if (R.Type == ReplacementType::Empty)
Parameters(std::move(Params)) {
Adapters = apply_tuple(create_adapters(), Parameters);
}
+
+ formatv_object(formatv_object const &rhs) = delete;
+
+ formatv_object(formatv_object &&rhs)
+ : formatv_object_base(std::move(rhs)),
+ Parameters(std::move(rhs.Parameters)) {
+ Adapters = apply_tuple(create_adapters(), Parameters);
+ }
};
// \brief Format text given a format string and replacement parameters.
UpgradeDebugInfo(*M);
UpgradeModuleFlags(*M);
+ UpgradeSectionAttributes(*M);
if (!Slots)
return false;
if (convertToString(Record, 0, S))
return error("Invalid record");
// Check for the i386 and other (x86_64, ARM) conventions
- if (S.find("__DATA, __objc_catlist") != std::string::npos ||
+ if (S.find("__DATA,__objc_catlist") != std::string::npos ||
S.find("__OBJC,__category") != std::string::npos)
return true;
break;
auto *SP = cast<DISubprogram>(Scope->getScopeNode());
DIE *ContextDIE;
+ DwarfCompileUnit *ContextCU = this;
if (includeMinimalInlineScopes())
ContextDIE = &getUnitDie();
else if (auto *SPDecl = SP->getDeclaration()) {
ContextDIE = &getUnitDie();
getOrCreateSubprogramDIE(SPDecl);
- } else
+ } else {
ContextDIE = getOrCreateContextDIE(resolve(SP->getScope()));
+ // The scope may be shared with a subprogram that has already been
+ // constructed in another CU, in which case we need to construct this
+ // subprogram in the same CU.
+ ContextCU = DD->lookupCU(ContextDIE->getUnitDie());
+ }
// Passing null as the associated node because the abstract definition
// shouldn't be found by lookup.
- AbsDef = &createAndAddDIE(dwarf::DW_TAG_subprogram, *ContextDIE, nullptr);
- applySubprogramAttributesToDefinition(SP, *AbsDef);
+ AbsDef = &ContextCU->createAndAddDIE(dwarf::DW_TAG_subprogram, *ContextDIE, nullptr);
+ ContextCU->applySubprogramAttributesToDefinition(SP, *AbsDef);
- if (!includeMinimalInlineScopes())
- addUInt(*AbsDef, dwarf::DW_AT_inline, None, dwarf::DW_INL_inlined);
- if (DIE *ObjectPointer = createAndAddScopeChildren(Scope, *AbsDef))
- addDIEEntry(*AbsDef, dwarf::DW_AT_object_pointer, *ObjectPointer);
+ if (!ContextCU->includeMinimalInlineScopes())
+ ContextCU->addUInt(*AbsDef, dwarf::DW_AT_inline, None, dwarf::DW_INL_inlined);
+ if (DIE *ObjectPointer = ContextCU->createAndAddScopeChildren(Scope, *AbsDef))
+ ContextCU->addDIEEntry(*AbsDef, dwarf::DW_AT_object_pointer, *ObjectPointer);
}
DIE *DwarfCompileUnit::constructImportedEntityDIE(
// 0, referencing the comp_dir of all the type units that use it.
MCDwarfDwoLineTable SplitTypeUnitFileTable;
/// @}
-
+
/// True iff there are multiple CUs in this module.
bool SingleCU;
bool IsDarwin;
bool isLexicalScopeDIENull(LexicalScope *Scope);
bool hasDwarfPubSections(bool includeMinimalInlineScopes) const;
+
+ /// Find the matching DwarfCompileUnit for the given CU DIE.
+ DwarfCompileUnit *lookupCU(const DIE *Die) { return CUDieMap.lookup(Die); }
};
} // End of namespace llvm
// Intersection between the bits we already emitted and the bits
// covered by this subregister.
- SmallBitVector Intersection(RegSize, false);
- Intersection.set(Offset, Offset + Size);
- Intersection ^= Coverage;
+ SmallBitVector CurSubReg(RegSize, false);
+ CurSubReg.set(Offset, Offset + Size);
// If this sub-register has a DWARF number and we haven't covered
// its range, emit a DWARF piece for it.
- if (Reg >= 0 && Intersection.any()) {
+ if (Reg >= 0 && CurSubReg.test(Coverage)) {
// Emit a piece for any gap in the coverage.
if (Offset > CurPos)
DwarfRegs.push_back({-1, Offset - CurPos, nullptr});
// may share super-registers. That's OK because createDeadDefs() is
// idempotent. It is very rare for a register unit to have multiple roots, so
// uniquing super-registers is probably not worthwhile.
- bool IsReserved = true;
+ bool IsReserved = false;
for (MCRegUnitRootIterator Root(Unit, TRI); Root.isValid(); ++Root) {
+ bool IsRootReserved = true;
for (MCSuperRegIterator Super(*Root, TRI, /*IncludeSelf=*/true);
Super.isValid(); ++Super) {
unsigned Reg = *Super;
// A register unit is considered reserved if, for at least one of its roots,
// the root and all of its super registers are reserved.
if (!MRI->isReserved(Reg))
- IsReserved = false;
+ IsRootReserved = false;
}
+ IsReserved |= IsRootReserved;
}
+ assert(IsReserved == MRI->isReservedRegUnit(Unit) &&
+ "reserved computation mismatch");
// Now extend LR to reach all uses.
// Ignore uses of reserved registers. We only track defs of those.
// kill flags. This is wasteful. Eventually, LiveVariables will strip all kill
// flags, and postRA passes will use a live register utility instead.
LiveRange *getRegUnitLI(unsigned Unit) {
- if (UpdateFlags)
+ if (UpdateFlags && !MRI.isReservedRegUnit(Unit))
return &LIS.getRegUnit(Unit);
return LIS.getCachedRegUnit(Unit);
}
UpdatedCSRs.push_back(0);
IsUpdatedCSRsInitialized = true;
}
+
+bool MachineRegisterInfo::isReservedRegUnit(unsigned Unit) const {
+ const TargetRegisterInfo *TRI = getTargetRegisterInfo();
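+ // A unit is reserved when, for at least one of its root registers, the root
+ // and all of the root's super-registers are reserved.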
+ for (MCRegUnitRootIterator Root(Unit, TRI); Root.isValid(); ++Root) {
+ bool IsRootReserved = true;
+ for (MCSuperRegIterator Super(*Root, TRI, /*IncludeSelf=*/true);
+ Super.isValid(); ++Super) {
+ unsigned Reg = *Super;
+ if (!isReserved(Reg)) {
+ IsRootReserved = false;
+ break;
+ }
+ }
+ if (IsRootReserved)
+ return true;
+ }
+ return false;
+}
// Check the cached regunit intervals.
if (TargetRegisterInfo::isPhysicalRegister(Reg) && !isReserved(Reg)) {
for (MCRegUnitIterator Units(Reg, TRI); Units.isValid(); ++Units) {
+ if (MRI->isReservedRegUnit(*Units))
+ continue;
if (const LiveRange *LR = LiveInts->getCachedRegUnit(*Units))
checkLivenessAtUse(MO, MONum, UseIdx, *LR, *Units);
}
}
}
}
+ // Upgrade Objective-C Image Info Section. Remove the whitespace in the
+ // section name so that llvm-lto will not complain about mismatching
+ // module flags that are functionally the same.
+ if (ID->getString() == "Objective-C Image Info Section") {
+ if (auto *Value = dyn_cast_or_null<MDString>(Op->getOperand(2))) {
+ SmallVector<StringRef, 4> ValueComp;
+ Value->getString().split(ValueComp, " ");
+ if (ValueComp.size() != 1) {
+ std::string NewValue;
+ for (auto &S : ValueComp)
+ NewValue += S.str();
+ Metadata *Ops[3] = {Op->getOperand(0), Op->getOperand(1),
+ MDString::get(M.getContext(), NewValue)};
+ ModFlags->setOperand(I, MDNode::get(M.getContext(), Ops));
+ Changed = true;
+ }
+ }
+ }
}
// "Objective-C Class Properties" is recently added for Objective-C. We
return Changed;
}
+void llvm::UpgradeSectionAttributes(Module &M) {
+ auto TrimSpaces = [](StringRef Section) -> std::string {
+ SmallVector<StringRef, 5> Components;
+ Section.split(Components, ',');
+
+ SmallString<32> Buffer;
+ raw_svector_ostream OS(Buffer);
+
+ for (auto Component : Components)
+ OS << ',' << Component.trim();
+
+ return OS.str().substr(1);
+ };
+
+ for (auto &GV : M.globals()) {
+ if (!GV.hasSection())
+ continue;
+
+ StringRef Section = GV.getSection();
+
+ if (!Section.startswith("__DATA, __objc_catlist"))
+ continue;
+
+ // __DATA, __objc_catlist, regular, no_dead_strip
+ // __DATA,__objc_catlist,regular,no_dead_strip
+ GV.setSection(TrimSpaces(Section));
+ }
+}
+
static bool isOldLoopArgument(Metadata *MD) {
auto *T = dyn_cast_or_null<MDTuple>(MD);
if (!T)
Unknown = true;
continue;
}
+ if (!isa<ConstantInt>(Idxs[i - 1]))
+ // FIXME: add support for constant vector indices.
+ continue;
if (InRangeIndex && i == *InRangeIndex + 1) {
// If an index is marked inrange, we cannot apply this canonicalization to
// the following index, as that will cause the inrange index to point to
} else {
if (ForDefinition)
NewGV = copyGlobalAliasProto(cast<GlobalAlias>(SGV));
+ else if (SGV->getValueType()->isFunctionTy())
+ NewGV =
+ Function::Create(cast<FunctionType>(TypeMap.get(SGV->getValueType())),
+ GlobalValue::ExternalLinkage, SGV->getName(), &DstM);
else
NewGV = new GlobalVariable(
DstM, TypeMap.get(SGV->getValueType()),
bool ModuleLinker::linkIfNeeded(GlobalValue &GV) {
GlobalValue *DGV = getLinkedToGlobal(&GV);
- if (shouldLinkOnlyNeeded() && !(DGV && DGV->isDeclaration()))
- return false;
+ if (shouldLinkOnlyNeeded()) {
+ // Always import variables with appending linkage.
+ if (!GV.hasAppendingLinkage()) {
+ // Don't import globals unless they are referenced by the destination
+ // module.
+ if (!DGV)
+ return false;
+ // Don't import globals that are already defined in the destination module
+ if (!DGV->isDeclaration())
+ return false;
+ }
+ }
if (DGV && !GV.hasLocalLinkage() && !GV.hasAppendingLinkage()) {
auto *DGVar = dyn_cast<GlobalVariable>(DGV);
.Case("0x06f", "krait") // APQ8064
.Case("0x201", "kryo")
.Case("0x205", "kryo")
+ .Case("0xc00", "falkor")
.Default("generic");
return "generic";
default:
return None;
+ case AArch64::LD1i64:
+ case AArch64::LD2i64:
+ DestRegIdx = 0;
+ BaseRegIdx = 3;
+ OffsetIdx = -1;
+ IsPrePost = false;
+ break;
+
case AArch64::LD1i8:
case AArch64::LD1i16:
case AArch64::LD1i32:
- case AArch64::LD1i64:
case AArch64::LD2i8:
case AArch64::LD2i16:
case AArch64::LD2i32:
- case AArch64::LD2i64:
case AArch64::LD3i8:
case AArch64::LD3i16:
case AArch64::LD3i32:
+ case AArch64::LD3i64:
case AArch64::LD4i8:
case AArch64::LD4i16:
case AArch64::LD4i32:
- DestRegIdx = 0;
- BaseRegIdx = 3;
- OffsetIdx = -1;
- IsPrePost = false;
- break;
-
- case AArch64::LD3i64:
case AArch64::LD4i64:
DestRegIdx = -1;
BaseRegIdx = 3;
case AArch64::LD1Rv4s:
case AArch64::LD1Rv8h:
case AArch64::LD1Rv16b:
- case AArch64::LD1Twov1d:
- case AArch64::LD1Twov2s:
- case AArch64::LD1Twov4h:
- case AArch64::LD1Twov8b:
- case AArch64::LD2Twov2s:
- case AArch64::LD2Twov4s:
- case AArch64::LD2Twov8b:
- case AArch64::LD2Rv1d:
- case AArch64::LD2Rv2s:
- case AArch64::LD2Rv4s:
- case AArch64::LD2Rv8b:
DestRegIdx = 0;
BaseRegIdx = 1;
OffsetIdx = -1;
IsPrePost = false;
break;
+ case AArch64::LD1Twov1d:
+ case AArch64::LD1Twov2s:
+ case AArch64::LD1Twov4h:
+ case AArch64::LD1Twov8b:
case AArch64::LD1Twov2d:
case AArch64::LD1Twov4s:
case AArch64::LD1Twov8h:
case AArch64::LD1Fourv4s:
case AArch64::LD1Fourv8h:
case AArch64::LD1Fourv16b:
+ case AArch64::LD2Twov2s:
+ case AArch64::LD2Twov4s:
+ case AArch64::LD2Twov8b:
case AArch64::LD2Twov2d:
case AArch64::LD2Twov4h:
case AArch64::LD2Twov8h:
case AArch64::LD2Twov16b:
+ case AArch64::LD2Rv1d:
+ case AArch64::LD2Rv2s:
+ case AArch64::LD2Rv4s:
+ case AArch64::LD2Rv8b:
case AArch64::LD2Rv2d:
case AArch64::LD2Rv4h:
case AArch64::LD2Rv8h:
IsPrePost = false;
break;
+ case AArch64::LD1i64_POST:
+ case AArch64::LD2i64_POST:
+ DestRegIdx = 1;
+ BaseRegIdx = 4;
+ OffsetIdx = 5;
+ IsPrePost = true;
+ break;
+
case AArch64::LD1i8_POST:
case AArch64::LD1i16_POST:
case AArch64::LD1i32_POST:
- case AArch64::LD1i64_POST:
case AArch64::LD2i8_POST:
case AArch64::LD2i16_POST:
case AArch64::LD2i32_POST:
- case AArch64::LD2i64_POST:
case AArch64::LD3i8_POST:
case AArch64::LD3i16_POST:
case AArch64::LD3i32_POST:
+ case AArch64::LD3i64_POST:
case AArch64::LD4i8_POST:
case AArch64::LD4i16_POST:
case AArch64::LD4i32_POST:
- DestRegIdx = 1;
- BaseRegIdx = 4;
- OffsetIdx = 5;
- IsPrePost = false;
- break;
-
- case AArch64::LD3i64_POST:
case AArch64::LD4i64_POST:
DestRegIdx = -1;
BaseRegIdx = 4;
OffsetIdx = 5;
- IsPrePost = false;
+ IsPrePost = true;
break;
case AArch64::LD1Onev1d_POST:
case AArch64::LD1Rv4s_POST:
case AArch64::LD1Rv8h_POST:
case AArch64::LD1Rv16b_POST:
- case AArch64::LD1Twov1d_POST:
- case AArch64::LD1Twov2s_POST:
- case AArch64::LD1Twov4h_POST:
- case AArch64::LD1Twov8b_POST:
- case AArch64::LD2Twov2s_POST:
- case AArch64::LD2Twov4s_POST:
- case AArch64::LD2Twov8b_POST:
- case AArch64::LD2Rv1d_POST:
- case AArch64::LD2Rv2s_POST:
- case AArch64::LD2Rv4s_POST:
- case AArch64::LD2Rv8b_POST:
DestRegIdx = 1;
BaseRegIdx = 2;
OffsetIdx = 3;
- IsPrePost = false;
+ IsPrePost = true;
break;
+ case AArch64::LD1Twov1d_POST:
+ case AArch64::LD1Twov2s_POST:
+ case AArch64::LD1Twov4h_POST:
+ case AArch64::LD1Twov8b_POST:
case AArch64::LD1Twov2d_POST:
case AArch64::LD1Twov4s_POST:
case AArch64::LD1Twov8h_POST:
case AArch64::LD1Fourv4s_POST:
case AArch64::LD1Fourv8h_POST:
case AArch64::LD1Fourv16b_POST:
+ case AArch64::LD2Twov2s_POST:
+ case AArch64::LD2Twov4s_POST:
+ case AArch64::LD2Twov8b_POST:
case AArch64::LD2Twov2d_POST:
case AArch64::LD2Twov4h_POST:
case AArch64::LD2Twov8h_POST:
case AArch64::LD2Twov16b_POST:
+ case AArch64::LD2Rv1d_POST:
+ case AArch64::LD2Rv2s_POST:
+ case AArch64::LD2Rv4s_POST:
+ case AArch64::LD2Rv8b_POST:
case AArch64::LD2Rv2d_POST:
case AArch64::LD2Rv4h_POST:
case AArch64::LD2Rv8h_POST:
DestRegIdx = -1;
BaseRegIdx = 2;
OffsetIdx = 3;
- IsPrePost = false;
+ IsPrePost = true;
break;
case AArch64::LDRBBroW:
IsPrePost = true;
break;
- case AArch64::LDPDi:
+ case AArch64::LDNPDi:
+ case AArch64::LDNPQi:
+ case AArch64::LDNPSi:
case AArch64::LDPQi:
+ case AArch64::LDPDi:
+ case AArch64::LDPSi:
DestRegIdx = -1;
BaseRegIdx = 2;
OffsetIdx = 3;
break;
case AArch64::LDPSWi:
- case AArch64::LDPSi:
case AArch64::LDPWi:
case AArch64::LDPXi:
DestRegIdx = 0;
case AArch64::LDPQpost:
case AArch64::LDPQpre:
+ case AArch64::LDPDpost:
+ case AArch64::LDPDpre:
+ case AArch64::LDPSpost:
+ case AArch64::LDPSpre:
DestRegIdx = -1;
BaseRegIdx = 3;
OffsetIdx = 4;
IsPrePost = true;
break;
- case AArch64::LDPDpost:
- case AArch64::LDPDpre:
case AArch64::LDPSWpost:
case AArch64::LDPSWpre:
- case AArch64::LDPSpost:
- case AArch64::LDPSpre:
case AArch64::LDPWpost:
case AArch64::LDPWpre:
case AArch64::LDPXpost:
if (!TII->isStridedAccess(MI))
continue;
- LoadInfo LdI = *getLoadInfo(MI);
- unsigned OldTag = *getTag(TRI, MI, LdI);
- auto &OldCollisions = TagMap[OldTag];
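+ // The load info or the tag may not be computable for this instruction;
+ // skip it in that case rather than dereferencing an empty Optional.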
+ Optional<LoadInfo> OptLdI = getLoadInfo(MI);
+ if (!OptLdI)
+ continue;
+ LoadInfo LdI = *OptLdI;
+ Optional<unsigned> OptOldTag = getTag(TRI, MI, LdI);
+ if (!OptOldTag)
+ continue;
+ auto &OldCollisions = TagMap[*OptOldTag];
if (OldCollisions.size() <= 1)
continue;
bool AArch64InstrInfo::analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
unsigned &SrcReg2, int &CmpMask,
int &CmpValue) const {
+ // The first operand can be a frame index where we'd normally expect a
+ // register.
+ assert(MI.getNumOperands() >= 2 && "All AArch64 cmps should have 2 operands");
+ if (!MI.getOperand(1).isReg())
+ return false;
+
switch (MI.getOpcode()) {
default:
break;
def MSRpstateImm4 : MSRpstateImm0_15;
// The thread pointer (on Linux, at least, where this has been implemented) is
-// TPIDR_EL0. Add pseudo op so we can mark it as not having any side effects.
-let hasSideEffects = 0 in
+// TPIDR_EL0.
def MOVbaseTLS : Pseudo<(outs GPR64:$dst), (ins),
[(set GPR64:$dst, AArch64threadpointer)]>, Sched<[WriteSys]>;
// CMP is an alias for SUBS with a dead destination register.
case AArch64::SUBSWri:
case AArch64::SUBSXri: {
+ // Sometimes the first operand is a FrameIndex. Bail if that happens.
+ if (!PredI.getOperand(1).isReg())
+ return None;
MCPhysReg SrcReg = PredI.getOperand(1).getReg();
// Must not be a symbolic immediate.
int GCNHazardRecognizer::getWaitStatesSince(
function_ref<bool(MachineInstr *)> IsHazard) {
- int WaitStates = -1;
+ int WaitStates = 0;
for (MachineInstr *MI : EmittedInstrs) {
+ if (MI) {
+ if (IsHazard(MI))
+ return WaitStates;
+
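+ // Meta instructions such as DBG_VALUE and IMPLICIT_DEF produce no machine
+ // code, so they do not count toward the wait state count.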
+ unsigned Opcode = MI->getOpcode();
+ if (Opcode == AMDGPU::DBG_VALUE || Opcode == AMDGPU::IMPLICIT_DEF)
+ continue;
+ }
++WaitStates;
- if (!MI || !IsHazard(MI))
- continue;
- return WaitStates;
}
return std::numeric_limits<int>::max();
}
// Add 's' bit operand (always reg0 for this)
.addReg(0));
+ assert(Subtarget->hasV4TOps());
EmitToStreamer(*OutStreamer, MCInstBuilder(ARM::BX)
.addReg(MI->getOperand(0).getReg()));
return;
.addImm(ARMCC::AL)
.addReg(0));
+ assert(Subtarget->hasV4TOps());
EmitToStreamer(*OutStreamer, MCInstBuilder(ARM::BX)
.addReg(ScratchReg)
// Predicate.
const Value *Val, unsigned VReg) const {
assert(!Val == !VReg && "Return value without a vreg");
- auto Ret = MIRBuilder.buildInstrNoInsert(ARM::BX_RET).add(predOps(ARMCC::AL));
+ auto const &ST = MIRBuilder.getMF().getSubtarget<ARMSubtarget>();
+ unsigned Opcode = ST.getReturnOpcode();
+ auto Ret = MIRBuilder.buildInstrNoInsert(Opcode).add(predOps(ARMCC::AL));
if (!lowerReturnVal(MIRBuilder, Val, VReg, Ret))
return false;
if (STI->isThumb())
MIB.add(predOps(ARMCC::AL));
} else if (RetOpcode == ARM::TCRETURNri) {
+ unsigned Opcode =
+ STI->isThumb() ? ARM::tTAILJMPr
+ : (STI->hasV4TOps() ? ARM::TAILJMPr : ARM::TAILJMPr4);
BuildMI(MBB, MBBI, dl,
- TII.get(STI->isThumb() ? ARM::tTAILJMPr : ARM::TAILJMPr))
+ TII.get(Opcode))
.addReg(JumpTarget.getReg(), RegState::Kill);
}
if (AddrReg == 0) return false;
unsigned Opc = isThumb2 ? ARM::tBRIND : ARM::BX;
+ assert(isThumb2 || Subtarget->hasV4TOps());
+
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(Opc)).addReg(AddrReg));
RetRegs.push_back(VA.getLocReg());
}
- unsigned RetOpc = isThumb2 ? ARM::tBX_RET : ARM::BX_RET;
MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
- TII.get(RetOpc));
+ TII.get(Subtarget->getReturnOpcode()));
AddOptionalDefs(MIB);
for (unsigned R : RetRegs)
MIB.addReg(R, RegState::Implicit);
if (DPRCSSize > 0) {
// Since vpush register list cannot have gaps, there may be multiple vpush
// instructions in the prologue.
- while (MBBI->getOpcode() == ARM::VSTMDDB_UPD) {
+ while (MBBI != MBB.end() && MBBI->getOpcode() == ARM::VSTMDDB_UPD) {
DefCFAOffsetCandidates.addInst(MBBI, sizeOfSPAdjustment(*MBBI));
LastPush = MBBI++;
}
BuildMI(AllocMBB, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
.addCFIIndex(CFIIndex);
- // bx lr - Return from this function.
- Opcode = Thumb ? ARM::tBX_RET : ARM::BX_RET;
- BuildMI(AllocMBB, DL, TII.get(Opcode)).add(predOps(ARMCC::AL));
+ // Return from this function.
+ BuildMI(AllocMBB, DL, TII.get(ST->getReturnOpcode())).add(predOps(ARMCC::AL));
// Restore SR0 and SR1 in case of __morestack() was not called.
// pop {SR0, SR1}
def TAILJMPr : ARMPseudoExpand<(outs), (ins tcGPR:$dst),
4, IIC_Br, [],
(BX GPR:$dst)>, Sched<[WriteBr]>,
- Requires<[IsARM]>;
+ Requires<[IsARM, HasV4T]>;
}
// Secure Monitor Call is a system instruction.
(MOVr PC, GPR:$dst, (ops 14, zero_reg), zero_reg)>,
Requires<[IsARM, NoV4T]>, Sched<[WriteBr]>;
+let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, Uses = [SP] in
+ def TAILJMPr4 : ARMPseudoExpand<(outs), (ins GPR:$dst),
+ 4, IIC_Br, [],
+ (MOVr PC, GPR:$dst, (ops 14, zero_reg), zero_reg)>,
+ Requires<[IsARM, NoV4T]>, Sched<[WriteBr]>;
+
// Large immediate handling.
// 32-bit immediate using two piece mod_imms or movw + movt.
for (auto Use : Prev->uses())
if (Use.isKill()) {
+ assert(STI->hasV4TOps());
BuildMI(MBB, MBBI, MBBI->getDebugLoc(), TII->get(ARM::tBX))
.addReg(Use.getReg(), RegState::Kill)
.add(predOps(ARMCC::AL))
/// True if fast-isel is used.
bool useFastISel() const;
+
+ /// Returns the correct return opcode for the current feature set.
+ /// Use BX if available to allow mixing thumb/arm code, but fall back
+ /// to plain mov pc,lr on ARMv4.
+ unsigned getReturnOpcode() const {
+ if (isThumb())
+ return ARM::tBX_RET;
+ if (hasV4TOps())
+ return ARM::BX_RET;
+ return ARM::MOVPCLR;
+ }
};
} // end namespace llvm
if (isThumb) {
if (ARMArchFeature.empty())
- ARMArchFeature = "+thumb-mode";
+ ARMArchFeature = "+thumb-mode,+v4t";
else
- ARMArchFeature += ",+thumb-mode";
+ ARMArchFeature += ",+thumb-mode,+v4t";
}
if (TT.isOSNaCl()) {
unsigned TmpReg = 0; // 0 for no temporary register
unsigned SrcReg = MI.getOperand(1).getReg();
bool SrcIsKill = MI.getOperand(1).isKill();
- OpLo = AVR::LDRdPtr;
- OpHi = AVR::LDDRdPtrQ;
+ OpLo = AVR::LDRdPtrPi;
+ OpHi = AVR::LDRdPtr;
TRI->splitReg(DstReg, DstLoReg, DstHiReg);
// Use a temporary register if src and dst registers are the same.
// Load low byte.
auto MIBLO = buildMI(MBB, MBBI, OpLo)
.addReg(CurDstLoReg, RegState::Define)
+ .addReg(SrcReg, RegState::Define)
.addReg(SrcReg);
// Push low byte onto stack if necessary.
// Load high byte.
auto MIBHI = buildMI(MBB, MBBI, OpHi)
.addReg(CurDstHiReg, RegState::Define)
- .addReg(SrcReg, getKillRegState(SrcIsKill))
- .addImm(1);
+ .addReg(SrcReg, getKillRegState(SrcIsKill));
if (TmpReg) {
// Move the high byte into the final destination.
OpHi = AVR::LDDRdPtrQ;
TRI->splitReg(DstReg, DstLoReg, DstHiReg);
- assert(Imm <= 63 && "Offset is out of range");
+ // Since we add 1 to the Imm value for the high byte below, and 63 is the highest Imm value
+ // allowed for the instruction, 62 is the limit here.
+ assert(Imm <= 62 && "Offset is out of range");
// Use a temporary register if src and dst registers are the same.
if (DstReg == SrcReg)
template <>
bool AVRExpandPseudo::expand<AVR::LPMWRdZ>(Block &MBB, BlockIt MBBI) {
- llvm_unreachable("wide LPM is unimplemented");
+ MachineInstr &MI = *MBBI;
+ unsigned OpLo, OpHi, DstLoReg, DstHiReg;
+ unsigned DstReg = MI.getOperand(0).getReg();
+ unsigned TmpReg = 0; // 0 for no temporary register
+ unsigned SrcReg = MI.getOperand(1).getReg();
+ bool SrcIsKill = MI.getOperand(1).isKill();
+ OpLo = AVR::LPMRdZPi;
+ OpHi = AVR::LPMRdZ;
+ TRI->splitReg(DstReg, DstLoReg, DstHiReg);
+
+ // Use a temporary register if src and dst registers are the same.
+ if (DstReg == SrcReg)
+ TmpReg = scavengeGPR8(MI);
+
+ unsigned CurDstLoReg = (DstReg == SrcReg) ? TmpReg : DstLoReg;
+ unsigned CurDstHiReg = (DstReg == SrcReg) ? TmpReg : DstHiReg;
+
+ // Load low byte.
+ auto MIBLO = buildMI(MBB, MBBI, OpLo)
+ .addReg(CurDstLoReg, RegState::Define)
+ .addReg(SrcReg);
+
+ // Push low byte onto stack if necessary.
+ if (TmpReg)
+ buildMI(MBB, MBBI, AVR::PUSHRr).addReg(TmpReg);
+
+ // Load high byte.
+ auto MIBHI = buildMI(MBB, MBBI, OpHi)
+ .addReg(CurDstHiReg, RegState::Define)
+ .addReg(SrcReg, getKillRegState(SrcIsKill));
+
+ if (TmpReg) {
+ // Move the high byte into the final destination.
+ buildMI(MBB, MBBI, AVR::MOVRdRr).addReg(DstHiReg).addReg(TmpReg);
+
+ // Move the low byte from the scratch space into the final destination.
+ buildMI(MBB, MBBI, AVR::POPRd).addReg(DstLoReg);
+ }
+
+ MIBLO->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ MIBHI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+
+ MI.eraseFromParent();
+ return true;
}
template <>
OpHi = AVR::STDPtrQRr;
TRI->splitReg(SrcReg, SrcLoReg, SrcHiReg);
- assert(Imm <= 63 && "Offset is out of range");
+ // Since we add 1 to the Imm value for the high byte below, and 63 is the highest Imm value
+ // allowed for the instruction, 62 is the limit here.
+ assert(Imm <= 62 && "Offset is out of range");
auto MIBLO = buildMI(MBB, MBBI, OpLo)
.addReg(DstReg)
OpHi = AVR::INRdA;
TRI->splitReg(DstReg, DstLoReg, DstHiReg);
- assert(Imm <= 63 && "Address is out of range");
+ // Since we add 1 to the Imm value for the high byte below, and 63 is the highest Imm value
+ // allowed for the instruction, 62 is the limit here.
+ assert(Imm <= 62 && "Address is out of range");
auto MIBLO = buildMI(MBB, MBBI, OpLo)
.addReg(DstLoReg, RegState::Define | getDeadRegState(DstIsDead))
OpHi = AVR::OUTARr;
TRI->splitReg(SrcReg, SrcLoReg, SrcHiReg);
- assert(Imm <= 63 && "Address is out of range");
+ // Since we add 1 to the Imm value for the high byte below, and 63 is the highest Imm value
+ // allowed for the instruction, 62 is the limit here.
+ assert(Imm <= 62 && "Address is out of range");
// 16 bit I/O writes need the high byte first
auto MIBHI = buildMI(MBB, MBBI, OpHi)
}
const BasicBlock *LLVM_BB = BB->getBasicBlock();
- MachineFunction::iterator I = BB->getParent()->begin();
- ++I;
+
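+ // Locate the iterator position immediately after BB within its parent
+ // function; this is where the new basic blocks will be placed.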
+ MachineFunction::iterator I;
+ for (I = F->begin(); I != F->end() && &(*I) != BB; ++I);
+ if (I != F->end()) ++I;
// Create loop block.
MachineBasicBlock *LoopBB = F->CreateMachineBasicBlock(LLVM_BB);
MVT getScalarShiftAmountTy(const DataLayout &, EVT LHSTy) const override {
return MVT::i8;
}
+
+ MVT::SimpleValueType getCmpLibcallReturnType() const override {
+ return MVT::i8;
+ }
+
const char *getTargetNodeName(unsigned Opcode) const override;
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
llvm_unreachable("unexpected opcode!");
case AVR::JMPk:
case AVR::CALLk:
- assert(BrOffset >= 0 && "offset must be absolute address");
- return isUIntN(16, BrOffset);
+ return true;
case AVR::RCALLk:
case AVR::RJMPk:
return isIntN(13, BrOffset);
}
}
+unsigned AVRInstrInfo::insertIndirectBranch(MachineBasicBlock &MBB,
+ MachineBasicBlock &NewDestBB,
+ const DebugLoc &DL,
+ int64_t BrOffset,
+ RegScavenger *RS) const {
+ // This method inserts a *direct* branch (JMP), despite its name.
+ // LLVM calls this method to fix up unconditional branches; it never calls
+ // insertBranch or some hypothetical "insertDirectBranch".
+ // See lib/CodeGen/BranchRelaxation.cpp for details.
+ // We end up here when a jump is too long for an RJMP instruction.
+ auto &MI = *BuildMI(&MBB, DL, get(AVR::JMPk)).addMBB(&NewDestBB);
+
+ return getInstSizeInBytes(MI);
+}
+
} // end of namespace llvm
bool isBranchOffsetInRange(unsigned BranchOpc,
int64_t BrOffset) const override;
+
+ unsigned insertIndirectBranch(MachineBasicBlock &MBB,
+ MachineBasicBlock &NewDestBB,
+ const DebugLoc &DL,
+ int64_t BrOffset,
+ RegScavenger *RS) const override;
private:
const AVRRegisterInfo RI;
};
//
// Expands to:
// ld Rd, P+
- // ld Rd+1, P+
+ // ld Rd+1, P
let Constraints = "@earlyclobber $reg" in
def LDWRdPtr : Pseudo<(outs DREGS:$reg),
- (ins PTRDISPREGS:$ptrreg),
+ (ins PTRREGS:$ptrreg),
"ldw\t$reg, $ptrreg",
[(set i16:$reg, (load i16:$ptrreg))]>,
Requires<[HasSRAM]>;
// Indirect loads (with postincrement or predecrement).
let mayLoad = 1,
hasSideEffects = 0,
-Constraints = "$ptrreg = $base_wb,@earlyclobber $reg,@earlyclobber $base_wb" in
+Constraints = "$ptrreg = $base_wb,@earlyclobber $reg" in
{
def LDRdPtrPi : FSTLD<0,
0b01,
Requires<[HasSRAM]>;
}
-class AtomicLoad<PatFrag Op, RegisterClass DRC> :
- Pseudo<(outs DRC:$rd), (ins PTRREGS:$rr), "atomic_op",
+class AtomicLoad<PatFrag Op, RegisterClass DRC,
+ RegisterClass PTRRC> :
+ Pseudo<(outs DRC:$rd), (ins PTRRC:$rr), "atomic_op",
[(set DRC:$rd, (Op i16:$rr))]>;
-class AtomicStore<PatFrag Op, RegisterClass DRC> :
- Pseudo<(outs), (ins PTRDISPREGS:$rd, DRC:$rr), "atomic_op",
+class AtomicStore<PatFrag Op, RegisterClass DRC,
+ RegisterClass PTRRC> :
+ Pseudo<(outs), (ins PTRRC:$rd, DRC:$rr), "atomic_op",
[(Op i16:$rd, DRC:$rr)]>;
-class AtomicLoadOp<PatFrag Op, RegisterClass DRC> :
- Pseudo<(outs DRC:$rd), (ins PTRREGS:$rr, DRC:$operand),
+class AtomicLoadOp<PatFrag Op, RegisterClass DRC,
+ RegisterClass PTRRC> :
+ Pseudo<(outs DRC:$rd), (ins PTRRC:$rr, DRC:$operand),
"atomic_op",
[(set DRC:$rd, (Op i16:$rr, DRC:$operand))]>;
-def AtomicLoad8 : AtomicLoad<atomic_load_8, GPR8>;
-def AtomicLoad16 : AtomicLoad<atomic_load_16, DREGS>;
-
-def AtomicStore8 : AtomicStore<atomic_store_8, GPR8>;
-def AtomicStore16 : AtomicStore<atomic_store_16, DREGS>;
-
-def AtomicLoadAdd8 : AtomicLoadOp<atomic_load_add_8, GPR8>;
-def AtomicLoadAdd16 : AtomicLoadOp<atomic_load_add_16, DREGS>;
-def AtomicLoadSub8 : AtomicLoadOp<atomic_load_sub_8, GPR8>;
-def AtomicLoadSub16 : AtomicLoadOp<atomic_load_sub_16, DREGS>;
-def AtomicLoadAnd8 : AtomicLoadOp<atomic_load_and_8, GPR8>;
-def AtomicLoadAnd16 : AtomicLoadOp<atomic_load_and_16, DREGS>;
-def AtomicLoadOr8 : AtomicLoadOp<atomic_load_or_8, GPR8>;
-def AtomicLoadOr16 : AtomicLoadOp<atomic_load_or_16, DREGS>;
-def AtomicLoadXor8 : AtomicLoadOp<atomic_load_xor_8, GPR8>;
-def AtomicLoadXor16 : AtomicLoadOp<atomic_load_xor_16, DREGS>;
+// FIXME: I think 16-bit atomic binary ops need to mark
+// r0 as clobbered.
+
+// Atomic instructions
+// ===================
+//
+// These are all expanded by AVRExpandPseudoInsts
+//
+// 8-bit operations can use any pointer register because
+// they are expanded directly into an LD/ST instruction.
+//
+// 16-bit operations use 16-bit load/store postincrement instructions,
+// which require PTRDISPREGS.
+
+def AtomicLoad8 : AtomicLoad<atomic_load_8, GPR8, PTRREGS>;
+def AtomicLoad16 : AtomicLoad<atomic_load_16, DREGS, PTRDISPREGS>;
+
+def AtomicStore8 : AtomicStore<atomic_store_8, GPR8, PTRREGS>;
+def AtomicStore16 : AtomicStore<atomic_store_16, DREGS, PTRDISPREGS>;
+
+class AtomicLoadOp8<PatFrag Op> : AtomicLoadOp<Op, GPR8, PTRREGS>;
+class AtomicLoadOp16<PatFrag Op> : AtomicLoadOp<Op, DREGS, PTRDISPREGS>;
+
+def AtomicLoadAdd8 : AtomicLoadOp8<atomic_load_add_8>;
+def AtomicLoadAdd16 : AtomicLoadOp16<atomic_load_add_16>;
+def AtomicLoadSub8 : AtomicLoadOp8<atomic_load_sub_8>;
+def AtomicLoadSub16 : AtomicLoadOp16<atomic_load_sub_16>;
+def AtomicLoadAnd8 : AtomicLoadOp8<atomic_load_and_8>;
+def AtomicLoadAnd16 : AtomicLoadOp16<atomic_load_and_16>;
+def AtomicLoadOr8 : AtomicLoadOp8<atomic_load_or_8>;
+def AtomicLoadOr16 : AtomicLoadOp16<atomic_load_or_16>;
+def AtomicLoadXor8 : AtomicLoadOp8<atomic_load_xor_8>;
+def AtomicLoadXor16 : AtomicLoadOp16<atomic_load_xor_16>;
def AtomicFence : Pseudo<(outs), (ins), "atomic_fence",
[(atomic_fence imm, imm)]>;
// Load program memory operations.
let canFoldAsLoad = 1,
isReMaterializable = 1,
+mayLoad = 1,
hasSideEffects = 0 in
{
let Defs = [R0],
Requires<[HasLPMX]>;
// Load program memory, while postincrementing the Z register.
- let mayLoad = 1,
- Defs = [R31R30] in
+ let Defs = [R31R30] in
{
def LPMRdZPi : FLPMX<0,
1,
// If the offset is too big we have to adjust and restore the frame pointer
// to materialize a valid load/store with displacement.
//:TODO: consider using only one adiw/sbiw chain for more than one frame index
- if (Offset > 63) {
+ if (Offset > 62) {
unsigned AddOpc = AVR::ADIWRdK, SubOpc = AVR::SBIWRdK;
int AddOffset = Offset - 63 + 1;
namespace llvm {
-static const char *AVRDataLayout = "e-p:16:16:16-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-n8";
+static const char *AVRDataLayout = "e-p:16:8-i8:8-i16:8-i32:8-i64:8-f32:8-f64:8-n8-a:8";
/// Processes a CPU name.
static StringRef getCPU(StringRef CPU) {
#include "AVRTargetStreamer.h"
+#include "llvm/MC/MCContext.h"
+
namespace llvm {
AVRTargetStreamer::AVRTargetStreamer(MCStreamer &S) : MCTargetStreamer(S) {}
AVRTargetAsmStreamer::AVRTargetAsmStreamer(MCStreamer &S)
: AVRTargetStreamer(S) {}
+void AVRTargetStreamer::finish() {
+ MCStreamer &OS = getStreamer();
+ MCContext &Context = OS.getContext();
+
+ MCSymbol *DoCopyData = Context.getOrCreateSymbol("__do_copy_data");
+ MCSymbol *DoClearBss = Context.getOrCreateSymbol("__do_clear_bss");
+
+ // FIXME: We can disable __do_copy_data if there are no static RAM variables.
+
+ OS.emitRawComment(" Declaring this symbol tells the CRT that it should");
+ OS.emitRawComment("copy all variables from program memory to RAM on startup");
+ OS.EmitSymbolAttribute(DoCopyData, MCSA_Global);
+
+ OS.emitRawComment(" Declaring this symbol tells the CRT that it should");
+ OS.emitRawComment("clear the zeroed data section on startup");
+ OS.EmitSymbolAttribute(DoClearBss, MCSA_Global);
+}
+
} // end namespace llvm
class AVRTargetStreamer : public MCTargetStreamer {
public:
explicit AVRTargetStreamer(MCStreamer &S);
+
+ void finish() override;
};
/// A target streamer for textual AVR assembly code.
.addReg(LHS)
.addReg(MI.getOperand(2).getReg())
.addMBB(Copy1MBB);
- else
+ else {
+ int64_t imm32 = MI.getOperand(2).getImm();
+ // Sanity check before we build the J*_ri instruction.
+ assert(isInt<32>(imm32));
BuildMI(BB, DL, TII.get(NewCC))
.addReg(LHS)
- .addImm(MI.getOperand(2).getImm())
+ .addImm(imm32)
.addMBB(Copy1MBB);
+ }
// Copy0MBB:
// %FalseValue = ...
(ins GPR:$lhs, i64imm:$rhs, i64imm:$imm, GPR:$src, GPR:$src2),
"# Select PSEUDO $dst = $lhs $imm $rhs ? $src : $src2",
[(set i64:$dst,
- (BPFselectcc i64:$lhs, (i64 imm:$rhs), (i64 imm:$imm), i64:$src, i64:$src2))]>;
+ (BPFselectcc i64:$lhs, (i64 i64immSExt32:$rhs), (i64 imm:$imm), i64:$src, i64:$src2))]>;
}
// load 64-bit global addr into register
bool expandSeqI(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
const MCSubtargetInfo *STI);
+ bool expandMXTRAlias(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
+ const MCSubtargetInfo *STI);
+
bool reportParseError(Twine ErrorMsg);
bool reportParseError(SMLoc Loc, Twine ErrorMsg);
return expandSeq(Inst, IDLoc, Out, STI) ? MER_Fail : MER_Success;
case Mips::SEQIMacro:
return expandSeqI(Inst, IDLoc, Out, STI) ? MER_Fail : MER_Success;
+ case Mips::MFTC0: case Mips::MTTC0:
+ case Mips::MFTGPR: case Mips::MTTGPR:
+ case Mips::MFTLO: case Mips::MTTLO:
+ case Mips::MFTHI: case Mips::MTTHI:
+ case Mips::MFTACX: case Mips::MTTACX:
+ case Mips::MFTDSP: case Mips::MTTDSP:
+ case Mips::MFTC1: case Mips::MTTC1:
+ case Mips::MFTHC1: case Mips::MTTHC1:
+ case Mips::CFTC1: case Mips::CTTC1:
+ return expandMXTRAlias(Inst, IDLoc, Out, STI) ? MER_Fail : MER_Success;
}
}
return false;
}
+// Map the DSP accumulator and control register to the corresponding gpr
+// operand. Unlike the other aliases, the m(f|t)t(lo|hi|acx) instructions
+// do not map the DSP registers contiguously to gpr registers.
+static unsigned getRegisterForMxtrDSP(MCInst &Inst, bool IsMFDSP) {
+ switch (Inst.getOpcode()) {
+ case Mips::MFTLO:
+ case Mips::MTTLO:
+ switch (Inst.getOperand(IsMFDSP ? 1 : 0).getReg()) {
+ case Mips::AC0:
+ return Mips::ZERO;
+ case Mips::AC1:
+ return Mips::A0;
+ case Mips::AC2:
+ return Mips::T0;
+ case Mips::AC3:
+ return Mips::T4;
+ default:
+ llvm_unreachable("Unknown register for 'mttr' alias!");
+ }
+ case Mips::MFTHI:
+ case Mips::MTTHI:
+ switch (Inst.getOperand(IsMFDSP ? 1 : 0).getReg()) {
+ case Mips::AC0:
+ return Mips::AT;
+ case Mips::AC1:
+ return Mips::A1;
+ case Mips::AC2:
+ return Mips::T1;
+ case Mips::AC3:
+ return Mips::T5;
+ default:
+ llvm_unreachable("Unknown register for 'mttr' alias!");
+ }
+ case Mips::MFTACX:
+ case Mips::MTTACX:
+ switch (Inst.getOperand(IsMFDSP ? 1 : 0).getReg()) {
+ case Mips::AC0:
+ return Mips::V0;
+ case Mips::AC1:
+ return Mips::A2;
+ case Mips::AC2:
+ return Mips::T2;
+ case Mips::AC3:
+ return Mips::T6;
+ default:
+ llvm_unreachable("Unknown register for 'mttr' alias!");
+ }
+ case Mips::MFTDSP:
+ case Mips::MTTDSP:
+ return Mips::S0;
+ default:
+ llvm_unreachable("Unknown instruction for 'mttr' dsp alias!");
+ }
+}
+
+// Map the floating point register operand to the corresponding register
+// operand.
+static unsigned getRegisterForMxtrFP(MCInst &Inst, bool IsMFTC1) {
+ switch (Inst.getOperand(IsMFTC1 ? 1 : 0).getReg()) {
+ case Mips::F0: return Mips::ZERO;
+ case Mips::F1: return Mips::AT;
+ case Mips::F2: return Mips::V0;
+ case Mips::F3: return Mips::V1;
+ case Mips::F4: return Mips::A0;
+ case Mips::F5: return Mips::A1;
+ case Mips::F6: return Mips::A2;
+ case Mips::F7: return Mips::A3;
+ case Mips::F8: return Mips::T0;
+ case Mips::F9: return Mips::T1;
+ case Mips::F10: return Mips::T2;
+ case Mips::F11: return Mips::T3;
+ case Mips::F12: return Mips::T4;
+ case Mips::F13: return Mips::T5;
+ case Mips::F14: return Mips::T6;
+ case Mips::F15: return Mips::T7;
+ case Mips::F16: return Mips::S0;
+ case Mips::F17: return Mips::S1;
+ case Mips::F18: return Mips::S2;
+ case Mips::F19: return Mips::S3;
+ case Mips::F20: return Mips::S4;
+ case Mips::F21: return Mips::S5;
+ case Mips::F22: return Mips::S6;
+ case Mips::F23: return Mips::S7;
+ case Mips::F24: return Mips::T8;
+ case Mips::F25: return Mips::T9;
+ case Mips::F26: return Mips::K0;
+ case Mips::F27: return Mips::K1;
+ case Mips::F28: return Mips::GP;
+ case Mips::F29: return Mips::SP;
+ case Mips::F30: return Mips::FP;
+ case Mips::F31: return Mips::RA;
+ default: llvm_unreachable("Unknown register for mttc1 alias!");
+ }
+}
+
+// Map the coprocessor operand to the corresponding gpr register operand.
+static unsigned getRegisterForMxtrC0(MCInst &Inst, bool IsMFTC0) {
+ switch (Inst.getOperand(IsMFTC0 ? 1 : 0).getReg()) {
+ case Mips::COP00: return Mips::ZERO;
+ case Mips::COP01: return Mips::AT;
+ case Mips::COP02: return Mips::V0;
+ case Mips::COP03: return Mips::V1;
+ case Mips::COP04: return Mips::A0;
+ case Mips::COP05: return Mips::A1;
+ case Mips::COP06: return Mips::A2;
+ case Mips::COP07: return Mips::A3;
+ case Mips::COP08: return Mips::T0;
+ case Mips::COP09: return Mips::T1;
+ case Mips::COP010: return Mips::T2;
+ case Mips::COP011: return Mips::T3;
+ case Mips::COP012: return Mips::T4;
+ case Mips::COP013: return Mips::T5;
+ case Mips::COP014: return Mips::T6;
+ case Mips::COP015: return Mips::T7;
+ case Mips::COP016: return Mips::S0;
+ case Mips::COP017: return Mips::S1;
+ case Mips::COP018: return Mips::S2;
+ case Mips::COP019: return Mips::S3;
+ case Mips::COP020: return Mips::S4;
+ case Mips::COP021: return Mips::S5;
+ case Mips::COP022: return Mips::S6;
+ case Mips::COP023: return Mips::S7;
+ case Mips::COP024: return Mips::T8;
+ case Mips::COP025: return Mips::T9;
+ case Mips::COP026: return Mips::K0;
+ case Mips::COP027: return Mips::K1;
+ case Mips::COP028: return Mips::GP;
+ case Mips::COP029: return Mips::SP;
+ case Mips::COP030: return Mips::FP;
+ case Mips::COP031: return Mips::RA;
+ default: llvm_unreachable("Unknown register for mttc0 alias!");
+ }
+}
+
+/// Expand an alias of 'mftr' or 'mttr' into the full instruction, by producing
+/// an mftr or mttr with the correctly mapped gpr register, u, sel and h bits.
+bool MipsAsmParser::expandMXTRAlias(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
+ const MCSubtargetInfo *STI) {
+ MipsTargetStreamer &TOut = getTargetStreamer();
+ unsigned rd = 0;
+ unsigned u = 1;
+ unsigned sel = 0;
+ unsigned h = 0;
+ bool IsMFTR = false;
+ switch (Inst.getOpcode()) {
+ case Mips::MFTC0:
+ IsMFTR = true;
+ LLVM_FALLTHROUGH;
+ case Mips::MTTC0:
+ u = 0;
+ rd = getRegisterForMxtrC0(Inst, IsMFTR);
+ sel = Inst.getOperand(2).getImm();
+ break;
+ case Mips::MFTGPR:
+ IsMFTR = true;
+ LLVM_FALLTHROUGH;
+ case Mips::MTTGPR:
+ rd = Inst.getOperand(IsMFTR ? 1 : 0).getReg();
+ break;
+ case Mips::MFTLO:
+ case Mips::MFTHI:
+ case Mips::MFTACX:
+ case Mips::MFTDSP:
+ IsMFTR = true;
+ LLVM_FALLTHROUGH;
+ case Mips::MTTLO:
+ case Mips::MTTHI:
+ case Mips::MTTACX:
+ case Mips::MTTDSP:
+ rd = getRegisterForMxtrDSP(Inst, IsMFTR);
+ sel = 1;
+ break;
+ case Mips::MFTHC1:
+ h = 1;
+ LLVM_FALLTHROUGH;
+ case Mips::MFTC1:
+ IsMFTR = true;
+ rd = getRegisterForMxtrFP(Inst, IsMFTR);
+ sel = 2;
+ break;
+ case Mips::MTTHC1:
+ h = 1;
+ LLVM_FALLTHROUGH;
+ case Mips::MTTC1:
+ rd = getRegisterForMxtrFP(Inst, IsMFTR);
+ sel = 2;
+ break;
+ case Mips::CFTC1:
+ IsMFTR = true;
+ LLVM_FALLTHROUGH;
+ case Mips::CTTC1:
+ rd = getRegisterForMxtrFP(Inst, IsMFTR);
+ sel = 3;
+ break;
+ }
+ unsigned Op0 = IsMFTR ? Inst.getOperand(0).getReg() : rd;
+ unsigned Op1 =
+ IsMFTR ? rd
+ : (Inst.getOpcode() != Mips::MTTDSP ? Inst.getOperand(1).getReg()
+ : Inst.getOperand(0).getReg());
+
+ TOut.emitRRIII(IsMFTR ? Mips::MFTR : Mips::MTTR, Op0, Op1, u, sel, h, IDLoc,
+ STI);
+ return false;
+}
+
unsigned
MipsAsmParser::checkEarlyTargetMatchPredicate(MCInst &Inst,
const OperandVector &Operands) {
MipsAsmParser::parseInvNum(OperandVector &Operands) {
MCAsmParser &Parser = getParser();
const MCExpr *IdVal;
- // If the first token is '$' we may have register operand.
- if (Parser.getTok().is(AsmToken::Dollar))
- return MatchOperand_NoMatch;
+ // If the first token is '$' we may have a register operand. We have to reject
+ // cases where it is not a register. Complicating the matter is that
+ // register names are not reserved across all ABIs.
+ // Peek past the dollar to see if it's a register name for this ABI.
SMLoc S = Parser.getTok().getLoc();
+ if (Parser.getTok().is(AsmToken::Dollar)) {
+ return matchCPURegisterName(Parser.getLexer().peekTok().getString()) == -1
+ ? MatchOperand_ParseFail
+ : MatchOperand_NoMatch;
+ }
if (getParser().parseExpression(IdVal))
return MatchOperand_ParseFail;
const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(IdVal);
- assert(MCE && "Unexpected MCExpr type.");
+ if (!MCE)
+ return MatchOperand_NoMatch;
int64_t Val = MCE->getValue();
SMLoc E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
Operands.push_back(MipsOperand::CreateImm(
break;
case MEK_CALL_HI16:
case MEK_CALL_LO16:
- case MEK_DTPREL_HI:
- case MEK_DTPREL_LO:
case MEK_GOT:
case MEK_GOT_CALL:
case MEK_GOT_DISP:
case MEK_NEG:
case MEK_PCREL_HI16:
case MEK_PCREL_LO16:
- case MEK_TLSLDM:
// If we do have nested target-specific expressions, they will be in
// a consecutive chain.
if (const MipsMCExpr *E = dyn_cast<const MipsMCExpr>(getSubExpr()))
E->fixELFSymbolsInTLSFixups(Asm);
break;
- case MEK_GOTTPREL:
+ case MEK_DTPREL_HI:
+ case MEK_DTPREL_LO:
+ case MEK_TLSLDM:
case MEK_TLSGD:
+ case MEK_GOTTPREL:
case MEK_TPREL_HI:
case MEK_TPREL_LO:
fixELFSymbolsInTLSFixupsImpl(getSubExpr(), Asm);
emitRRX(Opcode, Reg0, Reg1, MCOperand::createImm(Imm), IDLoc, STI);
}
+void MipsTargetStreamer::emitRRIII(unsigned Opcode, unsigned Reg0,
+ unsigned Reg1, int16_t Imm0, int16_t Imm1,
+ int16_t Imm2, SMLoc IDLoc,
+ const MCSubtargetInfo *STI) {
+ MCInst TmpInst;
+ TmpInst.setOpcode(Opcode);
+ TmpInst.addOperand(MCOperand::createReg(Reg0));
+ TmpInst.addOperand(MCOperand::createReg(Reg1));
+ TmpInst.addOperand(MCOperand::createImm(Imm0));
+ TmpInst.addOperand(MCOperand::createImm(Imm1));
+ TmpInst.addOperand(MCOperand::createImm(Imm2));
+ TmpInst.setLoc(IDLoc);
+ getStreamer().EmitInstruction(TmpInst, *STI);
+}
+
void MipsTargetStreamer::emitAddu(unsigned DstReg, unsigned SrcReg,
unsigned TrgReg, bool Is64Bit,
const MCSubtargetInfo *STI) {
class BPOSGE32_MM_DESC : BPOSGE32_DESC_BASE<"bposge32", brtarget_mm,
NoItinerary>;
+let DecoderNamespace = "MicroMipsDSP", Arch = "mmdsp",
+ AdditionalPredicates = [HasDSP, InMicroMips] in {
+ def LWDSP_MM : Load<"lw", DSPROpnd, null_frag, II_LW>, DspMMRel,
+ LW_FM_MM<0x3f>;
+ def SWDSP_MM : Store<"sw", DSPROpnd, null_frag, II_SW>, DspMMRel,
+ LW_FM_MM<0x3e>;
+}
// Instruction defs.
// microMIPS DSP Rev 1
def ADDQ_PH_MM : DspMMRel, ADDQ_PH_MM_ENC, ADDQ_PH_DESC;
def STORE_CCOND_DSP : Store<"store_ccond_dsp", DSPCC>;
}
+let DecoderNamespace = "MipsDSP", Arch = "dsp",
+ AdditionalPredicates = [HasDSP] in {
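+  // lw/sw with DSP register-class operands; used when spilling and reloading
+  // values held in DSP registers.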
+ def LWDSP : Load<"lw", DSPROpnd, null_frag, II_LW>, DspMMRel, LW_FM<0x23>;
+ def SWDSP : Store<"sw", DSPROpnd, null_frag, II_SW>, DspMMRel, LW_FM<0x2b>;
+}
+
// Pseudo CMP and PICK instructions.
class PseudoCMP<Instruction RealInst> :
PseudoDSP<(outs DSPCC:$cmp), (ins DSPROpnd:$rs, DSPROpnd:$rt), []>,
return MFI.hasVarSizedObjects() && TRI->needsStackRealignment(MF);
}
+// Estimate the size of the stack, including the incoming arguments. We need to
+// account for register spills, local objects, reserved call frame, and incoming
+// arguments. This is required to determine the largest possible positive offset
+// from $sp so that we can decide whether an emergency spill slot for stack
+// addresses is required.
uint64_t MipsFrameLowering::estimateStackSize(const MachineFunction &MF) const {
const MachineFrameInfo &MFI = MF.getFrameInfo();
const TargetRegisterInfo &TRI = *STI.getRegisterInfo();
- int64_t Offset = 0;
+ int64_t Size = 0;
- // Iterate over fixed sized objects.
+ // Iterate over fixed sized objects which are incoming arguments.
for (int I = MFI.getObjectIndexBegin(); I != 0; ++I)
- Offset = std::max(Offset, -MFI.getObjectOffset(I));
+ if (MFI.getObjectOffset(I) > 0)
+ Size += MFI.getObjectSize(I);
// Conservatively assume all callee-saved registers will be saved.
for (const MCPhysReg *R = TRI.getCalleeSavedRegs(&MF); *R; ++R) {
- unsigned Size = TRI.getSpillSize(*TRI.getMinimalPhysRegClass(*R));
- Offset = alignTo(Offset + Size, Size);
+ unsigned RegSize = TRI.getSpillSize(*TRI.getMinimalPhysRegClass(*R));
+ Size = alignTo(Size + RegSize, RegSize);
}
- unsigned MaxAlign = MFI.getMaxAlignment();
-
- // Check that MaxAlign is not zero if there is a stack object that is not a
- // callee-saved spill.
- assert(!MFI.getObjectIndexEnd() || MaxAlign);
-
- // Iterate over other objects.
- for (unsigned I = 0, E = MFI.getObjectIndexEnd(); I != E; ++I)
- Offset = alignTo(Offset + MFI.getObjectSize(I), MaxAlign);
-
- // Call frame.
- if (MFI.adjustsStack() && hasReservedCallFrame(MF))
- Offset = alignTo(Offset + MFI.getMaxCallFrameSize(),
- std::max(MaxAlign, getStackAlignment()));
-
- return alignTo(Offset, getStackAlignment());
+ // Get the size of the rest of the frame objects and any possible reserved
+ // call frame, accounting for alignment.
+ return Size + MFI.estimateStackSize(MF);
}
// Eliminate ADJCALLSTACKDOWN, ADJCALLSTACKUP pseudo instructions
def FIELD5_1_DMT_EMT : FIELD5<0b00001>;
def FIELD5_2_DMT_EMT : FIELD5<0b01111>;
def FIELD5_1_2_DVPE_EVPE : FIELD5<0b00000>;
+def FIELD5_MFTR : FIELD5<0b01000>;
+def FIELD5_MTTR : FIELD5<0b01100>;
class COP0_MFMC0_MT<FIELD5 Op1, FIELD5 Op2, OPCODE1 sc> : MipsMTInst {
bits<32> Inst;
let Inst{2-0} = 0b001;
}
+class COP0_MFTTR_MT<FIELD5 Op> : MipsMTInst {
+ bits<32> Inst;
+
+ bits<5> rt;
+ bits<5> rd;
+ bits<1> u;
+ bits<1> h;
+ bits<3> sel;
+ let Inst{31-26} = 0b010000; // COP0
+ let Inst{25-21} = Op.Value; // MFMC0
+ let Inst{20-16} = rt;
+ let Inst{15-11} = rd;
+ let Inst{10-6} = 0b00000; // rx - currently unsupported.
+ let Inst{5} = u;
+ let Inst{4} = h;
+ let Inst{3} = 0b0;
+ let Inst{2-0} = sel;
+}
+
class SPECIAL3_MT_FORK : MipsMTInst {
bits<32> Inst;
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
+//
+// This file describes the MIPS MT ASE as defined by MD00378 1.12.
+//
+// TODO: Add support for the microMIPS encodings for the MT ASE and add the
+// instruction mappings.
+//
+//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
// MIPS MT Instruction Encodings
class YIELD_ENC : SPECIAL3_MT_YIELD;
+class MFTR_ENC : COP0_MFTTR_MT<FIELD5_MFTR>;
+
+class MTTR_ENC : COP0_MFTTR_MT<FIELD5_MTTR>;
+
//===----------------------------------------------------------------------===//
// MIPS MT Instruction Descriptions
//===----------------------------------------------------------------------===//
InstrItinClass Itinerary = Itin;
}
+class MFTR_DESC {
+ dag OutOperandList = (outs GPR32Opnd:$rd);
+ dag InOperandList = (ins GPR32Opnd:$rt, uimm1:$u, uimm3:$sel, uimm1:$h);
+ string AsmString = "mftr\t$rd, $rt, $u, $sel, $h";
+ list<dag> Pattern = [];
+ InstrItinClass Itinerary = II_MFTR;
+}
+
+class MTTR_DESC {
+ dag OutOperandList = (outs GPR32Opnd:$rd);
+ dag InOperandList = (ins GPR32Opnd:$rt, uimm1:$u, uimm3:$sel, uimm1:$h);
+ string AsmString = "mttr\t$rt, $rd, $u, $sel, $h";
+ list<dag> Pattern = [];
+ InstrItinClass Itinerary = II_MTTR;
+}
+
class FORK_DESC {
dag OutOperandList = (outs GPR32Opnd:$rs, GPR32Opnd:$rd);
dag InOperandList = (ins GPR32Opnd:$rt);
def FORK : FORK_ENC, FORK_DESC, ASE_MT;
def YIELD : YIELD_ENC, YIELD_DESC, ASE_MT;
+
+ def MFTR : MFTR_ENC, MFTR_DESC, ASE_MT;
+
+ def MTTR : MTTR_ENC, MTTR_DESC, ASE_MT;
}
+//===----------------------------------------------------------------------===//
+// MIPS MT Pseudo Instructions - used to support mftr & mttr aliases.
+//===----------------------------------------------------------------------===//
+def MFTC0 : MipsAsmPseudoInst<(outs GPR32Opnd:$rd), (ins COP0Opnd:$rt,
+ uimm3:$sel),
+ "mftc0 $rd, $rt, $sel">, ASE_MT;
+
+def MFTGPR : MipsAsmPseudoInst<(outs GPR32Opnd:$rd), (ins GPR32Opnd:$rt,
+ uimm3:$sel),
+ "mftgpr $rd, $rt">, ASE_MT;
+
+def MFTLO : MipsAsmPseudoInst<(outs GPR32Opnd:$rt), (ins ACC64DSPOpnd:$ac),
+ "mftlo $rt, $ac">, ASE_MT;
+
+def MFTHI : MipsAsmPseudoInst<(outs GPR32Opnd:$rt), (ins ACC64DSPOpnd:$ac),
+ "mfthi $rt, $ac">, ASE_MT;
+
+def MFTACX : MipsAsmPseudoInst<(outs GPR32Opnd:$rt), (ins ACC64DSPOpnd:$ac),
+ "mftacx $rt, $ac">, ASE_MT;
+
+def MFTDSP : MipsAsmPseudoInst<(outs GPR32Opnd:$rt), (ins),
+ "mftdsp $rt">, ASE_MT;
+
+def MFTC1 : MipsAsmPseudoInst<(outs GPR32Opnd:$rt), (ins FGR32Opnd:$ft),
+ "mftc1 $rt, $ft">, ASE_MT;
+
+def MFTHC1 : MipsAsmPseudoInst<(outs GPR32Opnd:$rt), (ins FGR32Opnd:$ft),
+ "mfthc1 $rt, $ft">, ASE_MT;
+
+def CFTC1 : MipsAsmPseudoInst<(outs GPR32Opnd:$rt), (ins FGRCCOpnd:$ft),
+ "cftc1 $rt, $ft">, ASE_MT;
+
+
+def MTTC0 : MipsAsmPseudoInst<(outs COP0Opnd:$rd), (ins GPR32Opnd:$rt,
+ uimm3:$sel),
+ "mttc0 $rt, $rd, $sel">, ASE_MT;
+
+def MTTGPR : MipsAsmPseudoInst<(outs GPR32Opnd:$rt), (ins GPR32Opnd:$rd),
+ "mttgpr $rd, $rt">, ASE_MT;
+
+def MTTLO : MipsAsmPseudoInst<(outs ACC64DSPOpnd:$ac), (ins GPR32Opnd:$rt),
+ "mttlo $rt, $ac">, ASE_MT;
+
+def MTTHI : MipsAsmPseudoInst<(outs ACC64DSPOpnd:$ac), (ins GPR32Opnd:$rt),
+ "mtthi $rt, $ac">, ASE_MT;
+
+def MTTACX : MipsAsmPseudoInst<(outs ACC64DSPOpnd:$ac), (ins GPR32Opnd:$rt),
+ "mttacx $rt, $ac">, ASE_MT;
+
+def MTTDSP : MipsAsmPseudoInst<(outs), (ins GPR32Opnd:$rt),
+ "mttdsp $rt">, ASE_MT;
+
+def MTTC1 : MipsAsmPseudoInst<(outs FGR32Opnd:$ft), (ins GPR32Opnd:$rt),
+ "mttc1 $rt, $ft">, ASE_MT;
+
+def MTTHC1 : MipsAsmPseudoInst<(outs FGR32Opnd:$ft), (ins GPR32Opnd:$rt),
+ "mtthc1 $rt, $ft">, ASE_MT;
+
+def CTTC1 : MipsAsmPseudoInst<(outs FGRCCOpnd:$ft), (ins GPR32Opnd:$rt),
+ "cttc1 $rt, $ft">, ASE_MT;
+
//===----------------------------------------------------------------------===//
// MIPS MT Instruction Definitions
//===----------------------------------------------------------------------===//
def : MipsInstAlias<"evpe", (EVPE ZERO), 1>, ASE_MT;
def : MipsInstAlias<"yield $rs", (YIELD ZERO, GPR32Opnd:$rs), 1>, ASE_MT;
+
+ def : MipsInstAlias<"mftc0 $rd, $rt", (MFTC0 GPR32Opnd:$rd, COP0Opnd:$rt, 0),
+ 1>, ASE_MT;
+
+ def : MipsInstAlias<"mftlo $rt", (MFTLO GPR32Opnd:$rt, AC0), 1>, ASE_MT;
+
+ def : MipsInstAlias<"mfthi $rt", (MFTHI GPR32Opnd:$rt, AC0), 1>, ASE_MT;
+
+ def : MipsInstAlias<"mftacx $rt", (MFTACX GPR32Opnd:$rt, AC0), 1>, ASE_MT;
+
+ def : MipsInstAlias<"mttc0 $rd, $rt", (MTTC0 COP0Opnd:$rt, GPR32Opnd:$rd, 0),
+ 1>, ASE_MT;
+
+ def : MipsInstAlias<"mttlo $rt", (MTTLO AC0, GPR32Opnd:$rt), 1>, ASE_MT;
+
+ def : MipsInstAlias<"mtthi $rt", (MTTHI AC0, GPR32Opnd:$rt), 1>, ASE_MT;
+
+ def : MipsInstAlias<"mttacx $rt", (MTTACX AC0, GPR32Opnd:$rt), 1>, ASE_MT;
}
}
// Set scavenging frame index if necessary.
- uint64_t MaxSPOffset = MF.getInfo<MipsFunctionInfo>()->getIncomingArgSize() +
- estimateStackSize(MF);
+ uint64_t MaxSPOffset = estimateStackSize(MF);
- if (isInt<16>(MaxSPOffset))
+ // MSA has a minimum offset of 10 bits signed. If there is a variable
+ // sized object on the stack, the estimation cannot account for it.
+ if (isIntN(STI.hasMSA() ? 10 : 16, MaxSPOffset) &&
+ !MF.getFrameInfo().hasVarSizedObjects())
return;
const TargetRegisterClass &RC =
Opc = Mips::SW;
else if (Mips::HI64RegClass.hasSubClassEq(RC))
Opc = Mips::SD;
+ else if (Mips::DSPRRegClass.hasSubClassEq(RC))
+ Opc = Mips::SWDSP;
// Hi, Lo are normally caller save but they are callee save
// for interrupt handling.
Opc = Mips::LW;
else if (Mips::LO64RegClass.hasSubClassEq(RC))
Opc = Mips::LD;
+ else if (Mips::DSPRRegClass.hasSubClassEq(RC))
+ Opc = Mips::LWDSP;
assert(Opc && "Register class not handled!");
def II_MFHC1 : InstrItinClass;
def II_MFC2 : InstrItinClass;
def II_MFHI_MFLO : InstrItinClass; // mfhi and mflo
+def II_MFTR : InstrItinClass;
def II_MOD : InstrItinClass;
def II_MODU : InstrItinClass;
def II_MOVE : InstrItinClass;
def II_MTHC1 : InstrItinClass;
def II_MTC2 : InstrItinClass;
def II_MTHI_MTLO : InstrItinClass; // mthi and mtlo
+def II_MTTR : InstrItinClass;
def II_MUL : InstrItinClass;
def II_MUH : InstrItinClass;
def II_MUHU : InstrItinClass;
InstrItinData<II_MFHC0 , [InstrStage<2, [ALU]>]>,
InstrItinData<II_MFC1 , [InstrStage<2, [ALU]>]>,
InstrItinData<II_MFC2 , [InstrStage<2, [ALU]>]>,
+ InstrItinData<II_MFTR , [InstrStage<2, [ALU]>]>,
InstrItinData<II_MTC0 , [InstrStage<2, [ALU]>]>,
InstrItinData<II_MTHC0 , [InstrStage<2, [ALU]>]>,
InstrItinData<II_MTC1 , [InstrStage<2, [ALU]>]>,
InstrItinData<II_MTC2 , [InstrStage<2, [ALU]>]>,
InstrItinData<II_MFHC1 , [InstrStage<2, [ALU]>]>,
InstrItinData<II_MTHC1 , [InstrStage<2, [ALU]>]>,
+ InstrItinData<II_MTTR , [InstrStage<2, [ALU]>]>,
InstrItinData<II_CACHE , [InstrStage<1, [ALU]>]>,
InstrItinData<II_PREF , [InstrStage<1, [ALU]>]>,
InstrItinData<II_CACHEE , [InstrStage<1, [ALU]>]>,
// MIPS MT instructions
// ====================
-def : ItinRW<[GenericWriteMove], [II_DMT, II_DVPE, II_EMT, II_EVPE]>;
+def : ItinRW<[GenericWriteMove], [II_DMT, II_DVPE, II_EMT, II_EVPE, II_MFTR,
+ II_MTTR]>;
def : ItinRW<[GenericReadWriteCOP0Long], [II_YIELD]>;
+
def : ItinRW<[GenericWriteCOP0Short], [II_FORK]>;
// MIPS32R6 and MIPS16e
SMLoc IDLoc, const MCSubtargetInfo *STI);
void emitRRI(unsigned Opcode, unsigned Reg0, unsigned Reg1, int16_t Imm,
SMLoc IDLoc, const MCSubtargetInfo *STI);
+ void emitRRIII(unsigned Opcode, unsigned Reg0, unsigned Reg1, int16_t Imm0,
+ int16_t Imm1, int16_t Imm2, SMLoc IDLoc,
+ const MCSubtargetInfo *STI);
void emitAddu(unsigned DstReg, unsigned SrcReg, unsigned TrgReg, bool Is64Bit,
const MCSubtargetInfo *STI);
void emitDSLL(unsigned DstReg, unsigned SrcReg, int16_t ShiftAmount,
return DAG.getTargetConstant(1, dl, VT);
if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode())) {
+ if (VT == MVT::v64i1 && !Subtarget.is64Bit()) {
+ // Split the pieces.
+ SDValue Lower =
+ DAG.getBuildVector(MVT::v32i1, dl, Op.getNode()->ops().slice(0, 32));
+ SDValue Upper =
+ DAG.getBuildVector(MVT::v32i1, dl, Op.getNode()->ops().slice(32, 32));
+ // We have to manually lower both halves so getNode doesn't try to
+ // reassemble the build_vector.
+ Lower = LowerBUILD_VECTORvXi1(Lower, DAG);
+ Upper = LowerBUILD_VECTORvXi1(Upper, DAG);
+ return DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v64i1, Lower, Upper);
+ }
SDValue Imm = ConvertI1VectorToInteger(Op, DAG);
if (Imm.getValueSizeInBits() == VT.getSizeInBits())
return DAG.getBitcast(VT, Imm);
if (!OpVT.isScalarInteger() || OpSize < 128 || isNullConstant(Y))
return SDValue();
+ // Bail out if we know that this is not really just an oversized integer.
+ if (peekThroughBitcasts(X).getValueType() == MVT::f128 ||
+ peekThroughBitcasts(Y).getValueType() == MVT::f128)
+ return SDValue();
+
// TODO: Use PXOR + PTEST for SSE4.1 or later?
// TODO: Add support for AVX-512.
EVT VT = SetCC->getValueType(0);
private:
// Expression handling.
const Expression *createExpression(Instruction *) const;
- const Expression *createBinaryExpression(unsigned, Type *, Value *,
- Value *) const;
+ const Expression *createBinaryExpression(unsigned, Type *, Value *, Value *,
+ Instruction *) const;
PHIExpression *createPHIExpression(Instruction *, bool &HasBackEdge,
bool &OriginalOpsConstant) const;
const DeadExpression *createDeadExpression() const;
}
const Expression *NewGVN::createBinaryExpression(unsigned Opcode, Type *T,
- Value *Arg1,
- Value *Arg2) const {
+ Value *Arg1, Value *Arg2,
+ Instruction *I) const {
auto *E = new (ExpressionAllocator) BasicExpression(2);
E->setType(T);
E->op_push_back(lookupOperandLeader(Arg2));
Value *V = SimplifyBinOp(Opcode, E->getOperand(0), E->getOperand(1), SQ);
- if (const Expression *SimplifiedE = checkSimplificationResults(E, nullptr, V))
+ if (const Expression *SimplifiedE = checkSimplificationResults(E, I, V))
return SimplifiedE;
return E;
}
// expression.
assert(II->getNumArgOperands() == 2 &&
"Expect two args for recognised intrinsics.");
- return createBinaryExpression(
- Opcode, EI->getType(), II->getArgOperand(0), II->getArgOperand(1));
+ return createBinaryExpression(Opcode, EI->getType(),
+ II->getArgOperand(0),
+ II->getArgOperand(1), I);
}
}
}
}
void NewGVN::addAdditionalUsers(Value *To, Value *User) const {
+ assert(User && To != User);
if (isa<Instruction>(To))
AdditionalUsers[To].insert(User);
}
conf.lib.clang_disposeString(self)
@staticmethod
- def from_result(res, fn, args):
+ def from_result(res, fn=None, args=None):
assert isinstance(res, _CXString)
return conf.lib.clang_getCString(res)
"""The command-line option that disables this diagnostic."""
disable = _CXString()
conf.lib.clang_getDiagnosticOption(self, byref(disable))
-
- return conf.lib.clang_getCString(disable)
+ return _CXString.from_result(disable)
def format(self, options=None):
"""
options = conf.lib.clang_defaultDiagnosticDisplayOptions()
if options & ~Diagnostic._FormatOptionsMask:
raise ValueError('Invalid format options')
- formatted = conf.lib.clang_formatDiagnostic(self, options)
- return conf.lib.clang_getCString(formatted)
+ return conf.lib.clang_formatDiagnostic(self, options)
def __repr__(self):
return "<Diagnostic severity %r, location %r, spelling %r>" % (
assert children[0].spelling.endswith('declared here')
assert children[0].location.line == 1
assert children[0].location.column == 1
+
+def test_diagnostic_string_repr():
+ tu = get_tu('struct MissingSemicolon{}')
+ assert len(tu.diagnostics) == 1
+ d = tu.diagnostics[0]
+
+ assert repr(d) == '<Diagnostic severity 3, location <SourceLocation file \'t.c\', line 1, column 26>, spelling "expected \';\' after struct">'
+
--- /dev/null
+import clang.cindex
+from clang.cindex import ExceptionSpecificationKind
+from .util import get_tu
+
+
+def find_function_declarations(node, declarations=[]):
+ if node.kind == clang.cindex.CursorKind.FUNCTION_DECL:
+ declarations.append((node.spelling, node.exception_specification_kind))
+ for child in node.get_children():
+ declarations = find_function_declarations(child, declarations)
+ return declarations
+
+
+def test_exception_specification_kind():
+ source = """int square1(int x);
+ int square2(int x) noexcept;
+ int square3(int x) noexcept(noexcept(x * x));"""
+
+ tu = get_tu(source, lang='cpp', flags=['-std=c++14'])
+
+ declarations = find_function_declarations(tu.cursor)
+ expected = [
+ ('square1', ExceptionSpecificationKind.NONE),
+ ('square2', ExceptionSpecificationKind.BASIC_NOEXCEPT),
+ ('square3', ExceptionSpecificationKind.COMPUTED_NOEXCEPT)
+ ]
+ assert declarations == expected
let Documentation = [DLLImportDocs];
}
-def SelectAny : InheritableAttr, TargetSpecificAttr<TargetWindows> {
+def SelectAny : InheritableAttr {
let Spellings = [Declspec<"selectany">, GCC<"selectany">];
- let Documentation = [Undocumented];
+ let Documentation = [SelectAnyDocs];
}
def Thread : Attr {
ensure that this class cannot be subclassed.
}];
}
+
+
+def SelectAnyDocs : Documentation {
+ let Category = DocCatType;
+ let Content = [{
+This attribute appertains to a global symbol, causing it to have a weak
+definition (`linkonce <https://llvm.org/docs/LangRef.html#linkage-types>`_),
+allowing the linker to select any definition.
+
+For more information see the
+`GCC documentation <https://gcc.gnu.org/onlinedocs/gcc-7.2.0/gcc/Microsoft-Windows-Variable-Attributes.html>`_
+or the `MSVC documentation <https://docs.microsoft.com/pl-pl/cpp/cpp/selectany>`_.
+}];
+}
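A minimal usage sketch of the attribute documented above (hypothetical globals; with the GCC spelling no longer restricted to Windows targets, either form gives the symbol a linkonce definition so the linker keeps a single copy; the __declspec spelling typically requires -fms-extensions or -fdeclspec outside MSVC environments):

  // Shared definition placed in a header included by several TUs.
  __declspec(selectany) int SharedCounter = 0;           // Microsoft spelling
  __attribute__((selectany)) int LookupTable[16] = {0};  // GCC spelling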
TARGET_BUILTIN(__builtin_ia32_ptestmd512, "UsV16iV16iUs", "", "avx512f")
TARGET_BUILTIN(__builtin_ia32_ptestmq512, "UcV8LLiV8LLiUc", "", "avx512f")
TARGET_BUILTIN(__builtin_ia32_pbroadcastd512_gpr_mask, "V16iiV16iUs", "", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_pbroadcastq512_mem_mask, "V8LLiLLiV8LLiUc", "", "avx512f")
TARGET_BUILTIN(__builtin_ia32_loaddqusi512_mask, "V16iiC*V16iUs", "", "avx512f")
TARGET_BUILTIN(__builtin_ia32_loaddqudi512_mask, "V8LLiLLiC*V8LLiUc", "", "avx512f")
TARGET_BUILTIN(__builtin_ia32_loadups512_mask, "V16ffC*V16fUs", "", "avx512f")
/// rules. For example, the RHS of (0 && foo()) is not evaluated. We can
/// evaluate the expression regardless of what the RHS is, but C only allows
/// certain things in certain situations.
- struct LLVM_ALIGNAS(/*alignof(uint64_t)*/ 8) EvalInfo {
+ struct EvalInfo {
ASTContext &Ctx;
/// EvalStatus - Contains information about the evaluation.
/// RAII object used to optionally suppress diagnostics and side-effects from
/// a speculative evaluation.
class SpeculativeEvaluationRAII {
- /// Pair of EvalInfo, and a bit that stores whether or not we were
- /// speculatively evaluating when we created this RAII.
- llvm::PointerIntPair<EvalInfo *, 1, bool> InfoAndOldSpecEval;
- Expr::EvalStatus Old;
+ EvalInfo *Info = nullptr;
+ Expr::EvalStatus OldStatus;
+ bool OldIsSpeculativelyEvaluating;
void moveFromAndCancel(SpeculativeEvaluationRAII &&Other) {
- InfoAndOldSpecEval = Other.InfoAndOldSpecEval;
- Old = Other.Old;
- Other.InfoAndOldSpecEval.setPointer(nullptr);
+ Info = Other.Info;
+ OldStatus = Other.OldStatus;
+ Other.Info = nullptr;
}
void maybeRestoreState() {
- EvalInfo *Info = InfoAndOldSpecEval.getPointer();
if (!Info)
return;
- Info->EvalStatus = Old;
- Info->IsSpeculativelyEvaluating = InfoAndOldSpecEval.getInt();
+ Info->EvalStatus = OldStatus;
+ Info->IsSpeculativelyEvaluating = OldIsSpeculativelyEvaluating;
}
public:
SpeculativeEvaluationRAII(
EvalInfo &Info, SmallVectorImpl<PartialDiagnosticAt> *NewDiag = nullptr)
- : InfoAndOldSpecEval(&Info, Info.IsSpeculativelyEvaluating),
- Old(Info.EvalStatus) {
+ : Info(&Info), OldStatus(Info.EvalStatus),
+ OldIsSpeculativelyEvaluating(Info.IsSpeculativelyEvaluating) {
Info.EvalStatus.Diag = NewDiag;
Info.IsSpeculativelyEvaluating = true;
}
// If the SVN_REPOSITORY is empty, try to use the SVN keyword. This helps us
// pick up a tag in an SVN export, for example.
- StringRef SVNRepository("$URL: https://llvm.org/svn/llvm-project/cfe/tags/RELEASE_500/final/lib/Basic/Version.cpp $");
+ StringRef SVNRepository("$URL: https://llvm.org/svn/llvm-project/cfe/tags/RELEASE_501/final/lib/Basic/Version.cpp $");
if (URL.empty()) {
URL = SVNRepository.slice(SVNRepository.find(':'),
SVNRepository.find("/lib/Basic"));
LValue CodeGenFunction::EmitOMPArraySectionExpr(const OMPArraySectionExpr *E,
bool IsLowerBound) {
- QualType BaseTy;
- if (auto *ASE =
- dyn_cast<OMPArraySectionExpr>(E->getBase()->IgnoreParenImpCasts()))
- BaseTy = OMPArraySectionExpr::getBaseOriginalType(ASE);
- else
- BaseTy = E->getBase()->getType();
+ QualType BaseTy = OMPArraySectionExpr::getBaseOriginalType(E->getBase());
QualType ResultExprTy;
if (auto *AT = getContext().getAsArrayType(BaseTy))
ResultExprTy = AT->getElementType();
getFieldAlignmentSource(BaseInfo.getAlignmentSource());
LValueBaseInfo FieldBaseInfo(fieldAlignSource, BaseInfo.getMayAlias());
+ QualType type = field->getType();
const RecordDecl *rec = field->getParent();
- if (rec->isUnion() || rec->hasAttr<MayAliasAttr>())
+ if (rec->isUnion() || rec->hasAttr<MayAliasAttr>() || type->isVectorType())
FieldBaseInfo.setMayAlias(true);
bool mayAlias = FieldBaseInfo.getMayAlias();
return LValue::MakeBitfield(Addr, Info, fieldType, FieldBaseInfo);
}
- QualType type = field->getType();
Address addr = base.getAddress();
unsigned cvr = base.getVRQualifiers();
bool TBAAPath = CGM.getCodeGenOpts().StructPathTBAA;
return nullptr;
}
+ /// \brief Get an LValue for the current ThreadID variable.
+ LValue getThreadIDVariableLValue(CodeGenFunction &CGF) override {
+ if (OuterRegionInfo)
+ return OuterRegionInfo->getThreadIDVariableLValue(CGF);
+ llvm_unreachable("No LValue for inlined OpenMP construct");
+ }
+
/// \brief Get the name of the capture helper.
StringRef getHelperName() const override {
if (auto *OuterRegionInfo = getOldCSI())
/// \param Init Initial expression of array.
/// \param SrcAddr Address of the original array.
static void EmitOMPAggregateInit(CodeGenFunction &CGF, Address DestAddr,
- QualType Type, const Expr *Init,
+ QualType Type, bool EmitDeclareReductionInit,
+ const Expr *Init,
const OMPDeclareReductionDecl *DRD,
Address SrcAddr = Address::invalid()) {
// Perform element-by-element initialization.
// Emit copy.
{
CodeGenFunction::RunCleanupsScope InitScope(CGF);
- if (DRD && (DRD->getInitializer() || !Init)) {
+ if (EmitDeclareReductionInit) {
emitInitWithReductionInitializer(CGF, DRD, Init, DestElementCurrent,
SrcElementCurrent, ElementTy);
} else
// captured region.
auto *PrivateVD =
cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
+ bool EmitDeclareReductionInit =
+ DRD && (DRD->getInitializer() || !PrivateVD->hasInit());
EmitOMPAggregateInit(CGF, PrivateAddr, PrivateVD->getType(),
- DRD ? ClausesData[N].ReductionOp : PrivateVD->getInit(),
+ EmitDeclareReductionInit,
+ EmitDeclareReductionInit ? ClausesData[N].ReductionOp
+ : PrivateVD->getInit(),
DRD, SharedLVal.getAddress());
}
// Build type kmp_routine_entry_t (if not built yet).
emitKmpRoutineEntryT(KmpInt32Ty);
// Build type kmp_task_t (if not built yet).
- if (KmpTaskTQTy.isNull()) {
- KmpTaskTQTy = C.getRecordType(createKmpTaskTRecordDecl(
- CGM, D.getDirectiveKind(), KmpInt32Ty, KmpRoutineEntryPtrQTy));
+ if (isOpenMPTaskLoopDirective(D.getDirectiveKind())) {
+ if (SavedKmpTaskloopTQTy.isNull()) {
+ SavedKmpTaskloopTQTy = C.getRecordType(createKmpTaskTRecordDecl(
+ CGM, D.getDirectiveKind(), KmpInt32Ty, KmpRoutineEntryPtrQTy));
+ }
+ KmpTaskTQTy = SavedKmpTaskloopTQTy;
+ } else if (D.getDirectiveKind() == OMPD_task) {
+ assert(D.getDirectiveKind() == OMPD_task &&
+ "Expected taskloop or task directive");
+ if (SavedKmpTaskTQTy.isNull()) {
+ SavedKmpTaskTQTy = C.getRecordType(createKmpTaskTRecordDecl(
+ CGM, D.getDirectiveKind(), KmpInt32Ty, KmpRoutineEntryPtrQTy));
+ }
+ KmpTaskTQTy = SavedKmpTaskTQTy;
}
auto *KmpTaskTQTyRD = cast<RecordDecl>(KmpTaskTQTy->getAsTagDecl());
// Build particular struct kmp_task_t for the given task.
/// deconstructors of firstprivate C++ objects */
/// } kmp_task_t;
QualType KmpTaskTQTy;
+ /// Saved kmp_task_t for task directive.
+ QualType SavedKmpTaskTQTy;
+ /// Saved kmp_task_t for taskloop-based directive.
+ QualType SavedKmpTaskloopTQTy;
/// \brief Type typedef struct kmp_depend_info {
/// kmp_intptr_t base_addr;
/// size_t len;
EmitBlock(LoopExit.getBlock());
}
-void CodeGenFunction::EmitOMPLinearClauseInit(const OMPLoopDirective &D) {
+bool CodeGenFunction::EmitOMPLinearClauseInit(const OMPLoopDirective &D) {
if (!HaveInsertPoint())
- return;
+ return false;
// Emit inits for the linear variables.
+ bool HasLinears = false;
for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
for (auto *Init : C->inits()) {
+ HasLinears = true;
auto *VD = cast<VarDecl>(cast<DeclRefExpr>(Init)->getDecl());
if (auto *Ref = dyn_cast<DeclRefExpr>(VD->getInit()->IgnoreImpCasts())) {
AutoVarEmission Emission = EmitAutoVarAlloca(*VD);
EmitIgnoredExpr(CS);
}
}
+ return HasLinears;
}
void CodeGenFunction::EmitOMPLinearClauseFinal(
CGF.EmitOMPSimdInit(S);
emitAlignedClause(CGF, S);
- CGF.EmitOMPLinearClauseInit(S);
+ (void)CGF.EmitOMPLinearClauseInit(S);
{
OMPPrivateScope LoopScope(CGF);
CGF.EmitOMPPrivateLoopCounters(S, LoopScope);
llvm::DenseSet<const Expr *> EmittedFinals;
emitAlignedClause(*this, S);
- EmitOMPLinearClauseInit(S);
+ bool HasLinears = EmitOMPLinearClauseInit(S);
// Emit helper vars inits.
std::pair<LValue, LValue> Bounds = CodeGenLoopBounds(*this, S);
// Emit 'then' code.
{
OMPPrivateScope LoopScope(*this);
- if (EmitOMPFirstprivateClause(S, LoopScope)) {
+ if (EmitOMPFirstprivateClause(S, LoopScope) || HasLinears) {
// Emit implicit barrier to synchronize threads and avoid data races on
// initialization of firstprivate variables and post-update of
// lastprivate variables.
auto IP = CGF.Builder.saveAndClearIP();
CGF.EmitBlock(Stack.back().ExitBlock.getBlock());
CodeGen(CGF);
- CGF.EmitBranchThroughCleanup(Stack.back().ContBlock);
+ CGF.EmitBranch(Stack.back().ContBlock.getBlock());
CGF.Builder.restoreIP(IP);
Stack.back().HasBeenEmitted = true;
}
/// and initializes them with the values according to OpenMP standard.
///
/// \param D Directive (possibly) with the 'linear' clause.
- void EmitOMPLinearClauseInit(const OMPLoopDirective &D);
+ /// \return true if at least one linear variable is found that should be
+ /// initialized with the value of the original variable, false otherwise.
+ bool EmitOMPLinearClauseInit(const OMPLoopDirective &D);
typedef const llvm::function_ref<void(CodeGenFunction & /*CGF*/,
llvm::Value * /*OutlinedFn*/,
: D(D), Triple(T), Args(Args), CachedRTTIArg(GetRTTIArgument(Args)),
CachedRTTIMode(CalculateRTTIMode(Args, Triple, CachedRTTIArg)),
EffectiveTriple() {
- if (Arg *A = Args.getLastArg(options::OPT_mthread_model))
- if (!isThreadModelSupported(A->getValue()))
- D.Diag(diag::err_drv_invalid_thread_model_for_target)
- << A->getValue() << A->getAsString(Args);
-
std::string CandidateLibPath = getArchSpecificLibPath();
if (getVFS().exists(CandidateLibPath))
getFilePaths().push_back(CandidateLibPath);
return new tools::baremetal::Linker(*this);
}
-std::string BareMetal::getThreadModel() const {
- return "single";
-}
-
-bool BareMetal::isThreadModelSupported(const StringRef Model) const {
- return Model == "single";
-}
-
std::string BareMetal::getRuntimesDir() const {
SmallString<128> Dir(getDriver().ResourceDir);
llvm::sys::path::append(Dir, "lib", "baremetal");
bool isPICDefaultForced() const override { return false; }
bool SupportsProfiling() const override { return false; }
bool SupportsObjCGC() const override { return false; }
- std::string getThreadModel() const override;
- bool isThreadModelSupported(const StringRef Model) const override;
RuntimeLibType GetDefaultRuntimeLibType() const override {
return ToolChain::RLT_CompilerRT;
Expanded.BraceWrapping.AfterFunction = true;
Expanded.BraceWrapping.AfterStruct = true;
Expanded.BraceWrapping.AfterUnion = true;
- Expanded.BraceWrapping.SplitEmptyFunction = false;
+ Expanded.BraceWrapping.SplitEmptyFunction = true;
Expanded.BraceWrapping.SplitEmptyRecord = false;
break;
case FormatStyle::BS_Stroustrup:
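For reference, SplitEmptyFunction (toggled above) only controls whether an empty function body may be collapsed when braces are wrapped onto their own line; a rough sketch of the two outcomes on a hypothetical function:

  // SplitEmptyFunction = true: the closing brace stays on its own line.
  void shutdown()
  {
  }

  // SplitEmptyFunction = false: the empty body collapses.
  void shutdown()
  {}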
__M);
}
+#ifdef __x86_64__
static __inline __m512i __DEFAULT_FN_ATTRS
_mm512_maskz_set1_epi64(__mmask8 __M, long long __A)
{
-#ifdef __x86_64__
return (__m512i) __builtin_ia32_pbroadcastq512_gpr_mask (__A,
(__v8di)
_mm512_setzero_si512 (),
__M);
-#else
- return (__m512i) __builtin_ia32_pbroadcastq512_mem_mask (__A,
- (__v8di)
- _mm512_setzero_si512 (),
- __M);
-#endif
}
+#endif
static __inline __m512 __DEFAULT_FN_ATTRS
_mm512_setzero_ps(void)
if (D->isInvalidDecl())
return false;
- if (D->isReferenced() || D->isUsed() || D->hasAttr<UnusedAttr>() ||
+ bool Referenced = false;
+ if (auto *DD = dyn_cast<DecompositionDecl>(D)) {
+ // For a decomposition declaration, warn if none of the bindings are
+ // referenced, instead of if the variable itself is referenced (which
+ // it is, by the bindings' expressions).
+ for (auto *BD : DD->bindings()) {
+ if (BD->isReferenced()) {
+ Referenced = true;
+ break;
+ }
+ }
+ } else if (!D->getDeclName()) {
+ return false;
+ } else if (D->isReferenced() || D->isUsed()) {
+ Referenced = true;
+ }
+
+ if (Referenced || D->hasAttr<UnusedAttr>() ||
D->hasAttr<ObjCPreciseLifetimeAttr>())
return false;
else
DiagID = diag::warn_unused_variable;
- Diag(D->getLocation(), DiagID) << D->getDeclName() << Hint;
+ Diag(D->getLocation(), DiagID) << D << Hint;
}
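A rough sketch of the case the decomposition-declaration comment above addresses (hypothetical code): the decomposition variable itself is always referenced by its bindings' expressions, so -Wunused-variable should fire only when none of the bindings are used:

  auto [Quotient, Remainder] = std::make_pair(N / D, N % D);
  // Warns only if neither Quotient nor Remainder is read afterwards.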
static void CheckPoppedLabel(LabelDecl *L, Sema &S) {
assert(isa<NamedDecl>(TmpD) && "Decl isn't NamedDecl?");
NamedDecl *D = cast<NamedDecl>(TmpD);
- if (!D->getDeclName()) continue;
-
// Diagnose unused variables in this scope.
if (!S->hasUnrecoverableErrorOccurred()) {
DiagnoseUnusedDecl(D);
DiagnoseUnusedNestedTypedefs(RD);
}
+ if (!D->getDeclName()) continue;
+
// If this was a forward reference to a label, verify it was defined.
if (LabelDecl *LD = dyn_cast<LabelDecl>(D))
CheckPoppedLabel(LD, *this);
IdentifierInfo *II = Name.getAsIdentifierInfo();
if (D.isDecompositionDeclarator()) {
- AddToScope = false;
// Take the name of the first declarator as our name for diagnostic
// purposes.
auto &Decomp = D.getDecompositionDeclarator();
NamedDecl *New =
ActOnVariableDeclarator(S, D, DC, TInfo, Previous,
MultiTemplateParamsArg(), AddToScope, Bindings);
- CurContext->addHiddenDecl(New);
+ if (AddToScope) {
+ S->AddDecl(New);
+ CurContext->addHiddenDecl(New);
+ }
if (isInOpenMPDeclareTargetContext())
checkDeclIsAllowedInOpenMPTarget(nullptr, New);
if (!NewStep->isValueDependent()) {
// Check that the step is integer expression.
SourceLocation StepLoc = NewStep->getLocStart();
- ExprResult Val =
- SemaRef.PerformOpenMPImplicitIntegerConversion(StepLoc, NewStep);
+ ExprResult Val = SemaRef.PerformOpenMPImplicitIntegerConversion(
+ StepLoc, getExprAsWritten(NewStep));
if (Val.isInvalid())
return true;
NewStep = Val.get();
PrevD = D;
}
}
- if (Ty->isDependentType() || Ty->isInstantiationDependentType() ||
+ if (SemaRef.CurContext->isDependentContext() || Ty->isDependentType() ||
+ Ty->isInstantiationDependentType() ||
Ty->containsUnexpandedParameterPack() ||
filterLookupForUDR<bool>(Lookups, [](ValueDecl *D) -> bool {
return !D->isInvalidDecl() &&
if (!CurContext->isDependentContext() &&
DSAStack->getParentOrderedRegionParam() &&
DepCounter != DSAStack->isParentLoopControlVariable(D).first) {
- Diag(ELoc, diag::err_omp_depend_sink_expected_loop_iteration)
- << DSAStack->getParentLoopControlVariable(
- DepCounter.getZExtValue());
+ ValueDecl *VD = DSAStack->getParentLoopControlVariable(
+ DepCounter.getZExtValue());
+ if (VD) {
+ Diag(ELoc, diag::err_omp_depend_sink_expected_loop_iteration)
+ << 1 << VD;
+ } else {
+ Diag(ELoc, diag::err_omp_depend_sink_expected_loop_iteration) << 0;
+ }
continue;
}
OpsOffs.push_back({RHS, OOK});
if (!CurContext->isDependentContext() && DepKind == OMPC_DEPEND_sink &&
TotalDepCount > VarList.size() &&
- DSAStack->getParentOrderedRegionParam()) {
- Diag(EndLoc, diag::err_omp_depend_sink_expected_loop_iteration)
+ DSAStack->getParentOrderedRegionParam() &&
+ DSAStack->getParentLoopControlVariable(VarList.size() + 1)) {
+ Diag(EndLoc, diag::err_omp_depend_sink_expected_loop_iteration) << 1
<< DSAStack->getParentLoopControlVariable(VarList.size() + 1);
}
if (DepKind != OMPC_DEPEND_source && DepKind != OMPC_DEPEND_sink &&
Decl *TemplateDeclInstantiator::VisitBindingDecl(BindingDecl *D) {
auto *NewBD = BindingDecl::Create(SemaRef.Context, Owner, D->getLocation(),
D->getIdentifier());
+ NewBD->setReferenced(D->isReferenced());
SemaRef.CurrentInstantiationScope->InstantiatedLocal(D, NewBD);
return NewBD;
}
# Determine range to format.
if vim.eval('exists("l:lines")') == '1':
- lines = vim.eval('l:lines')
+ lines = ['-lines', vim.eval('l:lines')]
elif vim.eval('exists("l:formatdiff")') == '1':
with open(vim.current.buffer.name, 'r') as f:
ondisk = f.read().splitlines();
<!--*************************************************************************-->
<h1>C++ Defect Report Support in Clang</h1>
<!--*************************************************************************-->
-<p>Last updated: $Date: 2017/10/04 20:27:42 $</p>
+<p>Last updated: $Date: 2017/12/24 23:15:40 $</p>
<h2 id="cxxdr">C++ defect report implementation status</h2>
<!--*************************************************************************-->
<h1>C++ Support in Clang</h1>
<!--*************************************************************************-->
-<p>Last updated: $Date: 2017/10/04 20:27:42 $</p>
+<p>Last updated: $Date: 2017/12/24 23:15:40 $</p>
<p>Clang fully implements all published ISO C++ standards (<a
href="#cxx98">C++98 / C++03</a>, <a
std::vector<CieRecord *> Cies;
// CIE records are uniquified by their contents and personality functions.
- llvm::DenseMap<std::pair<ArrayRef<uint8_t>, SymbolBody *>, CieRecord> CieMap;
+ llvm::DenseMap<std::pair<ArrayRef<uint8_t>, SymbolBody *>, CieRecord *>
+ CieMap;
};
class GotSection : public SyntheticSection {
/// info in final executables.
virtual bool isLazyPointer(const Reference &);
+ /// Kind of the reference in an __stub_helper entry that holds the offset of
+ /// its lazy bind commands within the lazy binding info.
+ virtual Reference::KindValue lazyImmediateLocationKind() = 0;
+
/// Returns true if the specified relocation is paired to the next relocation.
virtual bool isPairedReloc(const normalized::Relocation &) = 0;
return invalid;
}
+ Reference::KindValue lazyImmediateLocationKind() override {
+ return lazyImmediateLocation;
+ }
+
Reference::KindValue pointerKind() override {
return invalid;
}
return pointer64;
}
+ Reference::KindValue lazyImmediateLocationKind() override {
+ return lazyImmediateLocation;
+ }
+
uint32_t dwarfCompactUnwindType() override {
return 0x03000000;
}
return delta32;
}
+ Reference::KindValue lazyImmediateLocationKind() override {
+ return lazyImmediateLocation;
+ }
+
Reference::KindValue unwindRefToEhFrameKind() override {
return invalid;
}
return unwindFDEToFunction;
}
+ Reference::KindValue lazyImmediateLocationKind() override {
+ return lazyImmediateLocation;
+ }
+
Reference::KindValue unwindRefToEhFrameKind() override {
return unwindInfoToEhFrame;
}
SymbolScope &symbolScope);
void appendSection(SectionInfo *si, NormalizedFile &file);
uint32_t sectionIndexForAtom(const Atom *atom);
+ void fixLazyReferenceImm(const DefinedAtom *atom, uint32_t offset,
+ NormalizedFile &file);
typedef llvm::DenseMap<const Atom*, uint32_t> AtomToIndex;
struct AtomAndIndex { const Atom *atom; uint32_t index; SymbolScope scope; };
uint8_t segmentIndex;
uint64_t segmentStartAddr;
+ uint32_t offsetInBindInfo = 0;
+
for (SectionInfo *sect : _sectionInfos) {
segIndexForSection(sect, segmentIndex, segmentStartAddr);
for (const AtomInfo &info : sect->atomsAndOffsets) {
bind.symbolName = targ->name();
bind.addend = ref->addend();
nFile.lazyBindingInfo.push_back(bind);
+
+ // Now that we know the segmentOffset and the ordinal attribute,
+ // we can fix the helper's code
+
+ fixLazyReferenceImm(atom, offsetInBindInfo, nFile);
+
+ // 5 bytes of opcodes plus variable-size fields: the target name with its
+ // trailing '\0' and the ULEB128-encoded segment offset.
+ offsetInBindInfo +=
+ 6 + targ->name().size() + llvm::getULEB128Size(bind.segOffset);
+ if (bind.ordinal > BIND_IMMEDIATE_MASK)
+ offsetInBindInfo += llvm::getULEB128Size(bind.ordinal);
+ }
+ }
+ }
+ }
+}
+
+void Util::fixLazyReferenceImm(const DefinedAtom *atom, uint32_t offset,
+ NormalizedFile &file) {
+ for (const auto &ref : *atom) {
+ const DefinedAtom *da = dyn_cast<DefinedAtom>(ref->target());
+ if (da == nullptr)
+ return;
+
+ const Reference *helperRef = nullptr;
+ for (const Reference *hr : *da) {
+ if (hr->kindValue() == _archHandler.lazyImmediateLocationKind()) {
+ helperRef = hr;
+ break;
+ }
+ }
+ if (helperRef == nullptr)
+ continue;
+
+ // TODO: maybe get the fixed atom content from _archHandler ?
+ for (SectionInfo *sectInfo : _sectionInfos) {
+ for (const AtomInfo &atomInfo : sectInfo->atomsAndOffsets) {
+ if (atomInfo.atom == helperRef->target()) {
+ auto sectionContent =
+ file.sections[sectInfo->normalizedSectionIndex].content;
+ uint8_t *rawb =
+ file.ownedAllocations.Allocate<uint8_t>(sectionContent.size());
+ llvm::MutableArrayRef<uint8_t> newContent{rawb,
+ sectionContent.size()};
+ std::copy(sectionContent.begin(), sectionContent.end(),
+ newContent.begin());
+ llvm::support::ulittle32_t *loc =
+ reinterpret_cast<llvm::support::ulittle32_t *>(
+ &newContent[atomInfo.offsetInSection +
+ helperRef->offsetInAtom()]);
+ *loc = offset;
+ file.sections[sectInfo->normalizedSectionIndex].content = newContent;
}
}
}
.setMCJITMemoryManager(
std::unique_ptr<MemoryManager>(new MemoryManager(*this)))
.setCodeModel(codeModel)
- .setOptLevel(llvm::CodeGenOpt::Less)
- .setUseOrcMCJITReplacement(true);
+ .setOptLevel(llvm::CodeGenOpt::Less);
llvm::StringRef mArch;
llvm::StringRef mCPU;
bool ThreadInfo::ReadRegisterAsUint64(unsigned int register_id,
uint64_t &value) const {
- StringRef value_str(m_registers.lookup(register_id));
- if (value_str.getAsInteger(16, value)) {
+ std::string value_str(m_registers.lookup(register_id));
+ if (!llvm::to_integer(value_str, value, 16)) {
GTEST_LOG_(ERROR)
<< formatv("ThreadInfo: Unable to parse register value at {0}.",
register_id)
formatv("{0,=34:X-}", fmt_repeat(fmt_pad(N, 1, 3), 5)).str());
}
+TEST(FormatVariadicTest, MoveConstructor) {
+ auto fmt = formatv("{0} {1}", 1, 2);
+ auto fmt2 = std::move(fmt);
+ std::string S = fmt2;
+ EXPECT_EQ("1 2", S);
+}
TEST(FormatVariadicTest, ImplicitConversions) {
std::string S = formatv("{0} {1}", 1, 2);
EXPECT_EQ("1 2", S);
EXPECT_EQ(sys::detail::getHostCPUNameForARM("CPU implementer : 0x51\n"
"CPU part : 0x201"),
"kryo");
+ EXPECT_EQ(sys::detail::getHostCPUNameForARM("CPU implementer : 0x51\n"
+ "CPU part : 0xc00"),
+ "falkor");
// MSM8992/4 weirdness
StringRef MSM8992ProcCpuInfo = R"(
dryrun=""
stable_version=""
-revision=""
+revisions=""
BUGZILLA_BIN=""
BUGZILLA_CMD=""
release_metabug=""
echo " -user EMAIL Your email address for logging into bugzilla."
echo " -stable-version X.Y The stable release version (e.g. 4.0, 5.0)."
echo " -r NUM Revision number to merge (e.g. 1234567)."
+ echo " This option can be specified multiple times."
echo " -bugzilla-bin PATH Path to bugzilla binary (optional)."
echo " -assign-to EMAIL Assign bug to user with EMAIL (optional)."
echo " -dry-run Print commands instead of executing them."
;;
-r)
shift
- revision="$1"
+ revisions="$revisions $1"
;;
-project)
shift
4.0)
release_metabug="32061"
;;
+ 5.0)
+ release_metabug="34492"
+ ;;
*)
echo "error: invalid stable version"
exit 1
esac
bugzilla_version=$stable_version
-if [ -z "$revision" ]; then
- echo "error: revision not specified"
+if [ -z "$revisions" ]; then
+ echo "error: no revisions specified"
exit 1
fi
if [ $BUGZILLA_MAJOR_VERSION -eq 1 ]; then
- echo "***************************** Warning *******************************"
- echo "You are using an older version of the bugzilla cli tool. You will be "
- echo "able to create bugs, but this script will crash with the following "
- echo "error when trying to read back information about the bug you created:"
- echo ""
- echo "KeyError: 'internals'"
- echo ""
- echo "To avoid this error, use version 2.0.0 or higher"
- echo "https://pypi.python.org/pypi/python-bugzilla"
- echo "*********************************************************************"
+ echo "***************************** Error ** ********************************"
+ echo "You are using an older version of the bugzilla cli tool, which is not "
+ echo "supported. You need to use bugzilla cli version 2.0.0 or higher:"
+ echo "***********************************************************************"
+ exit 1
fi
BUGZILLA_CMD="$BUGZILLA_BIN --bugzilla=$bugzilla_url"
-bug_url="https://reviews.llvm.org/rL$revision"
+rev_string=""
+for r in $revisions; do
+ rev_string="$rev_string r$r"
+done
echo "Checking for duplicate bugs..."
-check_duplicates=`$BUGZILLA_CMD query --url $bug_url`
+check_duplicates=`$BUGZILLA_CMD query --blocked=$release_metabug --field="cf_fixed_by_commits=$rev_string"`
if [ -n "$check_duplicates" ]; then
echo "Duplicate bug found:"
echo "Done"
-# Get short commit summary
+# Get short commit summary. To avoid having a huge summary, we just
+# use the commit message for the first commit.
commit_summary=''
-commit_msg=`svn log -r $revision https://llvm.org/svn/llvm-project/`
-if [ $? -ne 0 ]; then
- echo "warning: failed to get commit message."
- commit_msg=""
-fi
+for r in $revisions; do
+ commit_msg=`svn log -r $r https://llvm.org/svn/llvm-project/`
+ if [ $? -ne 0 ]; then
+ echo "warning: failed to get commit message."
+ commit_msg=""
+ fi
+ break
+done
if [ -n "$commit_msg" ]; then
commit_summary=`echo "$commit_msg" | sed '4q;d' | cut -c1-80`
commit_summary=" : ${commit_summary}"
fi
-bug_summary="Merge r$revision into the $stable_version branch${commit_summary}"
+bug_summary="Merge${rev_string} into the $stable_version branch${commit_summary}"
-if [ -z "$dryrun" ]; then
- set -x
-fi
+set -x
+
+# Login to bugzilla
+$BUGZILLA_CMD login $bugzilla_user
-${dryrun} $BUGZILLA_CMD --login --user=$bugzilla_user new \
+bug_id=`${dryrun} $BUGZILLA_CMD --ensure-logged-in new \
-p "$bugzilla_product" \
- -c "$bugzilla_component" -u $bug_url --blocked=$release_metabug \
+ -c "$bugzilla_component" --blocked=$release_metabug \
-o All --priority=P --arch All -v $bugzilla_version \
+ --field="cf_fixed_by_commits=$rev_string" \
--summary "${bug_summary}" \
- -l "Is this patch OK to merge to the $stable_version branch?" \
+ -l "Is it OK to merge the following revision(s) to the $stable_version branch?" \
$bugzilla_assigned_to \
- --oneline
-
-set +x
+ -i`
if [ -n "$dryrun" ]; then
exit 0
fi
-if [ $BUGZILLA_MAJOR_VERSION -eq 1 ]; then
- success=`$BUGZILLA_CMD query --url $bug_url`
- if [ -z "$success" ]; then
- echo "Failed to create bug."
- exit 1
- fi
+set +x
- echo " Created new bug:"
- echo $success
+if [ -z "$bug_id" ]; then
+ echo "Failed to create bug."
+ exit 1
fi
+
+echo " Created new bug:"
+echo https://llvm.org/PR$bug_id
+
+# Add links to revisions
+for r in $revisions; do
+ $BUGZILLA_CMD --ensure-logged-in modify -l "https://reviews.llvm.org/rL$r" $bug_id
+done