llvm/lib/Target/Mips/Mips.h
llvm/lib/Target/Mips/CheriStackInvalidatePass.cpp
Pass Invoked at:
llvm/lib/Target/Mips/MipsTargetMachine.cpp: void addPostRegAlloc() override.
Machine function pass: class CheriInvalidatePass : public MachineFunctionPass.
Runs over every machine function.
There is an #if 0 block; if enabled, it would, for every function, consult the cheri.sensitive.functions metadata.
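A minimal skeleton of what such a post-register-allocation machine function pass looks like, assuming only the class and factory names from these notes; the actual invalidation logic in CheriStackInvalidatePass.cpp is not reproduced here, and the factory's return type is a guess:
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
using namespace llvm;

namespace {
class CheriInvalidatePass : public MachineFunctionPass {
public:
  static char ID;
  CheriInvalidatePass() : MachineFunctionPass(ID) {}
  StringRef getPassName() const override { return "CHERI stack invalidation"; }
  bool runOnMachineFunction(MachineFunction &MF) override {
    bool Modified = false;
    // The real pass inspects/rewrites machine instructions here; this stub
    // only shows the traversal shape of a post-RA MachineFunctionPass.
    for (MachineBasicBlock &MBB : MF)
      for (MachineInstr &MI : MBB)
        (void)MI;
    return Modified;
  }
};
} // end anonymous namespace

char CheriInvalidatePass::ID = 0;

namespace llvm {
// Factory used from MipsPassConfig::addPostRegAlloc() (see below); the real
// declaration lives in llvm/lib/Target/Mips/Mips.h.
FunctionPass *createCheriInvalidatePass() { return new CheriInvalidatePass(); }
} // namespace llvm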
Add bounds information to alloca instructions.
Tracking commit: 2bda57cca54edb8e928de715bfcb6638ea3e1210
llvm/lib/CodeGen/CheriBoundAllocas.cpp: class CheriBoundAllocas, a module pass.
Bails out early if the alloca address space is not a fat pointer: if (!DL.isFatPointer(AllocaAS)).
Builds I8CapTy and SizeTy using the module context (M->getContext()), the data layout and the capability address space.
For every function F in the module M:
Gets the TargetMachine and TargetLowering from TargetPassConfig.
Allocas.clear(); visit(F);
Picks BoundedStackFn according to UseRematerializableIntrinsic, fetching the declaration for the chosen intrinsic ID: Intrinsic::getDeclaration(M, BoundedStackCap, SizeTy);
For every AllocaInst *AI in Allocas:
BoundsChecker.findUsesThatNeedBounds(): now all or nothing.
// We need to convert it to an i8* for the intrinsic. Note: we must
// not reuse a bitcast since otherwise that results in spilling the
// register that was incremented and doing a setbounds in a different
// basic block. This is stupid and we should be either using the
// bounded capability everywhere or be doing inc+setoffset in the
// other block.
Instruction *AllocaI8 =
cast<Instruction>(B.CreateBitCast(AI, I8CapTy));
auto WithBounds = B.CreateCall(SetBoundsIntrin, {AllocaI8, Size});
const_cast<Use *>(U)->set(B.CreateBitCast(WithBounds, AllocaTy));
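The Size operand passed to the setbounds call above is not defined in this excerpt. A minimal sketch of how it could be derived from the alloca, written as a hypothetical helper; the real pass may compute it differently and also handles cases this does not:
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
using namespace llvm;

// Illustrative only: static element size, multiplied by the (possibly dynamic)
// array size for variable-length allocas.
static Value *allocaSizeInBytes(AllocaInst *AI, IRBuilder<> &B, Type *SizeTy) {
  const DataLayout &DL = AI->getModule()->getDataLayout();
  Value *Size =
      ConstantInt::get(SizeTy, DL.getTypeAllocSize(AI->getAllocatedType()));
  Value *NumElems = AI->getArraySize();
  if (!isa<ConstantInt>(NumElems) || !cast<ConstantInt>(NumElems)->isOne())
    Size = B.CreateMul(Size, B.CreateZExtOrTrunc(NumElems, SizeTy));
  return Size;
}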
llvm/lib/CodeGen/CodeGen.cpp: calls initializeCheriBoundAllocasPass(Registry);
in llvm::initializeCodeGen(...)
llvm/include/llvm/InitializePasses.h: void initializeCheriBoundAllocasPass(PassRegistry &);
llvm/include/llvm/CodeGen/Passes.h:
/// List of target independent CodeGen pass IDs.
namespace llvm {
///...
/// Create CHERI pass to bound allocas.
ModulePass *createCheriBoundAllocasPass();
} // End llvm namespace
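A hedged sketch of the registration boilerplate these declarations hook into, following the standard INITIALIZE_PASS pattern noted in Reference 1 at the end of this section; the macro arguments, the pass's display name and the class body below are guesses, and only the overall shape (static ID, macro, factory) is the point:
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
using namespace llvm;

namespace {
// Stub standing in for the real class; the InstVisitor base is what lets
// runOnModule simply call visit(F) to collect the allocas of a function.
class CheriBoundAllocas : public ModulePass,
                          public InstVisitor<CheriBoundAllocas> {
public:
  static char ID;
  CheriBoundAllocas() : ModulePass(ID) {
    initializeCheriBoundAllocasPass(*PassRegistry::getPassRegistry());
  }
  void visitAllocaInst(AllocaInst &AI) { Allocas.push_back(&AI); }
  bool runOnModule(Module &M) override { return false; } // real work omitted

private:
  SmallVector<AllocaInst *, 16> Allocas;
};
} // end anonymous namespace

char CheriBoundAllocas::ID = 0;
INITIALIZE_PASS(CheriBoundAllocas, "cheri-bound-allocas",
                "CHERI bound stack allocations", false, false)

ModulePass *llvm::createCheriBoundAllocasPass() {
  return new CheriBoundAllocas();
}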
file: llvm/lib/Transforms/CHERICap/FoldCapIntrinsics.cpp
Fold Capability Intrinsics.
Requires TargetLibraryInfoWrapperPass; the work happens in runOnModule:
bool runOnModule(Module &M) override {
if (skipModule(M))
return false;
TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
Modified = false;
DL = &M.getDataLayout();
LLVMContext &C = M.getContext();
I8CapTy = Type::getInt8PtrTy(C, 200);
CapAddrTy = Type::getIntNTy(C, DL->getPointerBaseSizeInBits(200));
CapSizeTy = Type::getIntNTy(C, DL->getIndexSizeInBits(200));
// Don't add these intrinsics to the module if none of them are used:
IncOffset = M.getFunction(
Intrinsic::getName(Intrinsic::cheri_cap_offset_increment, CapSizeTy));
SetOffset =
M.getFunction(Intrinsic::getName(Intrinsic::cheri_cap_offset_set,
CapSizeTy));
GetOffset =
M.getFunction(Intrinsic::getName(Intrinsic::cheri_cap_offset_get,
CapSizeTy));
SetAddr =
M.getFunction(Intrinsic::getName(Intrinsic::cheri_cap_address_set,
CapAddrTy));
GetAddr =
M.getFunction(Intrinsic::getName(Intrinsic::cheri_cap_address_get,
CapAddrTy));
// at least one intrinsic was used -> we need to run the fold
// setoffset/incoffset pass
if (IncOffset || SetOffset || GetOffset) {
// Ensure that all the intrinsics exist in the module
if (!IncOffset)
IncOffset = Intrinsic::getDeclaration(
&M, Intrinsic::cheri_cap_offset_increment, CapSizeTy);
if (!SetOffset)
SetOffset = Intrinsic::getDeclaration(
&M, Intrinsic::cheri_cap_offset_set, CapSizeTy);
if (!GetOffset)
GetOffset = Intrinsic::getDeclaration(
&M, Intrinsic::cheri_cap_offset_get, CapSizeTy);
// TODO: does the order here matter?
foldIncOffset();
foldSetOffset(&M);
}
if (SetAddr || GetAddr) {
// Ensure that all the intrinsics exist in the module
if (!SetAddr)
SetAddr = Intrinsic::getDeclaration(
&M, Intrinsic::cheri_cap_address_set, CapAddrTy);
if (!GetAddr)
GetAddr = Intrinsic::getDeclaration(
&M, Intrinsic::cheri_cap_address_get, CapAddrTy);
if (!IncOffset)
IncOffset = Intrinsic::getDeclaration(
&M, Intrinsic::cheri_cap_offset_increment, CapSizeTy);
// TODO: does the order here matter?
foldSetAddress(&M);
}
foldGetIntrinisics(&M);
return Modified;
}
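For context, a minimal sketch of the kind of fold this pass performs, assuming only the GetOffset and SetOffset handles collected above; the helper name and rewrite strategy are illustrative, not the pass's actual code (the real foldSetOffset/foldGetIntrinisics handle more patterns):
#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// getoffset(setoffset(C, X)) can be folded to X, because setoffset overwrites
// the offset field unconditionally; both values have type CapSizeTy.
static bool foldGetOfSetOffset(Function *GetOffset, Function *SetOffset) {
  bool Changed = false;
  if (!GetOffset || !SetOffset)
    return Changed;
  for (User *U : make_early_inc_range(GetOffset->users())) {
    auto *Get = dyn_cast<CallInst>(U);
    if (!Get || Get->getCalledFunction() != GetOffset)
      continue;
    auto *Set = dyn_cast<CallInst>(Get->getArgOperand(0));
    if (!Set || Set->getCalledFunction() != SetOffset)
      continue;
    Get->replaceAllUsesWith(Set->getArgOperand(1));
    Get->eraseFromParent();
    Changed = true;
  }
  return Changed;
}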
file: llvm/lib/Transforms/CHERICap/PureCapABICalls.cpp
The pass that transforms PCC-relative calls into direct calls (so that they can be inlined) and then back again (so that we generate the correct code).
class: CheriCapDirectCalls
bool runOnFunction(Function &F) override {
Modified = false;
DeadInstructions.clear();
visit(F);
for (auto *V : DeadInstructions)
RecursivelyDeleteTriviallyDeadInstructions(V);
return Modified;
}
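A hedged sketch of the kind of instruction-visitor rewrite the direct-call conversion described above could perform; this is an illustration only, not the actual CheriCapDirectCalls logic, and the class and member names below are invented for the example:
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Illustrative visitor: if a call's callee is merely a cast of a known
// Function with a matching function type, call the Function directly so the
// inliner can see through it; the now-dead cast is queued for later cleanup.
struct DirectCallSketch : InstVisitor<DirectCallSketch> {
  SmallVector<Instruction *, 8> DeadInstructions;
  bool Modified = false;

  void visitCallInst(CallInst &CI) {
    Value *Callee = CI.getCalledOperand();
    auto *F = dyn_cast<Function>(Callee->stripPointerCasts());
    if (!F || F == CI.getCalledFunction())
      return;
    if (F->getFunctionType() != CI.getFunctionType())
      return; // don't rewrite through a type-changing cast
    CI.setCalledFunction(F);
    if (auto *CastI = dyn_cast<Instruction>(Callee))
      DeadInstructions.push_back(CastI);
    Modified = true;
  }
};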
A utility function pass; for every instruction it gets:
the called function CalledFunc and the attribute list AL for this instruction.
LogAllocSize:
a) What LogAllocSize does:
it reads the alloc_size attribute arguments into std::pair<unsigned, Optional<unsigned>> AllocSize;
and records the set-bounds statistics:
cheri::CSetBoundsStats->add(Alignment, KnownSize, "function with alloc_size", cheri::SetBoundsPointerSource::Heap, "call to " + (CalledFunc ? CalledFunc->getName() : "function pointer"), cheri::inferSourceLocation(&I), SizeMultipleOf)
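A hedged sketch of how a known constant allocation size can be derived from the alloc_size attribute at a call site; knownAllocSize is a hypothetical helper, and only I, AL and the AllocSize pair correspond to names used in the notes above:
#include "llvm/ADT/Optional.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Returns 0 if no constant size can be determined.
static uint64_t knownAllocSize(CallInst &I, const AttributeList &AL) {
  Attribute Attr =
      AL.getAttribute(AttributeList::FunctionIndex, Attribute::AllocSize);
  if (!Attr.isValid())
    return 0; // no alloc_size attribute on this call site / callee
  // alloc_size(N) or alloc_size(N, M): indices of the size argument and the
  // optional element-count argument.
  std::pair<unsigned, Optional<unsigned>> AllocSize = Attr.getAllocSizeArgs();
  auto *SizeC = dyn_cast<ConstantInt>(I.getArgOperand(AllocSize.first));
  if (!SizeC)
    return 0; // size argument is not a compile-time constant
  uint64_t KnownSize = SizeC->getZExtValue();
  if (AllocSize.second) {
    auto *ElemsC = dyn_cast<ConstantInt>(I.getArgOperand(*AllocSize.second));
    if (!ElemsC)
      return 0;
    KnownSize *= ElemsC->getZExtValue();
  }
  return KnownSize;
}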
file: llvm/lib/IR/Type.cpp
PointerType *PointerType::get(Type *EltTy, unsigned AddressSpace)
PointerType *PointerType::get(Type *EltTy, unsigned AddressSpace) {
assert(EltTy && "Can't get a pointer to <null> type!");
assert(isValidElementType(EltTy) && "Invalid type for pointer element!");
LLVMContextImpl *CImpl = EltTy->getContext().pImpl;
// Since AddressSpace #0 is the common case, we special case it.
PointerType *&Entry = AddressSpace == 0 ? CImpl->PointerTypes[EltTy]
: CImpl->ASPointerTypes[std::make_pair(EltTy, AddressSpace)];
if (!Entry)
Entry = new (CImpl->Alloc) PointerType(EltTy, AddressSpace);
return Entry;
}
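A small usage example of the uniquing behaviour above, assuming address space 200 is the CHERI capability address space (as in the Type::getInt8PtrTy(C, 200) call earlier in these notes):
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
#include <cassert>
using namespace llvm;

int main() {
  LLVMContext C;
  Type *I8Ty = Type::getInt8Ty(C);
  // Both calls hit the same CImpl->ASPointerTypes cache entry.
  PointerType *Cap1 = PointerType::get(I8Ty, 200);
  PointerType *Cap2 = Type::getInt8PtrTy(C, 200);
  assert(Cap1 == Cap2 && "pointer types are uniqued per (element type, AS)");
  return 0;
}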
Related declarations: llvm/include/llvm/InitializePasses.h and llvm/include/llvm/Passes.h.
llvm/lib/Target/Mips/MipsTargetMachine.cpp (full excerpts below):
extern "C" void LLVMInitializeMipsTarget() {
// Register the target.
RegisterTargetMachine<MipsebTargetMachine> X(getTheMipsTarget());
RegisterTargetMachine<MipselTargetMachine> Y(getTheMipselTarget());
RegisterTargetMachine<MipsebTargetMachine> A(getTheMips64Target());
RegisterTargetMachine<MipselTargetMachine> B(getTheMips64elTarget());
RegisterTargetMachine<MipsCheriTargetMachine> C(getTheMipsCheriTarget());
PassRegistry *PR = PassRegistry::getPassRegistry();
...
initializeCHERICapDirectCallsPass(*PR);
initializeCHERICapFoldIntrinsicsPass(*PR);
initializeCheriAddressingModeFolderPass(*PR);
initializeCheriRangeCheckerPass(*PR);
}
void addPostRegAlloc() override {
if (getMipsSubtarget().isCheri())
addPass(createCheriInvalidatePass());
}
};
} // end anonymous namespace
void MipsPassConfig::addIRPasses() {
TargetPassConfig::addIRPasses();
addPass(createAtomicExpandPass());
...
if (getMipsSubtarget().isCheri()) {
if (getOptLevel() != CodeGenOpt::Level::None) {
addPass(createCHERICapFoldIntrinsicsPass());
}
addPass(createCheriLoopPointerDecanonicalize());
addPass(createAggressiveDCEPass());
addPass(createCheriRangeChecker());
addPass(createCheriBoundAllocasPass());
}
}
// Install an instruction selector pass using
// the ISelDag to gen Mips code.
bool MipsPassConfig::addInstSelector() {
addPass(createMipsModuleISelDagPass());
addPass(createMips16ISelDag(getMipsTargetMachine(), getOptLevel()));
addPass(createMipsSEISelDag(getMipsTargetMachine(), getOptLevel()));
return false;
}
void MipsPassConfig::addPreRegAlloc() {
addPass(createMipsOptimizePICCallPass());
if (getMipsSubtarget().isCheri()) {
addPass(createCheriAddressingModeFolder());
// The CheriAddressingModeFolder can sometimes produce new dead instructions
// be sure to clean them up:
if (getOptLevel() != CodeGenOpt::Level::None)
addPass(&DeadMachineInstructionElimID);
addPass(createCheri128FailHardPass());
}
}
TargetTransformInfo
MipsTargetMachine::getTargetTransformInfo(const Function &F) {
if (Subtarget->allowMixed16_32()) {
LLVM_DEBUG(errs() << "No Target Transform Info Pass Added\n");
// FIXME: This is no longer necessary as the TTI returned is per-function.
return TargetTransformInfo(F.getParent()->getDataLayout());
}
LLVM_DEBUG(errs() << "Target Transform Info Pass Added\n");
return TargetTransformInfo(BasicTTIImpl(this, F));
}
// Implemented by targets that want to run passes immediately before
// machine code is emitted. return true if -print-machineinstrs should
// print out the code after the passes.
void MipsPassConfig::addPreEmitPass() {
// Expand pseudo instructions that are sensitive to register allocation.
addPass(createMipsExpandPseudoPass());
// The microMIPS size reduction pass performs instruction reselection for
// instructions which can be remapped to a 16 bit instruction.
addPass(createMicroMipsSizeReducePass());
// The delay slot filler pass can potentially create forbidden slot hazards
// for MIPSR6 and therefore it should go before MipsBranchExpansion pass.
addPass(createMipsDelaySlotFillerPass());
// This pass expands branches and takes care of the forbidden slot hazards.
// Expanding branches may potentially create forbidden slot hazards for
// MIPSR6, and fixing such a hazard may potentially break a branch by extending
// its offset out of range. That's why this pass combines these two tasks, and
// runs them alternately until one of them finishes without any changes. Only
// then we can be sure that all branches are expanded properly and no hazards
// exist.
// Any new pass should go before this pass.
addPass(createMipsBranchExpansion());
addPass(createMipsConstantIslandPass());
}
bool MipsPassConfig::addIRTranslator() {
addPass(new IRTranslator());
return false;
}
void MipsPassConfig::addPreLegalizeMachineIR() {
addPass(createMipsPreLegalizeCombiner());
}
bool MipsPassConfig::addLegalizeMachineIR() {
addPass(new Legalizer());
return false;
}
bool MipsPassConfig::addRegBankSelect() {
addPass(new RegBankSelect());
return false;
}
bool MipsPassConfig::addGlobalInstructionSelect() {
addPass(new InstructionSelect());
return false;
}
Reference 1
References: llvm/lib/CodeGen/CheriBoundAllocas.cpp
Overview: a module pass with an instruction visitor: class CheriBoundAllocas : public ModulePass, public InstVisitor<CheriBoundAllocas>
Initialization: initializeCheriBoundAllocasPass()
declared in llvm/include/llvm/InitializePasses.h
implemented in llvm/lib/CodeGen/CheriBoundAllocas.cpp via INITIALIZE_PASS(…)
called in:
llvm/lib/CodeGen/CheriBoundAllocas.cpp: CheriBoundAllocas()
llvm/lib/CodeGen/CodeGen.cpp: initializeCodeGen()
llvm/tools/opt/opt.cpp: main()
Pass creation: createCheriBoundAllocasPass()
declared in llvm/include/llvm/CodeGen/Passes.h
called in:
Mips: llvm/lib/Target/Mips/MipsTargetMachine.cpp: MipsPassConfig::addIRPasses()
RISCV: llvm/lib/Target/RISCV/RISCVTargetMachine.cpp: RISCVPassConfig::addIRPasses()
Adding the bound instruction:
Get the intrinsic: Intrinsic::getDeclaration(M, Intrinsic::cheri_cap_bounds_set, SizeTy);
// llvm/lib/CodeGen/CheriBoundAllocas.cpp
// Intrinsic handle as function
Function *SetBoundsIntrin = Intrinsic::getDeclaration(M, Intrinsic::cheri_cap_bounds_set, SizeTy);
Intrinsic::ID BoundedStackCap = UseRematerializableIntrinsic ?
Reference 2
References: llvm/lib/Target/Mips/CheriRangeChecker.cpp (adding an LLVM pass to the pipeline)
Overview: only in Mips, not in RISCV. A function pass with an instruction visitor: class CheriRangeChecker : public FunctionPass, public InstVisitor<CheriRangeChecker>;
Pass is initialized at void initializeCheriRangeCheckerPass(PassRegistry &); declared in llvm/lib/Target/Mips/Mips.h (the implementation of this init function is generated by the INITIALIZE_PASS_BEGIN macro).
<- llvm/lib/Target/Mips/MipsTargetMachine.cpp: LLVMInitializeMipsTarget(): initializeCheriRangeCheckerPass(*PR);
Pass is invoked at llvm/lib/Target/Mips/MipsTargetMachine.cpp: void MipsPassConfig::addIRPasses(): addPass(createCheriRangeChecker());
Functions/Steps:
runOnFunction(): visit(F): collect pairs of range info and the corresponding cast instruction in vectors: <AllocaOperands, xxxCastInst> in Casts and <AllocaOperands, ConstantCast> in ConstantCasts.
visitAddrSpaceCast(): get the ValueSource of the cast operand: auto Src = getValueSource(ASC.
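A hedged sketch of the collection step in visitAddrSpaceCast described above; getValueSource is the pass's own helper and is only approximated here with stripPointerCasts, and the vector element types are guesses:
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Illustrative only: remember addrspacecasts whose ultimate source is an
// alloca, pairing the alloca operand with the cast instruction (cf. the
// <AllocaOperands, xxxCastInst> pairs collected in Casts).
struct RangeCastCollector : InstVisitor<RangeCastCollector> {
  SmallVector<std::pair<AllocaInst *, AddrSpaceCastInst *>, 8> Casts;

  void visitAddrSpaceCast(AddrSpaceCastInst &ASC) {
    Value *Src = ASC.getOperand(0)->stripPointerCasts();
    if (auto *AI = dyn_cast<AllocaInst>(Src))
      Casts.push_back({AI, &ASC});
  }
};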
If you could revise the fundamental principles of computer system design to improve security... what would you change?