File: lib/libc/stdlib/malloc.c
Disable sbrk() by undefining the MALLOC_DSS macro.
LLM: What does this mean?
/*
* MALLOC_DSS enables use of sbrk(2) to allocate chunks from the data storage
* segment (DSS). In an ideal world, this functionality would be completely
* unnecessary, but we are burdened by history and the lack of resource limits
* for anonymous mapped memory.
*/
#if 0
/* SVA: No sbrk()! */
#define MALLOC_DSS
#endif
Add ghost_sbrk(intptr_t incr) and use it to replace sbrk().
ghost_sbrk() uses secmemalloc() when GHOSTING is set in the environment.
secmemalloc(uintptr_t size) is implemented as a software interrupt (number 0x7f).
/*
* Function: secmemalloc()
*
* Description:
* Ask the SVA VM to allocate some ghost memory.
*/
static inline void *
secmemalloc (uintptr_t size) {
	void * ptr;
	__asm__ __volatile__ ("int $0x7f\n" : "=a" (ptr) : "D" (size));
	return ptr;
}
/* SVA: ghost sbrk() */
void *
ghost_sbrk (intptr_t incr) {
	/* Ghost memory is expected to start at 0xffffff0000000000. */
	static uintptr_t totalAllocated = 0;
	static uintptr_t currentSize = 0;
	static uintptr_t start = 0;
	void * oldBrk;

	/* Fall back to the real sbrk() when we are not ghosting. */
	if (getenv ("GHOSTING") == NULL)
		return sbrk (incr);

	//
	// If this is the first time we've been called, allocate some ghost
	// memory so that we have something with which to start.
	//
	if (!start) {
		start = (uintptr_t) secmemalloc (0x400000);
		totalAllocated = 0x400000;
		currentSize = 0;
	}

	oldBrk = (void *)(start + currentSize);

	// Caller is asking to increase the allocation space
	if (incr > 0) {
		//
		// If we have enough space remaining, simply increase the current size.
		// Otherwise, go allocate more secure memory first.
		//
		if ((totalAllocated - currentSize) >= (uintptr_t) incr) {
			currentSize += incr;
		} else {
			secmemalloc (incr - (totalAllocated - currentSize));
			totalAllocated = currentSize + incr;
			currentSize += incr;
		}
	}

	// Caller is asking to decrease the allocation space
	if (incr < 0) {
		currentSize += incr;
	}

	//
	// Return the previous break value; note that an input increment of zero
	// returns the current (unchanged) break value.
	//
	return oldBrk;
}
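A quick user-level sanity check (a hypothetical test, not part of the port): with GHOSTING unset in the environment, ghost_sbrk() should be indistinguishable from sbrk().

#include <assert.h>
#include <stdint.h>
#include <unistd.h>

extern void * ghost_sbrk (intptr_t incr);

int
main (void) {
	char * base = ghost_sbrk (0);      /* current break */
	char * old  = ghost_sbrk (4096);   /* grow the break by one page */
	assert (old == base);              /* sbrk() semantics: returns old break */
	assert ((char *) ghost_sbrk (0) == base + 4096);
	return 0;
}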
Disable TLS by defining the NO_TLS macro.
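A minimal sketch of what this amounts to in malloc.c (forcing the define unconditionally is an assumption about how the port does it):

/* SVA: force the allocator down its non-TLS code paths. */
#ifndef NO_TLS
#define NO_TLS
#endif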
LLM: Would erroneous kernel writes to the SVA-related fields below invalidate SVA's protections?
// file
// sys/sys/pcpu.h
/*
* This structure maps out the global data that needs to be kept on a
* per-cpu basis. The members are accessed via the PCPU_GET/SET/PTR
* macros defined in <machine/pcpu.h>. Machine dependent fields are
* defined in the PCPU_MD_FIELDS macro defined in <machine/pcpu.h>.
*/
struct pcpu {
	struct thread	*pc_curthread;		/* Current thread */
	struct thread	*pc_idlethread;		/* Idle thread */
	struct thread	*pc_fpcurthread;	/* Fp state owner */
	struct thread	*pc_deadthread;		/* Zombie thread or NULL */
	struct pcb	*pc_curpcb;		/* Current pcb */
	uint64_t	pc_switchtime;		/* cpu_ticks() at last csw */
	int		pc_switchticks;		/* `ticks' at last csw */
	u_int		pc_cpuid;		/* This cpu number */
	STAILQ_ENTRY(pcpu) pc_allcpu;
	struct lock_list_entry *pc_spinlocks;
	struct vmmeter	pc_cnt;			/* VM stats counters */
	long		pc_cp_time[CPUSTATES];	/* statclock ticks */
	struct device	*pc_device;
	void		*pc_netisr;		/* netisr SWI cookie */
	int		pc_dnweight;		/* vm_page_dontneed() */
	int		pc_domain;		/* Memory domain. */
	/*
	 * Stuff for read mostly lock
	 *
	 * XXXUPS remove as soon as we have per cpu variable
	 * linker sets.
	 */
	struct rm_queue	pc_rm_queue;
	uintptr_t	pc_dynamic;		/* Dynamic per-cpu data area */
	/*
	 * Keep MD fields last, so that CPU-specific variations on a
	 * single architecture don't result in offset variations of
	 * the machine-independent fields of the pcpu. Even though
	 * the pcpu structure is private to the kernel, some ports
	 * (e.g., lsof, part of gtop) define _KERNEL and include this
	 * header. While strictly speaking this is wrong, there's no
	 * reason not to keep the offsets of the MI fields constant
	 * if only to make kernel debugging easier.
	 */
	PCPU_MD_FIELDS;
	/*
	 * Fields added to support SVA. Normally, SVA would control the PCPU
	 * data structure, but we allow FreeBSD to maintain control to make
	 * porting simpler.
	 */
	void		*svaIContext;	/* Pointer to SVA CPUState */
	uint64_t	svaRSP;		/* Saved RSP on system call */
	uint64_t	svaRBP;		/* Saved RBP on system call */
	uint64_t	svaRDI;		/* Saved RDI on system call */
	uint64_t	svaRSI;		/* Saved RSI on system call */
} __aligned(CACHE_LINE_SIZE);
// file
// sys/sys/proc.h
struct thread {
	...
#if 1
	/* The thread that swapped out so this thread could swap in */
	struct thread	*prev;
	struct mtx	*mtx;
	uintptr_t	svaID;		/* Thread ID for SVA Thread */
	unsigned char	sva;		/* Flag whether SVA saved state on context switch */
	void		(*callout)(void *, struct trapframe *); /* Thread startup function */
	void		*callarg;	/* Thread startup argument */
#endif
};
sys/amd64/amd64/trap.c:
Add void fr_sva_trap(unsigned trapno, void * trapAddr) to replace void trap(struct trapframe *frame).
Use trap_pfault_sva instead of trap_pfault.
Panic on unexpected trap types:
if ((type != T_BPTFLT) && (type != T_NMI))
	panic ("SVA: trap: %lx: %d\n", frame->tf_rip, type);
mp_machdep.c:
Preserve common_tss[cpu].tss_ist3, which holds the SVA IST configuration:
#if 0
common_tss[cpu] = common_tss[0];
#else
/*
 * SVA: Don't reconfigure the SVA IST configuration.
 */
{
	uintptr_t ist = common_tss[cpu].tss_ist3;
	common_tss[cpu] = common_tss[0];
	common_tss[cpu].tss_ist3 = ist;
}
#endif
lidt($r_idt);
sva_init_secondary() to initialize the processor.
sva_load_cr0(cr0) instead of load_cr0(cr0).

machdep.c:
wrmsr(MSR_KGSBASE, (u_int64_t)pc);
Set pcpu[i].svaIContext from sva_getCPUState(&common_tss[i]).
Call sva_init_primary().
sva_register_general_exception(IDT_DE, fr_sva_trap) to replace setidt(IDT_DE, &IDTVEC(div)); a sketch of the combined sequence follows.
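Pieced together, the machdep.c sequence reads roughly like this (ordering and the sva_init_primary()/sva_getCPUState() signatures are assumptions):

/* Sketch: BSP initialization under SVA, using the calls named above. */
wrmsr (MSR_KGSBASE, (u_int64_t)pc);	/* kernel GS base -> per-CPU data */
sva_init_primary ();			/* let SVA initialize this processor */
pc->svaIContext = sva_getCPUState (&common_tss[0]);
/* Register handlers with SVA instead of writing the IDT directly: */
sva_register_general_exception (IDT_DE, fr_sva_trap);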
cpu_switch.S:
cpu_switch_sva(old, new, mtx) to replace cpu_switch().

exception.S:
Add SVAsyscall, called during IDTVEC(fast_syscall), and SVAsysret.
locore.S & apic_vector.S & atpic_vector.S & cpu_switch.S: use the checked ret from sva/cfi.h: first check the CFI label at the return address, then return.
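Conceptually, in C (an illustration of the check, not the actual contents of sva/cfi.h; the 32-bit label value is hypothetical):

#include <stdint.h>

/* Return nonzero if the return target carries the expected CFI label. */
static inline int
cfi_valid_return (const void * retaddr) {
	const uint32_t CFI_LABEL = 0xbeefbeefu;	/* hypothetical label value */
	return (*(const uint32_t *)retaddr == CFI_LABEL);
}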
APIC: sys/x86/x86/local_apic.c:
apic_isr_sva<x> instead of apic_isr<x>;
sva_reg: spurious_handler instead of setidt: spuriousint;
sva_reg: lapic_handle_timer instead of setidt: timerint;
sva_reg: lapic_handle_error instead of setidt: errorint;
Add lapic_handle_intr_sva(int vector), apic_isr_sva1(unsigned int vector), apic_isr_sva2(..);
lapic_handle_timer(int type) instead of lapic_handle_timer(struct trapframe *frame); // defined in include/apicvar.h
A sketch of the trapframe-free entry point follows.
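A sketch of lapic_handle_intr_sva() (the body mirrors the stock lapic_handle_intr(); how SVA supplies a frame to the handlers, NULL here, is an assumption):

void
lapic_handle_intr_sva (int vector)
{
	struct intsrc *isrc;

	/* Same source lookup as lapic_handle_intr(), minus the trapframe. */
	isrc = intr_lookup_source (apic_idt_to_irq (PCPU_GET(apic_id), vector));
	intr_execute_handlers (isrc, NULL);
}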
sys/kern/subr_trap.c:

sys/amd64/amd64/vm_machdep.c:
Add void kernel_thread_trampoline(struct thread * td): call fork_exit() with td->callout and td->callarg.
#if 1
void
kernel_thread_trampoline (struct thread * td)
{
	extern void fork_exit(void (*callout)(void *, struct trapframe *),
	                      void *arg,
	                      struct trapframe *frame);

	/*
	 * Call the specified function.  There is no trap frame to pass along.
	 */
	fork_exit (td->callout, td->callarg, NULL);
	return;
}
#endif
cpu_fork(…): initialize the new thread's fields: .sva, .callout, .callarg, .svaID.
sva_init_stack: initializes the thread's state inside SVA; parameters: kernel_thread_trampoline is passed in so a forked process starts properly (through fork_exit()). A sketch appears after the cpu_set_fork_handler() listing below.
cpu_set_fork_handler(td, func, arg):
/*
 * Intercept the return address from a freshly forked process that has NOT
 * been scheduled yet.
 *
 * This is needed to make kernel threads stay in kernel mode.
 */
void
cpu_set_fork_handler(td, func, arg)
	struct thread *td;
	void (*func)(void *);
	void *arg;
{
	/*
	 * Note that the trap frame follows the args, so the function
	 * is really called like this: func(arg, frame);
	 */
	td->td_pcb->pcb_r12 = (long) func;	/* function */
	td->td_pcb->pcb_rbx = (long) arg;	/* first arg */
#if 1
	td->callout = func;
	td->callarg = arg;
#endif
}
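As promised above, a sketch of the matching cpu_fork() additions (field names come from the struct thread excerpt; the sva_init_stack() signature is an assumption):

extern uintptr_t sva_init_stack (unsigned char * sp, uintptr_t len,
                                 void * func, uintptr_t arg1,
                                 uintptr_t arg2, uintptr_t arg3);	/* assumed */

td2->sva = 0;				/* SVA has not saved state for this thread yet */
td2->callout = fork_return;		/* later overridden by cpu_set_fork_handler() */
td2->callarg = td2;
td2->svaID = sva_init_stack ((unsigned char *) td2->td_kstack,
                             td2->td_kstack_pages * PAGE_SIZE,
                             kernel_thread_trampoline,
                             (uintptr_t) td2, 0, 0);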
cpu_exit(struct thread *td)
cpu_set_syscall_retval(struct thread *td, int error):
Use sva_icontext_setretval(td->td_retval[1], td->td_retval[0], 0); to replace the assignments to td->td_frame->tf_rax/tf_rdx/tf_rflags.
Use sva_icontext_restart(0, 0) to restart a system call. A sketch follows.
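Put together (the switch structure mirrors the stock cpu_set_syscall_retval(); the error-case arguments to the intrinsic are assumptions):

void
cpu_set_syscall_retval (struct thread *td, int error)
{
	switch (error) {
	case 0:
		/* Replaces the td->td_frame->tf_rax/tf_rdx/tf_rflags stores. */
		sva_icontext_setretval (td->td_retval[1], td->td_retval[0], 0);
		break;
	case ERESTART:
		/* Replaces rewinding tf_rip past the syscall instruction. */
		sva_icontext_restart (0, 0);
		break;
	case EJUSTRETURN:
		break;
	default:
		/* Assumed: third argument signals an error (carry flag). */
		sva_icontext_setretval (0, error, 1);
		break;
	}
}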
cpu_set_upcall(struct thread *td, struct thread *td0): add cpu_create_upcall(td, td0, func, arg).
sys/amd64/amd64/machdep.c:
exec_setregs(td, struct image_params *imgp, u_long stack): reset registers to default values on exec; use sva_translate(imgp->entry_addr).

sys/vm/vm_glue.c
sys/kern/kern_kthread.c
sys/kern/kern_fork.c
context switch: sys/kern/sched_ule.c and sys/kern/sched_4bsd.c:
sys/amd64/amd64/sigtramp.S
sys/amd64/amd64/machdep.c:
sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask): use sva_save_icontext(), sva_ialloca(sigframe, ..), and sva_ipush_function5.
sys_sigreturn(td, struct sigreturn_args *uap): use sva_load_icontext().
A sketch of both paths follows.
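A skeletal sketch of the two paths (the sva_ialloca() and sva_ipush_function5() argument lists are hypothetical; the frame-filling and copyout steps of the stock sendsig() are elided):

/* In sendsig(): checkpoint, carve the signal frame, push the handler call. */
sva_save_icontext ();
sfp = sva_ialloca (sizeof (struct sigframe), 16);	/* assumed: size, alignment */
/* ... fill in and copyout the sigframe as in the stock sendsig() ... */
sva_ipush_function5 ((void *) catcher,			/* assumed argument order */
                     sig,
                     (uintptr_t) &sfp->sf_si,
                     (uintptr_t) &sfp->sf_uc,
                     0, 0);

/* In sys_sigreturn(): restore the checkpointed context. */
sva_load_icontext ();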
sys/conf/ldscript.amd64:
Add the .svamem section; export _svastart, _svaend, and SVAPTPages.

sys/amd64/amd64/pmap.c:
Call sva_mmu_init() to set the static addresses for kernel MMU initialization and to initialize CR3:
#if 1
/*
 * Set the static address locations in the struct here to aid in kernel MMU
 * initialization. Note that we pass in the page mapping for the pml4 page.
 * This function will also initialize the cr3.
 */
sva_mmu_init (&((pdp_entry_t *)KPML4phys)[PML4PML4I],
              NPDEPG,
              firstaddr,
              (uintptr_t)btext,
              (uintptr_t)etext);
#endif
pmap_init_pat(): use sva_load_cr0 instead of load_cr0:
/* Disable PGE. */
cr4 = rcr4();
load_cr4(cr4 & ~CR4_PGE);

/* Disable caches (CD = 1, NW = 0). */
cr0 = rcr0();
#ifdef SVA_MMU
sva_load_cr0((cr0 & ~CR0_NW) | CR0_CD);
#else
load_cr0((cr0 & ~CR0_NW) | CR0_CD);
#endif

/* Flushes caches and TLBs. */
wbinvd();
invltlb();

/* Update PAT and index table. */
wrmsr(MSR_PAT, pat_msr);
for (i = 0; i < PAT_INDEX_SIZE; i++)
	pat_index[i] = pat_table[i];

/* Flush caches and TLBs again. */
wbinvd();
invltlb();

/* Restore caches and PGE. */
#ifdef SVA_MMU
sva_load_cr0(cr0);
#else
load_cr0(cr0);
#endif
load_cr4(cr4);
Use sva_update_l2_mapping instead of pde_store(); same parameters.
Use sva_update_l1_mapping instead of pte_store; same parameters.
Use sva_remove_mapping(pte/ptq) instead of pte_clear(pte) / pte_load_clear(ptq).
Add sva_remove_page in pmap_add_delayed_free_list.
A representative substitution is sketched below; the affected functions are then listed.
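For example, a pte_store() site changes along these lines (following the #ifdef SVA_MMU convention used in pmap_init_pat() above; per the "same parameters" note, the intrinsic takes the same arguments):

#ifdef SVA_MMU
	/* Let SVA validate and install the new level-1 (leaf) mapping. */
	sva_update_l1_mapping (pte, newpte);
#else
	pte_store (pte, newpte);
#endif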
_pmap_unwire_pte_hold(...): use sva_update_l2/l3/l4_mapping.
pmap_pinit(pmap_t pmap): update l4 mappings; sva_update_l4_mapping instead of pmap->pm_pml4[KPML4I + i] = …;
pmap_allocpde -> _pmap_allocpte(…): update l4, l3, l2, l1 mappings.
pmap_release(): update l4.
pmap_growkernel(): update l2/l3/l1.
pmap_collect(): remove mapping.
pmap_demote_pde(…): declare l1; update l2.
pmap_demote_pdpe: declare l2; update l2/l3.
pmap_promote_pde: update l2, l1.
pmap_protect_pde: update l2.
pmap_protect: update l1.
pmap_enter: update l1.
pmap_enter_pde: update l2.
pmap_enter_quick_locked: update l1.
pmap_object_init_pt: update l2.
pmap_change_wiring: update l1.
pmap_copy: update l1/l2.
pmap_remove_pages: remove mapping.
machdep.c: cpu_setregs(): sva_load_cr0(cr0).
amd64_mem.c: sva_load_cr0((cr0)) instead of load_cr0((cr0)). ==> LLM: cr0 must come from the privileged sandbox; CR0 cannot be a global/shared variable.
sys/kern/kern_sva.c
sys/amd64/amd64/sys_machdep.c
sys/sys/
sys/amd64/amd64/
sys/conf/
CFLAGS+= -mllvm -add-sfi -mllvm -enable-sfi-loadchecks -Xclang -backend-option -Xclang -x86-add-cfi
kern.post.mk: link libsva.a into the full kernel binary:
${FULLKERNEL}: ${SYSTEM_DEP} vers.o $S/../../../SVA/lib/libsva.a
	@rm -f ${.TARGET}
	@echo linking SVA ${.TARGET}
	${SYSTEM_LD} -L$S/../../../SVA/lib -lsva
	@${SYSTEM_CTFMERGE}
ldscript.amd64: add the 'svamem' section:
/* Create the SVA data section */
_svastart = ALIGN(0x1000);
.svamem ALIGN(0x1000) : {
	SVAPTPages = .;
	. = . + 4194304;	/* reserve 4 MB */
	*(svamem)
	_svaend = .;
}
NOTES: add makeoptions CONF_CFLAGS="-fno-builtin -I/usr/home/criswell/src/sva/SVA/include"
options: add SVA_MMU (header opt_sva_mmu.h)
sys/dev/aic7xxx/aicasm/Makefile: add CC=gcc
TODO: []
DONE:
sys/dev/aic7xxx/aicasm/Makefile
sys/conf/
sys/amd64/amd64/
sys/amd64
-/conf/{SVA, SVAMAC}; -/ia32/ia32_syscall.c; -/include/{apicvar.h, asm.h, asmacros.h, cpufunc.h, pmap.h}; -/linux32/linux32_locore.S;
sys/kern/; sys/sys/; sys/vm/; sys/x86/x86/local_apic.c;
Reference 1
"If you could revise the fundamental principles of computer system design to improve security... what would you change?"