The guest_mem_before_exec trace event
The definition of the trace event (in QEMU's trace-events file):
# tcg/tcg-op.c
# @vaddr: Access' virtual address.
# @info : Access' information (see below).
#
# Start virtual memory access (before any potential access violation).
#
# Does not include memory accesses performed by devices.
#
# Access information can be parsed as:
#
# struct mem_info {
#     uint8_t size_shift : 4; /* interpreted as "1 << size_shift" bytes */
#     bool    sign_extend: 1; /* sign-extended */
#     uint8_t endianness : 1; /* 0: little, 1: big */
#     bool    store      : 1; /* whether it is a store operation */
#             pad        : 1;
#     uint8_t mmuidx     : 4; /* mmuidx (softmmu only) */
# };
#
# Mode: user, softmmu
# Targets: TCG(all)
vcpu tcg guest_mem_before(TCGv vaddr, uint16_t info) "info=%d", "vaddr=0x%016"PRIx64" info=%d"
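The packed info word can be unpacked by following the struct mem_info layout above. A minimal sketch (decode_mem_info is an illustrative helper, not part of QEMU):

// illustrative decoder for the "info" argument, per the layout documented above
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static void decode_mem_info(uint16_t info)
{
    unsigned size_shift = info & 0xf;        /* bits 0-3                */
    bool sign_extend    = (info >> 4) & 1;   /* bit 4                   */
    bool big_endian     = (info >> 5) & 1;   /* bit 5: 0 little, 1 big  */
    bool store          = (info >> 6) & 1;   /* bit 6                   */
                                             /* bit 7: pad              */
    unsigned mmuidx     = (info >> 8) & 0xf; /* bits 8-11 (softmmu only)*/

    printf("%s size=%u bytes %s-endian %s mmuidx=%u\n",
           store ? "store" : "load", 1u << size_shift,
           big_endian ? "big" : "little",
           sign_extend ? "sign-extended" : "zero-extended", mmuidx);
}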
The function definitions of trace_guest_mem_before_exec and trace_guest_mem_before_trans (the translation-time counterpart) are auto-generated during the QEMU build to print the trace info in the format above.
The event-handling code is auto-generated as well; it is where the trace information gets written to the trace file:
// qemu-build/trace-root.c
void _simple_trace_guest_mem_before_exec(CPUState * __cpu, uint64_t vaddr, uint16_t info)
{
    TraceBufferRecord rec;
    if (!true) {
        return;
    }
    if (trace_record_start(&rec, _TRACE_GUEST_MEM_BEFORE_EXEC_EVENT.id, 8 + 8 + 8)) {
        return; /* Trace Buffer Full, Event Dropped ! */
    }
    trace_record_write_u64(&rec, (uintptr_t)(uint64_t *)__cpu);
    trace_record_write_u64(&rec, (uint64_t)vaddr);
    trace_record_write_u64(&rec, (uint64_t)info);
    trace_record_finish(&rec);
}
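On the load side, every cpu_ld*_mmuidx_ra() variant funnels through a common helper that fires the trace point before the access itself. A simplified sketch of that helper, abridged from accel/tcg/cputlb.c (roughly the QEMU 5.x shape; exact signatures differ between releases):

// abridged sketch of cpu_load_helper() in accel/tcg/cputlb.c
// (approximate, QEMU 5.x era; not a verbatim copy)
static inline uint64_t cpu_load_helper(CPUArchState *env, abi_ptr addr,
                                       int mmu_idx, uintptr_t retaddr,
                                       MemOp op, FullLoadHelper *full_load)
{
    // pack size/sign/endianness/store/mmuidx into the 16-bit info word
    uint16_t meminfo = trace_mem_get_info(op, mmu_idx, false);

    // guest_mem_before_exec fires here, before any potential access violation
    trace_guest_mem_before_exec(env_cpu(env), addr, meminfo);

    // perform the actual load (TLB fill / IO slow paths are handled inside)
    return full_load(env, addr, make_memop_idx(op, mmu_idx), retaddr);
}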
The call chains that reach trace_guest_mem_before_exec:
accel/tcg/cputlb.c: cpu_ldub/ldsb/lduw/ldsw/ldl/ldq_data_ra -> cpu_ldub/ldsb/lduw/ldsw/ldl/ldq_mmuidx_ra -> cpu_load/store_helper -> trace_guest_mem_before_exec
accel/tcg/cputlb.c: cpu_ldq_cap_data_ra -> cpu_load/store_helper -> trace_guest_mem_before_exec
accel/tcg/user-exec.c: cpu_ldub/ldsb/lduw/ldsw/ldl/ldq/stb/stw/stl/stq_data -> trace_guest_mem_before_exec
accel/tcg/atomic_common.inc.c: atomic_trace_rmw/ld/st_pre -> trace_guest_mem_before_exec
target/mips/op_helper_cheri.c: load_cap_from_memory() -> accel/tcg/cputlb.c: cpu_ldq_data_ra -> cpu_ldq_mmuidx_ra -> trace_guest_mem_before_exec
target/cheri-common/op_helper_cheri_common.c: load_cap_from_memory_128() -> accel/tcg/cputlb.c: cpu_ldq_cap_data_ra -> cpu_load/store_helper -> trace_guest_mem_before_exec: "avoid logging memory accesses that load capability components as normal memory accesses."
Open question: this last path only traces when the slow path (cpu_ldq_cap_data_ra) is taken; nothing is traced on a TLB hit. Is this a missing trace point in target/cheri-common/op_helper_cheri_common.c: load_cap_from_memory_128()?
// target/cheri-common/op_helper_cheri_common.c:
bool load_cap_from_memory_128(CPUArchState *env, uint64_t *pesbt,
                              uint64_t *cursor, uint32_t cb,
                              const cap_register_t *source, target_ulong vaddr,
                              target_ulong retpc, hwaddr *physaddr)
{
    ...
    void *host = probe_read(env, vaddr, CHERI_CAP_SIZE,
                            cpu_mmu_index(env, false), retpc);
    // When writing back pesbt we have to XOR with the NULL mask to ensure that
    // NULL capabilities have an all-zeroes representation.
    if (likely(host)) {
        // Fast path, host address in TLB
        // Lele: tracing needed here
        *pesbt = ldq_p((char *)host + CHERI_MEM_OFFSET_METADATA) ^
                 CC128_NULL_XOR_MASK;
        *cursor = ldq_p((char *)host + CHERI_MEM_OFFSET_CURSOR);
    } else {
        // Slow path for e.g. IO regions.
        qemu_maybe_log_instr_extra(env, "Using slow path for load from guest "
                                        "address " TARGET_FMT_plx "\n", vaddr);
        *pesbt = cpu_ldq_cap_data_ra(env, vaddr + CHERI_MEM_OFFSET_METADATA, retpc) ^
                 CC128_NULL_XOR_MASK;
        *cursor = cpu_ldq_cap_data_ra(env, vaddr + CHERI_MEM_OFFSET_CURSOR, retpc);
    }
    ...
}
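If this fast path is indeed a missing trace point, one possible shape of a fix is to fire the event before the two ldq_p() loads. This is only a sketch, not upstream code: the trace_mem_build_info() arguments (in particular treating the access as a single 16-byte, capability-sized load) are assumptions, and whether such loads should be logged at all is exactly the design question behind the comment quoted above.

// possible (hypothetical) placement of the trace point in the TLB-hit fast
// path of load_cap_from_memory_128(); not upstream code
if (likely(host)) {
    // describe the access as one 16-byte (1 << 4) target-endian load;
    // these trace_mem_build_info() arguments are assumptions for illustration
    uint16_t meminfo = trace_mem_build_info(4, false, MO_TE, false,
                                            cpu_mmu_index(env, false));
    trace_guest_mem_before_exec(env_cpu(env), vaddr, meminfo);

    *pesbt = ldq_p((char *)host + CHERI_MEM_OFFSET_METADATA) ^
             CC128_NULL_XOR_MASK;
    *cursor = ldq_p((char *)host + CHERI_MEM_OFFSET_CURSOR);
}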
Similarly, probe_read() is also used in other places, e.g.:
haddr = probe_read(...)
where the same fast-path/trace concern applies.
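For context, probe_read() is a thin wrapper around probe_access(): a non-NULL return means the page is resident in the TLB and the data can be read directly from host memory with ldq_p(), which is exactly why such fast paths never reach trace_guest_mem_before_exec. Roughly (prototypes approximate, from include/exec/exec-all.h; they vary across QEMU versions):

// approximate declarations from include/exec/exec-all.h
void *probe_access(CPUArchState *env, target_ulong addr, int size,
                   MMUAccessType access_type, int mmu_idx, uintptr_t retaddr);

static inline void *probe_read(CPUArchState *env, target_ulong addr, int size,
                               int mmu_idx, uintptr_t retaddr)
{
    // returns a host pointer on a TLB hit, NULL when the slow path
    // (e.g. IO regions, watchpoints) must be taken
    return probe_access(env, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
}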