Cpu_switch

Reference:

Called from the scheduler in sys/kern/sched_4bsd.c (see sched_switch()).
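
For context, here is a minimal sketch of how the scheduler hands control to cpu_switch. This is not verbatim CheriBSD code: the wrapper name below is hypothetical, and the real call sits inside sched_switch() in sched_4bsd.c after it has chosen the next runnable thread.

/*
 * Hypothetical wrapper, only to illustrate the calling convention of
 * cpu_switch(old, new, mtx); the real caller is sched_switch().
 */
static void
switch_to(struct thread *old, struct thread *new)
{
	struct mtx *mtx = old->td_lock;	/* lock that cpu_switch stores back
					   into old->td_lock (arg a2) */

	if (old != new)
		cpu_switch(old, new, mtx);	/* returns when 'old' is
						   rescheduled */
}

cpu_switch() saves the old thread's register context, hands mtx to old->td_lock, and resumes the new thread; it only "returns" to this call site once the old thread is picked to run again.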

Defined in sys/mips/mips/switch.S (CheriBSD commit 6bc17b8c7806fa912beef6efe69f71c599752f7e):

/*
 * cpu_switch(struct thread *old, struct thread *new, struct mutex *mtx);
 *	a0 - old
 *	a1 - new
 *	a2 - mtx
 * Find the highest priority process and resume it.
 */
NESTED(cpu_switch, CALLFRAME_SIZ, ra)
	mfc0	t0, MIPS_COP_0_STATUS		# t0 = saved status register
	nop
	nop
	and     a3, t0, ~(MIPS_SR_INT_IE)	
	mtc0	a3, MIPS_COP_0_STATUS		# Disable all interrupts
	ITLBNOPFIX
	beqz	a0, mips_sw1
	move	a3, a0
	PTR_L	a0, TD_PCB(a0)		# load PCB addr of curproc
	SAVE_U_PCB_CONTEXT(sp, PCB_REG_SP, a0)		# save old sp
	PTR_SUBU	sp, sp, CALLFRAME_SIZ
	REG_S	ra, CALLFRAME_RA(sp)
	.mask	0x80000000, (CALLFRAME_RA - CALLFRAME_SIZ)
	SAVE_U_PCB_CONTEXT(s0, PCB_REG_S0, a0)		# do a 'savectx()'
	SAVE_U_PCB_CONTEXT(s1, PCB_REG_S1, a0)
	SAVE_U_PCB_CONTEXT(s2, PCB_REG_S2, a0)
	SAVE_U_PCB_CONTEXT(s3, PCB_REG_S3, a0)
	SAVE_U_PCB_CONTEXT(s4, PCB_REG_S4, a0)
	SAVE_U_PCB_CONTEXT(s5, PCB_REG_S5, a0)
	SAVE_U_PCB_CONTEXT(s6, PCB_REG_S6, a0)
	SAVE_U_PCB_CONTEXT(s7, PCB_REG_S7, a0)
	SAVE_U_PCB_CONTEXT(s8, PCB_REG_S8, a0)
	SAVE_U_PCB_CONTEXT(ra, PCB_REG_RA, a0)		# save return address
	SAVE_U_PCB_CONTEXT(t0, PCB_REG_SR, a0)		# save status register
	SAVE_U_PCB_CONTEXT(gp, PCB_REG_GP, a0)
	jal	getpc
	nop
getpc:
	SAVE_U_PCB_CONTEXT(ra, PCB_REG_PC, a0)		# save return address

#ifdef CPU_CHERI
	SAVE_U_PCB_CHERIKFRAME(a0);
#endif

#ifdef CPU_CNMIPS

	lw	t2, TD_MDFLAGS(a3)		# get md_flags
	and	t1, t2, MDTD_COP2USED
	beqz	t1, cop2_untouched
	nop

	/* Clear cop2used flag */
	and	t2, t2, ~MDTD_COP2USED
	sw	t2, TD_MDFLAGS(a3)

	and	t2, t0, ~MIPS_SR_COP_2_BIT	# clear COP_2 enable bit
	SAVE_U_PCB_CONTEXT(t2, PCB_REG_SR, a0)	# save status register

	RESTORE_U_PCB_REG(t0, PS, a0)		# get CPU status register
	and	t2, t0, ~MIPS_SR_COP_2_BIT	# clear COP_2 enable bit
	SAVE_U_PCB_REG(t2, PS, a0)		# save status register

	/* preserve a0..a3 */
	move	s0, a0
	move	s1, a1
	move	s2, a2
	move	s3, a3

	/* does kernel own COP2 context? */
	lw	t1, TD_COP2OWNER(a3)		# get md_cop2owner
	beqz	t1, userland_cop2		# 0 - it's userland context
	nop

	PTR_L	a0, TD_COP2(a3)
	beqz	a0, no_cop2_context
	nop

	j	do_cop2_save
	nop

userland_cop2:

	PTR_L	a0, TD_UCOP2(a3)
	beqz	a0, no_cop2_context
	nop

do_cop2_save:
	PTR_LA	t9, octeon_cop2_save
	jalr	t9
	nop

no_cop2_context:
	move	a3, s3
	move	a2, s2
	move	a1, s1
	move	a0, s0

cop2_untouched:
#endif

	PTR_S	a2, TD_LOCK(a3)			# Switchout td_lock 

mips_sw1:
#ifdef CPU_QEMU_MALTA
	/*
	 * If per-thread tracing is disabled, skip this block and don't muck
	 * with emulator state.
	 */
	PTR_LA	t2, _C_LABEL(qemu_trace_perthread)	# Load address of var
	lw	t2, 0(t2)				# Load var value
	beqz	t2, done_qemu_tracing			# Skip if value is 0
	nop

	/*
	 * If per-thread tracing is enabled, update Qemu-internal state to
	 * reflect the thread we are switching to.  Don't disable before
	 * checking, so we can ensure that we get a full trace if both the
	 * 'old' and 'new' threads have tracing enabled.
	 */
	lw	t2, TD_MDFLAGS(a1)			# Get new->md_flags
	andi	t2, t2, MDTD_QTRACE			# Mask Qemu trace bit
	beqz	t2, disable_qemu_tracing		# Branch if not set
	nop

enable_qemu_tracing:
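	/*
	 * Writes to $zero are architectural no-ops; the CHERI-modified QEMU
	 * recognizes them as magic NOPs that switch per-thread instruction
	 * tracing on (0xbeef) and off (0xdead, below).
	 */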
	li	$0, 0xbeef
	b	done_qemu_tracing
	nop

disable_qemu_tracing:
	li	$0, 0xdead

done_qemu_tracing:
#endif

#if defined(SMP) && defined(SCHED_ULE)
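	/*
	 * Under ULE on SMP, a thread being handed to this CPU may still have
	 * td_lock pointing at blocked_lock while its previous CPU finishes
	 * switching away from it; spin here until the lock is released.
	 */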
	PTR_LA	t0, _C_LABEL(blocked_lock)
blocked_loop:
	PTR_L	t1, TD_LOCK(a1)
	beq	t0, t1, blocked_loop
	nop
#endif
	move	s7, a1	# Store newthread
/*
 * Switch to new context.
 */
	GET_CPU_PCPU(a3)
	PTR_S	a1, PC_CURTHREAD(a3)
	PTR_L	a2, TD_PCB(a1)
	PTR_S	a2, PC_CURPCB(a3)
	PTR_L	v0, TD_KSTACK(a1)		# va of 1st half of kstack
#if defined(__mips_n64)
	PTR_LI	s0, MIPS_XKSEG_START
#else
	PTR_LI	s0, MIPS_KSEG2_START		# If Uarea addr is below kseg2,
#endif /* __mips_n64 */
	bltu	v0, s0, sw2			# no need to insert in TLB.
	PTE_L	a1, TD_UPTE + 0(s7)		# a1 = u. pte #0
	PTE_L	a2, TD_UPTE + PTESIZE(s7)	# a2 = u. pte #1
/*
 * Wiredown the USPACE of newproc in TLB entry#0.  Check whether target
 * USPACE is already in another place of TLB before that, and if so
 * invalidate that TLB entry.
 * NOTE: This is hard coded to UPAGES == 2.
 * Also, there should be no TLB faults at this point.
 */
	MTC0		v0, MIPS_COP_0_TLB_HI	# VPN = va
	HAZARD_DELAY
	tlbp					# probe VPN
	HAZARD_DELAY
	mfc0		s0, MIPS_COP_0_TLB_INDEX
	HAZARD_DELAY

	# MIPS_KSEG0_START + (2 * index * PAGE_SIZE) -> MIPS_COP_0_TLB_HI
	PTR_LI		t1, MIPS_KSEG0_START		# invalidate tlb entry
#ifdef KSTACK_LARGE_PAGE
	bltz		s0, inval_nxt1
#else
	bltz		s0, entry0set
#endif /* KSTACK_LARGE_PAGE */
	nop
	sll		s0, PAGE_SHIFT + 1
	PTR_ADDU	t1, s0
	MTC0		t1, MIPS_COP_0_TLB_HI
	PTE_MTC0	zero, MIPS_COP_0_TLB_LO0
	PTE_MTC0	zero, MIPS_COP_0_TLB_LO1
	MTC0		zero, MIPS_COP_0_TLB_PG_MASK
	HAZARD_DELAY
	tlbwi
	HAZARD_DELAY

#ifdef KSTACK_LARGE_PAGE
/*
 * With a KSTACK_PAGE_SIZE of 16K and PAGE_SIZE of 4K it is possible that
 * a second TLB entry is currently mapping the kernel thread stack as a
 * regular 4K sized page(s). Check for this case and, if so, invalidate
 * that TLB entry as well.
 */
#if (PAGE_SIZE != 4096) && (KSTACK_PAGE_SIZE != 16384)
#error PAGE_SIZE is not 4K or KSTACK_PAGE_SIZE is not 16K.
#endif
inval_nxt1:
	move		v1, v0
	PTR_ADDU	v1, PAGE_SIZE * 2
	MTC0		v1, MIPS_COP_0_TLB_HI		# VPN = va
	HAZARD_DELAY
	tlbp						# probe VPN
	HAZARD_DELAY
	mfc0		s0, MIPS_COP_0_TLB_INDEX
	HAZARD_DELAY

	# MIPS_KSEG0_START + (2 * index * PAGE_SIZE) -> MIPS_COP_0_TLB_HI
	PTR_LI		t1, MIPS_KSEG0_START		# invalidate tlb entry
	bltz		s0, entry0set
	nop
	sll		s0, PAGE_SHIFT + 1
	PTR_ADDU	t1, s0
	MTC0		t1, MIPS_COP_0_TLB_HI
	PTE_MTC0	zero, MIPS_COP_0_TLB_LO0
	PTE_MTC0	zero, MIPS_COP_0_TLB_LO1
	MTC0		zero, MIPS_COP_0_TLB_PG_MASK
	HAZARD_DELAY
	tlbwi
	HAZARD_DELAY
#endif /* KSTACK_LARGE_PAGE */

entry0set:
	MTC0		v0, MIPS_COP_0_TLB_HI		# set VPN again
	HAZARD_DELAY
/* SMP!! - Works only for  unshared TLB case - i.e. no v-cpus */
	mtc0		zero, MIPS_COP_0_TLB_INDEX	# TLB entry #0
	HAZARD_DELAY
	PTE_MTC0	a1, MIPS_COP_0_TLB_LO0		# upte[0]
	HAZARD_DELAY
	PTE_MTC0	a2, MIPS_COP_0_TLB_LO1		# upte[1]
	HAZARD_DELAY
#ifdef KSTACK_LARGE_PAGE
	li		t1, KSTACK_TLBMASK_MASK
	MTC0		t1, MIPS_COP_0_TLB_PG_MASK
	HAZARD_DELAY
#else /* ! KSTACK_LARGE_PAGE */
	MTC0		zero, MIPS_COP_0_TLB_PG_MASK
	HAZARD_DELAY
#endif /* ! KSTACK_LARGE_PAGE */
	tlbwi					# set TLB entry #0
	HAZARD_DELAY
#ifdef KSTACK_LARGE_PAGE
	MTC0		zero, MIPS_COP_0_TLB_PG_MASK
	HAZARD_DELAY
#endif /* KSTACK_LARGE_PAGE */
/*
 * Now running on new u struct.
 */
sw2:
	PTR_L	s0, TD_PCB(s7)
	RESTORE_U_PCB_CONTEXT(sp, PCB_REG_SP, s0)
	PTR_LA	t9, _C_LABEL(pmap_activate)	# s7 = new proc pointer
	jalr	t9				# s7 = new proc pointer
	move	a0, s7				# BDSLOT
/*
 * Restore registers and return.
 */
	move	a0, s0
	move	a1, s7
	RESTORE_U_PCB_CONTEXT(gp, PCB_REG_GP, a0)
	RESTORE_U_PCB_CONTEXT(v0, PCB_REG_SR, a0)	# restore kernel context
	RESTORE_U_PCB_CONTEXT(ra, PCB_REG_RA, a0)
	RESTORE_U_PCB_CONTEXT(s0, PCB_REG_S0, a0)
	RESTORE_U_PCB_CONTEXT(s1, PCB_REG_S1, a0)
	RESTORE_U_PCB_CONTEXT(s2, PCB_REG_S2, a0)
	RESTORE_U_PCB_CONTEXT(s3, PCB_REG_S3, a0)
	RESTORE_U_PCB_CONTEXT(s4, PCB_REG_S4, a0)
	RESTORE_U_PCB_CONTEXT(s5, PCB_REG_S5, a0)
	RESTORE_U_PCB_CONTEXT(s6, PCB_REG_S6, a0)
	RESTORE_U_PCB_CONTEXT(s7, PCB_REG_S7, a0)
	RESTORE_U_PCB_CONTEXT(s8, PCB_REG_S8, a0)

#ifdef CPU_CHERI
	RESTORE_U_PCB_CHERIKFRAME(a0);
#endif

	mfc0	t0, MIPS_COP_0_STATUS		# t0 = current status register
	and	t0, t0, MIPS_SR_INT_MASK	# keep only the live interrupt mask
	and	v0, v0, ~MIPS_SR_INT_MASK	# drop the saved interrupt mask
	or	v0, v0, t0			# merge mask into saved SR
	mtc0	v0, MIPS_COP_0_STATUS		# install the new thread's SR
	ITLBNOPFIX
/*
 * Set the new thread's TLS pointer.
 *
 * Note that this code is removed if the CPU doesn't support ULRI by
 * remove_userlocal_code() in cpu.c.
 */
#ifdef CPU_CHERI
	# Get TLS address
	clc	$c3, a1, TD_MDTLS($ddc)
	PTR_L	t1, TD_MDTLS_TCB_OFFSET(a1)	# Get TLS/TCB offset
	cincoffset $c3, $c3, t1
	cwritehwr $c3, $chwr_userlocal
	cgetaddr v0, $c3
#else
	PTR_L	t0, TD_MDTLS(a1)		# Get TLS pointer
	PTR_L	t1, TD_MDTLS_TCB_OFFSET(a1)	# Get TLS/TCB offset
	PTR_ADDU v0, t0, t1
#endif
	MTC0	v0, MIPS_COP_0_USERLOCAL, 2	# write it to ULR for rdhwr

	j	ra
	nop
END(cpu_switch)
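
As a reading aid, the control flow above can be summarized in C-like pseudocode. This is only a sketch: the helper names are invented, and the assembly above is the authoritative code.

/*
 * C-like outline of cpu_switch(old, new, mtx).  Helper names are invented.
 */
void
cpu_switch_outline(struct thread *old, struct thread *new, struct mtx *mtx)
{
	disable_interrupts();				/* clear MIPS_SR_INT_IE */
	if (old != NULL) {
		save_callee_saved(old->td_pcb);		/* s0-s8, sp, gp, ra, SR, PC */
		/* CPU_CHERI: also save the kernel capability frame. */
		/* CPU_CNMIPS: save the COP2 (Octeon) context if it was used. */
		old->td_lock = mtx;			/* switch out old's lock */
	}
	/* SMP + SCHED_ULE: spin until new->td_lock != &blocked_lock. */
	PCPU_SET(curthread, new);
	PCPU_SET(curpcb, new->td_pcb);
	/* Kernel stack in mapped space?  Wire it into TLB entry 0 (UPAGES == 2). */
	pmap_activate(new);				/* switch the address space */
	restore_callee_saved(new->td_pcb);		/* incl. SR, keeping the live
							   interrupt mask bits */
	set_tls_userlocal(new);				/* TLS pointer for rdhwr */
	/* Returns on new's kernel stack, at the PC saved when new switched out. */
}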
