1 // Code for manipulating stack locations.
3 // Copyright (C) 2009-2015 Kevin O'Connor <kevin@koconnor.net>
5 // This file may be distributed under the terms of the GNU LGPLv3 license.
7 #include "biosvar.h" // GET_GLOBAL
8 #include "bregs.h" // CR0_PE
9 #include "fw/paravirt.h" // PORT_SMI_CMD
10 #include "hw/rtc.h" // rtc_use
11 #include "list.h" // hlist_node
12 #include "malloc.h" // free
13 #include "output.h" // dprintf
14 #include "romfile.h" // romfile_loadint
15 #include "stacks.h" // struct mutex_s
16 #include "string.h" // memset
17 #include "util.h" // useRTC
19 #define MAIN_STACK_MAX (1024*1024)
22 /****************************************************************
23 * 16bit / 32bit calling
24 ****************************************************************/
// Non-zero when the SMM-based 16->32bit trampoline is available; read via
// GET_GLOBAL() in __call32() below to choose call32_smm() over the direct
// mode-transition path.
38 int HaveSmmCall32 VARFSEG;
40 // Backup state in preparation for call32
// NOTE(review): this extract dropped source lines (the embedded line
// numbers jump) - the signature line, braces, and some statements are
// missing.  Compare against upstream SeaBIOS stacks.c before editing.
// Saves cpu state into the low-memory Call16Data struct so the matching
// restore path can rebuild the 16bit environment after a call32.
42 call32_prep(u8 method)
// Full backup is only needed when not using the SMM trampoline.
44 if (!CONFIG_CALL32_SMM || method != C16_SMM) {
48 // Called in 16bit protected mode?!
// Save %cr0 so the restore path can put back the cache-control bits.
50 SET_LOW(Call16Data.cr0, cr0);
52 // Backup fs/gs and gdt
53 SET_LOW(Call16Data.fs, GET_SEG(FS));
54 SET_LOW(Call16Data.gs, GET_SEG(GS));
57 SET_LOW(Call16Data.gdt.length, gdt.length);
58 SET_LOW(Call16Data.gdt.addr, gdt.addr);
60 // Enable a20 and backup its previous state
61 SET_LOW(Call16Data.a20, set_a20(1));
// Record the current stack segment - call16()/call16_smm() use it to
// rebuild a segmented %ss:%esp later.
65 SET_LOW(Call16Data.ss, GET_SEG(SS));
67 // Backup cmos index register and disable nmi
68 u8 cmosindex = inb(PORT_CMOS_INDEX);
69 outb(cmosindex | NMI_DISABLE_BIT, PORT_CMOS_INDEX);
71 SET_LOW(Call16Data.cmosindex, cmosindex);
// Remember which transition method was used so call16() can mirror it.
73 SET_LOW(Call16Data.method, method);
77 // Restore state backed up during call32
// NOTE(review): function name/signature not visible in this extract
// (presumably call32_post in upstream SeaBIOS) - lines were elided.
// Clears the saved method/ss and undoes everything call32_prep() saved.
81 u8 method = GET_LOW(Call16Data.method);
82 SET_LOW(Call16Data.method, 0);
83 SET_LOW(Call16Data.ss, 0);
// SMM-based transitions did not alter this state, so skip the restore.
85 if (!CONFIG_CALL32_SMM || method != C16_SMM) {
87 set_a20(GET_LOW(Call16Data.a20));
89 // Restore gdt and fs/gs
91 gdt.length = GET_LOW(Call16Data.gdt.length);
92 gdt.addr = GET_LOW(Call16Data.gdt.addr);
94 SET_SEG(FS, GET_LOW(Call16Data.fs));
95 SET_SEG(GS, GET_LOW(Call16Data.gs));
// Restore only the cache-control bits (CD/NW) of %cr0.
98 u32 cr0_caching = GET_LOW(Call16Data.cr0) & (CR0_CD|CR0_NW);
100 cr0_mask(CR0_CD|CR0_NW, cr0_caching);
103 // Restore cmos index register
// Also re-enables NMI if it was enabled before (index high bit).
104 outb(GET_LOW(Call16Data.cmosindex), PORT_CMOS_INDEX);
109 // Force next call16() to restore to a pristine cpu environment state
// Used (per upstream) when the boot phase changes the expected 16bit
// environment; zeroes Call16Data and selects the big/normal method.
111 call16_override(int big)
// Refuse to run on a stack above the fixed BIOS stack - the reset
// assumes the pristine environment's stack.
114 if (getesp() > BUILD_STACK_ADDR)
115 panic("call16_override with invalid stack\n");
116 memset(&Call16Data, 0, sizeof(Call16Data));
// NOTE(review): an 'if (big)' guard is presumably elided here - the
// C16_BIG assignment likely depends on the 'big' parameter; verify.
118 Call16Data.method = C16_BIG;
121 Call16Data.a20 = !CONFIG_DISABLE_A20;
125 // 16bit handler code called from call16() / call16_smm()
// Runs the requested 16bit function after restoring 16bit state
// (call32_post), passing through eax/edx and returning func's result.
127 call16_helper(u32 eax, u32 edx, u32 (*func)(u32 eax, u32 edx))
129 u8 method = call32_post();
130 u32 ret = func(eax, edx);
// (lines elided: presumably re-runs call32_prep(method) and returns ret)
// Assembler macros for flipping between 16bit and 32bit code emission
// inside the inline-asm trampolines below.
135 #define ASM32_SWITCH16 " .pushsection .text.32fseg." UNIQSEC "\n .code16\n"
136 #define ASM32_BACK32 " .popsection\n .code32\n"
137 #define ASM16_SWITCH32 " .code32\n"
138 #define ASM16_BACK16 " .code16gcc\n"
140 // Call a SeaBIOS C function in 32bit mode using smm trampoline
// NOTE(review): several asm lines are elided in this extract - read the
// fragment below as illustrative, not complete.
142 call32_smm(void *func, u32 eax)
145 dprintf(9, "call32_smm %p %x\n", func, eax);
146 call32_prep(C16_SMM);
149 // Backup esp / set esp to flat stack location
// Convert segmented %ss:%esp into a flat pointer (ss << 4 + esp).
151 " movl %%ss, %%eax\n"
153 " addl %%eax, %%esp\n"
155 // Transition to 32bit mode, call func, return to 16bit
// The SMI handler recognizes CALL32SMM_CMDID written to the SMI port
// and switches modes; %ebx carries the 32bit resume address.
156 " movl $" __stringify(CALL32SMM_CMDID) ", %%eax\n"
157 " movl $" __stringify(CALL32SMM_ENTERID) ", %%ecx\n"
158 " movl $(" __stringify(BUILD_BIOS_ADDR) " + 1f), %%ebx\n"
159 " outb %%al, $" __stringify(PORT_SMI_CMD) "\n"
// Second SMI request returns execution to 16bit mode.
168 " movl $" __stringify(CALL32SMM_CMDID) ", %%eax\n"
169 " movl $" __stringify(CALL32SMM_RETURNID) ", %%ecx\n"
171 " outb %%al, $" __stringify(PORT_SMI_CMD) "\n"
178 : "=&r" (bkup_esp), "+r" (eax)
180 : "eax", "ecx", "edx", "ebx", "cc", "memory");
183 dprintf(9, "call32_smm done %p %x\n", func, eax);
// Call a 16bit SeaBIOS function via the smm trampoline (inverse of
// call32_smm).  NOTE(review): asm lines are elided in this extract.
188 call16_smm(u32 eax, u32 edx, void *func)
191 if (!CONFIG_CALL32_SMM)
// Convert the flat function pointer to its f-segment-relative form.
193 func -= BUILD_BIOS_ADDR;
194 dprintf(9, "call16_smm %p %x %x\n", func, eax, edx);
// Flat address of the saved 16bit stack segment base (ss << 4).
195 u32 stackoffset = Call16Data.ss << 4;
200 // Transition to 16bit mode, call func, return to 32bit
201 " movl $" __stringify(CALL32SMM_CMDID) ", %%eax\n"
202 " movl $" __stringify(CALL32SMM_RETURNID) ", %%ecx\n"
203 " movl $(1f - " __stringify(BUILD_BIOS_ADDR) "), %%ebx\n"
204 " outb %%al, $" __stringify(PORT_SMI_CMD) "\n"
// Dispatch through the 16bit C entry thunk for call16_helper().
211 " calll _cfunc16_call16_helper\n"
214 " movl $" __stringify(CALL32SMM_CMDID) ", %%eax\n"
215 " movl $" __stringify(CALL32SMM_ENTERID) ", %%ecx\n"
217 " outb %%al, $" __stringify(PORT_SMI_CMD) "\n"
221 // Set esp to flat stack location
224 : "+r" (stackoffset), "+r" (eax), "+d" (edx)
226 : "eax", "ecx", "ebx", "cc", "memory");
230 // Call a 32bit SeaBIOS function from a 16bit SeaBIOS function.
// Prefers the SMM trampoline when available; otherwise performs a direct
// mode transition.  NOTE(review): lines elided in this extract.
232 __call32(void *func, u32 eax, u32 errret)
235 if (CONFIG_CALL32_SMM && GET_GLOBAL(HaveSmmCall32))
236 return call32_smm(func, eax);
237 // Jump directly to 32bit mode - this clobbers the 16bit segment
238 // selector registers.
239 int ret = call32_prep(C16_BIG);
242 u32 bkup_ss, bkup_esp;
244 // Backup ss/esp / set esp to flat stack location
251 // Transition to 32bit mode, call func, return to 16bit
// %edx carries the flat 32bit resume address for the transition thunk.
252 " movl $(" __stringify(BUILD_BIOS_ADDR) " + 1f), %%edx\n"
253 " jmp transition32_nmi_off\n"
257 " jmp transition16big\n"
264 : "=&r" (bkup_ss), "=&r" (bkup_esp), "+a" (eax)
266 : "ecx", "edx", "cc", "memory");
271 // Call a 16bit SeaBIOS function, restoring the mode from last call32().
// NOTE(review): asm lines elided in this extract.
273 call16(u32 eax, u32 edx, void *func)
// The 16bit stack segment only reaches the first MiB; refuse otherwise.
276 if (getesp() > MAIN_STACK_MAX)
277 panic("call16 with invalid stack\n");
278 if (CONFIG_CALL32_SMM && Call16Data.method == C16_SMM)
279 return call16_smm(eax, edx, func);
281 extern void transition16big(void);
282 extern void transition16(void);
// Pick "big real mode" transition when the prior call32 used it (or
// during POST), otherwise plain real-mode transition.
283 void *thunk = transition16;
284 if (Call16Data.method == C16_BIG || in_post())
285 thunk = transition16big;
286 func -= BUILD_BIOS_ADDR;
287 u32 stackseg = Call16Data.ss;
289 // Transition to 16bit mode
290 " movl $(1f - " __stringify(BUILD_BIOS_ADDR) "), %%edx\n"
292 // Setup ss/esp and call func
301 " calll _cfunc16_call16_helper\n"
302 // Return to 32bit and restore esp
304 " jmp transition32_nmi_off\n"
307 : "+a" (eax), "+c"(thunk), "+r"(stackseg)
308 : "r" (func), "r" (edx)
309 : "edx", "cc", "memory");
314 /****************************************************************
316 ****************************************************************/
318 // Space for a stack for 16bit code.
// Lives in the "low" memory segment (VARLOW); +1 so StackPos-style
// offsets stay within the array - TODO confirm against upstream.
319 u8 ExtraStack[BUILD_EXTRA_STACK_SIZE+1] VARLOW __aligned(8);
322 // Test if currently on the extra stack
// True only in 16bit mode with %ss pointing at the low segment and
// %esp above the start of ExtraStack.
326 return MODE16 && GET_SEG(SS) == SEG_LOW && getesp() > (u32)ExtraStack;
329 // Switch to the extra stack and call a function.
// NOTE(review): the asm body is mostly elided in this extract.
331 __stack_hop(u32 eax, u32 edx, void *func)
// Already on the extra stack - just call through.
333 if (on_extra_stack())
334 return ((u32 (*)(u32, u32))func)(eax, edx);
336 u16 stack_seg = SEG_LOW;
337 u32 bkup_ss, bkup_esp;
339 // Backup current %ss/%esp values.
342 // Copy stack seg to %ds/%ss and set %esp
352 // Restore segments and stack
356 : "+a" (eax), "+d" (edx), "+c" (func), "=&r" (bkup_ss), "=&r" (bkup_esp)
357 : "m" (StackPos), "r" (stack_seg)
362 // Switch back to original caller's stack and call a function.
364 __stack_hop_back(u32 eax, u32 edx, void *func)
// NOTE(review): a guard (presumably 'if (!MODESEGMENT)') appears to be
// elided before this return - verify against upstream.
367 return call16(eax, edx, func);
// Not in 16bit mode or not on the extra stack - call directly.
368 if (!MODE16 || !on_extra_stack())
369 return ((u32 (*)(u32, u32))func)(eax, edx);
372 u32 bkup_stack_pos, temp;
374 // Backup stack_pos and current %ss/%esp
378 // Restore original caller's %ss/%esp
// Saved %ss/%sp live just below StackPos in the low segment (-8 offset).
381 "movw %%ds:-8(%4), %%sp\n"
385 // Restore %ss/%esp and stack_pos
390 : "+a" (eax), "+d" (edx), "+c" (func), "=&r" (bkup_ss)
391 , "=&r" (bkup_stack_pos), "=&r" (temp), "+m" (StackPos)
398 /****************************************************************
399 * External 16bit interface calling
400 ****************************************************************/
402 // Far call 16bit code with a specified register state.
404 _farcall16(struct bregs *callregs, u16 callregseg)
// Must run on the original caller's stack - hop back first if needed.
406 if (need_hop_back()) {
407 stack_hop_back(_farcall16, callregs, callregseg);
// Dispatch through the asm helper; callregs is both input and output
// (the external code's resulting register state is written back).
412 "calll __farcall16\n"
413 : "+a" (callregs), "+m" (*callregs), "+d" (callregseg)
415 : "ebx", "ecx", "esi", "edi", "cc", "memory");
418 // Invoke external 16bit code.
// (lines elided: presumably sets a call16 mode before delegating)
420 farcall16(struct bregs *callregs)
423 _farcall16(callregs, 0);
426 // Invoke external 16bit code in "big real" mode.
// (lines elided: presumably selects the big-real-mode call16 method
// before delegating - verify against upstream)
428 farcall16big(struct bregs *callregs)
431 _farcall16(callregs, 0);
434 // Invoke a 16bit software interrupt.
// Fills in the code segment/offset of the handler stub then far-calls it.
436 __call16_int(struct bregs *callregs, u16 offset)
438 callregs->code.offset = offset;
// (a mode check is elided here - two variants follow)
440 callregs->code.seg = SEG_BIOS;
// Convert the flat callregs pointer to an offset within the saved
// 16bit stack segment (Call16Data.ss * 16 is its flat base).
441 _farcall16((void*)callregs - Call16Data.ss * 16, Call16Data.ss);
444 callregs->code.seg = GET_SEG(CS);
445 _farcall16(callregs, GET_SEG(SS));
// Reboot the machine by jumping to the cpu reset vector via call16().
// NOTE(review): enclosing function signature elided in this extract.
452 extern void reset_vector(void) __noreturn;
454 call16(0, 0, reset_vector);
459 /****************************************************************
461 ****************************************************************/
463 // Thread info - stored at bottom of each thread stack - don't change
464 // without also updating the inline assembler below.
// (struct fields partially elided; 'node' links threads into a list)
467 struct hlist_node node;
// The main thread's info - its list node initially points at itself
// (empty ring), which is what have_threads() checks against.
469 struct thread_info MainThread VARFSEG = {
470 NULL, { &MainThread.node, &MainThread.node.next }
// Each thread stack is one 4KiB, THREADSTACKSIZE-aligned allocation;
// getCurThread() relies on this alignment.
472 #define THREADSTACKSIZE 4096
474 // Check if any threads are running.
// True when MainThread's list node links to anything besides itself.
478 return (CONFIG_THREADS
479 && GET_FLATPTR(MainThread.node.next) != &MainThread.node);
482 // Return the 'struct thread_info' for the currently running thread.
// Stacks at or below MAIN_STACK_MAX belong to the main thread (elided
// return); otherwise the info block sits at the aligned base of the
// current thread's stack.
487 if (esp <= MAIN_STACK_MAX)
489 return (void*)ALIGN_DOWN(esp, THREADSTACKSIZE);
// CanInterrupt: set once the PIC/IVT are ready; ThreadControl: the
// "etc/threads" policy value loaded below.
492 static u8 CanInterrupt, ThreadControl;
494 // Initialize the support for internal threads.
499 if (! CONFIG_THREADS)
501 ThreadControl = romfile_loadint("etc/threads", 1);
504 // Should hardware initialization threads run during optionrom execution.
// Only with policy value 2, RTC timer support, and while still in POST.
506 threads_during_optionroms(void)
508 return CONFIG_THREADS && CONFIG_RTC_TIMER && ThreadControl == 2 && in_post();
511 // Switch to next thread stack.
513 switch_next(struct thread_info *cur)
// Next thread is the list node following the current one.
515 struct thread_info *next = container_of(
516 cur->node.next, struct thread_info, node);
// (an early-return when next == cur is presumably elided here)
// Classic cooperative stack switch: save resume pc and %ebp on the old
// stack, swap %esp via the stackpos fields, then pop %ebp and return
// into the new thread.
521 " pushl $1f\n" // store return pc
522 " pushl %%ebp\n" // backup %ebp
523 " movl %%esp, (%%eax)\n" // cur->stackpos = %esp
524 " movl (%%ecx), %%esp\n" // %esp = next->stackpos
525 " popl %%ebp\n" // restore %ebp
526 " retl\n" // restore pc
528 : "+a"(cur), "+c"(next)
530 : "ebx", "edx", "esi", "edi", "cc", "memory");
533 // Last thing called from a thread (called on MainThread stack).
// Unlinks the finished thread and (in elided lines, presumably) frees
// its stack; logs when the last thread completes.
535 __end_thread(struct thread_info *old)
537 hlist_del(&old->node);
538 dprintf(DEBUG_thread, "\\%08x/ End thread\n", (u32)old);
541 dprintf(1, "All threads complete.\n");
544 // Create a new thread and start executing 'func' in it.
// NOTE(review): some lines (fallback direct call, asm glue) are elided
// in this extract - verify against upstream before editing.
546 run_thread(void (*func)(void*), void *data)
// Threads disabled by config or runtime policy - (elided) fallback path.
549 if (! CONFIG_THREADS || ! ThreadControl)
551 struct thread_info *thread;
// Stack is THREADSTACKSIZE-aligned so getCurThread() can recover the
// thread_info from any %esp within it.
552 thread = memalign_tmphigh(THREADSTACKSIZE, THREADSTACKSIZE);
556 dprintf(DEBUG_thread, "/%08x\\ Start thread\n", (u32)thread);
557 thread->stackpos = (void*)thread + THREADSTACKSIZE;
558 struct thread_info *cur = getCurThread();
559 hlist_add_after(&thread->node, &cur->node);
// Switch to the new stack, run func(data); when func returns, hop onto
// MainThread's stack to tear down this thread, then resume the next one.
562 " pushl $1f\n" // store return pc
563 " pushl %%ebp\n" // backup %ebp
564 " movl %%esp, (%%edx)\n" // cur->stackpos = %esp
565 " movl (%%ebx), %%esp\n" // %esp = thread->stackpos
566 " calll *%%ecx\n" // Call func
569 " movl %%ebx, %%eax\n" // %eax = thread
570 " movl 4(%%ebx), %%ebx\n" // %ebx = thread->node.next
571 " movl (%5), %%esp\n" // %esp = MainThread.stackpos
572 " calll %4\n" // call __end_thread(thread)
573 " movl -4(%%ebx), %%esp\n" // %esp = next->stackpos
574 " popl %%ebp\n" // restore %ebp
575 " retl\n" // restore pc
577 : "+a"(data), "+c"(func), "+b"(thread), "+d"(cur)
578 : "m"(*(u8*)__end_thread), "m"(MainThread)
579 : "esi", "edi", "cc", "memory");
587 /****************************************************************
589 ****************************************************************/
591 // Low-level irq enable.
// NOTE(review): function signature elided; briefly enables interrupts
// so pending irqs can fire, then disables them again.
595 if (!MODESEGMENT && !CanInterrupt) {
596 // Can't enable interrupts (PIC and/or IVT not yet setup)
// Must service irqs from the original caller's stack.
600 if (need_hop_back()) {
601 stack_hop_back(check_irqs, 0, 0);
// sti;nop opens a one-instruction irq window; rep;nop is a cpu "pause".
606 asm volatile("sti ; nop ; rep ; nop ; cli ; cld" : : :"memory");
609 // Briefly permit irqs to occur.
// NOTE(review): signatures and several lines elided - this span covers
// fragments of yield() and wait_irq(); verify against upstream.
// Without threads (or in 16bit code) a yield just services irqs.
613 if (MODESEGMENT || !CONFIG_THREADS) {
617 struct thread_info *cur = getCurThread();
618 if (cur == &MainThread)
619 // Permit irqs to fire
622 // Switch to the next thread
// wait_irq must halt on the original caller's stack.
629 if (need_hop_back()) {
630 stack_hop_back(wait_irq, 0, 0);
// hlt sleeps the cpu until the next interrupt.
633 asm volatile("sti ; hlt ; cli ; cld": : :"memory");
636 // Wait for next irq to occur.
// If threads are still running (or irqs unavailable), yield instead of
// halting so they keep making progress.
640 if (!CONFIG_HARDWARE_IRQ
641 || (!MODESEGMENT && (have_threads() || !CanInterrupt))) {
642 // Threads still active or irqs not available - do a yield instead.
649 // Wait for all threads (other than the main thread) to complete.
// (loop body elided - presumably yield()s until the list empties)
654 while (have_threads())
// Cooperative mutex: safe because threads only switch at yield points.
659 mutex_lock(struct mutex_s *mutex)
662 if (! CONFIG_THREADS)
// Spin (yielding, in elided body) until the holder releases the lock.
664 while (mutex->isLocked)
670 mutex_unlock(struct mutex_s *mutex)
673 if (! CONFIG_THREADS)
// (elided: clears mutex->isLocked)
679 /****************************************************************
681 ****************************************************************/
// Preemption state: CanPreempt gates the rtc-irq-driven 32bit thread
// checks; PreemptCount tallies how many checks ran.
683 int CanPreempt VARFSEG;
684 static u32 PreemptCount;
686 // Turn on RTC irqs and arrange for them to check the 32bit threads.
// NOTE(review): signatures/bodies heavily elided across this section.
690 if (! threads_during_optionroms())
697 // Turn off RTC irqs / stop checking for thread execution.
701 if (! threads_during_optionroms()) {
707 dprintf(9, "Done preempt - %d checks\n", PreemptCount);
711 // Check if preemption is on, and wait for it to complete if so.
// Nothing to do in 16bit code, without threads, with preemption off,
// or when already running on a thread stack (esp below MAIN_STACK_MAX
// means main stack; note upstream semantics - verify direction).
715 if (MODESEGMENT || !CONFIG_THREADS || !CanPreempt
716 || getesp() < MAIN_STACK_MAX)
723 // Try to execute 32bit threads.
728 switch_next(&MainThread);
731 // 16bit code that checks if threads are pending and executes them if so.
735 if (CONFIG_THREADS && GET_GLOBAL(CanPreempt) && have_threads())
736 call32(yield_preempt, 0, 0);
740 /****************************************************************
742 ****************************************************************/
// Parameter bundle for forwarding a 3-argument call through call32()
// (which only passes a single u32).  Fields elided in this extract.
744 struct call32_params_s {
// 32bit-side trampoline: unpacks the params struct and invokes the
// target function with its three arguments.
750 call32_params_helper(struct call32_params_s *params)
752 return ((u32 (*)(u32, u32, u32))params->func)(
753 params->eax, params->edx, params->ecx);
757 __call32_params(void *func, u32 eax, u32 edx, u32 ecx, u32 errret)
760 struct call32_params_s params = {func, eax, edx, ecx};
761 return call32(call32_params_helper, MAKE_FLATPTR(GET_SEG(SS), ¶ms)