1 // Code for manipulating stack locations.
3 // Copyright (C) 2009-2014 Kevin O'Connor <kevin@koconnor.net>
5 // This file may be distributed under the terms of the GNU LGPLv3 license.
7 #include "biosvar.h" // GET_GLOBAL
8 #include "bregs.h" // CR0_PE
9 #include "fw/paravirt.h" // PORT_SMI_CMD
10 #include "hw/rtc.h" // rtc_use
11 #include "list.h" // hlist_node
12 #include "malloc.h" // free
13 #include "output.h" // dprintf
14 #include "romfile.h" // romfile_loadint
15 #include "stacks.h" // struct mutex_s
16 #include "util.h" // useRTC
// Highest linear address that can belong to the BIOS "main" stack;
// used below to sanity-check %esp (call16_sloppy, getCurThread,
// wait_preempt).
18 #define MAIN_STACK_MAX (1024*1024)
21 /****************************************************************
22 * 16bit / 32bit calling
23 ****************************************************************/
// Nonzero when the SMM trampoline for 16/32bit mode switching is
// available - checked in call32() below.  NOTE(review): set elsewhere
// (not visible in this excerpt); VARFSEG presumably places it in the
// f-segment - confirm against biosvar.h.
36 int HaveSmmCall32 VARFSEG;
38 // Backup state in preparation for call32_smm()
// NOTE(review): this excerpt is missing lines (the function's own
// signature line and braces are not visible here).
42 // Backup cmos index register and disable nmi
43 u8 cmosindex = inb(PORT_CMOS_INDEX);
44 outb(cmosindex | NMI_DISABLE_BIT, PORT_CMOS_INDEX);
// Save the pre-call cmos index so call32_smm_post() can restore it.
46 SET_LOW(Call32Data.cmosindex, cmosindex);
// Record the current stack segment and mark the SMM method active
// (consumed by call16_smm() and call16_back()).
49 SET_LOW(Call32Data.ss, GET_SEG(SS));
51 SET_LOW(Call32Data.method, C32_SMM);
54 // Restore state backed up during call32_smm()
// Clear the active-method marker and saved stack segment first.
58 SET_LOW(Call32Data.method, 0);
59 SET_LOW(Call32Data.ss, 0);
61 // Restore cmos index register
// (writes back the raw value read in the prep step, including
// whatever NMI mask bit was read at that time)
62 outb(GET_LOW(Call32Data.cmosindex), PORT_CMOS_INDEX);
// Assembler-mode helpers for inline asm that mixes 16bit and 32bit
// instruction encodings in one translation unit:
//  - ASM32_SWITCH16/ASM32_BACK32: while compiling 32bit code, emit
//    16bit code into a dedicated .text.32fseg.<unique> section.
//  - ASM16_SWITCH32/ASM16_BACK16: while compiling 16bit code,
//    temporarily switch the assembler to .code32 and back.
66 #define ASM32_SWITCH16 " .pushsection .text.32fseg." UNIQSEC "\n .code16\n"
67 #define ASM32_BACK32 " .popsection\n .code32\n"
68 #define ASM16_SWITCH32 " .code32\n"
69 #define ASM16_BACK16 " .code16gcc\n"
71 // Call a SeaBIOS C function in 32bit mode using smm trampoline
// NOTE(review): excerpt is incomplete - the return-type line, braces,
// and several asm statements of this function are not visible.
73 call32_smm(void *func, u32 eax)
76 dprintf(9, "call32_smm %p %x\n", func, eax);
80 // Backup esp / set esp to flat stack location
84 " addl %%eax, %%esp\n"
86 // Transition to 32bit mode, call func, return to 16bit
// The outb to PORT_SMI_CMD triggers SMM; the SMM handler keys off
// %eax (command id) and %ecx (ENTERID) to resume 32bit execution at
// the flat address placed in %ebx.
87 " movl $" __stringify(CALL32SMM_CMDID) ", %%eax\n"
88 " movl $" __stringify(CALL32SMM_ENTERID) ", %%ecx\n"
89 " movl $(" __stringify(BUILD_BIOS_ADDR) " + 1f), %%ebx\n"
90 " outb %%al, $" __stringify(PORT_SMI_CMD) "\n"
// Second SMM request (RETURNID) switches back to 16bit mode.
99 " movl $" __stringify(CALL32SMM_CMDID) ", %%eax\n"
100 " movl $" __stringify(CALL32SMM_RETURNID) ", %%ecx\n"
102 " outb %%al, $" __stringify(PORT_SMI_CMD) "\n"
109 : "=&r" (bkup_esp), "+r" (eax)
111 : "eax", "ecx", "edx", "ebx", "cc", "memory");
114 dprintf(9, "call32_smm done %p %x\n", func, eax);
118 // 16bit handler code called from call16_smm()
// Runs the target function while in 16bit mode and returns its
// result.  NOTE(review): braces and the return statement are not
// visible in this excerpt.
120 call16_smm_helper(u32 eax, u32 edx, u32 (*func)(u32 eax, u32 edx))
// Guard against builds with the SMM call mechanism compiled out.
122 if (!CONFIG_CALL32_SMM)
125 u32 ret = func(eax, edx);
// Jump back to 16bit mode (via the SMM trampoline) while running in
// 32bit mode, and call 'func' there.  NOTE(review): excerpt is
// incomplete - signature/braces and some asm lines are missing.
131 call16_smm(u32 eax, u32 edx, void *func)
134 if (!CONFIG_CALL32_SMM)
// Convert flat 32bit pointer to an f-segment relative offset.
136 func -= BUILD_BIOS_ADDR;
137 dprintf(9, "call16_smm %p %x %x\n", func, eax, edx);
// Linear base of the 16bit stack segment saved by call32_smm_prep().
138 u32 stackoffset = Call32Data.ss << 4;
143 // Transition to 16bit mode, call func, return to 32bit
144 " movl $" __stringify(CALL32SMM_CMDID) ", %%eax\n"
145 " movl $" __stringify(CALL32SMM_RETURNID) ", %%ecx\n"
146 " movl $(1f - " __stringify(BUILD_BIOS_ADDR) "), %%ebx\n"
147 " outb %%al, $" __stringify(PORT_SMI_CMD) "\n"
154 " calll _cfunc16_call16_smm_helper\n"
// Re-enter 32bit mode via a second SMM request (ENTERID).
157 " movl $" __stringify(CALL32SMM_CMDID) ", %%eax\n"
158 " movl $" __stringify(CALL32SMM_ENTERID) ", %%ecx\n"
160 " outb %%al, $" __stringify(PORT_SMI_CMD) "\n"
164 // Set esp to flat stack location
167 : "+r" (stackoffset), "+r" (eax), "+d" (edx)
169 : "eax", "ecx", "ebx", "cc", "memory");
173 // Backup state in preparation for call32_sloppy()
175 call32_sloppy_prep(void)
177 // Backup cmos index register and disable nmi
178 u8 cmosindex = inb(PORT_CMOS_INDEX);
179 outb(cmosindex | NMI_DISABLE_BIT, PORT_CMOS_INDEX);
181 SET_LOW(Call32Data.cmosindex, cmosindex);
183 // Enable a20 and backup its previous state
184 SET_LOW(Call32Data.a20, set_a20(1));
186 // Backup ss/fs/gs and gdt
187 SET_LOW(Call32Data.ss, GET_SEG(SS));
188 SET_LOW(Call32Data.fs, GET_SEG(FS));
189 SET_LOW(Call32Data.gs, GET_SEG(GS));
190 struct descloc_s gdt;
// NOTE(review): the instruction filling 'gdt' (presumably an sgdt)
// is not visible in this excerpt.
192 SET_LOW(Call32Data.gdt.length, gdt.length);
193 SET_LOW(Call32Data.gdt.addr, gdt.addr);
// Mark the "sloppy" method active for call16_back()/call16_sloppy().
195 SET_LOW(Call32Data.method, C32_SLOPPY);
198 // Restore state backed up during call32_sloppy()
200 call32_sloppy_post(void)
202 SET_LOW(Call32Data.method, 0);
203 SET_LOW(Call32Data.ss, 0);
205 // Restore gdt and fs/gs
206 struct descloc_s gdt;
207 gdt.length = GET_LOW(Call32Data.gdt.length);
208 gdt.addr = GET_LOW(Call32Data.gdt.addr);
// NOTE(review): the instruction loading 'gdt' back into the cpu
// (presumably an lgdt) is not visible in this excerpt.
210 SET_SEG(FS, GET_LOW(Call32Data.fs));
211 SET_SEG(GS, GET_LOW(Call32Data.gs));
// Return a20 to its pre-call state.
214 set_a20(GET_LOW(Call32Data.a20));
216 // Restore cmos index register
217 outb(GET_LOW(Call32Data.cmosindex), PORT_CMOS_INDEX);
221 // Call a C function in 32bit mode. This clobbers the 16bit segment
222 // selector registers.
// NOTE(review): excerpt is incomplete - signature line, braces and
// several asm statements of this function are not visible.
224 call32_sloppy(void *func, u32 eax)
227 call32_sloppy_prep();
228 u32 bkup_ss, bkup_esp;
230 // Backup ss/esp / set esp to flat stack location
237 // Transition to 32bit mode, call func, return to 16bit
// transition32 jumps to the flat address placed in %edx; on return,
// transition16big resumes 16bit "big real" execution.
238 " movl $(" __stringify(BUILD_BIOS_ADDR) " + 1f), %%edx\n"
239 " jmp transition32\n"
243 " jmp transition16big\n"
250 : "=&r" (bkup_ss), "=&r" (bkup_esp), "+a" (eax)
252 : "ecx", "edx", "cc", "memory");
253 call32_sloppy_post();
257 // 16bit handler code called from call16_sloppy()
259 call16_sloppy_helper(u32 eax, u32 edx, u32 (*func)(u32 eax, u32 edx))
// Restore full 16bit state, run the target function, then re-arm
// the 32bit-call state before returning to the 32bit caller.
// NOTE(review): braces and the return statement are not visible.
261 call32_sloppy_post();
262 u32 ret = func(eax, edx);
263 call32_sloppy_prep();
267 // Jump back to 16bit mode while in 32bit mode from call32_sloppy()
// NOTE(review): excerpt is incomplete - signature/braces and some asm
// lines are missing.
269 call16_sloppy(u32 eax, u32 edx, void *func)
// The 16bit stack segment math below only works if %esp is within
// the first megabyte.
272 if (getesp() > MAIN_STACK_MAX)
273 panic("call16_sloppy with invalid stack\n");
// Convert flat pointer to an f-segment relative offset.
274 func -= BUILD_BIOS_ADDR;
275 u32 stackseg = Call32Data.ss;
277 // Transition to 16bit mode
278 " movl $(1f - " __stringify(BUILD_BIOS_ADDR) "), %%edx\n"
279 " jmp transition16big\n"
280 // Setup ss/esp and call func
289 " calll _cfunc16_call16_sloppy_helper\n"
290 // Return to 32bit and restore esp
292 " jmp transition32\n"
296 : "r" (func), "r" (edx), "r" (stackseg)
297 : "edx", "ecx", "cc", "memory");
301 // Call a 32bit SeaBIOS function from a 16bit SeaBIOS function.
// Prefers the SMM trampoline when available; otherwise falls back to
// the "sloppy" method (which clobbers segment registers).
// NOTE(review): the path returning 'errret' is not visible in this
// excerpt.
303 call32(void *func, u32 eax, u32 errret)
306 if (CONFIG_CALL32_SMM && GET_GLOBAL(HaveSmmCall32))
307 return call32_smm(func, eax);
310 // Called in 16bit protected mode?!
312 return call32_sloppy(func, eax);
315 // Call a 16bit SeaBIOS function from a 32bit SeaBIOS function.
// NOTE(review): excerpt is incomplete - signature/braces and some asm
// lines are missing.
317 call16(u32 eax, u32 edx, void *func)
// The 16bit code runs on the current stack, so %esp must be low
// enough to be addressable from 16bit mode.
320 if (getesp() > BUILD_STACK_ADDR)
321 panic("call16 with invalid stack\n");
// Convert flat pointer to an f-segment relative offset.
322 func -= BUILD_BIOS_ADDR;
324 // Transition to 16bit mode
325 " movl $(1f - " __stringify(BUILD_BIOS_ADDR) "), %%edx\n"
326 " jmp transition16\n"
333 " jmp transition32\n"
337 : "r" (func), "r" (edx)
338 : "edx", "ecx", "cc", "memory");
342 // Call a 16bit SeaBIOS function in "big real" mode.
// Identical in structure to call16() above, but enters 16bit mode via
// transition16big.  NOTE(review): excerpt is incomplete - signature/
// braces and some asm lines are missing.
344 call16big(u32 eax, u32 edx, void *func)
347 if (getesp() > BUILD_STACK_ADDR)
348 panic("call16big with invalid stack\n");
349 func -= BUILD_BIOS_ADDR;
351 // Transition to 16bit mode
352 " movl $(1f - " __stringify(BUILD_BIOS_ADDR) "), %%edx\n"
353 " jmp transition16big\n"
360 " jmp transition32\n"
364 : "r" (func), "r" (edx)
365 : "edx", "ecx", "cc", "memory");
369 // Call a 16bit SeaBIOS function, restoring the mode from last call32().
// Dispatches on the method recorded by the matching call32 variant's
// prep step.
371 call16_back(u32 eax, u32 edx, void *func)
374 if (CONFIG_CALL32_SMM && Call32Data.method == C32_SMM)
375 return call16_smm(eax, edx, func);
376 if (Call32Data.method == C32_SLOPPY)
377 return call16_sloppy(eax, edx, func);
// NOTE(review): the condition selecting big real mode vs plain 16bit
// mode is not visible in this excerpt.
379 return call16big(eax, edx, func);
380 return call16(eax, edx, func);
384 /****************************************************************
386 ****************************************************************/
388 // Space for a stack for 16bit code.
// NOTE(review): the +1 presumably makes the address one past the
// usable area (the stack grows down from the end) valid - confirm
// against how StackPos is initialized.
389 u8 ExtraStack[BUILD_EXTRA_STACK_SIZE+1] VARLOW __aligned(8);
392 // Test if currently on the extra stack
// True when running 16bit code with %ss in the low segment and %esp
// above the array's base.  NOTE(review): the enclosing function's
// signature line is not visible in this excerpt.
396 return MODE16 && GET_SEG(SS) == SEG_LOW && getesp() > (u32)ExtraStack;
399 // Switch to the extra stack and call a function.
// NOTE(review): excerpt is incomplete - the asm statement bodies and
// closing lines are partially missing.
401 stack_hop(u32 eax, u32 edx, void *func)
// Already on the extra stack - just call directly.
403 if (on_extra_stack())
404 return ((u32 (*)(u32, u32))func)(eax, edx);
406 u16 stack_seg = SEG_LOW;
407 u32 bkup_ss, bkup_esp;
409 // Backup current %ss/%esp values.
412 // Copy stack seg to %ds/%ss and set %esp
422 // Restore segments and stack
426 : "+a" (eax), "+d" (edx), "+c" (func), "=&r" (bkup_ss), "=&r" (bkup_esp)
427 : "m" (StackPos), "r" (stack_seg)
432 // Switch back to original caller's stack and call a function.
// NOTE(review): excerpt is incomplete - in particular the guard that
// selects the call16_back() path on the next line is missing.
434 stack_hop_back(u32 eax, u32 edx, void *func)
437 return call16_back(eax, edx, func);
// Not in 16bit mode or not on the extra stack - call directly.
438 if (!MODE16 || !on_extra_stack())
439 return ((u32 (*)(u32, u32))func)(eax, edx);
442 u32 bkup_stack_pos, temp;
444 // Backup stack_pos and current %ss/%esp
448 // Restore original callers' %ss/%esp
451 "movw %%ds:-8(%4), %%sp\n"
455 // Restore %ss/%esp and stack_pos
460 : "+a" (eax), "+d" (edx), "+c" (func), "=&r" (bkup_ss)
461 , "=&r" (bkup_stack_pos), "=&r" (temp), "+m" (StackPos)
468 /****************************************************************
469 * External 16bit interface calling
470 ****************************************************************/
472 // Far call 16bit code with a specified register state.
// NOTE(review): excerpt is incomplete - signature/braces and part of
// the asm statement are not visible.
474 _farcall16(struct bregs *callregs, u16 callregseg)
// If currently on the extra stack, hop back to the caller's stack
// (re-entering this function via its 16bit entry symbol) first.
476 if (need_hop_back()) {
477 extern void _cfunc16__farcall16(void);
478 stack_hop_back((u32)callregs, callregseg, _cfunc16__farcall16);
483 "calll __farcall16\n"
484 : "+a" (callregs), "+m" (*callregs), "+d" (callregseg)
486 : "ebx", "ecx", "esi", "edi", "cc", "memory");
// Public wrappers: invoke the 16bit _farcall16 entry point from
// 32bit code via call16() (real mode) or call16big() ("big real"
// mode).  NOTE(review): return-type lines and braces are not visible
// in this excerpt.
490 farcall16(struct bregs *callregs)
492 extern void _cfunc16__farcall16(void);
493 call16((u32)callregs, 0, _cfunc16__farcall16);
497 farcall16big(struct bregs *callregs)
499 extern void _cfunc16__farcall16(void);
500 call16big((u32)callregs, 0, _cfunc16__farcall16);
503 // Invoke a 16bit software interrupt.
// NOTE(review): excerpt is incomplete - the conditional selecting
// between the two _farcall16() paths below is not visible.
505 __call16_int(struct bregs *callregs, u16 offset)
507 callregs->code.offset = offset;
// 32bit path: convert the flat 'callregs' pointer into an offset
// relative to the 16bit stack segment saved in Call32Data.ss.
509 callregs->code.seg = SEG_BIOS;
510 _farcall16((void*)callregs - Call32Data.ss * 16, Call32Data.ss);
// 16bit path: use the current code/stack segments directly.
513 callregs->code.seg = GET_SEG(CS);
514 _farcall16(callregs, GET_SEG(SS));
// Reboot the machine by jumping to the 16bit reset vector, using the
// mode recorded by the last call32().  NOTE(review): the enclosing
// function's signature and braces are not visible in this excerpt.
521 extern void reset_vector(void) __noreturn;
523 call16_back(0, 0, reset_vector);
528 /****************************************************************
530 ****************************************************************/
532 // Thread info - stored at bottom of each thread stack - don't change
533 // without also updating the inline assembler below.
// NOTE(review): the struct's other fields (e.g. the stack-position
// pointer used by the asm in switch_next/run_thread) are not visible
// in this excerpt.
536 struct hlist_node node;
// The main thread; its node initializer forms a one-element circular
// list pointing at itself.
538 struct thread_info MainThread VARFSEG = {
539 NULL, { &MainThread.node, &MainThread.node.next }
// Each thread stack is THREADSTACKSIZE bytes and (per run_thread)
// aligned to that size, so getCurThread() can recover the owning
// thread_info by rounding %esp down.
541 #define THREADSTACKSIZE 4096
543 // Check if any threads are running.
// Non-empty when any node besides MainThread's own is linked in.
547 return (CONFIG_THREADS
548 && GET_FLATPTR(MainThread.node.next) != &MainThread.node);
551 // Return the 'struct thread_info' for the currently running thread.
// NOTE(review): the main-stack branch body (presumably returning
// &MainThread) is not visible in this excerpt.
556 if (esp <= MAIN_STACK_MAX)
// Thread stacks are THREADSTACKSIZE-aligned, so the thread_info at
// the stack base is found by rounding %esp down.
558 return (void*)ALIGN_DOWN(esp, THREADSTACKSIZE);
// Thread policy knob loaded from the "etc/threads" romfile:
// 0 = disabled, 1 = enabled (default), 2 = also run during option
// ROM execution (see threads_during_optionroms below).
561 static int ThreadControl;
563 // Initialize the support for internal threads.
// NOTE(review): function signature/braces not visible in excerpt.
567 if (! CONFIG_THREADS)
569 ThreadControl = romfile_loadint("etc/threads", 1);
572 // Should hardware initialization threads run during optionrom execution.
574 threads_during_optionroms(void)
576 return CONFIG_THREADS && ThreadControl == 2 && in_post();
579 // Switch to next thread stack.
// Cooperative context switch: saves the current %esp into
// cur->stackpos, loads the next thread's saved stack pointer, and
// resumes it via the pc it pushed.  NOTE(review): excerpt is missing
// lines (signature, a likely cur==next early-exit, asm boilerplate).
581 switch_next(struct thread_info *cur)
583 struct thread_info *next = container_of(
584 cur->node.next, struct thread_info, node);
589 " pushl $1f\n" // store return pc
590 " pushl %%ebp\n" // backup %ebp
591 " movl %%esp, (%%eax)\n" // cur->stackpos = %esp
592 " movl (%%ecx), %%esp\n" // %esp = next->stackpos
593 " popl %%ebp\n" // restore %ebp
594 " retl\n" // restore pc
596 : "+a"(cur), "+c"(next)
598 : "ebx", "edx", "esi", "edi", "cc", "memory");
601 // Last thing called from a thread (called on MainThread stack).
// Unlinks the finished thread from the run list.  NOTE(review): the
// lines freeing the thread stack and guarding the final dprintf are
// not visible in this excerpt.
603 __end_thread(struct thread_info *old)
605 hlist_del(&old->node);
606 dprintf(DEBUG_thread, "\\%08x/ End thread\n", (u32)old);
609 dprintf(1, "All threads complete.\n");
612 // Create a new thread and start executing 'func' in it.
// NOTE(review): excerpt is incomplete - the allocation-failure check
// and the fallback path (running func directly) are not visible.
614 run_thread(void (*func)(void*), void *data)
617 if (! CONFIG_THREADS || ! ThreadControl)
619 struct thread_info *thread;
// Stack is aligned to its own size so getCurThread() can recover the
// thread_info from %esp.
620 thread = memalign_tmphigh(THREADSTACKSIZE, THREADSTACKSIZE);
624 dprintf(DEBUG_thread, "/%08x\\ Start thread\n", (u32)thread);
625 thread->stackpos = (void*)thread + THREADSTACKSIZE;
626 struct thread_info *cur = getCurThread();
627 hlist_add_after(&thread->node, &cur->node);
// Switch onto the new stack, run func, then tear the thread down on
// the MainThread stack and resume the next runnable thread.
630 " pushl $1f\n" // store return pc
631 " pushl %%ebp\n" // backup %ebp
632 " movl %%esp, (%%edx)\n" // cur->stackpos = %esp
633 " movl (%%ebx), %%esp\n" // %esp = thread->stackpos
634 " calll *%%ecx\n" // Call func
637 " movl %%ebx, %%eax\n" // %eax = thread
638 " movl 4(%%ebx), %%ebx\n" // %ebx = thread->node.next
639 " movl (%5), %%esp\n" // %esp = MainThread.stackpos
640 " calll %4\n" // call __end_thread(thread)
641 " movl -4(%%ebx), %%esp\n" // %esp = next->stackpos
642 " popl %%ebp\n" // restore %ebp
643 " retl\n" // restore pc
645 : "+a"(data), "+c"(func), "+b"(thread), "+d"(cur)
646 : "m"(*(u8*)__end_thread), "m"(MainThread)
647 : "esi", "edi", "cc", "memory");
655 /****************************************************************
657 ****************************************************************/
659 // Low-level irq enable.
// Briefly enables interrupts (sti ... cli) so any pending irqs are
// delivered; first hops back to the original caller's stack when
// running on the extra 16bit stack.  NOTE(review): signature/braces
// not visible in this excerpt.
663 if (need_hop_back()) {
664 extern void _cfunc16_check_irqs(void);
665 stack_hop_back(0, 0, _cfunc16_check_irqs);
668 asm volatile("sti ; nop ; rep ; nop ; cli ; cld" : : :"memory");
671 // Briefly permit irqs to occur.
// In 16bit/segmented mode (or with threads disabled) this only
// checks for irqs; otherwise the main thread services irqs and other
// threads cooperatively switch to the next runnable thread.
// NOTE(review): signature/braces and branch bodies are not visible.
675 if (MODESEGMENT || !CONFIG_THREADS) {
679 struct thread_info *cur = getCurThread();
680 if (cur == &MainThread)
681 // Permit irqs to fire
684 // Switch to the next thread
// Low-level halt until the next hardware interrupt arrives; hops
// back to the original caller's stack first when needed.
// NOTE(review): the enclosing function's signature/braces are not
// visible in this excerpt.
691 if (need_hop_back()) {
692 extern void _cfunc16_wait_irq(void);
693 stack_hop_back(0, 0, _cfunc16_wait_irq);
696 asm volatile("sti ; hlt ; cli ; cld": : :"memory");
699 // Wait for next irq to occur.
// With active threads it must not halt the cpu - other threads still
// need to run - so it yields instead.  NOTE(review): signatures,
// braces and loop bodies are not visible in this excerpt.
703 if (!MODESEGMENT && have_threads()) {
704 // Threads still active - do a yield instead.
711 // Wait for all threads (other than the main thread) to complete.
716 while (have_threads())
// Acquire a cooperative mutex: spins until isLocked clears (the
// yield inside the loop is not visible in this excerpt), then takes
// the lock.  No-ops when threads are compiled out.
// NOTE(review): signatures, braces and the lock/unlock assignments
// are not visible here.
721 mutex_lock(struct mutex_s *mutex)
724 if (! CONFIG_THREADS)
726 while (mutex->isLocked)
// Release the mutex (body not visible in this excerpt).
732 mutex_unlock(struct mutex_s *mutex)
735 if (! CONFIG_THREADS)
741 /****************************************************************
743 ****************************************************************/
// Nonzero while RTC-irq-driven preemption of 16bit code is active
// (checked via GET_GLOBAL in check_preempt below).
745 int CanPreempt VARFSEG;
// Count of preemption checks performed - reported when preemption is
// turned off.
746 static u32 PreemptCount;
748 // Turn on RTC irqs and arrange for them to check the 32bit threads.
// NOTE(review): signature/braces and the rtc/flag setup lines are
// not visible in this excerpt.
752 if (! threads_during_optionroms())
759 // Turn off RTC irqs / stop checking for thread execution.
763 if (! threads_during_optionroms()) {
769 dprintf(9, "Done preempt - %d checks\n", PreemptCount);
773 // Check if preemption is on, and wait for it to complete if so.
// Bails out when in segmented mode, threads are off, preemption is
// inactive, or we are not running on the main stack.
// NOTE(review): signatures, braces and the wait loop are not visible
// in this excerpt.
777 if (MODESEGMENT || !CONFIG_THREADS || !CanPreempt
778 || getesp() < MAIN_STACK_MAX)
785 // Try to execute 32bit threads.
790 switch_next(&MainThread);
793 // 16bit code that checks if threads are pending and executes them if so.
// Called from the rtc irq path; jumps to 32bit mode to run pending
// threads via yield_preempt.
797 extern void _cfunc32flat_yield_preempt(void);
798 if (CONFIG_THREADS && GET_GLOBAL(CanPreempt) && have_threads())
799 call32(_cfunc32flat_yield_preempt, 0, 0);
803 /****************************************************************
805 ****************************************************************/
// Parameter block used to pass up to three register arguments plus a
// function pointer through the single-argument call32() mechanism.
// NOTE(review): the struct's field list is not fully visible in this
// excerpt.
807 struct call32_params_s {
// 32bit-side helper: unpack the parameter block and invoke the
// target as func(eax, edx, ecx).
813 call32_params_helper(struct call32_params_s *params)
815 return ((u32 (*)(u32, u32, u32))params->func)(
816 params->eax, params->edx, params->ecx);
// Call a 32bit function from 16bit code with three parameters;
// 'errret' is returned if the 32bit call cannot be made.
820 call32_params(void *func, u32 eax, u32 edx, u32 ecx, u32 errret)
823 struct call32_params_s params = {func, eax, edx, ecx};
824 extern void _cfunc32flat_call32_params_helper(void);
// Pass the flat address of the stack-local parameter block.
// Fixed: the source text contained the mojibake "¶ms" (an HTML
// entity garbling of "&para" + "ms") where the address-of expression
// "&params" is required to match the helper's pointer parameter.
825 return call32(_cfunc32flat_call32_params_helper
826 , (u32)MAKE_FLATPTR(GET_SEG(SS), &params), errret);