feat: implement fork system call with deep address space cloning (AI)

- Added paging_clone_directory_from(): deep-copies user-space pages so
  parent and child have independent memory. Kernel pages are shared.
- Fixed process_fork() to accept registers_t* for accurate child state,
  and to clone from the parent's page directory (not the kernel's).
- Refactored process_exit() to properly context-switch to next process
  using new process_switch_to_user assembly stub (loads full registers_t
  and performs iret), instead of halting unconditionally.
- Fixed sys_waitpid() to use proper blocking: marks process BLOCKED,
  invokes scheduler, and resumes with exit code when child dies.
- Added SYSCALL_SWITCHED mechanism to prevent syscall_handler from
  clobbering the next process's EAX after a context switch.
- Created fork-test user app that validates fork + waitpid.
- Added docs/fork.md with architecture documentation.

Tested: fork-test creates a child; both parent and child print their
messages, the parent blocks in waitpid, reaps the child's exit code (7),
and then exits with code 0. hello-world was also re-run and verified to
still work correctly after the process_exit refactor.
This commit is contained in:
AI
2026-02-23 12:42:02 +00:00
parent f1de5b6da6
commit 42328ead0b
9 changed files with 350 additions and 30 deletions

View File

@@ -299,6 +299,64 @@ uint32_t paging_clone_directory(void) {
return new_dir_phys;
}
uint32_t paging_clone_directory_from(uint32_t src_pd_phys) {
uint32_t *src_pd = (uint32_t *)src_pd_phys;
/* Allocate a new page directory */
phys_addr_t new_pd_phys = pmm_alloc_page(PMM_ZONE_NORMAL);
if (new_pd_phys == 0) {
offset_print(" PAGING: cannot allocate page directory for fork\n");
return 0;
}
uint32_t *new_pd = (uint32_t *)new_pd_phys;
/* Copy all page directory entries (shares kernel mappings) */
memcpy(new_pd, src_pd, 4096);
/* Deep-copy user-space page tables (those with PAGE_USER set) */
for (uint32_t i = 0; i < PAGE_ENTRIES; i++) {
if (!(src_pd[i] & PAGE_PRESENT)) continue;
if (!(src_pd[i] & PAGE_USER)) continue; /* kernel entry, shared */
uint32_t *src_pt = (uint32_t *)(src_pd[i] & 0xFFFFF000);
/* Allocate a new page table */
phys_addr_t new_pt_phys = pmm_alloc_page(PMM_ZONE_NORMAL);
if (new_pt_phys == 0) {
offset_print(" PAGING: fork: cannot allocate page table\n");
return 0; /* TODO: free partially allocated pages */
}
uint32_t *new_pt = (uint32_t *)new_pt_phys;
/* Deep-copy each page in the page table */
for (uint32_t j = 0; j < PAGE_ENTRIES; j++) {
if (!(src_pt[j] & PAGE_PRESENT)) {
new_pt[j] = 0;
continue;
}
if (src_pt[j] & PAGE_USER) {
/* User page: allocate new physical page and copy content */
phys_addr_t old_phys = src_pt[j] & 0xFFFFF000;
phys_addr_t new_phys = pmm_alloc_page(PMM_ZONE_NORMAL);
if (new_phys == 0) {
offset_print(" PAGING: fork: cannot allocate page\n");
return 0;
}
memcpy((void *)new_phys, (void *)old_phys, 4096);
new_pt[j] = new_phys | (src_pt[j] & 0xFFF);
} else {
/* Kernel page within a user page table: share directly */
new_pt[j] = src_pt[j];
}
}
new_pd[i] = new_pt_phys | (src_pd[i] & 0xFFF);
}
return new_pd_phys;
}
void paging_map_page_in(uint32_t *pd, uint32_t vaddr, uint32_t paddr, uint32_t flags) {
uint32_t pd_idx = PD_INDEX(vaddr);
uint32_t pt_idx = PT_INDEX(vaddr);