paging.c
/* Paging implementation for x86 */
#include <stdint.h>
#include <string.h>
#include "paging.h"
#include "terminal.h"

/* Page directory - must be page-aligned */
static page_directory_t kernel_page_directory __attribute__((aligned(4096)));

/* Page tables for identity mapping (0-4MB for kernel) */
static page_table_t kernel_page_table __attribute__((aligned(4096)));

/* Next free physical address for allocation (starts after 16MB) */
static uint32_t next_free_page = 0x01000000; /* Start at 16MB */

/* Enable paging by setting CR0 and CR3 */
static void enable_paging(uint32_t page_directory_physical) {
    __asm__ volatile (
        "mov %0, %%cr3\n"          /* Load page directory address */
        "mov %%cr0, %%eax\n"
        "or $0x80000000, %%eax\n"  /* Set PG bit */
        "mov %%eax, %%cr0\n"
        : : "r"(page_directory_physical) : "eax"
    );
}

/* Initialize paging with identity mapping for first 4MB */
void paging_init(void) {
    terminal_writestring("Initializing paging...\n");

    /* Clear page directory */
    memset(&kernel_page_directory, 0, sizeof(kernel_page_directory));

    /* Clear kernel page table */
    memset(&kernel_page_table, 0, sizeof(kernel_page_table));

    /* Identity map first 4MB (kernel code and data) */
    /* This maps virtual 0x00000000-0x003FFFFF to physical 0x00000000-0x003FFFFF */
    for (uint32_t i = 0; i < 1024; i++) {
        /* Each page is 4KB */
        uint32_t physical_addr = i * 4096;
        /* Present, writable, user-accessible (needed for LuaJIT in usermode) */
        kernel_page_table.entries[i] = physical_addr | PAGE_PRESENT | PAGE_WRITE | PAGE_USER;
    }

    /* Install kernel page table in directory entry 0 */
    kernel_page_directory.entries[0] = ((uint32_t)&kernel_page_table) | PAGE_PRESENT | PAGE_WRITE | PAGE_USER;

    /* Identity map additional memory regions for heap/stack (4MB-4GB) */
    /* We'll use 4MB pages for simplicity in higher regions */
    for (uint32_t i = 1; i < 1024; i++) { /* Map up to 4GB */
        /* Use 4MB pages (PSE) */
        uint32_t physical_addr = i * 0x400000; /* 4MB chunks */
        /* Present, writable, user-accessible, 4MB page size */
        kernel_page_directory.entries[i] = physical_addr | PAGE_PRESENT | PAGE_WRITE | PAGE_USER | PAGE_SIZE;
    }

    terminal_writestring("Page tables configured\n");

    /* Enable PSE (Page Size Extension) for 4MB pages */
    __asm__ volatile (
        "mov %%cr4, %%eax\n"
        "or $0x10, %%eax\n"   /* Set PSE bit */
        "mov %%eax, %%cr4\n"
        : : : "eax"
    );

    terminal_writestring("PSE enabled\n");

    /* Enable paging */
    enable_paging((uint32_t)&kernel_page_directory);

    terminal_writestring("Paging enabled!\n");
}
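
/*
 * For illustration: the routines below all split a 32-bit virtual address
 * the same way (standard non-PAE x86 two-level paging). Worked example for
 * virtual address 0x00403012:
 *
 *   pd_index = 0x00403012 >> 22           = 1     (second 4MB region)
 *   pt_index = (0x00403012 >> 12) & 0x3FF = 3     (fourth 4KB page in that region)
 *   offset   = 0x00403012 & 0xFFF         = 0x012 (byte within the page)
 */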

/* Map a virtual page to a physical page with given flags */
void paging_map_page(uint32_t virtual_addr, uint32_t physical_addr, uint32_t flags) {
    /* Extract page directory index (top 10 bits) */
    uint32_t pd_index = virtual_addr >> 22;

    /* Extract page table index (middle 10 bits) */
    uint32_t pt_index = (virtual_addr >> 12) & 0x3FF;

    /* Check that a 4KB page table exists for this directory entry.
     * Directory entries above 0 are 4MB (PSE) pages, not page tables,
     * so they cannot be remapped at 4KB granularity here. */
    if (!(kernel_page_directory.entries[pd_index] & PAGE_PRESENT) ||
        (kernel_page_directory.entries[pd_index] & PAGE_SIZE)) {
        /* Need to allocate a new page table */
        /* For now, we only support the first 4MB which already has a table */
        terminal_writestring("ERROR: Cannot map page outside first 4MB yet\n");
        return;
    }

    /* Get pointer to page table */
    page_table_t* pt = (page_table_t*)(kernel_page_directory.entries[pd_index] & ~0xFFF);

    /* Map the page */
    pt->entries[pt_index] = (physical_addr & ~0xFFF) | flags | PAGE_PRESENT;

    /* Flush TLB for this page */
    __asm__ volatile ("invlpg (%0)" : : "r"(virtual_addr) : "memory");
}

/* Allocate executable memory for JIT code */
void* paging_alloc_executable(uint32_t size) {
    /* Round up to page boundary */
    uint32_t pages_needed = (size + 4095) / 4096;

    /* Bump-allocate from the free region starting at 16MB
     * (identity-mapped, so virtual == physical) */
    uint32_t virtual_addr = next_free_page;
    next_free_page += pages_needed * 4096;

    /* The memory is already mapped read/write by the identity mapping. For
     * JIT code it must also be executable, which it already is: 32-bit
     * non-PAE paging has no NX bit, so every mapped page is executable. */

    return (void*)virtual_addr;
}

/* Get physical address for virtual address */
uint32_t paging_get_physical(uint32_t virtual_addr) {
    uint32_t pd_index = virtual_addr >> 22;
    uint32_t pt_index = (virtual_addr >> 12) & 0x3FF;
    uint32_t offset = virtual_addr & 0xFFF;

    if (!(kernel_page_directory.entries[pd_index] & PAGE_PRESENT)) {
        return 0; /* Not mapped */
    }

    /* Check if using a 4MB (PSE) page */
    if (kernel_page_directory.entries[pd_index] & PAGE_SIZE) {
        uint32_t base = kernel_page_directory.entries[pd_index] & ~0x3FFFFF;
        return base + (virtual_addr & 0x3FFFFF);
    }

    /* 4KB pages */
    page_table_t* pt = (page_table_t*)(kernel_page_directory.entries[pd_index] & ~0xFFF);

    if (!(pt->entries[pt_index] & PAGE_PRESENT)) {
        return 0; /* Not mapped */
    }

    return (pt->entries[pt_index] & ~0xFFF) + offset;
}
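
/*
 * Hypothetical usage sketch (not part of this file): a JIT caller could
 * obtain a writable and executable buffer like this. "machine_code" and
 * its length are placeholders.
 *
 *   void* code_buf = paging_alloc_executable(8192);   // two 4KB pages
 *   memcpy(code_buf, machine_code, machine_code_len);
 *   ((void (*)(void))code_buf)();                     // jump into generated code
 */

/*
 * For reference: "paging.h" is not included in this listing. A minimal
 * header consistent with the identifiers used above might look like the
 * sketch below; this is an assumption and the real header may differ.
 * The flag values are the standard x86 page-table bits.
 */
#if 0 /* reference sketch only */
#include <stdint.h>

#define PAGE_PRESENT 0x001 /* P:   entry is valid */
#define PAGE_WRITE   0x002 /* R/W: writable */
#define PAGE_USER    0x004 /* U/S: accessible from ring 3 */
#define PAGE_SIZE    0x080 /* PS:  4MB page (page directory entries only) */

typedef struct {
    uint32_t entries[1024];
} page_table_t;

typedef struct {
    uint32_t entries[1024];
} page_directory_t;

void     paging_init(void);
void     paging_map_page(uint32_t virtual_addr, uint32_t physical_addr, uint32_t flags);
void*    paging_alloc_executable(uint32_t size);
uint32_t paging_get_physical(uint32_t virtual_addr);
#endif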