|
39 | 39 | #define PAGE_ENTRIES_PER_PAGE (512) |
40 | 40 |
|
41 | 41 | #define PAGE_2MB_SHIFT 21 |
42 | | -#define MEM_SIZE 256 * 1024 * 1024 |
43 | 42 |
|
44 | 43 | #if !defined(WOLFBOOT_LOADER) |
45 | 44 | #define WOLFBOOT_PTP_NUM 128 |
46 | 45 | static uint8_t page_table_pages[WOLFBOOT_PTP_NUM * PAGE_TABLE_PAGE_SIZE] |
47 | 46 | __attribute__((aligned(PAGE_TABLE_PAGE_SIZE))); |
48 | 47 | static int page_table_page_used; |
49 | | -/* TODO: reserve space in the linker? check amount of memory space? */ |
50 | | -uint8_t _mem[MEM_SIZE] __attribute__((aligned(PAGE_TABLE_PAGE_SIZE))); |
51 | | -uint8_t *mem = &_mem[0]; |
52 | 48 | #endif /* !WOLFBOOT_LOADER */ |
53 | 49 |
|
54 | 50 | static inline void x86_paging_clear_pte(uint64_t *pte) |
@@ -194,21 +190,17 @@ static void x86_paging_map_page(uint64_t vaddress, uint64_t paddress) |
194 | 190 | /* already mapped */ |
195 | 191 | if (*pl1e != 0) |
196 | 192 | return; |
197 | | - if (paddress == 0) { |
198 | | - paddress = (uint64_t)mem; |
199 | | - mem += PAGE_TABLE_PAGE_SIZE; |
200 | | - if (mem >= _mem + MEM_SIZE) { |
201 | | - wolfBoot_printf("No more pages to satisfy virtual allocation"); |
202 | | - panic(); |
203 | | - } |
204 | | - } |
205 | 193 | x86_paging_setup_entry(pl1e, paddress); |
206 | 194 | } |
207 | 195 |
|
208 | 196 | int x86_paging_map_memory(uint64_t va, uint64_t pa, uint32_t size) |
209 | 197 | { |
210 | 198 | uint64_t start, end, page; |
211 | 199 |
|
| 200 | + if ((pa & PAGE_MASK) == 0) { |
| 201 | + wolfBoot_printf("can't satisfy mapping request at pa address 0\r\n"); |
| 202 | + return -1; |
| 203 | + } |
212 | 204 | end = va + size; |
213 | 205 | start = va & PAGE_MASK; |
214 | 206 | pa = pa & PAGE_MASK; |
|
0 commit comments