/*
 * Copyright (c) 2011 The Native Client Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

/*
 * NaCl Simple/secure ELF loader (NaCl SEL) memory map.
 */

#include "native_client/src/include/portability.h"

#include <assert.h>
#include <stdlib.h>
#include <stdio.h>

#include "native_client/src/shared/platform/nacl_check.h"
#include "native_client/src/shared/platform/nacl_log.h"
#include "native_client/src/shared/platform/nacl_host_desc.h"
#include "native_client/src/trusted/service_runtime/sel_mem.h"
#include "native_client/src/trusted/service_runtime/sel_util.h"
#include "native_client/src/trusted/service_runtime/nacl_config.h"
#include "native_client/src/trusted/service_runtime/nacl_memory_object.h"

#define START_ENTRIES 5  /* tramp+text, rodata, data, bss, stack */
#define REMOVE_MARKED_DEBUG 0


/*
 * The memory map structure is a simple array of memory regions which
 * may have different access protections.  We do not yet merge regions
 * with the same access protections together to reduce the number of
 * regions, but may do so in the future.
 *
 * Regions are described by their (relative) starting page number, the
 * number of pages, and the protection that the pages should have.
 *
 * Takes ownership of the NaClMemObj.
 */
struct NaClVmmapEntry *NaClVmmapEntryMake(uintptr_t          page_num,
                                          size_t             npages,
                                          int                prot,
                                          struct NaClMemObj  *nmop) {
  struct NaClVmmapEntry *entry;

  NaClLog(4,
          "NaClVmmapEntryMake(0x%"NACL_PRIxPTR",0x%"NACL_PRIxS","
          "0x%x,0x%"NACL_PRIxPTR")\n",
          page_num, npages, prot, (uintptr_t) nmop);
  entry = (struct NaClVmmapEntry *) malloc(sizeof *entry);
  if (NULL == entry) {
    return NULL;
  }
  NaClLog(4, "entry: 0x%"NACL_PRIxPTR"\n", (uintptr_t) entry);
  entry->page_num = page_num;
  entry->npages = npages;
  entry->prot = prot;
  entry->nmop = nmop;
  entry->removed = 0;
  return entry;
}
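
/*
 * Illustrative sketch (not part of the original file): an entry
 * created by NaClVmmapEntryMake must eventually be released with
 * NaClVmmapEntryFree below, which also destroys the owned NaClMemObj.
 * The page number, page count, and prot value here are placeholder
 * assumptions.
 *
 *   struct NaClVmmapEntry *ent;
 *
 *   ent = NaClVmmapEntryMake(0x2000, 4, prot, NULL);
 *   if (NULL != ent) {
 *     ... use ent ...
 *     NaClVmmapEntryFree(ent);
 *   }
 */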


void NaClVmmapEntryFree(struct NaClVmmapEntry *entry) {
  NaClLog(4,
          ("NaClVmmapEntryFree(0x%08"NACL_PRIxPTR
           "): (0x%"NACL_PRIxPTR",0x%"NACL_PRIxS","
           "0x%x,0x%"NACL_PRIxPTR")\n"),
          (uintptr_t) entry,
          entry->page_num, entry->npages, entry->prot,
          (uintptr_t) entry->nmop);

  NaClMemObjSafeDtor(entry->nmop);
  free(entry->nmop);

  free(entry);
}


/*
 * Print debug.
 */
void NaClVmentryPrint(void                  *state,
                      struct NaClVmmapEntry *vmep) {
  UNREFERENCED_PARAMETER(state);

  printf("page num 0x%06x\n", (uint32_t) vmep->page_num);
  printf("num pages %u\n", (uint32_t) vmep->npages);
  printf("prot bits %x\n", vmep->prot);
  fflush(stdout);
}


void NaClVmmapDebug(struct NaClVmmap *self,
                    char             *msg) {
  puts(msg);
  NaClVmmapVisit(self, NaClVmentryPrint, (void *) 0);
  fflush(stdout);
}


int NaClVmmapCtor(struct NaClVmmap *self) {
  self->size = START_ENTRIES;
  if (SIZE_T_MAX / sizeof *self->vmentry < self->size) {
    return 0;
  }
  self->vmentry = calloc(self->size, sizeof *self->vmentry);
  if (!self->vmentry) {
    return 0;
  }
  self->nvalid = 0;
  self->is_sorted = 1;
  return 1;
}


void NaClVmmapDtor(struct NaClVmmap *self) {
  size_t i;

  for (i = 0; i < self->nvalid; ++i) {
    NaClVmmapEntryFree(self->vmentry[i]);
  }
  free(self->vmentry);
  self->vmentry = NULL;
}
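
/*
 * Lifecycle sketch (illustrative): a map constructed with
 * NaClVmmapCtor owns every entry added to it, and NaClVmmapDtor
 * releases them all, so callers never free entries individually.
 *
 *   struct NaClVmmap map;
 *
 *   if (!NaClVmmapCtor(&map)) {
 *     NaClLog(LOG_FATAL, "no memory for VM map\n");
 *   }
 *   ... NaClVmmapAdd / NaClVmmapUpdate calls ...
 *   NaClVmmapDtor(&map);
 */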

/*
 * Comparison function for qsort.  Should never encounter a
 * removed/invalid entry.
 */

static int NaClVmmapCmpEntries(void const *vleft,
                               void const *vright) {
  struct NaClVmmapEntry const *const *left =
      (struct NaClVmmapEntry const *const *) vleft;
  struct NaClVmmapEntry const *const *right =
      (struct NaClVmmapEntry const *const *) vright;

  /*
   * Compare explicitly rather than returning a cast of the
   * subtraction: the difference of two page numbers can exceed the
   * range of int, in which case the truncated result could report
   * the wrong ordering.
   */
  if ((*left)->page_num < (*right)->page_num) {
    return -1;
  }
  if ((*left)->page_num > (*right)->page_num) {
    return 1;
  }
  return 0;
}


static void NaClVmmapRemoveMarked(struct NaClVmmap *self) {
  size_t i;
  size_t last;

  if (0 == self->nvalid)
    return;

#if REMOVE_MARKED_DEBUG
  NaClVmmapDebug(self, "Before RemoveMarked");
#endif
  /*
   * Linearly scan with a move-end-to-front strategy to get rid of
   * marked-to-be-removed entries.
   */

  /*
   * Invariant:
   *
   * forall j in [0, self->nvalid): NULL != self->vmentry[j]
   */
  for (last = self->nvalid; last > 0 && self->vmentry[--last]->removed; ) {
    NaClVmmapEntryFree(self->vmentry[last]);
    self->vmentry[last] = NULL;
  }
  /*
   * If every entry was removed, vmentry[0] has already been freed and
   * nulled by the loop above, so check for NULL before looking at the
   * removed flag to avoid dereferencing a freed slot.
   */
  if (last == 0 &&
      (NULL == self->vmentry[0] || self->vmentry[0]->removed)) {
    NaClLog(LOG_FATAL, "No valid entries in VM map\n");
  }

  /*
   * Post condition of above loop:
   *
   * forall j in [0, last]: NULL != self->vmentry[j]
   *
   * 0 <= last < self->nvalid && !self->vmentry[last]->removed
   */
  CHECK(last < self->nvalid);
  CHECK(!self->vmentry[last]->removed);
  /*
   * and,
   *
   * forall j in (last, self->nvalid): NULL == self->vmentry[j]
   */

  /*
   * Loop invariant: forall j in [0, i): !self->vmentry[j]->removed
   */
  for (i = 0; i < last; ++i) {
    if (!self->vmentry[i]->removed) {
      continue;
    }
    /*
     * post condition: self->vmentry[i]->removed
     *
     * swap with entry at self->vmentry[last].
     */

    NaClVmmapEntryFree(self->vmentry[i]);
    self->vmentry[i] = self->vmentry[last];
    self->vmentry[last] = NULL;

    /*
     * Invariants here:
     *
     * forall j in [last, self->nvalid): NULL == self->vmentry[j]
     *
     * forall j in [0, i]: !self->vmentry[j]->removed
     */

    while (--last > i && self->vmentry[last]->removed) {
      NaClVmmapEntryFree(self->vmentry[last]);
      self->vmentry[last] = NULL;
    }
    /*
     * since !self->vmentry[i]->removed, we are guaranteed that
     * !self->vmentry[last]->removed when the while loop terminates.
     *
     * forall j in (last, self->nvalid): NULL == self->vmentry[j]
     */
  }
  /* i == last */
  /* forall j in [0, last]: !self->vmentry[j]->removed */
  /* forall j in (last, self->nvalid): NULL == self->vmentry[j] */
  self->nvalid = last + 1;

  self->is_sorted = 0;
#if REMOVE_MARKED_DEBUG
  NaClVmmapDebug(self, "After RemoveMarked");
#endif
}
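
/*
 * Worked example of the move-end-to-front compaction above
 * (illustrative).  'X' marks entries with removed == 1:
 *
 *   index:   0  1  2  3  4      nvalid == 5
 *   before: [A][X][B][X][C]
 *
 * The first loop trims trailing removed entries; C is live, so
 * last == 4.  The scan then reaches index 1, frees it, and moves C
 * down: [A][C][B][X][-].  The inner while walks last past the removed
 * entry at index 3 to the live B, so last == 2.  The outer loop stops
 * when i reaches last, and nvalid becomes last + 1 == 3: [A][C][B].
 * Order is not preserved, hence is_sorted is cleared.
 */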


void NaClVmmapMakeSorted(struct NaClVmmap *self) {
  if (self->is_sorted)
    return;

  NaClVmmapRemoveMarked(self);

  qsort(self->vmentry,
        self->nvalid,
        sizeof *self->vmentry,
        NaClVmmapCmpEntries);
  self->is_sorted = 1;
#if REMOVE_MARKED_DEBUG
  NaClVmmapDebug(self, "After Sort");
#endif
}


/*
 * Adds an entry.  Does not sort.
 */
int NaClVmmapAdd(struct NaClVmmap   *self,
                 uintptr_t          page_num,
                 size_t             npages,
                 int                prot,
                 struct NaClMemObj  *nmop) {
  struct NaClVmmapEntry *entry;

  NaClLog(2,
          ("NaClVmmapAdd(0x%08"NACL_PRIxPTR", 0x%"NACL_PRIxPTR", "
           "0x%"NACL_PRIxS", 0x%x, "
           "0x%08"NACL_PRIxPTR")\n"),
          (uintptr_t) self, page_num, npages, prot, (uintptr_t) nmop);
  if (self->nvalid == self->size) {
    size_t                new_size = 2 * self->size;
    struct NaClVmmapEntry **new_map;

    /* guard the multiplication below against overflow */
    if (SIZE_T_MAX / sizeof *new_map < new_size) {
      return 0;
    }
    new_map = realloc(self->vmentry, new_size * sizeof *new_map);
    if (NULL == new_map) {
      return 0;
    }
    self->vmentry = new_map;
    self->size = new_size;
  }
  /* self->nvalid < self->size */
  entry = NaClVmmapEntryMake(page_num, npages, prot, nmop);
  if (NULL == entry) {
    return 0;
  }

  self->vmentry[self->nvalid] = entry;
  self->is_sorted = 0;
  ++self->nvalid;

  return 1;
}
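
/*
 * Usage sketch (illustrative; the page values and prot bits are
 * placeholder assumptions): on success the map owns nmop, and the
 * entry array stays unsorted until the next NaClVmmapMakeSorted.
 *
 *   if (!NaClVmmapAdd(&map, text_page, text_npages, prot, NULL)) {
 *     NaClLog(LOG_FATAL, "NaClVmmapAdd failed\n");
 *   }
 */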


/*
 * Update the virtual memory map.  Deletion is handled by a remove
 * flag, since a NULL nmop just means that the memory is backed by the
 * system paging file.
 */
void NaClVmmapUpdate(struct NaClVmmap   *self,
                     uintptr_t          page_num,
                     size_t             npages,
                     int                prot,
                     struct NaClMemObj  *nmop,
                     int                remove) {
  /* update existing entries or create new entry as needed */
  size_t    i;
  uintptr_t new_region_end_page = page_num + npages;

  NaClLog(2,
          ("NaClVmmapUpdate(0x%08"NACL_PRIxPTR", "
           "0x%"NACL_PRIxPTR", 0x%"NACL_PRIxS", "
           "0x%x, 0x%08"NACL_PRIxPTR", %d)\n"),
          (uintptr_t) self, page_num, npages, prot, (uintptr_t) nmop,
          remove);
  NaClVmmapMakeSorted(self);

  CHECK(npages > 0);

  for (i = 0; i < self->nvalid; i++) {
    struct NaClVmmapEntry *ent = self->vmentry[i];
    uintptr_t             ent_end_page = ent->page_num + ent->npages;
    nacl_off64_t          additional_offset =
        (new_region_end_page - ent->page_num) << NACL_PAGESHIFT;

    if (ent->page_num < page_num && new_region_end_page < ent_end_page) {
      /*
       * Split existing mapping into two parts, with new mapping in
       * the middle.
       */
      if (!NaClVmmapAdd(self,
                        new_region_end_page,
                        ent_end_page - new_region_end_page,
                        ent->prot,
                        NaClMemObjSplit(ent->nmop, additional_offset))) {
        NaClLog(LOG_FATAL, "NaClVmmapUpdate: could not split entry\n");
      }
      ent->npages = page_num - ent->page_num;
      break;
    } else if (ent->page_num < page_num && page_num < ent_end_page) {
      /* New mapping overlaps end of existing mapping. */
      ent->npages = page_num - ent->page_num;
    } else if (ent->page_num < new_region_end_page &&
               new_region_end_page < ent_end_page) {
      /* New mapping overlaps start of existing mapping. */
      NaClMemObjIncOffset(ent->nmop, additional_offset);

      ent->page_num = new_region_end_page;
      ent->npages = ent_end_page - new_region_end_page;
      break;
    } else if (page_num <= ent->page_num &&
               ent_end_page <= new_region_end_page) {
      /* New mapping covers all of the existing mapping. */
      ent->removed = 1;
    } else {
      /* No overlap. */
      assert(new_region_end_page <= ent->page_num ||
             ent_end_page <= page_num);
    }
  }

  if (!remove) {
    if (!NaClVmmapAdd(self, page_num, npages, prot, nmop)) {
      NaClLog(LOG_FATAL, "NaClVmmapUpdate: could not add entry\n");
    }
  }

  NaClVmmapRemoveMarked(self);
}
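
/*
 * The four overlap cases handled by the loop above (illustrative;
 * N is the new region, E an existing entry):
 *
 *   1. E: |----------|      E straddles N: E keeps its head, and its
 *      N:    |----|         tail is re-added with a split nmop.
 *
 *   2. E: |------|          N overlaps E's end: E is truncated.
 *      N:     |------|
 *
 *   3. E:     |------|      N overlaps E's start: E's start page and
 *      N: |------|          nmop offset are advanced past N.
 *
 *   4. E:    |----|         N covers E entirely: E is marked removed.
 *      N: |----------|
 */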


static int NaClVmmapContainCmpEntries(void const *vkey,
                                      void const *vent) {
  struct NaClVmmapEntry const *const *key =
      (struct NaClVmmapEntry const *const *) vkey;
  struct NaClVmmapEntry const *const *ent =
      (struct NaClVmmapEntry const *const *) vent;

  NaClLog(5, "key->page_num = 0x%05"NACL_PRIxPTR"\n", (*key)->page_num);

  NaClLog(5, "entry->page_num = 0x%05"NACL_PRIxPTR"\n", (*ent)->page_num);
  NaClLog(5, "entry->npages = 0x%"NACL_PRIxS"\n", (*ent)->npages);

  if ((*key)->page_num < (*ent)->page_num) return -1;
  if ((*key)->page_num < (*ent)->page_num + (*ent)->npages) return 0;
  return 1;
}

struct NaClVmmapEntry const *NaClVmmapFindPage(struct NaClVmmap *self,
                                               uintptr_t        pnum) {
  struct NaClVmmapEntry         key;
  struct NaClVmmapEntry         *kptr;
  struct NaClVmmapEntry *const  *result_ptr;

  NaClVmmapMakeSorted(self);
  key.page_num = pnum;
  kptr = &key;
  result_ptr = ((struct NaClVmmapEntry *const *)
                bsearch(&kptr,
                        self->vmentry,
                        self->nvalid,
                        sizeof self->vmentry[0],
                        NaClVmmapContainCmpEntries));
  return result_ptr ? *result_ptr : NULL;
}
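
/*
 * Lookup sketch (illustrative): entries are matched by containment,
 * so any page inside a mapping finds that mapping.
 *
 *   struct NaClVmmapEntry const *ent;
 *
 *   ent = NaClVmmapFindPage(&map, addr >> NACL_PAGESHIFT);
 *   if (NULL == ent) {
 *     ... the page is not mapped ...
 *   }
 */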


struct NaClVmmapIter *NaClVmmapFindPageIter(struct NaClVmmap     *self,
                                            uintptr_t            pnum,
                                            struct NaClVmmapIter *space) {
  struct NaClVmmapEntry key;
  struct NaClVmmapEntry *kptr;
  struct NaClVmmapEntry **result_ptr;

  NaClVmmapMakeSorted(self);
  key.page_num = pnum;
  kptr = &key;
  result_ptr = ((struct NaClVmmapEntry **)
                bsearch(&kptr,
                        self->vmentry,
                        self->nvalid,
                        sizeof self->vmentry[0],
                        NaClVmmapContainCmpEntries));
  space->vmmap = self;
  if (NULL == result_ptr) {
    space->entry_ix = self->nvalid;
  } else {
    space->entry_ix = result_ptr - self->vmentry;
  }
  return space;
}


int NaClVmmapIterAtEnd(struct NaClVmmapIter *nvip) {
  return nvip->entry_ix >= nvip->vmmap->nvalid;
}


/*
 * IterStar only permissible if not AtEnd
 */
struct NaClVmmapEntry *NaClVmmapIterStar(struct NaClVmmapIter *nvip) {
  return nvip->vmmap->vmentry[nvip->entry_ix];
}


void NaClVmmapIterIncr(struct NaClVmmapIter *nvip) {
  ++nvip->entry_ix;
}
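
/*
 * Iteration idiom (illustrative): walk all mappings at or after a
 * given page number.
 *
 *   struct NaClVmmapIter  iter;
 *   struct NaClVmmapEntry *ent;
 *
 *   for (NaClVmmapFindPageIter(&map, pnum, &iter);
 *        !NaClVmmapIterAtEnd(&iter);
 *        NaClVmmapIterIncr(&iter)) {
 *     ent = NaClVmmapIterStar(&iter);
 *     ... use ent->page_num, ent->npages ...
 *   }
 */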


/*
 * The iterator becomes invalid after Erase.  We could have a version
 * that keeps the iterator valid by copying forward, but it is unclear
 * whether that is needed.
 */
void NaClVmmapIterErase(struct NaClVmmapIter *nvip) {
  struct NaClVmmap *nvp;

  nvp = nvip->vmmap;
  /*
   * Use the entry destructor rather than a bare free() so that the
   * owned NaClMemObj is released as well.
   */
  NaClVmmapEntryFree(nvp->vmentry[nvip->entry_ix]);
  nvp->vmentry[nvip->entry_ix] = nvp->vmentry[--nvp->nvalid];
  nvp->is_sorted = 0;
}


void NaClVmmapVisit(struct NaClVmmap *self,
                    void             (*fn)(void *state,
                                           struct NaClVmmapEntry *entry),
                    void             *state) {
  size_t i;
  size_t nentries;

  NaClVmmapMakeSorted(self);
  for (i = 0, nentries = self->nvalid; i < nentries; ++i) {
    (*fn)(state, self->vmentry[i]);
  }
}
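
/*
 * Visitor sketch (illustrative): NaClVmentryPrint above is a
 * ready-made callback; a custom visitor threads state through the
 * void pointer, e.g. to total the mapped pages.
 *
 *   static void CountPages(void *state, struct NaClVmmapEntry *ent) {
 *     *(size_t *) state += ent->npages;
 *   }
 *
 *   size_t total = 0;
 *   NaClVmmapVisit(&map, CountPages, &total);
 */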


/*
 * Linear search, from high addresses down.
 */
uintptr_t NaClVmmapFindSpace(struct NaClVmmap *self,
                             size_t           num_pages) {
  size_t                i;
  struct NaClVmmapEntry *vmep;
  uintptr_t             end_page;
  uintptr_t             start_page;

  if (0 == self->nvalid)
    return 0;
  NaClVmmapMakeSorted(self);
  for (i = self->nvalid; --i > 0; ) {
    vmep = self->vmentry[i-1];
    end_page = vmep->page_num + vmep->npages;  /* end page from previous */
    start_page = self->vmentry[i]->page_num;   /* start page from current */
    if (start_page - end_page >= num_pages) {
      return start_page - num_pages;
    }
  }
  return 0;
  /*
   * In user addresses, page 0 is always the trampoline page, and user
   * addresses are contained in system addresses, so returning a
   * system page number of 0 can serve as an error indicator: it is at
   * worst the trampoline page, and likely to be below it.
   */
}
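
/*
 * Worked example (illustrative): with adjacent sorted entries ending
 * at page 0x100 and starting at page 0x180, the gap is 0x80 pages.  A
 * request for 0x20 pages returns 0x180 - 0x20 == 0x160, i.e. the
 * space is carved from the top of the highest gap that fits.
 */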


/*
 * Linear search, from high addresses down.  For mmap, so the starting
 * address of the region found must be NACL_MAP_PAGESIZE aligned.
 *
 * For general mmap it is better to use as high an address as
 * possible, since the stack size for the main thread is currently
 * fixed, and the heap is the only thing that grows.
 */
uintptr_t NaClVmmapFindMapSpace(struct NaClVmmap *self,
                                size_t           num_pages) {
  size_t                i;
  struct NaClVmmapEntry *vmep;
  uintptr_t             end_page;
  uintptr_t             start_page;

  if (0 == self->nvalid)
    return 0;
  NaClVmmapMakeSorted(self);
  num_pages = NaClRoundPageNumUpToMapMultiple(num_pages);

  for (i = self->nvalid; --i > 0; ) {
    vmep = self->vmentry[i-1];
    end_page = vmep->page_num + vmep->npages;  /* end page from previous */
    end_page = NaClRoundPageNumUpToMapMultiple(end_page);

    start_page = self->vmentry[i]->page_num;   /* start page from current */
    if (NACL_MAP_PAGESHIFT > NACL_PAGESHIFT) {

      start_page = NaClTruncPageNumDownToMapMultiple(start_page);

      if (start_page <= end_page) {
        continue;
      }
    }
    if (start_page - end_page >= num_pages) {
      return start_page - num_pages;
    }
  }
  return 0;
  /*
   * In user addresses, page 0 is always the trampoline page, and user
   * addresses are contained in system addresses, so returning a
   * system page number of 0 can serve as an error indicator: it is at
   * worst the trampoline page, and likely to be below it.
   */
}
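
/*
 * Alignment example (illustrative, assuming a 64K NACL_MAP_PAGESIZE
 * and a 4K NACL_PAGESIZE, i.e. 16 pages per map multiple): a gap
 * bounded by end page 0x101 and start page 0x1f3 is narrowed by the
 * rounding above to [0x110, 0x1f0), so only 0xe0 of the raw 0xf2
 * pages are usable, and a 0x20-page request returns
 * 0x1f0 - 0x20 == 0x1d0.
 */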


/*
 * Linear search, from uaddr up.
 */
uintptr_t NaClVmmapFindMapSpaceAboveHint(struct NaClVmmap *self,
                                         uintptr_t        uaddr,
                                         size_t           num_pages) {
  size_t                nvalid;
  size_t                i;
  struct NaClVmmapEntry *vmep;
  uintptr_t             usr_page;
  uintptr_t             start_page;
  uintptr_t             end_page;

  NaClVmmapMakeSorted(self);

  usr_page = uaddr >> NACL_PAGESHIFT;
  num_pages = NaClRoundPageNumUpToMapMultiple(num_pages);

  nvalid = self->nvalid;

  for (i = 1; i < nvalid; ++i) {
    vmep = self->vmentry[i-1];
    end_page = vmep->page_num + vmep->npages;
    end_page = NaClRoundPageNumUpToMapMultiple(end_page);

    start_page = self->vmentry[i]->page_num;
    if (NACL_MAP_PAGESHIFT > NACL_PAGESHIFT) {

      start_page = NaClTruncPageNumDownToMapMultiple(start_page);

      if (start_page <= end_page) {
        continue;
      }
    }
    if (end_page <= usr_page && usr_page < start_page) {
      end_page = usr_page;
    }
    if (usr_page <= end_page && (start_page - end_page) >= num_pages) {
      /* found a gap at or after uaddr that's big enough */
      return end_page;
    }
  }
  return 0;
}