/*
 * Copyright (c) 2011 The Native Client Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

/*
 * NaCl Simple/secure ELF loader (NaCl SEL) memory map.
 */

#include "native_client/src/include/portability.h"

#include <assert.h>
#include <fcntl.h>
#include <stdlib.h>
#include <stdio.h>

#include "native_client/src/include/nacl_platform.h"

#include "native_client/src/shared/platform/nacl_check.h"
#include "native_client/src/shared/platform/nacl_log.h"
#include "native_client/src/shared/platform/nacl_host_desc.h"
#include "native_client/src/trusted/desc/nacl_desc_base.h"
#include "native_client/src/trusted/desc/nacl_desc_io.h"
#include "native_client/src/trusted/service_runtime/sel_mem.h"
#include "native_client/src/trusted/service_runtime/sel_util.h"
#include "native_client/src/trusted/service_runtime/nacl_config.h"

#include "native_client/src/trusted/service_runtime/include/sys/fcntl.h"
#include "native_client/src/trusted/service_runtime/include/sys/mman.h"

#define START_ENTRIES       5  /* tramp+text, rodata, data, bss, stack */
#define REMOVE_MARKED_DEBUG 0

/*
 * The memory map is a simple array of memory regions which may have
 * different access protections.  We do not yet merge adjacent regions
 * that have the same access protection to reduce the number of
 * regions, but may do so in the future.
 *
 * Regions are described by their (relative) starting page number, the
 * number of pages, and the protection that the pages should have.
 */
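
/*
 * Illustrative example (the page numbers and sizes here are made up):
 * right after program load, the map typically holds the region kinds
 * that START_ENTRIES is sized for, e.g.
 *
 *   page_num   npages   prot
 *   0x000      0x100    read/exec    (trampoline + text)
 *   0x100      0x040    read         (rodata)
 *   0x140      0x080    read/write   (data + bss)
 *   0xffc0     0x040    read/write   (stack)
 */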
struct NaClVmmapEntry *NaClVmmapEntryMake(uintptr_t page_num,
                                          size_t npages,
                                          int prot,
                                          int flags,
                                          struct NaClDesc *desc,
                                          nacl_off64_t offset,
                                          nacl_off64_t file_size) {
  struct NaClVmmapEntry *entry;

  NaClLog(4,
          "NaClVmmapEntryMake(0x%"NACL_PRIxPTR",0x%"NACL_PRIxS","
          "0x%x,0x%x,0x%"NACL_PRIxPTR",0x%"NACL_PRIx64")\n",
          page_num, npages, prot, flags, (uintptr_t) desc, offset);
  entry = (struct NaClVmmapEntry *) malloc(sizeof *entry);
  if (NULL == entry) {
    return 0;
  }
  NaClLog(4, "entry: 0x%"NACL_PRIxPTR"\n", (uintptr_t) entry);
  entry->page_num = page_num;
  entry->npages = npages;
  entry->prot = prot;
  entry->flags = flags;
  entry->removed = 0;
  entry->desc = desc;
  if (desc != NULL) {
    NaClDescRef(desc);
  }
  entry->offset = offset;
  entry->file_size = file_size;
  return entry;
}
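
/*
 * Ownership note: NaClVmmapEntryMake takes its own reference on |desc|
 * (the NaClDescRef call above), so a caller retains and must eventually
 * release its own reference.  The matching release is the
 * NaClDescSafeUnref in NaClVmmapEntryFree below.
 */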


void NaClVmmapEntryFree(struct NaClVmmapEntry *entry) {
  NaClLog(4,
          ("NaClVmmapEntryFree(0x%08"NACL_PRIxPTR
           "): (0x%"NACL_PRIxPTR",0x%"NACL_PRIxS","
           "0x%x,0x%x,0x%"NACL_PRIxPTR",0x%"NACL_PRIx64")\n"),
          (uintptr_t) entry,
          entry->page_num, entry->npages, entry->prot,
          entry->flags, (uintptr_t) entry->desc, entry->offset);

  if (entry->desc != NULL) {
    NaClDescSafeUnref(entry->desc);
  }
  free(entry);
}


/*
 * Print debug output for a single entry; matches the NaClVmmapVisit
 * callback signature.
 */
void NaClVmentryPrint(void *state,
                      struct NaClVmmapEntry *vmep) {
  UNREFERENCED_PARAMETER(state);

  printf("page num 0x%06x\n", (uint32_t) vmep->page_num);
  printf("num pages %u\n", (uint32_t) vmep->npages);
  printf("prot bits %x\n", vmep->prot);
  printf("flags %x\n", vmep->flags);
  fflush(stdout);
}


void NaClVmmapDebug(struct NaClVmmap *self,
                    char *msg) {
  puts(msg);
  NaClVmmapVisit(self, NaClVmentryPrint, (void *) 0);
  fflush(stdout);
}


int NaClVmmapCtor(struct NaClVmmap *self) {
  self->size = START_ENTRIES;
  if (SIZE_T_MAX / sizeof *self->vmentry < self->size) {
    return 0;
  }
  self->vmentry = calloc(self->size, sizeof *self->vmentry);
  if (!self->vmentry) {
    return 0;
  }
  self->nvalid = 0;
  self->is_sorted = 1;
  return 1;
}


void NaClVmmapDtor(struct NaClVmmap *self) {
  size_t i;

  for (i = 0; i < self->nvalid; ++i) {
    NaClVmmapEntryFree(self->vmentry[i]);
  }
  free(self->vmentry);
  self->vmentry = 0;
}

/*
 * Comparison function for qsort.  Should never encounter a
 * removed/invalid entry.
 */
static int NaClVmmapCmpEntries(void const *vleft,
                               void const *vright) {
  struct NaClVmmapEntry const *const *left =
      (struct NaClVmmapEntry const *const *) vleft;
  struct NaClVmmapEntry const *const *right =
      (struct NaClVmmapEntry const *const *) vright;

  /*
   * Compare rather than subtract: a page number difference need not
   * fit in an int.
   */
  if ((*left)->page_num < (*right)->page_num) return -1;
  if ((*left)->page_num > (*right)->page_num) return 1;
  return 0;
}
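
/*
 * qsort with this comparator (see NaClVmmapMakeSorted) orders entries by
 * ascending start page; e.g. entries starting at pages 0x200, 0x050,
 * 0x100 sort to 0x050, 0x100, 0x200.  Entries are maintained
 * non-overlapping by NaClVmmapUpdate, so this is ascending address order.
 */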


static void NaClVmmapRemoveMarked(struct NaClVmmap *self) {
  size_t i;
  size_t last;

  if (0 == self->nvalid)
    return;

#if REMOVE_MARKED_DEBUG
  NaClVmmapDebug(self, "Before RemoveMarked");
#endif
  /*
   * Linearly scan with a move-end-to-front strategy to get rid of
   * marked-to-be-removed entries.
   */

  /*
   * Invariant:
   *
   * forall j in [0, self->nvalid): NULL != self->vmentry[j]
   */
  for (last = self->nvalid; last > 0 && self->vmentry[--last]->removed; ) {
    NaClVmmapEntryFree(self->vmentry[last]);
    self->vmentry[last] = NULL;
  }
  /*
   * If every entry was marked removed, the loop above also freed and
   * NULLed out vmentry[0]; check the pointer rather than dereferencing
   * a freed entry.
   */
  if (last == 0 && NULL == self->vmentry[0]) {
    NaClLog(LOG_FATAL, "No valid entries in VM map\n");
    return;
  }

  /*
   * Post condition of above loop:
   *
   * forall j in [0, last]: NULL != self->vmentry[j]
   *
   * 0 <= last < self->nvalid && !self->vmentry[last]->removed
   */
  CHECK(last < self->nvalid);
  CHECK(!self->vmentry[last]->removed);
  /*
   * and,
   *
   * forall j in (last, self->nvalid): NULL == self->vmentry[j]
   */

  /*
   * Loop invariant: forall j in [0, i): !self->vmentry[j]->removed
   */
  for (i = 0; i < last; ++i) {
    if (!self->vmentry[i]->removed) {
      continue;
    }
    /*
     * post condition: self->vmentry[i]->removed
     *
     * swap with entry at self->vmentry[last].
     */

    NaClVmmapEntryFree(self->vmentry[i]);
    self->vmentry[i] = self->vmentry[last];
    self->vmentry[last] = NULL;

    /*
     * Invariants here:
     *
     * forall j in [last, self->nvalid): NULL == self->vmentry[j]
     *
     * forall j in [0, i]: !self->vmentry[j]->removed
     */

    while (--last > i && self->vmentry[last]->removed) {
      NaClVmmapEntryFree(self->vmentry[last]);
      self->vmentry[last] = NULL;
    }
    /*
     * since !self->vmentry[i]->removed, we are guaranteed that
     * !self->vmentry[last]->removed when the while loop terminates.
     *
     * forall j in (last, self->nvalid): NULL == self->vmentry[j]
     */
  }
  /* i == last */
  /* forall j in [0, last]: !self->vmentry[j]->removed */
  /* forall j in (last, self->nvalid): NULL == self->vmentry[j] */
  self->nvalid = last + 1;

  self->is_sorted = 0;
#if REMOVE_MARKED_DEBUG
  NaClVmmapDebug(self, "After RemoveMarked");
#endif
}
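
/*
 * Worked example (illustrative): with nvalid == 5 and removed flags
 * [0, 1, 0, 1, 1], the trailing scan frees and NULLs indices 4 and 3,
 * leaving last == 2.  The forward scan then frees index 1 and moves the
 * entry at index 2 into that slot.  The result is two live entries at
 * indices 0 and 1, nvalid == 2, and is_sorted cleared, since the move
 * can break address order.
 */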


void NaClVmmapMakeSorted(struct NaClVmmap *self) {
  if (self->is_sorted)
    return;

  NaClVmmapRemoveMarked(self);

  qsort(self->vmentry,
        self->nvalid,
        sizeof *self->vmentry,
        NaClVmmapCmpEntries);
  self->is_sorted = 1;
#if REMOVE_MARKED_DEBUG
  NaClVmmapDebug(self, "After Sort");
#endif
}

void NaClVmmapAdd(struct NaClVmmap *self,
                  uintptr_t page_num,
                  size_t npages,
                  int prot,
                  int flags,
                  struct NaClDesc *desc,
                  nacl_off64_t offset,
                  nacl_off64_t file_size) {
  struct NaClVmmapEntry *entry;

  NaClLog(2,
          ("NaClVmmapAdd(0x%08"NACL_PRIxPTR", 0x%"NACL_PRIxPTR", "
           "0x%"NACL_PRIxS", 0x%x, 0x%x, 0x%"NACL_PRIxPTR", "
           "0x%"NACL_PRIx64")\n"),
          (uintptr_t) self, page_num, npages, prot, flags,
          (uintptr_t) desc, offset);
  if (self->nvalid == self->size) {
    size_t new_size = 2 * self->size;
    struct NaClVmmapEntry **new_map;

    new_map = realloc(self->vmentry, new_size * sizeof *new_map);
    if (NULL == new_map) {
      NaClLog(LOG_FATAL, "NaClVmmapAdd: could not allocate memory\n");
      return;
    }
    self->vmentry = new_map;
    self->size = new_size;
  }
  /* self->nvalid < self->size */
  entry = NaClVmmapEntryMake(page_num, npages, prot, flags,
                             desc, offset, file_size);

  self->vmentry[self->nvalid] = entry;
  self->is_sorted = 0;
  ++self->nvalid;
}

/*
 * Update the virtual memory map.  Deletion is handled by a remove
 * flag, since a NULL desc just means that the memory is backed by the
 * system paging file.
 */
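
/*
 * Overlap cases handled by the loop below, for an existing entry E and
 * the new region N (illustrative; addresses increase to the right):
 *
 *   1. E: [==========)   N:   [--)      N inside E: split E around N
 *   2. E: [======)       N:    [====)   N overlaps E's tail: trim E
 *   3. E:     [======)   N: [====)      N overlaps E's head: shift E up
 *   4. E:   [==)         N: [========)  N covers E: mark E removed
 *
 * Anything else does not overlap and leaves E unchanged.
 */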
static void NaClVmmapUpdate(struct NaClVmmap *self,
                            uintptr_t page_num,
                            size_t npages,
                            int prot,
                            int flags,
                            int remove,
                            struct NaClDesc *desc,
                            nacl_off64_t offset,
                            nacl_off64_t file_size) {
  /* update existing entries or create new entry as needed */
  size_t i;
  uintptr_t new_region_end_page = page_num + npages;

  NaClLog(2,
          ("NaClVmmapUpdate(0x%08"NACL_PRIxPTR", 0x%"NACL_PRIxPTR", "
           "0x%"NACL_PRIxS", 0x%x, 0x%x, %d, 0x%"NACL_PRIxPTR", "
           "0x%"NACL_PRIx64")\n"),
          (uintptr_t) self, page_num, npages, prot, flags,
          remove, (uintptr_t) desc, offset);
  NaClVmmapMakeSorted(self);

  CHECK(npages > 0);

  for (i = 0; i < self->nvalid; i++) {
    struct NaClVmmapEntry *ent = self->vmentry[i];
    uintptr_t ent_end_page = ent->page_num + ent->npages;
    nacl_off64_t additional_offset =
        (new_region_end_page - ent->page_num) << NACL_PAGESHIFT;

    if (ent->page_num < page_num && new_region_end_page < ent_end_page) {
      /*
       * Split existing mapping into two parts, with new mapping in
       * the middle.
       */
      NaClVmmapAdd(self,
                   new_region_end_page,
                   ent_end_page - new_region_end_page,
                   ent->prot,
                   ent->flags,
                   ent->desc,
                   ent->offset + additional_offset,
                   ent->file_size);
      ent->npages = page_num - ent->page_num;
      break;
    } else if (ent->page_num < page_num && page_num < ent_end_page) {
      /* New mapping overlaps end of existing mapping. */
      ent->npages = page_num - ent->page_num;
    } else if (ent->page_num < new_region_end_page &&
               new_region_end_page < ent_end_page) {
      /* New mapping overlaps start of existing mapping. */
      ent->page_num = new_region_end_page;
      ent->npages = ent_end_page - new_region_end_page;
      ent->offset += additional_offset;
      break;
    } else if (page_num <= ent->page_num &&
               ent_end_page <= new_region_end_page) {
      /* New mapping covers all of the existing mapping. */
      ent->removed = 1;
    } else {
      /* No overlap. */
      assert(new_region_end_page <= ent->page_num || ent_end_page <= page_num);
    }
  }

  if (!remove) {
    NaClVmmapAdd(self, page_num, npages, prot, flags, desc, offset, file_size);
  }

  NaClVmmapRemoveMarked(self);
}

void NaClVmmapAddWithOverwrite(struct NaClVmmap *self,
                               uintptr_t page_num,
                               size_t npages,
                               int prot,
                               int flags,
                               struct NaClDesc *desc,
                               nacl_off64_t offset,
                               nacl_off64_t file_size) {
  NaClVmmapUpdate(self,
                  page_num,
                  npages,
                  prot,
                  flags,
                  /* remove= */ 0,
                  desc,
                  offset,
                  file_size);
}

void NaClVmmapRemove(struct NaClVmmap *self,
                     uintptr_t page_num,
                     size_t npages) {
  NaClVmmapUpdate(self,
                  page_num,
                  npages,
                  /* prot= */ 0,
                  /* flags= */ 0,
                  /* remove= */ 1,
                  /* desc= */ NULL,
                  /* offset= */ 0,
                  /* file_size= */ 0);
}
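
/*
 * Example (illustrative): an munmap-style handler undoes an earlier
 * NaClVmmapAddWithOverwrite over the same range with
 *
 *   NaClVmmapRemove(map, page_num, npages);
 *
 * Entries fully covered by the range are dropped and partially covered
 * entries are trimmed, via NaClVmmapUpdate above.
 */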

/*
 * NaClVmmapCheckExistingMapping checks whether there is an existing
 * mapping with maximum protection equivalent to or higher than the
 * given one.
 */
static int NaClVmmapCheckExistingMapping(struct NaClVmmap *self,
                                         uintptr_t page_num,
                                         size_t npages,
                                         int prot) {
  size_t i;
  uintptr_t region_end_page = page_num + npages;

  NaClLog(2,
          ("NaClVmmapCheckExistingMapping(0x%08"NACL_PRIxPTR", 0x%"NACL_PRIxPTR
           ", 0x%"NACL_PRIxS", 0x%x)\n"),
          (uintptr_t) self, page_num, npages, prot);

  if (0 == self->nvalid) {
    return 0;
  }
  NaClVmmapMakeSorted(self);

  for (i = 0; i < self->nvalid; ++i) {
    struct NaClVmmapEntry *ent = self->vmentry[i];
    uintptr_t ent_end_page = ent->page_num + ent->npages;
    int max_prot = NaClVmmapEntryMaxProt(ent);

    if (ent->page_num <= page_num && region_end_page <= ent_end_page) {
      /* The region lies entirely inside this entry. */
      return 0 == (prot & (~max_prot));
    } else if (ent->page_num <= page_num && page_num < ent_end_page) {
      /* The region overlaps this entry; check the overlapping part. */
      if (0 != (prot & (~max_prot))) {
        return 0;
      }
      page_num = ent_end_page;
      npages = region_end_page - ent_end_page;
    } else if (page_num < ent->page_num) {
      /* The region starts in a gap with no existing mapping. */
      return 0;
    }
  }
  return 0;
}

int NaClVmmapChangeProt(struct NaClVmmap *self,
                        uintptr_t page_num,
                        size_t npages,
                        int prot) {
  size_t i;
  size_t nvalid;
  uintptr_t new_region_end_page = page_num + npages;

  /*
   * NaClVmmapCheckExistingMapping should always be called before
   * NaClVmmapChangeProt proceeds, to ensure that a valid mapping
   * exists, as modifications cannot be rolled back.
   */
  if (!NaClVmmapCheckExistingMapping(self, page_num, npages, prot)) {
    return 0;
  }

  NaClLog(2,
          ("NaClVmmapChangeProt(0x%08"NACL_PRIxPTR", 0x%"NACL_PRIxPTR
           ", 0x%"NACL_PRIxS", 0x%x)\n"),
          (uintptr_t) self, page_num, npages, prot);
  NaClVmmapMakeSorted(self);

  /*
   * This loop and its interval boundary tests closely follow those in
   * NaClVmmapUpdate.  When updating them, do not forget to update both
   * places where appropriate.
   * TODO(phosek): use a better data structure which supports intervals
   */

  for (i = 0, nvalid = self->nvalid; i < nvalid && npages > 0; i++) {
    struct NaClVmmapEntry *ent = self->vmentry[i];
    uintptr_t ent_end_page = ent->page_num + ent->npages;
    nacl_off64_t additional_offset =
        (new_region_end_page - ent->page_num) << NACL_PAGESHIFT;

    if (ent->page_num < page_num && new_region_end_page < ent_end_page) {
      /* Split existing mapping into two parts. */
      NaClVmmapAdd(self,
                   new_region_end_page,
                   ent_end_page - new_region_end_page,
                   ent->prot,
                   ent->flags,
                   ent->desc,
                   ent->offset + additional_offset,
                   ent->file_size);
      ent->npages = page_num - ent->page_num;
      /*
       * Add the new mapping into the middle.  Note that offsets are
       * byte offsets, so the page delta must be shifted.
       */
      NaClVmmapAdd(self,
                   page_num,
                   npages,
                   prot,
                   ent->flags,
                   ent->desc,
                   ent->offset +
                       ((nacl_off64_t) (page_num - ent->page_num)
                        << NACL_PAGESHIFT),
                   ent->file_size);
      break;
    } else if (ent->page_num < page_num && page_num < ent_end_page) {
      /* New mapping overlaps end of existing mapping. */
      ent->npages = page_num - ent->page_num;
      /* Add the overlapping part of the mapping. */
      NaClVmmapAdd(self,
                   page_num,
                   ent_end_page - page_num,
                   prot,
                   ent->flags,
                   ent->desc,
                   ent->offset +
                       ((nacl_off64_t) (page_num - ent->page_num)
                        << NACL_PAGESHIFT),
                   ent->file_size);
      /* The remaining part (if any) will be handled in a later iteration. */
      page_num = ent_end_page;
      npages = new_region_end_page - ent_end_page;
    } else if (ent->page_num < new_region_end_page &&
               new_region_end_page < ent_end_page) {
      /* New mapping overlaps start of existing mapping; split it. */
      NaClVmmapAdd(self,
                   page_num,
                   npages,
                   prot,
                   ent->flags,
                   ent->desc,
                   ent->offset,
                   ent->file_size);
      ent->page_num = new_region_end_page;
      ent->npages = ent_end_page - new_region_end_page;
      ent->offset += additional_offset;
      break;
    } else if (page_num <= ent->page_num &&
               ent_end_page <= new_region_end_page) {
      /* New mapping covers all of the existing mapping. */
      page_num = ent_end_page;
      npages = new_region_end_page - ent_end_page;
      ent->prot = prot;
    } else {
      /* No overlap. */
      assert(new_region_end_page <= ent->page_num || ent_end_page <= page_num);
    }
  }
  return 1;
}

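/*
 * NaClVmmapEntryMaxProt returns the maximum NACL_ABI_PROT_* bits that
 * the entry may be granted.  Anonymous and MAP_PRIVATE mappings can
 * always be made read/write; shared file-backed mappings are capped by
 * the descriptor's open mode, e.g. an O_RDONLY descriptor permits
 * NACL_ABI_PROT_READ only.
 */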
int NaClVmmapEntryMaxProt(struct NaClVmmapEntry *entry) {
  int flags = NACL_ABI_PROT_NONE;

  if (entry->desc != NULL && 0 == (entry->flags & NACL_ABI_MAP_PRIVATE)) {
    int o_flags = (*NACL_VTBL(NaClDesc, entry->desc)->GetFlags)(entry->desc);
    switch (o_flags & NACL_ABI_O_ACCMODE) {
      case NACL_ABI_O_RDONLY:
        flags = NACL_ABI_PROT_READ;
        break;
      case NACL_ABI_O_WRONLY:
        flags = NACL_ABI_PROT_WRITE;
        break;
      case NACL_ABI_O_RDWR:
        flags = NACL_ABI_PROT_READ | NACL_ABI_PROT_WRITE;
        break;
      default:
        NaClLog(LOG_FATAL, "Internal error: illegal O_ACCMODE\n");
        break;
    }
  } else {
    flags = NACL_ABI_PROT_READ | NACL_ABI_PROT_WRITE;
  }

  return flags;
}

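/*
 * bsearch comparator for containment: returns 0 when the key's page
 * lies within [ent->page_num, ent->page_num + ent->npages), -1 when it
 * is below the entry's start, and 1 when it is at or beyond the
 * entry's end.
 */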
static int NaClVmmapContainCmpEntries(void const *vkey,
                                      void const *vent) {
  struct NaClVmmapEntry const *const *key =
      (struct NaClVmmapEntry const *const *) vkey;
  struct NaClVmmapEntry const *const *ent =
      (struct NaClVmmapEntry const *const *) vent;

  NaClLog(5, "key->page_num   = 0x%05"NACL_PRIxPTR"\n", (*key)->page_num);

  NaClLog(5, "entry->page_num = 0x%05"NACL_PRIxPTR"\n", (*ent)->page_num);
  NaClLog(5, "entry->npages   = 0x%"NACL_PRIxS"\n", (*ent)->npages);

  if ((*key)->page_num < (*ent)->page_num) return -1;
  if ((*key)->page_num < (*ent)->page_num + (*ent)->npages) return 0;
  return 1;
}

struct NaClVmmapEntry const *NaClVmmapFindPage(struct NaClVmmap *self,
                                               uintptr_t pnum) {
  struct NaClVmmapEntry key;
  struct NaClVmmapEntry *kptr;
  struct NaClVmmapEntry *const *result_ptr;

  NaClVmmapMakeSorted(self);
  key.page_num = pnum;
  kptr = &key;
  result_ptr = ((struct NaClVmmapEntry *const *)
                bsearch(&kptr,
                        self->vmentry,
                        self->nvalid,
                        sizeof self->vmentry[0],
                        NaClVmmapContainCmpEntries));
  return result_ptr ? *result_ptr : NULL;
}


struct NaClVmmapIter *NaClVmmapFindPageIter(struct NaClVmmap *self,
                                            uintptr_t pnum,
                                            struct NaClVmmapIter *space) {
  struct NaClVmmapEntry key;
  struct NaClVmmapEntry *kptr;
  struct NaClVmmapEntry **result_ptr;

  NaClVmmapMakeSorted(self);
  key.page_num = pnum;
  kptr = &key;
  result_ptr = ((struct NaClVmmapEntry **)
                bsearch(&kptr,
                        self->vmentry,
                        self->nvalid,
                        sizeof self->vmentry[0],
                        NaClVmmapContainCmpEntries));
  space->vmmap = self;
  if (NULL == result_ptr) {
    space->entry_ix = self->nvalid;
  } else {
    space->entry_ix = result_ptr - self->vmentry;
  }
  return space;
}
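
/*
 * Typical iteration idiom (illustrative), walking from the entry that
 * contains |pnum| to the end of the sorted map:
 *
 *   struct NaClVmmapIter iter;
 *
 *   for (NaClVmmapFindPageIter(self, pnum, &iter);
 *        !NaClVmmapIterAtEnd(&iter);
 *        NaClVmmapIterIncr(&iter)) {
 *     struct NaClVmmapEntry *entry = NaClVmmapIterStar(&iter);
 *     ... use entry ...
 *   }
 */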


int NaClVmmapIterAtEnd(struct NaClVmmapIter *nvip) {
  return nvip->entry_ix >= nvip->vmmap->nvalid;
}


/*
 * IterStar is only permissible if not AtEnd.
 */
struct NaClVmmapEntry *NaClVmmapIterStar(struct NaClVmmapIter *nvip) {
  return nvip->vmmap->vmentry[nvip->entry_ix];
}


void NaClVmmapIterIncr(struct NaClVmmapIter *nvip) {
  ++nvip->entry_ix;
}


/*
 * The iterator becomes invalid after Erase.  We could have a version
 * that keeps the iterator valid by copying forward, but it is unclear
 * whether that is needed.
 */
void NaClVmmapIterErase(struct NaClVmmapIter *nvip) {
  struct NaClVmmap *nvp;

  nvp = nvip->vmmap;
  /* NaClVmmapEntryFree, not plain free, so the desc reference is released. */
  NaClVmmapEntryFree(nvp->vmentry[nvip->entry_ix]);
  nvp->vmentry[nvip->entry_ix] = nvp->vmentry[--nvp->nvalid];
  nvp->is_sorted = 0;
}


void NaClVmmapVisit(struct NaClVmmap *self,
                    void (*fn)(void *state,
                               struct NaClVmmapEntry *entry),
                    void *state) {
  size_t i;
  size_t nentries;

  NaClVmmapMakeSorted(self);
  for (i = 0, nentries = self->nvalid; i < nentries; ++i) {
    (*fn)(state, self->vmentry[i]);
  }
}


/*
 * Linear search, from high addresses down.
 */
uintptr_t NaClVmmapFindSpace(struct NaClVmmap *self,
                             size_t num_pages) {
  size_t i;
  struct NaClVmmapEntry *vmep;
  uintptr_t end_page;
  uintptr_t start_page;

  if (0 == self->nvalid)
    return 0;
  NaClVmmapMakeSorted(self);
  for (i = self->nvalid; --i > 0; ) {
    vmep = self->vmentry[i - 1];
    end_page = vmep->page_num + vmep->npages;  /* end page of previous entry */
    start_page = self->vmentry[i]->page_num;   /* start page of current entry */
    if (start_page - end_page >= num_pages) {
      return start_page - num_pages;
    }
  }
  return 0;
  /*
   * In user addresses, page 0 always holds the trampoline, and user
   * addresses are contained in system addresses, so returning a system
   * page number of 0 can serve as an error indicator: it is at worst
   * the trampoline page, and likely to be below it.
   */
}
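
/*
 * Note: unlike NaClVmmapFindMapSpace below, NaClVmmapFindSpace imposes
 * no NACL_MAP_PAGESIZE alignment on the gap it returns, so it is only
 * suitable where plain NACL_PAGESIZE granularity is acceptable.
 */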


/*
 * Linear search, from high addresses down.  For mmap, so the starting
 * address of the region found must be NACL_MAP_PAGESIZE aligned.
 *
 * For general mmap it is better to use as high an address as
 * possible, since the stack size for the main thread is currently
 * fixed, and the heap is the only thing that grows.
 */
uintptr_t NaClVmmapFindMapSpace(struct NaClVmmap *self,
                                size_t num_pages) {
  size_t i;
  struct NaClVmmapEntry *vmep;
  uintptr_t end_page;
  uintptr_t start_page;

  if (0 == self->nvalid)
    return 0;
  NaClVmmapMakeSorted(self);
  num_pages = NaClRoundPageNumUpToMapMultiple(num_pages);

  for (i = self->nvalid; --i > 0; ) {
    vmep = self->vmentry[i - 1];
    end_page = vmep->page_num + vmep->npages;  /* end page of previous entry */
    end_page = NaClRoundPageNumUpToMapMultiple(end_page);

    start_page = self->vmentry[i]->page_num;   /* start page of current entry */
    if (NACL_MAP_PAGESHIFT > NACL_PAGESHIFT) {
      start_page = NaClTruncPageNumDownToMapMultiple(start_page);

      if (start_page <= end_page) {
        continue;
      }
    }
    if (start_page - end_page >= num_pages) {
      return start_page - num_pages;
    }
  }
  return 0;
  /*
   * In user addresses, page 0 always holds the trampoline, and user
   * addresses are contained in system addresses, so returning a system
   * page number of 0 can serve as an error indicator: it is at worst
   * the trampoline page, and likely to be below it.
   */
}


/*
 * Linear search, from uaddr up.
 */
uintptr_t NaClVmmapFindMapSpaceAboveHint(struct NaClVmmap *self,
                                         uintptr_t uaddr,
                                         size_t num_pages) {
  size_t nvalid;
  size_t i;
  struct NaClVmmapEntry *vmep;
  uintptr_t usr_page;
  uintptr_t start_page;
  uintptr_t end_page;

  NaClVmmapMakeSorted(self);

  usr_page = uaddr >> NACL_PAGESHIFT;
  num_pages = NaClRoundPageNumUpToMapMultiple(num_pages);

  nvalid = self->nvalid;

  for (i = 1; i < nvalid; ++i) {
    vmep = self->vmentry[i - 1];
    end_page = vmep->page_num + vmep->npages;
    end_page = NaClRoundPageNumUpToMapMultiple(end_page);

    start_page = self->vmentry[i]->page_num;
    if (NACL_MAP_PAGESHIFT > NACL_PAGESHIFT) {
      start_page = NaClTruncPageNumDownToMapMultiple(start_page);

      if (start_page <= end_page) {
        continue;
      }
    }
    if (end_page <= usr_page && usr_page < start_page) {
      end_page = usr_page;
    }
    if (usr_page <= end_page && (start_page - end_page) >= num_pages) {
      /* Found a gap at or after uaddr that is big enough. */
      return end_page;
    }
  }
  return 0;
}
|