/*
 * Copyright (c) 2011 The Native Client Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

/*
 * NaCl Simple/secure ELF loader (NaCl SEL) memory map.
 */

#include "native_client/src/include/portability.h"

#include <assert.h>
#include <fcntl.h>
#include <stdlib.h>
#include <stdio.h>

#include "native_client/src/include/nacl_platform.h"

#include "native_client/src/shared/platform/nacl_check.h"
#include "native_client/src/shared/platform/nacl_log.h"
#include "native_client/src/shared/platform/nacl_host_desc.h"
#include "native_client/src/trusted/desc/nacl_desc_base.h"
#include "native_client/src/trusted/desc/nacl_desc_io.h"
#include "native_client/src/trusted/service_runtime/sel_mem.h"
#include "native_client/src/trusted/service_runtime/sel_util.h"
#include "native_client/src/trusted/service_runtime/nacl_config.h"

#include "native_client/src/trusted/service_runtime/include/sys/fcntl.h"
#include "native_client/src/trusted/service_runtime/include/sys/mman.h"

#define START_ENTRIES 5 /* tramp+text, rodata, data, bss, stack */
#define REMOVE_MARKED_DEBUG 0


/*
 * The memory map structure is a simple array of memory regions which
 * may have different access protections.  We do not yet merge
 * adjacent regions that have the same access protections to reduce
 * the number of regions, but may do so in the future.
 *
 * Regions are described by their (relative) starting page number, the
 * number of pages, and the protection that the pages should have.
 */
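
/*
 * For orientation: this file manipulates the two types declared in
 * sel_mem.h as if they were shaped roughly as in the sketch below.
 * This is an editor's illustration inferred from the field accesses
 * in this file, not the authoritative declarations; see sel_mem.h.
 */
#if 0  /* illustrative sketch only, not compiled */
struct NaClVmmapEntry {
  uintptr_t       page_num;   /* relative start, in pages */
  size_t          npages;     /* region size, in pages */
  int             prot;       /* NACL_ABI_PROT_* bits */
  int             flags;      /* NACL_ABI_MAP_* bits */
  int             removed;    /* marked for lazy removal */
  struct NaClDesc *desc;      /* backing descriptor, or NULL (paging file) */
  nacl_off64_t    offset;     /* byte offset into desc */
  nacl_off64_t    file_size;  /* bytes of desc backing this region */
};

struct NaClVmmap {
  struct NaClVmmapEntry **vmentry;  /* array of entry pointers */
  size_t                nvalid;     /* entries in use */
  size_t                size;       /* allocated capacity */
  int                   is_sorted;  /* sorted and compacted? */
};
#endif
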
struct NaClVmmapEntry *NaClVmmapEntryMake(uintptr_t page_num,
                                          size_t npages,
                                          int prot,
                                          int flags,
                                          struct NaClDesc *desc,
                                          nacl_off64_t offset,
                                          nacl_off64_t file_size) {
  struct NaClVmmapEntry *entry;

  NaClLog(4,
          "NaClVmmapEntryMake(0x%"NACL_PRIxPTR",0x%"NACL_PRIxS","
          "0x%x,0x%x,0x%"NACL_PRIxPTR",0x%"NACL_PRIx64")\n",
          page_num, npages, prot, flags, (uintptr_t) desc, offset);
  entry = (struct NaClVmmapEntry *) malloc(sizeof *entry);
  if (NULL == entry) {
    return NULL;
  }
  NaClLog(4, "entry: 0x%"NACL_PRIxPTR"\n", (uintptr_t) entry);
  entry->page_num = page_num;
  entry->npages = npages;
  entry->prot = prot;
  entry->flags = flags;
  entry->removed = 0;
  entry->desc = desc;
  if (desc != NULL) {
    NaClDescRef(desc);
  }
  entry->offset = offset;
  entry->file_size = file_size;
  return entry;
}


void NaClVmmapEntryFree(struct NaClVmmapEntry *entry) {
  NaClLog(4,
          ("NaClVmmapEntryFree(0x%08"NACL_PRIxPTR
           "): (0x%"NACL_PRIxPTR",0x%"NACL_PRIxS","
           "0x%x,0x%x,0x%"NACL_PRIxPTR",0x%"NACL_PRIx64")\n"),
          (uintptr_t) entry,
          entry->page_num, entry->npages, entry->prot,
          entry->flags, (uintptr_t) entry->desc, entry->offset);

  if (entry->desc != NULL) {
    NaClDescSafeUnref(entry->desc);
  }
  free(entry);
}


/*
 * Print debug.
 */
void NaClVmentryPrint(void *state,
                      struct NaClVmmapEntry *vmep) {
  UNREFERENCED_PARAMETER(state);

  printf("page num 0x%06x\n", (uint32_t) vmep->page_num);
  printf("num pages %u\n", (uint32_t) vmep->npages);
  printf("prot bits %x\n", vmep->prot);
  printf("flags %x\n", vmep->flags);
  fflush(stdout);
}


void NaClVmmapDebug(struct NaClVmmap *self,
                    char *msg) {
  puts(msg);
  NaClVmmapVisit(self, NaClVmentryPrint, (void *) 0);
  fflush(stdout);
}


int NaClVmmapCtor(struct NaClVmmap *self) {
  self->size = START_ENTRIES;
  if (SIZE_T_MAX / sizeof *self->vmentry < self->size) {
    return 0;
  }
  self->vmentry = calloc(self->size, sizeof *self->vmentry);
  if (!self->vmentry) {
    return 0;
  }
  self->nvalid = 0;
  self->is_sorted = 1;
  return 1;
}
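
/*
 * Typical lifecycle, as an editor's sketch (error handling abridged;
 * the helper name, page number, and page count below are made up for
 * illustration):
 */
#if 0  /* illustrative sketch only, not compiled */
static void ExampleVmmapLifecycle(void) {
  struct NaClVmmap map;

  if (!NaClVmmapCtor(&map)) {
    NaClLog(LOG_FATAL, "ExampleVmmapLifecycle: ctor failed\n");
  }
  /* Record an anonymous (desc == NULL) 16-page RW region at page 0x100. */
  NaClVmmapAdd(&map, 0x100, 16,
               NACL_ABI_PROT_READ | NACL_ABI_PROT_WRITE,
               NACL_ABI_MAP_PRIVATE,
               NULL, 0, 0);
  /* ... use the map ... */
  NaClVmmapDtor(&map);  /* frees every entry and the entry array */
}
#endif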


void NaClVmmapDtor(struct NaClVmmap *self) {
  size_t i;

  for (i = 0; i < self->nvalid; ++i) {
    NaClVmmapEntryFree(self->vmentry[i]);
  }
  free(self->vmentry);
  self->vmentry = 0;
}

/*
 * Comparison function for qsort.  Should never encounter a
 * removed/invalid entry.
 */

static int NaClVmmapCmpEntries(void const *vleft,
                               void const *vright) {
  struct NaClVmmapEntry const *const *left =
      (struct NaClVmmapEntry const *const *) vleft;
  struct NaClVmmapEntry const *const *right =
      (struct NaClVmmapEntry const *const *) vright;

  /*
   * Compare explicitly rather than returning the difference: casting
   * the difference of two uintptr_t page numbers to int can truncate
   * and yield an inconsistent ordering on 64-bit builds.
   */
  if ((*left)->page_num < (*right)->page_num) return -1;
  if ((*left)->page_num > (*right)->page_num) return 1;
  return 0;
}


static void NaClVmmapRemoveMarked(struct NaClVmmap *self) {
  size_t i;
  size_t last;

  if (0 == self->nvalid)
    return;

#if REMOVE_MARKED_DEBUG
  NaClVmmapDebug(self, "Before RemoveMarked");
#endif
  /*
   * Linearly scan with a move-end-to-front strategy to get rid of
   * marked-to-be-removed entries.
   */

  /*
   * Invariant:
   *
   * forall j in [0, self->nvalid): NULL != self->vmentry[j]
   */
  for (last = self->nvalid; last > 0 && self->vmentry[--last]->removed; ) {
    NaClVmmapEntryFree(self->vmentry[last]);
    self->vmentry[last] = NULL;
  }
  /*
   * The loop above freed self->vmentry[0] (leaving NULL) iff every
   * entry was marked removed, so test the pointer, not ->removed.
   */
  if (last == 0 && NULL == self->vmentry[0]) {
    NaClLog(LOG_FATAL, "No valid entries in VM map\n");
    return;
  }

  /*
   * Post condition of above loop:
   *
   * forall j in [0, last]: NULL != self->vmentry[j]
   *
   * 0 <= last < self->nvalid && !self->vmentry[last]->removed
   */
  CHECK(last < self->nvalid);
  CHECK(!self->vmentry[last]->removed);
  /*
   * and,
   *
   * forall j in (last, self->nvalid): NULL == self->vmentry[j]
   */

  /*
   * Loop invariant: forall j in [0, i): !self->vmentry[j]->removed
   */
  for (i = 0; i < last; ++i) {
    if (!self->vmentry[i]->removed) {
      continue;
    }
    /*
     * post condition: self->vmentry[i]->removed
     *
     * swap with entry at self->vmentry[last].
     */

    NaClVmmapEntryFree(self->vmentry[i]);
    self->vmentry[i] = self->vmentry[last];
    self->vmentry[last] = NULL;

    /*
     * Invariants here:
     *
     * forall j in [last, self->nvalid): NULL == self->vmentry[j]
     *
     * forall j in [0, i]: !self->vmentry[j]->removed
     */

    while (--last > i && self->vmentry[last]->removed) {
      NaClVmmapEntryFree(self->vmentry[last]);
      self->vmentry[last] = NULL;
    }
    /*
     * Since !self->vmentry[i]->removed, we are guaranteed that
     * !self->vmentry[last]->removed when the while loop terminates.
     *
     * forall j in (last, self->nvalid): NULL == self->vmentry[j]
     */
  }
  /* i == last */
  /* forall j in [0, last]: !self->vmentry[j]->removed */
  /* forall j in (last, self->nvalid): NULL == self->vmentry[j] */
  self->nvalid = last + 1;

  self->is_sorted = 0;
#if REMOVE_MARKED_DEBUG
  NaClVmmapDebug(self, "After RemoveMarked");
#endif
}
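
/*
 * The move-end-to-front scan above is the general technique of
 * filling holes with surviving elements taken from the tail: O(n)
 * compaction that does not preserve order (hence is_sorted = 0).  A
 * minimal sketch of the same idea on a plain int array, as an
 * editor's illustration (ExampleCompact is not part of this file's
 * API):
 */
#if 0  /* illustrative sketch only, not compiled */
static size_t ExampleCompact(int *a, size_t n, int removed_value) {
  size_t i = 0;

  while (i < n) {
    if (a[i] == removed_value) {
      a[i] = a[--n];  /* fill the hole from the tail; order not kept */
    } else {
      ++i;
    }
  }
  return n;  /* new count of surviving elements */
}
#endif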


void NaClVmmapMakeSorted(struct NaClVmmap *self) {
  if (self->is_sorted)
    return;

  NaClVmmapRemoveMarked(self);

  qsort(self->vmentry,
        self->nvalid,
        sizeof *self->vmentry,
        NaClVmmapCmpEntries);
  self->is_sorted = 1;
#if REMOVE_MARKED_DEBUG
  NaClVmmapDebug(self, "After Sort");
#endif
}

void NaClVmmapAdd(struct NaClVmmap *self,
                  uintptr_t page_num,
                  size_t npages,
                  int prot,
                  int flags,
                  struct NaClDesc *desc,
                  nacl_off64_t offset,
                  nacl_off64_t file_size) {
  struct NaClVmmapEntry *entry;

  NaClLog(2,
          ("NaClVmmapAdd(0x%08"NACL_PRIxPTR", 0x%"NACL_PRIxPTR", "
           "0x%"NACL_PRIxS", 0x%x, 0x%x, 0x%"NACL_PRIxPTR", "
           "0x%"NACL_PRIx64")\n"),
          (uintptr_t) self, page_num, npages, prot, flags,
          (uintptr_t) desc, offset);
  if (self->nvalid == self->size) {
    size_t new_size = 2 * self->size;
    struct NaClVmmapEntry **new_map;

    new_map = realloc(self->vmentry, new_size * sizeof *new_map);
    if (NULL == new_map) {
      NaClLog(LOG_FATAL, "NaClVmmapAdd: could not allocate memory\n");
      return;
    }
    self->vmentry = new_map;
    self->size = new_size;
  }
  /* self->nvalid < self->size */
  entry = NaClVmmapEntryMake(page_num, npages, prot, flags,
                             desc, offset, file_size);
  if (NULL == entry) {
    NaClLog(LOG_FATAL, "NaClVmmapAdd: could not allocate entry\n");
    return;
  }

  self->vmentry[self->nvalid] = entry;
  self->is_sorted = 0;
  ++self->nvalid;
}

/*
 * Update the virtual memory map.  Deletion is handled by a remove
 * flag, since a NULL desc just means that the memory is backed by the
 * system paging file.
 */
static void NaClVmmapUpdate(struct NaClVmmap *self,
                            uintptr_t page_num,
                            size_t npages,
                            int prot,
                            int flags,
                            int remove,
                            struct NaClDesc *desc,
                            nacl_off64_t offset,
                            nacl_off64_t file_size) {
  /* update existing entries or create new entry as needed */
  size_t i;
  uintptr_t new_region_end_page = page_num + npages;

  NaClLog(2,
          ("NaClVmmapUpdate(0x%08"NACL_PRIxPTR", 0x%"NACL_PRIxPTR", "
           "0x%"NACL_PRIxS", 0x%x, 0x%x, %d, 0x%"NACL_PRIxPTR", "
           "0x%"NACL_PRIx64")\n"),
          (uintptr_t) self, page_num, npages, prot, flags,
          remove, (uintptr_t) desc, offset);
  NaClVmmapMakeSorted(self);

  CHECK(npages > 0);

  for (i = 0; i < self->nvalid; i++) {
    struct NaClVmmapEntry *ent = self->vmentry[i];
    uintptr_t ent_end_page = ent->page_num + ent->npages;
    nacl_off64_t additional_offset =
        (new_region_end_page - ent->page_num) << NACL_PAGESHIFT;

    if (ent->page_num < page_num && new_region_end_page < ent_end_page) {
      /*
       * Split existing mapping into two parts, with new mapping in
       * the middle.
       */
      NaClVmmapAdd(self,
                   new_region_end_page,
                   ent_end_page - new_region_end_page,
                   ent->prot,
                   ent->flags,
                   ent->desc,
                   ent->offset + additional_offset,
                   ent->file_size);
      ent->npages = page_num - ent->page_num;
      break;
    } else if (ent->page_num < page_num && page_num < ent_end_page) {
      /* New mapping overlaps end of existing mapping. */
      ent->npages = page_num - ent->page_num;
    } else if (ent->page_num < new_region_end_page &&
               new_region_end_page < ent_end_page) {
      /* New mapping overlaps start of existing mapping. */
      ent->page_num = new_region_end_page;
      ent->npages = ent_end_page - new_region_end_page;
      ent->offset += additional_offset;
      break;
    } else if (page_num <= ent->page_num &&
               ent_end_page <= new_region_end_page) {
      /* New mapping covers all of the existing mapping. */
      ent->removed = 1;
    } else {
      /* No overlap. */
      assert(new_region_end_page <= ent->page_num ||
             ent_end_page <= page_num);
    }
  }

  if (!remove) {
    NaClVmmapAdd(self, page_num, npages, prot, flags, desc, offset, file_size);
  }

  NaClVmmapRemoveMarked(self);
}
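
/*
 * The overlap tests above distinguish the four possible relations
 * between the new region N = [page_num, new_region_end_page) and an
 * existing entry E = [ent->page_num, ent_end_page), plus the
 * no-overlap case.  A compact restatement of the same case analysis
 * on plain intervals, as an editor's sketch (ExampleClassifyOverlap
 * and its enum are illustrative names, not part of this file):
 */
#if 0  /* illustrative sketch only, not compiled */
enum ExampleOverlap {
  EX_NO_OVERLAP,
  EX_N_INSIDE_E,      /* E must be split into a head and a tail */
  EX_N_COVERS_E,      /* E disappears: mark it removed */
  EX_N_CLIPS_E_END,   /* E keeps its head: shrink E's npages */
  EX_N_CLIPS_E_START  /* E keeps its tail: advance page_num/offset */
};

static enum ExampleOverlap ExampleClassifyOverlap(uintptr_t n_start,
                                                  uintptr_t n_end,
                                                  uintptr_t e_start,
                                                  uintptr_t e_end) {
  if (n_end <= e_start || e_end <= n_start) return EX_NO_OVERLAP;
  if (e_start < n_start && n_end < e_end)   return EX_N_INSIDE_E;
  if (n_start <= e_start && e_end <= n_end) return EX_N_COVERS_E;
  if (e_start < n_start)                    return EX_N_CLIPS_E_END;
  return EX_N_CLIPS_E_START;
}
#endif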

void NaClVmmapAddWithOverwrite(struct NaClVmmap *self,
                               uintptr_t page_num,
                               size_t npages,
                               int prot,
                               int flags,
                               struct NaClDesc *desc,
                               nacl_off64_t offset,
                               nacl_off64_t file_size) {
  NaClVmmapUpdate(self,
                  page_num,
                  npages,
                  prot,
                  flags,
                  /* remove= */ 0,
                  desc,
                  offset,
                  file_size);
}

void NaClVmmapRemove(struct NaClVmmap *self,
                     uintptr_t page_num,
                     size_t npages) {
  NaClVmmapUpdate(self,
                  page_num,
                  npages,
                  /* prot= */ 0,
                  /* flags= */ 0,
                  /* remove= */ 1,
                  /* desc= */ NULL,
                  /* offset= */ 0,
                  /* file_size= */ 0);
}

/*
 * NaClVmmapCheckExistingMapping checks whether the given region is
 * covered by existing mappings whose maximum allowed protection
 * includes (is equal to or greater than) the requested one.
 */
static int NaClVmmapCheckExistingMapping(struct NaClVmmap *self,
                                         uintptr_t page_num,
                                         size_t npages,
                                         int prot) {
  size_t i;
  uintptr_t region_end_page = page_num + npages;

  NaClLog(2,
          ("NaClVmmapCheckExistingMapping(0x%08"NACL_PRIxPTR", 0x%"NACL_PRIxPTR
           ", 0x%"NACL_PRIxS", 0x%x)\n"),
          (uintptr_t) self, page_num, npages, prot);

  if (0 == self->nvalid) {
    return 0;
  }
  NaClVmmapMakeSorted(self);

  for (i = 0; i < self->nvalid; ++i) {
    struct NaClVmmapEntry *ent = self->vmentry[i];
    uintptr_t ent_end_page = ent->page_num + ent->npages;
    int max_prot = NaClVmmapEntryMaxProt(ent);

    if (ent->page_num <= page_num && region_end_page <= ent_end_page) {
      /* The region lies entirely inside this existing entry. */
      return 0 == (prot & (~max_prot));
    } else if (ent->page_num <= page_num && page_num < ent_end_page) {
      /* The region overlaps the entry; check the rest against later entries. */
      if (0 != (prot & (~max_prot))) {
        return 0;
      }
      page_num = ent_end_page;
      npages = region_end_page - ent_end_page;
    } else if (page_num < ent->page_num) {
      /* Part of the region has no backing mapping at all. */
      return 0;
    }
  }
  return 0;
}

int NaClVmmapChangeProt(struct NaClVmmap *self,
                        uintptr_t page_num,
                        size_t npages,
                        int prot) {
  size_t i;
  size_t nvalid;
  uintptr_t new_region_end_page = page_num + npages;

  /*
   * NaClVmmapCheckExistingMapping should always be called before
   * NaClVmmapChangeProt proceeds, to ensure that a valid mapping
   * exists, as modifications cannot be rolled back.
   */
  if (!NaClVmmapCheckExistingMapping(self, page_num, npages, prot)) {
    return 0;
  }

  NaClLog(2,
          ("NaClVmmapChangeProt(0x%08"NACL_PRIxPTR", 0x%"NACL_PRIxPTR
           ", 0x%"NACL_PRIxS", 0x%x)\n"),
          (uintptr_t) self, page_num, npages, prot);
  NaClVmmapMakeSorted(self);

  /*
   * This loop and its interval boundary tests closely follow those in
   * NaClVmmapUpdate.  When updating one, do not forget to update the
   * other where appropriate.
   * TODO(phosek): use a better data structure that supports intervals
   */

  for (i = 0, nvalid = self->nvalid; i < nvalid && npages > 0; i++) {
    struct NaClVmmapEntry *ent = self->vmentry[i];
    uintptr_t ent_end_page = ent->page_num + ent->npages;
    nacl_off64_t additional_offset =
        (new_region_end_page - ent->page_num) << NACL_PAGESHIFT;

    if (ent->page_num < page_num && new_region_end_page < ent_end_page) {
      /* Split existing mapping into two parts. */
      NaClVmmapAdd(self,
                   new_region_end_page,
                   ent_end_page - new_region_end_page,
                   ent->prot,
                   ent->flags,
                   ent->desc,
                   ent->offset + additional_offset,
                   ent->file_size);
      ent->npages = page_num - ent->page_num;
      /*
       * Add the new mapping into the middle.  The page delta must be
       * converted to a byte offset before adjusting the file offset.
       */
      NaClVmmapAdd(self,
                   page_num,
                   npages,
                   prot,
                   ent->flags,
                   ent->desc,
                   ent->offset +
                       ((nacl_off64_t) (page_num - ent->page_num)
                        << NACL_PAGESHIFT),
                   ent->file_size);
      break;
    } else if (ent->page_num < page_num && page_num < ent_end_page) {
      /* New mapping overlaps end of existing mapping. */
      ent->npages = page_num - ent->page_num;
      /* Add the overlapping part of the mapping (page delta in bytes). */
      NaClVmmapAdd(self,
                   page_num,
                   ent_end_page - page_num,
                   prot,
                   ent->flags,
                   ent->desc,
                   ent->offset +
                       ((nacl_off64_t) (page_num - ent->page_num)
                        << NACL_PAGESHIFT),
                   ent->file_size);
      /* The remaining part (if any) is handled in a later iteration. */
      page_num = ent_end_page;
      npages = new_region_end_page - ent_end_page;
    } else if (ent->page_num < new_region_end_page &&
               new_region_end_page < ent_end_page) {
      /* New mapping overlaps start of existing mapping; split it. */
      NaClVmmapAdd(self,
                   page_num,
                   npages,
                   prot,
                   ent->flags,
                   ent->desc,
                   ent->offset,
                   ent->file_size);
      ent->page_num = new_region_end_page;
      ent->npages = ent_end_page - new_region_end_page;
      ent->offset += additional_offset;
      break;
    } else if (page_num <= ent->page_num &&
               ent_end_page <= new_region_end_page) {
      /* New mapping covers all of the existing mapping. */
      page_num = ent_end_page;
      npages = new_region_end_page - ent_end_page;
      ent->prot = prot;
    } else {
      /* No overlap. */
      assert(new_region_end_page <= ent->page_num ||
             ent_end_page <= page_num);
    }
  }
  return 1;
}
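
/*
 * Worked example of the splitting behavior above, as an editor's
 * sketch: starting from a single anonymous RW entry covering pages
 * [0x10, 0x20), changing pages [0x14, 0x16) to read-only leaves three
 * entries: [0x10, 0x14) RW, [0x14, 0x16) R, and [0x16, 0x20) RW.  The
 * helper name and page values are made up for illustration:
 */
#if 0  /* illustrative sketch only, not compiled */
static void ExampleChangeProtSplit(struct NaClVmmap *map) {
  NaClVmmapAdd(map, 0x10, 0x10,
               NACL_ABI_PROT_READ | NACL_ABI_PROT_WRITE,
               NACL_ABI_MAP_PRIVATE, NULL, 0, 0);
  if (!NaClVmmapChangeProt(map, 0x14, 2, NACL_ABI_PROT_READ)) {
    NaClLog(LOG_FATAL, "ExampleChangeProtSplit: change failed\n");
  }
  /* map now holds the three entries described above */
}
#endif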

int NaClVmmapEntryMaxProt(struct NaClVmmapEntry *entry) {
  int prot = NACL_ABI_PROT_NONE;

  if (entry->desc != NULL && 0 == (entry->flags & NACL_ABI_MAP_PRIVATE)) {
    int o_flags = (*NACL_VTBL(NaClDesc, entry->desc)->GetFlags)(entry->desc);
    switch (o_flags & NACL_ABI_O_ACCMODE) {
      case NACL_ABI_O_RDONLY:
        prot = NACL_ABI_PROT_READ;
        break;
      case NACL_ABI_O_WRONLY:
        prot = NACL_ABI_PROT_WRITE;
        break;
      case NACL_ABI_O_RDWR:
        prot = NACL_ABI_PROT_READ | NACL_ABI_PROT_WRITE;
        break;
      default:
        NaClLog(LOG_FATAL, "Internal error: illegal O_ACCMODE\n");
        break;
    }
  } else {
    prot = NACL_ABI_PROT_READ | NACL_ABI_PROT_WRITE;
  }

  return prot;
}

static int NaClVmmapContainCmpEntries(void const *vkey,
                                      void const *vent) {
  struct NaClVmmapEntry const *const *key =
      (struct NaClVmmapEntry const *const *) vkey;
  struct NaClVmmapEntry const *const *ent =
      (struct NaClVmmapEntry const *const *) vent;

  NaClLog(5, "key->page_num   = 0x%05"NACL_PRIxPTR"\n", (*key)->page_num);

  NaClLog(5, "entry->page_num = 0x%05"NACL_PRIxPTR"\n", (*ent)->page_num);
  NaClLog(5, "entry->npages   = 0x%"NACL_PRIxS"\n", (*ent)->npages);

  if ((*key)->page_num < (*ent)->page_num) return -1;
  if ((*key)->page_num < (*ent)->page_num + (*ent)->npages) return 0;
  return 1;
}

struct NaClVmmapEntry const *NaClVmmapFindPage(struct NaClVmmap *self,
                                               uintptr_t pnum) {
  struct NaClVmmapEntry key;
  struct NaClVmmapEntry *kptr;
  struct NaClVmmapEntry *const *result_ptr;

  NaClVmmapMakeSorted(self);
  key.page_num = pnum;
  kptr = &key;
  result_ptr = ((struct NaClVmmapEntry *const *)
                bsearch(&kptr,
                        self->vmentry,
                        self->nvalid,
                        sizeof self->vmentry[0],
                        NaClVmmapContainCmpEntries));
  return result_ptr ? *result_ptr : NULL;
}
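
/*
 * The lookup above relies on a bsearch comparator that reports
 * "equal" whenever the key page falls anywhere inside an entry's
 * interval, turning bsearch into point-in-interval search over the
 * sorted, non-overlapping entries.  The same idea on a plain interval
 * type, as an editor's sketch (ExampleInterval and the helpers are
 * illustrative names only):
 */
#if 0  /* illustrative sketch only, not compiled */
struct ExampleInterval {
  uintptr_t start;
  size_t len;
};

static int ExampleContainCmp(void const *vkey, void const *vent) {
  uintptr_t key = *(uintptr_t const *) vkey;
  struct ExampleInterval const *ent = (struct ExampleInterval const *) vent;

  if (key < ent->start) return -1;
  if (key < ent->start + ent->len) return 0;  /* key in [start, start+len) */
  return 1;
}

static struct ExampleInterval const *ExampleFind(
    struct ExampleInterval const *sorted, size_t n, uintptr_t page) {
  return (struct ExampleInterval const *)
      bsearch(&page, sorted, n, sizeof sorted[0], ExampleContainCmp);
}
#endif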


struct NaClVmmapIter *NaClVmmapFindPageIter(struct NaClVmmap *self,
                                            uintptr_t pnum,
                                            struct NaClVmmapIter *space) {
  struct NaClVmmapEntry key;
  struct NaClVmmapEntry *kptr;
  struct NaClVmmapEntry **result_ptr;

  NaClVmmapMakeSorted(self);
  key.page_num = pnum;
  kptr = &key;
  result_ptr = ((struct NaClVmmapEntry **)
                bsearch(&kptr,
                        self->vmentry,
                        self->nvalid,
                        sizeof self->vmentry[0],
                        NaClVmmapContainCmpEntries));
  space->vmmap = self;
  if (NULL == result_ptr) {
    space->entry_ix = self->nvalid;
  } else {
    space->entry_ix = result_ptr - self->vmentry;
  }
  return space;
}


int NaClVmmapIterAtEnd(struct NaClVmmapIter *nvip) {
  return nvip->entry_ix >= nvip->vmmap->nvalid;
}


/*
 * IterStar is only permissible if the iterator is not AtEnd.
 */
struct NaClVmmapEntry *NaClVmmapIterStar(struct NaClVmmapIter *nvip) {
  return nvip->vmmap->vmentry[nvip->entry_ix];
}


void NaClVmmapIterIncr(struct NaClVmmapIter *nvip) {
  ++nvip->entry_ix;
}


/*
 * The iterator becomes invalid after Erase.  We could have a version
 * that keeps the iterator valid by copying forward, but it is unclear
 * whether that is needed.
 */
void NaClVmmapIterErase(struct NaClVmmapIter *nvip) {
  struct NaClVmmap *nvp;

  nvp = nvip->vmmap;
  /*
   * Free via NaClVmmapEntryFree (not bare free) so that any descriptor
   * reference taken in NaClVmmapEntryMake is released as well.
   */
  NaClVmmapEntryFree(nvp->vmentry[nvip->entry_ix]);
  nvp->vmentry[nvip->entry_ix] = nvp->vmentry[--nvp->nvalid];
  nvp->is_sorted = 0;
}


void NaClVmmapVisit(struct NaClVmmap *self,
                    void (*fn)(void *state,
                               struct NaClVmmapEntry *entry),
                    void *state) {
  size_t i;
  size_t nentries;

  NaClVmmapMakeSorted(self);
  for (i = 0, nentries = self->nvalid; i < nentries; ++i) {
    (*fn)(state, self->vmentry[i]);
  }
}
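
/*
 * Example use of the visitor, as an editor's sketch: tally the total
 * number of mapped pages by threading an accumulator through the
 * state pointer (ExampleTally and ExampleCountPages are illustrative
 * names only):
 */
#if 0  /* illustrative sketch only, not compiled */
static void ExampleTally(void *state, struct NaClVmmapEntry *entry) {
  *(size_t *) state += entry->npages;
}

static size_t ExampleCountPages(struct NaClVmmap *map) {
  size_t total = 0;

  NaClVmmapVisit(map, ExampleTally, &total);
  return total;
}
#endif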


/*
 * Linear search for a gap, from high addresses down.
 */
uintptr_t NaClVmmapFindSpace(struct NaClVmmap *self,
                             size_t num_pages) {
  size_t i;
  struct NaClVmmapEntry *vmep;
  uintptr_t end_page;
  uintptr_t start_page;

  if (0 == self->nvalid)
    return 0;
  NaClVmmapMakeSorted(self);
  for (i = self->nvalid; --i > 0; ) {
    vmep = self->vmentry[i-1];
    end_page = vmep->page_num + vmep->npages;  /* end page of previous entry */
    start_page = self->vmentry[i]->page_num;   /* start page of current entry */
    if (start_page - end_page >= num_pages) {
      return start_page - num_pages;
    }
  }
  return 0;
  /*
   * In user addresses, page 0 is always the trampoline, and user
   * addresses are contained in system addresses, so returning a
   * system page number of 0 can serve as an error indicator: it is at
   * worst the trampoline page, and likely to be below it.
   */
}


/*
 * Linear search, from high addresses down.  For mmap, so the starting
 * address of the region found must be NACL_MAP_PAGESIZE aligned.
 *
 * For general mmap it is better to use as high an address as
 * possible, since the stack size for the main thread is currently
 * fixed, and the heap is the only thing that grows.
 */
uintptr_t NaClVmmapFindMapSpace(struct NaClVmmap *self,
                                size_t num_pages) {
  size_t i;
  struct NaClVmmapEntry *vmep;
  uintptr_t end_page;
  uintptr_t start_page;

  if (0 == self->nvalid)
    return 0;
  NaClVmmapMakeSorted(self);
  num_pages = NaClRoundPageNumUpToMapMultiple(num_pages);

  for (i = self->nvalid; --i > 0; ) {
    vmep = self->vmentry[i-1];
    end_page = vmep->page_num + vmep->npages;  /* end page of previous entry */
    end_page = NaClRoundPageNumUpToMapMultiple(end_page);

    start_page = self->vmentry[i]->page_num;   /* start page of current entry */
    if (NACL_MAP_PAGESHIFT > NACL_PAGESHIFT) {

      start_page = NaClTruncPageNumDownToMapMultiple(start_page);

      if (start_page <= end_page) {
        continue;
      }
    }
    if (start_page - end_page >= num_pages) {
      return start_page - num_pages;
    }
  }
  return 0;
  /*
   * In user addresses, page 0 is always the trampoline, and user
   * addresses are contained in system addresses, so returning a
   * system page number of 0 can serve as an error indicator: it is at
   * worst the trampoline page, and likely to be below it.
   */
}
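
/*
 * The alignment arithmetic above shrinks each candidate gap to
 * NACL_MAP_PAGESIZE granularity: the gap's bottom end is rounded up
 * and its top is truncated down to a map multiple before the size
 * test.  A sketch of that round-up/truncate-down arithmetic, assuming
 * a factor of 16 small pages per map page for illustration (the real
 * factor follows from NACL_MAP_PAGESHIFT and NACL_PAGESHIFT; the
 * names here are illustrative, not the sel_util.h implementations):
 */
#if 0  /* illustrative sketch only, not compiled */
#define EXAMPLE_PAGES_PER_MAP 16  /* assumed: 64K map pages over 4K pages */

static uintptr_t ExampleRoundUpToMapMultiple(uintptr_t npages) {
  return (npages + EXAMPLE_PAGES_PER_MAP - 1) &
         ~(uintptr_t) (EXAMPLE_PAGES_PER_MAP - 1);
}

static uintptr_t ExampleTruncDownToMapMultiple(uintptr_t npages) {
  return npages & ~(uintptr_t) (EXAMPLE_PAGES_PER_MAP - 1);
}
#endif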


/*
 * Linear search, from uaddr up.
 */
uintptr_t NaClVmmapFindMapSpaceAboveHint(struct NaClVmmap *self,
                                         uintptr_t uaddr,
                                         size_t num_pages) {
  size_t nvalid;
  size_t i;
  struct NaClVmmapEntry *vmep;
  uintptr_t usr_page;
  uintptr_t start_page;
  uintptr_t end_page;

  NaClVmmapMakeSorted(self);

  usr_page = uaddr >> NACL_PAGESHIFT;
  num_pages = NaClRoundPageNumUpToMapMultiple(num_pages);

  nvalid = self->nvalid;

  for (i = 1; i < nvalid; ++i) {
    vmep = self->vmentry[i-1];
    end_page = vmep->page_num + vmep->npages;
    end_page = NaClRoundPageNumUpToMapMultiple(end_page);

    start_page = self->vmentry[i]->page_num;
    if (NACL_MAP_PAGESHIFT > NACL_PAGESHIFT) {

      start_page = NaClTruncPageNumDownToMapMultiple(start_page);

      if (start_page <= end_page) {
        continue;
      }
    }
    if (end_page <= usr_page && usr_page < start_page) {
      end_page = usr_page;
    }
    if (usr_page <= end_page && (start_page - end_page) >= num_pages) {
      /* Found a gap at or after uaddr that is big enough. */
      return end_page;
    }
  }
  return 0;
}
|