1 : /*
2 : * Copyright (c) 2012 The Native Client Authors. All rights reserved.
3 : * Use of this source code is governed by a BSD-style license that can be
4 : * found in the LICENSE file.
5 : */
6 :
7 : #include <string.h>
8 :
9 : #include "native_client/src/include/concurrency_ops.h"
10 : #include "native_client/src/include/nacl_platform.h"
11 : #include "native_client/src/include/portability.h"
12 : #include "native_client/src/shared/platform/nacl_check.h"
13 : #include "native_client/src/shared/platform/nacl_log.h"
14 : #include "native_client/src/shared/platform/nacl_sync.h"
15 : #include "native_client/src/shared/platform/nacl_sync_checked.h"
16 : #include "native_client/src/trusted/desc/nacl_desc_base.h"
17 : #include "native_client/src/trusted/desc/nacl_desc_effector.h"
18 : #include "native_client/src/trusted/desc/nacl_desc_effector_trusted_mem.h"
19 : #include "native_client/src/trusted/desc/nacl_desc_imc_shm.h"
20 : #include "native_client/src/trusted/perf_counter/nacl_perf_counter.h"
21 : #include "native_client/src/trusted/service_runtime/arch/sel_ldr_arch.h"
22 : #include "native_client/src/trusted/service_runtime/include/bits/mman.h"
23 : #include "native_client/src/trusted/service_runtime/include/sys/errno.h"
24 : #include "native_client/src/trusted/service_runtime/nacl_app_thread.h"
25 : #include "native_client/src/trusted/service_runtime/nacl_error_code.h"
26 : #include "native_client/src/trusted/service_runtime/nacl_text.h"
27 : #include "native_client/src/trusted/service_runtime/sel_ldr.h"
28 : #include "native_client/src/trusted/service_runtime/sel_memory.h"
29 : #include "native_client/src/trusted/service_runtime/thread_suspension.h"
30 :
31 :
32 : /* initial size of the malloced buffer for dynamic regions */
33 : static const int kMinDynamicRegionsAllocated = 32;
34 :
35 : static const int kBitsPerByte = 8;
36 :
37 274 : static uint8_t *BitmapAllocate(uint32_t indexes) {
38 274 : uint32_t byte_count = (indexes + kBitsPerByte - 1) / kBitsPerByte;
39 274 : uint8_t *bitmap = malloc(byte_count);
40 274 : if (bitmap != NULL) {
41 274 : memset(bitmap, 0, byte_count);
42 : }
43 274 : return bitmap;
44 : }
45 :
46 206 : static int BitmapIsBitSet(uint8_t *bitmap, uint32_t index) {
47 206 : return (bitmap[index / kBitsPerByte] & (1 << (index % kBitsPerByte))) != 0;
48 : }
49 :
50 101 : static void BitmapSetBit(uint8_t *bitmap, uint32_t index) {
51 101 : bitmap[index / kBitsPerByte] |= 1 << (index % kBitsPerByte);
52 101 : }
53 :
/*
 * Creates the shared-memory (shm) object that backs the dynamic text
 * region of |nap| and maps it into the untrusted address space between
 * the end of static text and the start of the rodata/data segments.
 *
 * On success, nap->dynamic_text_start / nap->dynamic_text_end delimit
 * the usable region, nap->text_shm holds the backing descriptor, and
 * nap->dynamic_page_bitmap tracks which allocation pages have been made
 * visible.  Returns LOAD_OK (also when dynamic text is disabled or the
 * region is empty) or a LOAD_* error code on failure.
 */
NaClErrorCode NaClMakeDynamicTextShared(struct NaClApp *nap) {
  enum NaClErrorCode retval = LOAD_INTERNAL;
  uintptr_t dynamic_text_size;
  struct NaClDescImcShm *shm = NULL;
  uintptr_t shm_vaddr_base;
  int mmap_protections;
  uintptr_t mmap_ret;

  uintptr_t shm_upper_bound;
  uintptr_t text_sysaddr;

  shm_vaddr_base = NaClEndOfStaticText(nap);
  NaClLog(4,
          "NaClMakeDynamicTextShared: shm_vaddr_base = %08"NACL_PRIxPTR"\n",
          shm_vaddr_base);
  /* The dynamic text region must start on an allocation-page boundary. */
  shm_vaddr_base = NaClRoundAllocPage(shm_vaddr_base);
  NaClLog(4,
          "NaClMakeDynamicTextShared: shm_vaddr_base = %08"NACL_PRIxPTR"\n",
          shm_vaddr_base);

  /*
   * Default is that there is no usable dynamic code area.
   */
  nap->dynamic_text_start = shm_vaddr_base;
  nap->dynamic_text_end = shm_vaddr_base;
  if (!nap->use_shm_for_dynamic_text) {
    NaClLog(4,
            "NaClMakeDynamicTextShared:"
            " rodata / data segments not allocation aligned\n");
    NaClLog(4,
            " not using shm for text\n");
    return LOAD_OK;
  }

  /*
   * Allocate a shm region the size of which is nap->rodata_start -
   * end-of-text.  This implies that the "core" text will not be
   * backed by shm.
   */
  shm_upper_bound = nap->rodata_start;
  /* No rodata segment: fall back to the start of the data segment. */
  if (0 == shm_upper_bound) {
    shm_upper_bound = NaClTruncAllocPage(nap->data_start);
  }
  /* Neither segment present: the dynamic region is empty. */
  if (0 == shm_upper_bound) {
    shm_upper_bound = shm_vaddr_base;
  }

  NaClLog(4, "shm_upper_bound = %08"NACL_PRIxPTR"\n", shm_upper_bound);

  dynamic_text_size = shm_upper_bound - shm_vaddr_base;
  NaClLog(4,
          "NaClMakeDynamicTextShared: dynamic_text_size = %"NACL_PRIxPTR"\n",
          dynamic_text_size);

  if (0 == dynamic_text_size) {
    NaClLog(4, "Empty JITtable region\n");
    return LOAD_OK;
  }

  shm = (struct NaClDescImcShm *) malloc(sizeof *shm);
  if (NULL == shm) {
    NaClLog(4, "NaClMakeDynamicTextShared: shm object allocation failed\n");
    retval = LOAD_NO_MEMORY;
    goto cleanup;
  }
  if (!NaClDescImcShmAllocCtor(shm, dynamic_text_size, /* executable= */ 1)) {
    /* cleanup invariant is if ptr is non-NULL, it's fully ctor'd */
    free(shm);
    shm = NULL;
    NaClLog(4, "NaClMakeDynamicTextShared: shm alloc ctor for text failed\n");
    retval = LOAD_NO_MEMORY_FOR_DYNAMIC_TEXT;
    goto cleanup;
  }

  text_sysaddr = NaClUserToSys(nap, shm_vaddr_base);

  /* Existing memory is anonymous paging file backed. */
  NaClPageFree((void *) text_sysaddr, dynamic_text_size);

  /*
   * Unix allows us to map pages with PROT_NONE initially and later
   * increase the mapping permissions with mprotect().
   *
   * Windows does not allow this, however: the initial permissions are
   * an upper bound on what the permissions may later be changed to
   * with VirtualProtect() or VirtualAlloc().  Given this, using
   * PROT_NONE at this point does not even make sense.  On Windows,
   * the pages start off as uncommitted, which makes them inaccessible
   * regardless of the page permissions they are mapped with.
   *
   * Write permissions are included here for nacl64-gdb to set
   * breakpoints.
   */
#if NACL_WINDOWS
  mmap_protections =
    NACL_ABI_PROT_READ | NACL_ABI_PROT_EXEC | NACL_ABI_PROT_WRITE;
#else
  mmap_protections = NACL_ABI_PROT_NONE;
#endif
  NaClLog(4,
          "NaClMakeDynamicTextShared: Map(,,0x%"NACL_PRIxPTR",size = 0x%x,"
          " prot=0x%x, flags=0x%x, offset=0)\n",
          text_sysaddr,
          (int) dynamic_text_size,
          mmap_protections,
          NACL_ABI_MAP_SHARED | NACL_ABI_MAP_FIXED);
  /* MAP_FIXED: the shm must land exactly where the halt sled pages were. */
  mmap_ret = (*((struct NaClDescVtbl const *) shm->base.base.vtbl)->
              Map)((struct NaClDesc *) shm,
                   NaClDescEffectorTrustedMem(),
                   (void *) text_sysaddr,
                   dynamic_text_size,
                   mmap_protections,
                   NACL_ABI_MAP_SHARED | NACL_ABI_MAP_FIXED,
                   0);
  if (text_sysaddr != mmap_ret) {
    NaClLog(LOG_FATAL, "Could not map in shm for dynamic text region\n");
  }

  /* One bit per NACL_MAP_PAGESIZE page, tracking committed/visible pages. */
  nap->dynamic_page_bitmap =
      BitmapAllocate((uint32_t) (dynamic_text_size / NACL_MAP_PAGESIZE));
  if (NULL == nap->dynamic_page_bitmap) {
    NaClLog(LOG_FATAL, "NaClMakeDynamicTextShared: BitmapAllocate() failed\n");
  }

  nap->dynamic_text_start = shm_vaddr_base;
  nap->dynamic_text_end = shm_upper_bound;
  nap->text_shm = &shm->base;
  retval = LOAD_OK;

cleanup:
  /*
   * On the error paths above |shm| is always NULL (either never
   * allocated or already freed), so these calls are defensive no-ops.
   */
  if (LOAD_OK != retval) {
    NaClDescSafeUnref((struct NaClDesc *) shm);
    free(shm);
  }

  return retval;
}
191 :
192 : /*
193 : * Binary search nap->dynamic_regions to find the maximal region with start<=ptr
194 : * caller must hold nap->dynamic_load_mutex, and must discard result
195 : * when lock is released.
196 : */
197 2173 : struct NaClDynamicRegion* NaClDynamicRegionFindClosestLEQ(struct NaClApp *nap,
198 : uintptr_t ptr) {
199 2173 : const int kBinarySearchToScanCutoff = 16;
200 2173 : int begin = 0;
201 2173 : int end = nap->num_dynamic_regions;
202 2173 : if (0 == nap->num_dynamic_regions) {
203 12 : return NULL;
204 : }
205 : /* as an optimization, check the last region first */
206 2161 : if (nap->dynamic_regions[nap->num_dynamic_regions-1].start <= ptr) {
207 44 : return nap->dynamic_regions + nap->num_dynamic_regions-1;
208 : }
209 : /* comes before everything */
210 2117 : if (ptr < nap->dynamic_regions[0].start) {
211 0 : return NULL;
212 : }
213 : /* binary search, until range is small */
214 16796 : while (begin + kBinarySearchToScanCutoff + 1 < end) {
215 12562 : int mid = begin + (end - begin)/2;
216 12562 : if (nap->dynamic_regions[mid].start <= ptr) {
217 12558 : begin = mid;
218 : } else {
219 4 : end = mid;
220 : }
221 : }
222 : /* linear scan, faster for small ranges */
223 27582 : while (begin + 1 < end && nap->dynamic_regions[begin + 1].start <= ptr) {
224 23348 : begin++;
225 : }
226 2117 : return nap->dynamic_regions + begin;
227 : }
228 :
229 33 : struct NaClDynamicRegion* NaClDynamicRegionFind(struct NaClApp *nap,
230 : uintptr_t ptr,
231 : size_t size) {
232 33 : struct NaClDynamicRegion *p =
233 33 : NaClDynamicRegionFindClosestLEQ(nap, ptr + size - 1);
234 33 : return (p != NULL && ptr < p->start + p->size) ? p : NULL;
235 : }
236 :
237 2140 : int NaClDynamicRegionCreate(struct NaClApp *nap,
238 : uintptr_t start,
239 : size_t size,
240 : int is_mmap) {
241 : struct NaClDynamicRegion item, *regionp, *end;
242 2140 : item.start = start;
243 2140 : item.size = size;
244 2140 : item.delete_generation = -1;
245 2140 : item.is_mmap = is_mmap;
246 2140 : if (nap->dynamic_regions_allocated == nap->num_dynamic_regions) {
247 : /* out of space, double buffer size */
248 19 : nap->dynamic_regions_allocated *= 2;
249 19 : if (nap->dynamic_regions_allocated < kMinDynamicRegionsAllocated) {
250 12 : nap->dynamic_regions_allocated = kMinDynamicRegionsAllocated;
251 : }
252 19 : nap->dynamic_regions = realloc(nap->dynamic_regions,
253 : sizeof(struct NaClDynamicRegion) *
254 19 : nap->dynamic_regions_allocated);
255 19 : if (NULL == nap->dynamic_regions) {
256 0 : NaClLog(LOG_FATAL, "NaClDynamicRegionCreate: realloc failed");
257 0 : return 0;
258 : }
259 : }
260 : /* find preceding entry */
261 2140 : regionp = NaClDynamicRegionFindClosestLEQ(nap, start + size - 1);
262 2140 : if (regionp != NULL && start < regionp->start + regionp->size) {
263 : /* target already in use */
264 4 : return 0;
265 : }
266 2136 : if (NULL == regionp) {
267 : /* start at beginning if we couldn't find predecessor */
268 12 : regionp = nap->dynamic_regions;
269 : }
270 2136 : end = nap->dynamic_regions + nap->num_dynamic_regions;
271 : /* scroll to insertion point (this should scroll at most 1 element) */
272 2136 : for (; regionp != end && regionp->start < item.start; ++regionp);
273 : /* insert and shift everything forward by 1 */
274 4470 : for (; regionp != end; ++regionp) {
275 : /* swap(*i, item); */
276 2334 : struct NaClDynamicRegion t = *regionp;
277 2334 : *regionp = item;
278 2334 : item = t;
279 : }
280 2136 : *regionp = item;
281 2136 : nap->num_dynamic_regions++;
282 2136 : return 1;
283 : }
284 :
285 6 : void NaClDynamicRegionDelete(struct NaClApp *nap, struct NaClDynamicRegion* r) {
286 12 : struct NaClDynamicRegion *end = nap->dynamic_regions
287 6 : + nap->num_dynamic_regions;
288 : /* shift everything down */
289 12 : for (; r + 1 < end; ++r) {
290 6 : r[0] = r[1];
291 : }
292 6 : nap->num_dynamic_regions--;
293 :
294 6 : if ( nap->dynamic_regions_allocated > kMinDynamicRegionsAllocated
295 0 : && nap->dynamic_regions_allocated/4 > nap->num_dynamic_regions) {
296 : /* too much waste, shrink buffer*/
297 0 : nap->dynamic_regions_allocated /= 2;
298 0 : nap->dynamic_regions = realloc(nap->dynamic_regions,
299 : sizeof(struct NaClDynamicRegion) *
300 0 : nap->dynamic_regions_allocated);
301 0 : if (NULL == nap->dynamic_regions) {
302 0 : NaClLog(LOG_FATAL, "NaClDynamicRegionCreate: realloc failed");
303 0 : return;
304 : }
305 : }
306 : }
307 :
308 :
309 11 : void NaClSetThreadGeneration(struct NaClAppThread *natp, int generation) {
310 : /*
311 : * outer check handles fast case (no change)
312 : * since threads only set their own generation it is safe
313 : */
314 11 : if (natp->dynamic_delete_generation != generation) {
315 8 : NaClXMutexLock(&natp->mu);
316 8 : CHECK(natp->dynamic_delete_generation <= generation);
317 8 : natp->dynamic_delete_generation = generation;
318 8 : NaClXMutexUnlock(&natp->mu);
319 : }
320 11 : }
321 :
322 13 : int NaClMinimumThreadGeneration(struct NaClApp *nap) {
323 : size_t index;
324 13 : int rv = INT_MAX;
325 13 : NaClXMutexLock(&nap->threads_mu);
326 33 : for (index = 0; index < nap->threads.num_entries; ++index) {
327 20 : struct NaClAppThread *thread = NaClGetThreadMu(nap, (int) index);
328 20 : if (thread != NULL) {
329 18 : NaClXMutexLock(&thread->mu);
330 18 : if (rv > thread->dynamic_delete_generation) {
331 15 : rv = thread->dynamic_delete_generation;
332 : }
333 18 : NaClXMutexUnlock(&thread->mu);
334 : }
335 : }
336 13 : NaClXMutexUnlock(&nap->threads_mu);
337 13 : return rv;
338 : }
339 :
/*
 * Copies every 32-bit word of |src| to |dest| EXCEPT the first word of
 * each |bundle_size|-aligned bundle.  This is the first phase of the
 * safe code-copy protocol: the bundle heads in |dest| still contain
 * HLTs, so no partially written tail is reachable.
 * |bundle_size| must be a power of two; |dest| must be word-aligned.
 */
static void CopyBundleTails(uint8_t *dest,
                            uint8_t *src,
                            int32_t size,
                            int bundle_size) {
  /*
   * The order in which these locations are written does not matter:
   * none of the locations will be reachable, because the bundle heads
   * still contains HLTs.
   */
  int bundle_mask = bundle_size - 1;
  uint32_t *src_ptr;
  uint32_t *dest_ptr;
  uint32_t *end_ptr;

  /* Word alignment keeps the 32-bit stores below well-defined. */
  CHECK(0 == ((uintptr_t) dest & 3));

  src_ptr = (uint32_t *) src;
  dest_ptr = (uint32_t *) dest;
  end_ptr = (uint32_t *) (dest + size);
  while (dest_ptr < end_ptr) {
    /* Skip words at bundle-aligned addresses (the bundle heads). */
    if ((((uintptr_t) dest_ptr) & bundle_mask) != 0) {
      *dest_ptr = *src_ptr;
    }
    dest_ptr++;
    src_ptr++;
  }
}
367 :
/*
 * Copies the first 32-bit word of each |bundle_size|-aligned bundle from
 * |src| to |dest|.  This is the second phase of the safe code-copy
 * protocol: each atomic word store replaces a HLT head, publishing one
 * fully written bundle at a time.
 */
static void CopyBundleHeads(uint8_t *dest,
                            uint8_t *src,
                            uint32_t size,
                            int bundle_size) {
  /* Again, the order in which these locations are written does not matter. */
  uint8_t *src_ptr;
  uint8_t *dest_ptr;
  uint8_t *end_ptr;

  /* dest must be aligned for the writes to be atomic. */
  CHECK(0 == ((uintptr_t) dest & 3));

  src_ptr = src;
  dest_ptr = dest;
  end_ptr = dest + size;
  while (dest_ptr < end_ptr) {
    /*
     * We assume that writing the 32-bit int here is atomic, which is
     * the case on x86 and ARM as long as the address is word-aligned.
     * The read does not have to be atomic.
     */
    *(uint32_t *) dest_ptr = *(uint32_t *) src_ptr;
    dest_ptr += bundle_size;
    src_ptr += bundle_size;
  }
}
394 :
/*
 * Overwrites the first word of every |bundle_size|-aligned bundle in
 * [dest, dest+size) with NACL_HALT_WORD, making each bundle start with
 * halts, then issues a write barrier so the halts are visible before
 * any subsequent tail rewriting.
 */
static void ReplaceBundleHeadsWithHalts(uint8_t *dest,
                                        uint32_t size,
                                        int bundle_size) {
  uint32_t *dest_ptr = (uint32_t*) dest;
  uint32_t *end_ptr = (uint32_t*) (dest + size);
  while (dest_ptr < end_ptr) {
    /* dont assume 1-byte halt, write entire NACL_HALT_WORD */
    *dest_ptr = NACL_HALT_WORD;
    /* advance one bundle (bundle_size bytes == bundle_size/4 words) */
    dest_ptr += bundle_size / sizeof(uint32_t);
  }
  NaClWriteMemoryBarrier();
}
407 :
/*
 * Safely publishes new code from |src| into |dest|, which must
 * currently be filled with halts: first the bundle tails are written
 * (unreachable while heads are still HLT), then a write barrier orders
 * those stores, and finally the bundle heads are written atomically,
 * exposing only fully formed bundles.  The phase order is essential.
 */
static INLINE void CopyCodeSafelyInitial(uint8_t *dest,
                                         uint8_t *src,
                                         uint32_t size,
                                         int bundle_size) {
  CopyBundleTails(dest, src, size, bundle_size);
  NaClWriteMemoryBarrier();
  CopyBundleHeads(dest, src, size, bundle_size);
}
416 :
/*
 * Makes dynamic-text pages [page_index_min, page_index_max) usable:
 * marks them in nap->dynamic_page_bitmap, fills them with halt
 * instructions through the writable mapping |writable_addr|, and grants
 * read+execute access in the untrusted address space.
 * Called with nap->dynamic_load_mutex held (see CachedMapWritableText),
 * and only for pages whose bitmap bits are still clear (CHECKed below).
 */
static void MakeDynamicCodePagesVisible(struct NaClApp *nap,
                                        uint32_t page_index_min,
                                        uint32_t page_index_max,
                                        uint8_t *writable_addr) {
  void *user_addr;
  uint32_t index;
  size_t size = (page_index_max - page_index_min) * NACL_MAP_PAGESIZE;

  /* Claim each page in the bitmap; double-allocation would be a bug. */
  for (index = page_index_min; index < page_index_max; index++) {
    CHECK(!BitmapIsBitSet(nap->dynamic_page_bitmap, index));
    BitmapSetBit(nap->dynamic_page_bitmap, index);
  }
  user_addr = (void *) NaClUserToSys(nap, nap->dynamic_text_start
                                     + page_index_min * NACL_MAP_PAGESIZE);

#if NACL_WINDOWS
  NaClUntrustedThreadsSuspendAll(nap, /* save_registers= */ 0);

  /*
   * The VirtualAlloc() call here has two effects:
   *
   *  1) It commits the page in the shared memory (SHM) object,
   *     allocating swap space and making the page accessible.  This
   *     affects our writable mapping of the shared memory object too.
   *     Before the VirtualAlloc() call, dereferencing writable_addr
   *     would fault.
   *  2) It changes the page permissions of the mapping to
   *     read+execute.  Since this exposes the page in its unsafe,
   *     non-HLT-filled state, this must be done with untrusted
   *     threads suspended.
   */
  {
    uintptr_t offset;
    for (offset = 0; offset < size; offset += NACL_MAP_PAGESIZE) {
      void *user_page_addr = (char *) user_addr + offset;
      if (VirtualAlloc(user_page_addr, NACL_MAP_PAGESIZE,
                       MEM_COMMIT, PAGE_EXECUTE_READ) != user_page_addr) {
        NaClLog(LOG_FATAL, "MakeDynamicCodePagesVisible: "
                "VirtualAlloc() failed -- probably out of swap space\n");
      }
    }
  }
#endif

  /* Sanity check: Ensure the page is not already in use. */
  CHECK(*writable_addr == 0);

  /* Pre-fill with halts so untrusted code can never see raw bytes. */
  NaClFillMemoryRegionWithHalt(writable_addr, size);

#if NACL_WINDOWS
  NaClUntrustedThreadsResumeAll(nap);
#else
  /* POSIX: simply upgrade the PROT_NONE mapping to read+execute. */
  if (NaClMprotect(user_addr, size, PROT_READ | PROT_EXEC) != 0) {
    NaClLog(LOG_FATAL, "MakeDynamicCodePageVisible: NaClMprotect() failed\n");
  }
#endif
}
474 :
475 : /*
476 : * Maps a writable version of the code at [offset, offset+size) and returns a
477 : * pointer to the new mapping. Internally caches the last mapping between
478 : * calls. Pass offset=0,size=0 to clear cache.
479 : * Caller must hold nap->dynamic_load_mutex.
480 : */
481 2155 : static uintptr_t CachedMapWritableText(struct NaClApp *nap,
482 : uint32_t offset,
483 : uint32_t size) {
484 : /*
485 : * The nap->* variables used in this function can be in two states:
486 : *
487 : * 1)
488 : * nap->dynamic_mapcache_size == 0
489 : * nap->dynamic_mapcache_ret == 0
490 : *
491 : * Initial state, nothing is cached.
492 : *
493 : * 2)
494 : * nap->dynamic_mapcache_size != 0
495 : * nap->dynamic_mapcache_ret != 0
496 : *
497 : * We have a cached mmap result stored, that must be unmapped.
498 : */
499 2155 : struct NaClDesc *shm = nap->text_shm;
500 :
501 2155 : if (offset != nap->dynamic_mapcache_offset
502 2068 : || size != nap->dynamic_mapcache_size) {
503 : /*
504 : * cache miss, first clear the old cache if needed
505 : */
506 92 : if (nap->dynamic_mapcache_size > 0) {
507 76 : NaClDescUnmapUnsafe(shm, (void *) nap->dynamic_mapcache_ret,
508 : nap->dynamic_mapcache_size);
509 76 : nap->dynamic_mapcache_offset = 0;
510 76 : nap->dynamic_mapcache_size = 0;
511 76 : nap->dynamic_mapcache_ret = 0;
512 : }
513 :
514 : /*
515 : * update that cached version
516 : */
517 92 : if (size > 0) {
518 : uint32_t current_page_index;
519 : uint32_t end_page_index;
520 :
521 83 : uintptr_t mapping = (*((struct NaClDescVtbl const *)
522 83 : shm->base.vtbl)->
523 83 : Map)(shm,
524 : NaClDescEffectorTrustedMem(),
525 : NULL,
526 : size,
527 : NACL_ABI_PROT_READ | NACL_ABI_PROT_WRITE,
528 : NACL_ABI_MAP_SHARED,
529 : offset);
530 83 : if (NaClPtrIsNegErrno(&mapping)) {
531 0 : return 0;
532 : }
533 :
534 : /*
535 : * To reduce the number of mprotect() system calls, we coalesce
536 : * MakeDynamicCodePagesVisible() calls for adjacent pages that
537 : * have yet not been allocated.
538 : */
539 83 : current_page_index = offset / NACL_MAP_PAGESIZE;
540 83 : end_page_index = (offset + size) / NACL_MAP_PAGESIZE;
541 252 : while (current_page_index < end_page_index) {
542 86 : uint32_t start_page_index = current_page_index;
543 : /* Find the end of this block of unallocated pages. */
544 378 : while (current_page_index < end_page_index &&
545 105 : !BitmapIsBitSet(nap->dynamic_page_bitmap, current_page_index)) {
546 101 : current_page_index++;
547 : }
548 86 : if (current_page_index > start_page_index) {
549 83 : uintptr_t writable_addr =
550 83 : mapping + (start_page_index * NACL_MAP_PAGESIZE - offset);
551 83 : MakeDynamicCodePagesVisible(nap, start_page_index, current_page_index,
552 : (uint8_t *) writable_addr);
553 : }
554 86 : current_page_index++;
555 : }
556 :
557 83 : nap->dynamic_mapcache_offset = offset;
558 83 : nap->dynamic_mapcache_size = size;
559 83 : nap->dynamic_mapcache_ret = mapping;
560 : }
561 : }
562 2155 : return nap->dynamic_mapcache_ret;
563 : }
564 :
565 : /*
566 : * A wrapper around CachedMapWritableText that performs common address
567 : * calculations.
568 : * Outputs *mmapped_addr.
569 : * Caller must hold nap->dynamic_load_mutex.
570 : * Returns boolean, true on success
571 : */
572 2146 : static INLINE int NaClTextMapWrapper(struct NaClApp *nap,
573 : uint32_t dest,
574 : uint32_t size,
575 : uint8_t **mapped_addr) {
576 : uint32_t shm_offset;
577 : uint32_t shm_map_offset;
578 : uint32_t within_page_offset;
579 : uint32_t shm_map_offset_end;
580 : uint32_t shm_map_size;
581 : uintptr_t mmap_ret;
582 : uint8_t *mmap_result;
583 :
584 2146 : shm_offset = dest - (uint32_t) nap->dynamic_text_start;
585 2146 : shm_map_offset = shm_offset & ~(NACL_MAP_PAGESIZE - 1);
586 2146 : within_page_offset = shm_offset & (NACL_MAP_PAGESIZE - 1);
587 2146 : shm_map_offset_end =
588 2146 : (shm_offset + size + NACL_MAP_PAGESIZE - 1) & ~(NACL_MAP_PAGESIZE - 1);
589 2146 : shm_map_size = shm_map_offset_end - shm_map_offset;
590 :
591 2146 : mmap_ret = CachedMapWritableText(nap,
592 : shm_map_offset,
593 : shm_map_size);
594 2146 : if (0 == mmap_ret) {
595 0 : return 0;
596 : }
597 2146 : mmap_result = (uint8_t *) mmap_ret;
598 2146 : *mapped_addr = mmap_result + within_page_offset;
599 2146 : return 1;
600 : }
601 :
602 : /*
603 : * Clear the mmap cache if multiple pages were mapped.
604 : * Caller must hold nap->dynamic_load_mutex.
605 : */
606 2146 : static INLINE void NaClTextMapClearCacheIfNeeded(struct NaClApp *nap,
607 : uint32_t dest,
608 : uint32_t size) {
609 : uint32_t shm_offset;
610 : uint32_t shm_map_offset;
611 : uint32_t shm_map_offset_end;
612 : uint32_t shm_map_size;
613 2146 : shm_offset = dest - (uint32_t) nap->dynamic_text_start;
614 2146 : shm_map_offset = shm_offset & ~(NACL_MAP_PAGESIZE - 1);
615 2146 : shm_map_offset_end =
616 2146 : (shm_offset + size + NACL_MAP_PAGESIZE - 1) & ~(NACL_MAP_PAGESIZE - 1);
617 2146 : shm_map_size = shm_map_offset_end - shm_map_offset;
618 2146 : if (shm_map_size > NACL_MAP_PAGESIZE) {
619 : /* call with size==offset==0 to clear cache */
620 9 : CachedMapWritableText(nap, 0, 0);
621 : }
622 2146 : }
623 :
/*
 * Validates |code_copy| (a trusted, private copy of |size| bytes of
 * code) and, on success, installs it at untrusted address |dest| within
 * the dynamic text region.  |dest| and |size| must be bundle-aligned
 * and the target range must be unused.  |metadata| (may be NULL) is
 * passed through to the validator.
 * Returns 0 on success, or a negative NACL_ABI_* errno value.
 */
int32_t NaClTextDyncodeCreate(struct NaClApp *nap,
                              uint32_t dest,
                              void *code_copy,
                              uint32_t size,
                              const struct NaClValidationMetadata *metadata) {
  uintptr_t dest_addr;
  uint8_t *mapped_addr;
  int32_t retval = -NACL_ABI_EINVAL;
  int validator_result;
  struct NaClPerfCounter time_dyncode_create;
  NaClPerfCounterCtor(&time_dyncode_create, "NaClTextDyncodeCreate");

  if (NULL == nap->text_shm) {
    NaClLog(1, "NaClTextDyncodeCreate: Dynamic loading not enabled\n");
    return -NACL_ABI_EINVAL;
  }
  if (0 != (dest & (nap->bundle_size - 1)) ||
      0 != (size & (nap->bundle_size - 1))) {
    NaClLog(1, "NaClTextDyncodeCreate: Non-bundle-aligned address or size\n");
    return -NACL_ABI_EINVAL;
  }
  dest_addr = NaClUserToSysAddrRange(nap, dest, size);
  if (kNaClBadAddress == dest_addr) {
    NaClLog(1, "NaClTextDyncodeCreate: Dest address out of range\n");
    return -NACL_ABI_EFAULT;
  }
  if (dest < nap->dynamic_text_start) {
    NaClLog(1, "NaClTextDyncodeCreate: Below dynamic code area\n");
    return -NACL_ABI_EFAULT;
  }
  /*
   * We ensure that the final HLTs of the dynamic code region cannot
   * be overwritten, just in case of CPU bugs.
   */
  if (dest + size > nap->dynamic_text_end - NACL_HALT_SLED_SIZE) {
    NaClLog(1, "NaClTextDyncodeCreate: Above dynamic code area\n");
    return -NACL_ABI_EFAULT;
  }
  if (0 == size) {
    /* Nothing to load.  Succeed trivially. */
    return 0;
  }

  NaClXMutexLock(&nap->dynamic_load_mutex);

  /*
   * Validate the code before trying to create the region.  This avoids the need
   * to delete the region if validation fails.
   * See: http://code.google.com/p/nativeclient/issues/detail?id=2566
   */
  if (!nap->skip_validator) {
    validator_result = NaClValidateCode(nap, dest, code_copy, size, metadata);
  } else {
    NaClLog(LOG_ERROR, "VALIDATION SKIPPED.\n");
    validator_result = LOAD_OK;
  }

  NaClPerfCounterMark(&time_dyncode_create,
                      NACL_PERF_IMPORTANT_PREFIX "DynRegionValidate");
  NaClPerfCounterIntervalLast(&time_dyncode_create);

  /* Optionally (debug/testing) proceed despite a validation failure. */
  if (validator_result != LOAD_OK
      && nap->ignore_validator_result) {
    NaClLog(LOG_ERROR, "VALIDATION FAILED for dynamically-loaded code: "
            "continuing anyway...\n");
    validator_result = LOAD_OK;
  }

  if (validator_result != LOAD_OK) {
    NaClLog(1, "NaClTextDyncodeCreate: "
            "Validation of dynamic code failed\n");
    retval = -NACL_ABI_EINVAL;
    goto cleanup_unlock;
  }

  /* Reserve the target range in the region bookkeeping. */
  if (NaClDynamicRegionCreate(nap, dest_addr, size, 0) != 1) {
    /* target addr is in use */
    NaClLog(1, "NaClTextDyncodeCreate: Code range already allocated\n");
    retval = -NACL_ABI_EINVAL;
    goto cleanup_unlock;
  }

  /* Obtain a writable (trusted-side) view of the target pages. */
  if (!NaClTextMapWrapper(nap, dest, size, &mapped_addr)) {
    retval = -NACL_ABI_ENOMEM;
    goto cleanup_unlock;
  }

  /* Publish the code via the tails-barrier-heads protocol. */
  CopyCodeSafelyInitial(mapped_addr, code_copy, size, nap->bundle_size);
  /*
   * Flush the processor's instruction cache.  This is not necessary
   * for security, because any old cached instructions will just be
   * safe halt instructions.  It is only necessary to ensure that
   * untrusted code runs correctly when it tries to execute the
   * dynamically-loaded code.
   */
  NaClFlushCacheForDoublyMappedCode(mapped_addr, (uint8_t *) dest_addr, size);

  retval = 0;

  NaClTextMapClearCacheIfNeeded(nap, dest, size);

cleanup_unlock:
  NaClXMutexUnlock(&nap->dynamic_load_mutex);
  return retval;
}
729 :
730 2178 : int32_t NaClSysDyncodeCreate(struct NaClAppThread *natp,
731 : uint32_t dest,
732 : uint32_t src,
733 : uint32_t size) {
734 2178 : struct NaClApp *nap = natp->nap;
735 : uintptr_t src_addr;
736 : uint8_t *code_copy;
737 2178 : int32_t retval = -NACL_ABI_EINVAL;
738 :
739 2178 : if (!nap->enable_dyncode_syscalls) {
740 1 : NaClLog(LOG_WARNING,
741 : "NaClSysDyncodeCreate: Dynamic code syscalls are disabled\n");
742 1 : return -NACL_ABI_ENOSYS;
743 : }
744 :
745 2177 : src_addr = NaClUserToSysAddrRange(nap, src, size);
746 2177 : if (kNaClBadAddress == src_addr) {
747 1 : NaClLog(1, "NaClSysDyncodeCreate: Source address out of range\n");
748 1 : return -NACL_ABI_EFAULT;
749 : }
750 :
751 : /*
752 : * Make a private copy of the code, so that we can validate it
753 : * without a TOCTTOU race condition.
754 : */
755 2176 : code_copy = malloc(size);
756 2176 : if (NULL == code_copy) {
757 0 : return -NACL_ABI_ENOMEM;
758 : }
759 2176 : memcpy(code_copy, (uint8_t*) src_addr, size);
760 :
761 : /* Unknown data source, no metadata. */
762 2176 : retval = NaClTextDyncodeCreate(nap, dest, code_copy, size, NULL);
763 :
764 2176 : free(code_copy);
765 2176 : return retval;
766 : }
767 :
/*
 * Syscall handler: replaces |size| bytes at untrusted address |dest|
 * with the bytes at untrusted address |src|, subject to
 * code-replacement validation.  The target must lie entirely within a
 * single previously created, non-mmap dynamic region.  The patch is
 * widened to whole bundles, staged in trusted memory (stack buffer for
 * the common single-bundle case), validated against the existing code,
 * and then copied in via NaClCopyCode.
 * Returns 0 on success, or a negative NACL_ABI_* errno value.
 */
int32_t NaClSysDyncodeModify(struct NaClAppThread *natp,
                             uint32_t dest,
                             uint32_t src,
                             uint32_t size) {
  struct NaClApp *nap = natp->nap;
  uintptr_t dest_addr;
  uintptr_t src_addr;
  uintptr_t beginbundle;
  uintptr_t endbundle;
  uintptr_t offset;
  uint8_t *mapped_addr;
  uint8_t *code_copy = NULL;
  uint8_t code_copy_buf[NACL_INSTR_BLOCK_SIZE];
  int validator_result;
  int32_t retval = -NACL_ABI_EINVAL;
  struct NaClDynamicRegion *region;

  if (!nap->validator->code_replacement) {
    NaClLog(LOG_WARNING,
            "NaClSysDyncodeModify: "
            "Dynamic code modification is not supported\n");
    return -NACL_ABI_ENOSYS;
  }

  if (!nap->enable_dyncode_syscalls) {
    NaClLog(LOG_WARNING,
            "NaClSysDyncodeModify: Dynamic code syscalls are disabled\n");
    return -NACL_ABI_ENOSYS;
  }

  if (NULL == nap->text_shm) {
    NaClLog(1, "NaClSysDyncodeModify: Dynamic loading not enabled\n");
    return -NACL_ABI_EINVAL;
  }

  if (0 == size) {
    /* Nothing to modify.  Succeed trivially. */
    return 0;
  }

  dest_addr = NaClUserToSysAddrRange(nap, dest, size);
  src_addr = NaClUserToSysAddrRange(nap, src, size);
  if (kNaClBadAddress == src_addr || kNaClBadAddress == dest_addr) {
    NaClLog(1, "NaClSysDyncodeModify: Address out of range\n");
    return -NACL_ABI_EFAULT;
  }

  NaClXMutexLock(&nap->dynamic_load_mutex);

  region = NaClDynamicRegionFind(nap, dest_addr, size);
  if (NULL == region ||
      region->start > dest_addr ||
      region->start + region->size < dest_addr + size ||
      region->is_mmap) {
    /*
     * target not a subregion of region or region is null, or came from a file.
     */
    NaClLog(1, "NaClSysDyncodeModify: Can't find region to modify\n");
    retval = -NACL_ABI_EFAULT;
    goto cleanup_unlock;
  }

  /* Widen the patch to whole bundles around [dest_addr, dest_addr+size). */
  beginbundle = dest_addr & ~(nap->bundle_size - 1);
  endbundle   = (dest_addr + size - 1 + nap->bundle_size)
                  & ~(nap->bundle_size - 1);
  offset      = dest_addr &  (nap->bundle_size - 1);
  if (endbundle-beginbundle <= sizeof code_copy_buf) {
    /* usually patches are a single bundle, so stack allocate */
    code_copy = code_copy_buf;
  } else {
    /* in general case heap allocate */
    code_copy = malloc(endbundle-beginbundle);
    if (NULL == code_copy) {
      retval = -NACL_ABI_ENOMEM;
      goto cleanup_unlock;
    }
  }

  /* copy the bundles from already-inserted code */
  memcpy(code_copy, (uint8_t*) beginbundle, endbundle - beginbundle);

  /*
   * make the requested change in temporary location
   * this avoids TOTTOU race
   */
  memcpy(code_copy + offset, (uint8_t*) src_addr, size);

  /* update dest/size to refer to entire bundles */
  dest &= ~(nap->bundle_size - 1);
  dest_addr &= ~((uintptr_t)nap->bundle_size - 1);
  /* since both are in sandbox memory this check should succeed */
  CHECK(endbundle-beginbundle < UINT32_MAX);
  size = (uint32_t)(endbundle - beginbundle);

  /* validate this code as a replacement */
  validator_result = NaClValidateCodeReplacement(nap,
                                                 dest,
                                                 (uint8_t*) dest_addr,
                                                 code_copy,
                                                 size);

  /* Optionally (debug/testing) proceed despite a validation failure. */
  if (validator_result != LOAD_OK
      && nap->ignore_validator_result) {
    NaClLog(LOG_ERROR, "VALIDATION FAILED for dynamically-loaded code: "
            "continuing anyway...\n");
    validator_result = LOAD_OK;
  }

  if (validator_result != LOAD_OK) {
    NaClLog(1, "NaClSysDyncodeModify: Validation of dynamic code failed\n");
    retval = -NACL_ABI_EINVAL;
    goto cleanup_unlock;
  }

  /* Obtain a writable (trusted-side) view of the target bundles. */
  if (!NaClTextMapWrapper(nap, dest, size, &mapped_addr)) {
    retval = -NACL_ABI_ENOMEM;
    goto cleanup_unlock;
  }

  if (LOAD_OK != NaClCopyCode(nap, dest, mapped_addr, code_copy, size)) {
    NaClLog(1, "NaClSysDyncodeModify: Copying of replacement code failed\n");
    retval = -NACL_ABI_EINVAL;
    goto cleanup_unlock;
  }
  retval = 0;

  NaClTextMapClearCacheIfNeeded(nap, dest, size);

cleanup_unlock:
  NaClXMutexUnlock(&nap->dynamic_load_mutex);

  /* Free only if we took the heap-allocation path above. */
  if (code_copy != code_copy_buf) {
    free(code_copy);
  }

  return retval;
}
905 :
/*
 * NaClSysDyncodeDelete: syscall handler that deletes a previously
 * inserted dynamic code region.
 *
 * natp  calling thread (its NaClApp is |nap| below).
 * dest  untrusted user address of the start of the region.
 * size  region size in bytes; must exactly match the region as it
 *       was originally inserted (checked against the region table).
 *
 * Returns 0 on success, -NACL_ABI_EAGAIN when deletion has been
 * initiated but other threads have not yet checked in, or another
 * negative NaCl errno on failure.
 *
 * Deletion is a two-phase protocol: first the region's bundle heads
 * are overwritten with halt instructions so no new thread can enter
 * it, then the bookkeeping entry is removed only once every thread's
 * generation has caught up with the generation recorded when deletion
 * was requested (see NaClMinimumThreadGeneration below).
 */
int32_t NaClSysDyncodeDelete(struct NaClAppThread *natp,
                             uint32_t dest,
                             uint32_t size) {
  struct NaClApp *nap = natp->nap;
  uintptr_t dest_addr;
  uint8_t *mapped_addr;
  int32_t retval = -NACL_ABI_EINVAL;
  struct NaClDynamicRegion *region;

  /* Reject outright when the embedder disabled the dyncode syscalls. */
  if (!nap->enable_dyncode_syscalls) {
    NaClLog(LOG_WARNING,
            "NaClSysDyncodeDelete: Dynamic code syscalls are disabled\n");
    return -NACL_ABI_ENOSYS;
  }

  /*
   * text_shm backs the dynamic text area; if it was never set up,
   * no region could have been created, so there is nothing to delete.
   */
  if (NULL == nap->text_shm) {
    NaClLog(1, "NaClSysDyncodeDelete: Dynamic loading not enabled\n");
    return -NACL_ABI_EINVAL;
  }

  if (0 == size) {
    /* Nothing to delete. Just update our generation. */
    int gen;
    /* fetch current generation */
    NaClXMutexLock(&nap->dynamic_load_mutex);
    gen = nap->dynamic_delete_generation;
    NaClXMutexUnlock(&nap->dynamic_load_mutex);
    /* set our generation */
    NaClSetThreadGeneration(natp, gen);
    return 0;
  }

  /* Translate and bounds-check the untrusted address range. */
  dest_addr = NaClUserToSysAddrRange(nap, dest, size);
  if (kNaClBadAddress == dest_addr) {
    NaClLog(1, "NaClSysDyncodeDelete: Address out of range\n");
    return -NACL_ABI_EFAULT;
  }

  NaClXMutexLock(&nap->dynamic_load_mutex);

  /*
   * this check ensures the to-be-deleted region is identical to a
   * previously inserted region, so no need to check for alignment/bounds/etc
   */
  region = NaClDynamicRegionFind(nap, dest_addr, size);
  if (NULL == region ||
      region->start != dest_addr ||
      region->size != size ||
      region->is_mmap) {
    /* Note: regions flagged is_mmap are explicitly not deletable here. */
    NaClLog(1, "NaClSysDyncodeDelete: Can't find region to delete\n");
    retval = -NACL_ABI_EFAULT;
    goto cleanup_unlock;
  }


  /* delete_generation < 0 means no deletion was requested for it yet. */
  if (region->delete_generation < 0) {
    /* first deletion request */

    /* Generation counter is monotonically increasing; refuse to wrap. */
    if (nap->dynamic_delete_generation == INT32_MAX) {
      NaClLog(1, "NaClSysDyncodeDelete:"
              "Overflow, can only delete INT32_MAX regions\n");
      retval = -NACL_ABI_EFAULT;
      goto cleanup_unlock;
    }

    /* Map the region's text writable in trusted memory for patching. */
    if (!NaClTextMapWrapper(nap, dest, size, &mapped_addr)) {
      retval = -NACL_ABI_ENOMEM;
      goto cleanup_unlock;
    }

    /* make it so no new threads can enter target region */
    ReplaceBundleHeadsWithHalts(mapped_addr, size, nap->bundle_size);

    /*
     * Flush the instruction cache. In principle this is needed for
     * security on ARM so that, when new code is loaded, it is not
     * possible for it to jump to stale code that remains in the
     * icache.
     */
    NaClFlushCacheForDoublyMappedCode(mapped_addr, (uint8_t *) dest_addr, size);

    NaClTextMapClearCacheIfNeeded(nap, dest, size);

    /* increment and record the generation deletion was requested */
    region->delete_generation = ++nap->dynamic_delete_generation;
  }

  /* update our own generation */
  NaClSetThreadGeneration(natp, nap->dynamic_delete_generation);

  if (region->delete_generation <= NaClMinimumThreadGeneration(nap)) {
    /*
     * All threads have checked in since we marked region for deletion.
     * It is safe to remove the region.
     *
     * No need to memset the region to hlt since bundle heads are hlt
     * and thus the bodies are unreachable.
     */
    NaClDynamicRegionDelete(nap, region);
    retval = 0;
  } else {
    /*
     * Still waiting for some threads to report in...
     * Caller is expected to retry; retval signals the transient state.
     */
    retval = -NACL_ABI_EAGAIN;
  }

 cleanup_unlock:
  NaClXMutexUnlock(&nap->dynamic_load_mutex);
  return retval;
}
1017 :
1018 3 : void NaClDyncodeVisit(
1019 : struct NaClApp *nap,
1020 : void (*fn)(void *state, struct NaClDynamicRegion *region),
1021 : void *state) {
1022 : int i;
1023 :
1024 3 : NaClXMutexLock(&nap->dynamic_load_mutex);
1025 3 : for (i = 0; i < nap->num_dynamic_regions; ++i) {
1026 0 : fn(state, &nap->dynamic_regions[i]);
1027 : }
1028 3 : NaClXMutexUnlock(&nap->dynamic_load_mutex);
1029 3 : }
|