1 : /*
2 : * Copyright (c) 2012 The Native Client Authors. All rights reserved.
3 : * Use of this source code is governed by a BSD-style license that can be
4 : * found in the LICENSE file.
5 : */
6 :
7 : #include <string.h>
8 :
9 : #include "native_client/src/include/concurrency_ops.h"
10 : #include "native_client/src/include/nacl_platform.h"
11 : #include "native_client/src/include/portability.h"
12 : #include "native_client/src/shared/platform/nacl_check.h"
13 : #include "native_client/src/shared/platform/nacl_log.h"
14 : #include "native_client/src/shared/platform/nacl_sync.h"
15 : #include "native_client/src/shared/platform/nacl_sync_checked.h"
16 : #include "native_client/src/trusted/desc/nacl_desc_base.h"
17 : #include "native_client/src/trusted/desc/nacl_desc_effector.h"
18 : #include "native_client/src/trusted/desc/nacl_desc_effector_trusted_mem.h"
19 : #include "native_client/src/trusted/desc/nacl_desc_imc_shm.h"
20 : #include "native_client/src/trusted/perf_counter/nacl_perf_counter.h"
21 : #include "native_client/src/trusted/service_runtime/arch/sel_ldr_arch.h"
22 : #include "native_client/src/trusted/service_runtime/include/bits/mman.h"
23 : #include "native_client/src/trusted/service_runtime/include/sys/errno.h"
24 : #include "native_client/src/trusted/service_runtime/nacl_app_thread.h"
25 : #include "native_client/src/trusted/service_runtime/nacl_error_code.h"
26 : #include "native_client/src/trusted/service_runtime/nacl_text.h"
27 : #include "native_client/src/trusted/service_runtime/sel_ldr.h"
28 : #include "native_client/src/trusted/service_runtime/sel_memory.h"
29 : #include "native_client/src/trusted/service_runtime/thread_suspension.h"
30 :
31 :
/*
 * Initial capacity (in entries) of the malloc'd nap->dynamic_regions
 * array; NaClDynamicRegionCreate grows the buffer by doubling once this
 * is exhausted, and NaClDynamicRegionDelete never shrinks below it.
 */
static const int kMinDynamicRegionsAllocated = 32;

/* Bits per byte, used by the dynamic-text page bitmap helpers below. */
static const int kBitsPerByte = 8;
36 :
37 260 : static uint8_t *BitmapAllocate(uint32_t indexes) {
38 260 : uint32_t byte_count = (indexes + kBitsPerByte - 1) / kBitsPerByte;
39 260 : uint8_t *bitmap = malloc(byte_count);
40 260 : if (bitmap != NULL) {
41 780 : memset(bitmap, 0, byte_count);
42 260 : }
43 260 : return bitmap;
44 : }
45 :
46 188 : static int BitmapIsBitSet(uint8_t *bitmap, uint32_t index) {
47 188 : return (bitmap[index / kBitsPerByte] & (1 << (index % kBitsPerByte))) != 0;
48 : }
49 :
50 92 : static void BitmapSetBit(uint8_t *bitmap, uint32_t index) {
51 92 : bitmap[index / kBitsPerByte] |= 1 << (index % kBitsPerByte);
52 92 : }
53 :
/*
 * Sets up the dynamic-text ("dyncode") region of the sandbox: the pages
 * between the end of static text and the start of rodata/data are replaced
 * by an executable shared-memory object, double-mappable so that trusted
 * code can write validated code into it while untrusted code executes it.
 *
 * On success, fills in nap->dynamic_text_start/end, nap->text_shm and
 * nap->dynamic_page_bitmap.  Returns LOAD_OK, or a LOAD_* error code on
 * allocation failure.  Unmappable conditions (Map() landing at the wrong
 * address, bitmap allocation failure) are fatal via NaClLog(LOG_FATAL).
 */
NaClErrorCode NaClMakeDynamicTextShared(struct NaClApp *nap) {
  enum NaClErrorCode retval = LOAD_INTERNAL;
  uintptr_t dynamic_text_size;
  struct NaClDescImcShm *shm = NULL;
  uintptr_t shm_vaddr_base;
  int mmap_protections;
  uintptr_t mmap_ret;

  uintptr_t shm_upper_bound;
  uintptr_t text_sysaddr;

  /* Dynamic text begins at the first allocation page after static text. */
  shm_vaddr_base = NaClEndOfStaticText(nap);
  NaClLog(4,
          "NaClMakeDynamicTextShared: shm_vaddr_base = %08"NACL_PRIxPTR"\n",
          shm_vaddr_base);
  shm_vaddr_base = NaClRoundAllocPage(shm_vaddr_base);
  NaClLog(4,
          "NaClMakeDynamicTextShared: shm_vaddr_base = %08"NACL_PRIxPTR"\n",
          shm_vaddr_base);

  /*
   * Default is that there is no usable dynamic code area.
   */
  nap->dynamic_text_start = shm_vaddr_base;
  nap->dynamic_text_end = shm_vaddr_base;
  if (!nap->use_shm_for_dynamic_text) {
    NaClLog(4,
            "NaClMakeDynamicTextShared:"
            " rodata / data segments not allocation aligned\n");
    NaClLog(4,
            " not using shm for text\n");
    return LOAD_OK;
  }

  /*
   * Allocate a shm region the size of which is nap->rodata_start -
   * end-of-text.  This implies that the "core" text will not be
   * backed by shm.
   */
  shm_upper_bound = nap->rodata_start;
  if (0 == shm_upper_bound) {
    /* No rodata segment; bound the region by the data segment instead. */
    shm_upper_bound = NaClTruncAllocPage(nap->data_start);
  }
  if (0 == shm_upper_bound) {
    /* Neither segment exists: empty dynamic-text region. */
    shm_upper_bound = shm_vaddr_base;
  }

  NaClLog(4, "shm_upper_bound = %08"NACL_PRIxPTR"\n", shm_upper_bound);

  dynamic_text_size = shm_upper_bound - shm_vaddr_base;
  NaClLog(4,
          "NaClMakeDynamicTextShared: dynamic_text_size = %"NACL_PRIxPTR"\n",
          dynamic_text_size);

  if (0 == dynamic_text_size) {
    NaClLog(4, "Empty JITtable region\n");
    return LOAD_OK;
  }

  shm = (struct NaClDescImcShm *) malloc(sizeof *shm);
  if (NULL == shm) {
    NaClLog(4, "NaClMakeDynamicTextShared: shm object allocation failed\n");
    retval = LOAD_NO_MEMORY;
    goto cleanup;
  }
  if (!NaClDescImcShmAllocCtor(shm, dynamic_text_size, /* executable= */ 1)) {
    /* cleanup invariant is if ptr is non-NULL, it's fully ctor'd */
    free(shm);
    shm = NULL;
    NaClLog(4, "NaClMakeDynamicTextShared: shm alloc ctor for text failed\n");
    retval = LOAD_NO_MEMORY_FOR_DYNAMIC_TEXT;
    goto cleanup;
  }

  text_sysaddr = NaClUserToSys(nap, shm_vaddr_base);

  /* Existing memory is anonymous paging file backed. */
  NaClPageFree((void *) text_sysaddr, dynamic_text_size);

  /*
   * Unix allows us to map pages with PROT_NONE initially and later
   * increase the mapping permissions with mprotect().
   *
   * Windows does not allow this, however: the initial permissions are
   * an upper bound on what the permissions may later be changed to
   * with VirtualProtect() or VirtualAlloc().  Given this, using
   * PROT_NONE at this point does not even make sense.  On Windows,
   * the pages start off as uncommitted, which makes them inaccessible
   * regardless of the page permissions they are mapped with.
   *
   * Write permissions are included here for nacl64-gdb to set
   * breakpoints.
   */
#if NACL_WINDOWS
  mmap_protections =
      NACL_ABI_PROT_READ | NACL_ABI_PROT_EXEC | NACL_ABI_PROT_WRITE;
#else
  mmap_protections = NACL_ABI_PROT_NONE;
#endif
  NaClLog(4,
          "NaClMakeDynamicTextShared: Map(,,0x%"NACL_PRIxPTR",size = 0x%x,"
          " prot=0x%x, flags=0x%x, offset=0)\n",
          text_sysaddr,
          (int) dynamic_text_size,
          mmap_protections,
          NACL_ABI_MAP_SHARED | NACL_ABI_MAP_FIXED);
  /* MAP_FIXED: the shm must land exactly over the freed region. */
  mmap_ret = (*((struct NaClDescVtbl const *) shm->base.base.vtbl)->
              Map)((struct NaClDesc *) shm,
                   NaClDescEffectorTrustedMem(),
                   (void *) text_sysaddr,
                   dynamic_text_size,
                   mmap_protections,
                   NACL_ABI_MAP_SHARED | NACL_ABI_MAP_FIXED,
                   0);
  if (text_sysaddr != mmap_ret) {
    NaClLog(LOG_FATAL, "Could not map in shm for dynamic text region\n");
  }

  /* One bit per NACL_MAP_PAGESIZE page, tracking which pages are live. */
  nap->dynamic_page_bitmap =
      BitmapAllocate((uint32_t) (dynamic_text_size / NACL_MAP_PAGESIZE));
  if (NULL == nap->dynamic_page_bitmap) {
    NaClLog(LOG_FATAL, "NaClMakeDynamicTextShared: BitmapAllocate() failed\n");
  }

  nap->dynamic_text_start = shm_vaddr_base;
  nap->dynamic_text_end = shm_upper_bound;
  nap->text_shm = &shm->base;
  retval = LOAD_OK;

cleanup:
  if (LOAD_OK != retval) {
    /* NaClDescSafeUnref tolerates NULL; free of NULL is a no-op. */
    NaClDescSafeUnref((struct NaClDesc *) shm);
    free(shm);
  }

  return retval;
}
191 :
192 : /*
193 : * Binary search nap->dynamic_regions to find the maximal region with start<=ptr
194 : * caller must hold nap->dynamic_load_mutex, and must discard result
195 : * when lock is released.
196 : */
struct NaClDynamicRegion* NaClDynamicRegionFindClosestLEQ(struct NaClApp *nap,
                                                          uintptr_t ptr) {
  /* Below this range size, a linear scan beats continued bisection. */
  const int kBinarySearchToScanCutoff = 16;
  int begin = 0;
  int end = nap->num_dynamic_regions;
  if (0 == nap->num_dynamic_regions) {
    return NULL;
  }
  /* as an optimization, check the last region first */
  if (nap->dynamic_regions[nap->num_dynamic_regions-1].start <= ptr) {
    return nap->dynamic_regions + nap->num_dynamic_regions-1;
  }
  /* comes before everything */
  if (ptr < nap->dynamic_regions[0].start) {
    return NULL;
  }
  /*
   * binary search, until range is small
   * Invariant: regions[begin].start <= ptr < regions[end].start.
   */
  while (begin + kBinarySearchToScanCutoff + 1 < end) {
    int mid = begin + (end - begin)/2;
    if (nap->dynamic_regions[mid].start <= ptr) {
      begin = mid;
    } else {
      end = mid;
    }
  }
  /* linear scan, faster for small ranges */
  while (begin + 1 < end && nap->dynamic_regions[begin + 1].start <= ptr) {
    begin++;
  }
  return nap->dynamic_regions + begin;
}
228 :
229 27 : struct NaClDynamicRegion* NaClDynamicRegionFind(struct NaClApp *nap,
230 27 : uintptr_t ptr,
231 27 : size_t size) {
232 27 : struct NaClDynamicRegion *p =
233 27 : NaClDynamicRegionFindClosestLEQ(nap, ptr + size - 1);
234 108 : return (p != NULL && ptr < p->start + p->size) ? p : NULL;
235 : }
236 :
/*
 * Inserts a new region [start, start+size) into the sorted
 * nap->dynamic_regions array, growing the backing buffer if needed.
 * Returns 1 on success, 0 if the target range overlaps an existing
 * region (or, unreachably, if the fatal realloc path returned).
 * Caller must hold nap->dynamic_load_mutex.
 */
int NaClDynamicRegionCreate(struct NaClApp *nap,
                            uintptr_t start,
                            size_t size,
                            int is_mmap) {
  struct NaClDynamicRegion item, *regionp, *end;
  item.start = start;
  item.size = size;
  /* -1 marks the region as not scheduled for deletion. */
  item.delete_generation = -1;
  item.is_mmap = is_mmap;
  if (nap->dynamic_regions_allocated == nap->num_dynamic_regions) {
    /* out of space, double buffer size */
    nap->dynamic_regions_allocated *= 2;
    if (nap->dynamic_regions_allocated < kMinDynamicRegionsAllocated) {
      nap->dynamic_regions_allocated = kMinDynamicRegionsAllocated;
    }
    /*
     * NOTE(review): overwriting the pointer would leak the old buffer on
     * realloc failure, but NaClLog(LOG_FATAL) aborts the process, so the
     * leak is moot and the return 0 below is unreachable.
     */
    nap->dynamic_regions = realloc(nap->dynamic_regions,
                                   sizeof(struct NaClDynamicRegion) *
                                   nap->dynamic_regions_allocated);
    if (NULL == nap->dynamic_regions) {
      NaClLog(LOG_FATAL, "NaClDynamicRegionCreate: realloc failed");
      return 0;
    }
  }
  /* find preceding entry */
  regionp = NaClDynamicRegionFindClosestLEQ(nap, start + size - 1);
  if (regionp != NULL && start < regionp->start + regionp->size) {
    /* target already in use */
    return 0;
  }
  if (NULL == regionp) {
    /* start at beginning if we couldn't find predecessor */
    regionp = nap->dynamic_regions;
  }
  end = nap->dynamic_regions + nap->num_dynamic_regions;
  /* scroll to insertion point (this should scroll at most 1 element) */
  for (; regionp != end && regionp->start < item.start; ++regionp);
  /*
   * insert and shift everything forward by 1: each iteration swaps the
   * carried |item| with the current slot, rippling the tail rightward.
   */
  for (; regionp != end; ++regionp) {
    /* swap(*i, item); */
    struct NaClDynamicRegion t = *regionp;
    *regionp = item;
    item = t;
  }
  *regionp = item;
  nap->num_dynamic_regions++;
  return 1;
}
284 :
285 6 : void NaClDynamicRegionDelete(struct NaClApp *nap, struct NaClDynamicRegion* r) {
286 6 : struct NaClDynamicRegion *end = nap->dynamic_regions
287 : + nap->num_dynamic_regions;
288 : /* shift everything down */
289 24 : for (; r + 1 < end; ++r) {
290 6 : r[0] = r[1];
291 6 : }
292 6 : nap->num_dynamic_regions--;
293 :
294 6 : if ( nap->dynamic_regions_allocated > kMinDynamicRegionsAllocated
295 : && nap->dynamic_regions_allocated/4 > nap->num_dynamic_regions) {
296 : /* too much waste, shrink buffer*/
297 0 : nap->dynamic_regions_allocated /= 2;
298 0 : nap->dynamic_regions = realloc(nap->dynamic_regions,
299 : sizeof(struct NaClDynamicRegion) *
300 : nap->dynamic_regions_allocated);
301 0 : if (NULL == nap->dynamic_regions) {
302 0 : NaClLog(LOG_FATAL, "NaClDynamicRegionCreate: realloc failed");
303 0 : return;
304 : }
305 0 : }
306 6 : }
307 :
308 :
309 11 : void NaClSetThreadGeneration(struct NaClAppThread *natp, int generation) {
310 : /*
311 : * outer check handles fast case (no change)
312 : * since threads only set their own generation it is safe
313 : */
314 11 : if (natp->dynamic_delete_generation != generation) {
315 8 : NaClXMutexLock(&natp->mu);
316 24 : CHECK(natp->dynamic_delete_generation <= generation);
317 8 : natp->dynamic_delete_generation = generation;
318 8 : NaClXMutexUnlock(&natp->mu);
319 8 : }
320 11 : }
321 :
322 13 : int NaClMinimumThreadGeneration(struct NaClApp *nap) {
323 13 : size_t index;
324 13 : int rv = INT_MAX;
325 13 : NaClXMutexLock(&nap->threads_mu);
326 66 : for (index = 0; index < nap->threads.num_entries; ++index) {
327 20 : struct NaClAppThread *thread = NaClGetThreadMu(nap, (int) index);
328 20 : if (thread != NULL) {
329 18 : NaClXMutexLock(&thread->mu);
330 18 : if (rv > thread->dynamic_delete_generation) {
331 15 : rv = thread->dynamic_delete_generation;
332 15 : }
333 18 : NaClXMutexUnlock(&thread->mu);
334 18 : }
335 20 : }
336 13 : NaClXMutexUnlock(&nap->threads_mu);
337 13 : return rv;
338 : }
339 :
/*
 * Copies every 32-bit word of [dest, dest+size) from |src| EXCEPT the
 * first word of each bundle (the bundle head).
 */
static void CopyBundleTails(uint8_t *dest,
                            uint8_t *src,
                            int32_t size,
                            int bundle_size) {
  /*
   * The order in which these locations are written does not matter:
   * none of the locations will be reachable, because the bundle heads
   * still contains HLTs.
   */
  int bundle_mask = bundle_size - 1;
  uint32_t *from = (uint32_t *) src;
  uint32_t *to = (uint32_t *) dest;
  uint32_t *limit = (uint32_t *) (dest + size);

  CHECK(0 == ((uintptr_t) dest & 3));

  for (; to < limit; ++to, ++from) {
    /* Skip the word at each bundle-aligned address (the bundle head). */
    if ((((uintptr_t) to) & bundle_mask) != 0) {
      *to = *from;
    }
  }
}
367 :
/*
 * Copies the first 32-bit word of each bundle in [dest, dest+size) from
 * |src|, using single word stores so concurrently-executing untrusted
 * threads see either the old HLT word or the complete new head.
 */
static void CopyBundleHeads(uint8_t *dest,
                            uint8_t *src,
                            uint32_t size,
                            int bundle_size) {
  /* Again, the order in which these locations are written does not matter. */
  uint8_t *src_ptr;
  uint8_t *dest_ptr;
  uint8_t *end_ptr;

  /* dest must be aligned for the writes to be atomic. */
  CHECK(0 == ((uintptr_t) dest & 3));

  src_ptr = src;
  dest_ptr = dest;
  end_ptr = dest + size;
  while (dest_ptr < end_ptr) {
    /*
     * We assume that writing the 32-bit int here is atomic, which is
     * the case on x86 and ARM as long as the address is word-aligned.
     * The read does not have to be atomic.
     */
    *(uint32_t *) dest_ptr = *(uint32_t *) src_ptr;
    dest_ptr += bundle_size;
    src_ptr += bundle_size;
  }
}
394 :
395 6 : static void ReplaceBundleHeadsWithHalts(uint8_t *dest,
396 6 : uint32_t size,
397 6 : int bundle_size) {
398 6 : uint32_t *dest_ptr = (uint32_t*) dest;
399 6 : uint32_t *end_ptr = (uint32_t*) (dest + size);
400 4116 : while (dest_ptr < end_ptr) {
401 : /* dont assume 1-byte halt, write entire NACL_HALT_WORD */
402 4104 : *dest_ptr = NACL_HALT_WORD;
403 4104 : dest_ptr += bundle_size / sizeof(uint32_t);
404 4104 : }
405 6 : NaClWriteMemoryBarrier();
406 6 : }
407 :
/*
 * Copies freshly-validated code into a region whose bundle heads still
 * contain HLTs.  The protocol's order is essential: tails first (while
 * unreachable), then a write barrier, then the atomic head words that
 * make the code reachable.
 */
static INLINE void CopyCodeSafelyInitial(uint8_t *dest,
                                         uint8_t *src,
                                         uint32_t size,
                                         int bundle_size) {
  CopyBundleTails(dest, src, size, bundle_size);
  NaClWriteMemoryBarrier();
  CopyBundleHeads(dest, src, size, bundle_size);
}
416 :
/*
 * Makes the dynamic-text pages [page_index_min, page_index_max) usable:
 * marks them in nap->dynamic_page_bitmap, fills them with halt
 * instructions via the trusted writable mapping |writable_addr|, and
 * grants the untrusted mapping read+execute permission.
 * Caller must hold nap->dynamic_load_mutex; the pages must not already
 * be marked in the bitmap.
 */
static void MakeDynamicCodePagesVisible(struct NaClApp *nap,
                                        uint32_t page_index_min,
                                        uint32_t page_index_max,
                                        uint8_t *writable_addr) {
  void *user_addr;
  uint32_t index;
  size_t size = (page_index_max - page_index_min) * NACL_MAP_PAGESIZE;

  for (index = page_index_min; index < page_index_max; index++) {
    /* Each page is made visible exactly once. */
    CHECK(!BitmapIsBitSet(nap->dynamic_page_bitmap, index));
    BitmapSetBit(nap->dynamic_page_bitmap, index);
  }
  user_addr = (void *) NaClUserToSys(nap, nap->dynamic_text_start
                                     + page_index_min * NACL_MAP_PAGESIZE);

#if NACL_WINDOWS
  NaClUntrustedThreadsSuspendAll(nap, /* save_registers= */ 0);

  /*
   * The VirtualAlloc() call here has two effects:
   *
   *  1) It commits the page in the shared memory (SHM) object,
   *     allocating swap space and making the page accessible.  This
   *     affects our writable mapping of the shared memory object too.
   *     Before the VirtualAlloc() call, dereferencing writable_addr
   *     would fault.
   *  2) It changes the page permissions of the mapping to
   *     read+execute.  Since this exposes the page in its unsafe,
   *     non-HLT-filled state, this must be done with untrusted
   *     threads suspended.
   */
  {
    uintptr_t offset;
    for (offset = 0; offset < size; offset += NACL_MAP_PAGESIZE) {
      void *user_page_addr = (char *) user_addr + offset;
      if (VirtualAlloc(user_page_addr, NACL_MAP_PAGESIZE,
                       MEM_COMMIT, PAGE_EXECUTE_READ) != user_page_addr) {
        NaClLog(LOG_FATAL, "MakeDynamicCodePagesVisible: "
                "VirtualAlloc() failed -- probably out of swap space\n");
      }
    }
  }
#endif

  /* Sanity check: Ensure the page is not already in use. */
  CHECK(*writable_addr == 0);

  NaClFillMemoryRegionWithHalt(writable_addr, size);

#if NACL_WINDOWS
  NaClUntrustedThreadsResumeAll(nap);
#else
  /* On POSIX the untrusted view starts PROT_NONE; open it up now. */
  if (NaClMprotect(user_addr, size, PROT_READ | PROT_EXEC) != 0) {
    NaClLog(LOG_FATAL, "MakeDynamicCodePageVisible: NaClMprotect() failed\n");
  }
#endif
}
474 :
475 : /*
476 : * Maps a writable version of the code at [offset, offset+size) and returns a
477 : * pointer to the new mapping. Internally caches the last mapping between
478 : * calls. Pass offset=0,size=0 to clear cache.
479 : * Caller must hold nap->dynamic_load_mutex.
480 : */
static uintptr_t CachedMapWritableText(struct NaClApp *nap,
                                       uint32_t offset,
                                       uint32_t size) {
  /*
   * The nap->* variables used in this function can be in two states:
   *
   * 1)
   * nap->dynamic_mapcache_size == 0
   * nap->dynamic_mapcache_ret == 0
   *
   * Initial state, nothing is cached.
   *
   * 2)
   * nap->dynamic_mapcache_size != 0
   * nap->dynamic_mapcache_ret != 0
   *
   * We have a cached mmap result stored, that must be unmapped.
   */
  struct NaClDesc *shm = nap->text_shm;

  if (offset != nap->dynamic_mapcache_offset
          || size != nap->dynamic_mapcache_size) {
    /*
     * cache miss, first clear the old cache if needed
     */
    if (nap->dynamic_mapcache_size > 0) {
      NaClDescUnmapUnsafe(shm, (void *) nap->dynamic_mapcache_ret,
                          nap->dynamic_mapcache_size);
      nap->dynamic_mapcache_offset = 0;
      nap->dynamic_mapcache_size = 0;
      nap->dynamic_mapcache_ret = 0;
    }

    /*
     * update that cached version
     */
    if (size > 0) {
      uint32_t current_page_index;
      uint32_t end_page_index;

      /* Map a read/write (non-executable) trusted view of the shm. */
      uintptr_t mapping = (*((struct NaClDescVtbl const *)
                             shm->base.vtbl)->
                           Map)(shm,
                                NaClDescEffectorTrustedMem(),
                                NULL,
                                size,
                                NACL_ABI_PROT_READ | NACL_ABI_PROT_WRITE,
                                NACL_ABI_MAP_SHARED,
                                offset);
      if (NaClPtrIsNegErrno(&mapping)) {
        /* Map failed; report by returning 0 (cache remains cleared). */
        return 0;
      }

      /*
       * To reduce the number of mprotect() system calls, we coalesce
       * MakeDynamicCodePagesVisible() calls for adjacent pages that
       * have yet not been allocated.
       */
      current_page_index = offset / NACL_MAP_PAGESIZE;
      end_page_index = (offset + size) / NACL_MAP_PAGESIZE;
      while (current_page_index < end_page_index) {
        uint32_t start_page_index = current_page_index;
        /* Find the end of this block of unallocated pages. */
        while (current_page_index < end_page_index &&
               !BitmapIsBitSet(nap->dynamic_page_bitmap, current_page_index)) {
          current_page_index++;
        }
        if (current_page_index > start_page_index) {
          /* Make the whole contiguous run visible with one call. */
          uintptr_t writable_addr =
              mapping + (start_page_index * NACL_MAP_PAGESIZE - offset);
          MakeDynamicCodePagesVisible(nap, start_page_index, current_page_index,
                                      (uint8_t *) writable_addr);
        }
        /* Skip over the already-allocated page that stopped the scan. */
        current_page_index++;
      }

      nap->dynamic_mapcache_offset = offset;
      nap->dynamic_mapcache_size = size;
      nap->dynamic_mapcache_ret = mapping;
    }
  }
  return nap->dynamic_mapcache_ret;
}
564 :
565 : /*
566 : * A wrapper around CachedMapWritableText that performs common address
567 : * calculations.
568 : * Outputs *mmapped_addr.
569 : * Caller must hold nap->dynamic_load_mutex.
570 : * Returns boolean, true on success
571 : */
572 1113 : static INLINE int NaClTextMapWrapper(struct NaClApp *nap,
573 1113 : uint32_t dest,
574 1113 : uint32_t size,
575 1113 : uint8_t **mapped_addr) {
576 1113 : uint32_t shm_offset;
577 1113 : uint32_t shm_map_offset;
578 1113 : uint32_t within_page_offset;
579 1113 : uint32_t shm_map_offset_end;
580 1113 : uint32_t shm_map_size;
581 1113 : uintptr_t mmap_ret;
582 1113 : uint8_t *mmap_result;
583 :
584 1113 : shm_offset = dest - (uint32_t) nap->dynamic_text_start;
585 1113 : shm_map_offset = shm_offset & ~(NACL_MAP_PAGESIZE - 1);
586 1113 : within_page_offset = shm_offset & (NACL_MAP_PAGESIZE - 1);
587 1113 : shm_map_offset_end =
588 : (shm_offset + size + NACL_MAP_PAGESIZE - 1) & ~(NACL_MAP_PAGESIZE - 1);
589 1113 : shm_map_size = shm_map_offset_end - shm_map_offset;
590 :
591 1113 : mmap_ret = CachedMapWritableText(nap,
592 : shm_map_offset,
593 : shm_map_size);
594 1113 : if (0 == mmap_ret) {
595 0 : return 0;
596 : }
597 1113 : mmap_result = (uint8_t *) mmap_ret;
598 1113 : *mapped_addr = mmap_result + within_page_offset;
599 1113 : return 1;
600 1113 : }
601 :
602 : /*
603 : * Clear the mmap cache if multiple pages were mapped.
604 : * Caller must hold nap->dynamic_load_mutex.
605 : */
606 1113 : static INLINE void NaClTextMapClearCacheIfNeeded(struct NaClApp *nap,
607 1113 : uint32_t dest,
608 1113 : uint32_t size) {
609 1113 : uint32_t shm_offset;
610 1113 : uint32_t shm_map_offset;
611 1113 : uint32_t shm_map_offset_end;
612 1113 : uint32_t shm_map_size;
613 1113 : shm_offset = dest - (uint32_t) nap->dynamic_text_start;
614 1113 : shm_map_offset = shm_offset & ~(NACL_MAP_PAGESIZE - 1);
615 1113 : shm_map_offset_end =
616 : (shm_offset + size + NACL_MAP_PAGESIZE - 1) & ~(NACL_MAP_PAGESIZE - 1);
617 1113 : shm_map_size = shm_map_offset_end - shm_map_offset;
618 1113 : if (shm_map_size > NACL_MAP_PAGESIZE) {
619 : /* call with size==offset==0 to clear cache */
620 9 : CachedMapWritableText(nap, 0, 0);
621 9 : }
622 1113 : }
623 :
/*
 * Validates |code_copy| (a trusted private copy of |size| bytes) and, on
 * success, installs it at untrusted address |dest| in the dynamic-text
 * region using the safe HLT-first copy protocol.
 * Returns 0 on success or a negative NACL_ABI_* errno on failure.
 * Takes nap->dynamic_load_mutex internally; |code_copy| must not alias
 * untrusted-writable memory (caller guarantees no TOCTTOU).
 */
int32_t NaClTextDyncodeCreate(struct NaClApp *nap,
                              uint32_t dest,
                              void *code_copy,
                              uint32_t size,
                              const struct NaClValidationMetadata *metadata) {
  uintptr_t dest_addr;
  uint8_t *mapped_addr;
  int32_t retval = -NACL_ABI_EINVAL;
  int validator_result;
  struct NaClPerfCounter time_dyncode_create;
  NaClPerfCounterCtor(&time_dyncode_create, "NaClTextDyncodeCreate");

  if (NULL == nap->text_shm) {
    NaClLog(1, "NaClTextDyncodeCreate: Dynamic loading not enabled\n");
    return -NACL_ABI_EINVAL;
  }
  if (0 != (dest & (nap->bundle_size - 1)) ||
      0 != (size & (nap->bundle_size - 1))) {
    NaClLog(1, "NaClTextDyncodeCreate: Non-bundle-aligned address or size\n");
    return -NACL_ABI_EINVAL;
  }
  dest_addr = NaClUserToSysAddrRange(nap, dest, size);
  if (kNaClBadAddress == dest_addr) {
    NaClLog(1, "NaClTextDyncodeCreate: Dest address out of range\n");
    return -NACL_ABI_EFAULT;
  }
  if (dest < nap->dynamic_text_start) {
    NaClLog(1, "NaClTextDyncodeCreate: Below dynamic code area\n");
    return -NACL_ABI_EFAULT;
  }
  /*
   * We ensure that the final HLTs of the dynamic code region cannot
   * be overwritten, just in case of CPU bugs.
   */
  if (dest + size > nap->dynamic_text_end - NACL_HALT_SLED_SIZE) {
    NaClLog(1, "NaClTextDyncodeCreate: Above dynamic code area\n");
    return -NACL_ABI_EFAULT;
  }
  if (0 == size) {
    /* Nothing to load. Succeed trivially. */
    return 0;
  }

  NaClXMutexLock(&nap->dynamic_load_mutex);

  /*
   * Validate the code before trying to create the region. This avoids the need
   * to delete the region if validation fails.
   * See: http://code.google.com/p/nativeclient/issues/detail?id=2566
   */
  if (!nap->skip_validator) {
    validator_result = NaClValidateCode(nap, dest, code_copy, size, metadata);
  } else {
    NaClLog(LOG_ERROR, "VALIDATION SKIPPED.\n");
    validator_result = LOAD_OK;
  }

  NaClPerfCounterMark(&time_dyncode_create,
                      NACL_PERF_IMPORTANT_PREFIX "DynRegionValidate");
  NaClPerfCounterIntervalLast(&time_dyncode_create);

  if (validator_result != LOAD_OK
      && nap->ignore_validator_result) {
    /* Debugging override: proceed despite a validation failure. */
    NaClLog(LOG_ERROR, "VALIDATION FAILED for dynamically-loaded code: "
            "continuing anyway...\n");
    validator_result = LOAD_OK;
  }

  if (validator_result != LOAD_OK) {
    NaClLog(1, "NaClTextDyncodeCreate: "
            "Validation of dynamic code failed\n");
    retval = -NACL_ABI_EINVAL;
    goto cleanup_unlock;
  }

  if (NaClDynamicRegionCreate(nap, dest_addr, size, 0) != 1) {
    /* target addr is in use */
    NaClLog(1, "NaClTextDyncodeCreate: Code range already allocated\n");
    retval = -NACL_ABI_EINVAL;
    goto cleanup_unlock;
  }

  if (!NaClTextMapWrapper(nap, dest, size, &mapped_addr)) {
    retval = -NACL_ABI_ENOMEM;
    goto cleanup_unlock;
  }

  /* Tails first, barrier, then atomic head words (see the helpers above). */
  CopyCodeSafelyInitial(mapped_addr, code_copy, size, nap->bundle_size);
  /*
   * Flush the processor's instruction cache.  This is not necessary
   * for security, because any old cached instructions will just be
   * safe halt instructions.  It is only necessary to ensure that
   * untrusted code runs correctly when it tries to execute the
   * dynamically-loaded code.
   */
  NaClFlushCacheForDoublyMappedCode(mapped_addr, (uint8_t *) dest_addr, size);

  retval = 0;

  NaClTextMapClearCacheIfNeeded(nap, dest, size);

 cleanup_unlock:
  NaClXMutexUnlock(&nap->dynamic_load_mutex);
  return retval;
}
729 :
730 1145 : int32_t NaClSysDyncodeCreate(struct NaClAppThread *natp,
731 1145 : uint32_t dest,
732 1145 : uint32_t src,
733 1145 : uint32_t size) {
734 1145 : struct NaClApp *nap = natp->nap;
735 1145 : uintptr_t src_addr;
736 1145 : uint8_t *code_copy;
737 1145 : int32_t retval = -NACL_ABI_EINVAL;
738 :
739 1145 : if (!nap->enable_dyncode_syscalls) {
740 1 : NaClLog(LOG_WARNING,
741 : "NaClSysDyncodeCreate: Dynamic code syscalls are disabled\n");
742 1 : return -NACL_ABI_ENOSYS;
743 : }
744 :
745 1144 : src_addr = NaClUserToSysAddrRange(nap, src, size);
746 1144 : if (kNaClBadAddress == src_addr) {
747 1 : NaClLog(1, "NaClSysDyncodeCreate: Source address out of range\n");
748 1 : return -NACL_ABI_EFAULT;
749 : }
750 :
751 : /*
752 : * Make a private copy of the code, so that we can validate it
753 : * without a TOCTTOU race condition.
754 : */
755 1143 : code_copy = malloc(size);
756 1143 : if (NULL == code_copy) {
757 0 : return -NACL_ABI_ENOMEM;
758 : }
759 3429 : memcpy(code_copy, (uint8_t*) src_addr, size);
760 :
761 : /* Unknown data source, no metadata. */
762 1143 : retval = NaClTextDyncodeCreate(nap, dest, code_copy, size, NULL);
763 :
764 1143 : free(code_copy);
765 1143 : return retval;
766 1145 : }
767 :
/*
 * dyncode_modify syscall: replaces |size| bytes at untrusted address
 * |dest| with the bytes at |src|, after re-validating the affected
 * bundles as a legal code replacement.  Only works on regions created
 * by dyncode_create (not file-backed mmap regions), and only when the
 * active validator supports code replacement.
 * Returns 0 on success or a negative NACL_ABI_* errno.
 */
int32_t NaClSysDyncodeModify(struct NaClAppThread *natp,
                             uint32_t dest,
                             uint32_t src,
                             uint32_t size) {
  struct NaClApp *nap = natp->nap;
  uintptr_t dest_addr;
  uintptr_t src_addr;
  uintptr_t beginbundle;
  uintptr_t endbundle;
  uintptr_t offset;
  uint8_t *mapped_addr;
  uint8_t *code_copy = NULL;
  uint8_t code_copy_buf[NACL_INSTR_BLOCK_SIZE];
  int validator_result;
  int32_t retval = -NACL_ABI_EINVAL;
  struct NaClDynamicRegion *region;

  if (!nap->validator->code_replacement) {
    NaClLog(LOG_WARNING,
            "NaClSysDyncodeModify: "
            "Dynamic code modification is not supported\n");
    return -NACL_ABI_ENOSYS;
  }

  if (!nap->enable_dyncode_syscalls) {
    NaClLog(LOG_WARNING,
            "NaClSysDyncodeModify: Dynamic code syscalls are disabled\n");
    return -NACL_ABI_ENOSYS;
  }

  if (NULL == nap->text_shm) {
    NaClLog(1, "NaClSysDyncodeModify: Dynamic loading not enabled\n");
    return -NACL_ABI_EINVAL;
  }

  if (0 == size) {
    /* Nothing to modify. Succeed trivially. */
    return 0;
  }

  dest_addr = NaClUserToSysAddrRange(nap, dest, size);
  src_addr = NaClUserToSysAddrRange(nap, src, size);
  if (kNaClBadAddress == src_addr || kNaClBadAddress == dest_addr) {
    NaClLog(1, "NaClSysDyncodeModify: Address out of range\n");
    return -NACL_ABI_EFAULT;
  }

  NaClXMutexLock(&nap->dynamic_load_mutex);

  region = NaClDynamicRegionFind(nap, dest_addr, size);
  if (NULL == region ||
      region->start > dest_addr ||
      region->start + region->size < dest_addr + size ||
      region->is_mmap) {
    /*
     * target not a subregion of region or region is null, or came from a file.
     */
    NaClLog(1, "NaClSysDyncodeModify: Can't find region to modify\n");
    retval = -NACL_ABI_EFAULT;
    goto cleanup_unlock;
  }

  /* Widen [dest_addr, dest_addr+size) to whole bundles. */
  beginbundle = dest_addr & ~(nap->bundle_size - 1);
  endbundle   = (dest_addr + size - 1 + nap->bundle_size)
                & ~(nap->bundle_size - 1);
  offset      = dest_addr &  (nap->bundle_size - 1);
  if (endbundle-beginbundle <= sizeof code_copy_buf) {
    /* usually patches are a single bundle, so stack allocate */
    code_copy = code_copy_buf;
  } else {
    /* in general case heap allocate */
    code_copy = malloc(endbundle-beginbundle);
    if (NULL == code_copy) {
      retval = -NACL_ABI_ENOMEM;
      goto cleanup_unlock;
    }
  }

  /* copy the bundles from already-inserted code */
  memcpy(code_copy, (uint8_t*) beginbundle, endbundle - beginbundle);

  /*
   * make the requested change in temporary location
   * this avoids TOCTTOU race
   */
  memcpy(code_copy + offset, (uint8_t*) src_addr, size);

  /* update dest/size to refer to entire bundles */
  dest      &= ~(nap->bundle_size - 1);
  dest_addr &= ~((uintptr_t)nap->bundle_size - 1);
  /* since both are in sandbox memory this check should succeed */
  CHECK(endbundle-beginbundle < UINT32_MAX);
  size = (uint32_t)(endbundle - beginbundle);

  /* validate this code as a replacement */
  validator_result = NaClValidateCodeReplacement(nap,
                                                 dest,
                                                 (uint8_t*) dest_addr,
                                                 code_copy,
                                                 size);

  if (validator_result != LOAD_OK
      && nap->ignore_validator_result) {
    /* Debugging override: proceed despite a validation failure. */
    NaClLog(LOG_ERROR, "VALIDATION FAILED for dynamically-loaded code: "
            "continuing anyway...\n");
    validator_result = LOAD_OK;
  }

  if (validator_result != LOAD_OK) {
    NaClLog(1, "NaClSysDyncodeModify: Validation of dynamic code failed\n");
    retval = -NACL_ABI_EINVAL;
    goto cleanup_unlock;
  }

  if (!NaClTextMapWrapper(nap, dest, size, &mapped_addr)) {
    retval = -NACL_ABI_ENOMEM;
    goto cleanup_unlock;
  }

  if (LOAD_OK != NaClCopyCode(nap, dest, mapped_addr, code_copy, size)) {
    NaClLog(1, "NaClSysDyncodeModify: Copying of replacement code failed\n");
    retval = -NACL_ABI_EINVAL;
    goto cleanup_unlock;
  }
  retval = 0;

  NaClTextMapClearCacheIfNeeded(nap, dest, size);

 cleanup_unlock:
  NaClXMutexUnlock(&nap->dynamic_load_mutex);

  /* Free only if we took the heap-allocation path above. */
  if (code_copy != code_copy_buf) {
    free(code_copy);
  }

  return retval;
}
905 :
/*
 * Syscall handler: delete a dynamically loaded code region.
 *
 * |dest|/|size| (untrusted-address space) must exactly match a region
 * previously inserted via the dyncode interface; partial deletion is
 * rejected by the region-identity check below.  Deletion is a two-phase
 * protocol driven by a monotonically increasing "delete generation":
 *
 *   Phase 1 (first request): the region's bundle heads are replaced
 *   with halts so no thread can newly enter it, caches are flushed, and
 *   the region is stamped with a fresh generation number.
 *
 *   Phase 2: the region is actually removed only once every thread has
 *   passed a synchronization point at or after that generation (checked
 *   via NaClMinimumThreadGeneration), i.e. no thread can still be
 *   executing inside the region.  Until then the caller gets
 *   -NACL_ABI_EAGAIN and is expected to retry.
 *
 * Returns 0 on success, -NACL_ABI_EAGAIN while waiting for other
 * threads to check in, or another negative NACL_ABI_* error code.
 */
int32_t NaClSysDyncodeDelete(struct NaClAppThread *natp,
                             uint32_t dest,
                             uint32_t size) {
  struct NaClApp *nap = natp->nap;
  uintptr_t dest_addr;
  uint8_t *mapped_addr;
  int32_t retval = -NACL_ABI_EINVAL;
  struct NaClDynamicRegion *region;

  if (!nap->enable_dyncode_syscalls) {
    NaClLog(LOG_WARNING,
            "NaClSysDyncodeDelete: Dynamic code syscalls are disabled\n");
    return -NACL_ABI_ENOSYS;
  }

  /* Dynamic loading requires the dynamic text shm to have been set up. */
  if (NULL == nap->text_shm) {
    NaClLog(1, "NaClSysDyncodeDelete: Dynamic loading not enabled\n");
    return -NACL_ABI_EINVAL;
  }

  if (0 == size) {
    /*
     * Nothing to delete.  Just update our generation: this lets a
     * thread declare "I have passed a safe point" without deleting
     * anything, which unblocks pending deletions by other threads.
     */
    int gen;
    /* fetch current generation under the lock */
    NaClXMutexLock(&nap->dynamic_load_mutex);
    gen = nap->dynamic_delete_generation;
    NaClXMutexUnlock(&nap->dynamic_load_mutex);
    /* set our generation */
    NaClSetThreadGeneration(natp, gen);
    return 0;
  }

  /* Translate and bounds-check the untrusted address range. */
  dest_addr = NaClUserToSysAddrRange(nap, dest, size);
  if (kNaClBadAddress == dest_addr) {
    NaClLog(1, "NaClSysDyncodeDelete: Address out of range\n");
    return -NACL_ABI_EFAULT;
  }

  NaClXMutexLock(&nap->dynamic_load_mutex);

  /*
   * this check ensures the to-be-deleted region is identical to a
   * previously inserted region, so no need to check for alignment/bounds/etc
   */
  region = NaClDynamicRegionFind(nap, dest_addr, size);
  if (NULL == region ||
      region->start != dest_addr ||
      region->size != size ||
      region->is_mmap) {
    NaClLog(1, "NaClSysDyncodeDelete: Can't find region to delete\n");
    retval = -NACL_ABI_EFAULT;
    goto cleanup_unlock;
  }


  /* delete_generation < 0 means no deletion has been requested yet. */
  if (region->delete_generation < 0) {
    /* first deletion request */

    /* Generations are int32; refuse to wrap past INT32_MAX. */
    if (nap->dynamic_delete_generation == INT32_MAX) {
      NaClLog(1, "NaClSysDyncodeDelete:"
              "Overflow, can only delete INT32_MAX regions\n");
      retval = -NACL_ABI_EFAULT;
      goto cleanup_unlock;
    }

    /* Obtain a writable mapping of the (normally read-only) text. */
    if (!NaClTextMapWrapper(nap, dest, size, &mapped_addr)) {
      retval = -NACL_ABI_ENOMEM;
      goto cleanup_unlock;
    }

    /* make it so no new threads can enter target region */
    ReplaceBundleHeadsWithHalts(mapped_addr, size, nap->bundle_size);

    /*
     * Flush the instruction cache. In principle this is needed for
     * security on ARM so that, when new code is loaded, it is not
     * possible for it to jump to stale code that remains in the
     * icache.
     */
    NaClFlushCacheForDoublyMappedCode(mapped_addr, (uint8_t *) dest_addr, size);

    NaClTextMapClearCacheIfNeeded(nap, dest, size);

    /* increment and record the generation deletion was requested */
    region->delete_generation = ++nap->dynamic_delete_generation;
  }

  /* update our own generation so we don't block the deletion ourselves */
  NaClSetThreadGeneration(natp, nap->dynamic_delete_generation);

  if (region->delete_generation <= NaClMinimumThreadGeneration(nap)) {
    /*
     * All threads have checked in since we marked region for deletion.
     * It is safe to remove the region.
     *
     * No need to memset the region to hlt since bundle heads are hlt
     * and thus the bodies are unreachable.
     */
    NaClDynamicRegionDelete(nap, region);
    retval = 0;
  } else {
    /*
     * Still waiting for some threads to report in...
     * Caller is expected to retry the syscall later.
     */
    retval = -NACL_ABI_EAGAIN;
  }

 cleanup_unlock:
  NaClXMutexUnlock(&nap->dynamic_load_mutex);
  return retval;
}
1017 :
1018 : void NaClDyncodeVisit(
1019 3 : struct NaClApp *nap,
1020 3 : void (*fn)(void *state, struct NaClDynamicRegion *region),
1021 3 : void *state) {
1022 3 : int i;
1023 :
1024 3 : NaClXMutexLock(&nap->dynamic_load_mutex);
1025 6 : for (i = 0; i < nap->num_dynamic_regions; ++i) {
1026 0 : fn(state, &nap->dynamic_regions[i]);
1027 0 : }
1028 3 : NaClXMutexUnlock(&nap->dynamic_load_mutex);
1029 3 : }
|