/*
 * Copyright (c) 2012 The Native Client Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include <string.h>

#include "native_client/src/include/concurrency_ops.h"
#include "native_client/src/include/nacl_platform.h"
#include "native_client/src/include/portability.h"
#include "native_client/src/shared/platform/nacl_check.h"
#include "native_client/src/shared/platform/nacl_log.h"
#include "native_client/src/shared/platform/nacl_sync.h"
#include "native_client/src/shared/platform/nacl_sync_checked.h"
#include "native_client/src/trusted/desc/nacl_desc_base.h"
#include "native_client/src/trusted/desc/nacl_desc_effector.h"
#include "native_client/src/trusted/desc/nacl_desc_imc_shm.h"
#include "native_client/src/trusted/perf_counter/nacl_perf_counter.h"
#include "native_client/src/trusted/service_runtime/arch/sel_ldr_arch.h"
#include "native_client/src/trusted/service_runtime/include/sys/errno.h"
#include "native_client/src/trusted/service_runtime/include/sys/mman.h"
#include "native_client/src/trusted/service_runtime/nacl_app_thread.h"
#include "native_client/src/trusted/service_runtime/nacl_error_code.h"
#include "native_client/src/trusted/service_runtime/nacl_text.h"
#include "native_client/src/trusted/service_runtime/sel_ldr.h"
#include "native_client/src/trusted/service_runtime/sel_memory.h"


/* initial size of the malloced buffer for dynamic regions */
static const int kMinDynamicRegionsAllocated = 32;

static const int kBitsPerByte = 8;
static uint8_t *BitmapAllocate(uint32_t indexes) {
  uint32_t byte_count = (indexes + kBitsPerByte - 1) / kBitsPerByte;
  uint8_t *bitmap = malloc(byte_count);
  if (bitmap != NULL) {
    memset(bitmap, 0, byte_count);
  }
  return bitmap;
}

static int BitmapIsBitSet(uint8_t *bitmap, uint32_t index) {
  return (bitmap[index / kBitsPerByte] & (1 << (index % kBitsPerByte))) != 0;
}

static void BitmapSetBit(uint8_t *bitmap, uint32_t index) {
  bitmap[index / kBitsPerByte] |= 1 << (index % kBitsPerByte);
}
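
/*
 * Worked example of the bitmap arithmetic above (illustrative values):
 * BitmapAllocate(13) rounds up to (13 + 8 - 1) / 8 == 2 zeroed bytes,
 * and BitmapSetBit(bitmap, 13) sets bit 13 % 8 == 5 of byte
 * 13 / 8 == 1, i.e. bitmap[1] |= 0x20, which BitmapIsBitSet(bitmap, 13)
 * then reports as nonzero.
 */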

/*
 * Private subclass of NaClDescEffector, used only in this file.
 */
struct NaClDescEffectorShm {
  struct NaClDescEffector base;
};

static
void NaClDescEffectorShmDtor(struct NaClDescEffector *vself) {
  /* no base class dtor to invoke */

  vself->vtbl = (struct NaClDescEffectorVtbl *) NULL;

  return;
}

static
int NaClDescEffectorShmUnmapMemory(struct NaClDescEffector *vself,
                                   uintptr_t sysaddr,
                                   size_t nbytes) {
  UNREFERENCED_PARAMETER(vself);
  UNREFERENCED_PARAMETER(sysaddr);
  UNREFERENCED_PARAMETER(nbytes);
  return 0;
}

static
uintptr_t NaClDescEffectorShmMapAnonymousMemory(struct NaClDescEffector *vself,
                                                uintptr_t sysaddr,
                                                size_t nbytes,
                                                int prot) {
  UNREFERENCED_PARAMETER(vself);
  UNREFERENCED_PARAMETER(sysaddr);
  UNREFERENCED_PARAMETER(nbytes);
  UNREFERENCED_PARAMETER(prot);

  NaClLog(LOG_FATAL, "NaClDescEffectorShmMapAnonymousMemory called\n");
  /* NOTREACHED but gcc doesn't know that */
  return -NACL_ABI_EINVAL;
}

static
struct NaClDescEffectorVtbl kNaClDescEffectorShmVtbl = {
  NaClDescEffectorShmDtor,
  NaClDescEffectorShmUnmapMemory,
  NaClDescEffectorShmMapAnonymousMemory,
};

int NaClDescEffectorShmCtor(struct NaClDescEffectorShm *self) {
  self->base.vtbl = &kNaClDescEffectorShmVtbl;
  return 1;
}

NaClErrorCode NaClMakeDynamicTextShared(struct NaClApp *nap) {
  enum NaClErrorCode retval = LOAD_INTERNAL;
  uintptr_t dynamic_text_size;
  struct NaClDescImcShm *shm = NULL;
  struct NaClDescEffectorShm shm_effector;
  int shm_effector_initialized = 0;
  uintptr_t shm_vaddr_base;
  int mmap_protections;
  uintptr_t mmap_ret;

  uintptr_t shm_upper_bound;
  uintptr_t text_sysaddr;

  shm_vaddr_base = NaClEndOfStaticText(nap);
  NaClLog(4,
          "NaClMakeDynamicTextShared: shm_vaddr_base = %08"NACL_PRIxPTR"\n",
          shm_vaddr_base);
  shm_vaddr_base = NaClRoundAllocPage(shm_vaddr_base);
  NaClLog(4,
          "NaClMakeDynamicTextShared: shm_vaddr_base = %08"NACL_PRIxPTR"\n",
          shm_vaddr_base);

  if (!nap->use_shm_for_dynamic_text) {
    NaClLog(4,
            "NaClMakeDynamicTextShared:"
            " rodata / data segments not allocation aligned\n");
    NaClLog(4,
            " not using shm for text\n");
    nap->dynamic_text_start = shm_vaddr_base;
    nap->dynamic_text_end = shm_vaddr_base;
    return LOAD_OK;
  }

  /*
   * Allocate a shm region the size of which is nap->rodata_start -
   * end-of-text.  This implies that the "core" text will not be
   * backed by shm.
   */
  shm_upper_bound = nap->rodata_start;
  if (0 == shm_upper_bound) {
    shm_upper_bound = nap->data_start;
  }
  if (0 == shm_upper_bound) {
    shm_upper_bound = shm_vaddr_base;
  }
  nap->dynamic_text_start = shm_vaddr_base;
  nap->dynamic_text_end = shm_upper_bound;

  NaClLog(4, "shm_upper_bound = %08"NACL_PRIxPTR"\n", shm_upper_bound);

  dynamic_text_size = shm_upper_bound - shm_vaddr_base;
  NaClLog(4,
          "NaClMakeDynamicTextShared: dynamic_text_size = %"NACL_PRIxPTR"\n",
          dynamic_text_size);

  if (0 == dynamic_text_size) {
    NaClLog(4, "Empty JITtable region\n");
    return LOAD_OK;
  }

  shm = (struct NaClDescImcShm *) malloc(sizeof *shm);
  if (NULL == shm) {
    goto cleanup;
  }
  if (!NaClDescImcShmAllocCtor(shm, dynamic_text_size, /* executable= */ 1)) {
    /* cleanup invariant is if ptr is non-NULL, it's fully ctor'd */
    free(shm);
    shm = NULL;
    NaClLog(4, "NaClMakeDynamicTextShared: shm creation for text failed\n");
    retval = LOAD_NO_MEMORY;
    goto cleanup;
  }
  if (!NaClDescEffectorShmCtor(&shm_effector)) {
    NaClLog(4,
            "NaClMakeDynamicTextShared: shm effector"
            " initialization failed\n");
    retval = LOAD_INTERNAL;
    goto cleanup;
  }
  shm_effector_initialized = 1;

  text_sysaddr = NaClUserToSys(nap, shm_vaddr_base);

  /* Existing memory is anonymous paging file backed. */
  NaCl_page_free((void *) text_sysaddr, dynamic_text_size);

  /*
   * Unix allows us to map pages with PROT_NONE initially and later
   * increase the mapping permissions with mprotect().
   *
   * Windows does not allow this, however: the initial permissions are
   * an upper bound on what the permissions may later be changed to
   * with VirtualProtect().  Given this, using PROT_NONE at this point
   * does not even make sense.  So we map with read+exec+write and
   * immediately turn down the permissions, so that we can later
   * re-enable read+exec page by page.  Write permissions are needed
   * for gdb to set breakpoints.
   */
#if NACL_WINDOWS
  mmap_protections =
      NACL_ABI_PROT_READ | NACL_ABI_PROT_EXEC | NACL_ABI_PROT_WRITE;
#else
  mmap_protections = NACL_ABI_PROT_NONE;
#endif
  NaClLog(4,
          "NaClMakeDynamicTextShared: Map(,,0x%"NACL_PRIxPTR",size = 0x%x,"
          " prot=0x%x, flags=0x%x, offset=0)\n",
          text_sysaddr,
          (int) dynamic_text_size,
          mmap_protections,
          NACL_ABI_MAP_SHARED | NACL_ABI_MAP_FIXED);
  mmap_ret = (*((struct NaClDescVtbl const *) shm->base.base.vtbl)->
              Map)((struct NaClDesc *) shm,
                   (struct NaClDescEffector *) &shm_effector,
                   (void *) text_sysaddr,
                   dynamic_text_size,
                   mmap_protections,
                   NACL_ABI_MAP_SHARED | NACL_ABI_MAP_FIXED,
                   0);
  if (text_sysaddr != mmap_ret) {
    NaClLog(LOG_FATAL, "Could not map in shm for dynamic text region\n");
  }

#if NACL_WINDOWS
  {
    /*
     * We need a loop here because the Map() call above creates one
     * mapping per page.  However, there is no need for it to do that
     * for the dynamic code area.
     * TODO(mseaborn): Create a single mapping here.
     */
    uintptr_t offset;
    for (offset = 0; offset < dynamic_text_size; offset += NACL_MAP_PAGESIZE) {
      DWORD old_prot;
      if (!VirtualProtect((void *) (text_sysaddr + offset), NACL_MAP_PAGESIZE,
                          PAGE_NOACCESS, &old_prot)) {
        NaClLog(LOG_FATAL,
                "NaClMakeDynamicTextShared: VirtualProtect() failed to "
                "set page permissions to PAGE_NOACCESS\n");
      }
    }
  }
#endif

  nap->dynamic_page_bitmap =
      BitmapAllocate((uint32_t) (dynamic_text_size / NACL_MAP_PAGESIZE));
  if (NULL == nap->dynamic_page_bitmap) {
    NaClLog(LOG_FATAL, "NaClMakeDynamicTextShared: BitmapAllocate() failed\n");
  }

  nap->text_shm = &shm->base;
  retval = LOAD_OK;

cleanup:
  if (shm_effector_initialized) {
    (*shm_effector.base.vtbl->Dtor)((struct NaClDescEffector *) &shm_effector);
  }
  if (LOAD_OK != retval) {
    NaClDescSafeUnref((struct NaClDesc *) shm);
    free(shm);
  }

  return retval;
}

/*
 * Binary-search nap->dynamic_regions for the region with the largest
 * start such that start <= ptr.  The caller must hold
 * nap->dynamic_load_mutex, and must discard the result once the lock
 * is released.
 */
struct NaClDynamicRegion* NaClDynamicRegionFindClosestLEQ(struct NaClApp *nap,
                                                          uintptr_t ptr) {
  const int kBinarySearchToScanCutoff = 16;
  int begin = 0;
  int end = nap->num_dynamic_regions;
  if (0 == nap->num_dynamic_regions) {
    return NULL;
  }
  /* as an optimization, check the last region first */
  if (nap->dynamic_regions[nap->num_dynamic_regions - 1].start <= ptr) {
    return nap->dynamic_regions + nap->num_dynamic_regions - 1;
  }
  /* comes before everything */
  if (ptr < nap->dynamic_regions[0].start) {
    return NULL;
  }
  /* binary search, until the range is small */
  while (begin + kBinarySearchToScanCutoff + 1 < end) {
    int mid = begin + (end - begin) / 2;
    if (nap->dynamic_regions[mid].start <= ptr) {
      begin = mid;
    } else {
      end = mid;
    }
  }
  /* linear scan, faster for small ranges */
  while (begin + 1 < end && nap->dynamic_regions[begin + 1].start <= ptr) {
    begin++;
  }
  return nap->dynamic_regions + begin;
}
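
/*
 * Example with illustrative addresses: given regions starting at
 * 0x1000, 0x2000 and 0x3000, a lookup for ptr == 0x2800 returns the
 * region starting at 0x2000 (the largest start <= ptr), while a
 * lookup for ptr == 0x0800 returns NULL because every region starts
 * above it.
 */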

/*
 * Find the last region overlapping the given memory range; returns
 * NULL if no region overlaps it.
 * The caller must hold nap->dynamic_load_mutex, and must discard the
 * result once the lock is released.
 */
struct NaClDynamicRegion* NaClDynamicRegionFind(struct NaClApp *nap,
                                                uintptr_t ptr,
                                                size_t size) {
  struct NaClDynamicRegion *p = NaClDynamicRegionFindClosestLEQ(nap,
                                                                ptr + size - 1);
  return (p != NULL && ptr < p->start + p->size) ? p : NULL;
}

/*
 * Insert a new region into nap->dynamic_regions, maintaining the sorted
 * ordering.  Returns 1 on success, 0 if there is a conflicting region.
 * The caller must hold nap->dynamic_load_mutex.
 * Invalidates all previous NaClDynamicRegion pointers.
 */
int NaClDynamicRegionCreate(struct NaClApp *nap,
                            uintptr_t start,
                            size_t size) {
  struct NaClDynamicRegion item, *regionp, *end;
  item.start = start;
  item.size = size;
  item.delete_generation = -1;
  if (nap->dynamic_regions_allocated == nap->num_dynamic_regions) {
    /* out of space; double the buffer size */
    nap->dynamic_regions_allocated *= 2;
    if (nap->dynamic_regions_allocated < kMinDynamicRegionsAllocated) {
      nap->dynamic_regions_allocated = kMinDynamicRegionsAllocated;
    }
    nap->dynamic_regions = realloc(nap->dynamic_regions,
                                   sizeof(struct NaClDynamicRegion) *
                                   nap->dynamic_regions_allocated);
    if (NULL == nap->dynamic_regions) {
      NaClLog(LOG_FATAL, "NaClDynamicRegionCreate: realloc failed\n");
      return 0;
    }
  }
  /* find the preceding entry */
  regionp = NaClDynamicRegionFindClosestLEQ(nap, start + size - 1);
  if (regionp != NULL && start < regionp->start + regionp->size) {
    /* target already in use */
    return 0;
  }
  if (NULL == regionp) {
    /* start at the beginning if we couldn't find a predecessor */
    regionp = nap->dynamic_regions;
  }
  end = nap->dynamic_regions + nap->num_dynamic_regions;
  /* scroll to the insertion point (this should scroll at most 1 element) */
  for (; regionp != end && regionp->start < item.start; ++regionp);
  /* insert and shift everything forward by 1 */
  for (; regionp != end; ++regionp) {
    /* swap(*regionp, item) */
    struct NaClDynamicRegion t = *regionp;
    *regionp = item;
    item = t;
  }
  *regionp = item;
  nap->num_dynamic_regions++;
  return 1;
}
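
/*
 * Illustrative insertion: with existing regions starting at 0x1000 and
 * 0x3000 (each smaller than 0x1000 bytes), NaClDynamicRegionCreate(nap,
 * 0x2000, 0x1000) first confirms via NaClDynamicRegionFindClosestLEQ
 * that [0x2000, 0x3000) overlaps nothing, then shifts the 0x3000 entry
 * up one slot so the array reads [0x1000, 0x2000, 0x3000] afterwards.
 */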

/*
 * Delete a region from nap->dynamic_regions, maintaining the sorted
 * ordering.
 * The caller must hold nap->dynamic_load_mutex.
 * Invalidates all previous NaClDynamicRegion pointers.
 */
void NaClDynamicRegionDelete(struct NaClApp *nap, struct NaClDynamicRegion* r) {
  struct NaClDynamicRegion *end = nap->dynamic_regions
                                  + nap->num_dynamic_regions;
  /* shift everything down */
  for (; r + 1 < end; ++r) {
    r[0] = r[1];
  }
  nap->num_dynamic_regions--;

  if (nap->dynamic_regions_allocated > kMinDynamicRegionsAllocated
      && nap->dynamic_regions_allocated / 4 > nap->num_dynamic_regions) {
    /* too much waste; shrink the buffer */
    nap->dynamic_regions_allocated /= 2;
    nap->dynamic_regions = realloc(nap->dynamic_regions,
                                   sizeof(struct NaClDynamicRegion) *
                                   nap->dynamic_regions_allocated);
    if (NULL == nap->dynamic_regions) {
      NaClLog(LOG_FATAL, "NaClDynamicRegionDelete: realloc failed\n");
      return;
    }
  }
}


void NaClSetThreadGeneration(struct NaClAppThread *natp, int generation) {
  /*
   * The outer check handles the fast (no-change) case; it is safe
   * without the lock because threads only set their own generation.
   */
  if (natp->dynamic_delete_generation != generation) {
    NaClXMutexLock(&natp->mu);
    CHECK(natp->dynamic_delete_generation <= generation);
    natp->dynamic_delete_generation = generation;
    NaClXMutexUnlock(&natp->mu);
  }
}

int NaClMinimumThreadGeneration(struct NaClApp *nap) {
  size_t index;
  int rv = INT_MAX;
  NaClXMutexLock(&nap->threads_mu);
  for (index = 0; index < nap->threads.num_entries; ++index) {
    struct NaClAppThread *thread = NaClGetThreadMu(nap, (int) index);
    if (thread != NULL) {
      NaClXMutexLock(&thread->mu);
      if (rv > thread->dynamic_delete_generation) {
        rv = thread->dynamic_delete_generation;
      }
      NaClXMutexUnlock(&thread->mu);
    }
  }
  NaClXMutexUnlock(&nap->threads_mu);
  return rv;
}
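
/*
 * Sketch of the generation protocol (illustrative numbers): if threads
 * A and B are both at generation 3 when a region is marked for
 * deletion at generation 4, NaClMinimumThreadGeneration() still
 * returns 3 and NaClTextSysDyncode_Delete() reports -NACL_ABI_EAGAIN;
 * once both threads have passed through NaClSetThreadGeneration(natp, 4),
 * the minimum reaches 4 and a retried delete can safely remove the
 * region.
 */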

static void CopyBundleTails(uint8_t *dest,
                            uint8_t *src,
                            int32_t size,
                            int bundle_size) {
  /*
   * The order in which these locations are written does not matter:
   * none of the locations will be reachable, because the bundle heads
   * still contain HLTs.
   */
  int bundle_mask = bundle_size - 1;
  uint32_t *src_ptr;
  uint32_t *dest_ptr;
  uint32_t *end_ptr;

  CHECK(0 == ((uintptr_t) dest & 3));

  src_ptr = (uint32_t *) src;
  dest_ptr = (uint32_t *) dest;
  end_ptr = (uint32_t *) (dest + size);
  while (dest_ptr < end_ptr) {
    if ((((uintptr_t) dest_ptr) & bundle_mask) != 0) {
      *dest_ptr = *src_ptr;
    }
    dest_ptr++;
    src_ptr++;
  }
}

static void CopyBundleHeads(uint8_t *dest,
                            uint8_t *src,
                            uint32_t size,
                            int bundle_size) {
  /* Again, the order in which these locations are written does not matter. */
  uint8_t *src_ptr;
  uint8_t *dest_ptr;
  uint8_t *end_ptr;

  /* dest must be aligned for the writes to be atomic. */
  CHECK(0 == ((uintptr_t) dest & 3));

  src_ptr = src;
  dest_ptr = dest;
  end_ptr = dest + size;
  while (dest_ptr < end_ptr) {
    /*
     * We assume that writing the 32-bit int here is atomic, which is
     * the case on x86 and ARM as long as the address is word-aligned.
     * The read does not have to be atomic.
     */
    *(uint32_t *) dest_ptr = *(uint32_t *) src_ptr;
    dest_ptr += bundle_size;
    src_ptr += bundle_size;
  }
}

static void ReplaceBundleHeadsWithHalts(uint8_t *dest,
                                        uint32_t size,
                                        int bundle_size) {
  uint32_t *dest_ptr = (uint32_t *) dest;
  uint32_t *end_ptr = (uint32_t *) (dest + size);
  while (dest_ptr < end_ptr) {
    /* don't assume a 1-byte halt; write the entire NACL_HALT_WORD */
    *dest_ptr = NACL_HALT_WORD;
    dest_ptr += bundle_size / sizeof(uint32_t);
  }
  NaClWriteMemoryBarrier();
}
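
/*
 * For illustration: with bundle_size == 32, dest_ptr above advances by
 * 32 / sizeof(uint32_t) == 8 words per iteration, so exactly one
 * NACL_HALT_WORD is written at the head of each 32-byte bundle and the
 * bundle bodies are left untouched (they become unreachable).
 */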

static INLINE void CopyCodeSafelyInitial(uint8_t *dest,
                                         uint8_t *src,
                                         uint32_t size,
                                         int bundle_size) {
  CopyBundleTails(dest, src, size, bundle_size);
  NaClWriteMemoryBarrier();
  CopyBundleHeads(dest, src, size, bundle_size);

  /*
   * Flush the processor's instruction cache.  This is not necessary
   * for security, because any old cached instructions will just be
   * safe halt instructions.  It is only necessary to ensure that
   * untrusted code runs correctly when it tries to execute the
   * dynamically-loaded code.
   */
  NaClClearInstructionCache(dest, dest + size);
}
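
/*
 * Sketch of why the two-phase copy above is safe, for a hypothetical
 * 16-byte bundle whose head word initially holds halts:
 *
 *   initial:           HLT HLT HLT HLT | old tail ...
 *   CopyBundleTails:   HLT HLT HLT HLT | new tail ...
 *   (write barrier)
 *   CopyBundleHeads:   new head word   | new tail ...
 *
 * Any thread that enters the bundle before the final head write only
 * ever executes halts, so it can never observe a half-written tail.
 */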

static void MakeDynamicCodePageVisible(struct NaClApp *nap,
                                       uint32_t page_index,
                                       uint8_t *writable_addr) {
  void *user_addr;

  if (BitmapIsBitSet(nap->dynamic_page_bitmap, page_index)) {
    /* The page is already visible: nothing to do. */
    return;
  }
  user_addr = (void *) NaClUserToSys(nap, nap->dynamic_text_start
                                     + page_index * NACL_MAP_PAGESIZE);

  /* Sanity check: Ensure the page is not already in use. */
  CHECK(*writable_addr == 0);

  NaClFillMemoryRegionWithHalt(writable_addr, NACL_MAP_PAGESIZE);

  if (NaCl_mprotect(user_addr, NACL_MAP_PAGESIZE, PROT_READ | PROT_EXEC) != 0) {
    NaClLog(LOG_FATAL, "MakeDynamicCodePageVisible: NaCl_mprotect() failed\n");
  }

  BitmapSetBit(nap->dynamic_page_bitmap, page_index);
}

/*
 * Maps a writable version of the code at [offset, offset+size) and returns a
 * pointer to the new mapping.  Internally caches the last mapping between
 * calls.  Pass offset = 0, size = 0 to clear the cache.
 * The caller must hold nap->dynamic_load_mutex.
 */
static uintptr_t CachedMapWritableText(struct NaClApp *nap,
                                       uint32_t offset,
                                       uint32_t size) {
  /*
   * The nap->* variables used in this function can be in two states:
   *
   * 1)
   * nap->dynamic_mapcache_size == 0
   * nap->dynamic_mapcache_ret == 0
   *
   * Initial state, nothing is cached.
   *
   * 2)
   * nap->dynamic_mapcache_size != 0
   * nap->dynamic_mapcache_ret != 0
   *
   * We have a cached mmap result stored, that must be unmapped.
   */
  struct NaClDescEffectorShm shm_effector;
  struct NaClDesc *shm = nap->text_shm;
  if (!NaClDescEffectorShmCtor(&shm_effector)) {
    NaClLog(LOG_FATAL,
            "CachedMapWritableText: "
            "shm effector initialization failed\n");
    return -NACL_ABI_EFAULT;
  }

  if (offset != nap->dynamic_mapcache_offset
      || size != nap->dynamic_mapcache_size) {
    /*
     * cache miss; first clear the old cache entry if needed
     */
    if (nap->dynamic_mapcache_size > 0) {
      if (0 != (*((struct NaClDescVtbl const *) shm->base.vtbl)->
                UnmapUnsafe)(shm,
                             (struct NaClDescEffector *) &shm_effector,
                             (void *) nap->dynamic_mapcache_ret,
                             nap->dynamic_mapcache_size)) {
        NaClLog(LOG_FATAL, "CachedMapWritableText: Failed to unmap\n");
        return -NACL_ABI_EFAULT;
      }
      nap->dynamic_mapcache_offset = 0;
      nap->dynamic_mapcache_size = 0;
      nap->dynamic_mapcache_ret = 0;
    }

    /*
     * then install the new cached mapping
     */
    if (size > 0) {
      uint32_t page_index;
      uint32_t end_page_index;
      uint8_t *writable_addr;

      uintptr_t mapping = (*((struct NaClDescVtbl const *)
                             shm->base.vtbl)->
                           Map)(shm,
                                (struct NaClDescEffector *) &shm_effector,
                                NULL,
                                size,
                                NACL_ABI_PROT_READ | NACL_ABI_PROT_WRITE,
                                NACL_ABI_MAP_SHARED,
                                offset);
      if (NaClPtrIsNegErrno(&mapping)) {
        return 0;
      }

      writable_addr = (uint8_t *) mapping;
      end_page_index = (offset + size) / NACL_MAP_PAGESIZE;
      for (page_index = offset / NACL_MAP_PAGESIZE;
           page_index < end_page_index;
           page_index++) {
        MakeDynamicCodePageVisible(nap, page_index, writable_addr);
        writable_addr += NACL_MAP_PAGESIZE;
      }

      nap->dynamic_mapcache_offset = offset;
      nap->dynamic_mapcache_size = size;
      nap->dynamic_mapcache_ret = mapping;
    }
  }
  return nap->dynamic_mapcache_ret;
}

/*
 * A wrapper around CachedMapWritableText that performs the common
 * address calculations.
 * Outputs *mapped_addr.
 * The caller must hold nap->dynamic_load_mutex.
 * Returns a boolean: nonzero on success.
 */
static INLINE int NaclTextMapWrapper(struct NaClApp *nap,
                                     uint32_t dest,
                                     uint32_t size,
                                     uint8_t **mapped_addr) {
  uint32_t shm_offset;
  uint32_t shm_map_offset;
  uint32_t within_page_offset;
  uint32_t shm_map_offset_end;
  uint32_t shm_map_size;
  uintptr_t mmap_ret;
  uint8_t *mmap_result;

  shm_offset = dest - (uint32_t) nap->dynamic_text_start;
  shm_map_offset = shm_offset & ~(NACL_MAP_PAGESIZE - 1);
  within_page_offset = shm_offset & (NACL_MAP_PAGESIZE - 1);
  shm_map_offset_end =
      (shm_offset + size + NACL_MAP_PAGESIZE - 1) & ~(NACL_MAP_PAGESIZE - 1);
  shm_map_size = shm_map_offset_end - shm_map_offset;

  mmap_ret = CachedMapWritableText(nap,
                                   shm_map_offset,
                                   shm_map_size);
  if (0 == mmap_ret) {
    return 0;
  }
  mmap_result = (uint8_t *) mmap_ret;
  *mapped_addr = mmap_result + within_page_offset;
  return 1;
}
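
/*
 * Worked example of the rounding above, assuming NACL_MAP_PAGESIZE is
 * 0x10000 (illustrative values): for shm_offset == 0x10123 and
 * size == 0x200, shm_map_offset is 0x10000, within_page_offset is
 * 0x123 and shm_map_offset_end rounds up to 0x20000, so a single
 * 0x10000-byte window is mapped and *mapped_addr points 0x123 bytes
 * into it.
 */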

/*
 * Clear the mmap cache if multiple pages were mapped.
 * The caller must hold nap->dynamic_load_mutex.
 */
static INLINE void NaclTextMapClearCacheIfNeeded(struct NaClApp *nap,
                                                 uint32_t dest,
                                                 uint32_t size) {
  uint32_t shm_offset;
  uint32_t shm_map_offset;
  uint32_t shm_map_offset_end;
  uint32_t shm_map_size;
  shm_offset = dest - (uint32_t) nap->dynamic_text_start;
  shm_map_offset = shm_offset & ~(NACL_MAP_PAGESIZE - 1);
  shm_map_offset_end =
      (shm_offset + size + NACL_MAP_PAGESIZE - 1) & ~(NACL_MAP_PAGESIZE - 1);
  shm_map_size = shm_map_offset_end - shm_map_offset;
  if (shm_map_size > NACL_MAP_PAGESIZE) {
    /* call with size == offset == 0 to clear the cache */
    CachedMapWritableText(nap, 0, 0);
  }
}

int32_t NaClTextDyncodeCreate(struct NaClApp *nap,
                              uint32_t dest,
                              void *code_copy,
                              uint32_t size) {
  uintptr_t dest_addr;
  uint8_t *mapped_addr;
  int32_t retval = -NACL_ABI_EINVAL;
  int validator_result;
  struct NaClPerfCounter time_dyncode_create;
  NaClPerfCounterCtor(&time_dyncode_create, "NaClTextDyncodeCreate");

  if (NULL == nap->text_shm) {
    NaClLog(1, "NaClTextDyncodeCreate: Dynamic loading not enabled\n");
    return -NACL_ABI_EINVAL;
  }
  if (0 != (dest & (nap->bundle_size - 1)) ||
      0 != (size & (nap->bundle_size - 1))) {
    NaClLog(1, "NaClTextDyncodeCreate: Non-bundle-aligned address or size\n");
    return -NACL_ABI_EINVAL;
  }
  dest_addr = NaClUserToSysAddrRange(nap, dest, size);
  if (kNaClBadAddress == dest_addr) {
    NaClLog(1, "NaClTextDyncodeCreate: Dest address out of range\n");
    return -NACL_ABI_EFAULT;
  }
  if (dest < nap->dynamic_text_start) {
    NaClLog(1, "NaClTextDyncodeCreate: Below dynamic code area\n");
    return -NACL_ABI_EFAULT;
  }
  /*
   * We ensure that the final HLTs of the dynamic code region cannot
   * be overwritten, just in case of CPU bugs.
   */
  if (dest + size > nap->dynamic_text_end - NACL_HALT_SLED_SIZE) {
    NaClLog(1, "NaClTextDyncodeCreate: Above dynamic code area\n");
    return -NACL_ABI_EFAULT;
  }
  if (0 == size) {
    /* Nothing to load.  Succeed trivially. */
    return 0;
  }

  NaClXMutexLock(&nap->dynamic_load_mutex);

  /*
   * Validate the code before trying to create the region.  This avoids
   * the need to delete the region if validation fails.
   * See: http://code.google.com/p/nativeclient/issues/detail?id=2566
   */
  if (!nap->skip_validator) {
    validator_result = NaClValidateCode(nap, dest, code_copy, size);
  } else {
    NaClLog(LOG_ERROR, "VALIDATION SKIPPED.\n");
    validator_result = LOAD_OK;
  }

  NaClPerfCounterMark(&time_dyncode_create,
                      NACL_PERF_IMPORTANT_PREFIX "DynRegionValidate");
  NaClPerfCounterIntervalLast(&time_dyncode_create);

  if (validator_result != LOAD_OK
      && nap->ignore_validator_result) {
    NaClLog(LOG_ERROR, "VALIDATION FAILED for dynamically-loaded code: "
            "continuing anyway...\n");
    validator_result = LOAD_OK;
  }

  if (validator_result != LOAD_OK) {
    NaClLog(1, "NaClTextDyncodeCreate: "
            "Validation of dynamic code failed\n");
    retval = -NACL_ABI_EINVAL;
    goto cleanup_unlock;
  }

  if (NaClDynamicRegionCreate(nap, dest_addr, size) != 1) {
    /* target address is in use */
    NaClLog(1, "NaClTextDyncodeCreate: Code range already allocated\n");
    retval = -NACL_ABI_EINVAL;
    goto cleanup_unlock;
  }

  if (!NaclTextMapWrapper(nap, dest, size, &mapped_addr)) {
    retval = -NACL_ABI_ENOMEM;
    goto cleanup_unlock;
  }

  CopyCodeSafelyInitial(mapped_addr, code_copy, size, nap->bundle_size);
  retval = 0;

  NaclTextMapClearCacheIfNeeded(nap, dest, size);

cleanup_unlock:
  NaClXMutexUnlock(&nap->dynamic_load_mutex);
  return retval;
}

int32_t NaClTextSysDyncode_Create(struct NaClAppThread *natp,
                                  uint32_t dest,
                                  uint32_t src,
                                  uint32_t size) {
  struct NaClApp *nap = natp->nap;
  uintptr_t src_addr;
  uint8_t *code_copy;
  int32_t retval = -NACL_ABI_EINVAL;

  src_addr = NaClUserToSysAddrRange(nap, src, size);
  if (kNaClBadAddress == src_addr) {
    NaClLog(1, "NaClTextSysDyncode_Create: Source address out of range\n");
    return -NACL_ABI_EFAULT;
  }

  /*
   * Make a private copy of the code, so that we can validate it
   * without a TOCTTOU race condition.
   */
  code_copy = malloc(size);
  if (NULL == code_copy) {
    return -NACL_ABI_ENOMEM;
  }
  memcpy(code_copy, (uint8_t *) src_addr, size);

  retval = NaClTextDyncodeCreate(nap, dest, code_copy, size);

  free(code_copy);
  return retval;
}

int32_t NaClTextSysDyncode_Modify(struct NaClAppThread *natp,
                                  uint32_t dest,
                                  uint32_t src,
                                  uint32_t size) {
  struct NaClApp *nap = natp->nap;
  uintptr_t dest_addr;
  uintptr_t src_addr;
  uintptr_t beginbundle;
  uintptr_t endbundle;
  uintptr_t offset;
  uint8_t *mapped_addr;
  uint8_t *code_copy = NULL;
  uint8_t code_copy_buf[NACL_INSTR_BLOCK_SIZE];
  int validator_result;
  int32_t retval = -NACL_ABI_EINVAL;
  struct NaClDynamicRegion *region;

  if (NULL == nap->text_shm) {
    NaClLog(1, "NaClTextSysDyncode_Modify: Dynamic loading not enabled\n");
    return -NACL_ABI_EINVAL;
  }

  if (0 == size) {
    /* Nothing to modify.  Succeed trivially. */
    return 0;
  }

  dest_addr = NaClUserToSysAddrRange(nap, dest, size);
  src_addr = NaClUserToSysAddrRange(nap, src, size);
  if (kNaClBadAddress == src_addr || kNaClBadAddress == dest_addr) {
    NaClLog(1, "NaClTextSysDyncode_Modify: Address out of range\n");
    return -NACL_ABI_EFAULT;
  }

  NaClXMutexLock(&nap->dynamic_load_mutex);

  region = NaClDynamicRegionFind(nap, dest_addr, size);
  if (NULL == region || region->start > dest_addr
      || region->start + region->size < dest_addr + size) {
    /* region is NULL, or the target is not a subregion of it */
    NaClLog(1, "NaClTextSysDyncode_Modify: Can't find region to modify\n");
    retval = -NACL_ABI_EFAULT;
    goto cleanup_unlock;
  }

  beginbundle = dest_addr & ~(nap->bundle_size - 1);
  endbundle = (dest_addr + size - 1 + nap->bundle_size)
              & ~(nap->bundle_size - 1);
  offset = dest_addr & (nap->bundle_size - 1);
  if (endbundle - beginbundle <= sizeof code_copy_buf) {
    /* usually patches are a single bundle, so stack-allocate */
    code_copy = code_copy_buf;
  } else {
    /* in the general case, heap-allocate */
    code_copy = malloc(endbundle - beginbundle);
    if (NULL == code_copy) {
      retval = -NACL_ABI_ENOMEM;
      goto cleanup_unlock;
    }
  }

  /* copy the bundles from the already-inserted code */
  memcpy(code_copy, (uint8_t *) beginbundle, endbundle - beginbundle);

  /*
   * make the requested change in a temporary location;
   * this avoids a TOCTTOU race
   */
  memcpy(code_copy + offset, (uint8_t *) src_addr, size);

  /* update dest/size to refer to entire bundles */
  dest &= ~(nap->bundle_size - 1);
  dest_addr &= ~((uintptr_t) nap->bundle_size - 1);
  /* since both are in sandbox memory this check should succeed */
  CHECK(endbundle - beginbundle < UINT32_MAX);
  size = (uint32_t) (endbundle - beginbundle);

  /* validate this code as a replacement */
  validator_result = NaClValidateCodeReplacement(nap,
                                                 dest,
                                                 (uint8_t *) dest_addr,
                                                 code_copy,
                                                 size);

  if (validator_result != LOAD_OK
      && nap->ignore_validator_result) {
    NaClLog(LOG_ERROR, "VALIDATION FAILED for dynamically-loaded code: "
            "continuing anyway...\n");
    validator_result = LOAD_OK;
  }

  if (validator_result != LOAD_OK) {
    NaClLog(1, "NaClTextSysDyncode_Modify: "
            "Validation of dynamic code failed\n");
    retval = -NACL_ABI_EINVAL;
    goto cleanup_unlock;
  }

  if (!NaclTextMapWrapper(nap, dest, size, &mapped_addr)) {
    retval = -NACL_ABI_ENOMEM;
    goto cleanup_unlock;
  }

  if (LOAD_OK != NaClCopyCode(nap, dest, mapped_addr, code_copy, size)) {
    NaClLog(1, "NaClTextSysDyncode_Modify: "
            "Copying of replacement code failed\n");
    retval = -NACL_ABI_EINVAL;
    goto cleanup_unlock;
  }
  retval = 0;

  NaclTextMapClearCacheIfNeeded(nap, dest, size);

cleanup_unlock:
  NaClXMutexUnlock(&nap->dynamic_load_mutex);

  if (code_copy != code_copy_buf) {
    free(code_copy);
  }

  return retval;
}

int32_t NaClTextSysDyncode_Delete(struct NaClAppThread *natp,
                                  uint32_t dest,
                                  uint32_t size) {
  struct NaClApp *nap = natp->nap;
  uintptr_t dest_addr;
  uint8_t *mapped_addr;
  int32_t retval = -NACL_ABI_EINVAL;
  struct NaClDynamicRegion *region;

  if (NULL == nap->text_shm) {
    NaClLog(1, "NaClTextSysDyncode_Delete: Dynamic loading not enabled\n");
    return -NACL_ABI_EINVAL;
  }

  dest_addr = NaClUserToSysAddrRange(nap, dest, size);
  if (kNaClBadAddress == dest_addr) {
    NaClLog(1, "NaClTextSysDyncode_Delete: Address out of range\n");
    return -NACL_ABI_EFAULT;
  }

  if (0 == size) {
    /* Nothing to delete.  Just update our generation. */
    int gen;
    /* fetch the current generation */
    NaClXMutexLock(&nap->dynamic_load_mutex);
    gen = nap->dynamic_delete_generation;
    NaClXMutexUnlock(&nap->dynamic_load_mutex);
    /* set our generation */
    NaClSetThreadGeneration(natp, gen);
    return 0;
  }

  NaClXMutexLock(&nap->dynamic_load_mutex);

  /*
   * this check ensures the to-be-deleted region is identical to a
   * previously inserted region, so there is no need to check for
   * alignment/bounds/etc.
   */
  region = NaClDynamicRegionFind(nap, dest_addr, size);
  if (NULL == region || region->start != dest_addr || region->size != size) {
    NaClLog(1, "NaClTextSysDyncode_Delete: Can't find region to delete\n");
    retval = -NACL_ABI_EFAULT;
    goto cleanup_unlock;
  }

  if (region->delete_generation < 0) {
    /* first deletion request */

    if (nap->dynamic_delete_generation == INT32_MAX) {
      NaClLog(1, "NaClTextSysDyncode_Delete: "
              "Overflow, can only delete INT32_MAX regions\n");
      retval = -NACL_ABI_EFAULT;
      goto cleanup_unlock;
    }

    if (!NaclTextMapWrapper(nap, dest, size, &mapped_addr)) {
      retval = -NACL_ABI_ENOMEM;
      goto cleanup_unlock;
    }

    /* make it so no new threads can enter the target region */
    ReplaceBundleHeadsWithHalts(mapped_addr, size, nap->bundle_size);

    NaclTextMapClearCacheIfNeeded(nap, dest, size);

    /* increment and record the generation at which deletion was requested */
    region->delete_generation = ++nap->dynamic_delete_generation;
  }

  /* update our own generation */
  NaClSetThreadGeneration(natp, nap->dynamic_delete_generation);

  if (region->delete_generation <= NaClMinimumThreadGeneration(nap)) {
    /*
     * All threads have checked in since we marked the region for deletion.
     * It is safe to remove the region.
     *
     * No need to memset the region to hlt since the bundle heads are hlt
     * and thus the bodies are unreachable.
     */
    NaClDynamicRegionDelete(nap, region);
    retval = 0;
  } else {
    /*
     * Still waiting for some threads to report in...
     */
    retval = -NACL_ABI_EAGAIN;
  }

cleanup_unlock:
  NaClXMutexUnlock(&nap->dynamic_load_mutex);
  return retval;
}