/*
 * Copyright 2010 The Native Client Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can
 * be found in the LICENSE file.
 */

#include <errno.h>
#include <stdlib.h>
#include <string.h>

#include "native_client/src/include/portability.h"

#include "native_client/src/shared/platform/nacl_check.h"
#include "native_client/src/shared/platform/nacl_log.h"

#include "native_client/src/trusted/gio/gio_shm.h"
#include "native_client/src/trusted/service_runtime/include/sys/mman.h"
#include "native_client/src/trusted/service_runtime/include/sys/stat.h"
#include "native_client/src/trusted/service_runtime/nacl_config.h"
#include "native_client/src/trusted/service_runtime/sel_util.h"

#include "native_client/src/trusted/desc/nacl_desc_base.h"
#include "native_client/src/trusted/desc/nacl_desc_effector_trusted_mem.h"
#include "native_client/src/trusted/desc/nacl_desc_imc_shm.h"

/*
 * This code maps in GIO_SHM_WINDOWSIZE bytes at a time for doing
 * "I/O" from/to the shared memory object. This value must be an
 * integer multiple of NACL_MAP_PAGESIZE.
 */
#define GIO_SHM_WINDOWSIZE (16 * NACL_MAP_PAGESIZE)
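/*
 * (With the usual 64 KiB NACL_MAP_PAGESIZE this works out to a 1 MiB
 * window; the exact value depends on the NACL_MAP_PAGESIZE definition
 * in nacl_config.h for the target platform.)
 */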

/*
 * Release the current window, if any, then map in a window at the
 * provided new_win_offset. This is akin to filbuf.
 *
 * Preconditions: 0 == (new_win_offset & (NACL_MAP_PAGESIZE - 1))
 *                new_win_offset < self->shm_sz
 */
static int NaClGioShmSetWindow(struct NaClGioShm *self,
                               size_t new_win_offset) {
  int rv;
  uintptr_t map_result;
  size_t actual_len;

  NaClLog(4,
          "NaClGioShmSetWindow: new_win_offset 0x%"NACL_PRIxS"\n",
          new_win_offset);
  if (0 != (new_win_offset & (NACL_MAP_PAGESIZE - 1))) {
    NaClLog(LOG_FATAL,
            ("NaClGioShmSetWindow: internal error, requested"
             " new window offset 0x%"NACL_PRIxS" is not aligned.\n"),
            new_win_offset);
  }

  if (new_win_offset >= self->shm_sz) {
    NaClLog(LOG_FATAL,
            ("NaClGioShmSetWindow: setting window beyond end of shm object"
             " offset 0x%"NACL_PRIxS", size 0x%"NACL_PRIxS"\n"),
            new_win_offset, self->shm_sz);
  }

  if (NULL != self->cur_window) {
    rv = (*((struct NaClDescVtbl const *) self->shmp->base.vtbl)->
          UnmapUnsafe)(self->shmp,
                       (struct NaClDescEffector *) &self->eff,
                       (void *) self->cur_window,
                       self->window_size);
    if (0 != rv) {
      NaClLog(LOG_FATAL,
              "NaClGioShmSetWindow: UnmapUnsafe returned %d\n",
              rv);
    }
  }
  self->cur_window = NULL;
  self->window_size = 0;

  /*
   * The Map virtual function will NOT pad space beyond the end of the
   * memory mapping object with zero-filled pages. This is done for
   * user code in nacl_syscall_common.c(NaClCommonSysMmap), and the
   * Map virtual function exposes the behavioral inconsistencies wrt
   * allowing but ignoring mapping an offset beyond the end of file
   * (linux) versus disallowing the mapping (MapViewOfFileEx).
   *
   * Here, we know the actual size of the shm object, and can deal
   * with it.
   */
  actual_len = GIO_SHM_WINDOWSIZE;
  if (actual_len > self->shm_sz - new_win_offset) {
    actual_len = self->shm_sz - new_win_offset;
  }
  map_result =
      (*((struct NaClDescVtbl const *) self->shmp->base.vtbl)->
       Map)(self->shmp,
            (struct NaClDescEffector *) &self->eff,
            (void *) NULL,
            actual_len,
            NACL_ABI_PROT_READ | NACL_ABI_PROT_WRITE,
            NACL_ABI_MAP_SHARED,
            (nacl_off64_t) new_win_offset);
  NaClLog(4,
          "NaClGioShmSetWindow: Map returned 0x%"NACL_PRIxPTR"\n",
          map_result);
  if (NACL_ABI_MAP_FAILED == (void *) map_result) {
    return 0;
  }

  self->cur_window = (char *) map_result;
  self->window_size = actual_len;
  self->window_offset = new_win_offset;

  return 1;
}

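/*
 * Shared implementation for Read and Write. Copies up to |count|
 * bytes between |buf| and the shm object at the current io_offset,
 * sliding the mapped window as needed. Returns the number of bytes
 * transferred (which may be short at the end of the shm object), or
 * -1 with errno set to EIO if the first required window mapping
 * fails.
 */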
static ssize_t NaClGioShmReadOrWrite(struct Gio *vself,
                                     void *buf,
                                     size_t count,
                                     int is_write) {
  struct NaClGioShm *self = (struct NaClGioShm *) vself;
  size_t new_window_offset;
  size_t transfer;
  size_t window_end;
  size_t window_remain;
  size_t sofar;

  NaClLog(4,
          ("NaClGioShmReadOrWrite: 0x%"NACL_PRIxPTR","
           " 0x%"NACL_PRIxPTR", 0x%"NACL_PRIxS", %d\n"),
          (uintptr_t) vself,
          (uintptr_t) buf,
          count,
          is_write);
  sofar = 0;
  while (count > 0) {
    NaClLog(4, "NaClGioShmReadOrWrite: count 0x%"NACL_PRIxS"\n", count);
    if (self->io_offset >= self->shm_sz) {
      break;
    }
    NaClLog(4, " cur_window 0x%"NACL_PRIxPTR"\n",
            (uintptr_t) self->cur_window);
    NaClLog(4, " io_offset 0x%"NACL_PRIxS"\n", self->io_offset);
    NaClLog(4, "window_offset 0x%"NACL_PRIxS"\n", self->window_offset);
    if (NULL == self->cur_window
        || self->io_offset < self->window_offset
        || self->window_offset + self->window_size <= self->io_offset) {
      /*
       * io_offset is outside the window. move the window so that
       * it's within.
       */
      NaClLog(4, "Seek required\n");

      new_window_offset = (self->io_offset
                           & (~(((size_t) NACL_MAP_PAGESIZE) - 1)));
      NaClLog(4, "new_window_offset 0x%"NACL_PRIxS"\n", new_window_offset);
      CHECK(0 == (new_window_offset &
                  (((size_t) NACL_MAP_PAGESIZE)-1)));
      if (!NaClGioShmSetWindow(self, new_window_offset)) {
        if (0 == sofar) {
          errno = EIO;
          sofar = -1;
        }
        return sofar;
      }
    } else {
      NaClLog(4, "no seek required\n");
    }
    NaClLog(4, " cur_window 0x%"NACL_PRIxPTR"\n",
            (uintptr_t) self->cur_window);
    NaClLog(4, " io_offset 0x%"NACL_PRIxS"\n", self->io_offset);
    NaClLog(4, "window_offset 0x%"NACL_PRIxS"\n", self->window_offset);

    CHECK(self->window_offset <= self->io_offset);
    CHECK(self->io_offset < self->window_offset + self->window_size);

    transfer = count;
    window_end = self->window_offset + self->window_size;
    if (window_end > self->shm_sz) {
      window_end = self->shm_sz;
    }
    window_remain = window_end - self->io_offset;

    NaClLog(4, "remaining in window 0x%"NACL_PRIxS"\n", window_remain);

    CHECK(window_remain <= GIO_SHM_WINDOWSIZE);

    if (transfer > window_remain) {
      transfer = window_remain;
    }

    NaClLog(4, "transfer 0x%"NACL_PRIxS"\n", transfer);

    if (is_write) {
      NaClLog(4,
              ("about to \"write\" memcpy(0x%"NACL_PRIxPTR", "
               " 0x%"NACL_PRIxPTR", 0x%"NACL_PRIxS" bytes)\n"),
              (uintptr_t) (self->cur_window
                           + (self->io_offset - self->window_offset)),
              (uintptr_t) buf,
              transfer);

      memcpy(self->cur_window + (self->io_offset - self->window_offset),
             buf,
             transfer);
    } else {
      NaClLog(4,
              ("about to \"read\" memcpy(0x%"NACL_PRIxPTR", "
               " 0x%"NACL_PRIxPTR", 0x%"NACL_PRIxS" bytes)\n"),
              (uintptr_t) buf,
              (uintptr_t) (self->cur_window
                           + (self->io_offset - self->window_offset)),
              transfer);

      memcpy(buf,
             self->cur_window + (self->io_offset - self->window_offset),
             transfer);
    }
    self->io_offset += transfer;
    sofar += transfer;

    buf = (void *)((uintptr_t) buf + transfer);
    count -= transfer;
  }

  return sofar;
}

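/* Gio Read entry point: read up to |count| bytes into |buf|. */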
static ssize_t NaClGioShmRead(struct Gio *vself,
                              void *buf,
                              size_t count) {
  return NaClGioShmReadOrWrite(vself, buf, count, 0);
}

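/* Gio Write entry point: write |count| bytes from |buf| to the shm object. */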
static ssize_t NaClGioShmWrite(struct Gio *vself,
                               const void *buf,
                               size_t count) {
  return NaClGioShmReadOrWrite(vself, (void *) buf, count, 1);
}

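/*
 * Gio Seek entry point: reposition io_offset per |whence| and |offset|.
 * Returns the new offset, or -1 with errno set to EINVAL if the
 * resulting position would fall outside [0, shm_sz].
 */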
static off_t NaClGioShmSeek(struct Gio *vself,
                            off_t offset,
                            int whence) {
  struct NaClGioShm *self = (struct NaClGioShm *) vself;
  size_t new_pos = (size_t) -1;

  NaClLog(4, "NaClGioShmSeek(0x%"NACL_PRIxPTR", %ld (0x%lx), %d)\n",
          (uintptr_t) vself, (long) offset, (long) offset, whence);
  /*
   * Note that if sizeof(new_pos) < sizeof(offset), we are dropping
   * high-order bits and we do not detect this. However, the check
   * after the switch keeps the values somewhat sane: we will never
   * set the I/O offset to be outside the range [0, self->shm_sz].
   */
  switch (whence) {
    case SEEK_SET:
      new_pos = (size_t) offset;
      break;
    case SEEK_CUR:
      new_pos = self->io_offset + offset;
      break;
    case SEEK_END:
      new_pos = self->shm_sz + offset;
      break;
  }
  /* allow equality, so setting to the end of file is okay */
  if (self->shm_sz < new_pos) {
    NaClLog(4, " invalid offset\n");
    errno = EINVAL;
    return -1;
  }
  NaClLog(4, " setting to %ld (0x%lx)\n", (long) new_pos, (long) new_pos);
  /* sizeof(off_t) >= sizeof(size_t) */
  self->io_offset = new_pos;
  return (off_t) self->io_offset;
}

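/*
 * Gio Flush entry point: a no-op, since reads and writes go directly
 * through the shared memory mapping and nothing is buffered here.
 */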
static int NaClGioShmFlush(struct Gio *vself) {
  UNREFERENCED_PARAMETER(vself);
  return 0;
}

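/*
 * Gio Close entry point: unmaps the current window (if any) and drops
 * the reference on the underlying NaClDesc. A second Close is
 * reported as an error rather than faulting.
 */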
static int NaClGioShmClose(struct Gio *vself) {
  struct NaClGioShm *self = (struct NaClGioShm *) vself;
  int ret;

  if (NULL != self->cur_window) {
    ret = (*((struct NaClDescVtbl const *) self->shmp->base.vtbl)->
           UnmapUnsafe)(self->shmp,
                        (struct NaClDescEffector *) &self->eff,
                        (void *) self->cur_window,
                        NACL_MAP_PAGESIZE);
    if (ret < 0) {
      errno = EIO;
      return -1;
    }
  }
  self->cur_window = NULL;

  if (NULL == self->shmp) {
    NaClLog(LOG_ERROR, "NaClGioShmClose: double close detected\n");
    errno = EIO;
    return -1;
  }

  NaClDescUnref(self->shmp);
  self->shmp = NULL;  /* double close will fault */
  return 0;
}

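/*
 * Gio Dtor entry point: closes the object if the caller has not done
 * so already, releases the effector, and poisons the vtbl pointer so
 * that further use faults.
 */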
static void NaClGioShmDtor(struct Gio *vself) {
  struct NaClGioShm *self = (struct NaClGioShm *) vself;

  /*
   * Users of Gio objects are expected to Close then Dtor, but Dtor
   * should cleanup regardless.
   */
  if (NULL != self->shmp) {
    if (-1 == (*vself->vtbl->Close)(vself)) {
      NaClLog(LOG_ERROR, "NaClGioShmDtor: auto Close failed!\n");
    }
  }

  (*self->eff.base.vtbl->Dtor)(&self->eff.base);

  self->shmp = NULL;
  self->base.vtbl = NULL;
}

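/*
 * Virtual function table for NaClGioShm. The entries are positional
 * and must stay in struct GioVtbl declaration order: Read, Write,
 * Seek, Flush, Close, Dtor.
 */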
const struct GioVtbl kNaClGioShmVtbl = {
  NaClGioShmRead,
  NaClGioShmWrite,
  NaClGioShmSeek,
  NaClGioShmFlush,
  NaClGioShmClose,
  NaClGioShmDtor,
};


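/*
 * Common constructor tail: checks via Fstat that the underlying shm
 * object is at least |shm_size| bytes, takes a reference on |shmp|,
 * and maps the initial window at offset 0. Returns 1 on success,
 * 0 on failure.
 */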
static int NaClGioShmCtorIntern(struct NaClGioShm *self,
                                struct NaClDesc *shmp,
                                size_t shm_size) {
  struct nacl_abi_stat stbuf;
  int vfret;
  int rval = 0;

  self->base.vtbl = NULL;

  self->shmp = NULL;
  self->cur_window = NULL;

  if (0 != (vfret = (*((struct NaClDescVtbl const *) shmp->base.vtbl)->
                     Fstat)(shmp, &stbuf))) {
    NaClLog(1, "NaClGioShmCtorIntern: Fstat virtual function returned %d\n",
            vfret);
    goto cleanup;
  }
  /*
   * nacl_abi_off_t is a signed 32-bit quantity, but we don't want to
   * hardwire in that knowledge here.
   *
   * size_t is unsigned, and may be 32 bits or 64 bits, depending on
   * the underlying host OS.
   *
   * We want to ensure that the shm's size, as reported by the desc
   * abstraction (and thus expressed as a nacl_abi_off_t), is at least
   * that claimed by the ctor argument. So, if (as integers)
   *
   *   stbuf.nacl_abi_st_size < shm_size
   *
   * holds, this is an error. However, the value-preserving cast rule
   * makes this harder.
   *
   * Note that for signed sizes (ssize_t), the kernel ABI generally
   * only reserves -1 for error, and asking for an I/O operation via a
   * size_t that would succeed but yield a ssize_t return value that
   * is negative is okay, since -1 is never valid as an I/O size on a
   * von Neumann machine (except for a writev where the iov entries
   * overlap): there just isn't that much data to read/write, when the
   * instructions also take up space in the process address space.
   * Whether requiring the programmer to detect this corner case is
   * advisable is a different argument -- similar to negative ssize_t
   * sizes, the syscall can just succeed with a partial transfer to
   * avoid returning -1 on a success, just as we could avoid returning
   * negative values; in practice, we do the latter, since we often
   * see code written that tests for syscall error by comparing the
   * return value to see if it is less than zero, rather than if it is
   * equal to -1.
   */
  if (stbuf.nacl_abi_st_size < 0) {
    NaClLog(LOG_ERROR,
            ("NaClGioShmCtorIntern: actual shm size negative"
             " %"NACL_PRIdNACL_OFF"\n"),
            stbuf.nacl_abi_st_size);
    goto cleanup;
  }
  if (stbuf.nacl_abi_st_size <= (nacl_abi_off_t) SIZE_T_MAX
      && (size_t) stbuf.nacl_abi_st_size < shm_size) {
    NaClLog(LOG_ERROR,
            ("NaClGioShmCtorIntern: claimed shm file size greater than"
             " actual shm segment size, %"NACL_PRIuS" vs"
             " %"NACL_PRIuNACL_OFF"\n"),
            shm_size,
            stbuf.nacl_abi_st_size);
    goto cleanup;
  }
  if (OFF_T_MAX < SIZE_T_MAX && (size_t) OFF_T_MAX < shm_size) {
    NaClLog(LOG_ERROR,
            ("NaClGioShmCtorIntern: claimed shm file size greater than"
             " off_t max value, %"NACL_PRId64"\n"),
            (int64_t) OFF_T_MAX);
    goto cleanup;
  }

  self->shmp = NaClDescRef(shmp);

  self->io_offset = 0;
  self->shm_sz = shm_size;
  self->window_offset = 0;

  self->base.vtbl = &kNaClGioShmVtbl;

  if (!NaClGioShmSetWindow(self, 0)) {
    NaClLog(LOG_ERROR,
            ("NaClGioShmCtorIntern: initial seek to beginning failed\n"));
    self->base.vtbl = NULL;
    goto cleanup;
  }

  rval = 1;
 cleanup:
  if (!rval) {
    (*self->eff.base.vtbl->Dtor)(&self->eff.base);
  }
  return rval;
}

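/*
 * Constructs a NaClGioShm that does Gio-style I/O on the existing shm
 * descriptor |shmp|, which must be at least |shm_size| bytes;
 * |shm_size| must be a multiple of the allocation page size.
 * Returns 1 on success, 0 on failure.
 */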
int NaClGioShmCtor(struct NaClGioShm *self,
                   struct NaClDesc *shmp,
                   size_t shm_size) {

  int rv;

  CHECK(shm_size == NaClRoundAllocPage(shm_size));

  if (!NaClDescEffectorTrustedMemCtor(&self->eff)) {
    return 0;
  }

  rv = NaClGioShmCtorIntern(self, shmp, shm_size);

  if (!rv) {
    (*self->eff.base.vtbl->Dtor)(&self->eff.base);
  }
  return rv;
}

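/*
 * Constructs a NaClGioShm backed by a freshly allocated anonymous shm
 * object of |shm_size| bytes; |shm_size| must be a multiple of the
 * allocation page size. Returns 1 on success, 0 on failure.
 */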
int NaClGioShmAllocCtor(struct NaClGioShm *self,
                        size_t shm_size) {
  struct NaClDescImcShm *shmp;
  int rv;

  CHECK(shm_size == NaClRoundAllocPage(shm_size));

  if (!NaClDescEffectorTrustedMemCtor(&self->eff)) {
    return 0;
  }

  shmp = malloc(sizeof *shmp);
  if (NULL == shmp) {
    (*self->eff.base.vtbl->Dtor)(&self->eff.base);
    return 0;
  }
  if (!NaClDescImcShmAllocCtor(shmp, shm_size, /* executable= */ 0)) {
    (*self->eff.base.vtbl->Dtor)(&self->eff.base);
    free(shmp);
    return 0;
  }

  rv = NaClGioShmCtorIntern(self, (struct NaClDesc *) shmp, shm_size);

  if (!rv) {
    NaClDescUnref((struct NaClDesc *) shmp);
    free(shmp);
    (*self->eff.base.vtbl->Dtor)(&self->eff.base);
  }
  return rv;
}
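
/*
 * Illustrative usage sketch (not part of the build): a caller wanting
 * a scratch Gio stream backed by shared memory might do something
 * like the following. Error handling is elided and a single
 * allocation page is an arbitrary example size.
 *
 *   struct NaClGioShm gio_shm;
 *   char buf[8];
 *
 *   if (NaClGioShmAllocCtor(&gio_shm, NACL_MAP_PAGESIZE)) {
 *     (*gio_shm.base.vtbl->Write)(&gio_shm.base, "hello", 5);
 *     (*gio_shm.base.vtbl->Seek)(&gio_shm.base, 0, SEEK_SET);
 *     (*gio_shm.base.vtbl->Read)(&gio_shm.base, buf, 5);
 *     (*gio_shm.base.vtbl->Close)(&gio_shm.base);
 *     (*gio_shm.base.vtbl->Dtor)(&gio_shm.base);
 *   }
 */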