1 : /*
2 : * Copyright (c) 2013 The Native Client Authors. All rights reserved.
3 : * Use of this source code is governed by a BSD-style license that can be
4 : * found in the LICENSE file.
5 : */
6 :
7 : #include "native_client/src/trusted/service_runtime/sys_imc.h"
8 :
9 : #include <string.h>
10 :
11 : #include "native_client/src/trusted/desc/nacl_desc_imc.h"
12 : #include "native_client/src/trusted/desc/nacl_desc_imc_shm.h"
13 : #include "native_client/src/trusted/desc/nacl_desc_invalid.h"
14 : #include "native_client/src/trusted/desc/nrd_xfer.h"
15 : #include "native_client/src/trusted/service_runtime/include/sys/errno.h"
16 : #include "native_client/src/trusted/service_runtime/nacl_app_thread.h"
17 : #include "native_client/src/trusted/service_runtime/nacl_copy.h"
18 : #include "native_client/src/trusted/service_runtime/sel_ldr.h"
19 :
20 :
21 : static int const kKnownInvalidDescNumber = -1;
22 :
23 10 : int32_t NaClSysImcMakeBoundSock(struct NaClAppThread *natp,
24 : int32_t *sap) {
25 : /*
26 : * Create a bound socket descriptor and a socket address descriptor.
27 : */
28 10 : struct NaClApp *nap = natp->nap;
29 10 : int32_t retval = -NACL_ABI_EINVAL;
30 : struct NaClDesc *pair[2];
31 : int32_t usr_pair[2];
32 :
33 10 : NaClLog(3,
34 : ("Entered NaClSysImcMakeBoundSock(0x%08"NACL_PRIxPTR","
35 : " 0x%08"NACL_PRIxPTR")\n"),
36 : (uintptr_t) natp, (uintptr_t) sap);
37 :
38 10 : retval = NaClCommonDescMakeBoundSock(pair);
39 10 : if (0 != retval) {
40 0 : goto cleanup;
41 : }
42 :
43 10 : usr_pair[0] = NaClAppSetDescAvail(nap, pair[0]);
44 10 : usr_pair[1] = NaClAppSetDescAvail(nap, pair[1]);
45 10 : if (!NaClCopyOutToUser(nap, (uintptr_t) sap,
46 : usr_pair, sizeof usr_pair)) {
47 : /*
48 : * NB: The descriptors were briefly observable to untrusted code
49 : * in this window, even though the syscall had not returned yet,
50 : * and another thread which guesses their numbers could actually
51 : * use them, so the NaClDescSafeUnref inside NaClAppSetDesc below
52 : * might not actually deallocate right away. To avoid this, we
53 : * could grab the descriptor lock and hold it until after the
54 : * copyout is done, but that imposes an ordering between the
55 : * descriptor lock and the VM lock which can cause problems
56 : * elsewhere.
57 : */
58 0 : NaClAppSetDesc(nap, usr_pair[0], NULL);
59 0 : NaClAppSetDesc(nap, usr_pair[1], NULL);
60 0 : retval = -NACL_ABI_EFAULT;
61 0 : goto cleanup;
62 : }
63 :
64 10 : retval = 0;
65 :
66 : cleanup:
67 10 : return retval;
68 : }
69 :
70 17 : int32_t NaClSysImcAccept(struct NaClAppThread *natp,
71 : int d) {
72 17 : struct NaClApp *nap = natp->nap;
73 17 : int32_t retval = -NACL_ABI_EINVAL;
74 : struct NaClDesc *ndp;
75 :
76 17 : NaClLog(3, "Entered NaClSysImcAccept(0x%08"NACL_PRIxPTR", %d)\n",
77 : (uintptr_t) natp, d);
78 :
79 17 : ndp = NaClAppGetDesc(nap, d);
80 17 : if (NULL == ndp) {
81 0 : retval = -NACL_ABI_EBADF;
82 : } else {
83 : struct NaClDesc *result_desc;
84 34 : retval = (*((struct NaClDescVtbl const *) ndp->base.vtbl)->
85 17 : AcceptConn)(ndp, &result_desc);
86 13 : if (retval == 0) {
87 12 : retval = NaClAppSetDescAvail(nap, result_desc);
88 : }
89 13 : NaClDescUnref(ndp);
90 : }
91 :
92 13 : return retval;
93 : }
94 :
95 17 : int32_t NaClSysImcConnect(struct NaClAppThread *natp,
96 : int d) {
97 17 : struct NaClApp *nap = natp->nap;
98 17 : int32_t retval = -NACL_ABI_EINVAL;
99 : struct NaClDesc *ndp;
100 :
101 17 : NaClLog(3, "Entered NaClSysImcConnectAddr(0x%08"NACL_PRIxPTR", %d)\n",
102 : (uintptr_t) natp, d);
103 :
104 17 : ndp = NaClAppGetDesc(nap, d);
105 17 : if (NULL == ndp) {
106 0 : retval = -NACL_ABI_EBADF;
107 : } else {
108 : struct NaClDesc *result;
109 34 : retval = (*((struct NaClDescVtbl const *) ndp->base.vtbl)->
110 17 : ConnectAddr)(ndp, &result);
111 17 : if (retval == 0) {
112 16 : retval = NaClAppSetDescAvail(nap, result);
113 : }
114 17 : NaClDescUnref(ndp);
115 : }
116 :
117 17 : return retval;
118 : }
119 :
120 : /*
121 : * This function converts addresses from user addresses to system
122 : * addresses, copying into kernel space as needed to avoid TOCvTOU
123 : * races, then invokes the descriptor's SendMsg() method.
124 : */
/*
 * This function converts addresses from user addresses to system
 * addresses, copying into kernel space as needed to avoid TOCvTOU
 * races, then invokes the descriptor's SendMsg() method.
 *
 * |d| is the untrusted descriptor number to send on; |nanimhp| is an
 * untrusted pointer to the ABI message header (iovec array, descriptor
 * vector, flags); |flags| are the IMC send flags.  Returns the number
 * of payload bytes sent, or a negative NACL_ABI_* errno on failure.
 */
int32_t NaClSysImcSendmsg(struct NaClAppThread *natp,
                          int d,
                          struct NaClAbiNaClImcMsgHdr *nanimhp,
                          int flags) {
  struct NaClApp *nap = natp->nap;
  int32_t retval = -NACL_ABI_EINVAL;
  ssize_t ssize_retval;
  uintptr_t sysaddr;
  /* copy of user-space data for validation */
  struct NaClAbiNaClImcMsgHdr kern_nanimh;
  struct NaClAbiNaClImcMsgIoVec kern_naiov[NACL_ABI_IMC_IOVEC_MAX];
  struct NaClImcMsgIoVec kern_iov[NACL_ABI_IMC_IOVEC_MAX];
  int32_t usr_desc[NACL_ABI_IMC_USER_DESC_MAX];
  /* kernel-side representation of descriptors */
  struct NaClDesc *kern_desc[NACL_ABI_IMC_USER_DESC_MAX];
  struct NaClImcTypedMsgHdr kern_msg_hdr;
  struct NaClDesc *ndp;
  size_t i;

  NaClLog(3,
          ("Entered NaClSysImcSendmsg(0x%08"NACL_PRIxPTR", %d,"
           " 0x%08"NACL_PRIxPTR", 0x%x)\n"),
          (uintptr_t) natp, d, (uintptr_t) nanimhp, flags);

  /* snapshot the message header into trusted memory */
  if (!NaClCopyInFromUser(nap, &kern_nanimh, (uintptr_t) nanimhp,
                          sizeof kern_nanimh)) {
    NaClLog(4, "NaClImcMsgHdr not in user address space\n");
    retval = -NACL_ABI_EFAULT;
    goto cleanup_leave;
  }
  /* copy before validating contents */

  /*
   * Some of these checks duplicate checks that will be done in the
   * nrd xfer library, but it is better to check before doing the
   * address translation of memory/descriptor vectors if those vectors
   * might be too long.  Plus, we need to copy and validate vectors
   * for TOCvTOU race protection, and we must prevent overflows.  The
   * nrd xfer library's checks should never fire when called from the
   * service runtime, but the nrd xfer library might be called from
   * other code.
   */
  if (kern_nanimh.iov_length > NACL_ABI_IMC_IOVEC_MAX) {
    NaClLog(4, "gather/scatter array too large\n");
    retval = -NACL_ABI_EINVAL;
    goto cleanup_leave;
  }
  if (kern_nanimh.desc_length > NACL_ABI_IMC_USER_DESC_MAX) {
    NaClLog(4, "handle vector too long\n");
    retval = -NACL_ABI_EINVAL;
    goto cleanup_leave;
  }

  if (kern_nanimh.iov_length > 0) {
    /* snapshot the iovec array, then translate each entry */
    if (!NaClCopyInFromUser(nap, kern_naiov, (uintptr_t) kern_nanimh.iov,
                            (kern_nanimh.iov_length * sizeof kern_naiov[0]))) {
      NaClLog(4, "gather/scatter array not in user address space\n");
      retval = -NACL_ABI_EFAULT;
      goto cleanup_leave;
    }

    for (i = 0; i < kern_nanimh.iov_length; ++i) {
      /* validate that each iov entry lies entirely in user space */
      sysaddr = NaClUserToSysAddrRange(nap,
                                       (uintptr_t) kern_naiov[i].base,
                                       kern_naiov[i].length);
      if (kNaClBadAddress == sysaddr) {
        retval = -NACL_ABI_EFAULT;
        goto cleanup_leave;
      }
      kern_iov[i].base = (void *) sysaddr;
      kern_iov[i].length = kern_naiov[i].length;
    }
  }

  ndp = NaClAppGetDesc(nap, d);
  if (NULL == ndp) {
    retval = -NACL_ABI_EBADF;
    goto cleanup_leave;
  }

  /*
   * make things easier for cleanup exit processing
   */
  memset(kern_desc, 0, sizeof kern_desc);
  retval = -NACL_ABI_EINVAL;

  kern_msg_hdr.iov = kern_iov;
  kern_msg_hdr.iov_length = kern_nanimh.iov_length;

  if (0 == kern_nanimh.desc_length) {
    kern_msg_hdr.ndescv = 0;
    kern_msg_hdr.ndesc_length = 0;
  } else {
    /* snapshot the untrusted descriptor-number vector */
    if (!NaClCopyInFromUser(nap, usr_desc, kern_nanimh.descv,
                            kern_nanimh.desc_length * sizeof usr_desc[0])) {
      retval = -NACL_ABI_EFAULT;
      goto cleanup;
    }

    for (i = 0; i < kern_nanimh.desc_length; ++i) {
      if (kKnownInvalidDescNumber == usr_desc[i]) {
        /* -1 is the sanctioned way to send an "invalid" placeholder */
        kern_desc[i] = (struct NaClDesc *) NaClDescInvalidMake();
      } else {
        /* NaCl modules are ILP32, so this works on ILP32 and LP64 systems */
        kern_desc[i] = NaClAppGetDesc(nap, usr_desc[i]);
      }
      if (NULL == kern_desc[i]) {
        retval = -NACL_ABI_EBADF;
        goto cleanup;
      }
    }
    kern_msg_hdr.ndescv = kern_desc;
    kern_msg_hdr.ndesc_length = kern_nanimh.desc_length;
  }
  kern_msg_hdr.flags = kern_nanimh.flags;

  /* lock user memory ranges in kern_naiov */
  for (i = 0; i < kern_nanimh.iov_length; ++i) {
    NaClVmIoWillStart(nap,
                      kern_naiov[i].base,
                      kern_naiov[i].base + kern_naiov[i].length - 1);
  }
  ssize_retval = NACL_VTBL(NaClDesc, ndp)->SendMsg(ndp, &kern_msg_hdr, flags);
  /* unlock user memory ranges in kern_naiov */
  for (i = 0; i < kern_nanimh.iov_length; ++i) {
    NaClVmIoHasEnded(nap,
                     kern_naiov[i].base,
                     kern_naiov[i].base + kern_naiov[i].length - 1);
  }

  if (NaClSSizeIsNegErrno(&ssize_retval)) {
    /*
     * NaClWouldBlock uses TSD (for both the errno-based and
     * GetLastError()-based implementations), so this is threadsafe.
     */
    if (0 != (flags & NACL_DONT_WAIT) && NaClWouldBlock()) {
      retval = -NACL_ABI_EAGAIN;
    } else if (-NACL_ABI_EMSGSIZE == ssize_retval) {
      /*
       * Allow the caller to handle the case when imc_sendmsg fails because
       * the message is too large for the system to send in one piece.
       */
      retval = -NACL_ABI_EMSGSIZE;
    } else {
      /*
       * TODO(bsy): the else case is some mysterious internal error.
       * Should we destroy the ndp or otherwise mark it as bad?  Was
       * the failure atomic?  Did it send some partial data?  Linux
       * implementation appears okay.
       */
      retval = -NACL_ABI_EIO;
    }
  } else if (ssize_retval > INT32_MAX || ssize_retval < INT32_MIN) {
    retval = -NACL_ABI_EOVERFLOW;
  } else {
    /* cast is safe due to range checks above */
    retval = (int32_t)ssize_retval;
  }

 cleanup:
  /* drop the references taken (or created) for the descriptor vector */
  for (i = 0; i < kern_nanimh.desc_length; ++i) {
    if (NULL != kern_desc[i]) {
      NaClDescUnref(kern_desc[i]);
      kern_desc[i] = NULL;
    }
  }
  NaClDescUnref(ndp);
 cleanup_leave:
  NaClLog(3, "NaClSysImcSendmsg: returning %d\n", retval);
  return retval;
}
296 :
/*
 * imc_recvmsg syscall: receive an IMC message on descriptor |d|.
 * |nanimhp| is an untrusted in/out pointer to the ABI message header:
 * on entry it supplies the iovec array and the descriptor-number buffer;
 * on exit its desc_length and flags fields are updated.  All untrusted
 * data is snapshotted into trusted memory before validation (TOCvTOU
 * protection).  Returns the number of payload bytes received, or a
 * negative NACL_ABI_* errno on failure.
 */
int32_t NaClSysImcRecvmsg(struct NaClAppThread *natp,
                          int d,
                          struct NaClAbiNaClImcMsgHdr *nanimhp,
                          int flags) {
  struct NaClApp *nap = natp->nap;
  int32_t retval = -NACL_ABI_EINVAL;
  ssize_t ssize_retval;
  uintptr_t sysaddr;
  size_t i;
  struct NaClDesc *ndp;
  /* trusted snapshots of the untrusted header and iovec array */
  struct NaClAbiNaClImcMsgHdr kern_nanimh;
  struct NaClAbiNaClImcMsgIoVec kern_naiov[NACL_ABI_IMC_IOVEC_MAX];
  struct NaClImcMsgIoVec kern_iov[NACL_ABI_IMC_IOVEC_MAX];
  int32_t usr_desc[NACL_ABI_IMC_USER_DESC_MAX];
  struct NaClImcTypedMsgHdr recv_hdr;
  /* descriptors internalized from the received message */
  struct NaClDesc *new_desc[NACL_ABI_IMC_DESC_MAX];
  nacl_abi_size_t num_user_desc;
  struct NaClDesc *invalid_desc = NULL;

  NaClLog(3,
          ("Entered NaClSysImcRecvMsg(0x%08"NACL_PRIxPTR", %d,"
           " 0x%08"NACL_PRIxPTR")\n"),
          (uintptr_t) natp, d, (uintptr_t) nanimhp);

  /*
   * First, we validate user-supplied message headers before
   * allocating a receive buffer.
   */
  if (!NaClCopyInFromUser(nap, &kern_nanimh, (uintptr_t) nanimhp,
                          sizeof kern_nanimh)) {
    NaClLog(4, "NaClImcMsgHdr not in user address space\n");
    retval = -NACL_ABI_EFAULT;
    goto cleanup_leave;
  }
  /* copy before validating */

  if (kern_nanimh.iov_length > NACL_ABI_IMC_IOVEC_MAX) {
    NaClLog(4, "gather/scatter array too large: %"NACL_PRIdNACL_SIZE"\n",
            kern_nanimh.iov_length);
    retval = -NACL_ABI_EINVAL;
    goto cleanup_leave;
  }
  if (kern_nanimh.desc_length > NACL_ABI_IMC_USER_DESC_MAX) {
    NaClLog(4, "handle vector too long: %"NACL_PRIdNACL_SIZE"\n",
            kern_nanimh.desc_length);
    retval = -NACL_ABI_EINVAL;
    goto cleanup_leave;
  }

  if (kern_nanimh.iov_length > 0) {
    /*
     * Copy IOV array into kernel space.  Validate this snapshot and do
     * user->kernel address conversions on this snapshot.
     */
    if (!NaClCopyInFromUser(nap, kern_naiov, (uintptr_t) kern_nanimh.iov,
                            (kern_nanimh.iov_length * sizeof kern_naiov[0]))) {
      NaClLog(4, "gather/scatter array not in user address space\n");
      retval = -NACL_ABI_EFAULT;
      goto cleanup_leave;
    }
    /*
     * Convert every IOV base from user to system address, validate
     * range of bytes are really in user address space.
     */

    for (i = 0; i < kern_nanimh.iov_length; ++i) {
      sysaddr = NaClUserToSysAddrRange(nap,
                                       (uintptr_t) kern_naiov[i].base,
                                       kern_naiov[i].length);
      if (kNaClBadAddress == sysaddr) {
        NaClLog(4, "iov number %"NACL_PRIuS" not entirely in user space\n", i);
        retval = -NACL_ABI_EFAULT;
        goto cleanup_leave;
      }
      kern_iov[i].base = (void *) sysaddr;
      kern_iov[i].length = kern_naiov[i].length;
    }
  }

  if (kern_nanimh.desc_length > 0) {
    /*
     * Pre-check that the descriptor output buffer is in user space;
     * the actual copyout happens after the receive below.
     */
    sysaddr = NaClUserToSysAddrRange(nap,
                                     (uintptr_t) kern_nanimh.descv,
                                     kern_nanimh.desc_length * sizeof(int32_t));
    if (kNaClBadAddress == sysaddr) {
      retval = -NACL_ABI_EFAULT;
      goto cleanup_leave;
    }
  }

  ndp = NaClAppGetDesc(nap, d);
  if (NULL == ndp) {
    NaClLog(4, "receiving descriptor invalid\n");
    retval = -NACL_ABI_EBADF;
    goto cleanup_leave;
  }

  recv_hdr.iov = kern_iov;
  recv_hdr.iov_length = kern_nanimh.iov_length;

  /* always offer the maximum descriptor capacity; truncation handled below */
  recv_hdr.ndescv = new_desc;
  recv_hdr.ndesc_length = NACL_ARRAY_SIZE(new_desc);
  memset(new_desc, 0, sizeof new_desc);

  recv_hdr.flags = 0;  /* just to make it obvious; IMC will clear it for us */

  /* lock user memory ranges in kern_naiov */
  for (i = 0; i < kern_nanimh.iov_length; ++i) {
    NaClVmIoWillStart(nap,
                      kern_naiov[i].base,
                      kern_naiov[i].base + kern_naiov[i].length - 1);
  }
  ssize_retval = NACL_VTBL(NaClDesc, ndp)->RecvMsg(ndp, &recv_hdr, flags,
      (struct NaClDescQuotaInterface *) nap->desc_quota_interface);
  /* unlock user memory ranges in kern_naiov */
  for (i = 0; i < kern_nanimh.iov_length; ++i) {
    NaClVmIoHasEnded(nap,
                     kern_naiov[i].base,
                     kern_naiov[i].base + kern_naiov[i].length - 1);
  }
  /*
   * retval is number of user payload bytes received and excludes the
   * header bytes.
   */
  NaClLog(3, "NaClSysImcRecvMsg: RecvMsg() returned %"NACL_PRIdS"\n",
          ssize_retval);
  if (NaClSSizeIsNegErrno(&ssize_retval)) {
    /* negative error numbers all have valid 32-bit representations,
     * so this cast is safe. */
    retval = (int32_t) ssize_retval;
    goto cleanup;
  } else if (ssize_retval > INT32_MAX || ssize_retval < INT32_MIN) {
    retval = -NACL_ABI_EOVERFLOW;
    goto cleanup;
  } else {
    /* cast is safe due to range check above */
    retval = (int32_t) ssize_retval;
  }

  /*
   * NB: recv_hdr.flags may contain NACL_ABI_MESSAGE_TRUNCATED and/or
   * NACL_ABI_HANDLES_TRUNCATED.
   */

  kern_nanimh.flags = recv_hdr.flags;

  /*
   * Now internalize the NaClHandles as NaClDesc objects.
   */
  num_user_desc = recv_hdr.ndesc_length;

  if (kern_nanimh.desc_length < num_user_desc) {
    /* more descriptors arrived than the caller asked for: drop the excess */
    kern_nanimh.flags |= NACL_ABI_RECVMSG_DESC_TRUNCATED;
    for (i = kern_nanimh.desc_length; i < num_user_desc; ++i) {
      NaClDescUnref(new_desc[i]);
      new_desc[i] = NULL;
    }
    num_user_desc = kern_nanimh.desc_length;
  }

  invalid_desc = (struct NaClDesc *) NaClDescInvalidMake();
  /* prepare to write out to user space the descriptor numbers */
  for (i = 0; i < num_user_desc; ++i) {
    if (invalid_desc == new_desc[i]) {
      /* invalid placeholders are reported as the known -1 sentinel */
      usr_desc[i] = kKnownInvalidDescNumber;
      NaClDescUnref(new_desc[i]);
    } else {
      usr_desc[i] = NaClAppSetDescAvail(nap, new_desc[i]);
    }
    new_desc[i] = NULL;
  }
  if (0 != num_user_desc &&
      !NaClCopyOutToUser(nap, (uintptr_t) kern_nanimh.descv, usr_desc,
                         num_user_desc * sizeof usr_desc[0])) {
    /* range was validated above, so failure here indicates a race/bug */
    NaClLog(LOG_FATAL,
            ("NaClSysImcRecvMsg: in/out ptr (descv %"NACL_PRIxPTR
             ") became invalid at copyout?\n"),
            (uintptr_t) kern_nanimh.descv);
  }

  kern_nanimh.desc_length = num_user_desc;
  if (!NaClCopyOutToUser(nap, (uintptr_t) nanimhp, &kern_nanimh,
                         sizeof kern_nanimh)) {
    NaClLog(LOG_FATAL,
            "NaClSysImcRecvMsg: in/out ptr (iov) became"
            " invalid at copyout?\n");
  }
  /* copy out updated desc count, flags */
 cleanup:
  if (retval < 0) {
    /* on error, drop any descriptors that were internalized */
    for (i = 0; i < NACL_ARRAY_SIZE(new_desc); ++i) {
      if (NULL != new_desc[i]) {
        NaClDescUnref(new_desc[i]);
        new_desc[i] = NULL;
      }
    }
  }
  NaClDescUnref(ndp);
  NaClDescSafeUnref(invalid_desc);
  NaClLog(3, "NaClSysImcRecvMsg: returning %d\n", retval);
 cleanup_leave:
  return retval;
}
499 :
500 12 : int32_t NaClSysImcMemObjCreate(struct NaClAppThread *natp,
501 : size_t size) {
502 12 : struct NaClApp *nap = natp->nap;
503 12 : int32_t retval = -NACL_ABI_EINVAL;
504 : struct NaClDescImcShm *shmp;
505 : off_t size_as_off;
506 :
507 12 : NaClLog(3,
508 : ("Entered NaClSysImcMemObjCreate(0x%08"NACL_PRIxPTR
509 : " 0x%08"NACL_PRIxS")\n"),
510 : (uintptr_t) natp, size);
511 :
512 12 : if (0 != (size & (NACL_MAP_PAGESIZE - 1))) {
513 2 : return -NACL_ABI_EINVAL;
514 : }
515 : /*
516 : * TODO(bsy): policy about maximum shm object size should be
517 : * enforced here.
518 : */
519 10 : size_as_off = (off_t) size;
520 10 : if (size_as_off < 0) {
521 0 : return -NACL_ABI_EINVAL;
522 : }
523 :
524 10 : shmp = NULL;
525 :
526 10 : shmp = malloc(sizeof *shmp);
527 10 : if (NULL == shmp) {
528 0 : retval = -NACL_ABI_ENOMEM;
529 0 : goto cleanup;
530 : }
531 :
532 10 : if (!NaClDescImcShmAllocCtor(shmp, size_as_off, /* executable= */ 0)) {
533 0 : retval = -NACL_ABI_ENOMEM; /* is this reasonable? */
534 0 : goto cleanup;
535 : }
536 :
537 10 : retval = NaClAppSetDescAvail(nap, (struct NaClDesc *) shmp);
538 10 : shmp = NULL;
539 :
540 : cleanup:
541 10 : free(shmp);
542 :
543 10 : return retval;
544 : }
545 :
546 5 : int32_t NaClSysImcSocketPair(struct NaClAppThread *natp,
547 : uint32_t descs_out) {
548 5 : struct NaClApp *nap = natp->nap;
549 : int32_t usr_pair[2];
550 : struct NaClDesc *pair[2];
551 : int32_t retval;
552 :
553 5 : NaClLog(3,
554 : ("Entered NaClSysImcSocketPair(0x%08"NACL_PRIxPTR
555 : " 0x%08"NACL_PRIx32")\n"),
556 : (uintptr_t) natp, descs_out);
557 :
558 5 : retval = NaClCommonDescSocketPair(pair);
559 5 : if (0 != retval) {
560 0 : goto cleanup;
561 : }
562 :
563 5 : usr_pair[0] = NaClAppSetDescAvail(nap, pair[0]);
564 5 : usr_pair[1] = NaClAppSetDescAvail(nap, pair[1]);
565 :
566 5 : if (!NaClCopyOutToUser(nap, (uintptr_t) descs_out, usr_pair,
567 : sizeof usr_pair)) {
568 0 : NaClAppSetDesc(nap, usr_pair[0], NULL);
569 0 : NaClAppSetDesc(nap, usr_pair[1], NULL);
570 0 : retval = -NACL_ABI_EFAULT;
571 0 : goto cleanup;
572 : }
573 5 : retval = 0;
574 :
575 : cleanup:
576 5 : return retval;
577 : }
|