1 : /*
2 : * Copyright (c) 2013 The Native Client Authors. All rights reserved.
3 : * Use of this source code is governed by a BSD-style license that can be
4 : * found in the LICENSE file.
5 : */
6 :
7 : #include "native_client/src/trusted/service_runtime/sys_imc.h"
8 :
9 : #include <string.h>
10 :
11 : #include "native_client/src/trusted/desc/nacl_desc_imc.h"
12 : #include "native_client/src/trusted/desc/nacl_desc_imc_shm.h"
13 : #include "native_client/src/trusted/desc/nacl_desc_invalid.h"
14 : #include "native_client/src/trusted/desc/nrd_xfer.h"
15 : #include "native_client/src/trusted/service_runtime/include/sys/errno.h"
16 : #include "native_client/src/trusted/service_runtime/nacl_app_thread.h"
17 : #include "native_client/src/trusted/service_runtime/nacl_copy.h"
18 : #include "native_client/src/trusted/service_runtime/sel_ldr.h"
19 :
20 :
/*
 * Untrusted-side descriptor number used to represent the invalid
 * descriptor in IMC messages: sendmsg maps this value to the
 * NaClDescInvalid singleton, and recvmsg maps received invalid
 * descriptors back to this value for the user.
 */
static int const kKnownInvalidDescNumber = -1;
22 :
23 10 : int32_t NaClSysImcMakeBoundSock(struct NaClAppThread *natp,
24 10 : int32_t *sap) {
25 : /*
26 : * Create a bound socket descriptor and a socket address descriptor.
27 : */
28 10 : struct NaClApp *nap = natp->nap;
29 10 : int32_t retval = -NACL_ABI_EINVAL;
30 10 : struct NaClDesc *pair[2];
31 10 : int32_t usr_pair[2];
32 :
33 10 : NaClLog(3,
34 : ("Entered NaClSysImcMakeBoundSock(0x%08"NACL_PRIxPTR","
35 : " 0x%08"NACL_PRIxPTR")\n"),
36 : (uintptr_t) natp, (uintptr_t) sap);
37 :
38 10 : retval = NaClCommonDescMakeBoundSock(pair);
39 10 : if (0 != retval) {
40 0 : goto cleanup;
41 : }
42 :
43 10 : usr_pair[0] = NaClAppSetDescAvail(nap, pair[0]);
44 10 : usr_pair[1] = NaClAppSetDescAvail(nap, pair[1]);
45 10 : if (!NaClCopyOutToUser(nap, (uintptr_t) sap,
46 : usr_pair, sizeof usr_pair)) {
47 : /*
48 : * NB: The descriptors were briefly observable to untrusted code
49 : * in this window, even though the syscall had not returned yet,
50 : * and another thread which guesses their numbers could actually
51 : * use them, so the NaClDescSafeUnref inside NaClAppSetDesc below
52 : * might not actually deallocate right away. To avoid this, we
53 : * could grab the descriptor lock and hold it until after the
54 : * copyout is done, but that imposes an ordering between the
55 : * descriptor lock and the VM lock which can cause problems
56 : * elsewhere.
57 : */
58 0 : NaClAppSetDesc(nap, usr_pair[0], NULL);
59 0 : NaClAppSetDesc(nap, usr_pair[1], NULL);
60 0 : retval = -NACL_ABI_EFAULT;
61 0 : goto cleanup;
62 : }
63 :
64 10 : retval = 0;
65 :
66 : cleanup:
67 10 : return retval;
68 : }
69 :
70 15 : int32_t NaClSysImcAccept(struct NaClAppThread *natp,
71 15 : int d) {
72 15 : struct NaClApp *nap = natp->nap;
73 15 : int32_t retval = -NACL_ABI_EINVAL;
74 15 : struct NaClDesc *ndp;
75 :
76 15 : NaClLog(3, "Entered NaClSysImcAccept(0x%08"NACL_PRIxPTR", %d)\n",
77 : (uintptr_t) natp, d);
78 :
79 15 : ndp = NaClAppGetDesc(nap, d);
80 15 : if (NULL == ndp) {
81 0 : retval = -NACL_ABI_EBADF;
82 0 : } else {
83 12 : struct NaClDesc *result_desc;
84 12 : retval = (*((struct NaClDescVtbl const *) ndp->base.vtbl)->
85 : AcceptConn)(ndp, &result_desc);
86 12 : if (retval == 0) {
87 11 : retval = NaClAppSetDescAvail(nap, result_desc);
88 11 : }
89 12 : NaClDescUnref(ndp);
90 : }
91 :
92 12 : return retval;
93 : }
94 :
95 17 : int32_t NaClSysImcConnect(struct NaClAppThread *natp,
96 17 : int d) {
97 17 : struct NaClApp *nap = natp->nap;
98 17 : int32_t retval = -NACL_ABI_EINVAL;
99 17 : struct NaClDesc *ndp;
100 :
101 17 : NaClLog(3, "Entered NaClSysImcConnectAddr(0x%08"NACL_PRIxPTR", %d)\n",
102 : (uintptr_t) natp, d);
103 :
104 17 : ndp = NaClAppGetDesc(nap, d);
105 17 : if (NULL == ndp) {
106 0 : retval = -NACL_ABI_EBADF;
107 0 : } else {
108 17 : struct NaClDesc *result;
109 17 : retval = (*((struct NaClDescVtbl const *) ndp->base.vtbl)->
110 : ConnectAddr)(ndp, &result);
111 17 : if (retval == 0) {
112 16 : retval = NaClAppSetDescAvail(nap, result);
113 16 : }
114 17 : NaClDescUnref(ndp);
115 : }
116 :
117 17 : return retval;
118 : }
119 :
/*
 * This function converts addresses from user addresses to system
 * addresses, copying into kernel space as needed to avoid TOCTOU
 * (time-of-check vs time-of-use) races, then invokes the descriptor's
 * SendMsg() method.
 */
int32_t NaClSysImcSendmsg(struct NaClAppThread         *natp,
                          int                          d,
                          struct NaClAbiNaClImcMsgHdr  *nanimhp,
                          int                          flags) {
  struct NaClApp *nap = natp->nap;
  int32_t retval = -NACL_ABI_EINVAL;
  ssize_t ssize_retval;
  uintptr_t sysaddr;
  /* copy of user-space data for validation */
  struct NaClAbiNaClImcMsgHdr kern_nanimh;
  struct NaClAbiNaClImcMsgIoVec kern_naiov[NACL_ABI_IMC_IOVEC_MAX];
  struct NaClImcMsgIoVec kern_iov[NACL_ABI_IMC_IOVEC_MAX];
  int32_t usr_desc[NACL_ABI_IMC_USER_DESC_MAX];
  /* kernel-side representation of descriptors */
  struct NaClDesc *kern_desc[NACL_ABI_IMC_USER_DESC_MAX];
  struct NaClImcTypedMsgHdr kern_msg_hdr;
  struct NaClDesc *ndp;
  size_t i;

  NaClLog(3,
          ("Entered NaClSysImcSendmsg(0x%08"NACL_PRIxPTR", %d,"
           " 0x%08"NACL_PRIxPTR", 0x%x)\n"),
          (uintptr_t) natp, d, (uintptr_t) nanimhp, flags);

  /* snapshot the user-supplied message header into trusted memory */
  if (!NaClCopyInFromUser(nap, &kern_nanimh, (uintptr_t) nanimhp,
                          sizeof kern_nanimh)) {
    NaClLog(4, "NaClImcMsgHdr not in user address space\n");
    retval = -NACL_ABI_EFAULT;
    goto cleanup_leave;
  }
  /* copy before validating contents */

  /*
   * Some of these checks duplicate checks that will be done in the
   * nrd xfer library, but it is better to check before doing the
   * address translation of memory/descriptor vectors if those vectors
   * might be too long.  Plus, we need to copy and validate vectors
   * for TOCTOU (time-of-check vs time-of-use) race protection, and we
   * must prevent overflows.  The nrd xfer library's checks should
   * never fire when called from the service runtime, but the nrd xfer
   * library might be called from other code.
   */
  if (kern_nanimh.iov_length > NACL_ABI_IMC_IOVEC_MAX) {
    NaClLog(4, "gather/scatter array too large\n");
    retval = -NACL_ABI_EINVAL;
    goto cleanup_leave;
  }
  if (kern_nanimh.desc_length > NACL_ABI_IMC_USER_DESC_MAX) {
    NaClLog(4, "handle vector too long\n");
    retval = -NACL_ABI_EINVAL;
    goto cleanup_leave;
  }

  if (kern_nanimh.iov_length > 0) {
    /* snapshot the gather array, then validate/translate each entry */
    if (!NaClCopyInFromUser(nap, kern_naiov, (uintptr_t) kern_nanimh.iov,
                            (kern_nanimh.iov_length * sizeof kern_naiov[0]))) {
      NaClLog(4, "gather/scatter array not in user address space\n");
      retval = -NACL_ABI_EFAULT;
      goto cleanup_leave;
    }

    for (i = 0; i < kern_nanimh.iov_length; ++i) {
      /* each iov entry must lie entirely within untrusted address space */
      sysaddr = NaClUserToSysAddrRange(nap,
                                       (uintptr_t) kern_naiov[i].base,
                                       kern_naiov[i].length);
      if (kNaClBadAddress == sysaddr) {
        retval = -NACL_ABI_EFAULT;
        goto cleanup_leave;
      }
      kern_iov[i].base = (void *) sysaddr;
      kern_iov[i].length = kern_naiov[i].length;
    }
  }

  ndp = NaClAppGetDesc(nap, d);  /* reference released at cleanup */
  if (NULL == ndp) {
    retval = -NACL_ABI_EBADF;
    goto cleanup_leave;
  }

  /*
   * make things easier for cleanup exit processing
   */
  memset(kern_desc, 0, sizeof kern_desc);
  retval = -NACL_ABI_EINVAL;

  kern_msg_hdr.iov = kern_iov;
  kern_msg_hdr.iov_length = kern_nanimh.iov_length;

  if (0 == kern_nanimh.desc_length) {
    kern_msg_hdr.ndescv = 0;
    kern_msg_hdr.ndesc_length = 0;
  } else {
    /* snapshot the untrusted descriptor-number vector */
    if (!NaClCopyInFromUser(nap, usr_desc, kern_nanimh.descv,
                            kern_nanimh.desc_length * sizeof usr_desc[0])) {
      retval = -NACL_ABI_EFAULT;
      goto cleanup;
    }

    /* translate untrusted descriptor numbers to NaClDesc objects */
    for (i = 0; i < kern_nanimh.desc_length; ++i) {
      if (kKnownInvalidDescNumber == usr_desc[i]) {
        kern_desc[i] = (struct NaClDesc *) NaClDescInvalidMake();
      } else {
        /* NaCl modules are ILP32, so this works on ILP32 and LP64 systems */
        kern_desc[i] = NaClAppGetDesc(nap, usr_desc[i]);
      }
      if (NULL == kern_desc[i]) {
        retval = -NACL_ABI_EBADF;
        goto cleanup;
      }
    }
    kern_msg_hdr.ndescv = kern_desc;
    kern_msg_hdr.ndesc_length = kern_nanimh.desc_length;
  }
  kern_msg_hdr.flags = kern_nanimh.flags;

  /* lock user memory ranges in kern_naiov */
  for (i = 0; i < kern_nanimh.iov_length; ++i) {
    NaClVmIoWillStart(nap,
                      kern_naiov[i].base,
                      kern_naiov[i].base + kern_naiov[i].length - 1);
  }
  ssize_retval = NACL_VTBL(NaClDesc, ndp)->SendMsg(ndp, &kern_msg_hdr, flags);
  /* unlock user memory ranges in kern_naiov */
  for (i = 0; i < kern_nanimh.iov_length; ++i) {
    NaClVmIoHasEnded(nap,
                     kern_naiov[i].base,
                     kern_naiov[i].base + kern_naiov[i].length - 1);
  }

  if (NaClSSizeIsNegErrno(&ssize_retval)) {
    /*
     * NaClWouldBlock uses TSD (for both the errno-based and
     * GetLastError()-based implementations), so this is threadsafe.
     */
    if (0 != (flags & NACL_DONT_WAIT) && NaClWouldBlock()) {
      retval = -NACL_ABI_EAGAIN;
    } else if (-NACL_ABI_EMSGSIZE == ssize_retval) {
      /*
       * Allow the caller to handle the case when imc_sendmsg fails because
       * the message is too large for the system to send in one piece.
       */
      retval = -NACL_ABI_EMSGSIZE;
    } else {
      /*
       * TODO(bsy): the else case is some mysterious internal error.
       * Should we destroy the ndp or otherwise mark it as bad?  Was
       * the failure atomic?  Did it send some partial data?  Linux
       * implementation appears okay.
       */
      retval = -NACL_ABI_EIO;
    }
  } else if (ssize_retval > INT32_MAX || ssize_retval < INT32_MIN) {
    retval = -NACL_ABI_EOVERFLOW;
  } else {
    /* cast is safe due to range checks above */
    retval = (int32_t)ssize_retval;
  }

 cleanup:
  /* drop the references taken (or invalid-desc objects made) above */
  for (i = 0; i < kern_nanimh.desc_length; ++i) {
    if (NULL != kern_desc[i]) {
      NaClDescUnref(kern_desc[i]);
      kern_desc[i] = NULL;
    }
  }
  NaClDescUnref(ndp);
 cleanup_leave:
  NaClLog(3, "NaClSysImcSendmsg: returning %d\n", retval);
  return retval;
}
296 :
int32_t NaClSysImcRecvmsg(struct NaClAppThread         *natp,
                          int                          d,
                          struct NaClAbiNaClImcMsgHdr  *nanimhp,
                          int                          flags) {
  /*
   * Receive a typed IMC message on descriptor |d|: payload bytes are
   * scattered into the user-supplied iov, and received descriptors are
   * installed in the untrusted descriptor table with their numbers
   * copied out through |nanimhp->descv|.  Returns the number of
   * payload bytes received, or a negated NaCl ABI errno.
   */
  struct NaClApp *nap = natp->nap;
  int32_t retval = -NACL_ABI_EINVAL;
  ssize_t ssize_retval;
  uintptr_t sysaddr;
  size_t i;
  struct NaClDesc *ndp;
  /* trusted snapshots of user-space data */
  struct NaClAbiNaClImcMsgHdr kern_nanimh;
  struct NaClAbiNaClImcMsgIoVec kern_naiov[NACL_ABI_IMC_IOVEC_MAX];
  struct NaClImcMsgIoVec kern_iov[NACL_ABI_IMC_IOVEC_MAX];
  int32_t usr_desc[NACL_ABI_IMC_USER_DESC_MAX];
  struct NaClImcTypedMsgHdr recv_hdr;
  struct NaClDesc *new_desc[NACL_ABI_IMC_DESC_MAX];
  nacl_abi_size_t num_user_desc;
  struct NaClDesc *invalid_desc = NULL;

  NaClLog(3,
          ("Entered NaClSysImcRecvMsg(0x%08"NACL_PRIxPTR", %d,"
           " 0x%08"NACL_PRIxPTR")\n"),
          (uintptr_t) natp, d, (uintptr_t) nanimhp);

  /*
   * First, we validate user-supplied message headers before
   * allocating a receive buffer.
   */
  if (!NaClCopyInFromUser(nap, &kern_nanimh, (uintptr_t) nanimhp,
                          sizeof kern_nanimh)) {
    NaClLog(4, "NaClImcMsgHdr not in user address space\n");
    retval = -NACL_ABI_EFAULT;
    goto cleanup_leave;
  }
  /* copy before validating */

  if (kern_nanimh.iov_length > NACL_ABI_IMC_IOVEC_MAX) {
    NaClLog(4, "gather/scatter array too large: %"NACL_PRIdNACL_SIZE"\n",
            kern_nanimh.iov_length);
    retval = -NACL_ABI_EINVAL;
    goto cleanup_leave;
  }
  if (kern_nanimh.desc_length > NACL_ABI_IMC_USER_DESC_MAX) {
    NaClLog(4, "handle vector too long: %"NACL_PRIdNACL_SIZE"\n",
            kern_nanimh.desc_length);
    retval = -NACL_ABI_EINVAL;
    goto cleanup_leave;
  }

  if (kern_nanimh.iov_length > 0) {
    /*
     * Copy IOV array into kernel space.  Validate this snapshot and do
     * user->kernel address conversions on this snapshot.
     */
    if (!NaClCopyInFromUser(nap, kern_naiov, (uintptr_t) kern_nanimh.iov,
                            (kern_nanimh.iov_length * sizeof kern_naiov[0]))) {
      NaClLog(4, "gather/scatter array not in user address space\n");
      retval = -NACL_ABI_EFAULT;
      goto cleanup_leave;
    }
    /*
     * Convert every IOV base from user to system address, validate
     * range of bytes are really in user address space.
     */
    for (i = 0; i < kern_nanimh.iov_length; ++i) {
      sysaddr = NaClUserToSysAddrRange(nap,
                                       (uintptr_t) kern_naiov[i].base,
                                       kern_naiov[i].length);
      if (kNaClBadAddress == sysaddr) {
        NaClLog(4, "iov number %"NACL_PRIuS" not entirely in user space\n", i);
        retval = -NACL_ABI_EFAULT;
        goto cleanup_leave;
      }
      kern_iov[i].base = (void *) sysaddr;
      kern_iov[i].length = kern_naiov[i].length;
    }
  }

  if (kern_nanimh.desc_length > 0) {
    /*
     * Pre-validate the descv output range so an obviously bad pointer
     * fails before any message is actually received.
     */
    sysaddr = NaClUserToSysAddrRange(nap,
                                     (uintptr_t) kern_nanimh.descv,
                                     kern_nanimh.desc_length * sizeof(int32_t));
    if (kNaClBadAddress == sysaddr) {
      retval = -NACL_ABI_EFAULT;
      goto cleanup_leave;
    }
  }

  ndp = NaClAppGetDesc(nap, d);  /* reference released at cleanup */
  if (NULL == ndp) {
    NaClLog(4, "receiving descriptor invalid\n");
    retval = -NACL_ABI_EBADF;
    goto cleanup_leave;
  }

  recv_hdr.iov = kern_iov;
  recv_hdr.iov_length = kern_nanimh.iov_length;

  /* receive up to the full kernel-side capacity of descriptors */
  recv_hdr.ndescv = new_desc;
  recv_hdr.ndesc_length = NACL_ARRAY_SIZE(new_desc);
  memset(new_desc, 0, sizeof new_desc);

  recv_hdr.flags = 0;  /* just to make it obvious; IMC will clear it for us */

  /* lock user memory ranges in kern_naiov */
  for (i = 0; i < kern_nanimh.iov_length; ++i) {
    NaClVmIoWillStart(nap,
                      kern_naiov[i].base,
                      kern_naiov[i].base + kern_naiov[i].length - 1);
  }
  ssize_retval = NACL_VTBL(NaClDesc, ndp)->RecvMsg(ndp, &recv_hdr, flags,
      (struct NaClDescQuotaInterface *) nap->desc_quota_interface);
  /* unlock user memory ranges in kern_naiov */
  for (i = 0; i < kern_nanimh.iov_length; ++i) {
    NaClVmIoHasEnded(nap,
                     kern_naiov[i].base,
                     kern_naiov[i].base + kern_naiov[i].length - 1);
  }
  /*
   * retval is number of user payload bytes received and excludes the
   * header bytes.
   */
  NaClLog(3, "NaClSysImcRecvMsg: RecvMsg() returned %"NACL_PRIdS"\n",
          ssize_retval);
  if (NaClSSizeIsNegErrno(&ssize_retval)) {
    /* negative error numbers all have valid 32-bit representations,
     * so this cast is safe. */
    retval = (int32_t) ssize_retval;
    goto cleanup;
  } else if (ssize_retval > INT32_MAX || ssize_retval < INT32_MIN) {
    retval = -NACL_ABI_EOVERFLOW;
    goto cleanup;
  } else {
    /* cast is safe due to range check above */
    retval = (int32_t) ssize_retval;
  }

  /*
   * NB: recv_hdr.flags may contain NACL_ABI_MESSAGE_TRUNCATED and/or
   * NACL_ABI_HANDLES_TRUNCATED.
   */
  kern_nanimh.flags = recv_hdr.flags;

  /*
   * Now internalize the NaClHandles as NaClDesc objects.
   */
  num_user_desc = recv_hdr.ndesc_length;

  if (kern_nanimh.desc_length < num_user_desc) {
    /* more descriptors arrived than the caller asked for: drop extras */
    kern_nanimh.flags |= NACL_ABI_RECVMSG_DESC_TRUNCATED;
    for (i = kern_nanimh.desc_length; i < num_user_desc; ++i) {
      NaClDescUnref(new_desc[i]);
      new_desc[i] = NULL;
    }
    num_user_desc = kern_nanimh.desc_length;
  }

  invalid_desc = (struct NaClDesc *) NaClDescInvalidMake();
  /* prepare to write out to user space the descriptor numbers */
  for (i = 0; i < num_user_desc; ++i) {
    if (invalid_desc == new_desc[i]) {
      /* invalid descriptors are reported as kKnownInvalidDescNumber */
      usr_desc[i] = kKnownInvalidDescNumber;
      NaClDescUnref(new_desc[i]);
    } else {
      usr_desc[i] = NaClAppSetDescAvail(nap, new_desc[i]);
    }
    new_desc[i] = NULL;
  }
  if (0 != num_user_desc &&
      !NaClCopyOutToUser(nap, (uintptr_t) kern_nanimh.descv, usr_desc,
                         num_user_desc * sizeof usr_desc[0])) {
    /* descv range was validated above, so failure here is fatal */
    NaClLog(LOG_FATAL,
            ("NaClSysImcRecvMsg: in/out ptr (descv %"NACL_PRIxPTR
             ") became invalid at copyout?\n"),
            (uintptr_t) kern_nanimh.descv);
  }

  kern_nanimh.desc_length = num_user_desc;
  /* copy out updated desc count, flags */
  if (!NaClCopyOutToUser(nap, (uintptr_t) nanimhp, &kern_nanimh,
                         sizeof kern_nanimh)) {
    NaClLog(LOG_FATAL,
            "NaClSysImcRecvMsg: in/out ptr (iov) became"
            " invalid at copyout?\n");
  }

 cleanup:
  if (retval < 0) {
    /* on error, drop any descriptors RecvMsg may have produced */
    for (i = 0; i < NACL_ARRAY_SIZE(new_desc); ++i) {
      if (NULL != new_desc[i]) {
        NaClDescUnref(new_desc[i]);
        new_desc[i] = NULL;
      }
    }
  }
  NaClDescUnref(ndp);
  NaClDescSafeUnref(invalid_desc);
  NaClLog(3, "NaClSysImcRecvMsg: returning %d\n", retval);
 cleanup_leave:
  return retval;
}
499 :
500 12 : int32_t NaClSysImcMemObjCreate(struct NaClAppThread *natp,
501 12 : size_t size) {
502 12 : struct NaClApp *nap = natp->nap;
503 12 : int32_t retval = -NACL_ABI_EINVAL;
504 12 : struct NaClDescImcShm *shmp;
505 12 : off_t size_as_off;
506 :
507 12 : NaClLog(3,
508 : ("Entered NaClSysImcMemObjCreate(0x%08"NACL_PRIxPTR
509 : " 0x%08"NACL_PRIxS")\n"),
510 : (uintptr_t) natp, size);
511 :
512 12 : if (0 != (size & (NACL_MAP_PAGESIZE - 1))) {
513 2 : return -NACL_ABI_EINVAL;
514 : }
515 : /*
516 : * TODO(bsy): policy about maximum shm object size should be
517 : * enforced here.
518 : */
519 10 : size_as_off = (off_t) size;
520 10 : if (size_as_off < 0) {
521 0 : return -NACL_ABI_EINVAL;
522 : }
523 :
524 10 : shmp = NULL;
525 :
526 10 : shmp = malloc(sizeof *shmp);
527 10 : if (NULL == shmp) {
528 0 : retval = -NACL_ABI_ENOMEM;
529 0 : goto cleanup;
530 : }
531 :
532 10 : if (!NaClDescImcShmAllocCtor(shmp, size_as_off, /* executable= */ 0)) {
533 0 : retval = -NACL_ABI_ENOMEM; /* is this reasonable? */
534 0 : goto cleanup;
535 : }
536 :
537 10 : retval = NaClAppSetDescAvail(nap, (struct NaClDesc *) shmp);
538 10 : shmp = NULL;
539 :
540 : cleanup:
541 10 : free(shmp);
542 :
543 10 : return retval;
544 12 : }
545 :
546 5 : int32_t NaClSysImcSocketPair(struct NaClAppThread *natp,
547 5 : uint32_t descs_out) {
548 5 : struct NaClApp *nap = natp->nap;
549 5 : int32_t usr_pair[2];
550 5 : struct NaClDesc *pair[2];
551 5 : int32_t retval;
552 :
553 5 : NaClLog(3,
554 : ("Entered NaClSysImcSocketPair(0x%08"NACL_PRIxPTR
555 : " 0x%08"NACL_PRIx32")\n"),
556 : (uintptr_t) natp, descs_out);
557 :
558 5 : retval = NaClCommonDescSocketPair(pair);
559 5 : if (0 != retval) {
560 0 : goto cleanup;
561 : }
562 :
563 5 : usr_pair[0] = NaClAppSetDescAvail(nap, pair[0]);
564 5 : usr_pair[1] = NaClAppSetDescAvail(nap, pair[1]);
565 :
566 5 : if (!NaClCopyOutToUser(nap, (uintptr_t) descs_out, usr_pair,
567 : sizeof usr_pair)) {
568 0 : NaClAppSetDesc(nap, usr_pair[0], NULL);
569 0 : NaClAppSetDesc(nap, usr_pair[1], NULL);
570 0 : retval = -NACL_ABI_EFAULT;
571 0 : goto cleanup;
572 : }
573 5 : retval = 0;
574 :
575 : cleanup:
576 5 : return retval;
577 : }
|