/*
 * Copyright (c) 2012 The Native Client Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

/*
 * Not Prometheus, just shared memory buffers.  We do the following:
 * we allocate and write into a NaClGioShm object until its allocation
 * size limit is reached.  Then, we double its size.  This means we
 * avoid quadratic copying.
 *
 * Also, we maintain the following invariant: all I/O operations are
 * done once, rather than split.  So if a write would grow a shm
 * object, we grow before doing the write.  This leads to more
 * copying, but makes the I/O operations simpler.
 */
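
/*
 * Why doubling avoids quadratic copying (a sketch added here, not
 * part of the original comment): growing by a fixed increment of one
 * chunk would copy 1 + 2 + ... + (n - 1) chunks to reach n chunks,
 * i.e. O(n^2) bytes in total.  Doubling from an initial size S
 * instead copies S + 2S + ... + S * 2^(k-1) = S * (2^k - 1) bytes to
 * reach a capacity of S * 2^k, which is less than the final capacity
 * itself, so total copying stays linear in the amount of data
 * written.
 */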

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#include "native_client/src/trusted/gio/gio_shm_unbounded.h"

#include "native_client/src/shared/platform/nacl_log.h"
#include "native_client/src/trusted/desc/nacl_desc_base.h"
#include "native_client/src/trusted/service_runtime/nacl_config.h"


#if !defined(SIZE_T_MAX)
# define SIZE_T_MAX (~(size_t) 0)
#endif

static ssize_t NaClGioShmUnboundedRead(struct Gio *vself,
                                       void *buf,
                                       size_t count) {
  struct NaClGioShmUnbounded *self = (struct NaClGioShmUnbounded *) vself;
  ssize_t got;
  size_t bytes_avail;

  NaClLog(4,
          ("NaClGioShmUnboundedRead(0x%"NACL_PRIxPTR","
           " 0x%"NACL_PRIxPTR", 0x%"NACL_PRIxS")\n"),
          (uintptr_t) vself, (uintptr_t) buf, count);
  /* are we at the end, or did we seek past the end? */
  if (self->io_offset >= self->shm_written) {
    NaClLog(4, "io_offset 0x%"NACL_PRIxS", shm_written 0x%"NACL_PRIxS"\n",
            self->io_offset, self->shm_written);
    NaClLog(4, "I/O pointer outside of valid region, returning EOF\n");
    return 0;  /* EOF */
  }
  bytes_avail = self->shm_written - self->io_offset;
  if (count > bytes_avail) {
    count = bytes_avail;
  }
  NaClLog(4,
          ("NaClGioShmUnboundedRead: reading from underlying"
           " NaClGioShm 0x%"NACL_PRIxPTR"\n"),
          (uintptr_t) self->ngsp);
  got = (*self->ngsp->base.vtbl->Read)(&self->ngsp->base, buf, count);
  NaClLog(4,
          ("NaClGioShmUnboundedRead: got 0x%"NACL_PRIxS" bytes\n"),
          (size_t) got);
  if (-1 != got) {
    self->io_offset += (size_t) got;
  }
  return got;
}

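/*
 * Copy the first nbytes bytes of src into dst via a bounce buffer.
 * Both streams are rewound first; when the copy is done, dst's I/O
 * pointer is moved to where src's pointer originally was, so the new
 * backing store can transparently replace the old one.  Any failure
 * is fatal.
 */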
static void GioCopy(struct Gio *dst,
                    struct Gio *src,
                    size_t nbytes) {
  uint8_t buf[4096];
  uint8_t *bufp;
  ssize_t got;
  ssize_t this_copy;
  size_t ask;
  off_t cur_offset;

  NaClLog(3,
          ("GioCopy: dst 0x%"NACL_PRIxPTR
           ", src 0x%"NACL_PRIxPTR", nbytes 0x%"NACL_PRIxS"\n"),
          (uintptr_t) dst,
          (uintptr_t) src,
          nbytes);
  cur_offset = (*src->vtbl->Seek)(src, 0, SEEK_CUR);
  if (-1 == cur_offset) {
    NaClLog(LOG_FATAL,
            "NaClGioShmUnbounded::GioCopy: could not find source ptr\n");
  }
  if (-1 == (*src->vtbl->Seek)(src, 0, SEEK_SET)) {
    NaClLog(LOG_FATAL,
            "NaClGioShmUnbounded::GioCopy: could not rewind source\n");
  }
  if (-1 == (*dst->vtbl->Seek)(dst, 0, SEEK_SET)) {
    NaClLog(LOG_FATAL,
            "NaClGioShmUnbounded::GioCopy: could not rewind destination\n");
  }
  /*
   * This copy process will dirty every page.  An optimization for
   * sparse data would check the result of a Read for all-zeros and
   * Seek the corresponding number of bytes forward.  This is,
   * however, unlikely to be a common enough case in our projected use
   * cases.
   */
  while (nbytes > 0) {
    ask = sizeof buf;
    if (ask > nbytes) {
      ask = nbytes;
    }
    NaClLog(5,
            "GioCopy: copying 0x%"NACL_PRIxS" bytes, 0x%"NACL_PRIxS" remains\n",
            ask,
            nbytes);
    got = (*src->vtbl->Read)(src, buf, ask);
    if (got <= 0 || (size_t) got > ask) {
      NaClLog(LOG_FATAL,
              "NaClGioShmUnbounded::GioCopy: read failed, %"NACL_PRIdS"\n",
              got);
    }
    this_copy = got;

    /*
     * Retry short writes from where the last one left off: write
     * from bufp, which advances with each partial write, not buf.
     */
    for (ask = (size_t) got, bufp = buf;
         ask > 0;
         bufp += got, ask -= got) {
      got = (*dst->vtbl->Write)(dst, bufp, ask);
      if (got <= 0 || (size_t) got > ask) {
        NaClLog(LOG_FATAL,
                "NaClGioShmUnbounded::GioCopy: write failed, %"NACL_PRIdS"\n",
                got);
      }
    }
    nbytes -= this_copy;
  }
  if (-1 == (*dst->vtbl->Seek)(dst, cur_offset, SEEK_SET)) {
    NaClLog(LOG_FATAL,
            "NaClGioShmUnbounded::GioCopy: could not seek dst ptr\n");
  }
}

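/*
 * Write count bytes at the current I/O pointer, growing the backing
 * shm by doubling (see the file comment) whenever the write would
 * land past the current allocation.  Growth replaces the underlying
 * NaClGioShm with a larger one and copies the old contents over
 * before the write proceeds, preserving the do-I/O-once invariant.
 */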
static ssize_t NaClGioShmUnboundedWrite(struct Gio *vself,
                                        void const *buf,
                                        size_t count) {
  struct NaClGioShmUnbounded *self = (struct NaClGioShmUnbounded *) vself;
  size_t io_offset;
  ssize_t retval;
  size_t new_avail_sz;
  size_t new_size;
  struct NaClGioShm *ngsp;

  NaClLog(4,
          ("NaClGioShmUnboundedWrite(0x%"NACL_PRIxPTR","
           " 0x%"NACL_PRIxPTR", 0x%"NACL_PRIxS")\n"),
          (uintptr_t) vself, (uintptr_t) buf, count);
  if (SIZE_T_MAX - self->io_offset < count) {
    errno = EINVAL;
    return -1;
  }

  /*
   * where we'll end up when the I/O is done
   */
  io_offset = self->io_offset + count;

  /*
   * For sequential I/O, an "if" suffices.  For writes that occur
   * after a seek, however, we may need to double more than once.
   */
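  /*
   * Example: with shm_avail_sz == S, a Seek that leaves io_offset at
   * 3S followed by a one-byte Write needs two doublings (S -> 2S ->
   * 4S) before io_offset + count fits within the allocation.
   */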
  for (new_avail_sz = self->shm_avail_sz;
       new_avail_sz < io_offset;
       new_avail_sz = new_size) {
    if (SIZE_T_MAX / 2 >= new_avail_sz) {
      new_size = 2 * new_avail_sz;
    } else {
      new_size = SIZE_T_MAX - NACL_MAP_PAGESIZE;
      ++new_size;
      /*
       * We could return -1 w/ ENOMEM here as well, but let's peg the
       * max size.
       */
      if (new_size <= new_avail_sz) {
        /*
         * We get equality if we try to expand again.
         */
        errno = ENOMEM;
        return -1;
      }
    }
  }
  if (new_avail_sz != self->shm_avail_sz) {
    /*
     * Replace the ngsp with one that is the new size.  This means
     * that there is a temporary 3x VM hit in the worst case.  This
     * should be primarily paging space, since I/O between the
     * NaClGioShm objects should use relatively little RAM.  It will
     * trash the cache, however.  Hopefully the shm object is in the
     * buffer cache, and we're just mapping in chunks of it into our
     * address space.  This is a bit more explicit than mmapping both
     * source and destination objects completely and using madvise
     * with MADV_SEQUENTIAL -- and likely less efficient -- but
     * avoids OS-specific calls.
     */

    ngsp = malloc(sizeof *ngsp);

    if (NULL == ngsp) {
      errno = ENOMEM;
      return -1;
    }
    if (!NaClGioShmAllocCtor(ngsp, new_avail_sz)) {
      free(ngsp);
      errno = ENOMEM;
      return -1;
    }
    GioCopy((struct Gio *) ngsp, (struct Gio *) self->ngsp, self->shm_avail_sz);
    self->shm_avail_sz = new_avail_sz;

    if (-1 == (*self->ngsp->base.vtbl->Close)(&self->ngsp->base)) {
      NaClLog(LOG_ERROR,
              "NaClGioShmUnboundedWrite: close of src temporary failed\n");
    }
    (*self->ngsp->base.vtbl->Dtor)(&self->ngsp->base);
    free(self->ngsp);
    self->ngsp = ngsp;
    ngsp = NULL;
  }

  retval = (*self->ngsp->base.vtbl->Write)(&self->ngsp->base,
                                           buf, count);
  if (-1 != retval) {
    if ((size_t) retval > count) {
      errno = EIO;  /* internal error */
      return -1;
    }
    io_offset = self->io_offset + retval;

    if (io_offset > self->shm_written) {
      self->shm_written = io_offset;
      NaClLog(4,
              ("UPDATE: io_offset 0x%"NACL_PRIxS
               ", shm_written 0x%"NACL_PRIxS"\n"),
              self->io_offset, self->shm_written);
    }
    self->io_offset = io_offset;
  }

  NaClLog(4, "io_offset 0x%"NACL_PRIxS", shm_written 0x%"NACL_PRIxS"\n",
          self->io_offset, self->shm_written);

  return retval;
}

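/*
 * Seek delegates to the underlying NaClGioShm and, on success,
 * updates the cached io_offset to match.  Seeking past shm_written
 * is possible: the Read path checks for it and returns EOF, while a
 * subsequent Write grows the backing store as needed.
 */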
static off_t NaClGioShmUnboundedSeek(struct Gio *vself,
                                     off_t offset,
                                     int whence) {
  struct NaClGioShmUnbounded *self = (struct NaClGioShmUnbounded *) vself;
  off_t new_pos;

  NaClLog(4, "NaClGioShmUnboundedSeek(0x%"NACL_PRIxPTR", %ld, %d)\n",
          (uintptr_t) vself, (long) offset, whence);
  NaClLog(4, "io_offset 0x%"NACL_PRIxS", shm_written 0x%"NACL_PRIxS"\n",
          self->io_offset, self->shm_written);
  new_pos = (*self->ngsp->base.vtbl->Seek)(&self->ngsp->base, offset, whence);
  if (-1 != new_pos) {
    NaClLog(4, " setting io_offset to %ld\n", (long) new_pos);
    self->io_offset = new_pos;
  }
  NaClLog(4, "io_offset 0x%"NACL_PRIxS", shm_written 0x%"NACL_PRIxS"\n",
          self->io_offset, self->shm_written);
  return new_pos;
}

static int NaClGioShmUnboundedFlush(struct Gio *vself) {
  struct NaClGioShmUnbounded *self = (struct NaClGioShmUnbounded *) vself;

  return (*self->ngsp->base.vtbl->Flush)(&self->ngsp->base);
}

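/*
 * Close releases the underlying NaClGioShm and sets ngsp to NULL;
 * after a successful Close, only Dtor may safely be invoked, since
 * the other vtable methods dereference ngsp unconditionally.
 */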
static int NaClGioShmUnboundedClose(struct Gio *vself) {
  struct NaClGioShmUnbounded *self = (struct NaClGioShmUnbounded *) vself;

  if (NULL != self->ngsp) {
    if (0 != (*self->ngsp->base.vtbl->Close)(&self->ngsp->base)) {
      errno = EIO;
      return -1;
    }
    (*self->ngsp->base.vtbl->Dtor)(&self->ngsp->base);
    free(self->ngsp);
    self->ngsp = NULL;
  }
  return 0;
}

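/*
 * Dtor auto-Closes if the object was not explicitly closed, then
 * clears the vtable pointer so stale uses fault early.
 */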
static void NaClGioShmUnboundedDtor(struct Gio *vself) {
  struct NaClGioShmUnbounded *self = (struct NaClGioShmUnbounded *) vself;

  if (NULL != self->ngsp) {
    if (-1 == (*vself->vtbl->Close)(vself)) {
      NaClLog(LOG_ERROR, "NaClGioShmUnboundedDtor: auto Close failed\n");
    }
  }
  self->base.vtbl = NULL;
}

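/*
 * The initializer below is positional, so the entries must follow
 * the member order declared in struct GioVtbl (here: Dtor, Read,
 * Write, Seek, Flush, Close).
 */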
const struct GioVtbl kNaClGioShmUnboundedVtbl = {
  NaClGioShmUnboundedDtor,
  NaClGioShmUnboundedRead,
  NaClGioShmUnboundedWrite,
  NaClGioShmUnboundedSeek,
  NaClGioShmUnboundedFlush,
  NaClGioShmUnboundedClose,
};

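/*
 * Construct with an initial allocation of one NACL_MAP_PAGESIZE
 * chunk.  Returns 1 on success, 0 on failure (in which case the
 * object must not be used).
 *
 * A minimal usage sketch (illustrative, not from the original file;
 * "data" and "data_len" are hypothetical):
 *
 *   struct NaClGioShmUnbounded gio;
 *   struct NaClDesc *shm;
 *   size_t nbytes;
 *
 *   if (!NaClGioShmUnboundedCtor(&gio)) {
 *     // handle allocation failure
 *   }
 *   (void) (*gio.base.vtbl->Write)(&gio.base, data, data_len);
 *   shm = NaClGioShmUnboundedGetNaClDesc(&gio, &nbytes);
 *   // ... nbytes bytes of shm hold valid data; NaClDesc objects are
 *   // reference counted, so take a reference if shm must outlive
 *   // the Gio ...
 *   (*gio.base.vtbl->Dtor)(&gio.base);
 */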
int NaClGioShmUnboundedCtor(struct NaClGioShmUnbounded *self) {
  self->base.vtbl = NULL;
  self->ngsp = malloc(sizeof *self->ngsp);
  if (NULL == self->ngsp) {
    return 0;
  }
  if (!NaClGioShmAllocCtor(self->ngsp, NACL_MAP_PAGESIZE)) {
    free(self->ngsp);
    return 0;
  }
  self->shm_avail_sz = NACL_MAP_PAGESIZE;
  self->shm_written = 0;
  self->io_offset = 0;
  self->base.vtbl = &kNaClGioShmUnboundedVtbl;
  return 1;
}

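/*
 * Expose the backing shm descriptor and report in *written how many
 * bytes of it hold valid data (the allocation itself, which grows by
 * doubling, is usually larger).  Must not be called after Close,
 * since ngsp is NULL by then.
 */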
struct NaClDesc *NaClGioShmUnboundedGetNaClDesc(
    struct NaClGioShmUnbounded *self,
    size_t *written) {
  *written = self->shm_written;
  return self->ngsp->shmp;
}