/*
 * Copyright (c) 2012 The Native Client Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "native_client/src/shared/platform/aligned_malloc.h"
#include "native_client/src/shared/platform/nacl_host_desc.h"
#include "native_client/src/shared/platform/nacl_log.h"
#include "native_client/src/trusted/service_runtime/nacl_app_thread.h"
#include "native_client/src/trusted/service_runtime/nacl_text.h"
#include "native_client/src/trusted/service_runtime/sel_ldr.h"
#include "native_client/src/trusted/desc/nacl_desc_base.h"
#include "native_client/src/trusted/desc/nacl_desc_io.h"
#include "native_client/src/trusted/desc/nrd_all_modules.h"

#include "gtest/gtest.h"

//
// There are several problems in how these tests are set up.
//
// 1. NaCl modules such as the Log module are supposed to be
// initialized at process startup and finalized at shutdown. In
// particular, there should not be any threads other than the main
// thread running when the Log module initializes, since the verbosity
// level is set then -- and thereafter it is assumed to be invariant
// and read without acquiring locks. If any threads are left running
// (e.g., NaClApp internal service threads), then race detectors would
// legitimately report an error; that report is really an artifact of
// the test ignoring the API contract.
//
// 2. NaClApp objects, while they don't have a Dtor, are expected to
// have a lifetime equal to that of the process that contains them. In
// particular, when the untrusted thread invokes the exit syscall, it
// expects to be able to use _exit to exit, killing all other
// untrusted threads as a side effect. Furthermore, once a NaClApp
// object is initialized and NaClAppLaunchServiceThreads is invoked,
// system service threads are running holding references to the
// NaClApp object. If the NaClApp object goes out of scope or is
// otherwise destroyed and its memory freed, then these system threads
// may access memory that is no longer valid. Tests cannot readily be
// written to cleanly exercise the state space of a NaClApp after
// NaClAppLaunchServiceThreads unless the test process exits---thereby
// killing the service threads as a side effect---when each individual
// test is complete.
//
// These tests do not invoke NaClAppLaunchServiceThreads, so there
// should be no service threads left running between tests.

class SelLdrTest : public testing::Test {
 protected:
  virtual void SetUp();
  virtual void TearDown();
};

void SelLdrTest::SetUp() {
  NaClNrdAllModulesInit();
}

void SelLdrTest::TearDown() {
  NaClNrdAllModulesFini();
}

// set, get, setavail operations on the descriptor table
TEST_F(SelLdrTest, DescTable) {
  struct NaClApp app;
  struct NaClHostDesc *host_desc;
  struct NaClDesc *io_desc;
  struct NaClDesc *ret_desc;
  int ret_code;

  ret_code = NaClAppCtor(&app);
  ASSERT_EQ(1, ret_code);

  host_desc = (struct NaClHostDesc *) malloc(sizeof *host_desc);
  if (NULL == host_desc) {
    fprintf(stderr, "No memory\n");
  }
  ASSERT_TRUE(NULL != host_desc);

  io_desc = (struct NaClDesc *) NaClDescIoDescMake(host_desc);

  // 1st pos available is 0
  ret_code = NaClAppSetDescAvail(&app, io_desc);
  ASSERT_EQ(0, ret_code);
  // valid desc at pos 0
  ret_desc = NaClAppGetDesc(&app, 0);
  ASSERT_TRUE(NULL != ret_desc);

  // next pos available is 1
  ret_code = NaClAppSetDescAvail(&app, NULL);
  ASSERT_EQ(1, ret_code);
  // no desc at pos 1
  ret_desc = NaClAppGetDesc(&app, 1);
  ASSERT_TRUE(NULL == ret_desc);

  // no desc at pos 1 -> pos 1 is available
  ret_code = NaClAppSetDescAvail(&app, io_desc);
  ASSERT_EQ(1, ret_code);

  // valid desc at pos 1
  ret_desc = NaClAppGetDesc(&app, 1);
  ASSERT_TRUE(NULL != ret_desc);

  // set no desc at pos 3
  NaClAppSetDesc(&app, 3, NULL);

  // valid desc at pos 4
  NaClAppSetDesc(&app, 4, io_desc);
  ret_desc = NaClAppGetDesc(&app, 4);
  ASSERT_TRUE(NULL != ret_desc);

  // never set a desc at pos 10
  ret_desc = NaClAppGetDesc(&app, 10);
  ASSERT_TRUE(NULL == ret_desc);
}

// create service socket
TEST_F(SelLdrTest, CreateServiceSocket) {
  struct NaClApp app;
  int ret_code;

  ret_code = NaClAppCtor(&app);
  ASSERT_EQ(1, ret_code);

  // NaClCreateServiceSocket sets the app's service_port to a service
  // port descriptor and service_address to a service address descriptor.
  ASSERT_TRUE(NULL == app.service_port);
  ASSERT_TRUE(NULL == app.service_address);
  NaClCreateServiceSocket(&app);
  ASSERT_TRUE(NULL != app.service_port);
  ASSERT_TRUE(NULL != app.service_address);
}

// add and remove operations on the threads table
// Removing a thread from an empty table is tested in a death test
// (an illustrative sketch follows ThreadTableTest below).
// TODO(tuduce): specify the death test name when checking in.
TEST_F(SelLdrTest, ThreadTableTest) {
  struct NaClApp app;
  struct NaClAppThread nat, *appt = &nat;
  int ret_code;

  ret_code = NaClAppCtor(&app);
  ASSERT_EQ(1, ret_code);

  // 1st pos available is 0
  ASSERT_EQ(0, app.num_threads);
  ret_code = NaClAddThread(&app, appt);
  ASSERT_EQ(0, ret_code);
  ASSERT_EQ(1, app.num_threads);

  // next pos available is 1
  ret_code = NaClAddThread(&app, NULL);
  ASSERT_EQ(1, ret_code);
  ASSERT_EQ(2, app.num_threads);

  // no thread at pos 1 -> pos 1 is available
  ret_code = NaClAddThread(&app, appt);
  ASSERT_EQ(1, ret_code);
  ASSERT_EQ(3, app.num_threads);

  NaClRemoveThread(&app, 0);
  ASSERT_EQ(2, app.num_threads);
}
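
// The death test referenced above is not part of this file; the following
// is only an illustrative sketch. It assumes that NaClRemoveThread()
// LOG_FATALs (and therefore aborts) when asked to remove a thread that is
// not in the table. The suite and test names here are hypothetical, not
// the ones the TODO above refers to.
typedef SelLdrTest SelLdrDeathTest;

TEST_F(SelLdrDeathTest, RemoveThreadFromEmptyTable) {
  struct NaClApp app;
  ASSERT_EQ(1, NaClAppCtor(&app));
  ASSERT_EQ(0, app.num_threads);
  // Removing a nonexistent thread is expected to abort via LOG_FATAL.
  EXPECT_DEATH(NaClRemoveThread(&app, 0), "");
}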

TEST_F(SelLdrTest, MinimumThreadGenerationTest) {
  struct NaClApp app;
  ASSERT_EQ(1, NaClAppCtor(&app));
  ASSERT_EQ(INT_MAX, NaClMinimumThreadGeneration(&app));

  struct NaClAppThread thread1;
  struct NaClAppThread thread2;
  // Perform some minimal initialisation of our NaClAppThreads based
  // on what we know NaClMinimumThreadGeneration() does. Reusing
  // NaClAppThreadMake() here is difficult because it launches an
  // untrusted thread.
  memset(&thread1, 0xff, sizeof(thread1));
  memset(&thread2, 0xff, sizeof(thread2));
  ASSERT_EQ(1, NaClMutexCtor(&thread1.mu));
  ASSERT_EQ(1, NaClMutexCtor(&thread2.mu));
  thread1.dynamic_delete_generation = 200;
  thread2.dynamic_delete_generation = 100;

  ASSERT_EQ(0, NaClAddThread(&app, &thread1));
  ASSERT_EQ(200, NaClMinimumThreadGeneration(&app));
  ASSERT_EQ(1, NaClAddThread(&app, &thread2));
  ASSERT_EQ(100, NaClMinimumThreadGeneration(&app));

  thread2.dynamic_delete_generation = 300;
  ASSERT_EQ(200, NaClMinimumThreadGeneration(&app));

  // This is a regression test for
  // http://code.google.com/p/nativeclient/issues/detail?id=2190.
  // The thread array can contain NULL entries where threads have
  // exited and been removed. NaClMinimumThreadGeneration() should
  // know to skip those. Also, if it wrongly uses num_threads instead
  // of threads.num_entries it will miss thread2 and not return 300.
  NaClRemoveThread(&app, 0);
  ASSERT_EQ(300, NaClMinimumThreadGeneration(&app));
}

TEST_F(SelLdrTest, NaClUserToSysAddrRangeTest) {
  struct NaClApp app;

  ASSERT_EQ(1, NaClAppCtor(&app));
  /*
   * addr_bits is set appropriately. mem_start is 0, which is bogus
   * but doesn't matter with respect to what this is testing.
   */
  uintptr_t addr_test;
  size_t obj_size;

  obj_size = 16;

  /*
   * small object placement
   */
  addr_test = 65536;
  ASSERT_EQ(addr_test,
            NaClUserToSysAddrRange(&app, addr_test, obj_size));

  addr_test = ((uintptr_t) 1U << app.addr_bits) - obj_size;
  ASSERT_EQ(addr_test,
            NaClUserToSysAddrRange(&app, addr_test, obj_size));

  addr_test = ((uintptr_t) 1U << app.addr_bits) - obj_size + 1;
  ASSERT_EQ(kNaClBadAddress,
            NaClUserToSysAddrRange(&app, addr_test, obj_size));

  /* size-based exceed range */
  addr_test = 65536;
  obj_size = ((uintptr_t) 1U << app.addr_bits) - addr_test;
  ASSERT_EQ(addr_test,
            NaClUserToSysAddrRange(&app, addr_test, obj_size));

  addr_test = 65536;
  obj_size = ((uintptr_t) 1U << app.addr_bits) - addr_test + 1;
  ASSERT_EQ(kNaClBadAddress,
            NaClUserToSysAddrRange(&app, addr_test, obj_size));

  /*
   * wraparound; assumes ~(uintptr_t) 0 is greater than
   * ((uintptr_t) 1U) << app.addr_bits
   */

  addr_test = 65536;
  obj_size = ~(uintptr_t) 0U - addr_test;
  ASSERT_EQ(kNaClBadAddress,
            NaClUserToSysAddrRange(&app, addr_test, obj_size));

  addr_test = 65536;
  obj_size = ~(uintptr_t) 0U - addr_test + 1;
  ASSERT_EQ(kNaClBadAddress,
            NaClUserToSysAddrRange(&app, addr_test, obj_size));
}

// On Intel Atom CPUs, memory accesses through the %gs segment are
// slow unless the start of the %gs segment is 64-byte-aligned. This
// is a sanity check to ensure our alignment declarations work.
#if NACL_ARCH(NACL_BUILD_ARCH) == NACL_x86 && NACL_BUILD_SUBARCH == 32
TEST_F(SelLdrTest, GsSegmentAlignmentTest) {
  struct NaClAppThread *natp =
      (struct NaClAppThread *)
      NaClAlignedMalloc(sizeof(*natp), __alignof(struct NaClAppThread));
  ASSERT_TRUE(natp);
  // We use "volatile" in an attempt to prevent the compiler from
  // optimizing away our assertion based on the compiler's own
  // knowledge of the alignment of the struct it allocated.
  volatile uintptr_t addr = (uintptr_t) &natp->user.gs_segment;
  ASSERT_EQ((int) (addr % 64), 0);
  NaClAlignedFree(natp);
}
#endif