Upgrade to SpiderMonkey from Firefox 3.5.3.
1 | /* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- |
2 | * vim: set ts=8 sw=4 et tw=78: |
3 | * |
4 | * ***** BEGIN LICENSE BLOCK ***** |
5 | * Version: MPL 1.1/GPL 2.0/LGPL 2.1 |
6 | * |
7 | * The contents of this file are subject to the Mozilla Public License Version |
8 | * 1.1 (the "License"); you may not use this file except in compliance with |
9 | * the License. You may obtain a copy of the License at |
10 | * http://www.mozilla.org/MPL/ |
11 | * |
12 | * Software distributed under the License is distributed on an "AS IS" basis, |
13 | * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License |
14 | * for the specific language governing rights and limitations under the |
15 | * License. |
16 | * |
17 | * The Original Code is Mozilla Communicator client code, released |
18 | * March 31, 1998. |
19 | * |
20 | * The Initial Developer of the Original Code is |
21 | * Netscape Communications Corporation. |
22 | * Portions created by the Initial Developer are Copyright (C) 1998 |
23 | * the Initial Developer. All Rights Reserved. |
24 | * |
25 | * Contributor(s): |
26 | * |
27 | * Alternatively, the contents of this file may be used under the terms of |
28 | * either of the GNU General Public License Version 2 or later (the "GPL"), |
29 | * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"), |
30 | * in which case the provisions of the GPL or the LGPL are applicable instead |
31 | * of those above. If you wish to allow use of your version of this file only |
32 | * under the terms of either the GPL or the LGPL, and not to allow others to |
33 | * use your version of this file under the terms of the MPL, indicate your |
34 | * decision by deleting the provisions above and replace them with the notice |
35 | * and other provisions required by the GPL or the LGPL. If you do not delete |
36 | * the provisions above, a recipient may use your version of this file under |
37 | * the terms of any one of the MPL, the GPL or the LGPL. |
38 | * |
39 | * ***** END LICENSE BLOCK ***** */ |
40 | |
41 | /* |
42 | * JS Mark-and-Sweep Garbage Collector. |
43 | * |
44 | * This GC allocates fixed-sized things with sizes up to GC_NBYTES_MAX (see |
45 | * jsgc.h). It allocates from a special GC arena pool with each arena allocated |
46 | * using malloc. It uses an ideally parallel array of flag bytes to hold the |
47 | * mark bit, finalizer type index, etc. |
48 | * |
49 | * XXX swizzle page to freelist for better locality of reference |
50 | */ |
51 | #include "jsstddef.h" |
52 | #include <stdlib.h> /* for free */ |
53 | #include <math.h> |
54 | #include <string.h> /* for memset used when DEBUG */ |
55 | #include "jstypes.h" |
56 | #include "jsutil.h" /* Added by JSIFY */ |
57 | #include "jshash.h" /* Added by JSIFY */ |
58 | #include "jsbit.h" |
59 | #include "jsclist.h" |
60 | #include "jsprf.h" |
61 | #include "jsapi.h" |
62 | #include "jsatom.h" |
63 | #include "jscntxt.h" |
64 | #include "jsversion.h" |
65 | #include "jsdbgapi.h" |
66 | #include "jsexn.h" |
67 | #include "jsfun.h" |
68 | #include "jsgc.h" |
69 | #include "jsinterp.h" |
70 | #include "jsiter.h" |
71 | #include "jslock.h" |
72 | #include "jsnum.h" |
73 | #include "jsobj.h" |
74 | #include "jsparse.h" |
75 | #include "jsscope.h" |
76 | #include "jsscript.h" |
77 | #include "jsstaticcheck.h" |
78 | #include "jsstr.h" |
79 | #include "jstracer.h" |
80 | |
81 | #if JS_HAS_XML_SUPPORT |
82 | #include "jsxml.h" |
83 | #endif |
84 | |
85 | /* |
86 | * Check if posix_memalign is available. |
87 | */ |
88 | #if _POSIX_C_SOURCE >= 200112L || _XOPEN_SOURCE >= 600 || MOZ_MEMORY |
89 | # define HAS_POSIX_MEMALIGN 1 |
90 | #else |
91 | # define HAS_POSIX_MEMALIGN 0 |
92 | #endif |
93 | |
94 | /* |
95 | * jemalloc provides posix_memalign. |
96 | */ |
97 | #ifdef MOZ_MEMORY |
98 | extern "C" { |
99 | #include "../../memory/jemalloc/jemalloc.h" |
100 | } |
101 | #endif |
102 | |
103 | /* |
104 | * Include the headers for mmap unless we have posix_memalign and do not |
105 | * insist on mmap. |
106 | */ |
107 | #if JS_GC_USE_MMAP || (!defined JS_GC_USE_MMAP && !HAS_POSIX_MEMALIGN) |
108 | # if defined(XP_WIN) |
109 | # ifndef JS_GC_USE_MMAP |
110 | # define JS_GC_USE_MMAP 1 |
111 | # endif |
112 | # include <windows.h> |
113 | # else |
114 | # if defined(XP_UNIX) || defined(XP_BEOS) |
115 | # include <unistd.h> |
116 | # endif |
117 | # if _POSIX_MAPPED_FILES > 0 |
118 | # ifndef JS_GC_USE_MMAP |
119 | # define JS_GC_USE_MMAP 1 |
120 | # endif |
121 | # include <sys/mman.h> |
122 | |
123 | /* On Mac OS X MAP_ANONYMOUS is not defined. */ |
124 | # if !defined(MAP_ANONYMOUS) && defined(MAP_ANON) |
125 | # define MAP_ANONYMOUS MAP_ANON |
126 | # endif |
127 | # if !defined(MAP_ANONYMOUS) |
128 | # define MAP_ANONYMOUS 0 |
129 | # endif |
130 | # else |
131 | # if JS_GC_USE_MMAP |
132 | # error "JS_GC_USE_MMAP is set when mmap is not available" |
133 | # endif |
134 | # endif |
135 | # endif |
136 | #endif |
137 | |
138 | /* |
139 |  * Check that JSTempValueUnion has the size of jsval and void * so we can |
140 |  * reinterpret jsval as a void * GC-thing pointer and use JSTVU_SINGLE for |
141 | * different GC-things. |
142 | */ |
143 | JS_STATIC_ASSERT(sizeof(JSTempValueUnion) == sizeof(jsval)); |
144 | JS_STATIC_ASSERT(sizeof(JSTempValueUnion) == sizeof(void *)); |
145 | |
146 | |
147 | /* |
148 | * Check that JSTRACE_XML follows JSTRACE_OBJECT, JSTRACE_DOUBLE and |
149 | * JSTRACE_STRING. |
150 | */ |
151 | JS_STATIC_ASSERT(JSTRACE_OBJECT == 0); |
152 | JS_STATIC_ASSERT(JSTRACE_DOUBLE == 1); |
153 | JS_STATIC_ASSERT(JSTRACE_STRING == 2); |
154 | JS_STATIC_ASSERT(JSTRACE_XML == 3); |
155 | |
156 | /* |
157 | * JS_IS_VALID_TRACE_KIND assumes that JSTRACE_STRING is the last non-xml |
158 | * trace kind when JS_HAS_XML_SUPPORT is false. |
159 | */ |
160 | JS_STATIC_ASSERT(JSTRACE_STRING + 1 == JSTRACE_XML); |
161 | |
162 | /* |
163 | * The number of used GCX-types must stay within GCX_LIMIT. |
164 | */ |
165 | JS_STATIC_ASSERT(GCX_NTYPES <= GCX_LIMIT); |
166 | |
167 | |
168 | /* |
169 | * Check that we can reinterpret double as JSGCDoubleCell. |
170 | */ |
171 | JS_STATIC_ASSERT(sizeof(JSGCDoubleCell) == sizeof(double)); |
172 | |
173 | /* |
174 | * Check that we can use memset(p, 0, ...) to implement JS_CLEAR_WEAK_ROOTS. |
175 | */ |
176 | JS_STATIC_ASSERT(JSVAL_NULL == 0); |
177 | |
178 | |
179 | /* |
180 | * A GC arena contains a fixed number of flag bits for each thing in its heap, |
181 | * and supports O(1) lookup of a flag given its thing's address. |
182 | * |
183 | * To implement this, we allocate things of the same size from a GC arena |
184 | * containing GC_ARENA_SIZE bytes aligned on GC_ARENA_SIZE boundary. The |
185 | * following picture shows arena's layout: |
186 | * |
187 | * +------------------------------+--------------------+---------------+ |
188 | * | allocation area for GC thing | flags of GC things | JSGCArenaInfo | |
189 | * +------------------------------+--------------------+---------------+ |
190 | * |
191 | * To find the flag bits for the thing we calculate the thing index counting |
192 | * from arena's start using: |
193 | * |
194 | * thingIndex = (thingAddress & GC_ARENA_MASK) / thingSize |
195 | * |
196 |  * The details of the flag lookup depend on the thing's kind. For all GC |
197 |  * things except doubles we use one byte of flags where 4 bits determine the |
198 |  * thing's type and the rest is used to implement GC marking, finalization |
199 |  * and locking. We calculate the address of the flag byte using: |
200 | * |
201 | * flagByteAddress = |
202 | * (thingAddress | GC_ARENA_MASK) - sizeof(JSGCArenaInfo) - thingIndex |
203 | * |
204 | * where |
205 | * |
206 | * (thingAddress | GC_ARENA_MASK) - sizeof(JSGCArenaInfo) |
207 | * |
208 | * is the last byte of flags' area. |
209 | * |
210 | * This implies that the things are allocated from the start of their area and |
211 | * flags are allocated from the end. This arrangement avoids a relatively |
212 | * expensive calculation of the location of the boundary separating things and |
213 | * flags. The boundary's offset from the start of the arena is given by: |
214 | * |
215 | * thingsPerArena * thingSize |
216 | * |
217 | * where thingsPerArena is the number of things that the arena can hold: |
218 | * |
219 | * (GC_ARENA_SIZE - sizeof(JSGCArenaInfo)) / (thingSize + 1). |
220 | * |
221 | * To allocate doubles we use a specialized arena. It can contain only numbers |
222 | * so we do not need the type bits. Moreover, since the doubles do not require |
223 | * a finalizer and very few of them are locked via js_LockGCThing API, we use |
224 | * just one bit of flags per double to denote if it was marked during the |
225 | * marking phase of the GC. The locking is implemented via a hash table. Thus |
226 | * for doubles the flag area becomes a bitmap. |
227 | * |
228 |  * The JS_GC_USE_MMAP macro governs the choice of the aligned arena allocator. |
229 | * When it is true, a platform-dependent function like mmap is used to get |
230 | * memory aligned on CPU page boundaries. If the macro is false or undefined, |
231 | * posix_memalign is used when available. Otherwise the code uses malloc to |
232 | * over-allocate a chunk with js_gcArenasPerChunk aligned arenas. The |
233 | * approximate space overhead of this is 1/js_gcArenasPerChunk. For details, |
234 | * see NewGCChunk/DestroyGCChunk below. |
235 | * |
236 | * The code also allocates arenas in chunks when JS_GC_USE_MMAP is 1 to |
237 |  * minimize the overhead of mmap/munmap. In this case js_gcArenasPerChunk |
238 |  * cannot be a compile-time constant as the system page size is not known |
239 |  * until runtime. |
240 | */ |
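
/*
 * [Editor's illustrative sketch, not part of the original source.] The flag
 * lookup described above, written as a plain function instead of the
 * THING_TO_FLAGP macro defined further below. The function name is
 * hypothetical; the code uses GC_ARENA_MASK and JSGCArenaInfo, which are
 * defined just below, and is compiled out.
 */
#if 0
static uint8 *
ExampleThingToFlagByte(void *thing, size_t thingSize)
{
    jsuword thingAddress = (jsuword) thing;
    uint32 thingIndex = (uint32) ((thingAddress & GC_ARENA_MASK) / thingSize);

    /* The last byte of the flags area, counting back by the thing index. */
    return (uint8 *) ((thingAddress | GC_ARENA_MASK) -
                      sizeof(JSGCArenaInfo) - thingIndex);
}
#endif
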
241 | #if JS_GC_USE_MMAP |
242 | static uint32 js_gcArenasPerChunk = 0; |
243 | static JSBool js_gcUseMmap = JS_FALSE; |
244 | #elif HAS_POSIX_MEMALIGN |
245 | # define js_gcArenasPerChunk 1 |
246 | #else |
247 | # define js_gcArenasPerChunk 7 |
248 | #endif |
249 | |
250 | #if defined(js_gcArenasPerChunk) && js_gcArenasPerChunk == 1 |
251 | # define CHUNKED_ARENA_ALLOCATION 0 |
252 | #else |
253 | # define CHUNKED_ARENA_ALLOCATION 1 |
254 | #endif |
255 | |
256 | #define GC_ARENA_SHIFT 12 |
257 | #define GC_ARENA_MASK ((jsuword) JS_BITMASK(GC_ARENA_SHIFT)) |
258 | #define GC_ARENA_SIZE JS_BIT(GC_ARENA_SHIFT) |
259 | |
260 | /* |
261 | * JS_GC_ARENA_PAD defines the number of bytes to pad JSGCArenaInfo structure. |
262 | * It is used to improve allocation efficiency when using posix_memalign. If |
263 | * malloc's implementation uses internal headers, then calling |
264 | * |
265 | * posix_memalign(&p, GC_ARENA_SIZE, GC_ARENA_SIZE * js_gcArenasPerChunk) |
266 | * |
267 | * in a sequence leaves holes between allocations of the size GC_ARENA_SIZE |
268 | * due to the need to fit headers. JS_GC_ARENA_PAD mitigates that so the code |
269 | * calls |
270 | * |
271 | * posix_memalign(&p, GC_ARENA_SIZE, |
272 | * GC_ARENA_SIZE * js_gcArenasPerChunk - JS_GC_ARENA_PAD) |
273 | * |
274 |  * When JS_GC_ARENA_PAD is equal to or greater than the size of the system |
275 |  * header, the system can pack all allocations together without holes. |
276 |  * |
277 |  * When posix_memalign is used we want at least a 2-word pad, unless it |
278 |  * comes from jemalloc, which does not use any headers/trailers. |
279 | */ |
280 | #ifndef JS_GC_ARENA_PAD |
281 | # if HAS_POSIX_MEMALIGN && !MOZ_MEMORY |
282 | # define JS_GC_ARENA_PAD (2 * JS_BYTES_PER_WORD) |
283 | # else |
284 | # define JS_GC_ARENA_PAD 0 |
285 | # endif |
286 | #endif |
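
/*
 * [Editor's worked example, assuming a malloc implementation with a typical
 * two-word allocation header.] On a 32-bit system JS_GC_ARENA_PAD == 8, so
 * each request is for GC_ARENA_SIZE * js_gcArenasPerChunk - 8 bytes. The
 * 8-byte header plus the payload then total an exact multiple of
 * GC_ARENA_SIZE, letting consecutive posix_memalign calls pack without
 * GC_ARENA_SIZE-sized holes.
 */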
287 | |
288 | struct JSGCArenaInfo { |
289 | /* |
290 | * Allocation list for the arena or NULL if the arena holds double values. |
291 | */ |
292 | JSGCArenaList *list; |
293 | |
294 | /* |
295 | * Pointer to the previous arena in a linked list. The arena can either |
296 | * belong to one of JSContext.gcArenaList lists or, when it does not have |
297 | * any allocated GC things, to the list of free arenas in the chunk with |
298 | * head stored in JSGCChunkInfo.lastFreeArena. |
299 | */ |
300 | JSGCArenaInfo *prev; |
301 | |
302 | #if !CHUNKED_ARENA_ALLOCATION |
303 | jsuword prevUntracedPage; |
304 | #else |
305 | /* |
306 | * A link field for the list of arenas with marked but not yet traced |
307 | * things. The field is encoded as arena's page to share the space with |
308 | * firstArena and arenaIndex fields. |
309 | */ |
310 | jsuword prevUntracedPage : JS_BITS_PER_WORD - GC_ARENA_SHIFT; |
311 | |
312 | /* |
313 |      * When firstArena is false, the index of the arena in the chunk. When |
314 |      * firstArena is true, the index of a free arena holding JSGCChunkInfo, or |
315 |      * NO_FREE_ARENAS if there are no free arenas in the chunk. |
316 |      * |
317 |      * GET_ARENA_INDEX and GET_CHUNK_INFO_INDEX are convenience macros to |
318 |      * access either index. |
319 | */ |
320 | jsuword arenaIndex : GC_ARENA_SHIFT - 1; |
321 | |
322 | /* Flag indicating if the arena is the first in the chunk. */ |
323 | jsuword firstArena : 1; |
324 | #endif |
325 | |
326 | union { |
327 | jsuword untracedThings; /* bitset for fast search of marked |
328 | but not yet traced things */ |
329 | JSBool hasMarkedDoubles; /* the arena has marked doubles */ |
330 | } u; |
331 | |
332 | #if JS_GC_ARENA_PAD != 0 |
333 | uint8 pad[JS_GC_ARENA_PAD]; |
334 | #endif |
335 | }; |
336 | |
337 | /* |
338 | * Verify that the bit fields are indeed shared and JSGCArenaInfo is as small |
339 |  * as possible. The code does not rely on this check, so if it does not |
340 |  * compile on a particular platform, comment the assert out as a workaround |
341 |  * and submit a bug report. |
342 | */ |
343 | JS_STATIC_ASSERT(offsetof(JSGCArenaInfo, u) == 3 * sizeof(jsuword)); |
344 | |
345 | /* |
346 | * Macros to convert between JSGCArenaInfo, the start address of the arena and |
347 | * arena's page defined as (start address) >> GC_ARENA_SHIFT. |
348 | */ |
349 | #define ARENA_INFO_OFFSET (GC_ARENA_SIZE - (uint32) sizeof(JSGCArenaInfo)) |
350 | |
351 | #define IS_ARENA_INFO_ADDRESS(arena) \ |
352 | (((jsuword) (arena) & GC_ARENA_MASK) == ARENA_INFO_OFFSET) |
353 | |
354 | #define ARENA_START_TO_INFO(arenaStart) \ |
355 | (JS_ASSERT(((arenaStart) & (jsuword) GC_ARENA_MASK) == 0), \ |
356 | (JSGCArenaInfo *) ((arenaStart) + (jsuword) ARENA_INFO_OFFSET)) |
357 | |
358 | #define ARENA_INFO_TO_START(arena) \ |
359 | (JS_ASSERT(IS_ARENA_INFO_ADDRESS(arena)), \ |
360 | (jsuword) (arena) & ~(jsuword) GC_ARENA_MASK) |
361 | |
362 | #define ARENA_PAGE_TO_INFO(arenaPage) \ |
363 | (JS_ASSERT(arenaPage != 0), \ |
364 | JS_ASSERT(!((jsuword)(arenaPage) >> (JS_BITS_PER_WORD-GC_ARENA_SHIFT))), \ |
365 | ARENA_START_TO_INFO((arenaPage) << GC_ARENA_SHIFT)) |
366 | |
367 | #define ARENA_INFO_TO_PAGE(arena) \ |
368 | (JS_ASSERT(IS_ARENA_INFO_ADDRESS(arena)), \ |
369 | ((jsuword) (arena) >> GC_ARENA_SHIFT)) |
370 | |
371 | #define GET_ARENA_INFO(chunk, index) \ |
372 | (JS_ASSERT((index) < js_gcArenasPerChunk), \ |
373 | ARENA_START_TO_INFO(chunk + ((index) << GC_ARENA_SHIFT))) |
374 | |
375 | #if CHUNKED_ARENA_ALLOCATION |
376 | /* |
377 | * Definitions for allocating arenas in chunks. |
378 | * |
379 | * All chunks that have at least one free arena are put on the doubly-linked |
380 | * list with the head stored in JSRuntime.gcChunkList. JSGCChunkInfo contains |
381 | * the head of the chunk's free arena list together with the link fields for |
382 | * gcChunkList. |
383 | * |
384 |  * The structure is stored in one of the chunk's free arenas, and |
385 |  * GET_CHUNK_INFO_INDEX gives the index of this arena. When all arenas in the |
386 |  * chunk are used, the chunk is removed from the list and the index is set to |
387 |  * NO_FREE_ARENAS: the chunk is not on gcChunkList and has no JSGCChunkInfo. |
388 | */ |
389 | |
390 | struct JSGCChunkInfo { |
391 | JSGCChunkInfo **prevp; |
392 | JSGCChunkInfo *next; |
393 | JSGCArenaInfo *lastFreeArena; |
394 | uint32 numFreeArenas; |
395 | }; |
396 | |
397 | #define NO_FREE_ARENAS JS_BITMASK(GC_ARENA_SHIFT - 1) |
398 | |
399 | #ifdef js_gcArenasPerChunk |
400 | JS_STATIC_ASSERT(1 <= js_gcArenasPerChunk && |
401 | js_gcArenasPerChunk <= NO_FREE_ARENAS); |
402 | #endif |
403 | |
404 | #define GET_ARENA_CHUNK(arena, index) \ |
405 | (JS_ASSERT(GET_ARENA_INDEX(arena) == index), \ |
406 | ARENA_INFO_TO_START(arena) - ((index) << GC_ARENA_SHIFT)) |
407 | |
408 | #define GET_ARENA_INDEX(arena) \ |
409 | ((arena)->firstArena ? 0 : (uint32) (arena)->arenaIndex) |
410 | |
411 | #define GET_CHUNK_INFO_INDEX(chunk) \ |
412 | ((uint32) ARENA_START_TO_INFO(chunk)->arenaIndex) |
413 | |
414 | #define SET_CHUNK_INFO_INDEX(chunk, index) \ |
415 | (JS_ASSERT((index) < js_gcArenasPerChunk || (index) == NO_FREE_ARENAS), \ |
416 | (void) (ARENA_START_TO_INFO(chunk)->arenaIndex = (jsuword) (index))) |
417 | |
418 | #define GET_CHUNK_INFO(chunk, infoIndex) \ |
419 | (JS_ASSERT(GET_CHUNK_INFO_INDEX(chunk) == (infoIndex)), \ |
420 | JS_ASSERT((uint32) (infoIndex) < js_gcArenasPerChunk), \ |
421 | (JSGCChunkInfo *) ((chunk) + ((infoIndex) << GC_ARENA_SHIFT))) |
422 | |
423 | #define CHUNK_INFO_TO_INDEX(ci) \ |
424 | GET_ARENA_INDEX(ARENA_START_TO_INFO((jsuword)ci)) |
425 | |
426 | #endif |
427 | |
428 | /* |
429 | * Macros for GC-thing operations. |
430 | */ |
431 | #define THINGS_PER_ARENA(thingSize) \ |
432 | ((GC_ARENA_SIZE - (uint32) sizeof(JSGCArenaInfo)) / ((thingSize) + 1U)) |
433 | |
434 | #define THING_TO_ARENA(thing) \ |
435 | ((JSGCArenaInfo *)(((jsuword) (thing) | GC_ARENA_MASK) + \ |
436 | 1 - sizeof(JSGCArenaInfo))) |
437 | |
438 | #define THING_TO_INDEX(thing, thingSize) \ |
439 | ((uint32) ((jsuword) (thing) & GC_ARENA_MASK) / (uint32) (thingSize)) |
440 | |
441 | #define THING_FLAGS_END(arena) ((uint8 *)(arena)) |
442 | |
443 | #define THING_FLAGP(arena, thingIndex) \ |
444 | (JS_ASSERT((jsuword) (thingIndex) \ |
445 | < (jsuword) THINGS_PER_ARENA((arena)->list->thingSize)), \ |
446 | (uint8 *)(arena) - 1 - (thingIndex)) |
447 | |
448 | #define THING_TO_FLAGP(thing, thingSize) \ |
449 | THING_FLAGP(THING_TO_ARENA(thing), THING_TO_INDEX(thing, thingSize)) |
450 | |
451 | #define FLAGP_TO_ARENA(flagp) THING_TO_ARENA(flagp) |
452 | |
453 | #define FLAGP_TO_INDEX(flagp) \ |
454 | (JS_ASSERT(((jsuword) (flagp) & GC_ARENA_MASK) < ARENA_INFO_OFFSET), \ |
455 | (ARENA_INFO_OFFSET - 1 - (uint32) ((jsuword) (flagp) & GC_ARENA_MASK))) |
456 | |
457 | #define FLAGP_TO_THING(flagp, thingSize) \ |
458 | (JS_ASSERT(((jsuword) (flagp) & GC_ARENA_MASK) >= \ |
459 | (ARENA_INFO_OFFSET - THINGS_PER_ARENA(thingSize))), \ |
460 | (JSGCThing *)(((jsuword) (flagp) & ~GC_ARENA_MASK) + \ |
461 | (thingSize) * FLAGP_TO_INDEX(flagp))) |
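
/*
 * [Editor's worked example, assuming a 32-bit build with JS_GC_ARENA_PAD == 0,
 * so that sizeof(JSGCArenaInfo) == 16.] For thingSize == 16:
 *
 *   THINGS_PER_ARENA(16) == (4096 - 16) / 17 == 240
 *
 * things per arena, giving 240 * 16 == 3840 bytes of things, 240 flag bytes
 * and the 16-byte JSGCArenaInfo: 3840 + 240 + 16 == 4096. The flag byte for
 * the thing at index i then lives at arena offset 4080 - 1 - i, which is
 * exactly what THING_FLAGP computes.
 */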
462 | |
463 | /* |
464 | * Macros for the specialized arena for doubles. |
465 | * |
466 | * DOUBLES_PER_ARENA defines the maximum number of doubles that the arena can |
467 |  * hold. We derive it as follows. Let n be the number of doubles in the |
468 |  * arena. Together with the bitmap of mark flags and JSGCArenaInfo they must |
469 |  * fit in the arena. Hence DOUBLES_PER_ARENA, or n_max, is the maximum value |
470 |  * of n for which the following holds: |
471 | * |
472 | * n*s + ceil(n/B) <= M (1) |
473 | * |
474 | * where "/" denotes normal real division, |
475 | * ceil(r) gives the least integer not smaller than the number r, |
476 | * s is the number of words in jsdouble, |
477 |  *   B is the number of bits per word, or B == JS_BITS_PER_WORD, |
478 |  *   M is the number of words in the arena before JSGCArenaInfo, or |
479 |  *   M == (GC_ARENA_SIZE - sizeof(JSGCArenaInfo)) / sizeof(jsuword) |
480 |  *     == ARENA_INFO_OFFSET / sizeof(jsuword). |
481 | * |
482 | * We rewrite the inequality as |
483 | * |
484 | * n*B*s/B + ceil(n/B) <= M, |
485 | * ceil(n*B*s/B + n/B) <= M, |
486 | * ceil(n*(B*s + 1)/B) <= M (2) |
487 | * |
488 | * We define a helper function e(n, s, B), |
489 | * |
490 | * e(n, s, B) := ceil(n*(B*s + 1)/B) - n*(B*s + 1)/B, 0 <= e(n, s, B) < 1. |
491 | * |
492 | * It gives: |
493 | * |
494 | * n*(B*s + 1)/B + e(n, s, B) <= M, |
495 | * n + e*B/(B*s + 1) <= M*B/(B*s + 1) |
496 | * |
497 | * We apply the floor function to both sides of the last equation, where |
498 | * floor(r) gives the biggest integer not greater than r. As a consequence we |
499 | * have: |
500 | * |
501 | * floor(n + e*B/(B*s + 1)) <= floor(M*B/(B*s + 1)), |
502 | * n + floor(e*B/(B*s + 1)) <= floor(M*B/(B*s + 1)), |
503 | * n <= floor(M*B/(B*s + 1)), (3) |
504 | * |
505 | * where floor(e*B/(B*s + 1)) is zero as e*B/(B*s + 1) < B/(B*s + 1) < 1. |
506 |  * Thus any n that satisfies the original constraint (1), or its equivalent |
507 |  * (2), must also satisfy (3). That is, we have an upper estimate for the |
508 |  * maximum value of n. Let's show that this upper estimate, |
509 | * |
510 | * floor(M*B/(B*s + 1)), (4) |
511 | * |
512 | * also satisfies (1) and, as such, gives the required maximum value. |
513 | * Substituting it into (2) gives: |
514 | * |
515 | * ceil(floor(M*B/(B*s + 1))*(B*s + 1)/B) == ceil(floor(M/X)*X) |
516 | * |
517 | * where X == (B*s + 1)/B > 1. But then floor(M/X)*X <= M/X*X == M and |
518 | * |
519 | * ceil(floor(M/X)*X) <= ceil(M) == M. |
520 | * |
521 | * Thus the value of (4) gives the maximum n satisfying (1). |
522 | * |
523 | * For the final result we observe that in (4) |
524 | * |
525 | * M*B == ARENA_INFO_OFFSET / sizeof(jsuword) * JS_BITS_PER_WORD |
526 | * == ARENA_INFO_OFFSET * JS_BITS_PER_BYTE |
527 | * |
528 | * and |
529 | * |
530 | * B*s == JS_BITS_PER_WORD * sizeof(jsdouble) / sizeof(jsuword) |
531 | * == JS_BITS_PER_DOUBLE. |
532 | */ |
533 | #define DOUBLES_PER_ARENA \ |
534 | ((ARENA_INFO_OFFSET * JS_BITS_PER_BYTE) / (JS_BITS_PER_DOUBLE + 1)) |
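
/*
 * [Editor's worked example, assuming a 32-bit build with JS_GC_ARENA_PAD == 0,
 * so that ARENA_INFO_OFFSET == 4096 - 16 == 4080.] Then
 *
 *   DOUBLES_PER_ARENA == (4080 * 8) / (64 + 1) == 32640 / 65 == 502.
 *
 * Checking (1): 502 doubles occupy 502 * 8 == 4016 bytes and need
 * ceil(502/32) == 16 bitmap words == 64 bytes, and 4016 + 64 == 4080 <= 4080,
 * while 503 doubles would need 4024 + 64 == 4088 > 4080 bytes. On a 64-bit
 * build the same computation gives 500 doubles per arena.
 */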
535 | |
536 | /* |
537 |  * Check that sizeof(jsuword) divides ARENA_INFO_OFFSET and sizeof(jsdouble). |
538 | */ |
539 | JS_STATIC_ASSERT(ARENA_INFO_OFFSET % sizeof(jsuword) == 0); |
540 | JS_STATIC_ASSERT(sizeof(jsdouble) % sizeof(jsuword) == 0); |
541 | JS_STATIC_ASSERT(sizeof(jsbitmap) == sizeof(jsuword)); |
542 | |
543 | #define DOUBLES_ARENA_BITMAP_WORDS \ |
544 | (JS_HOWMANY(DOUBLES_PER_ARENA, JS_BITS_PER_WORD)) |
545 | |
546 | /* Check that DOUBLES_PER_ARENA indeed maximises (1). */ |
547 | JS_STATIC_ASSERT(DOUBLES_PER_ARENA * sizeof(jsdouble) + |
548 | DOUBLES_ARENA_BITMAP_WORDS * sizeof(jsuword) <= |
549 | ARENA_INFO_OFFSET); |
550 | |
551 | JS_STATIC_ASSERT((DOUBLES_PER_ARENA + 1) * sizeof(jsdouble) + |
552 | sizeof(jsuword) * |
553 | JS_HOWMANY((DOUBLES_PER_ARENA + 1), JS_BITS_PER_WORD) > |
554 | ARENA_INFO_OFFSET); |
555 | |
556 | /* |
557 |  * When DOUBLES_PER_ARENA % JS_BITS_PER_WORD != 0, some bits in the |
558 |  * last word of the occupation bitmap are unused. |
559 | */ |
560 | #define UNUSED_DOUBLE_BITMAP_BITS \ |
561 | (DOUBLES_ARENA_BITMAP_WORDS * JS_BITS_PER_WORD - DOUBLES_PER_ARENA) |
562 | |
563 | JS_STATIC_ASSERT(UNUSED_DOUBLE_BITMAP_BITS < JS_BITS_PER_WORD); |
564 | |
565 | #define DOUBLES_ARENA_BITMAP_OFFSET \ |
566 | (ARENA_INFO_OFFSET - DOUBLES_ARENA_BITMAP_WORDS * sizeof(jsuword)) |
567 | |
568 | #define CHECK_DOUBLE_ARENA_INFO(arenaInfo) \ |
569 | (JS_ASSERT(IS_ARENA_INFO_ADDRESS(arenaInfo)), \ |
570 | JS_ASSERT(!(arenaInfo)->list)) \ |
571 | |
572 | /* |
573 | * Get the start of the bitmap area containing double mark flags in the arena. |
574 | * To access the flag the code uses |
575 | * |
576 | * JS_TEST_BIT(bitmapStart, index) |
577 | * |
578 | * That is, compared with the case of arenas with non-double things, we count |
579 | * flags from the start of the bitmap area, not from the end. |
580 | */ |
581 | #define DOUBLE_ARENA_BITMAP(arenaInfo) \ |
582 | (CHECK_DOUBLE_ARENA_INFO(arenaInfo), \ |
583 | (jsbitmap *) arenaInfo - DOUBLES_ARENA_BITMAP_WORDS) |
584 | |
585 | #define DOUBLE_THING_TO_INDEX(thing) \ |
586 | (CHECK_DOUBLE_ARENA_INFO(THING_TO_ARENA(thing)), \ |
587 | JS_ASSERT(((jsuword) (thing) & GC_ARENA_MASK) < \ |
588 | DOUBLES_ARENA_BITMAP_OFFSET), \ |
589 | ((uint32) (((jsuword) (thing) & GC_ARENA_MASK) / sizeof(jsdouble)))) |
590 | |
591 | static void |
592 | ClearDoubleArenaFlags(JSGCArenaInfo *a) |
593 | { |
594 | jsbitmap *bitmap, mask; |
595 | uintN nused; |
596 | |
597 | /* |
598 | * When some high bits in the last byte of the double occupation bitmap |
599 | * are unused, we must set them. Otherwise RefillDoubleFreeList will |
600 |      * assume that they correspond to some free cells and try to allocate |
601 | * them. |
602 | * |
603 | * Note that the code works correctly with UNUSED_DOUBLE_BITMAP_BITS == 0. |
604 | */ |
605 | bitmap = DOUBLE_ARENA_BITMAP(a); |
606 | memset(bitmap, 0, (DOUBLES_ARENA_BITMAP_WORDS - 1) * sizeof *bitmap); |
607 | mask = ((jsbitmap) 1 << UNUSED_DOUBLE_BITMAP_BITS) - 1; |
608 | nused = JS_BITS_PER_WORD - UNUSED_DOUBLE_BITMAP_BITS; |
609 | bitmap[DOUBLES_ARENA_BITMAP_WORDS - 1] = mask << nused; |
610 | } |
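
/*
 * [Editor's worked example.] With JS_BITS_PER_WORD == 32 and
 * DOUBLES_PER_ARENA == 502 (see above), DOUBLES_ARENA_BITMAP_WORDS == 16 and
 * UNUSED_DOUBLE_BITMAP_BITS == 16 * 32 - 502 == 10, so
 *
 *   mask  == ((jsbitmap) 1 << 10) - 1 == 0x3FF,
 *   nused == 32 - 10 == 22,
 *
 * and bitmap[15] == 0x3FF << 22 == 0xFFC00000: the 10 high bits that do not
 * correspond to any double cell stay permanently "marked".
 */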
611 | |
612 | static JS_ALWAYS_INLINE JSBool |
613 | IsMarkedDouble(JSGCArenaInfo *a, uint32 index) |
614 | { |
615 | jsbitmap *bitmap; |
616 | |
617 | JS_ASSERT(a->u.hasMarkedDoubles); |
618 | bitmap = DOUBLE_ARENA_BITMAP(a); |
619 | return JS_TEST_BIT(bitmap, index); |
620 | } |
621 | |
622 | /* |
623 | * JSRuntime.gcDoubleArenaList.nextDoubleFlags points either to: |
624 | * |
625 |  *   1. The next word in the bitmap area for doubles to check for unmarked |
626 | * (or free) doubles. |
627 | * 2. Or to the end of the bitmap area when all GC cells of the arena are |
628 | * allocated. |
629 | * 3. Or to a special sentinel value indicating that there are no arenas |
630 | * to check for unmarked doubles. |
631 | * |
632 | * We set the sentinel to ARENA_INFO_OFFSET so the single check |
633 | * |
634 | * ((jsuword) nextDoubleFlags & GC_ARENA_MASK) == ARENA_INFO_OFFSET |
635 | * |
636 | * will cover both the second and the third cases. |
637 | */ |
638 | #define DOUBLE_BITMAP_SENTINEL ((jsbitmap *) ARENA_INFO_OFFSET) |
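
/*
 * [Editor's note.] The single check suffices because the bitmap area ends
 * exactly at offset ARENA_INFO_OFFSET within its arena (see
 * DOUBLES_ARENA_BITMAP_OFFSET above), so an exhausted nextDoubleFlags has
 * that offset (case 2), while the sentinel itself is the value
 * ARENA_INFO_OFFSET, which is less than GC_ARENA_SIZE, so its low
 * GC_ARENA_SHIFT bits also equal ARENA_INFO_OFFSET (case 3).
 */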
639 | |
640 | #ifdef JS_THREADSAFE |
641 | /* |
642 |  * The maximum number of things to put on the local free list, taking |
643 |  * several things at a time from the global free list or from the tail of |
644 |  * the last allocated arena, to amortize the cost of rt->gcLock. |
645 |  * |
646 |  * We use 8 based on the benchmarks from bug 312238. |
647 | */ |
648 | #define MAX_THREAD_LOCAL_THINGS 8 |
649 | |
650 | #endif |
651 | |
652 | JS_STATIC_ASSERT(sizeof(JSStackHeader) >= 2 * sizeof(jsval)); |
653 | |
654 | JS_STATIC_ASSERT(sizeof(JSGCThing) >= sizeof(JSString)); |
655 | JS_STATIC_ASSERT(sizeof(JSGCThing) >= sizeof(jsdouble)); |
656 | |
657 | /* We want to use all the available GC thing space for the object's slots. */ |
658 | JS_STATIC_ASSERT(sizeof(JSObject) % sizeof(JSGCThing) == 0); |
659 | |
660 | /* |
661 |  * Ensure that JSObject is allocated from a different GC list than jsdouble |
662 |  * and JSString so we can easily finalize JSObjects before these two |
663 |  * types of GC things. See comments in js_GC. |
664 | */ |
665 | JS_STATIC_ASSERT(GC_FREELIST_INDEX(sizeof(JSString)) != |
666 | GC_FREELIST_INDEX(sizeof(JSObject))); |
667 | JS_STATIC_ASSERT(GC_FREELIST_INDEX(sizeof(jsdouble)) != |
668 | GC_FREELIST_INDEX(sizeof(JSObject))); |
669 | |
670 | /* |
671 | * JSPtrTable capacity growth descriptor. The table grows by powers of two |
672 |  * starting from capacity JSPtrTableInfo.minCapacity, but switches to linear |
673 | * growth when capacity reaches JSPtrTableInfo.linearGrowthThreshold. |
674 | */ |
675 | typedef struct JSPtrTableInfo { |
676 | uint16 minCapacity; |
677 | uint16 linearGrowthThreshold; |
678 | } JSPtrTableInfo; |
679 | |
680 | #define GC_ITERATOR_TABLE_MIN 4 |
681 | #define GC_ITERATOR_TABLE_LINEAR 1024 |
682 | |
683 | static const JSPtrTableInfo iteratorTableInfo = { |
684 | GC_ITERATOR_TABLE_MIN, |
685 | GC_ITERATOR_TABLE_LINEAR |
686 | }; |
687 | |
688 | /* Calculate table capacity based on the current value of JSPtrTable.count. */ |
689 | static size_t |
690 | PtrTableCapacity(size_t count, const JSPtrTableInfo *info) |
691 | { |
692 | size_t linear, log, capacity; |
693 | |
694 | linear = info->linearGrowthThreshold; |
695 | JS_ASSERT(info->minCapacity <= linear); |
696 | |
697 | if (count == 0) { |
698 | capacity = 0; |
699 | } else if (count < linear) { |
700 | log = JS_CEILING_LOG2W(count); |
701 | JS_ASSERT(log != JS_BITS_PER_WORD); |
702 | capacity = (size_t)1 << log; |
703 | if (capacity < info->minCapacity) |
704 | capacity = info->minCapacity; |
705 | } else { |
706 | capacity = JS_ROUNDUP(count, linear); |
707 | } |
708 | |
709 | JS_ASSERT(capacity >= count); |
710 | return capacity; |
711 | } |
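
/*
 * [Editor's illustrative sketch, not part of the original source.] Capacity
 * progression computed by PtrTableCapacity for the iterator table parameters
 * (minCapacity 4, linearGrowthThreshold 1024):
 *
 *   count:    0   1   3   4   5   9   600    1024   1025   2500
 *   capacity: 0   4   4   4   8   16  1024   1024   2048   3072
 */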
712 | |
713 | static void |
714 | FreePtrTable(JSPtrTable *table, const JSPtrTableInfo *info) |
715 | { |
716 | if (table->array) { |
717 | JS_ASSERT(table->count > 0); |
718 | free(table->array); |
719 | table->array = NULL; |
720 | table->count = 0; |
721 | } |
722 | JS_ASSERT(table->count == 0); |
723 | } |
724 | |
725 | static JSBool |
726 | AddToPtrTable(JSContext *cx, JSPtrTable *table, const JSPtrTableInfo *info, |
727 | void *ptr) |
728 | { |
729 | size_t count, capacity; |
730 | void **array; |
731 | |
732 | count = table->count; |
733 | capacity = PtrTableCapacity(count, info); |
734 | |
735 | if (count == capacity) { |
736 | if (capacity < info->minCapacity) { |
737 | JS_ASSERT(capacity == 0); |
738 | JS_ASSERT(!table->array); |
739 | capacity = info->minCapacity; |
740 | } else { |
741 | /* |
742 |              * Simplify the overflow detection by assuming a pointer is |
743 |              * bigger than a byte. |
744 | */ |
745 | JS_STATIC_ASSERT(2 <= sizeof table->array[0]); |
746 | capacity = (capacity < info->linearGrowthThreshold) |
747 | ? 2 * capacity |
748 | : capacity + info->linearGrowthThreshold; |
749 | if (capacity > (size_t)-1 / sizeof table->array[0]) |
750 | goto bad; |
751 | } |
752 | array = (void **) realloc(table->array, |
753 | capacity * sizeof table->array[0]); |
754 | if (!array) |
755 | goto bad; |
756 | #ifdef DEBUG |
757 | memset(array + count, JS_FREE_PATTERN, |
758 | (capacity - count) * sizeof table->array[0]); |
759 | #endif |
760 | table->array = array; |
761 | } |
762 | |
763 | table->array[count] = ptr; |
764 | table->count = count + 1; |
765 | |
766 | return JS_TRUE; |
767 | |
768 | bad: |
769 | JS_ReportOutOfMemory(cx); |
770 | return JS_FALSE; |
771 | } |
772 | |
773 | static void |
774 | ShrinkPtrTable(JSPtrTable *table, const JSPtrTableInfo *info, |
775 | size_t newCount) |
776 | { |
777 | size_t oldCapacity, capacity; |
778 | void **array; |
779 | |
780 | JS_ASSERT(newCount <= table->count); |
781 | if (newCount == table->count) |
782 | return; |
783 | |
784 | oldCapacity = PtrTableCapacity(table->count, info); |
785 | table->count = newCount; |
786 | capacity = PtrTableCapacity(newCount, info); |
787 | |
788 | if (oldCapacity != capacity) { |
789 | array = table->array; |
790 | JS_ASSERT(array); |
791 | if (capacity == 0) { |
792 | free(array); |
793 | table->array = NULL; |
794 | return; |
795 | } |
796 | array = (void **) realloc(array, capacity * sizeof array[0]); |
797 | if (array) |
798 | table->array = array; |
799 | } |
800 | #ifdef DEBUG |
801 | memset(table->array + newCount, JS_FREE_PATTERN, |
802 | (capacity - newCount) * sizeof table->array[0]); |
803 | #endif |
804 | } |
805 | |
806 | #ifdef JS_GCMETER |
807 | # define METER(x) ((void) (x)) |
808 | # define METER_IF(condition, x) ((void) ((condition) && (x))) |
809 | #else |
810 | # define METER(x) ((void) 0) |
811 | # define METER_IF(condition, x) ((void) 0) |
812 | #endif |
813 | |
814 | #define METER_UPDATE_MAX(maxLval, rval) \ |
815 | METER_IF((maxLval) < (rval), (maxLval) = (rval)) |
816 | |
817 | #if JS_GC_USE_MMAP || !HAS_POSIX_MEMALIGN |
818 | |
819 | /* |
820 | * For chunks allocated via over-sized malloc, get a pointer to store the gap |
821 | * between the malloc's result and the first arena in the chunk. |
822 | */ |
823 | static uint32 * |
824 | GetMallocedChunkGapPtr(jsuword chunk) |
825 | { |
826 | JS_ASSERT((chunk & GC_ARENA_MASK) == 0); |
827 | |
828 | /* Use the memory after the chunk, see NewGCChunk for details. */ |
829 | return (uint32 *) (chunk + (js_gcArenasPerChunk << GC_ARENA_SHIFT)); |
830 | } |
831 | |
832 | #endif |
833 | |
834 | static jsuword |
835 | NewGCChunk(void) |
836 | { |
837 | void *p; |
838 | |
839 | #if JS_GC_USE_MMAP |
840 | if (js_gcUseMmap) { |
841 | # if defined(XP_WIN) |
842 | p = VirtualAlloc(NULL, js_gcArenasPerChunk << GC_ARENA_SHIFT, |
843 | MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE); |
844 | return (jsuword) p; |
845 | # else |
846 | p = mmap(NULL, js_gcArenasPerChunk << GC_ARENA_SHIFT, |
847 | PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); |
848 | return (p == MAP_FAILED) ? 0 : (jsuword) p; |
849 | # endif |
850 | } |
851 | #endif |
852 | |
853 | #if HAS_POSIX_MEMALIGN |
854 | if (0 != posix_memalign(&p, GC_ARENA_SIZE, |
855 | GC_ARENA_SIZE * js_gcArenasPerChunk - |
856 | JS_GC_ARENA_PAD)) { |
857 | return 0; |
858 | } |
859 | return (jsuword) p; |
860 | #else |
861 | /* |
862 | * Implement chunk allocation using oversized malloc if mmap and |
863 | * posix_memalign are not available. |
864 | * |
865 | * Since malloc allocates pointers aligned on the word boundary, to get |
866 | * js_gcArenasPerChunk aligned arenas, we need to malloc only |
867 | * |
868 | * ((js_gcArenasPerChunk + 1) << GC_ARENA_SHIFT) - sizeof(size_t) |
869 | * |
870 |      * bytes. But since we store, right after the chunk, the gap between the |
871 |      * malloced pointer and the chunk's first arena, we need to ask for |
872 | * |
873 | * ((js_gcArenasPerChunk + 1) << GC_ARENA_SHIFT) |
874 | * |
875 | * bytes to ensure that we always have room to store the gap. |
876 | */ |
877 | p = malloc((js_gcArenasPerChunk + 1) << GC_ARENA_SHIFT); |
878 | if (!p) |
879 | return 0; |
880 | |
881 | { |
882 | jsuword chunk; |
883 | |
884 | chunk = ((jsuword) p + GC_ARENA_MASK) & ~GC_ARENA_MASK; |
885 | *GetMallocedChunkGapPtr(chunk) = (uint32) (chunk - (jsuword) p); |
886 | return chunk; |
887 | } |
888 | #endif |
889 | } |
890 | |
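/*
 * [Editor's worked example for the malloc fallback in NewGCChunk.] Suppose
 * malloc returns p == 0x20ABC8. Rounding up to the arena boundary gives
 *
 *   chunk == (0x20ABC8 + 0xFFF) & ~0xFFF == 0x20B000,
 *
 * and the gap 0x20B000 - 0x20ABC8 == 0x438 is stored in the uint32 right
 * after the chunk's last arena, where DestroyGCChunk below finds it to
 * recover the original pointer for free().
 */
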
891 | static void |
892 | DestroyGCChunk(jsuword chunk) |
893 | { |
894 | JS_ASSERT((chunk & GC_ARENA_MASK) == 0); |
895 | #if JS_GC_USE_MMAP |
896 | if (js_gcUseMmap) { |
897 | # if defined(XP_WIN) |
898 | VirtualFree((void *) chunk, 0, MEM_RELEASE); |
899 | # elif defined(SOLARIS) |
900 | munmap((char *) chunk, js_gcArenasPerChunk << GC_ARENA_SHIFT); |
901 | # else |
902 | munmap((void *) chunk, js_gcArenasPerChunk << GC_ARENA_SHIFT); |
903 | # endif |
904 | return; |
905 | } |
906 | #endif |
907 | |
908 | #if HAS_POSIX_MEMALIGN |
909 | free((void *) chunk); |
910 | #else |
911 | /* See comments in NewGCChunk. */ |
912 | JS_ASSERT(*GetMallocedChunkGapPtr(chunk) < GC_ARENA_SIZE); |
913 | free((void *) (chunk - *GetMallocedChunkGapPtr(chunk))); |
914 | #endif |
915 | } |
916 | |
917 | #if CHUNKED_ARENA_ALLOCATION |
918 | |
919 | static void |
920 | AddChunkToList(JSRuntime *rt, JSGCChunkInfo *ci) |
921 | { |
922 | ci->prevp = &rt->gcChunkList; |
923 | ci->next = rt->gcChunkList; |
924 | if (rt->gcChunkList) { |
925 | JS_ASSERT(rt->gcChunkList->prevp == &rt->gcChunkList); |
926 | rt->gcChunkList->prevp = &ci->next; |
927 | } |
928 | rt->gcChunkList = ci; |
929 | } |
930 | |
931 | static void |
932 | RemoveChunkFromList(JSRuntime *rt, JSGCChunkInfo *ci) |
933 | { |
934 | *ci->prevp = ci->next; |
935 | if (ci->next) { |
936 | JS_ASSERT(ci->next->prevp == &ci->next); |
937 | ci->next->prevp = ci->prevp; |
938 | } |
939 | } |
940 | |
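/*
 * [Editor's note.] ci->prevp points at the previous node's next field, or at
 * rt->gcChunkList itself for the head: the classic pointer-to-pointer idiom
 * that lets RemoveChunkFromList unlink in O(1) without a special case for
 * the list head.
 */
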
941 | #endif |
942 | |
943 | static JSGCArenaInfo * |
944 | NewGCArena(JSRuntime *rt) |
945 | { |
946 | jsuword chunk; |
947 | JSGCArenaInfo *a; |
948 | |
949 | if (rt->gcBytes >= rt->gcMaxBytes) |
950 | return NULL; |
951 | |
952 | #if CHUNKED_ARENA_ALLOCATION |
953 | if (js_gcArenasPerChunk == 1) { |
954 | #endif |
955 | chunk = NewGCChunk(); |
956 | if (chunk == 0) |
957 | return NULL; |
958 | a = ARENA_START_TO_INFO(chunk); |
959 | #if CHUNKED_ARENA_ALLOCATION |
960 | } else { |
961 | JSGCChunkInfo *ci; |
962 | uint32 i; |
963 | JSGCArenaInfo *aprev; |
964 | |
965 | ci = rt->gcChunkList; |
966 | if (!ci) { |
967 | chunk = NewGCChunk(); |
968 | if (chunk == 0) |
969 | return NULL; |
970 | JS_ASSERT((chunk & GC_ARENA_MASK) == 0); |
971 | a = GET_ARENA_INFO(chunk, 0); |
972 | a->firstArena = JS_TRUE; |
973 | a->arenaIndex = 0; |
974 | aprev = NULL; |
975 | i = 0; |
976 | do { |
977 | a->prev = aprev; |
978 | aprev = a; |
979 | ++i; |
980 | a = GET_ARENA_INFO(chunk, i); |
981 | a->firstArena = JS_FALSE; |
982 | a->arenaIndex = i; |
983 | } while (i != js_gcArenasPerChunk - 1); |
984 | ci = GET_CHUNK_INFO(chunk, 0); |
985 | ci->lastFreeArena = aprev; |
986 | ci->numFreeArenas = js_gcArenasPerChunk - 1; |
987 | AddChunkToList(rt, ci); |
988 | } else { |
989 | JS_ASSERT(ci->prevp == &rt->gcChunkList); |
990 | a = ci->lastFreeArena; |
991 | aprev = a->prev; |
992 | if (!aprev) { |
993 | JS_ASSERT(ci->numFreeArenas == 1); |
994 | JS_ASSERT(ARENA_INFO_TO_START(a) == (jsuword) ci); |
995 | RemoveChunkFromList(rt, ci); |
996 | chunk = GET_ARENA_CHUNK(a, GET_ARENA_INDEX(a)); |
997 | SET_CHUNK_INFO_INDEX(chunk, NO_FREE_ARENAS); |
998 | } else { |
999 | JS_ASSERT(ci->numFreeArenas >= 2); |
1000 | JS_ASSERT(ARENA_INFO_TO_START(a) != (jsuword) ci); |
1001 | ci->lastFreeArena = aprev; |
1002 | ci->numFreeArenas--; |
1003 | } |
1004 | } |
1005 | } |
1006 | #endif |
1007 | |
1008 | rt->gcBytes += GC_ARENA_SIZE; |
1009 | a->prevUntracedPage = 0; |
1010 | memset(&a->u, 0, sizeof(a->u)); |
1011 | |
1012 | return a; |
1013 | } |
1014 | |
1015 | static void |
1016 | DestroyGCArenas(JSRuntime *rt, JSGCArenaInfo *last) |
1017 | { |
1018 | JSGCArenaInfo *a; |
1019 | |
1020 | while (last) { |
1021 | a = last; |
1022 | last = last->prev; |
1023 | |
1024 | METER(rt->gcStats.afree++); |
1025 | JS_ASSERT(rt->gcBytes >= GC_ARENA_SIZE); |
1026 | rt->gcBytes -= GC_ARENA_SIZE; |
1027 | |
1028 | #if CHUNKED_ARENA_ALLOCATION |
1029 | if (js_gcArenasPerChunk == 1) { |
1030 | #endif |
1031 | DestroyGCChunk(ARENA_INFO_TO_START(a)); |
1032 | #if CHUNKED_ARENA_ALLOCATION |
1033 | } else { |
1034 | uint32 arenaIndex; |
1035 | jsuword chunk; |
1036 | uint32 chunkInfoIndex; |
1037 | JSGCChunkInfo *ci; |
1038 | # ifdef DEBUG |
1039 | jsuword firstArena; |
1040 | |
1041 | firstArena = a->firstArena; |
1042 | arenaIndex = a->arenaIndex; |
1043 | memset((void *) ARENA_INFO_TO_START(a), JS_FREE_PATTERN, |
1044 | GC_ARENA_SIZE - JS_GC_ARENA_PAD); |
1045 | a->firstArena = firstArena; |
1046 | a->arenaIndex = arenaIndex; |
1047 | # endif |
1048 | arenaIndex = GET_ARENA_INDEX(a); |
1049 | chunk = GET_ARENA_CHUNK(a, arenaIndex); |
1050 | chunkInfoIndex = GET_CHUNK_INFO_INDEX(chunk); |
1051 | if (chunkInfoIndex == NO_FREE_ARENAS) { |
1052 | chunkInfoIndex = arenaIndex; |
1053 | SET_CHUNK_INFO_INDEX(chunk, arenaIndex); |
1054 | ci = GET_CHUNK_INFO(chunk, chunkInfoIndex); |
1055 | a->prev = NULL; |
1056 | ci->lastFreeArena = a; |
1057 | ci->numFreeArenas = 1; |
1058 | AddChunkToList(rt, ci); |
1059 | } else { |
1060 | JS_ASSERT(chunkInfoIndex != arenaIndex); |
1061 | ci = GET_CHUNK_INFO(chunk, chunkInfoIndex); |
1062 | JS_ASSERT(ci->numFreeArenas != 0); |
1063 | JS_ASSERT(ci->lastFreeArena); |
1064 | JS_ASSERT(a != ci->lastFreeArena); |
1065 | if (ci->numFreeArenas == js_gcArenasPerChunk - 1) { |
1066 | RemoveChunkFromList(rt, ci); |
1067 | DestroyGCChunk(chunk); |
1068 | } else { |
1069 | ++ci->numFreeArenas; |
1070 | a->prev = ci->lastFreeArena; |
1071 | ci->lastFreeArena = a; |
1072 | } |
1073 | } |
1074 | } |
1075 | # endif |
1076 | } |
1077 | } |
1078 | |
1079 | static void |
1080 | InitGCArenaLists(JSRuntime *rt) |
1081 | { |
1082 | uintN i, thingSize; |
1083 | JSGCArenaList *arenaList; |
1084 | |
1085 | for (i = 0; i < GC_NUM_FREELISTS; i++) { |
1086 | arenaList = &rt->gcArenaList[i]; |
1087 | thingSize = GC_FREELIST_NBYTES(i); |
1088 | JS_ASSERT((size_t)(uint16)thingSize == thingSize); |
1089 | arenaList->last = NULL; |
1090 | arenaList->lastCount = (uint16) THINGS_PER_ARENA(thingSize); |
1091 | arenaList->thingSize = (uint16) thingSize; |
1092 | arenaList->freeList = NULL; |
1093 | } |
1094 | rt->gcDoubleArenaList.first = NULL; |
1095 | rt->gcDoubleArenaList.nextDoubleFlags = DOUBLE_BITMAP_SENTINEL; |
1096 | } |
1097 | |
1098 | static void |
1099 | FinishGCArenaLists(JSRuntime *rt) |
1100 | { |
1101 | uintN i; |
1102 | JSGCArenaList *arenaList; |
1103 | |
1104 | for (i = 0; i < GC_NUM_FREELISTS; i++) { |
1105 | arenaList = &rt->gcArenaList[i]; |
1106 | DestroyGCArenas(rt, arenaList->last); |
1107 | arenaList->last = NULL; |
1108 | arenaList->lastCount = THINGS_PER_ARENA(arenaList->thingSize); |
1109 | arenaList->freeList = NULL; |
1110 | } |
1111 | DestroyGCArenas(rt, rt->gcDoubleArenaList.first); |
1112 | rt->gcDoubleArenaList.first = NULL; |
1113 | rt->gcDoubleArenaList.nextDoubleFlags = DOUBLE_BITMAP_SENTINEL; |
1114 | |
1115 | rt->gcBytes = 0; |
1116 | JS_ASSERT(rt->gcChunkList == 0); |
1117 | } |
1118 | |
1119 | /* |
1120 |  * This function must not be called when the thing is a jsdouble. |
1121 | */ |
1122 | static uint8 * |
1123 | GetGCThingFlags(void *thing) |
1124 | { |
1125 | JSGCArenaInfo *a; |
1126 | uint32 index; |
1127 | |
1128 | a = THING_TO_ARENA(thing); |
1129 | index = THING_TO_INDEX(thing, a->list->thingSize); |
1130 | return THING_FLAGP(a, index); |
1131 | } |
1132 | |
1133 | /* |
1134 |  * This function returns null when the thing is a jsdouble. |
1135 | */ |
1136 | static uint8 * |
1137 | GetGCThingFlagsOrNull(void *thing) |
1138 | { |
1139 | JSGCArenaInfo *a; |
1140 | uint32 index; |
1141 | |
1142 | a = THING_TO_ARENA(thing); |
1143 | if (!a->list) |
1144 | return NULL; |
1145 | index = THING_TO_INDEX(thing, a->list->thingSize); |
1146 | return THING_FLAGP(a, index); |
1147 | } |
1148 | |
1149 | intN |
1150 | js_GetExternalStringGCType(JSString *str) |
1151 | { |
1152 | uintN type; |
1153 | |
1154 | type = (uintN) *GetGCThingFlags(str) & GCF_TYPEMASK; |
1155 | JS_ASSERT(type == GCX_STRING || type >= GCX_EXTERNAL_STRING); |
1156 | return (type == GCX_STRING) ? -1 : (intN) (type - GCX_EXTERNAL_STRING); |
1157 | } |
1158 | |
1159 | static uint32 |
1160 | MapGCFlagsToTraceKind(uintN flags) |
1161 | { |
1162 | uint32 type; |
1163 | |
1164 | type = flags & GCF_TYPEMASK; |
1165 | JS_ASSERT(type != GCX_DOUBLE); |
1166 | JS_ASSERT(type < GCX_NTYPES); |
1167 | return (type < GCX_EXTERNAL_STRING) ? type : JSTRACE_STRING; |
1168 | } |
1169 | |
1170 | JS_FRIEND_API(uint32) |
1171 | js_GetGCThingTraceKind(void *thing) |
1172 | { |
1173 | JSGCArenaInfo *a; |
1174 | uint32 index; |
1175 | |
1176 | a = THING_TO_ARENA(thing); |
1177 | if (!a->list) |
1178 | return JSTRACE_DOUBLE; |
1179 | |
1180 | index = THING_TO_INDEX(thing, a->list->thingSize); |
1181 | return MapGCFlagsToTraceKind(*THING_FLAGP(a, index)); |
1182 | } |
1183 | |
1184 | JSRuntime* |
1185 | js_GetGCStringRuntime(JSString *str) |
1186 | { |
1187 | JSGCArenaList *list; |
1188 | |
1189 | list = THING_TO_ARENA(str)->list; |
1190 | |
1191 | JS_ASSERT(list->thingSize == sizeof(JSGCThing)); |
1192 | JS_ASSERT(GC_FREELIST_INDEX(sizeof(JSGCThing)) == 0); |
1193 | |
1194 | return (JSRuntime *)((uint8 *)list - offsetof(JSRuntime, gcArenaList)); |
1195 | } |
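
/*
 * [Editor's note.] This works because the asserts above pin a string-sized
 * thing to free list 0, so list == &rt->gcArenaList[0], and subtracting
 * offsetof(JSRuntime, gcArenaList) recovers the owning JSRuntime.
 */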
1196 | |
1197 | JSBool |
1198 | js_IsAboutToBeFinalized(JSContext *cx, void *thing) |
1199 | { |
1200 | JSGCArenaInfo *a; |
1201 | uint32 index, flags; |
1202 | |
1203 | a = THING_TO_ARENA(thing); |
1204 | if (!a->list) { |
1205 | /* |
1206 |          * Check if the arena has no marked doubles. In that case the mark |
1207 |          * bitmap contains garbage, as it is initialized only when the first |
1208 |          * double in the arena is marked. |
1209 | */ |
1210 | if (!a->u.hasMarkedDoubles) |
1211 | return JS_TRUE; |
1212 | index = DOUBLE_THING_TO_INDEX(thing); |
1213 | return !IsMarkedDouble(a, index); |
1214 | } |
1215 | index = THING_TO_INDEX(thing, a->list->thingSize); |
1216 | flags = *THING_FLAGP(a, index); |
1217 | return !(flags & (GCF_MARK | GCF_LOCK | GCF_FINAL)); |
1218 | } |
1219 | |
1220 | /* This is compatible with JSDHashEntryStub. */ |
1221 | typedef struct JSGCRootHashEntry { |
1222 | JSDHashEntryHdr hdr; |
1223 | void *root; |
1224 | const char *name; |
1225 | } JSGCRootHashEntry; |
1226 | |
1227 | /* Initial size of the gcRootsHash table (SWAG, small enough to amortize). */ |
1228 | #define GC_ROOTS_SIZE 256 |
1229 | |
1230 | #if CHUNKED_ARENA_ALLOCATION |
1231 | |
1232 | /* |
1233 |  * For a CPU with extremely large pages, using them for GC things wastes |
1234 |  * too much memory. |
1235 | */ |
1236 | # define GC_ARENAS_PER_CPU_PAGE_LIMIT JS_BIT(18 - GC_ARENA_SHIFT) |
1237 | |
1238 | JS_STATIC_ASSERT(GC_ARENAS_PER_CPU_PAGE_LIMIT <= NO_FREE_ARENAS); |
1239 | |
1240 | #endif |
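
/*
 * [Editor's worked example.] With GC_ARENA_SHIFT == 12 the limit is
 * JS_BIT(6) == 64 arenas, i.e. pages up to 256 KB still use mmap. On a
 * typical 4 KB-page system arenasPerPage == 1 and js_InitGC below picks
 * js_gcUseMmap == JS_TRUE with js_gcArenasPerChunk == JS_MAX(1, 4) == 4
 * (16 KB chunks); with hypothetical 1 MB pages arenasPerPage == 256 and the
 * code falls back to malloc with 7 arenas per chunk. Note that the
 * "arenasPerPage - 1 <=" form of the test also rejects arenasPerPage == 0
 * (a page smaller than an arena) via unsigned wraparound.
 */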
1241 | |
1242 | JSBool |
1243 | js_InitGC(JSRuntime *rt, uint32 maxbytes) |
1244 | { |
1245 | #if JS_GC_USE_MMAP |
1246 | if (js_gcArenasPerChunk == 0) { |
1247 | size_t cpuPageSize, arenasPerPage; |
1248 | # if defined(XP_WIN) |
1249 | SYSTEM_INFO si; |
1250 | |
1251 | GetSystemInfo(&si); |
1252 | cpuPageSize = si.dwPageSize; |
1253 | |
1254 | # elif defined(XP_UNIX) || defined(XP_BEOS) |
1255 | cpuPageSize = (size_t) sysconf(_SC_PAGESIZE); |
1256 | # else |
1257 | # error "Not implemented" |
1258 | # endif |
1259 | /* cpuPageSize is a power of 2. */ |
1260 | JS_ASSERT((cpuPageSize & (cpuPageSize - 1)) == 0); |
1261 | arenasPerPage = cpuPageSize >> GC_ARENA_SHIFT; |
1262 | #ifdef DEBUG |
1263 | if (arenasPerPage == 0) { |
1264 | fprintf(stderr, |
1265 |                     "JS engine warning: the size of the CPU page, %u bytes, is too small to use\n" |
1266 | "paged allocation for the garbage collector. Please report this.\n", |
1267 | (unsigned) cpuPageSize); |
1268 | } |
1269 | #endif |
1270 | if (arenasPerPage - 1 <= (size_t) (GC_ARENAS_PER_CPU_PAGE_LIMIT - 1)) { |
1271 | /* |
1272 | * Use at least 4 GC arenas per paged allocation chunk to minimize |
1273 | * the overhead of mmap/VirtualAlloc. |
1274 | */ |
1275 | js_gcUseMmap = JS_TRUE; |
1276 | js_gcArenasPerChunk = JS_MAX((uint32) arenasPerPage, 4); |
1277 | } else { |
1278 | js_gcUseMmap = JS_FALSE; |
1279 | js_gcArenasPerChunk = 7; |
1280 | } |
1281 | } |
1282 | JS_ASSERT(1 <= js_gcArenasPerChunk && |
1283 | js_gcArenasPerChunk <= NO_FREE_ARENAS); |
1284 | #endif |
1285 | |
1286 | InitGCArenaLists(rt); |
1287 | if (!JS_DHashTableInit(&rt->gcRootsHash, JS_DHashGetStubOps(), NULL, |
1288 | sizeof(JSGCRootHashEntry), GC_ROOTS_SIZE)) { |
1289 | rt->gcRootsHash.ops = NULL; |
1290 | return JS_FALSE; |
1291 | } |
1292 | rt->gcLocksHash = NULL; /* create lazily */ |
1293 | |
1294 | /* |
1295 | * Separate gcMaxMallocBytes from gcMaxBytes but initialize to maxbytes |
1296 | * for default backward API compatibility. |
1297 | */ |
1298 | rt->gcMaxBytes = rt->gcMaxMallocBytes = maxbytes; |
1299 | rt->gcEmptyArenaPoolLifespan = 30000; |
1300 | |
1301 | /* |
1302 |      * By default the trigger factor gets the maximum possible value. This |
1303 |      * means that the GC will not be triggered by growth of GC memory (gcBytes). |
1304 | */ |
1305 | rt->gcTriggerFactor = (uint32) -1; |
1306 | |
1307 | /* |
1308 |      * The assigned value prevents the GC from running when too little GC |
1309 |      * memory has been allocated (e.g. during JS engine startup). |
1310 | */ |
1311 | rt->gcLastBytes = 8192; |
1312 | |
1313 | METER(memset(&rt->gcStats, 0, sizeof rt->gcStats)); |
1314 | return JS_TRUE; |
1315 | } |
1316 | |
1317 | #ifdef JS_GCMETER |
1318 | |
1319 | static void |
1320 | UpdateArenaStats(JSGCArenaStats *st, uint32 nlivearenas, uint32 nkilledArenas, |
1321 | uint32 nthings) |
1322 | { |
1323 | size_t narenas; |
1324 | |
1325 | narenas = nlivearenas + nkilledArenas; |
1326 | JS_ASSERT(narenas >= st->livearenas); |
1327 | |
1328 | st->newarenas = narenas - st->livearenas; |
1329 | st->narenas = narenas; |
1330 | st->livearenas = nlivearenas; |
1331 | if (st->maxarenas < narenas) |
1332 | st->maxarenas = narenas; |
1333 | st->totalarenas += narenas; |
1334 | |
1335 | st->nthings = nthings; |
1336 | if (st->maxthings < nthings) |
1337 | st->maxthings = nthings; |
1338 | st->totalthings += nthings; |
1339 | } |
1340 | |
1341 | JS_FRIEND_API(void) |
1342 | js_DumpGCStats(JSRuntime *rt, FILE *fp) |
1343 | { |
1344 | int i; |
1345 | size_t sumArenas, sumTotalArenas; |
1346 | size_t sumThings, sumMaxThings; |
1347 | size_t sumThingSize, sumTotalThingSize; |
1348 | size_t sumArenaCapacity, sumTotalArenaCapacity; |
1349 | JSGCArenaStats *st; |
1350 | size_t thingSize, thingsPerArena; |
1351 | size_t sumAlloc, sumLocalAlloc, sumFail, sumRetry; |
1352 | |
1353 | fprintf(fp, "\nGC allocation statistics:\n"); |
1354 | |
1355 | #define UL(x) ((unsigned long)(x)) |
1356 | #define ULSTAT(x) UL(rt->gcStats.x) |
1357 | #define PERCENT(x,y) (100.0 * (double) (x) / (double) (y)) |
1358 | |
1359 | sumArenas = 0; |
1360 | sumTotalArenas = 0; |
1361 | sumThings = 0; |
1362 | sumMaxThings = 0; |
1363 | sumThingSize = 0; |
1364 | sumTotalThingSize = 0; |
1365 | sumArenaCapacity = 0; |
1366 | sumTotalArenaCapacity = 0; |
1367 | sumAlloc = 0; |
1368 | sumLocalAlloc = 0; |
1369 | sumFail = 0; |
1370 | sumRetry = 0; |
1371 | for (i = -1; i < (int) GC_NUM_FREELISTS; i++) { |
1372 | if (i == -1) { |
1373 | thingSize = sizeof(jsdouble); |
1374 | thingsPerArena = DOUBLES_PER_ARENA; |
1375 | st = &rt->gcStats.doubleArenaStats; |
1376 | fprintf(fp, |
1377 | "Arena list for double values (%lu doubles per arena):", |
1378 | UL(thingsPerArena)); |
1379 | } else { |
1380 | thingSize = rt->gcArenaList[i].thingSize; |
1381 | thingsPerArena = THINGS_PER_ARENA(thingSize); |
1382 | st = &rt->gcStats.arenaStats[i]; |
1383 | fprintf(fp, |
1384 | "Arena list %d (thing size %lu, %lu things per arena):", |
1385 | i, UL(GC_FREELIST_NBYTES(i)), UL(thingsPerArena)); |
1386 | } |
1387 | if (st->maxarenas == 0) { |
1388 | fputs(" NEVER USED\n", fp); |
1389 | continue; |
1390 | } |
1391 | putc('\n', fp); |
1392 | fprintf(fp, " arenas before GC: %lu\n", UL(st->narenas)); |
1393 | fprintf(fp, " new arenas before GC: %lu (%.1f%%)\n", |
1394 | UL(st->newarenas), PERCENT(st->newarenas, st->narenas)); |
1395 | fprintf(fp, " arenas after GC: %lu (%.1f%%)\n", |
1396 | UL(st->livearenas), PERCENT(st->livearenas, st->narenas)); |
1397 | fprintf(fp, " max arenas: %lu\n", UL(st->maxarenas)); |
1398 | fprintf(fp, " things: %lu\n", UL(st->nthings)); |
1399 | fprintf(fp, " GC cell utilization: %.1f%%\n", |
1400 | PERCENT(st->nthings, thingsPerArena * st->narenas)); |
1401 | fprintf(fp, " average cell utilization: %.1f%%\n", |
1402 | PERCENT(st->totalthings, thingsPerArena * st->totalarenas)); |
1403 | fprintf(fp, " max things: %lu\n", UL(st->maxthings)); |
1404 | fprintf(fp, " alloc attempts: %lu\n", UL(st->alloc)); |
1405 |     fprintf(fp, "    alloc without locks:  %lu  (%.1f%%)\n", |
1406 | UL(st->localalloc), PERCENT(st->localalloc, st->alloc)); |
1407 | sumArenas += st->narenas; |
1408 | sumTotalArenas += st->totalarenas; |
1409 | sumThings += st->nthings; |
1410 | sumMaxThings += st->maxthings; |
1411 | sumThingSize += thingSize * st->nthings; |
1412 | sumTotalThingSize += thingSize * st->totalthings; |
1413 | sumArenaCapacity += thingSize * thingsPerArena * st->narenas; |
1414 | sumTotalArenaCapacity += thingSize * thingsPerArena * st->totalarenas; |
1415 | sumAlloc += st->alloc; |
1416 | sumLocalAlloc += st->localalloc; |
1417 | sumFail += st->fail; |
1418 | sumRetry += st->retry; |
1419 | } |
1420 | fprintf(fp, "TOTAL STATS:\n"); |
1421 | fprintf(fp, " bytes allocated: %lu\n", UL(rt->gcBytes)); |
1422 | fprintf(fp, " total GC arenas: %lu\n", UL(sumArenas)); |
1423 | fprintf(fp, " total GC things: %lu\n", UL(sumThings)); |
1424 | fprintf(fp, " max total GC things: %lu\n", UL(sumMaxThings)); |
1425 | fprintf(fp, " GC cell utilization: %.1f%%\n", |
1426 | PERCENT(sumThingSize, sumArenaCapacity)); |
1427 | fprintf(fp, " average cell utilization: %.1f%%\n", |
1428 | PERCENT(sumTotalThingSize, sumTotalArenaCapacity)); |
1429 | fprintf(fp, "allocation retries after GC: %lu\n", UL(sumRetry)); |
1430 | fprintf(fp, " alloc attempts: %lu\n", UL(sumAlloc)); |
1431 |     fprintf(fp, "       alloc without locks: %lu  (%.1f%%)\n", |
1432 | UL(sumLocalAlloc), PERCENT(sumLocalAlloc, sumAlloc)); |
1433 | fprintf(fp, " allocation failures: %lu\n", UL(sumFail)); |
1434 | fprintf(fp, " things born locked: %lu\n", ULSTAT(lockborn)); |
1435 | fprintf(fp, " valid lock calls: %lu\n", ULSTAT(lock)); |
1436 | fprintf(fp, " valid unlock calls: %lu\n", ULSTAT(unlock)); |
1437 | fprintf(fp, " mark recursion depth: %lu\n", ULSTAT(depth)); |
1438 | fprintf(fp, " maximum mark recursion: %lu\n", ULSTAT(maxdepth)); |
1439 | fprintf(fp, " mark C recursion depth: %lu\n", ULSTAT(cdepth)); |
1440 | fprintf(fp, " maximum mark C recursion: %lu\n", ULSTAT(maxcdepth)); |
1441 | fprintf(fp, " delayed tracing calls: %lu\n", ULSTAT(untraced)); |
1442 | #ifdef DEBUG |
1443 | fprintf(fp, " max trace later count: %lu\n", ULSTAT(maxuntraced)); |
1444 | #endif |
1445 | fprintf(fp, " maximum GC nesting level: %lu\n", ULSTAT(maxlevel)); |
1446 | fprintf(fp, "potentially useful GC calls: %lu\n", ULSTAT(poke)); |
1447 | fprintf(fp, " thing arenas freed so far: %lu\n", ULSTAT(afree)); |
1448 | fprintf(fp, " stack segments scanned: %lu\n", ULSTAT(stackseg)); |
1449 | fprintf(fp, "stack segment slots scanned: %lu\n", ULSTAT(segslots)); |
1450 | fprintf(fp, "reachable closeable objects: %lu\n", ULSTAT(nclose)); |
1451 | fprintf(fp, " max reachable closeable: %lu\n", ULSTAT(maxnclose)); |
1452 | fprintf(fp, " scheduled close hooks: %lu\n", ULSTAT(closelater)); |
1453 | fprintf(fp, " max scheduled close hooks: %lu\n", ULSTAT(maxcloselater)); |
1454 | |
1455 | #undef UL |
1456 | #undef ULSTAT |
1457 | #undef PERCENT |
1458 | |
1459 | #ifdef JS_ARENAMETER |
1460 | JS_DumpArenaStats(fp); |
1461 | #endif |
1462 | } |
1463 | #endif |
1464 | |
1465 | #ifdef DEBUG |
1466 | static void |
1467 | CheckLeakedRoots(JSRuntime *rt); |
1468 | #endif |
1469 | |
1470 | #ifdef JS_THREADSAFE |
1471 | static void |
1472 | TrimGCFreeListsPool(JSRuntime *rt, uintN keepCount); |
1473 | #endif |
1474 | |
1475 | void |
1476 | js_FinishGC(JSRuntime *rt) |
1477 | { |
1478 | #ifdef JS_ARENAMETER |
1479 | JS_DumpArenaStats(stdout); |
1480 | #endif |
1481 | #ifdef JS_GCMETER |
1482 | js_DumpGCStats(rt, stdout); |
1483 | #endif |
1484 | |
1485 | FreePtrTable(&rt->gcIteratorTable, &iteratorTableInfo); |
1486 | #ifdef JS_THREADSAFE |
1487 | TrimGCFreeListsPool(rt, 0); |
1488 | JS_ASSERT(!rt->gcFreeListsPool); |
1489 | #endif |
1490 | FinishGCArenaLists(rt); |
1491 | |
1492 | if (rt->gcRootsHash.ops) { |
1493 | #ifdef DEBUG |
1494 | CheckLeakedRoots(rt); |
1495 | #endif |
1496 | JS_DHashTableFinish(&rt->gcRootsHash); |
1497 | rt->gcRootsHash.ops = NULL; |
1498 | } |
1499 | if (rt->gcLocksHash) { |
1500 | JS_DHashTableDestroy(rt->gcLocksHash); |
1501 | rt->gcLocksHash = NULL; |
1502 | } |
1503 | } |
1504 | |
1505 | JSBool |
1506 | js_AddRoot(JSContext *cx, void *rp, const char *name) |
1507 | { |
1508 | JSBool ok = js_AddRootRT(cx->runtime, rp, name); |
1509 | if (!ok) |
1510 | JS_ReportOutOfMemory(cx); |
1511 | return ok; |
1512 | } |
1513 | |
1514 | JSBool |
1515 | js_AddRootRT(JSRuntime *rt, void *rp, const char *name) |
1516 | { |
1517 | JSBool ok; |
1518 | JSGCRootHashEntry *rhe; |
1519 | |
1520 | /* |
1521 | * Due to the long-standing, but now removed, use of rt->gcLock across the |
1522 | * bulk of js_GC, API users have come to depend on JS_AddRoot etc. locking |
1523 | * properly with a racing GC, without calling JS_AddRoot from a request. |
1524 | * We have to preserve API compatibility here, now that we avoid holding |
1525 | * rt->gcLock across the mark phase (including the root hashtable mark). |
1526 | */ |
1527 | JS_LOCK_GC(rt); |
1528 | js_WaitForGC(rt); |
1529 | rhe = (JSGCRootHashEntry *) |
1530 | JS_DHashTableOperate(&rt->gcRootsHash, rp, JS_DHASH_ADD); |
1531 | if (rhe) { |
1532 | rhe->root = rp; |
1533 | rhe->name = name; |
1534 | ok = JS_TRUE; |
1535 | } else { |
1536 | ok = JS_FALSE; |
1537 | } |
1538 | JS_UNLOCK_GC(rt); |
1539 | return ok; |
1540 | } |
1541 | |
1542 | JSBool |
1543 | js_RemoveRoot(JSRuntime *rt, void *rp) |
1544 | { |
1545 | /* |
1546 | * Due to the JS_RemoveRootRT API, we may be called outside of a request. |
1547 | * Same synchronization drill as above in js_AddRoot. |
1548 | */ |
1549 | JS_LOCK_GC(rt); |
1550 | js_WaitForGC(rt); |
1551 | (void) JS_DHashTableOperate(&rt->gcRootsHash, rp, JS_DHASH_REMOVE); |
1552 | rt->gcPoke = JS_TRUE; |
1553 | JS_UNLOCK_GC(rt); |
1554 | return JS_TRUE; |
1555 | } |
1556 | |
1557 | #ifdef DEBUG |
1558 | |
1559 | static JSDHashOperator |
1560 | js_root_printer(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 i, void *arg) |
1561 | { |
1562 | uint32 *leakedroots = (uint32 *)arg; |
1563 | JSGCRootHashEntry *rhe = (JSGCRootHashEntry *)hdr; |
1564 | |
1565 | (*leakedroots)++; |
1566 | fprintf(stderr, |
1567 | "JS engine warning: leaking GC root \'%s\' at %p\n", |
1568 | rhe->name ? (char *)rhe->name : "", rhe->root); |
1569 | |
1570 | return JS_DHASH_NEXT; |
1571 | } |
1572 | |
1573 | static void |
1574 | CheckLeakedRoots(JSRuntime *rt) |
1575 | { |
1576 | uint32 leakedroots = 0; |
1577 | |
1578 |     /* In debug builds, warn (but don't assert) about any remaining roots. */
1579 | JS_DHashTableEnumerate(&rt->gcRootsHash, js_root_printer, |
1580 | &leakedroots); |
1581 | if (leakedroots > 0) { |
1582 | if (leakedroots == 1) { |
1583 | fprintf(stderr, |
1584 | "JS engine warning: 1 GC root remains after destroying the JSRuntime at %p.\n" |
1585 | " This root may point to freed memory. Objects reachable\n" |
1586 | " through it have not been finalized.\n", |
1587 | (void *) rt); |
1588 | } else { |
1589 | fprintf(stderr, |
1590 | "JS engine warning: %lu GC roots remain after destroying the JSRuntime at %p.\n" |
1591 | " These roots may point to freed memory. Objects reachable\n" |
1592 | " through them have not been finalized.\n", |
1593 | (unsigned long) leakedroots, (void *) rt); |
1594 | } |
1595 | } |
1596 | } |
1597 | |
1598 | typedef struct NamedRootDumpArgs { |
1599 | void (*dump)(const char *name, void *rp, void *data); |
1600 | void *data; |
1601 | } NamedRootDumpArgs; |
1602 | |
1603 | static JSDHashOperator |
1604 | js_named_root_dumper(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 number, |
1605 | void *arg) |
1606 | { |
1607 | NamedRootDumpArgs *args = (NamedRootDumpArgs *) arg; |
1608 | JSGCRootHashEntry *rhe = (JSGCRootHashEntry *)hdr; |
1609 | |
1610 | if (rhe->name) |
1611 | args->dump(rhe->name, rhe->root, args->data); |
1612 | return JS_DHASH_NEXT; |
1613 | } |
1614 | |
1615 | JS_BEGIN_EXTERN_C |
1616 | void |
1617 | js_DumpNamedRoots(JSRuntime *rt, |
1618 | void (*dump)(const char *name, void *rp, void *data), |
1619 | void *data) |
1620 | { |
1621 | NamedRootDumpArgs args; |
1622 | |
1623 | args.dump = dump; |
1624 | args.data = data; |
1625 | JS_DHashTableEnumerate(&rt->gcRootsHash, js_named_root_dumper, &args); |
1626 | } |
1627 | JS_END_EXTERN_C |
1628 | |
1629 | #endif /* DEBUG */ |
1630 | |
1631 | typedef struct GCRootMapArgs { |
1632 | JSGCRootMapFun map; |
1633 | void *data; |
1634 | } GCRootMapArgs; |
1635 | |
1636 | static JSDHashOperator |
1637 | js_gcroot_mapper(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 number, |
1638 | void *arg) |
1639 | { |
1640 | GCRootMapArgs *args = (GCRootMapArgs *) arg; |
1641 | JSGCRootHashEntry *rhe = (JSGCRootHashEntry *)hdr; |
1642 | intN mapflags; |
1643 | int op; |
1644 | |
1645 | mapflags = args->map(rhe->root, rhe->name, args->data); |
1646 | |
1647 | #if JS_MAP_GCROOT_NEXT == JS_DHASH_NEXT && \ |
1648 | JS_MAP_GCROOT_STOP == JS_DHASH_STOP && \ |
1649 | JS_MAP_GCROOT_REMOVE == JS_DHASH_REMOVE |
1650 | op = (JSDHashOperator)mapflags; |
1651 | #else |
1652 | op = JS_DHASH_NEXT; |
1653 | if (mapflags & JS_MAP_GCROOT_STOP) |
1654 | op |= JS_DHASH_STOP; |
1655 | if (mapflags & JS_MAP_GCROOT_REMOVE) |
1656 | op |= JS_DHASH_REMOVE; |
1657 | #endif |
1658 | |
1659 | return (JSDHashOperator) op; |
1660 | } |
1661 | |
1662 | uint32 |
1663 | js_MapGCRoots(JSRuntime *rt, JSGCRootMapFun map, void *data) |
1664 | { |
1665 | GCRootMapArgs args; |
1666 | uint32 rv; |
1667 | |
1668 | args.map = map; |
1669 | args.data = data; |
1670 | JS_LOCK_GC(rt); |
1671 | rv = JS_DHashTableEnumerate(&rt->gcRootsHash, js_gcroot_mapper, &args); |
1672 | JS_UNLOCK_GC(rt); |
1673 | return rv; |
1674 | } |
1675 | |
1676 | JSBool |
1677 | js_RegisterCloseableIterator(JSContext *cx, JSObject *obj) |
1678 | { |
1679 | JSRuntime *rt; |
1680 | JSBool ok; |
1681 | |
1682 | rt = cx->runtime; |
1683 | JS_ASSERT(!rt->gcRunning); |
1684 | |
1685 | JS_LOCK_GC(rt); |
1686 | ok = AddToPtrTable(cx, &rt->gcIteratorTable, &iteratorTableInfo, obj); |
1687 | JS_UNLOCK_GC(rt); |
1688 | return ok; |
1689 | } |
1690 | |
1691 | static void |
1692 | CloseNativeIterators(JSContext *cx) |
1693 | { |
1694 | JSRuntime *rt; |
1695 | size_t count, newCount, i; |
1696 | void **array; |
1697 | JSObject *obj; |
1698 | |
1699 | rt = cx->runtime; |
1700 | count = rt->gcIteratorTable.count; |
1701 | array = rt->gcIteratorTable.array; |
1702 | |
1703 | newCount = 0; |
1704 | for (i = 0; i != count; ++i) { |
1705 | obj = (JSObject *)array[i]; |
1706 | if (js_IsAboutToBeFinalized(cx, obj)) |
1707 | js_CloseNativeIterator(cx, obj); |
1708 | else |
1709 | array[newCount++] = obj; |
1710 | } |
1711 | ShrinkPtrTable(&rt->gcIteratorTable, &iteratorTableInfo, newCount); |
1712 | } |
1713 | |
1714 | #if defined(DEBUG_brendan) || defined(DEBUG_timeless) |
1715 | #define DEBUG_gchist |
1716 | #endif |
1717 | |
1718 | #ifdef DEBUG_gchist |
1719 | #define NGCHIST 64 |
1720 | |
1721 | static struct GCHist { |
1722 | bool lastDitch; |
1723 | JSGCThing *freeList; |
1724 | } gchist[NGCHIST]; |
1725 | |
1726 | unsigned gchpos = 0; |
1727 | #endif |
1728 | |
1729 | #ifdef JS_THREADSAFE |
1730 | |
1731 | const JSGCFreeListSet js_GCEmptyFreeListSet = { |
1732 | { NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL }, NULL |
1733 | }; |
1734 | |
1735 | static void |
1736 | TrimGCFreeListsPool(JSRuntime *rt, uintN keepCount) |
1737 | { |
1738 | JSGCFreeListSet **cursor, *freeLists, *link; |
1739 | |
1740 | cursor = &rt->gcFreeListsPool; |
1741 | while (keepCount != 0) { |
1742 | --keepCount; |
1743 | freeLists = *cursor; |
1744 | if (!freeLists) |
1745 | return; |
1746 | memset(freeLists->array, 0, sizeof freeLists->array); |
1747 | cursor = &freeLists->link; |
1748 | } |
1749 | freeLists = *cursor; |
1750 | if (freeLists) { |
1751 | *cursor = NULL; |
1752 | do { |
1753 | link = freeLists->link; |
1754 | free(freeLists); |
1755 | } while ((freeLists = link) != NULL); |
1756 | } |
1757 | } |
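     | /*
     |  * Thus TrimGCFreeListsPool(rt, 0) frees the entire pool, as done from
     |  * js_FinishGC, while a nonzero keepCount retains that many sets (with
     |  * their arrays zeroed) and frees the rest.
     |  */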
1758 | |
1759 | void |
1760 | js_RevokeGCLocalFreeLists(JSContext *cx) |
1761 | { |
1762 | JS_ASSERT(!cx->gcLocalFreeLists->link); |
1763 | if (cx->gcLocalFreeLists != &js_GCEmptyFreeListSet) { |
1764 | cx->gcLocalFreeLists->link = cx->runtime->gcFreeListsPool; |
1765 | cx->runtime->gcFreeListsPool = cx->gcLocalFreeLists; |
1766 | cx->gcLocalFreeLists = (JSGCFreeListSet *) &js_GCEmptyFreeListSet; |
1767 | } |
1768 | } |
1769 | |
1770 | static JSGCFreeListSet * |
1771 | EnsureLocalFreeList(JSContext *cx) |
1772 | { |
1773 | JSGCFreeListSet *freeLists; |
1774 | |
1775 | freeLists = cx->gcLocalFreeLists; |
1776 | if (freeLists != &js_GCEmptyFreeListSet) { |
1777 | JS_ASSERT(freeLists); |
1778 | return freeLists; |
1779 | } |
1780 | |
1781 | freeLists = cx->runtime->gcFreeListsPool; |
1782 | if (freeLists) { |
1783 | cx->runtime->gcFreeListsPool = freeLists->link; |
1784 | freeLists->link = NULL; |
1785 | } else { |
1786 |         /* Use calloc, not JS_malloc, as the caller reports out-of-memory itself. */
1787 | freeLists = (JSGCFreeListSet *) calloc(1, sizeof *freeLists); |
1788 | if (!freeLists) |
1789 | return NULL; |
1790 | } |
1791 | cx->gcLocalFreeLists = freeLists; |
1792 | return freeLists; |
1793 | } |
1794 | |
1795 | #endif |
1796 | |
1797 | static JS_INLINE bool |
1798 | IsGCThresholdReached(JSRuntime *rt) |
1799 | { |
1800 | #ifdef JS_GC_ZEAL |
1801 | if (rt->gcZeal >= 1) |
1802 | return true; |
1803 | #endif |
1804 | |
1805 |     /*
1806 |      * Since gcLastBytes is initialized to a non-zero value in js_InitGC,
1807 |      * the check below returns false while gcBytes is still close to zero
1808 |      * at engine startup.
1809 |      */
1810 | return rt->gcMallocBytes >= rt->gcMaxMallocBytes || |
1811 | rt->gcBytes / rt->gcTriggerFactor >= rt->gcLastBytes / 100; |
1812 | } |
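     | /*
     |  * For illustration, the byte-count condition above is equivalent, up to
     |  * integer division, to
     |  *
     |  *     gcBytes >= gcLastBytes * gcTriggerFactor / 100
     |  *
     |  * so with a hypothetical gcTriggerFactor of 300 and gcLastBytes of 1 MB,
     |  * the threshold is reached once the heap grows to roughly 3 MB since the
     |  * last collection.
     |  */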
1813 | |
1814 | void * |
1815 | js_NewGCThing(JSContext *cx, uintN flags, size_t nbytes) |
1816 | { |
1817 | JSRuntime *rt; |
1818 | uintN flindex; |
1819 | bool doGC; |
1820 | JSGCThing *thing; |
1821 | uint8 *flagp; |
1822 | JSGCArenaList *arenaList; |
1823 | JSGCArenaInfo *a; |
1824 | uintN thingsLimit; |
1825 | JSLocalRootStack *lrs; |
1826 | #ifdef JS_GCMETER |
1827 | JSGCArenaStats *astats; |
1828 | #endif |
1829 | #ifdef JS_THREADSAFE |
1830 | JSBool gcLocked; |
1831 | uintN localMallocBytes; |
1832 | JSGCFreeListSet *freeLists; |
1833 | JSGCThing **lastptr; |
1834 | JSGCThing *tmpthing; |
1835 | uint8 *tmpflagp; |
1836 | uintN maxFreeThings; /* max to take from the global free list */ |
1837 | #endif |
1838 | |
1839 | JS_ASSERT((flags & GCF_TYPEMASK) != GCX_DOUBLE); |
1840 | rt = cx->runtime; |
1841 | nbytes = JS_ROUNDUP(nbytes, sizeof(JSGCThing)); |
1842 | flindex = GC_FREELIST_INDEX(nbytes); |
1843 | |
1844 | /* Updates of metering counters here may not be thread-safe. */ |
1845 | METER(astats = &cx->runtime->gcStats.arenaStats[flindex]); |
1846 | METER(astats->alloc++); |
1847 | |
1848 | #ifdef JS_THREADSAFE |
1849 | gcLocked = JS_FALSE; |
1850 | JS_ASSERT(cx->thread); |
1851 | freeLists = cx->gcLocalFreeLists; |
1852 | thing = freeLists->array[flindex]; |
1853 | localMallocBytes = cx->thread->gcMallocBytes; |
1854 | if (thing && rt->gcMaxMallocBytes - rt->gcMallocBytes > localMallocBytes) { |
1855 | flagp = thing->flagp; |
1856 | freeLists->array[flindex] = thing->next; |
1857 | METER(astats->localalloc++); |
1858 | goto success; |
1859 | } |
1860 | |
1861 | JS_LOCK_GC(rt); |
1862 | gcLocked = JS_TRUE; |
1863 | |
1864 | /* Transfer thread-local counter to global one. */ |
1865 | if (localMallocBytes != 0) { |
1866 | cx->thread->gcMallocBytes = 0; |
1867 | if (rt->gcMaxMallocBytes - rt->gcMallocBytes < localMallocBytes) |
1868 | rt->gcMallocBytes = rt->gcMaxMallocBytes; |
1869 | else |
1870 | rt->gcMallocBytes += localMallocBytes; |
1871 | } |
1872 | #endif |
1873 | JS_ASSERT(!rt->gcRunning); |
1874 | if (rt->gcRunning) { |
1875 | METER(rt->gcStats.finalfail++); |
1876 | JS_UNLOCK_GC(rt); |
1877 | return NULL; |
1878 | } |
1879 | |
1880 | #if defined JS_GC_ZEAL && defined JS_TRACER |
1881 | if (rt->gcZeal >= 1 && JS_TRACE_MONITOR(cx).useReservedObjects) |
1882 | goto testReservedObjects; |
1883 | #endif |
1884 | |
1885 | arenaList = &rt->gcArenaList[flindex]; |
1886 | doGC = IsGCThresholdReached(rt); |
1887 | for (;;) { |
1888 | if (doGC |
1889 | #ifdef JS_TRACER |
1890 | && !JS_ON_TRACE(cx) && !JS_TRACE_MONITOR(cx).useReservedObjects |
1891 | #endif |
1892 | ) { |
1893 | /* |
1894 | * Keep rt->gcLock across the call into js_GC so we don't starve |
1895 |              * and lose to racing threads that deplete the heap just after
1896 | * js_GC has replenished it (or has synchronized with a racing |
1897 | * GC that collected a bunch of garbage). This unfair scheduling |
1898 | * can happen on certain operating systems. For the gory details, |
1899 | * see bug 162779 at https://bugzilla.mozilla.org/. |
1900 | */ |
1901 | js_GC(cx, GC_LAST_DITCH); |
1902 | METER(astats->retry++); |
1903 | } |
1904 | |
1905 | /* Try to get thing from the free list. */ |
1906 | thing = arenaList->freeList; |
1907 | if (thing) { |
1908 | arenaList->freeList = thing->next; |
1909 | flagp = thing->flagp; |
1910 | JS_ASSERT(*flagp & GCF_FINAL); |
1911 | |
1912 | #ifdef JS_THREADSAFE |
1913 |             /*
1914 |              * Refill the local free list by taking several things from the
1915 |              * global free list, unless we are still at the
1916 |              * rt->gcMaxMallocBytes barrier or the local list is already
1917 |              * populated. The former happens when the GC is canceled after
1918 |              * gcCallback(cx, JSGC_BEGIN) returns false; the latter when
1919 |              * gcCallback(cx, JSGC_END) allocates new things.
1920 |              */
1921 | if (rt->gcMallocBytes >= rt->gcMaxMallocBytes) |
1922 | break; |
1923 | |
1924 | freeLists = EnsureLocalFreeList(cx); |
1925 | if (!freeLists) |
1926 | goto fail; |
1927 | if (freeLists->array[flindex]) |
1928 | break; |
1929 | |
1930 | tmpthing = arenaList->freeList; |
1931 | if (tmpthing) { |
1932 | maxFreeThings = MAX_THREAD_LOCAL_THINGS; |
1933 | do { |
1934 | if (!tmpthing->next) |
1935 | break; |
1936 | tmpthing = tmpthing->next; |
1937 | } while (--maxFreeThings != 0); |
1938 | |
1939 | freeLists->array[flindex] = arenaList->freeList; |
1940 | arenaList->freeList = tmpthing->next; |
1941 | tmpthing->next = NULL; |
1942 | } |
1943 | #endif |
1944 | break; |
1945 | } |
1946 | |
1947 | /* |
1948 | * Try to allocate things from the last arena. If it is fully used, |
1949 | * check if we can allocate a new one and, if we cannot, consider |
1950 | * doing a "last ditch" GC unless already tried. |
1951 | */ |
1952 | thingsLimit = THINGS_PER_ARENA(nbytes); |
1953 | if (arenaList->lastCount != thingsLimit) { |
1954 | JS_ASSERT(arenaList->lastCount < thingsLimit); |
1955 | a = arenaList->last; |
1956 | } else { |
1957 | #ifdef JS_TRACER |
1958 | if (JS_TRACE_MONITOR(cx).useReservedObjects) { |
1959 | #ifdef JS_GC_ZEAL |
1960 | testReservedObjects: |
1961 | #endif |
1962 | JSTraceMonitor *tm = &JS_TRACE_MONITOR(cx); |
1963 | |
1964 | thing = (JSGCThing *) tm->reservedObjects; |
1965 | flagp = GetGCThingFlags(thing); |
1966 | JS_ASSERT(thing); |
1967 | tm->reservedObjects = JSVAL_TO_OBJECT(tm->reservedObjects->fslots[0]); |
1968 | break; |
1969 | } |
1970 | #endif |
1971 | |
1972 | a = NewGCArena(rt); |
1973 | if (!a) { |
1974 | if (doGC || JS_ON_TRACE(cx)) |
1975 | goto fail; |
1976 | doGC = true; |
1977 | continue; |
1978 | } |
1979 | a->list = arenaList; |
1980 | a->prev = arenaList->last; |
1981 | a->prevUntracedPage = 0; |
1982 | a->u.untracedThings = 0; |
1983 | arenaList->last = a; |
1984 | arenaList->lastCount = 0; |
1985 | } |
1986 | |
1987 | flagp = THING_FLAGP(a, arenaList->lastCount); |
1988 | thing = FLAGP_TO_THING(flagp, nbytes); |
1989 | arenaList->lastCount++; |
1990 | |
1991 | #ifdef JS_THREADSAFE |
1992 | /* |
1993 | * Refill the local free list by taking free things from the last |
1994 | * arena. Prefer to order free things by ascending address in the |
1995 | * (unscientific) hope of better cache locality. |
1996 | */ |
1997 | if (rt->gcMallocBytes >= rt->gcMaxMallocBytes) |
1998 | break; |
1999 | |
2000 | freeLists = EnsureLocalFreeList(cx); |
2001 | if (!freeLists) |
2002 | goto fail; |
2003 | if (freeLists->array[flindex]) |
2004 | break; |
2005 | lastptr = &freeLists->array[flindex]; |
2006 | maxFreeThings = thingsLimit - arenaList->lastCount; |
2007 | if (maxFreeThings > MAX_THREAD_LOCAL_THINGS) |
2008 | maxFreeThings = MAX_THREAD_LOCAL_THINGS; |
2009 | while (maxFreeThings != 0) { |
2010 | --maxFreeThings; |
2011 | |
2012 | tmpflagp = THING_FLAGP(a, arenaList->lastCount); |
2013 | tmpthing = FLAGP_TO_THING(tmpflagp, nbytes); |
2014 | arenaList->lastCount++; |
2015 | tmpthing->flagp = tmpflagp; |
2016 | *tmpflagp = GCF_FINAL; /* signifying that thing is free */ |
2017 | |
2018 | *lastptr = tmpthing; |
2019 | lastptr = &tmpthing->next; |
2020 | } |
2021 | *lastptr = NULL; |
2022 | #endif |
2023 | break; |
2024 | } |
2025 | |
2026 | /* We successfully allocated the thing. */ |
2027 | #ifdef JS_THREADSAFE |
2028 | success: |
2029 | #endif |
2030 | lrs = cx->localRootStack; |
2031 | if (lrs) { |
2032 | /* |
2033 | * If we're in a local root scope, don't set newborn[type] at all, to |
2034 | * avoid entraining garbage from it for an unbounded amount of time |
2035 | * on this context. A caller will leave the local root scope and pop |
2036 | * this reference, allowing thing to be GC'd if it has no other refs. |
2037 | * See JS_EnterLocalRootScope and related APIs. |
2038 | */ |
2039 | if (js_PushLocalRoot(cx, lrs, (jsval) thing) < 0) { |
2040 |             /*
2041 |              * When we fail for a thing allocated through the tail of the last
2042 |              * arena, the thing's flag byte is not initialized. So to prevent
2043 |              * the GC from accessing the uninitialized flags during
2044 |              * finalization, we always mark the thing as final. See bug 337407.
2045 |              */
2046 | *flagp = GCF_FINAL; |
2047 | goto fail; |
2048 | } |
2049 | } else { |
2050 | /* |
2051 | * No local root scope, so we're stuck with the old, fragile model of |
2052 | * depending on a pigeon-hole newborn per type per context. |
2053 | */ |
2054 | cx->weakRoots.newborn[flags & GCF_TYPEMASK] = thing; |
2055 | } |
2056 | |
2057 | /* We can't fail now, so update flags. */ |
2058 | *flagp = (uint8)flags; |
2059 | |
2060 | #ifdef DEBUG_gchist |
2061 | gchist[gchpos].lastDitch = doGC; |
2062 | gchist[gchpos].freeList = rt->gcArenaList[flindex].freeList; |
2063 | if (++gchpos == NGCHIST) |
2064 | gchpos = 0; |
2065 | #endif |
2066 | |
2067 | /* This is not thread-safe for thread-local allocations. */ |
2068 | METER_IF(flags & GCF_LOCK, rt->gcStats.lockborn++); |
2069 | |
2070 | #ifdef JS_THREADSAFE |
2071 | if (gcLocked) |
2072 | JS_UNLOCK_GC(rt); |
2073 | #endif |
2074 | return thing; |
2075 | |
2076 | fail: |
2077 | #ifdef JS_THREADSAFE |
2078 | if (gcLocked) |
2079 | JS_UNLOCK_GC(rt); |
2080 | #endif |
2081 | METER(astats->fail++); |
2082 | js_ReportOutOfMemory(cx); |
2083 | return NULL; |
2084 | } |
2085 | |
2086 | static JSGCDoubleCell * |
2087 | RefillDoubleFreeList(JSContext *cx) |
2088 | { |
2089 | JSRuntime *rt; |
2090 | jsbitmap *doubleFlags, usedBits; |
2091 | JSBool didGC = JS_FALSE; |
2092 | JSGCArenaInfo *a; |
2093 | uintN bit, index; |
2094 | JSGCDoubleCell *cell, *list, *lastcell; |
2095 | |
2096 | JS_ASSERT(!cx->doubleFreeList); |
2097 | |
2098 | rt = cx->runtime; |
2099 | JS_LOCK_GC(rt); |
2100 | |
2101 | JS_ASSERT(!rt->gcRunning); |
2102 | if (rt->gcRunning) { |
2103 | METER(rt->gcStats.finalfail++); |
2104 | JS_UNLOCK_GC(rt); |
2105 | return NULL; |
2106 | } |
2107 | |
2108 | if (IsGCThresholdReached(rt)) |
2109 | goto do_gc; |
2110 | |
2111 |     /*
2112 |      * Loop until we find a flag bitmap word with unset bits indicating free
2113 |      * double cells, then mark all its bits as used and put the cells on the
2114 |      * free list for the current context.
2115 |      */
2116 | doubleFlags = rt->gcDoubleArenaList.nextDoubleFlags; |
2117 | for (;;) { |
2118 | if (((jsuword) doubleFlags & GC_ARENA_MASK) == |
2119 | ARENA_INFO_OFFSET) { |
2120 | if (doubleFlags == DOUBLE_BITMAP_SENTINEL || |
2121 | !((JSGCArenaInfo *) doubleFlags)->prev) { |
2122 | a = NewGCArena(rt); |
2123 | if (!a) { |
2124 | do_gc: |
2125 | if (didGC || JS_ON_TRACE(cx)) { |
2126 | METER(rt->gcStats.doubleArenaStats.fail++); |
2127 | JS_UNLOCK_GC(rt); |
2128 | js_ReportOutOfMemory(cx); |
2129 | return NULL; |
2130 | } |
2131 | js_GC(cx, GC_LAST_DITCH); |
2132 | METER(rt->gcStats.doubleArenaStats.retry++); |
2133 | doubleFlags = rt->gcDoubleArenaList.nextDoubleFlags; |
2134 | didGC = JS_TRUE; |
2135 | continue; |
2136 | } |
2137 | a->list = NULL; |
2138 | a->prev = NULL; |
2139 | if (doubleFlags == DOUBLE_BITMAP_SENTINEL) { |
2140 | JS_ASSERT(!rt->gcDoubleArenaList.first); |
2141 | rt->gcDoubleArenaList.first = a; |
2142 | } else { |
2143 | JS_ASSERT(rt->gcDoubleArenaList.first); |
2144 | ((JSGCArenaInfo *) doubleFlags)->prev = a; |
2145 | } |
2146 | ClearDoubleArenaFlags(a); |
2147 | doubleFlags = DOUBLE_ARENA_BITMAP(a); |
2148 | break; |
2149 | } |
2150 | doubleFlags = |
2151 | DOUBLE_ARENA_BITMAP(((JSGCArenaInfo *) doubleFlags)->prev); |
2152 | } |
2153 | |
2154 |         /*
2155 |          * When doubleFlags points to the last bitmap word in the arena, the
2156 |          * word's high bits correspond to non-existing cells, and
2157 |          * ClearDoubleArenaFlags sets such bits to 1. Thus even for this last
2158 |          * word a bit is unset iff the corresponding cell exists and is free.
2159 |          */
2160 | if (*doubleFlags != (jsbitmap) -1) |
2161 | break; |
2162 | ++doubleFlags; |
2163 | } |
2164 | |
2165 | rt->gcDoubleArenaList.nextDoubleFlags = doubleFlags + 1; |
2166 | usedBits = *doubleFlags; |
2167 | JS_ASSERT(usedBits != (jsbitmap) -1); |
2168 | *doubleFlags = (jsbitmap) -1; |
2169 | JS_UNLOCK_GC(rt); |
2170 | |
2171 | /* |
2172 | * Find the index corresponding to the first bit in *doubleFlags. The last |
2173 | * bit will have "index + JS_BITS_PER_WORD - 1". |
2174 | */ |
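     |     /*
     |      * For example, assuming 32-bit jsbitmap words, the word at byte
     |      * offset 4 from the bitmap start yields index 32 and thus covers
     |      * cells 32..63 of the arena.
     |      */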
2175 | index = ((uintN) ((jsuword) doubleFlags & GC_ARENA_MASK) - |
2176 | DOUBLES_ARENA_BITMAP_OFFSET) * JS_BITS_PER_BYTE; |
2177 | cell = (JSGCDoubleCell *) ((jsuword) doubleFlags & ~GC_ARENA_MASK) + index; |
2178 | |
2179 | if (usedBits == 0) { |
2180 | /* The common case when all doubles from *doubleFlags are free. */ |
2181 | JS_ASSERT(index + JS_BITS_PER_WORD <= DOUBLES_PER_ARENA); |
2182 | list = cell; |
2183 | for (lastcell = cell + JS_BITS_PER_WORD - 1; cell != lastcell; ++cell) |
2184 | cell->link = cell + 1; |
2185 | lastcell->link = NULL; |
2186 | } else { |
2187 |         /*
2188 |          * Assemble the free list from the free cells of *doubleFlags,
2189 |          * starting from the tail. Inside the loop,
2190 |          *
2191 |          *   index + bit >= DOUBLES_PER_ARENA
2192 |          *
2193 |          * whenever bit is one of the unused bits; we need no explicit check
2194 |          * for such bits, as they must be set and the "if" filters them out.
2195 |          */
2196 | JS_ASSERT(index + JS_BITS_PER_WORD <= |
2197 | DOUBLES_PER_ARENA + UNUSED_DOUBLE_BITMAP_BITS); |
2198 | bit = JS_BITS_PER_WORD; |
2199 | cell += bit; |
2200 | list = NULL; |
2201 | do { |
2202 | --bit; |
2203 | --cell; |
2204 | if (!(((jsbitmap) 1 << bit) & usedBits)) { |
2205 | JS_ASSERT(index + bit < DOUBLES_PER_ARENA); |
2206 | JS_ASSERT_IF(index + bit == DOUBLES_PER_ARENA - 1, !list); |
2207 | cell->link = list; |
2208 | list = cell; |
2209 | } |
2210 | } while (bit != 0); |
2211 | } |
2212 | JS_ASSERT(list); |
2213 | |
2214 | /* |
2215 | * We delegate assigning cx->doubleFreeList to js_NewDoubleInRootedValue as |
2216 | * it immediately consumes the head of the list. |
2217 | */ |
2218 | return list; |
2219 | } |
2220 | |
2221 | JSBool |
2222 | js_NewDoubleInRootedValue(JSContext *cx, jsdouble d, jsval *vp) |
2223 | { |
2224 | #ifdef JS_GCMETER |
2225 | JSGCArenaStats *astats; |
2226 | #endif |
2227 | JSGCDoubleCell *cell; |
2228 | |
2229 | /* Updates of metering counters here are not thread-safe. */ |
2230 | METER(astats = &cx->runtime->gcStats.doubleArenaStats); |
2231 | METER(astats->alloc++); |
2232 | cell = cx->doubleFreeList; |
2233 | if (!cell) { |
2234 | cell = RefillDoubleFreeList(cx); |
2235 | if (!cell) { |
2236 | METER(astats->fail++); |
2237 | return JS_FALSE; |
2238 | } |
2239 | } else { |
2240 | METER(astats->localalloc++); |
2241 | } |
2242 | cx->doubleFreeList = cell->link; |
2243 | cell->number = d; |
2244 | *vp = DOUBLE_TO_JSVAL(&cell->number); |
2245 | return JS_TRUE; |
2246 | } |
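     | /*
     |  * Minimal usage sketch; vp must point to a rooted location, e.g. a slot
     |  * that the tracer already reaches:
     |  *
     |  *     jsval v;
     |  *     if (!js_NewDoubleInRootedValue(cx, 3.25, &v))
     |  *         return JS_FALSE;    // out-of-memory already reported
     |  *     d = *JSVAL_TO_DOUBLE(v);
     |  */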
2247 | |
2248 | jsdouble * |
2249 | js_NewWeaklyRootedDouble(JSContext *cx, jsdouble d) |
2250 | { |
2251 | jsval v; |
2252 | jsdouble *dp; |
2253 | |
2254 | if (!js_NewDoubleInRootedValue(cx, d, &v)) |
2255 | return NULL; |
2256 | |
2257 | JS_ASSERT(JSVAL_IS_DOUBLE(v)); |
2258 | dp = JSVAL_TO_DOUBLE(v); |
2259 | if (cx->localRootStack) { |
2260 | if (js_PushLocalRoot(cx, cx->localRootStack, v) < 0) |
2261 | return NULL; |
2262 | } else { |
2263 | cx->weakRoots.newborn[GCX_DOUBLE] = dp; |
2264 | } |
2265 | return dp; |
2266 | } |
2267 | |
2268 | #ifdef JS_TRACER |
2269 | JSBool |
2270 | js_ReserveObjects(JSContext *cx, size_t nobjects) |
2271 | { |
2272 |     /*
2273 |      * Ensure at least nobjects objects are in the list. fslots[1] of each
2274 |      * list object counts the objects from that point to the list's end.
2275 |      */
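     |     /*
     |      * E.g., after js_ReserveObjects(cx, 3) on an empty list the chain is
     |      *
     |      *     head (fslots[1] == 3) -> obj (fslots[1] == 2)
     |      *                           -> obj (fslots[1] == 1) -> null
     |      *
     |      * with each link stored as OBJECT_TO_JSVAL in fslots[0] and each
     |      * count stored as a jsval int.
     |      */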
2276 | JSObject *&head = JS_TRACE_MONITOR(cx).reservedObjects; |
2277 | size_t i = head ? JSVAL_TO_INT(head->fslots[1]) : 0; |
2278 | while (i < nobjects) { |
2279 | JSObject *obj = (JSObject *) js_NewGCThing(cx, GCX_OBJECT, sizeof(JSObject)); |
2280 | if (!obj) |
2281 | return JS_FALSE; |
2282 | memset(obj, 0, sizeof(JSObject)); |
2283 | /* The class must be set to something for finalization. */ |
2284 | obj->classword = (jsuword) &js_ObjectClass; |
2285 | obj->fslots[0] = OBJECT_TO_JSVAL(head); |
2286 | i++; |
2287 | obj->fslots[1] = INT_TO_JSVAL(i); |
2288 | head = obj; |
2289 | } |
2290 | |
2291 | return JS_TRUE; |
2292 | } |
2293 | #endif |
2294 | |
2295 | JSBool |
2296 | js_AddAsGCBytes(JSContext *cx, size_t sz) |
2297 | { |
2298 | JSRuntime *rt; |
2299 | |
2300 | rt = cx->runtime; |
2301 | if (rt->gcBytes >= rt->gcMaxBytes || |
2302 | sz > (size_t) (rt->gcMaxBytes - rt->gcBytes) || |
2303 | IsGCThresholdReached(rt)) { |
2304 | if (JS_ON_TRACE(cx)) { |
2305 |             /*
2306 |              * If we can't leave the trace, signal an OOM condition;
2307 |              * otherwise exit the trace and proceed with the GC.
2308 |              */
2309 | if (!js_CanLeaveTrace(cx)) { |
2310 | JS_UNLOCK_GC(rt); |
2311 | return JS_FALSE; |
2312 | } |
2313 | js_LeaveTrace(cx); |
2314 | } |
2315 | js_GC(cx, GC_LAST_DITCH); |
2316 | if (rt->gcBytes >= rt->gcMaxBytes || |
2317 | sz > (size_t) (rt->gcMaxBytes - rt->gcBytes)) { |
2318 | JS_UNLOCK_GC(rt); |
2319 | JS_ReportOutOfMemory(cx); |
2320 | return JS_FALSE; |
2321 | } |
2322 | } |
2323 | rt->gcBytes += (uint32) sz; |
2324 | return JS_TRUE; |
2325 | } |
2326 | |
2327 | void |
2328 | js_RemoveAsGCBytes(JSRuntime *rt, size_t sz) |
2329 | { |
2330 | JS_ASSERT((size_t) rt->gcBytes >= sz); |
2331 | rt->gcBytes -= (uint32) sz; |
2332 | } |
2333 | |
2334 | /* |
2335 | * Shallow GC-things can be locked just by setting the GCF_LOCK bit, because |
2336 | * they have no descendants to mark during the GC. Currently the optimization |
2337 |  * is only used for non-dependent strings.
2338 | */ |
2339 | #define GC_THING_IS_SHALLOW(flagp, thing) \ |
2340 | ((flagp) && \ |
2341 | ((*(flagp) & GCF_TYPEMASK) >= GCX_EXTERNAL_STRING || \ |
2342 | ((*(flagp) & GCF_TYPEMASK) == GCX_STRING && \ |
2343 | !JSSTRING_IS_DEPENDENT((JSString *) (thing))))) |
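     | /*
     |  * For illustration: the first js_LockGCThingRT on a shallow thing only
     |  * sets GCF_LOCK; a second, nested lock falls through to rt->gcLocksHash
     |  * and creates an entry with count 1. The matching js_UnlockGCThingRT
     |  * calls drain the hash entry first; only the last one clears the flag
     |  * bit.
     |  */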
2344 | |
2345 | /* This is compatible with JSDHashEntryStub. */ |
2346 | typedef struct JSGCLockHashEntry { |
2347 | JSDHashEntryHdr hdr; |
2348 | const void *thing; |
2349 | uint32 count; |
2350 | } JSGCLockHashEntry; |
2351 | |
2352 | JSBool |
2353 | js_LockGCThingRT(JSRuntime *rt, void *thing) |
2354 | { |
2355 | JSBool shallow, ok; |
2356 | uint8 *flagp; |
2357 | JSGCLockHashEntry *lhe; |
2358 | |
2359 | if (!thing) |
2360 | return JS_TRUE; |
2361 | |
2362 | flagp = GetGCThingFlagsOrNull(thing); |
2363 | JS_LOCK_GC(rt); |
2364 | shallow = GC_THING_IS_SHALLOW(flagp, thing); |
2365 | |
2366 | /* |
2367 | * Avoid adding a rt->gcLocksHash entry for shallow things until someone |
2368 | * nests a lock. |
2369 | */ |
2370 | if (shallow && !(*flagp & GCF_LOCK)) { |
2371 | *flagp |= GCF_LOCK; |
2372 | METER(rt->gcStats.lock++); |
2373 | ok = JS_TRUE; |
2374 | goto out; |
2375 | } |
2376 | |
2377 | if (!rt->gcLocksHash) { |
2378 | rt->gcLocksHash = JS_NewDHashTable(JS_DHashGetStubOps(), NULL, |
2379 | sizeof(JSGCLockHashEntry), |
2380 | GC_ROOTS_SIZE); |
2381 | if (!rt->gcLocksHash) { |
2382 | ok = JS_FALSE; |
2383 | goto out; |
2384 | } |
2385 | } |
2386 | |
2387 | lhe = (JSGCLockHashEntry *) |
2388 | JS_DHashTableOperate(rt->gcLocksHash, thing, JS_DHASH_ADD); |
2389 | if (!lhe) { |
2390 | ok = JS_FALSE; |
2391 | goto out; |
2392 | } |
2393 | if (!lhe->thing) { |
2394 | lhe->thing = thing; |
2395 | lhe->count = 1; |
2396 | } else { |
2397 | JS_ASSERT(lhe->count >= 1); |
2398 | lhe->count++; |
2399 | } |
2400 | |
2401 | METER(rt->gcStats.lock++); |
2402 | ok = JS_TRUE; |
2403 | out: |
2404 | JS_UNLOCK_GC(rt); |
2405 | return ok; |
2406 | } |
2407 | |
2408 | JSBool |
2409 | js_UnlockGCThingRT(JSRuntime *rt, void *thing) |
2410 | { |
2411 | uint8 *flagp; |
2412 | JSBool shallow; |
2413 | JSGCLockHashEntry *lhe; |
2414 | |
2415 | if (!thing) |
2416 | return JS_TRUE; |
2417 | |
2418 | flagp = GetGCThingFlagsOrNull(thing); |
2419 | JS_LOCK_GC(rt); |
2420 | shallow = GC_THING_IS_SHALLOW(flagp, thing); |
2421 | |
2422 | if (shallow && !(*flagp & GCF_LOCK)) |
2423 | goto out; |
2424 | if (!rt->gcLocksHash || |
2425 | (lhe = (JSGCLockHashEntry *) |
2426 | JS_DHashTableOperate(rt->gcLocksHash, thing, |
2427 | JS_DHASH_LOOKUP), |
2428 | JS_DHASH_ENTRY_IS_FREE(&lhe->hdr))) { |
2429 | /* Shallow entry is not in the hash -> clear its lock bit. */ |
2430 | if (shallow) |
2431 | *flagp &= ~GCF_LOCK; |
2432 | else |
2433 | goto out; |
2434 | } else { |
2435 | if (--lhe->count != 0) |
2436 | goto out; |
2437 | JS_DHashTableOperate(rt->gcLocksHash, thing, JS_DHASH_REMOVE); |
2438 | } |
2439 | |
2440 | rt->gcPoke = JS_TRUE; |
2441 | METER(rt->gcStats.unlock++); |
2442 | out: |
2443 | JS_UNLOCK_GC(rt); |
2444 | return JS_TRUE; |
2445 | } |
2446 | |
2447 | JS_PUBLIC_API(void) |
2448 | JS_TraceChildren(JSTracer *trc, void *thing, uint32 kind) |
2449 | { |
2450 | JSObject *obj; |
2451 | size_t nslots, i; |
2452 | jsval v; |
2453 | JSString *str; |
2454 | |
2455 | switch (kind) { |
2456 | case JSTRACE_OBJECT: |
2457 | /* If obj has no map, it must be a newborn. */ |
2458 | obj = (JSObject *) thing; |
2459 | if (!obj->map) |
2460 | break; |
2461 | if (obj->map->ops->trace) { |
2462 | obj->map->ops->trace(trc, obj); |
2463 | } else { |
2464 | nslots = STOBJ_NSLOTS(obj); |
2465 | for (i = 0; i != nslots; ++i) { |
2466 | v = STOBJ_GET_SLOT(obj, i); |
2467 | if (JSVAL_IS_TRACEABLE(v)) { |
2468 | JS_SET_TRACING_INDEX(trc, "slot", i); |
2469 | JS_CallTracer(trc, JSVAL_TO_TRACEABLE(v), |
2470 | JSVAL_TRACE_KIND(v)); |
2471 | } |
2472 | } |
2473 | } |
2474 | break; |
2475 | |
2476 | case JSTRACE_STRING: |
2477 | str = (JSString *)thing; |
2478 | if (JSSTRING_IS_DEPENDENT(str)) |
2479 | JS_CALL_STRING_TRACER(trc, JSSTRDEP_BASE(str), "base"); |
2480 | break; |
2481 | |
2482 | #if JS_HAS_XML_SUPPORT |
2483 | case JSTRACE_XML: |
2484 | js_TraceXML(trc, (JSXML *)thing); |
2485 | break; |
2486 | #endif |
2487 | } |
2488 | } |
2489 | |
2490 | /* |
2491 | * Number of things covered by a single bit of JSGCArenaInfo.u.untracedThings. |
2492 | */ |
2493 | #define THINGS_PER_UNTRACED_BIT(thingSize) \ |
2494 | JS_HOWMANY(THINGS_PER_ARENA(thingSize), JS_BITS_PER_WORD) |
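     | /*
     |  * E.g., if an arena held 400 things of some size and jsuword had 32
     |  * bits, each untraced bit would cover JS_HOWMANY(400, 32) == 13
     |  * consecutive things, with the last bit covering the remaining tail.
     |  */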
2495 | |
2496 | static void |
2497 | DelayTracingChildren(JSRuntime *rt, uint8 *flagp) |
2498 | { |
2499 | JSGCArenaInfo *a; |
2500 | uint32 untracedBitIndex; |
2501 | jsuword bit; |
2502 | |
2503 | /* |
2504 | * Things with children to be traced later are marked with |
2505 | * GCF_MARK | GCF_FINAL flags. |
2506 | */ |
2507 | JS_ASSERT((*flagp & (GCF_MARK | GCF_FINAL)) == GCF_MARK); |
2508 | *flagp |= GCF_FINAL; |
2509 | |
2510 | METER(rt->gcStats.untraced++); |
2511 | #ifdef DEBUG |
2512 | ++rt->gcTraceLaterCount; |
2513 | METER_UPDATE_MAX(rt->gcStats.maxuntraced, rt->gcTraceLaterCount); |
2514 | #endif |
2515 | |
2516 | a = FLAGP_TO_ARENA(flagp); |
2517 | untracedBitIndex = FLAGP_TO_INDEX(flagp) / |
2518 | THINGS_PER_UNTRACED_BIT(a->list->thingSize); |
2519 | JS_ASSERT(untracedBitIndex < JS_BITS_PER_WORD); |
2520 | bit = (jsuword)1 << untracedBitIndex; |
2521 | if (a->u.untracedThings != 0) { |
2522 | JS_ASSERT(rt->gcUntracedArenaStackTop); |
2523 | if (a->u.untracedThings & bit) { |
2524 | /* bit already covers things with children to trace later. */ |
2525 | return; |
2526 | } |
2527 | a->u.untracedThings |= bit; |
2528 | } else { |
2529 |         /*
2530 |          * The thing is the first one in the whole arena whose children are
2531 |          * not yet traced, so push the arena onto the stack of arenas with
2532 |          * things to be traced later, unless it has already been pushed. We
2533 |          * detect that by checking prevUntracedPage, which is 0 only for
2534 |          * arenas that have not yet been pushed. To ensure that
2535 |          *   prevUntracedPage != 0
2536 |          * even when the stack contains a single element, we make the
2537 |          * prevUntracedPage of the arena at the bottom point to itself.
2538 |          *
2539 |          * See comments in TraceDelayedChildren.
2540 |          */
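     |         /*
     |          * E.g., pushing arenas A and then B leaves
     |          *
     |          *   A->prevUntracedPage == page(A)   (bottom, self-reference)
     |          *   B->prevUntracedPage == page(A)
     |          *
     |          * so a zero prevUntracedPage always means "not on the stack".
     |          */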
2541 | a->u.untracedThings = bit; |
2542 | if (a->prevUntracedPage == 0) { |
2543 | if (!rt->gcUntracedArenaStackTop) { |
2544 | /* Stack was empty, mark the arena as the bottom element. */ |
2545 | a->prevUntracedPage = ARENA_INFO_TO_PAGE(a); |
2546 | } else { |
2547 | JS_ASSERT(rt->gcUntracedArenaStackTop->prevUntracedPage != 0); |
2548 | a->prevUntracedPage = |
2549 | ARENA_INFO_TO_PAGE(rt->gcUntracedArenaStackTop); |
2550 | } |
2551 | rt->gcUntracedArenaStackTop = a; |
2552 | } |
2553 | } |
2554 | JS_ASSERT(rt->gcUntracedArenaStackTop); |
2555 | } |
2556 | |
2557 | static void |
2558 | TraceDelayedChildren(JSTracer *trc) |
2559 | { |
2560 | JSRuntime *rt; |
2561 | JSGCArenaInfo *a, *aprev; |
2562 | uint32 thingSize; |
2563 | uint32 thingsPerUntracedBit; |
2564 | uint32 untracedBitIndex, thingIndex, indexLimit, endIndex; |
2565 | JSGCThing *thing; |
2566 | uint8 *flagp; |
2567 | |
2568 | rt = trc->context->runtime; |
2569 | a = rt->gcUntracedArenaStackTop; |
2570 | if (!a) { |
2571 | JS_ASSERT(rt->gcTraceLaterCount == 0); |
2572 | return; |
2573 | } |
2574 | |
2575 | for (;;) { |
2576 |         /*
2577 |          * The following assert verifies that the current arena belongs to
2578 |          * the untraced stack: DelayTracingChildren ensures that even for
2579 |          * the stack's bottom prevUntracedPage != 0, as it points to itself.
2580 |          */
2581 | JS_ASSERT(a->prevUntracedPage != 0); |
2582 | JS_ASSERT(rt->gcUntracedArenaStackTop->prevUntracedPage != 0); |
2583 | thingSize = a->list->thingSize; |
2584 | indexLimit = (a == a->list->last) |
2585 | ? a->list->lastCount |
2586 | : THINGS_PER_ARENA(thingSize); |
2587 | thingsPerUntracedBit = THINGS_PER_UNTRACED_BIT(thingSize); |
2588 | |
2589 |         /*
2590 |          * We cannot use a do-while loop here because a->u.untracedThings
2591 |          * can already be zero before the loop, as a leftover from previous
2592 |          * iterations. See the comments after the loop.
2593 |          */
2594 | while (a->u.untracedThings != 0) { |
2595 | untracedBitIndex = JS_FLOOR_LOG2W(a->u.untracedThings); |
2596 | a->u.untracedThings &= ~((jsuword)1 << untracedBitIndex); |
2597 | thingIndex = untracedBitIndex * thingsPerUntracedBit; |
2598 | endIndex = thingIndex + thingsPerUntracedBit; |
2599 | |
2600 | /* |
2601 | * endIndex can go beyond the last allocated thing as the real |
2602 | * limit can be "inside" the bit. |
2603 | */ |
2604 | if (endIndex > indexLimit) |
2605 | endIndex = indexLimit; |
2606 | JS_ASSERT(thingIndex < indexLimit); |
2607 | |
2608 | do { |
2609 | /* |
2610 | * Skip free or already traced things that share the bit |
2611 | * with untraced ones. |
2612 | */ |
2613 | flagp = THING_FLAGP(a, thingIndex); |
2614 | if ((*flagp & (GCF_MARK|GCF_FINAL)) != (GCF_MARK|GCF_FINAL)) |
2615 | continue; |
2616 | *flagp &= ~GCF_FINAL; |
2617 | #ifdef DEBUG |
2618 | JS_ASSERT(rt->gcTraceLaterCount != 0); |
2619 | --rt->gcTraceLaterCount; |
2620 | #endif |
2621 | thing = FLAGP_TO_THING(flagp, thingSize); |
2622 | JS_TraceChildren(trc, thing, MapGCFlagsToTraceKind(*flagp)); |
2623 | } while (++thingIndex != endIndex); |
2624 | } |
2625 | |
2626 |         /*
2627 |          * We finished tracing all things in the arena, but we can only pop
2628 |          * it from the stack if the arena is the stack's top.
2629 |          *
2630 |          * When JS_TraceChildren above calls JS_CallTracer, which in turn,
2631 |          * on a low C stack, calls DelayTracingChildren, and the latter
2632 |          * pushes new arenas onto the untraced stack, we have to skip
2633 |          * popping this arena until it becomes the top of the stack again.
2634 |          */
2635 | if (a == rt->gcUntracedArenaStackTop) { |
2636 | aprev = ARENA_PAGE_TO_INFO(a->prevUntracedPage); |
2637 | a->prevUntracedPage = 0; |
2638 | if (a == aprev) { |
2639 | /* |
2640 | * prevUntracedPage points to itself and we reached the |
2641 | * bottom of the stack. |
2642 | */ |
2643 | break; |
2644 | } |
2645 | rt->gcUntracedArenaStackTop = a = aprev; |
2646 | } else { |
2647 | a = rt->gcUntracedArenaStackTop; |
2648 | } |
2649 | } |
2650 | JS_ASSERT(rt->gcUntracedArenaStackTop); |
2651 | JS_ASSERT(rt->gcUntracedArenaStackTop->prevUntracedPage == 0); |
2652 | rt->gcUntracedArenaStackTop = NULL; |
2653 | JS_ASSERT(rt->gcTraceLaterCount == 0); |
2654 | } |
2655 | |
2656 | JS_PUBLIC_API(void) |
2657 | JS_CallTracer(JSTracer *trc, void *thing, uint32 kind) |
2658 | { |
2659 | JSContext *cx; |
2660 | JSRuntime *rt; |
2661 | JSGCArenaInfo *a; |
2662 | uintN index; |
2663 | uint8 *flagp; |
2664 | |
2665 | JS_ASSERT(thing); |
2666 | JS_ASSERT(JS_IS_VALID_TRACE_KIND(kind)); |
2667 | JS_ASSERT(trc->debugPrinter || trc->debugPrintArg); |
2668 | |
2669 | if (!IS_GC_MARKING_TRACER(trc)) { |
2670 | trc->callback(trc, thing, kind); |
2671 | goto out; |
2672 | } |
2673 | |
2674 | cx = trc->context; |
2675 | rt = cx->runtime; |
2676 | JS_ASSERT(rt->gcMarkingTracer == trc); |
2677 | JS_ASSERT(rt->gcLevel > 0); |
2678 | |
2679 | /* |
2680 | * Optimize for string and double as their size is known and their tracing |
2681 | * is not recursive. |
2682 | */ |
2683 | switch (kind) { |
2684 | case JSTRACE_DOUBLE: |
2685 | a = THING_TO_ARENA(thing); |
2686 | JS_ASSERT(!a->list); |
2687 | if (!a->u.hasMarkedDoubles) { |
2688 | ClearDoubleArenaFlags(a); |
2689 | a->u.hasMarkedDoubles = JS_TRUE; |
2690 | } |
2691 | index = DOUBLE_THING_TO_INDEX(thing); |
2692 | JS_SET_BIT(DOUBLE_ARENA_BITMAP(a), index); |
2693 | goto out; |
2694 | |
2695 | case JSTRACE_STRING: |
2696 | for (;;) { |
2697 | flagp = THING_TO_FLAGP(thing, sizeof(JSGCThing)); |
2698 | JS_ASSERT((*flagp & GCF_FINAL) == 0); |
2699 | JS_ASSERT(kind == MapGCFlagsToTraceKind(*flagp)); |
2700 | if (!JSSTRING_IS_DEPENDENT((JSString *) thing)) { |
2701 | *flagp |= GCF_MARK; |
2702 | goto out; |
2703 | } |
2704 | if (*flagp & GCF_MARK) |
2705 | goto out; |
2706 | *flagp |= GCF_MARK; |
2707 | thing = JSSTRDEP_BASE((JSString *) thing); |
2708 | } |
2709 | /* NOTREACHED */ |
2710 | } |
2711 | |
2712 | flagp = GetGCThingFlags(thing); |
2713 | JS_ASSERT(kind == MapGCFlagsToTraceKind(*flagp)); |
2714 | if (*flagp & GCF_MARK) |
2715 | goto out; |
2716 | |
2717 |     /*
2718 |      * We check the non-final flag only when the mark bit is unset, as
2719 |      * DelayTracingChildren uses the flag. See comments in that function.
2720 |      */
2721 | JS_ASSERT(*flagp != GCF_FINAL); |
2722 | *flagp |= GCF_MARK; |
2723 | if (!cx->insideGCMarkCallback) { |
2724 | /* |
2725 | * With JS_GC_ASSUME_LOW_C_STACK defined the mark phase of GC always |
2726 | * uses the non-recursive code that otherwise would be called only on |
2727 | * a low C stack condition. |
2728 | */ |
2729 | #ifdef JS_GC_ASSUME_LOW_C_STACK |
2730 | # define RECURSION_TOO_DEEP() JS_TRUE |
2731 | #else |
2732 | int stackDummy; |
2733 | # define RECURSION_TOO_DEEP() (!JS_CHECK_STACK_SIZE(cx, stackDummy)) |
2734 | #endif |
2735 | if (RECURSION_TOO_DEEP()) |
2736 | DelayTracingChildren(rt, flagp); |
2737 | else |
2738 | JS_TraceChildren(trc, thing, kind); |
2739 | } else { |
2740 | /* |
2741 |          * For API compatibility we allow the callback to assume that,
2742 |          * after its last call to JS_MarkGCThing, it can start to
2743 |          * finalize its own objects that are only referenced by unmarked
2744 |          * GC things.
2745 | * |
2746 | * Since we do not know which call from inside the callback is the |
2747 | * last, we ensure that children of all marked things are traced and |
2748 | * call TraceDelayedChildren(trc) after tracing the thing. |
2749 | * |
2750 | * As TraceDelayedChildren unconditionally invokes JS_TraceChildren |
2751 | * for the things with untraced children, calling DelayTracingChildren |
2752 |          * is useless here. Hence we always trace the thing's children even
2753 |          * on a low native stack.
2754 | */ |
2755 | cx->insideGCMarkCallback = JS_FALSE; |
2756 | JS_TraceChildren(trc, thing, kind); |
2757 | TraceDelayedChildren(trc); |
2758 | cx->insideGCMarkCallback = JS_TRUE; |
2759 | } |
2760 | |
2761 | out: |
2762 | #ifdef DEBUG |
2763 | trc->debugPrinter = NULL; |
2764 | trc->debugPrintArg = NULL; |
2765 | #endif |
2766 |     return; /* avoids an "out:" label right before the closing brace when DEBUG is off */
2767 | } |
2768 | |
2769 | void |
2770 | js_CallValueTracerIfGCThing(JSTracer *trc, jsval v) |
2771 | { |
2772 | void *thing; |
2773 | uint32 kind; |
2774 | |
2775 | if (JSVAL_IS_DOUBLE(v) || JSVAL_IS_STRING(v)) { |
2776 | thing = JSVAL_TO_TRACEABLE(v); |
2777 | kind = JSVAL_TRACE_KIND(v); |
2778 | JS_ASSERT(kind == js_GetGCThingTraceKind(JSVAL_TO_GCTHING(v))); |
2779 | } else if (JSVAL_IS_OBJECT(v) && v != JSVAL_NULL) { |
2780 | /* v can be an arbitrary GC thing reinterpreted as an object. */ |
2781 | thing = JSVAL_TO_OBJECT(v); |
2782 | kind = js_GetGCThingTraceKind(thing); |
2783 | } else { |
2784 | return; |
2785 | } |
2786 | JS_CallTracer(trc, thing, kind); |
2787 | } |
2788 | |
2789 | static JSDHashOperator |
2790 | gc_root_traversal(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 num, |
2791 | void *arg) |
2792 | { |
2793 | JSGCRootHashEntry *rhe = (JSGCRootHashEntry *)hdr; |
2794 | JSTracer *trc = (JSTracer *)arg; |
2795 | jsval *rp = (jsval *)rhe->root; |
2796 | jsval v = *rp; |
2797 | |
2798 | /* Ignore null object and scalar values. */ |
2799 | if (!JSVAL_IS_NULL(v) && JSVAL_IS_GCTHING(v)) { |
2800 | #ifdef DEBUG |
2801 | JSBool root_points_to_gcArenaList = JS_FALSE; |
2802 | jsuword thing = (jsuword) JSVAL_TO_GCTHING(v); |
2803 | JSRuntime *rt; |
2804 | uintN i; |
2805 | JSGCArenaList *arenaList; |
2806 | uint32 thingSize; |
2807 | JSGCArenaInfo *a; |
2808 | size_t limit; |
2809 | |
2810 | rt = trc->context->runtime; |
2811 | for (i = 0; i < GC_NUM_FREELISTS; i++) { |
2812 | arenaList = &rt->gcArenaList[i]; |
2813 | thingSize = arenaList->thingSize; |
2814 | limit = (size_t) arenaList->lastCount * thingSize; |
2815 | for (a = arenaList->last; a; a = a->prev) { |
2816 | if (thing - ARENA_INFO_TO_START(a) < limit) { |
2817 | root_points_to_gcArenaList = JS_TRUE; |
2818 | break; |
2819 | } |
2820 | limit = (size_t) THINGS_PER_ARENA(thingSize) * thingSize; |
2821 | } |
2822 | } |
2823 | if (!root_points_to_gcArenaList) { |
2824 | for (a = rt->gcDoubleArenaList.first; a; a = a->prev) { |
2825 | if (thing - ARENA_INFO_TO_START(a) < |
2826 | DOUBLES_PER_ARENA * sizeof(jsdouble)) { |
2827 | root_points_to_gcArenaList = JS_TRUE; |
2828 | break; |
2829 | } |
2830 | } |
2831 | } |
2832 | if (!root_points_to_gcArenaList && rhe->name) { |
2833 | fprintf(stderr, |
2834 | "JS API usage error: the address passed to JS_AddNamedRoot currently holds an\n" |
2835 | "invalid jsval. This is usually caused by a missing call to JS_RemoveRoot.\n" |
2836 | "The root's name is \"%s\".\n", |
2837 | rhe->name); |
2838 | } |
2839 | JS_ASSERT(root_points_to_gcArenaList); |
2840 | #endif |
2841 | JS_SET_TRACING_NAME(trc, rhe->name ? rhe->name : "root"); |
2842 | js_CallValueTracerIfGCThing(trc, v); |
2843 | } |
2844 | |
2845 | return JS_DHASH_NEXT; |
2846 | } |
2847 | |
2848 | static JSDHashOperator |
2849 | gc_lock_traversal(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 num, |
2850 | void *arg) |
2851 | { |
2852 | JSGCLockHashEntry *lhe = (JSGCLockHashEntry *)hdr; |
2853 | void *thing = (void *)lhe->thing; |
2854 | JSTracer *trc = (JSTracer *)arg; |
2855 | uint32 traceKind; |
2856 | |
2857 | JS_ASSERT(lhe->count >= 1); |
2858 | traceKind = js_GetGCThingTraceKind(thing); |
2859 | JS_CALL_TRACER(trc, thing, traceKind, "locked object"); |
2860 | return JS_DHASH_NEXT; |
2861 | } |
2862 | |
2863 | #define TRACE_JSVALS(trc, len, vec, name) \ |
2864 | JS_BEGIN_MACRO \ |
2865 | jsval _v, *_vp, *_end; \ |
2866 | \ |
2867 | for (_vp = vec, _end = _vp + len; _vp < _end; _vp++) { \ |
2868 | _v = *_vp; \ |
2869 | if (JSVAL_IS_TRACEABLE(_v)) { \ |
2870 | JS_SET_TRACING_INDEX(trc, name, _vp - (vec)); \ |
2871 | JS_CallTracer(trc, JSVAL_TO_TRACEABLE(_v), \ |
2872 | JSVAL_TRACE_KIND(_v)); \ |
2873 | } \ |
2874 | } \ |
2875 | JS_END_MACRO |
2876 | |
2877 | void |
2878 | js_TraceStackFrame(JSTracer *trc, JSStackFrame *fp) |
2879 | { |
2880 | uintN nslots, minargs, skip; |
2881 | |
2882 | if (fp->callobj) |
2883 | JS_CALL_OBJECT_TRACER(trc, fp->callobj, "call"); |
2884 | if (fp->argsobj) |
2885 | JS_CALL_OBJECT_TRACER(trc, fp->argsobj, "arguments"); |
2886 | if (fp->varobj) |
2887 | JS_CALL_OBJECT_TRACER(trc, fp->varobj, "variables"); |
2888 | if (fp->script) { |
2889 | js_TraceScript(trc, fp->script); |
2890 | |
2891 | /* fp->slots is null for watch pseudo-frames, see js_watch_set. */ |
2892 | if (fp->slots) { |
2893 | /* |
2894 | * Don't mark what has not been pushed yet, or what has been |
2895 | * popped already. |
2896 | */ |
2897 | if (fp->regs) { |
2898 | nslots = (uintN) (fp->regs->sp - fp->slots); |
2899 | JS_ASSERT(nslots >= fp->script->nfixed); |
2900 | } else { |
2901 | nslots = fp->script->nfixed; |
2902 | } |
2903 | TRACE_JSVALS(trc, nslots, fp->slots, "slot"); |
2904 | } |
2905 | } else { |
2906 | JS_ASSERT(!fp->slots); |
2907 | JS_ASSERT(!fp->regs); |
2908 | } |
2909 | |
2910 | /* Allow for primitive this parameter due to JSFUN_THISP_* flags. */ |
2911 | JS_ASSERT(JSVAL_IS_OBJECT((jsval)fp->thisp) || |
2912 | (fp->fun && JSFUN_THISP_FLAGS(fp->fun->flags))); |
2913 | JS_CALL_VALUE_TRACER(trc, (jsval)fp->thisp, "this"); |
2914 | |
2915 | if (fp->callee) |
2916 | JS_CALL_OBJECT_TRACER(trc, fp->callee, "callee"); |
2917 | |
2918 | if (fp->argv) { |
2919 | nslots = fp->argc; |
2920 | skip = 0; |
2921 | if (fp->fun) { |
2922 | minargs = FUN_MINARGS(fp->fun); |
2923 | if (minargs > nslots) |
2924 | nslots = minargs; |
2925 | if (!FUN_INTERPRETED(fp->fun)) { |
2926 | JS_ASSERT(!(fp->fun->flags & JSFUN_FAST_NATIVE)); |
2927 | nslots += fp->fun->u.n.extra; |
2928 | } |
2929 | if (fp->fun->flags & JSFRAME_ROOTED_ARGV) |
2930 | skip = 2 + fp->argc; |
2931 | } |
2932 | TRACE_JSVALS(trc, 2 + nslots - skip, fp->argv - 2 + skip, "operand"); |
2933 | } |
2934 | |
2935 | JS_CALL_VALUE_TRACER(trc, fp->rval, "rval"); |
2936 | if (fp->scopeChain) |
2937 | JS_CALL_OBJECT_TRACER(trc, fp->scopeChain, "scope chain"); |
2938 | if (fp->sharpArray) |
2939 | JS_CALL_OBJECT_TRACER(trc, fp->sharpArray, "sharp array"); |
2940 | |
2941 | if (fp->xmlNamespace) |
2942 | JS_CALL_OBJECT_TRACER(trc, fp->xmlNamespace, "xmlNamespace"); |
2943 | } |
2944 | |
2945 | static void |
2946 | TraceWeakRoots(JSTracer *trc, JSWeakRoots *wr) |
2947 | { |
2948 | uint32 i; |
2949 | void *thing; |
2950 | |
2951 | #ifdef DEBUG |
2952 | static const char *weakRootNames[JSTRACE_LIMIT] = { |
2953 | "newborn object", |
2954 | "newborn double", |
2955 | "newborn string", |
2956 | "newborn xml" |
2957 | }; |
2958 | #endif |
2959 | |
2960 | for (i = 0; i != JSTRACE_LIMIT; i++) { |
2961 | thing = wr->newborn[i]; |
2962 | if (thing) |
2963 | JS_CALL_TRACER(trc, thing, i, weakRootNames[i]); |
2964 | } |
2965 | JS_ASSERT(i == GCX_EXTERNAL_STRING); |
2966 | for (; i != GCX_NTYPES; ++i) { |
2967 | thing = wr->newborn[i]; |
2968 | if (thing) { |
2969 | JS_SET_TRACING_INDEX(trc, "newborn external string", |
2970 | i - GCX_EXTERNAL_STRING); |
2971 | JS_CallTracer(trc, thing, JSTRACE_STRING); |
2972 | } |
2973 | } |
2974 | |
2975 | JS_CALL_VALUE_TRACER(trc, wr->lastAtom, "lastAtom"); |
2976 | JS_SET_TRACING_NAME(trc, "lastInternalResult"); |
2977 | js_CallValueTracerIfGCThing(trc, wr->lastInternalResult); |
2978 | } |
2979 | |
2980 | JS_REQUIRES_STACK JS_FRIEND_API(void) |
2981 | js_TraceContext(JSTracer *trc, JSContext *acx) |
2982 | { |
2983 | JSStackFrame *fp, *nextChain; |
2984 | JSStackHeader *sh; |
2985 | JSTempValueRooter *tvr; |
2986 | |
2987 | if (IS_GC_MARKING_TRACER(trc)) { |
2988 | |
2989 | #define FREE_OLD_ARENAS(pool) \ |
2990 | JS_BEGIN_MACRO \ |
2991 | int64 _age; \ |
2992 | JSArena * _a = (pool).current; \ |
2993 | if (_a == (pool).first.next && \ |
2994 | _a->avail == _a->base + sizeof(int64)) { \ |
2995 | _age = JS_Now() - *(int64 *) _a->base; \ |
2996 | if (_age > (int64) acx->runtime->gcEmptyArenaPoolLifespan * \ |
2997 | 1000) \ |
2998 | JS_FreeArenaPool(&(pool)); \ |
2999 | } \ |
3000 | JS_END_MACRO |
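     | /*
     |  * Units note: JS_Now() returns microseconds while
     |  * gcEmptyArenaPoolLifespan is in milliseconds, hence the factor of 1000
     |  * above. E.g., a hypothetical lifespan of 30000 frees a pool that has
     |  * held nothing but its timestamp for over 30 seconds.
     |  */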
3001 | |
3002 | #ifdef JS_THREADSAFE |
3003 | js_RevokeGCLocalFreeLists(acx); |
3004 | #endif |
3005 | |
3006 | /* |
3007 | * Release the stackPool's arenas if the stackPool has existed for |
3008 | * longer than the limit specified by gcEmptyArenaPoolLifespan. |
3009 | */ |
3010 | FREE_OLD_ARENAS(acx->stackPool); |
3011 | |
3012 | /* |
3013 | * Release the regexpPool's arenas based on the same criterion as for |
3014 | * the stackPool. |
3015 | */ |
3016 | FREE_OLD_ARENAS(acx->regexpPool); |
3017 | |
3018 | /* |
3019 | * Clear the double free list to release all the pre-allocated doubles. |
3020 | */ |
3021 | acx->doubleFreeList = NULL; |
3022 | } |
3023 | |
3024 | /* |
3025 | * Iterate frame chain and dormant chains. |
3026 | * |
3027 | * (NB: see comment on this whole "dormant" thing in js_Execute.) |
3028 | * |
3029 | * Since js_GetTopStackFrame needs to dereference cx->thread to check for |
3030 | * JIT frames, we check for non-null thread here and avoid null checks |
3031 | * there. See bug 471197. |
3032 | */ |
3033 | #ifdef JS_THREADSAFE |
3034 | if (acx->thread) |
3035 | #endif |
3036 | { |
3037 | fp = js_GetTopStackFrame(acx); |
3038 | nextChain = acx->dormantFrameChain; |
3039 | if (!fp) |
3040 | goto next_chain; |
3041 | |
3042 | /* The top frame must not be dormant. */ |
3043 | JS_ASSERT(!fp->dormantNext); |
3044 | for (;;) { |
3045 | do { |
3046 | js_TraceStackFrame(trc, fp); |
3047 | } while ((fp = fp->down) != NULL); |
3048 | |
3049 | next_chain: |
3050 | if (!nextChain) |
3051 | break; |
3052 | fp = nextChain; |
3053 | nextChain = nextChain->dormantNext; |
3054 | } |
3055 | } |
3056 | |
3057 | /* Mark other roots-by-definition in acx. */ |
3058 | if (acx->globalObject && !JS_HAS_OPTION(acx, JSOPTION_UNROOTED_GLOBAL)) |
3059 | JS_CALL_OBJECT_TRACER(trc, acx->globalObject, "global object"); |
3060 | TraceWeakRoots(trc, &acx->weakRoots); |
3061 | if (acx->throwing) { |
3062 | JS_CALL_VALUE_TRACER(trc, acx->exception, "exception"); |
3063 | } else { |
3064 | /* Avoid keeping GC-ed junk stored in JSContext.exception. */ |
3065 | acx->exception = JSVAL_NULL; |
3066 | } |
3067 | #if JS_HAS_LVALUE_RETURN |
3068 | if (acx->rval2set) |
3069 | JS_CALL_VALUE_TRACER(trc, acx->rval2, "rval2"); |
3070 | #endif |
3071 | |
3072 | for (sh = acx->stackHeaders; sh; sh = sh->down) { |
3073 | METER(trc->context->runtime->gcStats.stackseg++); |
3074 | METER(trc->context->runtime->gcStats.segslots += sh->nslots); |
3075 | TRACE_JSVALS(trc, sh->nslots, JS_STACK_SEGMENT(sh), "stack"); |
3076 | } |
3077 | |
3078 | if (acx->localRootStack) |
3079 | js_TraceLocalRoots(trc, acx->localRootStack); |
3080 | |
3081 | for (tvr = acx->tempValueRooters; tvr; tvr = tvr->down) { |
3082 | switch (tvr->count) { |
3083 | case JSTVU_SINGLE: |
3084 | JS_SET_TRACING_NAME(trc, "tvr->u.value"); |
3085 | js_CallValueTracerIfGCThing(trc, tvr->u.value); |
3086 | break; |
3087 | case JSTVU_TRACE: |
3088 | tvr->u.trace(trc, tvr); |
3089 | break; |
3090 | case JSTVU_SPROP: |
3091 | TRACE_SCOPE_PROPERTY(trc, tvr->u.sprop); |
3092 | break; |
3093 | case JSTVU_WEAK_ROOTS: |
3094 | TraceWeakRoots(trc, tvr->u.weakRoots); |
3095 | break; |
3096 | case JSTVU_COMPILER: |
3097 | tvr->u.compiler->trace(trc); |
3098 | break; |
3099 | case JSTVU_SCRIPT: |
3100 | js_TraceScript(trc, tvr->u.script); |
3101 | break; |
3102 | default: |
3103 | JS_ASSERT(tvr->count >= 0); |
3104 | TRACE_JSVALS(trc, tvr->count, tvr->u.array, "tvr->u.array"); |
3105 | } |
3106 | } |
3107 | |
3108 | if (acx->sharpObjectMap.depth > 0) |
3109 | js_TraceSharpMap(trc, &acx->sharpObjectMap); |
3110 | |
3111 | js_TraceRegExpStatics(trc, acx); |
3112 | |
3113 | #ifdef JS_TRACER |
3114 | if (acx->nativeVp) |
3115 | TRACE_JSVALS(trc, acx->nativeVpLen, acx->nativeVp, "nativeVp"); |
3116 | #endif |
3117 | } |
3118 | |
3119 | #ifdef JS_TRACER |
3120 | |
3121 | static void |
3122 | MarkReservedObjects(JSTraceMonitor *tm) |
3123 | { |
3124 | /* Keep the reserved objects. */ |
3125 | for (JSObject *obj = tm->reservedObjects; obj; obj = JSVAL_TO_OBJECT(obj->fslots[0])) { |
3126 | uint8 *flagp = GetGCThingFlags(obj); |
3127 | JS_ASSERT((*flagp & GCF_TYPEMASK) == GCX_OBJECT); |
3128 | JS_ASSERT(*flagp != GCF_FINAL); |
3129 | *flagp |= GCF_MARK; |
3130 | } |
3131 | } |
3132 | |
3133 | #ifdef JS_THREADSAFE |
3134 | static JSDHashOperator |
3135 | reserved_objects_marker(JSDHashTable *table, JSDHashEntryHdr *hdr, |
3136 | uint32, void *) |
3137 | { |
3138 | JSThread *thread = ((JSThreadsHashEntry *) hdr)->thread; |
3139 | |
3140 | MarkReservedObjects(&thread->data.traceMonitor); |
3141 | return JS_DHASH_NEXT; |
3142 | } |
3143 | #endif |
3144 | |
3145 | #endif |
3146 | |
3147 | JS_REQUIRES_STACK void |
3148 | js_TraceRuntime(JSTracer *trc, JSBool allAtoms) |
3149 | { |
3150 | JSRuntime *rt = trc->context->runtime; |
3151 | JSContext *iter, *acx; |
3152 | |
3153 | JS_DHashTableEnumerate(&rt->gcRootsHash, gc_root_traversal, trc); |
3154 | if (rt->gcLocksHash) |
3155 | JS_DHashTableEnumerate(rt->gcLocksHash, gc_lock_traversal, trc); |
3156 | js_TraceAtomState(trc, allAtoms); |
3157 | js_TraceNativeEnumerators(trc); |
3158 | js_TraceRuntimeNumberState(trc); |
3159 | |
3160 | iter = NULL; |
3161 | while ((acx = js_ContextIterator(rt, JS_TRUE, &iter)) != NULL) |
3162 | js_TraceContext(trc, acx); |
3163 | |
3164 | if (rt->gcExtraRootsTraceOp) |
3165 | rt->gcExtraRootsTraceOp(trc, rt->gcExtraRootsData); |
3166 | |
3167 | #ifdef JS_TRACER |
3168 | for (int i = 0; i < JSBUILTIN_LIMIT; i++) { |
3169 | if (rt->builtinFunctions[i]) |
3170 | JS_CALL_OBJECT_TRACER(trc, rt->builtinFunctions[i], "builtin function"); |
3171 | } |
3172 | |
3173 | /* Mark the reserved objects unless we are shutting down. */ |
3174 | if (IS_GC_MARKING_TRACER(trc) && rt->state != JSRTS_LANDING) { |
3175 | #ifdef JS_THREADSAFE |
3176 | JS_DHashTableEnumerate(&rt->threads, reserved_objects_marker, NULL); |
3177 | #else |
3178 | MarkReservedObjects(&rt->threadData.traceMonitor); |
3179 | #endif |
3180 | } |
3181 | |
3182 | #endif |
3183 | } |
3184 | |
3185 | void |
3186 | js_TriggerGC(JSContext *cx, JSBool gcLocked) |
3187 | { |
3188 | JSRuntime *rt = cx->runtime; |
3189 | |
3190 | #ifdef JS_THREADSAFE |
3191 | JS_ASSERT(cx->requestDepth > 0); |
3192 | #endif |
3193 | JS_ASSERT(!rt->gcRunning); |
3194 | if (rt->gcIsNeeded) |
3195 | return; |
3196 | |
3197 | /* |
3198 | * Trigger the GC when it is safe to call an operation callback on any |
3199 | * thread. |
3200 | */ |
3201 | rt->gcIsNeeded = JS_TRUE; |
3202 | js_TriggerAllOperationCallbacks(rt, gcLocked); |
3203 | } |
3204 | |
3205 | static void |
3206 | ProcessSetSlotRequest(JSContext *cx, JSSetSlotRequest *ssr) |
3207 | { |
3208 | JSObject *obj = ssr->obj; |
3209 | JSObject *pobj = ssr->pobj; |
3210 | uint32 slot = ssr->slot; |
3211 | |
3212 | while (pobj) { |
3213 | pobj = js_GetWrappedObject(cx, pobj); |
3214 | if (pobj == obj) { |
3215 | ssr->cycle = true; |
3216 | return; |
3217 | } |
3218 | pobj = JSVAL_TO_OBJECT(STOBJ_GET_SLOT(pobj, slot)); |
3219 | } |
3220 | |
3221 | pobj = ssr->pobj; |
3222 | if (slot == JSSLOT_PROTO) { |
3223 | STOBJ_SET_PROTO(obj, pobj); |
3224 | } else { |
3225 | JS_ASSERT(slot == JSSLOT_PARENT); |
3226 | STOBJ_SET_PARENT(obj, pobj); |
3227 | } |
3228 | } |
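     | /*
     |  * For example, a __proto__ assignment that would make obj's proto chain
     |  * lead back to obj itself is caught by the loop above: ssr->cycle is set
     |  * and the slot is left unchanged so that the requester can report the
     |  * error.
     |  */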
3229 | |
3230 | void |
3231 | js_DestroyScriptsToGC(JSContext *cx, JSThreadData *data) |
3232 | { |
3233 | JSScript **listp, *script; |
3234 | |
3235 | for (size_t i = 0; i != JS_ARRAY_LENGTH(data->scriptsToGC); ++i) { |
3236 | listp = &data->scriptsToGC[i]; |
3237 | while ((script = *listp) != NULL) { |
3238 | *listp = script->u.nextToGC; |
3239 | script->u.nextToGC = NULL; |
3240 | js_DestroyScript(cx, script); |
3241 | } |
3242 | } |
3243 | } |
3244 | |
3245 | /* |
3246 | * The gckind flag bit GC_LOCK_HELD indicates a call from js_NewGCThing with |
3247 | * rt->gcLock already held, so the lock should be kept on return. |
3248 | */ |
3249 | void |
3250 | js_GC(JSContext *cx, JSGCInvocationKind gckind) |
3251 | { |
3252 | JSRuntime *rt; |
3253 | JSBool keepAtoms; |
3254 | JSGCCallback callback; |
3255 | uintN i, type; |
3256 | JSTracer trc; |
3257 | uint32 thingSize, indexLimit; |
3258 | JSGCArenaInfo *a, **ap, *emptyArenas; |
3259 | uint8 flags, *flagp; |
3260 | JSGCThing *thing, *freeList; |
3261 | JSGCArenaList *arenaList; |
3262 | JSBool allClear; |
3263 | #ifdef JS_THREADSAFE |
3264 | uint32 requestDebit; |
3265 | #endif |
3266 | #ifdef JS_GCMETER |
3267 | uint32 nlivearenas, nkilledarenas, nthings; |
3268 | #endif |
3269 | |
3270 | JS_ASSERT_IF(gckind == GC_LAST_DITCH, !JS_ON_TRACE(cx)); |
3271 | rt = cx->runtime; |
3272 | |
3273 | #ifdef JS_THREADSAFE |
3274 | /* |
3275 | * We allow js_GC calls outside a request but the context must be bound |
3276 | * to the current thread. |
3277 | */ |
3278 | JS_ASSERT(CURRENT_THREAD_IS_ME(cx->thread)); |
3279 | |
3280 | /* Avoid deadlock. */ |
3281 | JS_ASSERT(!JS_IS_RUNTIME_LOCKED(rt)); |
3282 | #endif |
3283 | |
3284 | if (gckind & GC_KEEP_ATOMS) { |
3285 | /* |
3286 | * The set slot request and last ditch GC kinds preserve all atoms and |
3287 | * weak roots. |
3288 | */ |
3289 | keepAtoms = JS_TRUE; |
3290 | } else { |
3291 | /* Keep atoms when a suspended compile is running on another context. */ |
3292 | keepAtoms = (rt->gcKeepAtoms != 0); |
3293 | JS_CLEAR_WEAK_ROOTS(&cx->weakRoots); |
3294 | } |
3295 | |
3296 |     /*
3297 |      * Don't collect garbage if the runtime isn't up and cx is not the last
3298 |      * context in the runtime. The last context must force a GC, and nothing
3299 |      * should suppress that final collection, or there may be shutdown leaks
3300 |      * or runtime bloat until the next context is created.
3301 |      */
3302 | if (rt->state != JSRTS_UP && gckind != GC_LAST_CONTEXT) |
3303 | return; |
3304 | |
3305 | restart_at_beginning: |
3306 |     /*
3307 |      * Let the API user decide to defer the GC if it wants to (unless this
3308 |      * is the last context). Invoke the callback regardless, sampling the
3309 |      * callback pointer once in case we are racing with a
3310 |      * JS_SetGCCallback{,RT} call on another thread.
3311 |      */
3312 | if (gckind != GC_SET_SLOT_REQUEST && (callback = rt->gcCallback)) { |
3313 | JSBool ok; |
3314 | |
3315 | if (gckind & GC_LOCK_HELD) |
3316 | JS_UNLOCK_GC(rt); |
3317 | ok = callback(cx, JSGC_BEGIN); |
3318 | if (gckind & GC_LOCK_HELD) |
3319 | JS_LOCK_GC(rt); |
3320 | if (!ok && gckind != GC_LAST_CONTEXT) { |
3321 | /* |
3322 | * It's possible that we've looped back to this code from the 'goto |
3323 | * restart_at_beginning' below in the GC_SET_SLOT_REQUEST code and |
3324 | * that rt->gcLevel is now 0. Don't return without notifying! |
3325 | */ |
3326 | if (rt->gcLevel == 0 && (gckind & GC_LOCK_HELD)) |
3327 | JS_NOTIFY_GC_DONE(rt); |
3328 | return; |
3329 | } |
3330 | } |
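     | 
     |     /*
     |      * Sketch of an embedder callback exercising the veto above. This is
     |      * hypothetical embedding code using the JSGCCallback signature, not
     |      * code from this engine; InCriticalSection is an assumed helper:
     |      *
     |      *   static JSBool
     |      *   MyGCCallback(JSContext *cx, JSGCStatus status)
     |      *   {
     |      *       if (status == JSGC_BEGIN && InCriticalSection())
     |      *           return JS_FALSE;  /. defer any GC that is not the last ./
     |      *       return JS_TRUE;
     |      *   }
     |      */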
3331 | |
3332 | /* Lock out other GC allocator and collector invocations. */ |
3333 | if (!(gckind & GC_LOCK_HELD)) |
3334 | JS_LOCK_GC(rt); |
3335 | |
3336 | METER(rt->gcStats.poke++); |
3337 | rt->gcPoke = JS_FALSE; |
3338 | |
3339 | #ifdef JS_THREADSAFE |
3340 | /* |
3341 | * Check if the GC is already running on this or another thread and |
3342 | * delegate the job to it. |
3343 | */ |
3344 | if (rt->gcLevel > 0) { |
3345 | JS_ASSERT(rt->gcThread); |
3346 | |
3347 | /* Bump gcLevel to restart the current GC, so it finds new garbage. */ |
3348 | rt->gcLevel++; |
3349 | METER_UPDATE_MAX(rt->gcStats.maxlevel, rt->gcLevel); |
3350 | |
3351 | /* |
3352 | * If the GC runs on another thread, temporarily suspend the current |
3353 | * request and wait until the GC is done. |
3354 | */ |
3355 | if (rt->gcThread != cx->thread) { |
3356 | requestDebit = js_DiscountRequestsForGC(cx); |
3357 | js_RecountRequestsAfterGC(rt, requestDebit); |
3358 | } |
3359 | if (!(gckind & GC_LOCK_HELD)) |
3360 | JS_UNLOCK_GC(rt); |
3361 | return; |
3362 | } |
3363 | |
3364 | /* No other thread is in GC, so indicate that we're now in GC. */ |
3365 | rt->gcLevel = 1; |
3366 | rt->gcThread = cx->thread; |
3367 | |
3368 |     /*
3369 |      * Notify all operation callbacks, giving contexts a chance to yield
3370 |      * their current requests. Contexts that are not currently executing
3371 |      * will run their callbacks at some later point, by which time the
3372 |      * notification will be unnecessary, but harmless.
3373 |      */
3374 | js_NudgeOtherContexts(cx); |
3375 | |
3376 |     /*
3377 |      * Discount all the requests on the current thread from contributing
3378 |      * to rt->requestCount before we wait for all other requests to finish.
3379 |      * JS_NOTIFY_REQUEST_DONE, which will wake us up, is called only when
3380 |      * rt->requestCount transitions to 0.
3381 |      */
3382 | requestDebit = js_CountThreadRequests(cx); |
3383 | JS_ASSERT_IF(cx->requestDepth != 0, requestDebit >= 1); |
3384 | rt->requestCount -= requestDebit; |
3385 | while (rt->requestCount > 0) |
3386 | JS_AWAIT_REQUEST_DONE(rt); |
3387 | rt->requestCount += requestDebit; |
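     | 
     |     /*
     |      * Worked example (hypothetical numbers): with three threads each
     |      * holding one request, requestDebit is 1 here, so rt->requestCount
     |      * drops from 3 to 2 and we sleep until the two foreign requests end
     |      * and the count reaches 0. Restoring the debit afterwards keeps our
     |      * own requests accounted for once the GC completes.
     |      */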
3388 | |
3389 | #else /* !JS_THREADSAFE */ |
3390 | |
3391 |     /* Bump gcLevel and return rather than nest; the outer GC will restart. */
3392 | rt->gcLevel++; |
3393 | METER_UPDATE_MAX(rt->gcStats.maxlevel, rt->gcLevel); |
3394 | if (rt->gcLevel > 1) |
3395 | return; |
3396 | |
3397 | #endif /* !JS_THREADSAFE */ |
3398 | |
3399 | /* |
3400 | * Set rt->gcRunning here within the GC lock, and after waiting for any |
3401 | * active requests to end, so that new requests that try to JS_AddRoot, |
3402 | * JS_RemoveRoot, or JS_RemoveRootRT block in JS_BeginRequest waiting for |
3403 | * rt->gcLevel to drop to zero, while request-less calls to the *Root* |
3404 | * APIs block in js_AddRoot or js_RemoveRoot (see above in this file), |
3405 | * waiting for GC to finish. |
3406 | */ |
3407 | rt->gcRunning = JS_TRUE; |
3408 | |
3409 | if (gckind == GC_SET_SLOT_REQUEST) { |
3410 | JSSetSlotRequest *ssr; |
3411 | |
3412 | while ((ssr = rt->setSlotRequests) != NULL) { |
3413 | rt->setSlotRequests = ssr->next; |
3414 | JS_UNLOCK_GC(rt); |
3415 | ssr->next = NULL; |
3416 | ProcessSetSlotRequest(cx, ssr); |
3417 | JS_LOCK_GC(rt); |
3418 | } |
3419 | |
3420 | /* |
3421 | * We assume here that killing links to parent and prototype objects |
3422 | * does not create garbage (such objects typically are long-lived and |
3423 | * widely shared, e.g. global objects, Function.prototype, etc.). We |
3424 | * collect garbage only if a racing thread attempted GC and is waiting |
3425 | * for us to finish (gcLevel > 1) or if someone already poked us. |
3426 | */ |
3427 | if (rt->gcLevel == 1 && !rt->gcPoke && !rt->gcIsNeeded) |
3428 | goto done_running; |
3429 | |
3430 | rt->gcLevel = 0; |
3431 | rt->gcPoke = JS_FALSE; |
3432 | rt->gcRunning = JS_FALSE; |
3433 | #ifdef JS_THREADSAFE |
3434 | rt->gcThread = NULL; |
3435 | #endif |
3436 | gckind = GC_LOCK_HELD; |
3437 | goto restart_at_beginning; |
3438 | } |
3439 | |
3440 | JS_UNLOCK_GC(rt); |
3441 | |
3442 | #ifdef JS_TRACER |
3443 | if (JS_ON_TRACE(cx)) |
3444 | goto out; |
3445 | #endif |
3446 | VOUCH_HAVE_STACK(); |
3447 | |
3448 | /* Clear gcIsNeeded now, when we are about to start a normal GC cycle. */ |
3449 | rt->gcIsNeeded = JS_FALSE; |
3450 | |
3451 | /* Reset malloc counter. */ |
3452 | rt->gcMallocBytes = 0; |
3453 | |
3454 | #ifdef JS_DUMP_SCOPE_METERS |
3455 | { extern void js_DumpScopeMeters(JSRuntime *rt); |
3456 | js_DumpScopeMeters(rt); |
3457 | } |
3458 | #endif |
3459 | |
3460 | #ifdef JS_TRACER |
3461 | js_PurgeJITOracle(); |
3462 | #endif |
3463 | js_PurgeThreads(cx); |
3464 | |
3465 | restart: |
3466 | rt->gcNumber++; |
3467 | JS_ASSERT(!rt->gcUntracedArenaStackTop); |
3468 | JS_ASSERT(rt->gcTraceLaterCount == 0); |
3469 | |
3470 | /* |
3471 | * Reset the property cache's type id generator so we can compress ids. |
3472 | * Same for the protoHazardShape proxy-shape standing in for all object |
3473 | * prototypes having readonly or setter properties. |
3474 | */ |
3475 | rt->shapeGen = 0; |
3476 | rt->protoHazardShape = 0; |
3477 | |
3478 | /* |
3479 | * Mark phase. |
3480 | */ |
3481 | JS_TRACER_INIT(&trc, cx, NULL); |
3482 | rt->gcMarkingTracer = &trc; |
3483 | JS_ASSERT(IS_GC_MARKING_TRACER(&trc)); |
3484 | |
3485 | for (a = rt->gcDoubleArenaList.first; a; a = a->prev) |
3486 | a->u.hasMarkedDoubles = JS_FALSE; |
3487 | |
3488 | js_TraceRuntime(&trc, keepAtoms); |
3489 | js_MarkScriptFilenames(rt, keepAtoms); |
3490 | |
3491 |     /*
3492 |      * Mark the children of things whose tracing was delayed above because
3493 |      * it recursed too deeply.
3494 |      */
3495 | TraceDelayedChildren(&trc); |
3496 | |
3497 | JS_ASSERT(!cx->insideGCMarkCallback); |
3498 | if (rt->gcCallback) { |
3499 | cx->insideGCMarkCallback = JS_TRUE; |
3500 | (void) rt->gcCallback(cx, JSGC_MARK_END); |
3501 | JS_ASSERT(cx->insideGCMarkCallback); |
3502 | cx->insideGCMarkCallback = JS_FALSE; |
3503 | } |
3504 | JS_ASSERT(rt->gcTraceLaterCount == 0); |
3505 | |
3506 | rt->gcMarkingTracer = NULL; |
3507 | |
3508 | /* |
3509 | * Sweep phase. |
3510 | * |
3511 | * Finalize as we sweep, outside of rt->gcLock but with rt->gcRunning set |
3512 | * so that any attempt to allocate a GC-thing from a finalizer will fail, |
3513 | * rather than nest badly and leave the unmarked newborn to be swept. |
3514 | * |
3515 |      * We sweep the atom state first so we can use js_IsAboutToBeFinalized
3516 |      * on a JSString or jsdouble held in a hashtable to check whether the
3517 |      * hashtable entry can be freed. Note that even after the entry is freed,
3518 |      * JSObject finalizers can continue to access the corresponding jsdouble*
3519 |      * and JSString*, assuming that they are unique. This works because the
3520 |      * atomization API must not be called during the GC.
3521 |      */
3522 | js_SweepAtomState(cx); |
3523 | |
3524 | /* Finalize iterator states before the objects they iterate over. */ |
3525 | CloseNativeIterators(cx); |
3526 | |
3527 | /* Finalize watch points associated with unreachable objects. */ |
3528 | js_SweepWatchPoints(cx); |
3529 | |
3530 | #ifdef DEBUG |
3531 | /* Save the pre-sweep count of scope-mapped properties. */ |
3532 | rt->liveScopePropsPreSweep = rt->liveScopeProps; |
3533 | #endif |
3534 | |
3535 |     /*
3536 |      * Here we need to ensure that JSObject instances are finalized before
3537 |      * GC-allocated JSString and jsdouble instances, so an object's finalizer
3538 |      * can access them even if they are about to be freed. For that we simply
3539 |      * finalize the list containing JSObject instances first, since the
3540 |      * static assert at the beginning of the file guarantees that JSString
3541 |      * and jsdouble instances are allocated from a different list.
3542 |      */
3543 | emptyArenas = NULL; |
3544 | for (i = 0; i < GC_NUM_FREELISTS; i++) { |
3545 | arenaList = &rt->gcArenaList[i == 0 |
3546 | ? GC_FREELIST_INDEX(sizeof(JSObject)) |
3547 | : i == GC_FREELIST_INDEX(sizeof(JSObject)) |
3548 | ? 0 |
3549 | : i]; |
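     |         /*
     |          * Example of the index swap above (hypothetical value): if
     |          * GC_FREELIST_INDEX(sizeof(JSObject)) were 2, the lists would be
     |          * visited in the order 2, 1, 0, 3, 4, ..., i.e. the JSObject
     |          * list first, with the others unchanged.
     |          */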
3550 | ap = &arenaList->last; |
3551 | if (!(a = *ap)) |
3552 | continue; |
3553 | |
3554 | JS_ASSERT(arenaList->lastCount > 0); |
3555 | arenaList->freeList = NULL; |
3556 | freeList = NULL; |
3557 | thingSize = arenaList->thingSize; |
3558 | indexLimit = THINGS_PER_ARENA(thingSize); |
3559 | flagp = THING_FLAGP(a, arenaList->lastCount - 1); |
3560 | METER((nlivearenas = 0, nkilledarenas = 0, nthings = 0)); |
3561 | for (;;) { |
3562 | JS_ASSERT(a->prevUntracedPage == 0); |
3563 | JS_ASSERT(a->u.untracedThings == 0); |
3564 | allClear = JS_TRUE; |
3565 | do { |
3566 | flags = *flagp; |
3567 | if (flags & (GCF_MARK | GCF_LOCK)) { |
3568 | *flagp |