/trunk/js/jsgc.cpp

Revision 585 - Sun Sep 12 15:13:23 2010 UTC by siliconforks
File size: 123282 byte(s)
Update to SpiderMonkey from Firefox 3.6.9.
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sw=4 et tw=78:
 *
 * ***** BEGIN LICENSE BLOCK *****
 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
 *
 * The contents of this file are subject to the Mozilla Public License Version
 * 1.1 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * http://www.mozilla.org/MPL/
 *
 * Software distributed under the License is distributed on an "AS IS" basis,
 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
 * for the specific language governing rights and limitations under the
 * License.
 *
 * The Original Code is Mozilla Communicator client code, released
 * March 31, 1998.
 *
 * The Initial Developer of the Original Code is
 * Netscape Communications Corporation.
 * Portions created by the Initial Developer are Copyright (C) 1998
 * the Initial Developer. All Rights Reserved.
 *
 * Contributor(s):
 *
 * Alternatively, the contents of this file may be used under the terms of
 * either of the GNU General Public License Version 2 or later (the "GPL"),
 * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
 * in which case the provisions of the GPL or the LGPL are applicable instead
 * of those above. If you wish to allow use of your version of this file only
 * under the terms of either the GPL or the LGPL, and not to allow others to
 * use your version of this file under the terms of the MPL, indicate your
 * decision by deleting the provisions above and replace them with the notice
 * and other provisions required by the GPL or the LGPL. If you do not delete
 * the provisions above, a recipient may use your version of this file under
 * the terms of any one of the MPL, the GPL or the LGPL.
 *
 * ***** END LICENSE BLOCK ***** */

/*
 * JS Mark-and-Sweep Garbage Collector.
 *
 * This GC allocates fixed-sized things with sizes up to GC_NBYTES_MAX (see
 * jsgc.h). It allocates from a special GC arena pool with each arena allocated
 * using malloc. It uses an ideally parallel array of flag bytes to hold the
 * mark bit, finalizer type index, etc.
 *
 * XXX swizzle page to freelist for better locality of reference
 */
#include <stdlib.h>     /* for free */
#include <math.h>
#include <string.h>     /* for memset used when DEBUG */
#include "jstypes.h"
#include "jsstdint.h"
#include "jsutil.h" /* Added by JSIFY */
#include "jshash.h" /* Added by JSIFY */
#include "jsbit.h"
#include "jsclist.h"
#include "jsprf.h"
#include "jsapi.h"
#include "jsatom.h"
#include "jscntxt.h"
#include "jsversion.h"
#include "jsdbgapi.h"
#include "jsexn.h"
#include "jsfun.h"
#include "jsgc.h"
#include "jsinterp.h"
#include "jsiter.h"
#include "jslock.h"
#include "jsnum.h"
#include "jsobj.h"
#include "jsparse.h"
#include "jsscope.h"
#include "jsscript.h"
#include "jsstaticcheck.h"
#include "jsstr.h"
#include "jstask.h"
#include "jstracer.h"

#if JS_HAS_XML_SUPPORT
#include "jsxml.h"
#endif

#ifdef INCLUDE_MOZILLA_DTRACE
#include "jsdtracef.h"
#endif

/*
 * Check if posix_memalign is available.
 */
#if _POSIX_C_SOURCE >= 200112L || _XOPEN_SOURCE >= 600 || MOZ_MEMORY
# define HAS_POSIX_MEMALIGN 1
#else
# define HAS_POSIX_MEMALIGN 0
#endif

/*
 * jemalloc provides posix_memalign.
 */
#ifdef MOZ_MEMORY
extern "C" {
#include "../../memory/jemalloc/jemalloc.h"
}
#endif

/*
 * Include the headers for mmap unless we have posix_memalign and do not
 * insist on mmap.
 */
#if JS_GC_USE_MMAP || (!defined JS_GC_USE_MMAP && !HAS_POSIX_MEMALIGN)
# if defined(XP_WIN)
#  ifndef JS_GC_USE_MMAP
#   define JS_GC_USE_MMAP 1
#  endif
#  include <windows.h>
# elif defined(__SYMBIAN32__)
// Symbian's OpenC has mmap (and #defines _POSIX_MAPPED_FILES), but
// doesn't implement MAP_ANON. If we have MOZ_MEMORY, then we can use
// posix_memalign; we've defined HAS_POSIX_MEMALIGN above. Otherwise,
// we overallocate.
# else
#  if defined(XP_UNIX) || defined(XP_BEOS)
#   include <unistd.h>
#  endif
#  if _POSIX_MAPPED_FILES > 0
#   ifndef JS_GC_USE_MMAP
#    define JS_GC_USE_MMAP 1
#   endif
#   include <sys/mman.h>

/* On Mac OS X MAP_ANONYMOUS is not defined. */
#   if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
#    define MAP_ANONYMOUS MAP_ANON
#   endif
#   if !defined(MAP_ANONYMOUS)
#    define MAP_ANONYMOUS 0
#   endif
#  else
#   if JS_GC_USE_MMAP
#    error "JS_GC_USE_MMAP is set when mmap is not available"
#   endif
#  endif
# endif
#endif

/*
 * Check that JSTempValueUnion has the size of jsval and void * so we can
 * reinterpret a jsval as a void * GC-thing pointer and use JSTVU_SINGLE for
 * different GC things.
 */
JS_STATIC_ASSERT(sizeof(JSTempValueUnion) == sizeof(jsval));
JS_STATIC_ASSERT(sizeof(JSTempValueUnion) == sizeof(void *));

/*
 * Check that JSTRACE_XML follows JSTRACE_OBJECT, JSTRACE_DOUBLE and
 * JSTRACE_STRING.
 */
JS_STATIC_ASSERT(JSTRACE_OBJECT == 0);
JS_STATIC_ASSERT(JSTRACE_DOUBLE == 1);
JS_STATIC_ASSERT(JSTRACE_STRING == 2);
JS_STATIC_ASSERT(JSTRACE_XML == 3);

/*
 * JS_IS_VALID_TRACE_KIND assumes that JSTRACE_STRING is the last non-xml
 * trace kind when JS_HAS_XML_SUPPORT is false.
 */
JS_STATIC_ASSERT(JSTRACE_STRING + 1 == JSTRACE_XML);

/*
 * The number of used GCX-types must stay within GCX_LIMIT.
 */
JS_STATIC_ASSERT(GCX_NTYPES <= GCX_LIMIT);


/*
 * Check that we can reinterpret double as JSGCDoubleCell.
 */
JS_STATIC_ASSERT(sizeof(JSGCDoubleCell) == sizeof(double));

/*
 * Check that we can use memset(p, 0, ...) to implement JS_CLEAR_WEAK_ROOTS.
 */
JS_STATIC_ASSERT(JSVAL_NULL == 0);


/*
 * A GC arena contains a fixed number of flag bits for each thing in its heap,
 * and supports O(1) lookup of a flag given its thing's address.
 *
 * To implement this, we allocate things of the same size from a GC arena
 * containing GC_ARENA_SIZE bytes aligned on a GC_ARENA_SIZE boundary. The
 * following picture shows the arena's layout:
 *
 *  +------------------------------+--------------------+---------------+
 *  | allocation area for GC thing | flags of GC things | JSGCArenaInfo |
 *  +------------------------------+--------------------+---------------+
 *
 * To find the flag bits for a thing we calculate the thing index counting
 * from the arena's start using:
 *
 *   thingIndex = (thingAddress & GC_ARENA_MASK) / thingSize
 *
 * The details of the flag lookup depend on the thing's kind. For all GC
 * things except doubles we use one byte of flags where 4 bits determine the
 * thing's type and the rest is used to implement GC marking, finalization
 * and locking. We calculate the address of the flag byte using:
 *
 *   flagByteAddress =
 *       (thingAddress | GC_ARENA_MASK) - sizeof(JSGCArenaInfo) - thingIndex
 *
 * where
 *
 *   (thingAddress | GC_ARENA_MASK) - sizeof(JSGCArenaInfo)
 *
 * is the last byte of the flags area.
 *
 * This implies that the things are allocated from the start of their area and
 * flags are allocated from the end. This arrangement avoids a relatively
 * expensive calculation of the location of the boundary separating things and
 * flags. The boundary's offset from the start of the arena is given by:
 *
 *   thingsPerArena * thingSize
 *
 * where thingsPerArena is the number of things that the arena can hold:
 *
 *   (GC_ARENA_SIZE - sizeof(JSGCArenaInfo)) / (thingSize + 1).
 *
 * To allocate doubles we use a specialized arena. It can contain only numbers
 * so we do not need the type bits. Moreover, since doubles do not require a
 * finalizer and very few of them are locked via the js_LockGCThing API, we
 * use just one bit of flags per double to denote if it was marked during the
 * marking phase of the GC. The locking is implemented via a hash table. Thus
 * for doubles the flag area becomes a bitmap.
 *
 * The JS_GC_USE_MMAP macro governs the choice of the aligned arena allocator.
 * When it is true, a platform-dependent function like mmap is used to get
 * memory aligned on CPU page boundaries. If the macro is false or undefined,
 * posix_memalign is used when available. Otherwise the code uses malloc to
 * over-allocate a chunk with js_gcArenasPerChunk aligned arenas. The
 * approximate space overhead of this is 1/js_gcArenasPerChunk. For details,
 * see NewGCChunk/DestroyGCChunk below.
 *
 * The code also allocates arenas in chunks when JS_GC_USE_MMAP is 1 to
 * minimize the overhead of mmap/munmap. In this case js_gcArenasPerChunk
 * cannot be a compile-time constant, as the system page size is not known
 * until runtime.
 */
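
/*
 * Worked example of the layout arithmetic above (illustrative, not part of
 * the original file; the numbers assume GC_ARENA_SHIFT == 12 and a 16-byte
 * JSGCArenaInfo, i.e. a 32-bit build): for thingSize == 16,
 * thingsPerArena == (4096 - 16) / 17 == 240. The things occupy arena bytes
 * [0, 3840), the 240 flag bytes occupy [3840, 4080) and JSGCArenaInfo
 * occupies [4080, 4096), filling the arena exactly. A thing at arena offset
 * 0x130 has thingIndex == 304 / 16 == 19 and its flag byte at arena offset
 * 4095 - 16 - 19 == 4060, inside the flags area as required.
 */
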
#if JS_GC_USE_MMAP
static uint32 js_gcArenasPerChunk = 0;
static JSBool js_gcUseMmap = JS_FALSE;
#elif HAS_POSIX_MEMALIGN
# define js_gcArenasPerChunk 1
#else
# define js_gcArenasPerChunk 7
#endif

#if defined(js_gcArenasPerChunk) && js_gcArenasPerChunk == 1
# define CHUNKED_ARENA_ALLOCATION 0
#else
# define CHUNKED_ARENA_ALLOCATION 1
#endif

#define GC_ARENA_SHIFT              12
#define GC_ARENA_MASK               ((jsuword) JS_BITMASK(GC_ARENA_SHIFT))
#define GC_ARENA_SIZE               JS_BIT(GC_ARENA_SHIFT)

/*
 * JS_GC_ARENA_PAD defines the number of bytes to pad the JSGCArenaInfo
 * structure. It is used to improve allocation efficiency when using
 * posix_memalign. If malloc's implementation uses internal headers, then
 * calling
 *
 *   posix_memalign(&p, GC_ARENA_SIZE, GC_ARENA_SIZE * js_gcArenasPerChunk)
 *
 * in a sequence leaves holes between allocations of the size GC_ARENA_SIZE
 * due to the need to fit headers. JS_GC_ARENA_PAD mitigates that so the code
 * calls
 *
 *   posix_memalign(&p, GC_ARENA_SIZE,
 *                  GC_ARENA_SIZE * js_gcArenasPerChunk - JS_GC_ARENA_PAD)
 *
 * When JS_GC_ARENA_PAD is equal to or greater than the number of words in
 * the system header, the system can pack all allocations together without
 * holes.
 *
 * With JS_GC_USE_MEMALIGN we want at least a two-word pad unless
 * posix_memalign comes from jemalloc, which does not use any
 * headers/trailers.
 */
#ifndef JS_GC_ARENA_PAD
# if HAS_POSIX_MEMALIGN && !MOZ_MEMORY
#  define JS_GC_ARENA_PAD (2 * JS_BYTES_PER_WORD)
# else
#  define JS_GC_ARENA_PAD 0
# endif
#endif

struct JSGCArenaInfo {
    /*
     * Allocation list for the arena or NULL if the arena holds double values.
     */
    JSGCArenaList   *list;

    /*
     * Pointer to the previous arena in a linked list. The arena can either
     * belong to one of the JSRuntime.gcArenaList lists or, when it does not
     * have any allocated GC things, to the list of free arenas in the chunk
     * with its head stored in JSGCChunkInfo.lastFreeArena.
     */
    JSGCArenaInfo   *prev;

#if !CHUNKED_ARENA_ALLOCATION
    jsuword         prevUntracedPage;
#else
    /*
     * A link field for the list of arenas with marked but not yet traced
     * things. The field is encoded as the arena's page to share the space
     * with the firstArena and arenaIndex fields.
     */
    jsuword         prevUntracedPage : JS_BITS_PER_WORD - GC_ARENA_SHIFT;

    /*
     * When firstArena is false, the index of the arena in the chunk. When
     * firstArena is true, the index of a free arena holding JSGCChunkInfo or
     * NO_FREE_ARENAS if there are no free arenas in the chunk.
     *
     * GET_ARENA_INDEX and GET_CHUNK_INFO_INDEX are convenience macros to
     * access either index.
     */
    jsuword         arenaIndex : GC_ARENA_SHIFT - 1;

    /* Flag indicating if the arena is the first in the chunk. */
    jsuword         firstArena : 1;
#endif

    union {
        jsuword     untracedThings;     /* bitset for fast search of marked
                                           but not yet traced things */
        JSBool      hasMarkedDoubles;   /* the arena has marked doubles */
    } u;

#if JS_GC_ARENA_PAD != 0
    uint8           pad[JS_GC_ARENA_PAD];
#endif
};

/*
 * Verify that the bit fields are indeed shared and JSGCArenaInfo is as small
 * as possible. The code does not rely on this check so if on a particular
 * platform this does not compile, then, as a workaround, comment the assert
 * out and submit a bug report.
 */
JS_STATIC_ASSERT(offsetof(JSGCArenaInfo, u) == 3 * sizeof(jsuword));

/*
 * Macros to convert between JSGCArenaInfo, the start address of the arena and
 * arena's page defined as (start address) >> GC_ARENA_SHIFT.
 */
#define ARENA_INFO_OFFSET (GC_ARENA_SIZE - (uint32) sizeof(JSGCArenaInfo))

#define IS_ARENA_INFO_ADDRESS(arena) \
    (((jsuword) (arena) & GC_ARENA_MASK) == ARENA_INFO_OFFSET)

#define ARENA_START_TO_INFO(arenaStart) \
    (JS_ASSERT(((arenaStart) & (jsuword) GC_ARENA_MASK) == 0), \
     (JSGCArenaInfo *) ((arenaStart) + (jsuword) ARENA_INFO_OFFSET))

#define ARENA_INFO_TO_START(arena) \
    (JS_ASSERT(IS_ARENA_INFO_ADDRESS(arena)), \
     (jsuword) (arena) & ~(jsuword) GC_ARENA_MASK)

#define ARENA_PAGE_TO_INFO(arenaPage) \
    (JS_ASSERT(arenaPage != 0), \
     JS_ASSERT(!((jsuword)(arenaPage) >> (JS_BITS_PER_WORD-GC_ARENA_SHIFT))), \
     ARENA_START_TO_INFO((arenaPage) << GC_ARENA_SHIFT))

#define ARENA_INFO_TO_PAGE(arena) \
    (JS_ASSERT(IS_ARENA_INFO_ADDRESS(arena)), \
     ((jsuword) (arena) >> GC_ARENA_SHIFT))

#define GET_ARENA_INFO(chunk, index) \
    (JS_ASSERT((index) < js_gcArenasPerChunk), \
     ARENA_START_TO_INFO(chunk + ((index) << GC_ARENA_SHIFT)))

#if CHUNKED_ARENA_ALLOCATION
/*
 * Definitions for allocating arenas in chunks.
 *
 * All chunks that have at least one free arena are put on the doubly-linked
 * list with the head stored in JSRuntime.gcChunkList. JSGCChunkInfo contains
 * the head of the chunk's free arena list together with the link fields for
 * gcChunkList.
 *
 * The structure is stored in one of the chunk's free arenas.
 * GET_CHUNK_INFO_INDEX gives the index of this arena. When all arenas in the
 * chunk are used, it is removed from the list and the index is set to
 * NO_FREE_ARENAS, indicating that the chunk is not on gcChunkList and has no
 * JSGCChunkInfo available.
 */

struct JSGCChunkInfo {
    JSGCChunkInfo   **prevp;
    JSGCChunkInfo   *next;
    JSGCArenaInfo   *lastFreeArena;
    uint32          numFreeArenas;
};

#define NO_FREE_ARENAS JS_BITMASK(GC_ARENA_SHIFT - 1)

#ifdef js_gcArenasPerChunk
JS_STATIC_ASSERT(1 <= js_gcArenasPerChunk &&
                 js_gcArenasPerChunk <= NO_FREE_ARENAS);
#endif

#define GET_ARENA_CHUNK(arena, index) \
    (JS_ASSERT(GET_ARENA_INDEX(arena) == index), \
     ARENA_INFO_TO_START(arena) - ((index) << GC_ARENA_SHIFT))

#define GET_ARENA_INDEX(arena) \
    ((arena)->firstArena ? 0 : (uint32) (arena)->arenaIndex)

#define GET_CHUNK_INFO_INDEX(chunk) \
    ((uint32) ARENA_START_TO_INFO(chunk)->arenaIndex)

#define SET_CHUNK_INFO_INDEX(chunk, index) \
    (JS_ASSERT((index) < js_gcArenasPerChunk || (index) == NO_FREE_ARENAS), \
     (void) (ARENA_START_TO_INFO(chunk)->arenaIndex = (jsuword) (index)))

#define GET_CHUNK_INFO(chunk, infoIndex) \
    (JS_ASSERT(GET_CHUNK_INFO_INDEX(chunk) == (infoIndex)), \
     JS_ASSERT((uint32) (infoIndex) < js_gcArenasPerChunk), \
     (JSGCChunkInfo *) ((chunk) + ((infoIndex) << GC_ARENA_SHIFT)))

#define CHUNK_INFO_TO_INDEX(ci) \
    GET_ARENA_INDEX(ARENA_START_TO_INFO((jsuword)ci))

#endif

/*
 * Macros for GC-thing operations.
 */
#define THINGS_PER_ARENA(thingSize) \
    ((GC_ARENA_SIZE - (uint32) sizeof(JSGCArenaInfo)) / ((thingSize) + 1U))

#define THING_TO_ARENA(thing) \
    (JS_ASSERT(!JSString::isStatic(thing)), \
     (JSGCArenaInfo *)(((jsuword) (thing) | GC_ARENA_MASK) \
                       + 1 - sizeof(JSGCArenaInfo)))

#define THING_TO_INDEX(thing, thingSize) \
    ((uint32) ((jsuword) (thing) & GC_ARENA_MASK) / (uint32) (thingSize))

#define THING_FLAGS_END(arena) ((uint8 *)(arena))

#define THING_FLAGP(arena, thingIndex) \
    (JS_ASSERT((jsuword) (thingIndex) \
               < (jsuword) THINGS_PER_ARENA((arena)->list->thingSize)), \
     (uint8 *)(arena) - 1 - (thingIndex))

#define THING_TO_FLAGP(thing, thingSize) \
    THING_FLAGP(THING_TO_ARENA(thing), THING_TO_INDEX(thing, thingSize))

#define FLAGP_TO_ARENA(flagp) THING_TO_ARENA(flagp)

#define FLAGP_TO_INDEX(flagp) \
    (JS_ASSERT(((jsuword) (flagp) & GC_ARENA_MASK) < ARENA_INFO_OFFSET), \
     (ARENA_INFO_OFFSET - 1 - (uint32) ((jsuword) (flagp) & GC_ARENA_MASK)))

#define FLAGP_TO_THING(flagp, thingSize) \
    (JS_ASSERT(((jsuword) (flagp) & GC_ARENA_MASK) >= \
               (ARENA_INFO_OFFSET - THINGS_PER_ARENA(thingSize))), \
     (JSGCThing *)(((jsuword) (flagp) & ~GC_ARENA_MASK) + \
                   (thingSize) * FLAGP_TO_INDEX(flagp)))
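
/*
 * Illustrative sketch, excluded from compilation and not part of the
 * original file: the macros above compose into inverse mappings between a
 * non-double GC thing and its flag byte.
 */
#if 0
static void
ExampleThingFlagRoundTrip(void *thing, size_t thingSize)
{
    JSGCArenaInfo *a = THING_TO_ARENA(thing);
    uint32 index = THING_TO_INDEX(thing, thingSize);
    uint8 *flagp = THING_FLAGP(a, index);

    /* The flag byte lies between the things area and JSGCArenaInfo. */
    JS_ASSERT(flagp == THING_TO_FLAGP(thing, thingSize));
    JS_ASSERT(FLAGP_TO_INDEX(flagp) == index);
    JS_ASSERT(FLAGP_TO_THING(flagp, thingSize) == (JSGCThing *) thing);
}
#endif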

/*
 * Macros for the specialized arena for doubles.
 *
 * DOUBLES_PER_ARENA defines the maximum number of doubles that the arena can
 * hold. We find it as follows. Let n be the number of doubles in the arena.
 * Together with the bitmap of flags and JSGCArenaInfo they should fit the
 * arena. Hence DOUBLES_PER_ARENA or n_max is the maximum value of n for
 * which the following holds:
 *
 *   n*s + ceil(n/B) <= M                                               (1)
 *
 * where "/" denotes normal real division,
 *       ceil(r) gives the least integer not smaller than the number r,
 *       s is the number of words in jsdouble,
 *       B is the number of bits per word, i.e. B == JS_BITS_PER_WORD,
 *       M is the number of words in the arena before JSGCArenaInfo, i.e.
 *       M == (GC_ARENA_SIZE - sizeof(JSGCArenaInfo)) / sizeof(jsuword)
 *         == ARENA_INFO_OFFSET / sizeof(jsuword).
 *
 * We rewrite the inequality as
 *
 *   n*B*s/B + ceil(n/B) <= M,
 *   ceil(n*B*s/B + n/B) <= M,
 *   ceil(n*(B*s + 1)/B) <= M                                           (2)
 *
 * We define a helper function e(n, s, B),
 *
 *   e(n, s, B) := ceil(n*(B*s + 1)/B) - n*(B*s + 1)/B, 0 <= e(n, s, B) < 1.
 *
 * It gives:
 *
 *   n*(B*s + 1)/B + e(n, s, B) <= M,
 *   n + e*B/(B*s + 1) <= M*B/(B*s + 1)
 *
 * We apply the floor function to both sides of the last inequality, where
 * floor(r) gives the biggest integer not greater than r. As a consequence we
 * have:
 *
 *   floor(n + e*B/(B*s + 1)) <= floor(M*B/(B*s + 1)),
 *   n + floor(e*B/(B*s + 1)) <= floor(M*B/(B*s + 1)),
 *   n <= floor(M*B/(B*s + 1)),                                         (3)
 *
 * where floor(e*B/(B*s + 1)) is zero as e*B/(B*s + 1) < B/(B*s + 1) < 1.
 * Thus any n that satisfies the original constraint (1) or its equivalent
 * (2) must also satisfy (3). That is, we have an upper bound on the maximum
 * value of n. Let us show that this upper bound,
 *
 *   floor(M*B/(B*s + 1)),                                              (4)
 *
 * also satisfies (1) and, as such, gives the required maximum value.
 * Substituting it into (2) gives:
 *
 *   ceil(floor(M*B/(B*s + 1))*(B*s + 1)/B) == ceil(floor(M/X)*X)
 *
 * where X == (B*s + 1)/B > 1. But then floor(M/X)*X <= M/X*X == M and
 *
 *   ceil(floor(M/X)*X) <= ceil(M) == M.
 *
 * Thus the value of (4) gives the maximum n satisfying (1).
 *
 * For the final result we observe that in (4)
 *
 *   M*B == ARENA_INFO_OFFSET / sizeof(jsuword) * JS_BITS_PER_WORD
 *       == ARENA_INFO_OFFSET * JS_BITS_PER_BYTE
 *
 * and
 *
 *   B*s == JS_BITS_PER_WORD * sizeof(jsdouble) / sizeof(jsuword)
 *       == JS_BITS_PER_DOUBLE.
 */
#define DOUBLES_PER_ARENA \
    ((ARENA_INFO_OFFSET * JS_BITS_PER_BYTE) / (JS_BITS_PER_DOUBLE + 1))
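
/*
 * Plugging concrete numbers into (4) (illustrative, not part of the
 * original file; assumes a 32-bit build where sizeof(JSGCArenaInfo) == 16,
 * so ARENA_INFO_OFFSET == 4080): DOUBLES_PER_ARENA == 4080 * 8 / 65 == 502.
 * Indeed, 502 doubles occupy 4016 bytes and their bitmap needs
 * ceil(502 / 32) == 16 words == 64 bytes, filling the 4080 bytes exactly,
 * while 503 doubles would need 4024 + 64 == 4088 > 4080 bytes.
 */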

/*
 * Check that sizeof(jsuword) divides both ARENA_INFO_OFFSET and
 * sizeof(jsdouble).
 */
JS_STATIC_ASSERT(ARENA_INFO_OFFSET % sizeof(jsuword) == 0);
JS_STATIC_ASSERT(sizeof(jsdouble) % sizeof(jsuword) == 0);
JS_STATIC_ASSERT(sizeof(jsbitmap) == sizeof(jsuword));

#define DOUBLES_ARENA_BITMAP_WORDS \
    (JS_HOWMANY(DOUBLES_PER_ARENA, JS_BITS_PER_WORD))

/* Check that DOUBLES_PER_ARENA indeed maximises (1). */
JS_STATIC_ASSERT(DOUBLES_PER_ARENA * sizeof(jsdouble) +
                 DOUBLES_ARENA_BITMAP_WORDS * sizeof(jsuword) <=
                 ARENA_INFO_OFFSET);

JS_STATIC_ASSERT((DOUBLES_PER_ARENA + 1) * sizeof(jsdouble) +
                 sizeof(jsuword) *
                 JS_HOWMANY((DOUBLES_PER_ARENA + 1), JS_BITS_PER_WORD) >
                 ARENA_INFO_OFFSET);

/*
 * When DOUBLES_PER_ARENA % JS_BITS_PER_WORD != 0, some bits in the last
 * word of the occupation bitmap are unused.
 */
#define UNUSED_DOUBLE_BITMAP_BITS \
    (DOUBLES_ARENA_BITMAP_WORDS * JS_BITS_PER_WORD - DOUBLES_PER_ARENA)

JS_STATIC_ASSERT(UNUSED_DOUBLE_BITMAP_BITS < JS_BITS_PER_WORD);

#define DOUBLES_ARENA_BITMAP_OFFSET \
    (ARENA_INFO_OFFSET - DOUBLES_ARENA_BITMAP_WORDS * sizeof(jsuword))

#define CHECK_DOUBLE_ARENA_INFO(arenaInfo) \
    (JS_ASSERT(IS_ARENA_INFO_ADDRESS(arenaInfo)), \
     JS_ASSERT(!(arenaInfo)->list)) \

/*
 * Get the start of the bitmap area containing double mark flags in the arena.
 * To access the flag the code uses
 *
 *   JS_TEST_BIT(bitmapStart, index)
 *
 * That is, compared with the case of arenas with non-double things, we count
 * flags from the start of the bitmap area, not from the end.
 */
#define DOUBLE_ARENA_BITMAP(arenaInfo) \
    (CHECK_DOUBLE_ARENA_INFO(arenaInfo), \
     (jsbitmap *) arenaInfo - DOUBLES_ARENA_BITMAP_WORDS)

#define DOUBLE_THING_TO_INDEX(thing) \
    (CHECK_DOUBLE_ARENA_INFO(THING_TO_ARENA(thing)), \
     JS_ASSERT(((jsuword) (thing) & GC_ARENA_MASK) < \
               DOUBLES_ARENA_BITMAP_OFFSET), \
     ((uint32) (((jsuword) (thing) & GC_ARENA_MASK) / sizeof(jsdouble))))

static void
ClearDoubleArenaFlags(JSGCArenaInfo *a)
{
    jsbitmap *bitmap, mask;
    uintN nused;

    /*
     * When some high bits in the last word of the double occupation bitmap
     * are unused, we must set them. Otherwise RefillDoubleFreeList will
     * assume that they correspond to some free cells and will try to
     * allocate them.
     *
     * Note that the code works correctly with UNUSED_DOUBLE_BITMAP_BITS == 0.
     */
    bitmap = DOUBLE_ARENA_BITMAP(a);
    memset(bitmap, 0, (DOUBLES_ARENA_BITMAP_WORDS - 1) * sizeof *bitmap);
    mask = ((jsbitmap) 1 << UNUSED_DOUBLE_BITMAP_BITS) - 1;
    nused = JS_BITS_PER_WORD - UNUSED_DOUBLE_BITMAP_BITS;
    bitmap[DOUBLES_ARENA_BITMAP_WORDS - 1] = mask << nused;
}
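
/*
 * Worked example for the mask computation above (illustrative, not part of
 * the original file; 32-bit build): DOUBLES_PER_ARENA == 502 gives
 * DOUBLES_ARENA_BITMAP_WORDS == 16 and UNUSED_DOUBLE_BITMAP_BITS ==
 * 16 * 32 - 502 == 10, so mask == 0x3FF, nused == 22 and the last bitmap
 * word becomes 0x3FF << 22 == 0xFFC00000: the top 10 bits, which correspond
 * to no double cell, stay permanently "marked" so they are never handed out
 * as free cells.
 */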

static JS_ALWAYS_INLINE JSBool
IsMarkedDouble(JSGCArenaInfo *a, uint32 index)
{
    jsbitmap *bitmap;

    JS_ASSERT(a->u.hasMarkedDoubles);
    bitmap = DOUBLE_ARENA_BITMAP(a);
    return JS_TEST_BIT(bitmap, index);
}

/*
 * JSRuntime.gcDoubleArenaList.nextDoubleFlags points either to:
 *
 *   1. The next byte in the bitmap area for doubles to check for unmarked
 *      (or free) doubles.
 *   2. Or to the end of the bitmap area when all GC cells of the arena are
 *      allocated.
 *   3. Or to a special sentinel value indicating that there are no arenas
 *      to check for unmarked doubles.
 *
 * We set the sentinel to ARENA_INFO_OFFSET so the single check
 *
 *   ((jsuword) nextDoubleFlags & GC_ARENA_MASK) == ARENA_INFO_OFFSET
 *
 * will cover both the second and the third cases.
 */
#define DOUBLE_BITMAP_SENTINEL  ((jsbitmap *) ARENA_INFO_OFFSET)

#ifdef JS_THREADSAFE
/*
 * The maximum number of things to put on the local free list by taking
 * several things from the global free list or from the tail of the last
 * allocated arena, to amortize the cost of rt->gcLock.
 */
#define MAX_THREAD_LOCAL_THINGS 64

#endif

JS_STATIC_ASSERT(sizeof(JSStackHeader) >= 2 * sizeof(jsval));

JS_STATIC_ASSERT(sizeof(JSGCThing) >= sizeof(JSString));
JS_STATIC_ASSERT(sizeof(JSGCThing) >= sizeof(jsdouble));

/* We want to use all the available GC thing space for an object's slots. */
JS_STATIC_ASSERT(sizeof(JSObject) % sizeof(JSGCThing) == 0);

/*
 * Ensure that JSObject is allocated from a different GC list than jsdouble
 * and JSString so we can easily finalize JSObject before these two types of
 * GC things. See comments in js_GC.
 */
JS_STATIC_ASSERT(GC_FREELIST_INDEX(sizeof(JSString)) !=
                 GC_FREELIST_INDEX(sizeof(JSObject)));
JS_STATIC_ASSERT(GC_FREELIST_INDEX(sizeof(jsdouble)) !=
                 GC_FREELIST_INDEX(sizeof(JSObject)));

/*
 * JSPtrTable capacity growth descriptor. The table grows by powers of two
 * starting from capacity JSPtrTableInfo.minCapacity, but switches to linear
 * growth when the capacity reaches JSPtrTableInfo.linearGrowthThreshold.
 */
typedef struct JSPtrTableInfo {
    uint16      minCapacity;
    uint16      linearGrowthThreshold;
} JSPtrTableInfo;

#define GC_ITERATOR_TABLE_MIN     4
#define GC_ITERATOR_TABLE_LINEAR  1024

static const JSPtrTableInfo iteratorTableInfo = {
    GC_ITERATOR_TABLE_MIN,
    GC_ITERATOR_TABLE_LINEAR
};

/* Calculate table capacity based on the current value of JSPtrTable.count. */
static size_t
PtrTableCapacity(size_t count, const JSPtrTableInfo *info)
{
    size_t linear, log, capacity;

    linear = info->linearGrowthThreshold;
    JS_ASSERT(info->minCapacity <= linear);

    if (count == 0) {
        capacity = 0;
    } else if (count < linear) {
        log = JS_CEILING_LOG2W(count);
        JS_ASSERT(log != JS_BITS_PER_WORD);
        capacity = (size_t)1 << log;
        if (capacity < info->minCapacity)
            capacity = info->minCapacity;
    } else {
        capacity = JS_ROUNDUP(count, linear);
    }

    JS_ASSERT(capacity >= count);
    return capacity;
}
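
/*
 * For example (illustrative, not part of the original file), with
 * iteratorTableInfo's minCapacity == 4 and linearGrowthThreshold == 1024
 * this returns: 0 -> 0, 1..4 -> 4, 5 -> 8, 100 -> 128, 1023 -> 1024,
 * 1025 -> 2048 and 2100 -> 3072: growth is exponential below the threshold
 * and linear above it.
 */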

static void
FreePtrTable(JSPtrTable *table, const JSPtrTableInfo *info)
{
    if (table->array) {
        JS_ASSERT(table->count > 0);
        js_free(table->array);
        table->array = NULL;
        table->count = 0;
    }
    JS_ASSERT(table->count == 0);
}

static JSBool
AddToPtrTable(JSContext *cx, JSPtrTable *table, const JSPtrTableInfo *info,
              void *ptr)
{
    size_t count, capacity;
    void **array;

    count = table->count;
    capacity = PtrTableCapacity(count, info);

    if (count == capacity) {
        if (capacity < info->minCapacity) {
            JS_ASSERT(capacity == 0);
            JS_ASSERT(!table->array);
            capacity = info->minCapacity;
        } else {
            /*
             * Simplify the overflow detection assuming a pointer is bigger
             * than a byte.
             */
            JS_STATIC_ASSERT(2 <= sizeof table->array[0]);
            capacity = (capacity < info->linearGrowthThreshold)
                       ? 2 * capacity
                       : capacity + info->linearGrowthThreshold;
            if (capacity > (size_t)-1 / sizeof table->array[0])
                goto bad;
        }
        array = (void **) js_realloc(table->array,
                                     capacity * sizeof table->array[0]);
        if (!array)
            goto bad;
#ifdef DEBUG
        memset(array + count, JS_FREE_PATTERN,
               (capacity - count) * sizeof table->array[0]);
#endif
        table->array = array;
    }

    table->array[count] = ptr;
    table->count = count + 1;

    return JS_TRUE;

  bad:
    JS_ReportOutOfMemory(cx);
    return JS_FALSE;
}

static void
ShrinkPtrTable(JSPtrTable *table, const JSPtrTableInfo *info,
               size_t newCount)
{
    size_t oldCapacity, capacity;
    void **array;

    JS_ASSERT(newCount <= table->count);
    if (newCount == table->count)
        return;

    oldCapacity = PtrTableCapacity(table->count, info);
    table->count = newCount;
    capacity = PtrTableCapacity(newCount, info);

    if (oldCapacity != capacity) {
        array = table->array;
        JS_ASSERT(array);
        if (capacity == 0) {
            js_free(array);
            table->array = NULL;
            return;
        }
        array = (void **) js_realloc(array, capacity * sizeof array[0]);
        if (array)
            table->array = array;
    }
#ifdef DEBUG
    memset(table->array + newCount, JS_FREE_PATTERN,
           (capacity - newCount) * sizeof table->array[0]);
#endif
}

#ifdef JS_GCMETER
# define METER(x)               ((void) (x))
# define METER_IF(condition, x) ((void) ((condition) && (x)))
#else
# define METER(x)               ((void) 0)
# define METER_IF(condition, x) ((void) 0)
#endif

#define METER_UPDATE_MAX(maxLval, rval) \
    METER_IF((maxLval) < (rval), (maxLval) = (rval))

#if JS_GC_USE_MMAP || !HAS_POSIX_MEMALIGN

/*
 * For chunks allocated via over-sized malloc, get a pointer to store the gap
 * between the malloc's result and the first arena in the chunk.
 */
static uint32 *
GetMallocedChunkGapPtr(jsuword chunk)
{
    JS_ASSERT((chunk & GC_ARENA_MASK) == 0);

    /* Use the memory after the chunk, see NewGCChunk for details. */
    return (uint32 *) (chunk + (js_gcArenasPerChunk << GC_ARENA_SHIFT));
}

#endif

static jsuword
NewGCChunk(void)
{
    void *p;

#if JS_GC_USE_MMAP
    if (js_gcUseMmap) {
# if defined(XP_WIN)
        p = VirtualAlloc(NULL, js_gcArenasPerChunk << GC_ARENA_SHIFT,
                         MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
        return (jsuword) p;
# else
        p = mmap(NULL, js_gcArenasPerChunk << GC_ARENA_SHIFT,
                 PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        return (p == MAP_FAILED) ? 0 : (jsuword) p;
# endif
    }
#endif

#if HAS_POSIX_MEMALIGN
    if (0 != posix_memalign(&p, GC_ARENA_SIZE,
                            GC_ARENA_SIZE * js_gcArenasPerChunk -
                            JS_GC_ARENA_PAD)) {
        return 0;
    }
    return (jsuword) p;
#else
    /*
     * Implement chunk allocation using oversized malloc if mmap and
     * posix_memalign are not available.
     *
     * Since malloc allocates pointers aligned on the word boundary, to get
     * js_gcArenasPerChunk aligned arenas, we need to malloc only
     *
     *   ((js_gcArenasPerChunk + 1) << GC_ARENA_SHIFT) - sizeof(size_t)
     *
     * bytes. But since we store the gap between the malloced pointer and the
     * first arena in the chunk after the chunk, we need to ask for
     *
     *   ((js_gcArenasPerChunk + 1) << GC_ARENA_SHIFT)
     *
     * bytes to ensure that we always have room to store the gap.
     */
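
    /*
     * Worked example (illustrative, not in the original file): with
     * GC_ARENA_SHIFT == 12 and js_gcArenasPerChunk == 7 we malloc
     * 8 << 12 == 32768 bytes. Since p is at least word aligned, the gap
     * chunk - p is at most GC_ARENA_SIZE minus one word, the seven arenas
     * end at chunk + 28672, and the uint32 gap slot written right after
     * them still lies within the 32768 malloced bytes.
     */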
    p = js_malloc((js_gcArenasPerChunk + 1) << GC_ARENA_SHIFT);
    if (!p)
        return 0;

    {
        jsuword chunk;

        chunk = ((jsuword) p + GC_ARENA_MASK) & ~GC_ARENA_MASK;
        *GetMallocedChunkGapPtr(chunk) = (uint32) (chunk - (jsuword) p);
        return chunk;
    }
#endif
}

static void
DestroyGCChunk(jsuword chunk)
{
    JS_ASSERT((chunk & GC_ARENA_MASK) == 0);
#if JS_GC_USE_MMAP
    if (js_gcUseMmap) {
# if defined(XP_WIN)
        VirtualFree((void *) chunk, 0, MEM_RELEASE);
# elif defined(SOLARIS)
        munmap((char *) chunk, js_gcArenasPerChunk << GC_ARENA_SHIFT);
# else
        munmap((void *) chunk, js_gcArenasPerChunk << GC_ARENA_SHIFT);
# endif
        return;
    }
#endif

#if HAS_POSIX_MEMALIGN
    js_free((void *) chunk);
#else
    /* See comments in NewGCChunk. */
    JS_ASSERT(*GetMallocedChunkGapPtr(chunk) < GC_ARENA_SIZE);
    js_free((void *) (chunk - *GetMallocedChunkGapPtr(chunk)));
#endif
}

#if CHUNKED_ARENA_ALLOCATION

static void
AddChunkToList(JSRuntime *rt, JSGCChunkInfo *ci)
{
    ci->prevp = &rt->gcChunkList;
    ci->next = rt->gcChunkList;
    if (rt->gcChunkList) {
        JS_ASSERT(rt->gcChunkList->prevp == &rt->gcChunkList);
        rt->gcChunkList->prevp = &ci->next;
    }
    rt->gcChunkList = ci;
}

static void
RemoveChunkFromList(JSRuntime *rt, JSGCChunkInfo *ci)
{
    *ci->prevp = ci->next;
    if (ci->next) {
        JS_ASSERT(ci->next->prevp == &ci->next);
        ci->next->prevp = ci->prevp;
    }
}

#endif

static JSGCArenaInfo *
NewGCArena(JSRuntime *rt)
{
    jsuword chunk;
    JSGCArenaInfo *a;

    if (rt->gcBytes >= rt->gcMaxBytes)
        return NULL;

#if CHUNKED_ARENA_ALLOCATION
    if (js_gcArenasPerChunk == 1) {
#endif
        chunk = NewGCChunk();
        if (chunk == 0)
            return NULL;
        a = ARENA_START_TO_INFO(chunk);
#if CHUNKED_ARENA_ALLOCATION
    } else {
        JSGCChunkInfo *ci;
        uint32 i;
        JSGCArenaInfo *aprev;

        ci = rt->gcChunkList;
        if (!ci) {
            chunk = NewGCChunk();
            if (chunk == 0)
                return NULL;
            JS_ASSERT((chunk & GC_ARENA_MASK) == 0);
            a = GET_ARENA_INFO(chunk, 0);
            a->firstArena = JS_TRUE;
            a->arenaIndex = 0;
            aprev = NULL;
            i = 0;
            do {
                a->prev = aprev;
                aprev = a;
                ++i;
                a = GET_ARENA_INFO(chunk, i);
                a->firstArena = JS_FALSE;
                a->arenaIndex = i;
            } while (i != js_gcArenasPerChunk - 1);
            ci = GET_CHUNK_INFO(chunk, 0);
            ci->lastFreeArena = aprev;
            ci->numFreeArenas = js_gcArenasPerChunk - 1;
            AddChunkToList(rt, ci);
        } else {
            JS_ASSERT(ci->prevp == &rt->gcChunkList);
            a = ci->lastFreeArena;
            aprev = a->prev;
            if (!aprev) {
                JS_ASSERT(ci->numFreeArenas == 1);
                JS_ASSERT(ARENA_INFO_TO_START(a) == (jsuword) ci);
                RemoveChunkFromList(rt, ci);
                chunk = GET_ARENA_CHUNK(a, GET_ARENA_INDEX(a));
                SET_CHUNK_INFO_INDEX(chunk, NO_FREE_ARENAS);
            } else {
                JS_ASSERT(ci->numFreeArenas >= 2);
                JS_ASSERT(ARENA_INFO_TO_START(a) != (jsuword) ci);
                ci->lastFreeArena = aprev;
                ci->numFreeArenas--;
            }
        }
    }
#endif

    rt->gcBytes += GC_ARENA_SIZE;
    a->prevUntracedPage = 0;
    memset(&a->u, 0, sizeof(a->u));

    return a;
}

static void
DestroyGCArenas(JSRuntime *rt, JSGCArenaInfo *last)
{
    JSGCArenaInfo *a;

    while (last) {
        a = last;
        last = last->prev;

        METER(rt->gcStats.afree++);
        JS_ASSERT(rt->gcBytes >= GC_ARENA_SIZE);
        rt->gcBytes -= GC_ARENA_SIZE;

#if CHUNKED_ARENA_ALLOCATION
        if (js_gcArenasPerChunk == 1) {
#endif
            DestroyGCChunk(ARENA_INFO_TO_START(a));
#if CHUNKED_ARENA_ALLOCATION
        } else {
            uint32 arenaIndex;
            jsuword chunk;
            uint32 chunkInfoIndex;
            JSGCChunkInfo *ci;
# ifdef DEBUG
            jsuword firstArena;

            firstArena = a->firstArena;
            arenaIndex = a->arenaIndex;
            memset((void *) ARENA_INFO_TO_START(a), JS_FREE_PATTERN,
                   GC_ARENA_SIZE - JS_GC_ARENA_PAD);
            a->firstArena = firstArena;
            a->arenaIndex = arenaIndex;
# endif
            arenaIndex = GET_ARENA_INDEX(a);
            chunk = GET_ARENA_CHUNK(a, arenaIndex);
            chunkInfoIndex = GET_CHUNK_INFO_INDEX(chunk);
            if (chunkInfoIndex == NO_FREE_ARENAS) {
                chunkInfoIndex = arenaIndex;
                SET_CHUNK_INFO_INDEX(chunk, arenaIndex);
                ci = GET_CHUNK_INFO(chunk, chunkInfoIndex);
                a->prev = NULL;
                ci->lastFreeArena = a;
                ci->numFreeArenas = 1;
                AddChunkToList(rt, ci);
            } else {
                JS_ASSERT(chunkInfoIndex != arenaIndex);
                ci = GET_CHUNK_INFO(chunk, chunkInfoIndex);
                JS_ASSERT(ci->numFreeArenas != 0);
                JS_ASSERT(ci->lastFreeArena);
                JS_ASSERT(a != ci->lastFreeArena);
                if (ci->numFreeArenas == js_gcArenasPerChunk - 1) {
                    RemoveChunkFromList(rt, ci);
                    DestroyGCChunk(chunk);
                } else {
                    ++ci->numFreeArenas;
                    a->prev = ci->lastFreeArena;
                    ci->lastFreeArena = a;
                }
            }
        }
#endif
    }
}

static void
InitGCArenaLists(JSRuntime *rt)
{
    uintN i, thingSize;
    JSGCArenaList *arenaList;

    for (i = 0; i < GC_NUM_FREELISTS; i++) {
        arenaList = &rt->gcArenaList[i];
        thingSize = GC_FREELIST_NBYTES(i);
        arenaList->last = NULL;
        arenaList->lastCount = THINGS_PER_ARENA(thingSize);
        arenaList->thingSize = thingSize;
        arenaList->freeList = NULL;
    }
    rt->gcDoubleArenaList.first = NULL;
    rt->gcDoubleArenaList.nextDoubleFlags = DOUBLE_BITMAP_SENTINEL;
}

static void
FinishGCArenaLists(JSRuntime *rt)
{
    uintN i;
    JSGCArenaList *arenaList;

    for (i = 0; i < GC_NUM_FREELISTS; i++) {
        arenaList = &rt->gcArenaList[i];
        DestroyGCArenas(rt, arenaList->last);
        arenaList->last = NULL;
        arenaList->lastCount = THINGS_PER_ARENA(arenaList->thingSize);
        arenaList->freeList = NULL;
    }
    DestroyGCArenas(rt, rt->gcDoubleArenaList.first);
    rt->gcDoubleArenaList.first = NULL;
    rt->gcDoubleArenaList.nextDoubleFlags = DOUBLE_BITMAP_SENTINEL;

    rt->gcBytes = 0;
    JS_ASSERT(rt->gcChunkList == 0);
}

/*
 * This function must not be called when the thing is a jsdouble.
 */
static uint8 *
GetGCThingFlags(void *thing)
{
    JSGCArenaInfo *a;
    uint32 index;

    a = THING_TO_ARENA(thing);
    index = THING_TO_INDEX(thing, a->list->thingSize);
    return THING_FLAGP(a, index);
}

/*
 * This function returns null when the thing is a jsdouble.
 */
static uint8 *
GetGCThingFlagsOrNull(void *thing)
{
    JSGCArenaInfo *a;
    uint32 index;

    if (JSString::isStatic(thing))
        return NULL;
    a = THING_TO_ARENA(thing);
    if (!a->list)
        return NULL;
    index = THING_TO_INDEX(thing, a->list->thingSize);
    return THING_FLAGP(a, index);
}

intN
js_GetExternalStringGCType(JSString *str)
{
    JS_ASSERT(!JSString::isStatic(str));

    uintN type = (uintN) *GetGCThingFlags(str) & GCF_TYPEMASK;
    JS_ASSERT(type == GCX_STRING || type >= GCX_EXTERNAL_STRING);
    return (type == GCX_STRING) ? -1 : (intN) (type - GCX_EXTERNAL_STRING);
}

static uint32
MapGCFlagsToTraceKind(uintN flags)
{
    uint32 type;

    type = flags & GCF_TYPEMASK;
    JS_ASSERT(type != GCX_DOUBLE);
    JS_ASSERT(type < GCX_NTYPES);
    return (type < GCX_EXTERNAL_STRING) ? type : JSTRACE_STRING;
}

JS_FRIEND_API(uint32)
js_GetGCThingTraceKind(void *thing)
{
    JSGCArenaInfo *a;
    uint32 index;

    if (JSString::isStatic(thing))
        return JSTRACE_STRING;

    a = THING_TO_ARENA(thing);
    if (!a->list)
        return JSTRACE_DOUBLE;

    index = THING_TO_INDEX(thing, a->list->thingSize);
    return MapGCFlagsToTraceKind(*THING_FLAGP(a, index));
}

JSRuntime*
js_GetGCStringRuntime(JSString *str)
{
    JSGCArenaList *list;

    list = THING_TO_ARENA(str)->list;

    JS_ASSERT(list->thingSize == sizeof(JSGCThing));
    JS_ASSERT(GC_FREELIST_INDEX(sizeof(JSGCThing)) == 0);

    return (JSRuntime *)((uint8 *)list - offsetof(JSRuntime, gcArenaList));
}

JSBool
js_IsAboutToBeFinalized(JSContext *cx, void *thing)
{
    JSGCArenaInfo *a;
    uint32 index, flags;

    if (JSString::isStatic(thing))
        return false;

    a = THING_TO_ARENA(thing);
    if (!a->list) {
        /*
         * Check if the arena has no marked doubles. In that case the bitmap
         * with the mark flags contains garbage, as it is initialized only
         * when marking the first double in the arena.
         */
        if (!a->u.hasMarkedDoubles)
            return JS_TRUE;
        index = DOUBLE_THING_TO_INDEX(thing);
        return !IsMarkedDouble(a, index);
    }
    index = THING_TO_INDEX(thing, a->list->thingSize);
    flags = *THING_FLAGP(a, index);
    return !(flags & (GCF_MARK | GCF_LOCK | GCF_FINAL));
}

/* This is compatible with JSDHashEntryStub. */
typedef struct JSGCRootHashEntry {
    JSDHashEntryHdr hdr;
    void            *root;
    const char      *name;
} JSGCRootHashEntry;

/* Initial size of the gcRootsHash table (SWAG, small enough to amortize). */
#define GC_ROOTS_SIZE   256

#if CHUNKED_ARENA_ALLOCATION

/*
 * For a CPU with extremely large pages, using them for GC things wastes
 * too much memory.
 */
# define GC_ARENAS_PER_CPU_PAGE_LIMIT JS_BIT(18 - GC_ARENA_SHIFT)

JS_STATIC_ASSERT(GC_ARENAS_PER_CPU_PAGE_LIMIT <= NO_FREE_ARENAS);

#endif

JSBool
js_InitGC(JSRuntime *rt, uint32 maxbytes)
{
#if JS_GC_USE_MMAP
    if (js_gcArenasPerChunk == 0) {
        size_t cpuPageSize, arenasPerPage;
# if defined(XP_WIN)
        SYSTEM_INFO si;

        GetSystemInfo(&si);
        cpuPageSize = si.dwPageSize;

# elif defined(XP_UNIX) || defined(XP_BEOS)
        cpuPageSize = (size_t) sysconf(_SC_PAGESIZE);
# else
#  error "Not implemented"
# endif
        /* cpuPageSize is a power of 2. */
        JS_ASSERT((cpuPageSize & (cpuPageSize - 1)) == 0);
        arenasPerPage = cpuPageSize >> GC_ARENA_SHIFT;
#ifdef DEBUG
        if (arenasPerPage == 0) {
            fprintf(stderr,
                    "JS engine warning: the size of the CPU page, %u bytes, is too low to use\n"
                    "paged allocation for the garbage collector. Please report this.\n",
                    (unsigned) cpuPageSize);
        }
#endif
        if (arenasPerPage - 1 <= (size_t) (GC_ARENAS_PER_CPU_PAGE_LIMIT - 1)) {
            /*
             * Use at least 4 GC arenas per paged allocation chunk to
             * minimize the overhead of mmap/VirtualAlloc.
             */
            js_gcUseMmap = JS_TRUE;
            js_gcArenasPerChunk = JS_MAX((uint32) arenasPerPage, 4);
        } else {
            js_gcUseMmap = JS_FALSE;
            js_gcArenasPerChunk = 7;
        }
    }
    JS_ASSERT(1 <= js_gcArenasPerChunk &&
              js_gcArenasPerChunk <= NO_FREE_ARENAS);
#endif

    InitGCArenaLists(rt);
    if (!JS_DHashTableInit(&rt->gcRootsHash, JS_DHashGetStubOps(), NULL,
                           sizeof(JSGCRootHashEntry), GC_ROOTS_SIZE)) {
        rt->gcRootsHash.ops = NULL;
        return JS_FALSE;
    }
    rt->gcLocksHash = NULL;     /* create lazily */

    /*
     * Separate gcMaxMallocBytes from gcMaxBytes but initialize to maxbytes
     * for default backward API compatibility.
     */
    rt->gcMaxBytes = rt->gcMaxMallocBytes = maxbytes;
    rt->gcEmptyArenaPoolLifespan = 30000;

    /*
     * By default the trigger factor gets the maximum possible value. This
     * means that GC will not be triggered by growth of GC memory (gcBytes).
     */
    rt->setGCTriggerFactor((uint32) -1);

    /*
     * The assigned value prevents GC from running when GC memory is too low
     * (during JS engine start).
     */
    rt->setGCLastBytes(8192);

    METER(memset(&rt->gcStats, 0, sizeof rt->gcStats));
    return JS_TRUE;
}

#ifdef JS_GCMETER

static void
UpdateArenaStats(JSGCArenaStats *st, uint32 nlivearenas, uint32 nkilledArenas,
                 uint32 nthings)
{
    size_t narenas;

    narenas = nlivearenas + nkilledArenas;
    JS_ASSERT(narenas >= st->livearenas);

    st->newarenas = narenas - st->livearenas;
    st->narenas = narenas;
    st->livearenas = nlivearenas;
    if (st->maxarenas < narenas)
        st->maxarenas = narenas;
    st->totalarenas += narenas;

    st->nthings = nthings;
    if (st->maxthings < nthings)
        st->maxthings = nthings;
    st->totalthings += nthings;
}

JS_FRIEND_API(void)
js_DumpGCStats(JSRuntime *rt, FILE *fp)
{
    int i;
    size_t sumArenas, sumTotalArenas;
    size_t sumThings, sumMaxThings;
    size_t sumThingSize, sumTotalThingSize;
    size_t sumArenaCapacity, sumTotalArenaCapacity;
    JSGCArenaStats *st;
    size_t thingSize, thingsPerArena;
    size_t sumAlloc, sumLocalAlloc, sumFail, sumRetry;

    fprintf(fp, "\nGC allocation statistics:\n");

#define UL(x)       ((unsigned long)(x))
#define ULSTAT(x)   UL(rt->gcStats.x)
#define PERCENT(x,y) (100.0 * (double) (x) / (double) (y))

    sumArenas = 0;
    sumTotalArenas = 0;
    sumThings = 0;
    sumMaxThings = 0;
    sumThingSize = 0;
    sumTotalThingSize = 0;
    sumArenaCapacity = 0;
    sumTotalArenaCapacity = 0;
    sumAlloc = 0;
    sumLocalAlloc = 0;
    sumFail = 0;
    sumRetry = 0;
    for (i = -1; i < (int) GC_NUM_FREELISTS; i++) {
        if (i == -1) {
            thingSize = sizeof(jsdouble);
            thingsPerArena = DOUBLES_PER_ARENA;
            st = &rt->gcStats.doubleArenaStats;
            fprintf(fp,
                    "Arena list for double values (%lu doubles per arena):",
                    UL(thingsPerArena));
        } else {
            thingSize = rt->gcArenaList[i].thingSize;
            thingsPerArena = THINGS_PER_ARENA(thingSize);
            st = &rt->gcStats.arenaStats[i];
            fprintf(fp,
                    "Arena list %d (thing size %lu, %lu things per arena):",
                    i, UL(GC_FREELIST_NBYTES(i)), UL(thingsPerArena));
        }
        if (st->maxarenas == 0) {
            fputs(" NEVER USED\n", fp);
            continue;
        }
        putc('\n', fp);
        fprintf(fp, " arenas before GC: %lu\n", UL(st->narenas));
        fprintf(fp, " new arenas before GC: %lu (%.1f%%)\n",
                UL(st->newarenas), PERCENT(st->newarenas, st->narenas));
        fprintf(fp, " arenas after GC: %lu (%.1f%%)\n",
                UL(st->livearenas), PERCENT(st->livearenas, st->narenas));
        fprintf(fp, " max arenas: %lu\n", UL(st->maxarenas));
        fprintf(fp, " things: %lu\n", UL(st->nthings));
        fprintf(fp, " GC cell utilization: %.1f%%\n",
                PERCENT(st->nthings, thingsPerArena * st->narenas));
        fprintf(fp, " average cell utilization: %.1f%%\n",
                PERCENT(st->totalthings, thingsPerArena * st->totalarenas));
        fprintf(fp, " max things: %lu\n", UL(st->maxthings));
        fprintf(fp, " alloc attempts: %lu\n", UL(st->alloc));
        fprintf(fp, " alloc without locks: %lu (%.1f%%)\n",
                UL(st->localalloc), PERCENT(st->localalloc, st->alloc));
        sumArenas += st->narenas;
        sumTotalArenas += st->totalarenas;
        sumThings += st->nthings;
        sumMaxThings += st->maxthings;
        sumThingSize += thingSize * st->nthings;
        sumTotalThingSize += thingSize * st->totalthings;
        sumArenaCapacity += thingSize * thingsPerArena * st->narenas;
        sumTotalArenaCapacity += thingSize * thingsPerArena * st->totalarenas;
        sumAlloc += st->alloc;
        sumLocalAlloc += st->localalloc;
        sumFail += st->fail;
        sumRetry += st->retry;
    }
    fprintf(fp, "TOTAL STATS:\n");
    fprintf(fp, " bytes allocated: %lu\n", UL(rt->gcBytes));
    fprintf(fp, " total GC arenas: %lu\n", UL(sumArenas));
    fprintf(fp, " total GC things: %lu\n", UL(sumThings));
    fprintf(fp, " max total GC things: %lu\n", UL(sumMaxThings));
    fprintf(fp, " GC cell utilization: %.1f%%\n",
            PERCENT(sumThingSize, sumArenaCapacity));
    fprintf(fp, " average cell utilization: %.1f%%\n",
            PERCENT(sumTotalThingSize, sumTotalArenaCapacity));
    fprintf(fp, "allocation retries after GC: %lu\n", UL(sumRetry));
    fprintf(fp, " alloc attempts: %lu\n", UL(sumAlloc));
    fprintf(fp, " alloc without locks: %lu (%.1f%%)\n",
            UL(sumLocalAlloc), PERCENT(sumLocalAlloc, sumAlloc));
    fprintf(fp, " allocation failures: %lu\n", UL(sumFail));
    fprintf(fp, " things born locked: %lu\n", ULSTAT(lockborn));
    fprintf(fp, " valid lock calls: %lu\n", ULSTAT(lock));
    fprintf(fp, " valid unlock calls: %lu\n", ULSTAT(unlock));
    fprintf(fp, " mark recursion depth: %lu\n", ULSTAT(depth));
    fprintf(fp, " maximum mark recursion: %lu\n", ULSTAT(maxdepth));
    fprintf(fp, " mark C recursion depth: %lu\n", ULSTAT(cdepth));
    fprintf(fp, " maximum mark C recursion: %lu\n", ULSTAT(maxcdepth));
    fprintf(fp, " delayed tracing calls: %lu\n", ULSTAT(untraced));
#ifdef DEBUG
    fprintf(fp, " max trace later count: %lu\n", ULSTAT(maxuntraced));
#endif
    fprintf(fp, " maximum GC nesting level: %lu\n", ULSTAT(maxlevel));
    fprintf(fp, "potentially useful GC calls: %lu\n", ULSTAT(poke));
    fprintf(fp, " thing arenas freed so far: %lu\n", ULSTAT(afree));
    fprintf(fp, " stack segments scanned: %lu\n", ULSTAT(stackseg));
    fprintf(fp, "stack segment slots scanned: %lu\n", ULSTAT(segslots));
    fprintf(fp, "reachable closeable objects: %lu\n", ULSTAT(nclose));
    fprintf(fp, " max reachable closeable: %lu\n", ULSTAT(maxnclose));
    fprintf(fp, " scheduled close hooks: %lu\n", ULSTAT(closelater));
    fprintf(fp, " max scheduled close hooks: %lu\n", ULSTAT(maxcloselater));

#undef UL
#undef ULSTAT
#undef PERCENT

#ifdef JS_ARENAMETER
    JS_DumpArenaStats(fp);
#endif
}
#endif

#ifdef DEBUG
static void
CheckLeakedRoots(JSRuntime *rt);
#endif

void
js_FinishGC(JSRuntime *rt)
{
#ifdef JS_ARENAMETER
    JS_DumpArenaStats(stdout);
#endif
#ifdef JS_GCMETER
    js_DumpGCStats(rt, stdout);
#endif

    FreePtrTable(&rt->gcIteratorTable, &iteratorTableInfo);
    FinishGCArenaLists(rt);

    if (rt->gcRootsHash.ops) {
#ifdef DEBUG
        CheckLeakedRoots(rt);
#endif
        JS_DHashTableFinish(&rt->gcRootsHash);
        rt->gcRootsHash.ops = NULL;
    }
    if (rt->gcLocksHash) {
        JS_DHashTableDestroy(rt->gcLocksHash);
        rt->gcLocksHash = NULL;
    }
}

JSBool
js_AddRoot(JSContext *cx, void *rp, const char *name)
{
    JSBool ok = js_AddRootRT(cx->runtime, rp, name);
    if (!ok)
        JS_ReportOutOfMemory(cx);
    return ok;
}

JSBool
js_AddRootRT(JSRuntime *rt, void *rp, const char *name)
{
    JSBool ok;
    JSGCRootHashEntry *rhe;

    /*
     * Due to the long-standing, but now removed, use of rt->gcLock across the
     * bulk of js_GC, API users have come to depend on JS_AddRoot etc. locking
     * properly with a racing GC, without calling JS_AddRoot from a request.
     * We have to preserve API compatibility here, now that we avoid holding
     * rt->gcLock across the mark phase (including the root hashtable mark).
     */
    JS_LOCK_GC(rt);
    js_WaitForGC(rt);
    rhe = (JSGCRootHashEntry *)
          JS_DHashTableOperate(&rt->gcRootsHash, rp, JS_DHASH_ADD);
    if (rhe) {
        rhe->root = rp;
        rhe->name = name;
        ok = JS_TRUE;
    } else {
        ok = JS_FALSE;
    }
    JS_UNLOCK_GC(rt);
    return ok;
}

JSBool
js_RemoveRoot(JSRuntime *rt, void *rp)
{
    /*
     * Due to the JS_RemoveRootRT API, we may be called outside of a request.
     * Same synchronization drill as above in js_AddRoot.
     */
    JS_LOCK_GC(rt);
    js_WaitForGC(rt);
    (void) JS_DHashTableOperate(&rt->gcRootsHash, rp, JS_DHASH_REMOVE);
    rt->gcPoke = JS_TRUE;
    JS_UNLOCK_GC(rt);
    return JS_TRUE;
}
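
/*
 * Illustrative usage sketch, excluded from compilation and not part of the
 * original file: rooting a local jsval slot via the functions above and
 * releasing it when done. This mirrors how the public JS_AddRoot and
 * JS_RemoveRoot entry points drive js_AddRoot/js_RemoveRoot.
 */
#if 0
static JSBool
WithRootedValue(JSContext *cx, jsval *vp)
{
    if (!js_AddRoot(cx, vp, "WithRootedValue temporary"))
        return JS_FALSE;

    /* ... use *vp; the GC now scans this location as a root ... */

    js_RemoveRoot(cx->runtime, vp);
    return JS_TRUE;
}
#endif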

#ifdef DEBUG

static JSDHashOperator
js_root_printer(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 i, void *arg)
{
    uint32 *leakedroots = (uint32 *)arg;
    JSGCRootHashEntry *rhe = (JSGCRootHashEntry *)hdr;

    (*leakedroots)++;
    fprintf(stderr,
            "JS engine warning: leaking GC root \'%s\' at %p\n",
            rhe->name ? (char *)rhe->name : "", rhe->root);

    return JS_DHASH_NEXT;
}

static void
CheckLeakedRoots(JSRuntime *rt)
{
    uint32 leakedroots = 0;

    /* In debug builds, warn (but don't assert) about any remaining roots. */
    JS_DHashTableEnumerate(&rt->gcRootsHash, js_root_printer,
                           &leakedroots);
    if (leakedroots > 0) {
        if (leakedroots == 1) {
            fprintf(stderr,
                    "JS engine warning: 1 GC root remains after destroying the JSRuntime at %p.\n"
                    " This root may point to freed memory. Objects reachable\n"
                    " through it have not been finalized.\n",
                    (void *) rt);
        } else {
            fprintf(stderr,
                    "JS engine warning: %lu GC roots remain after destroying the JSRuntime at %p.\n"
                    " These roots may point to freed memory. Objects reachable\n"
                    " through them have not been finalized.\n",
                    (unsigned long) leakedroots, (void *) rt);
        }
    }
}

typedef struct NamedRootDumpArgs {
    void (*dump)(const char *name, void *rp, void *data);
    void *data;
} NamedRootDumpArgs;

static JSDHashOperator
js_named_root_dumper(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 number,
                     void *arg)
{
    NamedRootDumpArgs *args = (NamedRootDumpArgs *) arg;
    JSGCRootHashEntry *rhe = (JSGCRootHashEntry *)hdr;

    if (rhe->name)
        args->dump(rhe->name, rhe->root, args->data);
    return JS_DHASH_NEXT;
}

JS_BEGIN_EXTERN_C
void
js_DumpNamedRoots(JSRuntime *rt,
                  void (*dump)(const char *name, void *rp, void *data),
                  void *data)
{
    NamedRootDumpArgs args;

    args.dump = dump;
    args.data = data;
    JS_DHashTableEnumerate(&rt->gcRootsHash, js_named_root_dumper, &args);
}
JS_END_EXTERN_C

#endif /* DEBUG */

typedef struct GCRootMapArgs {
    JSGCRootMapFun map;
    void *data;
} GCRootMapArgs;

static JSDHashOperator
js_gcroot_mapper(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 number,
                 void *arg)
{
    GCRootMapArgs *args = (GCRootMapArgs *) arg;
    JSGCRootHashEntry *rhe = (JSGCRootHashEntry *)hdr;
    intN mapflags;
    int op;

    mapflags = args->map(rhe->root, rhe->name, args->data);

#if JS_MAP_GCROOT_NEXT == JS_DHASH_NEXT && \
    JS_MAP_GCROOT_STOP == JS_DHASH_STOP && \
    JS_MAP_GCROOT_REMOVE == JS_DHASH_REMOVE
    op = (JSDHashOperator)mapflags;
#else
    op = JS_DHASH_NEXT;
    if (mapflags & JS_MAP_GCROOT_STOP)
        op |= JS_DHASH_STOP;
    if (mapflags & JS_MAP_GCROOT_REMOVE)
        op |= JS_DHASH_REMOVE;
#endif

    return (JSDHashOperator) op;
}

uint32
js_MapGCRoots(JSRuntime *rt, JSGCRootMapFun map, void *data)
{
    GCRootMapArgs args;
    uint32 rv;

    args.map = map;
    args.data = data;
    JS_LOCK_GC(rt);
    rv = JS_DHashTableEnumerate(&rt->gcRootsHash, js_gcroot_mapper, &args);
    JS_UNLOCK_GC(rt);
    return rv;
}
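
/*
 * Illustrative usage sketch, excluded from compilation and not part of the
 * original file: counting the registered roots with js_MapGCRoots. The
 * JSGCRootMapFun signature and the JS_MAP_GCROOT_NEXT flag are assumed to
 * match the mapper above.
 */
#if 0
static intN
CountRootsMapper(void *rp, const char *name, void *data)
{
    ++*(uint32 *) data;
    return JS_MAP_GCROOT_NEXT;
}

static uint32
CountRoots(JSRuntime *rt)
{
    uint32 n = 0;
    js_MapGCRoots(rt, CountRootsMapper, &n);
    return n;
}
#endif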

JSBool
js_RegisterCloseableIterator(JSContext *cx, JSObject *obj)
{
    JSRuntime *rt;
    JSBool ok;

    rt = cx->runtime;
    JS_ASSERT(!rt->gcRunning);

    JS_LOCK_GC(rt);
    ok = AddToPtrTable(cx, &rt->gcIteratorTable, &iteratorTableInfo, obj);
    JS_UNLOCK_GC(rt);
    return ok;
}

static void
CloseNativeIterators(JSContext *cx)
{
    JSRuntime *rt;
    size_t count, newCount, i;
    void **array;
    JSObject *obj;

    rt = cx->runtime;
    count = rt->gcIteratorTable.count;
    array = rt->gcIteratorTable.array;

    newCount = 0;
    for (i = 0; i != count; ++i) {
        obj = (JSObject *)array[i];
        if (js_IsAboutToBeFinalized(cx, obj))
            js_CloseNativeIterator(cx, obj);
        else
            array[newCount++] = obj;
    }
    ShrinkPtrTable(&rt->gcIteratorTable, &iteratorTableInfo, newCount);
}

#if defined(DEBUG_brendan) || defined(DEBUG_timeless)
#define DEBUG_gchist
#endif

#ifdef DEBUG_gchist
#define NGCHIST 64

static struct GCHist {
    bool        lastDitch;
    JSGCThing   *freeList;
} gchist[NGCHIST];

unsigned gchpos = 0;
#endif

void
JSRuntime::setGCTriggerFactor(uint32 factor)
{
    JS_ASSERT(factor >= 100);

    gcTriggerFactor = factor;
    setGCLastBytes(gcLastBytes);
}

void
JSRuntime::setGCLastBytes(size_t lastBytes)
{
    gcLastBytes = lastBytes;
    uint64 triggerBytes = uint64(lastBytes) * uint64(gcTriggerFactor / 100);
    if (triggerBytes != size_t(triggerBytes))
        triggerBytes = size_t(-1);
    gcTriggerBytes = size_t(triggerBytes);
}
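
/*
 * For example (illustrative, not part of the original file): with
 * gcTriggerFactor == 300 and lastBytes == 8192 the trigger becomes
 * 8192 * 3 == 24576 bytes. Note that gcTriggerFactor / 100 truncates, so
 * only whole multiples of 100 affect the trigger, and a product that
 * overflows size_t is clamped to size_t(-1).
 */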

static JS_INLINE bool
IsGCThresholdReached(JSRuntime *rt)
{
#ifdef JS_GC_ZEAL
    if (rt->gcZeal >= 1)
        return true;
#endif

    /*
     * Since the initial value of the gcLastBytes parameter is not equal to
     * zero (see the js_InitGC function), the return value is false when the
     * gcBytes value is close to zero at JS engine start.
     */
    return rt->gcMallocBytes >= rt->gcMaxMallocBytes ||
           rt->gcBytes >= rt->gcTriggerBytes;
}

template <class T> static JS_INLINE T*
NewGCThing(JSContext *cx, uintN flags)
{
    JSRuntime *rt;
    bool doGC;
    JSGCThing *thing;
    uint8 *flagp;
    JSGCArenaList *arenaList;
    JSGCArenaInfo *a;
    uintN thingsLimit;
    JSLocalRootStack *lrs;
#ifdef JS_GCMETER
    JSGCArenaStats *astats;
#endif
#ifdef JS_THREADSAFE
    JSBool gcLocked;
    uintN localMallocBytes;
    JSGCThing **lastptr;
    JSGCThing *tmpthing;
    uint8 *tmpflagp;
    uintN maxFreeThings;        /* max to take from the global free list */
#endif

    JS_ASSERT((flags & GCF_TYPEMASK) != GCX_DOUBLE);
    rt = cx->runtime;
    size_t nbytes = sizeof(T);
    JS_ASSERT(JS_ROUNDUP(nbytes, sizeof(JSGCThing)) == nbytes);
    uintN flindex = GC_FREELIST_INDEX(nbytes);

    /* Updates of metering counters here may not be thread-safe. */
    METER(astats = &cx->runtime->gcStats.arenaStats[flindex]);
    METER(astats->alloc++);

#ifdef JS_THREADSAFE
    gcLocked = JS_FALSE;
    JS_ASSERT(cx->thread);

    JSGCThing *&freeList = cx->thread->gcFreeLists[flindex];
    thing = freeList;
    localMallocBytes = JS_THREAD_DATA(cx)->gcMallocBytes;
    if (thing && rt->gcMaxMallocBytes - rt->gcMallocBytes > localMallocBytes) {
        flagp = thing->flagp;
        freeList = thing->next;
        METER(astats->localalloc++);
        goto success;
    }

    JS_LOCK_GC(rt);
    gcLocked = JS_TRUE;

    /* Transfer thread-local counter to global one. */
    if (localMallocBytes != 0) {
        JS_THREAD_DATA(cx)->gcMallocBytes = 0;
        if (rt->gcMaxMallocBytes - rt->gcMallocBytes < localMallocBytes)
            rt->gcMallocBytes = rt->gcMaxMallocBytes;
        else
            rt->gcMallocBytes += localMallocBytes;
    }
#endif
    JS_ASSERT(!rt->gcRunning);
    if (rt->gcRunning) {
        METER(rt->gcStats.finalfail++);
        JS_UNLOCK_GC(rt);
        return NULL;
    }

#if defined JS_GC_ZEAL && defined JS_TRACER
    if (rt->gcZeal >= 1 && JS_TRACE_MONITOR(cx).useReservedObjects)
        goto testReservedObjects;
#endif

    arenaList = &rt->gcArenaList[flindex];
    doGC = IsGCThresholdReached(rt);
    for (;;) {
        if (doGC
#ifdef JS_TRACER
            && !JS_ON_TRACE(cx) && !JS_TRACE_MONITOR(cx).useReservedObjects
#endif
           ) {
            /*
             * Keep rt->gcLock across the call into js_GC so we don't starve
             * and lose to racing threads who deplete the heap just after
             * js_GC has replenished it (or has synchronized with a racing
             * GC that collected a bunch of garbage). This unfair scheduling
             * can happen on certain operating systems. For the gory details,
             * see bug 162779 at https://bugzilla.mozilla.org/.
             */
            js_GC(cx, GC_LAST_DITCH);
            METER(astats->retry++);
        }

        /* Try to get thing from the free list. */
        thing = arenaList->freeList;
        if (thing) {
            arenaList->freeList = thing->next;
            flagp = thing->flagp;
            JS_ASSERT(*flagp & GCF_FINAL);

1869 #ifdef JS_THREADSAFE
1870 /*
1871 * Refill the local free list by taking several things from the
1872 * global free list unless the local free list is already populated
1873 * or we have already hit the rt->gcMaxMallocBytes barrier. The former
1874 * is caused by allocating new things in gcCallback(cx, JSGC_END).
1875 * The latter happens when GC is canceled due to
1876 * gcCallback(cx, JSGC_BEGIN) returning false.
1877 */
1878 if (freeList || rt->gcMallocBytes >= rt->gcMaxMallocBytes)
1879 break;
1880
1881 tmpthing = arenaList->freeList;
1882 if (tmpthing) {
1883 maxFreeThings = MAX_THREAD_LOCAL_THINGS;
1884 do {
1885 if (!tmpthing->next)
1886 break;
1887 tmpthing = tmpthing->next;
1888 } while (--maxFreeThings != 0);
1889
1890 freeList = arenaList->freeList;
1891 arenaList->freeList = tmpthing->next;
1892 tmpthing->next = NULL;
1893 }
1894 #endif
1895 break;
1896 }
1897
1898 /*
1899 * Try to allocate things from the last arena. If it is fully used,
1900 * check if we can allocate a new one and, if we cannot, consider
1901 * doing a "last ditch" GC unless already tried.
1902 */
1903 thingsLimit = THINGS_PER_ARENA(nbytes);
1904 if (arenaList->lastCount != thingsLimit) {
1905 JS_ASSERT(arenaList->lastCount < thingsLimit);
1906 a = arenaList->last;
1907 } else {
1908 #ifdef JS_TRACER
1909 if (JS_TRACE_MONITOR(cx).useReservedObjects) {
1910 #ifdef JS_GC_ZEAL
1911 testReservedObjects:
1912 #endif
1913 JSTraceMonitor *tm = &JS_TRACE_MONITOR(cx);
1914
1915 thing = (JSGCThing *) tm->reservedObjects;
1916 flagp = GetGCThingFlags(thing);
1917 JS_ASSERT(thing);
1918 tm->reservedObjects = JSVAL_TO_OBJECT(tm->reservedObjects->fslots[0]);
1919 break;
1920 }
1921 #endif
1922
1923 a = NewGCArena(rt);
1924 if (!a) {
1925 if (doGC || JS_ON_TRACE(cx))
1926 goto fail;
1927 doGC = true;
1928 continue;
1929 }
1930 a->list = arenaList;
1931 a->prev = arenaList->last;
1932 a->prevUntracedPage = 0;
1933 a->u.untracedThings = 0;
1934 arenaList->last = a;
1935 arenaList->lastCount = 0;
1936 }
1937
1938 flagp = THING_FLAGP(a, arenaList->lastCount);
1939 thing = FLAGP_TO_THING(flagp, nbytes);
1940 arenaList->lastCount++;
1941
1942 #ifdef JS_THREADSAFE
1943 /*
1944 * Refill the local free list by taking free things from the last
1945 * arena. Prefer to order free things by ascending address in the
1946 * (unscientific) hope of better cache locality.
1947 */
1948 if (freeList || rt->gcMallocBytes >= rt->gcMaxMallocBytes)
1949 break;
1950 lastptr = &freeList;
1951 maxFreeThings = thingsLimit - arenaList->lastCount;
1952 if (maxFreeThings > MAX_THREAD_LOCAL_THINGS)
1953 maxFreeThings = MAX_THREAD_LOCAL_THINGS;
1954 uint32 lastCount = arenaList->lastCount;
1955 while (maxFreeThings != 0) {
1956 --maxFreeThings;
1957
1958 tmpflagp = THING_FLAGP(a, lastCount);
1959 tmpthing = FLAGP_TO_THING(tmpflagp, nbytes);
1960 lastCount++;
1961 tmpthing->flagp = tmpflagp;
1962 *tmpflagp = GCF_FINAL; /* signifying that thing is free */
1963
1964 *lastptr = tmpthing;
1965 lastptr = &tmpthing->next;
1966 }
1967 *lastptr = NULL;
1968 arenaList->lastCount = lastCount;
1969 #endif
1970 break;
1971 }
1972
1973 /* We successfully allocated the thing. */
1974 #ifdef JS_THREADSAFE
1975 success:
1976 #endif
1977 lrs = cx->localRootStack;
1978 if (lrs) {
1979 /*
1980 * If we're in a local root scope, don't set newborn[type] at all, to
1981 * avoid entraining garbage from it for an unbounded amount of time
1982 * on this context. A caller will leave the local root scope and pop
1983 * this reference, allowing thing to be GC'd if it has no other refs.
1984 * See JS_EnterLocalRootScope and related APIs.
1985 */
1986 if (js_PushLocalRoot(cx, lrs, (jsval) thing) < 0) {
1987 /*
1988 * When we fail for a thing allocated through the tail of the last
1989 * arena, the thing's flag byte has not been initialized. So, to
1990 * prevent the GC from accessing uninitialized flags during
1991 * finalization, we always mark the thing as final. See bug 337407.
1992 */
1993 *flagp = GCF_FINAL;
1994 goto fail;
1995 }
1996 } else {
1997 /*
1998 * No local root scope, so we're stuck with the old, fragile model of
1999 * depending on a pigeon-hole newborn per type per context.
2000 */
2001 cx->weakRoots.newborn[flags & GCF_TYPEMASK] = thing;
2002 }
2003
2004 /* We can't fail now, so update flags. */
2005 *flagp = (uint8)flags;
2006
2007 #ifdef DEBUG_gchist
2008 gchist[gchpos].lastDitch = doGC;
2009 gchist[gchpos].freeList = rt->gcArenaList[flindex].freeList;
2010 if (++gchpos == NGCHIST)
2011 gchpos = 0;
2012 #endif
2013
2014 /* This is not thread-safe for thread-local allocations. */
2015 METER_IF(flags & GCF_LOCK, rt->gcStats.lockborn++);
2016
2017 #ifdef JS_THREADSAFE
2018 if (gcLocked)
2019 JS_UNLOCK_GC(rt);
2020 #endif
2021 return (T*)thing;
2022
2023 fail:
2024 #ifdef JS_THREADSAFE
2025 if (gcLocked)
2026 JS_UNLOCK_GC(rt);
2027 #endif
2028 METER(astats->fail++);
2029 js_ReportOutOfMemory(cx);
2030 return NULL;
2031 }
2032
2033 extern JSObject* js_NewGCObject(JSContext *cx, uintN flags)
2034 {
2035 return NewGCThing<JSObject>(cx, flags);
2036 }
2037
2038 extern JSString* js_NewGCString(JSContext *cx, uintN flags)
2039 {
2040 return NewGCThing<JSString>(cx, flags);
2041 }
2042
2043 extern JSFunction* js_NewGCFunction(JSContext *cx, uintN flags)
2044 {
2045 return NewGCThing<JSFunction>(cx, flags);
2046 }
2047
2048 extern JSXML* js_NewGCXML(JSContext *cx, uintN flags)
2049 {
2050 return NewGCThing<JSXML>(cx, flags);
2051 }
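/*
 * Usage sketch (illustrative, not part of the engine): these wrappers are
 * the internal allocation entry points. A fresh thing is kept alive only
 * through the newborn root or the local root scope set up by NewGCThing,
 * so a caller should move it into a stronger root before the next
 * allocation, which may run a last-ditch GC:
 *
 *   JSObject *obj = js_NewGCObject(cx, GCX_OBJECT);
 *   if (!obj)
 *       return NULL;    // error already reported via js_ReportOutOfMemory
 */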
2052
2053 static JSGCDoubleCell *
2054 RefillDoubleFreeList(JSContext *cx)
2055 {
2056 JSRuntime *rt;
2057 jsbitmap *doubleFlags, usedBits;
2058 JSBool didGC = JS_FALSE;
2059 JSGCArenaInfo *a;
2060 uintN bit, index;
2061 JSGCDoubleCell *cell, *list, *lastcell;
2062
2063 JS_ASSERT(!cx->doubleFreeList);
2064
2065 rt = cx->runtime;
2066 JS_LOCK_GC(rt);
2067
2068 JS_ASSERT(!rt->gcRunning);
2069 if (rt->gcRunning) {
2070 METER(rt->gcStats.finalfail++);
2071 JS_UNLOCK_GC(rt);
2072 return NULL;
2073 }
2074
2075 if (IsGCThresholdReached(rt))
2076 goto do_gc;
2077
2078 /*
2079 * Loop until we find a flag bitmap word with unset bits indicating free
2080 * double cells, then set all its bits as used and put the cells on the
2081 * free list for the current context.
2082 */
2083 doubleFlags = rt->gcDoubleArenaList.nextDoubleFlags;
2084 for (;;) {
2085 if (((jsuword) doubleFlags & GC_ARENA_MASK) ==
2086 ARENA_INFO_OFFSET) {
2087 if (doubleFlags == DOUBLE_BITMAP_SENTINEL ||
2088 !((JSGCArenaInfo *) doubleFlags)->prev) {
2089 a = NewGCArena(rt);
2090 if (!a) {
2091 do_gc:
2092 if (didGC || JS_ON_TRACE(cx)) {
2093 METER(rt->gcStats.doubleArenaStats.fail++);
2094 JS_UNLOCK_GC(rt);
2095 js_ReportOutOfMemory(cx);
2096 return NULL;
2097 }
2098 js_GC(cx, GC_LAST_DITCH);
2099 METER(rt->gcStats.doubleArenaStats.retry++);
2100 doubleFlags = rt->gcDoubleArenaList.nextDoubleFlags;
2101 didGC = JS_TRUE;
2102 continue;
2103 }
2104 a->list = NULL;
2105 a->prev = NULL;
2106 if (doubleFlags == DOUBLE_BITMAP_SENTINEL) {
2107 JS_ASSERT(!rt->gcDoubleArenaList.first);
2108 rt->gcDoubleArenaList.first = a;
2109 } else {
2110 JS_ASSERT(rt->gcDoubleArenaList.first);
2111 ((JSGCArenaInfo *) doubleFlags)->prev = a;
2112 }
2113 ClearDoubleArenaFlags(a);
2114 doubleFlags = DOUBLE_ARENA_BITMAP(a);
2115 break;
2116 }
2117 doubleFlags =
2118 DOUBLE_ARENA_BITMAP(((JSGCArenaInfo *) doubleFlags)->prev);
2119 }
2120
2121 /*
2122 * When doubleFlags points to the last bitmap word in the arena, its
2123 * high bits correspond to non-existent cells, and ClearDoubleArenaFlags
2124 * sets such bits to 1. Thus even for this last word a bit is unset
2125 * iff the corresponding cell exists and is free.
2126 */
2127 if (*doubleFlags != (jsbitmap) -1)
2128 break;
2129 ++doubleFlags;
2130 }
2131
2132 rt->gcDoubleArenaList.nextDoubleFlags = doubleFlags + 1;
2133 usedBits = *doubleFlags;
2134 JS_ASSERT(usedBits != (jsbitmap) -1);
2135 *doubleFlags = (jsbitmap) -1;
2136 JS_UNLOCK_GC(rt);
2137
2138 /*
2139 * Find the cell index corresponding to the first bit of *doubleFlags. The
2140 * last bit then corresponds to cell "index + JS_BITS_PER_WORD - 1".
2141 */
2142 index = ((uintN) ((jsuword) doubleFlags & GC_ARENA_MASK) -
2143 DOUBLES_ARENA_BITMAP_OFFSET) * JS_BITS_PER_BYTE;
2144 cell = (JSGCDoubleCell *) ((jsuword) doubleFlags & ~GC_ARENA_MASK) + index;
2145
2146 if (usedBits == 0) {
2147 /* The common case when all doubles from *doubleFlags are free. */
2148 JS_ASSERT(index + JS_BITS_PER_WORD <= DOUBLES_PER_ARENA);
2149 list = cell;
2150 for (lastcell = cell + JS_BITS_PER_WORD - 1; cell != lastcell; ++cell)
2151 cell->link = cell + 1;
2152 lastcell->link = NULL;
2153 } else {
2154 /*
2155 * Assemble the free list from the free cells of *doubleFlags, starting
2156 * from the tail. Inside the loop,
2157 *
2158 * index + bit >= DOUBLES_PER_ARENA
2159 *
2160 * holds when bit is one of the unused bits. We do not check for such
2161 * bits explicitly: they must be set, so the "if" check filters them out.
2162 */
2163 JS_ASSERT(index + JS_BITS_PER_WORD <=
2164 DOUBLES_PER_ARENA + UNUSED_DOUBLE_BITMAP_BITS);
2165 bit = JS_BITS_PER_WORD;
2166 cell += bit;
2167 list = NULL;
2168 do {
2169 --bit;
2170 --cell;
2171 if (!(((jsbitmap) 1 << bit) & usedBits)) {
2172 JS_ASSERT(index + bit < DOUBLES_PER_ARENA);
2173 JS_ASSERT_IF(index + bit == DOUBLES_PER_ARENA - 1, !list);
2174 cell->link = list;
2175 list = cell;
2176 }
2177 } while (bit != 0);
2178 }
2179 JS_ASSERT(list);
2180
2181 /*
2182 * We delegate assigning cx->doubleFreeList to js_NewDoubleInRootedValue as
2183 * it immediately consumes the head of the list.
2184 */
2185 return list;
2186 }
2187
2188 JSBool
2189 js_NewDoubleInRootedValue(JSContext *cx, jsdouble d, jsval *vp)
2190 {
2191 #ifdef JS_GCMETER
2192 JSGCArenaStats *astats;
2193 #endif
2194 JSGCDoubleCell *cell;
2195
2196 /* Updates of metering counters here are not thread-safe. */
2197 METER(astats = &cx->runtime->gcStats.doubleArenaStats);
2198 METER(astats->alloc++);
2199 cell = cx->doubleFreeList;
2200 if (!cell) {
2201 cell = RefillDoubleFreeList(cx);
2202 if (!cell) {
2203 METER(astats->fail++);
2204 return JS_FALSE;
2205 }
2206 } else {
2207 METER(astats->localalloc++);
2208 }
2209 cx->doubleFreeList = cell->link;
2210 cell->number = d;
2211 *vp = DOUBLE_TO_JSVAL(&cell->number);
2212 return JS_TRUE;
2213 }
2214
2215 jsdouble *
2216 js_NewWeaklyRootedDouble(JSContext *cx, jsdouble d)
2217 {
2218 jsval v;
2219 jsdouble *dp;
2220
2221 if (!js_NewDoubleInRootedValue(cx, d, &v))
2222 return NULL;
2223
2224 JS_ASSERT(JSVAL_IS_DOUBLE(v));
2225 dp = JSVAL_TO_DOUBLE(v);
2226 if (cx->localRootStack) {
2227 if (js_PushLocalRoot(cx, cx->localRootStack, v) < 0)
2228 return NULL;
2229 } else {
2230 cx->weakRoots.newborn[GCX_DOUBLE] = dp;
2231 }
2232 return dp;
2233 }
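/*
 * Usage sketch (illustrative): boxing a jsdouble into an already-rooted
 * jsval location, here a hypothetical frame's rval slot. The destination
 * must be a GC-visible (rooted) location so the new double survives
 * later collections:
 *
 *   if (!js_NewDoubleInRootedValue(cx, 3.25, &fp->rval))
 *       return JS_FALSE;    // OOM already reported
 *   JS_ASSERT(JSVAL_IS_DOUBLE(fp->rval));
 */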
2234
2235 #ifdef JS_TRACER
2236 JSBool
2237 js_ReserveObjects(JSContext *cx, size_t nobjects)
2238 {
2239 /*
2240 * Ensure at least nobjects objects are in the list. fslots[1] of each
2241 * object on the reservedObjects list is the length of the list from there.
2242 */
2243 JSObject *&head = JS_TRACE_MONITOR(cx).reservedObjects;
2244 size_t i = head ? JSVAL_TO_INT(head->fslots[1]) : 0;
2245 while (i < nobjects) {
2246 JSObject *obj = js_NewGCObject(cx, GCX_OBJECT);
2247 if (!obj)
2248 return JS_FALSE;
2249 memset(obj, 0, sizeof(JSObject));
2250 /* The class must be set to something for finalization. */
2251 obj->classword = (jsuword) &js_ObjectClass;
2252 obj->fslots[0] = OBJECT_TO_JSVAL(head);
2253 i++;
2254 obj->fslots[1] = INT_TO_JSVAL(i);
2255 head = obj;
2256 }
2257
2258 return JS_TRUE;
2259 }
2260 #endif
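/*
 * Usage sketch (illustrative): the tracer calls js_ReserveObjects before
 * running code that must not trigger a GC; NewGCThing above then draws
 * from this list while JS_TRACE_MONITOR(cx).useReservedObjects is set.
 * Reserving, say, a dozen objects up front:
 *
 *   if (!js_ReserveObjects(cx, 12))
 *       return JS_FALSE;    // could not grow the reserve
 */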
2261
2262 /*
2263 * Shallow GC-things can be locked just by setting the GCF_LOCK bit, because
2264 * they have no descendants to mark during the GC. Currently the optimization
2265 * is used only for non-dependent strings.
2266 */
2267 #define GC_THING_IS_SHALLOW(flagp, thing) \
2268 ((flagp) && \
2269 ((*(flagp) & GCF_TYPEMASK) >= GCX_EXTERNAL_STRING || \
2270 ((*(flagp) & GCF_TYPEMASK) == GCX_STRING && \
2271 !((JSString *) (thing))->isDependent())))
2272
2273 /* This is compatible with JSDHashEntryStub. */
2274 typedef struct JSGCLockHashEntry {
2275 JSDHashEntryHdr hdr;
2276 const void *thing;
2277 uint32 count;
2278 } JSGCLockHashEntry;
2279
2280 JSBool
2281 js_LockGCThingRT(JSRuntime *rt, void *thing)
2282 {
2283 JSBool shallow, ok;
2284 uint8 *flagp;
2285 JSGCLockHashEntry *lhe;
2286
2287 if (!thing)
2288 return JS_TRUE;
2289
2290 flagp = GetGCThingFlagsOrNull(thing);
2291 JS_LOCK_GC(rt);
2292 shallow = GC_THING_IS_SHALLOW(flagp, thing);
2293
2294 /*
2295 * Avoid adding a rt->gcLocksHash entry for shallow things until someone
2296 * nests a lock.
2297 */
2298 if (shallow && !(*flagp & GCF_LOCK)) {
2299 *flagp |= GCF_LOCK;
2300 METER(rt->gcStats.lock++);
2301 ok = JS_TRUE;
2302 goto out;
2303 }
2304
2305 if (!rt->gcLocksHash) {
2306 rt->gcLocksHash = JS_NewDHashTable(JS_DHashGetStubOps(), NULL,
2307 sizeof(JSGCLockHashEntry),
2308 GC_ROOTS_SIZE);
2309 if (!rt->gcLocksHash) {
2310 ok = JS_FALSE;
2311 goto out;
2312 }
2313 }
2314
2315 lhe = (JSGCLockHashEntry *)
2316 JS_DHashTableOperate(rt->gcLocksHash, thing, JS_DHASH_ADD);
2317 if (!lhe) {
2318 ok = JS_FALSE;
2319 goto out;
2320 }
2321 if (!lhe->thing) {
2322 lhe->thing = thing;
2323 lhe->count = 1;
2324 } else {
2325 JS_ASSERT(lhe->count >= 1);
2326 lhe->count++;
2327 }
2328
2329 METER(rt->gcStats.lock++);
2330 ok = JS_TRUE;
2331 out:
2332 JS_UNLOCK_GC(rt);
2333 return ok;
2334 }
2335
2336 JSBool
2337 js_UnlockGCThingRT(JSRuntime *rt, void *thing)
2338 {
2339 uint8 *flagp;
2340 JSBool shallow;
2341 JSGCLockHashEntry *lhe;
2342
2343 if (!thing)
2344 return JS_TRUE;
2345
2346 flagp = GetGCThingFlagsOrNull(thing);
2347 JS_LOCK_GC(rt);
2348 shallow = GC_THING_IS_SHALLOW(flagp, thing);
2349
2350 if (shallow && !(*flagp & GCF_LOCK))
2351 goto out;
2352 if (!rt->gcLocksHash ||
2353 (lhe = (JSGCLockHashEntry *)
2354 JS_DHashTableOperate(rt->gcLocksHash, thing,
2355 JS_DHASH_LOOKUP),
2356 JS_DHASH_ENTRY_IS_FREE(&lhe->hdr))) {
2357 /* Shallow entry is not in the hash -> clear its lock bit. */
2358 if (shallow)
2359 *flagp &= ~GCF_LOCK;
2360 else
2361 goto out;
2362 } else {
2363 if (--lhe->count != 0)
2364 goto out;
2365 JS_DHashTableOperate(rt->gcLocksHash, thing, JS_DHASH_REMOVE);
2366 }
2367
2368 rt->gcPoke = JS_TRUE;
2369 METER(rt->gcStats.unlock++);
2370 out:
2371 JS_UNLOCK_GC(rt);
2372 return JS_TRUE;
2373 }
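/*
 * Usage sketch (illustrative): locks nest by keeping a per-thing count in
 * rt->gcLocksHash (a shallow thing's first lock is just its GCF_LOCK bit).
 * Every successful lock must eventually be balanced by an unlock, or the
 * thing can never be collected; str below is a hypothetical JSString:
 *
 *   if (!js_LockGCThingRT(rt, str))
 *       return JS_FALSE;
 *   ... str survives GC while locked ...
 *   js_UnlockGCThingRT(rt, str);
 */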
2374
2375 JS_PUBLIC_API(void)
2376 JS_TraceChildren(JSTracer *trc, void *thing, uint32 kind)
2377 {
2378 switch (kind) {
2379 case JSTRACE_OBJECT: {
2380 /* If obj has no map, it must be a newborn. */
2381 JSObject *obj = (JSObject *) thing;
2382 if (!obj->map)
2383 break;
2384 obj->map->ops->trace(trc, obj);
2385 break;
2386 }
2387
2388 case JSTRACE_STRING: {
2389 JSString *str = (JSString *) thing;
2390 if (str->isDependent())
2391 JS_CALL_STRING_TRACER(trc, str->dependentBase(), "base");
2392 break;
2393 }
2394
2395 #if JS_HAS_XML_SUPPORT
2396 case JSTRACE_XML:
2397 js_TraceXML(trc, (JSXML *)thing);
2398 break;
2399 #endif
2400 }
2401 }
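/*
 * Illustrative sketch (not part of the engine): a minimal non-marking
 * tracer that counts the direct children of one thing by letting
 * JS_TraceChildren feed them to a callback. CountTracer and CountChild
 * are hypothetical; a full heap walker would also have to remember
 * visited things to terminate on cycles.
 *
 *   typedef struct CountTracer {
 *       JSTracer base;
 *       size_t   n;
 *   } CountTracer;
 *
 *   static void
 *   CountChild(JSTracer *trc, void *thing, uint32 kind)
 *   {
 *       ((CountTracer *) trc)->n++;
 *   }
 *
 *   CountTracer cnt;
 *   JS_TRACER_INIT(&cnt.base, cx, CountChild);
 *   cnt.n = 0;
 *   JS_TraceChildren(&cnt.base, thing, js_GetGCThingTraceKind(thing));
 */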
2402
2403 /*
2404 * Number of things covered by a single bit of JSGCArenaInfo.u.untracedThings.
2405 */
2406 #define THINGS_PER_UNTRACED_BIT(thingSize) \
2407 JS_HOWMANY(THINGS_PER_ARENA(thingSize), JS_BITS_PER_WORD)
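/*
 * Worked example (assumed numbers): if THINGS_PER_ARENA(thingSize) is 112
 * and JS_BITS_PER_WORD is 32, THINGS_PER_UNTRACED_BIT(thingSize) is
 * JS_HOWMANY(112, 32) = 4, so each untracedThings bit stands for up to 4
 * consecutive things and TraceDelayedChildren below rescans that whole
 * run of things whenever the bit is set.
 */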
2408
2409 static void
2410 DelayTracingChildren(JSRuntime *rt, uint8 *flagp)
2411 {
2412 JSGCArenaInfo *a;
2413 uint32 untracedBitIndex;
2414 jsuword bit;
2415
2416 /*
2417 * Things with children to be traced later are marked with
2418 * GCF_MARK | GCF_FINAL flags.
2419 */
2420 JS_ASSERT((*flagp & (GCF_MARK | GCF_FINAL)) == GCF_MARK);
2421 *flagp |= GCF_FINAL;
2422
2423 METER(rt->gcStats.untraced++);
2424 #ifdef DEBUG
2425 ++rt->gcTraceLaterCount;
2426 METER_UPDATE_MAX(rt->gcStats.maxuntraced, rt->gcTraceLaterCount);
2427 #endif
2428
2429 a = FLAGP_TO_ARENA(flagp);
2430 untracedBitIndex = FLAGP_TO_INDEX(flagp) /
2431 THINGS_PER_UNTRACED_BIT(a->list->thingSize);
2432 JS_ASSERT(untracedBitIndex < JS_BITS_PER_WORD);
2433 bit = (jsuword)1 << untracedBitIndex;
2434 if (a->u.untracedThings != 0) {
2435 JS_ASSERT(rt->gcUntracedArenaStackTop);
2436 if (a->u.untracedThings & bit) {
2437 /* bit already covers things with children to trace later. */
2438 return;
2439 }
2440 a->u.untracedThings |= bit;
2441 } else {
2442 /*
2443 * The thing is the first thing in the whole arena whose children are
2444 * not yet traced, so push the arena on the stack of arenas with things
2445 * to be traced later unless the arena has already been pushed. We
2446 * detect an already-pushed arena by checking prevUntracedPage, as that
2447 * field is 0 only for arenas not yet pushed. To ensure that
2448 * prevUntracedPage != 0
2449 * even when the stack contains one element, we make prevUntracedPage
2450 * of the arena at the bottom point to itself.
2451 *
2452 * See comments in TraceDelayedChildren.
2453 */
2454 a->u.untracedThings = bit;
2455 if (a->prevUntracedPage == 0) {
2456 if (!rt->gcUntracedArenaStackTop) {
2457 /* Stack was empty, mark the arena as the bottom element. */
2458 a->prevUntracedPage = ARENA_INFO_TO_PAGE(a);
2459 } else {
2460 JS_ASSERT(rt->gcUntracedArenaStackTop->prevUntracedPage != 0);
2461 a->prevUntracedPage =
2462 ARENA_INFO_TO_PAGE(rt->gcUntracedArenaStackTop);
2463 }
2464 rt->gcUntracedArenaStackTop = a;
2465 }
2466 }
2467 JS_ASSERT(rt->gcUntracedArenaStackTop);
2468 }
2469
2470 static void
2471 TraceDelayedChildren(JSTracer *trc)
2472 {
2473 JSRuntime *rt;
2474 JSGCArenaInfo *a, *aprev;
2475 uint32 thingSize;
2476 uint32 thingsPerUntracedBit;
2477 uint32 untracedBitIndex, thingIndex, indexLimit, endIndex;
2478 JSGCThing *thing;
2479 uint8 *flagp;
2480
2481 rt = trc->context->runtime;
2482 a = rt->gcUntracedArenaStackTop;
2483 if (!a) {
2484 JS_ASSERT(rt->gcTraceLaterCount == 0);
2485 return;
2486 }
2487
2488 for (;;) {
2489 /*
2490 * The following assert verifies that the current arena belongs to the
2491 * untraced stack, since DelayTracingChildren ensures that even for the
2492 * stack's bottom arena prevUntracedPage is not 0 but points to itself.
2493 */
2494 JS_ASSERT(a->prevUntracedPage != 0);
2495 JS_ASSERT(rt->gcUntracedArenaStackTop->prevUntracedPage != 0);
2496 thingSize = a->list->thingSize;
2497 indexLimit = (a == a->list->last)
2498 ? a->list->lastCount
2499 : THINGS_PER_ARENA(thingSize);
2500 thingsPerUntracedBit = THINGS_PER_UNTRACED_BIT(thingSize);
2501
2502 /*
2503 * We cannot use a do-while loop here as a->u.untracedThings can be
2504 * zero before the loop as a leftover from previous iterations. See
2505 * comments after the loop.
2506 */
2507 while (a->u.untracedThings != 0) {
2508 untracedBitIndex = JS_FLOOR_LOG2W(a->u.untracedThings);
2509 a->u.untracedThings &= ~((jsuword)1 << untracedBitIndex);
2510 thingIndex = untracedBitIndex * thingsPerUntracedBit;
2511 endIndex = thingIndex + thingsPerUntracedBit;
2512
2513 /*
2514 * endIndex can go beyond the last allocated thing as the real
2515 * limit can be "inside" the bit.
2516 */
2517 if (endIndex > indexLimit)
2518 endIndex = indexLimit;
2519 JS_ASSERT(thingIndex < indexLimit);
2520
2521 do {
2522 /*
2523 * Skip free or already traced things that share the bit
2524 * with untraced ones.
2525 */
2526 flagp = THING_FLAGP(a, thingIndex);
2527 if ((*flagp & (GCF_MARK|GCF_FINAL)) != (GCF_MARK|GCF_FINAL))
2528 continue;
2529 *flagp &= ~GCF_FINAL;
2530 #ifdef DEBUG
2531 JS_ASSERT(rt->gcTraceLaterCount != 0);
2532 --rt->gcTraceLaterCount;
2533 #endif
2534 thing = FLAGP_TO_THING(flagp, thingSize);
2535 JS_TraceChildren(trc, thing, MapGCFlagsToTraceKind(*flagp));
2536 } while (++thingIndex != endIndex);
2537 }
2538
2539 /*
2540 * We have finished tracing all things in the arena, but we can only
2541 * pop it from the stack if the arena is the stack's top.
2542 *
2543 * When the JS_TraceChildren call above invokes JS_CallTracer, which
2544 * in turn, on a low C stack, calls DelayTracingChildren to push new
2545 * arenas onto the untraced stack, we have to skip popping this arena
2546 * until it becomes the top of the stack again.
2547 */
2548 if (a == rt->gcUntracedArenaStackTop) {
2549 aprev = ARENA_PAGE_TO_INFO(a->prevUntracedPage);
2550 a->prevUntracedPage = 0;
2551 if (a == aprev) {
2552 /*
2553 * prevUntracedPage points to itself and we reached the
2554 * bottom of the stack.
2555 */
2556 break;
2557 }
2558 rt->gcUntracedArenaStackTop = a = aprev;
2559 } else {
2560 a = rt->gcUntracedArenaStackTop;
2561 }
2562 }
2563 JS_ASSERT(rt->gcUntracedArenaStackTop);
2564 JS_ASSERT(rt->gcUntracedArenaStackTop->prevUntracedPage == 0);
2565 rt->gcUntracedArenaStackTop = NULL;
2566 JS_ASSERT(rt->gcTraceLaterCount == 0);
2567 }
2568
2569 JS_PUBLIC_API(void)
2570 JS_CallTracer(JSTracer *trc, void *thing, uint32 kind)
2571 {
2572 JSContext *cx;
2573 JSRuntime *rt;
2574 JSGCArenaInfo *a;
2575 uintN index;
2576 uint8 *flagp;
2577
2578 JS_ASSERT(thing);
2579 JS_ASSERT(JS_IS_VALID_TRACE_KIND(kind));
2580 JS_ASSERT(trc->debugPrinter || trc->debugPrintArg);
2581
2582 if (!IS_GC_MARKING_TRACER(trc)) {
2583 trc->callback(trc, thing, kind);
2584 goto out;
2585 }
2586
2587 cx = trc->context;
2588 rt = cx->runtime;
2589 JS_ASSERT(rt->gcMarkingTracer == trc);
2590 JS_ASSERT(rt->gcLevel > 0);
2591
2592 /*
2593 * Optimize for strings and doubles, as their sizes are known and their
2594 * tracing is not recursive.
2595 */
2596 switch (kind) {
2597 case JSTRACE_DOUBLE:
2598 a = THING_TO_ARENA(thing);
2599 JS_ASSERT(!a->list);
2600 if (!a->u.hasMarkedDoubles) {
2601 ClearDoubleArenaFlags(a);
2602 a->u.hasMarkedDoubles = JS_TRUE;
2603 }
2604 index = DOUBLE_THING_TO_INDEX(thing);
2605 JS_SET_BIT(DOUBLE_ARENA_BITMAP(a), index);
2606 goto out;
2607
2608 case JSTRACE_STRING:
2609 for (;;) {
2610 if (JSString::isStatic(thing))
2611 goto out;
2612 flagp = THING_TO_FLAGP(thing, sizeof(JSGCThing));
2613 JS_ASSERT((*flagp & GCF_FINAL) == 0);
2614 JS_ASSERT(kind == MapGCFlagsToTraceKind(*flagp));
2615 if (!((JSString *) thing)->isDependent()) {
2616 *flagp |= GCF_MARK;
2617 goto out;
2618 }
2619 if (*flagp & GCF_MARK)
2620 goto out;
2621 *flagp |= GCF_MARK;
2622 thing = ((JSString *) thing)->dependentBase();
2623 }
2624 /* NOTREACHED */
2625 }
2626
2627 flagp = GetGCThingFlags(thing);
2628 JS_ASSERT(kind == MapGCFlagsToTraceKind(*flagp));
2629 if (*flagp & GCF_MARK)
2630 goto out;
2631
2632 /*
2633 * We check for the non-final flag only when the mark bit is unset, as
2634 * DelayTracingChildren uses that flag. See comments in the function.
2635 */
2636 JS_ASSERT(*flagp != GCF_FINAL);
2637 *flagp |= GCF_MARK;
2638 if (!cx->insideGCMarkCallback) {
2639 /*
2640 * With JS_GC_ASSUME_LOW_C_STACK defined the mark phase of GC always
2641 * uses the non-recursive code that otherwise would be called only on
2642 * a low C stack condition.
2643 */
2644 #ifdef JS_GC_ASSUME_LOW_C_STACK
2645 # define RECURSION_TOO_DEEP() JS_TRUE
2646 #else
2647 int stackDummy;
2648 # define RECURSION_TOO_DEEP() (!JS_CHECK_STACK_SIZE(cx, stackDummy))
2649 #endif
2650 if (RECURSION_TOO_DEEP())
2651 DelayTracingChildren(rt, flagp);
2652 else
2653 JS_TraceChildren(trc, thing, kind);
2654 } else {
2655 /*
2656 * For API compatibility we allow the callback to assume that after
2657 * it calls JS_MarkGCThing for the last time, it can start to
2658 * finalize its own objects that are referenced only by unmarked
2659 * GC things.
2660 *
2661 * Since we do not know which call from inside the callback is the
2662 * last, we ensure that children of all marked things are traced and
2663 * call TraceDelayedChildren(trc) after tracing the thing.
2664 *
2665 * As TraceDelayedChildren unconditionally invokes JS_TraceChildren
2666 * for the things with untraced children, calling DelayTracingChildren
2667 * is useless here. Hence we always trace the thing's children even
2668 * with a low native stack.
2669 */
2670 cx->insideGCMarkCallback = JS_FALSE;
2671 JS_TraceChildren(trc, thing, kind);
2672 TraceDelayedChildren(trc);
2673 cx->insideGCMarkCallback = JS_TRUE;
2674 }
2675
2676 out:
2677 #ifdef DEBUG
2678 trc->debugPrinter = NULL;
2679 trc->debugPrintArg = NULL;
2680 #endif
2681 return; /* to avoid "out:" immediately before the closing brace when DEBUG is not defined */
2682 }
2683
2684 void
2685 js_CallValueTracerIfGCThing(JSTracer *trc, jsval v)
2686 {
2687 void *thing;
2688 uint32 kind;
2689
2690 if (JSVAL_IS_DOUBLE(v) || JSVAL_IS_STRING(v)) {
2691 thing = JSVAL_TO_TRACEABLE(v);
2692 kind = JSVAL_TRACE_KIND(v);
2693 JS_ASSERT(kind == js_GetGCThingTraceKind(JSVAL_TO_GCTHING(v)));
2694 } else if (JSVAL_IS_OBJECT(v) && v != JSVAL_NULL) {
2695 /* v can be an arbitrary GC thing reinterpreted as an object. */
2696 thing = JSVAL_TO_OBJECT(v);
2697 kind = js_GetGCThingTraceKind(thing);
2698 } else {
2699 return;
2700 }
2701 JS_CallTracer(trc, thing, kind);
2702 }
2703
2704 static JSDHashOperator
2705 gc_root_traversal(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 num,
2706 void *arg)
2707 {
2708 JSGCRootHashEntry *rhe = (JSGCRootHashEntry *)hdr;
2709 JSTracer *trc = (JSTracer *)arg;
2710 jsval *rp = (jsval *)rhe->root;
2711 jsval v = *rp;
2712
2713 /* Ignore null reference, scalar values, and static strings. */
2714 if (!JSVAL_IS_NULL(v) &&
2715 JSVAL_IS_GCTHING(v) &&
2716 !JSString::isStatic(JSVAL_TO_GCTHING(v))) {
2717 #ifdef DEBUG
2718 JSBool root_points_to_gcArenaList = JS_FALSE;
2719 jsuword thing = (jsuword) JSVAL_TO_GCTHING(v);
2720 JSRuntime *rt;
2721 uintN i;
2722 JSGCArenaList *arenaList;
2723 uint32 thingSize;
2724 JSGCArenaInfo *a;
2725 size_t limit;
2726
2727 rt = trc->context->runtime;
2728 for (i = 0; i < GC_NUM_FREELISTS; i++) {
2729 arenaList = &rt->gcArenaList[i];
2730 thingSize = arenaList->thingSize;
2731 limit = (size_t) arenaList->lastCount * thingSize;
2732 for (a = arenaList->last; a; a = a->prev) {
2733 if (thing - ARENA_INFO_TO_START(a) < limit) {
2734 root_points_to_gcArenaList = JS_TRUE;
2735 break;
2736 }
2737 limit = (size_t) THINGS_PER_ARENA(thingSize) * thingSize;
2738 }
2739 }
2740 if (!root_points_to_gcArenaList) {
2741 for (a = rt->gcDoubleArenaList.first; a; a = a->prev) {
2742 if (thing - ARENA_INFO_TO_START(a) <
2743 DOUBLES_PER_ARENA * sizeof(jsdouble)) {
2744 root_points_to_gcArenaList = JS_TRUE;
2745 break;
2746 }
2747 }
2748 }
2749 if (!root_points_to_gcArenaList && rhe->name) {
2750 fprintf(stderr,
2751 "JS API usage error: the address passed to JS_AddNamedRoot currently holds an\n"
2752 "invalid jsval. This is usually caused by a missing call to JS_RemoveRoot.\n"
2753 "The root's name is \"%s\".\n",
2754 rhe->name);
2755 }
2756 JS_ASSERT(root_points_to_gcArenaList);
2757 #endif
2758 JS_SET_TRACING_NAME(trc, rhe->name ? rhe->name : "root");
2759 js_CallValueTracerIfGCThing(trc, v);
2760 }
2761
2762 return JS_DHASH_NEXT;
2763 }
2764
2765 static JSDHashOperator
2766 gc_lock_traversal(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 num,
2767 void *arg)
2768 {
2769 JSGCLockHashEntry *lhe = (JSGCLockHashEntry *)hdr;
2770 void *thing = (void *)lhe->thing;
2771 JSTracer *trc = (JSTracer *)arg;
2772 uint32 traceKind;
2773
2774 JS_ASSERT(lhe->count >= 1);
2775 traceKind = js_GetGCThingTraceKind(thing);
2776 JS_CALL_TRACER(trc, thing, traceKind, "locked object");
2777 return JS_DHASH_NEXT;
2778 }
2779
2780 #define TRACE_JSVALS(trc, len, vec, name) \
2781 JS_BEGIN_MACRO \
2782 jsval _v, *_vp, *_end; \
2783 \
2784 for (_vp = vec, _end = _vp + len; _vp < _end; _vp++) { \
2785 _v = *_vp; \
2786 if (JSVAL_IS_TRACEABLE(_v)) { \
2787 JS_SET_TRACING_INDEX(trc, name, _vp - (vec)); \
2788 JS_CallTracer(trc, JSVAL_TO_TRACEABLE(_v), \
2789 JSVAL_TRACE_KIND(_v)); \
2790 } \
2791 } \
2792 JS_END_MACRO
2793
2794 void
2795 js_TraceStackFrame(JSTracer *trc, JSStackFrame *fp)
2796 {
2797 uintN nslots, minargs, skip;
2798
2799 if (fp->callobj)
2800 JS_CALL_OBJECT_TRACER(trc, fp->callobj, "call");
2801 if (fp->argsobj)
2802 JS_CALL_OBJECT_TRACER(trc, JSVAL_TO_OBJECT(fp->argsobj), "arguments");
2803 if (fp->varobj)
2804 JS_CALL_OBJECT_TRACER(trc, fp->varobj, "variables");
2805 if (fp->script) {
2806 js_TraceScript(trc, fp->script);
2807
2808 /* fp->slots is null for watch pseudo-frames, see js_watch_set. */
2809 if (fp->slots) {
2810 /*
2811 * Don't mark what has not been pushed yet, or what has been
2812 * popped already.
2813 */
2814 if (fp->regs && fp->regs->sp) {
2815 nslots = (uintN) (fp->regs->sp - fp->slots);
2816 JS_ASSERT(nslots >= fp->script->nfixed);
2817 } else {
2818 nslots = fp->script->nfixed;
2819 }
2820 TRACE_JSVALS(trc, nslots, fp->slots, "slot");
2821 }
2822 } else {
2823 JS_ASSERT(!fp->slots);
2824 JS_ASSERT(!fp->regs);
2825 }
2826
2827 /* Allow for primitive this parameter due to JSFUN_THISP_* flags. */
2828 JS_ASSERT(JSVAL_IS_OBJECT((jsval)fp->thisp) ||
2829 (fp->fun && JSFUN_THISP_FLAGS(fp->fun->flags)));
2830 JS_CALL_VALUE_TRACER(trc, (jsval)fp->thisp, "this");
2831
2832 if (fp->argv) {
2833 JS_CALL_VALUE_TRACER(trc, fp->argv[-2], "callee");
2834 nslots = fp->argc;
2835 skip = 0;
2836 if (fp->fun) {
2837 minargs = FUN_MINARGS(fp->fun);
2838 if (minargs > nslots)
2839 nslots = minargs;
2840 if (!FUN_INTERPRETED(fp->fun)) {
2841 JS_ASSERT(!(fp->fun->flags & JSFUN_FAST_NATIVE));
2842 nslots += fp->fun->u.n.extra;
2843 }
2844 if (fp->fun->flags & JSFRAME_ROOTED_ARGV)
2845 skip = 2 + fp->argc;
2846 }
2847 TRACE_JSVALS(trc, 2 + nslots - skip, fp->argv - 2 + skip, "operand");
2848 }
2849
2850 JS_CALL_VALUE_TRACER(trc, fp->rval, "rval");
2851 if (fp->scopeChain)
2852 JS_CALL_OBJECT_TRACER(trc, fp->scopeChain, "scope chain");
2853 if (fp->sharpArray)
2854 JS_CALL_OBJECT_TRACER(trc, fp->sharpArray, "sharp array");
2855 }
2856
2857 static void
2858 TraceWeakRoots(JSTracer *trc, JSWeakRoots *wr)
2859 {
2860 uint32 i;
2861 void *thing;
2862
2863 #ifdef DEBUG
2864 static const char *weakRootNames[JSTRACE_LIMIT] = {
2865 "newborn object",
2866 "newborn double",
2867 "newborn string",
2868 "newborn xml"
2869 };
2870 #endif
2871
2872 for (i = 0; i != JSTRACE_LIMIT; i++) {
2873 thing = wr->newborn[i];
2874 if (thing)
2875 JS_CALL_TRACER(trc, thing, i, weakRootNames[i]);
2876 }
2877 JS_ASSERT(i == GCX_EXTERNAL_STRING);
2878 for (; i != GCX_NTYPES; ++i) {
2879 thing = wr->newborn[i];
2880 if (thing) {
2881 JS_SET_TRACING_INDEX(trc, "newborn external string",
2882 i - GCX_EXTERNAL_STRING);
2883 JS_CallTracer(trc, thing, JSTRACE_STRING);
2884 }
2885 }
2886
2887 JS_CALL_VALUE_TRACER(trc, wr->lastAtom, "lastAtom");
2888 JS_SET_TRACING_NAME(trc, "lastInternalResult");
2889 js_CallValueTracerIfGCThing(trc, wr->lastInternalResult);
2890 }
2891
2892 JS_REQUIRES_STACK JS_FRIEND_API(void)
2893 js_TraceContext(JSTracer *trc, JSContext *acx)
2894 {
2895 JSStackFrame *fp, *nextChain;
2896 JSStackHeader *sh;
2897 JSTempValueRooter *tvr;
2898
2899 if (IS_GC_MARKING_TRACER(trc)) {
2900
2901 #define FREE_OLD_ARENAS(pool) \
2902 JS_BEGIN_MACRO \
2903 int64 _age; \
2904 JSArena * _a = (pool).current; \
2905 if (_a == (pool).first.next && \
2906 _a->avail == _a->base + sizeof(int64)) { \
2907 _age = JS_Now() - *(int64 *) _a->base; \
2908 if (_age > (int64) acx->runtime->gcEmptyArenaPoolLifespan * \
2909 1000) \
2910 JS_FreeArenaPool(&(pool)); \
2911 } \
2912 JS_END_MACRO
2913
2914 /*
2915 * Release the stackPool's arenas if the stackPool has existed for
2916 * longer than the limit specified by gcEmptyArenaPoolLifespan.
2917 */
2918 FREE_OLD_ARENAS(acx->stackPool);
2919
2920 /*
2921 * Release the regexpPool's arenas based on the same criterion as for
2922 * the stackPool.
2923 */
2924 FREE_OLD_ARENAS(acx->regexpPool);
2925
2926 /*
2927 * Clear the double free list to release all the pre-allocated doubles.
2928 */
2929 acx->doubleFreeList = NULL;
2930 }
2931
2932 /*
2933 * Iterate frame chain and dormant chains.
2934 *
2935 * (NB: see comment on this whole "dormant" thing in js_Execute.)
2936 *
2937 * Since js_GetTopStackFrame needs to dereference cx->thread to check for
2938 * JIT frames, we check for non-null thread here and avoid null checks
2939 * there. See bug 471197.
2940 */
2941 #ifdef JS_THREADSAFE
2942 if (acx->thread)
2943 #endif
2944 {
2945 fp = js_GetTopStackFrame(acx);
2946 nextChain = acx->dormantFrameChain;
2947 if (!fp)
2948 goto next_chain;
2949
2950 /* The top frame must not be dormant. */
2951 JS_ASSERT(!fp->dormantNext);
2952 for (;;) {
2953 do {
2954 js_TraceStackFrame(trc, fp);
2955 } while ((fp = fp->down) != NULL);
2956
2957 next_chain:
2958 if (!nextChain)
2959 break;
2960 fp = nextChain;
2961 nextChain = nextChain->dormantNext;
2962 }
2963 }
2964
2965 /* Mark other roots-by-definition in acx. */
2966 if (acx->globalObject && !JS_HAS_OPTION(acx, JSOPTION_UNROOTED_GLOBAL))
2967 JS_CALL_OBJECT_TRACER(trc, acx->globalObject, "global object");
2968 TraceWeakRoots(trc, &acx->weakRoots);
2969 if (acx->throwing) {
2970 JS_CALL_VALUE_TRACER(trc, acx->exception, "exception");
2971 } else {
2972 /* Avoid keeping GC-ed junk stored in JSContext.exception. */
2973 acx->exception = JSVAL_NULL;
2974 }
2975 #if JS_HAS_LVALUE_RETURN
2976 if (acx->rval2set)
2977 JS_CALL_VALUE_TRACER(trc, acx->rval2, "rval2");
2978 #endif
2979
2980 for (sh = acx->stackHeaders; sh; sh = sh->down) {
2981 METER(trc->context->runtime->gcStats.stackseg++);
2982 METER(trc->context->runtime->gcStats.segslots += sh->nslots);
2983 TRACE_JSVALS(trc, sh->nslots, JS_STACK_SEGMENT(sh), "stack");
2984 }
2985
2986 if (acx->localRootStack)
2987 js_TraceLocalRoots(trc, acx->localRootStack);
2988
2989 for (tvr = acx->tempValueRooters; tvr; tvr = tvr->down) {
2990 switch (tvr->count) {
2991 case JSTVU_SINGLE:
2992 JS_SET_TRACING_NAME(trc, "tvr->u.value");
2993 js_CallValueTracerIfGCThing(trc, tvr->u.value);
2994 break;
2995 case JSTVU_TRACE:
2996 tvr->u.trace(trc, tvr);
2997 break;
2998 case JSTVU_SPROP:
2999 tvr->u.sprop->trace(trc);
3000 break;
3001 case JSTVU_WEAK_ROOTS:
3002 TraceWeakRoots(trc, tvr->u.weakRoots);
3003 break;
3004 case JSTVU_COMPILER:
3005 tvr->u.compiler->trace(trc);
3006 break;
3007 case JSTVU_SCRIPT:
3008 js_TraceScript(trc, tvr->u.script);
3009 break;
3010 case JSTVU_ENUMERATOR:
3011 static_cast<JSAutoEnumStateRooter *>(tvr)->mark(trc);
3012 break;
3013 default:
3014 JS_ASSERT(tvr->count >= 0);
3015 TRACE_JSVALS(trc, tvr->count, tvr->u.array, "tvr->u.array");
3016 }
3017 }
3018
3019 if (acx->sharpObjectMap.depth > 0)
3020 js_TraceSharpMap(trc, &acx->sharpObjectMap);
3021
3022 js_TraceRegExpStatics(trc, acx);
3023
3024 #ifdef JS_TRACER
3025 InterpState* state = acx->interpState;
3026 while (state) {
3027 if (state->nativeVp)
3028 TRACE_JSVALS(trc, state->nativeVpLen, state->nativeVp, "nativeVp");
3029 state = state->prev;
3030 }
3031 #endif
3032 }
3033
3034 #ifdef JS_TRACER
3035
3036 static void
3037 MarkReservedGCThings(JSTraceMonitor *tm)
3038 {
3039 /* Keep reserved doubles. */
3040 for (jsval *ptr = tm->reservedDoublePool; ptr < tm->reservedDoublePoolPtr; ++ptr) {
3041 jsdouble* dp = JSVAL_TO_DOUBLE(*ptr);
3042 JS_ASSERT(js_GetGCThingTraceKind(dp) == JSTRACE_DOUBLE);
3043
3044 JSGCArenaInfo *a = THING_TO_ARENA(dp);
3045 JS_ASSERT(!a->list);
3046 if (!a->u.hasMarkedDoubles) {
3047 ClearDoubleArenaFlags(a);
3048 a->u.hasMarkedDoubles = JS_TRUE;
3049 }
3050 jsuint index = DOUBLE_THING_TO_INDEX(dp);
3051 JS_SET_BIT(DOUBLE_ARENA_BITMAP(a), index);
3052 }
3053 /* Keep reserved objects. */
3054 for (JSObject *obj = tm->reservedObjects; obj; obj = JSVAL_TO_OBJECT(obj->fslots[0])) {
3055 uint8 *flagp = GetGCThingFlags(obj);
3056 JS_ASSERT((*flagp & GCF_TYPEMASK) == GCX_OBJECT);
3057 JS_ASSERT(*flagp != GCF_FINAL);
3058 *flagp |= GCF_MARK;
3059 }
3060 }
3061
3062 #ifdef JS_THREADSAFE
3063 static JSDHashOperator
3064 reserved_gcthings_marker(JSDHashTable *table, JSDHashEntryHdr *hdr,
3065 uint32, void *)
3066 {
3067 JSThread *thread = ((JSThreadsHashEntry *) hdr)->thread;
3068
3069 MarkReservedGCThings(&thread->data.traceMonitor);
3070 return JS_DHASH_NEXT;
3071 }
3072 #endif
3073
3074 #endif
3075
3076 JS_REQUIRES_STACK void
3077 js_TraceRuntime(JSTracer *trc, JSBool allAtoms)
3078 {
3079 JSRuntime *rt = trc->context->runtime;
3080 JSContext *iter, *acx;
3081
3082 JS_DHashTableEnumerate(&rt->gcRootsHash, gc_root_traversal, trc);
3083 if (rt->gcLocksHash)
3084 JS_DHashTableEnumerate(rt->gcLocksHash, gc_lock_traversal, trc);
3085 js_TraceAtomState(trc, allAtoms);
3086 js_TraceRuntimeNumberState(trc);
3087
3088 iter = NULL;
3089 while ((acx = js_ContextIterator(rt, JS_TRUE, &iter)) != NULL)
3090 js_TraceContext(trc, acx);
3091
3092 js_TraceThreads(rt, trc);
3093
3094 if (rt->gcExtraRootsTraceOp)
3095 rt->gcExtraRootsTraceOp(trc, rt->gcExtraRootsData);
3096
3097 #ifdef JS_TRACER
3098 for (int i = 0; i < JSBUILTIN_LIMIT; i++) {
3099 if (rt->builtinFunctions[i])
3100 JS_CALL_OBJECT_TRACER(trc, rt->builtinFunctions[i], "builtin function");
3101 }
3102
3103 /* Mark reserved gcthings unless we are shutting down. */
3104 if (IS_GC_MARKING_TRACER(trc) && rt->state != JSRTS_LANDING) {
3105 #ifdef JS_THREADSAFE
3106 JS_DHashTableEnumerate(&rt->threads, reserved_gcthings_marker, NULL);
3107 #else
3108 MarkReservedGCThings(&rt->threadData.traceMonitor);
3109 #endif
3110 }
3111
3112 #endif
3113 }
3114
3115 void
3116 js_TriggerGC(JSContext *cx, JSBool gcLocked)
3117 {
3118 JSRuntime *rt = cx->runtime;
3119
3120 #ifdef JS_THREADSAFE
3121 JS_ASSERT(cx->requestDepth > 0);
3122 #endif
3123 JS_ASSERT(!rt->gcRunning);
3124 if (rt->gcIsNeeded)
3125 return;
3126
3127 /*
3128 * Trigger the GC when it is safe to call an operation callback on any
3129 * thread.
3130 */
3131 rt->gcIsNeeded = JS_TRUE;
3132 js_TriggerAllOperationCallbacks(rt, gcLocked);
3133 }
3134
3135 static void
3136 ProcessSetSlotRequest(JSContext *cx, JSSetSlotRequest *ssr)
3137 {
3138 JSObject *obj = ssr->obj;
3139 JSObject *pobj = ssr->pobj;
3140 uint32 slot = ssr->slot;
3141
3142 while (pobj) {
3143 pobj = js_GetWrappedObject(cx, pobj);
3144 if (pobj == obj) {
3145 ssr->cycle = true;
3146 return;
3147 }
3148 pobj = JSVAL_TO_OBJECT(STOBJ_GET_SLOT(pobj, slot));
3149 }
3150
3151 pobj = ssr->pobj;
3152 if (slot == JSSLOT_PROTO) {
3153 obj->setProto(pobj);
3154 } else {
3155 JS_ASSERT(slot == JSSLOT_PARENT);
3156 obj->setParent(pobj);
3157 }
3158 }
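/*
 * Illustrative note: the loop above walks the proposed proto or parent
 * chain through js_GetWrappedObject looking for obj itself, so a script
 * assignment like obj.__proto__ = obj (possibly through a wrapper) sets
 * ssr->cycle and lets the requesting code report a cyclic-value error
 * instead of installing an unterminated chain.
 */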
3159
3160 void
3161 js_DestroyScriptsToGC(JSContext *cx, JSThreadData *data)
3162 {
3163 JSScript **listp, *script;
3164
3165 for (size_t i = 0; i != JS_ARRAY_LENGTH(data->scriptsToGC); ++i) {
3166 listp = &data->scriptsToGC[i];
3167 while ((script = *listp) != NULL) {
3168 *listp = script->u.nextToGC;
3169 script->u.nextToGC = NULL;
3170 js_DestroyScript(cx, script);
3171 }
3172 }
3173 }
3174
3175 static void
3176 FinalizeObject(JSContext *cx, JSObject *obj)
3177 {
3178 /* Cope with stillborn objects that have no map. */
3179 if (!obj->map)
3180 return;
3181
3182 if (JS_UNLIKELY(cx->debugHooks->objectHook != NULL)) {
3183 cx->debugHooks->objectHook(cx, obj, JS_FALSE,
3184 cx->debugHooks->objectHookData);
3185 }
3186
3187 /* Finalize obj first, in case it needs map and slots. */
3188 JSClass *clasp = STOBJ_GET_CLASS(obj);
3189 if (clasp->finalize)
3190 clasp->finalize(cx, obj);
3191
3192 #ifdef INCLUDE_MOZILLA_DTRACE
3193 if (JAVASCRIPT_OBJECT_FINALIZE_ENABLED())
3194 jsdtrace_object_finalize(obj);
3195 #endif
3196
3197 if (JS_LIKELY(OBJ_IS_NATIVE(obj)))
3198 OBJ_SCOPE(obj)->drop(cx, obj);
3199 js_FreeSlots(cx, obj);
3200 }
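/*
 * Illustrative sketch (not part of the engine): the clasp->finalize hook
 * called above is an ordinary JSClass finalizer. A hypothetical class
 * with JSCLASS_HAS_PRIVATE releasing native state might use:
 *
 *   static void
 *   MyFinalize(JSContext *cx, JSObject *obj)
 *   {
 *       // Native teardown only: allocating GC things here fails
 *       // because rt->gcRunning is set during the sweep phase.
 *       void *state = JS_GetPrivate(cx, obj);
 *       if (state)
 *           free(state);
 *   }
 */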
3201
3202 static JSStringFinalizeOp str_finalizers[GCX_NTYPES - GCX_EXTERNAL_STRING] = {
3203 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL
3204 };
3205
3206 intN
3207 js_ChangeExternalStringFinalizer(JSStringFinalizeOp oldop,
3208 JSStringFinalizeOp newop)
3209 {
3210 uintN i;
3211
3212 for (i = 0; i != JS_ARRAY_LENGTH(str_finalizers); i++) {
3213 if (str_finalizers[i] == oldop) {
3214 str_finalizers[i] = newop;
3215 return (intN) i;
3216 }
3217 }
3218 return -1;
3219 }
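/*
 * Usage sketch (illustrative): installing an external string finalizer
 * claims a free slot in str_finalizers and returns the type index to pass
 * to JS_NewExternalString; passing the finalizer as oldop with a NULL
 * newop releases the slot again. MyStrFinalizer is hypothetical:
 *
 *   intN type = js_ChangeExternalStringFinalizer(NULL, MyStrFinalizer);
 *   if (type < 0)
 *       return JS_FALSE;    // all slots in str_finalizers are taken
 *   ...
 *   js_ChangeExternalStringFinalizer(MyStrFinalizer, NULL);
 */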
3220
3221 /*
3222 * cx is NULL when we are called from js_FinishAtomState to force the
3223 * finalization of the permanently interned strings.
3224 */
3225 void
3226 js_FinalizeStringRT(JSRuntime *rt, JSString *str, intN type, JSContext *cx)
3227 {
3228 jschar *chars;
3229 JSBool valid;
3230 JSStringFinalizeOp finalizer;
3231
3232 JS_RUNTIME_UNMETER(rt, liveStrings);
3233 JS_ASSERT(!JSString::isStatic(str));
3234 if (str->isDependent()) {
3235 /* A dependent string cannot be external and must be valid. */
3236 JS_ASSERT(type < 0);
3237 JS_ASSERT(str->dependentBase());
3238 JS_RUNTIME_UNMETER(rt, liveDependentStrings);
3239 valid = JS_TRUE;
3240 } else {
3241 /* A stillborn string has null chars, so is not valid. */
3242 chars = str->flatChars();
3243 valid = (chars != NULL);
3244 if (valid) {
3245 if (type < 0) {
3246 if (cx)
3247 cx->free(chars);
3248 else
3249 rt->free(chars);
3250 } else {
3251 JS_ASSERT((uintN) type < JS_ARRAY_LENGTH(str_finalizers));
3252 finalizer = str_finalizers[type];
3253 if (finalizer) {
3254 /*
3255 * Assume that the finalizer for the permanently interned
3256 * string knows how to deal with null context.
3257 */
3258 finalizer(cx, str);
3259 }
3260 }
3261 }
3262 }
3263 if (valid && str->isDeflated())
3264 js_PurgeDeflatedStringCache(rt, str);
3265 }
3266
3267 /*
3268 * The gckind flag bit GC_LOCK_HELD indicates a call from js_NewGCThing with
3269 * rt->gcLock already held, so the lock should be kept on return.
3270 */
3271 void
3272 js_GC(JSContext *cx, JSGCInvocationKind gckind)
3273 {
3274 JSRuntime *rt;
3275 JSBool keepAtoms;
3276 JSGCCallback callback;
3277 uintN i, type;
3278 JSTracer trc;
3279 uint32 thingSize, indexLimit;
3280 JSGCArenaInfo *a, **ap, *emptyArenas;
3281 uint8 flags, *flagp;
3282 JSGCThing *thing, *freeList;
3283 JSGCArenaList *arenaList;
3284 JSBool allClear;
3285 #ifdef JS_THREADSAFE
3286 uint32 requestDebit;
3287 #endif
3288 #ifdef JS_GCMETER
3289 uint32 nlivearenas, nkilledarenas, nthings;
3290 #endif
3291
3292 JS_ASSERT_IF(gckind == GC_LAST_DITCH, !JS_ON_TRACE(cx));
3293 rt = cx->runtime;
3294
3295 #ifdef JS_THREADSAFE
3296 /*
3297 * We allow js_GC calls outside a request but the context must be bound
3298 * to the current thread.
3299 */
3300 JS_ASSERT(CURRENT_THREAD_IS_ME(cx->thread));
3301
3302 /* Avoid deadlock. */
3303 JS_ASSERT(!JS_IS_RUNTIME_LOCKED(rt));
3304 #endif
3305
3306 if (gckind & GC_KEEP_ATOMS) {
3307 /*
3308 * The set slot request and last ditch GC kinds preserve all atoms and
3309 * weak roots.
3310 */
3311 keepAtoms = JS_TRUE;
3312 } else {
3313 /* Keep atoms when a suspended compilation is running on another context. */
3314 keepAtoms = (rt->gcKeepAtoms != 0);
3315 JS_CLEAR_WEAK_ROOTS(&cx->weakRoots);
3316 }
3317
3318 /*
3319 * Don't collect garbage if the runtime isn't up and cx is not the last
3320 * context in the runtime. The last context must force a GC, and nothing
3321 * should suppress that final collection or there may be shutdown leaks,
3322 * or runtime bloat until the next context is created.
3323 */
3324 if (rt->state != JSRTS_UP && gckind != GC_LAST_CONTEXT)
3325 return;
3326
3327 restart_at_beginning:
3328 /*
3329 * Let the API user decide to defer a GC if it wants to (unless this
3330 * is the last context). Invoke the callback regardless. Sample the
3331 * callback in case we are freely racing with a JS_SetGCCallback{,RT} on
3332 * another thread.
3333 */
3334 if (gckind != GC_SET_SLOT_REQUEST && (callback = rt->gcCallback)) {
3335 JSBool ok;
3336
3337 if (gckind & GC_LOCK_HELD)
3338 JS_UNLOCK_GC(rt);
3339 ok = callback(cx, JSGC_BEGIN);
3340 if (gckind & GC_LOCK_HELD)
3341 JS_LOCK_GC(rt);
3342 if (!ok && gckind != GC_LAST_CONTEXT) {
3343 /*
3344 * It's possible that we've looped back to this code from the 'goto
3345 * restart_at_beginning' below in the GC_SET_SLOT_REQUEST code and
3346 * that rt->gcLevel is now 0. Don't return without notifying!
3347 */
3348 if (rt->gcLevel == 0 && (gckind & GC_LOCK_HELD))
3349 JS_NOTIFY_GC_DONE(rt);
3350 return;
3351 }
3352 }
3353
3354 /* Lock out other GC allocator and collector invocations. */
3355 if (!(gckind & GC_LOCK_HELD))
3356 JS_LOCK_GC(rt);
3357
3358 METER(rt->gcStats.poke++);
3359 rt->gcPoke = JS_FALSE;
3360
3361 #ifdef JS_THREADSAFE
3362 /*
3363 * Check if the GC is already running on this or another thread and
3364 * delegate the job to it.
3365 */
3366 if (rt->gcLevel > 0) {
3367 JS_ASSERT(rt->gcThread);
3368
3369 /* Bump gcLevel to restart the current GC, so it finds new garbage. */
3370 rt->gcLevel++;
3371 METER_UPDATE_MAX(rt->gcStats.maxlevel, rt->gcLevel);
3372
3373 /*
3374 * If the GC runs on another thread, temporarily suspend the current
3375 * request and wait until the GC is done.
3376 */
3377 if (rt->gcThread != cx->thread) {
3378 requestDebit = js_DiscountRequestsForGC(cx);
3379 js_RecountRequestsAfterGC(rt, requestDebit);
3380 }
3381 if (!(gckind & GC_LOCK_HELD))
3382 JS_UNLOCK_GC(rt);
3383 return;
3384 }
3385
3386 /* No other thread is in GC, so indicate that we're now in GC. */
3387 rt->gcLevel = 1;
3388 rt->gcThread = cx->thread;
3389
3390 /*
3391 * Notify all operation callbacks, which will give them a chance to
3392 * yield their current request. Contexts that are not currently
3393 * executing will perform their callback at some later point,
3394 * by which point it will be unnecessary, but harmless.
3395 */
3396 js_NudgeOtherContexts(cx);
3397
3398 /*
3399 * Discount all the requests on the current thread from contributing
3400 * to rt->requestCount before we wait for all other requests to finish.
3401 * JS_NOTIFY_REQUEST_DONE, which will wake us up, is only called on
3402 * rt->requestCount transitions to 0.
3403 */
3404 requestDebit = js_CountThreadRequests(cx);
3405 JS_ASSERT_IF(cx->requestDepth != 0, requestDebit >= 1);
3406 rt->requestCount -= requestDebit;
3407 while (rt->requestCount > 0)
3408 JS_AWAIT_REQUEST_DONE(rt);
3409 rt->requestCount += requestDebit;
3410
3411 #else /* !JS_THREADSAFE */
3412
3413 /* Bump gcLevel and return rather than nest; the outer gc will restart. */
3414 rt->gcLevel++;
3415 METER_UPDATE_MAX(rt->gcStats.maxlevel, rt->gcLevel);
3416 if (rt->gcLevel > 1)
3417 return;
3418
3419 #endif /* !JS_THREADSAFE */
3420
3421 /*
3422 * Set rt->gcRunning here within the GC lock, and after waiting for any
3423 * active requests to end, so that new requests that try to JS_AddRoot,
3424 * JS_RemoveRoot, or JS_RemoveRootRT block in JS_BeginRequest waiting for
3425 * rt->gcLevel to drop to zero, while request-less calls to the *Root*
3426 * APIs block in js_AddRoot or js_RemoveRoot (see above in this file),
3427 * waiting for GC to finish.
3428 */
3429 rt->gcRunning = JS_TRUE;
3430
3431 if (gckind == GC_SET_SLOT_REQUEST) {
3432 JSSetSlotRequest *ssr;
3433
3434 while ((ssr = rt->setSlotRequests) != NULL) {
3435 rt->setSlotRequests = ssr->next;
3436 JS_UNLOCK_GC(rt);
3437 ssr->next = NULL;
3438 ProcessSetSlotRequest(cx, ssr);
3439 JS_LOCK_GC(rt);
3440 }
3441
3442 /*
3443 * We assume here that killing links to parent and prototype objects
3444 * does not create garbage (such objects typically are long-lived and
3445 * widely shared, e.g. global objects, Function.prototype, etc.). We
3446 * collect garbage only if a racing thread attempted GC and is waiting
3447 * for us to finish (gcLevel > 1) or if someone already poked us.
3448 */
3449 if (rt->gcLevel == 1 && !rt->gcPoke && !rt->gcIsNeeded)
3450 goto done_running;
3451
3452 rt->gcLevel = 0;
3453 rt->gcPoke = JS_FALSE;
3454 rt->gcRunning = JS_FALSE;
3455 #ifdef JS_THREADSAFE
3456 rt->gcThread = NULL;
3457 #endif
3458 gckind = GC_LOCK_HELD;
3459 goto restart_at_beginning;
3460 }
3461
3462 JS_UNLOCK_GC(rt);
3463
3464 #ifdef JS_TRACER
3465 if (JS_ON_TRACE(cx))
3466 goto out;
3467 #endif
3468 VOUCH_HAVE_STACK();
3469
3470 /* Clear gcIsNeeded now, when we are about to start a normal GC cycle. */
3471 rt->gcIsNeeded = JS_FALSE;
3472
3473 /* Reset malloc counter. */
3474 rt->gcMallocBytes = 0;
3475
3476 #ifdef JS_DUMP_SCOPE_METERS
3477 { extern void js_DumpScopeMeters(JSRuntime *rt);
3478 js_DumpScopeMeters(rt);
3479 }
3480 #endif
3481
3482 #ifdef JS_TRACER
3483 js_PurgeJITOracle();
3484 #endif
3485
3486 restart:
3487 rt->gcNumber++;
3488 JS_ASSERT(!rt->gcUntracedArenaStackTop);
3489 JS_ASSERT(rt->gcTraceLaterCount == 0);
3490
3491 /*
3492 * Reset the property cache's type id generator so we can compress ids.
3493 * Same for the protoHazardShape proxy-shape standing in for all object
3494 * prototypes having readonly or setter properties.
3495 */
3496 if (rt->shapeGen & SHAPE_OVERFLOW_BIT
3497 #ifdef JS_GC_ZEAL
3498 || rt->gcZeal >= 1
3499 #endif
3500 ) {
3501 rt->gcRegenShapes = true;
3502 rt->gcRegenShapesScopeFlag ^= JSScope::SHAPE_REGEN;
3503 rt->shapeGen = 0;
3504 rt->protoHazardShape = 0;
3505 }
3506
3507 js_PurgeThreads(cx);
3508 #ifdef JS_TRACER
3509 if (gckind == GC_LAST_CONTEXT) {
3510 /* Clear builtin functions, which are recreated on demand. */
3511 memset(rt->builtinFunctions, 0, sizeof rt->builtinFunctions);
3512 }
3513 #endif
3514
3515 /*
3516 * Mark phase.
3517 */
3518 JS_TRACER_INIT(&trc, cx, NULL);
3519 rt->gcMarkingTracer = &trc;
3520 JS_ASSERT(IS_GC_MARKING_TRACER(&trc));
3521
3522 for (a = rt->gcDoubleArenaList.first; a; a = a->prev)
3523 a->u.hasMarkedDoubles = JS_FALSE;
3524
3525 js_TraceRuntime(&trc, keepAtoms);
3526 js_MarkScriptFilenames(rt, keepAtoms);
3527
3528 /*
3529 * Mark children of things that caused too deep recursion during the above
3530 * tracing.
3531 */
3532 TraceDelayedChildren(&trc);
3533
3534 JS_ASSERT(!cx->insideGCMarkCallback);
3535 if (rt->gcCallback) {
3536 cx->insideGCMarkCallback = JS_TRUE;
3537 (void) rt->gcCallback(cx, JSGC_MARK_END);
3538 JS_ASSERT(cx->insideGCMarkCallback);
3539 cx->insideGCMarkCallback = JS_FALSE;
3540 }
3541 JS_ASSERT(rt->gcTraceLaterCount == 0);
3542
3543 rt->gcMarkingTracer = NULL;
3544
3545 #ifdef JS_THREADSAFE
3546 cx->createDeallocatorTask();
3547 #endif
3548
3549 /*
3550 * Sweep phase.
3551 *
3552 * Finalize as we sweep, outside of rt->gcLock but with rt->gcRunning set
3553 * so that any attempt to allocate a GC-thing from a finalizer will fail,
3554 * rather than nest badly and leave the unmarked newborn to be swept.
3555 *
3556 * We first sweep atom state so we can use js_IsAboutToBeFinalized on
3557 * JSString or jsdouble held in a hashtable to check if the hashtable
3558 * entry can be freed. Note that even after the entry is freed, JSObject
3559 * finalizers can continue to access the corresponding jsdouble* and
3560 * JSString* assuming that they are unique. This works since the
3561 * atomization API must not be called during GC.
3562 */
3563 js_SweepAtomState(cx);
3564
3565 /* Finalize iterator states before the objects they iterate over. */
3566 CloseNativeIterators(cx);
3567
3568 /* Finalize watch points associated with unreachable objects. */
3569 js_SweepWatchPoints(cx);
3570
3571 #ifdef DEBUG
3572 /* Save the pre-sweep count of scope-mapped properties. */
3573 rt->liveScopePropsPreSweep = rt->liveScopeProps;
3574 #endif