/[jscoverage]/trunk/js/jsgc.cpp

Contents of /trunk/js/jsgc.cpp



Revision 332
Thu Oct 23 19:03:33 2008 UTC by siliconforks
File size: 122299 byte(s)
Add SpiderMonkey from Firefox 3.1b1.

The following directories and files were removed:
correct/, correct.js
liveconnect/
nanojit/
t/
v8/
vprof/
xpconnect/
all JavaScript files (Y.js, call.js, if.js, math-partial-sums.js, md5.js, perfect.js, trace-test.js, trace.js)


1 /* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2 * vim: set ts=8 sw=4 et tw=78:
3 *
4 * ***** BEGIN LICENSE BLOCK *****
5 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
6 *
7 * The contents of this file are subject to the Mozilla Public License Version
8 * 1.1 (the "License"); you may not use this file except in compliance with
9 * the License. You may obtain a copy of the License at
10 * http://www.mozilla.org/MPL/
11 *
12 * Software distributed under the License is distributed on an "AS IS" basis,
13 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
14 * for the specific language governing rights and limitations under the
15 * License.
16 *
17 * The Original Code is Mozilla Communicator client code, released
18 * March 31, 1998.
19 *
20 * The Initial Developer of the Original Code is
21 * Netscape Communications Corporation.
22 * Portions created by the Initial Developer are Copyright (C) 1998
23 * the Initial Developer. All Rights Reserved.
24 *
25 * Contributor(s):
26 *
27 * Alternatively, the contents of this file may be used under the terms of
28 * either of the GNU General Public License Version 2 or later (the "GPL"),
29 * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
30 * in which case the provisions of the GPL or the LGPL are applicable instead
31 * of those above. If you wish to allow use of your version of this file only
32 * under the terms of either the GPL or the LGPL, and not to allow others to
33 * use your version of this file under the terms of the MPL, indicate your
34 * decision by deleting the provisions above and replace them with the notice
35 * and other provisions required by the GPL or the LGPL. If you do not delete
36 * the provisions above, a recipient may use your version of this file under
37 * the terms of any one of the MPL, the GPL or the LGPL.
38 *
39 * ***** END LICENSE BLOCK ***** */
40
41 /*
42 * JS Mark-and-Sweep Garbage Collector.
43 *
44 * This GC allocates fixed-sized things with sizes up to GC_NBYTES_MAX (see
45 * jsgc.h). It allocates from a special GC arena pool with each arena allocated
46 * using malloc. It uses an ideally parallel array of flag bytes to hold the
47 * mark bit, finalizer type index, etc.
48 *
49 * XXX swizzle page to freelist for better locality of reference
50 */
51 #include "jsstddef.h"
52 #include <stdlib.h> /* for free */
53 #include <math.h>
54 #include <string.h> /* for memset used when DEBUG */
55 #include "jstypes.h"
56 #include "jsutil.h" /* Added by JSIFY */
57 #include "jshash.h" /* Added by JSIFY */
58 #include "jsbit.h"
59 #include "jsclist.h"
60 #include "jsprf.h"
61 #include "jsapi.h"
62 #include "jsatom.h"
63 #include "jscntxt.h"
64 #include "jsversion.h"
65 #include "jsdbgapi.h"
66 #include "jsexn.h"
67 #include "jsfun.h"
68 #include "jsgc.h"
69 #include "jsinterp.h"
70 #include "jsiter.h"
71 #include "jslock.h"
72 #include "jsnum.h"
73 #include "jsobj.h"
74 #include "jsparse.h"
75 #include "jsscope.h"
76 #include "jsscript.h"
77 #include "jsstr.h"
78 #include "jstracer.h"
79
80 #if JS_HAS_XML_SUPPORT
81 #include "jsxml.h"
82 #endif
83
84 /*
85 * Check if posix_memalign is available.
86 */
87 #if _POSIX_C_SOURCE >= 200112L || _XOPEN_SOURCE >= 600 || MOZ_MEMORY
88 # define HAS_POSIX_MEMALIGN 1
89 #else
90 # define HAS_POSIX_MEMALIGN 0
91 #endif
92
93 /*
94 * jemalloc provides posix_memalign.
95 */
96 #ifdef MOZ_MEMORY
97 extern "C" {
98 #include "../../memory/jemalloc/jemalloc.h"
99 }
100 #endif
101
102 /*
103 * Include the headers for mmap unless we have posix_memalign and do not
104 * insist on mmap.
105 */
106 #if JS_GC_USE_MMAP || (!defined JS_GC_USE_MMAP && !HAS_POSIX_MEMALIGN)
107 # if defined(XP_WIN)
108 # ifndef JS_GC_USE_MMAP
109 # define JS_GC_USE_MMAP 1
110 # endif
111 # include <windows.h>
112 # else
113 # if defined(XP_UNIX) || defined(XP_BEOS)
114 # include <unistd.h>
115 # endif
116 # if _POSIX_MAPPED_FILES > 0
117 # ifndef JS_GC_USE_MMAP
118 # define JS_GC_USE_MMAP 1
119 # endif
120 # include <sys/mman.h>
121
122 /* On Mac OS X MAP_ANONYMOUS is not defined. */
123 # if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
124 # define MAP_ANONYMOUS MAP_ANON
125 # endif
126 # else
127 # if JS_GC_USE_MMAP
128 # error "JS_GC_USE_MMAP is set when mmap is not available"
129 # endif
130 # endif
131 # endif
132 #endif
133
134 /*
135 * A GC arena contains a fixed number of flag bits for each thing in its heap,
136 * and supports O(1) lookup of a flag given its thing's address.
137 *
138 * To implement this, we allocate things of the same size from a GC arena
139 * containing GC_ARENA_SIZE bytes aligned on a GC_ARENA_SIZE boundary. The
140 * following picture shows the arena's layout:
141 *
142 * +------------------------------+--------------------+---------------+
143 * | allocation area for GC thing | flags of GC things | JSGCArenaInfo |
144 * +------------------------------+--------------------+---------------+
145 *
146 * To find the flag bits for the thing we calculate the thing index counting
147 * from arena's start using:
148 *
149 * thingIndex = (thingAddress & GC_ARENA_MASK) / thingSize
150 *
151 * The details of the flag lookup depend on the thing's kind. For all GC things
152 * except doubles we use one byte of flags, where 4 bits determine the thing's
153 * type and the rest are used to implement GC marking, finalization and
154 * locking. We calculate the address of the flag byte using:
155 *
156 * flagByteAddress =
157 * (thingAddress | GC_ARENA_MASK) - sizeof(JSGCArenaInfo) - thingIndex
158 *
159 * where
160 *
161 * (thingAddress | GC_ARENA_MASK) - sizeof(JSGCArenaInfo)
162 *
163 * is the last byte of flags' area.
164 *
165 * This implies that the things are allocated from the start of their area and
166 * flags are allocated from the end. This arrangement avoids a relatively
167 * expensive calculation of the location of the boundary separating things and
168 * flags. The boundary's offset from the start of the arena is given by:
169 *
170 * thingsPerArena * thingSize
171 *
172 * where thingsPerArena is the number of things that the arena can hold:
173 *
174 * (GC_ARENA_SIZE - sizeof(JSGCArenaInfo)) / (thingSize + 1).
175 *
176 * To allocate doubles we use a specialized arena. It can contain only numbers
177 * so we do not need the type bits. Moreover, since the doubles do not require
178 * a finalizer and very few of them are locked via js_LockGCThing API, we use
179 * just one bit of flags per double to denote if it was marked during the
180 * marking phase of the GC. The locking is implemented via a hash table. Thus
181 * for doubles the flag area becomes a bitmap.
182 *
183 * JS_GC_USE_MMAP macro governs the choice of the aligned arena allocator.
184 * When it is true, a platform-dependent function like mmap is used to get
185 * memory aligned on CPU page boundaries. If the macro is false or undefined,
186 * posix_memalign is used when available. Otherwise the code uses malloc to
187 * over-allocate a chunk with js_gcArenasPerChunk aligned arenas. The
188 * approximate space overhead of this is 1/js_gcArenasPerChunk. For details,
189 * see NewGCChunk/DestroyGCChunk below.
190 *
191 * The code also allocates arenas in chunks when JS_GC_USE_MMAP is 1 to
192 * minimize the overhead of mmap/munmap. In this case js_gcArenasPerChunk can
193 * not be a compile-time constant as the system page size is not known until
194 * runtime.
195 */
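/*
 * A worked example for concreteness, assuming a 32-bit build with
 * JS_GC_ARENA_PAD == 0, so GC_ARENA_SIZE == 4096 and
 * sizeof(JSGCArenaInfo) == 16. For a hypothetical thingSize of 16:
 *
 * thingsPerArena = (4096 - 16) / (16 + 1) = 240
 *
 * Things occupy arena offsets [0, 3840) and their 240 flag bytes sit just
 * below the JSGCArenaInfo, which starts at offset 4080. A thing at arena
 * offset 0x0A0 has thingIndex = 0x0A0 / 16 = 10, and its flag byte lives at
 *
 * (thingAddress | GC_ARENA_MASK) - 16 - 10 == arenaStart + 4069
 *
 * which is indeed 4080 - 1 - 10 bytes from the arena start.
 */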
196 #if JS_GC_USE_MMAP
197 static uint32 js_gcArenasPerChunk = 0;
198 static JSBool js_gcUseMmap = JS_FALSE;
199 #elif HAS_POSIX_MEMALIGN
200 # define js_gcArenasPerChunk 1
201 #else
202 # define js_gcArenasPerChunk 7
203 #endif
204
205 #if defined(js_gcArenasPerChunk) && js_gcArenasPerChunk == 1
206 # define CHUNKED_ARENA_ALLOCATION 0
207 #else
208 # define CHUNKED_ARENA_ALLOCATION 1
209 #endif
210
211 #define GC_ARENA_SHIFT 12
212 #define GC_ARENA_MASK ((jsuword) JS_BITMASK(GC_ARENA_SHIFT))
213 #define GC_ARENA_SIZE JS_BIT(GC_ARENA_SHIFT)
214
215 /*
216 * JS_GC_ARENA_PAD defines the number of bytes to pad JSGCArenaInfo structure.
217 * It is used to improve allocation efficiency when using posix_memalign. If
218 * malloc's implementation uses internal headers, then calling
219 *
220 * posix_memalign(&p, GC_ARENA_SIZE, GC_ARENA_SIZE * js_gcArenasPerChunk)
221 *
222 * in a sequence leaves holes between allocations of the size GC_ARENA_SIZE
223 * due to the need to fit headers. JS_GC_ARENA_PAD mitigates that so the code
224 * calls
225 *
226 * posix_memalign(&p, GC_ARENA_SIZE,
227 * GC_ARENA_SIZE * js_gcArenasPerChunk - JS_GC_ARENA_PAD)
228 *
229 * When JS_GC_ARENA_PAD is equal to or greater than the size of the
230 * malloc header, the system can pack all allocations together without holes.
231 *
232 * With HAS_POSIX_MEMALIGN we want at least a two-word pad unless
233 * posix_memalign comes from jemalloc, which does not use any headers/trailers.
234 */
235 #ifndef JS_GC_ARENA_PAD
236 # if HAS_POSIX_MEMALIGN && !MOZ_MEMORY
237 # define JS_GC_ARENA_PAD (2 * JS_BYTES_PER_WORD)
238 # else
239 # define JS_GC_ARENA_PAD 0
240 # endif
241 #endif
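/*
 * A sketch of the intent: if malloc keeps, say, a two-word header beside each
 * allocation, back-to-back requests for the full
 * GC_ARENA_SIZE * js_gcArenasPerChunk bytes cannot be packed on consecutive
 * GC_ARENA_SIZE boundaries and each call wastes close to a whole arena.
 * Trimming the request by a two-word JS_GC_ARENA_PAD leaves room for that
 * header, so the allocations pack without holes. The exact layout depends on
 * the malloc implementation.
 */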
242
243 struct JSGCArenaInfo {
244 /*
245 * Allocation list for the arena or NULL if the arena holds double values.
246 */
247 JSGCArenaList *list;
248
249 /*
250 * Pointer to the previous arena in a linked list. The arena can either
251 * belong to one of the JSRuntime.gcArenaList lists or, when it does not have
252 * any allocated GC things, to the list of free arenas in the chunk with
253 * head stored in JSGCChunkInfo.lastFreeArena.
254 */
255 JSGCArenaInfo *prev;
256
257 #if !CHUNKED_ARENA_ALLOCATION
258 jsuword prevUntracedPage;
259 #else
260 /*
261 * A link field for the list of arenas with marked but not yet traced
262 * things. The field is encoded as arena's page to share the space with
263 * firstArena and arenaIndex fields.
264 */
265 jsuword prevUntracedPage : JS_BITS_PER_WORD - GC_ARENA_SHIFT;
266
267 /*
268 * When firstArena is false, the index of arena in the chunk. When
269 * firstArena is true, the index of a free arena holding JSGCChunkInfo or
270 * NO_FREE_ARENAS if there are no free arenas in the chunk.
271 *
272 * GET_ARENA_INDEX and GET_CHUNK_INFO_INDEX are convenience macros to
273 * access either of indexes.
274 */
275 jsuword arenaIndex : GC_ARENA_SHIFT - 1;
276
277 /* Flag indicating if the arena is the first in the chunk. */
278 jsuword firstArena : 1;
279 #endif
280
281 union {
282 jsuword untracedThings; /* bitset for fast search of marked
283 but not yet traced things */
284 JSBool hasMarkedDoubles; /* the arena has marked doubles */
285 } u;
286
287 #if JS_GC_ARENA_PAD != 0
288 uint8 pad[JS_GC_ARENA_PAD];
289 #endif
290 };
291
292 /*
293 * Verify that the bit fields are indeed shared and JSGCArenaInfo is as small
294 * as possible. The code does not rely on this check so if on a particular
295 * platform this does not compile, then, as a workaround, comment the assert
296 * out and submit a bug report.
297 */
298 JS_STATIC_ASSERT(offsetof(JSGCArenaInfo, u) == 3 * sizeof(jsuword));
299
300 /*
301 * Macros to convert between JSGCArenaInfo, the start address of the arena and
302 * arena's page defined as (start address) >> GC_ARENA_SHIFT.
303 */
304 #define ARENA_INFO_OFFSET (GC_ARENA_SIZE - (uint32) sizeof(JSGCArenaInfo))
305
306 #define IS_ARENA_INFO_ADDRESS(arena) \
307 (((jsuword) (arena) & GC_ARENA_MASK) == ARENA_INFO_OFFSET)
308
309 #define ARENA_START_TO_INFO(arenaStart) \
310 (JS_ASSERT(((arenaStart) & (jsuword) GC_ARENA_MASK) == 0), \
311 (JSGCArenaInfo *) ((arenaStart) + (jsuword) ARENA_INFO_OFFSET))
312
313 #define ARENA_INFO_TO_START(arena) \
314 (JS_ASSERT(IS_ARENA_INFO_ADDRESS(arena)), \
315 (jsuword) (arena) & ~(jsuword) GC_ARENA_MASK)
316
317 #define ARENA_PAGE_TO_INFO(arenaPage) \
318 (JS_ASSERT(arenaPage != 0), \
319 JS_ASSERT(!((jsuword)(arenaPage) >> (JS_BITS_PER_WORD-GC_ARENA_SHIFT))), \
320 ARENA_START_TO_INFO((arenaPage) << GC_ARENA_SHIFT))
321
322 #define ARENA_INFO_TO_PAGE(arena) \
323 (JS_ASSERT(IS_ARENA_INFO_ADDRESS(arena)), \
324 ((jsuword) (arena) >> GC_ARENA_SHIFT))
325
326 #define GET_ARENA_INFO(chunk, index) \
327 (JS_ASSERT((index) < js_gcArenasPerChunk), \
328 ARENA_START_TO_INFO(chunk + ((index) << GC_ARENA_SHIFT)))
329
330 #if CHUNKED_ARENA_ALLOCATION
331 /*
332 * Definitions for allocating arenas in chunks.
333 *
334 * All chunks that have at least one free arena are put on the doubly-linked
335 * list with the head stored in JSRuntime.gcChunkList. JSGCChunkInfo contains
336 * the head of the chunk's free arena list together with the link fields for
337 * gcChunkList.
338 *
339 * Structure stored in one of chunk's free arenas. GET_CHUNK_INFO_INDEX gives
340 * the index of this arena. When all arenas in the chunk are used, it is
341 * removed from the list and the index is set to NO_FREE_ARENAS indicating
342 * that the chunk is not on gcChunkList and has no JSGCChunkInfo available.
343 */
344
345 struct JSGCChunkInfo {
346 JSGCChunkInfo **prevp;
347 JSGCChunkInfo *next;
348 JSGCArenaInfo *lastFreeArena;
349 uint32 numFreeArenas;
350 };
351
352 #define NO_FREE_ARENAS JS_BITMASK(GC_ARENA_SHIFT - 1)
353
354 #ifdef js_gcArenasPerChunk
355 JS_STATIC_ASSERT(1 <= js_gcArenasPerChunk &&
356 js_gcArenasPerChunk <= NO_FREE_ARENAS);
357 #endif
358
359 #define GET_ARENA_CHUNK(arena, index) \
360 (JS_ASSERT(GET_ARENA_INDEX(arena) == index), \
361 ARENA_INFO_TO_START(arena) - ((index) << GC_ARENA_SHIFT))
362
363 #define GET_ARENA_INDEX(arena) \
364 ((arena)->firstArena ? 0 : (uint32) (arena)->arenaIndex)
365
366 #define GET_CHUNK_INFO_INDEX(chunk) \
367 ((uint32) ARENA_START_TO_INFO(chunk)->arenaIndex)
368
369 #define SET_CHUNK_INFO_INDEX(chunk, index) \
370 (JS_ASSERT((index) < js_gcArenasPerChunk || (index) == NO_FREE_ARENAS), \
371 (void) (ARENA_START_TO_INFO(chunk)->arenaIndex = (jsuword) (index)))
372
373 #define GET_CHUNK_INFO(chunk, infoIndex) \
374 (JS_ASSERT(GET_CHUNK_INFO_INDEX(chunk) == (infoIndex)), \
375 JS_ASSERT((uint32) (infoIndex) < js_gcArenasPerChunk), \
376 (JSGCChunkInfo *) ((chunk) + ((infoIndex) << GC_ARENA_SHIFT)))
377
378 #define CHUNK_INFO_TO_INDEX(ci) \
379 GET_ARENA_INDEX(ARENA_START_TO_INFO((jsuword)ci))
380
381 #endif
382
383 /*
384 * Macros for GC-thing operations.
385 */
386 #define THINGS_PER_ARENA(thingSize) \
387 ((GC_ARENA_SIZE - (uint32) sizeof(JSGCArenaInfo)) / ((thingSize) + 1U))
388
389 #define THING_TO_ARENA(thing) \
390 ((JSGCArenaInfo *)(((jsuword) (thing) | GC_ARENA_MASK) + \
391 1 - sizeof(JSGCArenaInfo)))
392
393 #define THING_TO_INDEX(thing, thingSize) \
394 ((uint32) ((jsuword) (thing) & GC_ARENA_MASK) / (uint32) (thingSize))
395
396 #define THING_FLAGS_END(arena) ((uint8 *)(arena))
397
398 #define THING_FLAGP(arena, thingIndex) \
399 (JS_ASSERT((jsuword) (thingIndex) \
400 < (jsuword) THINGS_PER_ARENA((arena)->list->thingSize)), \
401 (uint8 *)(arena) - 1 - (thingIndex))
402
403 #define THING_TO_FLAGP(thing, thingSize) \
404 THING_FLAGP(THING_TO_ARENA(thing), THING_TO_INDEX(thing, thingSize))
405
406 #define FLAGP_TO_ARENA(flagp) THING_TO_ARENA(flagp)
407
408 #define FLAGP_TO_INDEX(flagp) \
409 (JS_ASSERT(((jsuword) (flagp) & GC_ARENA_MASK) < ARENA_INFO_OFFSET), \
410 (ARENA_INFO_OFFSET - 1 - (uint32) ((jsuword) (flagp) & GC_ARENA_MASK)))
411
412 #define FLAGP_TO_THING(flagp, thingSize) \
413 (JS_ASSERT(((jsuword) (flagp) & GC_ARENA_MASK) >= \
414 (ARENA_INFO_OFFSET - THINGS_PER_ARENA(thingSize))), \
415 (JSGCThing *)(((jsuword) (flagp) & ~GC_ARENA_MASK) + \
416 (thingSize) * FLAGP_TO_INDEX(flagp)))
417
418 /*
419 * Macros for the specialized arena for doubles.
420 *
421 * DOUBLES_PER_ARENA defines the maximum number of doubles that the arena can
422 * hold. We find it as follows. Let n be the number of doubles in the
423 * arena. Together with the bitmap of flags and JSGCArenaInfo they should fit
424 * the arena. Hence DOUBLES_PER_ARENA or n_max is the maximum value of n for
425 * which the following holds:
426 *
427 * n*s + ceil(n/B) <= M (1)
428 *
429 * where "/" denotes normal real division,
430 * ceil(r) gives the least integer not smaller than the number r,
431 * s is the number of words in jsdouble,
432 * B is number of bits per word or B == JS_BITS_PER_WORD
433 * M is the number of words in the arena before JSGCArenaInfo or
434 * M == (GC_ARENA_SIZE - sizeof(JSGCArenaInfo)) / sizeof(jsuword)
435 * == ARENA_INFO_OFFSET / sizeof(jsuword).
436 *
437 * We rewrite the inequality as
438 *
439 * n*B*s/B + ceil(n/B) <= M,
440 * ceil(n*B*s/B + n/B) <= M,
441 * ceil(n*(B*s + 1)/B) <= M (2)
442 *
443 * We define a helper function e(n, s, B),
444 *
445 * e(n, s, B) := ceil(n*(B*s + 1)/B) - n*(B*s + 1)/B, 0 <= e(n, s, B) < 1.
446 *
447 * It gives:
448 *
449 * n*(B*s + 1)/B + e(n, s, B) <= M,
450 * n + e*B/(B*s + 1) <= M*B/(B*s + 1)
451 *
452 * We apply the floor function to both sides of the last inequality, where
453 * floor(r) gives the biggest integer not greater than r. As a consequence we
454 * have:
455 *
456 * floor(n + e*B/(B*s + 1)) <= floor(M*B/(B*s + 1)),
457 * n + floor(e*B/(B*s + 1)) <= floor(M*B/(B*s + 1)),
458 * n <= floor(M*B/(B*s + 1)), (3)
459 *
460 * where floor(e*B/(B*s + 1)) is zero as e*B/(B*s + 1) < B/(B*s + 1) < 1.
461 * Thus any n that satisfies the original constraint (1) or its equivalent (2),
462 * must also satisfy (3). That is, we have an upper estimate for the maximum
463 * value of n. Let us show that this upper estimate,
464 *
465 * floor(M*B/(B*s + 1)), (4)
466 *
467 * also satisfies (1) and, as such, gives the required maximum value.
468 * Substituting it into (2) gives:
469 *
470 * ceil(floor(M*B/(B*s + 1))*(B*s + 1)/B) == ceil(floor(M/X)*X)
471 *
472 * where X == (B*s + 1)/B > 1. But then floor(M/X)*X <= M/X*X == M and
473 *
474 * ceil(floor(M/X)*X) <= ceil(M) == M.
475 *
476 * Thus the value of (4) gives the maximum n satisfying (1).
477 *
478 * For the final result we observe that in (4)
479 *
480 * M*B == ARENA_INFO_OFFSET / sizeof(jsuword) * JS_BITS_PER_WORD
481 * == ARENA_INFO_OFFSET * JS_BITS_PER_BYTE
482 *
483 * and
484 *
485 * B*s == JS_BITS_PER_WORD * sizeof(jsdouble) / sizeof(jsuword)
486 * == JS_BITS_PER_DOUBLE.
487 */
488 #define DOUBLES_PER_ARENA \
489 ((ARENA_INFO_OFFSET * JS_BITS_PER_BYTE) / (JS_BITS_PER_DOUBLE + 1))
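/*
 * Plugging in numbers for a 32-bit build with JS_GC_ARENA_PAD == 0, so that
 * ARENA_INFO_OFFSET == 4080 and JS_BITS_PER_DOUBLE == 64:
 *
 * DOUBLES_PER_ARENA = 4080 * 8 / 65 = 502
 *
 * Indeed, 502 doubles take 4016 bytes while their mark bitmap needs
 * ceil(502/32) == 16 words == 64 bytes, filling the 4080 available bytes
 * exactly; a 503rd double would push the total to 4088 bytes and no longer
 * fit. The static asserts below check both conditions in general form.
 */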
490
491 /*
492 * Check that ARENA_INFO_OFFSET and sizeof(jsdouble) are multiples of sizeof(jsuword).
493 */
494 JS_STATIC_ASSERT(ARENA_INFO_OFFSET % sizeof(jsuword) == 0);
495 JS_STATIC_ASSERT(sizeof(jsdouble) % sizeof(jsuword) == 0);
496 JS_STATIC_ASSERT(sizeof(jsbitmap) == sizeof(jsuword));
497
498 #define DOUBLES_ARENA_BITMAP_WORDS \
499 (JS_HOWMANY(DOUBLES_PER_ARENA, JS_BITS_PER_WORD))
500
501 /* Check that DOUBLES_PER_ARENA indeed maximises (1). */
502 JS_STATIC_ASSERT(DOUBLES_PER_ARENA * sizeof(jsdouble) +
503 DOUBLES_ARENA_BITMAP_WORDS * sizeof(jsuword) <=
504 ARENA_INFO_OFFSET);
505
506 JS_STATIC_ASSERT((DOUBLES_PER_ARENA + 1) * sizeof(jsdouble) +
507 sizeof(jsuword) *
508 JS_HOWMANY((DOUBLES_PER_ARENA + 1), JS_BITS_PER_WORD) >
509 ARENA_INFO_OFFSET);
510
511 /*
512 * When DOUBLES_PER_ARENA % JS_BITS_PER_WORD != 0, some bits in the
513 * last word of the occupation bitmap are unused.
514 */
515 #define UNUSED_DOUBLE_BITMAP_BITS \
516 (DOUBLES_ARENA_BITMAP_WORDS * JS_BITS_PER_WORD - DOUBLES_PER_ARENA)
517
518 JS_STATIC_ASSERT(UNUSED_DOUBLE_BITMAP_BITS < JS_BITS_PER_WORD);
519
520 #define DOUBLES_ARENA_BITMAP_OFFSET \
521 (ARENA_INFO_OFFSET - DOUBLES_ARENA_BITMAP_WORDS * sizeof(jsuword))
522
523 #define CHECK_DOUBLE_ARENA_INFO(arenaInfo) \
524 (JS_ASSERT(IS_ARENA_INFO_ADDRESS(arenaInfo)), \
525 JS_ASSERT(!(arenaInfo)->list)) \
526
527 /*
528 * Get the start of the bitmap area containing double mark flags in the arena.
529 * To access the flag the code uses
530 *
531 * JS_TEST_BIT(bitmapStart, index)
532 *
533 * That is, compared with the case of arenas with non-double things, we count
534 * flags from the start of the bitmap area, not from the end.
535 */
536 #define DOUBLE_ARENA_BITMAP(arenaInfo) \
537 (CHECK_DOUBLE_ARENA_INFO(arenaInfo), \
538 (jsbitmap *) arenaInfo - DOUBLES_ARENA_BITMAP_WORDS)
539
540 #define DOUBLE_THING_TO_INDEX(thing) \
541 (CHECK_DOUBLE_ARENA_INFO(THING_TO_ARENA(thing)), \
542 JS_ASSERT(((jsuword) (thing) & GC_ARENA_MASK) < \
543 DOUBLES_ARENA_BITMAP_OFFSET), \
544 ((uint32) (((jsuword) (thing) & GC_ARENA_MASK) / sizeof(jsdouble))))
545
546 static void
547 ClearDoubleArenaFlags(JSGCArenaInfo *a)
548 {
549 jsbitmap *bitmap, mask;
550 uintN nused;
551
552 /*
553 * When some high bits in the last word of the double occupation bitmap
554 * are unused, we must set them. Otherwise RefillDoubleFreeList will
555 * assume that they correspond to some free cells and try to allocate
556 * them.
557 *
558 * Note that the code works correctly with UNUSED_DOUBLE_BITMAP_BITS == 0.
559 */
560 bitmap = DOUBLE_ARENA_BITMAP(a);
561 memset(bitmap, 0, (DOUBLES_ARENA_BITMAP_WORDS - 1) * sizeof *bitmap);
562 mask = ((jsbitmap) 1 << UNUSED_DOUBLE_BITMAP_BITS) - 1;
563 nused = JS_BITS_PER_WORD - UNUSED_DOUBLE_BITMAP_BITS;
564 bitmap[DOUBLES_ARENA_BITMAP_WORDS - 1] = mask << nused;
565 }
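/*
 * With the 32-bit numbers used above (DOUBLES_PER_ARENA == 502 and a 16-word
 * bitmap holding 512 bits), UNUSED_DOUBLE_BITMAP_BITS == 10, so the code
 * computes mask == 0x3FF and nused == 22 and stores 0x3FF << 22 ==
 * 0xFFC00000 in the last bitmap word: the 10 high bits that correspond to no
 * double stay permanently set.
 */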
566
567 static JS_ALWAYS_INLINE JSBool
568 IsMarkedDouble(JSGCArenaInfo *a, uint32 index)
569 {
570 jsbitmap *bitmap;
571
572 JS_ASSERT(a->u.hasMarkedDoubles);
573 bitmap = DOUBLE_ARENA_BITMAP(a);
574 return JS_TEST_BIT(bitmap, index);
575 }
576
577 /*
578 * JSRuntime.gcDoubleArenaList.nextDoubleFlags points either to:
579 *
580 * 1. The next word in the bitmap area for doubles to check for unmarked
581 * (or free) doubles.
582 * 2. Or to the end of the bitmap area when all GC cells of the arena are
583 * allocated.
584 * 3. Or to a special sentinel value indicating that there are no arenas
585 * to check for unmarked doubles.
586 *
587 * We set the sentinel to ARENA_INFO_OFFSET so the single check
588 *
589 * ((jsuword) nextDoubleFlags & GC_ARENA_MASK) == ARENA_INFO_OFFSET
590 *
591 * will cover both the second and the third cases.
592 */
593 #define DOUBLE_BITMAP_SENTINEL ((jsbitmap *) ARENA_INFO_OFFSET)
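/*
 * The single check works because the bitmap area ends exactly at offset
 * ARENA_INFO_OFFSET within its arena, so a nextDoubleFlags pointer that has
 * scanned the whole bitmap satisfies
 * ((jsuword) nextDoubleFlags & GC_ARENA_MASK) == ARENA_INFO_OFFSET, and the
 * sentinel value itself trivially has the same low bits.
 */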
594
595 #ifdef JS_THREADSAFE
596 /*
597 * The maximum number of things to put on the local free list by taking
598 * several things from the global free list or from the tail of the last
599 * allocated arena to amortize the cost of rt->gcLock.
600 *
601 * We use 8 based on benchmarks from bug 312238.
602 */
603 #define MAX_THREAD_LOCAL_THINGS 8
604
605 #endif
606
607 JS_STATIC_ASSERT(sizeof(JSStackHeader) >= 2 * sizeof(jsval));
608
609 JS_STATIC_ASSERT(sizeof(JSGCThing) >= sizeof(JSString));
610 JS_STATIC_ASSERT(sizeof(JSGCThing) >= sizeof(jsdouble));
611
612 /* We want to use all the available GC thing space for object's slots. */
613 JS_STATIC_ASSERT(sizeof(JSObject) % sizeof(JSGCThing) == 0);
614
615 /*
616 * Ensure that JSObject is allocated from a different GC list than
617 * jsdouble and JSString so we can easily finalize JSObject before these 2
618 * types of GC things. See comments in js_GC.
619 */
620 JS_STATIC_ASSERT(GC_FREELIST_INDEX(sizeof(JSString)) !=
621 GC_FREELIST_INDEX(sizeof(JSObject)));
622 JS_STATIC_ASSERT(GC_FREELIST_INDEX(sizeof(jsdouble)) !=
623 GC_FREELIST_INDEX(sizeof(JSObject)));
624
625 /*
626 * JSPtrTable capacity growth descriptor. The table grows by powers of two
627 * starting from capacity JSPtrTableInfo.minCapacity, but switches to linear
628 * growth when capacity reaches JSPtrTableInfo.linearGrowthThreshold.
629 */
630 typedef struct JSPtrTableInfo {
631 uint16 minCapacity;
632 uint16 linearGrowthThreshold;
633 } JSPtrTableInfo;
634
635 #define GC_ITERATOR_TABLE_MIN 4
636 #define GC_ITERATOR_TABLE_LINEAR 1024
637
638 static const JSPtrTableInfo iteratorTableInfo = {
639 GC_ITERATOR_TABLE_MIN,
640 GC_ITERATOR_TABLE_LINEAR
641 };
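/*
 * With these parameters the capacity computed by PtrTableCapacity below grows
 * in powers of two (at least GC_ITERATOR_TABLE_MIN) while the count stays
 * under GC_ITERATOR_TABLE_LINEAR, and in 1024-entry steps afterwards. For
 * example, PtrTableCapacity(600, &iteratorTableInfo) == 1024 and
 * PtrTableCapacity(1500, &iteratorTableInfo) == 2048.
 */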
642
643 /* Calculate table capacity based on the current value of JSPtrTable.count. */
644 static size_t
645 PtrTableCapacity(size_t count, const JSPtrTableInfo *info)
646 {
647 size_t linear, log, capacity;
648
649 linear = info->linearGrowthThreshold;
650 JS_ASSERT(info->minCapacity <= linear);
651
652 if (count == 0) {
653 capacity = 0;
654 } else if (count < linear) {
655 log = JS_CEILING_LOG2W(count);
656 JS_ASSERT(log != JS_BITS_PER_WORD);
657 capacity = (size_t)1 << log;
658 if (capacity < info->minCapacity)
659 capacity = info->minCapacity;
660 } else {
661 capacity = JS_ROUNDUP(count, linear);
662 }
663
664 JS_ASSERT(capacity >= count);
665 return capacity;
666 }
667
668 static void
669 FreePtrTable(JSPtrTable *table, const JSPtrTableInfo *info)
670 {
671 if (table->array) {
672 JS_ASSERT(table->count > 0);
673 free(table->array);
674 table->array = NULL;
675 table->count = 0;
676 }
677 JS_ASSERT(table->count == 0);
678 }
679
680 static JSBool
681 AddToPtrTable(JSContext *cx, JSPtrTable *table, const JSPtrTableInfo *info,
682 void *ptr)
683 {
684 size_t count, capacity;
685 void **array;
686
687 count = table->count;
688 capacity = PtrTableCapacity(count, info);
689
690 if (count == capacity) {
691 if (capacity < info->minCapacity) {
692 JS_ASSERT(capacity == 0);
693 JS_ASSERT(!table->array);
694 capacity = info->minCapacity;
695 } else {
696 /*
697 * Simplify the overflow detection assuming a pointer is bigger
698 * than a byte.
699 */
700 JS_STATIC_ASSERT(2 <= sizeof table->array[0]);
701 capacity = (capacity < info->linearGrowthThreshold)
702 ? 2 * capacity
703 : capacity + info->linearGrowthThreshold;
704 if (capacity > (size_t)-1 / sizeof table->array[0])
705 goto bad;
706 }
707 array = (void **) realloc(table->array,
708 capacity * sizeof table->array[0]);
709 if (!array)
710 goto bad;
711 #ifdef DEBUG
712 memset(array + count, JS_FREE_PATTERN,
713 (capacity - count) * sizeof table->array[0]);
714 #endif
715 table->array = array;
716 }
717
718 table->array[count] = ptr;
719 table->count = count + 1;
720
721 return JS_TRUE;
722
723 bad:
724 JS_ReportOutOfMemory(cx);
725 return JS_FALSE;
726 }
727
728 static void
729 ShrinkPtrTable(JSPtrTable *table, const JSPtrTableInfo *info,
730 size_t newCount)
731 {
732 size_t oldCapacity, capacity;
733 void **array;
734
735 JS_ASSERT(newCount <= table->count);
736 if (newCount == table->count)
737 return;
738
739 oldCapacity = PtrTableCapacity(table->count, info);
740 table->count = newCount;
741 capacity = PtrTableCapacity(newCount, info);
742
743 if (oldCapacity != capacity) {
744 array = table->array;
745 JS_ASSERT(array);
746 if (capacity == 0) {
747 free(array);
748 table->array = NULL;
749 return;
750 }
751 array = (void **) realloc(array, capacity * sizeof array[0]);
752 if (array)
753 table->array = array;
754 }
755 #ifdef DEBUG
756 memset(table->array + newCount, JS_FREE_PATTERN,
757 (capacity - newCount) * sizeof table->array[0]);
758 #endif
759 }
760
761 #ifdef JS_GCMETER
762 # define METER(x) ((void) (x))
763 # define METER_IF(condition, x) ((void) ((condition) && (x)))
764 #else
765 # define METER(x) ((void) 0)
766 # define METER_IF(condition, x) ((void) 0)
767 #endif
768
769 #define METER_UPDATE_MAX(maxLval, rval) \
770 METER_IF((maxLval) < (rval), (maxLval) = (rval))
771
772 #if JS_GC_USE_MMAP || !HAS_POSIX_MEMALIGN
773
774 /*
775 * For chunks allocated via over-sized malloc, get a pointer to store the gap
776 * between the malloc's result and the first arena in the chunk.
777 */
778 static uint32 *
779 GetMallocedChunkGapPtr(jsuword chunk)
780 {
781 JS_ASSERT((chunk & GC_ARENA_MASK) == 0);
782
783 /* Use the memory after the chunk, see NewGCChunk for details. */
784 return (uint32 *) (chunk + (js_gcArenasPerChunk << GC_ARENA_SHIFT));
785 }
786
787 #endif
788
789 static jsuword
790 NewGCChunk(void)
791 {
792 void *p;
793
794 #if JS_GC_USE_MMAP
795 if (js_gcUseMmap) {
796 # if defined(XP_WIN)
797 p = VirtualAlloc(NULL, js_gcArenasPerChunk << GC_ARENA_SHIFT,
798 MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
799 return (jsuword) p;
800 # else
801 p = mmap(NULL, js_gcArenasPerChunk << GC_ARENA_SHIFT,
802 PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
803 return (p == MAP_FAILED) ? 0 : (jsuword) p;
804 # endif
805 }
806 #endif
807
808 #if HAS_POSIX_MEMALIGN
809 if (0 != posix_memalign(&p, GC_ARENA_SIZE,
810 GC_ARENA_SIZE * js_gcArenasPerChunk -
811 JS_GC_ARENA_PAD)) {
812 return 0;
813 }
814 return (jsuword) p;
815 #else
816 /*
817 * Implement chunk allocation using oversized malloc if mmap and
818 * posix_memalign are not available.
819 *
820 * Since malloc allocates pointers aligned on the word boundary, to get
821 * js_gcArenasPerChunk aligned arenas, we need to malloc only
822 *
823 * ((js_gcArenasPerChunk + 1) << GC_ARENA_SHIFT) - sizeof(size_t)
824 *
825 * bytes. But since we store the gap between the malloced pointer and the
826 * first arena in the chunk after the chunk, we need to ask for
827 *
828 * ((js_gcArenasPerChunk + 1) << GC_ARENA_SHIFT)
829 *
830 * bytes to ensure that we always have room to store the gap.
831 */
832 p = malloc((js_gcArenasPerChunk + 1) << GC_ARENA_SHIFT);
833 if (!p)
834 return 0;
835
836 {
837 jsuword chunk;
838
839 chunk = ((jsuword) p + GC_ARENA_MASK) & ~GC_ARENA_MASK;
840 *GetMallocedChunkGapPtr(chunk) = (uint32) (chunk - (jsuword) p);
841 return chunk;
842 }
843 #endif
844 }
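/*
 * A concrete run of the malloc fallback above, assuming js_gcArenasPerChunk
 * == 7 and GC_ARENA_SHIFT == 12: we malloc (7 + 1) << 12 == 32768 bytes. If
 * malloc returns, say, p == 0x1000F10, the first aligned arena starts at
 * chunk == 0x1001000, the gap is 0xF0 == 240 bytes, and that value is stored
 * at chunk + (7 << 12) so that DestroyGCChunk below can recover the original
 * pointer as chunk - 240.
 */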
845
846 static void
847 DestroyGCChunk(jsuword chunk)
848 {
849 JS_ASSERT((chunk & GC_ARENA_MASK) == 0);
850 #if JS_GC_USE_MMAP
851 if (js_gcUseMmap) {
852 # if defined(XP_WIN)
853 VirtualFree((void *) chunk, 0, MEM_RELEASE);
854 # elif defined(SOLARIS)
855 munmap((char *) chunk, js_gcArenasPerChunk << GC_ARENA_SHIFT);
856 # else
857 munmap((void *) chunk, js_gcArenasPerChunk << GC_ARENA_SHIFT);
858 # endif
859 return;
860 }
861 #endif
862
863 #if HAS_POSIX_MEMALIGN
864 free((void *) chunk);
865 #else
866 /* See comments in NewGCChunk. */
867 JS_ASSERT(*GetMallocedChunkGapPtr(chunk) < GC_ARENA_SIZE);
868 free((void *) (chunk - *GetMallocedChunkGapPtr(chunk)));
869 #endif
870 }
871
872 #if CHUNKED_ARENA_ALLOCATION
873
874 static void
875 AddChunkToList(JSRuntime *rt, JSGCChunkInfo *ci)
876 {
877 ci->prevp = &rt->gcChunkList;
878 ci->next = rt->gcChunkList;
879 if (rt->gcChunkList) {
880 JS_ASSERT(rt->gcChunkList->prevp == &rt->gcChunkList);
881 rt->gcChunkList->prevp = &ci->next;
882 }
883 rt->gcChunkList = ci;
884 }
885
886 static void
887 RemoveChunkFromList(JSRuntime *rt, JSGCChunkInfo *ci)
888 {
889 *ci->prevp = ci->next;
890 if (ci->next) {
891 JS_ASSERT(ci->next->prevp == &ci->next);
892 ci->next->prevp = ci->prevp;
893 }
894 }
895
896 #endif
897
898 static JSGCArenaInfo *
899 NewGCArena(JSRuntime *rt)
900 {
901 jsuword chunk;
902 JSGCArenaInfo *a;
903
904 if (rt->gcBytes >= rt->gcMaxBytes)
905 return NULL;
906
907 #if CHUNKED_ARENA_ALLOCATION
908 if (js_gcArenasPerChunk == 1) {
909 #endif
910 chunk = NewGCChunk();
911 if (chunk == 0)
912 return NULL;
913 a = ARENA_START_TO_INFO(chunk);
914 #if CHUNKED_ARENA_ALLOCATION
915 } else {
916 JSGCChunkInfo *ci;
917 uint32 i;
918 JSGCArenaInfo *aprev;
919
920 ci = rt->gcChunkList;
921 if (!ci) {
922 chunk = NewGCChunk();
923 if (chunk == 0)
924 return NULL;
925 JS_ASSERT((chunk & GC_ARENA_MASK) == 0);
926 a = GET_ARENA_INFO(chunk, 0);
927 a->firstArena = JS_TRUE;
928 a->arenaIndex = 0;
929 aprev = NULL;
930 i = 0;
931 do {
932 a->prev = aprev;
933 aprev = a;
934 ++i;
935 a = GET_ARENA_INFO(chunk, i);
936 a->firstArena = JS_FALSE;
937 a->arenaIndex = i;
938 } while (i != js_gcArenasPerChunk - 1);
939 ci = GET_CHUNK_INFO(chunk, 0);
940 ci->lastFreeArena = aprev;
941 ci->numFreeArenas = js_gcArenasPerChunk - 1;
942 AddChunkToList(rt, ci);
943 } else {
944 JS_ASSERT(ci->prevp == &rt->gcChunkList);
945 a = ci->lastFreeArena;
946 aprev = a->prev;
947 if (!aprev) {
948 JS_ASSERT(ci->numFreeArenas == 1);
949 JS_ASSERT(ARENA_INFO_TO_START(a) == (jsuword) ci);
950 RemoveChunkFromList(rt, ci);
951 chunk = GET_ARENA_CHUNK(a, GET_ARENA_INDEX(a));
952 SET_CHUNK_INFO_INDEX(chunk, NO_FREE_ARENAS);
953 } else {
954 JS_ASSERT(ci->numFreeArenas >= 2);
955 JS_ASSERT(ARENA_INFO_TO_START(a) != (jsuword) ci);
956 ci->lastFreeArena = aprev;
957 ci->numFreeArenas--;
958 }
959 }
960 }
961 #endif
962
963 rt->gcBytes += GC_ARENA_SIZE;
964 a->prevUntracedPage = 0;
965 memset(&a->u, 0, sizeof(a->u));
966
967 return a;
968 }
969
970 static void
971 DestroyGCArenas(JSRuntime *rt, JSGCArenaInfo *last)
972 {
973 JSGCArenaInfo *a;
974
975 while (last) {
976 a = last;
977 last = last->prev;
978
979 METER(rt->gcStats.afree++);
980 JS_ASSERT(rt->gcBytes >= GC_ARENA_SIZE);
981 rt->gcBytes -= GC_ARENA_SIZE;
982
983 #if CHUNKED_ARENA_ALLOCATION
984 if (js_gcArenasPerChunk == 1) {
985 #endif
986 DestroyGCChunk(ARENA_INFO_TO_START(a));
987 #if CHUNKED_ARENA_ALLOCATION
988 } else {
989 uint32 arenaIndex;
990 jsuword chunk;
991 uint32 chunkInfoIndex;
992 JSGCChunkInfo *ci;
993 # ifdef DEBUG
994 jsuword firstArena;
995
996 firstArena = a->firstArena;
997 arenaIndex = a->arenaIndex;
998 memset((void *) ARENA_INFO_TO_START(a), JS_FREE_PATTERN,
999 GC_ARENA_SIZE - JS_GC_ARENA_PAD);
1000 a->firstArena = firstArena;
1001 a->arenaIndex = arenaIndex;
1002 # endif
1003 arenaIndex = GET_ARENA_INDEX(a);
1004 chunk = GET_ARENA_CHUNK(a, arenaIndex);
1005 chunkInfoIndex = GET_CHUNK_INFO_INDEX(chunk);
1006 if (chunkInfoIndex == NO_FREE_ARENAS) {
1007 chunkInfoIndex = arenaIndex;
1008 SET_CHUNK_INFO_INDEX(chunk, arenaIndex);
1009 ci = GET_CHUNK_INFO(chunk, chunkInfoIndex);
1010 a->prev = NULL;
1011 ci->lastFreeArena = a;
1012 ci->numFreeArenas = 1;
1013 AddChunkToList(rt, ci);
1014 } else {
1015 JS_ASSERT(chunkInfoIndex != arenaIndex);
1016 ci = GET_CHUNK_INFO(chunk, chunkInfoIndex);
1017 JS_ASSERT(ci->numFreeArenas != 0);
1018 JS_ASSERT(ci->lastFreeArena);
1019 JS_ASSERT(a != ci->lastFreeArena);
1020 if (ci->numFreeArenas == js_gcArenasPerChunk - 1) {
1021 RemoveChunkFromList(rt, ci);
1022 DestroyGCChunk(chunk);
1023 } else {
1024 ++ci->numFreeArenas;
1025 a->prev = ci->lastFreeArena;
1026 ci->lastFreeArena = a;
1027 }
1028 }
1029 }
1030 # endif
1031 }
1032 }
1033
1034 static void
1035 InitGCArenaLists(JSRuntime *rt)
1036 {
1037 uintN i, thingSize;
1038 JSGCArenaList *arenaList;
1039
1040 for (i = 0; i < GC_NUM_FREELISTS; i++) {
1041 arenaList = &rt->gcArenaList[i];
1042 thingSize = GC_FREELIST_NBYTES(i);
1043 JS_ASSERT((size_t)(uint16)thingSize == thingSize);
1044 arenaList->last = NULL;
1045 arenaList->lastCount = (uint16) THINGS_PER_ARENA(thingSize);
1046 arenaList->thingSize = (uint16) thingSize;
1047 arenaList->freeList = NULL;
1048 }
1049 rt->gcDoubleArenaList.first = NULL;
1050 rt->gcDoubleArenaList.nextDoubleFlags = DOUBLE_BITMAP_SENTINEL;
1051 }
1052
1053 static void
1054 FinishGCArenaLists(JSRuntime *rt)
1055 {
1056 uintN i;
1057 JSGCArenaList *arenaList;
1058
1059 for (i = 0; i < GC_NUM_FREELISTS; i++) {
1060 arenaList = &rt->gcArenaList[i];
1061 DestroyGCArenas(rt, arenaList->last);
1062 arenaList->last = NULL;
1063 arenaList->lastCount = THINGS_PER_ARENA(arenaList->thingSize);
1064 arenaList->freeList = NULL;
1065 }
1066 DestroyGCArenas(rt, rt->gcDoubleArenaList.first);
1067 rt->gcDoubleArenaList.first = NULL;
1068 rt->gcDoubleArenaList.nextDoubleFlags = DOUBLE_BITMAP_SENTINEL;
1069
1070 rt->gcBytes = 0;
1071 JS_ASSERT(rt->gcChunkList == 0);
1072 }
1073
1074 /*
1075 * This function must not be called when thing is jsdouble.
1076 */
1077 static uint8 *
1078 GetGCThingFlags(void *thing)
1079 {
1080 JSGCArenaInfo *a;
1081 uint32 index;
1082
1083 a = THING_TO_ARENA(thing);
1084 index = THING_TO_INDEX(thing, a->list->thingSize);
1085 return THING_FLAGP(a, index);
1086 }
1087
1088 /*
1089 * This function returns null when thing is jsdouble.
1090 */
1091 static uint8 *
1092 GetGCThingFlagsOrNull(void *thing)
1093 {
1094 JSGCArenaInfo *a;
1095 uint32 index;
1096
1097 a = THING_TO_ARENA(thing);
1098 if (!a->list)
1099 return NULL;
1100 index = THING_TO_INDEX(thing, a->list->thingSize);
1101 return THING_FLAGP(a, index);
1102 }
1103
1104 intN
1105 js_GetExternalStringGCType(JSString *str)
1106 {
1107 uintN type;
1108
1109 type = (uintN) *GetGCThingFlags(str) & GCF_TYPEMASK;
1110 JS_ASSERT(type == GCX_STRING || type >= GCX_EXTERNAL_STRING);
1111 return (type == GCX_STRING) ? -1 : (intN) (type - GCX_EXTERNAL_STRING);
1112 }
1113
1114 static uint32
1115 MapGCFlagsToTraceKind(uintN flags)
1116 {
1117 uint32 type;
1118
1119 type = flags & GCF_TYPEMASK;
1120 JS_ASSERT(type != GCX_DOUBLE);
1121 JS_ASSERT(type < GCX_NTYPES);
1122 return (type < GCX_EXTERNAL_STRING) ? type : JSTRACE_STRING;
1123 }
1124
1125 JS_FRIEND_API(uint32)
1126 js_GetGCThingTraceKind(void *thing)
1127 {
1128 JSGCArenaInfo *a;
1129 uint32 index;
1130
1131 a = THING_TO_ARENA(thing);
1132 if (!a->list)
1133 return JSTRACE_DOUBLE;
1134
1135 index = THING_TO_INDEX(thing, a->list->thingSize);
1136 return MapGCFlagsToTraceKind(*THING_FLAGP(a, index));
1137 }
1138
1139 JSRuntime*
1140 js_GetGCStringRuntime(JSString *str)
1141 {
1142 JSGCArenaList *list;
1143
1144 list = THING_TO_ARENA(str)->list;
1145
1146 JS_ASSERT(list->thingSize == sizeof(JSGCThing));
1147 JS_ASSERT(GC_FREELIST_INDEX(sizeof(JSGCThing)) == 0);
1148
1149 return (JSRuntime *)((uint8 *)list - offsetof(JSRuntime, gcArenaList));
1150 }
1151
1152 JSBool
1153 js_IsAboutToBeFinalized(JSContext *cx, void *thing)
1154 {
1155 JSGCArenaInfo *a;
1156 uint32 index, flags;
1157
1158 a = THING_TO_ARENA(thing);
1159 if (!a->list) {
1160 /*
1161 * Check if the arena has no marked doubles. In that case the bitmap with
1162 * the mark flags contains garbage, as it is initialized only when
1163 * marking the first double in the arena.
1164 */
1165 if (!a->u.hasMarkedDoubles)
1166 return JS_TRUE;
1167 index = DOUBLE_THING_TO_INDEX(thing);
1168 return !IsMarkedDouble(a, index);
1169 }
1170 index = THING_TO_INDEX(thing, a->list->thingSize);
1171 flags = *THING_FLAGP(a, index);
1172 return !(flags & (GCF_MARK | GCF_LOCK | GCF_FINAL));
1173 }
1174
1175 /* This is compatible with JSDHashEntryStub. */
1176 typedef struct JSGCRootHashEntry {
1177 JSDHashEntryHdr hdr;
1178 void *root;
1179 const char *name;
1180 } JSGCRootHashEntry;
1181
1182 /* Initial size of the gcRootsHash table (SWAG, small enough to amortize). */
1183 #define GC_ROOTS_SIZE 256
1184
1185 #if CHUNKED_ARENA_ALLOCATION
1186
1187 /*
1188 * For a CPU with extremely large pages, using them for GC things wastes
1189 * too much memory.
1190 */
1191 # define GC_ARENAS_PER_CPU_PAGE_LIMIT JS_BIT(18 - GC_ARENA_SHIFT)
1192
1193 JS_STATIC_ASSERT(GC_ARENAS_PER_CPU_PAGE_LIMIT <= NO_FREE_ARENAS);
1194
1195 #endif
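/*
 * With GC_ARENA_SHIFT == 12 this limit is JS_BIT(6) == 64 arenas, i.e. CPU
 * pages of up to 256 KB still use the mmap path in js_InitGC below; larger
 * pages (or pages smaller than one arena) fall back to malloced 7-arena
 * chunks.
 */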
1196
1197 JSBool
1198 js_InitGC(JSRuntime *rt, uint32 maxbytes)
1199 {
1200 #if JS_GC_USE_MMAP
1201 if (js_gcArenasPerChunk == 0) {
1202 size_t cpuPageSize, arenasPerPage;
1203 # if defined(XP_WIN)
1204 SYSTEM_INFO si;
1205
1206 GetSystemInfo(&si);
1207 cpuPageSize = si.dwPageSize;
1208
1209 # elif defined(XP_UNIX) || defined(XP_BEOS)
1210 cpuPageSize = (size_t) sysconf(_SC_PAGESIZE);
1211 # else
1212 # error "Not implemented"
1213 # endif
1214 /* cpuPageSize is a power of 2. */
1215 JS_ASSERT((cpuPageSize & (cpuPageSize - 1)) == 0);
1216 arenasPerPage = cpuPageSize >> GC_ARENA_SHIFT;
1217 #ifdef DEBUG
1218 if (arenasPerPage == 0) {
1219 fprintf(stderr,
1220 "JS engine warning: the size of the CPU page, %u bytes, is too low to use\n"
1221 "paged allocation for the garbage collector. Please report this.\n",
1222 (unsigned) cpuPageSize);
1223 }
1224 #endif
1225 if (arenasPerPage - 1 <= (size_t) (GC_ARENAS_PER_CPU_PAGE_LIMIT - 1)) {
1226 /*
1227 * Use at least 4 GC arenas per paged allocation chunk to minimize
1228 * the overhead of mmap/VirtualAlloc.
1229 */
1230 js_gcUseMmap = JS_TRUE;
1231 js_gcArenasPerChunk = JS_MAX((uint32) arenasPerPage, 4);
1232 } else {
1233 js_gcUseMmap = JS_FALSE;
1234 js_gcArenasPerChunk = 7;
1235 }
1236 }
1237 JS_ASSERT(1 <= js_gcArenasPerChunk &&
1238 js_gcArenasPerChunk <= NO_FREE_ARENAS);
1239 #endif
1240
1241 InitGCArenaLists(rt);
1242 if (!JS_DHashTableInit(&rt->gcRootsHash, JS_DHashGetStubOps(), NULL,
1243 sizeof(JSGCRootHashEntry), GC_ROOTS_SIZE)) {
1244 rt->gcRootsHash.ops = NULL;
1245 return JS_FALSE;
1246 }
1247 rt->gcLocksHash = NULL; /* create lazily */
1248
1249 /*
1250 * Separate gcMaxMallocBytes from gcMaxBytes but initialize to maxbytes
1251 * for default backward API compatibility.
1252 */
1253 rt->gcMaxBytes = rt->gcMaxMallocBytes = maxbytes;
1254 rt->gcEmptyArenaPoolLifespan = 30000;
1255
1256 METER(memset(&rt->gcStats, 0, sizeof rt->gcStats));
1257 return JS_TRUE;
1258 }
1259
1260 #ifdef JS_GCMETER
1261
1262 static void
1263 UpdateArenaStats(JSGCArenaStats *st, uint32 nlivearenas, uint32 nkilledArenas,
1264 uint32 nthings)
1265 {
1266 size_t narenas;
1267
1268 narenas = nlivearenas + nkilledArenas;
1269 JS_ASSERT(narenas >= st->livearenas);
1270
1271 st->newarenas = narenas - st->livearenas;
1272 st->narenas = narenas;
1273 st->livearenas = nlivearenas;
1274 if (st->maxarenas < narenas)
1275 st->maxarenas = narenas;
1276 st->totalarenas += narenas;
1277
1278 st->nthings = nthings;
1279 if (st->maxthings < nthings)
1280 st->maxthings = nthings;
1281 st->totalthings += nthings;
1282 }
1283
1284 JS_FRIEND_API(void)
1285 js_DumpGCStats(JSRuntime *rt, FILE *fp)
1286 {
1287 int i;
1288 size_t sumArenas, sumTotalArenas;
1289 size_t sumThings, sumMaxThings;
1290 size_t sumThingSize, sumTotalThingSize;
1291 size_t sumArenaCapacity, sumTotalArenaCapacity;
1292 JSGCArenaStats *st;
1293 size_t thingSize, thingsPerArena;
1294 size_t sumAlloc, sumLocalAlloc, sumFail, sumRetry;
1295
1296 fprintf(fp, "\nGC allocation statistics:\n");
1297
1298 #define UL(x) ((unsigned long)(x))
1299 #define ULSTAT(x) UL(rt->gcStats.x)
1300 #define PERCENT(x,y) (100.0 * (double) (x) / (double) (y))
1301
1302 sumArenas = 0;
1303 sumTotalArenas = 0;
1304 sumThings = 0;
1305 sumMaxThings = 0;
1306 sumThingSize = 0;
1307 sumTotalThingSize = 0;
1308 sumArenaCapacity = 0;
1309 sumTotalArenaCapacity = 0;
1310 sumAlloc = 0;
1311 sumLocalAlloc = 0;
1312 sumFail = 0;
1313 sumRetry = 0;
1314 for (i = -1; i < (int) GC_NUM_FREELISTS; i++) {
1315 if (i == -1) {
1316 thingSize = sizeof(jsdouble);
1317 thingsPerArena = DOUBLES_PER_ARENA;
1318 st = &rt->gcStats.doubleArenaStats;
1319 fprintf(fp,
1320 "Arena list for double values (%lu doubles per arena):",
1321 UL(thingsPerArena));
1322 } else {
1323 thingSize = rt->gcArenaList[i].thingSize;
1324 thingsPerArena = THINGS_PER_ARENA(thingSize);
1325 st = &rt->gcStats.arenaStats[i];
1326 fprintf(fp,
1327 "Arena list %d (thing size %lu, %lu things per arena):",
1328 i, UL(GC_FREELIST_NBYTES(i)), UL(thingsPerArena));
1329 }
1330 if (st->maxarenas == 0) {
1331 fputs(" NEVER USED\n", fp);
1332 continue;
1333 }
1334 putc('\n', fp);
1335 fprintf(fp, " arenas before GC: %lu\n", UL(st->narenas));
1336 fprintf(fp, " new arenas before GC: %lu (%.1f%%)\n",
1337 UL(st->newarenas), PERCENT(st->newarenas, st->narenas));
1338 fprintf(fp, " arenas after GC: %lu (%.1f%%)\n",
1339 UL(st->livearenas), PERCENT(st->livearenas, st->narenas));
1340 fprintf(fp, " max arenas: %lu\n", UL(st->maxarenas));
1341 fprintf(fp, " things: %lu\n", UL(st->nthings));
1342 fprintf(fp, " GC cell utilization: %.1f%%\n",
1343 PERCENT(st->nthings, thingsPerArena * st->narenas));
1344 fprintf(fp, " average cell utilization: %.1f%%\n",
1345 PERCENT(st->totalthings, thingsPerArena * st->totalarenas));
1346 fprintf(fp, " max things: %lu\n", UL(st->maxthings));
1347 fprintf(fp, " alloc attempts: %lu\n", UL(st->alloc));
1348 fprintf(fp, " alloc without locks: %lu (%.1f%%)\n",
1349 UL(st->localalloc), PERCENT(st->localalloc, st->alloc));
1350 sumArenas += st->narenas;
1351 sumTotalArenas += st->totalarenas;
1352 sumThings += st->nthings;
1353 sumMaxThings += st->maxthings;
1354 sumThingSize += thingSize * st->nthings;
1355 sumTotalThingSize += thingSize * st->totalthings;
1356 sumArenaCapacity += thingSize * thingsPerArena * st->narenas;
1357 sumTotalArenaCapacity += thingSize * thingsPerArena * st->totalarenas;
1358 sumAlloc += st->alloc;
1359 sumLocalAlloc += st->localalloc;
1360 sumFail += st->fail;
1361 sumRetry += st->retry;
1362 }
1363 fprintf(fp, "TOTAL STATS:\n");
1364 fprintf(fp, " bytes allocated: %lu\n", UL(rt->gcBytes));
1365 fprintf(fp, " total GC arenas: %lu\n", UL(sumArenas));
1366 fprintf(fp, " total GC things: %lu\n", UL(sumThings));
1367 fprintf(fp, " max total GC things: %lu\n", UL(sumMaxThings));
1368 fprintf(fp, " GC cell utilization: %.1f%%\n",
1369 PERCENT(sumThingSize, sumArenaCapacity));
1370 fprintf(fp, " average cell utilization: %.1f%%\n",
1371 PERCENT(sumTotalThingSize, sumTotalArenaCapacity));
1372 fprintf(fp, "allocation retries after GC: %lu\n", UL(sumRetry));
1373 fprintf(fp, " alloc attempts: %lu\n", UL(sumAlloc));
1374 fprintf(fp, " alloc without locks: %lu (%.1f%%)\n",
1375 UL(sumLocalAlloc), PERCENT(sumLocalAlloc, sumAlloc));
1376 fprintf(fp, " allocation failures: %lu\n", UL(sumFail));
1377 fprintf(fp, " things born locked: %lu\n", ULSTAT(lockborn));
1378 fprintf(fp, " valid lock calls: %lu\n", ULSTAT(lock));
1379 fprintf(fp, " valid unlock calls: %lu\n", ULSTAT(unlock));
1380 fprintf(fp, " mark recursion depth: %lu\n", ULSTAT(depth));
1381 fprintf(fp, " maximum mark recursion: %lu\n", ULSTAT(maxdepth));
1382 fprintf(fp, " mark C recursion depth: %lu\n", ULSTAT(cdepth));
1383 fprintf(fp, " maximum mark C recursion: %lu\n", ULSTAT(maxcdepth));
1384 fprintf(fp, " delayed tracing calls: %lu\n", ULSTAT(untraced));
1385 #ifdef DEBUG
1386 fprintf(fp, " max trace later count: %lu\n", ULSTAT(maxuntraced));
1387 #endif
1388 fprintf(fp, " maximum GC nesting level: %lu\n", ULSTAT(maxlevel));
1389 fprintf(fp, "potentially useful GC calls: %lu\n", ULSTAT(poke));
1390 fprintf(fp, " thing arenas freed so far: %lu\n", ULSTAT(afree));
1391 fprintf(fp, " stack segments scanned: %lu\n", ULSTAT(stackseg));
1392 fprintf(fp, "stack segment slots scanned: %lu\n", ULSTAT(segslots));
1393 fprintf(fp, "reachable closeable objects: %lu\n", ULSTAT(nclose));
1394 fprintf(fp, " max reachable closeable: %lu\n", ULSTAT(maxnclose));
1395 fprintf(fp, " scheduled close hooks: %lu\n", ULSTAT(closelater));
1396 fprintf(fp, " max scheduled close hooks: %lu\n", ULSTAT(maxcloselater));
1397
1398 #undef UL
1399 #undef ULSTAT
1400 #undef PERCENT
1401
1402 #ifdef JS_ARENAMETER
1403 JS_DumpArenaStats(fp);
1404 #endif
1405 }
1406 #endif
1407
1408 #ifdef DEBUG
1409 static void
1410 CheckLeakedRoots(JSRuntime *rt);
1411 #endif
1412
1413 #ifdef JS_THREADSAFE
1414 static void
1415 TrimGCFreeListsPool(JSRuntime *rt, uintN keepCount);
1416 #endif
1417
1418 void
1419 js_FinishGC(JSRuntime *rt)
1420 {
1421 #ifdef JS_ARENAMETER
1422 JS_DumpArenaStats(stdout);
1423 #endif
1424 #ifdef JS_GCMETER
1425 js_DumpGCStats(rt, stdout);
1426 #endif
1427
1428 FreePtrTable(&rt->gcIteratorTable, &iteratorTableInfo);
1429 #ifdef JS_THREADSAFE
1430 TrimGCFreeListsPool(rt, 0);
1431 JS_ASSERT(!rt->gcFreeListsPool);
1432 #endif
1433 FinishGCArenaLists(rt);
1434
1435 if (rt->gcRootsHash.ops) {
1436 #ifdef DEBUG
1437 CheckLeakedRoots(rt);
1438 #endif
1439 JS_DHashTableFinish(&rt->gcRootsHash);
1440 rt->gcRootsHash.ops = NULL;
1441 }
1442 if (rt->gcLocksHash) {
1443 JS_DHashTableDestroy(rt->gcLocksHash);
1444 rt->gcLocksHash = NULL;
1445 }
1446 }
1447
1448 JSBool
1449 js_AddRoot(JSContext *cx, void *rp, const char *name)
1450 {
1451 JSBool ok = js_AddRootRT(cx->runtime, rp, name);
1452 if (!ok)
1453 JS_ReportOutOfMemory(cx);
1454 return ok;
1455 }
1456
1457 JSBool
1458 js_AddRootRT(JSRuntime *rt, void *rp, const char *name)
1459 {
1460 JSBool ok;
1461 JSGCRootHashEntry *rhe;
1462
1463 /*
1464 * Due to the long-standing, but now removed, use of rt->gcLock across the
1465 * bulk of js_GC, API users have come to depend on JS_AddRoot etc. locking
1466 * properly with a racing GC, without calling JS_AddRoot from a request.
1467 * We have to preserve API compatibility here, now that we avoid holding
1468 * rt->gcLock across the mark phase (including the root hashtable mark).
1469 *
1470 * If the GC is running and we're called on another thread, wait for this
1471 * GC activation to finish. We can safely wait here (in the case where we
1472 * are called within a request on another thread's context) without fear
1473 * of deadlock because the GC doesn't set rt->gcRunning until after it has
1474 * waited for all active requests to end.
1475 */
1476 JS_LOCK_GC(rt);
1477 #ifdef JS_THREADSAFE
1478 JS_ASSERT(!rt->gcRunning || rt->gcLevel > 0);
1479 if (rt->gcRunning && rt->gcThread->id != js_CurrentThreadId()) {
1480 do {
1481 JS_AWAIT_GC_DONE(rt);
1482 } while (rt->gcLevel > 0);
1483 }
1484 #endif
1485 rhe = (JSGCRootHashEntry *)
1486 JS_DHashTableOperate(&rt->gcRootsHash, rp, JS_DHASH_ADD);
1487 if (rhe) {
1488 rhe->root = rp;
1489 rhe->name = name;
1490 ok = JS_TRUE;
1491 } else {
1492 ok = JS_FALSE;
1493 }
1494 JS_UNLOCK_GC(rt);
1495 return ok;
1496 }
1497
1498 JSBool
1499 js_RemoveRoot(JSRuntime *rt, void *rp)
1500 {
1501 /*
1502 * Due to the JS_RemoveRootRT API, we may be called outside of a request.
1503 * Same synchronization drill as above in js_AddRoot.
1504 */
1505 JS_LOCK_GC(rt);
1506 #ifdef JS_THREADSAFE
1507 JS_ASSERT(!rt->gcRunning || rt->gcLevel > 0);
1508 if (rt->gcRunning && rt->gcThread->id != js_CurrentThreadId()) {
1509 do {
1510 JS_AWAIT_GC_DONE(rt);
1511 } while (rt->gcLevel > 0);
1512 }
1513 #endif
1514 (void) JS_DHashTableOperate(&rt->gcRootsHash, rp, JS_DHASH_REMOVE);
1515 rt->gcPoke = JS_TRUE;
1516 JS_UNLOCK_GC(rt);
1517 return JS_TRUE;
1518 }
1519
1520 #ifdef DEBUG
1521
1522 static JSDHashOperator
1523 js_root_printer(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 i, void *arg)
1524 {
1525 uint32 *leakedroots = (uint32 *)arg;
1526 JSGCRootHashEntry *rhe = (JSGCRootHashEntry *)hdr;
1527
1528 (*leakedroots)++;
1529 fprintf(stderr,
1530 "JS engine warning: leaking GC root \'%s\' at %p\n",
1531 rhe->name ? (char *)rhe->name : "", rhe->root);
1532
1533 return JS_DHASH_NEXT;
1534 }
1535
1536 static void
1537 CheckLeakedRoots(JSRuntime *rt)
1538 {
1539 uint32 leakedroots = 0;
1540
1541 /* Warn (but don't assert) in debug builds about any remaining roots. */
1542 JS_DHashTableEnumerate(&rt->gcRootsHash, js_root_printer,
1543 &leakedroots);
1544 if (leakedroots > 0) {
1545 if (leakedroots == 1) {
1546 fprintf(stderr,
1547 "JS engine warning: 1 GC root remains after destroying the JSRuntime at %p.\n"
1548 " This root may point to freed memory. Objects reachable\n"
1549 " through it have not been finalized.\n",
1550 (void *) rt);
1551 } else {
1552 fprintf(stderr,
1553 "JS engine warning: %lu GC roots remain after destroying the JSRuntime at %p.\n"
1554 " These roots may point to freed memory. Objects reachable\n"
1555 " through them have not been finalized.\n",
1556 (unsigned long) leakedroots, (void *) rt);
1557 }
1558 }
1559 }
1560
1561 typedef struct NamedRootDumpArgs {
1562 void (*dump)(const char *name, void *rp, void *data);
1563 void *data;
1564 } NamedRootDumpArgs;
1565
1566 static JSDHashOperator
1567 js_named_root_dumper(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 number,
1568 void *arg)
1569 {
1570 NamedRootDumpArgs *args = (NamedRootDumpArgs *) arg;
1571 JSGCRootHashEntry *rhe = (JSGCRootHashEntry *)hdr;
1572
1573 if (rhe->name)
1574 args->dump(rhe->name, rhe->root, args->data);
1575 return JS_DHASH_NEXT;
1576 }
1577
1578 JS_BEGIN_EXTERN_C
1579 void
1580 js_DumpNamedRoots(JSRuntime *rt,
1581 void (*dump)(const char *name, void *rp, void *data),
1582 void *data)
1583 {
1584 NamedRootDumpArgs args;
1585
1586 args.dump = dump;
1587 args.data = data;
1588 JS_DHashTableEnumerate(&rt->gcRootsHash, js_named_root_dumper, &args);
1589 }
1590 JS_END_EXTERN_C
1591
1592 #endif /* DEBUG */
1593
1594 typedef struct GCRootMapArgs {
1595 JSGCRootMapFun map;
1596 void *data;
1597 } GCRootMapArgs;
1598
1599 static JSDHashOperator
1600 js_gcroot_mapper(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 number,
1601 void *arg)
1602 {
1603 GCRootMapArgs *args = (GCRootMapArgs *) arg;
1604 JSGCRootHashEntry *rhe = (JSGCRootHashEntry *)hdr;
1605 intN mapflags;
1606 int op;
1607
1608 mapflags = args->map(rhe->root, rhe->name, args->data);
1609
1610 #if JS_MAP_GCROOT_NEXT == JS_DHASH_NEXT && \
1611 JS_MAP_GCROOT_STOP == JS_DHASH_STOP && \
1612 JS_MAP_GCROOT_REMOVE == JS_DHASH_REMOVE
1613 op = (JSDHashOperator)mapflags;
1614 #else
1615 op = JS_DHASH_NEXT;
1616 if (mapflags & JS_MAP_GCROOT_STOP)
1617 op |= JS_DHASH_STOP;
1618 if (mapflags & JS_MAP_GCROOT_REMOVE)
1619 op |= JS_DHASH_REMOVE;
1620 #endif
1621
1622 return (JSDHashOperator) op;
1623 }
1624
1625 uint32
1626 js_MapGCRoots(JSRuntime *rt, JSGCRootMapFun map, void *data)
1627 {
1628 GCRootMapArgs args;
1629 uint32 rv;
1630
1631 args.map = map;
1632 args.data = data;
1633 JS_LOCK_GC(rt);
1634 rv = JS_DHashTableEnumerate(&rt->gcRootsHash, js_gcroot_mapper, &args);
1635 JS_UNLOCK_GC(rt);
1636 return rv;
1637 }
1638
1639 JSBool
1640 js_RegisterCloseableIterator(JSContext *cx, JSObject *obj)
1641 {
1642 JSRuntime *rt;
1643 JSBool ok;
1644
1645 rt = cx->runtime;
1646 JS_ASSERT(!rt->gcRunning);
1647
1648 JS_LOCK_GC(rt);
1649 ok = AddToPtrTable(cx, &rt->gcIteratorTable, &iteratorTableInfo, obj);
1650 JS_UNLOCK_GC(rt);
1651 return ok;
1652 }
1653
1654 static void
1655 CloseNativeIterators(JSContext *cx)
1656 {
1657 JSRuntime *rt;
1658 size_t count, newCount, i;
1659 void **array;
1660 JSObject *obj;
1661
1662 rt = cx->runtime;
1663 count = rt->gcIteratorTable.count;
1664 array = rt->gcIteratorTable.array;
1665
1666 newCount = 0;
1667 for (i = 0; i != count; ++i) {
1668 obj = (JSObject *)array[i];
1669 if (js_IsAboutToBeFinalized(cx, obj))
1670 js_CloseNativeIterator(cx, obj);
1671 else
1672 array[newCount++] = obj;
1673 }
1674 ShrinkPtrTable(&rt->gcIteratorTable, &iteratorTableInfo, newCount);
1675 }
1676
1677 #if defined(DEBUG_brendan) || defined(DEBUG_timeless)
1678 #define DEBUG_gchist
1679 #endif
1680
1681 #ifdef DEBUG_gchist
1682 #define NGCHIST 64
1683
1684 static struct GCHist {
1685 JSBool lastDitch;
1686 JSGCThing *freeList;
1687 } gchist[NGCHIST];
1688
1689 unsigned gchpos = 0;
1690 #endif
1691
1692 #ifdef JS_THREADSAFE
1693
1694 const JSGCFreeListSet js_GCEmptyFreeListSet = {
1695 { NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL }, NULL
1696 };
1697
1698 static void
1699 TrimGCFreeListsPool(JSRuntime *rt, uintN keepCount)
1700 {
1701 JSGCFreeListSet **cursor, *freeLists, *link;
1702
1703 cursor = &rt->gcFreeListsPool;
1704 while (keepCount != 0) {
1705 --keepCount;
1706 freeLists = *cursor;
1707 if (!freeLists)
1708 return;
1709 memset(freeLists->array, 0, sizeof freeLists->array);
1710 cursor = &freeLists->link;
1711 }
1712 freeLists = *cursor;
1713 if (freeLists) {
1714 *cursor = NULL;
1715 do {
1716 link = freeLists->link;
1717 free(freeLists);
1718 } while ((freeLists = link) != NULL);
1719 }
1720 }
1721
1722 void
1723 js_RevokeGCLocalFreeLists(JSContext *cx)
1724 {
1725 JS_ASSERT(!cx->gcLocalFreeLists->link);
1726 if (cx->gcLocalFreeLists != &js_GCEmptyFreeListSet) {
1727 cx->gcLocalFreeLists->link = cx->runtime->gcFreeListsPool;
1728 cx->runtime->gcFreeListsPool = cx->gcLocalFreeLists;
1729 cx->gcLocalFreeLists = (JSGCFreeListSet *) &js_GCEmptyFreeListSet;
1730 }
1731 }
1732
1733 static JSGCFreeListSet *
1734 EnsureLocalFreeList(JSContext *cx)
1735 {
1736 JSGCFreeListSet *freeLists;
1737
1738 freeLists = cx->gcLocalFreeLists;
1739 if (freeLists != &js_GCEmptyFreeListSet) {
1740 JS_ASSERT(freeLists);
1741 return freeLists;
1742 }
1743
1744 freeLists = cx->runtime->gcFreeListsPool;
1745 if (freeLists) {
1746 cx->runtime->gcFreeListsPool = freeLists->link;
1747 freeLists->link = NULL;
1748 } else {
1749 /* JS_malloc is not used as the caller reports out-of-memory itself. */
1750 freeLists = (JSGCFreeListSet *) calloc(1, sizeof *freeLists);
1751 if (!freeLists)
1752 return NULL;
1753 }
1754 cx->gcLocalFreeLists = freeLists;
1755 return freeLists;
1756 }
1757
1758 #endif
1759
1760 void *
1761 js_NewGCThing(JSContext *cx, uintN flags, size_t nbytes)
1762 {
1763 JSRuntime *rt;
1764 uintN flindex;
1765 JSBool doGC;
1766 JSGCThing *thing;
1767 uint8 *flagp;
1768 JSGCArenaList *arenaList;
1769 JSGCArenaInfo *a;
1770 uintN thingsLimit;
1771 JSLocalRootStack *lrs;
1772 #ifdef JS_GCMETER
1773 JSGCArenaStats *astats;
1774 #endif
1775 #ifdef JS_THREADSAFE
1776 JSBool gcLocked;
1777 uintN localMallocBytes;
1778 JSGCFreeListSet *freeLists;
1779 JSGCThing **lastptr;
1780 JSGCThing *tmpthing;
1781 uint8 *tmpflagp;
1782 uintN maxFreeThings; /* max to take from the global free list */
1783 #endif
1784
1785 JS_ASSERT((flags & GCF_TYPEMASK) != GCX_DOUBLE);
1786 rt = cx->runtime;
1787 nbytes = JS_ROUNDUP(nbytes, sizeof(JSGCThing));
1788 flindex = GC_FREELIST_INDEX(nbytes);
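    /*
     * Illustrative note: JS_ROUNDUP leaves nbytes unchanged when it is
     * already a multiple of sizeof(JSGCThing); any other request is bumped
     * up to the next multiple (e.g. 3 * sizeof(JSGCThing) / 2 becomes
     * 2 * sizeof(JSGCThing)), so every entry of rt->gcArenaList deals in
     * things of a single fixed size.
     */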
1789
1790 /* Updates of metering counters here may not be thread-safe. */
1791 METER(astats = &cx->runtime->gcStats.arenaStats[flindex]);
1792 METER(astats->alloc++);
1793
1794 #ifdef JS_THREADSAFE
1795 gcLocked = JS_FALSE;
1796 JS_ASSERT(cx->thread);
1797 freeLists = cx->gcLocalFreeLists;
1798 thing = freeLists->array[flindex];
1799 localMallocBytes = cx->thread->gcMallocBytes;
1800 if (thing && rt->gcMaxMallocBytes - rt->gcMallocBytes > localMallocBytes) {
1801 flagp = thing->flagp;
1802 freeLists->array[flindex] = thing->next;
1803 METER(astats->localalloc++);
1804 goto success;
1805 }
1806
1807 JS_LOCK_GC(rt);
1808 gcLocked = JS_TRUE;
1809
1810 /* Transfer thread-local counter to global one. */
1811 if (localMallocBytes != 0) {
1812 cx->thread->gcMallocBytes = 0;
1813 if (rt->gcMaxMallocBytes - rt->gcMallocBytes < localMallocBytes)
1814 rt->gcMallocBytes = rt->gcMaxMallocBytes;
1815 else
1816 rt->gcMallocBytes += localMallocBytes;
1817 }
1818 #endif
1819 JS_ASSERT(!rt->gcRunning);
1820 if (rt->gcRunning) {
1821 METER(rt->gcStats.finalfail++);
1822 JS_UNLOCK_GC(rt);
1823 return NULL;
1824 }
1825
1826 doGC = (rt->gcMallocBytes >= rt->gcMaxMallocBytes && rt->gcPoke);
1827 #ifdef JS_GC_ZEAL
1828 doGC = doGC || rt->gcZeal >= 2 || (rt->gcZeal >= 1 && rt->gcPoke);
1829 #endif
1830
1831 arenaList = &rt->gcArenaList[flindex];
1832 for (;;) {
1833 if (doGC && !JS_ON_TRACE(cx)) {
1834 /*
1835 * Keep rt->gcLock across the call into js_GC so we don't starve
1836 * and lose to racing threads who deplete the heap just after
1837 * js_GC has replenished it (or has synchronized with a racing
1838 * GC that collected a bunch of garbage). This unfair scheduling
1839 * can happen on certain operating systems. For the gory details,
1840 * see bug 162779 at https://bugzilla.mozilla.org/.
1841 */
1842 js_GC(cx, GC_LAST_DITCH);
1843 METER(astats->retry++);
1844 }
1845
1846 /* Try to get thing from the free list. */
1847 thing = arenaList->freeList;
1848 if (thing) {
1849 arenaList->freeList = thing->next;
1850 flagp = thing->flagp;
1851 JS_ASSERT(*flagp & GCF_FINAL);
1852
1853 #ifdef JS_THREADSAFE
1854 /*
1855 * Refill the local free list by taking several things from the
1856              * global free list unless we are still at the rt->gcMaxMallocBytes
1857              * barrier or the free list is already populated. The former
1858              * happens when the GC is canceled due to !gcCallback(cx, JSGC_BEGIN)
1859              * or no gcPoke. The latter is caused by allocating new things
1860 * in gcCallback(cx, JSGC_END).
1861 */
1862 if (rt->gcMallocBytes >= rt->gcMaxMallocBytes)
1863 break;
1864
1865 freeLists = EnsureLocalFreeList(cx);
1866 if (!freeLists)
1867 goto fail;
1868 if (freeLists->array[flindex])
1869 break;
1870
1871 tmpthing = arenaList->freeList;
1872 if (tmpthing) {
1873 maxFreeThings = MAX_THREAD_LOCAL_THINGS;
1874 do {
1875 if (!tmpthing->next)
1876 break;
1877 tmpthing = tmpthing->next;
1878 } while (--maxFreeThings != 0);
1879
1880 freeLists->array[flindex] = arenaList->freeList;
1881 arenaList->freeList = tmpthing->next;
1882 tmpthing->next = NULL;
1883 }
1884 #endif
1885 break;
1886 }
1887
1888 /*
1889 * Try to allocate things from the last arena. If it is fully used,
1890 * check if we can allocate a new one and, if we cannot, consider
1891 * doing a "last ditch" GC unless already tried.
1892 */
1893 thingsLimit = THINGS_PER_ARENA(nbytes);
1894 if (arenaList->lastCount != thingsLimit) {
1895 JS_ASSERT(arenaList->lastCount < thingsLimit);
1896 a = arenaList->last;
1897 } else {
1898 a = NewGCArena(rt);
1899 if (!a) {
1900 if (doGC || JS_ON_TRACE(cx))
1901 goto fail;
1902 doGC = JS_TRUE;
1903 continue;
1904 }
1905 a->list = arenaList;
1906 a->prev = arenaList->last;
1907 a->prevUntracedPage = 0;
1908 a->u.untracedThings = 0;
1909 arenaList->last = a;
1910 arenaList->lastCount = 0;
1911 }
1912
1913 flagp = THING_FLAGP(a, arenaList->lastCount);
1914 thing = FLAGP_TO_THING(flagp, nbytes);
1915 arenaList->lastCount++;
1916
1917 #ifdef JS_THREADSAFE
1918 /*
1919 * Refill the local free list by taking free things from the last
1920 * arena. Prefer to order free things by ascending address in the
1921 * (unscientific) hope of better cache locality.
1922 */
1923 if (rt->gcMallocBytes >= rt->gcMaxMallocBytes)
1924 break;
1925
1926 freeLists = EnsureLocalFreeList(cx);
1927 if (!freeLists)
1928 goto fail;
1929 if (freeLists->array[flindex])
1930 break;
1931 lastptr = &freeLists->array[flindex];
1932 maxFreeThings = thingsLimit - arenaList->lastCount;
1933 if (maxFreeThings > MAX_THREAD_LOCAL_THINGS)
1934 maxFreeThings = MAX_THREAD_LOCAL_THINGS;
1935 while (maxFreeThings != 0) {
1936 --maxFreeThings;
1937
1938 tmpflagp = THING_FLAGP(a, arenaList->lastCount);
1939 tmpthing = FLAGP_TO_THING(tmpflagp, nbytes);
1940 arenaList->lastCount++;
1941 tmpthing->flagp = tmpflagp;
1942 *tmpflagp = GCF_FINAL; /* signifying that thing is free */
1943
1944 *lastptr = tmpthing;
1945 lastptr = &tmpthing->next;
1946 }
1947 *lastptr = NULL;
1948 #endif
1949 break;
1950 }
1951
1952 /* We successfully allocated the thing. */
1953 #ifdef JS_THREADSAFE
1954 success:
1955 #endif
1956 lrs = cx->localRootStack;
1957 if (lrs) {
1958 /*
1959 * If we're in a local root scope, don't set newborn[type] at all, to
1960 * avoid entraining garbage from it for an unbounded amount of time
1961 * on this context. A caller will leave the local root scope and pop
1962 * this reference, allowing thing to be GC'd if it has no other refs.
1963 * See JS_EnterLocalRootScope and related APIs.
1964 */
1965 if (js_PushLocalRoot(cx, lrs, (jsval) thing) < 0) {
1966 /*
1967 * When we fail for a thing allocated through the tail of the last
1968              * arena, the thing's flag byte is not initialized. So to prevent the
1969              * GC from accessing the uninitialized flags during finalization, we
1970 * always mark the thing as final. See bug 337407.
1971 */
1972 *flagp = GCF_FINAL;
1973 goto fail;
1974 }
1975 } else {
1976 /*
1977 * No local root scope, so we're stuck with the old, fragile model of
1978 * depending on a pigeon-hole newborn per type per context.
1979 */
1980 cx->weakRoots.newborn[flags & GCF_TYPEMASK] = thing;
1981 }
1982
1983 /* We can't fail now, so update flags. */
1984 *flagp = (uint8)flags;
1985
1986 #ifdef DEBUG_gchist
1987 gchist[gchpos].lastDitch = doGC;
1988 gchist[gchpos].freeList = rt->gcArenaList[flindex].freeList;
1989 if (++gchpos == NGCHIST)
1990 gchpos = 0;
1991 #endif
1992
1993 /* This is not thread-safe for thread-local allocations. */
1994 METER_IF(flags & GCF_LOCK, rt->gcStats.lockborn++);
1995
1996 #ifdef JS_THREADSAFE
1997 if (gcLocked)
1998 JS_UNLOCK_GC(rt);
1999 #endif
2000 JS_COUNT_OPERATION(cx, JSOW_ALLOCATION);
2001 return thing;
2002
2003 fail:
2004 #ifdef JS_THREADSAFE
2005 if (gcLocked)
2006 JS_UNLOCK_GC(rt);
2007 #endif
2008 METER(astats->fail++);
2009 if (!JS_ON_TRACE(cx))
2010 JS_ReportOutOfMemory(cx);
2011 return NULL;
2012 }
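/*
 * A hedged sketch of how callers are expected to use js_NewGCThing (not a
 * quote of any particular call site): pass the GC type in flags and the
 * size of the structure being allocated.
 *
 *     JSString *str = (JSString *)
 *         js_NewGCThing(cx, GCX_STRING, sizeof(JSString));
 *     if (!str)
 *         return NULL;    allocation failed, OOM already reported off-trace
 *
 * On success the new thing is only weakly held, via the local root stack or
 * cx->weakRoots.newborn[], until the caller stores it somewhere traceable.
 */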
2013
2014 static JSGCDoubleCell *
2015 RefillDoubleFreeList(JSContext *cx)
2016 {
2017 JSRuntime *rt;
2018 jsbitmap *doubleFlags, usedBits;
2019 JSBool didGC = JS_FALSE;
2020 JSGCArenaInfo *a;
2021 uintN bit, index;
2022 JSGCDoubleCell *cell, *list, *lastcell;
2023
2024 JS_ASSERT(!cx->doubleFreeList);
2025
2026 rt = cx->runtime;
2027 JS_LOCK_GC(rt);
2028
2029 JS_ASSERT(!rt->gcRunning);
2030 if (rt->gcRunning) {
2031 METER(rt->gcStats.finalfail++);
2032 JS_UNLOCK_GC(rt);
2033 return NULL;
2034 }
2035
2036 if (rt->gcMallocBytes >= rt->gcMaxMallocBytes && rt->gcPoke
2037 #ifdef JS_GC_ZEAL
2038 && (rt->gcZeal >= 2 || (rt->gcZeal >= 1 && rt->gcPoke))
2039 #endif
2040 ) {
2041 goto do_gc;
2042 }
2043
2044 /*
2045      * Loop until we find a flag bitmap word with unset bits indicating free
2046      * double cells, then set all bits as used and put the cells on the free
2047      * list for the current context.
2048 */
2049 doubleFlags = rt->gcDoubleArenaList.nextDoubleFlags;
2050 for (;;) {
2051 if (((jsuword) doubleFlags & GC_ARENA_MASK) ==
2052 ARENA_INFO_OFFSET) {
2053 if (doubleFlags == DOUBLE_BITMAP_SENTINEL ||
2054 !((JSGCArenaInfo *) doubleFlags)->prev) {
2055 a = NewGCArena(rt);
2056 if (!a) {
2057 do_gc:
2058 if (didGC || JS_ON_TRACE(cx)) {
2059 METER(rt->gcStats.doubleArenaStats.fail++);
2060 JS_UNLOCK_GC(rt);
2061 if (!JS_ON_TRACE(cx))
2062 JS_ReportOutOfMemory(cx);
2063 return NULL;
2064 }
2065 js_GC(cx, GC_LAST_DITCH);
2066 METER(rt->gcStats.doubleArenaStats.retry++);
2067 doubleFlags = rt->gcDoubleArenaList.nextDoubleFlags;
2068 didGC = JS_TRUE;
2069 continue;
2070 }
2071 a->list = NULL;
2072 a->prev = NULL;
2073 if (doubleFlags == DOUBLE_BITMAP_SENTINEL) {
2074 JS_ASSERT(!rt->gcDoubleArenaList.first);
2075 rt->gcDoubleArenaList.first = a;
2076 } else {
2077 JS_ASSERT(rt->gcDoubleArenaList.first);
2078 ((JSGCArenaInfo *) doubleFlags)->prev = a;
2079 }
2080 ClearDoubleArenaFlags(a);
2081 doubleFlags = DOUBLE_ARENA_BITMAP(a);
2082 break;
2083 }
2084 doubleFlags =
2085 DOUBLE_ARENA_BITMAP(((JSGCArenaInfo *) doubleFlags)->prev);
2086 }
2087
2088 /*
2089          * When doubleFlags points to the last bitmap word in the arena, its
2090          * high bits correspond to non-existing cells. ClearDoubleArenaFlags
2091          * sets such bits to 1. Thus even for this last word a bit is unset
2092          * iff the corresponding cell exists and is free.
2093 */
2094 if (*doubleFlags != (jsbitmap) -1)
2095 break;
2096 ++doubleFlags;
2097 }
2098
2099 rt->gcDoubleArenaList.nextDoubleFlags = doubleFlags + 1;
2100 usedBits = *doubleFlags;
2101 JS_ASSERT(usedBits != (jsbitmap) -1);
2102 *doubleFlags = (jsbitmap) -1;
2103 JS_UNLOCK_GC(rt);
2104
2105 /*
2106 * Find the index corresponding to the first bit in *doubleFlags. The last
2107 * bit will have "index + JS_BITS_PER_WORD - 1".
2108 */
2109 index = ((uintN) ((jsuword) doubleFlags & GC_ARENA_MASK) -
2110 DOUBLES_ARENA_BITMAP_OFFSET) * JS_BITS_PER_BYTE;
2111 cell = (JSGCDoubleCell *) ((jsuword) doubleFlags & ~GC_ARENA_MASK) + index;
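    /*
     * Worked example, assuming JS_BITS_PER_WORD == 32: for the bitmap word
     * at byte offset DOUBLES_ARENA_BITMAP_OFFSET + 4 within the arena,
     * index == 4 * JS_BITS_PER_BYTE == 32, so cell points at the arena's
     * double cell number 32 and this word governs cells 32..63.
     */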
2112
2113 if (usedBits == 0) {
2114 /* The common case when all doubles from *doubleFlags are free. */
2115 JS_ASSERT(index + JS_BITS_PER_WORD <= DOUBLES_PER_ARENA);
2116 list = cell;
2117 for (lastcell = cell + JS_BITS_PER_WORD - 1; cell != lastcell; ++cell)
2118 cell->link = cell + 1;
2119 lastcell->link = NULL;
2120 } else {
2121 /*
2122 * Assemble the free list from free cells from *doubleFlags starting
2123          * from the tail. In the loop below
2124          *
2125          *   index + bit >= DOUBLES_PER_ARENA
2126          *
2127          * holds when bit is one of the unused bits. We do not check for such bits
2128 * explicitly as they must be set and the "if" check filters them out.
2129 */
2130 JS_ASSERT(index + JS_BITS_PER_WORD <=
2131 DOUBLES_PER_ARENA + UNUSED_DOUBLE_BITMAP_BITS);
2132 bit = JS_BITS_PER_WORD;
2133 cell += bit;
2134 list = NULL;
2135 do {
2136 --bit;
2137 --cell;
2138 if (!(((jsbitmap) 1 << bit) & usedBits)) {
2139 JS_ASSERT(index + bit < DOUBLES_PER_ARENA);
2140 JS_ASSERT_IF(index + bit == DOUBLES_PER_ARENA - 1, !list);
2141 cell->link = list;
2142 list = cell;
2143 }
2144 } while (bit != 0);
2145 }
2146 JS_ASSERT(list);
2147 JS_COUNT_OPERATION(cx, JSOW_ALLOCATION * JS_BITS_PER_WORD);
2148
2149 /*
2150 * We delegate assigning cx->doubleFreeList to js_NewDoubleInRootedValue as
2151 * it immediately consumes the head of the list.
2152 */
2153 return list;
2154 }
2155
2156 JSBool
2157 js_NewDoubleInRootedValue(JSContext *cx, jsdouble d, jsval *vp)
2158 {
2159 #ifdef JS_GCMETER
2160 JSGCArenaStats *astats;
2161 #endif
2162 JSGCDoubleCell *cell;
2163
2164 /* Updates of metering counters here are not thread-safe. */
2165 METER(astats = &cx->runtime->gcStats.doubleArenaStats);
2166 METER(astats->alloc++);
2167 cell = cx->doubleFreeList;
2168 if (!cell) {
2169 cell = RefillDoubleFreeList(cx);
2170 if (!cell) {
2171 METER(astats->fail++);
2172 return JS_FALSE;
2173 }
2174 } else {
2175 METER(astats->localalloc++);
2176 }
2177 cx->doubleFreeList = cell->link;
2178 cell->number = d;
2179 *vp = DOUBLE_TO_JSVAL(&cell->number);
2180 return JS_TRUE;
2181 }
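/*
 * A minimal usage sketch (illustrative only):
 *
 *     jsval v;
 *     if (!js_NewDoubleInRootedValue(cx, 3.25, &v))
 *         return JS_FALSE;                allocation failed
 *     ... use JSVAL_TO_DOUBLE(v) ...
 *
 * As the name suggests, *vp should denote a location the GC already knows
 * about, so the freshly boxed double is protected as soon as it is stored;
 * js_NewWeaklyRootedDouble below is the variant that instead parks the
 * result in the newborn weak roots or on the local root stack.
 */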
2182
2183 jsdouble *
2184 js_NewWeaklyRootedDouble(JSContext *cx, jsdouble d)
2185 {
2186 jsval v;
2187 jsdouble *dp;
2188
2189 if (!js_NewDoubleInRootedValue(cx, d, &v))
2190 return NULL;
2191
2192 JS_ASSERT(JSVAL_IS_DOUBLE(v));
2193 dp = JSVAL_TO_DOUBLE(v);
2194 if (cx->localRootStack) {
2195 if (js_PushLocalRoot(cx, cx->localRootStack, v) < 0)
2196 return NULL;
2197 } else {
2198 cx->weakRoots.newborn[GCX_DOUBLE] = dp;
2199 }
2200 return dp;
2201 }
2202
2203 JSBool
2204 js_AddAsGCBytes(JSContext *cx, size_t sz)
2205 {
2206 JSRuntime *rt;
2207
2208 rt = cx->runtime;
2209 if (rt->gcBytes >= rt->gcMaxBytes ||
2210 sz > (size_t) (rt->gcMaxBytes - rt->gcBytes)
2211 #ifdef JS_GC_ZEAL
2212 || rt->gcZeal >= 2 || (rt->gcZeal >= 1 && rt->gcPoke)
2213 #endif
2214 ) {
2215 if (JS_ON_TRACE(cx)) {
2216 JS_UNLOCK_GC(rt);
2217 return JS_FALSE;
2218 }
2219 js_GC(cx, GC_LAST_DITCH);
2220 if (rt->gcBytes >= rt->gcMaxBytes ||
2221 sz > (size_t) (rt->gcMaxBytes - rt->gcBytes)) {
2222 JS_UNLOCK_GC(rt);
2223 JS_ReportOutOfMemory(cx);
2224 return JS_FALSE;
2225 }
2226 }
2227 rt->gcBytes += (uint32) sz;
2228 return JS_TRUE;
2229 }
2230
2231 void
2232 js_RemoveAsGCBytes(JSRuntime *rt, size_t sz)
2233 {
2234 JS_ASSERT((size_t) rt->gcBytes >= sz);
2235 rt->gcBytes -= (uint32) sz;
2236 }
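/*
 * These two are meant to be used as a pair by code that wants some external
 * allocation to count against the gcBytes/gcMaxBytes budget.  A hedged
 * sketch of the expected pattern (the failure paths of js_AddAsGCBytes drop
 * rt->gcLock, which suggests the caller holds that lock):
 *
 *     if (!js_AddAsGCBytes(cx, nbytes))
 *         return JS_FALSE;                 over budget even after a GC
 *     ...
 *     js_RemoveAsGCBytes(rt, nbytes);      when the memory is released
 */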
2237
2238 /*
2239 * Shallow GC-things can be locked just by setting the GCF_LOCK bit, because
2240 * they have no descendants to mark during the GC. Currently the optimization
2241  * is only used for non-dependent strings.
2242 */
2243 #define GC_THING_IS_SHALLOW(flagp, thing) \
2244 ((flagp) && \
2245 ((*(flagp) & GCF_TYPEMASK) >= GCX_EXTERNAL_STRING || \
2246 ((*(flagp) & GCF_TYPEMASK) == GCX_STRING && \
2247 !JSSTRING_IS_DEPENDENT((JSString *) (thing)))))
2248
2249 /* This is compatible with JSDHashEntryStub. */
2250 typedef struct JSGCLockHashEntry {
2251 JSDHashEntryHdr hdr;
2252 const void *thing;
2253 uint32 count;
2254 } JSGCLockHashEntry;
2255
2256 JSBool
2257 js_LockGCThingRT(JSRuntime *rt, void *thing)
2258 {
2259 JSBool shallow, ok;
2260 uint8 *flagp;
2261 JSGCLockHashEntry *lhe;
2262
2263 if (!thing)
2264 return JS_TRUE;
2265
2266 flagp = GetGCThingFlagsOrNull(thing);
2267 JS_LOCK_GC(rt);
2268 shallow = GC_THING_IS_SHALLOW(flagp, thing);
2269
2270 /*
2271 * Avoid adding a rt->gcLocksHash entry for shallow things until someone
2272 * nests a lock.
2273 */
2274 if (shallow && !(*flagp & GCF_LOCK)) {
2275 *flagp |= GCF_LOCK;
2276 METER(rt->gcStats.lock++);
2277 ok = JS_TRUE;
2278 goto out;
2279 }
2280
2281 if (!rt->gcLocksHash) {
2282 rt->gcLocksHash = JS_NewDHashTable(JS_DHashGetStubOps(), NULL,
2283 sizeof(JSGCLockHashEntry),
2284 GC_ROOTS_SIZE);
2285 if (!rt->gcLocksHash) {
2286 ok = JS_FALSE;
2287 goto out;
2288 }
2289 }
2290
2291 lhe = (JSGCLockHashEntry *)
2292 JS_DHashTableOperate(rt->gcLocksHash, thing, JS_DHASH_ADD);
2293 if (!lhe) {
2294 ok = JS_FALSE;
2295 goto out;
2296 }
2297 if (!lhe->thing) {
2298 lhe->thing = thing;
2299 lhe->count = 1;
2300 } else {
2301 JS_ASSERT(lhe->count >= 1);
2302 lhe->count++;
2303 }
2304
2305 METER(rt->gcStats.lock++);
2306 ok = JS_TRUE;
2307 out:
2308 JS_UNLOCK_GC(rt);
2309 return ok;
2310 }
2311
2312 JSBool
2313 js_UnlockGCThingRT(JSRuntime *rt, void *thing)
2314 {
2315 uint8 *flagp;
2316 JSBool shallow;
2317 JSGCLockHashEntry *lhe;
2318
2319 if (!thing)
2320 return JS_TRUE;
2321
2322 flagp = GetGCThingFlagsOrNull(thing);
2323 JS_LOCK_GC(rt);
2324 shallow = GC_THING_IS_SHALLOW(flagp, thing);
2325
2326 if (shallow && !(*flagp & GCF_LOCK))
2327 goto out;
2328 if (!rt->gcLocksHash ||
2329 (lhe = (JSGCLockHashEntry *)
2330 JS_DHashTableOperate(rt->gcLocksHash, thing,
2331 JS_DHASH_LOOKUP),
2332 JS_DHASH_ENTRY_IS_FREE(&lhe->hdr))) {
2333 /* Shallow entry is not in the hash -> clear its lock bit. */
2334 if (shallow)
2335 *flagp &= ~GCF_LOCK;
2336 else
2337 goto out;
2338 } else {
2339 if (--lhe->count != 0)
2340 goto out;
2341 JS_DHashTableOperate(rt->gcLocksHash, thing, JS_DHASH_REMOVE);
2342 }
2343
2344 rt->gcPoke = JS_TRUE;
2345 METER(rt->gcStats.unlock++);
2346 out:
2347 JS_UNLOCK_GC(rt);
2348 return JS_TRUE;
2349 }
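/*
 * Lock and unlock calls nest: each successful js_LockGCThingRT should be
 * balanced by a js_UnlockGCThingRT on the same thing.  Illustrative sketch:
 *
 *     if (!js_LockGCThingRT(rt, str))      keeps str alive across GCs
 *         return JS_FALSE;                 lock-table entry allocation failed
 *     ...
 *     js_UnlockGCThingRT(rt, str);         str may be collected again
 *
 * For a shallow thing the first level of nesting is just the GCF_LOCK flag
 * bit; only deeper nesting, or a non-shallow thing, costs a gcLocksHash
 * entry.
 */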
2350
2351 JS_PUBLIC_API(void)
2352 JS_TraceChildren(JSTracer *trc, void *thing, uint32 kind)
2353 {
2354 JSObject *obj;
2355 size_t nslots, i;
2356 jsval v;
2357 JSString *str;
2358
2359 switch (kind) {
2360 case JSTRACE_OBJECT:
2361 /* If obj has no map, it must be a newborn. */
2362 obj = (JSObject *) thing;
2363 if (!obj->map)
2364 break;
2365 if (obj->map->ops->trace) {
2366 obj->map->ops->trace(trc, obj);
2367 } else {
2368 nslots = STOBJ_NSLOTS(obj);
2369 for (i = 0; i != nslots; ++i) {
2370 v = STOBJ_GET_SLOT(obj, i);
2371 if (JSVAL_IS_TRACEABLE(v)) {
2372 JS_SET_TRACING_INDEX(trc, "slot", i);
2373 JS_CallTracer(trc, JSVAL_TO_TRACEABLE(v),
2374 JSVAL_TRACE_KIND(v));
2375 }
2376 }
2377 }
2378 break;
2379
2380 case JSTRACE_STRING:
2381 str = (JSString *)thing;
2382 if (JSSTRING_IS_DEPENDENT(str))
2383 JS_CALL_STRING_TRACER(trc, JSSTRDEP_BASE(str), "base");
2384 break;
2385
2386 #if JS_HAS_XML_SUPPORT
2387 case JSTRACE_XML:
2388 js_TraceXML(trc, (JSXML *)thing);
2389 break;
2390 #endif
2391 }
2392 }
2393
2394 /*
2395 * Number of things covered by a single bit of JSGCArenaInfo.u.untracedThings.
2396 */
2397 #define THINGS_PER_UNTRACED_BIT(thingSize) \
2398 JS_HOWMANY(THINGS_PER_ARENA(thingSize), JS_BITS_PER_WORD)
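/*
 * Worked example with purely illustrative numbers: if an arena holds 102
 * things of the given size and JS_BITS_PER_WORD is 32, then
 *
 *     THINGS_PER_UNTRACED_BIT(thingSize) == JS_HOWMANY(102, 32) == 4,
 *
 * i.e. each bit of JSGCArenaInfo.u.untracedThings stands for a run of up to
 * 4 consecutive things, and TraceDelayedChildren below rescans that whole
 * run when the bit is set.
 */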
2399
2400 static void
2401 DelayTracingChildren(JSRuntime *rt, uint8 *flagp)
2402 {
2403 JSGCArenaInfo *a;
2404 uint32 untracedBitIndex;
2405 jsuword bit;
2406
2407 /*
2408 * Things with children to be traced later are marked with
2409 * GCF_MARK | GCF_FINAL flags.
2410 */
2411 JS_ASSERT((*flagp & (GCF_MARK | GCF_FINAL)) == GCF_MARK);
2412 *flagp |= GCF_FINAL;
2413
2414 METER(rt->gcStats.untraced++);
2415 #ifdef DEBUG
2416 ++rt->gcTraceLaterCount;
2417 METER_UPDATE_MAX(rt->gcStats.maxuntraced, rt->gcTraceLaterCount);
2418 #endif
2419
2420 a = FLAGP_TO_ARENA(flagp);
2421 untracedBitIndex = FLAGP_TO_INDEX(flagp) /
2422 THINGS_PER_UNTRACED_BIT(a->list->thingSize);
2423 JS_ASSERT(untracedBitIndex < JS_BITS_PER_WORD);
2424 bit = (jsuword)1 << untracedBitIndex;
2425 if (a->u.untracedThings != 0) {
2426 JS_ASSERT(rt->gcUntracedArenaStackTop);
2427 if (a->u.untracedThings & bit) {
2428 /* bit already covers things with children to trace later. */
2429 return;
2430 }
2431 a->u.untracedThings |= bit;
2432 } else {
2433 /*
2434          * The thing is the first thing in the whole arena whose children are
2435          * not yet traced, so push the arena on the stack of arenas with things
2436          * to be traced later unless the arena has already been pushed. We
2437          * detect that by checking prevUntracedPage, as the field is 0 only
2438          * for arenas that have not been pushed yet. To ensure that
2439          *   prevUntracedPage != 0
2440          * even when the stack contains one element, we make prevUntracedPage
2441          * for the arena at the bottom point to itself.
2442 *
2443 * See comments in TraceDelayedChildren.
2444 */
2445 a->u.untracedThings = bit;
2446 if (a->prevUntracedPage == 0) {
2447 if (!rt->gcUntracedArenaStackTop) {
2448 /* Stack was empty, mark the arena as the bottom element. */
2449 a->prevUntracedPage = ARENA_INFO_TO_PAGE(a);
2450 } else {
2451 JS_ASSERT(rt->gcUntracedArenaStackTop->prevUntracedPage != 0);
2452 a->prevUntracedPage =
2453 ARENA_INFO_TO_PAGE(rt->gcUntracedArenaStackTop);
2454 }
2455 rt->gcUntracedArenaStackTop = a;
2456 }
2457 }
2458 JS_ASSERT(rt->gcUntracedArenaStackTop);
2459 }
2460
2461 static void
2462 TraceDelayedChildren(JSTracer *trc)
2463 {
2464 JSRuntime *rt;
2465 JSGCArenaInfo *a, *aprev;
2466 uint32 thingSize;
2467 uint32 thingsPerUntracedBit;
2468 uint32 untracedBitIndex, thingIndex, indexLimit, endIndex;
2469 JSGCThing *thing;
2470 uint8 *flagp;
2471
2472 rt = trc->context->runtime;
2473 a = rt->gcUntracedArenaStackTop;
2474 if (!a) {
2475 JS_ASSERT(rt->gcTraceLaterCount == 0);
2476 return;
2477 }
2478
2479 for (;;) {
2480 /*
2481 * The following assert verifies that the current arena belongs to the
2482          * untraced stack, since DelayTracingChildren ensures that even for the
2483          * stack's bottom arena prevUntracedPage != 0: it points to the arena itself.
2484 */
2485 JS_ASSERT(a->prevUntracedPage != 0);
2486 JS_ASSERT(rt->gcUntracedArenaStackTop->prevUntracedPage != 0);
2487 thingSize = a->list->thingSize;
2488 indexLimit = (a == a->list->last)
2489 ? a->list->lastCount
2490 : THINGS_PER_ARENA(thingSize);
2491 thingsPerUntracedBit = THINGS_PER_UNTRACED_BIT(thingSize);
2492
2493 /*
2494          * We cannot use a do-while loop here as a->u.untracedThings can be zero
2495 * before the loop as a leftover from the previous iterations. See
2496 * comments after the loop.
2497 */
2498 while (a->u.untracedThings != 0) {
2499 untracedBitIndex = JS_FLOOR_LOG2W(a->u.untracedThings);
2500 a->u.untracedThings &= ~((jsuword)1 << untracedBitIndex);
2501 thingIndex = untracedBitIndex * thingsPerUntracedBit;
2502 endIndex = thingIndex + thingsPerUntracedBit;
2503
2504 /*
2505 * endIndex can go beyond the last allocated thing as the real
2506 * limit can be "inside" the bit.
2507 */
2508 if (endIndex > indexLimit)
2509 endIndex = indexLimit;
2510 JS_ASSERT(thingIndex < indexLimit);
2511
2512 do {
2513 /*
2514 * Skip free or already traced things that share the bit
2515 * with untraced ones.
2516 */
2517 flagp = THING_FLAGP(a, thingIndex);
2518 if ((*flagp & (GCF_MARK|GCF_FINAL)) != (GCF_MARK|GCF_FINAL))
2519 continue;
2520 *flagp &= ~GCF_FINAL;
2521 #ifdef DEBUG
2522 JS_ASSERT(rt->gcTraceLaterCount != 0);
2523 --rt->gcTraceLaterCount;
2524 #endif
2525 thing = FLAGP_TO_THING(flagp, thingSize);
2526 JS_TraceChildren(trc, thing, MapGCFlagsToTraceKind(*flagp));
2527 } while (++thingIndex != endIndex);
2528 }
2529
2530 /*
2531          * We have finished tracing all things in the arena, but we can only
2532          * pop it from the stack if the arena is the stack's top.
2533          *
2534          * When JS_TraceChildren above calls JS_CallTracer, which in turn, on
2535          * a low C stack, calls DelayTracingChildren and the latter pushes
2536          * new arenas onto the untraced stack, we have to skip popping this
2537          * arena until it becomes the top of the stack again.
2538 */
2539 if (a == rt->gcUntracedArenaStackTop) {
2540 aprev = ARENA_PAGE_TO_INFO(a->prevUntracedPage);
2541 a->prevUntracedPage = 0;
2542 if (a == aprev) {
2543 /*
2544 * prevUntracedPage points to itself and we reached the
2545 * bottom of the stack.
2546 */
2547 break;
2548 }
2549 rt->gcUntracedArenaStackTop = a = aprev;
2550 } else {
2551 a = rt->gcUntracedArenaStackTop;
2552 }
2553 }
2554 JS_ASSERT(rt->gcUntracedArenaStackTop);
2555 JS_ASSERT(rt->gcUntracedArenaStackTop->prevUntracedPage == 0);
2556 rt->gcUntracedArenaStackTop = NULL;
2557 JS_ASSERT(rt->gcTraceLaterCount == 0);
2558 }
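/*
 * A condensed view of the delayed-tracing machinery implemented by the two
 * functions above (no additional behavior, just a summary):
 *
 *     mark a thing           ->  *flagp |= GCF_MARK
 *     C stack too deep       ->  DelayTracingChildren: *flagp |= GCF_FINAL,
 *                                set the arena's untraced bit, push the
 *                                arena if it is not on the stack yet
 *     TraceDelayedChildren   ->  pop arenas, clear bits, clear GCF_FINAL
 *                                and JS_TraceChildren for each pending thing
 *
 * The GCF_MARK | GCF_FINAL combination is what distinguishes "marked, but
 * children still pending" from free things (GCF_FINAL alone) and from fully
 * traced ones (GCF_MARK alone).
 */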
2559
2560 JS_PUBLIC_API(void)
2561 JS_CallTracer(JSTracer *trc, void *thing, uint32 kind)
2562 {
2563 JSContext *cx;
2564 JSRuntime *rt;
2565 JSGCArenaInfo *a;
2566 uintN index;
2567 uint8 *flagp;
2568
2569 JS_ASSERT(thing);
2570 JS_ASSERT(JS_IS_VALID_TRACE_KIND(kind));
2571 JS_ASSERT(trc->debugPrinter || trc->debugPrintArg);
2572
2573 if (!IS_GC_MARKING_TRACER(trc)) {
2574 trc->callback(trc, thing, kind);
2575 goto out;
2576 }
2577
2578 cx = trc->context;
2579 rt = cx->runtime;
2580 JS_ASSERT(rt->gcMarkingTracer == trc);
2581 JS_ASSERT(rt->gcLevel > 0);
2582
2583 /*
2584 * Optimize for string and double as their size is known and their tracing
2585 * is not recursive.
2586 */
2587 switch (kind) {
2588 case JSTRACE_DOUBLE:
2589 a = THING_TO_ARENA(thing);
2590 JS_ASSERT(!a->list);
2591 if (!a->u.hasMarkedDoubles) {
2592 ClearDoubleArenaFlags(a);
2593 a->u.hasMarkedDoubles = JS_TRUE;
2594 }
2595 index = DOUBLE_THING_TO_INDEX(thing);
2596 JS_SET_BIT(DOUBLE_ARENA_BITMAP(a), index);
2597 goto out;
2598
2599 case JSTRACE_STRING:
2600 for (;;) {
2601 flagp = THING_TO_FLAGP(thing, sizeof(JSGCThing));
2602 JS_ASSERT((*flagp & GCF_FINAL) == 0);
2603 JS_ASSERT(kind == MapGCFlagsToTraceKind(*flagp));
2604 if (!JSSTRING_IS_DEPENDENT((JSString *) thing)) {
2605 *flagp |= GCF_MARK;
2606 goto out;
2607 }
2608 if (*flagp & GCF_MARK)
2609 goto out;
2610 *flagp |= GCF_MARK;
2611 thing = JSSTRDEP_BASE((JSString *) thing);
2612 }
2613 /* NOTREACHED */
2614 }
2615
2616 flagp = GetGCThingFlags(thing);
2617 JS_ASSERT(kind == MapGCFlagsToTraceKind(*flagp));
2618 if (*flagp & GCF_MARK)
2619 goto out;
2620
2621 /*
2622      * We check for the non-final flag only if the mark bit is unset, as
2623 * DelayTracingChildren uses the flag. See comments in the function.
2624 */
2625 JS_ASSERT(*flagp != GCF_FINAL);
2626 *flagp |= GCF_MARK;
2627 if (!cx->insideGCMarkCallback) {
2628 /*
2629 * With JS_GC_ASSUME_LOW_C_STACK defined the mark phase of GC always
2630 * uses the non-recursive code that otherwise would be called only on
2631 * a low C stack condition.
2632 */
2633 #ifdef JS_GC_ASSUME_LOW_C_STACK
2634 # define RECURSION_TOO_DEEP() JS_TRUE
2635 #else
2636 int stackDummy;
2637 # define RECURSION_TOO_DEEP() (!JS_CHECK_STACK_SIZE(cx, stackDummy))
2638 #endif
2639 if (RECURSION_TOO_DEEP())
2640 DelayTracingChildren(rt, flagp);
2641 else
2642 JS_TraceChildren(trc, thing, kind);
2643 } else {
2644 /*
2645 * For API compatibility we allow for the callback to assume that
2646 * after it calls JS_MarkGCThing for the last time, the callback can
2647 * start to finalize its own objects that are only referenced by
2648 * unmarked GC things.
2649 *
2650 * Since we do not know which call from inside the callback is the
2651 * last, we ensure that children of all marked things are traced and
2652 * call TraceDelayedChildren(trc) after tracing the thing.
2653 *
2654 * As TraceDelayedChildren unconditionally invokes JS_TraceChildren
2655 * for the things with untraced children, calling DelayTracingChildren
2656 * is useless here. Hence we always trace thing's children even with a
2657 * low native stack.
2658 */
2659 cx->insideGCMarkCallback = JS_FALSE;
2660 JS_TraceChildren(trc, thing, kind);
2661 TraceDelayedChildren(trc);
2662 cx->insideGCMarkCallback = JS_TRUE;
2663 }
2664
2665 out:
2666 #ifdef DEBUG
2667 trc->debugPrinter = NULL;
2668 trc->debugPrintArg = NULL;
2669 #endif
2670 return; /* to avoid out: right_curl when DEBUG is not defined */
2671 }
2672
2673 void
2674 js_CallValueTracerIfGCThing(JSTracer *trc, jsval v)
2675 {
2676 void *thing;
2677 uint32 kind;
2678
2679 if (JSVAL_IS_DOUBLE(v) || JSVAL_IS_STRING(v)) {
2680 thing = JSVAL_TO_TRACEABLE(v);
2681 kind = JSVAL_TRACE_KIND(v);
2682 JS_ASSERT(kind == js_GetGCThingTraceKind(JSVAL_TO_GCTHING(v)));
2683 } else if (JSVAL_IS_OBJECT(v) && v != JSVAL_NULL) {
2684 /* v can be an arbitrary GC thing reinterpreted as an object. */
2685 thing = JSVAL_TO_OBJECT(v);
2686 kind = js_GetGCThingTraceKind(thing);
2687 } else {
2688 return;
2689 }
2690 JS_CallTracer(trc, thing, kind);
2691 }
2692
2693 static JSDHashOperator
2694 gc_root_traversal(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 num,
2695 void *arg)
2696 {
2697 JSGCRootHashEntry *rhe = (JSGCRootHashEntry *)hdr;
2698 JSTracer *trc = (JSTracer *)arg;
2699 jsval *rp = (jsval *)rhe->root;
2700 jsval v = *rp;
2701
2702 /* Ignore null object and scalar values. */
2703 if (!JSVAL_IS_NULL(v) && JSVAL_IS_GCTHING(v)) {
2704 #ifdef DEBUG
2705 JSBool root_points_to_gcArenaList = JS_FALSE;
2706 jsuword thing = (jsuword) JSVAL_TO_GCTHING(v);
2707 JSRuntime *rt;
2708 uintN i;
2709 JSGCArenaList *arenaList;
2710 uint32 thingSize;
2711 JSGCArenaInfo *a;
2712 size_t limit;
2713
2714 rt = trc->context->runtime;
2715 for (i = 0; i < GC_NUM_FREELISTS; i++) {
2716 arenaList = &rt->gcArenaList[i];
2717 thingSize = arenaList->thingSize;
2718 limit = (size_t) arenaList->lastCount * thingSize;
2719 for (a = arenaList->last; a; a = a->prev) {
2720 if (thing - ARENA_INFO_TO_START(a) < limit) {
2721 root_points_to_gcArenaList = JS_TRUE;
2722 break;
2723 }
2724 limit = (size_t) THINGS_PER_ARENA(thingSize) * thingSize;
2725 }
2726 }
2727 if (!root_points_to_gcArenaList) {
2728 for (a = rt->gcDoubleArenaList.first; a; a = a->prev) {
2729 if (thing - ARENA_INFO_TO_START(a) <
2730 DOUBLES_PER_ARENA * sizeof(jsdouble)) {
2731 root_points_to_gcArenaList = JS_TRUE;
2732 break;
2733 }
2734 }
2735 }
2736 if (!root_points_to_gcArenaList && rhe->name) {
2737 fprintf(stderr,
2738 "JS API usage error: the address passed to JS_AddNamedRoot currently holds an\n"
2739 "invalid jsval. This is usually caused by a missing call to JS_RemoveRoot.\n"
2740 "The root's name is \"%s\".\n",
2741 rhe->name);
2742 }
2743 JS_ASSERT(root_points_to_gcArenaList);
2744 #endif
2745 JS_SET_TRACING_NAME(trc, rhe->name ? rhe->name : "root");
2746 js_CallValueTracerIfGCThing(trc, v);
2747 }
2748
2749 return JS_DHASH_NEXT;
2750 }
2751
2752 static JSDHashOperator
2753 gc_lock_traversal(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 num,
2754 void *arg)
2755 {
2756 JSGCLockHashEntry *lhe = (JSGCLockHashEntry *)hdr;
2757 void *thing = (void *)lhe->thing;
2758 JSTracer *trc = (JSTracer *)arg;
2759 uint32 traceKind;
2760
2761 JS_ASSERT(lhe->count >= 1);
2762 traceKind = js_GetGCThingTraceKind(thing);
2763 JS_CALL_TRACER(trc, thing, traceKind, "locked object");
2764 return JS_DHASH_NEXT;
2765 }
2766
2767 #define TRACE_JSVALS(trc, len, vec, name) \
2768 JS_BEGIN_MACRO \
2769 jsval _v, *_vp, *_end; \
2770 \
2771 for (_vp = vec, _end = _vp + len; _vp < _end; _vp++) { \
2772 _v = *_vp; \
2773 if (JSVAL_IS_TRACEABLE(_v)) { \
2774 JS_SET_TRACING_INDEX(trc, name, _vp - (vec)); \
2775 JS_CallTracer(trc, JSVAL_TO_TRACEABLE(_v), \
2776 JSVAL_TRACE_KIND(_v)); \
2777 } \
2778 } \
2779 JS_END_MACRO
2780
2781 void
2782 js_TraceStackFrame(JSTracer *trc, JSStackFrame *fp)
2783 {
2784 uintN nslots, minargs, skip;
2785
2786 if (fp->callobj)
2787 JS_CALL_OBJECT_TRACER(trc, fp->callobj, "call");
2788 if (fp->argsobj)
2789 JS_CALL_OBJECT_TRACER(trc, fp->argsobj, "arguments");
2790 if (fp->varobj)
2791 JS_CALL_OBJECT_TRACER(trc, fp->varobj, "variables");
2792 if (fp->script) {
2793 js_TraceScript(trc, fp->script);
2794 if (fp->regs) {
2795 /*
2796 * Don't mark what has not been pushed yet, or what has been
2797 * popped already.
2798 */
2799 JS_ASSERT((size_t) (fp->regs->sp - fp->slots) <=
2800 fp->script->nslots);
2801 nslots = (uintN) (fp->regs->sp - fp->slots);
2802 TRACE_JSVALS(trc, nslots, fp->slots, "slot");
2803 }
2804 } else {
2805 JS_ASSERT(!fp->slots);
2806 JS_ASSERT(!fp->regs);
2807 }
2808
2809 /* Allow for primitive this parameter due to JSFUN_THISP_* flags. */
2810 JS_ASSERT(JSVAL_IS_OBJECT((jsval)fp->thisp) ||
2811 (fp->fun && JSFUN_THISP_FLAGS(fp->fun->flags)));
2812 JS_CALL_VALUE_TRACER(trc, (jsval)fp->thisp, "this");
2813
2814 if (fp->callee)
2815 JS_CALL_OBJECT_TRACER(trc, fp->callee, "callee");
2816
2817 if (fp->argv) {
2818 nslots = fp->argc;
2819 skip = 0;
2820 if (fp->fun) {
2821 minargs = FUN_MINARGS(fp->fun);
2822 if (minargs > nslots)
2823 nslots = minargs;
2824 if (!FUN_INTERPRETED(fp->fun)) {
2825 JS_ASSERT(!(fp->fun->flags & JSFUN_FAST_NATIVE));
2826 nslots += fp->fun->u.n.extra;
2827 }
2828 if (fp->fun->flags & JSFRAME_ROOTED_ARGV)
2829 skip = 2 + fp->argc;
2830 }
2831 TRACE_JSVALS(trc, 2 + nslots - skip, fp->argv - 2 + skip, "operand");
2832 }
2833
2834 JS_CALL_VALUE_TRACER(trc, fp->rval, "rval");
2835 if (fp->scopeChain)
2836 JS_CALL_OBJECT_TRACER(trc, fp->scopeChain, "scope chain");
2837 if (fp->sharpArray)
2838 JS_CALL_OBJECT_TRACER(trc, fp->sharpArray, "sharp array");
2839
2840 if (fp->xmlNamespace)
2841 JS_CALL_OBJECT_TRACER(trc, fp->xmlNamespace, "xmlNamespace");
2842 }
2843
2844 static void
2845 TraceWeakRoots(JSTracer *trc, JSWeakRoots *wr)
2846 {
2847 uint32 i;
2848 void *thing;
2849
2850 #ifdef DEBUG
2851 static const char *weakRootNames[JSTRACE_LIMIT] = {
2852 "newborn object",
2853 "newborn double",
2854 "newborn string",
2855 "newborn xml"
2856 };
2857 #endif
2858
2859 for (i = 0; i != JSTRACE_LIMIT; i++) {
2860 thing = wr->newborn[i];
2861 if (thing)
2862 JS_CALL_TRACER(trc, thing, i, weakRootNames[i]);
2863 }
2864 JS_ASSERT(i == GCX_EXTERNAL_STRING);
2865 for (; i != GCX_NTYPES; ++i) {
2866 thing = wr->newborn[i];
2867 if (thing) {
2868 JS_SET_TRACING_INDEX(trc, "newborn external string",
2869 i - GCX_EXTERNAL_STRING);
2870 JS_CallTracer(trc, thing, JSTRACE_STRING);
2871 }
2872 }
2873
2874 JS_CALL_VALUE_TRACER(trc, wr->lastAtom, "lastAtom");
2875 JS_SET_TRACING_NAME(trc, "lastInternalResult");
2876 js_CallValueTracerIfGCThing(trc, wr->lastInternalResult);
2877 }
2878
2879 JS_FRIEND_API(void)
2880 js_TraceContext(JSTracer *trc, JSContext *acx)
2881 {
2882 JSStackFrame *fp, *nextChain;
2883 JSStackHeader *sh;
2884 JSTempValueRooter *tvr;
2885
2886 if (IS_GC_MARKING_TRACER(trc)) {
2887
2888 #define FREE_OLD_ARENAS(pool) \
2889 JS_BEGIN_MACRO \
2890 int64 _age; \
2891 JSArena * _a = (pool).current; \
2892 if (_a == (pool).first.next && \
2893 _a->avail == _a->base + sizeof(int64)) { \
2894 _age = JS_Now() - *(int64 *) _a->base; \
2895 if (_age > (int64) acx->runtime->gcEmptyArenaPoolLifespan * \
2896 1000) \
2897 JS_FreeArenaPool(&(pool)); \
2898 } \
2899 JS_END_MACRO
2900
2901 #ifdef JS_THREADSAFE
2902 js_RevokeGCLocalFreeLists(acx);
2903 #endif
2904
2905 /*
2906 * Release the stackPool's arenas if the stackPool has existed for
2907 * longer than the limit specified by gcEmptyArenaPoolLifespan.
2908 */
2909 FREE_OLD_ARENAS(acx->stackPool);
2910
2911 /*
2912 * Release the regexpPool's arenas based on the same criterion as for
2913 * the stackPool.
2914 */
2915 FREE_OLD_ARENAS(acx->regexpPool);
2916
2917 /*
2918 * Clear the double free list to release all the pre-allocated doubles.
2919 */
2920 acx->doubleFreeList = NULL;
2921 }
2922
2923 /*
2924 * Iterate frame chain and dormant chains.
2925 *
2926 * (NB: see comment on this whole "dormant" thing in js_Execute.)
2927 */
2928 fp = acx->fp;
2929 nextChain = acx->dormantFrameChain;
2930 if (!fp)
2931 goto next_chain;
2932
2933 /* The top frame must not be dormant. */
2934 JS_ASSERT(!fp->dormantNext);
2935 for (;;) {
2936 do {
2937 js_TraceStackFrame(trc, fp);
2938 } while ((fp = fp->down) != NULL);
2939
2940 next_chain:
2941 if (!nextChain)
2942 break;
2943 fp = nextChain;
2944 nextChain = nextChain->dormantNext;
2945 }
2946
2947 /* Mark other roots-by-definition in acx. */
2948 if (acx->globalObject)
2949 JS_CALL_OBJECT_TRACER(trc, acx->globalObject, "global object");
2950 TraceWeakRoots(trc, &acx->weakRoots);
2951 if (acx->throwing) {
2952 JS_CALL_VALUE_TRACER(trc, acx->exception, "exception");
2953 } else {
2954 /* Avoid keeping GC-ed junk stored in JSContext.exception. */
2955 acx->exception = JSVAL_NULL;
2956 }
2957 #if JS_HAS_LVALUE_RETURN
2958 if (acx->rval2set)
2959 JS_CALL_VALUE_TRACER(trc, acx->rval2, "rval2");
2960 #endif
2961
2962 for (sh = acx->stackHeaders; sh; sh = sh->down) {
2963 METER(trc->context->runtime->gcStats.stackseg++);
2964 METER(trc->context->runtime->gcStats.segslots += sh->nslots);
2965 TRACE_JSVALS(trc, sh->nslots, JS_STACK_SEGMENT(sh), "stack");
2966 }
2967
2968 if (acx->localRootStack)
2969 js_TraceLocalRoots(trc, acx->localRootStack);
2970
2971 for (tvr = acx->tempValueRooters; tvr; tvr = tvr->down) {
2972 switch (tvr->count) {
2973 case JSTVU_SINGLE:
2974 JS_SET_TRACING_NAME(trc, "tvr->u.value");
2975 js_CallValueTracerIfGCThing(trc, tvr->u.value);
2976 break;
2977 case JSTVU_TRACE:
2978 tvr->u.trace(trc, tvr);
2979 break;
2980 case JSTVU_SPROP:
2981 TRACE_SCOPE_PROPERTY(trc, tvr->u.sprop);
2982 break;
2983 case JSTVU_WEAK_ROOTS:
2984 TraceWeakRoots(trc, tvr->u.weakRoots);
2985 break;
2986 case JSTVU_PARSE_CONTEXT:
2987 js_TraceParseContext(trc, tvr->u.parseContext);
2988 break;
2989 case JSTVU_SCRIPT:
2990 js_TraceScript(trc, tvr->u.script);
2991 break;
2992 default:
2993 JS_ASSERT(tvr->count >= 0);
2994 TRACE_JSVALS(trc, tvr->count, tvr->u.array, "tvr->u.array");
2995 }
2996 }
2997
2998 if (acx->sharpObjectMap.depth > 0)
2999 js_TraceSharpMap(trc, &acx->sharpObjectMap);
3000 }
3001
3002 void
3003 js_TraceTraceMonitor(JSTracer *trc, JSTraceMonitor *tm)
3004 {
3005 if (IS_GC_MARKING_TRACER(trc))
3006 tm->recoveryDoublePoolPtr = tm->recoveryDoublePool;
3007 }
3008
3009 void
3010 js_TraceRuntime(JSTracer *trc, JSBool allAtoms)
3011 {
3012 JSRuntime *rt = trc->context->runtime;
3013 JSContext *iter, *acx;
3014
3015 JS_DHashTableEnumerate(&rt->gcRootsHash, gc_root_traversal, trc);
3016 if (rt->gcLocksHash)
3017 JS_DHashTableEnumerate(rt->gcLocksHash, gc_lock_traversal, trc);
3018 js_TraceAtomState(trc, allAtoms);
3019 js_TraceNativeEnumerators(trc);
3020 js_TraceRuntimeNumberState(trc);
3021
3022 iter = NULL;
3023 while ((acx = js_ContextIterator(rt, JS_TRUE, &iter)) != NULL)
3024 js_TraceContext(trc, acx);
3025
3026 if (rt->gcExtraRootsTraceOp)
3027 rt->gcExtraRootsTraceOp(trc, rt->gcExtraRootsData);
3028
3029 #ifdef JS_THREADSAFE
3030 /* Trace the loop table(s) which can contain pointers to code objects. */
3031 while ((acx = js_ContextIterator(rt, JS_FALSE, &iter)) != NULL) {
3032 if (!acx->thread)
3033 continue;
3034 js_TraceTraceMonitor(trc, &acx->thread->traceMonitor);
3035 }
3036 #else
3037 js_TraceTraceMonitor(trc, &rt->traceMonitor);
3038 #endif
3039 }
3040
3041 static void
3042 ProcessSetSlotRequest(JSContext *cx, JSSetSlotRequest *ssr)
3043 {
3044 JSObject *obj, *pobj;
3045 uint32 slot;
3046
3047 obj = ssr->obj;
3048 pobj = ssr->pobj;
3049 slot = ssr->slot;
3050
3051 while (pobj) {
3052 pobj = js_GetWrappedObject(cx, pobj);
3053 if (pobj == obj) {
3054 ssr->errnum = JSMSG_CYCLIC_VALUE;
3055 return;
3056 }
3057 pobj = JSVAL_TO_OBJECT(STOBJ_GET_SLOT(pobj, slot));
3058 }
3059
3060 pobj = ssr->pobj;
3061
3062 if (slot == JSSLOT_PROTO && OBJ_IS_NATIVE(obj)) {
3063 JSScope *scope, *newscope;
3064 JSObject *oldproto;
3065
3066 /* Check to see whether obj shares its prototype's scope. */
3067 scope = OBJ_SCOPE(obj);
3068 oldproto = STOBJ_GET_PROTO(obj);
3069 if (oldproto && OBJ_SCOPE(oldproto) == scope) {
3070 /* Either obj needs a new empty scope, or it should share pobj's. */
3071 if (!pobj ||
3072 !OBJ_IS_NATIVE(pobj) ||
3073 OBJ_GET_CLASS(cx, pobj) != STOBJ_GET_CLASS(oldproto)) {
3074 /*
3075 * With no proto and no scope of its own, obj is truly empty.
3076 *
3077 * If pobj is not native, obj needs its own empty scope -- it
3078 * should not continue to share oldproto's scope once oldproto
3079 * is not on obj's prototype chain. That would put properties
3080 * from oldproto's scope ahead of properties defined by pobj,
3081 * in lookup order.
3082 *
3083 * If pobj's class differs from oldproto's, we may need a new
3084 * scope to handle differences in private and reserved slots,
3085 * so we suboptimally but safely make one.
3086 */
3087 if (!js_GetMutableScope(cx, obj)) {
3088 ssr->errnum = JSMSG_OUT_OF_MEMORY;
3089 return;
3090 }
3091 } else if (OBJ_SCOPE(pobj) != scope) {
3092 newscope = (JSScope *) js_HoldObjectMap(cx, pobj->map);
3093 obj->map = &newscope->map;
3094 js_DropObjectMap(cx, &scope->map, obj);
3095 JS_TRANSFER_SCOPE_LOCK(cx, scope, newscope);
3096 }
3097 }
3098
3099 /*
3100 * Regenerate property cache shape ids for all of the scopes along the
3101 * old prototype chain, in case any property cache entries were filled
3102 * by looking up starting from obj.
3103 */
3104 while (oldproto && OBJ_IS_NATIVE(oldproto)) {
3105 scope = OBJ_SCOPE(oldproto);
3106 SCOPE_MAKE_UNIQUE_SHAPE(cx, scope);
3107 oldproto = STOBJ_GET_PROTO(scope->object);
3108 }
3109 }
3110
3111 /* Finally, do the deed. */
3112 STOBJ_SET_SLOT(obj, slot, OBJECT_TO_JSVAL(pobj));
3113 }
3114
3115 static void
3116 DestroyScriptsToGC(JSContext *cx, JSScript **listp)
3117 {
3118 JSScript *script;
3119
3120 while ((script = *listp) != NULL) {
3121 *listp = script->u.nextToGC;
3122 script->u.nextToGC = NULL;
3123 js_DestroyScript(cx, script);
3124 }
3125 }
3126
3127 /*
3128 * The gckind flag bit GC_LOCK_HELD indicates a call from js_NewGCThing with
3129 * rt->gcLock already held, so the lock should be kept on return.
3130 */
3131 void
3132 js_GC(JSContext *cx, JSGCInvocationKind gckind)
3133 {
3134 JSRuntime *rt;
3135 JSBool keepAtoms;
3136 JSGCCallback callback;
3137 uintN i, type;
3138 JSTracer trc;
3139 uint32 thingSize, indexLimit;
3140 JSGCArenaInfo *a, **ap, *emptyArenas;
3141 uint8 flags, *flagp;
3142 JSGCThing *thing, *freeList;
3143 JSGCArenaList *arenaList;
3144 JSBool allClear;
3145 #ifdef JS_THREADSAFE
3146 uint32 requestDebit;
3147 JSContext *acx, *iter;
3148 #endif
3149 #ifdef JS_GCMETER
3150 uint32 nlivearenas, nkilledarenas, nthings;
3151 #endif
3152
3153 JS_ASSERT_IF(gckind == GC_LAST_DITCH, !JS_ON_TRACE(cx));
3154 rt = cx->runtime;
3155 #ifdef JS_THREADSAFE
3156 /* Avoid deadlock. */
3157 JS_ASSERT(!JS_IS_RUNTIME_LOCKED(rt));
3158 #endif
3159
3160 if (gckind & GC_KEEP_ATOMS) {
3161 /*
3162 * The set slot request and last ditch GC kinds preserve all atoms and
3163 * weak roots.
3164 */
3165 keepAtoms = JS_TRUE;
3166 } else {
3167 /* Keep atoms when a suspended compile is running on another context. */
3168 keepAtoms = (rt->gcKeepAtoms != 0);
3169 JS_CLEAR_WEAK_ROOTS(&cx->weakRoots);
3170 }
3171
3172 /*
3173 * Don't collect garbage if the runtime isn't up, and cx is not the last
3174 * context in the runtime. The last context must force a GC, and nothing
3175 * should suppress that final collection or there may be shutdown leaks,
3176 * or runtime bloat until the next context is created.
3177 */
3178 if (rt->state != JSRTS_UP && gckind != GC_LAST_CONTEXT)
3179 return;
3180
3181 restart_at_beginning:
3182 /*
3183 * Let the API user decide to defer a GC if it wants to (unless this
3184 * is the last context). Invoke the callback regardless. Sample the
3185 * callback in case we are freely racing with a JS_SetGCCallback{,RT} on
3186 * another thread.
3187 */
3188 if (gckind != GC_SET_SLOT_REQUEST && (callback = rt->gcCallback)) {
3189 JSBool ok;
3190
3191 if (gckind & GC_LOCK_HELD)
3192 JS_UNLOCK_GC(rt);
3193 ok = callback(cx, JSGC_BEGIN);
3194 if (gckind & GC_LOCK_HELD)
3195 JS_LOCK_GC(rt);
3196 if (!ok && gckind != GC_LAST_CONTEXT) {
3197 /*
3198 * It's possible that we've looped back to this code from the 'goto
3199 * restart_at_beginning' below in the GC_SET_SLOT_REQUEST code and
3200 * that rt->gcLevel is now 0. Don't return without notifying!
3201 */
3202 if (rt->gcLevel == 0 && (gckind & GC_LOCK_HELD))
3203 JS_NOTIFY_GC_DONE(rt);
3204 return;
3205 }
3206 }
3207
3208 /* Lock out other GC allocator and collector invocations. */
3209 if (!(gckind & GC_LOCK_HELD))
3210 JS_LOCK_GC(rt);
3211
3212 METER(rt->gcStats.poke++);
3213 rt->gcPoke = JS_FALSE;
3214
3215 #ifdef JS_THREADSAFE
3216 JS_ASSERT(cx->thread->id == js_CurrentThreadId());
3217
3218 /* Bump gcLevel and return rather than nest on this thread. */
3219 if (rt->gcThread == cx->thread) {
3220 JS_ASSERT(rt->gcLevel > 0);
3221 rt->gcLevel++;
3222 METER_UPDATE_MAX(rt->gcStats.maxlevel, rt->gcLevel);
3223 if (!(gckind & GC_LOCK_HELD))
3224 JS_UNLOCK_GC(rt);
3225 return;
3226 }
3227
3228 /*
3229 * If we're in one or more requests (possibly on more than one context)
3230 * running on the current thread, indicate, temporarily, that all these
3231 * requests are inactive. If cx->thread is NULL, then cx is not using
3232 * the request model, and does not contribute to rt->requestCount.
3233 */
3234 requestDebit = 0;
3235 if (cx->thread) {
3236 JSCList *head, *link;
3237
3238 /*
3239 * Check all contexts on cx->thread->contextList for active requests,
3240 * counting each such context against requestDebit.
3241 */
3242 head = &cx->thread->contextList;
3243 for (link = head->next; link != head; link = link->next) {
3244 acx = CX_FROM_THREAD_LINKS(link);
3245 JS_ASSERT(acx->thread == cx->thread);
3246 if (acx->requestDepth)
3247 requestDebit++;
3248 }
3249 } else {
3250 /*
3251 * We assert, but check anyway, in case someone is misusing the API.
3252 * Avoiding the loop over all of rt's contexts is a win in the event
3253 * that the GC runs only on request-less contexts with null threads,
3254 * in a special thread such as might be used by the UI/DOM/Layout
3255 * "mozilla" or "main" thread in Mozilla-the-browser.
3256 */
3257 JS_ASSERT(cx->requestDepth == 0);
3258 if (cx->requestDepth)
3259 requestDebit = 1;
3260 }
3261 if (requestDebit) {
3262 JS_ASSERT(requestDebit <= rt->requestCount);
3263 rt->requestCount -= requestDebit;
3264 if (rt->requestCount == 0)
3265 JS_NOTIFY_REQUEST_DONE(rt);
3266 }
3267
3268 /* If another thread is already in GC, don't attempt GC; wait instead. */
3269 if (rt->gcLevel > 0) {
3270 /* Bump gcLevel to restart the current GC, so it finds new garbage. */
3271 rt->gcLevel++;
3272 METER_UPDATE_MAX(rt->gcStats.maxlevel, rt->gcLevel);
3273
3274 /* Wait for the other thread to finish, then resume our request. */
3275 while (rt->gcLevel > 0)
3276 JS_AWAIT_GC_DONE(rt);
3277 if (requestDebit)
3278 rt->requestCount += requestDebit;
3279 if (!(gckind & GC_LOCK_HELD))
3280 JS_UNLOCK_GC(rt);
3281 return;
3282 }
3283
3284 /* No other thread is in GC, so indicate that we're now in GC. */
3285 rt->gcLevel = 1;
3286 rt->gcThread = cx->thread;
3287
3288 /* Wait for all other requests to finish. */
3289 while (rt->requestCount > 0)
3290 JS_AWAIT_REQUEST_DONE(rt);
3291
3292 #else /* !JS_THREADSAFE */
3293
3294 /* Bump gcLevel and return rather than nest; the outer gc will restart. */
3295 rt->gcLevel++;
3296 METER_UPDATE_MAX(rt->gcStats.maxlevel, rt->gcLevel);
3297 if (rt->gcLevel > 1)
3298 return;
3299
3300 #endif /* !JS_THREADSAFE */
3301
3302 /*
3303 * Set rt->gcRunning here within the GC lock, and after waiting for any
3304 * active requests to end, so that new requests that try to JS_AddRoot,
3305 * JS_RemoveRoot, or JS_RemoveRootRT block in JS_BeginRequest waiting for
3306 * rt->gcLevel to drop to zero, while request-less calls to the *Root*
3307 * APIs block in js_AddRoot or js_RemoveRoot (see above in this file),
3308 * waiting for GC to finish.
3309 */
3310 rt->gcRunning = JS_TRUE;
3311
3312 if (gckind == GC_SET_SLOT_REQUEST) {
3313 JSSetSlotRequest *ssr;
3314
3315 while ((ssr = rt->setSlotRequests) != NULL) {
3316 rt->setSlotRequests = ssr->next;
3317 JS_UNLOCK_GC(rt);
3318 ssr->next = NULL;
3319 ProcessSetSlotRequest(cx, ssr);
3320 JS_LOCK_GC(rt);
3321 }
3322
3323 /*
3324 * We assume here that killing links to parent and prototype objects
3325 * does not create garbage (such objects typically are long-lived and
3326 * widely shared, e.g. global objects, Function.prototype, etc.). We
3327 * collect garbage only if a racing thread attempted GC and is waiting
3328 * for us to finish (gcLevel > 1) or if someone already poked us.
3329 */
3330 if (rt->gcLevel == 1 && !rt->gcPoke)
3331 goto done_running;
3332
3333 rt->gcLevel = 0;
3334 rt->gcPoke = JS_FALSE;
3335 rt->gcRunning = JS_FALSE;
3336 #ifdef JS_THREADSAFE
3337 rt->gcThread = NULL;
3338 rt->requestCount += requestDebit;
3339 #endif
3340 gckind = GC_LOCK_HELD;
3341 goto restart_at_beginning;
3342 }
3343
3344 JS_UNLOCK_GC(rt);
3345
3346 #ifdef JS_TRACER
3347 if (JS_ON_TRACE(cx))
3348 goto out;
3349 #endif
3350
3351 /* Reset malloc counter. */
3352 rt->gcMallocBytes = 0;
3353
3354 #ifdef JS_DUMP_SCOPE_METERS
3355 { extern void js_DumpScopeMeters(JSRuntime *rt);
3356 js_DumpScopeMeters(rt);
3357 }
3358 #endif
3359
3360 /* Clear property and JIT caches (only for cx->thread if JS_THREADSAFE). */
3361 js_FlushPropertyCache(cx);
3362 #ifdef JS_TRACER
3363 js_FlushJITCache(cx);
3364 js_FlushJITOracle(cx);
3365 #endif
3366
3367 /* Destroy eval'ed scripts. */
3368 DestroyScriptsToGC(cx, &JS_SCRIPTS_TO_GC(cx));
3369
3370 #ifdef JS_THREADSAFE
3371 /*
3372 * Clear thread-based caches. To avoid redundant clearing we unroll the
3373 * current thread's step.
3374 *
3375 * In case a JSScript wrapped within an object was finalized, we null
3376 * acx->thread->gsnCache.script and finish the cache's hashtable. Note
3377 * that js_DestroyScript, called from script_finalize, will have already
3378 * cleared cx->thread->gsnCache above during finalization, so we don't
3379 * have to here.
3380 */
3381 iter = NULL;
3382 while ((acx = js_ContextIterator(rt, JS_FALSE, &iter)) != NULL) {
3383 if (!acx->thread || acx->thread == cx->thread)
3384 continue;
3385 GSN_CACHE_CLEAR(&acx->thread->gsnCache);
3386 js_FlushPropertyCache(acx);
3387 #ifdef JS_TRACER
3388 js_FlushJITCache(acx);
3389 #endif
3390 DestroyScriptsToGC(cx, &acx->thread->scriptsToGC);
3391 }
3392 #else
3393 /* The thread-unsafe case just has to clear the runtime's GSN cache. */
3394 GSN_CACHE_CLEAR(&rt->gsnCache);
3395 #endif
3396
3397 restart:
3398 rt->gcNumber++;
3399 JS_ASSERT(!rt->gcUntracedArenaStackTop);
3400 JS_ASSERT(rt->gcTraceLaterCount == 0);
3401
3402 /* Reset the property cache's type id generator so we can compress ids. */
3403 rt->shapeGen = 0;
3404
3405 /*
3406 * Mark phase.
3407 */
3408 JS_TRACER_INIT(&trc, cx, NULL);
3409 rt->gcMarkingTracer = &trc;
3410 JS_ASSERT(IS_GC_MARKING_TRACER(&trc));
3411
3412 for (a = rt->gcDoubleArenaList.first; a; a = a->prev)
3413 a->u.hasMarkedDoubles = JS_FALSE;
3414
3415 js_TraceRuntime(&trc, keepAtoms);
3416 js_MarkScriptFilenames(rt, keepAtoms);
3417
3418 /*
3419 * Mark children of things that caused too deep recursion during the above
3420 * tracing.
3421 */
3422 TraceDelayedChildren(&trc);
3423
3424 JS_ASSERT(!cx->insideGCMarkCallback);
3425 if (rt->gcCallback) {
3426 cx->insideGCMarkCallback = JS_TRUE;
3427 (void) rt->gcCallback(cx, JSGC_MARK_END);
3428 JS_ASSERT(cx->insideGCMarkCallback);
3429 cx->insideGCMarkCallback = JS_FALSE;
3430 }
3431 JS_ASSERT(rt->gcTraceLaterCount == 0);
3432
3433 rt->gcMarkingTracer = NULL;
3434
3435 /*
3436 * Sweep phase.
3437 *
3438 * Finalize as we sweep, outside of rt->gcLock but with rt->gcRunning set
3439 * so that any attempt to allocate a GC-thing from a finalizer will fail,
3440 * rather than nest badly and leave the unmarked newborn to be swept.
3441 *
3442 * We first sweep atom state so we can use js_IsAboutToBeFinalized on
3443 * JSString or jsdouble held in a hashtable to check if the hashtable
3444 * entry can be freed. Note that even after the entry is freed, JSObject
3445 * finalizers can continue to access the corresponding jsdouble* and
3446 * JSString* assuming that they are unique. This works since the
3447 * atomization API must not be called during GC.
3448 */
3449 js_SweepAtomState(cx);
3450
3451 /* Finalize iterator states before the objects they iterate over. */
3452 CloseNativeIterators(cx);
3453
3454 /* Finalize watch points associated with unreachable objects. */
3455 js_SweepWatchPoints(cx);
3456
3457 #ifdef DEBUG
3458 /* Save the pre-sweep count of scope-mapped properties. */
3459 rt->liveScopePropsPreSweep = rt->liveScopeProps;
3460 #endif
3461
3462 /*
3463 * Here we need to ensure that JSObject instances are finalized before GC-
3464      * allocated JSString and jsdouble instances, so an object's finalizer can
3465      * access them even if they are about to be freed. For that we finalize the
3466 * list containing JSObject first since the static assert at the beginning
3467 * of the file guarantees that JSString and jsdouble instances are
3468 * allocated from a different list.
3469 */
3470 emptyArenas = NULL;
3471 for (i = 0; i < GC_NUM_FREELISTS; i++) {
3472 arenaList = &rt->gcArenaList[i == 0
3473 ? GC_FREELIST_INDEX(sizeof(JSObject))
3474 : i == GC_FREELIST_INDEX(sizeof(JSObject))
3475 ? 0
3476 : i];
3477 ap = &arenaList->last;
3478 if (!(a = *ap))
3479 continue;
3480
3481 JS_ASSERT(arenaList->lastCount > 0);
3482 arenaList->freeList = NULL;
3483 freeList = NULL;
3484 thingSize = arenaList->thingSize;
3485 indexLimit = THINGS_PER_ARENA(thingSize);
3486 flagp = THING_FLAGP(a, arenaList->lastCount - 1);
3487 METER((nlivearenas = 0, nkilledarenas = 0, nthings = 0));
3488 for (;;) {
3489 JS_ASSERT(a->prevUntracedPage == 0);
3490 JS_ASSERT(a->u.untracedThings == 0);
3491 allClear = JS_TRUE;
3492 do {
3493 flags = *flagp;
3494 if (flags & (GCF_MARK | GCF_LOCK)) {
3495 *flagp &= ~GCF_MARK;
3496 allClear = JS_FALSE;
3497 METER(nthings++);
3498 } else {
3499 thing = FLAGP_TO_THING(flagp, thingSize);
3500 if (!(flags & GCF_FINAL)) {
3501 /*
3502 * Call the finalizer with GCF_FINAL ORed into flags.
3503 */
3504 *flagp = (uint8)(flags | GCF_FINAL);
3505 type = flags & GCF_TYPEMASK;
3506 switch (type) {
3507 case GCX_OBJECT:
3508 js_FinalizeObject(cx, (JSObject *) thing);
3509 break;
3510 case GCX_DOUBLE:
3511 /* Do nothing. */
3512 break;
3513 #if JS_HAS_XML_SUPPORT
3514 case GCX_XML:
3515 js_FinalizeXML(cx, (JSXML *) thing);
3516 break;
3517 #endif
3518 default:
3519 JS_ASSERT(type == GCX_STRING ||
3520 type - GCX_EXTERNAL_STRING <
3521 GCX_NTYPES - GCX_EXTERNAL_STRING);
3522 js_FinalizeStringRT(rt, (JSString *) thing,
3523 (intN) (type -
3524 GCX_EXTERNAL_STRING),
3525 cx);
3526 break;
3527 }
3528 #ifdef DEBUG
3529 memset(thing, JS_FREE_PATTERN, thingSize);
3530 #endif
3531 }
3532 thing->flagp = flagp;
3533 thing->next = freeList;
3534 freeList = thing;
3535 }
3536 } while (++flagp != THING_FLAGS_END(a