/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sw=4 et tw=78:
 *
 * ***** BEGIN LICENSE BLOCK *****
 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
 *
 * The contents of this file are subject to the Mozilla Public License Version
 * 1.1 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * http://www.mozilla.org/MPL/
 *
 * Software distributed under the License is distributed on an "AS IS" basis,
 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
 * for the specific language governing rights and limitations under the
 * License.
 *
 * The Original Code is Mozilla Communicator client code, released
 * March 31, 1998.
 *
 * The Initial Developer of the Original Code is
 * Netscape Communications Corporation.
 * Portions created by the Initial Developer are Copyright (C) 1998
 * the Initial Developer. All Rights Reserved.
 *
 * Contributor(s):
 *
 * Alternatively, the contents of this file may be used under the terms of
 * either of the GNU General Public License Version 2 or later (the "GPL"),
 * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
 * in which case the provisions of the GPL or the LGPL are applicable instead
 * of those above. If you wish to allow use of your version of this file only
 * under the terms of either the GPL or the LGPL, and not to allow others to
 * use your version of this file under the terms of the MPL, indicate your
 * decision by deleting the provisions above and replace them with the notice
 * and other provisions required by the GPL or the LGPL. If you do not delete
 * the provisions above, a recipient may use your version of this file under
 * the terms of any one of the MPL, the GPL or the LGPL.
 *
 * ***** END LICENSE BLOCK ***** */

/*
 * JS symbol tables.
 */
#include "jsstddef.h"
#include <stdlib.h>
#include <string.h>
#include "jstypes.h"
#include "jsarena.h"
#include "jsbit.h"
#include "jsclist.h"
#include "jsdhash.h"
#include "jsutil.h" /* Added by JSIFY */
#include "jsapi.h"
#include "jsatom.h"
#include "jscntxt.h"
#include "jsdbgapi.h"
#include "jslock.h"
#include "jsnum.h"
#include "jsscope.h"
#include "jsstr.h"
#include "jsarray.h"

JSScope *
js_GetMutableScope(JSContext *cx, JSObject *obj)
{
    JSScope *scope, *newscope;
    JSClass *clasp;
    uint32 freeslot;

    scope = OBJ_SCOPE(obj);
    JS_ASSERT(JS_IS_SCOPE_LOCKED(cx, scope));
    if (scope->object == obj)
        return scope;

    /*
     * Compile-time block objects each have their own scope, created at
     * birth, and runtime clones of block objects are never mutated.
     */
    JS_ASSERT(STOBJ_GET_CLASS(obj) != &js_BlockClass);
    newscope = js_NewScope(cx, scope->map.ops, LOCKED_OBJ_GET_CLASS(obj), obj);
    if (!newscope)
        return NULL;
    JS_LOCK_SCOPE(cx, newscope);
    obj->map = &newscope->map;

    JS_ASSERT(newscope->freeslot == JSSLOT_FREE(STOBJ_GET_CLASS(obj)));
    clasp = STOBJ_GET_CLASS(obj);
    if (clasp->reserveSlots) {
        freeslot = JSSLOT_FREE(clasp) + clasp->reserveSlots(cx, obj);
        if (freeslot > STOBJ_NSLOTS(obj))
            freeslot = STOBJ_NSLOTS(obj);
        if (newscope->freeslot < freeslot)
            newscope->freeslot = freeslot;
    }
    JS_TRANSFER_SCOPE_LOCK(cx, scope, newscope);
    js_DropScope(cx, scope, obj);
    return newscope;
}

/*
 * JSScope uses multiplicative hashing, _a la_ jsdhash.[ch], but specialized
 * to minimize footprint. But if a scope has fewer than SCOPE_HASH_THRESHOLD
 * entries, we use linear search and avoid allocating scope->table.
 */
#define SCOPE_HASH_THRESHOLD 6
#define MIN_SCOPE_SIZE_LOG2 4
#define MIN_SCOPE_SIZE JS_BIT(MIN_SCOPE_SIZE_LOG2)
#define SCOPE_TABLE_NBYTES(n) ((n) * sizeof(JSScopeProperty *))

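/*
 * Illustrative note on the values above: a scope with fewer than
 * SCOPE_HASH_THRESHOLD (6) live properties is searched linearly along its
 * lastProp ancestor line, and the first hash table allocated for a scope has
 * MIN_SCOPE_SIZE == JS_BIT(4) == 16 slots.
 */
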
static void
InitMinimalScope(JSContext *cx, JSScope *scope)
{
    js_LeaveTraceIfGlobalObject(cx, scope->object);
    scope->shape = 0;
    scope->hashShift = JS_DHASH_BITS - MIN_SCOPE_SIZE_LOG2;
    scope->entryCount = scope->removedCount = 0;
    scope->table = NULL;
    scope->lastProp = NULL;
}

static JSBool
CreateScopeTable(JSContext *cx, JSScope *scope, JSBool report)
{
    int sizeLog2;
    JSScopeProperty *sprop, **spp;

    JS_ASSERT(!scope->table);
    JS_ASSERT(scope->lastProp);

    if (scope->entryCount > SCOPE_HASH_THRESHOLD) {
        /*
         * Either we're creating a table for a large scope that was populated
         * via property cache hit logic under JSOP_INITPROP, JSOP_SETNAME, or
         * JSOP_SETPROP; or else calloc failed at least once already. In any
         * event, let's try to grow, overallocating to hold at least twice the
         * current population.
         */
        sizeLog2 = JS_CeilingLog2(2 * scope->entryCount);
        scope->hashShift = JS_DHASH_BITS - sizeLog2;
    } else {
        JS_ASSERT(scope->hashShift == JS_DHASH_BITS - MIN_SCOPE_SIZE_LOG2);
        sizeLog2 = MIN_SCOPE_SIZE_LOG2;
    }

    scope->table = (JSScopeProperty **)
        calloc(JS_BIT(sizeLog2), sizeof(JSScopeProperty *));
    if (!scope->table) {
        if (report)
            JS_ReportOutOfMemory(cx);
        return JS_FALSE;
    }
    js_UpdateMallocCounter(cx, JS_BIT(sizeLog2) * sizeof(JSScopeProperty *));

    scope->hashShift = JS_DHASH_BITS - sizeLog2;
    for (sprop = scope->lastProp; sprop; sprop = sprop->parent) {
        spp = js_SearchScope(scope, sprop->id, JS_TRUE);
        SPROP_STORE_PRESERVING_COLLISION(spp, sprop);
    }
    return JS_TRUE;
}

JSScope *
js_NewScope(JSContext *cx, JSObjectOps *ops, JSClass *clasp, JSObject *obj)
{
    JS_ASSERT(OPS_IS_NATIVE(ops));
    JS_ASSERT(obj);

    JSScope *scope = (JSScope *) JS_malloc(cx, sizeof(JSScope));
    if (!scope)
        return NULL;

    scope->map.ops = ops;
    scope->object = obj;
    scope->nrefs = 1;
    scope->freeslot = JSSLOT_FREE(clasp);
    scope->flags = 0;
    InitMinimalScope(cx, scope);

#ifdef JS_THREADSAFE
    js_InitTitle(cx, &scope->title);
#endif
    JS_RUNTIME_METER(cx->runtime, liveScopes);
    JS_RUNTIME_METER(cx->runtime, totalScopes);
    return scope;
}

#if defined DEBUG || defined JS_DUMP_PROPTREE_STATS
# include "jsprf.h"
# define LIVE_SCOPE_METER(cx,expr) JS_LOCK_RUNTIME_VOID(cx->runtime,expr)
#else
# define LIVE_SCOPE_METER(cx,expr) /* nothing */
#endif

void
js_DestroyScope(JSContext *cx, JSScope *scope)
{
#ifdef JS_THREADSAFE
    js_FinishTitle(cx, &scope->title);
#endif
    if (scope->table)
        JS_free(cx, scope->table);

    LIVE_SCOPE_METER(cx, cx->runtime->liveScopeProps -= scope->entryCount);
    JS_RUNTIME_UNMETER(cx->runtime, liveScopes);
    JS_free(cx, scope);
}

void
js_HoldScope(JSScope *scope)
{
    JS_ASSERT(scope->nrefs >= 0);
    JS_ATOMIC_INCREMENT(&scope->nrefs);
}

JSBool
js_DropScope(JSContext *cx, JSScope *scope, JSObject *obj)
{
    JS_ASSERT(scope->nrefs > 0);
    JS_ATOMIC_DECREMENT(&scope->nrefs);

    if (scope->nrefs == 0) {
        js_DestroyScope(cx, scope);
        return false;
    }
    if (scope->object == obj)
        scope->object = NULL;
    return true;
}

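/*
 * Note: a scope can be shared by more than one object; scope->object names
 * the owning object and nrefs counts the sharers. js_GetMutableScope above
 * gives an object a scope of its own before mutation, and js_DropScope both
 * releases a reference and clears scope->object when the owner drops it.
 */
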
#ifdef JS_DUMP_PROPTREE_STATS
typedef struct JSScopeStats {
    jsrefcount searches;
    jsrefcount hits;
    jsrefcount misses;
    jsrefcount hashes;
    jsrefcount steps;
    jsrefcount stepHits;
    jsrefcount stepMisses;
    jsrefcount adds;
    jsrefcount redundantAdds;
    jsrefcount addFailures;
    jsrefcount changeFailures;
    jsrefcount compresses;
    jsrefcount grows;
    jsrefcount removes;
    jsrefcount removeFrees;
    jsrefcount uselessRemoves;
    jsrefcount shrinks;
} JSScopeStats;

JS_FRIEND_DATA(JSScopeStats) js_scope_stats = {0};

# define METER(x) JS_ATOMIC_INCREMENT(&js_scope_stats.x)
#else
# define METER(x) /* nothing */
#endif

JS_STATIC_ASSERT(sizeof(JSHashNumber) == 4);
JS_STATIC_ASSERT(sizeof(jsid) == JS_BYTES_PER_WORD);

#if JS_BYTES_PER_WORD == 4
# define HASH_ID(id) ((JSHashNumber)(id))
#elif JS_BYTES_PER_WORD == 8
# define HASH_ID(id) ((JSHashNumber)(id) ^ (JSHashNumber)((id) >> 32))
#else
# error "Unsupported configuration"
#endif

/*
 * Double hashing needs the second hash code to be relatively prime to table
 * size, so we simply make hash2 odd. The inputs to multiplicative hash are
 * the golden ratio, expressed as a fixed-point 32 bit fraction, and the id
 * itself.
 */
#define SCOPE_HASH0(id) (HASH_ID(id) * JS_GOLDEN_RATIO)
#define SCOPE_HASH1(hash0,shift) ((hash0) >> (shift))
#define SCOPE_HASH2(hash0,log2,shift) ((((hash0) << (log2)) >> (shift)) | 1)

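/*
 * Worked example of the macros above (explanatory note): for a table of
 * JS_BIT(sizeLog2) slots, hashShift == JS_DHASH_BITS - sizeLog2, so
 * SCOPE_HASH1 yields the top sizeLog2 bits of hash0 (the primary index) and
 * SCOPE_HASH2 yields the next sizeLog2 bits, forced odd so the probe stride
 * is relatively prime to the power-of-two table size. E.g. with
 * sizeLog2 == 4 (a 16-slot table), hashShift == 28, hash1 is bits 31..28 of
 * hash0 and hash2 is (bits 27..24 of hash0) | 1.
 */
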
JS_FRIEND_API(JSScopeProperty **)
js_SearchScope(JSScope *scope, jsid id, JSBool adding)
{
    JSHashNumber hash0, hash1, hash2;
    int hashShift, sizeLog2;
    JSScopeProperty *stored, *sprop, **spp, **firstRemoved;
    uint32 sizeMask;

    METER(searches);
    if (!scope->table) {
        /* Not enough properties to justify hashing: search from lastProp. */
        JS_ASSERT(!SCOPE_HAD_MIDDLE_DELETE(scope));
        for (spp = &scope->lastProp; (sprop = *spp); spp = &sprop->parent) {
            if (sprop->id == id) {
                METER(hits);
                return spp;
            }
        }
        METER(misses);
        return spp;
    }

    /* Compute the primary hash address. */
    METER(hashes);
    hash0 = SCOPE_HASH0(id);
    hashShift = scope->hashShift;
    hash1 = SCOPE_HASH1(hash0, hashShift);
    spp = scope->table + hash1;

    /* Miss: return space for a new entry. */
    stored = *spp;
    if (SPROP_IS_FREE(stored)) {
        METER(misses);
        return spp;
    }

    /* Hit: return entry. */
    sprop = SPROP_CLEAR_COLLISION(stored);
    if (sprop && sprop->id == id) {
        METER(hits);
        return spp;
    }

    /* Collision: double hash. */
    sizeLog2 = JS_DHASH_BITS - hashShift;
    hash2 = SCOPE_HASH2(hash0, sizeLog2, hashShift);
    sizeMask = JS_BITMASK(sizeLog2);

    /* Save the first removed entry pointer so we can recycle it if adding. */
    if (SPROP_IS_REMOVED(stored)) {
        firstRemoved = spp;
    } else {
        firstRemoved = NULL;
        if (adding && !SPROP_HAD_COLLISION(stored))
            SPROP_FLAG_COLLISION(spp, sprop);
    }

    for (;;) {
        METER(steps);
        hash1 -= hash2;
        hash1 &= sizeMask;
        spp = scope->table + hash1;

        stored = *spp;
        if (SPROP_IS_FREE(stored)) {
            METER(stepMisses);
            return (adding && firstRemoved) ? firstRemoved : spp;
        }

        sprop = SPROP_CLEAR_COLLISION(stored);
        if (sprop && sprop->id == id) {
            METER(stepHits);
            return spp;
        }

        if (SPROP_IS_REMOVED(stored)) {
            if (!firstRemoved)
                firstRemoved = spp;
        } else {
            if (adding && !SPROP_HAD_COLLISION(stored))
                SPROP_FLAG_COLLISION(spp, sprop);
        }
    }

    /* NOTREACHED */
    return NULL;
}

static JSBool
ChangeScope(JSContext *cx, JSScope *scope, int change)
{
    int oldlog2, newlog2;
    uint32 oldsize, newsize, nbytes;
    JSScopeProperty **table, **oldtable, **spp, **oldspp, *sprop;

    if (!scope->table)
        return CreateScopeTable(cx, scope, JS_TRUE);

    /* Grow, shrink, or compress by changing scope->table. */
    oldlog2 = JS_DHASH_BITS - scope->hashShift;
    newlog2 = oldlog2 + change;
    oldsize = JS_BIT(oldlog2);
    newsize = JS_BIT(newlog2);
    nbytes = SCOPE_TABLE_NBYTES(newsize);
    table = (JSScopeProperty **) calloc(nbytes, 1);
    if (!table) {
        JS_ReportOutOfMemory(cx);
        return JS_FALSE;
    }

    /* Now that we have a new table allocated, update scope members. */
    scope->hashShift = JS_DHASH_BITS - newlog2;
    scope->removedCount = 0;
    oldtable = scope->table;
    scope->table = table;

    /* Treat the above calloc as a JS_malloc, to match CreateScopeTable. */
    cx->runtime->gcMallocBytes += nbytes;

    /* Copy only live entries, leaving removed and free ones behind. */
    for (oldspp = oldtable; oldsize != 0; oldspp++) {
        sprop = SPROP_FETCH(oldspp);
        if (sprop) {
            spp = js_SearchScope(scope, sprop->id, JS_TRUE);
            JS_ASSERT(SPROP_IS_FREE(*spp));
            *spp = sprop;
        }
        oldsize--;
    }

    /* Finally, free the old table storage. */
    JS_free(cx, oldtable);
    return JS_TRUE;
}

/*
 * Take care to exclude the mark bits in case we're called from the GC.
 */
#define SPROP_FLAGS_NOT_MATCHED (SPROP_MARK | SPROP_FLAG_SHAPE_REGEN)

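/*
 * Note: both js_HashScopeProperty and SPROP_MATCH_PARAMS_AFTER_ID below mask
 * sprop->flags with ~SPROP_FLAGS_NOT_MATCHED, so a node's GC mark and
 * shape-regeneration bits never affect its identity in the property tree.
 */
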
static JSDHashNumber
js_HashScopeProperty(JSDHashTable *table, const void *key)
{
    const JSScopeProperty *sprop = (const JSScopeProperty *)key;
    JSDHashNumber hash;
    JSPropertyOp gsop;

    /* Accumulate from least to most random so the low bits are most random. */
    hash = 0;
    gsop = sprop->getter;
    if (gsop)
        hash = JS_ROTATE_LEFT32(hash, 4) ^ (jsword)gsop;
    gsop = sprop->setter;
    if (gsop)
        hash = JS_ROTATE_LEFT32(hash, 4) ^ (jsword)gsop;

    hash = JS_ROTATE_LEFT32(hash, 4)
           ^ (sprop->flags & ~SPROP_FLAGS_NOT_MATCHED);

    hash = JS_ROTATE_LEFT32(hash, 4) ^ sprop->attrs;
    hash = JS_ROTATE_LEFT32(hash, 4) ^ sprop->shortid;
    hash = JS_ROTATE_LEFT32(hash, 4) ^ sprop->slot;
    hash = JS_ROTATE_LEFT32(hash, 4) ^ sprop->id;
    return hash;
}

#define SPROP_MATCH(sprop, child)                                            \
    SPROP_MATCH_PARAMS(sprop, (child)->id, (child)->getter, (child)->setter, \
                       (child)->slot, (child)->attrs, (child)->flags,        \
                       (child)->shortid)

#define SPROP_MATCH_PARAMS(sprop, aid, agetter, asetter, aslot, aattrs,      \
                           aflags, ashortid)                                 \
    ((sprop)->id == (aid) &&                                                 \
     SPROP_MATCH_PARAMS_AFTER_ID(sprop, agetter, asetter, aslot, aattrs,     \
                                 aflags, ashortid))

#define SPROP_MATCH_PARAMS_AFTER_ID(sprop, agetter, asetter, aslot, aattrs,  \
                                    aflags, ashortid)                        \
    ((sprop)->getter == (agetter) &&                                         \
     (sprop)->setter == (asetter) &&                                         \
     (sprop)->slot == (aslot) &&                                             \
     (sprop)->attrs == (aattrs) &&                                           \
     (((sprop)->flags ^ (aflags)) & ~SPROP_FLAGS_NOT_MATCHED) == 0 &&        \
     (sprop)->shortid == (ashortid))

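/*
 * These matchers are used in three places: js_MatchScopeProperty below wraps
 * SPROP_MATCH as the jsdhash match callback for the property tree tables,
 * GetPropertyTreeChild scans kid lists with SPROP_MATCH, and
 * js_AddScopeProperty uses SPROP_MATCH_PARAMS_AFTER_ID directly to detect
 * redundant adds.
 */
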
static JSBool
js_MatchScopeProperty(JSDHashTable *table,
                      const JSDHashEntryHdr *hdr,
                      const void *key)
{
    const JSPropertyTreeEntry *entry = (const JSPropertyTreeEntry *)hdr;
    const JSScopeProperty *sprop = entry->child;
    const JSScopeProperty *kprop = (const JSScopeProperty *)key;

    return SPROP_MATCH(sprop, kprop);
}

static const JSDHashTableOps PropertyTreeHashOps = {
    JS_DHashAllocTable,
    JS_DHashFreeTable,
    js_HashScopeProperty,
    js_MatchScopeProperty,
    JS_DHashMoveEntryStub,
    JS_DHashClearEntryStub,
    JS_DHashFinalizeStub,
    NULL
};

/*
 * A property tree node on rt->propertyFreeList overlays the following prefix
 * struct on JSScopeProperty.
 */
typedef struct FreeNode {
    jsid id;
    JSScopeProperty *next;
    JSScopeProperty **prevp;
} FreeNode;

#define FREENODE(sprop) ((FreeNode *) (sprop))

#define FREENODE_INSERT(list, sprop)                                         \
    JS_BEGIN_MACRO                                                           \
        FREENODE(sprop)->next = (list);                                      \
        FREENODE(sprop)->prevp = &(list);                                    \
        if (list)                                                            \
            FREENODE(list)->prevp = &FREENODE(sprop)->next;                  \
        (list) = (sprop);                                                    \
    JS_END_MACRO

#define FREENODE_REMOVE(sprop)                                               \
    JS_BEGIN_MACRO                                                           \
        *FREENODE(sprop)->prevp = FREENODE(sprop)->next;                     \
        if (FREENODE(sprop)->next)                                           \
            FREENODE(FREENODE(sprop)->next)->prevp = FREENODE(sprop)->prevp; \
    JS_END_MACRO

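/*
 * Note: a freed JSScopeProperty keeps its arena storage; FREENODE_INSERT
 * threads it onto the free list through the next/prevp fields of the
 * overlaid FreeNode, and the back-pointing prevp field lets FREENODE_REMOVE
 * unlink a node from anywhere in the list in O(1). js_SweepScopeProperties
 * relies on a null id to recognize nodes already on the freelist.
 */
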
/* NB: Called with rt->gcLock held. */
static JSScopeProperty *
NewScopeProperty(JSRuntime *rt)
{
    JSScopeProperty *sprop;

    sprop = rt->propertyFreeList;
    if (sprop) {
        FREENODE_REMOVE(sprop);
    } else {
        JS_ARENA_ALLOCATE_CAST(sprop, JSScopeProperty *,
                               &rt->propertyArenaPool,
                               sizeof(JSScopeProperty));
        if (!sprop)
            return NULL;
    }

    JS_RUNTIME_METER(rt, livePropTreeNodes);
    JS_RUNTIME_METER(rt, totalPropTreeNodes);
    return sprop;
}

#define CHUNKY_KIDS_TAG ((jsuword)1)
#define KIDS_IS_CHUNKY(kids) ((jsuword)(kids) & CHUNKY_KIDS_TAG)
#define KIDS_TO_CHUNK(kids) ((PropTreeKidsChunk *)                           \
                             ((jsuword)(kids) & ~CHUNKY_KIDS_TAG))
#define CHUNK_TO_KIDS(chunk) ((JSScopeProperty *)                            \
                              ((jsuword)(chunk) | CHUNKY_KIDS_TAG))
#define MAX_KIDS_PER_CHUNK 10
#define CHUNK_HASH_THRESHOLD 30

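/*
 * Note on the thresholds above: a parent's kids are stored in chunks of
 * MAX_KIDS_PER_CHUNK (10) entries. Once a linear scan in GetPropertyTreeChild
 * would cover CHUNK_HASH_THRESHOLD (30) or more kids, that function lazily
 * builds a per-kid-list hash table (chunk->table, via HashChunks) so later
 * lookups need not walk the chunk list.
 */
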
typedef struct PropTreeKidsChunk PropTreeKidsChunk;

struct PropTreeKidsChunk {
    JSScopeProperty *kids[MAX_KIDS_PER_CHUNK];
    JSDHashTable *table;
    PropTreeKidsChunk *next;
};

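/*
 * Note: parent->kids is a tagged pointer. With CHUNKY_KIDS_TAG (the low bit)
 * clear it points directly at a single JSScopeProperty kid; with the bit set
 * it points (via KIDS_TO_CHUNK) at the first PropTreeKidsChunk in a list.
 * This relies on chunk allocations being at least 2-byte aligned, which
 * NewPropTreeKidsChunk asserts below.
 */
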
static PropTreeKidsChunk *
NewPropTreeKidsChunk(JSRuntime *rt)
{
    PropTreeKidsChunk *chunk;

    chunk = (PropTreeKidsChunk *) calloc(1, sizeof *chunk);
    if (!chunk)
        return NULL;
    JS_ASSERT(((jsuword)chunk & CHUNKY_KIDS_TAG) == 0);
    JS_RUNTIME_METER(rt, propTreeKidsChunks);
    return chunk;
}

static void
DestroyPropTreeKidsChunk(JSRuntime *rt, PropTreeKidsChunk *chunk)
{
    JS_RUNTIME_UNMETER(rt, propTreeKidsChunks);
    if (chunk->table)
        JS_DHashTableDestroy(chunk->table);
    free(chunk);
}

/* NB: Called with rt->gcLock held. */
static JSBool
InsertPropertyTreeChild(JSRuntime *rt, JSScopeProperty *parent,
                        JSScopeProperty *child, PropTreeKidsChunk *sweptChunk)
{
    JSDHashTable *table;
    JSPropertyTreeEntry *entry;
    JSScopeProperty **childp, *kids, *sprop;
    PropTreeKidsChunk *chunk, **chunkp;
    uintN i;

    JS_ASSERT(!parent || child->parent != parent);

    if (!parent) {
        table = &rt->propertyTreeHash;
        entry = (JSPropertyTreeEntry *)
                JS_DHashTableOperate(table, child, JS_DHASH_ADD);
        if (!entry)
            return JS_FALSE;
        childp = &entry->child;
        sprop = *childp;
        if (!sprop) {
            *childp = child;
        } else {
            /*
             * A "Duplicate child" case.
             *
             * We can't do away with child, as at least one live scope entry
             * still points at it. What's more, that scope's lastProp chains
             * through an ancestor line to reach child, and js_Enumerate and
             * others count on this linkage. We must leave child out of the
             * hash table, and not require it to be there when we eventually
             * GC it (see RemovePropertyTreeChild, below).
             *
             * It is necessary to leave the duplicate child out of the hash
             * table to preserve entry uniqueness. It is safe to leave the
             * child out of the hash table (unlike the duplicate child cases
             * below), because the child's parent link will be null, which
             * can't dangle.
             */
            JS_ASSERT(sprop != child && SPROP_MATCH(sprop, child));
            JS_RUNTIME_METER(rt, duplicatePropTreeNodes);
        }
    } else {
        childp = &parent->kids;
        kids = *childp;
        if (kids) {
            if (KIDS_IS_CHUNKY(kids)) {
                chunk = KIDS_TO_CHUNK(kids);

                table = chunk->table;
                if (table) {
                    entry = (JSPropertyTreeEntry *)
                            JS_DHashTableOperate(table, child, JS_DHASH_ADD);
                    if (!entry)
                        return JS_FALSE;
                    if (!entry->child) {
                        entry->child = child;
                        while (chunk->next)
                            chunk = chunk->next;
                        for (i = 0; i < MAX_KIDS_PER_CHUNK; i++) {
                            childp = &chunk->kids[i];
                            sprop = *childp;
                            if (!sprop)
                                goto insert;
                        }
                        chunkp = &chunk->next;
                        goto new_chunk;
                    }
                }

                do {
                    for (i = 0; i < MAX_KIDS_PER_CHUNK; i++) {
                        childp = &chunk->kids[i];
                        sprop = *childp;
                        if (!sprop)
                            goto insert;

                        JS_ASSERT(sprop != child);
                        if (SPROP_MATCH(sprop, child)) {
                            /*
                             * Duplicate child, see comment above. In this
                             * case, we must let the duplicate be inserted at
                             * this level in the tree, so we keep iterating,
                             * looking for an empty slot in which to insert.
                             */
                            JS_ASSERT(sprop != child);
                            JS_RUNTIME_METER(rt, duplicatePropTreeNodes);
                        }
                    }
                    chunkp = &chunk->next;
                } while ((chunk = *chunkp) != NULL);

              new_chunk:
                if (sweptChunk) {
                    chunk = sweptChunk;
                } else {
                    chunk = NewPropTreeKidsChunk(rt);
                    if (!chunk)
                        return JS_FALSE;
                }
                *chunkp = chunk;
                childp = &chunk->kids[0];
            } else {
                sprop = kids;
                JS_ASSERT(sprop != child);
                if (SPROP_MATCH(sprop, child)) {
                    /*
                     * Duplicate child, see comment above. Once again, we
                     * must let duplicates created by deletion pile up in a
                     * kids-chunk-list, in order to find them when sweeping
                     * and thereby avoid dangling parent pointers.
                     */
                    JS_RUNTIME_METER(rt, duplicatePropTreeNodes);
                }
                if (sweptChunk) {
                    chunk = sweptChunk;
                } else {
                    chunk = NewPropTreeKidsChunk(rt);
                    if (!chunk)
                        return JS_FALSE;
                }
                parent->kids = CHUNK_TO_KIDS(chunk);
                chunk->kids[0] = sprop;
                childp = &chunk->kids[1];
            }
        }
      insert:
        *childp = child;
    }

    child->parent = parent;
    return JS_TRUE;
}

/* NB: Called with rt->gcLock held. */
static PropTreeKidsChunk *
RemovePropertyTreeChild(JSRuntime *rt, JSScopeProperty *child)
{
    PropTreeKidsChunk *freeChunk;
    JSScopeProperty *parent, *kids, *kid;
    JSDHashTable *table;
    PropTreeKidsChunk *list, *chunk, **chunkp, *lastChunk;
    uintN i, j;
    JSPropertyTreeEntry *entry;

    freeChunk = NULL;
    parent = child->parent;
    if (!parent) {
        /*
         * Don't remove child if it is not in rt->propertyTreeHash, but only
         * matches a root child in the table that has compatible members. See
         * the "Duplicate child" comments in InsertPropertyTreeChild, above.
         */
        table = &rt->propertyTreeHash;
    } else {
        kids = parent->kids;
        if (KIDS_IS_CHUNKY(kids)) {
            list = chunk = KIDS_TO_CHUNK(kids);
            chunkp = &list;
            table = chunk->table;

            do {
                for (i = 0; i < MAX_KIDS_PER_CHUNK; i++) {
                    if (chunk->kids[i] == child) {
                        lastChunk = chunk;
                        if (!lastChunk->next) {
                            j = i + 1;
                        } else {
                            j = 0;
                            do {
                                chunkp = &lastChunk->next;
                                lastChunk = *chunkp;
                            } while (lastChunk->next);
                        }
                        for (; j < MAX_KIDS_PER_CHUNK; j++) {
                            if (!lastChunk->kids[j])
                                break;
                        }
                        --j;
                        if (chunk != lastChunk || j > i)
                            chunk->kids[i] = lastChunk->kids[j];
                        lastChunk->kids[j] = NULL;
                        if (j == 0) {
                            *chunkp = NULL;
                            if (!list)
                                parent->kids = NULL;
                            freeChunk = lastChunk;
                        }
                        goto out;
                    }
                }

                chunkp = &chunk->next;
            } while ((chunk = *chunkp) != NULL);
        } else {
            table = NULL;
            kid = kids;
            if (kid == child)
                parent->kids = NULL;
        }
    }

  out:
    if (table) {
        entry = (JSPropertyTreeEntry *)
                JS_DHashTableOperate(table, child, JS_DHASH_LOOKUP);

        if (entry->child == child)
            JS_DHashTableRawRemove(table, &entry->hdr);
    }
    return freeChunk;
}

static JSDHashTable *
HashChunks(PropTreeKidsChunk *chunk, uintN n)
{
    JSDHashTable *table;
    uintN i;
    JSScopeProperty *sprop;
    JSPropertyTreeEntry *entry;

    table = JS_NewDHashTable(&PropertyTreeHashOps, NULL,
                             sizeof(JSPropertyTreeEntry),
                             JS_DHASH_DEFAULT_CAPACITY(n + 1));
    if (!table)
        return NULL;
    do {
        for (i = 0; i < MAX_KIDS_PER_CHUNK; i++) {
            sprop = chunk->kids[i];
            if (!sprop)
                break;
            entry = (JSPropertyTreeEntry *)
                    JS_DHashTableOperate(table, sprop, JS_DHASH_ADD);
            entry->child = sprop;
        }
    } while ((chunk = chunk->next) != NULL);
    return table;
}

/*
 * Called without cx->runtime->gcLock held. This function acquires that lock
 * only when inserting a new child. Thus there may be races to find or add a
 * node that result in duplicates. We expect such races to be rare!
 *
 * We use rt->gcLock, not rt->rtLock, to avoid nesting the former inside the
 * latter in js_GenerateShape below.
 */
static JSScopeProperty *
GetPropertyTreeChild(JSContext *cx, JSScopeProperty *parent,
                     JSScopeProperty *child)
{
    JSRuntime *rt;
    JSDHashTable *table;
    JSPropertyTreeEntry *entry;
    JSScopeProperty *sprop;
    PropTreeKidsChunk *chunk;
    uintN i, n;

    rt = cx->runtime;
    if (!parent) {
        JS_LOCK_GC(rt);

        table = &rt->propertyTreeHash;
        entry = (JSPropertyTreeEntry *)
                JS_DHashTableOperate(table, child, JS_DHASH_ADD);
        if (!entry)
            goto out_of_memory;

        sprop = entry->child;
        if (sprop)
            goto out;
    } else {
        /*
         * Because chunks are appended at the end and never deleted except by
         * the GC, we can search without taking the runtime's GC lock. We may
         * miss a matching sprop added by another thread, and make a duplicate
         * one, but that is an unlikely, therefore small, cost. The property
         * tree has extremely low fan-out below its root in popular embeddings
         * with real-world workloads.
         *
         * Patterns such as defining closures that capture a constructor's
         * environment as getters or setters on the new object that is passed
         * in as |this| can significantly increase fan-out below the property
         * tree root -- see bug 335700 for details.
         */
        entry = NULL;
        sprop = parent->kids;
        if (sprop) {
            if (KIDS_IS_CHUNKY(sprop)) {
                chunk = KIDS_TO_CHUNK(sprop);

                table = chunk->table;
                if (table) {
                    JS_LOCK_GC(rt);
                    entry = (JSPropertyTreeEntry *)
                            JS_DHashTableOperate(table, child, JS_DHASH_LOOKUP);
                    sprop = entry->child;
                    if (sprop) {
                        JS_UNLOCK_GC(rt);
                        return sprop;
                    }
                    goto locked_not_found;
                }

                n = 0;
                do {
                    for (i = 0; i < MAX_KIDS_PER_CHUNK; i++) {
                        sprop = chunk->kids[i];
                        if (!sprop) {
                            n += i;
                            if (n >= CHUNK_HASH_THRESHOLD) {
                                chunk = KIDS_TO_CHUNK(parent->kids);
                                if (!chunk->table) {
                                    table = HashChunks(chunk, n);
                                    JS_LOCK_GC(rt);
                                    if (!table)
                                        goto out_of_memory;
                                    if (chunk->table)
                                        JS_DHashTableDestroy(table);
                                    else
                                        chunk->table = table;
                                    goto locked_not_found;
                                }
                            }
                            goto not_found;
                        }

                        if (SPROP_MATCH(sprop, child))
                            return sprop;
                    }
                    n += MAX_KIDS_PER_CHUNK;
                } while ((chunk = chunk->next) != NULL);
            } else {
                if (SPROP_MATCH(sprop, child))
                    return sprop;
            }
        }

      not_found:
        JS_LOCK_GC(rt);
    }

  locked_not_found:
    sprop = NewScopeProperty(rt);
    if (!sprop)
        goto out_of_memory;

    sprop->id = child->id;
    sprop->getter = child->getter;
    sprop->setter = child->setter;
    sprop->slot = child->slot;
    sprop->attrs = child->attrs;
    sprop->flags = child->flags;
    sprop->shortid = child->shortid;
    sprop->parent = sprop->kids = NULL;
    sprop->shape = js_GenerateShape(cx, JS_TRUE);

    if (!parent) {
        entry->child = sprop;
    } else {
        if (!InsertPropertyTreeChild(rt, parent, sprop, NULL))
            goto out_of_memory;
    }

  out:
    JS_UNLOCK_GC(rt);
    return sprop;

  out_of_memory:
    JS_UNLOCK_GC(rt);
    JS_ReportOutOfMemory(cx);
    return NULL;
}

#ifdef DEBUG_notbrendan
#define CHECK_ANCESTOR_LINE(scope, sparse)                                   \
    JS_BEGIN_MACRO                                                           \
        if ((scope)->table) CheckAncestorLine(scope, sparse);                \
    JS_END_MACRO

static void
CheckAncestorLine(JSScope *scope, JSBool sparse)
{
    uint32 size;
    JSScopeProperty **spp, **start, **end, *ancestorLine, *sprop, *aprop;
    uint32 entryCount, ancestorCount;

    ancestorLine = SCOPE_LAST_PROP(scope);
    if (ancestorLine)
        JS_ASSERT(SCOPE_HAS_PROPERTY(scope, ancestorLine));

    entryCount = 0;
    size = SCOPE_CAPACITY(scope);
    start = scope->table;
    for (spp = start, end = start + size; spp < end; spp++) {
        sprop = SPROP_FETCH(spp);
        if (sprop) {
            entryCount++;
            for (aprop = ancestorLine; aprop; aprop = aprop->parent) {
                if (aprop == sprop)
                    break;
            }
            JS_ASSERT(aprop);
        }
    }
    JS_ASSERT(entryCount == scope->entryCount);

    ancestorCount = 0;
    for (sprop = ancestorLine; sprop; sprop = sprop->parent) {
        if (SCOPE_HAD_MIDDLE_DELETE(scope) &&
            !SCOPE_HAS_PROPERTY(scope, sprop)) {
            JS_ASSERT(sparse);
            continue;
        }
        ancestorCount++;
    }
    JS_ASSERT(ancestorCount == scope->entryCount);
}
#else
#define CHECK_ANCESTOR_LINE(scope, sparse) /* nothing */
#endif

static void
ReportReadOnlyScope(JSContext *cx, JSScope *scope)
{
    JSString *str;
    const char *bytes;

    str = js_ValueToString(cx, OBJECT_TO_JSVAL(scope->object));
    if (!str)
        return;
    bytes = js_GetStringBytes(cx, str);
    if (!bytes)
        return;
    JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_READ_ONLY, bytes);
}

JSScopeProperty *
js_AddScopeProperty(JSContext *cx, JSScope *scope, jsid id,
                    JSPropertyOp getter, JSPropertyOp setter, uint32 slot,
                    uintN attrs, uintN flags, intN shortid)
{
    JSScopeProperty **spp, *sprop, *overwriting, **spvec, **spp2, child;
    uint32 size, splen, i;
    int change;
    JSTempValueRooter tvr;

    JS_ASSERT(JS_IS_SCOPE_LOCKED(cx, scope));
    CHECK_ANCESTOR_LINE(scope, JS_TRUE);

    /*
     * You can't add properties to a sealed scope. But note well that you can
     * change property attributes in a sealed scope, even though that replaces
     * a JSScopeProperty * in the scope's hash table -- but no id is added, so
     * the scope remains sealed.
     */
    if (SCOPE_IS_SEALED(scope)) {
        ReportReadOnlyScope(cx, scope);
        return NULL;
    }

    /*
     * Normalize stub getter and setter values for faster is-stub testing in
     * the SPROP_CALL_[GS]ETTER macros.
     */
    if (getter == JS_PropertyStub)
        getter = NULL;
    if (setter == JS_PropertyStub)
        setter = NULL;

    /*
     * Search for id in order to claim its entry, allocating a property tree
     * node if one doesn't already exist for our parameters.
     */
    spp = js_SearchScope(scope, id, JS_TRUE);
    sprop = overwriting = SPROP_FETCH(spp);
    if (!sprop) {
        /* Check whether we need to grow, if the load factor is >= .75. */
        size = SCOPE_CAPACITY(scope);
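        /*
         * Note: size - (size >> 2) is 3/4 of the table capacity, and removed
         * (deleted-sentinel) entries count toward the load along with live
         * entries, which is why removedCount appears in the test below.
         */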
        if (scope->entryCount + scope->removedCount >= size - (size >> 2)) {
            if (scope->removedCount >= size >> 2) {
                METER(compresses);
                change = 0;
            } else {
                METER(grows);
                change = 1;
            }
            if (!ChangeScope(cx, scope, change) &&
                scope->entryCount + scope->removedCount == size - 1) {
                METER(addFailures);
                return NULL;
            }
            spp = js_SearchScope(scope, id, JS_TRUE);
            JS_ASSERT(!SPROP_FETCH(spp));
        }
    } else {
        /* Property exists: js_SearchScope must have returned a valid entry. */
        JS_ASSERT(!SPROP_IS_REMOVED(*spp));

        /*
         * If all property members match, this is a redundant add and we can
         * return early. If the caller wants to allocate a slot, but doesn't
         * care which slot, copy sprop->slot into slot so we can match sprop,
         * if all other members match.
         */
        if (!(attrs & JSPROP_SHARED) &&
            slot == SPROP_INVALID_SLOT &&
            SPROP_HAS_VALID_SLOT(sprop, scope)) {
            slot = sprop->slot;
        }
        if (SPROP_MATCH_PARAMS_AFTER_ID(sprop, getter, setter, slot, attrs,
                                        flags, shortid)) {
            METER(redundantAdds);
            return sprop;
        }

        /*
         * If we are clearing sprop to force the existing property that it
         * describes to be overwritten, then we have to unlink sprop from the
         * ancestor line at scope->lastProp, lazily if sprop is not lastProp.
         * And we must remove the entry at *spp, precisely so the lazy "middle
         * delete" fixup code further below won't find sprop in scope->table,
         * in spite of sprop being on the ancestor line.
         *
         * When we finally succeed in finding or creating a new sprop
         * and storing its pointer at *spp, we'll use the |overwriting|
         * local saved when we first looked up id to decide whether we're
         * indeed creating a new entry, or merely overwriting an existing
         * property.
         */
        if (sprop == SCOPE_LAST_PROP(scope)) {
            do {
                SCOPE_REMOVE_LAST_PROP(scope);
                if (!SCOPE_HAD_MIDDLE_DELETE(scope))
                    break;
                sprop = SCOPE_LAST_PROP(scope);
            } while (sprop && !SCOPE_HAS_PROPERTY(scope, sprop));
        } else if (!SCOPE_HAD_MIDDLE_DELETE(scope)) {
            /*
             * If we have no hash table yet, we need one now. The middle
             * delete code is simple-minded that way!
             */
            if (!scope->table) {
                if (!CreateScopeTable(cx, scope, JS_TRUE))
                    return NULL;
                spp = js_SearchScope(scope, id, JS_TRUE);
                sprop = overwriting = SPROP_FETCH(spp);
            }
            SCOPE_SET_MIDDLE_DELETE(scope);
        }
        js_MakeScopeShapeUnique(cx, scope);

        /*
         * If we fail later on trying to find or create a new sprop, we will
         * goto fail_overwrite and restore *spp from |overwriting|. Note that
         * we don't bother to keep scope->removedCount in sync, because we'll
         * fix up *spp and scope->entryCount shortly, no matter how control
         * flow returns from this function.
         */
        if (scope->table)
            SPROP_STORE_PRESERVING_COLLISION(spp, NULL);
        scope->entryCount--;
        CHECK_ANCESTOR_LINE(scope, JS_TRUE);
        sprop = NULL;
    }

    if (!sprop) {
        /*
         * If properties were deleted from the middle of the list starting at
         * scope->lastProp, we may need to fork the property tree and squeeze
         * all deleted properties out of scope's ancestor line. Otherwise we
         * risk adding a node with the same id as a "middle" node, violating
         * the rule that properties along an ancestor line have distinct ids.
         */
        if (SCOPE_HAD_MIDDLE_DELETE(scope)) {
            JS_ASSERT(scope->table);
            CHECK_ANCESTOR_LINE(scope, JS_TRUE);

            /*
             * Our forking heuristic tries to balance the desire to avoid
             * over-compacting (over-forking) against the desire to
             * *periodically* fork anyways, in order to prevent paying scan
             * penalties on each insert indefinitely, on a lineage with only
             * a few old middle-deletions. So we fork if either:
             *
             *  - A quick scan finds a true conflict.
             *  - We are passing through a doubling-threshold in size and
             *    have accumulated a nonzero count of uncompacted deletions.
             */

            bool conflicts = false;
            uint32 count = 0;
            uint32 threshold = JS_BIT(JS_CeilingLog2(scope->entryCount));
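            /*
             * Note: JS_BIT(JS_CeilingLog2(n)) is the least power of two that
             * is >= n, so the count > threshold test below also forces a fork
             * once the ancestor line (including middle-deleted nodes) grows
             * past the next power-of-two boundary above the live entry count.
             */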
            for (sprop = SCOPE_LAST_PROP(scope); sprop; sprop = sprop->parent) {
                ++count;
                if (sprop->id == id) {
                    conflicts = true;
                    break;
                }
            }

            if (conflicts || count > threshold) {
                /*
                 * Enumerate live entries in scope->table using a temporary
                 * vector, by walking the (possibly sparse, due to deletions)
                 * ancestor line from scope->lastProp.
                 */
                splen = scope->entryCount;
                JS_ASSERT(splen != 0);
                spvec = (JSScopeProperty **)
                        JS_malloc(cx, SCOPE_TABLE_NBYTES(splen));
                if (!spvec)
                    goto fail_overwrite;
                i = splen;
                sprop = SCOPE_LAST_PROP(scope);
                JS_ASSERT(sprop);
                do {
                    /*
                     * NB: test SCOPE_GET_PROPERTY, not SCOPE_HAS_PROPERTY,
                     * as the latter macro insists that sprop->id maps to
                     * sprop, while the former simply tests whether sprop->id
                     * is bound in scope.
                     */
                    if (!SCOPE_GET_PROPERTY(scope, sprop->id))
                        continue;

                    JS_ASSERT(sprop != overwriting);
                    JS_ASSERT(i != 0);
                    spvec[--i] = sprop;
                } while ((sprop = sprop->parent) != NULL);
                JS_ASSERT(i == 0);

                /*
                 * Now loop forward through spvec, forking the property tree
                 * whenever we see a "parent gap" due to deletions from scope.
                 * NB: sprop is null on first entry to the loop body.
                 */
                do {
                    if (spvec[i]->parent == sprop) {
                        sprop = spvec[i];
                    } else {
                        sprop = GetPropertyTreeChild(cx, sprop, spvec[i]);
                        if (!sprop) {
                            JS_free(cx, spvec);
                            goto fail_overwrite;
                        }

                        spp2 = js_SearchScope(scope, sprop->id, JS_FALSE);
                        JS_ASSERT(SPROP_FETCH(spp2) == spvec[i]);
                        SPROP_STORE_PRESERVING_COLLISION(spp2, sprop);
                    }
                } while (++i < splen);
                JS_free(cx, spvec);

                /*
                 * Now sprop points to the last property in scope, where the
                 * ancestor line from sprop to the root is dense w.r.t. scope:
                 * it contains no nodes not mapped by scope->table.
                 */
                scope->lastProp = sprop;
                CHECK_ANCESTOR_LINE(scope, JS_FALSE);
                JS_RUNTIME_METER(cx->runtime, middleDeleteFixups);
                SCOPE_CLR_MIDDLE_DELETE(scope);
            }
        }

        /*
         * Aliases share another property's slot, passed in the |slot| param.
         * Shared properties have no slot. Unshared properties that do not
         * alias another property's slot get one here, but may lose it due to
         * a JS_ClearScope call.
         */
        if (!(flags & SPROP_IS_ALIAS)) {
            if (attrs & JSPROP_SHARED) {
                slot = SPROP_INVALID_SLOT;
            } else {
                /*
                 * We may have set slot from a nearly-matching sprop, above.
                 * If so, we're overwriting that nearly-matching sprop, so we
                 * can reuse its slot -- we don't need to allocate a new one.
                 * Similarly, we use a specific slot if provided by the caller.
                 */
                if (slot == SPROP_INVALID_SLOT &&
                    !js_AllocSlot(cx, scope->object, &slot)) {
                    goto fail_overwrite;
                }
            }
        }

        /*
         * Check for a watchpoint on a deleted property; if one exists, change
         * setter to js_watch_set.
         * XXXbe this could get expensive with lots of watchpoints...
         */
        if (!JS_CLIST_IS_EMPTY(&cx->runtime->watchPointList) &&
            js_FindWatchPoint(cx->runtime, scope, id)) {
            if (overwriting)
                JS_PUSH_TEMP_ROOT_SPROP(cx, overwriting, &tvr);
            setter = js_WrapWatchedSetter(cx, id, attrs, setter);
            if (overwriting)
                JS_POP_TEMP_ROOT(cx, &tvr);
            if (!setter)
                goto fail_overwrite;
        }

        /* Find or create a property tree node labeled by our arguments. */
        child.id = id;
        child.getter = getter;
        child.setter = setter;
        child.slot = slot;
        child.attrs = attrs;
        child.flags = flags;
        child.shortid = shortid;
        sprop = GetPropertyTreeChild(cx, scope->lastProp, &child);
        if (!sprop)
            goto fail_overwrite;

        /*
         * The scope's shape defaults to its last property's shape, but may
         * be regenerated later as the scope diverges (from the property cache
         * point of view) from the structural type associated with sprop.
         */
        js_ExtendScopeShape(cx, scope, sprop);

        /* Store the tree node pointer in the table entry for id. */
        if (scope->table)
            SPROP_STORE_PRESERVING_COLLISION(spp, sprop);
        scope->entryCount++;
        scope->lastProp = sprop;
        CHECK_ANCESTOR_LINE(scope, JS_FALSE);
#ifdef DEBUG
        if (!overwriting) {
            LIVE_SCOPE_METER(cx, ++cx->runtime->liveScopeProps);
            JS_RUNTIME_METER(cx->runtime, totalScopeProps);
        }
#endif

        /*
         * If we reach the hashing threshold, try to allocate scope->table.
         * If we can't (a rare event, preceded by swapping to death on most
         * modern OSes), stick with linear search rather than whining about
         * this little set-back. Therefore we must test !scope->table and
         * scope->entryCount >= SCOPE_HASH_THRESHOLD, not merely whether the
         * entry count just reached the threshold.
         */
        if (!scope->table && scope->entryCount >= SCOPE_HASH_THRESHOLD)
            (void) CreateScopeTable(cx, scope, JS_FALSE);
    }

    jsuint index;
    if (js_IdIsIndex(sprop->id, &index))
        SCOPE_SET_INDEXED_PROPERTIES(scope);

    METER(adds);
    return sprop;

  fail_overwrite:
    if (overwriting) {
        /*
         * We may or may not have forked overwriting out of scope's ancestor
         * line, so we must check (the alternative is to set a flag above, but
         * that hurts the common, non-error case). If we did fork overwriting
         * out, we'll add it back at scope->lastProp. This means enumeration
         * order can change due to a failure to overwrite an id.
         * XXXbe very minor incompatibility
         */
        for (sprop = SCOPE_LAST_PROP(scope); ; sprop = sprop->parent) {
            if (!sprop) {
                sprop = SCOPE_LAST_PROP(scope);
                if (overwriting->parent == sprop) {
                    scope->lastProp = overwriting;
                } else {
                    sprop = GetPropertyTreeChild(cx, sprop, overwriting);
                    if (sprop) {
                        JS_ASSERT(sprop != overwriting);
                        scope->lastProp = sprop;
                    }
                    overwriting = sprop;
                }
                break;
            }
            if (sprop == overwriting)
                break;
        }
        if (overwriting) {
            if (scope->table)
                SPROP_STORE_PRESERVING_COLLISION(spp, overwriting);
            scope->entryCount++;
        }
        CHECK_ANCESTOR_LINE(scope, JS_TRUE);
    }
    METER(addFailures);
    return NULL;
}

JSScopeProperty *
js_ChangeScopePropertyAttrs(JSContext *cx, JSScope *scope,
                            JSScopeProperty *sprop, uintN attrs, uintN mask,
                            JSPropertyOp getter, JSPropertyOp setter)
{
    JSScopeProperty child, *newsprop, **spp;

    CHECK_ANCESTOR_LINE(scope, JS_TRUE);

    /* Allow only shared (slot-less) => unshared (slot-full) transition. */
    attrs |= sprop->attrs & mask;
    JS_ASSERT(!((attrs ^ sprop->attrs) & JSPROP_SHARED) ||
              !(attrs & JSPROP_SHARED));
    if (getter == JS_PropertyStub)
        getter = NULL;
    if (setter == JS_PropertyStub)
        setter = NULL;
    if (sprop->attrs == attrs &&
        sprop->getter == getter &&
        sprop->setter == setter) {
        return sprop;
    }

    child.id = sprop->id;
    child.getter = getter;
    child.setter = setter;
    child.slot = sprop->slot;
    child.attrs = attrs;
    child.flags = sprop->flags;
    child.shortid = sprop->shortid;

    if (SCOPE_LAST_PROP(scope) == sprop) {
        /*
         * Optimize the case where the last property added to scope is changed
         * to have a different attrs, getter, or setter. In the last property
         * case, we need not fork the property tree. But since we do not call
         * js_AddScopeProperty, we may need to allocate a new slot directly.
         */
        if ((sprop->attrs & JSPROP_SHARED) && !(attrs & JSPROP_SHARED)) {
            JS_ASSERT(child.slot == SPROP_INVALID_SLOT);
            if (!js_AllocSlot(cx, scope->object, &child.slot))
                return NULL;
        }

        newsprop = GetPropertyTreeChild(cx, sprop->parent, &child);
        if (newsprop) {
            spp = js_SearchScope(scope, sprop->id, JS_FALSE);
            JS_ASSERT(SPROP_FETCH(spp) == sprop);

            if (scope->table)
                SPROP_STORE_PRESERVING_COLLISION(spp, newsprop);
            scope->lastProp = newsprop;
            CHECK_ANCESTOR_LINE(scope, JS_TRUE);
        }
    } else {
        /*
         * Let js_AddScopeProperty handle this |overwriting| case, including
         * the conservation of sprop->slot (if it's valid). We must not call
         * js_RemoveScopeProperty here, it will free a valid sprop->slot and
         * js_AddScopeProperty won't re-allocate it.
         */
        newsprop = js_AddScopeProperty(cx, scope, child.id,
                                       child.getter, child.setter, child.slot,
                                       child.attrs, child.flags, child.shortid);
    }

    if (newsprop) {
        js_LeaveTraceIfGlobalObject(cx, scope->object);
        if (scope->shape == sprop->shape)
            scope->shape = newsprop->shape;
        else
            js_MakeScopeShapeUnique(cx, scope);
    }
#ifdef JS_DUMP_PROPTREE_STATS
    else
        METER(changeFailures);
#endif
    return newsprop;
}

JSBool
js_RemoveScopeProperty(JSContext *cx, JSScope *scope, jsid id)
{
    JSScopeProperty **spp, *stored, *sprop;
    uint32 size;

    JS_ASSERT(JS_IS_SCOPE_LOCKED(cx, scope));
    CHECK_ANCESTOR_LINE(scope, JS_TRUE);
    if (SCOPE_IS_SEALED(scope)) {
        ReportReadOnlyScope(cx, scope);
        return JS_FALSE;
    }
    METER(removes);

    spp = js_SearchScope(scope, id, JS_FALSE);
    stored = *spp;
    sprop = SPROP_CLEAR_COLLISION(stored);
    if (!sprop) {
        METER(uselessRemoves);
        return JS_TRUE;
    }

    /* Convert from a list to a hash so we can handle "middle deletes". */
    if (!scope->table && sprop != scope->lastProp) {
        if (!CreateScopeTable(cx, scope, JS_TRUE))
            return JS_FALSE;
        spp = js_SearchScope(scope, id, JS_FALSE);
        stored = *spp;
        sprop = SPROP_CLEAR_COLLISION(stored);
    }

    /* First, if sprop is unshared and not cleared, free its slot number. */
    if (SPROP_HAS_VALID_SLOT(sprop, scope)) {
        js_FreeSlot(cx, scope->object, sprop->slot);
        JS_ATOMIC_INCREMENT(&cx->runtime->propertyRemovals);
    }

    /* Next, remove id by setting its entry to a removed or free sentinel. */
    if (SPROP_HAD_COLLISION(stored)) {
        JS_ASSERT(scope->table);
        *spp = SPROP_REMOVED;
        scope->removedCount++;
    } else {
        METER(removeFrees);
        if (scope->table)
            *spp = NULL;
    }
    scope->entryCount--;
    LIVE_SCOPE_METER(cx, --cx->runtime->liveScopeProps);

    /* Update scope->lastProp directly, or set its deferred update flag. */
    if (sprop == SCOPE_LAST_PROP(scope)) {
        do {
            SCOPE_REMOVE_LAST_PROP(scope);
            if (!SCOPE_HAD_MIDDLE_DELETE(scope))
                break;
            sprop = SCOPE_LAST_PROP(scope);
        } while (sprop && !SCOPE_HAS_PROPERTY(scope, sprop));
        if (!SCOPE_LAST_PROP(scope))
            SCOPE_CLR_MIDDLE_DELETE(scope);
    } else if (!SCOPE_HAD_MIDDLE_DELETE(scope)) {
        SCOPE_SET_MIDDLE_DELETE(scope);
    }
    js_MakeScopeShapeUnique(cx, scope);
    CHECK_ANCESTOR_LINE(scope, JS_TRUE);

    /* Last, consider shrinking scope's table if its load factor is <= .25. */
    size = SCOPE_CAPACITY(scope);
    if (size > MIN_SCOPE_SIZE && scope->entryCount <= size >> 2) {
        METER(shrinks);
        (void) ChangeScope(cx, scope, -1);
    }

    return JS_TRUE;
}

void
js_ClearScope(JSContext *cx, JSScope *scope)
{
    CHECK_ANCESTOR_LINE(scope, JS_TRUE);
    LIVE_SCOPE_METER(cx, cx->runtime->liveScopeProps -= scope->entryCount);

    if (scope->table)
        free(scope->table);
    SCOPE_CLR_MIDDLE_DELETE(scope);
    InitMinimalScope(cx, scope);
    JS_ATOMIC_INCREMENT(&cx->runtime->propertyRemovals);
}

void
js_TraceId(JSTracer *trc, jsid id)
{
    jsval v;

    v = ID_TO_VALUE(id);
    JS_CALL_VALUE_TRACER(trc, v, "id");
}

#ifdef DEBUG
static void
PrintPropertyGetterOrSetter(JSTracer *trc, char *buf, size_t bufsize)
{
    JSScopeProperty *sprop;
    jsid id;
    size_t n;
    const char *name;

    JS_ASSERT(trc->debugPrinter == PrintPropertyGetterOrSetter);
    sprop = (JSScopeProperty *)trc->debugPrintArg;
    id = sprop->id;
    name = trc->debugPrintIndex ? js_setter_str : js_getter_str;

    if (JSID_IS_ATOM(id)) {
        n = js_PutEscapedString(buf, bufsize - 1,
                                ATOM_TO_STRING(JSID_TO_ATOM(id)), 0);
        if (n < bufsize - 1)
            JS_snprintf(buf + n, bufsize - n, " %s", name);
    } else if (JSID_IS_INT(sprop->id)) {
        JS_snprintf(buf, bufsize, "%d %s", JSID_TO_INT(id), name);
    } else {
        JS_snprintf(buf, bufsize, "<object> %s", name);
    }
}
#endif

void
js_TraceScopeProperty(JSTracer *trc, JSScopeProperty *sprop)
{
    if (IS_GC_MARKING_TRACER(trc))
        sprop->flags |= SPROP_MARK;
    TRACE_ID(trc, sprop->id);

#if JS_HAS_GETTER_SETTER
    if (sprop->attrs & (JSPROP_GETTER | JSPROP_SETTER)) {
        if (sprop->attrs & JSPROP_GETTER) {
            JS_SET_TRACING_DETAILS(trc, PrintPropertyGetterOrSetter, sprop, 0);
            JS_CallTracer(trc, js_CastAsObject(sprop->getter), JSTRACE_OBJECT);
        }
        if (sprop->attrs & JSPROP_SETTER) {
            JS_SET_TRACING_DETAILS(trc, PrintPropertyGetterOrSetter, sprop, 1);
            JS_CallTracer(trc, js_CastAsObject(sprop->setter), JSTRACE_OBJECT);
        }
    }
#endif /* JS_HAS_GETTER_SETTER */
}

#ifdef JS_DUMP_PROPTREE_STATS

#include <stdio.h>

static void
MeterKidCount(JSBasicStats *bs, uintN nkids)
{
    JS_BASIC_STATS_ACCUM(bs, nkids);
    bs->hist[JS_MIN(nkids, 10)]++;
}

static void
MeterPropertyTree(JSBasicStats *bs, JSScopeProperty *node)
{
    uintN i, nkids;
    JSScopeProperty *kids, *kid;
    PropTreeKidsChunk *chunk;

    nkids = 0;
    kids = node->kids;
    if (kids) {
        if (KIDS_IS_CHUNKY(kids)) {
            for (chunk = KIDS_TO_CHUNK(kids); chunk; chunk = chunk->next) {
                for (i = 0; i < MAX_KIDS_PER_CHUNK; i++) {
                    kid = chunk->kids[i];
                    if (!kid)
                        break;
                    MeterPropertyTree(bs, kid);
                    nkids++;
                }
            }
        } else {
            MeterPropertyTree(bs, kids);
            nkids = 1;
        }
    }

    MeterKidCount(bs, nkids);
}

static JSDHashOperator
js_MeterPropertyTree(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 number,
                     void *arg)
{
    JSPropertyTreeEntry *entry = (JSPropertyTreeEntry *)hdr;
    JSBasicStats *bs = (JSBasicStats *)arg;

    MeterPropertyTree(bs, entry->child);
    return JS_DHASH_NEXT;
}

static void
DumpSubtree(JSContext *cx, JSScopeProperty *sprop, int level, FILE *fp)
{
    jsval v;
    JSString *str;
    JSScopeProperty *kids, *kid;
    PropTreeKidsChunk *chunk;
    uintN i;

    fprintf(fp, "%*sid ", level, "");
    v = ID_TO_VALUE(sprop->id);
    if (JSID_IS_INT(sprop->id)) {
        fprintf(fp, "%d", JSVAL_TO_INT(v));
    } else {
        if (JSID_IS_ATOM(sprop->id)) {
            str = JSVAL_TO_STRING(v);
        } else {
            JS_ASSERT(JSID_IS_OBJECT(sprop->id));
            str = js_ValueToString(cx, v);
            fputs("object ", fp);
        }
        if (!str)
            fputs("<error>", fp);
        else
            js_FileEscapedString(fp, str, '"');
    }

    fprintf(fp, " g/s %p/%p slot %u attrs %x flags %x shortid %d\n",
            (void *) sprop->getter, (void *) sprop->setter, sprop->slot,
            sprop->attrs, sprop->flags, sprop->shortid);
    kids = sprop->kids;
    if (kids) {
        ++level;
        if (KIDS_IS_CHUNKY(kids)) {
            chunk = KIDS_TO_CHUNK(kids);
            do {
                for (i = 0; i < MAX_KIDS_PER_CHUNK; i++) {
                    kid = chunk->kids[i];
                    if (!kid)
                        break;
                    JS_ASSERT(kid->parent == sprop);
                    DumpSubtree(cx, kid, level, fp);
                }
            } while ((chunk = chunk->next) != NULL);
        } else {
            kid = kids;
            DumpSubtree(cx, kid, level, fp);
        }
    }
}
1701 |
|
1702 |
#endif /* JS_DUMP_PROPTREE_STATS */ |
1703 |
|
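/*
 * Sweep the property tree after the GC's mark phase.  For each node in the
 * property arena pool: a marked node is kept (its mark bit is cleared and,
 * unless the mark phase already regenerated it, its shape is refreshed); an
 * unmarked node is unlinked from its parent, its kids are reparented to the
 * grandparent, and the node goes onto the property free list.  Arenas left
 * with no live properties are returned to the malloc heap.  Under
 * JS_DUMP_PROPTREE_STATS, tree and scope-search statistics are logged first.
 */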
void
js_SweepScopeProperties(JSContext *cx)
{
    JSRuntime *rt = cx->runtime;
    JSArena **ap, *a;
    JSScopeProperty *limit, *sprop, *parent, *kids, *kid;
    uintN liveCount;
    PropTreeKidsChunk *chunk, *nextChunk, *freeChunk;
    uintN i;

#ifdef JS_DUMP_PROPTREE_STATS
    JSBasicStats bs;
    uint32 livePropCapacity = 0, totalLiveCount = 0;
    static FILE *logfp;
    if (!logfp)
        logfp = fopen("/tmp/proptree.stats", "w");

    JS_BASIC_STATS_INIT(&bs);
    MeterKidCount(&bs, rt->propertyTreeHash.entryCount);
    JS_DHashTableEnumerate(&rt->propertyTreeHash, js_MeterPropertyTree, &bs);

    {
        double props, nodes, mean, sigma;

        props = rt->liveScopePropsPreSweep;
        nodes = rt->livePropTreeNodes;
        JS_ASSERT(nodes == bs.sum);
        mean = JS_MeanAndStdDevBS(&bs, &sigma);

        fprintf(logfp,
                "props %g nodes %g beta %g meankids %g sigma %g max %u\n",
                props, nodes, nodes / props, mean, sigma, bs.max);
    }

    JS_DumpHistogram(&bs, logfp);
#endif

    ap = &rt->propertyArenaPool.first.next;
    while ((a = *ap) != NULL) {
        limit = (JSScopeProperty *) a->avail;
        liveCount = 0;
        for (sprop = (JSScopeProperty *) a->base; sprop < limit; sprop++) {
            /* If the id is null, sprop is already on the freelist. */
            if (sprop->id == JSVAL_NULL)
                continue;

            /*
             * If the mark bit is set, sprop is alive, so clear the mark bit
             * and continue the for loop.
             *
             * Regenerate sprop->shape if it hasn't already been refreshed
             * during the mark phase, when live scopes' lastProp members are
             * followed to update both scope->shape and lastProp->shape.
             */
            if (sprop->flags & SPROP_MARK) {
                sprop->flags &= ~SPROP_MARK;
                if (sprop->flags & SPROP_FLAG_SHAPE_REGEN)
                    sprop->flags &= ~SPROP_FLAG_SHAPE_REGEN;
                else
                    sprop->shape = js_RegenerateShapeForGC(cx);
                liveCount++;
                continue;
            }

            /* Ok, sprop is garbage to collect: unlink it from its parent. */
            freeChunk = RemovePropertyTreeChild(rt, sprop);

            /*
             * Take care to reparent all sprop's kids to their grandparent.
             * InsertPropertyTreeChild can potentially fail for two reasons:
             *
             * 1. If parent is null, insertion into the root property hash
             *    table may fail.  We are forced to leave the kid out of the
             *    table (as can already happen with duplicates) but ensure
             *    that the kid's parent pointer is set to null.
             *
             * 2. If parent is non-null, allocation of a new KidsChunk can
             *    fail.  To prevent this from happening, we allow sprop's
             *    own chunks to be reused by the grandparent, which removes
             *    the need for InsertPropertyTreeChild to malloc a new
             *    KidsChunk.
             *
             * If sprop does not have chunky kids, then we rely on the
             * RemovePropertyTreeChild call above (which removed sprop from
             * its parent) either leaving one free entry, or else returning
             * the now-unused chunk to us so we can reuse it.
             *
             * We also require the grandparent to have either no kids or
             * else chunky kids.  A single non-chunky kid would force a new
             * chunk to be malloced in some cases (if sprop had a single
             * non-chunky kid, or a multiple of MAX_KIDS_PER_CHUNK kids).
             * Note that RemovePropertyTreeChild never converts a
             * single-entry chunky kid back to a non-chunky kid, so we are
             * assured of correct behaviour.
             */
            kids = sprop->kids;
            if (kids) {
                sprop->kids = NULL;
                parent = sprop->parent;

                /* Assert that grandparent has no kids or chunky kids. */
                JS_ASSERT(!parent || !parent->kids ||
                          KIDS_IS_CHUNKY(parent->kids));
                if (KIDS_IS_CHUNKY(kids)) {
                    chunk = KIDS_TO_CHUNK(kids);
                    do {
                        nextChunk = chunk->next;
                        chunk->next = NULL;
                        for (i = 0; i < MAX_KIDS_PER_CHUNK; i++) {
                            kid = chunk->kids[i];
                            if (!kid)
                                break;
                            JS_ASSERT(kid->parent == sprop);

                            /*
                             * Clear a space in the kids array for possible
                             * re-use by InsertPropertyTreeChild.
                             */
                            chunk->kids[i] = NULL;
                            if (!InsertPropertyTreeChild(rt, parent, kid,
                                                         chunk)) {
                                /*
                                 * This can happen only if we failed to add
                                 * an entry to the root property hash table.
                                 */
                                JS_ASSERT(!parent);
                                kid->parent = NULL;
                            }
                        }
                        if (!chunk->kids[0]) {
                            /* The chunk wasn't reused, so we must free it. */
                            DestroyPropTreeKidsChunk(rt, chunk);
                        }
                    } while ((chunk = nextChunk) != NULL);
                } else {
                    kid = kids;
                    if (!InsertPropertyTreeChild(rt, parent, kid, freeChunk)) {
                        /*
                         * This can happen only if we failed to add an entry
                         * to the root property hash table.
                         */
                        JS_ASSERT(!parent);
                        kid->parent = NULL;
                    }
                }
            }

            if (freeChunk && !freeChunk->kids[0]) {
                /* The chunk wasn't reused, so we must free it. */
                DestroyPropTreeKidsChunk(rt, freeChunk);
            }

            /* Clear id so we know (above) that sprop is on the freelist. */
            sprop->id = JSVAL_NULL;
            FREENODE_INSERT(rt->propertyFreeList, sprop);
            JS_RUNTIME_UNMETER(rt, livePropTreeNodes);
        }

        /* If a contains no live properties, return it to the malloc heap. */
        if (liveCount == 0) {
            for (sprop = (JSScopeProperty *) a->base; sprop < limit; sprop++)
                FREENODE_REMOVE(sprop);
            JS_ARENA_DESTROY(&rt->propertyArenaPool, a, ap);
        } else {
#ifdef JS_DUMP_PROPTREE_STATS
            livePropCapacity += limit - (JSScopeProperty *) a->base;
            totalLiveCount += liveCount;
#endif
            ap = &a->next;
        }
    }

#ifdef JS_DUMP_PROPTREE_STATS
    fprintf(logfp, "arenautil %g%%\n",
            (totalLiveCount && livePropCapacity)
            ? (totalLiveCount * 100.0) / livePropCapacity
            : 0.0);

#define RATE(f1, f2) (((double)js_scope_stats.f1 / js_scope_stats.f2) * 100.0)

    fprintf(logfp, "Scope search stats:\n"
            "  searches:       %6u\n"
            "  hits:           %6u %5.2f%% of searches\n"
            "  misses:         %6u %5.2f%%\n"
            "  hashes:         %6u %5.2f%%\n"
            "  steps:          %6u %5.2f%% %5.2f%% of hashes\n"
            "  stepHits:       %6u %5.2f%% %5.2f%%\n"
            "  stepMisses:     %6u %5.2f%% %5.2f%%\n"
            "  adds:           %6u\n"
            "  redundantAdds:  %6u\n"
            "  addFailures:    %6u\n"
            "  changeFailures: %6u\n"
            "  compresses:     %6u\n"
            "  grows:          %6u\n"
            "  removes:        %6u\n"
            "  removeFrees:    %6u\n"
            "  uselessRemoves: %6u\n"
            "  shrinks:        %6u\n",
            js_scope_stats.searches,
            js_scope_stats.hits, RATE(hits, searches),
            js_scope_stats.misses, RATE(misses, searches),
            js_scope_stats.hashes, RATE(hashes, searches),
            js_scope_stats.steps, RATE(steps, searches), RATE(steps, hashes),
            js_scope_stats.stepHits,
            RATE(stepHits, searches), RATE(stepHits, hashes),
            js_scope_stats.stepMisses,
            RATE(stepMisses, searches), RATE(stepMisses, hashes),
            js_scope_stats.adds,
            js_scope_stats.redundantAdds,
            js_scope_stats.addFailures,
            js_scope_stats.changeFailures,
            js_scope_stats.compresses,
            js_scope_stats.grows,
            js_scope_stats.removes,
            js_scope_stats.removeFrees,
            js_scope_stats.uselessRemoves,
            js_scope_stats.shrinks);

#undef RATE

    fflush(logfp);
#endif

#ifdef DUMP_PROPERTY_TREE
    {
        FILE *dumpfp = fopen("/tmp/proptree.dump", "w");
        if (dumpfp) {
            JSPropertyTreeEntry *pte, *end;

            pte = (JSPropertyTreeEntry *) rt->propertyTreeHash.entryStore;
            end = pte + JS_DHASH_TABLE_SIZE(&rt->propertyTreeHash);
            while (pte < end) {
                if (pte->child)
                    DumpSubtree(cx, pte->child, 0, dumpfp);
                pte++;
            }
            fclose(dumpfp);
        }
    }
#endif
}
|
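/*
 * Initialize the runtime's shared property tree: the root property hash
 * table and the arena pool from which JSScopeProperty nodes are allocated.
 * On failure the table's ops pointer is nulled so js_FinishPropertyTree
 * knows there is nothing to tear down.
 */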
JSBool
js_InitPropertyTree(JSRuntime *rt)
{
    if (!JS_DHashTableInit(&rt->propertyTreeHash, &PropertyTreeHashOps, NULL,
                           sizeof(JSPropertyTreeEntry), JS_DHASH_MIN_SIZE)) {
        rt->propertyTreeHash.ops = NULL;
        return JS_FALSE;
    }
    JS_INIT_ARENA_POOL(&rt->propertyArenaPool, "properties",
                       256 * sizeof(JSScopeProperty), sizeof(void *), NULL);
    return JS_TRUE;
}
|
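/*
 * Release the property tree's resources: finish the root property hash
 * table if it was initialized, then free the property arena pool.
 */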
void
js_FinishPropertyTree(JSRuntime *rt)
{
    if (rt->propertyTreeHash.ops) {
        JS_DHashTableFinish(&rt->propertyTreeHash);
        rt->propertyTreeHash.ops = NULL;
    }
    JS_FinishArenaPool(&rt->propertyArenaPool);
}