1 | /* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2 | * vim: set ts=4 sw=4 et tw=99:
3 | *
4 | * ***** BEGIN LICENSE BLOCK *****
5 | * Version: MPL 1.1/GPL 2.0/LGPL 2.1
6 | *
7 | * The contents of this file are subject to the Mozilla Public License Version
8 | * 1.1 (the "License"); you may not use this file except in compliance with
9 | * the License. You may obtain a copy of the License at
10 | * http://www.mozilla.org/MPL/
11 | *
12 | * Software distributed under the License is distributed on an "AS IS" basis,
13 | * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
14 | * for the specific language governing rights and limitations under the
15 | * License.
16 | *
17 | * The Original Code is Mozilla SpiderMonkey JavaScript 1.9 code, released
18 | * May 28, 2008.
19 | *
20 | * The Initial Developer of the Original Code is
21 | * Brendan Eich <brendan@mozilla.org>
22 | *
23 | * Contributor(s):
24 | * Andreas Gal <gal@mozilla.com>
25 | * Mike Shaver <shaver@mozilla.org>
26 | * David Anderson <danderson@mozilla.com>
27 | *
28 | * Alternatively, the contents of this file may be used under the terms of
29 | * either of the GNU General Public License Version 2 or later (the "GPL"),
30 | * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
31 | * in which case the provisions of the GPL or the LGPL are applicable instead
32 | * of those above. If you wish to allow use of your version of this file only
33 | * under the terms of either the GPL or the LGPL, and not to allow others to
34 | * use your version of this file under the terms of the MPL, indicate your
35 | * decision by deleting the provisions above and replace them with the notice
36 | * and other provisions required by the GPL or the LGPL. If you do not delete
37 | * the provisions above, a recipient may use your version of this file under
38 | * the terms of any one of the MPL, the GPL or the LGPL.
39 | *
40 | * ***** END LICENSE BLOCK ***** */
41 | |
42 | #include "jsstddef.h" // always first |
43 | #include "jsbit.h" // low-level (NSPR-based) headers next |
44 | #include "jsprf.h" |
45 | #include <math.h> // standard headers next |
46 | #ifdef _MSC_VER |
47 | #include <malloc.h> |
48 | #define alloca _alloca |
49 | #endif |
50 | #ifdef SOLARIS |
51 | #include <alloca.h> |
52 | #endif |
53 | #include <limits.h> |
54 | |
55 | #include "nanojit/nanojit.h" |
56 | #include "jsapi.h" // higher-level library and API headers |
57 | #include "jsarray.h" |
58 | #include "jsbool.h" |
59 | #include "jscntxt.h" |
60 | #include "jsdbgapi.h" |
61 | #include "jsemit.h" |
62 | #include "jsfun.h" |
63 | #include "jsinterp.h" |
64 | #include "jsiter.h" |
65 | #include "jsobj.h" |
66 | #include "jsopcode.h" |
67 | #include "jsregexp.h" |
68 | #include "jsscope.h" |
69 | #include "jsscript.h" |
70 | #include "jsdate.h" |
71 | #include "jsstaticcheck.h" |
72 | #include "jstracer.h" |
73 | #include "jsxml.h" |
74 | |
75 | #include "jsautooplen.h" // generated headers last |
76 | #include "imacros.c.out" |
77 | |
78 | #if JS_HAS_XML_SUPPORT |
79 | #define ABORT_IF_XML(v) \ |
80 | JS_BEGIN_MACRO \ |
81 | if (!JSVAL_IS_PRIMITIVE(v) && OBJECT_IS_XML(BOGUS_CX, JSVAL_TO_OBJECT(v)))\ |
82 | ABORT_TRACE("xml detected"); \ |
83 | JS_END_MACRO |
84 | #else |
85 | #define ABORT_IF_XML(v) ((void) 0)
86 | #endif |
87 | |
88 | /* Never use JSVAL_IS_BOOLEAN because it restricts the value (true, false) and |
89 | the type. What you want to use is JSVAL_TAG(x) == JSVAL_BOOLEAN and then |
90 | handle the undefined case properly (bug 457363). */ |
91 | #undef JSVAL_IS_BOOLEAN |
92 | #define JSVAL_IS_BOOLEAN(x) JS_STATIC_ASSERT(0) |
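/*
 * Illustrative note: because the macro now expands to JS_STATIC_ASSERT(0),
 * any accidental use such as
 *     if (JSVAL_IS_BOOLEAN(v)) ...
 * fails to compile, forcing callers to test JSVAL_TAG(x) == JSVAL_BOOLEAN
 * and to handle the undefined case explicitly.
 */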
93 | |
94 | /* Use a fake tag to represent boxed values, borrowing from the integer tag |
95 | range since we only use JSVAL_INT to indicate integers. */ |
96 | #define JSVAL_BOXED 3 |
97 | |
98 | /* Another fake jsval tag, used to distinguish null from object values. */ |
99 | #define JSVAL_TNULL 5 |
100 | |
101 | /* A last fake jsval tag distinguishing functions from non-function objects. */ |
102 | #define JSVAL_TFUN 7 |
103 | |
104 | /* Map to translate a type tag into a printable representation. */ |
105 | static const char typeChar[] = "OIDXSNBF"; |
106 | static const char tagChar[] = "OIDISIBI"; |
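/*
 * Both strings are indexed by type tag: typeChar[tag] is the printable type
 * (e.g. typeChar[JSVAL_BOXED] == 'X', typeChar[JSVAL_TNULL] == 'N',
 * typeChar[JSVAL_TFUN] == 'F'), while tagChar[tag] shows the underlying jsval
 * tag; the three fake tags all print as 'I' since they borrow odd values
 * from the integer tag range.
 */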
107 | |
108 | /* Blacklist parameters. */ |
109 | |
110 | /* Number of iterations of a loop where we start tracing. That is, we don't |
111 | start tracing until the beginning of the HOTLOOP-th iteration. */ |
112 | #define HOTLOOP 2 |
113 | |
114 | /* Attempt recording this many times before blacklisting permanently. */ |
115 | #define BL_ATTEMPTS 2 |
116 | |
117 | /* Skip this many future hits before allowing recording again after blacklisting. */ |
118 | #define BL_BACKOFF 32 |
119 | |
120 | /* Number of times we wait to exit on a side exit before we try to extend the tree. */ |
121 | #define HOTEXIT 1 |
122 | |
123 | /* Number of times we try to extend the tree along a side exit. */ |
124 | #define MAXEXIT 3 |
125 | |
126 | /* Maximum number of peer trees allowed. */ |
127 | #define MAXPEERS 9 |
128 | |
129 | /* Max call depths for inlining. */ |
130 | #define MAX_CALLDEPTH 10 |
131 | |
132 | /* Max native stack size. */ |
133 | #define MAX_NATIVE_STACK_SLOTS 1024 |
134 | |
135 | /* Max call stack size. */ |
136 | #define MAX_CALL_STACK_ENTRIES 64 |
137 | |
138 | /* Max global object size. */ |
139 | #define MAX_GLOBAL_SLOTS 4096 |
140 | |
141 | /* Max memory needed to rebuild the interpreter stack when falling off trace. */ |
142 | #define MAX_INTERP_STACK_BYTES \ |
143 | (MAX_NATIVE_STACK_SLOTS * sizeof(jsval) + \ |
144 | MAX_CALL_STACK_ENTRIES * sizeof(JSInlineFrame) + \ |
145 | sizeof(JSInlineFrame)) /* possibly slow native frame at top of stack */ |
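/*
 * Rough worked example (assuming a 32-bit build with 4-byte jsval and a
 * JSInlineFrame of roughly 60 bytes): 1024*4 + 64*60 + 60, i.e. about 8 KB
 * reserved for rebuilding the interpreter stack. Actual sizes vary by
 * platform and struct layout.
 */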
146 | |
147 | /* Max number of branches per tree. */ |
148 | #define MAX_BRANCHES 32 |
149 | |
150 | #define CHECK_STATUS(expr) \ |
151 | JS_BEGIN_MACRO \ |
152 | JSRecordingStatus _status = (expr); \ |
153 | if (_status != JSRS_CONTINUE) \ |
154 | return _status; \ |
155 | JS_END_MACRO |
156 | |
157 | #ifdef JS_JIT_SPEW |
158 | #define debug_only_a(x) if (js_verboseAbort || js_verboseDebug) { x; }
159 | #define ABORT_TRACE_RV(msg, value) \ |
160 | JS_BEGIN_MACRO \ |
161 | debug_only_a(fprintf(stdout, "abort: %d: %s\n", __LINE__, (msg));) \ |
162 | return (value); \ |
163 | JS_END_MACRO |
164 | #else |
165 | #define debug_only_a(x) |
166 | #define ABORT_TRACE_RV(msg, value) return (value) |
167 | #endif |
168 | |
169 | #define ABORT_TRACE(msg) ABORT_TRACE_RV(msg, JSRS_STOP) |
170 | #define ABORT_TRACE_ERROR(msg) ABORT_TRACE_RV(msg, JSRS_ERROR) |
171 | |
172 | #ifdef JS_JIT_SPEW |
173 | struct __jitstats { |
174 | #define JITSTAT(x) uint64 x; |
175 | #include "jitstats.tbl" |
176 | #undef JITSTAT |
177 | } jitstats = { 0LL, }; |
178 | |
179 | JS_STATIC_ASSERT(sizeof(jitstats) % sizeof(uint64) == 0); |
180 | |
181 | enum jitstat_ids { |
182 | #define JITSTAT(x) STAT ## x ## ID, |
183 | #include "jitstats.tbl" |
184 | #undef JITSTAT |
185 | STAT_IDS_TOTAL |
186 | }; |
187 | |
188 | static JSPropertySpec jitstats_props[] = { |
189 | #define JITSTAT(x) { #x, STAT ## x ## ID, JSPROP_ENUMERATE | JSPROP_READONLY | JSPROP_PERMANENT }, |
190 | #include "jitstats.tbl" |
191 | #undef JITSTAT |
192 | { 0 } |
193 | }; |
194 | |
195 | static JSBool |
196 | jitstats_getProperty(JSContext *cx, JSObject *obj, jsid id, jsval *vp) |
197 | { |
198 | int index = -1; |
199 | |
200 | if (JSVAL_IS_STRING(id)) { |
201 | JSString* str = JSVAL_TO_STRING(id); |
202 | if (strcmp(JS_GetStringBytes(str), "HOTLOOP") == 0) { |
203 | *vp = INT_TO_JSVAL(HOTLOOP); |
204 | return JS_TRUE; |
205 | } |
206 | } |
207 | |
208 | if (JSVAL_IS_INT(id)) |
209 | index = JSVAL_TO_INT(id); |
210 | |
211 | uint64 result = 0; |
212 | switch (index) { |
213 | #define JITSTAT(x) case STAT ## x ## ID: result = jitstats.x; break; |
214 | #include "jitstats.tbl" |
215 | #undef JITSTAT |
216 | default: |
217 | *vp = JSVAL_VOID; |
218 | return JS_TRUE; |
219 | } |
220 | |
221 | if (result < JSVAL_INT_MAX) { |
222 | *vp = INT_TO_JSVAL(result); |
223 | return JS_TRUE; |
224 | } |
225 | char retstr[64]; |
226 | JS_snprintf(retstr, sizeof retstr, "%llu", result); |
227 | *vp = STRING_TO_JSVAL(JS_NewStringCopyZ(cx, retstr)); |
228 | return JS_TRUE; |
229 | } |
230 | |
231 | JSClass jitstats_class = { |
232 | "jitstats", |
233 | JSCLASS_HAS_PRIVATE, |
234 | JS_PropertyStub, JS_PropertyStub, |
235 | jitstats_getProperty, JS_PropertyStub, |
236 | JS_EnumerateStub, JS_ResolveStub, |
237 | JS_ConvertStub, JS_FinalizeStub, |
238 | JSCLASS_NO_OPTIONAL_MEMBERS |
239 | }; |
240 | |
241 | void |
242 | js_InitJITStatsClass(JSContext *cx, JSObject *glob) |
243 | { |
244 | JS_InitClass(cx, glob, NULL, &jitstats_class, NULL, 0, jitstats_props, NULL, NULL, NULL); |
245 | } |
246 | |
247 | #define AUDIT(x) (jitstats.x++) |
248 | #else |
249 | #define AUDIT(x) ((void)0) |
250 | #endif /* JS_JIT_SPEW */ |
251 | |
252 | #define INS_CONST(c) addName(lir->insImm(c), #c) |
253 | #define INS_CONSTPTR(p) addName(lir->insImmPtr(p), #p) |
254 | #define INS_CONSTFUNPTR(p) addName(lir->insImmPtr(JS_FUNC_TO_DATA_PTR(void*, p)), #p) |
255 | #define INS_CONSTWORD(v) addName(lir->insImmPtr((void *) v), #v) |
256 | |
257 | using namespace avmplus; |
258 | using namespace nanojit; |
259 | |
260 | static GC gc = GC(); |
261 | static avmplus::AvmCore s_core = avmplus::AvmCore(); |
262 | static avmplus::AvmCore* core = &s_core; |
263 | |
264 | #ifdef JS_JIT_SPEW |
265 | void |
266 | js_DumpPeerStability(JSTraceMonitor* tm, const void* ip, JSObject* globalObj, uint32 globalShape, uint32 argc); |
267 | #endif |
268 | |
269 | /* We really need a better way to configure the JIT. Shaver, where is my fancy JIT object? */ |
270 | static bool did_we_check_processor_features = false; |
271 | |
272 | #ifdef JS_JIT_SPEW |
273 | bool js_verboseDebug = getenv("TRACEMONKEY") && strstr(getenv("TRACEMONKEY"), "verbose"); |
274 | bool js_verboseStats = js_verboseDebug || |
275 | (getenv("TRACEMONKEY") && strstr(getenv("TRACEMONKEY"), "stats")); |
276 | bool js_verboseAbort = getenv("TRACEMONKEY") && strstr(getenv("TRACEMONKEY"), "abort"); |
277 | #endif |
278 | |
279 | /* The entire VM shares one oracle. Collisions and concurrent updates are tolerated and at
280 | worst cause performance regressions. */
281 | static Oracle oracle; |
282 | |
283 | Tracker::Tracker() |
284 | { |
285 | pagelist = 0; |
286 | } |
287 | |
288 | Tracker::~Tracker() |
289 | { |
290 | clear(); |
291 | } |
292 | |
293 | jsuword |
294 | Tracker::getPageBase(const void* v) const |
295 | { |
296 | return jsuword(v) & ~jsuword(NJ_PAGE_SIZE-1); |
297 | } |
298 | |
299 | struct Tracker::Page* |
300 | Tracker::findPage(const void* v) const |
301 | { |
302 | jsuword base = getPageBase(v); |
303 | struct Tracker::Page* p = pagelist; |
304 | while (p) { |
305 | if (p->base == base) { |
306 | return p; |
307 | } |
308 | p = p->next; |
309 | } |
310 | return 0; |
311 | } |
312 | |
313 | struct Tracker::Page* |
314 | Tracker::addPage(const void* v) { |
315 | jsuword base = getPageBase(v); |
316 | struct Tracker::Page* p = (struct Tracker::Page*) |
317 | GC::Alloc(sizeof(*p) - sizeof(p->map) + (NJ_PAGE_SIZE >> 2) * sizeof(LIns*)); |
318 | p->base = base; |
319 | p->next = pagelist; |
320 | pagelist = p; |
321 | return p; |
322 | } |
323 | |
324 | void |
325 | Tracker::clear() |
326 | { |
327 | while (pagelist) { |
328 | Page* p = pagelist; |
329 | pagelist = pagelist->next; |
330 | GC::Free(p); |
331 | } |
332 | } |
333 | |
334 | bool |
335 | Tracker::has(const void *v) const |
336 | { |
337 | return get(v) != NULL; |
338 | } |
339 | |
340 | #if defined NANOJIT_64BIT |
341 | #define PAGEMASK 0x7ff |
342 | #else |
343 | #define PAGEMASK 0xfff |
344 | #endif |
345 | |
346 | LIns* |
347 | Tracker::get(const void* v) const |
348 | { |
349 | struct Tracker::Page* p = findPage(v); |
350 | if (!p) |
351 | return NULL; |
352 | return p->map[(jsuword(v) & PAGEMASK) >> 2]; |
353 | } |
354 | |
355 | void |
356 | Tracker::set(const void* v, LIns* i) |
357 | { |
358 | struct Tracker::Page* p = findPage(v); |
359 | if (!p) |
360 | p = addPage(v); |
361 | p->map[(jsuword(v) & PAGEMASK) >> 2] = i; |
362 | } |
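/*
 * The Tracker is a sparse map from (4-byte aligned) addresses to LIR
 * instructions: findPage walks a linked list of NJ_PAGE_SIZE-aligned pages,
 * each holding one LIns* slot per 4-byte word of its page, and
 * (jsuword(v) & PAGEMASK) >> 2 converts the within-page offset into the
 * slot index.
 */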
363 | |
364 | static inline jsuint argSlots(JSStackFrame* fp) |
365 | { |
366 | return JS_MAX(fp->argc, fp->fun->nargs); |
367 | } |
368 | |
369 | static inline bool isNumber(jsval v) |
370 | { |
371 | return JSVAL_IS_INT(v) || JSVAL_IS_DOUBLE(v); |
372 | } |
373 | |
374 | static inline jsdouble asNumber(jsval v) |
375 | { |
376 | JS_ASSERT(isNumber(v)); |
377 | if (JSVAL_IS_DOUBLE(v)) |
378 | return *JSVAL_TO_DOUBLE(v); |
379 | return (jsdouble)JSVAL_TO_INT(v); |
380 | } |
381 | |
382 | static inline bool isInt32(jsval v) |
383 | { |
384 | if (!isNumber(v)) |
385 | return false; |
386 | jsdouble d = asNumber(v); |
387 | jsint i; |
388 | return JSDOUBLE_IS_INT(d, i); |
389 | } |
390 | |
391 | static inline jsint asInt32(jsval v) |
392 | { |
393 | JS_ASSERT(isNumber(v)); |
394 | if (JSVAL_IS_INT(v)) |
395 | return JSVAL_TO_INT(v); |
396 | #ifdef DEBUG |
397 | jsint i; |
398 | JS_ASSERT(JSDOUBLE_IS_INT(*JSVAL_TO_DOUBLE(v), i)); |
399 | #endif |
400 | return jsint(*JSVAL_TO_DOUBLE(v)); |
401 | } |
402 | |
403 | /* Return JSVAL_DOUBLE for all numbers (int and double) and the tag otherwise. */ |
404 | static inline uint8 getPromotedType(jsval v) |
405 | { |
406 | if (JSVAL_IS_INT(v)) |
407 | return JSVAL_DOUBLE; |
408 | if (JSVAL_IS_OBJECT(v)) { |
409 | if (JSVAL_IS_NULL(v)) |
410 | return JSVAL_TNULL; |
411 | if (HAS_FUNCTION_CLASS(JSVAL_TO_OBJECT(v))) |
412 | return JSVAL_TFUN; |
413 | return JSVAL_OBJECT; |
414 | } |
415 | return uint8(JSVAL_TAG(v)); |
416 | } |
417 | |
418 | /* Return JSVAL_INT for all whole numbers that fit into signed 32-bit and the tag otherwise. */ |
419 | static inline uint8 getCoercedType(jsval v) |
420 | { |
421 | if (isInt32(v)) |
422 | return JSVAL_INT; |
423 | if (JSVAL_IS_OBJECT(v)) { |
424 | if (JSVAL_IS_NULL(v)) |
425 | return JSVAL_TNULL; |
426 | if (HAS_FUNCTION_CLASS(JSVAL_TO_OBJECT(v))) |
427 | return JSVAL_TFUN; |
428 | return JSVAL_OBJECT; |
429 | } |
430 | return uint8(JSVAL_TAG(v)); |
431 | } |
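/*
 * Example of the difference, assuming the usual jsval encodings: for v = 5
 * (an int jsval), getPromotedType(v) is JSVAL_DOUBLE, since trace numbers
 * are promoted to double by default, while getCoercedType(v) is JSVAL_INT,
 * since 5 fits into a signed 32-bit integer; for v = 5.5 both return
 * JSVAL_DOUBLE.
 */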
432 | |
433 | /* |
434 | * Constant seed and accumulate step borrowed from the DJB hash. |
435 | */ |
436 | |
437 | #define ORACLE_MASK (ORACLE_SIZE - 1) |
438 | #define FRAGMENT_TABLE_MASK (FRAGMENT_TABLE_SIZE - 1) |
439 | #define HASH_SEED 5381 |
440 | |
441 | static inline void |
442 | hash_accum(uintptr_t& h, uintptr_t i, uintptr_t mask) |
443 | { |
444 | h = ((h << 5) + h + (mask & i)) & mask; |
445 | } |
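/*
 * This is the DJB accumulate step, h = h*33 + i, folded under the table
 * mask. A minimal sketch of how the slot hashes below compose it:
 *
 *     uintptr_t h = HASH_SEED;           // 5381
 *     hash_accum(h, key1, ORACLE_MASK);  // h = ((h << 5) + h + (key1 & mask)) & mask
 *     hash_accum(h, key2, ORACLE_MASK);
 *
 * The result always stays within [0, mask], so it can index the oracle
 * bitsets (or the fragment table) directly.
 */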
446 | |
447 | JS_REQUIRES_STACK static inline int |
448 | stackSlotHash(JSContext* cx, unsigned slot) |
449 | { |
450 | uintptr_t h = HASH_SEED; |
451 | hash_accum(h, uintptr_t(cx->fp->script), ORACLE_MASK); |
452 | hash_accum(h, uintptr_t(cx->fp->regs->pc), ORACLE_MASK); |
453 | hash_accum(h, uintptr_t(slot), ORACLE_MASK); |
454 | return int(h); |
455 | } |
456 | |
457 | JS_REQUIRES_STACK static inline int |
458 | globalSlotHash(JSContext* cx, unsigned slot) |
459 | { |
460 | uintptr_t h = HASH_SEED; |
461 | JSStackFrame* fp = cx->fp; |
462 | |
463 | while (fp->down) |
464 | fp = fp->down; |
465 | |
466 | hash_accum(h, uintptr_t(fp->script), ORACLE_MASK); |
467 | hash_accum(h, uintptr_t(OBJ_SHAPE(JS_GetGlobalForObject(cx, fp->scopeChain))), |
468 | ORACLE_MASK); |
469 | hash_accum(h, uintptr_t(slot), ORACLE_MASK); |
470 | return int(h); |
471 | } |
472 | |
473 | Oracle::Oracle() |
474 | { |
475 | /* Grow the oracle bitsets to their (fixed) size here, once. */ |
476 | _stackDontDemote.set(&gc, ORACLE_SIZE-1); |
477 | _globalDontDemote.set(&gc, ORACLE_SIZE-1); |
478 | clear(); |
479 | } |
480 | |
481 | /* Tell the oracle that a certain global variable should not be demoted. */ |
482 | JS_REQUIRES_STACK void |
483 | Oracle::markGlobalSlotUndemotable(JSContext* cx, unsigned slot) |
484 | { |
485 | _globalDontDemote.set(&gc, globalSlotHash(cx, slot)); |
486 | } |
487 | |
488 | /* Ask the oracle whether a certain global variable should not be demoted. */
489 | JS_REQUIRES_STACK bool |
490 | Oracle::isGlobalSlotUndemotable(JSContext* cx, unsigned slot) const |
491 | { |
492 | return _globalDontDemote.get(globalSlotHash(cx, slot)); |
493 | } |
494 | |
495 | /* Tell the oracle that a certain slot at a certain bytecode location should not be demoted. */ |
496 | JS_REQUIRES_STACK void |
497 | Oracle::markStackSlotUndemotable(JSContext* cx, unsigned slot) |
498 | { |
499 | _stackDontDemote.set(&gc, stackSlotHash(cx, slot)); |
500 | } |
501 | |
502 | /* Ask the oracle whether a certain slot should not be demoted. */
503 | JS_REQUIRES_STACK bool |
504 | Oracle::isStackSlotUndemotable(JSContext* cx, unsigned slot) const |
505 | { |
506 | return _stackDontDemote.get(stackSlotHash(cx, slot)); |
507 | } |
508 | |
509 | void |
510 | Oracle::clearDemotability() |
511 | { |
512 | _stackDontDemote.reset(); |
513 | _globalDontDemote.reset(); |
514 | } |
515 | |
516 | |
517 | struct PCHashEntry : public JSDHashEntryStub { |
518 | size_t count; |
519 | }; |
520 | |
521 | #define PC_HASH_COUNT 1024 |
522 | |
523 | static void |
524 | js_Blacklist(jsbytecode* pc) |
525 | { |
526 | JS_ASSERT(*pc == JSOP_LOOP || *pc == JSOP_NOP); |
527 | *pc = JSOP_NOP; |
528 | } |
529 | |
530 | static void |
531 | js_Backoff(JSContext *cx, jsbytecode* pc, Fragment* tree=NULL) |
532 | { |
533 | JSDHashTable *table = &JS_TRACE_MONITOR(cx).recordAttempts; |
534 | |
535 | if (table->ops) { |
536 | PCHashEntry *entry = (PCHashEntry *) |
537 | JS_DHashTableOperate(table, pc, JS_DHASH_ADD); |
538 | |
539 | if (entry) { |
540 | if (!entry->key) { |
541 | entry->key = pc; |
542 | JS_ASSERT(entry->count == 0); |
543 | } |
544 | JS_ASSERT(JS_DHASH_ENTRY_IS_LIVE(&(entry->hdr))); |
545 | if (entry->count++ > (BL_ATTEMPTS * MAXPEERS)) { |
546 | entry->count = 0; |
547 | js_Blacklist(pc); |
548 | return; |
549 | } |
550 | } |
551 | } |
552 | |
553 | if (tree) { |
554 | tree->hits() -= BL_BACKOFF; |
555 | |
556 | /* |
557 | * In case there is no entry or no table (due to OOM) or some |
558 | * serious imbalance in the recording-attempt distribution on a |
559 | * multitree, give each tree another chance to blacklist here as |
560 | * well. |
561 | */ |
562 | if (++tree->recordAttempts > BL_ATTEMPTS) |
563 | js_Blacklist(pc); |
564 | } |
565 | } |
566 | |
567 | static void |
568 | js_resetRecordingAttempts(JSContext *cx, jsbytecode* pc) |
569 | { |
570 | JSDHashTable *table = &JS_TRACE_MONITOR(cx).recordAttempts; |
571 | if (table->ops) { |
572 | PCHashEntry *entry = (PCHashEntry *) |
573 | JS_DHashTableOperate(table, pc, JS_DHASH_LOOKUP); |
574 | |
575 | if (JS_DHASH_ENTRY_IS_FREE(&(entry->hdr))) |
576 | return; |
577 | JS_ASSERT(JS_DHASH_ENTRY_IS_LIVE(&(entry->hdr))); |
578 | entry->count = 0; |
579 | } |
580 | } |
581 | |
582 | static inline size_t |
583 | fragmentHash(const void *ip, JSObject* globalObj, uint32 globalShape, uint32 argc) |
584 | { |
585 | uintptr_t h = HASH_SEED; |
586 | hash_accum(h, uintptr_t(ip), FRAGMENT_TABLE_MASK); |
587 | hash_accum(h, uintptr_t(globalObj), FRAGMENT_TABLE_MASK); |
588 | hash_accum(h, uintptr_t(globalShape), FRAGMENT_TABLE_MASK); |
589 | hash_accum(h, uintptr_t(argc), FRAGMENT_TABLE_MASK); |
590 | return size_t(h); |
591 | } |
592 | |
593 | /* |
594 | * argc is cx->fp->argc at the trace loop header, i.e., the number of arguments |
595 | * pushed for the innermost JS frame. This is required as part of the fragment |
596 | * key because the fragment will write those arguments back to the interpreter |
597 | * stack when it exits, using its typemap, which implicitly incorporates a given |
598 | * value of argc. Without this feature, a fragment could be called as an inner |
599 | * tree with two different values of argc, and entry type checking or exit |
600 | * frame synthesis could crash. |
601 | */ |
602 | struct VMFragment : public Fragment |
603 | { |
604 | VMFragment(const void* _ip, JSObject* _globalObj, uint32 _globalShape, uint32 _argc) : |
605 | Fragment(_ip), |
606 | next(NULL), |
607 | globalObj(_globalObj), |
608 | globalShape(_globalShape), |
609 | argc(_argc) |
610 | {} |
611 | VMFragment* next; |
612 | JSObject* globalObj; |
613 | uint32 globalShape; |
614 | uint32 argc; |
615 | }; |
616 | |
617 | static VMFragment* |
618 | getVMFragment(JSTraceMonitor* tm, const void *ip, JSObject* globalObj, uint32 globalShape, |
619 | uint32 argc) |
620 | { |
621 | size_t h = fragmentHash(ip, globalObj, globalShape, argc); |
622 | VMFragment* vf = tm->vmfragments[h]; |
623 | while (vf && |
624 | ! (vf->globalObj == globalObj && |
625 | vf->globalShape == globalShape && |
626 | vf->ip == ip && |
627 | vf->argc == argc)) { |
628 | vf = vf->next; |
629 | } |
630 | return vf; |
631 | } |
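/*
 * Minimal lookup sketch (pc and shape stand for the loop header ip and the
 * global shape): peers for a given loop header are found by hashing the full
 * key and walking the chain,
 *
 *     VMFragment* f = getVMFragment(tm, pc, globalObj, shape, argc);
 *
 * so a fragment is returned only if ip, global object, global shape, and
 * argc all match; a different shape or argc yields a separate peer fragment.
 */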
632 | |
633 | static VMFragment* |
634 | getLoop(JSTraceMonitor* tm, const void *ip, JSObject* globalObj, uint32 globalShape, |
635 | uint32 argc) |
636 | { |
637 | return getVMFragment(tm, ip, globalObj, globalShape, argc); |
638 | } |
639 | |
640 | static Fragment* |
641 | getAnchor(JSTraceMonitor* tm, const void *ip, JSObject* globalObj, uint32 globalShape, |
642 | uint32 argc) |
643 | { |
644 | VMFragment *f = new (&gc) VMFragment(ip, globalObj, globalShape, argc); |
645 | JS_ASSERT(f); |
646 | |
647 | Fragment *p = getVMFragment(tm, ip, globalObj, globalShape, argc); |
648 | |
649 | if (p) { |
650 | f->first = p; |
651 | /* append at the end of the peer list */ |
652 | Fragment* next; |
653 | while ((next = p->peer) != NULL) |
654 | p = next; |
655 | p->peer = f; |
656 | } else { |
657 | /* this is the first fragment */ |
658 | f->first = f; |
659 | size_t h = fragmentHash(ip, globalObj, globalShape, argc); |
660 | f->next = tm->vmfragments[h]; |
661 | tm->vmfragments[h] = f; |
662 | } |
663 | f->anchor = f; |
664 | f->root = f; |
665 | f->kind = LoopTrace; |
666 | return f; |
667 | } |
668 | |
669 | #ifdef DEBUG |
670 | static void |
671 | ensureTreeIsUnique(JSTraceMonitor* tm, VMFragment* f, TreeInfo* ti) |
672 | { |
673 | JS_ASSERT(f->root == f); |
674 | /* |
675 | * Check for duplicate entry type maps. This is always wrong and hints at |
676 | * trace explosion since we are trying to stabilize something without |
677 | * properly connecting peer edges. |
678 | */ |
679 | TreeInfo* ti_other; |
680 | for (Fragment* peer = getLoop(tm, f->ip, f->globalObj, f->globalShape, f->argc); |
681 | peer != NULL; |
682 | peer = peer->peer) { |
683 | if (!peer->code() || peer == f) |
684 | continue; |
685 | ti_other = (TreeInfo*)peer->vmprivate; |
686 | JS_ASSERT(ti_other); |
687 | JS_ASSERT(!ti->typeMap.matches(ti_other->typeMap)); |
688 | } |
689 | } |
690 | #endif |
691 | |
692 | static void |
693 | js_AttemptCompilation(JSContext *cx, JSTraceMonitor* tm, JSObject* globalObj, jsbytecode* pc, |
694 | uint32 argc) |
695 | { |
696 | /* |
697 | * If we already permanently blacklisted the location, undo that. |
698 | */ |
699 | JS_ASSERT(*(jsbytecode*)pc == JSOP_NOP || *(jsbytecode*)pc == JSOP_LOOP); |
700 | *(jsbytecode*)pc = JSOP_LOOP; |
701 | js_resetRecordingAttempts(cx, pc); |
702 | |
703 | /* |
704 | * Breathe new life into all peer fragments at the designated loop header.
705 | */ |
706 | Fragment* f = (VMFragment*)getLoop(tm, pc, globalObj, OBJ_SHAPE(globalObj), |
707 | argc); |
708 | if (!f) { |
709 | /* |
710 | * If the global object's shape changed, we can't easily find the |
711 | * corresponding loop header via a hash table lookup. In this |
712 | * we simply bail here and hope that the fragment has another |
713 | * outstanding compilation attempt. This case is extremely rare. |
714 | */ |
715 | return; |
716 | } |
717 | JS_ASSERT(f->root == f); |
718 | f = f->first; |
719 | while (f) { |
720 | JS_ASSERT(f->root == f); |
721 | --f->recordAttempts; |
722 | f->hits() = HOTLOOP; |
723 | f = f->peer; |
724 | } |
725 | } |
726 | |
727 | JS_DEFINE_CALLINFO_1(static, DOUBLE, i2f, INT32, 1, 1) |
728 | JS_DEFINE_CALLINFO_1(static, DOUBLE, u2f, UINT32, 1, 1) |
729 | |
730 | static bool isi2f(LInsp i) |
731 | { |
732 | if (i->isop(LIR_i2f)) |
733 | return true; |
734 | |
735 | if (nanojit::AvmCore::config.soft_float && |
736 | i->isop(LIR_qjoin) && |
737 | i->oprnd1()->isop(LIR_call) && |
738 | i->oprnd2()->isop(LIR_callh)) |
739 | { |
740 | if (i->oprnd1()->callInfo() == &i2f_ci) |
741 | return true; |
742 | } |
743 | |
744 | return false; |
745 | } |
746 | |
747 | static bool isu2f(LInsp i) |
748 | { |
749 | if (i->isop(LIR_u2f)) |
750 | return true; |
751 | |
752 | if (nanojit::AvmCore::config.soft_float && |
753 | i->isop(LIR_qjoin) && |
754 | i->oprnd1()->isop(LIR_call) && |
755 | i->oprnd2()->isop(LIR_callh)) |
756 | { |
757 | if (i->oprnd1()->callInfo() == &u2f_ci) |
758 | return true; |
759 | } |
760 | |
761 | return false; |
762 | } |
763 | |
764 | static LInsp iu2fArg(LInsp i) |
765 | { |
766 | if (nanojit::AvmCore::config.soft_float && |
767 | i->isop(LIR_qjoin)) |
768 | { |
769 | return i->oprnd1()->arg(0); |
770 | } |
771 | |
772 | return i->oprnd1(); |
773 | } |
774 | |
775 | |
776 | static LIns* demote(LirWriter *out, LInsp i) |
777 | { |
778 | if (i->isCall()) |
779 | return callArgN(i, 0); |
780 | if (isi2f(i) || isu2f(i)) |
781 | return iu2fArg(i); |
782 | if (i->isconst()) |
783 | return i; |
784 | AvmAssert(i->isconstq()); |
785 | double cf = i->constvalf(); |
786 | int32_t ci = cf > 0x7fffffff ? uint32_t(cf) : int32_t(cf); |
787 | return out->insImm(ci); |
788 | } |
789 | |
790 | static bool isPromoteInt(LIns* i) |
791 | { |
792 | if (isi2f(i) || i->isconst()) |
793 | return true; |
794 | if (!i->isconstq()) |
795 | return false; |
796 | jsdouble d = i->constvalf(); |
797 | return d == jsdouble(jsint(d)) && !JSDOUBLE_IS_NEGZERO(d); |
798 | } |
799 | |
800 | static bool isPromoteUint(LIns* i) |
801 | { |
802 | if (isu2f(i) || i->isconst()) |
803 | return true; |
804 | if (!i->isconstq()) |
805 | return false; |
806 | jsdouble d = i->constvalf(); |
807 | return d == jsdouble(jsuint(d)) && !JSDOUBLE_IS_NEGZERO(d); |
808 | } |
809 | |
810 | static bool isPromote(LIns* i) |
811 | { |
812 | return isPromoteInt(i) || isPromoteUint(i); |
813 | } |
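/*
 * Demotion example: if i is i2f(x), demote() simply returns x; if i is the
 * constant double 42.0, isPromoteInt(i) holds (42.0 == jsdouble(jsint(42.0))
 * and it is not -0) and demote() emits insImm(42). Fractional constants and
 * -0 are rejected so that integer arithmetic stays bit-exact.
 */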
814 | |
815 | static bool isconst(LIns* i, int32_t c) |
816 | { |
817 | return i->isconst() && i->constval() == c; |
818 | } |
819 | |
820 | static bool overflowSafe(LIns* i) |
821 | { |
822 | LIns* c; |
823 | return (i->isop(LIR_and) && ((c = i->oprnd2())->isconst()) && |
824 | ((c->constval() & 0xc0000000) == 0)) || |
825 | (i->isop(LIR_rsh) && ((c = i->oprnd2())->isconst()) && |
826 | ((c->constval() > 0))); |
827 | } |
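/*
 * Example: (x & 0x3fffffff) is overflow-safe because masking off the top two
 * bits leaves at most 30 significant bits, so adding two such values cannot
 * overflow int32; likewise x >> c for a constant c > 0 only shrinks the
 * magnitude. This lets the recorder elide overflow guards on such integer
 * additions.
 */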
828 | |
829 | /* soft float support */ |
830 | |
831 | JS_DEFINE_CALLINFO_1(static, DOUBLE, fneg, DOUBLE, 1, 1) |
832 | JS_DEFINE_CALLINFO_2(static, INT32, fcmpeq, DOUBLE, DOUBLE, 1, 1) |
833 | JS_DEFINE_CALLINFO_2(static, INT32, fcmplt, DOUBLE, DOUBLE, 1, 1) |
834 | JS_DEFINE_CALLINFO_2(static, INT32, fcmple, DOUBLE, DOUBLE, 1, 1) |
835 | JS_DEFINE_CALLINFO_2(static, INT32, fcmpgt, DOUBLE, DOUBLE, 1, 1) |
836 | JS_DEFINE_CALLINFO_2(static, INT32, fcmpge, DOUBLE, DOUBLE, 1, 1) |
837 | JS_DEFINE_CALLINFO_2(static, DOUBLE, fmul, DOUBLE, DOUBLE, 1, 1) |
838 | JS_DEFINE_CALLINFO_2(static, DOUBLE, fadd, DOUBLE, DOUBLE, 1, 1) |
839 | JS_DEFINE_CALLINFO_2(static, DOUBLE, fdiv, DOUBLE, DOUBLE, 1, 1) |
840 | JS_DEFINE_CALLINFO_2(static, DOUBLE, fsub, DOUBLE, DOUBLE, 1, 1) |
841 | |
842 | jsdouble FASTCALL |
843 | fneg(jsdouble x) |
844 | { |
845 | return -x; |
846 | } |
847 | |
848 | jsdouble FASTCALL |
849 | i2f(int32 i) |
850 | { |
851 | return i; |
852 | } |
853 | |
854 | jsdouble FASTCALL |
855 | u2f(jsuint u) |
856 | { |
857 | return u; |
858 | } |
859 | |
860 | int32 FASTCALL |
861 | fcmpeq(jsdouble x, jsdouble y) |
862 | { |
863 | return x==y; |
864 | } |
865 | |
866 | int32 FASTCALL |
867 | fcmplt(jsdouble x, jsdouble y) |
868 | { |
869 | return x < y; |
870 | } |
871 | |
872 | int32 FASTCALL |
873 | fcmple(jsdouble x, jsdouble y) |
874 | { |
875 | return x <= y; |
876 | } |
877 | |
878 | int32 FASTCALL |
879 | fcmpgt(jsdouble x, jsdouble y) |
880 | { |
881 | return x > y; |
882 | } |
883 | |
884 | int32 FASTCALL |
885 | fcmpge(jsdouble x, jsdouble y) |
886 | { |
887 | return x >= y; |
888 | } |
889 | |
890 | jsdouble FASTCALL |
891 | fmul(jsdouble x, jsdouble y) |
892 | { |
893 | return x * y; |
894 | } |
895 | |
896 | jsdouble FASTCALL |
897 | fadd(jsdouble x, jsdouble y) |
898 | { |
899 | return x + y; |
900 | } |
901 | |
902 | jsdouble FASTCALL |
903 | fdiv(jsdouble x, jsdouble y) |
904 | { |
905 | return x / y; |
906 | } |
907 | |
908 | jsdouble FASTCALL |
909 | fsub(jsdouble x, jsdouble y) |
910 | { |
911 | return x - y; |
912 | } |
913 | |
914 | class SoftFloatFilter: public LirWriter |
915 | { |
916 | public: |
917 | SoftFloatFilter(LirWriter* out): |
918 | LirWriter(out) |
919 | { |
920 | } |
921 | |
922 | LInsp quadCall(const CallInfo *ci, LInsp args[]) { |
923 | LInsp qlo, qhi; |
924 | |
925 | qlo = out->insCall(ci, args); |
926 | qhi = out->ins1(LIR_callh, qlo); |
927 | return out->qjoin(qlo, qhi); |
928 | } |
929 | |
930 | LInsp ins1(LOpcode v, LInsp s0) |
931 | { |
932 | if (v == LIR_fneg) |
933 | return quadCall(&fneg_ci, &s0); |
934 | |
935 | if (v == LIR_i2f) |
936 | return quadCall(&i2f_ci, &s0); |
937 | |
938 | if (v == LIR_u2f) |
939 | return quadCall(&u2f_ci, &s0); |
940 | |
941 | return out->ins1(v, s0); |
942 | } |
943 | |
944 | LInsp ins2(LOpcode v, LInsp s0, LInsp s1) |
945 | { |
946 | LInsp args[2]; |
947 | LInsp bv; |
948 | |
949 | // change the numeric value and order of these LIR opcodes and die |
950 | if (LIR_fadd <= v && v <= LIR_fdiv) { |
951 | static const CallInfo *fmap[] = { &fadd_ci, &fsub_ci, &fmul_ci, &fdiv_ci }; |
952 | |
953 | args[0] = s1; |
954 | args[1] = s0; |
955 | |
956 | return quadCall(fmap[v - LIR_fadd], args); |
957 | } |
958 | |
959 | if (LIR_feq <= v && v <= LIR_fge) { |
960 | static const CallInfo *fmap[] = { &fcmpeq_ci, &fcmplt_ci, &fcmpgt_ci, &fcmple_ci, &fcmpge_ci }; |
961 | |
962 | args[0] = s1; |
963 | args[1] = s0; |
964 | |
965 | bv = out->insCall(fmap[v - LIR_feq], args); |
966 | return out->ins2(LIR_eq, bv, out->insImm(1)); |
967 | } |
968 | |
969 | return out->ins2(v, s0, s1); |
970 | } |
971 | |
972 | LInsp insCall(const CallInfo *ci, LInsp args[]) |
973 | { |
974 | // if the return type is ARGSIZE_F, we have |
975 | // to do a quadCall ( qjoin(call,callh) ) |
976 | if ((ci->_argtypes & 3) == ARGSIZE_F) |
977 | return quadCall(ci, args); |
978 | |
979 | return out->insCall(ci, args); |
980 | } |
981 | }; |
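/*
 * Sketch of the rewriting performed by this filter under soft-float: a
 * double add
 *
 *     LIR_fadd(a, b)
 *
 * becomes qjoin(call(&fadd_ci, ...), callh(...)), i.e. a 64-bit result
 * synthesized from the low and high words of a helper call, while a float
 * compare becomes an integer helper call whose result is tested against 1.
 */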
982 | |
983 | class FuncFilter: public LirWriter |
984 | { |
985 | public: |
986 | FuncFilter(LirWriter* out): |
987 | LirWriter(out) |
988 | { |
989 | } |
990 | |
991 | LInsp ins2(LOpcode v, LInsp s0, LInsp s1) |
992 | { |
993 | if (s0 == s1 && v == LIR_feq) { |
994 | if (isPromote(s0)) { |
995 | // double(int) and double(uint) cannot be nan |
996 | return insImm(1); |
997 | } |
998 | if (s0->isop(LIR_fmul) || s0->isop(LIR_fsub) || s0->isop(LIR_fadd)) { |
999 | LInsp lhs = s0->oprnd1(); |
1000 | LInsp rhs = s0->oprnd2(); |
1001 | if (isPromote(lhs) && isPromote(rhs)) { |
1002 | // add/sub/mul promoted ints can't be nan |
1003 | return insImm(1); |
1004 | } |
1005 | } |
1006 | } else if (LIR_feq <= v && v <= LIR_fge) { |
1007 | if (isPromoteInt(s0) && isPromoteInt(s1)) { |
1008 | // demote fcmp to cmp |
1009 | v = LOpcode(v + (LIR_eq - LIR_feq)); |
1010 | return out->ins2(v, demote(out, s0), demote(out, s1)); |
1011 | } else if (isPromoteUint(s0) && isPromoteUint(s1)) { |
1012 | // uint compare |
1013 | v = LOpcode(v + (LIR_eq - LIR_feq)); |
1014 | if (v != LIR_eq) |
1015 | v = LOpcode(v + (LIR_ult - LIR_lt)); // cmp -> ucmp |
1016 | return out->ins2(v, demote(out, s0), demote(out, s1)); |
1017 | } |
1018 | } else if (v == LIR_or && |
1019 | s0->isop(LIR_lsh) && isconst(s0->oprnd2(), 16) && |
1020 | s1->isop(LIR_and) && isconst(s1->oprnd2(), 0xffff)) { |
1021 | LIns* msw = s0->oprnd1(); |
1022 | LIns* lsw = s1->oprnd1(); |
1023 | LIns* x; |
1024 | LIns* y; |
1025 | if (lsw->isop(LIR_add) && |
1026 | lsw->oprnd1()->isop(LIR_and) && |
1027 | lsw->oprnd2()->isop(LIR_and) && |
1028 | isconst(lsw->oprnd1()->oprnd2(), 0xffff) && |
1029 | isconst(lsw->oprnd2()->oprnd2(), 0xffff) && |
1030 | msw->isop(LIR_add) && |
1031 | msw->oprnd1()->isop(LIR_add) && |
1032 | msw->oprnd2()->isop(LIR_rsh) && |
1033 | msw->oprnd1()->oprnd1()->isop(LIR_rsh) && |
1034 | msw->oprnd1()->oprnd2()->isop(LIR_rsh) && |
1035 | isconst(msw->oprnd2()->oprnd2(), 16) && |
1036 | isconst(msw->oprnd1()->oprnd1()->oprnd2(), 16) && |
1037 | isconst(msw->oprnd1()->oprnd2()->oprnd2(), 16) && |
1038 | (x = lsw->oprnd1()->oprnd1()) == msw->oprnd1()->oprnd1()->oprnd1() && |
1039 | (y = lsw->oprnd2()->oprnd1()) == msw->oprnd1()->oprnd2()->oprnd1() && |
1040 | lsw == msw->oprnd2()->oprnd1()) { |
1041 | return out->ins2(LIR_add, x, y); |
1042 | } |
1043 | } |
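            // The LIR_or arm above recognizes an addition carried out in 16-bit halves,
            //     ((x >> 16) + (y >> 16) + carry) << 16 | ((x & 0xffff) + (y & 0xffff)) & 0xffff,
            // where both halves trace back to the same x and y, and folds it back
            // into a single LIR_add(x, y).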
1044 | #ifdef NANOJIT_ARM |
1045 | else if (v == LIR_lsh || |
1046 | v == LIR_rsh || |
1047 | v == LIR_ush) |
1048 | { |
1049 | // needed on ARM -- arm doesn't mask shifts to 31 like x86 does |
1050 | if (s1->isconst()) |
1051 | s1->setimm16(s1->constval() & 31); |
1052 | else |
1053 | s1 = out->ins2(LIR_and, s1, out->insImm(31)); |
1054 | return out->ins2(v, s0, s1); |
1055 | } |
1056 | #endif |
1057 | |
1058 | return out->ins2(v, s0, s1); |
1059 | } |
1060 | |
1061 | LInsp insCall(const CallInfo *ci, LInsp args[]) |
1062 | { |
1063 | if (ci == &js_DoubleToUint32_ci) { |
1064 | LInsp s0 = args[0]; |
1065 | if (s0->isconstq()) |
1066 | return out->insImm(js_DoubleToECMAUint32(s0->constvalf())); |
1067 | if (isi2f(s0) || isu2f(s0)) |
1068 | return iu2fArg(s0); |
1069 | } else if (ci == &js_DoubleToInt32_ci) { |
1070 | LInsp s0 = args[0]; |
1071 | if (s0->isconstq()) |
1072 | return out->insImm(js_DoubleToECMAInt32(s0->constvalf())); |
1073 | if (s0->isop(LIR_fadd) || s0->isop(LIR_fsub)) { |
1074 | LInsp lhs = s0->oprnd1(); |
1075 | LInsp rhs = s0->oprnd2(); |
1076 | if (isPromote(lhs) && isPromote(rhs)) { |
1077 | LOpcode op = LOpcode(s0->opcode() & ~LIR64); |
1078 | return out->ins2(op, demote(out, lhs), demote(out, rhs)); |
1079 | } |
1080 | } |
1081 | if (isi2f(s0) || isu2f(s0)) |
1082 | return iu2fArg(s0); |
1083 | // XXX ARM -- check for qjoin(call(UnboxDouble),call(UnboxDouble)) |
1084 | if (s0->isCall()) { |
1085 | const CallInfo* ci2 = s0->callInfo(); |
1086 | if (ci2 == &js_UnboxDouble_ci) { |
1087 | LIns* args2[] = { callArgN(s0, 0) }; |
1088 | return out->insCall(&js_UnboxInt32_ci, args2); |
1089 | } else if (ci2 == &js_StringToNumber_ci) { |
1090 | // callArgN's ordering is the order seen by the builtin, not as stored in
1091 | // args here. True story! |
1092 | LIns* args2[] = { callArgN(s0, 1), callArgN(s0, 0) }; |
1093 | return out->insCall(&js_StringToInt32_ci, args2); |
1094 | } else if (ci2 == &js_String_p_charCodeAt0_ci) { |
1095 | // Use a fast path builtin for a charCodeAt that converts to an int right away. |
1096 | LIns* args2[] = { callArgN(s0, 0) }; |
1097 | return out->insCall(&js_String_p_charCodeAt0_int_ci, args2); |
1098 | } else if (ci2 == &js_String_p_charCodeAt_ci) { |
1099 | LIns* idx = callArgN(s0, 1); |
1100 | // If the index is not already an integer, force it to be an integer. |
1101 | idx = isPromote(idx) |
1102 | ? demote(out, idx) |
1103 | : out->insCall(&js_DoubleToInt32_ci, &idx); |
1104 | LIns* args2[] = { idx, callArgN(s0, 0) }; |
1105 | return out->insCall(&js_String_p_charCodeAt_int_ci, args2); |
1106 | } |
1107 | } |
1108 | } else if (ci == &js_BoxDouble_ci) { |
1109 | LInsp s0 = args[0]; |
1110 | JS_ASSERT(s0->isQuad()); |
1111 | if (isi2f(s0)) { |
1112 | LIns* args2[] = { iu2fArg(s0), args[1] }; |
1113 | return out->insCall(&js_BoxInt32_ci, args2); |
1114 | } |
1115 | if (s0->isCall() && s0->callInfo() == &js_UnboxDouble_ci) |
1116 | return callArgN(s0, 0); |
1117 | } |
1118 | return out->insCall(ci, args); |
1119 | } |
1120 | }; |
1121 | |
1122 | /* When JS_JIT_SPEW is defined, vpname holds a textual description of the
1123 |    slot and vpnum its index during the forall iteration over all slots. If
1124 |    JS_JIT_SPEW is not defined, vpnum is set to a very large integer to catch
1125 |    invalid uses of it. Non-spew code should never use vpnum. */
1126 | #ifdef JS_JIT_SPEW |
1127 | #define DEF_VPNAME const char* vpname; unsigned vpnum |
1128 | #define SET_VPNAME(name) do { vpname = name; vpnum = 0; } while(0) |
1129 | #define INC_VPNUM() do { ++vpnum; } while(0) |
1130 | #else |
1131 | #define DEF_VPNAME do {} while (0) |
1132 | #define vpname "" |
1133 | #define vpnum 0x40000000 |
1134 | #define SET_VPNAME(name) ((void)0) |
1135 | #define INC_VPNUM() ((void)0) |
1136 | #endif |
1137 | |
1138 | /* Iterate over all interned global variables. */ |
1139 | #define FORALL_GLOBAL_SLOTS(cx, ngslots, gslots, code) \ |
1140 | JS_BEGIN_MACRO \ |
1141 | DEF_VPNAME; \ |
1142 | JSObject* globalObj = JS_GetGlobalForObject(cx, cx->fp->scopeChain); \ |
1143 | unsigned n; \ |
1144 | jsval* vp; \ |
1145 | SET_VPNAME("global"); \ |
1146 | for (n = 0; n < ngslots; ++n) { \ |
1147 | vp = &STOBJ_GET_SLOT(globalObj, gslots[n]); \ |
1148 | { code; } \ |
1149 | INC_VPNUM(); \ |
1150 | } \ |
1151 | JS_END_MACRO |
1152 | |
1153 | /* Iterate over all slots in the frame, consisting of args, vars, and stack |
1154 | (except for the top-level frame, which does not have args or vars). */
1155 | #define FORALL_FRAME_SLOTS(fp, depth, code) \ |
1156 | JS_BEGIN_MACRO \ |
1157 | jsval* vp; \ |
1158 | jsval* vpstop; \ |
1159 | if (fp->callee) { \ |
1160 | if (depth == 0) { \ |
1161 | SET_VPNAME("callee"); \ |
1162 | vp = &fp->argv[-2]; \ |
1163 | { code; } \ |
1164 | SET_VPNAME("this"); \ |
1165 | vp = &fp->argv[-1]; \ |
1166 | { code; } \ |
1167 | SET_VPNAME("argv"); \ |
1168 | vp = &fp->argv[0]; vpstop = &fp->argv[argSlots(fp)]; \ |
1169 | while (vp < vpstop) { code; ++vp; INC_VPNUM(); } \ |
1170 | } \ |
1171 | SET_VPNAME("vars"); \ |
1172 | vp = fp->slots; vpstop = &fp->slots[fp->script->nfixed]; \ |
1173 | while (vp < vpstop) { code; ++vp; INC_VPNUM(); } \ |
1174 | } \ |
1175 | SET_VPNAME("stack"); \ |
1176 | vp = StackBase(fp); vpstop = fp->regs->sp; \ |
1177 | while (vp < vpstop) { code; ++vp; INC_VPNUM(); } \ |
1178 | if (fsp < fspstop - 1) { \ |
1179 | JSStackFrame* fp2 = fsp[1]; \ |
1180 | int missing = fp2->fun->nargs - fp2->argc; \ |
1181 | if (missing > 0) { \ |
1182 | SET_VPNAME("missing"); \ |
1183 | vp = fp->regs->sp; \ |
1184 | vpstop = vp + missing; \ |
1185 | while (vp < vpstop) { code; ++vp; INC_VPNUM(); } \ |
1186 | } \ |
1187 | } \ |
1188 | JS_END_MACRO |
1189 | |
1190 | /* Iterate over all slots in each pending frame. */ |
1191 | #define FORALL_SLOTS_IN_PENDING_FRAMES(cx, callDepth, code) \ |
1192 | JS_BEGIN_MACRO \ |
1193 | DEF_VPNAME; \ |
1194 | unsigned n; \ |
1195 | JSStackFrame* currentFrame = cx->fp; \ |
1196 | JSStackFrame* entryFrame; \ |
1197 | JSStackFrame* fp = currentFrame; \ |
1198 | for (n = 0; n < callDepth; ++n) { fp = fp->down; } \ |
1199 | entryFrame = fp; \ |
1200 | unsigned frames = callDepth+1; \ |
1201 | JSStackFrame** fstack = \ |
1202 | (JSStackFrame**) alloca(frames * sizeof (JSStackFrame*)); \ |
1203 | JSStackFrame** fspstop = &fstack[frames]; \ |
1204 | JSStackFrame** fsp = fspstop-1; \ |
1205 | fp = currentFrame; \ |
1206 | for (;; fp = fp->down) { *fsp-- = fp; if (fp == entryFrame) break; } \ |
1207 | unsigned depth; \ |
1208 | for (depth = 0, fsp = fstack; fsp < fspstop; ++fsp, ++depth) { \ |
1209 | fp = *fsp; \ |
1210 | FORALL_FRAME_SLOTS(fp, depth, code); \ |
1211 | } \ |
1212 | JS_END_MACRO |
1213 | |
1214 | #define FORALL_SLOTS(cx, ngslots, gslots, callDepth, code) \ |
1215 | JS_BEGIN_MACRO \ |
1216 | FORALL_SLOTS_IN_PENDING_FRAMES(cx, callDepth, code); \ |
1217 | FORALL_GLOBAL_SLOTS(cx, ngslots, gslots, code); \ |
1218 | JS_END_MACRO |
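/*
 * Typical use (see js_NativeStackSlots below): the code argument is spliced
 * into the loop body with vp bound to each slot in turn, so for example
 *
 *     unsigned m = 0;
 *     FORALL_SLOTS_IN_PENDING_FRAMES(cx, callDepth, m++);
 *
 * counts every native-visible slot across the pending frames.
 */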
1219 | |
1220 | /* Calculate the total number of native frame slots we need from this frame |
1221 | all the way back to the entry frame, including the current stack usage. */ |
1222 | JS_REQUIRES_STACK unsigned |
1223 | js_NativeStackSlots(JSContext *cx, unsigned callDepth) |
1224 | { |
1225 | JSStackFrame* fp = cx->fp; |
1226 | unsigned slots = 0; |
1227 | #if defined _DEBUG |
1228 | unsigned int origCallDepth = callDepth; |
1229 | #endif |
1230 | for (;;) { |
1231 | unsigned operands = fp->regs->sp - StackBase(fp); |
1232 | slots += operands; |
1233 | if (fp->callee) |
1234 | slots += fp->script->nfixed; |
1235 | if (callDepth-- == 0) { |
1236 | if (fp->callee) |
1237 | slots += 2/*callee,this*/ + argSlots(fp); |
1238 | #if defined _DEBUG |
1239 | unsigned int m = 0; |
1240 | FORALL_SLOTS_IN_PENDING_FRAMES(cx, origCallDepth, m++); |
1241 | JS_ASSERT(m == slots); |
1242 | #endif |
1243 | return slots; |
1244 | } |
1245 | JSStackFrame* fp2 = fp; |
1246 | fp = fp->down; |
1247 | int missing = fp2->fun->nargs - fp2->argc; |
1248 | if (missing > 0) |
1249 | slots += missing; |
1250 | } |
1251 | JS_NOT_REACHED("js_NativeStackSlots"); |
1252 | } |
1253 | |
1254 | /* |
1255 | * Capture the type map for the selected slots of the global object and currently pending |
1256 | * stack frames. |
1257 | */ |
1258 | JS_REQUIRES_STACK void |
1259 | TypeMap::captureTypes(JSContext* cx, SlotList& slots, unsigned callDepth) |
1260 | { |
1261 | unsigned ngslots = slots.length(); |
1262 | uint16* gslots = slots.data(); |
1263 | setLength(js_NativeStackSlots(cx, callDepth) + ngslots); |
1264 | uint8* map = data(); |
1265 | uint8* m = map; |
1266 | FORALL_SLOTS_IN_PENDING_FRAMES(cx, callDepth, |
1267 | uint8 type = getCoercedType(*vp); |
1268 | if ((type == JSVAL_INT) && oracle.isStackSlotUndemotable(cx, unsigned(m - map))) |
1269 | type = JSVAL_DOUBLE; |
1270 | JS_ASSERT(type != JSVAL_BOXED); |
1271 | debug_only_v(printf("capture stack type %s%d: %d=%c\n", vpname, vpnum, type, typeChar[type]);) |
1272 | JS_ASSERT(uintptr_t(m - map) < length()); |
1273 | *m++ = type; |
1274 | ); |
1275 | FORALL_GLOBAL_SLOTS(cx, ngslots, gslots, |
1276 | uint8 type = getCoercedType(*vp); |
1277 | if ((type == JSVAL_INT) && oracle.isGlobalSlotUndemotable(cx, gslots[n])) |
1278 | type = JSVAL_DOUBLE; |
1279 | JS_ASSERT(type != JSVAL_BOXED); |
1280 | debug_only_v(printf("capture global type %s%d: %d=%c\n", vpname, vpnum, type, typeChar[type]);) |
1281 | JS_ASSERT(uintptr_t(m - map) < length()); |
1282 | *m++ = type; |
1283 | ); |
1284 | JS_ASSERT(uintptr_t(m - map) == length()); |
1285 | } |
1286 | |
1287 | JS_REQUIRES_STACK void |
1288 | TypeMap::captureMissingGlobalTypes(JSContext* cx, SlotList& slots, unsigned stackSlots) |
1289 | { |
1290 | unsigned oldSlots = length() - stackSlots; |
1291 | int diff = slots.length() - oldSlots; |
1292 | JS_ASSERT(diff >= 0); |
1293 | unsigned ngslots = slots.length(); |
1294 | uint16* gslots = slots.data(); |
1295 | setLength(length() + diff); |
1296 | uint8* map = data() + stackSlots; |
1297 | uint8* m = map; |
1298 | FORALL_GLOBAL_SLOTS(cx, ngslots, gslots, |
1299 | if (n >= oldSlots) { |
1300 | uint8 type = getCoercedType(*vp); |
1301 | if ((type == JSVAL_INT) && oracle.isGlobalSlotUndemotable(cx, gslots[n])) |
1302 | type = JSVAL_DOUBLE; |
1303 | JS_ASSERT(type != JSVAL_BOXED); |
1304 | debug_only_v(printf("capture global type %s%d: %d=%c\n", vpname, vpnum, type, typeChar[type]);) |
1305 | *m = type; |
1306 | JS_ASSERT((m > map + oldSlots) || (*m == type)); |
1307 | } |
1308 | m++; |
1309 | ); |
1310 | } |
1311 | |
1312 | /* Compare this type map to another one and see whether they match. */ |
1313 | bool |
1314 | TypeMap::matches(TypeMap& other) const |
1315 | { |
1316 | if (length() != other.length()) |
1317 | return false; |
1318 | return !memcmp(data(), other.data(), length()); |
1319 | } |
1320 | |
1321 | /* Use the provided storage area to create a new type map that contains the partial type map |
1322 | with the rest of it filled up from the complete type map. */ |
1323 | static void |
1324 | mergeTypeMaps(uint8** partial, unsigned* plength, uint8* complete, unsigned clength, uint8* mem) |
1325 | { |
1326 | unsigned l = *plength; |
1327 | JS_ASSERT(l < clength); |
1328 | memcpy(mem, *partial, l * sizeof(uint8)); |
1329 | memcpy(mem + l, complete + l, (clength - l) * sizeof(uint8)); |
1330 | *partial = mem; |
1331 | *plength = clength; |
1332 | } |
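/*
 * Example: with *partial = [I, D] and complete = [I, D, O, S], the merged
 * map written to mem is [I, D, O, S] -- the partial prefix is copied
 * verbatim and the tail is filled from the complete map; *plength becomes
 * clength.
 */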
1333 | |
1334 | /* Specializes a tree to any missing globals, including any dependent trees. */ |
1335 | static JS_REQUIRES_STACK void |
1336 | specializeTreesToMissingGlobals(JSContext* cx, TreeInfo* root) |
1337 | { |
1338 | TreeInfo* ti = root; |
1339 | |
1340 | ti->typeMap.captureMissingGlobalTypes(cx, *ti->globalSlots, ti->nStackTypes); |
1341 | JS_ASSERT(ti->globalSlots->length() == ti->typeMap.length() - ti->nStackTypes); |
1342 | |
1343 | for (unsigned i = 0; i < root->dependentTrees.length(); i++) { |
1344 | ti = (TreeInfo*)root->dependentTrees.data()[i]->vmprivate; |
1345 | /* ti can be NULL if we hit the recording tree in emitTreeCall; this is harmless. */ |
1346 | if (ti && ti->nGlobalTypes() < ti->globalSlots->length()) |
1347 | specializeTreesToMissingGlobals(cx, ti); |
1348 | } |
1349 | for (unsigned i = 0; i < root->linkedTrees.length(); i++) { |
1350 | ti = (TreeInfo*)root->linkedTrees.data()[i]->vmprivate; |
1351 | if (ti && ti->nGlobalTypes() < ti->globalSlots->length()) |
1352 | specializeTreesToMissingGlobals(cx, ti); |
1353 | } |
1354 | } |
1355 | |
1356 | static void |
1357 | js_TrashTree(JSContext* cx, Fragment* f); |
1358 | |
1359 | JS_REQUIRES_STACK |
1360 | TraceRecorder::TraceRecorder(JSContext* cx, VMSideExit* _anchor, Fragment* _fragment, |
1361 | TreeInfo* ti, unsigned stackSlots, unsigned ngslots, uint8* typeMap, |
1362 | VMSideExit* innermostNestedGuard, jsbytecode* outer, uint32 outerArgc) |
1363 | { |
1364 | JS_ASSERT(!_fragment->vmprivate && ti && cx->fp->regs->pc == (jsbytecode*)_fragment->ip); |
1365 | |
1366 | /* Reset the fragment state we care about in case we got a recycled fragment. */ |
1367 | _fragment->lastIns = NULL; |
1368 | |
1369 | this->cx = cx; |
1370 | this->traceMonitor = &JS_TRACE_MONITOR(cx); |
1371 | this->globalObj = JS_GetGlobalForObject(cx, cx->fp->scopeChain); |
1372 | this->lexicalBlock = cx->fp->blockChain; |
1373 | this->anchor = _anchor; |
1374 | this->fragment = _fragment; |
1375 | this->lirbuf = _fragment->lirbuf; |
1376 | this->treeInfo = ti; |
1377 | this->callDepth = _anchor ? _anchor->calldepth : 0; |
1378 | this->atoms = FrameAtomBase(cx, cx->fp); |
1379 | this->deepAborted = false; |
1380 | this->trashSelf = false; |
1381 | this->global_dslots = this->globalObj->dslots; |
1382 | this->loop = true; /* default assumption is we are compiling a loop */ |
1383 | this->wasRootFragment = _fragment == _fragment->root; |
1384 | this->outer = outer; |
1385 | this->outerArgc = outerArgc; |
1386 | this->pendingTraceableNative = NULL; |
1387 | this->newobj_ins = NULL; |
1388 | this->generatedTraceableNative = new JSTraceableNative(); |
1389 | JS_ASSERT(generatedTraceableNative); |
1390 | |
1391 | debug_only_v(printf("recording starting from %s:%u@%u\n", |
1392 | ti->treeFileName, ti->treeLineNumber, ti->treePCOffset);) |
1393 | debug_only_v(printf("globalObj=%p, shape=%d\n", (void*)this->globalObj, OBJ_SHAPE(this->globalObj));) |
1394 | |
1395 | lir = lir_buf_writer = new (&gc) LirBufWriter(lirbuf); |
1396 | debug_only_v(lir = verbose_filter = new (&gc) VerboseWriter(&gc, lir, lirbuf->names);) |
1397 | if (nanojit::AvmCore::config.soft_float) |
1398 | lir = float_filter = new (&gc) SoftFloatFilter(lir); |
1399 | else |
1400 | float_filter = 0; |
1401 | lir = cse_filter = new (&gc) CseFilter(lir, &gc); |
1402 | lir = expr_filter = new (&gc) ExprFilter(lir); |
1403 | lir = func_filter = new (&gc) FuncFilter(lir); |
1404 | lir->ins0(LIR_start); |
1405 | |
1406 | if (!nanojit::AvmCore::config.tree_opt || fragment->root == fragment) |
1407 | lirbuf->state = addName(lir->insParam(0, 0), "state"); |
1408 | |
1409 | lirbuf->sp = addName(lir->insLoad(LIR_ldp, lirbuf->state, (int)offsetof(InterpState, sp)), "sp"); |
1410 | lirbuf->rp = addName(lir->insLoad(LIR_ldp, lirbuf->state, offsetof(InterpState, rp)), "rp"); |
1411 | cx_ins = addName(lir->insLoad(LIR_ldp, lirbuf->state, offsetof(InterpState, cx)), "cx"); |
1412 | eos_ins = addName(lir->insLoad(LIR_ldp, lirbuf->state, offsetof(InterpState, eos)), "eos"); |
1413 | eor_ins = addName(lir->insLoad(LIR_ldp, lirbuf->state, offsetof(InterpState, eor)), "eor"); |
1414 | |
1415 | /* If we came from a side exit, we might not have enough global types. */
1416 | if (ti->globalSlots->length() > ti->nGlobalTypes()) |
1417 | specializeTreesToMissingGlobals(cx, ti); |
1418 | |
1419 | /* read into registers all values on the stack and all globals we know so far */ |
1420 | import(treeInfo, lirbuf->sp, stackSlots, ngslots, callDepth, typeMap); |
1421 | |
1422 | if (fragment == fragment->root) { |
1423 | /* |
1424 | * We poll the operation callback request flag. It is updated asynchronously whenever |
1425 | * the callback is to be invoked. |
1426 | */ |
1427 | LIns* x = lir->insLoadi(cx_ins, offsetof(JSContext, operationCallbackFlag)); |
1428 | guard(true, lir->ins_eq0(x), snapshot(TIMEOUT_EXIT)); |
1429 | } |
1430 | |
1431 | /* If we are attached to a tree call guard, make sure the guard the inner tree exited from |
1432 | is what we expect it to be. */ |
1433 | if (_anchor && _anchor->exitType == NESTED_EXIT) { |
1434 | LIns* nested_ins = addName(lir->insLoad(LIR_ldp, lirbuf->state, |
1435 | offsetof(InterpState, lastTreeExitGuard)), |
1436 | "lastTreeExitGuard"); |
1437 | guard(true, lir->ins2(LIR_eq, nested_ins, INS_CONSTPTR(innermostNestedGuard)), NESTED_EXIT); |
1438 | } |
1439 | } |
1440 | |
1441 | TreeInfo::~TreeInfo() |
1442 | { |
1443 | UnstableExit* temp; |
1444 | |
1445 | while (unstableExits) { |
1446 | temp = unstableExits->next; |
1447 | delete unstableExits; |
1448 | unstableExits = temp; |
1449 | } |
1450 | } |
1451 | |
1452 | TraceRecorder::~TraceRecorder() |
1453 | { |
1454 | JS_ASSERT(nextRecorderToAbort == NULL); |
1455 | JS_ASSERT(treeInfo && (fragment || wasDeepAborted())); |
1456 | #ifdef DEBUG |
1457 | TraceRecorder* tr = JS_TRACE_MONITOR(cx).abortStack; |
1458 | while (tr != NULL) |
1459 | { |
1460 | JS_ASSERT(this != tr); |
1461 | tr = tr->nextRecorderToAbort; |
1462 | } |
1463 | #endif |
1464 | if (fragment) { |
1465 | if (wasRootFragment && !fragment->root->code()) { |
1466 | JS_ASSERT(!fragment->root->vmprivate); |
1467 | delete treeInfo; |
1468 | } |
1469 | |
1470 | if (trashSelf) |
1471 | js_TrashTree(cx, fragment->root); |
1472 | |
1473 | for (unsigned int i = 0; i < whichTreesToTrash.length(); i++) |
1474 | js_TrashTree(cx, whichTreesToTrash.get(i)); |
1475 | } else if (wasRootFragment) { |
1476 | delete treeInfo; |
1477 | } |
1478 | #ifdef DEBUG |
1479 | delete verbose_filter; |
1480 | #endif |
1481 | delete cse_filter; |
1482 | delete expr_filter; |
1483 | delete func_filter; |
1484 | delete float_filter; |
1485 | delete lir_buf_writer; |
1486 | delete generatedTraceableNative; |
1487 | } |
1488 | |
1489 | void TraceRecorder::removeFragmentoReferences() |
1490 | { |
1491 | fragment = NULL; |
1492 | } |
1493 | |
1494 | void TraceRecorder::deepAbort() |
1495 | { |
1496 | debug_only_v(printf("deep abort");) |
1497 | deepAborted = true; |
1498 | } |
1499 | |
1500 | /* Add debug information to a LIR instruction as we emit it. */ |
1501 | inline LIns* |
1502 | TraceRecorder::addName(LIns* ins, const char* name) |
1503 | { |
1504 | #ifdef JS_JIT_SPEW |
1505 | if (js_verboseDebug) |
1506 | lirbuf->names->addName(ins, name); |
1507 | #endif |
1508 | return ins; |
1509 | } |
1510 | |
1511 | /* Determine the current call depth (starting with the entry frame). */
1512 | unsigned |
1513 | TraceRecorder::getCallDepth() const |
1514 | { |
1515 | return callDepth; |
1516 | } |
1517 | |
1518 | /* Determine the offset in the native global frame for a jsval we track */ |
1519 | ptrdiff_t |
1520 | TraceRecorder::nativeGlobalOffset(jsval* p) const |
1521 | { |
1522 | JS_ASSERT(isGlobal(p)); |
1523 | if (size_t(p - globalObj->fslots) < JS_INITIAL_NSLOTS) |
1524 | return sizeof(InterpState) + size_t(p - globalObj->fslots) * sizeof(double); |
1525 | return sizeof(InterpState) + ((p - globalObj->dslots) + JS_INITIAL_NSLOTS) * sizeof(double); |
1526 | } |
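/*
 * Layout assumed here: the native global frame starts immediately after
 * InterpState, one double-sized slot per tracked global, with the fslots
 * (the first JS_INITIAL_NSLOTS slots of the object) preceding the dslots.
 * Hence both branches compute sizeof(InterpState) + slotIndex * sizeof(double).
 */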
1527 | |
1528 | /* Determine whether a value is a global stack slot */ |
1529 | bool |
1530 | TraceRecorder::isGlobal(jsval* p) const |
1531 | { |
1532 | return ((size_t(p - globalObj->fslots) < JS_INITIAL_NSLOTS) || |
1533 | (size_t(p - globalObj->dslots) < (STOBJ_NSLOTS(globalObj) - JS_INITIAL_NSLOTS))); |
1534 | } |
1535 | |
1536 | /* Determine the offset in the native stack for a jsval we track */ |
1537 | JS_REQUIRES_STACK ptrdiff_t |
1538 | TraceRecorder::nativeStackOffset(jsval* p) const |
1539 | { |
1540 | #ifdef DEBUG |
1541 | size_t slow_offset = 0; |
1542 | FORALL_SLOTS_IN_PENDING_FRAMES(cx, callDepth, |
1543 | if (vp == p) goto done; |
1544 | slow_offset += sizeof(double) |
1545 | ); |
1546 | |
1547 | /* |
1548 | * If it's not in a pending frame, it must be on the stack of the current frame above |
1549 | * sp but below fp->slots + script->nslots. |
1550 | */ |
1551 | JS_ASSERT(size_t(p - cx->fp->slots) < cx->fp->script->nslots); |
1552 | slow_offset += size_t(p - cx->fp->regs->sp) * sizeof(double); |
1553 | |
1554 | done: |
1555 | #define RETURN(offset) { JS_ASSERT((offset) == slow_offset); return offset; } |
1556 | #else |
1557 | #define RETURN(offset) { return offset; } |
1558 | #endif |
1559 | size_t offset = 0; |
1560 | JSStackFrame* currentFrame = cx->fp; |
1561 | JSStackFrame* entryFrame; |
1562 | JSStackFrame* fp = currentFrame; |
1563 | for (unsigned n = 0; n < callDepth; ++n) { fp = fp->down; } |
1564 | entryFrame = fp; |
1565 | unsigned frames = callDepth+1; |
1566 | JSStackFrame** fstack = (JSStackFrame **)alloca(frames * sizeof (JSStackFrame *)); |
1567 | JSStackFrame** fspstop = &fstack[frames]; |
1568 | JSStackFrame** fsp = fspstop-1; |
1569 | fp = currentFrame; |
1570 | for (;; fp = fp->down) { *fsp-- = fp; if (fp == entryFrame) break; } |
1571 | for (fsp = fstack; fsp < fspstop; ++fsp) { |
1572 | fp = *fsp; |
1573 | if (fp->callee) { |
1574 | if (fsp == fstack) { |
1575 | if (size_t(p - &fp->argv[-2]) < size_t(2/*callee,this*/ + argSlots(fp))) |
1576 | RETURN(offset + size_t(p - &fp->argv[-2]) * sizeof(double)); |
1577 | offset += (2/*callee,this*/ + argSlots(fp)) * sizeof(double); |
1578 | } |
1579 | if (size_t(p - &fp->slots[0]) < fp->script->nfixed) |
1580 | RETURN(offset + size_t(p - &fp->slots[0]) * sizeof(double)); |
1581 | offset += fp->script->nfixed * sizeof(double); |
1582 | } |
1583 | jsval* spbase = StackBase(fp); |
1584 | if (size_t(p - spbase) < size_t(fp->regs->sp - spbase)) |
1585 | RETURN(offset + size_t(p - spbase) * sizeof(double)); |
1586 | offset += size_t(fp->regs->sp - spbase) * sizeof(double); |
1587 | if (fsp < fspstop - 1) { |
1588 | JSStackFrame* fp2 = fsp[1]; |
1589 | int missing = fp2->fun->nargs - fp2->argc; |
1590 | if (missing > 0) { |
1591 | if (size_t(p - fp->regs->sp) < size_t(missing)) |
1592 | RETURN(offset + size_t(p - fp->regs->sp) * sizeof(double)); |
1593 | offset += size_t(missing) * sizeof(double); |
1594 | } |
1595 | } |
1596 | } |
1597 | |
1598 | /* |
1599 | * If it's not in a pending frame, it must be on the stack of the current frame above |
1600 | * sp but below fp->slots + script->nslots. |
1601 | */ |
1602 | JS_ASSERT(size_t(p - currentFrame->slots) < currentFrame->script->nslots); |
1603 | offset += size_t(p - currentFrame->regs->sp) * sizeof(double); |
1604 | RETURN(offset); |
1605 | #undef RETURN |
1606 | } |
1607 | |
1608 | /* Track the maximum number of native frame slots we need during |
1609 | execution. */ |
1610 | void |
1611 | TraceRecorder::trackNativeStackUse(unsigned slots) |
1612 | { |
1613 | if (slots > treeInfo->maxNativeStackSlots) |
1614 | treeInfo->maxNativeStackSlots = slots; |
1615 | } |
1616 | |
1617 | /* Unbox a jsval into a slot. Slots are wide enough to hold double values directly (instead of |
1618 | storing a pointer to them). We now assert instead of type checking; the caller must ensure the
1619 | types are compatible. */ |
1620 | static void |
1621 | ValueToNative(JSContext* cx, jsval v, uint8 type, double* slot) |
1622 | { |
1623 | unsigned tag = JSVAL_TAG(v); |
1624 | switch (type) { |
1625 | case JSVAL_OBJECT: |
1626 | JS_ASSERT(tag == JSVAL_OBJECT); |
1627 | JS_ASSERT(!JSVAL_IS_NULL(v) && !HAS_FUNCTION_CLASS(JSVAL_TO_OBJECT(v))); |
1628 | *(JSObject**)slot = JSVAL_TO_OBJECT(v); |
1629 | debug_only_v(printf("object<%p:%s> ", (void*)JSVAL_TO_OBJECT(v), |
1630 | JSVAL_IS_NULL(v) |
1631 | ? "null" |
1632 | : STOBJ_GET_CLASS(JSVAL_TO_OBJECT(v))->name);) |
1633 | return; |
1634 | case JSVAL_INT: |
1635 | jsint i; |
1636 | if (JSVAL_IS_INT(v)) |
1637 | *(jsint*)slot = JSVAL_TO_INT(v); |
1638 | else if ((tag == JSVAL_DOUBLE) && JSDOUBLE_IS_INT(*JSVAL_TO_DOUBLE(v), i)) |
1639 | *(jsint*)slot = i; |
1640 | else |
1641 | JS_ASSERT(JSVAL_IS_INT(v)); |
1642 | debug_only_v(printf("int<%d> ", *(jsint*)slot);) |
1643 | return; |
1644 | case JSVAL_DOUBLE: |
1645 | jsdouble d; |
1646 | if (JSVAL_IS_INT(v)) |
1647 | d = JSVAL_TO_INT(v); |
1648 | else |
1649 | d = *JSVAL_TO_DOUBLE(v); |
1650 | JS_ASSERT(JSVAL_IS_INT(v) || JSVAL_IS_DOUBLE(v)); |
1651 | *(jsdouble*)slot = d; |
1652 | debug_only_v(printf("double<%g> ", d);) |
1653 | return; |
1654 | case JSVAL_BOXED: |
1655 | JS_NOT_REACHED("found boxed type in an entry type map"); |
1656 | return; |
1657 | case JSVAL_STRING: |
1658 | JS_ASSERT(tag == JSVAL_STRING); |
1659 | *(JSString**)slot = JSVAL_TO_STRING(v); |
1660 | debug_only_v(printf("string<%p> ", (void*)(*(JSString**)slot));) |
1661 | return; |
1662 | case JSVAL_TNULL: |
1663 | JS_ASSERT(tag == JSVAL_OBJECT); |
1664 | *(JSObject**)slot = NULL; |
1665 | debug_only_v(printf("null ");) |
1666 | return; |
1667 | case JSVAL_BOOLEAN: |
1668 | /* Watch out for pseudo-booleans. */ |
1669 | JS_ASSERT(tag == JSVAL_BOOLEAN); |
1670 | *(JSBool*)slot = JSVAL_TO_PSEUDO_BOOLEAN(v); |
1671 | debug_only_v(printf("boolean<%d> ", *(JSBool*)slot);) |
1672 | return; |
1673 | case JSVAL_TFUN: { |
1674 | JS_ASSERT(tag == JSVAL_OBJECT); |
1675 | JSObject* obj = JSVAL_TO_OBJECT(v); |
1676 | *(JSObject**)slot = obj; |
1677 | #ifdef DEBUG |
1678 | JSFunction* fun = GET_FUNCTION_PRIVATE(cx, obj); |
1679 | debug_only_v(printf("function<%p:%s> ", (void*) obj, |
1680 | fun->atom |
1681 | ? JS_GetStringBytes(ATOM_TO_STRING(fun->atom)) |
1682 | : "unnamed");) |
1683 | #endif |
1684 | return; |
1685 | } |
1686 | } |
1687 | |
1688 | JS_NOT_REACHED("unexpected type"); |
1689 | } |
1690 | |
1691 | /* We maintain an emergency pool of doubles so we can recover safely if a trace runs |
1692 | out of memory (doubles or objects). */ |
1693 | static jsval |
1694 | AllocateDoubleFromReservedPool(JSContext* cx) |
1695 | { |
1696 | JSTraceMonitor* tm = &JS_TRACE_MONITOR(cx); |
1697 | JS_ASSERT(tm->reservedDoublePoolPtr > tm->reservedDoublePool); |
1698 | return *--tm->reservedDoublePoolPtr; |
1699 | } |
1700 | |
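     | /* Refill the reserved double pool up to MAX_NATIVE_STACK_SLOTS entries. If a
     |    GC resets the pool while we are filling it, we start over; if the GC runs
     |    more than once, we give up. */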
1701 | static bool |
1702 | js_ReplenishReservedPool(JSContext* cx, JSTraceMonitor* tm) |
1703 | { |
1704 | /* We should not be called with a full pool. */ |
1705 | JS_ASSERT((size_t) (tm->reservedDoublePoolPtr - tm->reservedDoublePool) < |
1706 | MAX_NATIVE_STACK_SLOTS); |
1707 | |
1708 | /* |
1709 | * When the GC runs in js_NewDoubleInRootedValue, it resets |
1710 | * tm->reservedDoublePoolPtr back to tm->reservedDoublePool. |
1711 | */ |
1712 | JSRuntime* rt = cx->runtime; |
1713 | uintN gcNumber = rt->gcNumber; |
1714 | uintN lastgcNumber = gcNumber; |
1715 | jsval* ptr = tm->reservedDoublePoolPtr; |
1716 | while (ptr < tm->reservedDoublePool + MAX_NATIVE_STACK_SLOTS) { |
1717 | if (!js_NewDoubleInRootedValue(cx, 0.0, ptr)) |
1718 | goto oom; |
1719 | |
1720 | /* Check if the last call to js_NewDoubleInRootedValue GC'd. */ |
1721 | if (rt->gcNumber != lastgcNumber) { |
1722 | lastgcNumber = rt->gcNumber; |
1723 | JS_ASSERT(tm->reservedDoublePoolPtr == tm->reservedDoublePool); |
1724 | ptr = tm->reservedDoublePool; |
1725 | |
1726 | /* |
1727 | * Have we GC'd more than once? We're probably running really |
1728 |              * low on memory; bail now.
1729 | */ |
1730 | if (uintN(rt->gcNumber - gcNumber) > uintN(1)) |
1731 | goto oom; |
1732 | continue; |
1733 | } |
1734 | ++ptr; |
1735 | } |
1736 | tm->reservedDoublePoolPtr = ptr; |
1737 | return true; |
1738 | |
1739 | oom: |
1740 | /* |
1741 |      * There is already massive GC pressure; no need to hold doubles back.
1742 | * We won't run any native code anyway. |
1743 | */ |
1744 | tm->reservedDoublePoolPtr = tm->reservedDoublePool; |
1745 | return false; |
1746 | } |
1747 | |
1748 | /* Box a value from the native stack back into the jsval format. Integers |
1749 | that are too large to fit into a jsval are automatically boxed into |
1750 | heap-allocated doubles. */ |
1751 | static void |
1752 | NativeToValue(JSContext* cx, jsval& v, uint8 type, double* slot) |
1753 | { |
1754 | jsint i; |
1755 | jsdouble d; |
1756 | switch (type) { |
1757 | case JSVAL_OBJECT: |
1758 | v = OBJECT_TO_JSVAL(*(JSObject**)slot); |
1759 | JS_ASSERT(JSVAL_TAG(v) == JSVAL_OBJECT); /* if this fails the pointer was not aligned */ |
1760 | JS_ASSERT(v != JSVAL_ERROR_COOKIE); /* don't leak JSVAL_ERROR_COOKIE */ |
1761 | debug_only_v(printf("object<%p:%s> ", (void*)JSVAL_TO_OBJECT(v), |
1762 | JSVAL_IS_NULL(v) |
1763 | ? "null" |
1764 | : STOBJ_GET_CLASS(JSVAL_TO_OBJECT(v))->name);) |
1765 | break; |
1766 | case JSVAL_INT: |
1767 | i = *(jsint*)slot; |
1768 | debug_only_v(printf("int<%d> ", i);) |
1769 | store_int: |
1770 | if (INT_FITS_IN_JSVAL(i)) { |
1771 | v = INT_TO_JSVAL(i); |
1772 | break; |
1773 | } |
1774 | d = (jsdouble)i; |
1775 | goto store_double; |
1776 | case JSVAL_DOUBLE: |
1777 | d = *slot; |
1778 | debug_only_v(printf("double<%g> ", d);) |
1779 | if (JSDOUBLE_IS_INT(d, i)) |
1780 | goto store_int; |
1781 | store_double: { |
1782 |         /* It's not safe to trigger the GC here, so use an emergency heap if we are out of
1783 |            double boxes. */
1784 | if (cx->doubleFreeList) { |
1785 | #ifdef DEBUG |
1786 | JSBool ok = |
1787 | #endif |
1788 | js_NewDoubleInRootedValue(cx, d, &v); |
1789 | JS_ASSERT(ok); |
1790 | return; |
1791 | } |
1792 | v = AllocateDoubleFromReservedPool(cx); |
1793 | JS_ASSERT(JSVAL_IS_DOUBLE(v) && *JSVAL_TO_DOUBLE(v) == 0.0); |
1794 | *JSVAL_TO_DOUBLE(v) = d; |
1795 | return; |
1796 | } |
1797 | case JSVAL_BOXED: |
1798 | v = *(jsval*)slot; |
1799 | JS_ASSERT(v != JSVAL_ERROR_COOKIE); /* don't leak JSVAL_ERROR_COOKIE */ |
1800 | debug_only_v(printf("box<%lx> ", v)); |
1801 | break; |
1802 | case JSVAL_STRING: |
1803 | v = STRING_TO_JSVAL(*(JSString**)slot); |
1804 | JS_ASSERT(JSVAL_TAG(v) == JSVAL_STRING); /* if this fails the pointer was not aligned */ |
1805 | debug_only_v(printf("string<%p> ", *(JSString**)slot);) |
1806 | break; |
1807 | case JSVAL_TNULL: |
1808 | JS_ASSERT(*(JSObject**)slot == NULL); |
1809 | v = JSVAL_NULL; |
1810 | debug_only_v(printf("null<%p> ", *(JSObject**)slot)); |
1811 | break; |
1812 | case JSVAL_BOOLEAN: |
1813 | /* Watch out for pseudo-booleans. */ |
1814 | v = PSEUDO_BOOLEAN_TO_JSVAL(*(JSBool*)slot); |
1815 | debug_only_v(printf("boolean<%d> ", *(JSBool*)slot);) |
1816 | break; |
1817 | case JSVAL_TFUN: { |
1818 | JS_ASSERT(HAS_FUNCTION_CLASS(*(JSObject**)slot)); |
1819 | v = OBJECT_TO_JSVAL(*(JSObject**)slot); |
1820 | #ifdef DEBUG |
1821 | JSFunction* fun = GET_FUNCTION_PRIVATE(cx, JSVAL_TO_OBJECT(v)); |
1822 | debug_only_v(printf("function<%p:%s> ", (void*)JSVAL_TO_OBJECT(v), |
1823 | fun->atom |
1824 | ? JS_GetStringBytes(ATOM_TO_STRING(fun->atom)) |
1825 | : "unnamed");) |
1826 | #endif |
1827 | break; |
1828 | } |
1829 | } |
1830 | } |
1831 | |
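     | /* Note that NativeToValue inverts ValueToNative up to number canonicalization:
     |    an int-valued double re-boxes as a tagged int when it fits, and an int that
     |    does not fit in a jsval re-boxes as a heap-allocated double. */
     |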
1832 | /* Attempt to unbox the given list of interned globals onto the native global frame. */ |
1833 | static JS_REQUIRES_STACK void |
1834 | BuildNativeGlobalFrame(JSContext* cx, unsigned ngslots, uint16* gslots, uint8* mp, double* np) |
1835 | { |
1836 | debug_only_v(printf("global: ");) |
1837 | FORALL_GLOBAL_SLOTS(cx, ngslots, gslots, |
1838 | ValueToNative(cx, *vp, *mp, np + gslots[n]); |
1839 | ++mp; |
1840 | ); |
1841 | debug_only_v(printf("\n");) |
1842 | } |
1843 | |
1844 | /* Attempt to unbox the given JS frame onto a native frame. */ |
1845 | static JS_REQUIRES_STACK void |
1846 | BuildNativeStackFrame(JSContext* cx, unsigned callDepth, uint8* mp, double* np) |
1847 | { |
1848 | debug_only_v(printf("stack: ");) |
1849 | FORALL_SLOTS_IN_PENDING_FRAMES(cx, callDepth, |
1850 | debug_only_v(printf("%s%u=", vpname, vpnum);) |
1851 | ValueToNative(cx, *vp, *mp, np); |
1852 | ++mp; ++np; |
1853 | ); |
1854 | debug_only_v(printf("\n");) |
1855 | } |
1856 | |
1857 | /* Box the given native global frame back into the global object's slots. This is infallible. */
1858 | static JS_REQUIRES_STACK int |
1859 | FlushNativeGlobalFrame(JSContext* cx, unsigned ngslots, uint16* gslots, uint8* mp, double* np) |
1860 | { |
1861 | uint8* mp_base = mp; |
1862 | FORALL_GLOBAL_SLOTS(cx, ngslots, gslots, |
1863 | debug_only_v(printf("%s%u=", vpname, vpnum);) |
1864 | NativeToValue(cx, *vp, *mp, np + gslots[n]); |
1865 | ++mp; |
1866 | ); |
1867 | debug_only_v(printf("\n");) |
1868 | return mp - mp_base; |
1869 | } |
1870 | |
1871 | /* |
1872 | * Generic function to read upvars on trace. |
1873 | * T Traits type parameter. Must provide static functions: |
1874 | * interp_get(fp, slot) Read the value out of an interpreter frame. |
1875 | * native_slot(argc, slot) Return the position of the desired value in the on-trace |
1876 | * stack frame (with position 0 being callee). |
1877 | * |
1878 | * level Static level of the function containing the upvar definition. |
1879 | * slot Identifies the value to get. The meaning is defined by the traits type. |
1880 | * callDepth Call depth of current point relative to trace entry |
1881 | */ |
1882 | template<typename T> |
1883 | uint32 JS_INLINE |
1884 | js_GetUpvarOnTrace(JSContext* cx, uint32 level, int32 slot, uint32 callDepth, double* result) |
1885 | { |
1886 | InterpState* state = cx->interpState; |
1887 | FrameInfo** fip = state->rp + callDepth; |
1888 | |
1889 | /* |
1890 | * First search the FrameInfo call stack for an entry containing |
1891 | * our upvar, namely one with level == upvarLevel. |
1892 | */ |
1893 | while (--fip >= state->callstackBase) { |
1894 | FrameInfo* fi = *fip; |
1895 | JSFunction* fun = GET_FUNCTION_PRIVATE(cx, fi->callee); |
1896 | uintN calleeLevel = fun->u.i.script->staticLevel; |
1897 | if (calleeLevel == level) { |
1898 | /* |
1899 | * Now find the upvar's value in the native stack. |
1900 | * nativeStackFramePos is the offset of the start of the |
1901 | * activation record corresponding to *fip in the native |
1902 | * stack. |
1903 | */ |
1904 | int32 nativeStackFramePos = state->callstackBase[0]->spoffset; |
1905 | for (FrameInfo** fip2 = state->callstackBase; fip2 <= fip; fip2++) |
1906 | nativeStackFramePos += (*fip2)->spdist; |
1907 | nativeStackFramePos -= (2 + (*fip)->get_argc()); |
1908 | uint32 native_slot = T::native_slot((*fip)->get_argc(), slot); |
1909 | *result = state->stackBase[nativeStackFramePos + native_slot]; |
1910 | return fi->get_typemap()[native_slot]; |
1911 | } |
1912 | } |
1913 | |
1914 | // Next search the trace entry frame, which is not in the FrameInfo stack. |
1915 | if (state->outermostTree->script->staticLevel == level) { |
1916 | uint32 argc = ((VMFragment*) state->outermostTree->fragment)->argc; |
1917 | uint32 native_slot = T::native_slot(argc, slot); |
1918 | *result = state->stackBase[native_slot]; |
1919 | return state->callstackBase[0]->get_typemap()[native_slot]; |
1920 | } |
1921 | |
1922 | /* |
1923 | * If we did not find the upvar in the frames for the active traces, |
1924 | * then we simply get the value from the interpreter state. |
1925 | */ |
1926 | JS_ASSERT(level < JS_DISPLAY_SIZE); |
1927 | JSStackFrame* fp = cx->display[level]; |
1928 | jsval v = T::interp_get(fp, slot); |
1929 | uint8 type = getCoercedType(v); |
1930 | ValueToNative(cx, v, type, result); |
1931 | return type; |
1932 | } |
1933 | |
1934 | // For this traits type, 'slot' is the argument index, which may be -2 for callee. |
1935 | struct UpvarArgTraits { |
1936 | static jsval interp_get(JSStackFrame* fp, int32 slot) { |
1937 | return fp->argv[slot]; |
1938 | } |
1939 | |
1940 | static uint32 native_slot(uint32 argc, int32 slot) { |
1941 | return 2 /*callee,this*/ + slot; |
1942 | } |
1943 | }; |
1944 | |
1945 | uint32 JS_FASTCALL |
1946 | js_GetUpvarArgOnTrace(JSContext* cx, uint32 staticLevel, int32 slot, uint32 callDepth, double* result) |
1947 | { |
1948 | return js_GetUpvarOnTrace<UpvarArgTraits>(cx, staticLevel, slot, callDepth, result); |
1949 | } |
1950 | |
1951 | // For this traits type, 'slot' is an index into the local slots array. |
1952 | struct UpvarVarTraits { |
1953 | static jsval interp_get(JSStackFrame* fp, int32 slot) { |
1954 | return fp->slots[slot]; |
1955 | } |
1956 | |
1957 | static uint32 native_slot(uint32 argc, int32 slot) { |
1958 | return 2 /*callee,this*/ + argc + slot; |
1959 | } |
1960 | }; |
1961 | |
1962 | uint32 JS_FASTCALL |
1963 | js_GetUpvarVarOnTrace(JSContext* cx, uint32 staticLevel, int32 slot, uint32 callDepth, double* result) |
1964 | { |
1965 | return js_GetUpvarOnTrace<UpvarVarTraits>(cx, staticLevel, slot, callDepth, result); |
1966 | } |
1967 | |
1968 | /* |
1969 | * For this traits type, 'slot' is an index into the stack area (within slots, after nfixed) |
1970 | * of a frame with no function. (On trace, the top-level frame is the only one that can have |
1971 | * no function.) |
1972 | */ |
1973 | struct UpvarStackTraits { |
1974 | static jsval interp_get(JSStackFrame* fp, int32 slot) { |
1975 | return fp->slots[slot + fp->script->nfixed]; |
1976 | } |
1977 | |
1978 | static uint32 native_slot(uint32 argc, int32 slot) { |
1979 | /* |
1980 | * Locals are not imported by the tracer when the frame has no function, so |
1981 | * we do not add fp->script->nfixed. |
1982 | */ |
1983 | JS_ASSERT(argc == 0); |
1984 | return slot; |
1985 | } |
1986 | }; |
1987 | |
1988 | uint32 JS_FASTCALL |
1989 | js_GetUpvarStackOnTrace(JSContext* cx, uint32 upvarLevel, int32 slot, uint32 callDepth, double* result) |
1990 | { |
1991 | return js_GetUpvarOnTrace<UpvarStackTraits>(cx, upvarLevel, slot, callDepth, result); |
1992 | } |
1993 | |
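     | /*
     |  * On-trace activation-record layout assumed by the traits types above
     |  * (native_slot position 0 is the callee):
     |  *
     |  *   [0] callee | [1] this | [2 .. 2+argc-1] args | [2+argc ..] locals
     |  *
     |  * Frames handled by UpvarStackTraits have no function and import no fixed
     |  * locals, so their stack slots start at position 0.
     |  */
     |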
1994 | /** |
1995 | * Box the given native stack frame into the virtual machine stack. This |
1996 | * is infallible. |
1997 | * |
1998 | * @param callDepth the distance between the entry frame into our trace and |
1999 | * cx->fp when we make this call. If this is not called as a |
2000 | * result of a nested exit, callDepth is 0. |
2001 | * @param mp pointer to an array of type tags (JSVAL_INT, etc.) that indicate |
2002 |  *      the types of the values on the stack.
2003 | * @param np pointer to the native stack. We want to copy values from here to |
2004 | * the JS stack as needed. |
2005 | * @param stopFrame if non-null, this frame and everything above it should not |
2006 | * be restored. |
2007 | * @return the number of things we popped off of np. |
2008 | */ |
2009 | static JS_REQUIRES_STACK int |
2010 | FlushNativeStackFrame(JSContext* cx, unsigned callDepth, uint8* mp, double* np, |
2011 | JSStackFrame* stopFrame) |
2012 | { |
2013 | jsval* stopAt = stopFrame ? &stopFrame->argv[-2] : NULL; |
2014 | uint8* mp_base = mp; |
2015 | /* Root all string and object references first (we don't need to call the GC for this). */ |
2016 | FORALL_SLOTS_IN_PENDING_FRAMES(cx, callDepth, |
2017 | if (vp == stopAt) goto skip; |
2018 | debug_only_v(printf("%s%u=", vpname, vpnum);) |
2019 | NativeToValue(cx, *vp, *mp, np); |
2020 | ++mp; ++np |
2021 | ); |
2022 | skip: |
2023 | // Restore thisp from the now-restored argv[-1] in each pending frame. |
2024 | // Keep in mind that we didn't restore frames at stopFrame and above! |
2025 | // Scope to keep |fp| from leaking into the macros we're using. |
2026 | { |
2027 | unsigned n = callDepth+1; // +1 to make sure we restore the entry frame |
2028 | JSStackFrame* fp = cx->fp; |
2029 | if (stopFrame) { |
2030 | for (; fp != stopFrame; fp = fp->down) { |
2031 | JS_ASSERT(n != 0); |
2032 | --n; |
2033 | } |
2034 | // Skip over stopFrame itself. |
2035 | JS_ASSERT(n != 0); |
2036 | --n; |
2037 | fp = fp->down; |
2038 | } |
2039 | for (; n != 0; fp = fp->down) { |
2040 | --n; |
2041 | if (fp->callee) { |
2042 | /* |
2043 | * We might return from trace with a different callee object, but it still |
2044 | * has to be the same JSFunction (FIXME: bug 471425, eliminate fp->callee). |
2045 | */ |
2046 | JS_ASSERT(JSVAL_IS_OBJECT(fp->argv[-1])); |
2047 | JS_ASSERT(HAS_FUNCTION_CLASS(JSVAL_TO_OBJECT(fp->argv[-2]))); |
2048 | JS_ASSERT(GET_FUNCTION_PRIVATE(cx, JSVAL_TO_OBJECT(fp->argv[-2])) == |
2049 | GET_FUNCTION_PRIVATE(cx, fp->callee)); |
2050 | JS_ASSERT(GET_FUNCTION_PRIVATE(cx, fp->callee) == fp->fun); |
2051 | fp->callee = JSVAL_TO_OBJECT(fp->argv[-2]); |
2052 | |
2053 | /* |
2054 | * SynthesizeFrame sets scopeChain to NULL, because we can't calculate the |
2055 | * correct scope chain until we have the final callee. Calculate the real |
2056 | * scope object here. |
2057 | */ |
2058 | if (!fp->scopeChain) { |
2059 | fp->scopeChain = OBJ_GET_PARENT(cx, fp->callee); |
2060 | if (fp->fun->flags & JSFUN_HEAVYWEIGHT) { |
2061 | /* |
2062 | * Set hookData to null because the failure case for js_GetCallObject |
2063 | * involves it calling the debugger hook. |
2064 | * |
2065 | * Allocating the Call object must not fail, so use an object |
2066 | * previously reserved by js_ExecuteTree if needed. |
2067 | */ |
2068 | void* hookData = ((JSInlineFrame*)fp)->hookData; |
2069 | ((JSInlineFrame*)fp)->hookData = NULL; |
2070 | JS_ASSERT(!JS_TRACE_MONITOR(cx).useReservedObjects); |
2071 | JS_TRACE_MONITOR(cx).useReservedObjects = JS_TRUE; |
2072 | #ifdef DEBUG |
2073 | JSObject *obj = |
2074 | #endif |
2075 | js_GetCallObject(cx, fp); |
2076 | JS_ASSERT(obj); |
2077 | JS_TRACE_MONITOR(cx).useReservedObjects = JS_FALSE; |
2078 | ((JSInlineFrame*)fp)->hookData = hookData; |
2079 | } |
2080 | } |
2081 | fp->thisp = JSVAL_TO_OBJECT(fp->argv[-1]); |
2082 | if (fp->flags & JSFRAME_CONSTRUCTING) // constructors always compute 'this' |
2083 | fp->flags |= JSFRAME_COMPUTED_THIS; |
2084 | } |
2085 | } |
2086 | } |
2087 | debug_only_v(printf("\n");) |
2088 | return mp - mp_base; |
2089 | } |
2090 | |
2091 | /* Emit load instructions onto the trace that read the initial stack state. */ |
2092 | JS_REQUIRES_STACK void |
2093 | TraceRecorder::import(LIns* base, ptrdiff_t offset, jsval* p, uint8 t, |
2094 | const char *prefix, uintN index, JSStackFrame *fp) |
2095 | { |
2096 | LIns* ins; |
2097 | if (t == JSVAL_INT) { /* demoted */ |
2098 | JS_ASSERT(isInt32(*p)); |
2099 | /* Ok, we have a valid demotion attempt pending, so insert an integer |
2100 | read and promote it to double since all arithmetic operations expect |
2101 |            to see doubles on entry. The first op to use this slot will emit an
2102 |            f2i cast which will cancel out the i2f we insert here. */
2103 | ins = lir->insLoadi(base, offset); |
2104 | ins = lir->ins1(LIR_i2f, ins); |
2105 | } else { |
2106 | JS_ASSERT_IF(t != JSVAL_BOXED, isNumber(*p) == (t == JSVAL_DOUBLE)); |
2107 | if (t == JSVAL_DOUBLE) { |
2108 | ins = lir->insLoad(LIR_ldq, base, offset); |
2109 | } else if (t == JSVAL_BOOLEAN) { |
2110 | ins = lir->insLoad(LIR_ld, base, offset); |
2111 | } else { |
2112 | ins = lir->insLoad(LIR_ldp, base, offset); |
2113 | } |
2114 | } |
2115 | checkForGlobalObjectReallocation(); |
2116 | tracker.set(p, ins); |
2117 | |
2118 | #ifdef DEBUG |
2119 | char name[64]; |
2120 | JS_ASSERT(strlen(prefix) < 10); |
2121 | void* mark = NULL; |
2122 | jsuword* localNames = NULL; |
2123 | const char* funName = NULL; |
2124 | if (*prefix == 'a' || *prefix == 'v') { |
2125 | mark = JS_ARENA_MARK(&cx->tempPool); |
2126 | if (fp->fun->hasLocalNames()) |
2127 | localNames = js_GetLocalNameArray(cx, fp->fun, &cx->tempPool); |
2128 | funName = fp->fun->atom ? js_AtomToPrintableString(cx, fp->fun->atom) : "<anonymous>"; |
2129 | } |
2130 | if (!strcmp(prefix, "argv")) { |
2131 | if (index < fp->fun->nargs) { |
2132 | JSAtom *atom = JS_LOCAL_NAME_TO_ATOM(localNames[index]); |
2133 | JS_snprintf(name, sizeof name, "$%s.%s", funName, js_AtomToPrintableString(cx, atom)); |
2134 | } else { |
2135 | JS_snprintf(name, sizeof name, "$%s.<arg%d>", funName, index); |
2136 | } |
2137 | } else if (!strcmp(prefix, "vars")) { |
2138 | JSAtom *atom = JS_LOCAL_NAME_TO_ATOM(localNames[fp->fun->nargs + index]); |
2139 | JS_snprintf(name, sizeof name, "$%s.%s", funName, js_AtomToPrintableString(cx, atom)); |
2140 | } else { |
2141 | JS_snprintf(name, sizeof name, "$%s%d", prefix, index); |
2142 | } |
2143 | |
2144 | if (mark) |
2145 | JS_ARENA_RELEASE(&cx->tempPool, mark); |
2146 | addName(ins, name); |
2147 | |
2148 | static const char* typestr[] = { |
2149 | "object", "int", "double", "boxed", "string", "null", "boolean", "function" |
2150 | }; |
2151 | debug_only_v(printf("import vp=%p name=%s type=%s flags=%d\n", |
2152 | (void*)p, name, typestr[t & 7], t >> 3);) |
2153 | #endif |
2154 | } |
2155 | |
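     | /*
     |  * Import the trace-entry state for the whole tree: first unbox any stack
     |  * slots typed JSVAL_BOXED, then import the interned globals, then the
     |  * remaining stack slots.
     |  */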
2156 | JS_REQUIRES_STACK void |
2157 | TraceRecorder::import(TreeInfo* treeInfo, LIns* sp, unsigned stackSlots, unsigned ngslots, |
2158 | unsigned callDepth, uint8* typeMap) |
2159 | { |
2160 | /* If we get a partial list that doesn't have all the types (i.e. recording from a side |
2161 | exit that was recorded but we added more global slots later), merge the missing types |
2162 | from the entry type map. This is safe because at the loop edge we verify that we |
2163 | have compatible types for all globals (entry type and loop edge type match). While |
2164 | a different trace of the tree might have had a guard with a different type map for |
2165 | these slots we just filled in here (the guard we continue from didn't know about them), |
2166 | since we didn't take that particular guard the only way we could have ended up here |
2167 | is if that other trace had at its end a compatible type distribution with the entry |
2168 |        map. Since that's exactly what we used to fill in the types our current side exit
2169 | didn't provide, this is always safe to do. */ |
2170 | |
2171 | uint8* globalTypeMap = typeMap + stackSlots; |
2172 | unsigned length = treeInfo->nGlobalTypes(); |
2173 | |
2174 | /* |
2175 | * This is potentially the typemap of the side exit and thus shorter than the tree's |
2176 | * global type map. |
2177 | */ |
2178 | if (ngslots < length) { |
2179 | mergeTypeMaps(&globalTypeMap/*out param*/, &ngslots/*out param*/, |
2180 | treeInfo->globalTypeMap(), length, |
2181 | (uint8*)alloca(sizeof(uint8) * length)); |
2182 | } |
2183 | JS_ASSERT(ngslots == treeInfo->nGlobalTypes()); |
2184 | |
2185 | /* |
2186 | * Check whether there are any values on the stack we have to unbox and do that first |
2187 | * before we waste any time fetching the state from the stack. |
2188 | */ |
2189 | ptrdiff_t offset = -treeInfo->nativeStackBase; |
2190 | uint8* m = typeMap; |
2191 | FORALL_SLOTS_IN_PENDING_FRAMES(cx, callDepth, |
2192 | if (*m == JSVAL_BOXED) { |
2193 | import(sp, offset, vp, JSVAL_BOXED, "boxed", vpnum, cx->fp); |
2194 | LIns* vp_ins = get(vp); |
2195 | unbox_jsval(*vp, vp_ins, copy(anchor)); |
2196 | set(vp, vp_ins); |
2197 | } |
2198 | m++; offset += sizeof(double); |
2199 | ); |
2200 | |
2201 | /* |
2202 | * The first time we compile a tree this will be empty as we add entries lazily. |
2203 | */ |
2204 | uint16* gslots = treeInfo->globalSlots->data(); |
2205 | m = globalTypeMap; |
2206 | FORALL_GLOBAL_SLOTS(cx, ngslots, gslots, |
2207 | JS_ASSERT(*m != JSVAL_BOXED); |
2208 | import(lirbuf->state, nativeGlobalOffset(vp), vp, *m, vpname, vpnum, NULL); |
2209 | m++; |
2210 | ); |
2211 | offset = -treeInfo->nativeStackBase; |
2212 | m = typeMap; |
2213 | FORALL_SLOTS_IN_PENDING_FRAMES(cx, callDepth, |
2214 | if (*m != JSVAL_BOXED) |
2215 | import(sp, offset, vp, *m, vpname, vpnum, fp); |
2216 | m++; offset += sizeof(double); |
2217 | ); |
2218 | } |
2219 | |
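     | /*
     |  * Check whether a property access can be traced for the current opcode:
     |  * writes need a stub setter on a writable property, anything that reads
     |  * (setflags != JOF_SET) needs a stub getter, and the property must live
     |  * in a valid slot.
     |  */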
2220 | JS_REQUIRES_STACK bool |
2221 | TraceRecorder::isValidSlot(JSScope* scope, JSScopeProperty* sprop) |
2222 | { |
2223 | uint32 setflags = (js_CodeSpec[*cx->fp->regs->pc].format & (JOF_SET | JOF_INCDEC | JOF_FOR)); |
2224 | |
2225 | if (setflags) { |
2226 | if (!SPROP_HAS_STUB_SETTER(sprop)) |
2227 | ABORT_TRACE_RV("non-stub setter", false); |
2228 | if (sprop->attrs & JSPROP_READONLY) |
2229 | ABORT_TRACE_RV("writing to a read-only property", false); |
2230 | } |
2231 | /* This check applies even when setflags == 0. */ |
2232 | if (setflags != JOF_SET && !SPROP_HAS_STUB_GETTER(sprop)) |
2233 | ABORT_TRACE_RV("non-stub getter", false); |
2234 | |
2235 | if (!SPROP_HAS_VALID_SLOT(sprop, scope)) |
2236 | ABORT_TRACE_RV("slotless obj property", false); |
2237 | |
2238 | return true; |
2239 | } |
2240 | |
2241 | /* Lazily import a global slot if we don't already have it in the tracker. */ |
2242 | JS_REQUIRES_STACK bool |
2243 | TraceRecorder::lazilyImportGlobalSlot(unsigned slot) |
2244 | { |
2245 | if (slot != uint16(slot)) /* we use a table of 16-bit ints, bail out if that's not enough */ |
2246 | return false; |
2247 | /* |
2248 | * If the global object grows too large, alloca in js_ExecuteTree might fail, so |
2249 | * abort tracing on global objects with unreasonably many slots. |
2250 | */ |
2251 | if (STOBJ_NSLOTS(globalObj) > MAX_GLOBAL_SLOTS) |
2252 | return false; |
2253 | jsval* vp = &STOBJ_GET_SLOT(globalObj, slot); |
2254 | if (known(vp)) |
2255 | return true; /* we already have it */ |
2256 | unsigned index = treeInfo->globalSlots->length(); |
2257 | /* Add the slot to the list of interned global slots. */ |
2258 | JS_ASSERT(treeInfo->nGlobalTypes() == treeInfo->globalSlots->length()); |
2259 | treeInfo->globalSlots->add(slot); |
2260 | uint8 type = getCoercedType(*vp); |
2261 | if ((type == JSVAL_INT) && oracle.isGlobalSlotUndemotable(cx, slot)) |
2262 | type = JSVAL_DOUBLE; |
2263 | treeInfo->typeMap.add(type); |
2264 | import(lirbuf->state, sizeof(struct InterpState) + slot*sizeof(double), |
2265 | vp, type, "global", index, NULL); |
2266 | specializeTreesToMissingGlobals(cx, treeInfo); |
2267 | return true; |
2268 | } |
2269 | |
2270 | /* Write back a value onto the stack or global frames. */ |
2271 | LIns* |
2272 | TraceRecorder::writeBack(LIns* i, LIns* base, ptrdiff_t offset) |
2273 | { |
2274 | /* Sink all type casts targeting the stack into the side exit by simply storing the original |
2275 | (uncasted) value. Each guard generates the side exit map based on the types of the |
2276 |        last stores to every stack location, so it's safe not to perform them on-trace. */
2277 | if (isPromoteInt(i)) |
2278 | i = ::demote(lir, i); |
2279 | return lir->insStorei(i, base, offset); |
2280 | } |
2281 | |
2282 | /* Update the tracker, then issue a write back store. */ |
2283 | JS_REQUIRES_STACK void |
2284 | TraceRecorder::set(jsval* p, LIns* i, bool initializing) |
2285 | { |
2286 | JS_ASSERT(i != NULL); |
2287 | JS_ASSERT(initializing || known(p)); |
2288 | checkForGlobalObjectReallocation(); |
2289 | tracker.set(p, i); |
2290 | /* If we are writing to this location for the first time, calculate the offset into the |
2291 |        native frame manually; otherwise just look up the last load or store associated with
2292 | the same source address (p) and use the same offset/base. */ |
2293 | LIns* x = nativeFrameTracker.get(p); |
2294 | if (!x) { |
2295 | if (isGlobal(p)) |
2296 | x = writeBack(i, lirbuf->state, nativeGlobalOffset(p)); |
2297 | else |
2298 | x = writeBack(i, lirbuf->sp, -treeInfo->nativeStackBase + nativeStackOffset(p)); |
2299 | nativeFrameTracker.set(p, x); |
2300 | } else { |
2301 | #define ASSERT_VALID_CACHE_HIT(base, offset) \ |
2302 | JS_ASSERT(base == lirbuf->sp || base == lirbuf->state); \ |
2303 | JS_ASSERT(offset == ((base == lirbuf->sp) \ |
2304 | ? -treeInfo->nativeStackBase + nativeStackOffset(p) \ |
2305 | : nativeGlobalOffset(p))); \ |
2306 | |
2307 | if (x->isop(LIR_st) || x->isop(LIR_stq)) { |
2308 | ASSERT_VALID_CACHE_HIT(x->oprnd2(), x->oprnd3()->constval()); |
2309 | writeBack(i, x->oprnd2(), x->oprnd3()->constval()); |
2310 | } else { |
2311 | JS_ASSERT(x->isop(LIR_sti) || x->isop(LIR_stqi)); |
2312 | ASSERT_VALID_CACHE_HIT(x->oprnd2(), x->immdisp()); |
2313 | writeBack(i, x->oprnd2(), x->immdisp()); |
2314 | } |
2315 | } |
2316 | #undef ASSERT_VALID_CACHE_HIT |
2317 | } |
2318 | |
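     | /* Return the LIR instruction currently tracking the value at address p. */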
2319 | JS_REQUIRES_STACK LIns* |
2320 | TraceRecorder::get(jsval* p) |
2321 | { |
2322 | checkForGlobalObjectReallocation(); |
2323 | return tracker.get(p); |
2324 | } |
2325 | |
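     | /* Test whether the tracker has an entry for address p. */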
2326 | JS_REQUIRES_STACK bool |
2327 | TraceRecorder::known(jsval* p) |
2328 | { |
2329 | checkForGlobalObjectReallocation(); |
2330 | return tracker.has(p); |
2331 | } |
2332 | |
2333 | /* |
2334 | * The dslots of the global object are sometimes reallocated by the interpreter. |
2335 |  * This function checks for that condition and re-maps the entries of the tracker
2336 | * accordingly. |
2337 | */ |
2338 | JS_REQUIRES_STACK void |
2339 | TraceRecorder::checkForGlobalObjectReallocation() |
2340 | { |
2341 | if (global_dslots != globalObj->dslots) { |
2342 | debug_only_v(printf("globalObj->dslots relocated, updating tracker\n");) |
2343 | jsval* src = global_dslots; |
2344 | jsval* dst = globalObj->dslots; |
2345 | jsuint length = globalObj->dslots[-1] - JS_INITIAL_NSLOTS; |
2346 | LIns** map = (LIns**)alloca(sizeof(LIns*) * length); |
2347 | for (jsuint n = 0; n < length; ++n) { |
2348 | map[n] = tracker.get(src); |
2349 | tracker.set(src++, NULL); |
2350 | } |
2351 | for (jsuint n = 0; n < length; ++n) |
2352 | tracker.set(dst++, map[n]); |
2353 | global_dslots = globalObj->dslots; |
2354 | } |
2355 | } |
2356 | |
2357 | /* Determine whether the current branch is a loop edge (taken or not taken). */ |
2358 | static JS_REQUIRES_STACK bool |
2359 | js_IsLoopEdge(jsbytecode* pc, jsbytecode* header) |
2360 | { |
2361 | switch (*pc) { |
2362 | case JSOP_IFEQ: |
2363 | case JSOP_IFNE: |
2364 | return ((pc + GET_JUMP_OFFSET(pc)) == header); |
2365 | case JSOP_IFEQX: |
2366 | case JSOP_IFNEX: |
2367 | return ((pc + GET_JUMPX_OFFSET(pc)) == header); |
2368 | default: |
2369 | JS_ASSERT((*pc == JSOP_AND) || (*pc == JSOP_ANDX) || |
2370 | (*pc == JSOP_OR) || (*pc == JSOP_ORX)); |
2371 | } |
2372 | return false; |
2373 | } |
2374 | |
2375 | /* |
2376 | * Promote slots if necessary to match the called tree's type map. This function is |
2377 | * infallible and must only be called if we are certain that it is possible to |
2378 | * reconcile the types for each slot in the inner and outer trees. |
2379 | */ |
2380 | JS_REQUIRES_STACK void |
2381 | TraceRecorder::adjustCallerTypes(Fragment* f) |
2382 | { |
2383 | uint16* gslots = treeInfo->globalSlots->data(); |
2384 | unsigned ngslots = treeInfo->globalSlots->length(); |
2385 | JS_ASSERT(ngslots == treeInfo->nGlobalTypes()); |
2386 | TreeInfo* ti = (TreeInfo*)f->vmprivate; |
2387 | uint8* map = ti->globalTypeMap(); |
2388 | uint8* m = map; |
2389 | FORALL_GLOBAL_SLOTS(cx, ngslots, gslots, |
2390 | LIns* i = get(vp); |
2391 | bool isPromote = isPromoteInt(i); |
2392 | if (isPromote && *m == JSVAL_DOUBLE) |
2393 | lir->insStorei(get(vp), lirbuf->state, nativeGlobalOffset(vp)); |
2394 | JS_ASSERT(!(!isPromote && *m == JSVAL_INT)); |
2395 | ++m; |
2396 | ); |
2397 | JS_ASSERT(unsigned(m - map) == ti->nGlobalTypes()); |
2398 | map = ti->stackTypeMap(); |
2399 | m = map; |
2400 | FORALL_SLOTS_IN_PENDING_FRAMES(cx, 0, |
2401 | LIns* i = get(vp); |
2402 | bool isPromote = isPromoteInt(i); |
2403 | if (isPromote && *m == JSVAL_DOUBLE) { |
2404 | lir->insStorei(get(vp), lirbuf->sp, |
2405 | -treeInfo->nativeStackBase + nativeStackOffset(vp)); |
2406 | /* Aggressively undo speculation so the inner tree will compile if this fails. */ |
2407 | oracle.markStackSlotUndemotable(cx, unsigned(m - map)); |
2408 | } |
2409 | JS_ASSERT(!(!isPromote && *m == JSVAL_INT)); |
2410 | ++m; |
2411 | ); |
2412 | JS_ASSERT(unsigned(m - map) == ti->nStackTypes); |
2413 | JS_ASSERT(f == f->root); |
2414 | } |
2415 | |
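     | /* Determine the type tag to record for *vp; for numbers, the last LIR
     |    instruction decides between JSVAL_INT and JSVAL_DOUBLE. */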
2416 | JS_REQUIRES_STACK uint8 |
2417 | TraceRecorder::determineSlotType(jsval* vp) |
2418 | { |
2419 | uint8 m; |
2420 | LIns* i = get(vp); |
2421 | if (isNumber(*vp)) { |
2422 | m = isPromoteInt(i) ? JSVAL_INT : JSVAL_DOUBLE; |
2423 | } else if (JSVAL_IS_OBJECT(*vp)) { |
2424 | if (JSVAL_IS_NULL(*vp)) |
2425 | m = JSVAL_TNULL; |
2426 | else if (HAS_FUNCTION_CLASS(JSVAL_TO_OBJECT(*vp))) |
2427 | m = JSVAL_TFUN; |
2428 | else |
2429 | m = JSVAL_OBJECT; |
2430 | } else { |
2431 | m = JSVAL_TAG(*vp); |
2432 | } |
2433 | JS_ASSERT((m != JSVAL_INT) || isInt32(*vp)); |
2434 | return m; |
2435 | } |
2436 | |
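     | /*
     |  * Capture the current interpreter state (pc, call depth, and the types of
     |  * all global and stack slots) into a VMSideExit. Matching LOOP_EXIT
     |  * snapshots are reused rather than duplicated.
     |  */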
2437 | JS_REQUIRES_STACK VMSideExit* |
2438 | TraceRecorder::snapshot(ExitType exitType) |
2439 | { |
2440 | JSStackFrame* fp = cx->fp; |
2441 | JSFrameRegs* regs = fp->regs; |
2442 | jsbytecode* pc = regs->pc; |
2443 | |
2444 | /* Check for a return-value opcode that needs to restart at the next instruction. */ |
2445 | const JSCodeSpec& cs = js_CodeSpec[*pc]; |
2446 | |
2447 | /* |
2448 | * When calling a _FAIL native, make the snapshot's pc point to the next |
2449 | * instruction after the CALL or APPLY. Even on failure, a _FAIL native must not |
2450 | * be called again from the interpreter. |
2451 | */ |
2452 | bool resumeAfter = (pendingTraceableNative && |
2453 | JSTN_ERRTYPE(pendingTraceableNative) == FAIL_STATUS); |
2454 | if (resumeAfter) { |
2455 | JS_ASSERT(*pc == JSOP_CALL || *pc == JSOP_APPLY || *pc == JSOP_NEW); |
2456 | pc += cs.length; |
2457 | regs->pc = pc; |
2458 | MUST_FLOW_THROUGH("restore_pc"); |
2459 | } |
2460 | |
2461 | /* Generate the entry map for the (possibly advanced) pc and stash it in the trace. */ |
2462 | unsigned stackSlots = js_NativeStackSlots(cx, callDepth); |
2463 | |
2464 | /* It's sufficient to track the native stack use here since all stores above the |
2465 | stack watermark defined by guards are killed. */ |
2466 | trackNativeStackUse(stackSlots + 1); |
2467 | |
2468 | /* Capture the type map into a temporary location. */ |
2469 | unsigned ngslots = treeInfo->globalSlots->length(); |
2470 | unsigned typemap_size = (stackSlots + ngslots) * sizeof(uint8); |
2471 | void *mark = JS_ARENA_MARK(&cx->tempPool); |
2472 | uint8* typemap; |
2473 | JS_ARENA_ALLOCATE_CAST(typemap, uint8*, &cx->tempPool, typemap_size); |
2474 | uint8* m = typemap; |
2475 | |
2476 | /* Determine the type of a store by looking at the current type of the actual value the |
2477 | interpreter is using. For numbers we have to check what kind of store we used last |
2478 |        (integer or double) to figure out what the side exit should reflect in its typemap. */
2479 | FORALL_SLOTS(cx, ngslots, treeInfo->globalSlots->data(), callDepth, |
2480 | *m++ = determineSlotType(vp); |
2481 | ); |
2482 | JS_ASSERT(unsigned(m - typemap) == ngslots + stackSlots); |
2483 | |
2484 | /* |
2485 | * If we are currently executing a traceable native or we are attaching a second trace |
2486 | * to it, the value on top of the stack is boxed. Make a note of this in the typemap. |
2487 | */ |
2488 | if (pendingTraceableNative && (pendingTraceableNative->flags & JSTN_UNBOX_AFTER)) |
2489 | typemap[stackSlots - 1] = JSVAL_BOXED; |
2490 | |
2491 |     /* Now restore the original pc (after which early returns are ok). */
2492 | if (resumeAfter) { |
2493 | MUST_FLOW_LABEL(restore_pc); |
2494 | regs->pc = pc - cs.length; |
2495 | } else { |
2496 | /* If we take a snapshot on a goto, advance to the target address. This avoids inner |
2497 |            trees returning on a break goto, which the outer recorder would then confuse with
2498 | a break in the outer tree. */ |
2499 | if (*pc == JSOP_GOTO) |
2500 | pc += GET_JUMP_OFFSET(pc); |
2501 | else if (*pc == JSOP_GOTOX) |
2502 | pc += GET_JUMPX_OFFSET(pc); |
2503 | } |
2504 | |
2505 | /* |
2506 | * Check if we already have a matching side exit; if so we can return that |
2507 | * side exit instead of creating a new one. |
2508 | */ |
2509 | VMSideExit** exits = treeInfo->sideExits.data(); |
2510 | unsigned nexits = treeInfo->sideExits.length(); |
2511 | if (exitType == LOOP_EXIT) { |
2512 | for (unsigned n = 0; n < nexits; ++n) { |
2513 | VMSideExit* e = exits[n]; |
2514 | if (e->pc == pc && e->imacpc == fp->imacpc && |
2515 | ngslots == e->numGlobalSlots && |
2516 | !memcmp(getFullTypeMap(exits[n]), typemap, typemap_size)) { |
2517 | AUDIT(mergedLoopExits); |
2518 | JS_ARENA_RELEASE(&cx->tempPool, mark); |
2519 | return e; |
2520 | } |
2521 | } |
2522 | } |
2523 | |
2524 | if (sizeof(VMSideExit) + (stackSlots + ngslots) * sizeof(uint8) >= MAX_SKIP_BYTES) { |
2525 | /* |
2526 | * ::snapshot() is infallible in the sense that callers don't |
2527 | * expect errors; but this is a trace-aborting error condition. So |
2528 | * mangle the request to consume zero slots, and mark the tree as |
2529 | * to-be-trashed. This should be safe as the trace will be aborted |
2530 | * before assembly or execution due to the call to |
2531 | * trackNativeStackUse above. |
2532 | */ |
2533 | stackSlots = 0; |
2534 | ngslots = 0; |
2535 | typemap_size = 0; |
2536 | trashSelf = true; |
2537 | } |
2538 | |
2539 | /* We couldn't find a matching side exit, so create a new one. */ |
2540 | LIns* data = lir->skip(sizeof(VMSideExit) + (stackSlots + ngslots) * sizeof(uint8)); |
2541 | VMSideExit* exit = (VMSideExit*) data->payload(); |
2542 | |
2543 |     /* Set up the side exit structure. */
2544 | memset(exit, 0, sizeof(VMSideExit)); |
2545 | exit->from = fragment; |
2546 | exit->calldepth = callDepth; |
2547 | exit->numGlobalSlots = ngslots; |
2548 | exit->numStackSlots = stackSlots; |
2549 | exit->numStackSlotsBelowCurrentFrame = cx->fp->callee |
2550 | ? nativeStackOffset(&cx->fp->argv[-2])/sizeof(double) |
2551 | : 0; |
2552 | exit->exitType = exitType; |
2553 | exit->block = fp->blockChain; |
2554 | exit->pc = pc; |
2555 | exit->imacpc = fp->imacpc; |
2556 | exit->sp_adj = (stackSlots * sizeof(double)) - treeInfo->nativeStackBase; |
2557 | exit->rp_adj = exit->calldepth * sizeof(FrameInfo*); |
2558 | exit->nativeCalleeWord = 0; |
2559 | memcpy(getFullTypeMap(exit), typemap, typemap_size); |
2560 | |
2561 | JS_ARENA_RELEASE(&cx->tempPool, mark); |
2562 | return exit; |
2563 | } |
2564 | |
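     | /* Allocate a GuardRecord in the LIR buffer and link it to the given side exit. */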
2565 | JS_REQUIRES_STACK LIns* |
2566 | TraceRecorder::createGuardRecord(VMSideExit* exit) |
2567 | { |
2568 | LIns* guardRec = lir->skip(sizeof(GuardRecord)); |
2569 | GuardRecord* gr = (GuardRecord*) guardRec->payload(); |
2570 | |
2571 | memset(gr, 0, sizeof(GuardRecord)); |
2572 | gr->exit = exit; |
2573 | exit->addGuard(gr); |
2574 | |
2575 | return guardRec; |
2576 | } |
2577 | |
2578 | /* |
2579 |  * Emit a guard for condition (cond), expecting it to evaluate to boolean result
2580 |  * (expected) and using the supplied side exit if the condition doesn't hold.
2581 | */ |
2582 | JS_REQUIRES_STACK void |
2583 | TraceRecorder::guard(bool expected, LIns* cond, VMSideExit* exit) |
2584 | { |
2585 | LIns* guardRec = createGuardRecord(exit); |
2586 | |
2587 | /* |
2588 | * BIG FAT WARNING: If compilation fails we don't reset the lirbuf, so it's |
2589 | * safe to keep references to the side exits here. If we ever start |
2590 | * rewinding those lirbufs, we have to make sure we purge the side exits |
2591 | * that then no longer will be in valid memory. |
2592 | */ |
2593 | if (exit->exitType == LOOP_EXIT) |
2594 | treeInfo->sideExits.add(exit); |
2595 | |
2596 | if (!cond->isCond()) { |
2597 | expected = !expected; |
2598 | cond = lir->ins_eq0(cond); |
2599 | } |
2600 | |
2601 | LIns* guardIns = |
2602 | lir->insGuard(expected ? LIR_xf : LIR_xt, cond, guardRec); |
2603 | if (guardIns) { |
2604 | debug_only_v(printf(" SideExit=%p exitType=%d\n", (void*)exit, exit->exitType);) |
2605 | } else { |
2606 | debug_only_v(printf(" redundant guard, eliminated\n");) |
2607 | } |
2608 | } |
2609 | |
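     | /* Clone a side exit, including its type map, clearing its guard list and
     |    target so the copy can be wired up independently. */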
2610 | JS_REQUIRES_STACK VMSideExit* |
2611 | TraceRecorder::copy(VMSideExit* copy) |
2612 | { |
2613 | size_t typemap_size = copy->numGlobalSlots + copy->numStackSlots; |
2614 | LIns* data = lir->skip(sizeof(VMSideExit) + |
2615 | typemap_size * sizeof(uint8)); |
2616 | VMSideExit* exit = (VMSideExit*) data->payload(); |
2617 | |
2618 | /* Copy side exit structure. */ |
2619 | memcpy(exit, copy, sizeof(VMSideExit) + typemap_size * sizeof(uint8)); |
2620 | exit->guards = NULL; |
2621 | exit->from = fragment; |
2622 | exit->target = NULL; |
2623 | |
2624 | /* |
2625 | * BIG FAT WARNING: If compilation fails we don't reset the lirbuf, so it's |
2626 | * safe to keep references to the side exits here. If we ever start |
2627 | * rewinding those lirbufs, we have to make sure we purge the side exits |
2628 | * that then no longer will be in valid memory. |
2629 | */ |
2630 | if (exit->exitType == LOOP_EXIT) |
2631 | treeInfo->sideExits.add(exit); |
2632 | return exit; |
2633 | } |
2634 | |
2635 | /* Emit a guard for condition (cond), expecting it to evaluate to boolean result (expected),
2636 |    and generate a side exit of type exitType to jump to if the condition does not hold. */
2637 | JS_REQUIRES_STACK void |
2638 | TraceRecorder::guard(bool expected, LIns* cond, ExitType exitType) |
2639 | { |
2640 | guard(expected, cond, snapshot(exitType)); |
2641 | } |
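     |
     | /*
     |  * A minimal usage sketch (the operand name is hypothetical): leave the trace
     |  * through a MISMATCH_EXIT side exit whenever obj_ins is null at run time:
     |  *
     |  *     guard(false, lir->ins_eq0(obj_ins), MISMATCH_EXIT);
     |  *
     |  * Because ins_eq0 produces a condition, guard emits it directly without
     |  * inserting an extra negation.
     |  */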
2642 | |
2643 | /* Try to match the type of a slot to type t. checkType is used to verify that the type of |
2644 | * values flowing into the loop edge is compatible with the type we expect in the loop header. |
2645 | * |
2646 | * @param v Value. |
2647 | * @param t Typemap entry for value. |
2648 | * @param stage_val Outparam for set() address. |
2649 | * @param stage_ins Outparam for set() instruction. |
2650 | * @param stage_count Outparam for set() buffer count. |
2651 | * @return True if types are compatible, false otherwise. |
2652 | */ |
2653 | JS_REQUIRES_STACK bool |
2654 | TraceRecorder::checkType(jsval& v, uint8 t, jsval*& stage_val, LIns*& stage_ins, |
2655 | unsigned& stage_count) |
2656 | { |
2657 | if (t == JSVAL_INT) { /* initially all whole numbers cause the slot to be demoted */ |
2658 | debug_only_v(printf("checkType(tag=1, t=%d, isnum=%d, i2f=%d) stage_count=%d\n", |
2659 | t, |
2660 | isNumber(v), |
2661 | isPromoteInt(get(&v)), |
2662 | stage_count);) |
2663 | if (!isNumber(v)) |
2664 | return false; /* not a number? type mismatch */ |
2665 | LIns* i = get(&v); |
2666 |         /* This is always a type mismatch; we can't close a double to an int. */
2667 | if (!isPromoteInt(i)) |
2668 | return false; |
2669 |         /* Looks good: the slot is an int32, and the last instruction should be promotable. */
2670 | JS_ASSERT(isInt32(v) && isPromoteInt(i)); |
2671 | /* Overwrite the value in this slot with the argument promoted back to an integer. */ |
2672 | stage_val = &v; |
2673 | stage_ins = f2i(i); |
2674 | stage_count++; |
2675 | return true; |
2676 | } |
2677 | if (t == JSVAL_DOUBLE) { |
2678 | debug_only_v(printf("checkType(tag=2, t=%d, isnum=%d, promote=%d) stage_count=%d\n", |
2679 | t, |
2680 | isNumber(v), |
2681 | isPromoteInt(get(&v)), |
2682 | stage_count);) |
2683 | if (!isNumber(v)) |
2684 | return false; /* not a number? type mismatch */ |
2685 | LIns* i = get(&v); |
2686 | /* We sink i2f conversions into the side exit, but at the loop edge we have to make |
2687 | sure we promote back to double if at loop entry we want a double. */ |
2688 | if (isPromoteInt(i)) { |
2689 | stage_val = &v; |
2690 | stage_ins = lir->ins1(LIR_i2f, i); |
2691 | stage_count++; |
2692 | } |
2693 | return true; |
2694 | } |
2695 | if (t == JSVAL_TNULL) |
2696 | return JSVAL_IS_NULL(v); |
2697 | if (t == JSVAL_TFUN) |
2698 | return !JSVAL_IS_PRIMITIVE(v) && HAS_FUNCTION_CLASS(JSVAL_TO_OBJECT(v)); |
2699 | if (t == JSVAL_OBJECT) |
2700 | return !JSVAL_IS_PRIMITIVE(v) && !HAS_FUNCTION_CLASS(JSVAL_TO_OBJECT(v)); |
2701 | |
2702 | /* for non-number types we expect a precise match of the type */ |
2703 | uint8 vt = getCoercedType(v); |
2704 | #ifdef DEBUG |
2705 | if (vt != t) { |
2706 | debug_only_v(printf("Type mismatch: val %c, map %c ", typeChar[vt], |
2707 | typeChar[t]);) |
2708 | } |
2709 | #endif |
2710 | debug_only_v(printf("checkType(vt=%d, t=%d) stage_count=%d\n", |
2711 | (int) vt, t, stage_count);) |
2712 | return vt == t; |
2713 | } |
2714 | |
2715 | /** |
2716 | * Make sure that the current values in the given stack frame and all stack frames |
2717 |  * up to and including entryFrame are type-compatible with the entry map.
2718 | * |
2719 | * @param root_peer First fragment in peer list. |
2720 | * @param stable_peer Outparam for first type stable peer. |
2721 | * @param demote True if stability was achieved through demotion. |
2722 | * @return True if type stable, false otherwise. |
2723 | */ |
2724 | JS_REQUIRES_STACK bool |
2725 | TraceRecorder::deduceTypeStability(Fragment* root_peer, Fragment** stable_peer, bool& demote) |
2726 | { |
2727 | uint8* m; |
2728 | uint8* typemap; |
2729 | unsigned ngslots = treeInfo->globalSlots->length(); |
2730 | uint16* gslots = treeInfo->globalSlots->data(); |
2731 | JS_ASSERT(ngslots == treeInfo->nGlobalTypes()); |
2732 | |
2733 | if (stable_peer) |
2734 | *stable_peer = NULL; |
2735 | |
2736 | /* |
2737 |      * Rather than calculate all of this twice, it gets cached locally. The "stage" buffers
2738 | * are for calls to set() that will change the exit types. |
2739 | */ |
2740 | bool success; |
2741 | unsigned stage_count; |
2742 | jsval** stage_vals = (jsval**)alloca(sizeof(jsval*) * (treeInfo->typeMap.length())); |
2743 | LIns** stage_ins = (LIns**)alloca(sizeof(LIns*) * (treeInfo->typeMap.length())); |
2744 | |
2745 | /* First run through and see if we can close ourselves - best case! */ |
2746 | stage_count = 0; |
2747 | success = false; |
2748 | |
2749 | debug_only_v(printf("Checking type stability against self=%p\n", (void*)fragment);) |
2750 | |
2751 | m = typemap = treeInfo->globalTypeMap(); |
2752 | FORALL_GLOBAL_SLOTS(cx, ngslots, gslots, |
2753 | debug_only_v(printf("%s%d ", vpname, vpnum);) |
2754 | if (!checkType(*vp, *m, stage_vals[stage_count], stage_ins[stage_count], stage_count)) { |
2755 | /* If the failure was an int->double, tell the oracle. */ |
2756 | if (*m == JSVAL_INT && isNumber(*vp) && !isPromoteInt(get(vp))) { |
2757 | oracle.markGlobalSlotUndemotable(cx, gslots[n]); |
2758 | demote = true; |
2759 | } else { |
2760 | goto checktype_fail_1; |
2761 | } |
2762 | } |
2763 | ++m; |
2764 | ); |
2765 | m = typemap = treeInfo->stackTypeMap(); |
2766 | FORALL_SLOTS_IN_PENDING_FRAMES(cx, 0, |
2767 | debug_only_v(printf("%s%d ", vpname, vpnum);) |
2768 | if (!checkType(*vp, *m, stage_vals[stage_count], stage_ins[stage_count], stage_count)) { |
2769 | if (*m == JSVAL_INT && isNumber(*vp) && !isPromoteInt(get(vp))) { |
2770 | oracle.markStackSlotUndemotable(cx, unsigned(m - typemap)); |
2771 | demote = true; |
2772 | } else { |
2773 | goto checktype_fail_1; |
2774 | } |
2775 | } |
2776 | ++m; |
2777 | ); |
2778 | |
2779 | success = true; |
2780 | |
2781 | checktype_fail_1: |
2782 |     /* If we succeeded and we don't need to recompile, we should just close here. */
2783 | if (success && !demote) { |
2784 | for (unsigned i = 0; i < stage_count; i++) |
2785 | set(stage_vals[i], stage_ins[i]); |
2786 | return true; |
2787 | /* If we need to trash, don't bother checking peers. */ |
2788 | } else if (trashSelf) { |
2789 | return false; |
2790 | } |
2791 | |
2792 | demote = false; |
2793 | |
2794 | /* At this point the tree is about to be incomplete, so let's see if we can connect to any |
2795 | * peer fragment that is type stable. |
2796 | */ |
2797 | Fragment* f; |
2798 | TreeInfo* ti; |
2799 | for (f = root_peer; f != NULL; f = f->peer) { |
2800 | debug_only_v(printf("Checking type stability against peer=%p (code=%p)\n", (void*)f, f->code());) |
2801 | if (!f->code()) |
2802 | continue; |
2803 | ti = (TreeInfo*)f->vmprivate; |
2804 | /* Don't allow varying stack depths */ |
2805 | if ((ti->nStackTypes != treeInfo->nStackTypes) || |
2806 | (ti->typeMap.length() != treeInfo->typeMap.length()) || |
2807 | (ti->globalSlots->length() != treeInfo->globalSlots->length())) |
2808 | continue; |
2809 | stage_count = 0; |
2810 | success = false; |
2811 | |
2812 | m = ti->globalTypeMap(); |
2813 | FORALL_GLOBAL_SLOTS(cx, treeInfo->globalSlots->length(), treeInfo->globalSlots->data(), |
2814 | if (!checkType(*vp, *m, stage_vals[stage_count], stage_ins[stage_count], stage_count)) |
2815 | goto checktype_fail_2; |
2816 | ++m; |
2817 | ); |
2818 | |
2819 | m = ti->stackTypeMap(); |
2820 | FORALL_SLOTS_IN_PENDING_FRAMES(cx, 0, |
2821 | if (!checkType(*vp, *m, stage_vals[stage_count], stage_ins[stage_count], stage_count)) |
2822 | goto checktype_fail_2; |
2823 | ++m; |
2824 | ); |
2825 | |
2826 | success = true; |
2827 | |
2828 | checktype_fail_2: |
2829 | if (success) { |
2830 | /* |
2831 | * There was a successful match. We don't care about restoring the saved staging, but |
2832 | * we do need to clear the original undemote list. |
2833 | */ |
2834 | for (unsigned i = 0; i < stage_count; i++) |
2835 | set(stage_vals[i], stage_ins[i]); |
2836 | if (stable_peer) |
2837 | *stable_peer = f; |
2838 | demote = false; |
2839 | return false; |
2840 | } |
2841 | } |
2842 | |
2843 | /* |
2844 | * If this is a loop trace and it would be stable with demotions, build an undemote list |
2845 | * and return true. Our caller should sniff this and trash the tree, recording a new one |
2846 |      * that will presumably stabilize.
2847 | */ |
2848 | if (demote && fragment->kind == LoopTrace) { |
2849 | typemap = m = treeInfo->globalTypeMap(); |
2850 | FORALL_GLOBAL_SLOTS(cx, treeInfo->globalSlots->length(), treeInfo->globalSlots->data(), |
2851 | if (*m == JSVAL_INT) { |
2852 | JS_ASSERT(isNumber(*vp)); |
2853 | if (!isPromoteInt(get(vp))) |
2854 | oracle.markGlobalSlotUndemotable(cx, gslots[n]); |
2855 | } else if (*m == JSVAL_DOUBLE) { |
2856 | JS_ASSERT(isNumber(*vp)); |
2857 | oracle.markGlobalSlotUndemotable(cx, gslots[n]); |
2858 | } else { |
2859 | JS_ASSERT(*m == JSVAL_TAG(*vp)); |
2860 | } |
2861 | m++; |
2862 | ); |
2863 | |
2864 | typemap = m = treeInfo->stackTypeMap(); |
2865 | FORALL_SLOTS_IN_PENDING_FRAMES(cx, 0, |
2866 | if (*m == JSVAL_INT) { |
2867 | JS_ASSERT(isNumber(*vp)); |
2868 | if (!isPromoteInt(get(vp))) |
2869 | oracle.markStackSlotUndemotable(cx, unsigned(m - typemap)); |
2870 | } else if (*m == JSVAL_DOUBLE) { |
2871 | JS_ASSERT(isNumber(*vp)); |
2872 | oracle.markStackSlotUndemotable(cx, unsigned(m - typemap)); |
2873 | } else { |
2874 | JS_ASSERT((*m == JSVAL_TNULL) |
2875 | ? JSVAL_IS_NULL(*vp) |
2876 | : *m == JSVAL_TFUN |
2877 | ? !JSVAL_IS_PRIMITIVE(*vp) && HAS_FUNCTION_CLASS(JSVAL_TO_OBJECT(*vp)) |
2878 | : *m == JSVAL_TAG(*vp)); |
2879 | } |
2880 | m++; |
2881 | ); |
2882 | return true; |
2883 | } else { |
2884 | demote = false; |
2885 | } |
2886 | |
2887 | return false; |
2888 | } |
2889 | |
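     | /*
     |  * Discard all compiled fragments and reset the monitor's global state. If a
     |  * deep bail is in progress (tm->prohibitFlush), defer the flush by setting
     |  * tm->needFlush instead.
     |  */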
2890 | static JS_REQUIRES_STACK void |
2891 | FlushJITCache(JSContext* cx) |
2892 | { |
2893 | if (!TRACING_ENABLED(cx)) |
2894 | return; |
2895 | JSTraceMonitor* tm = &JS_TRACE_MONITOR(cx); |
2896 | debug_only_v(printf("Flushing cache.\n");) |
2897 | if (tm->recorder) |
2898 | js_AbortRecording(cx, "flush cache"); |
2899 | TraceRecorder* tr; |
2900 | while ((tr = tm->abortStack) != NULL) { |
2901 | tr->removeFragmentoReferences(); |
2902 | tr->deepAbort(); |
2903 | tr->popAbortStack(); |
2904 | } |
2905 | Fragmento* fragmento = tm->fragmento; |
2906 | if (fragmento) { |
2907 | if (tm->prohibitFlush) { |
2908 | debug_only_v(printf("Deferring fragmento flush due to deep bail.\n");) |
2909 | tm->needFlush = JS_TRUE; |
2910 | return; |
2911 | } |
2912 | |
2913 | fragmento->clearFrags(); |
2914 | #ifdef DEBUG |
2915 | JS_ASSERT(fragmento->labels); |
2916 | fragmento->labels->clear(); |
2917 | #endif |
2918 | tm->lirbuf->rewind(); |
2919 | for (size_t i = 0; i < FRAGMENT_TABLE_SIZE; ++i) { |
2920 | VMFragment* f = tm->vmfragments[i]; |
2921 | while (f) { |
2922 | VMFragment* next = f->next; |
2923 | fragmento->clearFragment(f); |
2924 | f = next; |
2925 | } |
2926 | tm->vmfragments[i] = NULL; |
2927 | } |
2928 | for (size_t i = 0; i < MONITOR_N_GLOBAL_STATES; ++i) { |
2929 | tm->globalStates[i].globalShape = -1; |
2930 | tm->globalStates[i].globalSlots->clear(); |
2931 | } |
2932 | } |
2933 | tm->needFlush = JS_FALSE; |
2934 | } |
2935 | |
2936 | /* Compile the current fragment. */ |
2937 | JS_REQUIRES_STACK void |
2938 | TraceRecorder::compile(JSTraceMonitor* tm) |
2939 | { |
2940 | if (tm->needFlush) { |
2941 | FlushJITCache(cx); |
2942 | return; |
2943 | } |
2944 | Fragmento* fragmento = tm->fragmento; |
2945 | if (treeInfo->maxNativeStackSlots >= MAX_NATIVE_STACK_SLOTS) { |
2946 | debug_only_v(printf("Blacklist: excessive stack use.\n")); |
2947 | js_Blacklist((jsbytecode*) fragment->root->ip); |
2948 | return; |
2949 | } |
2950 | if (anchor && anchor->exitType != CASE_EXIT) |
2951 | ++treeInfo->branchCount; |
2952 | if (lirbuf->outOMem()) { |
2953 | fragmento->assm()->setError(nanojit::OutOMem); |
2954 | return; |
2955 | } |
2956 | ::compile(fragmento->assm(), fragment); |
2957 | if (fragmento->assm()->error() == nanojit::OutOMem) |
2958 | return; |
2959 | if (fragmento->assm()->error() != nanojit::None) { |
2960 | debug_only_v(printf("Blacklisted: error during compilation\n");) |
2961 | js_Blacklist((jsbytecode*) fragment->root->ip); |
2962 | return; |
2963 | } |
2964 | js_resetRecordingAttempts(cx, (jsbytecode*) fragment->ip); |
2965 | js_resetRecordingAttempts(cx, (jsbytecode*) fragment->root->ip); |
2966 | if (anchor) { |
2967 | #ifdef NANOJIT_IA32 |
2968 | if (anchor->exitType == CASE_EXIT) |
2969 | fragmento->assm()->patch(anchor, anchor->switchInfo); |
2970 | else |
2971 | #endif |
2972 | fragmento->assm()->patch(anchor); |
2973 | } |
2974 | JS_ASSERT(fragment->code()); |
2975 | JS_ASSERT(!fragment->vmprivate); |
2976 | if (fragment == fragment->root) |
2977 | fragment->vmprivate = treeInfo; |
2978 | /* :TODO: windows support */ |
2979 | #if defined DEBUG && !defined WIN32 |
2980 | const char* filename = cx->fp->script->filename; |
2981 | char* label = (char*)malloc((filename ? strlen(filename) : 7) + 16); |
2982 | sprintf(label, "%s:%u", filename ? filename : "<stdin>", |
2983 | js_FramePCToLineNumber(cx, cx->fp)); |
2984 | fragmento->labels->add(fragment, sizeof(Fragment), 0, label); |
2985 | free(label); |
2986 | #endif |
2987 | AUDIT(traceCompleted); |
2988 | } |
2989 | |
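     | /*
     |  * If the type-unstable exit's full type map matches the stable tree's entry
     |  * map, patch the exit to jump straight into stableFrag and record the
     |  * dependency between the two trees.
     |  */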
2990 | static bool |
2991 | js_JoinPeersIfCompatible(Fragmento* frago, Fragment* stableFrag, TreeInfo* stableTree, |
2992 | VMSideExit* exit) |
2993 | { |
2994 | JS_ASSERT(exit->numStackSlots == stableTree->nStackTypes); |
2995 | |
2996 | /* Must have a matching type unstable exit. */ |
2997 | if ((exit->numGlobalSlots + exit->numStackSlots != stableTree->typeMap.length()) || |
2998 | memcmp(getFullTypeMap(exit), stableTree->typeMap.data(), stableTree->typeMap.length())) { |
2999 | return false; |
3000 | } |
3001 | |
3002 | exit->target = stableFrag; |
3003 | frago->assm()->patch(exit); |
3004 | |
3005 | stableTree->dependentTrees.addUnique(exit->from->root); |
3006 | ((TreeInfo*)exit->from->root->vmprivate)->linkedTrees.addUnique(stableFrag); |
3007 | |
3008 | return true; |
3009 | } |
3010 | |
3011 | /* Complete and compile a trace and link it to the existing tree if appropriate. */ |
3012 | JS_REQUIRES_STACK void |
3013 | TraceRecorder::closeLoop(JSTraceMonitor* tm, bool& demote) |
3014 | { |
3015 | /* |
3016 |      * We should have arrived back at the loop header; hence we must not be in an imacro
3017 |      * here, and the opcode should be either JSOP_LOOP or, in case this loop was blacklisted
3018 |      * in the meantime, JSOP_NOP.
3019 | */ |
3020 | JS_ASSERT((*cx->fp->regs->pc == JSOP_LOOP || *cx->fp->regs->pc == JSOP_NOP) && !cx->fp->imacpc); |
3021 | |
3022 | bool stable; |
3023 | Fragment* peer; |
3024 | VMFragment* peer_root; |
3025 | Fragmento* fragmento = tm->fragmento; |
3026 | |
3027 | if (callDepth != 0) { |
3028 | debug_only_v(printf("Blacklisted: stack depth mismatch, possible recursion.\n");) |
3029 | js_Blacklist((jsbytecode*) fragment->root->ip); |
3030 | trashSelf = true; |
3031 | return; |
3032 | } |
3033 | |
3034 | VMSideExit* exit = snapshot(UNSTABLE_LOOP_EXIT); |
3035 | JS_ASSERT(exit->numStackSlots == treeInfo->nStackTypes); |
3036 | |
3037 | VMFragment* root = (VMFragment*)fragment->root; |
3038 | peer_root = getLoop(traceMonitor, root->ip, root->globalObj, root->globalShape, root->argc); |
3039 | JS_ASSERT(peer_root != NULL); |
3040 | |
3041 | stable = deduceTypeStability(peer_root, &peer, demote); |
3042 | |
3043 | #ifdef DEBUG
3044 | if (!stable) |
3045 | AUDIT(unstableLoopVariable); |
3046 | #endif |
3047 | |
3048 | if (trashSelf) { |
3049 | debug_only_v(printf("Trashing tree from type instability.\n");) |
3050 | return; |
3051 | } |
3052 | |
3053 | if (stable && demote) { |
3054 | JS_ASSERT(fragment->kind == LoopTrace); |
3055 | return; |
3056 | } |
3057 | |
3058 | if (!stable) { |
3059 | fragment->lastIns = lir->insGuard(LIR_x, lir->insImm(1), createGuardRecord(exit)); |
3060 | |
3061 | /* |
3062 | * If we didn't find a type stable peer, we compile the loop anyway and |
3063 | * hope it becomes stable later. |
3064 | */ |
3065 | if (!peer) { |
3066 | /* |
3067 | * If such a fragment does not exist, let's compile the loop ahead |
3068 | * of time anyway. Later, if the loop becomes type stable, we will |
3069 | * connect these two fragments together. |
3070 | */ |
3071 | debug_only_v(printf("Trace has unstable loop variable with no stable peer, " |
3072 | "compiling anyway.\n");) |
3073 | UnstableExit* uexit = new UnstableExit; |
3074 | uexit->fragment = fragment; |
3075 | uexit->exit = exit; |
3076 | uexit->next = treeInfo->unstableExits; |
3077 | treeInfo->unstableExits = uexit; |
3078 | } else { |
3079 | JS_ASSERT(peer->code()); |
3080 | exit->target = peer; |
3081 | debug_only_v(printf("Joining type-unstable trace to target fragment %p.\n", (void*)peer);) |
3082 | stable = true; |
3083 | ((TreeInfo*)peer->vmprivate)->dependentTrees.addUnique(fragment->root); |
3084 | treeInfo->linkedTrees.addUnique(peer); |
3085 | } |
3086 | } else { |
3087 | exit->target = fragment->root; |
3088 | fragment->lastIns = lir->insGuard(LIR_loop, lir->insImm(1), createGuardRecord(exit)); |
3089 | } |
3090 | compile(tm); |
3091 | |
3092 | if (fragmento->assm()->error() != nanojit::None) |
3093 | return; |
3094 | |
3095 | joinEdgesToEntry(fragmento, peer_root); |
3096 | |
3097 |     debug_only_v(printf("updating specializations on dependent and linked trees\n");)
3098 | if (fragment->root->vmprivate) |
3099 | specializeTreesToMissingGlobals(cx, (TreeInfo*)fragment->root->vmprivate); |
3100 | |
3101 | /* |
3102 | * If this is a newly formed tree, and the outer tree has not been compiled yet, we |
3103 | * should try to compile the outer tree again. |
3104 | */ |
3105 | if (outer) |
3106 | js_AttemptCompilation(cx, tm, globalObj, outer, outerArgc); |
3107 | |
3108 | debug_only_v(printf("recording completed at %s:%u@%u via closeLoop\n", |
3109 | cx->fp->script->filename, |
3110 | js_FramePCToLineNumber(cx, cx->fp), |
3111 | FramePCOffset(cx->fp));) |
3112 | } |
3113 | |
3114 | JS_REQUIRES_STACK void |
3115 |