--- (old revision)
+++ (new revision)

@@ line 1 @@
-/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
  * vim: set ts=8 sw=4 et tw=99 ft=cpp:
  *
  * ***** BEGIN LICENSE BLOCK *****

@@ line 66 @@
         while (_max < size)
             _max <<= 1;
         _data = (T*)realloc(_data, _max * sizeof(T));
+#if defined(DEBUG)
+        memset(&_data[_len], 0xcd, _max - _len);
+#endif
     }
 public:
     Queue(unsigned max = 16) {

@@ line 82 @@
     }

     bool contains(T a) {
-        for (unsigned n = 0; n < _len; ++n)
+        for (unsigned n = 0; n < _len; ++n) {
             if (_data[n] == a)
                 return true;
+        }
         return false;
     }

@@ line 116 @@
         _len = 0;
     }

+    const T & get(unsigned i) const {
+        return _data[i];
+    }

     unsigned length() const {
         return _len;
     }

@@ line 154 @@
     void clear();
 };

+#ifdef JS_JIT_SPEW
+extern bool js_verboseDebug;
+#define debug_only_v(x) if (js_verboseDebug) { x; fflush(stdout); }
+#else
+#define debug_only_v(x)
+#endif
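A minimal usage sketch for the spew macro added above (illustrative only; the printf argument is a stand-in statement, not code from this file):

    // Expands to `if (js_verboseDebug) { printf(...); fflush(stdout); }` when
    // JS_JIT_SPEW is defined, and compiles away to nothing otherwise.
    debug_only_v(printf("entering trace recorder\n");)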

 /*
- * The oracle keeps track of slots that should not be demoted to int because we know them
- * to overflow or they result in type-unstable traces. We are using a simple hash table.
- * Collisions lead to loss of optimization (demotable slots are not demoted) but have no
- * correctness implications.
+ * The oracle keeps track of hit counts for program counter locations, as
+ * well as slots that should not be demoted to int because we know them to
+ * overflow or they result in type-unstable traces. We are using simple
+ * hash tables. Collisions lead to loss of optimization (demotable slots
+ * are not demoted, etc.) but have no correctness implications.
  */
 #define ORACLE_SIZE 4096

 class Oracle {
-    avmplus::BitSet _dontDemote;
+    avmplus::BitSet _stackDontDemote;
+    avmplus::BitSet _globalDontDemote;
 public:
-    void markGlobalSlotUndemotable(JSScript* script, unsigned slot);
-    bool isGlobalSlotUndemotable(JSScript* script, unsigned slot) const;
-    void markStackSlotUndemotable(JSScript* script, jsbytecode* ip, unsigned slot);
-    bool isStackSlotUndemotable(JSScript* script, jsbytecode* ip, unsigned slot) const;
-    void clear();
+    Oracle();
+
+    JS_REQUIRES_STACK void markGlobalSlotUndemotable(JSContext* cx, unsigned slot);
+    JS_REQUIRES_STACK bool isGlobalSlotUndemotable(JSContext* cx, unsigned slot) const;
+    JS_REQUIRES_STACK void markStackSlotUndemotable(JSContext* cx, unsigned slot);
+    JS_REQUIRES_STACK bool isStackSlotUndemotable(JSContext* cx, unsigned slot) const;
+    void clearDemotability();
+    void clear() {
+        clearDemotability();
+    }
 };
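The comment above explains why a lossy, fixed-size table is acceptable here: a hash collision can only suppress a legal demotion, never enable an incorrect one. A self-contained sketch of that idea with a made-up hash function (the real hashing is in the implementation file and is not shown in this header):

    // Illustrative only: fold a (pc, slot) pair into one of ORACLE_SIZE buckets.
    static unsigned
    exampleOracleBucket(const void* pc, unsigned slot)
    {
        uintptr_t h = uintptr_t(pc) ^ (uintptr_t(slot) * 2654435761u);
        return unsigned(h % ORACLE_SIZE);   // ORACLE_SIZE is 4096, defined above
    }
    // Two different slots may land in the same bucket; the worst case is that
    // a demotable slot is treated as undemotable (slower code, still correct).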

 typedef Queue<uint16> SlotList;

 class TypeMap : public Queue<uint8> {
 public:
-    void captureGlobalTypes(JSContext* cx, SlotList& slots);
-    void captureStackTypes(JSContext* cx, unsigned callDepth);
+    JS_REQUIRES_STACK void captureTypes(JSContext* cx, SlotList& slots, unsigned callDepth);
+    JS_REQUIRES_STACK void captureMissingGlobalTypes(JSContext* cx,
+                                                     SlotList& slots,
+                                                     unsigned stackSlots);
     bool matches(TypeMap& other) const;
 };

 enum ExitType {
-    BRANCH_EXIT,
-    LOOP_EXIT,
+    /*
+     * An exit at a possible branch-point in the trace at which to attach a
+     * future secondary trace. Therefore the recorder must generate different
+     * code to handle the other outcome of the branch condition from the
+     * primary trace's outcome.
+     */
+    BRANCH_EXIT,
+
+    /*
+     * Exit at a tableswitch via a numbered case.
+     */
+    CASE_EXIT,
+
+    /*
+     * Exit at a tableswitch via the default case.
+     */
+    DEFAULT_EXIT,
+
+    LOOP_EXIT,
     NESTED_EXIT,
+
+    /*
+     * An exit from a trace because a condition relied upon at recording time
+     * no longer holds, where the alternate path of execution is so rare or
+     * difficult to address in native code that it is not traced at all, e.g.
+     * negative array index accesses, which differ from positive indexes in
+     * that they require a string-based property lookup rather than a simple
+     * memory access.
+     */
     MISMATCH_EXIT,
+
+    /*
+     * A specialization of MISMATCH_EXIT to handle allocation failures.
+     */
     OOM_EXIT,
     OVERFLOW_EXIT,
     UNSTABLE_LOOP_EXIT,
-    TIMEOUT_EXIT
+    TIMEOUT_EXIT,
+    DEEP_BAIL_EXIT,
+    STATUS_EXIT
 };

 struct VMSideExit : public nanojit::SideExit
 {
-    intptr_t ip_adj;
+    JSObject* block;
+    jsbytecode* pc;
+    jsbytecode* imacpc;
     intptr_t sp_adj;
     intptr_t rp_adj;
     int32_t calldepth;
@@ line 252 @@
     uint32 numStackSlots;
     uint32 numStackSlotsBelowCurrentFrame;
     ExitType exitType;
+
+    /*
+     * Ordinarily 0. If a slow native function is atop the stack, the 1 bit is
+     * set if constructing and the other bits are a pointer to the funobj.
+     */
+    uintptr_t nativeCalleeWord;
+
+    JSObject * nativeCallee() {
+        return (JSObject *) (nativeCalleeWord & ~1);
+    }
+
+    bool constructing() {
+        return bool(nativeCalleeWord & 1);
+    }
+
+    void setNativeCallee(JSObject *callee, bool constructing) {
+        nativeCalleeWord = uintptr_t(callee) | (constructing ? 1 : 0);
+    }
 };
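The accessors above rely on JSObject pointers being at least two-byte aligned, so the low bit of nativeCalleeWord is free to carry the constructing flag. A self-contained sketch of the same packing (names are illustrative, not part of this header):

    // setNativeCallee(obj, true) stores an odd word; nativeCallee() masks the
    // flag bit back off and constructing() tests it.
    static uintptr_t examplePackCallee(JSObject* callee, bool constructing) {
        return uintptr_t(callee) | (constructing ? 1 : 0);
    }
    static JSObject* exampleUnpackCallee(uintptr_t word, bool* constructing) {
        *constructing = (word & 1) != 0;
        return (JSObject*) (word & ~uintptr_t(1));
    }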

-static inline uint8* getTypeMap(nanojit::SideExit* exit)
+static inline uint8* getStackTypeMap(nanojit::SideExit* exit)
 {
     return (uint8*)(((VMSideExit*)exit) + 1);
 }

-struct InterpState
-{
-    void* sp;                      /* native stack pointer, stack[0] is spbase[0] */
-    void* rp;                      /* call stack pointer */
-    void* gp;                      /* global frame pointer */
-    JSContext *cx;                 /* current VM context handle */
-    void* eos;                     /* first unusable word after the native stack */
-    void* eor;                     /* first unusable word after the call stack */
-    VMSideExit* lastTreeExitGuard; /* guard we exited on during a tree call */
-    VMSideExit* lastTreeCallGuard; /* guard we want to grow from if the tree
-                                      call exit guard mismatched */
-    void* rpAtLastTreeCall;        /* value of rp at innermost tree call guard */
-};
+static inline uint8* getGlobalTypeMap(nanojit::SideExit* exit)
+{
+    return getStackTypeMap(exit) + ((VMSideExit*)exit)->numStackSlots;
+}
+
+static inline uint8* getFullTypeMap(nanojit::SideExit* exit)
+{
+    return getStackTypeMap(exit);
+}
+
+struct FrameInfo {
+    JSObject* callee;   // callee function object
+    JSObject* block;    // caller block chain head
+    jsbytecode* pc;     // caller fp->regs->pc
+    jsbytecode* imacpc; // caller fp->imacpc
+    uint16 spdist;      // distance from fp->slots to fp->regs->sp at JSOP_CALL
+
+    /*
+     * Bit 15 (0x8000) is a flag that is set if constructing (called through new).
+     * Bits 0-14 are the actual argument count. This may be less than fun->nargs.
+     */
+    uint16 argc;
+
+    /*
+     * Stack pointer adjustment needed for navigation of native stack in
+     * js_GetUpvarOnTrace. spoffset is the number of slots in the native
+     * stack frame for the caller *before* the slots covered by spdist.
+     * This may be negative if the caller is the top level script.
+     * The key fact is that if we let 'cpos' be the start of the caller's
+     * native stack frame, then (cpos + spoffset) points to the first
+     * non-argument slot in the callee's native stack frame.
+     */
+    int32 spoffset;
+
+    // Safer accessors for argc.
+    enum { CONSTRUCTING_MASK = 0x8000 };
+    void set_argc(uint16 argc, bool constructing) {
+        this->argc = argc | (constructing ? CONSTRUCTING_MASK : 0);
+    }
+    uint16 get_argc() const { return argc & ~CONSTRUCTING_MASK; }
+    bool is_constructing() const { return (argc & CONSTRUCTING_MASK) != 0; }
+
+    // The typemap just before the callee is called.
+    uint8* get_typemap() { return (uint8*) (this+1); }
+};
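FrameInfo packs the argument count and the constructing flag into a single uint16 and stores the callee's entry type map directly after the struct itself (get_typemap above). A short sketch using only the members shown; fi is an assumed pointer to a FrameInfo allocated with extra room for its typemap:

    void exampleFrameInfoUse(FrameInfo* fi)
    {
        fi->set_argc(3, /* constructing = */ true);  // argc now holds 0x8003
        JS_ASSERT(fi->get_argc() == 3);
        JS_ASSERT(fi->is_constructing());
        uint8* calleeTypes = fi->get_typemap();      // typemap begins at (uint8*)(fi + 1)
        (void) calleeTypes;
    }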

 struct UnstableExit
 {
@@ line 331 @@
 };

 class TreeInfo MMGC_SUBCLASS_DECL {
-    nanojit::Fragment* fragment;
 public:
+    nanojit::Fragment* const fragment;
     JSScript* script;
     unsigned maxNativeStackSlots;
     ptrdiff_t nativeStackBase;
     unsigned maxCallDepth;
-    TypeMap stackTypeMap;
+    TypeMap typeMap;
+    unsigned nStackTypes;
+    SlotList* globalSlots;
+    /* Dependent trees must be trashed if this tree dies, and updated on missing global types */
     Queue<nanojit::Fragment*> dependentTrees;
+    /* Linked trees must be updated on missing global types, but are not dependent */
+    Queue<nanojit::Fragment*> linkedTrees;
     unsigned branchCount;
     Queue<VMSideExit*> sideExits;
     UnstableExit* unstableExits;
+#ifdef DEBUG
+    const char* treeFileName;
+    uintN treeLineNumber;
+    uintN treePCOffset;
+#endif

-    TreeInfo(nanojit::Fragment* _fragment) : unstableExits(NULL) {
-        fragment = _fragment;
-    }
+    TreeInfo(nanojit::Fragment* _fragment,
+             SlotList* _globalSlots)
+      : fragment(_fragment),
+        script(NULL),
+        maxNativeStackSlots(0),
+        nativeStackBase(0),
+        maxCallDepth(0),
+        nStackTypes(0),
+        globalSlots(_globalSlots),
+        branchCount(0),
+        unstableExits(NULL)
+    {}
     ~TreeInfo();

+    inline unsigned nGlobalTypes() {
+        return typeMap.length() - nStackTypes;
+    }
+    inline uint8* globalTypeMap() {
+        return typeMap.data() + nStackTypes;
+    }
+    inline uint8* stackTypeMap() {
+        return typeMap.data();
+    }
 };

-struct FrameInfo {
-    JSObject* callee;   // callee function object
-    intptr_t ip_adj;    // callee script-based pc index and imacro pc
-    uint8* typemap;     // typemap for the stack frame
-    union {
-        struct {
-            uint16 spdist;  // distance from fp->slots to fp->regs->sp at JSOP_CALL
-            uint16 argc;    // actual argument count, may be < fun->nargs
-        } s;
-        uint32 word;        // for spdist/argc LIR store in record_JSOP_CALL
-    };
-};
+#if defined(JS_JIT_SPEW) && (defined(NANOJIT_IA32) || (defined(NANOJIT_AMD64) && defined(__GNUC__)))
+# define EXECUTE_TREE_TIMER
+#endif
+
+typedef enum JSBuiltinStatus {
+    JSBUILTIN_BAILED = 1,
+    JSBUILTIN_ERROR = 2
+} JSBuiltinStatus;
+
+struct InterpState
+{
+    double *sp;                    // native stack pointer, stack[0] is spbase[0]
+    FrameInfo** rp;                // call stack pointer
+    JSContext *cx;                 // current VM context handle
+    double *eos;                   // first unusable word after the native stack
+    void *eor;                     // first unusable word after the call stack
+    VMSideExit* lastTreeExitGuard; // guard we exited on during a tree call
+    VMSideExit* lastTreeCallGuard; // guard we want to grow from if the tree
+                                   // call exit guard mismatched
+    void* rpAtLastTreeCall;        // value of rp at innermost tree call guard
+    TreeInfo* outermostTree;       // the outermost tree we initially invoked
+    double* stackBase;             // native stack base
+    FrameInfo** callstackBase;     // call stack base
+    uintN* inlineCallCountp;       // inline call count counter
+    VMSideExit** innermostNestedGuardp;
+    void* stackMark;
+    VMSideExit* innermost;
+#ifdef EXECUTE_TREE_TIMER
+    uint64 startTime;
+#endif
+    InterpState* prev;
+
+    /*
+     * Used by _FAIL builtins; see jsbuiltins.h. The builtin sets the
+     * JSBUILTIN_BAILED bit if it bails off trace and the JSBUILTIN_ERROR bit
+     * if an error or exception occurred.
+     */
+    uint32 builtinStatus;
+
+    // Used to communicate the location of the return value in case of a deep bail.
+    double* deepBailSp;
+};
+
+static JS_INLINE void
+js_SetBuiltinError(JSContext *cx)
+{
+    cx->interpState->builtinStatus |= JSBUILTIN_ERROR;
+}
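A hedged sketch of how a _FAIL builtin might report failure through builtinStatus, per the comment above; the builtin and its helper are hypothetical, not declarations from this header:

    // Illustrative only: a slow native that signals an error while on trace.
    static JSBool
    example_FailingBuiltin(JSContext* cx)
    {
        if (!ExampleOperationThatCanFail(cx)) {   // hypothetical helper
            js_SetBuiltinError(cx);               // sets JSBUILTIN_ERROR on cx->interpState
            return JS_FALSE;
        }
        return JS_TRUE;
    }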

+#ifdef DEBUG_JSRS_NOT_BOOL
+struct JSRecordingStatus {
+    int code;
+    bool operator==(JSRecordingStatus &s) { return this->code == s.code; };
+    bool operator!=(JSRecordingStatus &s) { return this->code != s.code; };
+};
+enum JSRScodes {
+    JSRS_ERROR_code,
+    JSRS_STOP_code,
+    JSRS_CONTINUE_code,
+    JSRS_IMACRO_code
+};
+struct JSRecordingStatus JSRS_CONTINUE = { JSRS_CONTINUE_code };
+struct JSRecordingStatus JSRS_STOP     = { JSRS_STOP_code };
+struct JSRecordingStatus JSRS_IMACRO   = { JSRS_IMACRO_code };
+struct JSRecordingStatus JSRS_ERROR    = { JSRS_ERROR_code };
+#define STATUS_ABORTS_RECORDING(s) ((s) == JSRS_STOP || (s) == JSRS_ERROR)
+#else
+enum JSRecordingStatus {
+    JSRS_ERROR,        // Error; propagate to interpreter.
+    JSRS_STOP,         // Abort recording.
+    JSRS_CONTINUE,     // Continue recording.
+    JSRS_IMACRO        // Entered imacro; continue recording.
+                       // Only JSOP_IS_IMACOP opcodes may return this.
 };
+#define STATUS_ABORTS_RECORDING(s) ((s) <= JSRS_STOP)
+#endif
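A minimal sketch of how these status codes are consumed; it mirrors the TRACE_ARGS_ macro defined further down, and tr/cx are assumed locals rather than anything declared here:

    JSRecordingStatus status = tr->record_JSOP_ADD();
    if (STATUS_ABORTS_RECORDING(status)) {        // true for JSRS_STOP and JSRS_ERROR
        js_AbortRecording(cx, "JSOP_ADD");
        // JSRS_ERROR additionally propagates an error back to the interpreter.
    }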

 class TraceRecorder : public avmplus::GCObject {
     JSContext* cx;
     JSTraceMonitor* traceMonitor;
     JSObject* globalObj;
+    JSObject* lexicalBlock;
     Tracker tracker;
     Tracker nativeFrameTracker;
     char* entryTypeMap;
@@ line 477 @@
     nanojit::LirWriter* cse_filter;
     nanojit::LirWriter* expr_filter;
     nanojit::LirWriter* func_filter;
-#ifdef NJ_SOFTFLOAT
     nanojit::LirWriter* float_filter;
-#endif
     nanojit::LIns* cx_ins;
-    nanojit::LIns* gp_ins;
     nanojit::LIns* eos_ins;
     nanojit::LIns* eor_ins;
     nanojit::LIns* rval_ins;
     nanojit::LIns* inner_sp_ins;
+    nanojit::LIns* native_rval_ins;
+    nanojit::LIns* newobj_ins;
     bool deepAborted;
-    bool applyingArguments;
-    bool trashTree;
-    nanojit::Fragment* whichTreeToTrash;
+    bool trashSelf;
+    Queue<nanojit::Fragment*> whichTreesToTrash;
     Queue<jsbytecode*> cfgMerges;
     jsval* global_dslots;
+    JSTraceableNative* generatedTraceableNative;
     JSTraceableNative* pendingTraceableNative;
-    bool terminate;
-    intptr_t terminate_ip_adj;
-    nanojit::Fragment* outerToBlacklist;
-    nanojit::Fragment* promotedPeer;
     TraceRecorder* nextRecorderToAbort;
     bool wasRootFragment;
+    jsbytecode* outer;     /* outer trace header PC */
+    uint32 outerArgc;      /* outer trace deepest frame argc */
+    bool loop;

     bool isGlobal(jsval* p) const;
     ptrdiff_t nativeGlobalOffset(jsval* p) const;
-    ptrdiff_t nativeStackOffset(jsval* p) const;
-    void import(nanojit::LIns* base, ptrdiff_t offset, jsval* p, uint8& t,
-                const char *prefix, uintN index, JSStackFrame *fp);
-    void import(TreeInfo* treeInfo, nanojit::LIns* sp, unsigned ngslots, unsigned callDepth,
-                uint8* globalTypeMap, uint8* stackTypeMap);
+    JS_REQUIRES_STACK ptrdiff_t nativeStackOffset(jsval* p) const;
+    JS_REQUIRES_STACK void import(nanojit::LIns* base, ptrdiff_t offset, jsval* p, uint8 t,
+                                  const char *prefix, uintN index, JSStackFrame *fp);
+    JS_REQUIRES_STACK void import(TreeInfo* treeInfo, nanojit::LIns* sp, unsigned stackSlots,
+                                  unsigned callDepth, unsigned ngslots, uint8* typeMap);
     void trackNativeStackUse(unsigned slots);

-    bool lazilyImportGlobalSlot(unsigned slot);
+    JS_REQUIRES_STACK bool isValidSlot(JSScope* scope, JSScopeProperty* sprop);
+    JS_REQUIRES_STACK bool lazilyImportGlobalSlot(unsigned slot);

-    nanojit::LIns* guard(bool expected, nanojit::LIns* cond, ExitType exitType);
-    nanojit::LIns* guard(bool expected, nanojit::LIns* cond, nanojit::LIns* exit);
+    JS_REQUIRES_STACK void guard(bool expected, nanojit::LIns* cond, ExitType exitType);
+    JS_REQUIRES_STACK void guard(bool expected, nanojit::LIns* cond, VMSideExit* exit);

     nanojit::LIns* addName(nanojit::LIns* ins, const char* name);

-    nanojit::LIns* get(jsval* p) const;
     nanojit::LIns* writeBack(nanojit::LIns* i, nanojit::LIns* base, ptrdiff_t offset);
-    void set(jsval* p, nanojit::LIns* l, bool initializing = false);
-    bool checkType(jsval& v, uint8 t, jsval*& stage_val, nanojit::LIns*& stage_ins,
-                   unsigned& stage_count);
-    bool deduceTypeStability(nanojit::Fragment* root_peer, nanojit::Fragment** stable_peer,
-                             unsigned* demotes);
+    JS_REQUIRES_STACK void set(jsval* p, nanojit::LIns* l, bool initializing = false);
+    JS_REQUIRES_STACK nanojit::LIns* get(jsval* p);
+    JS_REQUIRES_STACK bool known(jsval* p);
+    JS_REQUIRES_STACK void checkForGlobalObjectReallocation();
+
+    JS_REQUIRES_STACK bool checkType(jsval& v, uint8 t, jsval*& stage_val,
+                                     nanojit::LIns*& stage_ins, unsigned& stage_count);
+    JS_REQUIRES_STACK bool deduceTypeStability(nanojit::Fragment* root_peer,
+                                               nanojit::Fragment** stable_peer,
+                                               bool& demote);

-    jsval& argval(unsigned n) const;
-    jsval& varval(unsigned n) const;
-    jsval& stackval(int n) const;
+    JS_REQUIRES_STACK jsval& argval(unsigned n) const;
+    JS_REQUIRES_STACK jsval& varval(unsigned n) const;
+    JS_REQUIRES_STACK jsval& stackval(int n) const;

-    nanojit::LIns* scopeChain() const;
-    bool activeCallOrGlobalSlot(JSObject* obj, jsval*& vp);
+    JS_REQUIRES_STACK nanojit::LIns* scopeChain() const;
+    JS_REQUIRES_STACK JSRecordingStatus activeCallOrGlobalSlot(JSObject* obj, jsval*& vp);

-    nanojit::LIns* arg(unsigned n);
-    void arg(unsigned n, nanojit::LIns* i);
-    nanojit::LIns* var(unsigned n);
-    void var(unsigned n, nanojit::LIns* i);
-    nanojit::LIns* stack(int n);
-    void stack(int n, nanojit::LIns* i);
+    JS_REQUIRES_STACK nanojit::LIns* arg(unsigned n);
+    JS_REQUIRES_STACK void arg(unsigned n, nanojit::LIns* i);
+    JS_REQUIRES_STACK nanojit::LIns* var(unsigned n);
+    JS_REQUIRES_STACK void var(unsigned n, nanojit::LIns* i);
+    JS_REQUIRES_STACK nanojit::LIns* upvar(JSScript* script, JSUpvarArray* uva, uintN index, jsval& v);
+    JS_REQUIRES_STACK nanojit::LIns* stack(int n);
+    JS_REQUIRES_STACK void stack(int n, nanojit::LIns* i);

-    nanojit::LIns* alu(nanojit::LOpcode op, jsdouble v0, jsdouble v1,
-                       nanojit::LIns* s0, nanojit::LIns* s1);
+    JS_REQUIRES_STACK nanojit::LIns* alu(nanojit::LOpcode op, jsdouble v0, jsdouble v1,
+                                         nanojit::LIns* s0, nanojit::LIns* s1);
     nanojit::LIns* f2i(nanojit::LIns* f);
-    nanojit::LIns* makeNumberInt32(nanojit::LIns* f);
-    nanojit::LIns* stringify(jsval& v);
+    JS_REQUIRES_STACK nanojit::LIns* makeNumberInt32(nanojit::LIns* f);
+    JS_REQUIRES_STACK nanojit::LIns* stringify(jsval& v);

-    bool call_imacro(jsbytecode* imacro);
+    JS_REQUIRES_STACK JSRecordingStatus call_imacro(jsbytecode* imacro);

-    bool ifop();
-    bool switchop();
-    bool inc(jsval& v, jsint incr, bool pre = true);
-    bool inc(jsval& v, nanojit::LIns*& v_ins, jsint incr, bool pre = true);
-    bool incProp(jsint incr, bool pre = true);
-    bool incElem(jsint incr, bool pre = true);
-    bool incName(jsint incr, bool pre = true);
-
-    enum { CMP_NEGATE = 1, CMP_TRY_BRANCH_AFTER_COND = 2, CMP_CASE = 4, CMP_STRICT = 8 };
-    bool cmp(nanojit::LOpcode op, int flags = 0);
+    JS_REQUIRES_STACK JSRecordingStatus ifop();
+    JS_REQUIRES_STACK JSRecordingStatus switchop();
+#ifdef NANOJIT_IA32
+    JS_REQUIRES_STACK nanojit::LIns* tableswitch();
+#endif
+    JS_REQUIRES_STACK JSRecordingStatus inc(jsval& v, jsint incr, bool pre = true);
+    JS_REQUIRES_STACK JSRecordingStatus inc(jsval& v, nanojit::LIns*& v_ins, jsint incr,
+                                            bool pre = true);
+    JS_REQUIRES_STACK JSRecordingStatus incProp(jsint incr, bool pre = true);
+    JS_REQUIRES_STACK JSRecordingStatus incElem(jsint incr, bool pre = true);
+    JS_REQUIRES_STACK JSRecordingStatus incName(jsint incr, bool pre = true);
+
+    JS_REQUIRES_STACK void strictEquality(bool equal, bool cmpCase);
+    JS_REQUIRES_STACK JSRecordingStatus equality(bool negate, bool tryBranchAfterCond);
+    JS_REQUIRES_STACK JSRecordingStatus equalityHelper(jsval l, jsval r,
+                                                       nanojit::LIns* l_ins, nanojit::LIns* r_ins,
+                                                       bool negate, bool tryBranchAfterCond,
+                                                       jsval& rval);
+    JS_REQUIRES_STACK JSRecordingStatus relational(nanojit::LOpcode op, bool tryBranchAfterCond);

-    bool unary(nanojit::LOpcode op);
-    bool binary(nanojit::LOpcode op);
+    JS_REQUIRES_STACK JSRecordingStatus unary(nanojit::LOpcode op);
+    JS_REQUIRES_STACK JSRecordingStatus binary(nanojit::LOpcode op);

     bool ibinary(nanojit::LOpcode op);
     bool iunary(nanojit::LOpcode op);
     bool bbinary(nanojit::LOpcode op);
     void demote(jsval& v, jsdouble result);

-    bool map_is_native(JSObjectMap* map, nanojit::LIns* map_ins, nanojit::LIns*& ops_ins,
-                       size_t op_offset = 0);
-    bool test_property_cache(JSObject* obj, nanojit::LIns* obj_ins, JSObject*& obj2,
-                             jsuword& pcval);
-    bool test_property_cache_direct_slot(JSObject* obj, nanojit::LIns* obj_ins, uint32& slot);
-    void stobj_set_slot(nanojit::LIns* obj_ins, unsigned slot,
-                        nanojit::LIns*& dslots_ins, nanojit::LIns* v_ins);
+    JS_REQUIRES_STACK bool map_is_native(JSObjectMap* map, nanojit::LIns* map_ins,
+                                         nanojit::LIns*& ops_ins, size_t op_offset = 0);
+    JS_REQUIRES_STACK JSRecordingStatus test_property_cache(JSObject* obj, nanojit::LIns* obj_ins,
+                                                            JSObject*& obj2, jsuword& pcval);
+    void stobj_set_fslot(nanojit::LIns *obj_ins, unsigned slot,
+                         nanojit::LIns* v_ins, const char *name);
+    void stobj_set_dslot(nanojit::LIns *obj_ins, unsigned slot, nanojit::LIns*& dslots_ins,
+                         nanojit::LIns* v_ins, const char *name);
+    void stobj_set_slot(nanojit::LIns* obj_ins, unsigned slot, nanojit::LIns*& dslots_ins,
+                        nanojit::LIns* v_ins);

     nanojit::LIns* stobj_get_fslot(nanojit::LIns* obj_ins, unsigned slot);
+    nanojit::LIns* stobj_get_dslot(nanojit::LIns* obj_ins, unsigned index,
+                                   nanojit::LIns*& dslots_ins);
     nanojit::LIns* stobj_get_slot(nanojit::LIns* obj_ins, unsigned slot,
                                   nanojit::LIns*& dslots_ins);
-    bool native_set(nanojit::LIns* obj_ins, JSScopeProperty* sprop,
-                    nanojit::LIns*& dslots_ins, nanojit::LIns* v_ins);
-    bool native_get(nanojit::LIns* obj_ins, nanojit::LIns* pobj_ins, JSScopeProperty* sprop,
-                    nanojit::LIns*& dslots_ins, nanojit::LIns*& v_ins);
+    JSRecordingStatus native_set(nanojit::LIns* obj_ins, JSScopeProperty* sprop,
+                                 nanojit::LIns*& dslots_ins, nanojit::LIns* v_ins);
+    JSRecordingStatus native_get(nanojit::LIns* obj_ins, nanojit::LIns* pobj_ins,
+                                 JSScopeProperty* sprop, nanojit::LIns*& dslots_ins,
+                                 nanojit::LIns*& v_ins);

-    bool name(jsval*& vp);
-    bool prop(JSObject* obj, nanojit::LIns* obj_ins, uint32& slot, nanojit::LIns*& v_ins);
-    bool elem(jsval& oval, jsval& idx, jsval*& vp, nanojit::LIns*& v_ins, nanojit::LIns*& addr_ins);
-    bool getProp(JSObject* obj, nanojit::LIns* obj_ins);
-    bool getProp(jsval& v);
-    bool getThis(nanojit::LIns*& this_ins);
+    nanojit::LIns* getStringLength(nanojit::LIns* str_ins);
+
+    JS_REQUIRES_STACK JSRecordingStatus name(jsval*& vp);
+    JS_REQUIRES_STACK JSRecordingStatus prop(JSObject* obj, nanojit::LIns* obj_ins, uint32& slot,
+                                             nanojit::LIns*& v_ins);
+    JS_REQUIRES_STACK JSRecordingStatus denseArrayElement(jsval& oval, jsval& idx, jsval*& vp,
+                                                          nanojit::LIns*& v_ins,
+                                                          nanojit::LIns*& addr_ins);
+    JS_REQUIRES_STACK JSRecordingStatus getProp(JSObject* obj, nanojit::LIns* obj_ins);
+    JS_REQUIRES_STACK JSRecordingStatus getProp(jsval& v);
+    JS_REQUIRES_STACK JSRecordingStatus getThis(nanojit::LIns*& this_ins);

-    bool box_jsval(jsval v, nanojit::LIns*& v_ins);
-    bool unbox_jsval(jsval v, nanojit::LIns*& v_ins);
-    bool guardClass(JSObject* obj, nanojit::LIns* obj_ins, JSClass* clasp,
-                    ExitType exitType = MISMATCH_EXIT);
-    bool guardDenseArray(JSObject* obj, nanojit::LIns* obj_ins,
-                         ExitType exitType = MISMATCH_EXIT);
-    bool guardDenseArrayIndex(JSObject* obj, jsint idx, nanojit::LIns* obj_ins,
-                              nanojit::LIns* dslots_ins, nanojit::LIns* idx_ins,
-                              ExitType exitType);
-    bool guardElemOp(JSObject* obj, nanojit::LIns* obj_ins, jsid id, size_t op_offset, jsval* vp);
+    JS_REQUIRES_STACK void box_jsval(jsval v, nanojit::LIns*& v_ins);
+    JS_REQUIRES_STACK void unbox_jsval(jsval v, nanojit::LIns*& v_ins, VMSideExit* exit);
+    JS_REQUIRES_STACK bool guardClass(JSObject* obj, nanojit::LIns* obj_ins, JSClass* clasp,
+                                      VMSideExit* exit);
+    JS_REQUIRES_STACK bool guardDenseArray(JSObject* obj, nanojit::LIns* obj_ins,
+                                           ExitType exitType = MISMATCH_EXIT);
+    JS_REQUIRES_STACK bool guardHasPrototype(JSObject* obj, nanojit::LIns* obj_ins,
+                                             JSObject** pobj, nanojit::LIns** pobj_ins,
+                                             VMSideExit* exit);
+    JS_REQUIRES_STACK JSRecordingStatus guardPrototypeHasNoIndexedProperties(JSObject* obj,
+                                                                             nanojit::LIns* obj_ins,
+                                                                             ExitType exitType);
+    JS_REQUIRES_STACK JSRecordingStatus guardNotGlobalObject(JSObject* obj,
+                                                             nanojit::LIns* obj_ins);
     void clearFrameSlotsFromCache();
-    bool guardShapelessCallee(jsval& callee);
-    bool interpretedFunctionCall(jsval& fval, JSFunction* fun, uintN argc, bool constructing);
-    bool functionCall(bool constructing);
-
-    void trackCfgMerges(jsbytecode* pc);
-    void flipIf(jsbytecode* pc, bool& cond);
-    void fuseIf(jsbytecode* pc, bool cond, nanojit::LIns* x);
+    JS_REQUIRES_STACK JSRecordingStatus guardCallee(jsval& callee);
+    JS_REQUIRES_STACK JSRecordingStatus getClassPrototype(JSObject* ctor,
+                                                          nanojit::LIns*& proto_ins);
+    JS_REQUIRES_STACK JSRecordingStatus getClassPrototype(JSProtoKey key,
+                                                          nanojit::LIns*& proto_ins);
+    JS_REQUIRES_STACK JSRecordingStatus newArray(JSObject* ctor, uint32 argc, jsval* argv,
+                                                 jsval* rval);
+    JS_REQUIRES_STACK JSRecordingStatus newString(JSObject* ctor, uint32 argc, jsval* argv,
+                                                  jsval* rval);
+    JS_REQUIRES_STACK JSRecordingStatus interpretedFunctionCall(jsval& fval, JSFunction* fun,
+                                                                uintN argc, bool constructing);
+    JS_REQUIRES_STACK JSRecordingStatus emitNativeCall(JSTraceableNative* known, uintN argc,
+                                                       nanojit::LIns* args[]);
+    JS_REQUIRES_STACK JSRecordingStatus callTraceableNative(JSFunction* fun, uintN argc,
+                                                            bool constructing);
+    JS_REQUIRES_STACK JSRecordingStatus callNative(uintN argc, JSOp mode);
+    JS_REQUIRES_STACK JSRecordingStatus functionCall(uintN argc, JSOp mode);
+
+    JS_REQUIRES_STACK void trackCfgMerges(jsbytecode* pc);
+    JS_REQUIRES_STACK void emitIf(jsbytecode* pc, bool cond, nanojit::LIns* x);
+    JS_REQUIRES_STACK void fuseIf(jsbytecode* pc, bool cond, nanojit::LIns* x);
+    JS_REQUIRES_STACK JSRecordingStatus checkTraceEnd(jsbytecode* pc);

     bool hasMethod(JSObject* obj, jsid id);
-    bool hasToStringMethod(JSObject* obj);
-    bool hasToStringMethod(jsval v) {
-        JS_ASSERT(JSVAL_IS_OBJECT(v));
-        return hasToStringMethod(JSVAL_TO_OBJECT(v));
-    }
-    bool hasValueOfMethod(JSObject* obj);
-    bool hasValueOfMethod(jsval v) {
-        JS_ASSERT(JSVAL_IS_OBJECT(v));
-        return hasValueOfMethod(JSVAL_TO_OBJECT(v));
-    }
-    bool hasIteratorMethod(JSObject* obj);
-    bool hasIteratorMethod(jsval v) {
-        JS_ASSERT(JSVAL_IS_OBJECT(v));
-        return hasIteratorMethod(JSVAL_TO_OBJECT(v));
-    }
-
-public:
-    friend bool js_MonitorRecording(TraceRecorder* tr);
-
+    JS_REQUIRES_STACK bool hasIteratorMethod(JSObject* obj);
+
+    JS_REQUIRES_STACK jsatomid getFullIndex(ptrdiff_t pcoff = 0);
+
+public:
+    JS_REQUIRES_STACK
     TraceRecorder(JSContext* cx, VMSideExit*, nanojit::Fragment*, TreeInfo*,
-                  unsigned ngslots, uint8* globalTypeMap, uint8* stackTypeMap,
-                  VMSideExit* expectedInnerExit, nanojit::Fragment* outerToBlacklist);
+                  unsigned stackSlots, unsigned ngslots, uint8* typeMap,
+                  VMSideExit* expectedInnerExit, jsbytecode* outerTree,
+                  uint32 outerArgc);
     ~TraceRecorder();

-    uint8 determineSlotType(jsval* vp) const;
-    nanojit::LIns* snapshot(ExitType exitType);
+    static JS_REQUIRES_STACK JSRecordingStatus monitorRecording(JSContext* cx, TraceRecorder* tr,
+                                                                JSOp op);
+
+    JS_REQUIRES_STACK uint8 determineSlotType(jsval* vp);
+
+    /*
+     * Examines current interpreter state to record information suitable for
+     * returning to the interpreter through a side exit of the given type.
+     */
+    JS_REQUIRES_STACK VMSideExit* snapshot(ExitType exitType);
+
+    /*
+     * Creates a separate but identical copy of the given side exit, allowing
+     * the guards associated with each to be entirely separate even after
+     * subsequent patching.
+     */
+    JS_REQUIRES_STACK VMSideExit* copy(VMSideExit* exit);
+
+    /*
+     * Creates an instruction whose payload is a GuardRecord for the given exit.
+     * The instruction is suitable for use as the final argument of a single
+     * call to LirBuffer::insGuard; do not reuse the returned value.
+     */
+    JS_REQUIRES_STACK nanojit::LIns* createGuardRecord(VMSideExit* exit);

     nanojit::Fragment* getFragment() const { return fragment; }
-    bool isLoopHeader(JSContext* cx) const;
-    void compile(nanojit::Fragmento* fragmento);
-    bool closeLoop(nanojit::Fragmento* fragmento, bool& demote, unsigned *demotes);
-    void endLoop(nanojit::Fragmento* fragmento);
-    void joinEdgesToEntry(nanojit::Fragmento* fragmento, nanojit::Fragment* peer_root);
+    TreeInfo* getTreeInfo() const { return treeInfo; }
+    JS_REQUIRES_STACK void compile(JSTraceMonitor* tm);
+    JS_REQUIRES_STACK void closeLoop(JSTraceMonitor* tm, bool& demote);
+    JS_REQUIRES_STACK void endLoop(JSTraceMonitor* tm);
+    JS_REQUIRES_STACK void joinEdgesToEntry(nanojit::Fragmento* fragmento,
+                                            VMFragment* peer_root);
     void blacklist() { fragment->blacklist(); }
-    bool adjustCallerTypes(nanojit::Fragment* f, unsigned* demote_slots, bool& trash);
-    nanojit::Fragment* findNestedCompatiblePeer(nanojit::Fragment* f, nanojit::Fragment** empty);
-    void prepareTreeCall(nanojit::Fragment* inner);
-    void emitTreeCall(nanojit::Fragment* inner, VMSideExit* exit);
+    JS_REQUIRES_STACK void adjustCallerTypes(nanojit::Fragment* f);
+    JS_REQUIRES_STACK nanojit::Fragment* findNestedCompatiblePeer(nanojit::Fragment* f);
+    JS_REQUIRES_STACK void prepareTreeCall(nanojit::Fragment* inner);
+    JS_REQUIRES_STACK void emitTreeCall(nanojit::Fragment* inner, VMSideExit* exit);
     unsigned getCallDepth() const;
     void pushAbortStack();
     void popAbortStack();
     void removeFragmentoReferences();
+    void deepAbort();

-    bool record_EnterFrame();
-    bool record_LeaveFrame();
-    bool record_SetPropHit(JSPropCacheEntry* entry, JSScopeProperty* sprop);
-    bool record_SetPropMiss(JSPropCacheEntry* entry);
-    bool record_DefLocalFunSetSlot(uint32 slot, JSObject* obj);
-    bool record_FastNativeCallComplete();
-    bool record_IteratorNextComplete();
-
-    nanojit::Fragment* getOuterToBlacklist() { return outerToBlacklist; }
-    void deepAbort() { deepAborted = true; }
+    JS_REQUIRES_STACK JSRecordingStatus record_EnterFrame();
+    JS_REQUIRES_STACK JSRecordingStatus record_LeaveFrame();
+    JS_REQUIRES_STACK JSRecordingStatus record_SetPropHit(JSPropCacheEntry* entry,
+                                                          JSScopeProperty* sprop);
+    JS_REQUIRES_STACK JSRecordingStatus record_DefLocalFunSetSlot(uint32 slot, JSObject* obj);
+    JS_REQUIRES_STACK JSRecordingStatus record_NativeCallComplete();
+
     bool wasDeepAborted() { return deepAborted; }
-    bool walkedOutOfLoop() { return terminate; }
-    void setPromotedPeer(nanojit::Fragment* peer) { promotedPeer = peer; }
     TreeInfo* getTreeInfo() { return treeInfo; }

 #define OPDEF(op,val,name,token,length,nuses,ndefs,prec,format)               \
-    bool record_##op();
+    JS_REQUIRES_STACK JSRecordingStatus record_##op();
 # include "jsopcode.tbl"
 #undef OPDEF
 };

 #define TRACING_ENABLED(cx)       JS_HAS_OPTION(cx, JSOPTION_JIT)
 #define TRACE_RECORDER(cx)        (JS_TRACE_MONITOR(cx).recorder)
 #define SET_TRACE_RECORDER(cx,tr) (JS_TRACE_MONITOR(cx).recorder = (tr))

-#define JSOP_IS_BINARY(op) ((uintN)((op) - JSOP_BITOR) <= (uintN)(JSOP_MOD - JSOP_BITOR))
-
-/*
- * See jsinterp.cpp for the ENABLE_TRACER definition. Also note how comparing x
- * to JSOP_* constants specializes trace-recording code at compile time either
- * to include imacro support, or exclude it altogether for this particular x.
- *
- * We save macro-generated code size also via bool TraceRecorder::record_JSOP_*
- * return type, instead of a three-state: OK, ABORTED, IMACRO_STARTED. But the
- * price of this is the JSFRAME_IMACRO_START frame flag. We need one more bit
- * to detect that TraceRecorder::call_imacro was invoked by the record_JSOP_*
- * method invoked by TRACE_ARGS_.
- */
-#define RECORD_ARGS(x,args)                                                   \
-    JS_BEGIN_MACRO                                                            \
-        if (!js_MonitorRecording(TRACE_RECORDER(cx))) {                       \
-            ENABLE_TRACER(0);                                                 \
-        } else {                                                              \
-            TRACE_ARGS_(x, args,                                              \
-                if ((fp->flags & JSFRAME_IMACRO_START) &&                     \
-                    (x == JSOP_ITER || x == JSOP_NEXTITER ||                  \
-                     JSOP_IS_BINARY(x))) {                                    \
-                    fp->flags &= ~JSFRAME_IMACRO_START;                       \
-                    atoms = COMMON_ATOMS_START(&rt->atomState);               \
-                    op = JSOp(*regs.pc);                                      \
-                    DO_OP();                                                  \
-                }                                                             \
-            );                                                                \
-        }                                                                     \
-    JS_END_MACRO
+#define JSOP_IN_RANGE(op,lo,hi)   (uintN((op) - (lo)) <= uintN((hi) - (lo)))
+#define JSOP_IS_BINARY(op)        JSOP_IN_RANGE(op, JSOP_BITOR, JSOP_MOD)
+#define JSOP_IS_UNARY(op)         JSOP_IN_RANGE(op, JSOP_NEG, JSOP_POS)
+#define JSOP_IS_EQUALITY(op)      JSOP_IN_RANGE(op, JSOP_EQ, JSOP_NE)

-#define TRACE_ARGS_(x,args,onfalse)                                           \
+#define TRACE_ARGS_(x,args)                                                   \
     JS_BEGIN_MACRO                                                            \
         TraceRecorder* tr_ = TRACE_RECORDER(cx);                              \
-        if (tr_ && !tr_->record_##x args) {                                   \
-            onfalse                                                           \
-            js_AbortRecording(cx, #x);                                        \
-            ENABLE_TRACER(0);                                                 \
+        if (tr_ && !tr_->wasDeepAborted()) {                                  \
+            JSRecordingStatus status = tr_->record_##x args;                  \
+            if (STATUS_ABORTS_RECORDING(status)) {                            \
+                js_AbortRecording(cx, #x);                                    \
+                if (status == JSRS_ERROR)                                     \
+                    goto error;                                               \
+            }                                                                 \
+            JS_ASSERT(status != JSRS_IMACRO);                                 \
         }                                                                     \
     JS_END_MACRO

-#define TRACE_ARGS(x,args)      TRACE_ARGS_(x, args, )
-#define RECORD(x)               RECORD_ARGS(x, ())
+#define TRACE_ARGS(x,args)      TRACE_ARGS_(x, args)
 #define TRACE_0(x)              TRACE_ARGS(x, ())
 #define TRACE_1(x,a)            TRACE_ARGS(x, (a))
 #define TRACE_2(x,a,b)          TRACE_ARGS(x, (a, b))
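A hedged sketch of a call site for these wrappers; entry and sprop stand for whatever locals the interpreter has in hand, and the surrounding opcode case is omitted:

    // Expands to TRACE_ARGS_(SetPropHit, (entry, sprop)): if a recorder is
    // active and not deep-aborted, it runs record_SetPropHit(entry, sprop),
    // aborts recording when STATUS_ABORTS_RECORDING(status) holds, and jumps
    // to the interpreter's error label on JSRS_ERROR.
    TRACE_2(SetPropHit, entry, sprop);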

-extern bool
+extern JS_REQUIRES_STACK bool
 js_MonitorLoopEdge(JSContext* cx, uintN& inlineCallCount);

-extern bool
-js_MonitorRecording(TraceRecorder *tr);
+#ifdef DEBUG
+# define js_AbortRecording(cx, reason) js_AbortRecordingImpl(cx, reason)
+#else
+# define js_AbortRecording(cx, reason) js_AbortRecordingImpl(cx)
+#endif

-extern void
+extern JS_REQUIRES_STACK void
 js_AbortRecording(JSContext* cx, const char* reason);

 extern void
@@ line 768 @@
 js_FinishJIT(JSTraceMonitor *tm);

 extern void
-js_FlushJITCache(JSContext* cx);
+js_PurgeScriptFragments(JSContext* cx, JSScript* script);
+
+extern bool
+js_OverfullFragmento(JSTraceMonitor* tm, nanojit::Fragmento *frago);
+
+extern void
+js_PurgeJITOracle();
+
+extern JSObject *
+js_GetBuiltinFunction(JSContext *cx, uintN index);

 extern void
-js_FlushJITOracle(JSContext* cx);
+js_SetMaxCodeCacheBytes(JSContext* cx, uint32 bytes);

 #else  /* !JS_TRACER */

-#define RECORD(x)               ((void)0)
 #define TRACE_0(x)              ((void)0)
 #define TRACE_1(x,a)            ((void)0)
 #define TRACE_2(x,a,b)          ((void)0)