    uint32          hits;
    uint32          misses;
    uint32          fills;
    uint32          purges;
# define GSN_CACHE_METER(cache,cnt) (++(cache)->cnt)
#else
# define GSN_CACHE_METER(cache,cnt) /* nothing */
#endif
} JSGSNCache;

#define js_FinishGSNCache(cache)    js_PurgeGSNCache(cache)

extern void
js_PurgeGSNCache(JSGSNCache *cache);

/* These helper macros take a cx as parameter and operate on its GSN cache. */
#define JS_PURGE_GSN_CACHE(cx)      js_PurgeGSNCache(&JS_GSN_CACHE(cx))
#define JS_METER_GSN_CACHE(cx,cnt)  GSN_CACHE_METER(&JS_GSN_CACHE(cx), cnt)
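
/*
 * Usage sketch (illustrative only, not part of this header): a caller that
 * consults the GSN cache meters its traffic like so.
 *
 *   if (JS_GSN_CACHE(cx).code == script->code) {
 *       JS_METER_GSN_CACHE(cx, hits);       cache covers this script
 *       ...look the note up in the cache table...
 *   } else {
 *       JS_METER_GSN_CACHE(cx, misses);     wrong script; refill,
 *       JS_METER_GSN_CACHE(cx, fills);      counting the fill
 *   }
 *   JS_PURGE_GSN_CACHE(cx);                 drops the cache, bumping 'purges'
 */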

typedef struct InterpState InterpState;
typedef struct VMSideExit VMSideExit;

#ifdef __cplusplus
namespace nanojit {
    class Fragment;
    class Fragmento;
    class LirBuffer;
}
class TraceRecorder;
extern "C++" { template<typename T> class Queue; }
typedef Queue<uint16> SlotList;

# define CLS(T)  T*
#else
# define CLS(T)  void*
#endif

#define FRAGMENT_TABLE_SIZE 512
struct VMFragment;

#define MONITOR_N_GLOBAL_STATES 4
struct GlobalState {
    JSObject*               globalObj;
    uint32                  globalShape;
    CLS(SlotList)           globalSlots;
};
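
/*
 * Sketch of how the tracer might select a state for a given global (an
 * assumption for illustration; the real lookup lives in the tracer, not in
 * this header):
 *
 *   JSTraceMonitor *tm = &JS_TRACE_MONITOR(cx);
 *   struct GlobalState *gs = NULL;
 *   for (int i = 0; i < MONITOR_N_GLOBAL_STATES; ++i) {
 *       if (tm->globalStates[i].globalShape == globalShape) {
 *           gs = &tm->globalStates[i];
 *           break;
 *       }
 *   }
 */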

/*
 * Trace monitor. Every JSThread (if JS_THREADSAFE) or JSRuntime (if not
 * JS_THREADSAFE) has an associated trace monitor that keeps track of loop
 * frequencies for all JavaScript code loaded into that runtime.
 */
struct JSTraceMonitor {
    /*
     * The context currently executing JIT-compiled code on this thread, or
     * NULL if none. Among other things, this can in certain cases prevent
     * last-ditch GC and suppress calls to JS_ReportOutOfMemory.
     *
     * !tracecx && !recorder: not on trace
     * !tracecx && !recorder && prohibitFlush: deep-bailed
     * !tracecx && recorder && !recorder->deepAborted: recording
     * !tracecx && recorder && recorder->deepAborted: deep aborted
     * tracecx && !recorder: executing a trace
     * tracecx && recorder: executing inner loop, recording outer loop
     */
    JSContext               *tracecx;

    CLS(nanojit::LirBuffer) lirbuf;
    CLS(nanojit::Fragmento) fragmento;
    CLS(TraceRecorder)      recorder;
    jsval                   *reservedDoublePool;
    jsval                   *reservedDoublePoolPtr;

    struct GlobalState      globalStates[MONITOR_N_GLOBAL_STATES];
    struct VMFragment*      vmfragments[FRAGMENT_TABLE_SIZE];
    JSDHashTable            recordAttempts;

    /*
     * Maximum size of the code cache before we start flushing. 1/16 of this
     * size is used as threshold for the regular expression code cache.
     */
    uint32                  maxCodeCacheBytes;

    /*
     * If nonzero, do not flush the JIT cache after a deep bail. That would
     * free JITted code pages that we will later return to. Instead, set the
     * needFlush flag so that it can be flushed later.
     *
     * NB: needFlush and useReservedObjects are packed together.
     */
    uintN                   prohibitFlush;
    JSPackedBool            needFlush;

    /*
     * reservedObjects is a linked list (via fslots[0]) of preallocated
     * JSObjects. The JIT uses this to ensure that leaving a trace tree
     * can't fail.
     */
    JSPackedBool            useReservedObjects;
    JSObject                *reservedObjects;

    /*
     * Fragmento for the regular expression compiler. This is logically a
     * distinct compiler but needs to be managed in exactly the same way as
     * the real tracing Fragmento.
     */
    CLS(nanojit::LirBuffer) reLirBuf;
    CLS(nanojit::Fragmento) reFragmento;

    /* Keep a list of recorders we need to abort on cache flush. */
    CLS(TraceRecorder)      abortStack;
};
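
/*
 * The state summary from the comment above, written out as expressions
 * (illustrative; recorder->deepAborted is a TraceRecorder member visible
 * only to C++ tracer code):
 *
 *   JSTraceMonitor *tm = &JS_TRACE_MONITOR(cx);
 *   executing a trace:  tm->tracecx != NULL
 *   recording:          !tm->tracecx && tm->recorder
 *                       && !tm->recorder->deepAborted
 *   deep-bailed:        !tm->tracecx && !tm->recorder && tm->prohibitFlush
 */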

typedef struct InterpStruct InterpStruct;

/*
 * N.B. JS_ON_TRACE(cx) is true if JIT code is on the stack in the current
 * thread, regardless of whether cx is the context in which that trace is
 * executing. cx must be a context on the current thread.
 */
#ifdef JS_TRACER
# define JS_ON_TRACE(cx)   (JS_TRACE_MONITOR(cx).tracecx != NULL)
#else
# define JS_ON_TRACE(cx)   JS_FALSE
#endif
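
/*
 * Example (mirrors js_LeaveTrace near the end of this header): code that
 * must not run with JIT code on the stack checks JS_ON_TRACE first and
 * bails the trace out if needed.
 *
 *   if (JS_ON_TRACE(cx))
 *       js_DeepBail(cx);
 */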

#ifdef DEBUG
# define JS_EVAL_CACHE_METERING     1
# define JS_FUNCTION_METERING       1
#endif

/* Number of potentially reusable scriptsToGC to search for the eval cache. */
#ifndef JS_EVAL_CACHE_SHIFT
# define JS_EVAL_CACHE_SHIFT        6
#endif
#define JS_EVAL_CACHE_SIZE          JS_BIT(JS_EVAL_CACHE_SHIFT)

#ifdef JS_EVAL_CACHE_METERING
# define EVAL_CACHE_METER_LIST(_)   _(probe), _(hit), _(step), _(noscope)
# define identity(x)                x

/* Have to typedef this for LiveConnect C code, which includes us. */
typedef struct JSEvalCacheMeter {
    uint64 EVAL_CACHE_METER_LIST(identity);
} JSEvalCacheMeter;

# undef identity
#endif
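
/*
 * How the X-macro expands: identity(x) is just x, so
 *
 *   uint64 EVAL_CACHE_METER_LIST(identity);
 *
 * becomes
 *
 *   uint64 probe, hit, step, noscope;
 *
 * declaring one counter per name in the list. JSFunctionMeter below reuses
 * the same trick.
 */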

#ifdef JS_FUNCTION_METERING
# define FUNCTION_KIND_METER_LIST(_)                                          \
                        _(allfun), _(heavy), _(nofreeupvar), _(onlyfreevar),  \
                        _(display), _(flat), _(setupvar), _(badfunarg)
# define identity(x)    x

typedef struct JSFunctionMeter {
    int32 FUNCTION_KIND_METER_LIST(identity);
} JSFunctionMeter;

# undef identity
#endif

struct JSThreadData {
    /*
     * The GSN cache is per thread since even multi-cx-per-thread embeddings
     * do not interleave js_GetSrcNote calls.
     */
    JSGSNCache          gsnCache;

    /* Property cache for faster call/get/set invocation. */
    JSPropertyCache     propertyCache;

#ifdef JS_TRACER
    /* Trace-tree JIT recorder/interpreter state. */
    JSTraceMonitor      traceMonitor;
#endif

    /* Lock-free hashed lists of scripts created by eval to garbage-collect. */
    JSScript            *scriptsToGC[JS_EVAL_CACHE_SIZE];

#ifdef JS_EVAL_CACHE_METERING
    JSEvalCacheMeter    evalCacheMeter;
#endif
};

#ifdef JS_THREADSAFE

/*
 * Structure uniquely representing a thread. It holds thread-private data
 * that can be accessed without a global lock.
 */
struct JSThread {
    /* Linked list of all contexts in use on this thread. */
    JSCList             contextList;

    /* Opaque thread-id, from NSPR's PR_GetCurrentThread(). */
    jsword              id;

    /*
     * Thread-local version of JSRuntime.gcMallocBytes to avoid taking
     * locks on each JS_malloc.
     */
    uint32              gcMallocBytes;

    /* Indicates that the thread is waiting in ClaimTitle from jslock.cpp. */
    JSTitle             *titleToShare;

    /* Factored out of JSThread for !JS_THREADSAFE embedding in JSRuntime. */
    JSThreadData        data;
};

#define JS_THREAD_DATA(cx)      (&(cx)->thread->data)

struct JSThreadsHashEntry {
    JSDHashEntryHdr     base;
    JSThread            *thread;
};

/*
 * The function takes the GC lock and does not release it on successful
 * return. On error (out of memory) the function releases the lock but
 * delegates the error reporting to the caller.
 */
extern JSBool
js_InitContextThread(JSContext *cx);

/*
 * On entrance the GC lock must be held and it will be held on exit.
 */
extern void
js_ClearContextThread(JSContext *cx);
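
/*
 * Locking sketch (illustrative): a successful js_InitContextThread leaves
 * the caller holding the GC lock, and js_ClearContextThread runs under that
 * same lock.
 *
 *   if (!js_InitContextThread(cx))
 *       return JS_FALSE;            lock already released on failure
 *   ...GC lock held here...
 *   js_ClearContextThread(cx);
 *   JS_UNLOCK_GC(cx->runtime);      assuming jslock.h's JS_UNLOCK_GC
 */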

#endif /* JS_THREADSAFE */

typedef enum JSDestroyContextMode {
    JSDCM_NO_GC,
    JSDCM_MAYBE_GC,
    JSDCM_FORCE_GC,
    JSDCM_NEW_FAILED
} JSDestroyContextMode;

typedef enum JSRuntimeState {
    JSRTS_DOWN,
    JSRTS_LAUNCHING,
    JSRTS_UP,
    JSRTS_LANDING
} JSRuntimeState;

typedef enum JSBuiltinFunctionId {
    JSBUILTIN_ObjectToIterator,
    JSBUILTIN_CallIteratorNext,
    JSBUILTIN_GetProperty,
    JSBUILTIN_GetElement,
    JSBUILTIN_SetProperty,
    JSBUILTIN_SetElement,
    JSBUILTIN_LIMIT
} JSBuiltinFunctionId;

typedef struct JSPropertyTreeEntry {
    JSDHashEntryHdr     hdr;
    JSScopeProperty     *child;
} JSPropertyTreeEntry;

typedef struct JSSetSlotRequest JSSetSlotRequest;

struct JSSetSlotRequest {
    JSObject            *obj;           /* object containing slot to set */
    JSObject            *pobj;          /* new proto or parent reference */
    uint16              slot;           /* which to set, proto or parent */
    JSPackedBool        cycle;          /* true if a cycle was detected */
    JSSetSlotRequest    *next;          /* next request in GC worklist */
};

struct JSRuntime {

    /* Context create/destroy callback. */
    JSContextCallback   cxCallback;

    /*
     * Shape regenerated whenever a prototype implicated by an "add property"
     * property cache fill and induced trace guard has a readonly property or a
     * setter defined on it. This number proxies for the shapes of all objects
     * along the prototype chain of all objects in the runtime on which such an
     * add-property result has been cached/traced.
     *
     * See bug 492355 for more details.
     *
     * This comes early in JSRuntime to minimize the immediate format used by
     * trace-JITted code that reads it.
     */
    uint32              protoHazardShape;

    /* Garbage collector state, used by jsgc.c. */
    JSGCChunkInfo       *gcChunkList;
    JSGCArenaList       gcArenaList[GC_NUM_FREELISTS];

    uint32              gcLevel;
    uint32              gcNumber;
    JSTracer            *gcMarkingTracer;
    uint32              gcTriggerFactor;
    volatile JSBool     gcIsNeeded;

    /*
     * NB: do not pack another flag here by claiming gcPadding unless the new
     * flag is written only by the GC thread.
     */

    JSString            *emptyString;
    JSString            **unitStrings;

    /*
     * Builtin functions, lazily created and held for use by the trace recorder.
     *
     * This field would be #ifdef JS_TRACER, but XPConnect is compiled without
     * -DJS_TRACER and includes this header.
     */
    JSObject            *builtinFunctions[JSBUILTIN_LIMIT];

    /* List of active contexts sharing this runtime; protected by gcLock. */
    JSCList             contextList;
|
|
#ifdef JS_THREADSAFE

    /*
     * To keep the code simple, we define debuggerMutations for the
     * thread-unsafe case too.
     */
    PRLock              *debuggerLock;

    JSDHashTable        threads;
#endif /* JS_THREADSAFE */
    uint32              debuggerMutations;

    JSNativeEnumerator  *nativeEnumerators;

#ifndef JS_THREADSAFE
    JSThreadData        threadData;

#define JS_THREAD_DATA(cx)      (&(cx)->runtime->threadData)
#endif

    /*
     * Object shape (property cache structural type) identifier generator.
     *
     * Type 0 stands for the empty scope, and must not be regenerated due to
     * uint32 wrap-around. Since js_GenerateShape (in jsinterp.cpp) uses
     * atomic pre-increment, the initial value for the first typed non-empty
     * scope will be 1.
     *
     * If this counter overflows into SHAPE_OVERFLOW_BIT (in jsinterp.h), the
     * cache is disabled, to avoid aliasing two different types. It stays
     * disabled until a triggered GC at some later moment compresses live
     * types, minimizing rt->shapeGen in the process.
     */
    volatile uint32     shapeGen;

    /* Literal table maintained by jsatom.c functions. */
    JSAtomState         atomState;

    jsuword             nativeEnumCache[NATIVE_ENUM_CACHE_SIZE];

    /*
     * Various metering fields are defined at the end of JSRuntime. In this
     * way there is no need to recompile all the code that refers to other
     * fields of JSRuntime after enabling the corresponding metering macro.
     */

#ifdef JS_GCMETER
    JSGCStats           gcStats;
#endif

#ifdef JS_FUNCTION_METERING
    JSFunctionMeter     functionMeter;
    char                lastScriptFilename[1024];
#endif
};

/* Common macros to access thread-local caches in JSThread or JSRuntime. */
#define JS_GSN_CACHE(cx)        (JS_THREAD_DATA(cx)->gsnCache)
#define JS_PROPERTY_CACHE(cx)   (JS_THREAD_DATA(cx)->propertyCache)
#define JS_TRACE_MONITOR(cx)    (JS_THREAD_DATA(cx)->traceMonitor)
#define JS_SCRIPTS_TO_GC(cx)    (JS_THREAD_DATA(cx)->scriptsToGC)
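
/*
 * Expansion example: with JS_THREADSAFE,
 *
 *   JS_GSN_CACHE(cx)
 *
 * goes through JS_THREAD_DATA to
 *
 *   (&(cx)->thread->data)->gsnCache
 *
 * and without JS_THREADSAFE to (&(cx)->runtime->threadData)->gsnCache, so
 * callers need not know where JSThreadData actually lives.
 */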

#ifdef JS_EVAL_CACHE_METERING
# define EVAL_CACHE_METER(x)    (JS_THREAD_DATA(cx)->evalCacheMeter.x++)
#else
# define EVAL_CACHE_METER(x)    ((void) 0)
#endif
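
/*
 * NB: EVAL_CACHE_METER expands to an expression that uses a variable named
 * cx from the enclosing scope; cx is not a macro parameter. Usage sketch:
 *
 *   JSContext *cx = ...;
 *   EVAL_CACHE_METER(probe);    bumps evalCacheMeter.probe for cx's thread
 */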

#ifdef DEBUG
# define JS_RUNTIME_METER(rt, which)    JS_ATOMIC_INCREMENT(&(rt)->which)
# define JS_RUNTIME_UNMETER(rt, which)  JS_ATOMIC_DECREMENT(&(rt)->which)
#else
# define JS_RUNTIME_METER(rt, which)    /* nothing */
# define JS_RUNTIME_UNMETER(rt, which)  /* nothing */
#endif

#define JSTVU_TRACE         (-2)    /* u.trace is a hook to trace a custom
                                     * structure */
#define JSTVU_SPROP         (-3)    /* u.sprop roots property tree node */
#define JSTVU_WEAK_ROOTS    (-4)    /* u.weakRoots points to saved weak roots */
#define JSTVU_COMPILER      (-5)    /* u.compiler roots JSCompiler* */
#define JSTVU_SCRIPT        (-6)    /* u.script roots JSScript* */

/*
 * bits. This is how, for example, js_GetGCThingTraceKind uses its |thing|
 * parameter -- it consults GC-thing flags stored separately from the thing to
 * decide the kind of thing.
 *
 * The following checks that this type-punning is possible.
 */
JS_STATIC_ASSERT(sizeof(JSTempValueUnion) == sizeof(jsval));
JS_STATIC_ASSERT(sizeof(JSTempValueUnion) == sizeof(void *));

#define JS_PUSH_TEMP_ROOT_COMMON(cx,x,tvr,cnt,kind)                           \
    JS_BEGIN_MACRO                                                            \
        JS_ASSERT((cx)->tempValueRooters != (tvr));                           \
        (tvr)->count = (cnt);                                                 \
        (tvr)->u.kind = (x);                                                  \
        (tvr)->down = (cx)->tempValueRooters;                                 \
        (cx)->tempValueRooters = (tvr);                                       \
    JS_END_MACRO

#define JS_PUSH_TEMP_ROOT_WEAK_COPY(cx,weakRoots_,tvr)                        \
    JS_PUSH_TEMP_ROOT_COMMON(cx, weakRoots_, tvr, JSTVU_WEAK_ROOTS, weakRoots)

#define JS_PUSH_TEMP_ROOT_COMPILER(cx,pc,tvr)                                 \
    JS_PUSH_TEMP_ROOT_COMMON(cx, pc, tvr, JSTVU_COMPILER, compiler)

#define JS_PUSH_TEMP_ROOT_SCRIPT(cx,script_,tvr)                              \
    JS_PUSH_TEMP_ROOT_COMMON(cx, script_, tvr, JSTVU_SCRIPT, script)

#define JSRESOLVE_INFER         0xffff  /* infer bits from current bytecode */

struct JSContext {
    /*
     * If this flag is set, we were asked to call back the operation callback
     * as soon as possible.
     */
    volatile jsint      operationCallbackFlag;

    /* JSRuntime contextList linkage. */
    JSCList             link;

#if JS_HAS_XML_SUPPORT
    uint8               xmlSettingFlags;
#endif

    /*
     * Classic Algol "display" static link optimization.
     */
#define JS_DISPLAY_SIZE 16U

    JSStackFrame        *display[JS_DISPLAY_SIZE];

    JSRuntime           *runtime;

    /* Stack arena pool and frame pointer register. */
    JS_REQUIRES_STACK
    JSArenaPool         stackPool;

    JS_REQUIRES_STACK
    JSStackFrame        *fp;

    /* Temporary arena pool used while compiling and decompiling. */
    JSArenaPool         tempPool;

    char                *lastMessage;
#ifdef DEBUG
    void                *tracefp;
    jsbytecode          *tracePrevPc;
#endif

    /* Per-context optional error reporter. */
    JSErrorReporter     errorReporter;

    /* Operation callback. */
    JSOperationCallback operationCallback;

    /* Interpreter activation count. */
    uintN               interpLevel;

    jsrefcount          requestDepth;
    /* Same as requestDepth but ignoring JS_SuspendRequest/JS_ResumeRequest */
    jsrefcount          outstandingRequests;
    JSTitle             *lockedSealedTitle; /* weak ref, for low-cost sealed
                                               title locking */
    JSCList             threadLinks;        /* JSThread contextList linkage */

    /* Stored here to avoid passing it around as a parameter. */
    uintN               resolveFlags;

#ifdef JS_TRACER
    /*
     * State for the current tree execution. bailExit is valid if the tree has
     * called back into native code via a _FAIL builtin and has not yet bailed,
     * else garbage (NULL in debug builds).
     */
    InterpState         *interpState;
    VMSideExit          *bailExit;

    /* Used when calling natives from trace to root the vp vector. */
    uintN               nativeVpLen;
    jsval               *nativeVp;
#endif
};

#ifdef JS_THREADSAFE

#endif

#ifdef __cplusplus

/*
 * Return fp's atom map base: while an imacro is running, bytecode atom
 * indexes refer to the runtime's common atoms rather than the script's
 * atom map.
 */
static inline JSAtom **
FrameAtomBase(JSContext *cx, JSStackFrame *fp)
{
    return fp->imacpc
           ? COMMON_ATOMS_START(&cx->runtime->atomState)
           : fp->script->atomMap.vector;
}

/* FIXME(bug 332648): Move this into a public header. */
class JSAutoTempValueRooter
{
  public:
    JSAutoTempValueRooter(JSContext *cx, size_t len, jsval *vec)
      : mContext(cx) {
        JS_PUSH_TEMP_ROOT(mContext, len, vec, &mTvr);
    }
    explicit JSAutoTempValueRooter(JSContext *cx, jsval v = JSVAL_NULL)
      : mContext(cx) {
        JS_PUSH_SINGLE_TEMP_ROOT(mContext, v, &mTvr);
    }
    JSAutoTempValueRooter(JSContext *cx, JSString *str)
      : mContext(cx) {
        JS_PUSH_TEMP_ROOT_STRING(mContext, str, &mTvr);
    }
    JSAutoTempValueRooter(JSContext *cx, JSObject *obj)
      : mContext(cx) {
        JS_PUSH_TEMP_ROOT_OBJECT(mContext, obj, &mTvr);
    }

    ~JSAutoTempValueRooter() {
        JS_POP_TEMP_ROOT(mContext, &mTvr);
    }

    jsval value() { return mTvr.u.value; }
    jsval *addr() { return &mTvr.u.value; }

  protected:
    JSContext *mContext;

    JSTempValueRooter mTvr;
};
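
/*
 * Usage sketch (illustrative): keep a fresh GC-thing alive across calls
 * that can trigger a last-ditch GC.
 *
 *   JSString *str = JS_NewStringCopyZ(cx, "example");
 *   if (!str)
 *       return JS_FALSE;
 *   JSAutoTempValueRooter tvr(cx, str);
 *   ...anything here may GC; str stays rooted until tvr goes out of scope...
 */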

class JSAutoTempIdRooter
{
  public:
    explicit JSAutoTempIdRooter(JSContext *cx, jsid id = INT_TO_JSID(0))
      : mContext(cx) {
        JS_PUSH_SINGLE_TEMP_ROOT(mContext, ID_TO_VALUE(id), &mTvr);
    }

    ~JSAutoTempIdRooter() {
        JS_POP_TEMP_ROOT(mContext, &mTvr);
    }

    jsid id() { return (jsid) mTvr.u.value; }
    jsid * addr() { return (jsid *) &mTvr.u.value; }

  private:
    JSContext *mContext;
    JSTempValueRooter mTvr;
};

class JSAutoResolveFlags
{
  public:
    JSAutoResolveFlags(JSContext *cx, uintN flags)
      : mContext(cx), mSaved(cx->resolveFlags) {
        cx->resolveFlags = flags;
    }

    ~JSAutoResolveFlags() { mContext->resolveFlags = mSaved; }

  private:
    JSContext *mContext;
    uintN mSaved;
};

#endif /* __cplusplus */

/*
 * Slightly more readable macros for testing per-context option settings (also
 * to hide bitset implementation detail).
 */

#define JSVERSION_MASK          0x0FFF  /* see JSVersion in jspubtd.h */
#define JSVERSION_HAS_XML       0x1000  /* flag induced by XML option */
#define JSVERSION_ANONFUNFIX    0x2000  /* see jsapi.h, the comments
                                           for JSOPTION_ANONFUNFIX */

#define JSVERSION_NUMBER(cx)    ((JSVersion)((cx)->version &                  \
                                             JSVERSION_MASK))
#define JS_HAS_XML_OPTION(cx)   ((cx)->version & JSVERSION_HAS_XML ||         \
                                 JSVERSION_NUMBER(cx) >= JSVERSION_1_6)

extern JSBool
js_InitThreads(JSRuntime *rt);

extern void
js_FinishThreads(JSRuntime *rt);

extern void
js_PurgeThreads(JSContext *cx);

/*
 * Ensures the JSOPTION_XML and JSOPTION_ANONFUNFIX bits of cx->options are
 * reflected in cx->version, since each bit must travel with a script that has
 * it set.
 */
extern void
js_SyncOptionsToVersion(JSContext *cx);

/*
 * Common subroutine of JS_SetVersion and js_SetVersion, to update per-context
 * data that depends on version.
 */
extern void
js_OnVersionChange(JSContext *cx);

extern JSBool
js_ValidContextPointer(JSRuntime *rt, JSContext *cx);

static JS_INLINE JSContext *
js_ContextFromLinkField(JSCList *link)
{
    JS_ASSERT(link);
    return (JSContext *) ((uint8 *) link - offsetof(JSContext, link));
}

/*
 * If unlocked, acquire and release rt->gcLock around *iterp update; otherwise
 * the caller must be holding rt->gcLock.
 */
extern JSContext *
js_ContextIterator(JSRuntime *rt, JSBool unlocked, JSContext **iterp);

/*
 * Iterate through contexts with active requests. The caller must be holding
 * rt->gcLock in case of a thread-safe build, or otherwise guarantee that the
 * context list is not altered asynchronously.
 */
extern JS_FRIEND_API(JSContext *)
js_NextActiveContext(JSRuntime *, JSContext *);

#ifdef JS_THREADSAFE

/*
 * Count the number of contexts on the current thread that have entered
 * requests.
 */
uint32
js_CountThreadRequests(JSContext *cx);

/*
 * This is a helper for code that can potentially run outside a JS request
 * and needs to ensure that the GC is not running when the function returns.
 *
 * This function must be called with the GC lock held.
 */
extern void
js_WaitForGC(JSRuntime *rt);

/*
 * If we're in one or more requests (possibly on more than one context)
 * running on the current thread, indicate, temporarily, that all these
 * requests are inactive so a possible GC can proceed on another thread.
 * This function returns the number of discounted requests. The number must
 * be passed later to js_RecountRequestsAfterGC to reactivate the requests.
 *
 * This function must be called with the GC lock held.
 */
uint32
js_DiscountRequestsForGC(JSContext *cx);

/*
 * This function must be called with the GC lock held.
 */
void
js_RecountRequestsAfterGC(JSRuntime *rt, uint32 requestDebit);

#else /* !JS_THREADSAFE */

# define js_WaitForGC(rt)    ((void) 0)

#endif

/*
 * JSClass.resolve and watchpoint recursion damping machinery.
 */
extern JSBool
js_StartResolving(JSContext *cx, JSResolvingKey *key, uint32 flag,
                  JSResolvingEntry **entryp);

/*
 * If the operation callback flag was set, call the operation callback.
 * This macro can run the full GC. Return true if it is OK to continue and
 * false otherwise.
 */
#define JS_CHECK_OPERATION_LIMIT(cx)                                          \
    (!(cx)->operationCallbackFlag || js_InvokeOperationCallback(cx))
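/*
 * Usage sketch (illustrative): interpreter-style loops test the limit at
 * their heads and unwind when the callback vetoes continuing.
 *
 *   for (;;) {
 *       if (!JS_CHECK_OPERATION_LIMIT(cx))
 *           return JS_FALSE;    the callback asked us to stop
 *       ...do one unit of work...
 *   }
 */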
|

/*
 * Invoke the operation callback and return false if the current execution
 * is to be terminated.
 */
extern JSBool
js_InvokeOperationCallback(JSContext *cx);

#ifndef JS_THREADSAFE
# define js_TriggerAllOperationCallbacks(rt, gcLocked)                        \
    js_TriggerAllOperationCallbacks (rt)
#endif
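
/*
 * NB: with !JS_THREADSAFE the function-like macro above rewrites every
 * two-argument use, including the declaration below, into the one-argument
 * form, so the gcLocked parameter exists only in threadsafe builds. E.g.
 *
 *   js_TriggerAllOperationCallbacks(rt, JS_FALSE);
 *
 * preprocesses to
 *
 *   js_TriggerAllOperationCallbacks (rt);
 */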

void
js_TriggerAllOperationCallbacks(JSRuntime *rt, JSBool gcLocked);

extern JSStackFrame *
js_GetScriptedCaller(JSContext *cx, JSStackFrame *fp);

extern jsbytecode*
js_GetCurrentBytecodePC(JSContext* cx);

#ifdef JS_TRACER
/*
 * Reconstruct the JS stack and clear cx->tracecx. We must be currently in a
 * _FAIL builtin from trace on cx or another context on the same thread. The
 * machine code for the trace remains on the C stack when js_DeepBail returns.
 *
 * Implemented in jstracer.cpp.
 */
JS_FORCES_STACK JS_FRIEND_API(void)
js_DeepBail(JSContext *cx);
#endif

static JS_FORCES_STACK JS_INLINE void
js_LeaveTrace(JSContext *cx)
{
#ifdef JS_TRACER
    if (JS_ON_TRACE(cx))
        js_DeepBail(cx);
#endif
}

static JS_INLINE void
js_LeaveTraceIfGlobalObject(JSContext *cx, JSObject *obj)
{
    if (!obj->fslots[JSSLOT_PARENT])
        js_LeaveTrace(cx);
}

static JS_INLINE JSBool
js_CanLeaveTrace(JSContext *cx)
{
    JS_ASSERT(JS_ON_TRACE(cx));
#ifdef JS_TRACER
    return cx->bailExit != NULL;
#else
    return JS_FALSE;
#endif
}

/*
 * Get the current cx->fp, first lazily instantiating stack frames if needed.
 * (Do not access cx->fp directly except in JS_REQUIRES_STACK code.)
 *
 * Defined in jstracer.cpp if JS_TRACER is defined.
 */
static JS_FORCES_STACK JS_INLINE JSStackFrame *
js_GetTopStackFrame(JSContext *cx)
{
    js_LeaveTrace(cx);
    return cx->fp;
}

static JS_INLINE JSBool
js_IsPropertyCacheDisabled(JSContext *cx)
{
    return cx->runtime->shapeGen >= SHAPE_OVERFLOW_BIT;
}

static JS_INLINE uint32
js_RegenerateShapeForGC(JSContext *cx)
{
    uint32 shape;

    JS_ASSERT(cx->runtime->gcRunning);

    /*
     * Under the GC, compared with js_GenerateShape, we don't need to use
     * atomic increments but we still must make sure that after an overflow
     * the shape stays such.
     */
    shape = cx->runtime->shapeGen;
    shape = (shape + 1) | (shape & SHAPE_OVERFLOW_BIT);
    cx->runtime->shapeGen = shape;
    return shape;
}
|
1539 |
JS_END_EXTERN_C |
JS_END_EXTERN_C |
1540 |
|
|