/[jscoverage]/trunk/js/jslock.cpp
ViewVC logotype

Contents of /trunk/js/jslock.cpp

Parent Directory Parent Directory | Revision Log Revision Log


Revision 332 - (show annotations)
Thu Oct 23 19:03:33 2008 UTC (13 years, 8 months ago) by siliconforks
File size: 40897 byte(s)
Add SpiderMonkey from Firefox 3.1b1.

The following directories and files were removed:
correct/, correct.js
liveconnect/
nanojit/
t/
v8/
vprof/
xpconnect/
all JavaScript files (Y.js, call.js, if.js, math-partial-sums.js, md5.js, perfect.js, trace-test.js, trace.js)


1 /* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2 *
3 * ***** BEGIN LICENSE BLOCK *****
4 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
5 *
6 * The contents of this file are subject to the Mozilla Public License Version
7 * 1.1 (the "License"); you may not use this file except in compliance with
8 * the License. You may obtain a copy of the License at
9 * http://www.mozilla.org/MPL/
10 *
11 * Software distributed under the License is distributed on an "AS IS" basis,
12 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
13 * for the specific language governing rights and limitations under the
14 * License.
15 *
16 * The Original Code is Mozilla Communicator client code, released
17 * March 31, 1998.
18 *
19 * The Initial Developer of the Original Code is
20 * Netscape Communications Corporation.
21 * Portions created by the Initial Developer are Copyright (C) 1998
22 * the Initial Developer. All Rights Reserved.
23 *
24 * Contributor(s):
25 *
26 * Alternatively, the contents of this file may be used under the terms of
27 * either of the GNU General Public License Version 2 or later (the "GPL"),
28 * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
29 * in which case the provisions of the GPL or the LGPL are applicable instead
30 * of those above. If you wish to allow use of your version of this file only
31 * under the terms of either the GPL or the LGPL, and not to allow others to
32 * use your version of this file under the terms of the MPL, indicate your
33 * decision by deleting the provisions above and replace them with the notice
34 * and other provisions required by the GPL or the LGPL. If you do not delete
35 * the provisions above, a recipient may use your version of this file under
36 * the terms of any one of the MPL, the GPL or the LGPL.
37 *
38 * ***** END LICENSE BLOCK ***** */
39
40 #ifdef JS_THREADSAFE
41
42 /*
43 * JS locking stubs.
44 */
45 #include "jsstddef.h"
46 #include <stdlib.h>
47 #include <string.h>
48 #include "jspubtd.h"
49 #include "jsutil.h" /* Added by JSIFY */
50 #include "jstypes.h"
51 #include "jsbit.h"
52 #include "jscntxt.h"
53 #include "jsdtoa.h"
54 #include "jsgc.h"
55 #include "jsfun.h" /* for VALUE_IS_FUNCTION used by *_WRITE_BARRIER */
56 #include "jslock.h"
57 #include "jsscope.h"
58 #include "jsstr.h"
59
60 #define ReadWord(W) (W)
61
62 /* Implement NativeCompareAndSwap. */
63
64 #if defined(_WIN32) && defined(_M_IX86)
65 #pragma warning( disable : 4035 )
66 JS_BEGIN_EXTERN_C
67 extern long __cdecl
68 _InterlockedCompareExchange(long *volatile dest, long exchange, long comp);
69 JS_END_EXTERN_C
70 #pragma intrinsic(_InterlockedCompareExchange)
71
/*
 * Attempt to CAS *w from ov to nv via the MSVC interlocked intrinsic.
 * _InterlockedCompareExchange leaves ZF set iff the exchange happened;
 * `sete al` captures that flag in AL. There is deliberately no return
 * statement: with `#pragma warning(disable : 4035)` above, the function's
 * result is whatever is in AL/EAX per the x86 calling convention, i.e. a
 * byte that is 1 on success and 0 on failure (upper EAX bits undefined —
 * the caller masks with & 1).
 */
static JS_ALWAYS_INLINE int
NativeCompareAndSwapHelper(jsword *w, jsword ov, jsword nv)
{
    _InterlockedCompareExchange(w, nv, ov);
    __asm {
        sete al
    }
}
80
81 static JS_ALWAYS_INLINE int
82 NativeCompareAndSwap(jsword *w, jsword ov, jsword nv)
83 {
84 return (NativeCompareAndSwapHelper(w, ov, nv) & 1);
85 }
86
87 #elif defined(XP_MACOSX) || defined(DARWIN)
88
89 #include <libkern/OSAtomic.h>
90
/*
 * CAS *w from ov to nv using Darwin's OSAtomic barrier variants (full
 * memory barrier on both success and failure). Returns nonzero on success.
 */
static JS_ALWAYS_INLINE int
NativeCompareAndSwap(jsword *w, jsword ov, jsword nv)
{
    /* Details on these functions available in the manpage for atomic */
#if JS_BYTES_PER_WORD == 8 && JS_BYTES_PER_LONG != 8
    /* 64-bit jsword but 32-bit long: need the 64-bit primitive. */
    return OSAtomicCompareAndSwap64Barrier(ov, nv, (int64_t*) w);
#else
    return OSAtomicCompareAndSwap32Barrier(ov, nv, (int32_t*) w);
#endif
}
101
102 #elif defined(__GNUC__) && defined(__i386__)
103
104 /* Note: This fails on 386 cpus, cmpxchgl is a >= 486 instruction */
105 static JS_ALWAYS_INLINE int
106 NativeCompareAndSwap(jsword *w, jsword ov, jsword nv)
107 {
108 unsigned int res;
109
110 __asm__ __volatile__ (
111 "lock\n"
112 "cmpxchgl %2, (%1)\n"
113 "sete %%al\n"
114 "andl $1, %%eax\n"
115 : "=a" (res)
116 : "r" (w), "r" (nv), "a" (ov)
117 : "cc", "memory");
118 return (int)res;
119 }
120
121 #elif defined(__GNUC__) && defined(__x86_64__)
/*
 * x86-64 CAS: same pattern as the i386 version but on 64-bit words
 * (`cmpxchgq`), with `movzbl` zero-extending the sete result into EAX.
 */
static JS_ALWAYS_INLINE int
NativeCompareAndSwap(jsword *w, jsword ov, jsword nv)
{
    unsigned int res;

    __asm__ __volatile__ (
                          "lock\n"
                          "cmpxchgq %2, (%1)\n"
                          "sete %%al\n"
                          "movzbl %%al, %%eax\n"
                          : "=a" (res)
                          : "r" (w), "r" (nv), "a" (ov)
                          : "cc", "memory");
    return (int)res;
}
137
138 #elif defined(SOLARIS) && defined(sparc) && defined(ULTRA_SPARC)
139
/*
 * UltraSPARC CAS. The GCC branch uses the `cas` instruction after an
 * `stbar` store barrier, then compares the returned memory value against
 * ov to synthesize the 0/1 result via an annulled branch. The non-GCC
 * branch defers to an externally assembled compare_and_swap routine.
 * Both branches assert ov != nv because `cas` writing back the old value
 * would make success indistinguishable from failure when ov == nv.
 */
static JS_ALWAYS_INLINE int
NativeCompareAndSwap(jsword *w, jsword ov, jsword nv)
{
#if defined(__GNUC__)
    unsigned int res;
    JS_ASSERT(ov != nv);
    asm volatile ("\
stbar\n\
cas [%1],%2,%3\n\
cmp %2,%3\n\
be,a 1f\n\
mov 1,%0\n\
mov 0,%0\n\
1:"
                  : "=r" (res)
                  : "r" (w), "r" (ov), "r" (nv));
    return (int)res;
#else /* !__GNUC__ */
    extern int compare_and_swap(jsword*, jsword, jsword);
    JS_ASSERT(ov != nv);
    return compare_and_swap(w, ov, nv);
#endif
}
163
164 #elif defined(AIX)
165
166 #include <sys/atomic_op.h>
167
/*
 * AIX CAS via _check_lock, which returns 0 (false) when it successfully
 * swapped ov for nv — hence the negation to get success == nonzero.
 */
static JS_ALWAYS_INLINE int
NativeCompareAndSwap(jsword *w, jsword ov, jsword nv)
{
    return !_check_lock((atomic_p)w, ov, nv);
}
173
174 #elif defined(USE_ARM_KUSER)
175
176 /* See https://bugzilla.mozilla.org/show_bug.cgi?id=429387 for a
177 * description of this ABI; this is a function provided at a fixed
178 * location by the kernel in the memory space of each process.
179 */
180 typedef int (__kernel_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
181 #define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
182
183 JS_STATIC_ASSERT(sizeof(jsword) == sizeof(int));
184
/*
 * ARM CAS via the kernel-provided __kernel_cmpxchg helper at a fixed
 * address. The helper may fail spuriously (e.g. on preemption), so loop
 * while it fails AND the word still holds ov; if the word changed, the
 * CAS has genuinely lost and we report failure. See bug 446169.
 */
static JS_ALWAYS_INLINE int
NativeCompareAndSwap(jsword *w, jsword ov, jsword nv)
{
    volatile int *vp = (volatile int *) w;
    PRInt32 failed = 1;

    /* Loop until a __kernel_cmpxchg succeeds. See bug 446169 */
    do {
        failed = __kernel_cmpxchg(ov, nv, vp);
    } while (failed && *vp == ov);
    return !failed;
}
197
198 #elif JS_HAS_NATIVE_COMPARE_AND_SWAP
199
200 #error "JS_HAS_NATIVE_COMPARE_AND_SWAP should be 0 if your platform lacks a compare-and-swap instruction."
201
202 #endif /* arch-tests */
203
204 #if JS_HAS_NATIVE_COMPARE_AND_SWAP
205
206 JSBool
207 js_CompareAndSwap(jsword *w, jsword ov, jsword nv)
208 {
209 return !!NativeCompareAndSwap(w, ov, nv);
210 }
211
212 #elif defined(NSPR_LOCK)
213
214 # ifdef __GNUC__
215 # warning "js_CompareAndSwap is implemented using NSSP lock"
216 # endif
217
/*
 * Fallback CAS emulated with a single process-wide NSPR lock. Correct but
 * slow; only used when the platform has no native CAS.
 *
 * NOTE(review): the function-local `static ... = JS_NEW_LOCK()` relies on
 * C++ dynamic initialization of local statics; pre-C++11 that
 * initialization is not itself thread-safe — presumably the first call
 * happens before threads contend. Verify against the engine's startup
 * sequence.
 */
JSBool
js_CompareAndSwap(jsword *w, jsword ov, jsword nv)
{
    int result;
    static PRLock *CompareAndSwapLock = JS_NEW_LOCK();

    JS_ACQUIRE_LOCK(CompareAndSwapLock);
    result = (*w == ov);
    if (result)
        *w = nv;
    JS_RELEASE_LOCK(CompareAndSwapLock);
    return result;
}
231
232 #else /* !defined(NSPR_LOCK) */
233
234 #error "NSPR_LOCK should be on when the platform lacks native compare-and-swap."
235
236 #endif
237
238 #ifndef NSPR_LOCK
239
/*
 * A "fat" lock: allocated lazily when a thin lock sees contention.
 * Fat locks live on per-bucket free/taken lists (JSFatLockTable below),
 * doubly linked via next/prevp for O(1) unlink in PutFatlock.
 */
struct JSFatLock {
    int susp;             /* number of threads suspended on svar */
    PRLock *slock;        /* system lock guarding svar waits */
    PRCondVar *svar;      /* condvar threads block on under contention */
    JSFatLock *next;      /* next lock on the free or taken list */
    JSFatLock **prevp;    /* backpointer to whatever points at us */
};

typedef struct JSFatLockTable {
    JSFatLock *free;      /* available fat locks for this bucket */
    JSFatLock *taken;     /* fat locks currently in use */
} JSFatLockTable;
252
253 #define GLOBAL_LOCK_INDEX(id) (((uint32)(jsuword)(id)>>2) & global_locks_mask)
254
255 static void
256 js_Dequeue(JSThinLock *);
257
258 static PRLock **global_locks;
259 static uint32 global_lock_count = 1;
260 static uint32 global_locks_log2 = 0;
261 static uint32 global_locks_mask = 0;
262
263 static void
264 js_LockGlobal(void *id)
265 {
266 uint32 i = GLOBAL_LOCK_INDEX(id);
267 PR_Lock(global_locks[i]);
268 }
269
270 static void
271 js_UnlockGlobal(void *id)
272 {
273 uint32 i = GLOBAL_LOCK_INDEX(id);
274 PR_Unlock(global_locks[i]);
275 }
276
277 #endif /* !NSPR_LOCK */
278
/*
 * Initialize a thin lock to the unlocked state. Under NSPR_LOCK every
 * "thin" lock is really a system lock allocated up front; otherwise the
 * whole structure is just zeroed (owner == 0 means unlocked, fat == NULL
 * means no contention yet).
 */
void
js_InitLock(JSThinLock *tl)
{
#ifdef NSPR_LOCK
    tl->owner = 0;
    tl->fat = (JSFatLock*)JS_NEW_LOCK();
#else
    memset(tl, 0, sizeof(JSThinLock));
#endif
}
289
/*
 * Tear down a thin lock. The lock must be unheld: under NSPR_LOCK the
 * owner field is poisoned with 0xdeadbeef to catch use-after-finish;
 * otherwise we assert it was never left locked or contended.
 */
void
js_FinishLock(JSThinLock *tl)
{
#ifdef NSPR_LOCK
    tl->owner = 0xdeadbeef;
    if (tl->fat)
        JS_DESTROY_LOCK(((JSLock*)tl->fat));
#else
    JS_ASSERT(tl->owner == 0);
    JS_ASSERT(tl->fat == NULL);
#endif
}
302
303 #ifdef DEBUG_SCOPE_COUNT
304
305 #include <stdio.h>
306 #include "jsdhash.h"
307
308 static FILE *logfp;
309 static JSDHashTable logtbl;
310
311 typedef struct logentry {
312 JSDHashEntryStub stub;
313 char op;
314 const char *file;
315 int line;
316 } logentry;
317
/*
 * DEBUG_SCOPE_COUNT helper: append one scope lock/unlock event to
 * /tmp/scope.log (opened lazily, unbuffered) and record the latest event
 * per scope in a stub hash table so js_unlog_scope can drop entries when
 * a scope dies. All failures are silently ignored — this is best-effort
 * debug instrumentation only.
 */
static void
logit(JSScope *scope, char op, const char *file, int line)
{
    logentry *entry;

    if (!logfp) {
        logfp = fopen("/tmp/scope.log", "w");
        if (!logfp)
            return;
        /* Unbuffered so the log survives a crash. */
        setvbuf(logfp, NULL, _IONBF, 0);
    }
    fprintf(logfp, "%p %c %s %d\n", scope, op, file, line);

    /* Lazily initialize the per-scope event table. */
    if (!logtbl.entryStore &&
        !JS_DHashTableInit(&logtbl, JS_DHashGetStubOps(), NULL,
                           sizeof(logentry), 100)) {
        return;
    }
    entry = (logentry *) JS_DHashTableOperate(&logtbl, scope, JS_DHASH_ADD);
    if (!entry)
        return;
    entry->stub.key = scope;
    entry->op = op;
    entry->file = file;
    entry->line = line;
}
344
345 void
346 js_unlog_scope(JSScope *scope)
347 {
348 if (!logtbl.entryStore)
349 return;
350 (void) JS_DHashTableOperate(&logtbl, scope, JS_DHASH_REMOVE);
351 }
352
353 # define LOGIT(scope,op) logit(scope, op, __FILE__, __LINE__)
354
355 #else
356
357 # define LOGIT(scope,op) /* nothing */
358
359 #endif /* DEBUG_SCOPE_COUNT */
360
361 /*
362 * Return true if scope's ownercx, or the ownercx of a single-threaded scope
363 * for which ownercx is waiting to become multi-threaded and shared, is cx.
364 * That condition implies deadlock in ClaimScope if cx's thread were to wait
365 * to share scope.
366 *
367 * (i) rt->gcLock held
368 */
/*
 * Return true if scope's ownercx, or the ownercx of a single-threaded scope
 * for which ownercx is waiting to become multi-threaded and shared, is cx.
 * That condition implies deadlock in ClaimScope if cx's thread were to wait
 * to share scope.
 *
 * Walks the title->ownercx->titleToShare chain looking for a cycle back to
 * cx; the chain is finite because each context waits on at most one title.
 *
 * (i) rt->gcLock held
 */
static JSBool
WillDeadlock(JSTitle *title, JSContext *cx)
{
    JSContext *ownercx;

    do {
        ownercx = title->ownercx;
        if (ownercx == cx) {
            JS_RUNTIME_METER(cx->runtime, deadlocksAvoided);
            return JS_TRUE;
        }
    } while (ownercx && (title = ownercx->titleToShare) != NULL);
    return JS_FALSE;
}
383
384 /*
385 * Make title multi-threaded, i.e. share its ownership among contexts in rt
386 * using a "thin" or (if necessary due to contention) "fat" lock. Called only
387 * from ClaimTitle, immediately below, when we detect deadlock were we to wait
388 * for title's lock, because its ownercx is waiting on a title owned by the
389 * calling cx.
390 *
391 * (i) rt->gcLock held
392 */
/*
 * Make title multi-threaded, i.e. share its ownership among contexts in rt
 * using a "thin" or (if necessary due to contention) "fat" lock. Called only
 * from ClaimTitle, immediately below, when we detect deadlock were we to wait
 * for title's lock, because its ownercx is waiting on a title owned by the
 * calling cx.
 *
 * (i) rt->gcLock held
 */
static void
ShareTitle(JSContext *cx, JSTitle *title)
{
    JSRuntime *rt;
    JSTitle **todop;

    rt = cx->runtime;
    if (title->u.link) {
        /* Unlink title from rt->titleSharingTodo and wake any waiters. */
        for (todop = &rt->titleSharingTodo; *todop != title;
             todop = &(*todop)->u.link) {
            JS_ASSERT(*todop != NO_TITLE_SHARING_TODO);
        }
        *todop = title->u.link;
        title->u.link = NULL;       /* null u.link for sanity ASAP */
        JS_NOTIFY_ALL_CONDVAR(rt->titleSharingDone);
    }
    js_InitLock(&title->lock);
    title->u.count = 0;             /* u.count aliases u.link; reset for lock use */
    js_FinishSharingTitle(cx, title);
}
413
414 /*
415 * js_FinishSharingTitle is the tail part of ShareTitle, split out to become a
416 * subroutine of JS_EndRequest too. The bulk of the work here involves making
417 * mutable strings in the title's object's slots be immutable. We have to do
418 * this because such strings will soon be available to multiple threads, so
419 * their buffers can't be realloc'd any longer in js_ConcatStrings, and their
420 * members can't be modified by js_ConcatStrings, js_MinimizeDependentStrings,
421 * or js_UndependString.
422 *
423 * The last bit of work done by js_FinishSharingTitle nulls title->ownercx and
424 * updates rt->sharedTitles.
425 */
426
/*
 * Tail part of ShareTitle (see the comment block above): make every string
 * slot of the title's object immutable, then null ownercx last so other
 * threads only see the title as shared once its lock is initialized.
 */
void
js_FinishSharingTitle(JSContext *cx, JSTitle *title)
{
    JSObjectMap *map;
    JSScope *scope;
    JSObject *obj;
    uint32 nslots, i;
    jsval v;

    map = TITLE_TO_MAP(title);
    if (!MAP_IS_NATIVE(map))
        return;
    scope = (JSScope *)map;

    obj = scope->object;
    if (obj) {
        nslots = scope->map.freeslot;
        for (i = 0; i != nslots; ++i) {
            v = STOBJ_GET_SLOT(obj, i);
            if (JSVAL_IS_STRING(v) &&
                !js_MakeStringImmutable(cx, JSVAL_TO_STRING(v))) {
                /*
                 * FIXME bug 363059: The following error recovery changes
                 * runtime execution semantics, arbitrarily and silently
                 * ignoring errors except out-of-memory, which should have been
                 * reported through JS_ReportOutOfMemory at this point.
                 */
                STOBJ_SET_SLOT(obj, i, JSVAL_VOID);
            }
        }
    }

    title->ownercx = NULL;  /* NB: set last, after lock init */
    JS_RUNTIME_METER(cx->runtime, sharedTitles);
}
462
463 /*
464 * Given a title with apparently non-null ownercx different from cx, try to
465 * set ownercx to cx, claiming exclusive (single-threaded) ownership of title.
466 * If we claim ownership, return true. Otherwise, we wait for ownercx to be
467 * set to null (indicating that title is multi-threaded); or if waiting would
468 * deadlock, we set ownercx to null ourselves via ShareTitle. In any case,
469 * once ownercx is null we return false.
470 */
/*
 * Given a title with apparently non-null ownercx different from cx, try to
 * set ownercx to cx, claiming exclusive (single-threaded) ownership of title.
 * If we claim ownership, return true. Otherwise, we wait for ownercx to be
 * set to null (indicating that title is multi-threaded); or if waiting would
 * deadlock, we set ownercx to null ourselves via ShareTitle. In any case,
 * once ownercx is null we return false.
 *
 * NOTE(review): the statement order in this function (claim under gcLock,
 * suspend-request before waiting, clear titleToShare only after all waits)
 * is load-bearing for deadlock avoidance; do not reorder.
 */
static JSBool
ClaimTitle(JSTitle *title, JSContext *cx)
{
    JSRuntime *rt;
    JSContext *ownercx;
    jsrefcount saveDepth;
    PRStatus stat;

    rt = cx->runtime;
    JS_RUNTIME_METER(rt, claimAttempts);
    JS_LOCK_GC(rt);

    /* Reload in case ownercx went away while we blocked on the lock. */
    while ((ownercx = title->ownercx) != NULL) {
        /*
         * Avoid selflock if ownercx is dead, or is not running a request, or
         * has the same thread as cx.  Set title->ownercx to cx so that the
         * matching JS_UNLOCK_SCOPE or JS_UNLOCK_OBJ macro call will take the
         * fast path around the corresponding js_UnlockTitle or js_UnlockObj
         * function call.
         *
         * If title->u.link is non-null, title has already been inserted on
         * the rt->titleSharingTodo list, because another thread's context
         * already wanted to lock title while ownercx was running a request.
         * We can't claim any title whose u.link is non-null at this point,
         * even if ownercx->requestDepth is 0 (see below where we suspend our
         * request before waiting on rt->titleSharingDone).
         */
        if (!title->u.link &&
            (!js_ValidContextPointer(rt, ownercx) ||
             !ownercx->requestDepth ||
             ownercx->thread == cx->thread)) {
            JS_ASSERT(title->u.count == 0);
            title->ownercx = cx;
            JS_UNLOCK_GC(rt);
            JS_RUNTIME_METER(rt, claimedTitles);
            return JS_TRUE;
        }

        /*
         * Avoid deadlock if title's owner context is waiting on a title that
         * we own, by revoking title's ownership.  This approach to deadlock
         * avoidance works because the engine never nests title locks.
         *
         * If cx could hold locks on ownercx->titleToShare, or if ownercx could
         * hold locks on title, we would need to keep reentrancy counts for all
         * such "flyweight" (ownercx != NULL) locks, so that control would
         * unwind properly once these locks became "thin" or "fat".  The engine
         * promotes a title from exclusive to shared access only when locking,
         * never when holding or unlocking.
         *
         * Avoid deadlock before any of this title/context cycle detection if
         * cx is on the active GC's thread, because in that case, no requests
         * will run until the GC completes.  Any title wanted by the GC (from
         * a finalizer) that can't be claimed must become shared.
         */
        if (rt->gcThread == cx->thread ||
            (ownercx->titleToShare &&
             WillDeadlock(ownercx->titleToShare, cx))) {
            ShareTitle(cx, title);
            break;
        }

        /*
         * Thanks to the non-zero NO_TITLE_SHARING_TODO link terminator, we
         * can decide whether title is on rt->titleSharingTodo with a single
         * non-null test, and avoid double-insertion bugs.
         */
        if (!title->u.link) {
            title->u.link = rt->titleSharingTodo;
            rt->titleSharingTodo = title;
            js_HoldObjectMap(cx, TITLE_TO_MAP(title));
        }

        /*
         * Inline JS_SuspendRequest before we wait on rt->titleSharingDone,
         * saving and clearing cx->requestDepth so we don't deadlock if the
         * GC needs to run on ownercx.
         *
         * Unlike JS_SuspendRequest and JS_EndRequest, we must take care not
         * to decrement rt->requestCount if cx is active on the GC's thread,
         * because the GC has already reduced rt->requestCount to exclude all
         * such contexts.
         */
        saveDepth = cx->requestDepth;
        if (saveDepth) {
            cx->requestDepth = 0;
            if (rt->gcThread != cx->thread) {
                JS_ASSERT(rt->requestCount > 0);
                rt->requestCount--;
                if (rt->requestCount == 0)
                    JS_NOTIFY_REQUEST_DONE(rt);
            }
        }

        /*
         * We know that some other thread's context owns title, which is now
         * linked onto rt->titleSharingTodo, awaiting the end of that other
         * thread's request.  So it is safe to wait on rt->titleSharingDone.
         */
        cx->titleToShare = title;
        stat = PR_WaitCondVar(rt->titleSharingDone, PR_INTERVAL_NO_TIMEOUT);
        JS_ASSERT(stat != PR_FAILURE);

        /*
         * Inline JS_ResumeRequest after waiting on rt->titleSharingDone,
         * restoring cx->requestDepth.  Same note as above for the inlined,
         * specialized JS_SuspendRequest code: beware rt->gcThread.
         */
        if (saveDepth) {
            if (rt->gcThread != cx->thread) {
                while (rt->gcLevel > 0)
                    JS_AWAIT_GC_DONE(rt);
                rt->requestCount++;
            }
            cx->requestDepth = saveDepth;
        }

        /*
         * Don't clear cx->titleToShare until after we're through waiting on
         * all condition variables protected by rt->gcLock -- that includes
         * rt->titleSharingDone *and* rt->gcDone (hidden in JS_AWAIT_GC_DONE,
         * in the inlined JS_ResumeRequest code immediately above).
         *
         * Otherwise, the GC could easily deadlock with another thread that
         * owns a title wanted by a finalizer.  By keeping cx->titleToShare
         * set till here, we ensure that such deadlocks are detected, which
         * results in the finalized object's title being shared (it must, of
         * course, have other, live objects sharing it).
         */
        cx->titleToShare = NULL;
    }

    JS_UNLOCK_GC(rt);
    return JS_FALSE;
}
607
/* Exported to js.c, which calls it via OBJ_GET_* and JSVAL_IS_* macros. */
/*
 * Thread-safe slot read. Fast paths, in order: non-native objects go
 * through getRequiredSlot; GC callers, sealed scopes, and successfully
 * claimed flyweight titles read without locking; an uncontended thin-lock
 * CAS covers the common multi-threaded case; everything else falls back
 * to the full js_LockObj/js_UnlockTitle protocol.
 */
JS_FRIEND_API(jsval)
js_GetSlotThreadSafe(JSContext *cx, JSObject *obj, uint32 slot)
{
    jsval v;
    JSScope *scope;
    JSTitle *title;
#ifndef NSPR_LOCK
    JSThinLock *tl;
    jsword me;
#endif

    /*
     * We handle non-native objects via JSObjectOps.getRequiredSlot, treating
     * all slots starting from 0 as required slots.  A property definition or
     * some prior arrangement must have allocated slot.
     *
     * Note once again (see jspubtd.h, before JSGetRequiredSlotOp's typedef)
     * the crucial distinction between a |required slot number| that's passed
     * to the get/setRequiredSlot JSObjectOps, and a |reserved slot index|
     * passed to the JS_Get/SetReservedSlot APIs.
     */
    if (!OBJ_IS_NATIVE(obj))
        return OBJ_GET_REQUIRED_SLOT(cx, obj, slot);

    /*
     * Native object locking is inlined here to optimize the single-threaded
     * and contention-free multi-threaded cases.
     */
    scope = OBJ_SCOPE(obj);
    title = &scope->title;
    JS_ASSERT(title->ownercx != cx);
    JS_ASSERT(slot < obj->map->freeslot);

    /*
     * Avoid locking if called from the GC.  Also avoid locking an object
     * owning a sealed scope.  If neither of those special cases applies, try
     * to claim scope's flyweight lock from whatever context may have had it in
     * an earlier request.
     */
    if (CX_THREAD_IS_RUNNING_GC(cx) ||
        (SCOPE_IS_SEALED(scope) && scope->object == obj) ||
        (title->ownercx && ClaimTitle(title, cx))) {
        return STOBJ_GET_SLOT(obj, slot);
    }

#ifndef NSPR_LOCK
    tl = &title->lock;
    me = CX_THINLOCK_ID(cx);
    JS_ASSERT(CURRENT_THREAD_IS_ME(me));
    if (NativeCompareAndSwap(&tl->owner, 0, me)) {
        /*
         * Got the lock with one compare-and-swap.  Even so, someone else may
         * have mutated obj so it now has its own scope and lock, which would
         * require either a restart from the top of this routine, or a thin
         * lock release followed by fat lock acquisition.
         */
        if (scope == OBJ_SCOPE(obj)) {
            v = STOBJ_GET_SLOT(obj, slot);
            if (!NativeCompareAndSwap(&tl->owner, me, 0)) {
                /* Assert that scope locks never revert to flyweight. */
                JS_ASSERT(title->ownercx != cx);
                LOGIT(scope, '1');
                title->u.count = 1;
                js_UnlockObj(cx, obj);
            }
            return v;
        }
        /* Scope changed under us: release the thin lock and retry fat. */
        if (!NativeCompareAndSwap(&tl->owner, me, 0))
            js_Dequeue(tl);
    }
    else if (Thin_RemoveWait(ReadWord(tl->owner)) == me) {
        /* We already hold the (possibly contended) thin lock. */
        return STOBJ_GET_SLOT(obj, slot);
    }
#endif

    js_LockObj(cx, obj);
    v = STOBJ_GET_SLOT(obj, slot);

    /*
     * Test whether cx took ownership of obj's scope during js_LockObj.
     *
     * This does not mean that a given scope reverted to flyweight from "thin"
     * or "fat" -- it does mean that obj's map pointer changed due to another
     * thread setting a property, requiring obj to cease sharing a prototype
     * object's scope (whose lock was not flyweight, else we wouldn't be here
     * in the first place!).
     */
    title = &OBJ_SCOPE(obj)->title;
    if (title->ownercx != cx)
        js_UnlockTitle(cx, title);
    return v;
}
701
/*
 * Thread-safe slot write; mirrors js_GetSlotThreadSafe's locking structure
 * exactly, with LOCKED_OBJ_WRITE_BARRIER as the slot-store primitive and an
 * up-front pass that makes any string value immutable before it can become
 * visible to other threads.
 */
void
js_SetSlotThreadSafe(JSContext *cx, JSObject *obj, uint32 slot, jsval v)
{
    JSTitle *title;
    JSScope *scope;
#ifndef NSPR_LOCK
    JSThinLock *tl;
    jsword me;
#endif

    /* Any string stored in a thread-safe object must be immutable. */
    if (JSVAL_IS_STRING(v) &&
        !js_MakeStringImmutable(cx, JSVAL_TO_STRING(v))) {
        /* FIXME bug 363059: See comments in js_FinishSharingScope. */
        v = JSVAL_NULL;
    }

    /*
     * We handle non-native objects via JSObjectOps.setRequiredSlot, as above
     * for the Get case.
     */
    if (!OBJ_IS_NATIVE(obj)) {
        OBJ_SET_REQUIRED_SLOT(cx, obj, slot, v);
        return;
    }

    /*
     * Native object locking is inlined here to optimize the single-threaded
     * and contention-free multi-threaded cases.
     */
    scope = OBJ_SCOPE(obj);
    title = &scope->title;
    JS_ASSERT(title->ownercx != cx);
    JS_ASSERT(slot < obj->map->freeslot);

    /*
     * Avoid locking if called from the GC.  Also avoid locking an object
     * owning a sealed scope.  If neither of those special cases applies, try
     * to claim scope's flyweight lock from whatever context may have had it in
     * an earlier request.
     */
    if (CX_THREAD_IS_RUNNING_GC(cx) ||
        (SCOPE_IS_SEALED(scope) && scope->object == obj) ||
        (title->ownercx && ClaimTitle(title, cx))) {
        LOCKED_OBJ_WRITE_BARRIER(cx, obj, slot, v);
        return;
    }

#ifndef NSPR_LOCK
    tl = &title->lock;
    me = CX_THINLOCK_ID(cx);
    JS_ASSERT(CURRENT_THREAD_IS_ME(me));
    if (NativeCompareAndSwap(&tl->owner, 0, me)) {
        /* Thin-lock fast path; see js_GetSlotThreadSafe for commentary. */
        if (scope == OBJ_SCOPE(obj)) {
            LOCKED_OBJ_WRITE_BARRIER(cx, obj, slot, v);
            if (!NativeCompareAndSwap(&tl->owner, me, 0)) {
                /* Assert that scope locks never revert to flyweight. */
                JS_ASSERT(title->ownercx != cx);
                LOGIT(scope, '1');
                title->u.count = 1;
                js_UnlockObj(cx, obj);
            }
            return;
        }
        /* Scope changed under us: release the thin lock and retry fat. */
        if (!NativeCompareAndSwap(&tl->owner, me, 0))
            js_Dequeue(tl);
    }
    else if (Thin_RemoveWait(ReadWord(tl->owner)) == me) {
        /* We already hold the (possibly contended) thin lock. */
        LOCKED_OBJ_WRITE_BARRIER(cx, obj, slot, v);
        return;
    }
#endif

    js_LockObj(cx, obj);
    LOCKED_OBJ_WRITE_BARRIER(cx, obj, slot, v);

    /*
     * Same drill as above, in js_GetSlotThreadSafe.
     */
    title = &OBJ_SCOPE(obj)->title;
    if (title->ownercx != cx)
        js_UnlockTitle(cx, title);
}
785
786 #ifndef NSPR_LOCK
787
788 static JSFatLock *
789 NewFatlock()
790 {
791 JSFatLock *fl = (JSFatLock *)malloc(sizeof(JSFatLock)); /* for now */
792 if (!fl) return NULL;
793 fl->susp = 0;
794 fl->next = NULL;
795 fl->prevp = NULL;
796 fl->slock = PR_NewLock();
797 fl->svar = PR_NewCondVar(fl->slock);
798 return fl;
799 }
800
801 static void
802 DestroyFatlock(JSFatLock *fl)
803 {
804 PR_DestroyLock(fl->slock);
805 PR_DestroyCondVar(fl->svar);
806 free(fl);
807 }
808
809 static JSFatLock *
810 ListOfFatlocks(int listc)
811 {
812 JSFatLock *m;
813 JSFatLock *m0;
814 int i;
815
816 JS_ASSERT(listc>0);
817 m0 = m = NewFatlock();
818 for (i=1; i<listc; i++) {
819 m->next = NewFatlock();
820 m = m->next;
821 }
822 return m0;
823 }
824
825 static void
826 DeleteListOfFatlocks(JSFatLock *m)
827 {
828 JSFatLock *m0;
829 for (; m; m=m0) {
830 m0 = m->next;
831 DestroyFatlock(m);
832 }
833 }
834
835 static JSFatLockTable *fl_list_table = NULL;
836 static uint32 fl_list_table_len = 0;
837 static uint32 fl_list_chunk_len = 0;
838
/*
 * Take a fat lock from the free list of the global-lock bucket hashed from
 * id, refilling the free list with a fresh chunk when it is empty, and move
 * the lock onto the bucket's doubly linked taken list.
 *
 * (i) the bucket's global lock is held by the caller.
 */
static JSFatLock *
GetFatlock(void *id)
{
    JSFatLock *m;

    uint32 i = GLOBAL_LOCK_INDEX(id);
    if (fl_list_table[i].free == NULL) {
#ifdef DEBUG
        if (fl_list_table[i].taken)
            printf("Ran out of fat locks!\n");
#endif
        fl_list_table[i].free = ListOfFatlocks(fl_list_chunk_len);
    }
    m = fl_list_table[i].free;
    fl_list_table[i].free = m->next;
    m->susp = 0;
    /* Push m onto the taken list, maintaining prevp backpointers. */
    m->next = fl_list_table[i].taken;
    m->prevp = &fl_list_table[i].taken;
    if (fl_list_table[i].taken)
        fl_list_table[i].taken->prevp = &m->next;
    fl_list_table[i].taken = m;
    return m;
}
862
/*
 * Return fat lock m to the free list of the bucket hashed from id, after
 * unlinking it from the taken list via its prevp backpointer.
 *
 * (i) the bucket's global lock is held by the caller.
 */
static void
PutFatlock(JSFatLock *m, void *id)
{
    uint32 i;
    if (m == NULL)
        return;

    /* Unlink m from fl_list_table[i].taken. */
    *m->prevp = m->next;
    if (m->next)
        m->next->prevp = m->prevp;

    /* Insert m in fl_list_table[i].free. */
    i = GLOBAL_LOCK_INDEX(id);
    m->next = fl_list_table[i].free;
    fl_list_table[i].free = m;
}
880
881 #endif /* !NSPR_LOCK */
882
883 JSBool
884 js_SetupLocks(int listc, int globc)
885 {
886 #ifndef NSPR_LOCK
887 uint32 i;
888
889 if (global_locks)
890 return JS_TRUE;
891 #ifdef DEBUG
892 if (listc > 10000 || listc < 0) /* listc == fat lock list chunk length */
893 printf("Bad number %d in js_SetupLocks()!\n", listc);
894 if (globc > 100 || globc < 0) /* globc == number of global locks */
895 printf("Bad number %d in js_SetupLocks()!\n", listc);
896 #endif
897 global_locks_log2 = JS_CeilingLog2(globc);
898 global_locks_mask = JS_BITMASK(global_locks_log2);
899 global_lock_count = JS_BIT(global_locks_log2);
900 global_locks = (PRLock **) malloc(global_lock_count * sizeof(PRLock*));
901 if (!global_locks)
902 return JS_FALSE;
903 for (i = 0; i < global_lock_count; i++) {
904 global_locks[i] = PR_NewLock();
905 if (!global_locks[i]) {
906 global_lock_count = i;
907 js_CleanupLocks();
908 return JS_FALSE;
909 }
910 }
911 fl_list_table = (JSFatLockTable *) malloc(i * sizeof(JSFatLockTable));
912 if (!fl_list_table) {
913 js_CleanupLocks();
914 return JS_FALSE;
915 }
916 fl_list_table_len = global_lock_count;
917 for (i = 0; i < global_lock_count; i++)
918 fl_list_table[i].free = fl_list_table[i].taken = NULL;
919 fl_list_chunk_len = listc;
920 #endif /* !NSPR_LOCK */
921 return JS_TRUE;
922 }
923
924 void
925 js_CleanupLocks()
926 {
927 #ifndef NSPR_LOCK
928 uint32 i;
929
930 if (global_locks) {
931 for (i = 0; i < global_lock_count; i++)
932 PR_DestroyLock(global_locks[i]);
933 free(global_locks);
934 global_locks = NULL;
935 global_lock_count = 1;
936 global_locks_log2 = 0;
937 global_locks_mask = 0;
938 }
939 if (fl_list_table) {
940 for (i = 0; i < fl_list_table_len; i++) {
941 DeleteListOfFatlocks(fl_list_table[i].free);
942 fl_list_table[i].free = NULL;
943 DeleteListOfFatlocks(fl_list_table[i].taken);
944 fl_list_table[i].taken = NULL;
945 }
946 free(fl_list_table);
947 fl_list_table = NULL;
948 fl_list_table_len = 0;
949 }
950 #endif /* !NSPR_LOCK */
951 }
952
953 #ifdef NSPR_LOCK
954
/*
 * NSPR_LOCK build: a "thin" lock is really a system lock stored in tl->fat
 * (allocated by js_InitLock); record the owner after acquiring it.
 */
static JS_ALWAYS_INLINE void
ThinLock(JSThinLock *tl, jsword me)
{
    JS_ACQUIRE_LOCK((JSLock *) tl->fat);
    tl->owner = me;
}
961
/*
 * NSPR_LOCK build counterpart to ThinLock: clear the owner before
 * releasing the underlying system lock. The owner id is unused here.
 */
static JS_ALWAYS_INLINE void
ThinUnlock(JSThinLock *tl, jsword /*me*/)
{
    tl->owner = 0;
    JS_RELEASE_LOCK((JSLock *) tl->fat);
}
968
969 #else
970
971 /*
972 * Fast locking and unlocking is implemented by delaying the allocation of a
973 * system lock (fat lock) until contention. As long as a locking thread A
974 * runs uncontended, the lock is represented solely by storing A's identity in
975 * the object being locked.
976 *
977 * If another thread B tries to lock the object currently locked by A, B is
978 * enqueued into a fat lock structure (which might have to be allocated and
979 * pointed to by the object), and suspended using NSPR conditional variables
980 * (wait). A wait bit (Bacon bit) is set in the lock word of the object,
981 * signalling to A that when releasing the lock, B must be dequeued and
982 * notified.
983 *
984 * The basic operation of the locking primitives (js_Lock, js_Unlock,
985 * js_Enqueue, and js_Dequeue) is compare-and-swap. Hence, when locking into
986 * the word pointed at by p, compare-and-swap(p, 0, A) success implies that p
987 * is unlocked. Similarly, when unlocking p, if compare-and-swap(p, A, 0)
988 * succeeds this implies that p is uncontended (no one is waiting because the
989 * wait bit is not set).
990 *
991 * When dequeueing, the lock is released, and one of the threads suspended on
992 * the lock is notified. If other threads still are waiting, the wait bit is
993 * kept (in js_Enqueue), and if not, the fat lock is deallocated.
994 *
995 * The functions js_Enqueue, js_Dequeue, js_SuspendThread, and js_ResumeThread
996 * are serialized using a global lock. For scalability, a hashtable of global
997 * locks is used, which is indexed modulo the thin lock pointer.
998 */
999
1000 /*
1001 * Invariants:
1002 * (i) global lock is held
1003 * (ii) fl->susp >= 0
1004 */
/*
 * Block the calling thread on tl's fat lock (allocating one on first
 * contention), dropping the global lock across the wait and reacquiring it
 * afterwards. When the last suspended thread resumes, the fat lock is
 * returned to its free list and tl->fat is cleared. Returns nonzero iff
 * tl->fat is NULL on exit (i.e. we were the last waiter), which js_Enqueue
 * uses to decide whether the wait bit must be kept.
 *
 * Invariants:
 * (i) global lock is held
 * (ii) fl->susp >= 0
 */
static int
js_SuspendThread(JSThinLock *tl)
{
    JSFatLock *fl;
    PRStatus stat;

    if (tl->fat == NULL)
        fl = tl->fat = GetFatlock(tl);
    else
        fl = tl->fat;
    JS_ASSERT(fl->susp >= 0);
    fl->susp++;
    PR_Lock(fl->slock);
    js_UnlockGlobal(tl);
    stat = PR_WaitCondVar(fl->svar, PR_INTERVAL_NO_TIMEOUT);
    JS_ASSERT(stat != PR_FAILURE);
    PR_Unlock(fl->slock);
    js_LockGlobal(tl);
    fl->susp--;
    if (fl->susp == 0) {
        /* Last waiter out: recycle the fat lock. */
        PutFatlock(fl, tl);
        tl->fat = NULL;
    }
    return tl->fat == NULL;
}
1030
1031 /*
1032 * (i) global lock is held
1033 * (ii) fl->susp > 0
1034 */
/*
 * Wake one thread suspended on tl's fat lock. Takes fl->slock before
 * releasing the global lock (so the notify cannot be missed), then
 * notifies and drops fl->slock. Note the global lock is NOT reacquired —
 * the caller (js_Dequeue) returns without it.
 *
 * (i) global lock is held on entry
 * (ii) fl->susp > 0
 */
static void
js_ResumeThread(JSThinLock *tl)
{
    JSFatLock *fl = tl->fat;
    PRStatus stat;

    JS_ASSERT(fl != NULL);
    JS_ASSERT(fl->susp > 0);
    PR_Lock(fl->slock);
    js_UnlockGlobal(tl);
    stat = PR_NotifyCondVar(fl->svar);
    JS_ASSERT(stat != PR_FAILURE);
    PR_Unlock(fl->slock);
}
1049
/*
 * Slow-path acquire: under the global lock, repeatedly either (a) set the
 * wait bit on the current owner's word and suspend until woken, or (b) if
 * the lock is free, CAS ourselves in and return. After a suspend,
 * js_SuspendThread's result tells us whether other waiters remain: if so,
 * we must re-install the wait bit in our own id when we finally CAS in,
 * so the eventual unlock takes the dequeue path.
 */
static void
js_Enqueue(JSThinLock *tl, jsword me)
{
    jsword o, n;

    js_LockGlobal(tl);
    for (;;) {
        o = ReadWord(tl->owner);
        n = Thin_SetWait(o);
        if (o != 0 && NativeCompareAndSwap(&tl->owner, o, n)) {
            /* Wait bit set on the owner; sleep until dequeued. */
            if (js_SuspendThread(tl))
                me = Thin_RemoveWait(me);
            else
                me = Thin_SetWait(me);
        }
        else if (NativeCompareAndSwap(&tl->owner, 0, me)) {
            js_UnlockGlobal(tl);
            return;
        }
    }
}
1071
/*
 * Slow-path release: the lock word must carry the wait bit and a fat lock
 * must exist. Clear the word (the CAS cannot fail: we own the lock and
 * the word only changes under the global lock, which we hold), then wake
 * one waiter. js_ResumeThread releases the global lock for us.
 */
static void
js_Dequeue(JSThinLock *tl)
{
    jsword o;

    js_LockGlobal(tl);
    o = ReadWord(tl->owner);
    JS_ASSERT(Thin_GetWait(o) != 0);
    JS_ASSERT(tl->fat != NULL);
    if (!NativeCompareAndSwap(&tl->owner, o, 0)) /* release it */
        JS_ASSERT(0);
    js_ResumeThread(tl);
}
1085
/*
 * Acquire tl for thread id me.  Fast path: one CAS from 0 (unowned) to me.
 * On contention, fall into js_Enqueue.  Relocking by the current owner is
 * a usage error -- thin locks are not recursive; nesting is counted at a
 * higher level (see title->u.count in js_LockTitle).
 */
static JS_ALWAYS_INLINE void
ThinLock(JSThinLock *tl, jsword me)
{
    JS_ASSERT(CURRENT_THREAD_IS_ME(me));
    if (NativeCompareAndSwap(&tl->owner, 0, me))
        return;
    if (Thin_RemoveWait(ReadWord(tl->owner)) != me)
        js_Enqueue(tl, me);
#ifdef DEBUG
    else
        JS_ASSERT(0);
#endif
}
1099
/*
 * Release tl, held by thread id me.  Fast path: one CAS from me back to 0.
 * If the CAS fails, a waiter must have set the wait bit, so hand off via
 * js_Dequeue.
 */
static JS_ALWAYS_INLINE void
ThinUnlock(JSThinLock *tl, jsword me)
{
    JS_ASSERT(CURRENT_THREAD_IS_ME(me));

    /*
     * Since we can race with the NativeCompareAndSwap in js_Enqueue, we need
     * to use a C_A_S here as well -- Arjan van de Ven 30/1/08
     */
    if (NativeCompareAndSwap(&tl->owner, me, 0))
        return;

    /*
     * NOTE(review): this assert reads tl->owner directly, unlike the
     * ReadWord-guarded read below -- debug-only, but inconsistent.
     */
    JS_ASSERT(Thin_GetWait(tl->owner));
    if (Thin_RemoveWait(ReadWord(tl->owner)) == me)
        js_Dequeue(tl);
#ifdef DEBUG
    else
        JS_ASSERT(0); /* unbalanced unlock */
#endif
}
1120
1121 #endif /* !NSPR_LOCK */
1122
1123 void
1124 js_Lock(JSContext *cx, JSThinLock *tl)
1125 {
1126 ThinLock(tl, CX_THINLOCK_ID(cx));
1127 }
1128
1129 void
1130 js_Unlock(JSContext *cx, JSThinLock *tl)
1131 {
1132 ThinUnlock(tl, CX_THINLOCK_ID(cx));
1133 }
1134
/*
 * Acquire the per-runtime NSPR lock.  In DEBUG builds the owner thread id
 * is recorded -- only after the lock is held -- so js_IsRuntimeLocked can
 * assert ownership.
 */
void
js_LockRuntime(JSRuntime *rt)
{
    PR_Lock(rt->rtLock);
#ifdef DEBUG
    rt->rtLockOwner = js_CurrentThreadId();
#endif
}
1143
/*
 * Release the per-runtime lock.  The DEBUG owner field is cleared while
 * the lock is still held, so it never names a thread that no longer owns
 * the lock.
 */
void
js_UnlockRuntime(JSRuntime *rt)
{
#ifdef DEBUG
    rt->rtLockOwner = 0;
#endif
    PR_Unlock(rt->rtLock);
}
1152
/*
 * Lock title for cx's thread.  No-op when the GC thread calls (the GC
 * implicitly "locks" every title; see js_IsTitleLocked).  An exclusively
 * owned title may be claimed outright via ClaimTitle.  Otherwise, a
 * re-entrant lock by the current thin-lock holder just bumps the nesting
 * count; a fresh acquisition takes the thin lock and starts the count at 1.
 */
void
js_LockTitle(JSContext *cx, JSTitle *title)
{
    jsword me = CX_THINLOCK_ID(cx);

    JS_ASSERT(CURRENT_THREAD_IS_ME(me));
    JS_ASSERT(title->ownercx != cx);
    if (CX_THREAD_IS_RUNNING_GC(cx))
        return;
    if (title->ownercx && ClaimTitle(title, cx))
        return;

    if (Thin_RemoveWait(ReadWord(title->lock.owner)) == me) {
        /* Already held by this thread: count the nesting level. */
        JS_ASSERT(title->u.count > 0);
        LOGIT(scope, '+');
        title->u.count++;
    } else {
        ThinLock(&title->lock, me);
        JS_ASSERT(title->u.count == 0);
        LOGIT(scope, '1');
        title->u.count = 1;
    }
}
1176
/*
 * Unlock title, undoing one js_LockTitle.  No-op for the GC thread; a
 * sealed title recorded on cx is simply unrecorded.  Only when the nesting
 * count drops to zero is the underlying thin lock actually released.
 */
void
js_UnlockTitle(JSContext *cx, JSTitle *title)
{
    jsword me = CX_THINLOCK_ID(cx);

    /* We hope compilers use me instead of reloading cx->thread in the macro. */
    if (CX_THREAD_IS_RUNNING_GC(cx))
        return;
    if (cx->lockedSealedTitle == title) {
        cx->lockedSealedTitle = NULL;
        return;
    }

    /*
     * If title->ownercx is not null, it's likely that two contexts not using
     * requests nested locks for title. The first context, cx here, claimed
     * title; the second, title->ownercx here, re-claimed it because the first
     * was not in a request, or was on the same thread. We don't want to keep
     * track of such nesting, because it penalizes the common non-nested case.
     * Instead of asserting here and silently coping, we simply re-claim title
     * for cx and return.
     *
     * See http://bugzilla.mozilla.org/show_bug.cgi?id=229200 for a real world
     * case where an asymmetric thread model (Mozilla's main thread is known
     * to be the only thread that runs the GC) combined with multiple contexts
     * per thread has led to such request-less nesting.
     */
    if (title->ownercx) {
        JS_ASSERT(title->u.count == 0);
        JS_ASSERT(title->lock.owner == 0);
        title->ownercx = cx;
        return;
    }

    JS_ASSERT(title->u.count > 0);
    if (Thin_RemoveWait(ReadWord(title->lock.owner)) != me) {
        JS_ASSERT(0); /* unbalanced unlock */
        return;
    }
    LOGIT(scope, '-');
    if (--title->u.count == 0)
        ThinUnlock(&title->lock, me);
}
1220
/*
 * Transfer lock state from oldtitle to newtitle when an object's scope
 * changes while locked; the caller must hold newtitle's lock.
 *
 * NB: oldtitle may be null if our caller is js_GetMutableScope and it just
 * dropped the last reference to oldtitle.
 */
void
js_TransferTitle(JSContext *cx, JSTitle *oldtitle, JSTitle *newtitle)
{
    JS_ASSERT(JS_IS_TITLE_LOCKED(cx, newtitle));

    /*
     * If the last reference to oldtitle went away, newtitle needs no lock
     * state update.
     */
    if (!oldtitle)
        return;
    JS_ASSERT(JS_IS_TITLE_LOCKED(cx, oldtitle));

    /*
     * Special case in js_LockTitle and js_UnlockTitle for the GC calling
     * code that locks, unlocks, or mutates. Nothing to do in these cases,
     * because title and newtitle were "locked" by the GC thread, so neither
     * was actually locked.
     */
    if (CX_THREAD_IS_RUNNING_GC(cx))
        return;

    /*
     * Special case in js_LockObj and js_UnlockTitle for locking the sealed
     * scope of an object that owns that scope (the prototype or mutated obj
     * for which OBJ_SCOPE(obj)->object == obj), and unlocking it.
     */
    JS_ASSERT(cx->lockedSealedTitle != newtitle);
    if (cx->lockedSealedTitle == oldtitle) {
        JS_ASSERT(newtitle->ownercx == cx ||
                  (!newtitle->ownercx && newtitle->u.count == 1));
        cx->lockedSealedTitle = NULL;
        return;
    }

    /*
     * If oldtitle is single-threaded, there's nothing to do.
     */
    if (oldtitle->ownercx) {
        JS_ASSERT(oldtitle->ownercx == cx);
        JS_ASSERT(newtitle->ownercx == cx ||
                  (!newtitle->ownercx && newtitle->u.count == 1));
        return;
    }

    /*
     * We transfer oldtitle->u.count only if newtitle is not single-threaded.
     * Flow unwinds from here through some number of JS_UNLOCK_TITLE and/or
     * JS_UNLOCK_OBJ macro calls, which will decrement newtitle->u.count only
     * if they find newtitle->ownercx != cx.
     */
    if (newtitle->ownercx != cx) {
        JS_ASSERT(!newtitle->ownercx);
        newtitle->u.count = oldtitle->u.count;
    }

    /*
     * Reset oldtitle's lock state so that it is completely unlocked.
     */
    LOGIT(oldscope, '0');
    oldtitle->u.count = 0;
    ThinUnlock(&oldtitle->lock, CX_THINLOCK_ID(cx));
}
1288
/*
 * Lock the title of obj's scope, retrying if a mutator gives obj a new
 * scope between our read of OBJ_SCOPE and the lock acquisition.  A sealed
 * scope owned by obj itself is not actually locked: it is recorded in
 * cx->lockedSealedTitle and released by the matching special case in
 * js_UnlockTitle.
 */
void
js_LockObj(JSContext *cx, JSObject *obj)
{
    JSScope *scope;
    JSTitle *title;

    JS_ASSERT(OBJ_IS_NATIVE(obj));

    /*
     * We must test whether the GC is calling and return without mutating any
     * state, especially cx->lockedSealedScope. Note asymmetry with respect to
     * js_UnlockObj, which is a thin-layer on top of js_UnlockTitle.
     */
    if (CX_THREAD_IS_RUNNING_GC(cx))
        return;

    for (;;) {
        scope = OBJ_SCOPE(obj);
        title = &scope->title;
        if (SCOPE_IS_SEALED(scope) && scope->object == obj &&
            !cx->lockedSealedTitle) {
            cx->lockedSealedTitle = title;
            return;
        }

        js_LockTitle(cx, title);

        /* If obj still has this scope, we're done. */
        if (scope == OBJ_SCOPE(obj))
            return;

        /* Lost a race with a mutator; retry with obj's new scope. */
        js_UnlockTitle(cx, title);
    }
}
1324
1325 void
1326 js_UnlockObj(JSContext *cx, JSObject *obj)
1327 {
1328 JS_ASSERT(OBJ_IS_NATIVE(obj));
1329 js_UnlockTitle(cx, &OBJ_SCOPE(obj)->title);
1330 }
1331
1332 void
1333 js_InitTitle(JSContext *cx, JSTitle *title)
1334 {
1335 #ifdef JS_THREADSAFE
1336 title->ownercx = cx;
1337 memset(&title->lock, 0, sizeof title->lock);
1338
1339 /*
1340 * Set u.link = NULL, not u.count = 0, in case the target architecture's
1341 * null pointer has a non-zero integer representation.
1342 */
1343 title->u.link = NULL;
1344
1345 #ifdef JS_DEBUG_TITLE_LOCKS
1346 title->file[0] = title->file[1] = title->file[2] = title->file[3] = NULL;
1347 title->line[0] = title->line[1] = title->line[2] = title->line[3] = 0;
1348 #endif
1349 #endif
1350 }
1351
/*
 * Tear down title's lock state.  The title must no longer be contended
 * (nesting count zero), so ownercx can be reset to cx before the thin
 * lock's resources are released via js_FinishLock.
 */
void
js_FinishTitle(JSContext *cx, JSTitle *title)
{
#ifdef JS_THREADSAFE
    /* Title must be single-threaded at this point, so set ownercx. */
    JS_ASSERT(title->u.count == 0);
    title->ownercx = cx;
    js_FinishLock(&title->lock);
#endif
}
1362
1363 #ifdef DEBUG
1364
1365 JSBool
1366 js_IsRuntimeLocked(JSRuntime *rt)
1367 {
1368 return js_CurrentThreadId() == rt->rtLockOwner;
1369 }
1370
1371 JSBool
1372 js_IsObjLocked(JSContext *cx, JSObject *obj)
1373 {
1374 JSScope *scope = OBJ_SCOPE(obj);
1375
1376 return MAP_IS_NATIVE(&scope->map) && js_IsTitleLocked(cx, &scope->title);
1377 }
1378
1379 JSBool
1380 js_IsTitleLocked(JSContext *cx, JSTitle *title)
1381 {
1382 /* Special case: the GC locking any object's title, see js_LockTitle. */
1383 if (CX_THREAD_IS_RUNNING_GC(cx))
1384 return JS_TRUE;
1385
1386 /* Special case: locked object owning a sealed scope, see js_LockObj. */
1387 if (cx->lockedSealedTitle == title)
1388 return JS_TRUE;
1389
1390 /*
1391 * General case: the title is either exclusively owned (by cx), or it has
1392 * a thin or fat lock to cope with shared (concurrent) ownership.
1393 */
1394 if (title->ownercx) {
1395 JS_ASSERT(title->ownercx == cx || title->ownercx->thread == cx->thread);
1396 return JS_TRUE;
1397 }
1398 return js_CurrentThreadId() ==
1399 ((JSThread *)Thin_RemoveWait(ReadWord(title->lock.owner)))->id;
1400 }
1401
1402 #ifdef JS_DEBUG_TITLE_LOCKS
1403 void
1404 js_SetScopeInfo(JSScope *scope, const char *file, int line)
1405 {
1406 JSTitle *title = &scope->title;
1407 if (!title->ownercx) {
1408 jsrefcount count = title->u.count;
1409 JS_ASSERT_IF(!SCOPE_IS_SEALED(scope), count > 0);
1410 JS_ASSERT(count <= 4);
1411 title->file[count - 1] = file;
1412 title->line[count - 1] = line;
1413 }
1414 }
1415 #endif /* JS_DEBUG_TITLE_LOCKS */
1416 #endif /* DEBUG */
1417 #endif /* JS_THREADSAFE */

  ViewVC Help
Powered by ViewVC 1.1.24