/[jscoverage]/trunk/js/jslock.cpp
ViewVC logotype

Annotation of /trunk/js/jslock.cpp

Parent Directory Parent Directory | Revision Log Revision Log


Revision 585 - (hide annotations)
Sun Sep 12 15:13:23 2010 UTC (11 years, 9 months ago) by siliconforks
File size: 43773 byte(s)
Update to SpiderMonkey from Firefox 3.6.9.

1 siliconforks 507 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2 siliconforks 332 *
3     * ***** BEGIN LICENSE BLOCK *****
4     * Version: MPL 1.1/GPL 2.0/LGPL 2.1
5     *
6     * The contents of this file are subject to the Mozilla Public License Version
7     * 1.1 (the "License"); you may not use this file except in compliance with
8     * the License. You may obtain a copy of the License at
9     * http://www.mozilla.org/MPL/
10     *
11     * Software distributed under the License is distributed on an "AS IS" basis,
12     * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
13     * for the specific language governing rights and limitations under the
14     * License.
15     *
16     * The Original Code is Mozilla Communicator client code, released
17     * March 31, 1998.
18     *
19     * The Initial Developer of the Original Code is
20     * Netscape Communications Corporation.
21     * Portions created by the Initial Developer are Copyright (C) 1998
22     * the Initial Developer. All Rights Reserved.
23     *
24     * Contributor(s):
25     *
26     * Alternatively, the contents of this file may be used under the terms of
27     * either of the GNU General Public License Version 2 or later (the "GPL"),
28     * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
29     * in which case the provisions of the GPL or the LGPL are applicable instead
30     * of those above. If you wish to allow use of your version of this file only
31     * under the terms of either the GPL or the LGPL, and not to allow others to
32     * use your version of this file under the terms of the MPL, indicate your
33     * decision by deleting the provisions above and replace them with the notice
34     * and other provisions required by the GPL or the LGPL. If you do not delete
35     * the provisions above, a recipient may use your version of this file under
36     * the terms of any one of the MPL, the GPL or the LGPL.
37     *
38     * ***** END LICENSE BLOCK ***** */
39    
40     #ifdef JS_THREADSAFE
41    
42     /*
43     * JS locking stubs.
44     */
45     #include <stdlib.h>
46     #include <string.h>
47     #include "jspubtd.h"
48     #include "jsutil.h" /* Added by JSIFY */
49     #include "jstypes.h"
50 siliconforks 507 #include "jsstdint.h"
51 siliconforks 332 #include "jsbit.h"
52     #include "jscntxt.h"
53     #include "jsdtoa.h"
54     #include "jsgc.h"
55 siliconforks 507 #include "jsfun.h" /* for VALUE_IS_FUNCTION from LOCKED_OBJ_WRITE_SLOT */
56 siliconforks 332 #include "jslock.h"
57     #include "jsscope.h"
58     #include "jsstr.h"
59    
60     #define ReadWord(W) (W)
61    
62 siliconforks 507 #if !defined(__GNUC__)
63     # define __asm__ asm
64     # define __volatile__ volatile
65     #endif
66    
67 siliconforks 332 /* Implement NativeCompareAndSwap. */
68    
69     #if defined(_WIN32) && defined(_M_IX86)
70     #pragma warning( disable : 4035 )
71     JS_BEGIN_EXTERN_C
72     extern long __cdecl
73     _InterlockedCompareExchange(long *volatile dest, long exchange, long comp);
74     JS_END_EXTERN_C
75     #pragma intrinsic(_InterlockedCompareExchange)
76    
77 siliconforks 460 JS_STATIC_ASSERT(sizeof(jsword) == sizeof(long));
78    
/*
 * Compare-and-swap helper for MSVC/x86.
 *
 * NOTE(review): this deliberately relies on two MSVC-specific behaviors:
 * the _InterlockedCompareExchange intrinsic leaving ZF set iff the exchange
 * succeeded, and a function with no return statement returning whatever is
 * in EAX (warning C4035 is disabled above for exactly this reason).  The
 * `sete al` captures ZF into the low byte of the implicit EAX return value.
 */
static JS_ALWAYS_INLINE int
NativeCompareAndSwapHelper(jsword *w, jsword ov, jsword nv)
{
    _InterlockedCompareExchange((long*) w, nv, ov);
    __asm {
        sete al
    }
}
87    
/* Return non-zero iff *w equaled ov and was atomically replaced by nv. */
static JS_ALWAYS_INLINE int
NativeCompareAndSwap(jsword *w, jsword ov, jsword nv)
{
    /* Only AL holds the sete result; mask off the undefined upper bits. */
    return (NativeCompareAndSwapHelper(w, ov, nv) & 1);
}
93    
94 siliconforks 507 #elif defined(_MSC_VER) && (defined(_M_AMD64) || defined(_M_X64))
95     JS_BEGIN_EXTERN_C
96     extern long long __cdecl
97     _InterlockedCompareExchange64(long long *volatile dest, long long exchange, long long comp);
98     JS_END_EXTERN_C
99     #pragma intrinsic(_InterlockedCompareExchange64)
100    
/* Return non-zero iff *w equaled ov and was atomically replaced by nv. */
static JS_ALWAYS_INLINE int
NativeCompareAndSwap(jsword *w, jsword ov, jsword nv)
{
    /* The intrinsic returns the previous *w; the swap happened iff it was ov. */
    return _InterlockedCompareExchange64(w, nv, ov) == ov;
}
106    
107 siliconforks 332 #elif defined(XP_MACOSX) || defined(DARWIN)
108    
109     #include <libkern/OSAtomic.h>
110    
/*
 * Return non-zero iff *w equaled ov and was atomically replaced by nv.
 * The Ptr variant matches jsword's pointer width; Barrier gives a full
 * memory barrier around the operation.
 */
static JS_ALWAYS_INLINE int
NativeCompareAndSwap(jsword *w, jsword ov, jsword nv)
{
    /* Details on these functions available in the manpage for atomic */
    return OSAtomicCompareAndSwapPtrBarrier(ov, nv, w);
}
117    
118 siliconforks 507 #elif defined(__i386) && (defined(__GNUC__) || defined(__SUNPRO_CC))
119 siliconforks 332
120     /* Note: This fails on 386 cpus, cmpxchgl is a >= 486 instruction */
/*
 * Return non-zero iff *w equaled ov and was atomically replaced by nv.
 * Note: This fails on 386 cpus, cmpxchgl is a >= 486 instruction.
 */
static JS_ALWAYS_INLINE int
NativeCompareAndSwap(jsword *w, jsword ov, jsword nv)
{
    unsigned int res;

    /*
     * lock cmpxchgl compares EAX (ov) with *w and stores nv on match;
     * sete captures ZF, andl normalizes EAX to 0 or 1.
     */
    __asm__ __volatile__ (
                          "lock\n"
                          "cmpxchgl %2, (%1)\n"
                          "sete %%al\n"
                          "andl $1, %%eax\n"
                          : "=a" (res)
                          : "r" (w), "r" (nv), "a" (ov)
                          : "cc", "memory");
    return (int)res;
}
136    
137 siliconforks 507 #elif defined(__x86_64) && (defined(__GNUC__) || defined(__SUNPRO_CC))
138    
/* Return non-zero iff *w equaled ov and was atomically replaced by nv. */
static JS_ALWAYS_INLINE int
NativeCompareAndSwap(jsword *w, jsword ov, jsword nv)
{
    unsigned int res;

    /*
     * 64-bit variant: cmpxchgq compares RAX (ov) with *w; sete captures ZF
     * and movzbl zero-extends the 0/1 result into EAX.
     */
    __asm__ __volatile__ (
                          "lock\n"
                          "cmpxchgq %2, (%1)\n"
                          "sete %%al\n"
                          "movzbl %%al, %%eax\n"
                          : "=a" (res)
                          : "r" (w), "r" (nv), "a" (ov)
                          : "cc", "memory");
    return (int)res;
}
154    
155 siliconforks 507 #elif defined(__sparc) && (defined(__GNUC__) || defined(__SUNPRO_CC))
156 siliconforks 332
/*
 * Return non-zero iff *w equaled ov and was atomically replaced by nv.
 *
 * stbar orders prior stores before the cas; "be,a" uses an annulled delay
 * slot so "mov 1,%0" executes only on the equal (success) path, with the
 * fall-through "mov 0,%0" covering failure.
 *
 * NOTE(review): unlike the x86 variants, this asm declares no "cc"/"memory"
 * clobbers -- confirm the compiler cannot cache *w across this statement.
 */
static JS_ALWAYS_INLINE int
NativeCompareAndSwap(jsword *w, jsword ov, jsword nv)
{
    unsigned int res;

    __asm__ __volatile__ (
                  "stbar\n"
                  "cas [%1],%2,%3\n"
                  "cmp %2,%3\n"
                  "be,a 1f\n"
                  "mov 1,%0\n"
                  "mov 0,%0\n"
                  "1:"
                  : "=r" (res)
                  : "r" (w), "r" (ov), "r" (nv));
    return (int)res;
}
174    
175     #elif defined(AIX)
176    
177     #include <sys/atomic_op.h>
178    
/*
 * Return non-zero iff *w equaled ov and was atomically replaced by nv,
 * using AIX's compare_and_swaplp on long-sized words.  The isync on
 * success acts as an acquire barrier so later loads cannot be satisfied
 * before the swap is observed.
 */
static JS_ALWAYS_INLINE int
NativeCompareAndSwap(jsword *w, jsword ov, jsword nv)
{
    int res;
    JS_STATIC_ASSERT(sizeof(jsword) == sizeof(long));

    res = compare_and_swaplp((atomic_l)w, &ov, nv);
    if (res)
        __asm__("isync");
    return res;
}
190    
191     #elif defined(USE_ARM_KUSER)
192    
193     /* See https://bugzilla.mozilla.org/show_bug.cgi?id=429387 for a
194     * description of this ABI; this is a function provided at a fixed
195     * location by the kernel in the memory space of each process.
196     */
197     typedef int (__kernel_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
198     #define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
199    
200     JS_STATIC_ASSERT(sizeof(jsword) == sizeof(int));
201    
/*
 * Return non-zero iff *w equaled ov and was atomically replaced by nv,
 * via the ARM Linux kernel cmpxchg helper page (see the ABI comment above).
 */
static JS_ALWAYS_INLINE int
NativeCompareAndSwap(jsword *w, jsword ov, jsword nv)
{
    volatile int *vp = (volatile int *) w;
    PRInt32 failed = 1;

    /*
     * Loop until a __kernel_cmpxchg succeeds. See bug 446169: the helper
     * can fail spuriously, so retry as long as *vp still matches ov.
     */
    do {
        failed = __kernel_cmpxchg(ov, nv, vp);
    } while (failed && *vp == ov);
    return !failed;
}
214    
215     #elif JS_HAS_NATIVE_COMPARE_AND_SWAP
216    
217     #error "JS_HAS_NATIVE_COMPARE_AND_SWAP should be 0 if your platform lacks a compare-and-swap instruction."
218    
219     #endif /* arch-tests */
220    
221     #if JS_HAS_NATIVE_COMPARE_AND_SWAP
222    
223     JSBool
224     js_CompareAndSwap(jsword *w, jsword ov, jsword nv)
225     {
226     return !!NativeCompareAndSwap(w, ov, nv);
227     }
228    
229     #elif defined(NSPR_LOCK)
230    
231     # ifdef __GNUC__
232 siliconforks 507 # warning "js_CompareAndSwap is implemented using NSPR lock"
233 siliconforks 332 # endif
234    
/*
 * Fallback compare-and-swap emulated under a single global NSPR lock.
 *
 * NOTE(review): the function-scope static is initialized by a runtime call;
 * pre-C++11 that initialization is not itself thread-safe, and the lock is
 * never destroyed.  Presumably acceptable here since this path is a build
 * of last resort -- confirm before reusing the pattern.
 */
JSBool
js_CompareAndSwap(jsword *w, jsword ov, jsword nv)
{
    int result;
    static PRLock *CompareAndSwapLock = JS_NEW_LOCK();

    JS_ACQUIRE_LOCK(CompareAndSwapLock);
    result = (*w == ov);
    if (result)
        *w = nv;
    JS_RELEASE_LOCK(CompareAndSwapLock);
    return result;
}
248    
249     #else /* !defined(NSPR_LOCK) */
250    
251     #error "NSPR_LOCK should be on when the platform lacks native compare-and-swap."
252    
253     #endif
254    
255 siliconforks 460 void
256     js_AtomicSetMask(jsword *w, jsword mask)
257     {
258     jsword ov, nv;
259    
260     do {
261     ov = *w;
262     nv = ov | mask;
263     } while (!js_CompareAndSwap(w, ov, nv));
264     }
265    
266 siliconforks 332 #ifndef NSPR_LOCK
267    
/*
 * A "fat" lock, allocated when a thin lock sees contention: a real NSPR
 * lock/condvar pair.  susp counts suspended waiters; next/prevp doubly link
 * the lock into the free or taken list of its JSFatLockTable bucket.
 */
struct JSFatLock {
    int susp;
    PRLock *slock;
    PRCondVar *svar;
    JSFatLock *next;
    JSFatLock **prevp;
};

/* Per-bucket pools of fat locks: a free list and the currently-taken list. */
typedef struct JSFatLockTable {
    JSFatLock *free;
    JSFatLock *taken;
} JSFatLockTable;
280    
/*
 * Map a pointer-sized id to a global-lock bucket: shift off the low two
 * alignment bits, then mask down to the table size (a power of two).
 */
#define GLOBAL_LOCK_INDEX(id) (((uint32)(jsuword)(id)>>2) & global_locks_mask)

static void
js_Dequeue(JSThinLock *);

/* Global lock table; count is a power of two, mask = count - 1. */
static PRLock **global_locks;
static uint32 global_lock_count = 1;
static uint32 global_locks_log2 = 0;
static uint32 global_locks_mask = 0;
290    
291     static void
292     js_LockGlobal(void *id)
293     {
294     uint32 i = GLOBAL_LOCK_INDEX(id);
295     PR_Lock(global_locks[i]);
296     }
297    
298     static void
299     js_UnlockGlobal(void *id)
300     {
301     uint32 i = GLOBAL_LOCK_INDEX(id);
302     PR_Unlock(global_locks[i]);
303     }
304    
305     #endif /* !NSPR_LOCK */
306    
/*
 * Initialize a thin lock to the unowned state.  NSPR-only builds skip the
 * thin-lock machinery entirely and allocate a fat NSPR lock up front.
 */
void
js_InitLock(JSThinLock *tl)
{
#ifdef NSPR_LOCK
    tl->owner = 0;
    tl->fat = (JSFatLock*)JS_NEW_LOCK();
#else
    /* owner == 0 and fat == NULL: unowned, uncontended. */
    memset(tl, 0, sizeof(JSThinLock));
#endif
}
317    
/*
 * Tear down a thin lock.  Thin-lock builds assert it is unowned and has no
 * fat lock; NSPR builds poison the owner word and destroy the NSPR lock.
 */
void
js_FinishLock(JSThinLock *tl)
{
#ifdef NSPR_LOCK
    tl->owner = 0xdeadbeef;     /* poison to catch use-after-finish */
    if (tl->fat)
        JS_DESTROY_LOCK(((JSLock*)tl->fat));
#else
    JS_ASSERT(tl->owner == 0);
    JS_ASSERT(tl->fat == NULL);
#endif
}
330    
331     #ifdef DEBUG_SCOPE_COUNT
332    
333     #include <stdio.h>
334     #include "jsdhash.h"
335    
336 siliconforks 460 static FILE *logfp = NULL;
337 siliconforks 332 static JSDHashTable logtbl;
338    
339     typedef struct logentry {
340     JSDHashEntryStub stub;
341     char op;
342     const char *file;
343     int line;
344     } logentry;
345    
/*
 * DEBUG_SCOPE_COUNT only: append a lock-op record for title to
 * /tmp/scope.log and remember the most recent op per title in logtbl
 * (keyed by title pointer).  Best-effort: silently returns if the log
 * file or hash table cannot be set up.
 */
static void
logit(JSTitle *title, char op, const char *file, int line)
{
    logentry *entry;

    if (!logfp) {
        logfp = fopen("/tmp/scope.log", "w");
        if (!logfp)
            return;
        /* Unbuffered so records survive a crash. */
        setvbuf(logfp, NULL, _IONBF, 0);
    }
    fprintf(logfp, "%p %d %c %s %d\n", title, title->u.count, op, file, line);

    if (!logtbl.entryStore &&
        !JS_DHashTableInit(&logtbl, JS_DHashGetStubOps(), NULL,
                           sizeof(logentry), 100)) {
        return;
    }
    entry = (logentry *) JS_DHashTableOperate(&logtbl, title, JS_DHASH_ADD);
    if (!entry)
        return;
    entry->stub.key = title;
    entry->op = op;
    entry->file = file;
    entry->line = line;
}
372    
373     void
374 siliconforks 460 js_unlog_title(JSTitle *title)
375 siliconforks 332 {
376     if (!logtbl.entryStore)
377     return;
378 siliconforks 460 (void) JS_DHashTableOperate(&logtbl, title, JS_DHASH_REMOVE);
379 siliconforks 332 }
380    
381 siliconforks 460 # define LOGIT(title,op) logit(title, op, __FILE__, __LINE__)
382 siliconforks 332
383     #else
384    
385 siliconforks 460 # define LOGIT(title, op) /* nothing */
386 siliconforks 332
387     #endif /* DEBUG_SCOPE_COUNT */
388    
389     /*
390 siliconforks 460 * Return true if we would deadlock waiting in ClaimTitle on
391     * rt->titleSharingDone until ownercx finishes its request and shares a title.
392 siliconforks 332 *
393     * (i) rt->gcLock held
394     */
/*
 * Walk the waits-for chain starting at ownercx: each hop follows the title
 * that ownercx's thread is itself blocked on.  Return true iff the chain
 * leads back to `thread` (a cycle, i.e. waiting would deadlock).  The chain
 * is finite because each waiting thread waits on exactly one title.
 */
static bool
WillDeadlock(JSContext *ownercx, JSThread *thread)
{
    JS_ASSERT(CURRENT_THREAD_IS_ME(thread));
    JS_ASSERT(ownercx->thread != thread);

    for (;;) {
        JS_ASSERT(ownercx->thread);
        JS_ASSERT(ownercx->requestDepth > 0);
        JSTitle *title = ownercx->thread->titleToShare;
        if (!title || !title->ownercx) {
            /*
             * ownercx->thread doesn't wait or has just been notified that the
             * title became shared.
             */
            return false;
        }

        /*
         * ownercx->thread is waiting in ClaimTitle for a context from some
         * thread to finish its request. If that thread is the current thread,
         * we would deadlock. Otherwise we must recursively check if that
         * thread waits for the current thread.
         */
        if (title->ownercx->thread == thread) {
            JS_RUNTIME_METER(ownercx->runtime, deadlocksAvoided);
            return true;
        }
        ownercx = title->ownercx;
    }
}
426    
427 siliconforks 460 static void
428     FinishSharingTitle(JSContext *cx, JSTitle *title);
429    
430 siliconforks 332 /*
431     * Make title multi-threaded, i.e. share its ownership among contexts in rt
432     * using a "thin" or (if necessary due to contention) "fat" lock. Called only
433     * from ClaimTitle, immediately below, when we detect deadlock were we to wait
434     * for title's lock, because its ownercx is waiting on a title owned by the
435     * calling cx.
436     *
437     * (i) rt->gcLock held
438     */
/* See the block comment above: revoke exclusive ownership of title. */
static void
ShareTitle(JSContext *cx, JSTitle *title)
{
    JSRuntime *rt;
    JSTitle **todop;

    rt = cx->runtime;
    if (title->u.link) {
        /*
         * title is on rt->titleSharingTodo: unlink it and wake every thread
         * blocked in ClaimTitle waiting for it to become shared.
         */
        for (todop = &rt->titleSharingTodo; *todop != title;
             todop = &(*todop)->u.link) {
            JS_ASSERT(*todop != NO_TITLE_SHARING_TODO);
        }
        *todop = title->u.link;
        title->u.link = NULL;       /* null u.link for sanity ASAP */
        JS_NOTIFY_ALL_CONDVAR(rt->titleSharingDone);
    }
    FinishSharingTitle(cx, title);
}
457    
458     /*
459 siliconforks 460 * FinishSharingTitle is the tail part of ShareTitle, split out to become a
460     * subroutine of js_ShareWaitingTitles too. The bulk of the work here involves
461     * making mutable strings in the title's object's slots be immutable. We have
462     * to do this because such strings will soon be available to multiple threads,
463     * so their buffers can't be realloc'd any longer in js_ConcatStrings, and
464     * their members can't be modified by js_ConcatStrings, js_UndependString or
465 siliconforks 507 * MinimizeDependentStrings.
466 siliconforks 332 *
467 siliconforks 460 * The last bit of work done by this function nulls title->ownercx and updates
468     * rt->sharedTitles.
469 siliconforks 332 */
/* See the block comment above for the full contract of this tail routine. */
static void
FinishSharingTitle(JSContext *cx, JSTitle *title)
{
    js_InitLock(&title->lock);
    title->u.count = 0;   /* NULL may not pun as 0 */

    /* Make every string slot immutable before other threads can see it. */
    JSScope *scope = TITLE_TO_SCOPE(title);
    JSObject *obj = scope->object;
    if (obj) {
        uint32 nslots = scope->freeslot;
        JS_ASSERT(nslots >= JSSLOT_START(obj->getClass()));
        for (uint32 i = JSSLOT_START(obj->getClass()); i != nslots; ++i) {
            jsval v = STOBJ_GET_SLOT(obj, i);
            if (JSVAL_IS_STRING(v) &&
                !js_MakeStringImmutable(cx, JSVAL_TO_STRING(v))) {
                /*
                 * FIXME bug 363059: The following error recovery changes
                 * runtime execution semantics, arbitrarily and silently
                 * ignoring errors except out-of-memory, which should have been
                 * reported through JS_ReportOutOfMemory at this point.
                 */
                STOBJ_SET_SLOT(obj, i, JSVAL_VOID);
            }
        }
    }

    title->ownercx = NULL;  /* NB: set last, after lock init */
    JS_RUNTIME_METER(cx->runtime, sharedTitles);
}
499    
500     /*
501 siliconforks 460 * Notify all contexts that are currently in a request, which will give them a
502     * chance to yield their current request.
503     */
504     void
505     js_NudgeOtherContexts(JSContext *cx)
506     {
507     JSRuntime *rt = cx->runtime;
508     JSContext *acx = NULL;
509    
510     while ((acx = js_NextActiveContext(rt, acx)) != NULL) {
511     if (cx != acx)
512     JS_TriggerOperationCallback(acx);
513     }
514     }
515    
516     /*
517     * Notify all contexts that are currently in a request and execute on this
518     * specific thread.
519     */
520     static void
521     NudgeThread(JSThread *thread)
522     {
523     JSCList *link;
524     JSContext *acx;
525    
526     link = &thread->contextList;
527     while ((link = link->next) != &thread->contextList) {
528     acx = CX_FROM_THREAD_LINKS(link);
529     JS_ASSERT(acx->thread == thread);
530     if (acx->requestDepth)
531     JS_TriggerOperationCallback(acx);
532     }
533     }
534    
535     /*
536 siliconforks 332 * Given a title with apparently non-null ownercx different from cx, try to
537     * set ownercx to cx, claiming exclusive (single-threaded) ownership of title.
538     * If we claim ownership, return true. Otherwise, we wait for ownercx to be
539     * set to null (indicating that title is multi-threaded); or if waiting would
540     * deadlock, we set ownercx to null ourselves via ShareTitle. In any case,
541     * once ownercx is null we return false.
542     */
static JSBool
ClaimTitle(JSTitle *title, JSContext *cx)
{
    JSRuntime *rt;
    JSContext *ownercx;
    uint32 requestDebit;

    rt = cx->runtime;
    JS_RUNTIME_METER(rt, claimAttempts);
    JS_LOCK_GC(rt);

    /* Reload in case ownercx went away while we blocked on the lock. */
    while ((ownercx = title->ownercx) != NULL) {
        /*
         * Avoid selflock if ownercx is dead, or is not running a request, or
         * has the same thread as cx. Set title->ownercx to cx so that the
         * matching JS_UNLOCK_SCOPE or JS_UNLOCK_OBJ macro call will take the
         * fast path around the corresponding js_UnlockTitle or js_UnlockObj
         * function call.
         *
         * If title->u.link is non-null, title has already been inserted on
         * the rt->titleSharingTodo list, because another thread's context
         * already wanted to lock title while ownercx was running a request.
         * That context must still be in request and cannot be dead. We can
         * claim it if its thread matches ours but only if cx itself is in a
         * request.
         *
         * The latter check covers the case when the embedding triggers a call
         * to js_GC on a cx outside a request while having ownercx running a
         * request on the same thread, and then js_GC calls a mark hook or a
         * finalizer accessing the title. In this case we cannot claim the
         * title but must share it now as no title-sharing JS_EndRequest will
         * follow.
         */
        bool canClaim;
        if (title->u.link) {
            JS_ASSERT(js_ValidContextPointer(rt, ownercx));
            JS_ASSERT(ownercx->requestDepth > 0);
            JS_ASSERT_IF(cx->requestDepth == 0, cx->thread == rt->gcThread);
            canClaim = (ownercx->thread == cx->thread &&
                        cx->requestDepth > 0);
        } else {
            canClaim = (!js_ValidContextPointer(rt, ownercx) ||
                        !ownercx->requestDepth ||
                        ownercx->thread == cx->thread);
        }
        if (canClaim) {
            title->ownercx = cx;
            JS_UNLOCK_GC(rt);
            JS_RUNTIME_METER(rt, claimedTitles);
            return JS_TRUE;
        }

        /*
         * Avoid deadlock if title's owner thread is waiting on a title that
         * the current thread owns, by revoking title's ownership. This
         * approach to deadlock avoidance works because the engine never nests
         * title locks.
         *
         * If cx->thread could hold locks on ownercx->thread->titleToShare, or
         * if ownercx->thread could hold locks on title, we would need to keep
         * reentrancy counts for all such "flyweight" (ownercx != NULL) locks,
         * so that control would unwind properly once these locks became
         * "thin" or "fat". The engine promotes a title from exclusive to
         * shared access only when locking, never when holding or unlocking.
         *
         * Avoid deadlock before any of this title/context cycle detection if
         * cx is on the active GC's thread, because in that case, no requests
         * will run until the GC completes. Any title wanted by the GC (from
         * a finalizer or a mark hook) that can't be claimed must become
         * shared.
         */
        if (rt->gcThread == cx->thread || WillDeadlock(ownercx, cx->thread)) {
            ShareTitle(cx, title);
            break;
        }

        /*
         * Thanks to the non-zero NO_TITLE_SHARING_TODO link terminator, we
         * can decide whether title is on rt->titleSharingTodo with a single
         * non-null test, and avoid double-insertion bugs.
         */
        if (!title->u.link) {
            /* Hold the scope so the title outlives waiting threads. */
            TITLE_TO_SCOPE(title)->hold();
            title->u.link = rt->titleSharingTodo;
            rt->titleSharingTodo = title;
        }

        /*
         * Discount all the requests running on the current thread so a
         * possible GC can proceed on another thread while we wait on
         * rt->titleSharingDone.
         */
        requestDebit = js_DiscountRequestsForGC(cx);
        if (title->ownercx != ownercx) {
            /*
             * js_DiscountRequestsForGC released and reacquired the GC lock,
             * and the title was taken or shared. Start over.
             */
            js_RecountRequestsAfterGC(rt, requestDebit);
            continue;
        }

        /*
         * We know that some other thread's context owns title, which is now
         * linked onto rt->titleSharingTodo, awaiting the end of that other
         * thread's request. So it is safe to wait on rt->titleSharingDone.
         * But before waiting, we force the operation callback for that other
         * thread so it can quickly suspend.
         */
        NudgeThread(ownercx->thread);

        JS_ASSERT(!cx->thread->titleToShare);
        cx->thread->titleToShare = title;
        /* stat is only consumed by the assertion below. */
#ifdef DEBUG
        PRStatus stat =
#endif
            PR_WaitCondVar(rt->titleSharingDone, PR_INTERVAL_NO_TIMEOUT);
        JS_ASSERT(stat != PR_FAILURE);

        js_RecountRequestsAfterGC(rt, requestDebit);

        /*
         * Don't clear titleToShare until after we're through waiting on
         * all condition variables protected by rt->gcLock -- that includes
         * rt->titleSharingDone *and* rt->gcDone (hidden in the call to
         * js_RecountRequestsAfterGC immediately above).
         *
         * Otherwise, the GC could easily deadlock with another thread that
         * owns a title wanted by a finalizer. By keeping cx->titleToShare
         * set till here, we ensure that such deadlocks are detected, which
         * results in the finalized object's title being shared (it must, of
         * course, have other, live objects sharing it).
         */
        cx->thread->titleToShare = NULL;
    }

    JS_UNLOCK_GC(rt);
    return JS_FALSE;
}
683    
/*
 * Share every title on rt->titleSharingTodo that cx still owns exclusively,
 * then wake all threads waiting on rt->titleSharingDone if any were shared.
 * Called when cx's request ends.
 */
void
js_ShareWaitingTitles(JSContext *cx)
{
    JSTitle *title, **todop;
    bool shared;

    /* See whether cx has any single-threaded titles to start sharing. */
    todop = &cx->runtime->titleSharingTodo;
    shared = false;
    while ((title = *todop) != NO_TITLE_SHARING_TODO) {
        if (title->ownercx != cx) {
            todop = &title->u.link;
            continue;
        }
        *todop = title->u.link;
        title->u.link = NULL;       /* null u.link for sanity ASAP */

        /*
         * If JSScope::drop returns false, we held the last ref to scope. The
         * waiting thread(s) must have been killed, after which the GC
         * collected the object that held this scope. Unlikely, because it
         * requires that the GC ran (e.g., from an operation callback)
         * during this request, but possible.
         */
        if (TITLE_TO_SCOPE(title)->drop(cx, NULL)) {
            FinishSharingTitle(cx, title); /* set ownercx = NULL */
            shared = true;
        }
    }
    if (shared)
        JS_NOTIFY_ALL_CONDVAR(cx->runtime->titleSharingDone);
}
716    
717 siliconforks 332 /* Exported to js.c, which calls it via OBJ_GET_* and JSVAL_IS_* macros. */
/* Exported to js.c, which calls it via OBJ_GET_* and JSVAL_IS_* macros. */
JS_FRIEND_API(jsval)
js_GetSlotThreadSafe(JSContext *cx, JSObject *obj, uint32 slot)
{
    jsval v;
    JSScope *scope;
    JSTitle *title;
#ifndef NSPR_LOCK
    JSThinLock *tl;
    jsword me;
#endif

    OBJ_CHECK_SLOT(obj, slot);

    /*
     * Native object locking is inlined here to optimize the single-threaded
     * and contention-free multi-threaded cases.
     */
    scope = OBJ_SCOPE(obj);
    title = &scope->title;
    JS_ASSERT(title->ownercx != cx);
    JS_ASSERT(slot < scope->freeslot);

    /*
     * Avoid locking if called from the GC. Also avoid locking an object
     * owning a sealed scope. If neither of those special cases applies, try
     * to claim scope's flyweight lock from whatever context may have had it in
     * an earlier request.
     */
    if (CX_THREAD_IS_RUNNING_GC(cx) ||
        scope->sealed() ||
        (title->ownercx && ClaimTitle(title, cx))) {
        return STOBJ_GET_SLOT(obj, slot);
    }

#ifndef NSPR_LOCK
    tl = &title->lock;
    me = CX_THINLOCK_ID(cx);
    JS_ASSERT(CURRENT_THREAD_IS_ME(me));
    if (NativeCompareAndSwap(&tl->owner, 0, me)) {
        /*
         * Got the lock with one compare-and-swap. Even so, someone else may
         * have mutated obj so it now has its own scope and lock, which would
         * require either a restart from the top of this routine, or a thin
         * lock release followed by fat lock acquisition.
         */
        if (scope == OBJ_SCOPE(obj)) {
            v = STOBJ_GET_SLOT(obj, slot);
            /*
             * A failed release CAS means another thread is now waiting on
             * this lock: inflate to a held lock and release it slowly.
             */
            if (!NativeCompareAndSwap(&tl->owner, me, 0)) {
                /* Assert that scope locks never revert to flyweight. */
                JS_ASSERT(title->ownercx != cx);
                LOGIT(title, '1');
                title->u.count = 1;
                js_UnlockObj(cx, obj);
            }
            return v;
        }
        /* Scope changed under us: release (or dequeue waiters) and retry. */
        if (!NativeCompareAndSwap(&tl->owner, me, 0))
            js_Dequeue(tl);
    }
    else if (Thin_RemoveWait(ReadWord(tl->owner)) == me) {
        /* This thread already holds the thin lock (possibly with waiters). */
        return STOBJ_GET_SLOT(obj, slot);
    }
#endif

    js_LockObj(cx, obj);
    v = STOBJ_GET_SLOT(obj, slot);

    /*
     * Test whether cx took ownership of obj's scope during js_LockObj.
     *
     * This does not mean that a given scope reverted to flyweight from "thin"
     * or "fat" -- it does mean that obj's map pointer changed due to another
     * thread setting a property, requiring obj to cease sharing a prototype
     * object's scope (whose lock was not flyweight, else we wouldn't be here
     * in the first place!).
     */
    title = &OBJ_SCOPE(obj)->title;
    if (title->ownercx != cx)
        js_UnlockTitle(cx, title);
    return v;
}
799    
/*
 * Thread-safe slot write: the mirror of js_GetSlotThreadSafe above, with the
 * extra step of making any string value immutable before it can be shared.
 */
void
js_SetSlotThreadSafe(JSContext *cx, JSObject *obj, uint32 slot, jsval v)
{
    JSTitle *title;
    JSScope *scope;
#ifndef NSPR_LOCK
    JSThinLock *tl;
    jsword me;
#endif

    OBJ_CHECK_SLOT(obj, slot);

    /* Any string stored in a thread-safe object must be immutable. */
    if (JSVAL_IS_STRING(v) &&
        !js_MakeStringImmutable(cx, JSVAL_TO_STRING(v))) {
        /* FIXME bug 363059: See comments in js_FinishSharingScope. */
        v = JSVAL_NULL;
    }

    /*
     * Native object locking is inlined here to optimize the single-threaded
     * and contention-free multi-threaded cases.
     */
    scope = OBJ_SCOPE(obj);
    title = &scope->title;
    JS_ASSERT(title->ownercx != cx);
    JS_ASSERT(slot < scope->freeslot);

    /*
     * Avoid locking if called from the GC. Also avoid locking an object
     * owning a sealed scope. If neither of those special cases applies, try
     * to claim scope's flyweight lock from whatever context may have had it in
     * an earlier request.
     */
    if (CX_THREAD_IS_RUNNING_GC(cx) ||
        scope->sealed() ||
        (title->ownercx && ClaimTitle(title, cx))) {
        LOCKED_OBJ_WRITE_SLOT(cx, obj, slot, v);
        return;
    }

#ifndef NSPR_LOCK
    tl = &title->lock;
    me = CX_THINLOCK_ID(cx);
    JS_ASSERT(CURRENT_THREAD_IS_ME(me));
    if (NativeCompareAndSwap(&tl->owner, 0, me)) {
        if (scope == OBJ_SCOPE(obj)) {
            LOCKED_OBJ_WRITE_SLOT(cx, obj, slot, v);
            /*
             * A failed release CAS means another thread is now waiting on
             * this lock: inflate to a held lock and release it slowly.
             */
            if (!NativeCompareAndSwap(&tl->owner, me, 0)) {
                /* Assert that scope locks never revert to flyweight. */
                JS_ASSERT(title->ownercx != cx);
                LOGIT(title, '1');
                title->u.count = 1;
                js_UnlockObj(cx, obj);
            }
            return;
        }
        /* Scope changed under us: release (or dequeue waiters) and retry. */
        if (!NativeCompareAndSwap(&tl->owner, me, 0))
            js_Dequeue(tl);
    }
    else if (Thin_RemoveWait(ReadWord(tl->owner)) == me) {
        /* This thread already holds the thin lock (possibly with waiters). */
        LOCKED_OBJ_WRITE_SLOT(cx, obj, slot, v);
        return;
    }
#endif

    js_LockObj(cx, obj);
    LOCKED_OBJ_WRITE_SLOT(cx, obj, slot, v);

    /*
     * Same drill as above, in js_GetSlotThreadSafe.
     */
    title = &OBJ_SCOPE(obj)->title;
    if (title->ownercx != cx)
        js_UnlockTitle(cx, title);
}
876    
877     #ifndef NSPR_LOCK
878    
879     static JSFatLock *
880     NewFatlock()
881     {
882     JSFatLock *fl = (JSFatLock *)malloc(sizeof(JSFatLock)); /* for now */
883     if (!fl) return NULL;
884     fl->susp = 0;
885     fl->next = NULL;
886     fl->prevp = NULL;
887     fl->slock = PR_NewLock();
888     fl->svar = PR_NewCondVar(fl->slock);
889     return fl;
890     }
891    
892     static void
893     DestroyFatlock(JSFatLock *fl)
894     {
895     PR_DestroyLock(fl->slock);
896     PR_DestroyCondVar(fl->svar);
897 siliconforks 507 js_free(fl);
898 siliconforks 332 }
899    
900     static JSFatLock *
901     ListOfFatlocks(int listc)
902     {
903     JSFatLock *m;
904     JSFatLock *m0;
905     int i;
906    
907     JS_ASSERT(listc>0);
908     m0 = m = NewFatlock();
909     for (i=1; i<listc; i++) {
910     m->next = NewFatlock();
911     m = m->next;
912     }
913     return m0;
914     }
915    
916     static void
917     DeleteListOfFatlocks(JSFatLock *m)
918     {
919     JSFatLock *m0;
920     for (; m; m=m0) {
921     m0 = m->next;
922     DestroyFatlock(m);
923     }
924     }
925    
/* Table of fat-lock free/taken list pairs, one bucket per global lock. */
static JSFatLockTable *fl_list_table = NULL;
/* Number of buckets in fl_list_table (set to global_lock_count). */
static uint32 fl_list_table_len = 0;
/* How many fat locks to allocate at once when a bucket's free list is empty. */
static uint32 fl_list_chunk_len = 0;
929    
/*
 * Return a fat lock for thin lock |id|: pop one from the bucket's free list
 * (replenishing the list with a fresh chunk if empty) and push it onto the
 * head of the bucket's doubly linked "taken" list.
 */
static JSFatLock *
GetFatlock(void *id)
{
    JSFatLock *m;

    uint32 i = GLOBAL_LOCK_INDEX(id);
    if (fl_list_table[i].free == NULL) {
#ifdef DEBUG
        if (fl_list_table[i].taken)
            printf("Ran out of fat locks!\n");
#endif
        fl_list_table[i].free = ListOfFatlocks(fl_list_chunk_len);
    }
    /* Pop the head of the free list. */
    m = fl_list_table[i].free;
    fl_list_table[i].free = m->next;
    m->susp = 0;
    /*
     * Splice m onto the head of the taken list.  prevp points at whichever
     * field links to m, so PutFatlock can unlink in O(1) without knowing
     * whether m is the head.
     */
    m->next = fl_list_table[i].taken;
    m->prevp = &fl_list_table[i].taken;
    if (fl_list_table[i].taken)
        fl_list_table[i].taken->prevp = &m->next;
    fl_list_table[i].taken = m;
    return m;
}
953    
/*
 * Return fat lock |m| (keyed by thin lock |id|) to its bucket's free list,
 * unlinking it from the taken list via the prevp back-pointer set up in
 * GetFatlock.  A NULL m is a no-op.
 */
static void
PutFatlock(JSFatLock *m, void *id)
{
    uint32 i;
    if (m == NULL)
        return;

    /* Unlink m from fl_list_table[i].taken. */
    *m->prevp = m->next;
    if (m->next)
        m->next->prevp = m->prevp;

    /* Insert m in fl_list_table[i].free. */
    i = GLOBAL_LOCK_INDEX(id);
    m->next = fl_list_table[i].free;
    fl_list_table[i].free = m;
}
971    
972     #endif /* !NSPR_LOCK */
973    
974     JSBool
975     js_SetupLocks(int listc, int globc)
976     {
977     #ifndef NSPR_LOCK
978     uint32 i;
979    
980     if (global_locks)
981     return JS_TRUE;
982     #ifdef DEBUG
983     if (listc > 10000 || listc < 0) /* listc == fat lock list chunk length */
984     printf("Bad number %d in js_SetupLocks()!\n", listc);
985     if (globc > 100 || globc < 0) /* globc == number of global locks */
986     printf("Bad number %d in js_SetupLocks()!\n", listc);
987     #endif
988     global_locks_log2 = JS_CeilingLog2(globc);
989     global_locks_mask = JS_BITMASK(global_locks_log2);
990     global_lock_count = JS_BIT(global_locks_log2);
991 siliconforks 507 global_locks = (PRLock **) js_malloc(global_lock_count * sizeof(PRLock*));
992 siliconforks 332 if (!global_locks)
993     return JS_FALSE;
994     for (i = 0; i < global_lock_count; i++) {
995     global_locks[i] = PR_NewLock();
996     if (!global_locks[i]) {
997     global_lock_count = i;
998     js_CleanupLocks();
999     return JS_FALSE;
1000     }
1001     }
1002 siliconforks 507 fl_list_table = (JSFatLockTable *) js_malloc(i * sizeof(JSFatLockTable));
1003 siliconforks 332 if (!fl_list_table) {
1004     js_CleanupLocks();
1005     return JS_FALSE;
1006     }
1007     fl_list_table_len = global_lock_count;
1008     for (i = 0; i < global_lock_count; i++)
1009     fl_list_table[i].free = fl_list_table[i].taken = NULL;
1010     fl_list_chunk_len = listc;
1011     #endif /* !NSPR_LOCK */
1012     return JS_TRUE;
1013     }
1014    
1015     void
1016     js_CleanupLocks()
1017     {
1018     #ifndef NSPR_LOCK
1019     uint32 i;
1020    
1021     if (global_locks) {
1022     for (i = 0; i < global_lock_count; i++)
1023     PR_DestroyLock(global_locks[i]);
1024 siliconforks 507 js_free(global_locks);
1025 siliconforks 332 global_locks = NULL;
1026     global_lock_count = 1;
1027     global_locks_log2 = 0;
1028     global_locks_mask = 0;
1029     }
1030     if (fl_list_table) {
1031     for (i = 0; i < fl_list_table_len; i++) {
1032     DeleteListOfFatlocks(fl_list_table[i].free);
1033     fl_list_table[i].free = NULL;
1034     DeleteListOfFatlocks(fl_list_table[i].taken);
1035     fl_list_table[i].taken = NULL;
1036     }
1037 siliconforks 507 js_free(fl_list_table);
1038 siliconforks 332 fl_list_table = NULL;
1039     fl_list_table_len = 0;
1040     }
1041     #endif /* !NSPR_LOCK */
1042     }
1043    
1044     #ifdef NSPR_LOCK
1045    
/*
 * NSPR_LOCK builds degrade thin locks to plain NSPR locks: acquire the
 * backing fat lock, then record the owner word.
 */
static JS_ALWAYS_INLINE void
ThinLock(JSThinLock *tl, jsword me)
{
    JS_ACQUIRE_LOCK((JSLock *) tl->fat);
    tl->owner = me;
}
1052    
/*
 * NSPR_LOCK counterpart of ThinLock: clear the owner word while still
 * holding the lock, then release it.  |me| is unused in this build.
 */
static JS_ALWAYS_INLINE void
ThinUnlock(JSThinLock *tl, jsword /*me*/)
{
    tl->owner = 0;
    JS_RELEASE_LOCK((JSLock *) tl->fat);
}
1059    
1060     #else
1061    
1062     /*
1063     * Fast locking and unlocking is implemented by delaying the allocation of a
1064     * system lock (fat lock) until contention. As long as a locking thread A
1065     * runs uncontended, the lock is represented solely by storing A's identity in
1066     * the object being locked.
1067     *
1068     * If another thread B tries to lock the object currently locked by A, B is
1069     * enqueued into a fat lock structure (which might have to be allocated and
 * pointed to by the object), and suspended using NSPR condition variables
1071     * (wait). A wait bit (Bacon bit) is set in the lock word of the object,
1072     * signalling to A that when releasing the lock, B must be dequeued and
1073     * notified.
1074     *
1075     * The basic operation of the locking primitives (js_Lock, js_Unlock,
1076     * js_Enqueue, and js_Dequeue) is compare-and-swap. Hence, when locking into
1077     * the word pointed at by p, compare-and-swap(p, 0, A) success implies that p
1078     * is unlocked. Similarly, when unlocking p, if compare-and-swap(p, A, 0)
1079     * succeeds this implies that p is uncontended (no one is waiting because the
1080     * wait bit is not set).
1081     *
1082     * When dequeueing, the lock is released, and one of the threads suspended on
1083     * the lock is notified. If other threads still are waiting, the wait bit is
1084     * kept (in js_Enqueue), and if not, the fat lock is deallocated.
1085     *
1086     * The functions js_Enqueue, js_Dequeue, js_SuspendThread, and js_ResumeThread
1087     * are serialized using a global lock. For scalability, a hashtable of global
1088     * locks is used, which is indexed modulo the thin lock pointer.
1089     */
1090    
/*
 * Suspend the calling thread on tl's fat lock until it is notified.
 *
 * Invariants:
 * (i) global lock is held
 * (ii) fl->susp >= 0
 *
 * Returns non-zero iff the fat lock was recycled (this was the last
 * suspended thread), i.e. tl reverted to a thin lock.
 */
static int
js_SuspendThread(JSThinLock *tl)
{
    JSFatLock *fl;
    PRStatus stat;

    if (tl->fat == NULL)
        fl = tl->fat = GetFatlock(tl);
    else
        fl = tl->fat;
    JS_ASSERT(fl->susp >= 0);
    fl->susp++;
    /*
     * Take the fat lock's mutex before dropping the global lock, so a
     * resumer serialized by the global lock cannot notify the condvar
     * before we are waiting on it.
     */
    PR_Lock(fl->slock);
    js_UnlockGlobal(tl);
    stat = PR_WaitCondVar(fl->svar, PR_INTERVAL_NO_TIMEOUT);
    JS_ASSERT(stat != PR_FAILURE);
    PR_Unlock(fl->slock);
    js_LockGlobal(tl);
    fl->susp--;
    if (fl->susp == 0) {
        /* Last waiter: return the fat lock and revert tl to thin. */
        PutFatlock(fl, tl);
        tl->fat = NULL;
    }
    return tl->fat == NULL;
}
1121    
/*
 * Wake one thread suspended on tl's fat lock.
 *
 * Invariants:
 * (i) global lock is held (released here, before returning)
 * (ii) fl->susp > 0
 */
static void
js_ResumeThread(JSThinLock *tl)
{
    JSFatLock *fl = tl->fat;
    PRStatus stat;

    JS_ASSERT(fl != NULL);
    JS_ASSERT(fl->susp > 0);
    /*
     * Pair with js_SuspendThread: acquire slock before notifying so the
     * notification cannot race ahead of the waiter, then drop the global
     * lock.
     */
    PR_Lock(fl->slock);
    js_UnlockGlobal(tl);
    stat = PR_NotifyCondVar(fl->svar);
    JS_ASSERT(stat != PR_FAILURE);
    PR_Unlock(fl->slock);
}
1140    
/*
 * Acquire contended thin lock tl for thread id |me|, suspending while
 * another thread owns it.  Loops because both the owner word and the wait
 * bit can change between the read and the compare-and-swap.
 */
static void
js_Enqueue(JSThinLock *tl, jsword me)
{
    jsword o, n;

    js_LockGlobal(tl);
    for (;;) {
        o = ReadWord(tl->owner);
        n = Thin_SetWait(o);
        if (o != 0 && NativeCompareAndSwap(&tl->owner, o, n)) {
            /*
             * Lock is owned: wait bit is now set, so sleep.  On wakeup,
             * keep the wait bit in me iff other threads remain suspended,
             * so a later unlock still dequeues them.
             */
            if (js_SuspendThread(tl))
                me = Thin_RemoveWait(me);
            else
                me = Thin_SetWait(me);
        }
        else if (NativeCompareAndSwap(&tl->owner, 0, me)) {
            /* Lock was free and we took it. */
            js_UnlockGlobal(tl);
            return;
        }
    }
}
1162    
/*
 * Release contended thin lock tl (its wait bit must be set) and wake one
 * suspended thread.  Takes the global lock; js_ResumeThread releases it.
 */
static void
js_Dequeue(JSThinLock *tl)
{
    jsword o;

    js_LockGlobal(tl);
    o = ReadWord(tl->owner);
    JS_ASSERT(Thin_GetWait(o) != 0);
    JS_ASSERT(tl->fat != NULL);
    if (!NativeCompareAndSwap(&tl->owner, o, 0)) /* release it */
        JS_ASSERT(0);
    js_ResumeThread(tl);
}
1176    
/*
 * Acquire tl for |me|.  Fast path: one compare-and-swap from 0 (unlocked).
 * On contention, fall into js_Enqueue.  Re-acquiring a lock this thread
 * already owns is a caller bug (nesting is tracked via title->u.count in
 * js_LockTitle), hence the DEBUG assert.
 */
static JS_ALWAYS_INLINE void
ThinLock(JSThinLock *tl, jsword me)
{
    JS_ASSERT(CURRENT_THREAD_IS_ME(me));
    if (NativeCompareAndSwap(&tl->owner, 0, me))
        return;
    if (Thin_RemoveWait(ReadWord(tl->owner)) != me)
        js_Enqueue(tl, me);
#ifdef DEBUG
    else
        JS_ASSERT(0);
#endif
}
1190    
/*
 * Release tl held by |me|.  Fast path: compare-and-swap the owner word back
 * to 0.  If that fails, the wait bit must be set, so dequeue a waiter.
 */
static JS_ALWAYS_INLINE void
ThinUnlock(JSThinLock *tl, jsword me)
{
    JS_ASSERT(CURRENT_THREAD_IS_ME(me));

    /*
     * Since we can race with the NativeCompareAndSwap in js_Enqueue, we need
     * to use a C_A_S here as well -- Arjan van de Ven 30/1/08
     */
    if (NativeCompareAndSwap(&tl->owner, me, 0))
        return;

    JS_ASSERT(Thin_GetWait(tl->owner));
    if (Thin_RemoveWait(ReadWord(tl->owner)) == me)
        js_Dequeue(tl);
#ifdef DEBUG
    else
        JS_ASSERT(0); /* unbalanced unlock */
#endif
}
1211    
1212     #endif /* !NSPR_LOCK */
1213    
/* Acquire tl on behalf of cx's thread (public wrapper over ThinLock). */
void
js_Lock(JSContext *cx, JSThinLock *tl)
{
    ThinLock(tl, CX_THINLOCK_ID(cx));
}
1219    
/* Release tl previously acquired by cx's thread via js_Lock. */
void
js_Unlock(JSContext *cx, JSThinLock *tl)
{
    ThinUnlock(tl, CX_THINLOCK_ID(cx));
}
1225    
/*
 * Acquire the per-runtime lock.  The owner is recorded (DEBUG only, after
 * acquisition) for js_IsRuntimeLocked.
 */
void
js_LockRuntime(JSRuntime *rt)
{
    PR_Lock(rt->rtLock);
#ifdef DEBUG
    rt->rtLockOwner = js_CurrentThreadId();
#endif
}
1234    
/*
 * Release the per-runtime lock.  The DEBUG owner record is cleared first,
 * while the lock is still held.
 */
void
js_UnlockRuntime(JSRuntime *rt)
{
#ifdef DEBUG
    rt->rtLockOwner = 0;
#endif
    PR_Unlock(rt->rtLock);
}
1243    
1244     void
1245     js_LockTitle(JSContext *cx, JSTitle *title)
1246     {
1247     jsword me = CX_THINLOCK_ID(cx);
1248    
1249     JS_ASSERT(CURRENT_THREAD_IS_ME(me));
1250     JS_ASSERT(title->ownercx != cx);
1251     if (CX_THREAD_IS_RUNNING_GC(cx))
1252     return;
1253     if (title->ownercx && ClaimTitle(title, cx))
1254     return;
1255    
1256     if (Thin_RemoveWait(ReadWord(title->lock.owner)) == me) {
1257     JS_ASSERT(title->u.count > 0);
1258     LOGIT(scope, '+');
1259     title->u.count++;
1260     } else {
1261     ThinLock(&title->lock, me);
1262     JS_ASSERT(title->u.count == 0);
1263     LOGIT(scope, '1');
1264     title->u.count = 1;
1265     }
1266     }
1267    
/*
 * Release one nesting level of cx's lock on title; the thin lock itself is
 * released only when the nesting count drops to zero.
 */
void
js_UnlockTitle(JSContext *cx, JSTitle *title)
{
    jsword me = CX_THINLOCK_ID(cx);

    /* We hope compilers use me instead of reloading cx->thread in the macro. */
    if (CX_THREAD_IS_RUNNING_GC(cx))
        return;
    if (cx->lockedSealedTitle == title) {
        /* Sealed-scope pseudo-lock taken in js_LockObj: just clear it. */
        cx->lockedSealedTitle = NULL;
        return;
    }

    /*
     * If title->ownercx is not null, it's likely that two contexts not using
     * requests nested locks for title.  The first context, cx here, claimed
     * title; the second, title->ownercx here, re-claimed it because the first
     * was not in a request, or was on the same thread.  We don't want to keep
     * track of such nesting, because it penalizes the common non-nested case.
     * Instead of asserting here and silently coping, we simply re-claim title
     * for cx and return.
     *
     * See http://bugzilla.mozilla.org/show_bug.cgi?id=229200 for a real world
     * case where an asymmetric thread model (Mozilla's main thread is known
     * to be the only thread that runs the GC) combined with multiple contexts
     * per thread has led to such request-less nesting.
     */
    if (title->ownercx) {
        JS_ASSERT(title->u.count == 0);
        JS_ASSERT(title->lock.owner == 0);
        title->ownercx = cx;
        return;
    }

    JS_ASSERT(title->u.count > 0);
    if (Thin_RemoveWait(ReadWord(title->lock.owner)) != me) {
        JS_ASSERT(0);   /* unbalanced unlock */
        return;
    }
    LOGIT(title, '-');
    if (--title->u.count == 0)
        ThinUnlock(&title->lock, me);
}
1311    
/*
 * Transfer lock state from oldtitle to newtitle when an object's scope is
 * replaced while locked (both titles must be locked by cx on entry).
 *
 * NB: oldtitle may be null if our caller is js_GetMutableScope and it just
 * dropped the last reference to oldtitle.
 */
void
js_TransferTitle(JSContext *cx, JSTitle *oldtitle, JSTitle *newtitle)
{
    JS_ASSERT(JS_IS_TITLE_LOCKED(cx, newtitle));

    /*
     * If the last reference to oldtitle went away, newtitle needs no lock
     * state update.
     */
    if (!oldtitle)
        return;
    JS_ASSERT(JS_IS_TITLE_LOCKED(cx, oldtitle));

    /*
     * Special case in js_LockTitle and js_UnlockTitle for the GC calling
     * code that locks, unlocks, or mutates.  Nothing to do in these cases,
     * because title and newtitle were "locked" by the GC thread, so neither
     * was actually locked.
     */
    if (CX_THREAD_IS_RUNNING_GC(cx))
        return;

    /*
     * Special case in js_LockObj and js_UnlockTitle for locking the sealed
     * scope of an object that owns that scope (the prototype or mutated obj
     * for which OBJ_SCOPE(obj)->object == obj), and unlocking it.
     */
    JS_ASSERT(cx->lockedSealedTitle != newtitle);
    if (cx->lockedSealedTitle == oldtitle) {
        JS_ASSERT(newtitle->ownercx == cx ||
                  (!newtitle->ownercx && newtitle->u.count == 1));
        cx->lockedSealedTitle = NULL;
        return;
    }

    /*
     * If oldtitle is single-threaded, there's nothing to do.
     */
    if (oldtitle->ownercx) {
        JS_ASSERT(oldtitle->ownercx == cx);
        JS_ASSERT(newtitle->ownercx == cx ||
                  (!newtitle->ownercx && newtitle->u.count == 1));
        return;
    }

    /*
     * We transfer oldtitle->u.count only if newtitle is not single-threaded.
     * Flow unwinds from here through some number of JS_UNLOCK_TITLE and/or
     * JS_UNLOCK_OBJ macro calls, which will decrement newtitle->u.count only
     * if they find newtitle->ownercx != cx.
     */
    if (newtitle->ownercx != cx) {
        JS_ASSERT(!newtitle->ownercx);
        newtitle->u.count = oldtitle->u.count;
    }

    /*
     * Reset oldtitle's lock state so that it is completely unlocked.
     */
    LOGIT(oldtitle, '0');
    oldtitle->u.count = 0;
    ThinUnlock(&oldtitle->lock, CX_THINLOCK_ID(cx));
}
1379    
/*
 * Lock the title of obj's scope, retrying if a mutator swaps obj's scope
 * underneath us between the lock and the re-check.
 */
void
js_LockObj(JSContext *cx, JSObject *obj)
{
    JSScope *scope;
    JSTitle *title;

    JS_ASSERT(OBJ_IS_NATIVE(obj));

    /*
     * We must test whether the GC is calling and return without mutating any
     * state, especially cx->lockedSealedTitle.  Note asymmetry with respect
     * to js_UnlockObj, which is a thin-layer on top of js_UnlockTitle.
     */
    if (CX_THREAD_IS_RUNNING_GC(cx))
        return;

    for (;;) {
        scope = OBJ_SCOPE(obj);
        title = &scope->title;
        if (scope->sealed() && !cx->lockedSealedTitle) {
            /* Sealed scopes are immutable: record a pseudo-lock instead. */
            cx->lockedSealedTitle = title;
            return;
        }

        js_LockTitle(cx, title);

        /* If obj still has this scope, we're done. */
        if (scope == OBJ_SCOPE(obj))
            return;

        /* Lost a race with a mutator; retry with obj's new scope. */
        js_UnlockTitle(cx, title);
    }
}
1414    
1415     void
1416     js_UnlockObj(JSContext *cx, JSObject *obj)
1417     {
1418     JS_ASSERT(OBJ_IS_NATIVE(obj));
1419     js_UnlockTitle(cx, &OBJ_SCOPE(obj)->title);
1420     }
1421    
1422 siliconforks 507 bool
1423     js_LockObjIfShape(JSContext *cx, JSObject *obj, uint32 shape)
1424     {
1425     JS_ASSERT(OBJ_SCOPE(obj)->title.ownercx != cx);
1426     js_LockObj(cx, obj);
1427     if (OBJ_SHAPE(obj) == shape)
1428     return true;
1429     js_UnlockObj(cx, obj);
1430     return false;
1431     }
1432    
/*
 * Initialize title to the single-threaded (flyweight) state owned by cx,
 * with a zeroed thin lock and no nesting.
 */
void
js_InitTitle(JSContext *cx, JSTitle *title)
{
#ifdef JS_THREADSAFE
    title->ownercx = cx;
    memset(&title->lock, 0, sizeof title->lock);

    /*
     * Set u.link = NULL, not u.count = 0, in case the target architecture's
     * null pointer has a non-zero integer representation.
     */
    title->u.link = NULL;

#ifdef JS_DEBUG_TITLE_LOCKS
    title->file[0] = title->file[1] = title->file[2] = title->file[3] = NULL;
    title->line[0] = title->line[1] = title->line[2] = title->line[3] = 0;
#endif
#endif
}
1452    
/*
 * Tear down title's lock state.  The title must not be locked or shared at
 * this point (count == 0); ownership reverts to cx before the lock dies.
 */
void
js_FinishTitle(JSContext *cx, JSTitle *title)
{
#ifdef DEBUG_SCOPE_COUNT
    js_unlog_title(title);
#endif

#ifdef JS_THREADSAFE
    /* Title must be single-threaded at this point, so set ownercx. */
    JS_ASSERT(title->u.count == 0);
    title->ownercx = cx;
    js_FinishLock(&title->lock);
#endif
}
1467    
1468     #ifdef DEBUG
1469    
1470     JSBool
1471     js_IsRuntimeLocked(JSRuntime *rt)
1472     {
1473     return js_CurrentThreadId() == rt->rtLockOwner;
1474     }
1475    
1476     JSBool
1477     js_IsObjLocked(JSContext *cx, JSObject *obj)
1478     {
1479 siliconforks 460 return js_IsTitleLocked(cx, &OBJ_SCOPE(obj)->title);
1480 siliconforks 332 }
1481    
/*
 * DEBUG predicate: is title effectively locked with respect to cx?  True
 * for the GC thread, for cx's sealed-scope pseudo-lock, for exclusive
 * (flyweight) ownership by cx's thread, and for a thin/fat lock held by the
 * current thread.
 */
JSBool
js_IsTitleLocked(JSContext *cx, JSTitle *title)
{
    /* Special case: the GC locking any object's title, see js_LockTitle. */
    if (CX_THREAD_IS_RUNNING_GC(cx))
        return JS_TRUE;

    /* Special case: locked object owning a sealed scope, see js_LockObj. */
    if (cx->lockedSealedTitle == title)
        return JS_TRUE;

    /*
     * General case: the title is either exclusively owned (by cx), or it has
     * a thin or fat lock to cope with shared (concurrent) ownership.
     */
    if (title->ownercx) {
        JS_ASSERT(title->ownercx == cx || title->ownercx->thread == cx->thread);
        return JS_TRUE;
    }
    /* The lock owner word holds a JSThread pointer (plus the wait bit). */
    return js_CurrentThreadId() ==
           ((JSThread *)Thin_RemoveWait(ReadWord(title->lock.owner)))->id;
}
1504    
1505     #ifdef JS_DEBUG_TITLE_LOCKS
/*
 * Lock-debugging helper: record the file/line of the current (nested) lock
 * acquisition of scope's title.  Only meaningful for shared titles (no
 * flyweight owner); the arrays track at most four nesting levels.
 */
void
js_SetScopeInfo(JSScope *scope, const char *file, int line)
{
    JSTitle *title = &scope->title;
    if (!title->ownercx) {
        jsrefcount count = title->u.count;
        JS_ASSERT_IF(!scope->sealed(), count > 0);
        JS_ASSERT(count <= 4);
        /*
         * NOTE(review): if count == 0 (possible for a sealed scope per the
         * assertion above) this indexes file[-1]/line[-1] — verify callers
         * never reach here with count == 0.
         */
        title->file[count - 1] = file;
        title->line[count - 1] = line;
    }
}
1518     #endif /* JS_DEBUG_TITLE_LOCKS */
1519     #endif /* DEBUG */
1520     #endif /* JS_THREADSAFE */

  ViewVC Help
Powered by ViewVC 1.1.24