/[jscoverage]/trunk/js/jslock.cpp
ViewVC logotype

Annotation of /trunk/js/jslock.cpp

Parent Directory Parent Directory | Revision Log Revision Log


Revision 507 - (hide annotations)
Sun Jan 10 07:23:34 2010 UTC (12 years, 5 months ago) by siliconforks
File size: 43644 byte(s)
Update SpiderMonkey from Firefox 3.6rc1.

1 siliconforks 507 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2 siliconforks 332 *
3     * ***** BEGIN LICENSE BLOCK *****
4     * Version: MPL 1.1/GPL 2.0/LGPL 2.1
5     *
6     * The contents of this file are subject to the Mozilla Public License Version
7     * 1.1 (the "License"); you may not use this file except in compliance with
8     * the License. You may obtain a copy of the License at
9     * http://www.mozilla.org/MPL/
10     *
11     * Software distributed under the License is distributed on an "AS IS" basis,
12     * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
13     * for the specific language governing rights and limitations under the
14     * License.
15     *
16     * The Original Code is Mozilla Communicator client code, released
17     * March 31, 1998.
18     *
19     * The Initial Developer of the Original Code is
20     * Netscape Communications Corporation.
21     * Portions created by the Initial Developer are Copyright (C) 1998
22     * the Initial Developer. All Rights Reserved.
23     *
24     * Contributor(s):
25     *
26     * Alternatively, the contents of this file may be used under the terms of
27     * either of the GNU General Public License Version 2 or later (the "GPL"),
28     * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
29     * in which case the provisions of the GPL or the LGPL are applicable instead
30     * of those above. If you wish to allow use of your version of this file only
31     * under the terms of either the GPL or the LGPL, and not to allow others to
32     * use your version of this file under the terms of the MPL, indicate your
33     * decision by deleting the provisions above and replace them with the notice
34     * and other provisions required by the GPL or the LGPL. If you do not delete
35     * the provisions above, a recipient may use your version of this file under
36     * the terms of any one of the MPL, the GPL or the LGPL.
37     *
38     * ***** END LICENSE BLOCK ***** */
39    
40     #ifdef JS_THREADSAFE
41    
42     /*
43     * JS locking stubs.
44     */
45     #include <stdlib.h>
46     #include <string.h>
47     #include "jspubtd.h"
48     #include "jsutil.h" /* Added by JSIFY */
49     #include "jstypes.h"
50 siliconforks 507 #include "jsstdint.h"
51 siliconforks 332 #include "jsbit.h"
52     #include "jscntxt.h"
53     #include "jsdtoa.h"
54     #include "jsgc.h"
55 siliconforks 507 #include "jsfun.h" /* for VALUE_IS_FUNCTION from LOCKED_OBJ_WRITE_SLOT */
56 siliconforks 332 #include "jslock.h"
57     #include "jsscope.h"
58     #include "jsstr.h"
59    
60     #define ReadWord(W) (W)
61    
62 siliconforks 507 #if !defined(__GNUC__)
63     # define __asm__ asm
64     # define __volatile__ volatile
65     #endif
66    
67 siliconforks 332 /* Implement NativeCompareAndSwap. */
68    
69     #if defined(_WIN32) && defined(_M_IX86)
70     #pragma warning( disable : 4035 )
71     JS_BEGIN_EXTERN_C
72     extern long __cdecl
73     _InterlockedCompareExchange(long *volatile dest, long exchange, long comp);
74     JS_END_EXTERN_C
75     #pragma intrinsic(_InterlockedCompareExchange)
76    
77 siliconforks 460 JS_STATIC_ASSERT(sizeof(jsword) == sizeof(long));
78    
/*
 * Raw CAS for Win32/x86.  The helper relies on _InterlockedCompareExchange
 * compiling to lock cmpxchg, which leaves ZF set iff the exchange happened;
 * the trailing "sete al" captures ZF in AL.  The missing return statement is
 * intentional: the result is whatever remains in EAX (warning C4035 is
 * disabled above for exactly this idiom), so only the low bit is meaningful.
 */
static JS_ALWAYS_INLINE int
NativeCompareAndSwapHelper(jsword *w, jsword ov, jsword nv)
{
    _InterlockedCompareExchange((long*) w, nv, ov);
    __asm {
        sete al
    }
}

static JS_ALWAYS_INLINE int
NativeCompareAndSwap(jsword *w, jsword ov, jsword nv)
{
    /* Only AL holds the sete result; mask off the garbage upper bits. */
    return (NativeCompareAndSwapHelper(w, ov, nv) & 1);
}
93    
94 siliconforks 507 #elif defined(_MSC_VER) && (defined(_M_AMD64) || defined(_M_X64))
95     JS_BEGIN_EXTERN_C
96     extern long long __cdecl
97     _InterlockedCompareExchange64(long long *volatile dest, long long exchange, long long comp);
98     JS_END_EXTERN_C
99     #pragma intrinsic(_InterlockedCompareExchange64)
100    
static JS_ALWAYS_INLINE int
NativeCompareAndSwap(jsword *w, jsword ov, jsword nv)
{
    /*
     * The intrinsic returns the previous value of *w, so comparing it
     * against ov detects whether the exchange took place.  Returns non-zero
     * iff *w was ov and has been replaced by nv.
     */
    return _InterlockedCompareExchange64(w, nv, ov) == ov;
}
106    
107 siliconforks 332 #elif defined(XP_MACOSX) || defined(DARWIN)
108    
109     #include <libkern/OSAtomic.h>
110    
static JS_ALWAYS_INLINE int
NativeCompareAndSwap(jsword *w, jsword ov, jsword nv)
{
    /* Details on these functions available in the manpage for atomic */
    /* Pointer-width CAS with a full memory barrier; true on success. */
    return OSAtomicCompareAndSwapPtrBarrier(ov, nv, w);
}
117    
118 siliconforks 507 #elif defined(__i386) && (defined(__GNUC__) || defined(__SUNPRO_CC))
119 siliconforks 332
120     /* Note: This fails on 386 cpus, cmpxchgl is a >= 486 instruction */
static JS_ALWAYS_INLINE int
NativeCompareAndSwap(jsword *w, jsword ov, jsword nv)
{
    unsigned int res;

    /*
     * lock cmpxchgl compares *w against EAX (loaded with ov); on a match it
     * stores nv and sets ZF.  sete/andl normalize ZF into res as 0 or 1.
     * "cc" and "memory" clobbers keep the compiler from caching *w across
     * the CAS.
     */
    __asm__ __volatile__ (
                          "lock\n"
                          "cmpxchgl %2, (%1)\n"
                          "sete %%al\n"
                          "andl $1, %%eax\n"
                          : "=a" (res)
                          : "r" (w), "r" (nv), "a" (ov)
                          : "cc", "memory");
    return (int)res;
}
136    
137 siliconforks 507 #elif defined(__x86_64) && (defined(__GNUC__) || defined(__SUNPRO_CC))
138    
static JS_ALWAYS_INLINE int
NativeCompareAndSwap(jsword *w, jsword ov, jsword nv)
{
    unsigned int res;

    /*
     * 64-bit variant of the i386 sequence above: lock cmpxchgq sets ZF on
     * success, sete captures it, and movzbl zero-extends AL into EAX so the
     * whole register holds 0 or 1.
     */
    __asm__ __volatile__ (
                          "lock\n"
                          "cmpxchgq %2, (%1)\n"
                          "sete %%al\n"
                          "movzbl %%al, %%eax\n"
                          : "=a" (res)
                          : "r" (w), "r" (nv), "a" (ov)
                          : "cc", "memory");
    return (int)res;
}
154    
155 siliconforks 507 #elif defined(__sparc) && (defined(__GNUC__) || defined(__SUNPRO_CC))
156 siliconforks 332
static JS_ALWAYS_INLINE int
NativeCompareAndSwap(jsword *w, jsword ov, jsword nv)
{
    unsigned int res;

    /*
     * stbar orders prior stores; cas swaps *w with the nv register iff
     * *w == ov, leaving the old memory value in that register, which the
     * cmp/be,a pair then tests (the annulled delay slot selects 1 or 0).
     * NOTE(review): unlike the x86 variants there are no "cc"/"memory"
     * clobbers declared here -- verify the compiler cannot cache *w or
     * reorder memory accesses across this asm.
     */
    __asm__ __volatile__ (
                          "stbar\n"
                          "cas [%1],%2,%3\n"
                          "cmp %2,%3\n"
                          "be,a 1f\n"
                          "mov 1,%0\n"
                          "mov 0,%0\n"
                          "1:"
                          : "=r" (res)
                          : "r" (w), "r" (ov), "r" (nv));
    return (int)res;
}
174    
175     #elif defined(AIX)
176    
177     #include <sys/atomic_op.h>
178    
static JS_ALWAYS_INLINE int
NativeCompareAndSwap(jsword *w, jsword ov, jsword nv)
{
    /* AIX _check_lock returns 0 (false) on a successful swap, hence the !. */
    return !_check_lock((atomic_p)w, ov, nv);
}
184    
185     #elif defined(USE_ARM_KUSER)
186    
187     /* See https://bugzilla.mozilla.org/show_bug.cgi?id=429387 for a
188     * description of this ABI; this is a function provided at a fixed
189     * location by the kernel in the memory space of each process.
190     */
191     typedef int (__kernel_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
192     #define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
193    
194     JS_STATIC_ASSERT(sizeof(jsword) == sizeof(int));
195    
static JS_ALWAYS_INLINE int
NativeCompareAndSwap(jsword *w, jsword ov, jsword nv)
{
    volatile int *vp = (volatile int *) w;
    PRInt32 failed = 1;

    /* Loop until a __kernel_cmpxchg succeeds. See bug 446169 */
    /*
     * The kernel helper may fail spuriously (e.g. on preemption), so retry
     * as long as it reports failure while the word still holds ov; once *vp
     * differs from ov the CAS has genuinely lost and we report failure.
     */
    do {
        failed = __kernel_cmpxchg(ov, nv, vp);
    } while (failed && *vp == ov);
    return !failed;
}
208    
209     #elif JS_HAS_NATIVE_COMPARE_AND_SWAP
210    
211     #error "JS_HAS_NATIVE_COMPARE_AND_SWAP should be 0 if your platform lacks a compare-and-swap instruction."
212    
213     #endif /* arch-tests */
214    
215     #if JS_HAS_NATIVE_COMPARE_AND_SWAP
216    
217     JSBool
218     js_CompareAndSwap(jsword *w, jsword ov, jsword nv)
219     {
220     return !!NativeCompareAndSwap(w, ov, nv);
221     }
222    
223     #elif defined(NSPR_LOCK)
224    
225     # ifdef __GNUC__
226 siliconforks 507 # warning "js_CompareAndSwap is implemented using NSPR lock"
227 siliconforks 332 # endif
228    
/*
 * Fallback CAS emulated under a single process-wide NSPR lock; correct but
 * serializes all callers.
 */
JSBool
js_CompareAndSwap(jsword *w, jsword ov, jsword nv)
{
    int result;
    static PRLock *CompareAndSwapLock = JS_NEW_LOCK();

    /*
     * NOTE(review): a function-scope static with a dynamic initializer is
     * not guaranteed race-free before C++11, and a JS_NEW_LOCK failure
     * (NULL) is never checked -- confirm both are acceptable on the
     * platforms that take this path.
     */
    JS_ACQUIRE_LOCK(CompareAndSwapLock);
    result = (*w == ov);
    if (result)
        *w = nv;
    JS_RELEASE_LOCK(CompareAndSwapLock);
    return result;
}
242    
243     #else /* !defined(NSPR_LOCK) */
244    
245     #error "NSPR_LOCK should be on when the platform lacks native compare-and-swap."
246    
247     #endif
248    
249 siliconforks 460 void
250     js_AtomicSetMask(jsword *w, jsword mask)
251     {
252     jsword ov, nv;
253    
254     do {
255     ov = *w;
256     nv = ov | mask;
257     } while (!js_CompareAndSwap(w, ov, nv));
258     }
259    
260 siliconforks 332 #ifndef NSPR_LOCK
261    
/*
 * A "fat" lock: a real NSPR lock plus condition variable, used when a thin
 * lock sees contention.  next/prevp doubly link the lock into the per-bucket
 * free/taken lists managed by GetFatlock/PutFatlock below.
 * NOTE(review): susp appears to count suspended waiters (only reset to 0 in
 * this file) -- confirm against the suspend/resume paths.
 */
struct JSFatLock {
    int susp;
    PRLock *slock;
    PRCondVar *svar;
    JSFatLock *next;
    JSFatLock **prevp;
};

/* Per-bucket pools of available (free) and in-use (taken) fat locks. */
typedef struct JSFatLockTable {
    JSFatLock *free;
    JSFatLock *taken;
} JSFatLockTable;
274    
275     #define GLOBAL_LOCK_INDEX(id) (((uint32)(jsuword)(id)>>2) & global_locks_mask)
276    
277     static void
278     js_Dequeue(JSThinLock *);
279    
280     static PRLock **global_locks;
281     static uint32 global_lock_count = 1;
282     static uint32 global_locks_log2 = 0;
283     static uint32 global_locks_mask = 0;
284    
285     static void
286     js_LockGlobal(void *id)
287     {
288     uint32 i = GLOBAL_LOCK_INDEX(id);
289     PR_Lock(global_locks[i]);
290     }
291    
292     static void
293     js_UnlockGlobal(void *id)
294     {
295     uint32 i = GLOBAL_LOCK_INDEX(id);
296     PR_Unlock(global_locks[i]);
297     }
298    
299     #endif /* !NSPR_LOCK */
300    
/* Initialize a thin lock to the unowned state. */
void
js_InitLock(JSThinLock *tl)
{
#ifdef NSPR_LOCK
    /* NSPR-only build: every thin lock is backed by a real NSPR lock. */
    tl->owner = 0;
    tl->fat = (JSFatLock*)JS_NEW_LOCK();
#else
    /* Thin-lock build: all-zero state means unowned with no fat lock. */
    memset(tl, 0, sizeof(JSThinLock));
#endif
}
311    
/* Tear down a thin lock; in thin-lock builds it must already be released. */
void
js_FinishLock(JSThinLock *tl)
{
#ifdef NSPR_LOCK
    tl->owner = 0xdeadbeef;     /* poison owner to catch use-after-finish */
    if (tl->fat)
        JS_DESTROY_LOCK(((JSLock*)tl->fat));
#else
    JS_ASSERT(tl->owner == 0);
    JS_ASSERT(tl->fat == NULL);
#endif
}
324    
325     #ifdef DEBUG_SCOPE_COUNT
326    
327     #include <stdio.h>
328     #include "jsdhash.h"
329    
330 siliconforks 460 static FILE *logfp = NULL;
331 siliconforks 332 static JSDHashTable logtbl;
332    
/* One record per logged title operation, keyed by title pointer. */
typedef struct logentry {
    JSDHashEntryStub stub;      /* hash-table header; stub.key is the title */
    char op;                    /* operation code passed to logit() */
    const char *file;           /* __FILE__ of the logging call site */
    int line;                   /* __LINE__ of the logging call site */
} logentry;
339    
/*
 * Append a title-lock event to /tmp/scope.log (opened unbuffered on first
 * use) and record it in logtbl.  Failures to open the log or grow the table
 * are silently ignored -- this is debug-only instrumentation.
 */
static void
logit(JSTitle *title, char op, const char *file, int line)
{
    logentry *entry;

    if (!logfp) {
        logfp = fopen("/tmp/scope.log", "w");
        if (!logfp)
            return;
        setvbuf(logfp, NULL, _IONBF, 0);
    }
    fprintf(logfp, "%p %d %c %s %d\n", title, title->u.count, op, file, line);

    if (!logtbl.entryStore &&
        !JS_DHashTableInit(&logtbl, JS_DHashGetStubOps(), NULL,
                           sizeof(logentry), 100)) {
        return;
    }
    entry = (logentry *) JS_DHashTableOperate(&logtbl, title, JS_DHASH_ADD);
    if (!entry)
        return;
    entry->stub.key = title;
    entry->op = op;
    entry->file = file;
    entry->line = line;
}
366    
367     void
368 siliconforks 460 js_unlog_title(JSTitle *title)
369 siliconforks 332 {
370     if (!logtbl.entryStore)
371     return;
372 siliconforks 460 (void) JS_DHashTableOperate(&logtbl, title, JS_DHASH_REMOVE);
373 siliconforks 332 }
374    
375 siliconforks 460 # define LOGIT(title,op) logit(title, op, __FILE__, __LINE__)
376 siliconforks 332
377     #else
378    
379 siliconforks 460 # define LOGIT(title, op) /* nothing */
380 siliconforks 332
381     #endif /* DEBUG_SCOPE_COUNT */
382    
383     /*
384 siliconforks 460 * Return true if we would deadlock waiting in ClaimTitle on
385     * rt->titleSharingDone until ownercx finishes its request and shares a title.
386 siliconforks 332 *
387     * (i) rt->gcLock held
388     */
/*
 * Walk the waits-for chain starting at ownercx: each owner thread may itself
 * be blocked in ClaimTitle on a title owned by yet another context.  If the
 * chain ever reaches the calling thread, waiting would close a cycle.
 */
static bool
WillDeadlock(JSContext *ownercx, JSThread *thread)
{
    JS_ASSERT(CURRENT_THREAD_IS_ME(thread));
    JS_ASSERT(ownercx->thread != thread);

    for (;;) {
        JS_ASSERT(ownercx->thread);
        JS_ASSERT(ownercx->requestDepth > 0);
        JSTitle *title = ownercx->thread->titleToShare;
        if (!title || !title->ownercx) {
            /*
             * ownercx->thread doesn't wait or has just been notified that the
             * title became shared.
             */
            return false;
        }

        /*
         * ownercx->thread is waiting in ClaimTitle for a context from some
         * thread to finish its request. If that thread is the current thread,
         * we would deadlock. Otherwise we must recursively check if that
         * thread waits for the current thread.
         */
        if (title->ownercx->thread == thread) {
            JS_RUNTIME_METER(ownercx->runtime, deadlocksAvoided);
            return true;
        }
        /* Follow the chain: now ask whether the next owner would deadlock. */
        ownercx = title->ownercx;
    }
}
420    
421 siliconforks 460 static void
422     FinishSharingTitle(JSContext *cx, JSTitle *title);
423    
424 siliconforks 332 /*
425     * Make title multi-threaded, i.e. share its ownership among contexts in rt
426     * using a "thin" or (if necessary due to contention) "fat" lock. Called only
427     * from ClaimTitle, immediately below, when we detect deadlock were we to wait
428     * for title's lock, because its ownercx is waiting on a title owned by the
429     * calling cx.
430     *
431     * (i) rt->gcLock held
432     */
static void
ShareTitle(JSContext *cx, JSTitle *title)
{
    JSRuntime *rt;
    JSTitle **todop;

    rt = cx->runtime;
    if (title->u.link) {
        /*
         * title is queued on rt->titleSharingTodo; unlink it before sharing
         * and wake any threads waiting for a title to become shared.
         */
        for (todop = &rt->titleSharingTodo; *todop != title;
             todop = &(*todop)->u.link) {
            JS_ASSERT(*todop != NO_TITLE_SHARING_TODO);
        }
        *todop = title->u.link;
        title->u.link = NULL;       /* null u.link for sanity ASAP */
        JS_NOTIFY_ALL_CONDVAR(rt->titleSharingDone);
    }
    FinishSharingTitle(cx, title);
}
451    
452     /*
453 siliconforks 460 * FinishSharingTitle is the tail part of ShareTitle, split out to become a
454     * subroutine of js_ShareWaitingTitles too. The bulk of the work here involves
455     * making mutable strings in the title's object's slots be immutable. We have
456     * to do this because such strings will soon be available to multiple threads,
457     * so their buffers can't be realloc'd any longer in js_ConcatStrings, and
458     * their members can't be modified by js_ConcatStrings, js_UndependString or
459 siliconforks 507 * MinimizeDependentStrings.
460 siliconforks 332 *
461 siliconforks 460 * The last bit of work done by this function nulls title->ownercx and updates
462     * rt->sharedTitles.
463 siliconforks 332 */
static void
FinishSharingTitle(JSContext *cx, JSTitle *title)
{
    /* Set up the real lock before anyone can see ownercx == NULL. */
    js_InitLock(&title->lock);
    title->u.count = 0;     /* NULL may not pun as 0 */

    JSScope *scope = TITLE_TO_SCOPE(title);
    JSObject *obj = scope->object;
    if (obj) {
        /* Make every string slot immutable before other threads can read it. */
        uint32 nslots = scope->freeslot;
        JS_ASSERT(nslots >= JSSLOT_START(obj->getClass()));
        for (uint32 i = JSSLOT_START(obj->getClass()); i != nslots; ++i) {
            jsval v = STOBJ_GET_SLOT(obj, i);
            if (JSVAL_IS_STRING(v) &&
                !js_MakeStringImmutable(cx, JSVAL_TO_STRING(v))) {
                /*
                 * FIXME bug 363059: The following error recovery changes
                 * runtime execution semantics, arbitrarily and silently
                 * ignoring errors except out-of-memory, which should have been
                 * reported through JS_ReportOutOfMemory at this point.
                 */
                STOBJ_SET_SLOT(obj, i, JSVAL_VOID);
            }
        }
    }

    title->ownercx = NULL;  /* NB: set last, after lock init */
    JS_RUNTIME_METER(cx->runtime, sharedTitles);
}
493    
494     /*
495 siliconforks 460 * Notify all contexts that are currently in a request, which will give them a
496     * chance to yield their current request.
497     */
498     void
499     js_NudgeOtherContexts(JSContext *cx)
500     {
501     JSRuntime *rt = cx->runtime;
502     JSContext *acx = NULL;
503    
504     while ((acx = js_NextActiveContext(rt, acx)) != NULL) {
505     if (cx != acx)
506     JS_TriggerOperationCallback(acx);
507     }
508     }
509    
510     /*
511     * Notify all contexts that are currently in a request and execute on this
512     * specific thread.
513     */
514     static void
515     NudgeThread(JSThread *thread)
516     {
517     JSCList *link;
518     JSContext *acx;
519    
520     link = &thread->contextList;
521     while ((link = link->next) != &thread->contextList) {
522     acx = CX_FROM_THREAD_LINKS(link);
523     JS_ASSERT(acx->thread == thread);
524     if (acx->requestDepth)
525     JS_TriggerOperationCallback(acx);
526     }
527     }
528    
529     /*
530 siliconforks 332 * Given a title with apparently non-null ownercx different from cx, try to
531     * set ownercx to cx, claiming exclusive (single-threaded) ownership of title.
532     * If we claim ownership, return true. Otherwise, we wait for ownercx to be
533     * set to null (indicating that title is multi-threaded); or if waiting would
534     * deadlock, we set ownercx to null ourselves via ShareTitle. In any case,
535     * once ownercx is null we return false.
536     */
static JSBool
ClaimTitle(JSTitle *title, JSContext *cx)
{
    JSRuntime *rt;
    JSContext *ownercx;
    uint32 requestDebit;

    rt = cx->runtime;
    JS_RUNTIME_METER(rt, claimAttempts);
    JS_LOCK_GC(rt);

    /* Reload in case ownercx went away while we blocked on the lock. */
    while ((ownercx = title->ownercx) != NULL) {
        /*
         * Avoid selflock if ownercx is dead, or is not running a request, or
         * has the same thread as cx. Set title->ownercx to cx so that the
         * matching JS_UNLOCK_SCOPE or JS_UNLOCK_OBJ macro call will take the
         * fast path around the corresponding js_UnlockTitle or js_UnlockObj
         * function call.
         *
         * If title->u.link is non-null, title has already been inserted on
         * the rt->titleSharingTodo list, because another thread's context
         * already wanted to lock title while ownercx was running a request.
         * That context must still be in request and cannot be dead. We can
         * claim it if its thread matches ours but only if cx itself is in a
         * request.
         *
         * The latter check covers the case when the embedding triggers a call
         * to js_GC on a cx outside a request while having ownercx running a
         * request on the same thread, and then js_GC calls a mark hook or a
         * finalizer accessing the title. In this case we cannot claim the
         * title but must share it now as no title-sharing JS_EndRequest will
         * follow.
         */
        bool canClaim;
        if (title->u.link) {
            JS_ASSERT(js_ValidContextPointer(rt, ownercx));
            JS_ASSERT(ownercx->requestDepth > 0);
            JS_ASSERT_IF(cx->requestDepth == 0, cx->thread == rt->gcThread);
            canClaim = (ownercx->thread == cx->thread &&
                        cx->requestDepth > 0);
        } else {
            canClaim = (!js_ValidContextPointer(rt, ownercx) ||
                        !ownercx->requestDepth ||
                        ownercx->thread == cx->thread);
        }
        if (canClaim) {
            title->ownercx = cx;
            JS_UNLOCK_GC(rt);
            JS_RUNTIME_METER(rt, claimedTitles);
            return JS_TRUE;
        }

        /*
         * Avoid deadlock if title's owner thread is waiting on a title that
         * the current thread owns, by revoking title's ownership. This
         * approach to deadlock avoidance works because the engine never nests
         * title locks.
         *
         * If cx->thread could hold locks on ownercx->thread->titleToShare, or
         * if ownercx->thread could hold locks on title, we would need to keep
         * reentrancy counts for all such "flyweight" (ownercx != NULL) locks,
         * so that control would unwind properly once these locks became
         * "thin" or "fat". The engine promotes a title from exclusive to
         * shared access only when locking, never when holding or unlocking.
         *
         * Avoid deadlock before any of this title/context cycle detection if
         * cx is on the active GC's thread, because in that case, no requests
         * will run until the GC completes. Any title wanted by the GC (from
         * a finalizer or a mark hook) that can't be claimed must become
         * shared.
         */
        if (rt->gcThread == cx->thread || WillDeadlock(ownercx, cx->thread)) {
            ShareTitle(cx, title);
            break;
        }

        /*
         * Thanks to the non-zero NO_TITLE_SHARING_TODO link terminator, we
         * can decide whether title is on rt->titleSharingTodo with a single
         * non-null test, and avoid double-insertion bugs.
         */
        if (!title->u.link) {
            /* Hold the scope so the GC can't collect it while it's queued. */
            TITLE_TO_SCOPE(title)->hold();
            title->u.link = rt->titleSharingTodo;
            rt->titleSharingTodo = title;
        }

        /*
         * Discount all the requests running on the current thread so a
         * possible GC can proceed on another thread while we wait on
         * rt->titleSharingDone.
         */
        requestDebit = js_DiscountRequestsForGC(cx);
        if (title->ownercx != ownercx) {
            /*
             * js_DiscountRequestsForGC released and reacquired the GC lock,
             * and the title was taken or shared. Start over.
             */
            js_RecountRequestsAfterGC(rt, requestDebit);
            continue;
        }

        /*
         * We know that some other thread's context owns title, which is now
         * linked onto rt->titleSharingTodo, awaiting the end of that other
         * thread's request. So it is safe to wait on rt->titleSharingDone.
         * But before waiting, we force the operation callback for that other
         * thread so it can quickly suspend.
         */
        NudgeThread(ownercx->thread);

        JS_ASSERT(!cx->thread->titleToShare);
        cx->thread->titleToShare = title;
#ifdef DEBUG
        PRStatus stat =
#endif
        PR_WaitCondVar(rt->titleSharingDone, PR_INTERVAL_NO_TIMEOUT);
        JS_ASSERT(stat != PR_FAILURE);

        js_RecountRequestsAfterGC(rt, requestDebit);

        /*
         * Don't clear titleToShare until after we're through waiting on
         * all condition variables protected by rt->gcLock -- that includes
         * rt->titleSharingDone *and* rt->gcDone (hidden in the call to
         * js_RecountRequestsAfterGC immediately above).
         *
         * Otherwise, the GC could easily deadlock with another thread that
         * owns a title wanted by a finalizer. By keeping cx->titleToShare
         * set till here, we ensure that such deadlocks are detected, which
         * results in the finalized object's title being shared (it must, of
         * course, have other, live objects sharing it).
         */
        cx->thread->titleToShare = NULL;
    }

    JS_UNLOCK_GC(rt);
    return JS_FALSE;
}
677    
/*
 * Share every single-threaded title owned by cx that other threads are
 * waiting on (queued on rt->titleSharingTodo), then wake the waiters.
 */
void
js_ShareWaitingTitles(JSContext *cx)
{
    JSTitle *title, **todop;
    bool shared;

    /* See whether cx has any single-threaded titles to start sharing. */
    todop = &cx->runtime->titleSharingTodo;
    shared = false;
    while ((title = *todop) != NO_TITLE_SHARING_TODO) {
        if (title->ownercx != cx) {
            todop = &title->u.link;
            continue;
        }
        *todop = title->u.link;
        title->u.link = NULL;       /* null u.link for sanity ASAP */

        /*
         * If JSScope::drop returns false, we held the last ref to scope. The
         * waiting thread(s) must have been killed, after which the GC
         * collected the object that held this scope. Unlikely, because it
         * requires that the GC ran (e.g., from an operation callback)
         * during this request, but possible.
         */
        if (TITLE_TO_SCOPE(title)->drop(cx, NULL)) {
            FinishSharingTitle(cx, title); /* set ownercx = NULL */
            shared = true;
        }
    }
    if (shared)
        JS_NOTIFY_ALL_CONDVAR(cx->runtime->titleSharingDone);
}
710    
711 siliconforks 332 /* Exported to js.c, which calls it via OBJ_GET_* and JSVAL_IS_* macros. */
JS_FRIEND_API(jsval)
js_GetSlotThreadSafe(JSContext *cx, JSObject *obj, uint32 slot)
{
    jsval v;
    JSScope *scope;
    JSTitle *title;
#ifndef NSPR_LOCK
    JSThinLock *tl;
    jsword me;
#endif

    OBJ_CHECK_SLOT(obj, slot);

    /*
     * Native object locking is inlined here to optimize the single-threaded
     * and contention-free multi-threaded cases.
     */
    scope = OBJ_SCOPE(obj);
    title = &scope->title;
    JS_ASSERT(title->ownercx != cx);
    JS_ASSERT(slot < scope->freeslot);

    /*
     * Avoid locking if called from the GC. Also avoid locking an object
     * owning a sealed scope. If neither of those special cases applies, try
     * to claim scope's flyweight lock from whatever context may have had it in
     * an earlier request.
     */
    if (CX_THREAD_IS_RUNNING_GC(cx) ||
        scope->sealed() ||
        (title->ownercx && ClaimTitle(title, cx))) {
        return STOBJ_GET_SLOT(obj, slot);
    }

#ifndef NSPR_LOCK
    tl = &title->lock;
    me = CX_THINLOCK_ID(cx);
    JS_ASSERT(CURRENT_THREAD_IS_ME(me));
    if (NativeCompareAndSwap(&tl->owner, 0, me)) {
        /*
         * Got the lock with one compare-and-swap. Even so, someone else may
         * have mutated obj so it now has its own scope and lock, which would
         * require either a restart from the top of this routine, or a thin
         * lock release followed by fat lock acquisition.
         */
        if (scope == OBJ_SCOPE(obj)) {
            v = STOBJ_GET_SLOT(obj, slot);
            /*
             * If the release CAS fails, a waiter arrived while we held the
             * lock; convert to a held (count == 1) lock and release slowly.
             */
            if (!NativeCompareAndSwap(&tl->owner, me, 0)) {
                /* Assert that scope locks never revert to flyweight. */
                JS_ASSERT(title->ownercx != cx);
                LOGIT(title, '1');
                title->u.count = 1;
                js_UnlockObj(cx, obj);
            }
            return v;
        }
        /* Scope changed under us: release, waking waiters if CAS fails. */
        if (!NativeCompareAndSwap(&tl->owner, me, 0))
            js_Dequeue(tl);
    }
    else if (Thin_RemoveWait(ReadWord(tl->owner)) == me) {
        /* We already own the thin lock (reentrant fast path). */
        return STOBJ_GET_SLOT(obj, slot);
    }
#endif

    js_LockObj(cx, obj);
    v = STOBJ_GET_SLOT(obj, slot);

    /*
     * Test whether cx took ownership of obj's scope during js_LockObj.
     *
     * This does not mean that a given scope reverted to flyweight from "thin"
     * or "fat" -- it does mean that obj's map pointer changed due to another
     * thread setting a property, requiring obj to cease sharing a prototype
     * object's scope (whose lock was not flyweight, else we wouldn't be here
     * in the first place!).
     */
    title = &OBJ_SCOPE(obj)->title;
    if (title->ownercx != cx)
        js_UnlockTitle(cx, title);
    return v;
}
793    
/* Write slot v of obj under the same inlined locking protocol as the getter. */
void
js_SetSlotThreadSafe(JSContext *cx, JSObject *obj, uint32 slot, jsval v)
{
    JSTitle *title;
    JSScope *scope;
#ifndef NSPR_LOCK
    JSThinLock *tl;
    jsword me;
#endif

    OBJ_CHECK_SLOT(obj, slot);

    /* Any string stored in a thread-safe object must be immutable. */
    if (JSVAL_IS_STRING(v) &&
        !js_MakeStringImmutable(cx, JSVAL_TO_STRING(v))) {
        /* FIXME bug 363059: See comments in js_FinishSharingScope. */
        v = JSVAL_NULL;
    }

    /*
     * Native object locking is inlined here to optimize the single-threaded
     * and contention-free multi-threaded cases.
     */
    scope = OBJ_SCOPE(obj);
    title = &scope->title;
    JS_ASSERT(title->ownercx != cx);
    JS_ASSERT(slot < scope->freeslot);

    /*
     * Avoid locking if called from the GC. Also avoid locking an object
     * owning a sealed scope. If neither of those special cases applies, try
     * to claim scope's flyweight lock from whatever context may have had it in
     * an earlier request.
     */
    if (CX_THREAD_IS_RUNNING_GC(cx) ||
        scope->sealed() ||
        (title->ownercx && ClaimTitle(title, cx))) {
        LOCKED_OBJ_WRITE_SLOT(cx, obj, slot, v);
        return;
    }

#ifndef NSPR_LOCK
    tl = &title->lock;
    me = CX_THINLOCK_ID(cx);
    JS_ASSERT(CURRENT_THREAD_IS_ME(me));
    if (NativeCompareAndSwap(&tl->owner, 0, me)) {
        /* Got the thin lock; recheck that obj still uses this scope. */
        if (scope == OBJ_SCOPE(obj)) {
            LOCKED_OBJ_WRITE_SLOT(cx, obj, slot, v);
            /*
             * If the release CAS fails, a waiter arrived while we held the
             * lock; convert to a held (count == 1) lock and release slowly.
             */
            if (!NativeCompareAndSwap(&tl->owner, me, 0)) {
                /* Assert that scope locks never revert to flyweight. */
                JS_ASSERT(title->ownercx != cx);
                LOGIT(title, '1');
                title->u.count = 1;
                js_UnlockObj(cx, obj);
            }
            return;
        }
        /* Scope changed under us: release, waking waiters if CAS fails. */
        if (!NativeCompareAndSwap(&tl->owner, me, 0))
            js_Dequeue(tl);
    }
    else if (Thin_RemoveWait(ReadWord(tl->owner)) == me) {
        /* We already own the thin lock (reentrant fast path). */
        LOCKED_OBJ_WRITE_SLOT(cx, obj, slot, v);
        return;
    }
#endif

    js_LockObj(cx, obj);
    LOCKED_OBJ_WRITE_SLOT(cx, obj, slot, v);

    /*
     * Same drill as above, in js_GetSlotThreadSafe.
     */
    title = &OBJ_SCOPE(obj)->title;
    if (title->ownercx != cx)
        js_UnlockTitle(cx, title);
}
870    
871     #ifndef NSPR_LOCK
872    
873     static JSFatLock *
874     NewFatlock()
875     {
876     JSFatLock *fl = (JSFatLock *)malloc(sizeof(JSFatLock)); /* for now */
877     if (!fl) return NULL;
878     fl->susp = 0;
879     fl->next = NULL;
880     fl->prevp = NULL;
881     fl->slock = PR_NewLock();
882     fl->svar = PR_NewCondVar(fl->slock);
883     return fl;
884     }
885    
/*
 * Destroy a fat lock's NSPR primitives and free the record itself.
 * Counterpart of NewFatlock.
 */
static void
DestroyFatlock(JSFatLock *fl)
{
    PR_DestroyLock(fl->slock);
    PR_DestroyCondVar(fl->svar);
    js_free(fl);
}
893    
894     static JSFatLock *
895     ListOfFatlocks(int listc)
896     {
897     JSFatLock *m;
898     JSFatLock *m0;
899     int i;
900    
901     JS_ASSERT(listc>0);
902     m0 = m = NewFatlock();
903     for (i=1; i<listc; i++) {
904     m->next = NewFatlock();
905     m = m->next;
906     }
907     return m0;
908     }
909    
910     static void
911     DeleteListOfFatlocks(JSFatLock *m)
912     {
913     JSFatLock *m0;
914     for (; m; m=m0) {
915     m0 = m->next;
916     DestroyFatlock(m);
917     }
918     }
919    
920     static JSFatLockTable *fl_list_table = NULL;
921     static uint32 fl_list_table_len = 0;
922     static uint32 fl_list_chunk_len = 0;
923    
/*
 * Take a fat lock for id from the free list of the global-lock bucket that
 * id hashes into, replenishing the bucket with a fresh chunk when empty, and
 * push it onto the bucket's doubly-linked "taken" list. Caller holds the
 * corresponding global lock (see js_SuspendThread).
 */
static JSFatLock *
GetFatlock(void *id)
{
    JSFatLock *m;

    uint32 i = GLOBAL_LOCK_INDEX(id);
    if (fl_list_table[i].free == NULL) {
#ifdef DEBUG
        if (fl_list_table[i].taken)
            printf("Ran out of fat locks!\n");
#endif
        /* NOTE(review): assumes ListOfFatlocks succeeds; OOM here would
           leave free == NULL and crash below — confirm OOM policy. */
        fl_list_table[i].free = ListOfFatlocks(fl_list_chunk_len);
    }
    m = fl_list_table[i].free;
    fl_list_table[i].free = m->next;
    m->susp = 0;
    /* Link m at the head of taken; prevp points at the link pointing at m. */
    m->next = fl_list_table[i].taken;
    m->prevp = &fl_list_table[i].taken;
    if (fl_list_table[i].taken)
        fl_list_table[i].taken->prevp = &m->next;
    fl_list_table[i].taken = m;
    return m;
}
947    
/*
 * Return fat lock m (previously obtained via GetFatlock with the same id) to
 * its bucket's free list, unlinking it from the "taken" list first.
 * NULL-safe. Caller holds the corresponding global lock.
 */
static void
PutFatlock(JSFatLock *m, void *id)
{
    uint32 i;
    if (m == NULL)
        return;

    /* Unlink m from fl_list_table[i].taken. */
    *m->prevp = m->next;
    if (m->next)
        m->next->prevp = m->prevp;

    /* Insert m in fl_list_table[i].free. */
    i = GLOBAL_LOCK_INDEX(id);
    m->next = fl_list_table[i].free;
    fl_list_table[i].free = m;
}
965    
966     #endif /* !NSPR_LOCK */
967    
968     JSBool
969     js_SetupLocks(int listc, int globc)
970     {
971     #ifndef NSPR_LOCK
972     uint32 i;
973    
974     if (global_locks)
975     return JS_TRUE;
976     #ifdef DEBUG
977     if (listc > 10000 || listc < 0) /* listc == fat lock list chunk length */
978     printf("Bad number %d in js_SetupLocks()!\n", listc);
979     if (globc > 100 || globc < 0) /* globc == number of global locks */
980     printf("Bad number %d in js_SetupLocks()!\n", listc);
981     #endif
982     global_locks_log2 = JS_CeilingLog2(globc);
983     global_locks_mask = JS_BITMASK(global_locks_log2);
984     global_lock_count = JS_BIT(global_locks_log2);
985 siliconforks 507 global_locks = (PRLock **) js_malloc(global_lock_count * sizeof(PRLock*));
986 siliconforks 332 if (!global_locks)
987     return JS_FALSE;
988     for (i = 0; i < global_lock_count; i++) {
989     global_locks[i] = PR_NewLock();
990     if (!global_locks[i]) {
991     global_lock_count = i;
992     js_CleanupLocks();
993     return JS_FALSE;
994     }
995     }
996 siliconforks 507 fl_list_table = (JSFatLockTable *) js_malloc(i * sizeof(JSFatLockTable));
997 siliconforks 332 if (!fl_list_table) {
998     js_CleanupLocks();
999     return JS_FALSE;
1000     }
1001     fl_list_table_len = global_lock_count;
1002     for (i = 0; i < global_lock_count; i++)
1003     fl_list_table[i].free = fl_list_table[i].taken = NULL;
1004     fl_list_chunk_len = listc;
1005     #endif /* !NSPR_LOCK */
1006     return JS_TRUE;
1007     }
1008    
1009     void
1010     js_CleanupLocks()
1011     {
1012     #ifndef NSPR_LOCK
1013     uint32 i;
1014    
1015     if (global_locks) {
1016     for (i = 0; i < global_lock_count; i++)
1017     PR_DestroyLock(global_locks[i]);
1018 siliconforks 507 js_free(global_locks);
1019 siliconforks 332 global_locks = NULL;
1020     global_lock_count = 1;
1021     global_locks_log2 = 0;
1022     global_locks_mask = 0;
1023     }
1024     if (fl_list_table) {
1025     for (i = 0; i < fl_list_table_len; i++) {
1026     DeleteListOfFatlocks(fl_list_table[i].free);
1027     fl_list_table[i].free = NULL;
1028     DeleteListOfFatlocks(fl_list_table[i].taken);
1029     fl_list_table[i].taken = NULL;
1030     }
1031 siliconforks 507 js_free(fl_list_table);
1032 siliconforks 332 fl_list_table = NULL;
1033     fl_list_table_len = 0;
1034     }
1035     #endif /* !NSPR_LOCK */
1036     }
1037    
1038     #ifdef NSPR_LOCK
1039    
/*
 * NSPR_LOCK build: a "thin" lock is just the fat lock. Acquire it, then
 * record the owner (the store must follow the acquire).
 */
static JS_ALWAYS_INLINE void
ThinLock(JSThinLock *tl, jsword me)
{
    JS_ACQUIRE_LOCK((JSLock *) tl->fat);
    tl->owner = me;
}
1046    
/*
 * NSPR_LOCK build: clear the owner while still holding the fat lock, then
 * release it.
 */
static JS_ALWAYS_INLINE void
ThinUnlock(JSThinLock *tl, jsword /*me*/)
{
    tl->owner = 0;
    JS_RELEASE_LOCK((JSLock *) tl->fat);
}
1053    
1054     #else
1055    
1056     /*
1057     * Fast locking and unlocking is implemented by delaying the allocation of a
1058     * system lock (fat lock) until contention. As long as a locking thread A
1059     * runs uncontended, the lock is represented solely by storing A's identity in
1060     * the object being locked.
1061     *
1062     * If another thread B tries to lock the object currently locked by A, B is
1063     * enqueued into a fat lock structure (which might have to be allocated and
 * pointed to by the object), and suspended using NSPR condition variables
1065     * (wait). A wait bit (Bacon bit) is set in the lock word of the object,
1066     * signalling to A that when releasing the lock, B must be dequeued and
1067     * notified.
1068     *
1069     * The basic operation of the locking primitives (js_Lock, js_Unlock,
1070     * js_Enqueue, and js_Dequeue) is compare-and-swap. Hence, when locking into
1071     * the word pointed at by p, compare-and-swap(p, 0, A) success implies that p
1072     * is unlocked. Similarly, when unlocking p, if compare-and-swap(p, A, 0)
1073     * succeeds this implies that p is uncontended (no one is waiting because the
1074     * wait bit is not set).
1075     *
1076     * When dequeueing, the lock is released, and one of the threads suspended on
1077     * the lock is notified. If other threads still are waiting, the wait bit is
1078     * kept (in js_Enqueue), and if not, the fat lock is deallocated.
1079     *
1080     * The functions js_Enqueue, js_Dequeue, js_SuspendThread, and js_ResumeThread
1081     * are serialized using a global lock. For scalability, a hashtable of global
1082     * locks is used, which is indexed modulo the thin lock pointer.
1083     */
1084    
/*
 * Block the calling thread on tl's fat lock until it is notified by
 * js_ResumeThread.
 * Invariants:
 *  (i) global lock is held
 *  (ii) fl->susp >= 0
 * Returns nonzero iff the fat lock was deallocated (this was the last
 * waiter), which tells js_Enqueue it may claim tl without the wait bit.
 */
static int
js_SuspendThread(JSThinLock *tl)
{
    JSFatLock *fl;
    PRStatus stat;

    /* Lazily attach a fat lock to tl on first contention. */
    if (tl->fat == NULL)
        fl = tl->fat = GetFatlock(tl);
    else
        fl = tl->fat;
    JS_ASSERT(fl->susp >= 0);
    fl->susp++;
    /*
     * Take fl's own mutex before dropping the global lock so a notify issued
     * under the same mutex in js_ResumeThread cannot be lost.
     */
    PR_Lock(fl->slock);
    js_UnlockGlobal(tl);
    stat = PR_WaitCondVar(fl->svar, PR_INTERVAL_NO_TIMEOUT);
    JS_ASSERT(stat != PR_FAILURE);
    PR_Unlock(fl->slock);
    js_LockGlobal(tl);
    fl->susp--;
    /* Last waiter out returns the fat lock to its bucket's free list. */
    if (fl->susp == 0) {
        PutFatlock(fl, tl);
        tl->fat = NULL;
    }
    return tl->fat == NULL;
}
1115    
/*
 * Wake one thread suspended on tl's fat lock. Releases the global lock
 * (mirroring the slock/global-lock ordering in js_SuspendThread).
 * Invariants on entry:
 *  (i) global lock is held
 *  (ii) fl->susp > 0
 */
static void
js_ResumeThread(JSThinLock *tl)
{
    JSFatLock *fl = tl->fat;
    PRStatus stat;

    JS_ASSERT(fl != NULL);
    JS_ASSERT(fl->susp > 0);
    PR_Lock(fl->slock);
    js_UnlockGlobal(tl);
    stat = PR_NotifyCondVar(fl->svar);
    JS_ASSERT(stat != PR_FAILURE);
    PR_Unlock(fl->slock);
}
1134    
/*
 * Slow path of ThinLock: loop until thread me owns tl. If tl is held, set
 * the wait bit in the owner word and suspend; if free, claim it with a
 * compare-and-swap. Serialized by the per-bucket global lock, which is held
 * across the loop and released on exit (js_SuspendThread also drops and
 * retakes it around the wait).
 */
static void
js_Enqueue(JSThinLock *tl, jsword me)
{
    jsword o, n;

    js_LockGlobal(tl);
    for (;;) {
        o = ReadWord(tl->owner);
        n = Thin_SetWait(o);
        if (o != 0 && NativeCompareAndSwap(&tl->owner, o, n)) {
            /*
             * Lock is held; wait. If we were the last waiter (fat lock was
             * deallocated) retry with a clean id, otherwise keep the wait
             * bit so the eventual unlocker dequeues remaining waiters.
             */
            if (js_SuspendThread(tl))
                me = Thin_RemoveWait(me);
            else
                me = Thin_SetWait(me);
        }
        else if (NativeCompareAndSwap(&tl->owner, 0, me)) {
            js_UnlockGlobal(tl);
            return;
        }
    }
}
1156    
/*
 * Slow path of ThinUnlock: release tl (whose wait bit must be set) and wake
 * one suspended waiter. Takes the global lock here; it is released inside
 * js_ResumeThread.
 */
static void
js_Dequeue(JSThinLock *tl)
{
    jsword o;

    js_LockGlobal(tl);
    o = ReadWord(tl->owner);
    JS_ASSERT(Thin_GetWait(o) != 0);
    JS_ASSERT(tl->fat != NULL);
    if (!NativeCompareAndSwap(&tl->owner, o, 0)) /* release it */
        JS_ASSERT(0);
    js_ResumeThread(tl);
}
1170    
/*
 * Acquire tl for thread me. Fast path: one compare-and-swap from unowned
 * (0) to me. On contention, enqueue. Recursive locking through this layer
 * is a bug (callers maintain their own count), hence the DEBUG assert when
 * the current owner is already me.
 */
static JS_ALWAYS_INLINE void
ThinLock(JSThinLock *tl, jsword me)
{
    JS_ASSERT(CURRENT_THREAD_IS_ME(me));
    if (NativeCompareAndSwap(&tl->owner, 0, me))
        return;
    if (Thin_RemoveWait(ReadWord(tl->owner)) != me)
        js_Enqueue(tl, me);
#ifdef DEBUG
    else
        JS_ASSERT(0);
#endif
}
1184    
/*
 * Release tl held by thread me. Fast path: compare-and-swap owner from me
 * back to 0; if that fails the wait bit must be set, so dequeue a waiter.
 */
static JS_ALWAYS_INLINE void
ThinUnlock(JSThinLock *tl, jsword me)
{
    JS_ASSERT(CURRENT_THREAD_IS_ME(me));

    /*
     * Since we can race with the NativeCompareAndSwap in js_Enqueue, we need
     * to use a C_A_S here as well -- Arjan van de Ven 30/1/08
     */
    if (NativeCompareAndSwap(&tl->owner, me, 0))
        return;

    JS_ASSERT(Thin_GetWait(tl->owner));
    if (Thin_RemoveWait(ReadWord(tl->owner)) == me)
        js_Dequeue(tl);
#ifdef DEBUG
    else
        JS_ASSERT(0);   /* unbalanced unlock */
#endif
}
1205    
1206     #endif /* !NSPR_LOCK */
1207    
/* Acquire tl on behalf of cx's thread. */
void
js_Lock(JSContext *cx, JSThinLock *tl)
{
    ThinLock(tl, CX_THINLOCK_ID(cx));
}
1213    
/* Release tl on behalf of cx's thread. */
void
js_Unlock(JSContext *cx, JSThinLock *tl)
{
    ThinUnlock(tl, CX_THINLOCK_ID(cx));
}
1219    
/*
 * Acquire the per-runtime lock; in DEBUG builds record the owner (inside
 * the critical section) for js_IsRuntimeLocked.
 */
void
js_LockRuntime(JSRuntime *rt)
{
    PR_Lock(rt->rtLock);
#ifdef DEBUG
    rt->rtLockOwner = js_CurrentThreadId();
#endif
}
1228    
/*
 * Release the per-runtime lock; the DEBUG owner field is cleared first,
 * while the lock is still held.
 */
void
js_UnlockRuntime(JSRuntime *rt)
{
#ifdef DEBUG
    rt->rtLockOwner = 0;
#endif
    PR_Unlock(rt->rtLock);
}
1237    
1238     void
1239     js_LockTitle(JSContext *cx, JSTitle *title)
1240     {
1241     jsword me = CX_THINLOCK_ID(cx);
1242    
1243     JS_ASSERT(CURRENT_THREAD_IS_ME(me));
1244     JS_ASSERT(title->ownercx != cx);
1245     if (CX_THREAD_IS_RUNNING_GC(cx))
1246     return;
1247     if (title->ownercx && ClaimTitle(title, cx))
1248     return;
1249    
1250     if (Thin_RemoveWait(ReadWord(title->lock.owner)) == me) {
1251     JS_ASSERT(title->u.count > 0);
1252     LOGIT(scope, '+');
1253     title->u.count++;
1254     } else {
1255     ThinLock(&title->lock, me);
1256     JS_ASSERT(title->u.count == 0);
1257     LOGIT(scope, '1');
1258     title->u.count = 1;
1259     }
1260     }
1261    
/*
 * Unlock title for cx, undoing one js_LockTitle (or one sealed-scope claim
 * made by js_LockObj). Decrements the recursion count and releases the thin
 * lock when it reaches zero.
 */
void
js_UnlockTitle(JSContext *cx, JSTitle *title)
{
    jsword me = CX_THINLOCK_ID(cx);

    /* We hope compilers use me instead of reloading cx->thread in the macro. */
    if (CX_THREAD_IS_RUNNING_GC(cx))
        return;
    /* Sealed-scope "locks" carry no thin-lock state; just clear the marker. */
    if (cx->lockedSealedTitle == title) {
        cx->lockedSealedTitle = NULL;
        return;
    }

    /*
     * If title->ownercx is not null, it's likely that two contexts not using
     * requests nested locks for title. The first context, cx here, claimed
     * title; the second, title->ownercx here, re-claimed it because the first
     * was not in a request, or was on the same thread. We don't want to keep
     * track of such nesting, because it penalizes the common non-nested case.
     * Instead of asserting here and silently coping, we simply re-claim title
     * for cx and return.
     *
     * See http://bugzilla.mozilla.org/show_bug.cgi?id=229200 for a real world
     * case where an asymmetric thread model (Mozilla's main thread is known
     * to be the only thread that runs the GC) combined with multiple contexts
     * per thread has led to such request-less nesting.
     */
    if (title->ownercx) {
        JS_ASSERT(title->u.count == 0);
        JS_ASSERT(title->lock.owner == 0);
        title->ownercx = cx;
        return;
    }

    JS_ASSERT(title->u.count > 0);
    if (Thin_RemoveWait(ReadWord(title->lock.owner)) != me) {
        JS_ASSERT(0);   /* unbalanced unlock */
        return;
    }
    LOGIT(title, '-');
    if (--title->u.count == 0)
        ThinUnlock(&title->lock, me);
}
1305    
/*
 * Transfer lock state from oldtitle to newtitle when an object's scope (and
 * hence its title) changes while locked. Caller must hold newtitle.
 *
 * NB: oldtitle may be null if our caller is js_GetMutableScope and it just
 * dropped the last reference to oldtitle.
 */
void
js_TransferTitle(JSContext *cx, JSTitle *oldtitle, JSTitle *newtitle)
{
    JS_ASSERT(JS_IS_TITLE_LOCKED(cx, newtitle));

    /*
     * If the last reference to oldtitle went away, newtitle needs no lock
     * state update.
     */
    if (!oldtitle)
        return;
    JS_ASSERT(JS_IS_TITLE_LOCKED(cx, oldtitle));

    /*
     * Special case in js_LockTitle and js_UnlockTitle for the GC calling
     * code that locks, unlocks, or mutates. Nothing to do in these cases,
     * because title and newtitle were "locked" by the GC thread, so neither
     * was actually locked.
     */
    if (CX_THREAD_IS_RUNNING_GC(cx))
        return;

    /*
     * Special case in js_LockObj and js_UnlockTitle for locking the sealed
     * scope of an object that owns that scope (the prototype or mutated obj
     * for which OBJ_SCOPE(obj)->object == obj), and unlocking it.
     */
    JS_ASSERT(cx->lockedSealedTitle != newtitle);
    if (cx->lockedSealedTitle == oldtitle) {
        JS_ASSERT(newtitle->ownercx == cx ||
                  (!newtitle->ownercx && newtitle->u.count == 1));
        cx->lockedSealedTitle = NULL;
        return;
    }

    /*
     * If oldtitle is single-threaded, there's nothing to do.
     */
    if (oldtitle->ownercx) {
        JS_ASSERT(oldtitle->ownercx == cx);
        JS_ASSERT(newtitle->ownercx == cx ||
                  (!newtitle->ownercx && newtitle->u.count == 1));
        return;
    }

    /*
     * We transfer oldtitle->u.count only if newtitle is not single-threaded.
     * Flow unwinds from here through some number of JS_UNLOCK_TITLE and/or
     * JS_UNLOCK_OBJ macro calls, which will decrement newtitle->u.count only
     * if they find newtitle->ownercx != cx.
     */
    if (newtitle->ownercx != cx) {
        JS_ASSERT(!newtitle->ownercx);
        newtitle->u.count = oldtitle->u.count;
    }

    /*
     * Reset oldtitle's lock state so that it is completely unlocked.
     */
    LOGIT(oldtitle, '0');
    oldtitle->u.count = 0;
    ThinUnlock(&oldtitle->lock, CX_THINLOCK_ID(cx));
}
1373    
/*
 * Lock obj's scope title on behalf of cx, retrying if a mutator swaps obj's
 * scope underneath us. An object owning a sealed scope is "locked" merely by
 * recording its title in cx->lockedSealedTitle (see js_UnlockTitle).
 */
void
js_LockObj(JSContext *cx, JSObject *obj)
{
    JSScope *scope;
    JSTitle *title;

    JS_ASSERT(OBJ_IS_NATIVE(obj));

    /*
     * We must test whether the GC is calling and return without mutating any
     * state, especially cx->lockedSealedScope. Note asymmetry with respect to
     * js_UnlockObj, which is a thin-layer on top of js_UnlockTitle.
     */
    if (CX_THREAD_IS_RUNNING_GC(cx))
        return;

    for (;;) {
        scope = OBJ_SCOPE(obj);
        title = &scope->title;
        if (scope->sealed() && !cx->lockedSealedTitle) {
            cx->lockedSealedTitle = title;
            return;
        }

        js_LockTitle(cx, title);

        /* If obj still has this scope, we're done. */
        if (scope == OBJ_SCOPE(obj))
            return;

        /* Lost a race with a mutator; retry with obj's new scope. */
        js_UnlockTitle(cx, title);
    }
}
1408    
/* Unlock obj's scope title; thin layer over js_UnlockTitle. */
void
js_UnlockObj(JSContext *cx, JSObject *obj)
{
    JS_ASSERT(OBJ_IS_NATIVE(obj));
    js_UnlockTitle(cx, &OBJ_SCOPE(obj)->title);
}
1415    
1416 siliconforks 507 bool
1417     js_LockObjIfShape(JSContext *cx, JSObject *obj, uint32 shape)
1418     {
1419     JS_ASSERT(OBJ_SCOPE(obj)->title.ownercx != cx);
1420     js_LockObj(cx, obj);
1421     if (OBJ_SHAPE(obj) == shape)
1422     return true;
1423     js_UnlockObj(cx, obj);
1424     return false;
1425     }
1426    
/*
 * Initialize title as exclusively owned by cx, with a zeroed (unheld) thin
 * lock and empty DEBUG bookkeeping.
 */
void
js_InitTitle(JSContext *cx, JSTitle *title)
{
#ifdef JS_THREADSAFE
    title->ownercx = cx;
    memset(&title->lock, 0, sizeof title->lock);

    /*
     * Set u.link = NULL, not u.count = 0, in case the target architecture's
     * null pointer has a non-zero integer representation.
     */
    title->u.link = NULL;

#ifdef JS_DEBUG_TITLE_LOCKS
    title->file[0] = title->file[1] = title->file[2] = title->file[3] = NULL;
    title->line[0] = title->line[1] = title->line[2] = title->line[3] = 0;
#endif
#endif
}
1446    
/*
 * Tear down title's lock state. The title must have no outstanding
 * recursive locks (u.count == 0) by the time this is called.
 */
void
js_FinishTitle(JSContext *cx, JSTitle *title)
{
#ifdef DEBUG_SCOPE_COUNT
    js_unlog_title(title);
#endif

#ifdef JS_THREADSAFE
    /* Title must be single-threaded at this point, so set ownercx. */
    JS_ASSERT(title->u.count == 0);
    title->ownercx = cx;
    js_FinishLock(&title->lock);
#endif
}
1461    
1462     #ifdef DEBUG
1463    
1464     JSBool
1465     js_IsRuntimeLocked(JSRuntime *rt)
1466     {
1467     return js_CurrentThreadId() == rt->rtLockOwner;
1468     }
1469    
1470     JSBool
1471     js_IsObjLocked(JSContext *cx, JSObject *obj)
1472     {
1473 siliconforks 460 return js_IsTitleLocked(cx, &OBJ_SCOPE(obj)->title);
1474 siliconforks 332 }
1475    
/*
 * DEBUG predicate: is title effectively locked by cx (or by cx's thread)?
 */
JSBool
js_IsTitleLocked(JSContext *cx, JSTitle *title)
{
    /* Special case: the GC locking any object's title, see js_LockTitle. */
    if (CX_THREAD_IS_RUNNING_GC(cx))
        return JS_TRUE;

    /* Special case: locked object owning a sealed scope, see js_LockObj. */
    if (cx->lockedSealedTitle == title)
        return JS_TRUE;

    /*
     * General case: the title is either exclusively owned (by cx), or it has
     * a thin or fat lock to cope with shared (concurrent) ownership.
     */
    if (title->ownercx) {
        JS_ASSERT(title->ownercx == cx || title->ownercx->thread == cx->thread);
        return JS_TRUE;
    }
    /*
     * The lock word holds the owning JSThread pointer, possibly tagged with
     * the wait bit. NOTE(review): if the title is entirely unlocked the word
     * is 0 and this dereferences null — presumably callers only ask about
     * titles they expect to be locked; confirm.
     */
    return js_CurrentThreadId() ==
           ((JSThread *)Thin_RemoveWait(ReadWord(title->lock.owner)))->id;
}
1498    
1499     #ifdef JS_DEBUG_TITLE_LOCKS
/*
 * Record the file/line of the current (up to 4 nested) lock acquisitions of
 * scope's title, for JS_DEBUG_TITLE_LOCKS diagnostics. Skipped for titles
 * exclusively owned by one context (never thin-locked).
 */
void
js_SetScopeInfo(JSScope *scope, const char *file, int line)
{
    JSTitle *title = &scope->title;
    if (!title->ownercx) {
        jsrefcount count = title->u.count;
        /*
         * NOTE(review): the assert permits count == 0 for sealed scopes, in
         * which case file[count - 1] would index before the array — confirm
         * this path is unreachable for sealed scopes.
         */
        JS_ASSERT_IF(!scope->sealed(), count > 0);
        JS_ASSERT(count <= 4);
        title->file[count - 1] = file;
        title->line[count - 1] = line;
    }
}
1512     #endif /* JS_DEBUG_TITLE_LOCKS */
1513     #endif /* DEBUG */
1514     #endif /* JS_THREADSAFE */

  ViewVC Help
Powered by ViewVC 1.1.24