
Annotation of /trunk/js/jslock.cpp



Revision 460
Sat Sep 26 23:15:22 2009 UTC by siliconforks
File size: 43729 byte(s)
Upgrade to SpiderMonkey from Firefox 3.5.3.

1 siliconforks 332 /* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2     *
3     * ***** BEGIN LICENSE BLOCK *****
4     * Version: MPL 1.1/GPL 2.0/LGPL 2.1
5     *
6     * The contents of this file are subject to the Mozilla Public License Version
7     * 1.1 (the "License"); you may not use this file except in compliance with
8     * the License. You may obtain a copy of the License at
9     * http://www.mozilla.org/MPL/
10     *
11     * Software distributed under the License is distributed on an "AS IS" basis,
12     * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
13     * for the specific language governing rights and limitations under the
14     * License.
15     *
16     * The Original Code is Mozilla Communicator client code, released
17     * March 31, 1998.
18     *
19     * The Initial Developer of the Original Code is
20     * Netscape Communications Corporation.
21     * Portions created by the Initial Developer are Copyright (C) 1998
22     * the Initial Developer. All Rights Reserved.
23     *
24     * Contributor(s):
25     *
26     * Alternatively, the contents of this file may be used under the terms of
27     * either of the GNU General Public License Version 2 or later (the "GPL"),
28     * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
29     * in which case the provisions of the GPL or the LGPL are applicable instead
30     * of those above. If you wish to allow use of your version of this file only
31     * under the terms of either the GPL or the LGPL, and not to allow others to
32     * use your version of this file under the terms of the MPL, indicate your
33     * decision by deleting the provisions above and replace them with the notice
34     * and other provisions required by the GPL or the LGPL. If you do not delete
35     * the provisions above, a recipient may use your version of this file under
36     * the terms of any one of the MPL, the GPL or the LGPL.
37     *
38     * ***** END LICENSE BLOCK ***** */
39    
40     #ifdef JS_THREADSAFE
41    
42     /*
43     * JS locking stubs.
44     */
45     #include "jsstddef.h"
46     #include <stdlib.h>
47     #include <string.h>
48     #include "jspubtd.h"
49     #include "jsutil.h" /* Added by JSIFY */
50     #include "jstypes.h"
51     #include "jsbit.h"
52     #include "jscntxt.h"
53     #include "jsdtoa.h"
54     #include "jsgc.h"
55     #include "jsfun.h" /* for VALUE_IS_FUNCTION used by *_WRITE_BARRIER */
56     #include "jslock.h"
57     #include "jsscope.h"
58     #include "jsstr.h"
59    
60     #define ReadWord(W) (W)
61    
62     /* Implement NativeCompareAndSwap. */
63    
64     #if defined(_WIN32) && defined(_M_IX86)
65     #pragma warning( disable : 4035 )
66     JS_BEGIN_EXTERN_C
67     extern long __cdecl
68     _InterlockedCompareExchange(long *volatile dest, long exchange, long comp);
69     JS_END_EXTERN_C
70     #pragma intrinsic(_InterlockedCompareExchange)
71    
72 siliconforks 460 JS_STATIC_ASSERT(sizeof(jsword) == sizeof(long));
73    
74 siliconforks 332 static JS_ALWAYS_INLINE int
75     NativeCompareAndSwapHelper(jsword *w, jsword ov, jsword nv)
76     {
77 siliconforks 460 _InterlockedCompareExchange((long*) w, nv, ov);
78 siliconforks 332 __asm {
79     sete al
80     }
81     }
82    
83     static JS_ALWAYS_INLINE int
84     NativeCompareAndSwap(jsword *w, jsword ov, jsword nv)
85     {
86     return (NativeCompareAndSwapHelper(w, ov, nv) & 1);
87     }
88    
89     #elif defined(XP_MACOSX) || defined(DARWIN)
90    
91     #include <libkern/OSAtomic.h>
92    
93     static JS_ALWAYS_INLINE int
94     NativeCompareAndSwap(jsword *w, jsword ov, jsword nv)
95     {
96     /* Details on these functions are available in the manpage for atomic */
97 siliconforks 460 return OSAtomicCompareAndSwapPtrBarrier(ov, nv, w);
98 siliconforks 332 }
99    
100     #elif defined(__GNUC__) && defined(__i386__)
101    
102     /* Note: This fails on 386 CPUs; cmpxchgl is a >= 486 instruction */
103     static JS_ALWAYS_INLINE int
104     NativeCompareAndSwap(jsword *w, jsword ov, jsword nv)
105     {
106     unsigned int res;
107    
108     __asm__ __volatile__ (
109     "lock\n"
110     "cmpxchgl %2, (%1)\n"
111     "sete %%al\n"
112     "andl $1, %%eax\n"
113     : "=a" (res)
114     : "r" (w), "r" (nv), "a" (ov)
115     : "cc", "memory");
116     return (int)res;
117     }
118    
119     #elif defined(__GNUC__) && defined(__x86_64__)
120     static JS_ALWAYS_INLINE int
121     NativeCompareAndSwap(jsword *w, jsword ov, jsword nv)
122     {
123     unsigned int res;
124    
125     __asm__ __volatile__ (
126     "lock\n"
127     "cmpxchgq %2, (%1)\n"
128     "sete %%al\n"
129     "movzbl %%al, %%eax\n"
130     : "=a" (res)
131     : "r" (w), "r" (nv), "a" (ov)
132     : "cc", "memory");
133     return (int)res;
134     }
135    
136     #elif defined(SOLARIS) && defined(sparc) && defined(ULTRA_SPARC)
137    
138     static JS_ALWAYS_INLINE int
139     NativeCompareAndSwap(jsword *w, jsword ov, jsword nv)
140     {
141     #if defined(__GNUC__)
142     unsigned int res;
143     JS_ASSERT(ov != nv);
144     asm volatile ("\
145     stbar\n\
146     cas [%1],%2,%3\n\
147     cmp %2,%3\n\
148     be,a 1f\n\
149     mov 1,%0\n\
150     mov 0,%0\n\
151     1:"
152     : "=r" (res)
153     : "r" (w), "r" (ov), "r" (nv));
154     return (int)res;
155     #else /* !__GNUC__ */
156     extern int compare_and_swap(jsword*, jsword, jsword);
157     JS_ASSERT(ov != nv);
158     return compare_and_swap(w, ov, nv);
159     #endif
160     }
161    
162     #elif defined(AIX)
163    
164     #include <sys/atomic_op.h>
165    
166     static JS_ALWAYS_INLINE int
167     NativeCompareAndSwap(jsword *w, jsword ov, jsword nv)
168     {
169     return !_check_lock((atomic_p)w, ov, nv);
170     }
171    
172     #elif defined(USE_ARM_KUSER)
173    
174     /* See https://bugzilla.mozilla.org/show_bug.cgi?id=429387 for a
175     * description of this ABI; this is a function provided at a fixed
176     * location by the kernel in the memory space of each process.
177     */
178     typedef int (__kernel_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
179     #define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
180    
181     JS_STATIC_ASSERT(sizeof(jsword) == sizeof(int));
182    
183     static JS_ALWAYS_INLINE int
184     NativeCompareAndSwap(jsword *w, jsword ov, jsword nv)
185     {
186     volatile int *vp = (volatile int *) w;
187     PRInt32 failed = 1;
188    
189     /* Loop until a __kernel_cmpxchg succeeds. See bug 446169 */
190     do {
191     failed = __kernel_cmpxchg(ov, nv, vp);
192     } while (failed && *vp == ov);
193     return !failed;
194     }
195    
196     #elif JS_HAS_NATIVE_COMPARE_AND_SWAP
197    
198     #error "JS_HAS_NATIVE_COMPARE_AND_SWAP should be 0 if your platform lacks a compare-and-swap instruction."
199    
200     #endif /* arch-tests */
201    
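/*
 * For readers adding a new platform above: every NativeCompareAndSwap
 * variant must satisfy the same contract -- atomically store nv into *w iff
 * *w still equals ov, and report whether the store happened.  A minimal
 * sketch of an equivalent implementation, assuming a GCC/Clang toolchain
 * with the __sync builtins (shown only to illustrate the contract, not one
 * of the configurations handled above):
 *
 *   static JS_ALWAYS_INLINE int
 *   IllustrativeCompareAndSwap(jsword *w, jsword ov, jsword nv)
 *   {
 *       // Non-zero iff the comparison succeeded and nv was stored.
 *       return __sync_bool_compare_and_swap(w, ov, nv);
 *   }
 */
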
202     #if JS_HAS_NATIVE_COMPARE_AND_SWAP
203    
204     JSBool
205     js_CompareAndSwap(jsword *w, jsword ov, jsword nv)
206     {
207     return !!NativeCompareAndSwap(w, ov, nv);
208     }
209    
210     #elif defined(NSPR_LOCK)
211    
212     # ifdef __GNUC__
213     # warning "js_CompareAndSwap is implemented using NSPR lock"
214     # endif
215    
216     JSBool
217     js_CompareAndSwap(jsword *w, jsword ov, jsword nv)
218     {
219     int result;
220     static PRLock *CompareAndSwapLock = JS_NEW_LOCK();
221    
222     JS_ACQUIRE_LOCK(CompareAndSwapLock);
223     result = (*w == ov);
224     if (result)
225     *w = nv;
226     JS_RELEASE_LOCK(CompareAndSwapLock);
227     return result;
228     }
229    
230     #else /* !defined(NSPR_LOCK) */
231    
232     #error "NSPR_LOCK should be on when the platform lacks native compare-and-swap."
233    
234     #endif
235    
236 siliconforks 460 void
237     js_AtomicSetMask(jsword *w, jsword mask)
238     {
239     jsword ov, nv;
240    
241     do {
242     ov = *w;
243     nv = ov | mask;
244     } while (!js_CompareAndSwap(w, ov, nv));
245     }
246    
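/*
 * The loop in js_AtomicSetMask is the standard compare-and-swap
 * read-modify-write pattern: read the word, compute the new value, and
 * retry if another thread changed the word in between.  A hypothetical
 * companion that clears bits would follow the same shape (illustrative
 * sketch only; no such helper exists in this file):
 *
 *   void
 *   js_AtomicClearMaskSketch(jsword *w, jsword mask)
 *   {
 *       jsword ov, nv;
 *
 *       do {
 *           ov = *w;
 *           nv = ov & ~mask;
 *       } while (!js_CompareAndSwap(w, ov, nv));
 *   }
 */
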
247 siliconforks 332 #ifndef NSPR_LOCK
248    
249     struct JSFatLock {
250     int susp;
251     PRLock *slock;
252     PRCondVar *svar;
253     JSFatLock *next;
254     JSFatLock **prevp;
255     };
256    
257     typedef struct JSFatLockTable {
258     JSFatLock *free;
259     JSFatLock *taken;
260     } JSFatLockTable;
261    
262     #define GLOBAL_LOCK_INDEX(id) (((uint32)(jsuword)(id)>>2) & global_locks_mask)
263    
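/*
 * Illustration of the hash: lock ids are addresses of word-aligned
 * structures, so their two low-order bits carry no information; shifting
 * them out before masking spreads neighboring locks over different
 * buckets.  For example, ids 0x1000 and 0x1004 map to indexes 0x400 and
 * 0x401 (before masking) rather than colliding in one bucket.
 */
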
264     static void
265     js_Dequeue(JSThinLock *);
266    
267     static PRLock **global_locks;
268     static uint32 global_lock_count = 1;
269     static uint32 global_locks_log2 = 0;
270     static uint32 global_locks_mask = 0;
271    
272     static void
273     js_LockGlobal(void *id)
274     {
275     uint32 i = GLOBAL_LOCK_INDEX(id);
276     PR_Lock(global_locks[i]);
277     }
278    
279     static void
280     js_UnlockGlobal(void *id)
281     {
282     uint32 i = GLOBAL_LOCK_INDEX(id);
283     PR_Unlock(global_locks[i]);
284     }
285    
286     #endif /* !NSPR_LOCK */
287    
288     void
289     js_InitLock(JSThinLock *tl)
290     {
291     #ifdef NSPR_LOCK
292     tl->owner = 0;
293     tl->fat = (JSFatLock*)JS_NEW_LOCK();
294     #else
295     memset(tl, 0, sizeof(JSThinLock));
296     #endif
297     }
298    
299     void
300     js_FinishLock(JSThinLock *tl)
301     {
302     #ifdef NSPR_LOCK
303     tl->owner = 0xdeadbeef;
304     if (tl->fat)
305     JS_DESTROY_LOCK(((JSLock*)tl->fat));
306     #else
307     JS_ASSERT(tl->owner == 0);
308     JS_ASSERT(tl->fat == NULL);
309     #endif
310     }
311    
312     #ifdef DEBUG_SCOPE_COUNT
313    
314     #include <stdio.h>
315     #include "jsdhash.h"
316    
317 siliconforks 460 static FILE *logfp = NULL;
318 siliconforks 332 static JSDHashTable logtbl;
319    
320     typedef struct logentry {
321     JSDHashEntryStub stub;
322     char op;
323     const char *file;
324     int line;
325     } logentry;
326    
327     static void
328 siliconforks 460 logit(JSTitle *title, char op, const char *file, int line)
329 siliconforks 332 {
330     logentry *entry;
331    
332     if (!logfp) {
333     logfp = fopen("/tmp/scope.log", "w");
334     if (!logfp)
335     return;
336     setvbuf(logfp, NULL, _IONBF, 0);
337     }
338 siliconforks 460 fprintf(logfp, "%p %d %c %s %d\n", title, title->u.count, op, file, line);
339 siliconforks 332
340     if (!logtbl.entryStore &&
341     !JS_DHashTableInit(&logtbl, JS_DHashGetStubOps(), NULL,
342     sizeof(logentry), 100)) {
343     return;
344     }
345 siliconforks 460 entry = (logentry *) JS_DHashTableOperate(&logtbl, title, JS_DHASH_ADD);
346 siliconforks 332 if (!entry)
347     return;
348 siliconforks 460 entry->stub.key = title;
349 siliconforks 332 entry->op = op;
350     entry->file = file;
351     entry->line = line;
352     }
353    
354     void
355 siliconforks 460 js_unlog_title(JSTitle *title)
356 siliconforks 332 {
357     if (!logtbl.entryStore)
358     return;
359 siliconforks 460 (void) JS_DHashTableOperate(&logtbl, title, JS_DHASH_REMOVE);
360 siliconforks 332 }
361    
362 siliconforks 460 # define LOGIT(title,op) logit(title, op, __FILE__, __LINE__)
363 siliconforks 332
364     #else
365    
366 siliconforks 460 # define LOGIT(title, op) /* nothing */
367 siliconforks 332
368     #endif /* DEBUG_SCOPE_COUNT */
369    
370     /*
371 siliconforks 460 * Return true if we would deadlock waiting in ClaimTitle on
372     * rt->titleSharingDone until ownercx finishes its request and shares a title.
373 siliconforks 332 *
374     * (i) rt->gcLock held
375     */
376 siliconforks 460 static bool
377     WillDeadlock(JSContext *ownercx, JSThread *thread)
378 siliconforks 332 {
379 siliconforks 460 JS_ASSERT(CURRENT_THREAD_IS_ME(thread));
380     JS_ASSERT(ownercx->thread != thread);
381 siliconforks 332
382 siliconforks 460 for (;;) {
383     JS_ASSERT(ownercx->thread);
384     JS_ASSERT(ownercx->requestDepth > 0);
385     JSTitle *title = ownercx->thread->titleToShare;
386     if (!title || !title->ownercx) {
387     /*
388     * ownercx->thread doesn't wait or has just been notified that the
389     * title became shared.
390     */
391     return false;
392     }
393    
394     /*
395     * ownercx->thread is waiting in ClaimTitle for a context from some
396     * thread to finish its request. If that thread is the current thread,
397     * we would deadlock. Otherwise we must recursively check if that
398     * thread waits for the current thread.
399     */
400     if (title->ownercx->thread == thread) {
401     JS_RUNTIME_METER(ownercx->runtime, deadlocksAvoided);
402     return true;
403     }
404 siliconforks 332 ownercx = title->ownercx;
405 siliconforks 460 }
406 siliconforks 332 }
407    
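/*
 * Worked example of the walk above (illustrative): the current thread T
 * tries to claim a title owned by ownercx, which runs on thread U.  If U
 * is itself blocked in ClaimTitle waiting for a title whose owner runs on
 * T, the first iteration reports a deadlock; if that owner runs on a third
 * thread V, the walk continues from V's context until it either reaches a
 * context on T (deadlock) or finds a context that is not waiting.
 */
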
408 siliconforks 460 static void
409     FinishSharingTitle(JSContext *cx, JSTitle *title);
410    
411 siliconforks 332 /*
412     * Make title multi-threaded, i.e. share its ownership among contexts in rt
413     * using a "thin" or (if necessary due to contention) "fat" lock. Called only
414     * from ClaimTitle, immediately below, when we detect deadlock were we to wait
415     * for title's lock, because its ownercx is waiting on a title owned by the
416     * calling cx.
417     *
418     * (i) rt->gcLock held
419     */
420     static void
421     ShareTitle(JSContext *cx, JSTitle *title)
422     {
423     JSRuntime *rt;
424     JSTitle **todop;
425    
426     rt = cx->runtime;
427     if (title->u.link) {
428     for (todop = &rt->titleSharingTodo; *todop != title;
429     todop = &(*todop)->u.link) {
430     JS_ASSERT(*todop != NO_TITLE_SHARING_TODO);
431     }
432     *todop = title->u.link;
433     title->u.link = NULL; /* null u.link for sanity ASAP */
434     JS_NOTIFY_ALL_CONDVAR(rt->titleSharingDone);
435     }
436 siliconforks 460 FinishSharingTitle(cx, title);
437 siliconforks 332 }
438    
439     /*
440 siliconforks 460 * FinishSharingTitle is the tail part of ShareTitle, split out to become a
441     * subroutine of js_ShareWaitingTitles too. The bulk of the work here involves
442     * making mutable strings in the title's object's slots be immutable. We have
443     * to do this because such strings will soon be available to multiple threads,
444     * so their buffers can't be realloc'd any longer in js_ConcatStrings, and
445     * their members can't be modified by js_ConcatStrings, js_UndependString or
446     * js_MinimizeDependentStrings.
447 siliconforks 332 *
448 siliconforks 460 * The last bit of work done by this function nulls title->ownercx and updates
449     * rt->sharedTitles.
450 siliconforks 332 */
451 siliconforks 460 static void
452     FinishSharingTitle(JSContext *cx, JSTitle *title)
453 siliconforks 332 {
454     JSScope *scope;
455     JSObject *obj;
456     uint32 nslots, i;
457     jsval v;
458    
459 siliconforks 460 js_InitLock(&title->lock);
460     title->u.count = 0; /* NULL may not pun as 0 */
461     scope = TITLE_TO_SCOPE(title);
462 siliconforks 332 obj = scope->object;
463     if (obj) {
464 siliconforks 460 nslots = scope->freeslot;
465 siliconforks 332 for (i = 0; i != nslots; ++i) {
466     v = STOBJ_GET_SLOT(obj, i);
467     if (JSVAL_IS_STRING(v) &&
468     !js_MakeStringImmutable(cx, JSVAL_TO_STRING(v))) {
469     /*
470     * FIXME bug 363059: The following error recovery changes
471     * runtime execution semantics, arbitrarily and silently
472     * ignoring errors except out-of-memory, which should have been
473     * reported through JS_ReportOutOfMemory at this point.
474     */
475     STOBJ_SET_SLOT(obj, i, JSVAL_VOID);
476     }
477     }
478     }
479    
480     title->ownercx = NULL; /* NB: set last, after lock init */
481     JS_RUNTIME_METER(cx->runtime, sharedTitles);
482     }
483    
484     /*
485 siliconforks 460 * Notify all contexts that are currently in a request, which will give them a
486     * chance to yield their current request.
487     */
488     void
489     js_NudgeOtherContexts(JSContext *cx)
490     {
491     JSRuntime *rt = cx->runtime;
492     JSContext *acx = NULL;
493    
494     while ((acx = js_NextActiveContext(rt, acx)) != NULL) {
495     if (cx != acx)
496     JS_TriggerOperationCallback(acx);
497     }
498     }
499    
500     /*
501     * Notify all contexts that are currently in a request and execute on this
502     * specific thread.
503     */
504     static void
505     NudgeThread(JSThread *thread)
506     {
507     JSCList *link;
508     JSContext *acx;
509    
510     link = &thread->contextList;
511     while ((link = link->next) != &thread->contextList) {
512     acx = CX_FROM_THREAD_LINKS(link);
513     JS_ASSERT(acx->thread == thread);
514     if (acx->requestDepth)
515     JS_TriggerOperationCallback(acx);
516     }
517     }
518    
519     /*
520 siliconforks 332 * Given a title with apparently non-null ownercx different from cx, try to
521     * set ownercx to cx, claiming exclusive (single-threaded) ownership of title.
522     * If we claim ownership, return true. Otherwise, we wait for ownercx to be
523     * set to null (indicating that title is multi-threaded); or if waiting would
524     * deadlock, we set ownercx to null ourselves via ShareTitle. In any case,
525     * once ownercx is null we return false.
526     */
527     static JSBool
528     ClaimTitle(JSTitle *title, JSContext *cx)
529     {
530     JSRuntime *rt;
531     JSContext *ownercx;
532 siliconforks 460 uint32 requestDebit;
533 siliconforks 332
534     rt = cx->runtime;
535     JS_RUNTIME_METER(rt, claimAttempts);
536     JS_LOCK_GC(rt);
537    
538     /* Reload in case ownercx went away while we blocked on the lock. */
539     while ((ownercx = title->ownercx) != NULL) {
540     /*
541     * Avoid selflock if ownercx is dead, or is not running a request, or
542     * has the same thread as cx. Set title->ownercx to cx so that the
543     * matching JS_UNLOCK_SCOPE or JS_UNLOCK_OBJ macro call will take the
544     * fast path around the corresponding js_UnlockTitle or js_UnlockObj
545     * function call.
546     *
547     * If title->u.link is non-null, title has already been inserted on
548     * the rt->titleSharingTodo list, because another thread's context
549     * already wanted to lock title while ownercx was running a request.
550 siliconforks 460 * That context must still be in a request and cannot be dead. We can
551     * claim it if its thread matches ours but only if cx itself is in a
552     * request.
553     *
554     * The latter check covers the case when the embedding triggers a call
555     * to js_GC on a cx outside a request while having ownercx running a
556     * request on the same thread, and then js_GC calls a mark hook or a
557     * finalizer accessing the title. In this case we cannot claim the
558     * title but must share it now as no title-sharing JS_EndRequest will
559     * follow.
560 siliconforks 332 */
561 siliconforks 460 bool canClaim;
562     if (title->u.link) {
563     JS_ASSERT(js_ValidContextPointer(rt, ownercx));
564     JS_ASSERT(ownercx->requestDepth > 0);
565     JS_ASSERT_IF(cx->requestDepth == 0, cx->thread == rt->gcThread);
566     canClaim = (ownercx->thread == cx->thread &&
567     cx->requestDepth > 0);
568     } else {
569     canClaim = (!js_ValidContextPointer(rt, ownercx) ||
570     !ownercx->requestDepth ||
571     ownercx->thread == cx->thread);
572     }
573     if (canClaim) {
574 siliconforks 332 title->ownercx = cx;
575     JS_UNLOCK_GC(rt);
576     JS_RUNTIME_METER(rt, claimedTitles);
577     return JS_TRUE;
578     }
579    
580     /*
581 siliconforks 460 * Avoid deadlock if title's owner thread is waiting on a title that
582     * the current thread owns, by revoking title's ownership. This
583     * approach to deadlock avoidance works because the engine never nests
584     * title locks.
585 siliconforks 332 *
586 siliconforks 460 * If cx->thread could hold locks on ownercx->thread->titleToShare, or
587     * if ownercx->thread could hold locks on title, we would need to keep
588     * reentrancy counts for all such "flyweight" (ownercx != NULL) locks,
589     * so that control would unwind properly once these locks became
590     * "thin" or "fat". The engine promotes a title from exclusive to
591     * shared access only when locking, never when holding or unlocking.
592 siliconforks 332 *
593     * Avoid deadlock before any of this title/context cycle detection if
594     * cx is on the active GC's thread, because in that case, no requests
595     * will run until the GC completes. Any title wanted by the GC (from
596 siliconforks 460 * a finalizer or a mark hook) that can't be claimed must become
597     * shared.
598 siliconforks 332 */
599 siliconforks 460 if (rt->gcThread == cx->thread || WillDeadlock(ownercx, cx->thread)) {
600 siliconforks 332 ShareTitle(cx, title);
601     break;
602     }
603    
604     /*
605     * Thanks to the non-zero NO_TITLE_SHARING_TODO link terminator, we
606     * can decide whether title is on rt->titleSharingTodo with a single
607     * non-null test, and avoid double-insertion bugs.
608     */
609     if (!title->u.link) {
610 siliconforks 460 js_HoldScope(TITLE_TO_SCOPE(title));
611 siliconforks 332 title->u.link = rt->titleSharingTodo;
612     rt->titleSharingTodo = title;
613     }
614    
615     /*
616 siliconforks 460 * Discount all the requests running on the current thread so a
617     * possible GC can proceed on another thread while we wait on
618     * rt->titleSharingDone.
619 siliconforks 332 */
620 siliconforks 460 requestDebit = js_DiscountRequestsForGC(cx);
621     if (title->ownercx != ownercx) {
622     /*
623     * js_DiscountRequestsForGC released and reacquired the GC lock,
624     * and the title was taken or shared. Start over.
625     */
626     js_RecountRequestsAfterGC(rt, requestDebit);
627     continue;
628 siliconforks 332 }
629    
630     /*
631     * We know that some other thread's context owns title, which is now
632     * linked onto rt->titleSharingTodo, awaiting the end of that other
633 siliconforks 460 * thread's request. So it is safe to wait on rt->titleSharingDone.
634     * But before waiting, we force the operation callback for that other
635     * thread so it can quickly suspend.
636 siliconforks 332 */
637 siliconforks 460 NudgeThread(ownercx->thread);
638    
639     JS_ASSERT(!cx->thread->titleToShare);
640     cx->thread->titleToShare = title;
641     #ifdef DEBUG
642     PRStatus stat =
643     #endif
644     PR_WaitCondVar(rt->titleSharingDone, PR_INTERVAL_NO_TIMEOUT);
645 siliconforks 332 JS_ASSERT(stat != PR_FAILURE);
646    
647 siliconforks 460 js_RecountRequestsAfterGC(rt, requestDebit);
648 siliconforks 332
649     /*
650 siliconforks 460 * Don't clear titleToShare until after we're through waiting on
651 siliconforks 332 * all condition variables protected by rt->gcLock -- that includes
652 siliconforks 460 * rt->titleSharingDone *and* rt->gcDone (hidden in the call to
653     * js_RecountRequestsAfterGC immediately above).
654 siliconforks 332 *
655     * Otherwise, the GC could easily deadlock with another thread that
656     * owns a title wanted by a finalizer. By keeping cx->thread->titleToShare
657     * set till here, we ensure that such deadlocks are detected, which
658     * results in the finalized object's title being shared (it must, of
659     * course, have other, live objects sharing it).
660     */
661 siliconforks 460 cx->thread->titleToShare = NULL;
662 siliconforks 332 }
663    
664     JS_UNLOCK_GC(rt);
665     return JS_FALSE;
666     }
667    
668 siliconforks 460 void
669     js_ShareWaitingTitles(JSContext *cx)
670     {
671     JSTitle *title, **todop;
672     bool shared;
673    
674     /* See whether cx has any single-threaded titles to start sharing. */
675     todop = &cx->runtime->titleSharingTodo;
676     shared = false;
677     while ((title = *todop) != NO_TITLE_SHARING_TODO) {
678     if (title->ownercx != cx) {
679     todop = &title->u.link;
680     continue;
681     }
682     *todop = title->u.link;
683     title->u.link = NULL; /* null u.link for sanity ASAP */
684    
685     /*
686     * If js_DropScope returns false, we held the last ref to scope. The
687     * waiting thread(s) must have been killed, after which the GC
688     * collected the object that held this scope. Unlikely, because it
689     * requires that the GC ran (e.g., from an operation callback)
690     * during this request, but possible.
691     */
692     if (js_DropScope(cx, TITLE_TO_SCOPE(title), NULL)) {
693     FinishSharingTitle(cx, title); /* set ownercx = NULL */
694     shared = true;
695     }
696     }
697     if (shared)
698     JS_NOTIFY_ALL_CONDVAR(cx->runtime->titleSharingDone);
699     }
700    
701 siliconforks 332 /* Exported to js.c, which calls it via OBJ_GET_* and JSVAL_IS_* macros. */
702     JS_FRIEND_API(jsval)
703     js_GetSlotThreadSafe(JSContext *cx, JSObject *obj, uint32 slot)
704     {
705     jsval v;
706     JSScope *scope;
707     JSTitle *title;
708     #ifndef NSPR_LOCK
709     JSThinLock *tl;
710     jsword me;
711     #endif
712    
713     /*
714     * We handle non-native objects via JSObjectOps.getRequiredSlot, treating
715     * all slots starting from 0 as required slots. A property definition or
716     * some prior arrangement must have allocated slot.
717     *
718     * Note once again (see jspubtd.h, before JSGetRequiredSlotOp's typedef)
719     * the crucial distinction between a |required slot number| that's passed
720     * to the get/setRequiredSlot JSObjectOps, and a |reserved slot index|
721     * passed to the JS_Get/SetReservedSlot APIs.
722     */
723     if (!OBJ_IS_NATIVE(obj))
724     return OBJ_GET_REQUIRED_SLOT(cx, obj, slot);
725    
726     /*
727     * Native object locking is inlined here to optimize the single-threaded
728     * and contention-free multi-threaded cases.
729     */
730     scope = OBJ_SCOPE(obj);
731     title = &scope->title;
732     JS_ASSERT(title->ownercx != cx);
733 siliconforks 460 JS_ASSERT(slot < scope->freeslot);
734 siliconforks 332
735     /*
736     * Avoid locking if called from the GC. Also avoid locking an object
737     * owning a sealed scope. If neither of those special cases applies, try
738     * to claim scope's flyweight lock from whatever context may have had it in
739     * an earlier request.
740     */
741     if (CX_THREAD_IS_RUNNING_GC(cx) ||
742     (SCOPE_IS_SEALED(scope) && scope->object == obj) ||
743     (title->ownercx && ClaimTitle(title, cx))) {
744     return STOBJ_GET_SLOT(obj, slot);
745     }
746    
747     #ifndef NSPR_LOCK
748     tl = &title->lock;
749     me = CX_THINLOCK_ID(cx);
750     JS_ASSERT(CURRENT_THREAD_IS_ME(me));
751     if (NativeCompareAndSwap(&tl->owner, 0, me)) {
752     /*
753     * Got the lock with one compare-and-swap. Even so, someone else may
754     * have mutated obj so it now has its own scope and lock, which would
755     * require either a restart from the top of this routine, or a thin
756     * lock release followed by fat lock acquisition.
757     */
758     if (scope == OBJ_SCOPE(obj)) {
759     v = STOBJ_GET_SLOT(obj, slot);
760     if (!NativeCompareAndSwap(&tl->owner, me, 0)) {
761     /* Assert that scope locks never revert to flyweight. */
762     JS_ASSERT(title->ownercx != cx);
763 siliconforks 460 LOGIT(title, '1');
764 siliconforks 332 title->u.count = 1;
765     js_UnlockObj(cx, obj);
766     }
767     return v;
768     }
769     if (!NativeCompareAndSwap(&tl->owner, me, 0))
770     js_Dequeue(tl);
771     }
772     else if (Thin_RemoveWait(ReadWord(tl->owner)) == me) {
773     return STOBJ_GET_SLOT(obj, slot);
774     }
775     #endif
776    
777     js_LockObj(cx, obj);
778     v = STOBJ_GET_SLOT(obj, slot);
779    
780     /*
781     * Test whether cx took ownership of obj's scope during js_LockObj.
782     *
783     * This does not mean that a given scope reverted to flyweight from "thin"
784     * or "fat" -- it does mean that obj's map pointer changed due to another
785     * thread setting a property, requiring obj to cease sharing a prototype
786     * object's scope (whose lock was not flyweight, else we wouldn't be here
787     * in the first place!).
788     */
789     title = &OBJ_SCOPE(obj)->title;
790     if (title->ownercx != cx)
791     js_UnlockTitle(cx, title);
792     return v;
793     }
794    
795     void
796     js_SetSlotThreadSafe(JSContext *cx, JSObject *obj, uint32 slot, jsval v)
797     {
798     JSTitle *title;
799     JSScope *scope;
800     #ifndef NSPR_LOCK
801     JSThinLock *tl;
802     jsword me;
803     #endif
804    
805     /* Any string stored in a thread-safe object must be immutable. */
806     if (JSVAL_IS_STRING(v) &&
807     !js_MakeStringImmutable(cx, JSVAL_TO_STRING(v))) {
808     /* FIXME bug 363059: See comments in FinishSharingTitle. */
809     v = JSVAL_NULL;
810     }
811    
812     /*
813     * We handle non-native objects via JSObjectOps.setRequiredSlot, as above
814     * for the Get case.
815     */
816     if (!OBJ_IS_NATIVE(obj)) {
817     OBJ_SET_REQUIRED_SLOT(cx, obj, slot, v);
818     return;
819     }
820    
821     /*
822     * Native object locking is inlined here to optimize the single-threaded
823     * and contention-free multi-threaded cases.
824     */
825     scope = OBJ_SCOPE(obj);
826     title = &scope->title;
827     JS_ASSERT(title->ownercx != cx);
828 siliconforks 460 JS_ASSERT(slot < scope->freeslot);
829 siliconforks 332
830     /*
831     * Avoid locking if called from the GC. Also avoid locking an object
832     * owning a sealed scope. If neither of those special cases applies, try
833     * to claim scope's flyweight lock from whatever context may have had it in
834     * an earlier request.
835     */
836     if (CX_THREAD_IS_RUNNING_GC(cx) ||
837     (SCOPE_IS_SEALED(scope) && scope->object == obj) ||
838     (title->ownercx && ClaimTitle(title, cx))) {
839     LOCKED_OBJ_WRITE_BARRIER(cx, obj, slot, v);
840     return;
841     }
842    
843     #ifndef NSPR_LOCK
844     tl = &title->lock;
845     me = CX_THINLOCK_ID(cx);
846     JS_ASSERT(CURRENT_THREAD_IS_ME(me));
847     if (NativeCompareAndSwap(&tl->owner, 0, me)) {
848     if (scope == OBJ_SCOPE(obj)) {
849     LOCKED_OBJ_WRITE_BARRIER(cx, obj, slot, v);
850     if (!NativeCompareAndSwap(&tl->owner, me, 0)) {
851     /* Assert that scope locks never revert to flyweight. */
852     JS_ASSERT(title->ownercx != cx);
853 siliconforks 460 LOGIT(title, '1');
854 siliconforks 332 title->u.count = 1;
855     js_UnlockObj(cx, obj);
856     }
857     return;
858     }
859     if (!NativeCompareAndSwap(&tl->owner, me, 0))
860     js_Dequeue(tl);
861     }
862     else if (Thin_RemoveWait(ReadWord(tl->owner)) == me) {
863     LOCKED_OBJ_WRITE_BARRIER(cx, obj, slot, v);
864     return;
865     }
866     #endif
867    
868     js_LockObj(cx, obj);
869     LOCKED_OBJ_WRITE_BARRIER(cx, obj, slot, v);
870    
871     /*
872     * Same drill as above, in js_GetSlotThreadSafe.
873     */
874     title = &OBJ_SCOPE(obj)->title;
875     if (title->ownercx != cx)
876     js_UnlockTitle(cx, title);
877     }
878    
879     #ifndef NSPR_LOCK
880    
881     static JSFatLock *
882     NewFatlock()
883     {
884     JSFatLock *fl = (JSFatLock *)malloc(sizeof(JSFatLock)); /* for now */
885     if (!fl) return NULL;
886     fl->susp = 0;
887     fl->next = NULL;
888     fl->prevp = NULL;
889     fl->slock = PR_NewLock();
890     fl->svar = PR_NewCondVar(fl->slock);
891     return fl;
892     }
893    
894     static void
895     DestroyFatlock(JSFatLock *fl)
896     {
897     PR_DestroyLock(fl->slock);
898     PR_DestroyCondVar(fl->svar);
899     free(fl);
900     }
901    
902     static JSFatLock *
903     ListOfFatlocks(int listc)
904     {
905     JSFatLock *m;
906     JSFatLock *m0;
907     int i;
908    
909     JS_ASSERT(listc>0);
910     m0 = m = NewFatlock();
911     for (i=1; i<listc; i++) {
912     m->next = NewFatlock();
913     m = m->next;
914     }
915     return m0;
916     }
917    
918     static void
919     DeleteListOfFatlocks(JSFatLock *m)
920     {
921     JSFatLock *m0;
922     for (; m; m=m0) {
923     m0 = m->next;
924     DestroyFatlock(m);
925     }
926     }
927    
928     static JSFatLockTable *fl_list_table = NULL;
929     static uint32 fl_list_table_len = 0;
930     static uint32 fl_list_chunk_len = 0;
931    
932     static JSFatLock *
933     GetFatlock(void *id)
934     {
935     JSFatLock *m;
936    
937     uint32 i = GLOBAL_LOCK_INDEX(id);
938     if (fl_list_table[i].free == NULL) {
939     #ifdef DEBUG
940     if (fl_list_table[i].taken)
941     printf("Ran out of fat locks!\n");
942     #endif
943     fl_list_table[i].free = ListOfFatlocks(fl_list_chunk_len);
944     }
945     m = fl_list_table[i].free;
946     fl_list_table[i].free = m->next;
947     m->susp = 0;
948     m->next = fl_list_table[i].taken;
949     m->prevp = &fl_list_table[i].taken;
950     if (fl_list_table[i].taken)
951     fl_list_table[i].taken->prevp = &m->next;
952     fl_list_table[i].taken = m;
953     return m;
954     }
955    
956     static void
957     PutFatlock(JSFatLock *m, void *id)
958     {
959     uint32 i;
960     if (m == NULL)
961     return;
962    
963     /* Unlink m from fl_list_table[i].taken. */
964     *m->prevp = m->next;
965     if (m->next)
966     m->next->prevp = m->prevp;
967    
968     /* Insert m in fl_list_table[i].free. */
969     i = GLOBAL_LOCK_INDEX(id);
970     m->next = fl_list_table[i].free;
971     fl_list_table[i].free = m;
972     }
973    
974     #endif /* !NSPR_LOCK */
975    
976     JSBool
977     js_SetupLocks(int listc, int globc)
978     {
979     #ifndef NSPR_LOCK
980     uint32 i;
981    
982     if (global_locks)
983     return JS_TRUE;
984     #ifdef DEBUG
985     if (listc > 10000 || listc < 0) /* listc == fat lock list chunk length */
986     printf("Bad number %d in js_SetupLocks()!\n", listc);
987     if (globc > 100 || globc < 0) /* globc == number of global locks */
988     printf("Bad number %d in js_SetupLocks()!\n", globc);
989     #endif
990     global_locks_log2 = JS_CeilingLog2(globc);
991     global_locks_mask = JS_BITMASK(global_locks_log2);
992     global_lock_count = JS_BIT(global_locks_log2);
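    /*
     * Worked example (illustrative): with globc == 20, JS_CeilingLog2(20)
     * is 5, so global_locks_mask becomes JS_BITMASK(5) == 31 and
     * global_lock_count becomes JS_BIT(5) == 32; the requested count is
     * rounded up to a power of two so GLOBAL_LOCK_INDEX can mask instead
     * of computing a modulo.
     */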
993     global_locks = (PRLock **) malloc(global_lock_count * sizeof(PRLock*));
994     if (!global_locks)
995     return JS_FALSE;
996     for (i = 0; i < global_lock_count; i++) {
997     global_locks[i] = PR_NewLock();
998     if (!global_locks[i]) {
999     global_lock_count = i;
1000     js_CleanupLocks();
1001     return JS_FALSE;
1002     }
1003     }
1004     fl_list_table = (JSFatLockTable *) malloc(i * sizeof(JSFatLockTable));
1005     if (!fl_list_table) {
1006     js_CleanupLocks();
1007     return JS_FALSE;
1008     }
1009     fl_list_table_len = global_lock_count;
1010     for (i = 0; i < global_lock_count; i++)
1011     fl_list_table[i].free = fl_list_table[i].taken = NULL;
1012     fl_list_chunk_len = listc;
1013     #endif /* !NSPR_LOCK */
1014     return JS_TRUE;
1015     }
1016    
1017     void
1018     js_CleanupLocks()
1019     {
1020     #ifndef NSPR_LOCK
1021     uint32 i;
1022    
1023     if (global_locks) {
1024     for (i = 0; i < global_lock_count; i++)
1025     PR_DestroyLock(global_locks[i]);
1026     free(global_locks);
1027     global_locks = NULL;
1028     global_lock_count = 1;
1029     global_locks_log2 = 0;
1030     global_locks_mask = 0;
1031     }
1032     if (fl_list_table) {
1033     for (i = 0; i < fl_list_table_len; i++) {
1034     DeleteListOfFatlocks(fl_list_table[i].free);
1035     fl_list_table[i].free = NULL;
1036     DeleteListOfFatlocks(fl_list_table[i].taken);
1037     fl_list_table[i].taken = NULL;
1038     }
1039     free(fl_list_table);
1040     fl_list_table = NULL;
1041     fl_list_table_len = 0;
1042     }
1043     #endif /* !NSPR_LOCK */
1044     }
1045    
1046     #ifdef NSPR_LOCK
1047    
1048     static JS_ALWAYS_INLINE void
1049     ThinLock(JSThinLock *tl, jsword me)
1050     {
1051     JS_ACQUIRE_LOCK((JSLock *) tl->fat);
1052     tl->owner = me;
1053     }
1054    
1055     static JS_ALWAYS_INLINE void
1056     ThinUnlock(JSThinLock *tl, jsword /*me*/)
1057     {
1058     tl->owner = 0;
1059     JS_RELEASE_LOCK((JSLock *) tl->fat);
1060     }
1061    
1062     #else
1063    
1064     /*
1065     * Fast locking and unlocking is implemented by delaying the allocation of a
1066     * system lock (fat lock) until contention. As long as a locking thread A
1067     * runs uncontended, the lock is represented solely by storing A's identity in
1068     * the object being locked.
1069     *
1070     * If another thread B tries to lock the object currently locked by A, B is
1071     * enqueued into a fat lock structure (which might have to be allocated and
1072     * pointed to by the object), and suspended using NSPR condition variables
1073     * (wait). A wait bit (Bacon bit) is set in the lock word of the object,
1074     * signalling to A that when releasing the lock, B must be dequeued and
1075     * notified.
1076     *
1077     * The basic operation of the locking primitives (js_Lock, js_Unlock,
1078     * js_Enqueue, and js_Dequeue) is compare-and-swap. Hence, when locking into
1079     * the word pointed at by p, compare-and-swap(p, 0, A) success implies that p
1080     * is unlocked. Similarly, when unlocking p, if compare-and-swap(p, A, 0)
1081     * succeeds this implies that p is uncontended (no one is waiting because the
1082     * wait bit is not set).
1083     *
1084     * When dequeueing, the lock is released, and one of the threads suspended on
1085     * the lock is notified. If other threads still are waiting, the wait bit is
1086     * kept (in js_Enqueue), and if not, the fat lock is deallocated.
1087     *
1088     * The functions js_Enqueue, js_Dequeue, js_SuspendThread, and js_ResumeThread
1089     * are serialized using a global lock. For scalability, a hashtable of global
1090     * locks is used, indexed by the thin lock pointer modulo the table size.
1091     */
1092    
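/*
 * To make the wait-bit manipulation below concrete, the Thin_* macros from
 * jslock.h can be read as operating on a lock word that holds the owning
 * thread's id with the low-order "Bacon bit" reserved as the wait flag.  A
 * minimal sketch of such an encoding (an assumption for illustration; see
 * jslock.h for the authoritative definitions):
 *
 *   #define Sketch_GetWait(W)    ((jsword)(W) & 0x1)
 *   #define Sketch_SetWait(W)    ((jsword)(W) | 0x1)
 *   #define Sketch_RemoveWait(W) ((jsword)(W) & ~(jsword)0x1)
 */
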
1093     /*
1094     * Invariants:
1095     * (i) global lock is held
1096     * (ii) fl->susp >= 0
1097     */
1098     static int
1099     js_SuspendThread(JSThinLock *tl)
1100     {
1101     JSFatLock *fl;
1102     PRStatus stat;
1103    
1104     if (tl->fat == NULL)
1105     fl = tl->fat = GetFatlock(tl);
1106     else
1107     fl = tl->fat;
1108     JS_ASSERT(fl->susp >= 0);
1109     fl->susp++;
1110     PR_Lock(fl->slock);
1111     js_UnlockGlobal(tl);
1112     stat = PR_WaitCondVar(fl->svar, PR_INTERVAL_NO_TIMEOUT);
1113     JS_ASSERT(stat != PR_FAILURE);
1114     PR_Unlock(fl->slock);
1115     js_LockGlobal(tl);
1116     fl->susp--;
1117     if (fl->susp == 0) {
1118     PutFatlock(fl, tl);
1119     tl->fat = NULL;
1120     }
1121     return tl->fat == NULL;
1122     }
1123    
1124     /*
1125     * (i) global lock is held
1126     * (ii) fl->susp > 0
1127     */
1128     static void
1129     js_ResumeThread(JSThinLock *tl)
1130     {
1131     JSFatLock *fl = tl->fat;
1132     PRStatus stat;
1133    
1134     JS_ASSERT(fl != NULL);
1135     JS_ASSERT(fl->susp > 0);
1136     PR_Lock(fl->slock);
1137     js_UnlockGlobal(tl);
1138     stat = PR_NotifyCondVar(fl->svar);
1139     JS_ASSERT(stat != PR_FAILURE);
1140     PR_Unlock(fl->slock);
1141     }
1142    
1143     static void
1144     js_Enqueue(JSThinLock *tl, jsword me)
1145     {
1146     jsword o, n;
1147    
1148     js_LockGlobal(tl);
1149     for (;;) {
1150     o = ReadWord(tl->owner);
1151     n = Thin_SetWait(o);
1152     if (o != 0 && NativeCompareAndSwap(&tl->owner, o, n)) {
1153     if (js_SuspendThread(tl))
1154     me = Thin_RemoveWait(me);
1155     else
1156     me = Thin_SetWait(me);
1157     }
1158     else if (NativeCompareAndSwap(&tl->owner, 0, me)) {
1159     js_UnlockGlobal(tl);
1160     return;
1161     }
1162     }
1163     }
1164    
1165     static void
1166     js_Dequeue(JSThinLock *tl)
1167     {
1168     jsword o;
1169    
1170     js_LockGlobal(tl);
1171     o = ReadWord(tl->owner);
1172     JS_ASSERT(Thin_GetWait(o) != 0);
1173     JS_ASSERT(tl->fat != NULL);
1174     if (!NativeCompareAndSwap(&tl->owner, o, 0)) /* release it */
1175     JS_ASSERT(0);
1176     js_ResumeThread(tl);
1177     }
1178    
1179     static JS_ALWAYS_INLINE void
1180     ThinLock(JSThinLock *tl, jsword me)
1181     {
1182     JS_ASSERT(CURRENT_THREAD_IS_ME(me));
1183     if (NativeCompareAndSwap(&tl->owner, 0, me))
1184     return;
1185     if (Thin_RemoveWait(ReadWord(tl->owner)) != me)
1186     js_Enqueue(tl, me);
1187     #ifdef DEBUG
1188     else
1189     JS_ASSERT(0);
1190     #endif
1191     }
1192    
1193     static JS_ALWAYS_INLINE void
1194     ThinUnlock(JSThinLock *tl, jsword me)
1195     {
1196     JS_ASSERT(CURRENT_THREAD_IS_ME(me));
1197    
1198     /*
1199     * Since we can race with the NativeCompareAndSwap in js_Enqueue, we need
1200     * to use a C_A_S here as well -- Arjan van de Ven 30/1/08
1201     */
1202     if (NativeCompareAndSwap(&tl->owner, me, 0))
1203     return;
1204    
1205     JS_ASSERT(Thin_GetWait(tl->owner));
1206     if (Thin_RemoveWait(ReadWord(tl->owner)) == me)
1207     js_Dequeue(tl);
1208     #ifdef DEBUG
1209     else
1210     JS_ASSERT(0); /* unbalanced unlock */
1211     #endif
1212     }
1213    
1214     #endif /* !NSPR_LOCK */
1215    
1216     void
1217     js_Lock(JSContext *cx, JSThinLock *tl)
1218     {
1219     ThinLock(tl, CX_THINLOCK_ID(cx));
1220     }
1221    
1222     void
1223     js_Unlock(JSContext *cx, JSThinLock *tl)
1224     {
1225     ThinUnlock(tl, CX_THINLOCK_ID(cx));
1226     }
1227    
1228     void
1229     js_LockRuntime(JSRuntime *rt)
1230     {
1231     PR_Lock(rt->rtLock);
1232     #ifdef DEBUG
1233     rt->rtLockOwner = js_CurrentThreadId();
1234     #endif
1235     }
1236    
1237     void
1238     js_UnlockRuntime(JSRuntime *rt)
1239     {
1240     #ifdef DEBUG
1241     rt->rtLockOwner = 0;
1242     #endif
1243     PR_Unlock(rt->rtLock);
1244     }
1245    
1246     void
1247     js_LockTitle(JSContext *cx, JSTitle *title)
1248     {
1249     jsword me = CX_THINLOCK_ID(cx);
1250    
1251     JS_ASSERT(CURRENT_THREAD_IS_ME(me));
1252     JS_ASSERT(title->ownercx != cx);
1253     if (CX_THREAD_IS_RUNNING_GC(cx))
1254     return;
1255     if (title->ownercx && ClaimTitle(title, cx))
1256     return;
1257    
1258     if (Thin_RemoveWait(ReadWord(title->lock.owner)) == me) {
1259     JS_ASSERT(title->u.count > 0);
1260     LOGIT(title, '+');
1261     title->u.count++;
1262     } else {
1263     ThinLock(&title->lock, me);
1264     JS_ASSERT(title->u.count == 0);
1265     LOGIT(title, '1');
1266     title->u.count = 1;
1267     }
1268     }
1269    
1270     void
1271     js_UnlockTitle(JSContext *cx, JSTitle *title)
1272     {
1273     jsword me = CX_THINLOCK_ID(cx);
1274    
1275     /* We hope compilers use me instead of reloading cx->thread in the macro. */
1276     if (CX_THREAD_IS_RUNNING_GC(cx))
1277     return;
1278     if (cx->lockedSealedTitle == title) {
1279     cx->lockedSealedTitle = NULL;
1280     return;
1281     }
1282    
1283     /*
1284     * If title->ownercx is not null, it's likely that two contexts not using
1285     * requests nested locks for title. The first context, cx here, claimed
1286     * title; the second, title->ownercx here, re-claimed it because the first
1287     * was not in a request, or was on the same thread. We don't want to keep
1288     * track of such nesting, because it penalizes the common non-nested case.
1289     * Instead of asserting here and silently coping, we simply re-claim title
1290     * for cx and return.
1291     *
1292     * See http://bugzilla.mozilla.org/show_bug.cgi?id=229200 for a real world
1293     * case where an asymmetric thread model (Mozilla's main thread is known
1294     * to be the only thread that runs the GC) combined with multiple contexts
1295     * per thread has led to such request-less nesting.
1296     */
1297     if (title->ownercx) {
1298     JS_ASSERT(title->u.count == 0);
1299     JS_ASSERT(title->lock.owner == 0);
1300     title->ownercx = cx;
1301     return;
1302     }
1303    
1304     JS_ASSERT(title->u.count > 0);
1305     if (Thin_RemoveWait(ReadWord(title->lock.owner)) != me) {
1306     JS_ASSERT(0); /* unbalanced unlock */
1307     return;
1308     }
1309 siliconforks 460 LOGIT(title, '-');
1310 siliconforks 332 if (--title->u.count == 0)
1311     ThinUnlock(&title->lock, me);
1312     }
1313    
1314     /*
1315     * NB: oldtitle may be null if our caller is js_GetMutableScope and it just
1316     * dropped the last reference to oldtitle.
1317     */
1318     void
1319     js_TransferTitle(JSContext *cx, JSTitle *oldtitle, JSTitle *newtitle)
1320     {
1321     JS_ASSERT(JS_IS_TITLE_LOCKED(cx, newtitle));
1322    
1323     /*
1324     * If the last reference to oldtitle went away, newtitle needs no lock
1325     * state update.
1326     */
1327     if (!oldtitle)
1328     return;
1329     JS_ASSERT(JS_IS_TITLE_LOCKED(cx, oldtitle));
1330    
1331     /*
1332     * Special case in js_LockTitle and js_UnlockTitle for the GC calling
1333     * code that locks, unlocks, or mutates. Nothing to do in these cases,
1334     * because title and newtitle were "locked" by the GC thread, so neither
1335     * was actually locked.
1336     */
1337     if (CX_THREAD_IS_RUNNING_GC(cx))
1338     return;
1339    
1340     /*
1341     * Special case in js_LockObj and js_UnlockTitle for locking the sealed
1342     * scope of an object that owns that scope (the prototype or mutated obj
1343     * for which OBJ_SCOPE(obj)->object == obj), and unlocking it.
1344     */
1345     JS_ASSERT(cx->lockedSealedTitle != newtitle);
1346     if (cx->lockedSealedTitle == oldtitle) {
1347     JS_ASSERT(newtitle->ownercx == cx ||
1348     (!newtitle->ownercx && newtitle->u.count == 1));
1349     cx->lockedSealedTitle = NULL;
1350     return;
1351     }
1352    
1353     /*
1354     * If oldtitle is single-threaded, there's nothing to do.
1355     */
1356     if (oldtitle->ownercx) {
1357     JS_ASSERT(oldtitle->ownercx == cx);
1358     JS_ASSERT(newtitle->ownercx == cx ||
1359     (!newtitle->ownercx && newtitle->u.count == 1));
1360     return;
1361     }
1362    
1363     /*
1364     * We transfer oldtitle->u.count only if newtitle is not single-threaded.
1365     * Flow unwinds from here through some number of JS_UNLOCK_TITLE and/or
1366     * JS_UNLOCK_OBJ macro calls, which will decrement newtitle->u.count only
1367     * if they find newtitle->ownercx != cx.
1368     */
1369     if (newtitle->ownercx != cx) {
1370     JS_ASSERT(!newtitle->ownercx);
1371     newtitle->u.count = oldtitle->u.count;
1372     }
1373    
1374     /*
1375     * Reset oldtitle's lock state so that it is completely unlocked.
1376     */
1377 siliconforks 460 LOGIT(oldtitle, '0');
1378 siliconforks 332 oldtitle->u.count = 0;
1379     ThinUnlock(&oldtitle->lock, CX_THINLOCK_ID(cx));
1380     }
1381    
1382     void
1383     js_LockObj(JSContext *cx, JSObject *obj)
1384     {
1385     JSScope *scope;
1386     JSTitle *title;
1387    
1388     JS_ASSERT(OBJ_IS_NATIVE(obj));
1389    
1390     /*
1391     * We must test whether the GC is calling and return without mutating any
1392     * state, especially cx->lockedSealedTitle. Note asymmetry with respect to
1393     * js_UnlockObj, which is a thin layer on top of js_UnlockTitle.
1394     */
1395     if (CX_THREAD_IS_RUNNING_GC(cx))
1396     return;
1397    
1398     for (;;) {
1399     scope = OBJ_SCOPE(obj);
1400     title = &scope->title;
1401     if (SCOPE_IS_SEALED(scope) && scope->object == obj &&
1402     !cx->lockedSealedTitle) {
1403     cx->lockedSealedTitle = title;
1404     return;
1405     }
1406    
1407     js_LockTitle(cx, title);
1408    
1409     /* If obj still has this scope, we're done. */
1410     if (scope == OBJ_SCOPE(obj))
1411     return;
1412    
1413     /* Lost a race with a mutator; retry with obj's new scope. */
1414     js_UnlockTitle(cx, title);
1415     }
1416     }
1417    
1418     void
1419     js_UnlockObj(JSContext *cx, JSObject *obj)
1420     {
1421     JS_ASSERT(OBJ_IS_NATIVE(obj));
1422     js_UnlockTitle(cx, &OBJ_SCOPE(obj)->title);
1423     }
1424    
1425     void
1426     js_InitTitle(JSContext *cx, JSTitle *title)
1427     {
1428     #ifdef JS_THREADSAFE
1429     title->ownercx = cx;
1430     memset(&title->lock, 0, sizeof title->lock);
1431    
1432     /*
1433     * Set u.link = NULL, not u.count = 0, in case the target architecture's
1434     * null pointer has a non-zero integer representation.
1435     */
1436     title->u.link = NULL;
1437    
1438     #ifdef JS_DEBUG_TITLE_LOCKS
1439     title->file[0] = title->file[1] = title->file[2] = title->file[3] = NULL;
1440     title->line[0] = title->line[1] = title->line[2] = title->line[3] = 0;
1441     #endif
1442     #endif
1443     }
1444    
1445     void
1446     js_FinishTitle(JSContext *cx, JSTitle *title)
1447     {
1448 siliconforks 460 #ifdef DEBUG_SCOPE_COUNT
1449     js_unlog_title(title);
1450     #endif
1451    
1452 siliconforks 332 #ifdef JS_THREADSAFE
1453     /* Title must be single-threaded at this point, so set ownercx. */
1454     JS_ASSERT(title->u.count == 0);
1455     title->ownercx = cx;
1456     js_FinishLock(&title->lock);
1457     #endif
1458     }
1459    
1460     #ifdef DEBUG
1461    
1462     JSBool
1463     js_IsRuntimeLocked(JSRuntime *rt)
1464     {
1465     return js_CurrentThreadId() == rt->rtLockOwner;
1466     }
1467    
1468     JSBool
1469     js_IsObjLocked(JSContext *cx, JSObject *obj)
1470     {
1471 siliconforks 460 return js_IsTitleLocked(cx, &OBJ_SCOPE(obj)->title);
1472 siliconforks 332 }
1473    
1474     JSBool
1475     js_IsTitleLocked(JSContext *cx, JSTitle *title)
1476     {
1477     /* Special case: the GC locking any object's title, see js_LockTitle. */
1478     if (CX_THREAD_IS_RUNNING_GC(cx))
1479     return JS_TRUE;
1480    
1481     /* Special case: locked object owning a sealed scope, see js_LockObj. */
1482     if (cx->lockedSealedTitle == title)
1483     return JS_TRUE;
1484    
1485     /*
1486     * General case: the title is either exclusively owned (by cx), or it has
1487     * a thin or fat lock to cope with shared (concurrent) ownership.
1488     */
1489     if (title->ownercx) {
1490     JS_ASSERT(title->ownercx == cx || title->ownercx->thread == cx->thread);
1491     return JS_TRUE;
1492     }
1493     return js_CurrentThreadId() ==
1494     ((JSThread *)Thin_RemoveWait(ReadWord(title->lock.owner)))->id;
1495     }
1496    
1497     #ifdef JS_DEBUG_TITLE_LOCKS
1498     void
1499     js_SetScopeInfo(JSScope *scope, const char *file, int line)
1500     {
1501     JSTitle *title = &scope->title;
1502     if (!title->ownercx) {
1503     jsrefcount count = title->u.count;
1504     JS_ASSERT_IF(!SCOPE_IS_SEALED(scope), count > 0);
1505     JS_ASSERT(count <= 4);
1506     title->file[count - 1] = file;
1507     title->line[count - 1] = line;
1508     }
1509     }
1510     #endif /* JS_DEBUG_TITLE_LOCKS */
1511     #endif /* DEBUG */
1512     #endif /* JS_THREADSAFE */
