1 : /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2 : * vim: set ts=8 sw=4 et tw=78:
3 : *
4 : * ***** BEGIN LICENSE BLOCK *****
5 : * Version: MPL 1.1/GPL 2.0/LGPL 2.1
6 : *
7 : * The contents of this file are subject to the Mozilla Public License Version
8 : * 1.1 (the "License"); you may not use this file except in compliance with
9 : * the License. You may obtain a copy of the License at
10 : * http://www.mozilla.org/MPL/
11 : *
12 : * Software distributed under the License is distributed on an "AS IS" basis,
13 : * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
14 : * for the specific language governing rights and limitations under the
15 : * License.
16 : *
17 : * The Original Code is Mozilla Communicator client code, released
18 : * March 31, 1998.
19 : *
20 : * The Initial Developer of the Original Code is
21 : * Netscape Communications Corporation.
22 : * Portions created by the Initial Developer are Copyright (C) 1998
23 : * the Initial Developer. All Rights Reserved.
24 : *
25 : * Contributor(s):
26 : *
27 : * Alternatively, the contents of this file may be used under the terms of
28 : * either of the GNU General Public License Version 2 or later (the "GPL"),
29 : * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
30 : * in which case the provisions of the GPL or the LGPL are applicable instead
31 : * of those above. If you wish to allow use of your version of this file only
32 : * under the terms of either the GPL or the LGPL, and not to allow others to
33 : * use your version of this file under the terms of the MPL, indicate your
34 : * decision by deleting the provisions above and replace them with the notice
35 : * and other provisions required by the GPL or the LGPL. If you do not delete
36 : * the provisions above, a recipient may use your version of this file under
37 : * the terms of any one of the MPL, the GPL or the LGPL.
38 : *
39 : * ***** END LICENSE BLOCK ***** */
40 :
41 : /* JS Mark-and-Sweep Garbage Collector. */
42 :
43 : #include "mozilla/Attributes.h"
44 : #include "mozilla/Util.h"
45 :
46 : /*
47 : * This code implements a mark-and-sweep garbage collector. The mark phase is
48 : * incremental. Most sweeping is done on a background thread. A GC is divided
49 : * into slices as follows:
50 : *
51 : * Slice 1: Roots pushed onto the mark stack. The mark stack is processed by
52 : * popping an element, marking it, and pushing its children.
53 : * ... JS code runs ...
54 : * Slice 2: More mark stack processing.
55 : * ... JS code runs ...
56 : * Slice n-1: More mark stack processing.
57 : * ... JS code runs ...
58 : * Slice n: Mark stack is completely drained. Some sweeping is done.
59 : * ... JS code runs, remaining sweeping done on background thread ...
60 : *
61 : * When background sweeping finishes the GC is complete.
62 : *
63 : * Incremental GC requires close collaboration with the mutator (i.e., JS code):
64 : *
65 : * 1. During an incremental GC, if a memory location (except a root) is written
66 : * to, then the value it previously held must be marked. Write barriers ensure
67 : * this.
68 : * 2. Any object that is allocated during incremental GC must start out marked.
69 : * 3. Roots are special memory locations that don't need write
70 : * barriers. However, they must be marked in the first slice. Roots are things
71 : * like the C stack and the VM stack, since it would be too expensive to put
72 : * barriers on them.
73 : */
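/*
 * Illustrative sketch only (hypothetical helper names, not the engine's
 * actual barrier API): invariant 1 is what a pre-write barrier enforces,
 * and invariant 2 is what allocation during an incremental GC must ensure.
 * In this file invariant 2 is handled per arena, by flagging arenas as
 * allocatedDuringIncremental and delaying their marking, rather than by
 * marking each new cell individually.
 *
 *   void BarrieredSetValue(JSCompartment *comp, Value *slot, const Value &v) {
 *       if (comp->needsBarrier() && slot->isMarkable())
 *           MarkOverwrittenValue(comp, *slot); // invariant 1: mark the old value
 *       *slot = v;
 *   }
 *
 *   void *AllocateGCThing(JSCompartment *comp, AllocKind kind) {
 *       void *thing = AllocateFromArenas(comp, kind);
 *       if (thing && comp->needsBarrier())
 *           MarkNewlyAllocated(thing);         // invariant 2: start out marked
 *       return thing;
 *   }
 */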
74 :
75 : #include <math.h>
76 : #include <string.h> /* for memset used when DEBUG */
77 :
78 : #include "jstypes.h"
79 : #include "jsutil.h"
80 : #include "jshash.h"
81 : #include "jsclist.h"
82 : #include "jsprf.h"
83 : #include "jsapi.h"
84 : #include "jsatom.h"
85 : #include "jscompartment.h"
86 : #include "jscrashreport.h"
87 : #include "jscrashformat.h"
88 : #include "jscntxt.h"
89 : #include "jsversion.h"
90 : #include "jsdbgapi.h"
91 : #include "jsexn.h"
92 : #include "jsfun.h"
93 : #include "jsgc.h"
94 : #include "jsgcmark.h"
95 : #include "jsinterp.h"
96 : #include "jsiter.h"
97 : #include "jslock.h"
98 : #include "jsnum.h"
99 : #include "jsobj.h"
100 : #include "jsprobes.h"
101 : #include "jsproxy.h"
102 : #include "jsscope.h"
103 : #include "jsscript.h"
104 : #include "jswatchpoint.h"
105 : #include "jsweakmap.h"
106 : #if JS_HAS_XML_SUPPORT
107 : #include "jsxml.h"
108 : #endif
109 :
110 : #include "frontend/Parser.h"
111 : #include "gc/Memory.h"
112 : #include "methodjit/MethodJIT.h"
113 : #include "vm/Debugger.h"
114 : #include "vm/String.h"
115 :
116 : #include "jsinterpinlines.h"
117 : #include "jsobjinlines.h"
118 :
119 : #include "vm/ScopeObject-inl.h"
120 : #include "vm/String-inl.h"
121 :
122 : #ifdef MOZ_VALGRIND
123 : # define JS_VALGRIND
124 : #endif
125 : #ifdef JS_VALGRIND
126 : # include <valgrind/memcheck.h>
127 : #endif
128 :
129 : #ifdef XP_WIN
130 : # include "jswin.h"
131 : #else
132 : # include <unistd.h>
133 : #endif
134 :
135 : using namespace mozilla;
136 : using namespace js;
137 : using namespace js::gc;
138 :
139 : namespace js {
140 : namespace gc {
141 :
142 : /*
143 : * Lower limit of heap size after which we start limiting heap growth.
144 : */
145 : const size_t GC_ALLOCATION_THRESHOLD = 30 * 1024 * 1024;
146 :
147 : /*
148 : * A GC is triggered once the number of newly allocated arenas reaches
149 : * GC_HEAP_GROWTH_FACTOR times the number of live arenas after the last GC,
150 : * once past the GC_ALLOCATION_THRESHOLD lower limit. This factor is used
151 : * for non-incremental GCs.
152 : */
153 : const float GC_HEAP_GROWTH_FACTOR = 3.0f;
154 :
155 : /* Perform a Full GC every 20 seconds if MaybeGC is called */
156 : static const uint64_t GC_IDLE_FULL_SPAN = 20 * 1000 * 1000;
157 :
158 : #ifdef JS_GC_ZEAL
159 : static void
160 : StartVerifyBarriers(JSContext *cx);
161 :
162 : static void
163 : EndVerifyBarriers(JSContext *cx);
164 :
165 : void
166 : FinishVerifier(JSRuntime *rt);
167 : #endif
168 :
169 : /* This array should be const, but that doesn't link right under GCC. */
170 : AllocKind slotsToThingKind[] = {
171 : /* 0 */ FINALIZE_OBJECT0, FINALIZE_OBJECT2, FINALIZE_OBJECT2, FINALIZE_OBJECT4,
172 : /* 4 */ FINALIZE_OBJECT4, FINALIZE_OBJECT8, FINALIZE_OBJECT8, FINALIZE_OBJECT8,
173 : /* 8 */ FINALIZE_OBJECT8, FINALIZE_OBJECT12, FINALIZE_OBJECT12, FINALIZE_OBJECT12,
174 : /* 12 */ FINALIZE_OBJECT12, FINALIZE_OBJECT16, FINALIZE_OBJECT16, FINALIZE_OBJECT16,
175 : /* 16 */ FINALIZE_OBJECT16
176 : };
177 :
178 : JS_STATIC_ASSERT(JS_ARRAY_LENGTH(slotsToThingKind) == SLOTS_TO_THING_KIND_LIMIT);
179 :
180 : const uint32_t Arena::ThingSizes[] = {
181 : sizeof(JSObject), /* FINALIZE_OBJECT0 */
182 : sizeof(JSObject), /* FINALIZE_OBJECT0_BACKGROUND */
183 : sizeof(JSObject_Slots2), /* FINALIZE_OBJECT2 */
184 : sizeof(JSObject_Slots2), /* FINALIZE_OBJECT2_BACKGROUND */
185 : sizeof(JSObject_Slots4), /* FINALIZE_OBJECT4 */
186 : sizeof(JSObject_Slots4), /* FINALIZE_OBJECT4_BACKGROUND */
187 : sizeof(JSObject_Slots8), /* FINALIZE_OBJECT8 */
188 : sizeof(JSObject_Slots8), /* FINALIZE_OBJECT8_BACKGROUND */
189 : sizeof(JSObject_Slots12), /* FINALIZE_OBJECT12 */
190 : sizeof(JSObject_Slots12), /* FINALIZE_OBJECT12_BACKGROUND */
191 : sizeof(JSObject_Slots16), /* FINALIZE_OBJECT16 */
192 : sizeof(JSObject_Slots16), /* FINALIZE_OBJECT16_BACKGROUND */
193 : sizeof(JSScript), /* FINALIZE_SCRIPT */
194 : sizeof(Shape), /* FINALIZE_SHAPE */
195 : sizeof(BaseShape), /* FINALIZE_BASE_SHAPE */
196 : sizeof(types::TypeObject), /* FINALIZE_TYPE_OBJECT */
197 : #if JS_HAS_XML_SUPPORT
198 : sizeof(JSXML), /* FINALIZE_XML */
199 : #endif
200 : sizeof(JSShortString), /* FINALIZE_SHORT_STRING */
201 : sizeof(JSString), /* FINALIZE_STRING */
202 : sizeof(JSExternalString), /* FINALIZE_EXTERNAL_STRING */
203 : };
204 :
205 : #define OFFSET(type) uint32_t(sizeof(ArenaHeader) + (ArenaSize - sizeof(ArenaHeader)) % sizeof(type))
206 :
207 : const uint32_t Arena::FirstThingOffsets[] = {
208 : OFFSET(JSObject), /* FINALIZE_OBJECT0 */
209 : OFFSET(JSObject), /* FINALIZE_OBJECT0_BACKGROUND */
210 : OFFSET(JSObject_Slots2), /* FINALIZE_OBJECT2 */
211 : OFFSET(JSObject_Slots2), /* FINALIZE_OBJECT2_BACKGROUND */
212 : OFFSET(JSObject_Slots4), /* FINALIZE_OBJECT4 */
213 : OFFSET(JSObject_Slots4), /* FINALIZE_OBJECT4_BACKGROUND */
214 : OFFSET(JSObject_Slots8), /* FINALIZE_OBJECT8 */
215 : OFFSET(JSObject_Slots8), /* FINALIZE_OBJECT8_BACKGROUND */
216 : OFFSET(JSObject_Slots12), /* FINALIZE_OBJECT12 */
217 : OFFSET(JSObject_Slots12), /* FINALIZE_OBJECT12_BACKGROUND */
218 : OFFSET(JSObject_Slots16), /* FINALIZE_OBJECT16 */
219 : OFFSET(JSObject_Slots16), /* FINALIZE_OBJECT16_BACKGROUND */
220 : OFFSET(JSScript), /* FINALIZE_SCRIPT */
221 : OFFSET(Shape), /* FINALIZE_SHAPE */
222 : OFFSET(BaseShape), /* FINALIZE_BASE_SHAPE */
223 : OFFSET(types::TypeObject), /* FINALIZE_TYPE_OBJECT */
224 : #if JS_HAS_XML_SUPPORT
225 : OFFSET(JSXML), /* FINALIZE_XML */
226 : #endif
227 : OFFSET(JSShortString), /* FINALIZE_SHORT_STRING */
228 : OFFSET(JSString), /* FINALIZE_STRING */
229 : OFFSET(JSExternalString), /* FINALIZE_EXTERNAL_STRING */
230 : };
231 :
232 : #undef OFFSET
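/*
 * Worked example of the OFFSET computation above, using assumed sizes
 * (not the real ArenaSize or ArenaHeader size): with ArenaSize = 4096,
 * sizeof(ArenaHeader) = 64 and sizeof(type) = 40,
 *
 *   OFFSET(type) = 64 + (4096 - 64) % 40 = 64 + 32 = 96
 *
 * so (4096 - 96) / 40 = 100 things fit and the last one ends exactly at the
 * arena boundary; the unusable remainder is placed right after the header
 * rather than at the end of the arena.
 */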
233 :
234 : #ifdef DEBUG
235 : void
236 12401769 : ArenaHeader::checkSynchronizedWithFreeList() const
237 : {
238 : /*
239 : * Do not allow access to the free list when its real head is still stored
240 : * in FreeLists and is not synchronized with this one.
241 : */
242 12401769 : JS_ASSERT(allocated());
243 :
244 : /*
245 : * We can be called from the background finalization thread when the free
246 : * list in the compartment can mutate at any moment. We cannot do any
247 : * checks in this case.
248 : */
249 12401663 : if (!compartment->rt->gcRunning)
250 1115994 : return;
251 :
252 11285669 : FreeSpan firstSpan = FreeSpan::decodeOffsets(arenaAddress(), firstFreeSpanOffsets);
253 11285669 : if (firstSpan.isEmpty())
254 1361529 : return;
255 9924140 : const FreeSpan *list = compartment->arenas.getFreeList(getAllocKind());
256 9924140 : if (list->isEmpty() || firstSpan.arenaAddress() != list->arenaAddress())
257 9136469 : return;
258 :
259 : /*
260 : * Here this arena has free things, FreeList::lists[thingKind] is not
261 : * empty and also points to this arena. Thus they must be the same.
262 : */
263 787671 : JS_ASSERT(firstSpan.isSameNonEmptySpan(list));
264 : }
265 : #endif
266 :
267 : /* static */ void
268 0 : Arena::staticAsserts()
269 : {
270 : JS_STATIC_ASSERT(sizeof(Arena) == ArenaSize);
271 : JS_STATIC_ASSERT(JS_ARRAY_LENGTH(ThingSizes) == FINALIZE_LIMIT);
272 : JS_STATIC_ASSERT(JS_ARRAY_LENGTH(FirstThingOffsets) == FINALIZE_LIMIT);
273 0 : }
274 :
275 : template<typename T>
276 : inline bool
277 2955046 : Arena::finalize(FreeOp *fop, AllocKind thingKind, size_t thingSize)
278 : {
279 : /* Enforce requirements on size of T. */
280 2955046 : JS_ASSERT(thingSize % Cell::CellSize == 0);
281 2955046 : JS_ASSERT(thingSize <= 255);
282 :
283 2955046 : JS_ASSERT(aheader.allocated());
284 2955046 : JS_ASSERT(thingKind == aheader.getAllocKind());
285 2955046 : JS_ASSERT(thingSize == aheader.getThingSize());
286 2955046 : JS_ASSERT(!aheader.hasDelayedMarking);
287 2955046 : JS_ASSERT(!aheader.markOverflow);
288 2955046 : JS_ASSERT(!aheader.allocatedDuringIncremental);
289 :
290 2955046 : uintptr_t thing = thingsStart(thingKind);
291 2955046 : uintptr_t lastByte = thingsEnd() - 1;
292 :
293 2955046 : FreeSpan nextFree(aheader.getFirstFreeSpan());
294 2955046 : nextFree.checkSpan();
295 :
296 2955046 : FreeSpan newListHead;
297 2955046 : FreeSpan *newListTail = &newListHead;
298 2955046 : uintptr_t newFreeSpanStart = 0;
299 2955046 : bool allClear = true;
300 5910092 : DebugOnly<size_t> nmarked = 0;
301 382256469 : for (;; thing += thingSize) {
302 385211515 : JS_ASSERT(thing <= lastByte + 1);
303 385211515 : if (thing == nextFree.first) {
304 4804812 : JS_ASSERT(nextFree.last <= lastByte);
305 4804812 : if (nextFree.last == lastByte)
306 : break;
307 1849766 : JS_ASSERT(Arena::isAligned(nextFree.last, thingSize));
308 1849766 : if (!newFreeSpanStart)
309 1468285 : newFreeSpanStart = thing;
310 1849766 : thing = nextFree.last;
311 1849766 : nextFree = *nextFree.nextSpan();
312 1849766 : nextFree.checkSpan();
313 : } else {
314 380406703 : T *t = reinterpret_cast<T *>(thing);
315 380406703 : if (t->isMarked()) {
316 248839476 : allClear = false;
317 248839476 : nmarked++;
318 248839476 : if (newFreeSpanStart) {
319 2397087 : JS_ASSERT(thing >= thingsStart(thingKind) + thingSize);
320 2397087 : newListTail->first = newFreeSpanStart;
321 2397087 : newListTail->last = thing - thingSize;
322 2397087 : newListTail = newListTail->nextSpanUnchecked(thingSize);
323 2397087 : newFreeSpanStart = 0;
324 : }
325 : } else {
326 131567227 : if (!newFreeSpanStart)
327 2316803 : newFreeSpanStart = thing;
328 131567227 : t->finalize(fop);
329 131567227 : JS_POISON(t, JS_FREE_PATTERN, thingSize);
330 : }
331 : }
332 : }
333 :
334 2955046 : if (allClear) {
335 1326848 : JS_ASSERT(newListTail == &newListHead);
336 1326848 : JS_ASSERT(newFreeSpanStart == thingsStart(thingKind));
337 1326848 : return true;
338 : }
339 :
340 1628198 : newListTail->first = newFreeSpanStart ? newFreeSpanStart : nextFree.first;
341 1628198 : JS_ASSERT(Arena::isAligned(newListTail->first, thingSize));
342 1628198 : newListTail->last = lastByte;
343 :
344 : #ifdef DEBUG
345 1628198 : size_t nfree = 0;
346 4025285 : for (const FreeSpan *span = &newListHead; span != newListTail; span = span->nextSpan()) {
347 2397087 : span->checkSpan();
348 2397087 : JS_ASSERT(Arena::isAligned(span->first, thingSize));
349 2397087 : JS_ASSERT(Arena::isAligned(span->last, thingSize));
350 2397087 : nfree += (span->last - span->first) / thingSize + 1;
351 2397087 : JS_ASSERT(nfree + nmarked <= thingsPerArena(thingSize));
352 : }
353 1628198 : nfree += (newListTail->last + 1 - newListTail->first) / thingSize;
354 1628198 : JS_ASSERT(nfree + nmarked == thingsPerArena(thingSize));
355 : #endif
356 1628198 : aheader.setFirstFreeSpan(&newListHead);
357 :
358 1628198 : return false;
359 : }
360 :
361 : template<typename T>
362 : inline void
363 1318324 : FinalizeTypedArenas(FreeOp *fop, ArenaLists::ArenaList *al, AllocKind thingKind)
364 : {
365 : /*
366 : * Release empty arenas and move non-full arenas with some free things into
367 : * a separate list that we append to al after the loop to ensure that any
368 : * arena before al->cursor is full.
369 : */
370 1318324 : JS_ASSERT_IF(!al->head, al->cursor == &al->head);
371 1318324 : ArenaLists::ArenaList available;
372 1318324 : ArenaHeader **ap = &al->head;
373 1318324 : size_t thingSize = Arena::thingSize(thingKind);
374 7228416 : while (ArenaHeader *aheader = *ap) {
375 2955046 : bool allClear = aheader->getArena()->finalize<T>(fop, thingKind, thingSize);
376 2955046 : if (allClear) {
377 1326848 : *ap = aheader->next;
378 1326848 : aheader->chunk()->releaseArena(aheader);
379 1628198 : } else if (aheader->hasFreeThings()) {
380 705613 : *ap = aheader->next;
381 705613 : *available.cursor = aheader;
382 705613 : available.cursor = &aheader->next;
383 : } else {
384 922585 : ap = &aheader->next;
385 : }
386 : }
387 :
388 : /* Terminate the available list and append it to al. */
389 1318324 : *available.cursor = NULL;
390 1318324 : *ap = available.head;
391 1318324 : al->cursor = ap;
392 2274962 : JS_ASSERT_IF(!al->head, al->cursor == &al->head);
393 1318324 : }
394 :
395 : /*
396 : * Finalize the list. On return al->cursor points to the first non-empty arena
397 : * after al->head.
398 : */
399 : static void
400 1318324 : FinalizeArenas(FreeOp *fop, ArenaLists::ArenaList *al, AllocKind thingKind)
401 : {
402 1318324 : switch(thingKind) {
403 : case FINALIZE_OBJECT0:
404 : case FINALIZE_OBJECT0_BACKGROUND:
405 : case FINALIZE_OBJECT2:
406 : case FINALIZE_OBJECT2_BACKGROUND:
407 : case FINALIZE_OBJECT4:
408 : case FINALIZE_OBJECT4_BACKGROUND:
409 : case FINALIZE_OBJECT8:
410 : case FINALIZE_OBJECT8_BACKGROUND:
411 : case FINALIZE_OBJECT12:
412 : case FINALIZE_OBJECT12_BACKGROUND:
413 : case FINALIZE_OBJECT16:
414 : case FINALIZE_OBJECT16_BACKGROUND:
415 688867 : FinalizeTypedArenas<JSObject>(fop, al, thingKind);
416 688867 : break;
417 : case FINALIZE_SCRIPT:
418 83697 : FinalizeTypedArenas<JSScript>(fop, al, thingKind);
419 83697 : break;
420 : case FINALIZE_SHAPE:
421 83697 : FinalizeTypedArenas<Shape>(fop, al, thingKind);
422 83697 : break;
423 : case FINALIZE_BASE_SHAPE:
424 83697 : FinalizeTypedArenas<BaseShape>(fop, al, thingKind);
425 83697 : break;
426 : case FINALIZE_TYPE_OBJECT:
427 83697 : FinalizeTypedArenas<types::TypeObject>(fop, al, thingKind);
428 83697 : break;
429 : #if JS_HAS_XML_SUPPORT
430 : case FINALIZE_XML:
431 83697 : FinalizeTypedArenas<JSXML>(fop, al, thingKind);
432 83697 : break;
433 : #endif
434 : case FINALIZE_STRING:
435 83224 : FinalizeTypedArenas<JSString>(fop, al, thingKind);
436 83224 : break;
437 : case FINALIZE_SHORT_STRING:
438 44051 : FinalizeTypedArenas<JSShortString>(fop, al, thingKind);
439 44051 : break;
440 : case FINALIZE_EXTERNAL_STRING:
441 83697 : FinalizeTypedArenas<JSExternalString>(fop, al, thingKind);
442 83697 : break;
443 : }
444 1318324 : }
445 :
446 : static inline Chunk *
447 40491 : AllocChunk() {
448 40491 : return static_cast<Chunk *>(MapAlignedPages(ChunkSize, ChunkSize));
449 : }
450 :
451 : static inline void
452 40491 : FreeChunk(Chunk *p) {
453 40491 : UnmapPages(static_cast<void *>(p), ChunkSize);
454 40491 : }
455 :
456 : #ifdef JS_THREADSAFE
457 : inline bool
458 42904 : ChunkPool::wantBackgroundAllocation(JSRuntime *rt) const
459 : {
460 : /*
461 : * To minimize memory waste we do not want to run the background chunk
462 : * allocation if we have empty chunks or when the runtime needs just a few
463 : * of them.
464 : */
465 42904 : return rt->gcHelperThread.canBackgroundAllocate() &&
466 : emptyCount == 0 &&
467 42904 : rt->gcChunkSet.count() >= 4;
468 : }
469 : #endif
470 :
471 : /* Must be called with the GC lock taken. */
472 : inline Chunk *
473 40393 : ChunkPool::get(JSRuntime *rt)
474 : {
475 40393 : JS_ASSERT(this == &rt->gcChunkPool);
476 :
477 40393 : Chunk *chunk = emptyChunkListHead;
478 40393 : if (chunk) {
479 2413 : JS_ASSERT(emptyCount);
480 2413 : emptyChunkListHead = chunk->info.next;
481 2413 : --emptyCount;
482 : } else {
483 37980 : JS_ASSERT(!emptyCount);
484 37980 : chunk = Chunk::allocate(rt);
485 37980 : if (!chunk)
486 0 : return NULL;
487 37980 : JS_ASSERT(chunk->info.numArenasFreeCommitted == ArenasPerChunk);
488 37980 : rt->gcNumArenasFreeCommitted += ArenasPerChunk;
489 : }
490 40393 : JS_ASSERT(chunk->unused());
491 40393 : JS_ASSERT(!rt->gcChunkSet.has(chunk));
492 :
493 : #ifdef JS_THREADSAFE
494 40393 : if (wantBackgroundAllocation(rt))
495 2511 : rt->gcHelperThread.startBackgroundAllocationIfIdle();
496 : #endif
497 :
498 40393 : return chunk;
499 : }
500 :
501 : /* Must be called either during the GC or with the GC lock taken. */
502 : inline void
503 42904 : ChunkPool::put(Chunk *chunk)
504 : {
505 42904 : chunk->info.age = 0;
506 42904 : chunk->info.next = emptyChunkListHead;
507 42904 : emptyChunkListHead = chunk;
508 42904 : emptyCount++;
509 42904 : }
510 :
511 : /* Must be called either during the GC or with the GC lock taken. */
512 : Chunk *
513 38436 : ChunkPool::expire(JSRuntime *rt, bool releaseAll)
514 : {
515 38436 : JS_ASSERT(this == &rt->gcChunkPool);
516 :
517 : /*
518 : * Return old empty chunks to the system while preserving the order of
519 : * other chunks in the list. This way, if the GC runs several times
520 : * without emptying the list, the older chunks will stay at the tail
521 : * and are more likely to reach the max age.
522 : */
523 38436 : Chunk *freeList = NULL;
524 118057 : for (Chunk **chunkp = &emptyChunkListHead; *chunkp; ) {
525 41185 : JS_ASSERT(emptyCount);
526 41185 : Chunk *chunk = *chunkp;
527 41185 : JS_ASSERT(chunk->unused());
528 41185 : JS_ASSERT(!rt->gcChunkSet.has(chunk));
529 41185 : JS_ASSERT(chunk->info.age <= MAX_EMPTY_CHUNK_AGE);
530 41185 : if (releaseAll || chunk->info.age == MAX_EMPTY_CHUNK_AGE) {
531 40491 : *chunkp = chunk->info.next;
532 40491 : --emptyCount;
533 40491 : chunk->prepareToBeFreed(rt);
534 40491 : chunk->info.next = freeList;
535 40491 : freeList = chunk;
536 : } else {
537 : /* Keep the chunk but increase its age. */
538 694 : ++chunk->info.age;
539 694 : chunkp = &chunk->info.next;
540 : }
541 : }
542 38436 : JS_ASSERT_IF(releaseAll, !emptyCount);
543 38436 : return freeList;
544 : }
545 :
546 : static void
547 59261 : FreeChunkList(Chunk *chunkListHead)
548 : {
549 99752 : while (Chunk *chunk = chunkListHead) {
550 40491 : JS_ASSERT(!chunk->info.numArenasFreeCommitted);
551 40491 : chunkListHead = chunk->info.next;
552 40491 : FreeChunk(chunk);
553 : }
554 18770 : }
555 :
556 : void
557 18761 : ChunkPool::expireAndFree(JSRuntime *rt, bool releaseAll)
558 : {
559 18761 : FreeChunkList(expire(rt, releaseAll));
560 18761 : }
561 :
562 : JS_FRIEND_API(int64_t)
563 0 : ChunkPool::countCleanDecommittedArenas(JSRuntime *rt)
564 : {
565 0 : JS_ASSERT(this == &rt->gcChunkPool);
566 :
567 0 : int64_t numDecommitted = 0;
568 0 : Chunk *chunk = emptyChunkListHead;
569 0 : while (chunk) {
570 0 : for (uint32_t i = 0; i < ArenasPerChunk; ++i)
571 0 : if (chunk->decommittedArenas.get(i))
572 0 : ++numDecommitted;
573 0 : chunk = chunk->info.next;
574 : }
575 0 : return numDecommitted;
576 : }
577 :
578 : /* static */ Chunk *
579 40491 : Chunk::allocate(JSRuntime *rt)
580 : {
581 40491 : Chunk *chunk = static_cast<Chunk *>(AllocChunk());
582 40491 : if (!chunk)
583 0 : return NULL;
584 40491 : chunk->init();
585 40491 : rt->gcStats.count(gcstats::STAT_NEW_CHUNK);
586 40491 : return chunk;
587 : }
588 :
589 : /* Must be called with the GC lock taken. */
590 : /* static */ inline void
591 0 : Chunk::release(JSRuntime *rt, Chunk *chunk)
592 : {
593 0 : JS_ASSERT(chunk);
594 0 : chunk->prepareToBeFreed(rt);
595 0 : FreeChunk(chunk);
596 0 : }
597 :
598 : inline void
599 40491 : Chunk::prepareToBeFreed(JSRuntime *rt)
600 : {
601 40491 : JS_ASSERT(rt->gcNumArenasFreeCommitted >= info.numArenasFreeCommitted);
602 40491 : rt->gcNumArenasFreeCommitted -= info.numArenasFreeCommitted;
603 40491 : rt->gcStats.count(gcstats::STAT_DESTROY_CHUNK);
604 :
605 : #ifdef DEBUG
606 : /*
607 : * Let FreeChunkList detect a missing prepareToBeFreed call before it
608 : * frees the chunk.
609 : */
610 40491 : info.numArenasFreeCommitted = 0;
611 : #endif
612 40491 : }
613 :
614 : void
615 40491 : Chunk::init()
616 : {
617 40491 : JS_POISON(this, JS_FREE_PATTERN, ChunkSize);
618 :
619 : /*
620 : * We clear the bitmap to guard against xpc_IsGrayGCThing being called on
621 : * uninitialized data, which would happen before the first GC cycle.
622 : */
623 40491 : bitmap.clear();
624 :
625 : /* Initialize the arena tracking bitmap. */
626 40491 : decommittedArenas.clear(false);
627 :
628 : /* Initialize the chunk info. */
629 40491 : info.freeArenasHead = &arenas[0].aheader;
630 40491 : info.lastDecommittedArenaOffset = 0;
631 40491 : info.numArenasFree = ArenasPerChunk;
632 40491 : info.numArenasFreeCommitted = ArenasPerChunk;
633 40491 : info.age = 0;
634 :
635 : /* Initialize the arena header state. */
636 10244223 : for (unsigned i = 0; i < ArenasPerChunk; i++) {
637 10203732 : arenas[i].aheader.setAsNotAllocated();
638 : arenas[i].aheader.next = (i + 1 < ArenasPerChunk)
639 : ? &arenas[i + 1].aheader
640 10203732 : : NULL;
641 : }
642 :
643 : /* The rest of info fields are initialized in PickChunk. */
644 40491 : }
645 :
646 : inline Chunk **
647 1726847 : GetAvailableChunkList(JSCompartment *comp)
648 : {
649 1726847 : JSRuntime *rt = comp->rt;
650 : return comp->isSystemCompartment
651 : ? &rt->gcSystemAvailableChunkListHead
652 1726847 : : &rt->gcUserAvailableChunkListHead;
653 : }
654 :
655 : inline void
656 43479 : Chunk::addToAvailableList(JSCompartment *comp)
657 : {
658 43479 : insertToAvailableList(GetAvailableChunkList(comp));
659 43479 : }
660 :
661 : inline void
662 43479 : Chunk::insertToAvailableList(Chunk **insertPoint)
663 : {
664 43479 : JS_ASSERT(hasAvailableArenas());
665 43479 : JS_ASSERT(!info.prevp);
666 43479 : JS_ASSERT(!info.next);
667 43479 : info.prevp = insertPoint;
668 43479 : Chunk *insertBefore = *insertPoint;
669 43479 : if (insertBefore) {
670 3085 : JS_ASSERT(insertBefore->info.prevp == insertPoint);
671 3085 : insertBefore->info.prevp = &info.next;
672 : }
673 43479 : info.next = insertBefore;
674 43479 : *insertPoint = this;
675 43479 : }
676 :
677 : inline void
678 43479 : Chunk::removeFromAvailableList()
679 : {
680 43479 : JS_ASSERT(info.prevp);
681 43479 : *info.prevp = info.next;
682 43479 : if (info.next) {
683 2847 : JS_ASSERT(info.next->info.prevp == &info.next);
684 2847 : info.next->info.prevp = info.prevp;
685 : }
686 43479 : info.prevp = NULL;
687 43479 : info.next = NULL;
688 43479 : }
689 :
690 : /*
691 : * Search for and return the next decommitted Arena. Our goal is to keep
692 : * lastDecommittedArenaOffset "close" to a free arena. We do this by setting
693 : * it to the most recently freed arena when we free, and forcing it to
694 : * the last alloc + 1 when we allocate.
695 : */
696 : uint32_t
697 0 : Chunk::findDecommittedArenaOffset()
698 : {
699 : /* Note: lastDecommittedArenaOffset can be past the end of the list. */
700 0 : for (unsigned i = info.lastDecommittedArenaOffset; i < ArenasPerChunk; i++)
701 0 : if (decommittedArenas.get(i))
702 0 : return i;
703 0 : for (unsigned i = 0; i < info.lastDecommittedArenaOffset; i++)
704 0 : if (decommittedArenas.get(i))
705 0 : return i;
706 0 : JS_NOT_REACHED("No decommitted arenas found.");
707 : return -1;
708 : }
709 :
710 : ArenaHeader *
711 0 : Chunk::fetchNextDecommittedArena()
712 : {
713 0 : JS_ASSERT(info.numArenasFreeCommitted == 0);
714 0 : JS_ASSERT(info.numArenasFree > 0);
715 :
716 0 : unsigned offset = findDecommittedArenaOffset();
717 0 : info.lastDecommittedArenaOffset = offset + 1;
718 0 : --info.numArenasFree;
719 0 : decommittedArenas.unset(offset);
720 :
721 0 : Arena *arena = &arenas[offset];
722 0 : MarkPagesInUse(arena, ArenaSize);
723 0 : arena->aheader.setAsNotAllocated();
724 :
725 0 : return &arena->aheader;
726 : }
727 :
728 : inline ArenaHeader *
729 1687458 : Chunk::fetchNextFreeArena(JSRuntime *rt)
730 : {
731 1687458 : JS_ASSERT(info.numArenasFreeCommitted > 0);
732 1687458 : JS_ASSERT(info.numArenasFreeCommitted <= info.numArenasFree);
733 1687458 : JS_ASSERT(info.numArenasFreeCommitted <= rt->gcNumArenasFreeCommitted);
734 :
735 1687458 : ArenaHeader *aheader = info.freeArenasHead;
736 1687458 : info.freeArenasHead = aheader->next;
737 1687458 : --info.numArenasFreeCommitted;
738 1687458 : --info.numArenasFree;
739 1687458 : --rt->gcNumArenasFreeCommitted;
740 :
741 1687458 : return aheader;
742 : }
743 :
744 : ArenaHeader *
745 1683368 : Chunk::allocateArena(JSCompartment *comp, AllocKind thingKind)
746 : {
747 1683368 : JS_ASSERT(hasAvailableArenas());
748 :
749 1683368 : JSRuntime *rt = comp->rt;
750 1683368 : JS_ASSERT(rt->gcBytes <= rt->gcMaxBytes);
751 1683368 : if (rt->gcMaxBytes - rt->gcBytes < ArenaSize)
752 41 : return NULL;
753 :
754 1683327 : ArenaHeader *aheader = JS_LIKELY(info.numArenasFreeCommitted > 0)
755 : ? fetchNextFreeArena(rt)
756 1683327 : : fetchNextDecommittedArena();
757 1683327 : aheader->init(comp, thingKind);
758 1683327 : if (JS_UNLIKELY(!hasAvailableArenas()))
759 3086 : removeFromAvailableList();
760 :
761 1683327 : Probes::resizeHeap(comp, rt->gcBytes, rt->gcBytes + ArenaSize);
762 1683327 : rt->gcBytes += ArenaSize;
763 1683327 : comp->gcBytes += ArenaSize;
764 1683327 : if (comp->gcBytes >= comp->gcTriggerBytes)
765 97581 : TriggerCompartmentGC(comp, gcreason::ALLOC_TRIGGER);
766 :
767 1683327 : return aheader;
768 : }
769 :
770 : inline void
771 1683327 : Chunk::addArenaToFreeList(JSRuntime *rt, ArenaHeader *aheader)
772 : {
773 1683327 : JS_ASSERT(!aheader->allocated());
774 1683327 : aheader->next = info.freeArenasHead;
775 1683327 : info.freeArenasHead = aheader;
776 1683327 : ++info.numArenasFreeCommitted;
777 1683327 : ++info.numArenasFree;
778 1683327 : ++rt->gcNumArenasFreeCommitted;
779 1683327 : }
780 :
781 : void
782 1683327 : Chunk::releaseArena(ArenaHeader *aheader)
783 : {
784 1683327 : JS_ASSERT(aheader->allocated());
785 1683327 : JS_ASSERT(!aheader->hasDelayedMarking);
786 1683327 : JSCompartment *comp = aheader->compartment;
787 1683327 : JSRuntime *rt = comp->rt;
788 : #ifdef JS_THREADSAFE
789 3366654 : AutoLockGC maybeLock;
790 1683327 : if (rt->gcHelperThread.sweeping())
791 220528 : maybeLock.lock(rt);
792 : #endif
793 :
794 1683327 : Probes::resizeHeap(comp, rt->gcBytes, rt->gcBytes - ArenaSize);
795 1683327 : JS_ASSERT(rt->gcBytes >= ArenaSize);
796 1683327 : JS_ASSERT(comp->gcBytes >= ArenaSize);
797 : #ifdef JS_THREADSAFE
798 1683327 : if (rt->gcHelperThread.sweeping())
799 220528 : comp->reduceGCTriggerBytes(GC_HEAP_GROWTH_FACTOR * ArenaSize);
800 : #endif
801 1683327 : rt->gcBytes -= ArenaSize;
802 1683327 : comp->gcBytes -= ArenaSize;
803 :
804 1683327 : aheader->setAsNotAllocated();
805 1683327 : addArenaToFreeList(rt, aheader);
806 :
807 1683327 : if (info.numArenasFree == 1) {
808 3086 : JS_ASSERT(!info.prevp);
809 3086 : JS_ASSERT(!info.next);
810 3086 : addToAvailableList(comp);
811 1680241 : } else if (!unused()) {
812 1639848 : JS_ASSERT(info.prevp);
813 : } else {
814 40393 : rt->gcChunkSet.remove(this);
815 40393 : removeFromAvailableList();
816 40393 : rt->gcChunkPool.put(this);
817 : }
818 1683327 : }
819 :
820 : } /* namespace gc */
821 : } /* namespace js */
822 :
823 : /* The caller must hold the GC lock. */
824 : static Chunk *
825 1683368 : PickChunk(JSCompartment *comp)
826 : {
827 1683368 : JSRuntime *rt = comp->rt;
828 1683368 : Chunk **listHeadp = GetAvailableChunkList(comp);
829 1683368 : Chunk *chunk = *listHeadp;
830 1683368 : if (chunk)
831 1642975 : return chunk;
832 :
833 40393 : chunk = rt->gcChunkPool.get(rt);
834 40393 : if (!chunk)
835 0 : return NULL;
836 :
837 40393 : rt->gcChunkAllocationSinceLastGC = true;
838 :
839 : /*
840 : * FIXME bug 583732 - chunk is newly allocated and cannot be present in
841 : * the table so using ordinary lookupForAdd is suboptimal here.
842 : */
843 80786 : GCChunkSet::AddPtr p = rt->gcChunkSet.lookupForAdd(chunk);
844 40393 : JS_ASSERT(!p);
845 40393 : if (!rt->gcChunkSet.add(p, chunk)) {
846 0 : Chunk::release(rt, chunk);
847 0 : return NULL;
848 : }
849 :
850 40393 : chunk->info.prevp = NULL;
851 40393 : chunk->info.next = NULL;
852 40393 : chunk->addToAvailableList(comp);
853 :
854 40393 : return chunk;
855 : }
856 :
857 : JS_FRIEND_API(bool)
858 23655060 : IsAboutToBeFinalized(const Cell *thing)
859 : {
860 23655060 : JSCompartment *thingCompartment = reinterpret_cast<const Cell *>(thing)->compartment();
861 23655060 : if (!thingCompartment->isCollecting())
862 87469 : return false;
863 23567591 : return !reinterpret_cast<const Cell *>(thing)->isMarked();
864 : }
865 :
866 : bool
867 167880 : IsAboutToBeFinalized(const Value &v)
868 : {
869 167880 : JS_ASSERT(v.isMarkable());
870 167880 : return IsAboutToBeFinalized((Cell *)v.toGCThing());
871 : }
872 :
873 : /* Lifetime for type sets attached to scripts containing observed types. */
874 : static const int64_t JIT_SCRIPT_RELEASE_TYPES_INTERVAL = 60 * 1000 * 1000;
875 :
876 : JSBool
877 18761 : js_InitGC(JSRuntime *rt, uint32_t maxbytes)
878 : {
879 18761 : if (!rt->gcChunkSet.init(INITIAL_CHUNK_CAPACITY))
880 0 : return false;
881 :
882 18761 : if (!rt->gcRootsHash.init(256))
883 0 : return false;
884 :
885 18761 : if (!rt->gcLocksHash.init(256))
886 0 : return false;
887 :
888 : #ifdef JS_THREADSAFE
889 18761 : rt->gcLock = PR_NewLock();
890 18761 : if (!rt->gcLock)
891 0 : return false;
892 18761 : if (!rt->gcHelperThread.init())
893 0 : return false;
894 : #endif
895 :
896 : /*
897 : * Separate gcMaxMallocBytes from gcMaxBytes but initialize to maxbytes
898 : * for default backward API compatibility.
899 : */
900 18761 : rt->gcMaxBytes = maxbytes;
901 18761 : rt->setGCMaxMallocBytes(maxbytes);
902 :
903 18761 : rt->gcJitReleaseTime = PRMJ_Now() + JIT_SCRIPT_RELEASE_TYPES_INTERVAL;
904 18761 : return true;
905 : }
906 :
907 : namespace js {
908 :
909 : inline bool
910 11157038 : InFreeList(ArenaHeader *aheader, uintptr_t addr)
911 : {
912 11157038 : if (!aheader->hasFreeThings())
913 2194159 : return false;
914 :
915 8962879 : FreeSpan firstSpan(aheader->getFirstFreeSpan());
916 :
917 18237028 : for (const FreeSpan *span = &firstSpan;;) {
918 : /* If the thing comes before the current span, it's not free. */
919 18237028 : if (addr < span->first)
920 8961983 : return false;
921 :
922 : /*
923 : * If we find it inside the span, it's dead. Here we use "<=" and not
924 : * "<" even for the last span, as we know the thing is inside the
925 : * arena. Thus for the last span thing < span->end.
926 : */
927 9275045 : if (addr <= span->last)
928 896 : return true;
929 :
930 : /*
931 : * The last possible empty span is at the end of the arena. Here
932 : * span->end < thing < thingsEnd and so we must have more spans.
933 : */
934 9274149 : span = span->nextSpan();
935 : }
936 : }
937 :
938 : enum ConservativeGCTest
939 : {
940 : CGCT_VALID,
941 : CGCT_LOWBITSET, /* excluded because one of the low bits was set */
942 : CGCT_NOTARENA, /* not within arena range in a chunk */
943 : CGCT_OTHERCOMPARTMENT, /* in another compartment */
944 : CGCT_NOTCHUNK, /* not within a valid chunk */
945 : CGCT_FREEARENA, /* within arena containing only free things */
946 : CGCT_NOTLIVE, /* gcthing is not allocated */
947 : CGCT_END
948 : };
949 :
950 : /*
951 : * Tests whether w is a (possibly dead) GC thing. Returns CGCT_VALID and
952 : * details about the thing if so. On failure, returns the reason for rejection.
953 : */
954 : inline ConservativeGCTest
955 247550640 : IsAddressableGCThing(JSRuntime *rt, uintptr_t w,
956 : gc::AllocKind *thingKindPtr, ArenaHeader **arenaHeader, void **thing)
957 : {
958 : /*
959 : * We assume that the compiler never uses sub-word alignment to store
960 : * pointers and does not tag pointers on its own. Additionally, the value
961 : * representation for all values and the jsid representation for GC-things
962 : * do not touch the low two bits. Thus any word with the low two bits set
963 : * is not a valid GC-thing.
964 : */
965 : JS_STATIC_ASSERT(JSID_TYPE_STRING == 0 && JSID_TYPE_OBJECT == 4);
966 247550640 : if (w & 0x3)
967 40978656 : return CGCT_LOWBITSET;
968 :
969 : /*
970 : * An object jsid has its low bits tagged. In the value representation on
971 : * 64-bit, the high bits are tagged.
972 : */
973 206571984 : const uintptr_t JSID_PAYLOAD_MASK = ~uintptr_t(JSID_TYPE_MASK);
974 : #if JS_BITS_PER_WORD == 32
975 206571984 : uintptr_t addr = w & JSID_PAYLOAD_MASK;
976 : #elif JS_BITS_PER_WORD == 64
977 : uintptr_t addr = w & JSID_PAYLOAD_MASK & JSVAL_PAYLOAD_MASK;
978 : #endif
979 :
980 206571984 : Chunk *chunk = Chunk::fromAddress(addr);
981 :
982 206571984 : if (!rt->gcChunkSet.has(chunk))
983 195012761 : return CGCT_NOTCHUNK;
984 :
985 : /*
986 : * We query for pointers outside the arena array after checking for an
987 : * allocated chunk. Such pointers are rare and we want to reject them
988 : * after doing more likely rejections.
989 : */
990 11559223 : if (!Chunk::withinArenasRange(addr))
991 7436 : return CGCT_NOTARENA;
992 :
993 : /* If the arena is not currently allocated, don't access the header. */
994 11551787 : size_t arenaOffset = Chunk::arenaIndex(addr);
995 11551787 : if (chunk->decommittedArenas.get(arenaOffset))
996 0 : return CGCT_FREEARENA;
997 :
998 11551787 : ArenaHeader *aheader = &chunk->arenas[arenaOffset].aheader;
999 :
1000 11551787 : if (!aheader->allocated())
1001 2 : return CGCT_FREEARENA;
1002 :
1003 11551785 : if (rt->gcRunning && !aheader->compartment->isCollecting())
1004 221252 : return CGCT_OTHERCOMPARTMENT;
1005 :
1006 11330533 : AllocKind thingKind = aheader->getAllocKind();
1007 11330533 : uintptr_t offset = addr & ArenaMask;
1008 11330533 : uintptr_t minOffset = Arena::firstThingOffset(thingKind);
1009 11330533 : if (offset < minOffset)
1010 173495 : return CGCT_NOTARENA;
1011 :
1012 : /* addr can point inside the thing so we must align the address. */
1013 11157038 : uintptr_t shift = (offset - minOffset) % Arena::thingSize(thingKind);
1014 11157038 : addr -= shift;
1015 :
1016 11157038 : if (thing)
1017 11157038 : *thing = reinterpret_cast<void *>(addr);
1018 11157038 : if (arenaHeader)
1019 11157038 : *arenaHeader = aheader;
1020 11157038 : if (thingKindPtr)
1021 11157038 : *thingKindPtr = thingKind;
1022 11157038 : return CGCT_VALID;
1023 : }
1024 :
1025 : /*
1026 : * Returns CGCT_VALID and marks the thing if w can be a live GC thing, and sets
1027 : * thingKind accordingly. Otherwise returns the reason for rejection.
1028 : */
1029 : inline ConservativeGCTest
1030 247550640 : MarkIfGCThingWord(JSTracer *trc, uintptr_t w)
1031 : {
1032 : void *thing;
1033 : ArenaHeader *aheader;
1034 : AllocKind thingKind;
1035 247550640 : ConservativeGCTest status = IsAddressableGCThing(trc->runtime, w, &thingKind, &aheader, &thing);
1036 247550640 : if (status != CGCT_VALID)
1037 236393602 : return status;
1038 :
1039 : /*
1040 : * Check if the thing is free. We must use the list of free spans as at
1041 : * this point we no longer have the mark bits from the previous GC run and
1042 : * we must account for newly allocated things.
1043 : */
1044 11157038 : if (InFreeList(aheader, uintptr_t(thing)))
1045 896 : return CGCT_NOTLIVE;
1046 :
1047 11156142 : JSGCTraceKind traceKind = MapAllocToTraceKind(thingKind);
1048 : #ifdef DEBUG
1049 11156142 : const char pattern[] = "machine_stack %p";
1050 : char nameBuf[sizeof(pattern) - 2 + sizeof(thing) * 2];
1051 11156142 : JS_snprintf(nameBuf, sizeof(nameBuf), pattern, thing);
1052 11156142 : JS_SET_TRACING_NAME(trc, nameBuf);
1053 : #endif
1054 11156142 : void *tmp = thing;
1055 11156142 : MarkKind(trc, &tmp, traceKind);
1056 11156142 : JS_ASSERT(tmp == thing);
1057 :
1058 : #ifdef DEBUG
1059 11156142 : if (trc->runtime->gcIncrementalState == MARK_ROOTS)
1060 466 : trc->runtime->gcSavedRoots.append(JSRuntime::SavedGCRoot(thing, traceKind));
1061 : #endif
1062 :
1063 11156142 : return CGCT_VALID;
1064 : }
1065 :
1066 : static void
1067 247550640 : MarkWordConservatively(JSTracer *trc, uintptr_t w)
1068 : {
1069 : /*
1070 : * The conservative scanner may access words that valgrind considers
1071 : * undefined. To avoid false positives, and to avoid altering valgrind's view
1072 : * of the memory, we mark the argument, a copy of the original word, as
1073 : * memcheck-defined. See bug 572678.
1074 : */
1075 : #ifdef JS_VALGRIND
1076 : JS_SILENCE_UNUSED_VALUE_IN_EXPR(VALGRIND_MAKE_MEM_DEFINED(&w, sizeof(w)));
1077 : #endif
1078 :
1079 247550640 : MarkIfGCThingWord(trc, w);
1080 247550640 : }
1081 :
1082 : static void
1083 43304 : MarkRangeConservatively(JSTracer *trc, const uintptr_t *begin, const uintptr_t *end)
1084 : {
1085 43304 : JS_ASSERT(begin <= end);
1086 247593944 : for (const uintptr_t *i = begin; i < end; ++i)
1087 247550640 : MarkWordConservatively(trc, *i);
1088 43304 : }
1089 :
1090 : static JS_NEVER_INLINE void
1091 22875 : MarkConservativeStackRoots(JSTracer *trc, bool useSavedRoots)
1092 : {
1093 22875 : JSRuntime *rt = trc->runtime;
1094 :
1095 : #ifdef DEBUG
1096 22875 : if (useSavedRoots) {
1097 3378 : for (JSRuntime::SavedGCRoot *root = rt->gcSavedRoots.begin();
1098 1689 : root != rt->gcSavedRoots.end();
1099 : root++)
1100 : {
1101 466 : JS_SET_TRACING_NAME(trc, "cstack");
1102 466 : MarkKind(trc, &root->thing, root->kind);
1103 : }
1104 1223 : return;
1105 : }
1106 :
1107 21652 : if (rt->gcIncrementalState == MARK_ROOTS)
1108 1446 : rt->gcSavedRoots.clearAndFree();
1109 : #endif
1110 :
1111 21652 : ConservativeGCData *cgcd = &rt->conservativeGC;
1112 21652 : if (!cgcd->hasStackToScan()) {
1113 : #ifdef JS_THREADSAFE
1114 0 : JS_ASSERT(!rt->suspendCount);
1115 0 : JS_ASSERT(rt->requestDepth <= cgcd->requestThreshold);
1116 : #endif
1117 0 : return;
1118 : }
1119 :
1120 : uintptr_t *stackMin, *stackEnd;
1121 : #if JS_STACK_GROWTH_DIRECTION > 0
1122 : stackMin = rt->nativeStackBase;
1123 : stackEnd = cgcd->nativeStackTop;
1124 : #else
1125 21652 : stackMin = cgcd->nativeStackTop + 1;
1126 21652 : stackEnd = reinterpret_cast<uintptr_t *>(rt->nativeStackBase);
1127 : #endif
1128 :
1129 21652 : JS_ASSERT(stackMin <= stackEnd);
1130 21652 : MarkRangeConservatively(trc, stackMin, stackEnd);
1131 : MarkRangeConservatively(trc, cgcd->registerSnapshot.words,
1132 21652 : ArrayEnd(cgcd->registerSnapshot.words));
1133 : }
1134 :
1135 : void
1136 0 : MarkStackRangeConservatively(JSTracer *trc, Value *beginv, Value *endv)
1137 : {
1138 0 : const uintptr_t *begin = beginv->payloadWord();
1139 0 : const uintptr_t *end = endv->payloadWord();
1140 : #ifdef JS_NUNBOX32
1141 : /*
1142 : * With 64-bit jsvals on 32-bit systems, we can optimize a bit by
1143 : * scanning only the payloads.
1144 : */
1145 0 : JS_ASSERT(begin <= end);
1146 0 : for (const uintptr_t *i = begin; i < end; i += sizeof(Value) / sizeof(uintptr_t))
1147 0 : MarkWordConservatively(trc, *i);
1148 : #else
1149 : MarkRangeConservatively(trc, begin, end);
1150 : #endif
1151 0 : }
1152 :
1153 :
1154 :
1155 : JS_NEVER_INLINE void
1156 23149 : ConservativeGCData::recordStackTop()
1157 : {
1158 : /* Update the native stack pointer if it points to a bigger stack. */
1159 : uintptr_t dummy;
1160 23149 : nativeStackTop = &dummy;
1161 :
1162 : /*
1163 : * To record and update the register snapshot for the conservative scanning
1164 : * with the latest values we use setjmp.
1165 : */
1166 : #if defined(_MSC_VER)
1167 : # pragma warning(push)
1168 : # pragma warning(disable: 4611)
1169 : #endif
1170 23149 : (void) setjmp(registerSnapshot.jmpbuf);
1171 : #if defined(_MSC_VER)
1172 : # pragma warning(pop)
1173 : #endif
1174 23149 : }
1175 :
1176 : static void
1177 41950 : RecordNativeStackTopForGC(JSRuntime *rt)
1178 : {
1179 41950 : ConservativeGCData *cgcd = &rt->conservativeGC;
1180 :
1181 : #ifdef JS_THREADSAFE
1182 : /* Record the stack top here only if we are called from a request. */
1183 41950 : JS_ASSERT(rt->requestDepth >= cgcd->requestThreshold);
1184 41950 : if (rt->requestDepth == cgcd->requestThreshold)
1185 18961 : return;
1186 : #endif
1187 22989 : cgcd->recordStackTop();
1188 : }
1189 :
1190 : } /* namespace js */
1191 :
1192 : bool
1193 0 : js_IsAddressableGCThing(JSRuntime *rt, uintptr_t w, gc::AllocKind *thingKind, void **thing)
1194 : {
1195 0 : return js::IsAddressableGCThing(rt, w, thingKind, NULL, thing) == CGCT_VALID;
1196 : }
1197 :
1198 : #ifdef DEBUG
1199 : static void
1200 : CheckLeakedRoots(JSRuntime *rt);
1201 : #endif
1202 :
1203 : void
1204 18761 : js_FinishGC(JSRuntime *rt)
1205 : {
1206 : /*
1207 : * Wait until the background finalization stops and the helper thread
1208 : * shuts down before we forcefully release any remaining GC memory.
1209 : */
1210 : #ifdef JS_THREADSAFE
1211 18761 : rt->gcHelperThread.finish();
1212 : #endif
1213 :
1214 : #ifdef JS_GC_ZEAL
1215 : /* Free memory associated with GC verification. */
1216 18761 : FinishVerifier(rt);
1217 : #endif
1218 :
1219 : /* Delete all remaining Compartments. */
1220 37522 : for (CompartmentsIter c(rt); !c.done(); c.next())
1221 18761 : Foreground::delete_(c.get());
1222 18761 : rt->compartments.clear();
1223 18761 : rt->atomsCompartment = NULL;
1224 :
1225 18761 : rt->gcSystemAvailableChunkListHead = NULL;
1226 18761 : rt->gcUserAvailableChunkListHead = NULL;
1227 18761 : for (GCChunkSet::Range r(rt->gcChunkSet.all()); !r.empty(); r.popFront())
1228 0 : Chunk::release(rt, r.front());
1229 18761 : rt->gcChunkSet.clear();
1230 :
1231 18761 : rt->gcChunkPool.expireAndFree(rt, true);
1232 :
1233 : #ifdef DEBUG
1234 18761 : if (!rt->gcRootsHash.empty())
1235 0 : CheckLeakedRoots(rt);
1236 : #endif
1237 18761 : rt->gcRootsHash.clear();
1238 18761 : rt->gcLocksHash.clear();
1239 18761 : }
1240 :
1241 : JSBool
1242 171 : js_AddRoot(JSContext *cx, Value *vp, const char *name)
1243 : {
1244 171 : JSBool ok = js_AddRootRT(cx->runtime, vp, name);
1245 171 : if (!ok)
1246 0 : JS_ReportOutOfMemory(cx);
1247 171 : return ok;
1248 : }
1249 :
1250 : JSBool
1251 18666 : js_AddGCThingRoot(JSContext *cx, void **rp, const char *name)
1252 : {
1253 18666 : JSBool ok = js_AddGCThingRootRT(cx->runtime, rp, name);
1254 18666 : if (!ok)
1255 0 : JS_ReportOutOfMemory(cx);
1256 18666 : return ok;
1257 : }
1258 :
1259 : JS_FRIEND_API(JSBool)
1260 171 : js_AddRootRT(JSRuntime *rt, jsval *vp, const char *name)
1261 : {
1262 : return !!rt->gcRootsHash.put((void *)vp,
1263 171 : RootInfo(name, JS_GC_ROOT_VALUE_PTR));
1264 : }
1265 :
1266 : JS_FRIEND_API(JSBool)
1267 18666 : js_AddGCThingRootRT(JSRuntime *rt, void **rp, const char *name)
1268 : {
1269 : return !!rt->gcRootsHash.put((void *)rp,
1270 18666 : RootInfo(name, JS_GC_ROOT_GCTHING_PTR));
1271 : }
1272 :
1273 : JS_FRIEND_API(void)
1274 18837 : js_RemoveRoot(JSRuntime *rt, void *rp)
1275 : {
1276 18837 : rt->gcRootsHash.remove(rp);
1277 18837 : rt->gcPoke = true;
1278 18837 : }
1279 :
1280 : typedef RootedValueMap::Range RootRange;
1281 : typedef RootedValueMap::Entry RootEntry;
1282 : typedef RootedValueMap::Enum RootEnum;
1283 :
1284 : #ifdef DEBUG
1285 :
1286 : static void
1287 0 : CheckLeakedRoots(JSRuntime *rt)
1288 : {
1289 0 : uint32_t leakedroots = 0;
1290 :
1291 : /* In debug builds, warn (but don't assert) about any remaining roots. */
1292 0 : for (RootRange r = rt->gcRootsHash.all(); !r.empty(); r.popFront()) {
1293 0 : RootEntry &entry = r.front();
1294 0 : leakedroots++;
1295 : fprintf(stderr,
1296 : "JS engine warning: leaking GC root \'%s\' at %p\n",
1297 0 : entry.value.name ? entry.value.name : "", entry.key);
1298 : }
1299 :
1300 0 : if (leakedroots > 0) {
1301 0 : if (leakedroots == 1) {
1302 : fprintf(stderr,
1303 : "JS engine warning: 1 GC root remains after destroying the JSRuntime at %p.\n"
1304 : " This root may point to freed memory. Objects reachable\n"
1305 : " through it have not been finalized.\n",
1306 0 : (void *) rt);
1307 : } else {
1308 : fprintf(stderr,
1309 : "JS engine warning: %lu GC roots remain after destroying the JSRuntime at %p.\n"
1310 : " These roots may point to freed memory. Objects reachable\n"
1311 : " through them have not been finalized.\n",
1312 0 : (unsigned long) leakedroots, (void *) rt);
1313 : }
1314 : }
1315 0 : }
1316 :
1317 : void
1318 0 : js_DumpNamedRoots(JSRuntime *rt,
1319 : void (*dump)(const char *name, void *rp, JSGCRootType type, void *data),
1320 : void *data)
1321 : {
1322 0 : for (RootRange r = rt->gcRootsHash.all(); !r.empty(); r.popFront()) {
1323 0 : RootEntry &entry = r.front();
1324 0 : if (const char *name = entry.value.name)
1325 0 : dump(name, entry.key, entry.value.type, data);
1326 : }
1327 0 : }
1328 :
1329 : #endif /* DEBUG */
1330 :
1331 : uint32_t
1332 0 : js_MapGCRoots(JSRuntime *rt, JSGCRootMapFun map, void *data)
1333 : {
1334 0 : int ct = 0;
1335 0 : for (RootEnum e(rt->gcRootsHash); !e.empty(); e.popFront()) {
1336 0 : RootEntry &entry = e.front();
1337 :
1338 0 : ct++;
1339 0 : int mapflags = map(entry.key, entry.value.type, entry.value.name, data);
1340 :
1341 0 : if (mapflags & JS_MAP_GCROOT_REMOVE)
1342 0 : e.removeFront();
1343 0 : if (mapflags & JS_MAP_GCROOT_STOP)
1344 0 : break;
1345 : }
1346 :
1347 0 : return ct;
1348 : }
1349 :
1350 : static size_t
1351 205438 : ComputeTriggerBytes(size_t lastBytes, size_t maxBytes, JSGCInvocationKind gckind)
1352 : {
1353 205438 : size_t base = gckind == GC_SHRINK ? lastBytes : Max(lastBytes, GC_ALLOCATION_THRESHOLD);
1354 205438 : float trigger = float(base) * GC_HEAP_GROWTH_FACTOR;
1355 205438 : return size_t(Min(float(maxBytes), trigger));
1356 : }
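/*
 * Worked example with the constants defined above (GC_ALLOCATION_THRESHOLD =
 * 30 MB, GC_HEAP_GROWTH_FACTOR = 3.0) and illustrative lastBytes values: a
 * compartment that ended the last GC with lastBytes = 50 MB gets
 * gcTriggerBytes = 150 MB (capped at maxBytes), while one that ended with
 * 10 MB is clamped up to the 30 MB threshold and gets 90 MB. For a GC_SHRINK
 * collection the clamp is skipped and lastBytes is used directly as the base.
 */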
1357 :
1358 : void
1359 102719 : JSCompartment::setGCLastBytes(size_t lastBytes, size_t lastMallocBytes, JSGCInvocationKind gckind)
1360 : {
1361 102719 : gcTriggerBytes = ComputeTriggerBytes(lastBytes, rt->gcMaxBytes, gckind);
1362 102719 : gcTriggerMallocAndFreeBytes = ComputeTriggerBytes(lastMallocBytes, SIZE_MAX, gckind);
1363 102719 : }
1364 :
1365 : void
1366 220528 : JSCompartment::reduceGCTriggerBytes(size_t amount)
1367 : {
1368 220528 : JS_ASSERT(amount > 0);
1369 220528 : JS_ASSERT(gcTriggerBytes >= amount);
1370 220528 : if (gcTriggerBytes - amount < GC_ALLOCATION_THRESHOLD * GC_HEAP_GROWTH_FACTOR)
1371 89538 : return;
1372 130990 : gcTriggerBytes -= amount;
1373 : }
1374 :
1375 : namespace js {
1376 : namespace gc {
1377 :
1378 : inline void
1379 5399 : ArenaLists::prepareForIncrementalGC(JSRuntime *rt)
1380 : {
1381 113379 : for (size_t i = 0; i != FINALIZE_LIMIT; ++i) {
1382 107980 : FreeSpan *headSpan = &freeLists[i];
1383 107980 : if (!headSpan->isEmpty()) {
1384 51083 : ArenaHeader *aheader = headSpan->arenaHeader();
1385 51083 : aheader->allocatedDuringIncremental = true;
1386 51083 : rt->gcMarker.delayMarkingArena(aheader);
1387 : }
1388 : }
1389 5399 : }
1390 :
1391 : inline void *
1392 1768506 : ArenaLists::allocateFromArena(JSCompartment *comp, AllocKind thingKind)
1393 : {
1394 1768506 : Chunk *chunk = NULL;
1395 :
1396 1768506 : ArenaList *al = &arenaLists[thingKind];
1397 3537012 : AutoLockGC maybeLock;
1398 :
1399 : #ifdef JS_THREADSAFE
1400 1768506 : volatile uintptr_t *bfs = &backgroundFinalizeState[thingKind];
1401 1768506 : if (*bfs != BFS_DONE) {
1402 : /*
1403 : * We cannot search the arena list for free things while the
1404 : * background finalization runs, as it can modify head or cursor at any
1405 : * moment. So we always allocate a new arena in that case.
1406 : */
1407 47211 : maybeLock.lock(comp->rt);
1408 47211 : if (*bfs == BFS_RUN) {
1409 28488 : JS_ASSERT(!*al->cursor);
1410 28488 : chunk = PickChunk(comp);
1411 28488 : if (!chunk) {
1412 : /*
1413 : * Let the caller wait for the background allocation to
1414 : * finish and restart the allocation attempt.
1415 : */
1416 0 : return NULL;
1417 : }
1418 18723 : } else if (*bfs == BFS_JUST_FINISHED) {
1419 : /* See comments before BackgroundFinalizeState definition. */
1420 18723 : *bfs = BFS_DONE;
1421 : } else {
1422 0 : JS_ASSERT(*bfs == BFS_DONE);
1423 : }
1424 : }
1425 : #endif /* JS_THREADSAFE */
1426 :
1427 1768506 : if (!chunk) {
1428 1740018 : if (ArenaHeader *aheader = *al->cursor) {
1429 85138 : JS_ASSERT(aheader->hasFreeThings());
1430 :
1431 : /*
1432 : * The empty arenas are returned to the chunk and should not be present on
1433 : * the list.
1434 : */
1435 85138 : JS_ASSERT(!aheader->isEmpty());
1436 85138 : al->cursor = &aheader->next;
1437 :
1438 : /*
1439 : * Move the free span stored in the arena to the free list and
1440 : * allocate from it.
1441 : */
1442 85138 : freeLists[thingKind] = aheader->getFirstFreeSpan();
1443 85138 : aheader->setAsFullyUsed();
1444 85138 : if (JS_UNLIKELY(comp->needsBarrier())) {
1445 75 : aheader->allocatedDuringIncremental = true;
1446 75 : comp->rt->gcMarker.delayMarkingArena(aheader);
1447 : }
1448 85138 : return freeLists[thingKind].infallibleAllocate(Arena::thingSize(thingKind));
1449 : }
1450 :
1451 : /* Make sure we hold the GC lock before we call PickChunk. */
1452 1654880 : if (!maybeLock.locked())
1453 1654870 : maybeLock.lock(comp->rt);
1454 1654880 : chunk = PickChunk(comp);
1455 1654880 : if (!chunk)
1456 0 : return NULL;
1457 : }
1458 :
1459 : /*
1460 : * While we still hold the GC lock, get an arena from some chunk, mark it
1461 : * as full as its single free span is moved to the free lists, and insert
1462 : * it into the list as a fully allocated arena.
1463 : *
1464 : * We add the arena before the head, not after the tail pointed to by the
1465 : * cursor, so that after the GC the most recently added arena will be used
1466 : * first for allocations, improving cache locality.
1467 : */
1468 1683368 : JS_ASSERT(!*al->cursor);
1469 1683368 : ArenaHeader *aheader = chunk->allocateArena(comp, thingKind);
1470 1683368 : if (!aheader)
1471 41 : return NULL;
1472 :
1473 1683327 : if (JS_UNLIKELY(comp->needsBarrier())) {
1474 63 : aheader->allocatedDuringIncremental = true;
1475 63 : comp->rt->gcMarker.delayMarkingArena(aheader);
1476 : }
1477 1683327 : aheader->next = al->head;
1478 1683327 : if (!al->head) {
1479 348751 : JS_ASSERT(al->cursor == &al->head);
1480 348751 : al->cursor = &aheader->next;
1481 : }
1482 1683327 : al->head = aheader;
1483 :
1484 : /* See comments before allocateFromNewArena about this assert. */
1485 1683327 : JS_ASSERT(!aheader->hasFreeThings());
1486 1683327 : uintptr_t arenaAddr = aheader->arenaAddress();
1487 1683327 : return freeLists[thingKind].allocateFromNewArena(arenaAddr,
1488 : Arena::firstThingOffset(thingKind),
1489 3366654 : Arena::thingSize(thingKind));
1490 : }
1491 :
1492 : void
1493 1004364 : ArenaLists::finalizeNow(FreeOp *fop, AllocKind thingKind)
1494 : {
1495 1004364 : JS_ASSERT(!fop->onBackgroundThread());
1496 : #ifdef JS_THREADSAFE
1497 1004364 : JS_ASSERT(backgroundFinalizeState[thingKind] == BFS_DONE);
1498 : #endif
1499 1004364 : FinalizeArenas(fop, &arenaLists[thingKind], thingKind);
1500 1004364 : }
1501 :
1502 : inline void
1503 669576 : ArenaLists::finalizeLater(FreeOp *fop, AllocKind thingKind)
1504 : {
1505 0 : JS_ASSERT(thingKind == FINALIZE_OBJECT0_BACKGROUND ||
1506 : thingKind == FINALIZE_OBJECT2_BACKGROUND ||
1507 : thingKind == FINALIZE_OBJECT4_BACKGROUND ||
1508 : thingKind == FINALIZE_OBJECT8_BACKGROUND ||
1509 : thingKind == FINALIZE_OBJECT12_BACKGROUND ||
1510 : thingKind == FINALIZE_OBJECT16_BACKGROUND ||
1511 : thingKind == FINALIZE_SHORT_STRING ||
1512 669576 : thingKind == FINALIZE_STRING);
1513 669576 : JS_ASSERT(!fop->onBackgroundThread());
1514 :
1515 : #ifdef JS_THREADSAFE
1516 669576 : JS_ASSERT(!fop->runtime()->gcHelperThread.sweeping());
1517 :
1518 669576 : ArenaList *al = &arenaLists[thingKind];
1519 669576 : if (!al->head) {
1520 355616 : JS_ASSERT(backgroundFinalizeState[thingKind] == BFS_DONE);
1521 355616 : JS_ASSERT(al->cursor == &al->head);
1522 355616 : return;
1523 : }
1524 :
1525 : /*
1526 : * The state can be just-finished if we have not allocated any GC things
1527 : * from the arena list after the previous background finalization.
1528 : */
1529 450444 : JS_ASSERT(backgroundFinalizeState[thingKind] == BFS_DONE ||
1530 764404 : backgroundFinalizeState[thingKind] == BFS_JUST_FINISHED);
1531 :
1532 313960 : if (fop->shouldFreeLater()) {
1533 : /*
1534 : * To ensure the finalization order even during the background GC we
1535 : * must use infallibleAppend so that arenas scheduled for background
1536 : * finalization are not finalized now if the append fails.
1537 : */
1538 156948 : fop->runtime()->gcHelperThread.finalizeVector.infallibleAppend(al->head);
1539 156948 : al->clear();
1540 156948 : backgroundFinalizeState[thingKind] = BFS_RUN;
1541 : } else {
1542 157012 : FinalizeArenas(fop, al, thingKind);
1543 157012 : backgroundFinalizeState[thingKind] = BFS_DONE;
1544 : }
1545 :
1546 : #else /* !JS_THREADSAFE */
1547 :
1548 : finalizeNow(fop, thingKind);
1549 :
1550 : #endif
1551 : }
1552 :
1553 : #ifdef JS_THREADSAFE
1554 : /*static*/ void
1555 156948 : ArenaLists::backgroundFinalize(FreeOp *fop, ArenaHeader *listHead)
1556 : {
1557 156948 : JS_ASSERT(fop->onBackgroundThread());
1558 156948 : JS_ASSERT(listHead);
1559 156948 : AllocKind thingKind = listHead->getAllocKind();
1560 156948 : JSCompartment *comp = listHead->compartment;
1561 156948 : ArenaList finalized;
1562 156948 : finalized.head = listHead;
1563 156948 : FinalizeArenas(fop, &finalized, thingKind);
1564 :
1565 : /*
1566 : * After we finish the finalization, al->cursor must point to the end of
1567 : * the head list, as we emptied the list before the background finalization
1568 : * and allocation adds new arenas before the cursor.
1569 : */
1570 156948 : ArenaLists *lists = &comp->arenas;
1571 156948 : ArenaList *al = &lists->arenaLists[thingKind];
1572 :
1573 313896 : AutoLockGC lock(fop->runtime());
1574 156948 : JS_ASSERT(lists->backgroundFinalizeState[thingKind] == BFS_RUN);
1575 156948 : JS_ASSERT(!*al->cursor);
1576 :
1577 : /*
1578 : * We must set the state to BFS_JUST_FINISHED if we touch the arena list,
1579 : * even if we add to the list only fully allocated arenas without any free
1580 : * things. It ensures that the allocation thread takes the GC lock and all
1581 : * writes to the free list elements are propagated. As we always take the
1582 : * GC lock when allocating new arenas from the chunks we can set the state
1583 : * to BFS_DONE if we have released all finalized arenas back to their
1584 : * chunks.
1585 : */
1586 156948 : if (finalized.head) {
1587 155207 : *al->cursor = finalized.head;
1588 155207 : if (finalized.cursor != &finalized.head)
1589 59718 : al->cursor = finalized.cursor;
1590 155207 : lists->backgroundFinalizeState[thingKind] = BFS_JUST_FINISHED;
1591 : } else {
1592 1741 : lists->backgroundFinalizeState[thingKind] = BFS_DONE;
1593 : }
1594 156948 : }
1595 : #endif /* JS_THREADSAFE */
1596 :
1597 : void
1598 83697 : ArenaLists::finalizeObjects(FreeOp *fop)
1599 : {
1600 83697 : finalizeNow(fop, FINALIZE_OBJECT0);
1601 83697 : finalizeNow(fop, FINALIZE_OBJECT2);
1602 83697 : finalizeNow(fop, FINALIZE_OBJECT4);
1603 83697 : finalizeNow(fop, FINALIZE_OBJECT8);
1604 83697 : finalizeNow(fop, FINALIZE_OBJECT12);
1605 83697 : finalizeNow(fop, FINALIZE_OBJECT16);
1606 :
1607 : #ifdef JS_THREADSAFE
1608 83697 : finalizeLater(fop, FINALIZE_OBJECT0_BACKGROUND);
1609 83697 : finalizeLater(fop, FINALIZE_OBJECT2_BACKGROUND);
1610 83697 : finalizeLater(fop, FINALIZE_OBJECT4_BACKGROUND);
1611 83697 : finalizeLater(fop, FINALIZE_OBJECT8_BACKGROUND);
1612 83697 : finalizeLater(fop, FINALIZE_OBJECT12_BACKGROUND);
1613 83697 : finalizeLater(fop, FINALIZE_OBJECT16_BACKGROUND);
1614 : #endif
1615 :
1616 : #if JS_HAS_XML_SUPPORT
1617 83697 : finalizeNow(fop, FINALIZE_XML);
1618 : #endif
1619 83697 : }
1620 :
1621 : void
1622 83697 : ArenaLists::finalizeStrings(FreeOp *fop)
1623 : {
1624 83697 : finalizeLater(fop, FINALIZE_SHORT_STRING);
1625 83697 : finalizeLater(fop, FINALIZE_STRING);
1626 :
1627 83697 : finalizeNow(fop, FINALIZE_EXTERNAL_STRING);
1628 83697 : }
1629 :
1630 : void
1631 83697 : ArenaLists::finalizeShapes(FreeOp *fop)
1632 : {
1633 83697 : finalizeNow(fop, FINALIZE_SHAPE);
1634 83697 : finalizeNow(fop, FINALIZE_BASE_SHAPE);
1635 83697 : finalizeNow(fop, FINALIZE_TYPE_OBJECT);
1636 83697 : }
1637 :
1638 : void
1639 83697 : ArenaLists::finalizeScripts(FreeOp *fop)
1640 : {
1641 83697 : finalizeNow(fop, FINALIZE_SCRIPT);
1642 83697 : }
1643 :
1644 : static void
1645 10674 : RunLastDitchGC(JSContext *cx, gcreason::Reason reason)
1646 : {
1647 10674 : JSRuntime *rt = cx->runtime;
1648 :
1649 : /* The last ditch GC preserves all atoms. */
1650 21348 : AutoKeepAtoms keep(rt);
1651 10674 : GC(cx, GC_NORMAL, reason);
1652 10674 : }
1653 :
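/*
 * Called when the free list for thingKind is empty: replenish it by
 * allocating from an arena. If we are inside an incremental GC and the
 * compartment is over its trigger threshold, or if allocation fails even
 * after waiting for background finalization to end, we run a last-ditch GC
 * and retry. NULL is returned, and OOM reported, only if allocation still
 * fails after that GC.
 */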
1654 : /* static */ void *
1655 1768475 : ArenaLists::refillFreeList(JSContext *cx, AllocKind thingKind)
1656 : {
1657 1768475 : JS_ASSERT(cx->compartment->arenas.freeLists[thingKind].isEmpty());
1658 :
1659 1768475 : JSCompartment *comp = cx->compartment;
1660 1768475 : JSRuntime *rt = comp->rt;
1661 1768475 : JS_ASSERT(!rt->gcRunning);
1662 :
1663 1768475 : bool runGC = rt->gcIncrementalState != NO_INCREMENTAL && comp->gcBytes > comp->gcTriggerBytes;
1664 10 : for (;;) {
1665 1768485 : if (JS_UNLIKELY(runGC)) {
1666 10 : PrepareCompartmentForGC(comp);
1667 10 : RunLastDitchGC(cx, gcreason::LAST_DITCH);
1668 :
1669 : /*
1670 : * The JSGC_END callback can legitimately allocate new GC
1671 : * things and populate the free list. If that happens, just
1672 : * return that list head.
1673 : */
1674 10 : size_t thingSize = Arena::thingSize(thingKind);
1675 10 : if (void *thing = comp->arenas.allocateFromFreeList(thingKind, thingSize))
1676 0 : return thing;
1677 : }
1678 :
1679 :         /*
1680 :          * allocateFromArena may fail while the background finalization is still
1681 :          * running. In that case we want to wait for it to finish and restart.
1682 :          * However, checking for that is racy: the background finalization could
1683 :          * free some things after allocateFromArena decided to fail, yet by the
1684 :          * time we check it may already have stopped. To avoid this race we
1685 :          * always try to allocate twice.
1686 :          */
1687 1768506 : for (bool secondAttempt = false; ; secondAttempt = true) {
1688 1768506 : void *thing = comp->arenas.allocateFromArena(comp, thingKind);
1689 1768506 : if (JS_LIKELY(!!thing))
1690 1768465 : return thing;
1691 41 : if (secondAttempt)
1692 : break;
1693 :
1694 42 : AutoLockGC lock(rt);
1695 : #ifdef JS_THREADSAFE
1696 21 : rt->gcHelperThread.waitBackgroundSweepEnd();
1697 : #endif
1698 : }
1699 :
1700 : /*
1701 : * We failed to allocate. Run the GC if we haven't done it already.
1702 : * Otherwise report OOM.
1703 : */
1704 20 : if (runGC)
1705 : break;
1706 10 : runGC = true;
1707 : }
1708 :
1709 10 : js_ReportOutOfMemory(cx);
1710 10 : return NULL;
1711 : }
1712 :
1713 : } /* namespace gc */
1714 : } /* namespace js */
1715 :
1716 : JSGCTraceKind
1717 0 : js_GetGCThingTraceKind(void *thing)
1718 : {
1719 0 : return GetGCThingTraceKind(thing);
1720 : }
1721 :
1722 : JSBool
1723 0 : js_LockGCThingRT(JSRuntime *rt, void *thing)
1724 : {
1725 0 : if (!thing)
1726 0 : return true;
1727 :
1728 0 : if (GCLocks::Ptr p = rt->gcLocksHash.lookupWithDefault(thing, 0)) {
1729 0 : p->value++;
1730 0 : return true;
1731 : }
1732 :
1733 0 : return false;
1734 : }
1735 :
1736 : void
1737 0 : js_UnlockGCThingRT(JSRuntime *rt, void *thing)
1738 : {
1739 0 : if (!thing)
1740 0 : return;
1741 :
1742 0 : if (GCLocks::Ptr p = rt->gcLocksHash.lookup(thing)) {
1743 0 : rt->gcPoke = true;
1744 0 : if (--p->value == 0)
1745 0 : rt->gcLocksHash.remove(p);
1746 : }
1747 : }
1748 :
1749 : namespace js {
1750 :
1751 : void
1752 46061 : InitTracer(JSTracer *trc, JSRuntime *rt, JSTraceCallback callback)
1753 : {
1754 46061 : trc->runtime = rt;
1755 46061 : trc->callback = callback;
1756 46061 : trc->debugPrinter = NULL;
1757 46061 : trc->debugPrintArg = NULL;
1758 46061 : trc->debugPrintIndex = size_t(-1);
1759 46061 : trc->eagerlyTraceWeakMaps = true;
1760 46061 : }
1761 :
1762 : /* static */ int64_t
1763 0 : SliceBudget::TimeBudget(int64_t millis)
1764 : {
1765 0 : return millis * PRMJ_USEC_PER_MSEC;
1766 : }
1767 :
1768 : /* static */ int64_t
1769 54 : SliceBudget::WorkBudget(int64_t work)
1770 : {
1771 : /* For work = 0 not to mean Unlimited, we subtract 1. */
1772 54 : return -work - 1;
1773 : }
1774 :
1775 77183 : SliceBudget::SliceBudget()
1776 : : deadline(INT64_MAX),
1777 77183 : counter(INTPTR_MAX)
1778 : {
1779 77183 : }
1780 :
1781 63 : SliceBudget::SliceBudget(int64_t budget)
1782 : {
1783 63 : if (budget == Unlimited) {
1784 9 : deadline = INT64_MAX;
1785 9 : counter = INTPTR_MAX;
1786 54 : } else if (budget > 0) {
1787 0 : deadline = PRMJ_Now() + budget;
1788 0 : counter = CounterReset;
1789 : } else {
1790 54 : deadline = 0;
1791 54 : counter = -budget - 1;
1792 : }
1793 63 : }
1794 :
1795 : bool
1796 99 : SliceBudget::checkOverBudget()
1797 : {
1798 99 : bool over = PRMJ_Now() > deadline;
1799 99 : if (!over)
1800 0 : counter = CounterReset;
1801 99 : return over;
1802 : }
1803 :
1804 18761 : GCMarker::GCMarker()
1805 : : stack(size_t(-1)),
1806 : color(BLACK),
1807 : started(false),
1808 : unmarkedArenaStackTop(NULL),
1809 : markLaterArenas(0),
1810 18761 : grayFailed(false)
1811 : {
1812 18761 : }
1813 :
1814 : bool
1815 18761 : GCMarker::init()
1816 : {
1817 18761 : return stack.init(MARK_STACK_LENGTH);
1818 : }
1819 :
1820 : void
1821 39964 : GCMarker::start(JSRuntime *rt)
1822 : {
1823 39964 : InitTracer(this, rt, NULL);
1824 39964 : JS_ASSERT(!started);
1825 39964 : started = true;
1826 39964 : color = BLACK;
1827 :
1828 39964 : JS_ASSERT(!unmarkedArenaStackTop);
1829 39964 : JS_ASSERT(markLaterArenas == 0);
1830 :
1831 39964 : JS_ASSERT(grayRoots.empty());
1832 39964 : JS_ASSERT(!grayFailed);
1833 :
1834 : /*
1835 : * The GC is recomputing the liveness of WeakMap entries, so we delay
1836 :      * visiting entries.
1837 : */
1838 39964 : eagerlyTraceWeakMaps = JS_FALSE;
1839 39964 : }
1840 :
1841 : void
1842 39864 : GCMarker::stop()
1843 : {
1844 39864 : JS_ASSERT(isDrained());
1845 :
1846 39864 : JS_ASSERT(started);
1847 39864 : started = false;
1848 :
1849 39864 : JS_ASSERT(!unmarkedArenaStackTop);
1850 39864 : JS_ASSERT(markLaterArenas == 0);
1851 :
1852 39864 : JS_ASSERT(grayRoots.empty());
1853 39864 : grayFailed = false;
1854 :
1855 : /* Free non-ballast stack memory. */
1856 39864 : stack.reset();
1857 39864 : grayRoots.clearAndFree();
1858 39864 : }
1859 :
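/*
 * Abandon an in-progress mark: reset the mark color, empty the mark stack,
 * unlink every arena from the delayed-marking stack, and drop any buffered
 * gray roots. Used when marking must be redone or an incremental GC is reset.
 */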
1860 : void
1861 1446 : GCMarker::reset()
1862 : {
1863 1446 : color = BLACK;
1864 :
1865 1446 : stack.reset();
1866 1446 : JS_ASSERT(isMarkStackEmpty());
1867 :
1868 53879 : while (unmarkedArenaStackTop) {
1869 50987 : ArenaHeader *aheader = unmarkedArenaStackTop;
1870 50987 : JS_ASSERT(aheader->hasDelayedMarking);
1871 50987 : JS_ASSERT(markLaterArenas);
1872 50987 : unmarkedArenaStackTop = aheader->getNextDelayedMarking();
1873 50987 : aheader->hasDelayedMarking = 0;
1874 50987 : aheader->markOverflow = 0;
1875 50987 : aheader->allocatedDuringIncremental = 0;
1876 50987 : markLaterArenas--;
1877 : }
1878 1446 : JS_ASSERT(isDrained());
1879 1446 : JS_ASSERT(!markLaterArenas);
1880 :
1881 1446 : grayRoots.clearAndFree();
1882 1446 : grayFailed = false;
1883 1446 : }
1884 :
1885 : /*
1886 : * When the native stack is low, the GC does not call JS_TraceChildren to mark
1887 : * the reachable "children" of the thing. Rather the thing is put aside and
1888 : * JS_TraceChildren is called later with more space on the C stack.
1889 : *
1890 :  * To implement such delayed marking of the children with minimal overhead for
1891 :  * the normal case of a sufficient native stack, the code adds a link field to
1892 :  * each arena header. The field chains all arenas with delayed things into a
1893 :  * stack whose top is kept in
1894 :  * GCMarker::unmarkedArenaStackTop. delayMarkingChildren pushes
1895 :  * arenas onto the stack as necessary while markDelayedChildren pops the arenas
1896 :  * from the stack until it empties.
1897 : */
1898 :
1899 : inline void
1900 51221 : GCMarker::delayMarkingArena(ArenaHeader *aheader)
1901 : {
1902 51221 : if (aheader->hasDelayedMarking) {
1903 : /* Arena already scheduled to be marked later */
1904 225 : return;
1905 : }
1906 50996 : aheader->setNextDelayedMarking(unmarkedArenaStackTop);
1907 50996 : unmarkedArenaStackTop = aheader;
1908 50996 : markLaterArenas++;
1909 : }
1910 :
1911 : void
1912 0 : GCMarker::delayMarkingChildren(const void *thing)
1913 : {
1914 0 : const Cell *cell = reinterpret_cast<const Cell *>(thing);
1915 0 : cell->arenaHeader()->markOverflow = 1;
1916 0 : delayMarkingArena(cell->arenaHeader());
1917 0 : }
1918 :
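/*
 * Process one arena taken from the delayed-marking stack. If marking
 * overflowed in this arena, we re-trace the children of its marked cells (or
 * of all cells, if the arena was allocated during an incremental GC);
 * otherwise the arena is here only because it was allocated during an
 * incremental GC, and we push the whole arena.
 */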
1919 : void
1920 9 : GCMarker::markDelayedChildren(ArenaHeader *aheader)
1921 : {
1922 9 : if (aheader->markOverflow) {
1923 0 : bool always = aheader->allocatedDuringIncremental;
1924 0 : aheader->markOverflow = 0;
1925 :
1926 0 : for (CellIterUnderGC i(aheader); !i.done(); i.next()) {
1927 0 : Cell *t = i.getCell();
1928 0 : if (always || t->isMarked()) {
1929 0 : t->markIfUnmarked();
1930 0 : JS_TraceChildren(this, t, MapAllocToTraceKind(aheader->getAllocKind()));
1931 : }
1932 : }
1933 : } else {
1934 9 : JS_ASSERT(aheader->allocatedDuringIncremental);
1935 9 : PushArena(this, aheader);
1936 : }
1937 9 : aheader->allocatedDuringIncremental = 0;
1938 9 : }
1939 :
1940 : bool
1941 9 : GCMarker::markDelayedChildren(SliceBudget &budget)
1942 : {
1943 18 : gcstats::AutoPhase ap(runtime->gcStats, gcstats::PHASE_MARK_DELAYED);
1944 :
1945 9 : JS_ASSERT(unmarkedArenaStackTop);
1946 9 : do {
1947 : /*
1948 : * If marking gets delayed at the same arena again, we must repeat
1949 :          * marking of its things. For that we pop the arena from the stack and
1950 : * clear its hasDelayedMarking flag before we begin the marking.
1951 : */
1952 9 : ArenaHeader *aheader = unmarkedArenaStackTop;
1953 9 : JS_ASSERT(aheader->hasDelayedMarking);
1954 9 : JS_ASSERT(markLaterArenas);
1955 9 : unmarkedArenaStackTop = aheader->getNextDelayedMarking();
1956 9 : aheader->hasDelayedMarking = 0;
1957 9 : markLaterArenas--;
1958 9 : markDelayedChildren(aheader);
1959 :
1960 9 : budget.step(150);
1961 9 : if (budget.isOverBudget())
1962 0 : return false;
1963 : } while (unmarkedArenaStackTop);
1964 9 : JS_ASSERT(!markLaterArenas);
1965 :
1966 9 : return true;
1967 : }
1968 :
1969 : #ifdef DEBUG
1970 : void
1971 22725226 : GCMarker::checkCompartment(void *p)
1972 : {
1973 22725226 : JS_ASSERT(started);
1974 22725226 : JS_ASSERT(static_cast<Cell *>(p)->compartment()->isCollecting());
1975 22725226 : }
1976 : #endif
1977 :
1978 : bool
1979 38499 : GCMarker::hasBufferedGrayRoots() const
1980 : {
1981 38499 : return !grayFailed;
1982 : }
1983 :
1984 : void
1985 0 : GCMarker::startBufferingGrayRoots()
1986 : {
1987 0 : JS_ASSERT(!callback);
1988 0 : callback = GrayCallback;
1989 0 : JS_ASSERT(IS_GC_MARKING_TRACER(this));
1990 0 : }
1991 :
1992 : void
1993 0 : GCMarker::endBufferingGrayRoots()
1994 : {
1995 0 : JS_ASSERT(callback == GrayCallback);
1996 0 : callback = NULL;
1997 0 : JS_ASSERT(IS_GC_MARKING_TRACER(this));
1998 0 : }
1999 :
2000 : void
2001 38436 : GCMarker::markBufferedGrayRoots()
2002 : {
2003 38436 : JS_ASSERT(!grayFailed);
2004 :
2005 38436 : for (GrayRoot *elem = grayRoots.begin(); elem != grayRoots.end(); elem++) {
2006 : #ifdef DEBUG
2007 0 : debugPrinter = elem->debugPrinter;
2008 0 : debugPrintArg = elem->debugPrintArg;
2009 0 : debugPrintIndex = elem->debugPrintIndex;
2010 : #endif
2011 0 : void *tmp = elem->thing;
2012 0 : MarkKind(this, &tmp, elem->kind);
2013 0 : JS_ASSERT(tmp == elem->thing);
2014 : }
2015 :
2016 38436 : grayRoots.clearAndFree();
2017 38436 : }
2018 :
2019 : void
2020 0 : GCMarker::appendGrayRoot(void *thing, JSGCTraceKind kind)
2021 : {
2022 0 : JS_ASSERT(started);
2023 :
2024 0 : if (grayFailed)
2025 0 : return;
2026 :
2027 0 : GrayRoot root(thing, kind);
2028 : #ifdef DEBUG
2029 0 : root.debugPrinter = debugPrinter;
2030 0 : root.debugPrintArg = debugPrintArg;
2031 0 : root.debugPrintIndex = debugPrintIndex;
2032 : #endif
2033 :
2034 0 : if (!grayRoots.append(root)) {
2035 0 : grayRoots.clearAndFree();
2036 0 : grayFailed = true;
2037 : }
2038 : }
2039 :
2040 : void
2041 0 : GCMarker::GrayCallback(JSTracer *trc, void **thingp, JSGCTraceKind kind)
2042 : {
2043 0 : GCMarker *gcmarker = static_cast<GCMarker *>(trc);
2044 0 : gcmarker->appendGrayRoot(*thingp, kind);
2045 0 : }
2046 :
2047 : size_t
2048 0 : GCMarker::sizeOfExcludingThis(JSMallocSizeOfFun mallocSizeOf) const
2049 : {
2050 0 : return stack.sizeOfExcludingThis(mallocSizeOf) +
2051 0 : grayRoots.sizeOfExcludingThis(mallocSizeOf);
2052 : }
2053 :
2054 : void
2055 0 : SetMarkStackLimit(JSRuntime *rt, size_t limit)
2056 : {
2057 0 : JS_ASSERT(!rt->gcRunning);
2058 0 : rt->gcMarker.setSizeLimit(limit);
2059 0 : }
2060 :
2061 : } /* namespace js */
2062 :
2063 : static void
2064 22853 : gc_root_traversal(JSTracer *trc, const RootEntry &entry)
2065 : {
2066 22853 : const char *name = entry.value.name ? entry.value.name : "root";
2067 22853 : if (entry.value.type == JS_GC_ROOT_GCTHING_PTR)
2068 22828 : MarkGCThingRoot(trc, reinterpret_cast<void **>(entry.key), name);
2069 : else
2070 25 : MarkValueRoot(trc, reinterpret_cast<Value *>(entry.key), name);
2071 22853 : }
2072 :
2073 : static void
2074 0 : gc_lock_traversal(const GCLocks::Entry &entry, JSTracer *trc)
2075 : {
2076 0 : JS_ASSERT(entry.value >= 1);
2077 0 : void *tmp = entry.key;
2078 0 : MarkGCThingRoot(trc, &tmp, "locked object");
2079 0 : JS_ASSERT(tmp == entry.key);
2080 0 : }
2081 :
2082 : namespace js {
2083 :
2084 : void
2085 63 : MarkCompartmentActive(StackFrame *fp)
2086 : {
2087 63 : if (fp->isScriptFrame())
2088 63 : fp->script()->compartment()->active = true;
2089 63 : }
2090 :
2091 : } /* namespace js */
2092 :
2093 : void
2094 : AutoIdArray::trace(JSTracer *trc)
2095 : {
2096 : JS_ASSERT(tag == IDARRAY);
2097 : gc::MarkIdRange(trc, idArray->length, idArray->vector, "JSAutoIdArray.idArray");
2098 : }
2099 :
2100 : void
2101 0 : AutoEnumStateRooter::trace(JSTracer *trc)
2102 : {
2103 0 : gc::MarkObjectRoot(trc, &obj, "JS::AutoEnumStateRooter.obj");
2104 0 : }
2105 :
2106 : inline void
2107 19766 : AutoGCRooter::trace(JSTracer *trc)
2108 : {
2109 19766 : switch (tag) {
2110 : case JSVAL:
2111 3300 : MarkValueRoot(trc, &static_cast<AutoValueRooter *>(this)->val, "JS::AutoValueRooter.val");
2112 3300 : return;
2113 :
2114 : case PARSER:
2115 15 : static_cast<Parser *>(this)->trace(trc);
2116 15 : return;
2117 :
2118 : case ENUMERATOR:
2119 0 : static_cast<AutoEnumStateRooter *>(this)->trace(trc);
2120 0 : return;
2121 :
2122 : case IDARRAY: {
2123 0 : JSIdArray *ida = static_cast<AutoIdArray *>(this)->idArray;
2124 0 : MarkIdRange(trc, ida->length, ida->vector, "JS::AutoIdArray.idArray");
2125 0 : return;
2126 : }
2127 :
2128 : case DESCRIPTORS: {
2129 : PropDescArray &descriptors =
2130 9 : static_cast<AutoPropDescArrayRooter *>(this)->descriptors;
2131 18 : for (size_t i = 0, len = descriptors.length(); i < len; i++) {
2132 9 : PropDesc &desc = descriptors[i];
2133 9 : MarkValueRoot(trc, &desc.pd, "PropDesc::pd");
2134 9 : MarkValueRoot(trc, &desc.value, "PropDesc::value");
2135 9 : MarkValueRoot(trc, &desc.get, "PropDesc::get");
2136 9 : MarkValueRoot(trc, &desc.set, "PropDesc::set");
2137 : }
2138 9 : return;
2139 : }
2140 :
2141 : case DESCRIPTOR : {
2142 9 : PropertyDescriptor &desc = *static_cast<AutoPropertyDescriptorRooter *>(this);
2143 9 : if (desc.obj)
2144 9 : MarkObjectRoot(trc, &desc.obj, "Descriptor::obj");
2145 9 : MarkValueRoot(trc, &desc.value, "Descriptor::value");
2146 9 : if ((desc.attrs & JSPROP_GETTER) && desc.getter) {
2147 0 : JSObject *tmp = JS_FUNC_TO_DATA_PTR(JSObject *, desc.getter);
2148 0 : MarkObjectRoot(trc, &tmp, "Descriptor::get");
2149 0 : desc.getter = JS_DATA_TO_FUNC_PTR(JSPropertyOp, tmp);
2150 : }
2151 9 : if (desc.attrs & JSPROP_SETTER && desc.setter) {
2152 0 : JSObject *tmp = JS_FUNC_TO_DATA_PTR(JSObject *, desc.setter);
2153 0 : MarkObjectRoot(trc, &tmp, "Descriptor::set");
2154 0 : desc.setter = JS_DATA_TO_FUNC_PTR(JSStrictPropertyOp, tmp);
2155 : }
2156 9 : return;
2157 : }
2158 :
2159 : case NAMESPACES: {
2160 0 : JSXMLArray<JSObject> &array = static_cast<AutoNamespaceArray *>(this)->array;
2161 0 : MarkObjectRange(trc, array.length, array.vector, "JSXMLArray.vector");
2162 0 : js_XMLArrayCursorTrace(trc, array.cursors);
2163 0 : return;
2164 : }
2165 :
2166 : case XML:
2167 0 : js_TraceXML(trc, static_cast<AutoXMLRooter *>(this)->xml);
2168 0 : return;
2169 :
2170 : case OBJECT:
2171 10031 : if (static_cast<AutoObjectRooter *>(this)->obj)
2172 : MarkObjectRoot(trc, &static_cast<AutoObjectRooter *>(this)->obj,
2173 10031 : "JS::AutoObjectRooter.obj");
2174 10031 : return;
2175 :
2176 : case ID:
2177 0 : MarkIdRoot(trc, &static_cast<AutoIdRooter *>(this)->id_, "JS::AutoIdRooter.id_");
2178 0 : return;
2179 :
2180 : case VALVECTOR: {
2181 2864 : AutoValueVector::VectorImpl &vector = static_cast<AutoValueVector *>(this)->vector;
2182 2864 : MarkValueRootRange(trc, vector.length(), vector.begin(), "js::AutoValueVector.vector");
2183 2864 : return;
2184 : }
2185 :
2186 : case STRING:
2187 604 : if (static_cast<AutoStringRooter *>(this)->str)
2188 : MarkStringRoot(trc, &static_cast<AutoStringRooter *>(this)->str,
2189 604 : "JS::AutoStringRooter.str");
2190 604 : return;
2191 :
2192 : case IDVECTOR: {
2193 1242 : AutoIdVector::VectorImpl &vector = static_cast<AutoIdVector *>(this)->vector;
2194 1242 : MarkIdRootRange(trc, vector.length(), vector.begin(), "js::AutoIdVector.vector");
2195 1242 : return;
2196 : }
2197 :
2198 : case SHAPEVECTOR: {
2199 1098 : AutoShapeVector::VectorImpl &vector = static_cast<js::AutoShapeVector *>(this)->vector;
2200 1098 : MarkShapeRootRange(trc, vector.length(), const_cast<Shape **>(vector.begin()),
2201 1098 : "js::AutoShapeVector.vector");
2202 1098 : return;
2203 : }
2204 :
2205 : case OBJVECTOR: {
2206 9 : AutoObjectVector::VectorImpl &vector = static_cast<AutoObjectVector *>(this)->vector;
2207 9 : MarkObjectRootRange(trc, vector.length(), vector.begin(), "js::AutoObjectVector.vector");
2208 9 : return;
2209 : }
2210 :
2211 : case VALARRAY: {
2212 0 : AutoValueArray *array = static_cast<AutoValueArray *>(this);
2213 0 : MarkValueRootRange(trc, array->length(), array->start(), "js::AutoValueArray");
2214 0 : return;
2215 : }
2216 :
2217 : case SCRIPTVECTOR: {
2218 0 : AutoScriptVector::VectorImpl &vector = static_cast<AutoScriptVector *>(this)->vector;
2219 0 : for (size_t i = 0; i < vector.length(); i++)
2220 0 : MarkScriptRoot(trc, &vector[i], "AutoScriptVector element");
2221 0 : return;
2222 : }
2223 : }
2224 :
2225 585 : JS_ASSERT(tag >= 0);
2226 : MarkValueRootRange(trc, tag, static_cast<AutoArrayRooter *>(this)->array,
2227 585 : "JS::AutoArrayRooter.array");
2228 : }
2229 :
2230 : /* static */ void
2231 43246 : AutoGCRooter::traceAll(JSTracer *trc)
2232 : {
2233 63012 : for (js::AutoGCRooter *gcr = trc->runtime->autoGCRooters; gcr; gcr = gcr->down)
2234 19766 : gcr->trace(trc);
2235 43246 : }
2236 :
2237 : namespace js {
2238 :
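/*
 * Mark every runtime-wide root: cross-compartment wrappers held by
 * compartments that are not being collected, AutoGCRooters, the conservative
 * C stack, the root and lock hashtables, the atom state, per-context data,
 * per-compartment type and watchpoint roots, the VM stack, and any roots
 * registered by the embedding. With a GC marking tracer, gray roots are only
 * buffered here and marked later.
 */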
2239 : static void
2240 41836 : MarkRuntime(JSTracer *trc, bool useSavedRoots = false)
2241 : {
2242 41836 : JSRuntime *rt = trc->runtime;
2243 41836 : JS_ASSERT(trc->callback != GCMarker::GrayCallback);
2244 :
2245 41836 : if (IS_GC_MARKING_TRACER(trc)) {
2246 122547 : for (CompartmentsIter c(rt); !c.done(); c.next()) {
2247 84084 : if (!c->isCollecting())
2248 324 : c->markCrossCompartmentWrappers(trc);
2249 : }
2250 38463 : Debugger::markCrossCompartmentDebuggerObjectReferents(trc);
2251 : }
2252 :
2253 41836 : AutoGCRooter::traceAll(trc);
2254 :
2255 41836 : if (rt->hasContexts())
2256 22875 : MarkConservativeStackRoots(trc, useSavedRoots);
2257 :
2258 64689 : for (RootRange r = rt->gcRootsHash.all(); !r.empty(); r.popFront())
2259 22853 : gc_root_traversal(trc, r.front());
2260 :
2261 41836 : for (GCLocks::Range r = rt->gcLocksHash.all(); !r.empty(); r.popFront())
2262 0 : gc_lock_traversal(r.front(), trc);
2263 :
2264 41836 : if (rt->scriptAndCountsVector) {
2265 0 : ScriptAndCountsVector &vec = *rt->scriptAndCountsVector;
2266 0 : for (size_t i = 0; i < vec.length(); i++)
2267 0 : MarkScriptRoot(trc, &vec[i].script, "scriptAndCountsVector");
2268 : }
2269 :
2270 : /*
2271 : * Atoms are not in the cross-compartment map. So if there are any
2272 : * compartments that are not being collected, we are not allowed to collect
2273 : * atoms. Otherwise, the non-collected compartments could contain pointers
2274 : * to atoms that we would miss.
2275 : */
2276 41836 : bool isFullGC = true;
2277 41836 : if (IS_GC_MARKING_TRACER(trc)) {
2278 122547 : for (CompartmentsIter c(rt); !c.done(); c.next()) {
2279 84084 : if (!c->isCollecting())
2280 324 : isFullGC = false;
2281 : }
2282 : }
2283 41836 : MarkAtomState(trc, rt->gcKeepAtoms || !isFullGC);
2284 41836 : rt->staticStrings.trace(trc);
2285 :
2286 64711 : for (ContextIter acx(rt); !acx.done(); acx.next())
2287 22875 : acx->mark(trc);
2288 :
2289 : /* We can't use GCCompartmentsIter if we're called from TraceRuntime. */
2290 137119 : for (CompartmentsIter c(rt); !c.done(); c.next()) {
2291 95283 : if (IS_GC_MARKING_TRACER(trc) && !c->isCollecting())
2292 324 : continue;
2293 :
2294 94959 : if (c->activeAnalysis)
2295 20 : c->markTypes(trc);
2296 :
2297 : /* During a GC, these are treated as weak pointers. */
2298 94959 : if (!IS_GC_MARKING_TRACER(trc)) {
2299 11199 : if (c->watchpointMap)
2300 0 : c->watchpointMap->markAll(trc);
2301 : }
2302 :
2303 : /* Do not discard scripts with counts while profiling. */
2304 94959 : if (rt->profilingScripts) {
2305 0 : for (CellIterUnderGC i(c, FINALIZE_SCRIPT); !i.done(); i.next()) {
2306 0 : JSScript *script = i.get<JSScript>();
2307 0 : if (script->scriptCounts) {
2308 0 : MarkScriptRoot(trc, &script, "profilingScripts");
2309 0 : JS_ASSERT(script == i.get<JSScript>());
2310 : }
2311 : }
2312 : }
2313 : }
2314 :
2315 : #ifdef JS_METHODJIT
2316 : /* We need to expand inline frames before stack scanning. */
2317 137119 : for (CompartmentsIter c(rt); !c.done(); c.next())
2318 95283 : mjit::ExpandInlineFrames(c);
2319 : #endif
2320 :
2321 41836 : rt->stackSpace.mark(trc);
2322 :
2323 : /* The embedding can register additional roots here. */
2324 41836 : if (JSTraceDataOp op = rt->gcBlackRootsTraceOp)
2325 0 : (*op)(trc, rt->gcBlackRootsData);
2326 :
2327 : /* During GC, this buffers up the gray roots and doesn't mark them. */
2328 41836 : if (JSTraceDataOp op = rt->gcGrayRootsTraceOp) {
2329 0 : if (IS_GC_MARKING_TRACER(trc)) {
2330 0 : GCMarker *gcmarker = static_cast<GCMarker *>(trc);
2331 0 : gcmarker->startBufferingGrayRoots();
2332 0 : (*op)(trc, rt->gcGrayRootsData);
2333 0 : gcmarker->endBufferingGrayRoots();
2334 : } else {
2335 0 : (*op)(trc, rt->gcGrayRootsData);
2336 : }
2337 : }
2338 41836 : }
2339 :
2340 : static void
2341 97833 : TriggerOperationCallback(JSRuntime *rt, gcreason::Reason reason)
2342 : {
2343 97833 : if (rt->gcIsNeeded)
2344 97661 : return;
2345 :
2346 172 : rt->gcIsNeeded = true;
2347 172 : rt->gcTriggerReason = reason;
2348 172 : rt->triggerOperationCallback();
2349 : }
2350 :
2351 : void
2352 165 : TriggerGC(JSRuntime *rt, gcreason::Reason reason)
2353 : {
2354 165 : JS_ASSERT(rt->onOwnerThread());
2355 :
2356 165 : if (rt->gcRunning)
2357 0 : return;
2358 :
2359 165 : PrepareForFullGC(rt);
2360 165 : TriggerOperationCallback(rt, reason);
2361 : }
2362 :
2363 : void
2364 97689 : TriggerCompartmentGC(JSCompartment *comp, gcreason::Reason reason)
2365 : {
2366 97689 : JSRuntime *rt = comp->rt;
2367 97689 : JS_ASSERT(rt->onOwnerThread());
2368 :
2369 97689 : if (rt->gcRunning)
2370 0 : return;
2371 :
2372 97689 : if (rt->gcZeal() == ZealAllocValue) {
2373 0 : TriggerGC(rt, reason);
2374 0 : return;
2375 : }
2376 :
2377 97689 : if (comp == rt->atomsCompartment) {
2378 :         /* We can't do a compartment GC of the atoms compartment. */
2379 21 : TriggerGC(rt, reason);
2380 21 : return;
2381 : }
2382 :
2383 97668 : PrepareCompartmentForGC(comp);
2384 97668 : TriggerOperationCallback(rt, reason);
2385 : }
2386 :
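/*
 * Run a GC, or start a GC slice, when heuristics suggest it is worthwhile:
 * when a GC has already been requested, when the compartment's heap or
 * malloc/free counters approach their trigger thresholds, or periodically as
 * a shrinking GC when the runtime has been idle long enough.
 */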
2387 : void
2388 0 : MaybeGC(JSContext *cx)
2389 : {
2390 0 : JSRuntime *rt = cx->runtime;
2391 0 : JS_ASSERT(rt->onOwnerThread());
2392 :
2393 0 : if (rt->gcZeal() == ZealAllocValue || rt->gcZeal() == ZealPokeValue) {
2394 0 : PrepareForFullGC(rt);
2395 0 : GC(cx, GC_NORMAL, gcreason::MAYBEGC);
2396 0 : return;
2397 : }
2398 :
2399 0 : JSCompartment *comp = cx->compartment;
2400 0 : if (rt->gcIsNeeded) {
2401 0 : GCSlice(cx, GC_NORMAL, gcreason::MAYBEGC);
2402 0 : return;
2403 : }
2404 :
2405 0 : if (comp->gcBytes > 8192 &&
2406 : comp->gcBytes >= 3 * (comp->gcTriggerBytes / 4) &&
2407 : rt->gcIncrementalState == NO_INCREMENTAL)
2408 : {
2409 0 : PrepareCompartmentForGC(comp);
2410 0 : GCSlice(cx, GC_NORMAL, gcreason::MAYBEGC);
2411 0 : return;
2412 : }
2413 :
2414 0 : if (comp->gcMallocAndFreeBytes > comp->gcTriggerMallocAndFreeBytes) {
2415 0 : PrepareCompartmentForGC(comp);
2416 0 : GCSlice(cx, GC_NORMAL, gcreason::MAYBEGC);
2417 0 : return;
2418 : }
2419 :
2420 : /*
2421 : * Access to the counters and, on 32 bit, setting gcNextFullGCTime below
2422 : * is not atomic and a race condition could trigger or suppress the GC. We
2423 : * tolerate this.
2424 : */
2425 0 : int64_t now = PRMJ_Now();
2426 0 : if (rt->gcNextFullGCTime && rt->gcNextFullGCTime <= now) {
2427 0 : if (rt->gcChunkAllocationSinceLastGC ||
2428 : rt->gcNumArenasFreeCommitted > FreeCommittedArenasThreshold)
2429 : {
2430 0 : PrepareForFullGC(rt);
2431 0 : GCSlice(cx, GC_SHRINK, gcreason::MAYBEGC);
2432 : } else {
2433 0 : rt->gcNextFullGCTime = now + GC_IDLE_FULL_SPAN;
2434 : }
2435 : }
2436 : }
2437 :
2438 : static void
2439 18 : DecommitArenasFromAvailableList(JSRuntime *rt, Chunk **availableListHeadp)
2440 : {
2441 18 : Chunk *chunk = *availableListHeadp;
2442 18 : if (!chunk)
2443 0 : return;
2444 :
2445 : /*
2446 : * Decommit is expensive so we avoid holding the GC lock while calling it.
2447 : *
2448 : * We decommit from the tail of the list to minimize interference with the
2449 : * main thread that may start to allocate things at this point.
2450 : *
2451 :      * The arena that is being decommitted outside the GC lock must not be
2452 :      * available for allocations either via the free list or via the
2453 :      * decommittedArenas bitmap. For that we just fetch the arena from the
2454 :      * free list before the decommit, pretending that it was allocated. If this
2455 :      * arena is also the only free arena in the chunk, we must remove the chunk
2456 :      * from the available list before we release the lock so the allocation
2457 :      * thread does not see chunks with no free arenas on the available list.
2458 :      *
2459 :      * After we retake the lock, we mark the arena as free and decommitted if
2460 :      * the decommit was successful. We must also add the chunk back to the
2461 :      * available list if we removed it previously or if the main thread has
2462 :      * allocated all the remaining free arenas in the chunk.
2463 : *
2464 : * We also must make sure that the aheader is not accessed again after we
2465 : * decommit the arena.
2466 : */
2467 18 : JS_ASSERT(chunk->info.prevp == availableListHeadp);
2468 18 : while (Chunk *next = chunk->info.next) {
2469 0 : JS_ASSERT(next->info.prevp == &chunk->info.next);
2470 0 : chunk = next;
2471 : }
2472 :
2473 0 : for (;;) {
2474 4167 : while (chunk->info.numArenasFreeCommitted != 0) {
2475 4131 : ArenaHeader *aheader = chunk->fetchNextFreeArena(rt);
2476 :
2477 4131 : Chunk **savedPrevp = chunk->info.prevp;
2478 4131 : if (!chunk->hasAvailableArenas())
2479 0 : chunk->removeFromAvailableList();
2480 :
2481 4131 : size_t arenaIndex = Chunk::arenaIndex(aheader->arenaAddress());
2482 : bool ok;
2483 : {
2484 : /*
2485 :              * If the main thread waits for the decommit to finish, skip the
2486 :              * potentially expensive unlock/lock pair on the contested
2487 : * lock.
2488 : */
2489 8262 : Maybe<AutoUnlockGC> maybeUnlock;
2490 4131 : if (!rt->gcRunning)
2491 4131 : maybeUnlock.construct(rt);
2492 4131 : ok = MarkPagesUnused(aheader->getArena(), ArenaSize);
2493 : }
2494 :
2495 4131 : if (ok) {
2496 4131 : ++chunk->info.numArenasFree;
2497 4131 : chunk->decommittedArenas.set(arenaIndex);
2498 : } else {
2499 0 : chunk->addArenaToFreeList(rt, aheader);
2500 : }
2501 4131 : JS_ASSERT(chunk->hasAvailableArenas());
2502 4131 : JS_ASSERT(!chunk->unused());
2503 4131 : if (chunk->info.numArenasFree == 1) {
2504 : /*
2505 : * Put the chunk back to the available list either at the
2506 : * point where it was before to preserve the available list
2507 : * that we enumerate, or, when the allocation thread has fully
2508 : * used all the previous chunks, at the beginning of the
2509 : * available list.
2510 : */
2511 0 : Chunk **insertPoint = savedPrevp;
2512 0 : if (savedPrevp != availableListHeadp) {
2513 0 : Chunk *prev = Chunk::fromPointerToNext(savedPrevp);
2514 0 : if (!prev->hasAvailableArenas())
2515 0 : insertPoint = availableListHeadp;
2516 : }
2517 0 : chunk->insertToAvailableList(insertPoint);
2518 : } else {
2519 4131 : JS_ASSERT(chunk->info.prevp);
2520 : }
2521 :
2522 4131 : if (rt->gcChunkAllocationSinceLastGC) {
2523 : /*
2524 : * The allocator thread has started to get new chunks. We should stop
2525 :                  * to avoid decommitting arenas in just-allocated chunks.
2526 : */
2527 0 : return;
2528 : }
2529 : }
2530 :
2531 : /*
2532 : * chunk->info.prevp becomes null when the allocator thread consumed
2533 : * all chunks from the available list.
2534 : */
2535 18 : JS_ASSERT_IF(chunk->info.prevp, *chunk->info.prevp == chunk);
2536 18 : if (chunk->info.prevp == availableListHeadp || !chunk->info.prevp)
2537 18 : break;
2538 :
2539 : /*
2540 : * prevp exists and is not the list head. It must point to the next
2541 : * field of the previous chunk.
2542 : */
2543 0 : chunk = chunk->getPrevious();
2544 : }
2545 : }
2546 :
2547 : static void
2548 9 : DecommitArenas(JSRuntime *rt)
2549 : {
2550 9 : DecommitArenasFromAvailableList(rt, &rt->gcSystemAvailableChunkListHead);
2551 9 : DecommitArenasFromAvailableList(rt, &rt->gcUserAvailableChunkListHead);
2552 9 : }
2553 :
2554 : /* Must be called with the GC lock taken. */
2555 : static void
2556 19675 : ExpireChunksAndArenas(JSRuntime *rt, bool shouldShrink)
2557 : {
2558 19675 : if (Chunk *toFree = rt->gcChunkPool.expire(rt, shouldShrink)) {
2559 18 : AutoUnlockGC unlock(rt);
2560 9 : FreeChunkList(toFree);
2561 : }
2562 :
2563 19675 : if (shouldShrink)
2564 9 : DecommitArenas(rt);
2565 19675 : }
2566 :
2567 : #ifdef JS_THREADSAFE
2568 :
2569 : static unsigned
2570 18761 : GetCPUCount()
2571 : {
2572 : static unsigned ncpus = 0;
2573 18761 : if (ncpus == 0) {
2574 : # ifdef XP_WIN
2575 : SYSTEM_INFO sysinfo;
2576 : GetSystemInfo(&sysinfo);
2577 : ncpus = unsigned(sysinfo.dwNumberOfProcessors);
2578 : # else
2579 18667 : long n = sysconf(_SC_NPROCESSORS_ONLN);
2580 18667 : ncpus = (n > 0) ? unsigned(n) : 1;
2581 : # endif
2582 : }
2583 18761 : return ncpus;
2584 : }
2585 :
2586 : bool
2587 18761 : GCHelperThread::init()
2588 : {
2589 18761 : if (!(wakeup = PR_NewCondVar(rt->gcLock)))
2590 0 : return false;
2591 18761 : if (!(done = PR_NewCondVar(rt->gcLock)))
2592 0 : return false;
2593 :
2594 : thread = PR_CreateThread(PR_USER_THREAD, threadMain, this, PR_PRIORITY_NORMAL,
2595 18761 : PR_LOCAL_THREAD, PR_JOINABLE_THREAD, 0);
2596 18761 : if (!thread)
2597 0 : return false;
2598 :
2599 18761 : backgroundAllocation = (GetCPUCount() >= 2);
2600 18761 : return true;
2601 : }
2602 :
2603 : void
2604 18761 : GCHelperThread::finish()
2605 : {
2606 18761 : PRThread *join = NULL;
2607 : {
2608 37522 : AutoLockGC lock(rt);
2609 18761 : if (thread && state != SHUTDOWN) {
2610 : /*
2611 : * We cannot be in the ALLOCATING or CANCEL_ALLOCATION states as
2612 : * the allocations should have been stopped during the last GC.
2613 : */
2614 18761 : JS_ASSERT(state == IDLE || state == SWEEPING);
2615 18761 : if (state == IDLE)
2616 18761 : PR_NotifyCondVar(wakeup);
2617 18761 : state = SHUTDOWN;
2618 18761 : join = thread;
2619 : }
2620 : }
2621 18761 : if (join) {
2622 : /* PR_DestroyThread is not necessary. */
2623 18761 : PR_JoinThread(join);
2624 : }
2625 18761 : if (wakeup)
2626 18761 : PR_DestroyCondVar(wakeup);
2627 18761 : if (done)
2628 18761 : PR_DestroyCondVar(done);
2629 18761 : }
2630 :
2631 : /* static */
2632 : void
2633 18761 : GCHelperThread::threadMain(void *arg)
2634 : {
2635 18761 : static_cast<GCHelperThread *>(arg)->threadLoop();
2636 18761 : }
2637 :
2638 : void
2639 18761 : GCHelperThread::threadLoop()
2640 : {
2641 37522 : AutoLockGC lock(rt);
2642 :
2643 : /*
2644 : * Even on the first iteration the state can be SHUTDOWN or SWEEPING if
2645 :      * the stop request, or a GC and its corresponding startBackgroundSweep
2646 :      * call, happens before this thread has a chance to run.
2647 : */
2648 63133 : for (;;) {
2649 81894 : switch (state) {
2650 : case SHUTDOWN:
2651 : return;
2652 : case IDLE:
2653 40947 : PR_WaitCondVar(wakeup, PR_INTERVAL_NO_TIMEOUT);
2654 40947 : break;
2655 : case SWEEPING:
2656 19675 : doSweep();
2657 19675 : if (state == SWEEPING)
2658 19675 : state = IDLE;
2659 19675 : PR_NotifyAllCondVar(done);
2660 19675 : break;
2661 : case ALLOCATING:
2662 5022 : do {
2663 : Chunk *chunk;
2664 : {
2665 5022 : AutoUnlockGC unlock(rt);
2666 2511 : chunk = Chunk::allocate(rt);
2667 : }
2668 :
2669 : /* OOM stops the background allocation. */
2670 2511 : if (!chunk)
2671 0 : break;
2672 2511 : JS_ASSERT(chunk->info.numArenasFreeCommitted == ArenasPerChunk);
2673 2511 : rt->gcNumArenasFreeCommitted += ArenasPerChunk;
2674 2511 : rt->gcChunkPool.put(chunk);
2675 2511 : } while (state == ALLOCATING && rt->gcChunkPool.wantBackgroundAllocation(rt));
2676 2511 : if (state == ALLOCATING)
2677 2511 : state = IDLE;
2678 2511 : break;
2679 : case CANCEL_ALLOCATION:
2680 0 : state = IDLE;
2681 0 : PR_NotifyAllCondVar(done);
2682 0 : break;
2683 : }
2684 : }
2685 : }
2686 :
2687 : bool
2688 19666 : GCHelperThread::prepareForBackgroundSweep()
2689 : {
2690 19666 : JS_ASSERT(state == IDLE);
2691 19666 : size_t maxArenaLists = MAX_BACKGROUND_FINALIZE_KINDS * rt->compartments.length();
2692 19666 : return finalizeVector.reserve(maxArenaLists);
2693 : }
2694 :
2695 : /* Must be called with the GC lock taken. */
2696 : void
2697 19666 : GCHelperThread::startBackgroundSweep(JSContext *cx, bool shouldShrink)
2698 : {
2699 : /* The caller takes the GC lock. */
2700 19666 : JS_ASSERT(state == IDLE);
2701 19666 : JS_ASSERT(cx);
2702 19666 : JS_ASSERT(!finalizationContext);
2703 19666 : finalizationContext = cx;
2704 19666 : shrinkFlag = shouldShrink;
2705 19666 : state = SWEEPING;
2706 19666 : PR_NotifyCondVar(wakeup);
2707 19666 : }
2708 :
2709 : /* Must be called with the GC lock taken. */
2710 : void
2711 9 : GCHelperThread::startBackgroundShrink()
2712 : {
2713 9 : switch (state) {
2714 : case IDLE:
2715 9 : JS_ASSERT(!finalizationContext);
2716 9 : shrinkFlag = true;
2717 9 : state = SWEEPING;
2718 9 : PR_NotifyCondVar(wakeup);
2719 9 : break;
2720 : case SWEEPING:
2721 0 : shrinkFlag = true;
2722 0 : break;
2723 : case ALLOCATING:
2724 : case CANCEL_ALLOCATION:
2725 : /*
2726 : * If we have started background allocation there is nothing to
2727 : * shrink.
2728 : */
2729 0 : break;
2730 : case SHUTDOWN:
2731 0 : JS_NOT_REACHED("No shrink on shutdown");
2732 : }
2733 9 : }
2734 :
2735 : /* Must be called with the GC lock taken. */
2736 : void
2737 38102 : GCHelperThread::waitBackgroundSweepEnd()
2738 : {
2739 76964 : while (state == SWEEPING)
2740 760 : PR_WaitCondVar(done, PR_INTERVAL_NO_TIMEOUT);
2741 38102 : }
2742 :
2743 : /* Must be called with the GC lock taken. */
2744 : void
2745 41410 : GCHelperThread::waitBackgroundSweepOrAllocEnd()
2746 : {
2747 41410 : if (state == ALLOCATING)
2748 0 : state = CANCEL_ALLOCATION;
2749 90052 : while (state == SWEEPING || state == CANCEL_ALLOCATION)
2750 7232 : PR_WaitCondVar(done, PR_INTERVAL_NO_TIMEOUT);
2751 41410 : }
2752 :
2753 : /* Must be called with the GC lock taken. */
2754 : inline void
2755 2511 : GCHelperThread::startBackgroundAllocationIfIdle()
2756 : {
2757 2511 : if (state == IDLE) {
2758 2511 : state = ALLOCATING;
2759 2511 : PR_NotifyCondVar(wakeup);
2760 : }
2761 2511 : }
2762 :
2763 : JS_FRIEND_API(void)
2764 12426 : GCHelperThread::replenishAndFreeLater(void *ptr)
2765 : {
2766 12426 : JS_ASSERT(freeCursor == freeCursorEnd);
2767 : do {
2768 12426 : if (freeCursor && !freeVector.append(freeCursorEnd - FREE_ARRAY_LENGTH))
2769 0 : break;
2770 12426 : freeCursor = (void **) OffTheBooks::malloc_(FREE_ARRAY_SIZE);
2771 12426 : if (!freeCursor) {
2772 0 : freeCursorEnd = NULL;
2773 0 : break;
2774 : }
2775 12426 : freeCursorEnd = freeCursor + FREE_ARRAY_LENGTH;
2776 12426 : *freeCursor++ = ptr;
2777 12426 : return;
2778 : } while (false);
2779 0 : Foreground::free_(ptr);
2780 : }
2781 :
2782 : /* Must be called with the GC lock taken. */
2783 : void
2784 19675 : GCHelperThread::doSweep()
2785 : {
2786 19675 : if (finalizationContext) {
2787 19666 : finalizationContext = NULL;
2788 39332 : AutoUnlockGC unlock(rt);
2789 :
2790 : /*
2791 :          * We must finalize in the insertion order; see the comments in
2792 : * finalizeObjects.
2793 : */
2794 19666 : FreeOp fop(rt, false, true);
2795 176614 : for (ArenaHeader **i = finalizeVector.begin(); i != finalizeVector.end(); ++i)
2796 156948 : ArenaLists::backgroundFinalize(&fop, *i);
2797 19666 : finalizeVector.resize(0);
2798 :
2799 19666 : if (freeCursor) {
2800 12426 : void **array = freeCursorEnd - FREE_ARRAY_LENGTH;
2801 12426 : freeElementsAndArray(array, freeCursor);
2802 12426 : freeCursor = freeCursorEnd = NULL;
2803 : } else {
2804 7240 : JS_ASSERT(!freeCursorEnd);
2805 : }
2806 19666 : for (void ***iter = freeVector.begin(); iter != freeVector.end(); ++iter) {
2807 0 : void **array = *iter;
2808 0 : freeElementsAndArray(array, array + FREE_ARRAY_LENGTH);
2809 : }
2810 19666 : freeVector.resize(0);
2811 : }
2812 :
2813 19675 : bool shrinking = shrinkFlag;
2814 19675 : ExpireChunksAndArenas(rt, shrinking);
2815 :
2816 : /*
2817 : * The main thread may have called ShrinkGCBuffers while
2818 : * ExpireChunksAndArenas(rt, false) was running, so we recheck the flag
2819 : * afterwards.
2820 : */
2821 19675 : if (!shrinking && shrinkFlag) {
2822 0 : shrinkFlag = false;
2823 0 : ExpireChunksAndArenas(rt, true);
2824 : }
2825 19675 : }
2826 :
2827 : #endif /* JS_THREADSAFE */
2828 :
2829 : } /* namespace js */
2830 :
2831 : static bool
2832 38427 : ReleaseObservedTypes(JSRuntime *rt)
2833 : {
2834 38427 : bool releaseTypes = false;
2835 38427 : int64_t now = PRMJ_Now();
2836 38427 : if (now >= rt->gcJitReleaseTime) {
2837 4 : releaseTypes = true;
2838 4 : rt->gcJitReleaseTime = now + JIT_SCRIPT_RELEASE_TYPES_INTERVAL;
2839 : }
2840 :
2841 38427 : return releaseTypes;
2842 : }
2843 :
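/*
 * Delete compartments that were collected and are now empty (and not held
 * alive by the embedding), compacting rt->compartments in place. The atoms
 * compartment at index 0 is always kept.
 */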
2844 : static void
2845 38427 : SweepCompartments(FreeOp *fop, JSGCInvocationKind gckind)
2846 : {
2847 38427 : JSRuntime *rt = fop->runtime();
2848 38427 : JSDestroyCompartmentCallback callback = rt->destroyCompartmentCallback;
2849 :
2850 : /* Skip the atomsCompartment. */
2851 38427 : JSCompartment **read = rt->compartments.begin() + 1;
2852 38427 : JSCompartment **end = rt->compartments.end();
2853 38427 : JSCompartment **write = read;
2854 38427 : JS_ASSERT(rt->compartments.length() >= 1);
2855 38427 : JS_ASSERT(*rt->compartments.begin() == rt->atomsCompartment);
2856 :
2857 122385 : while (read < end) {
2858 45531 : JSCompartment *compartment = *read++;
2859 :
2860 113835 : if (!compartment->hold && compartment->isCollecting() &&
2861 68304 : (compartment->arenas.arenaListsAreEmpty() || !rt->hasContexts()))
2862 : {
2863 22524 : compartment->arenas.checkEmptyFreeLists();
2864 22524 : if (callback)
2865 0 : callback(fop, compartment);
2866 22524 : if (compartment->principals)
2867 3 : JS_DropPrincipals(rt, compartment->principals);
2868 22524 : fop->delete_(compartment);
2869 22524 : continue;
2870 : }
2871 23007 : *write++ = compartment;
2872 : }
2873 38427 : rt->compartments.resize(write - rt->compartments.begin());
2874 38427 : }
2875 :
2876 : static void
2877 39964 : PurgeRuntime(JSTracer *trc)
2878 : {
2879 39964 : JSRuntime *rt = trc->runtime;
2880 :
2881 129330 : for (CompartmentsIter c(rt); !c.done(); c.next()) {
2882 : /* We can be called from StartVerifyBarriers with a non-GC marker. */
2883 89366 : if (c->isCollecting() || !IS_GC_MARKING_TRACER(trc))
2884 89042 : c->purge();
2885 : }
2886 :
2887 39964 : rt->tempLifoAlloc.freeUnused();
2888 39964 : rt->gsnCache.purge();
2889 :
2890 : /* FIXME: bug 506341 */
2891 39964 : rt->propertyCache.purge(rt);
2892 :
2893 61067 : for (ContextIter acx(rt); !acx.done(); acx.next())
2894 21103 : acx->purge();
2895 39964 : }
2896 :
2897 : static void
2898 38454 : BeginMarkPhase(JSRuntime *rt)
2899 : {
2900 38454 : GCMarker *gcmarker = &rt->gcMarker;
2901 :
2902 38454 : rt->gcStartNumber = rt->gcNumber;
2903 :
2904 : /* Reset weak map list. */
2905 38454 : WeakMapBase::resetWeakMapList(rt);
2906 :
2907 : /*
2908 : * We must purge the runtime at the beginning of an incremental GC. The
2909 : * danger if we purge later is that the snapshot invariant of incremental
2910 : * GC will be broken, as follows. If some object is reachable only through
2911 : * some cache (say the dtoaCache) then it will not be part of the snapshot.
2912 : * If we purge after root marking, then the mutator could obtain a pointer
2913 : * to the object and start using it. This object might never be marked, so
2914 : * a GC hazard would exist.
2915 : */
2916 : {
2917 76908 : gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_PURGE);
2918 38454 : PurgeRuntime(gcmarker);
2919 : }
2920 :
2921 : /*
2922 : * Mark phase.
2923 : */
2924 76908 : gcstats::AutoPhase ap1(rt->gcStats, gcstats::PHASE_MARK);
2925 76908 : gcstats::AutoPhase ap2(rt->gcStats, gcstats::PHASE_MARK_ROOTS);
2926 :
2927 118908 : for (GCChunkSet::Range r(rt->gcChunkSet.all()); !r.empty(); r.popFront())
2928 80454 : r.front()->bitmap.clear();
2929 :
2930 38454 : MarkRuntime(gcmarker);
2931 38454 : }
2932 :
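/*
 * Iterate to a fixed point: marking watchpoints, weak map entries, or
 * debugger edges can make more things reachable, so we keep re-running the
 * iterative markers and draining the mark stack until nothing new is marked.
 */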
2933 : void
2934 76872 : MarkWeakReferences(GCMarker *gcmarker)
2935 : {
2936 76872 : JS_ASSERT(gcmarker->isDrained());
2937 308200 : while (WatchpointMap::markAllIteratively(gcmarker) ||
2938 77192 : WeakMapBase::markAllIteratively(gcmarker) ||
2939 76944 : Debugger::markAllIteratively(gcmarker))
2940 : {
2941 320 : SliceBudget budget;
2942 320 : gcmarker->drainMarkStack(budget);
2943 : }
2944 76872 : JS_ASSERT(gcmarker->isDrained());
2945 76872 : }
2946 :
2947 : static void
2948 38436 : MarkGrayAndWeak(JSRuntime *rt)
2949 : {
2950 38436 : GCMarker *gcmarker = &rt->gcMarker;
2951 :
2952 38436 : JS_ASSERT(gcmarker->isDrained());
2953 38436 : MarkWeakReferences(gcmarker);
2954 :
2955 38436 : gcmarker->setMarkColorGray();
2956 38436 : if (gcmarker->hasBufferedGrayRoots()) {
2957 38436 : gcmarker->markBufferedGrayRoots();
2958 : } else {
2959 0 : if (JSTraceDataOp op = rt->gcGrayRootsTraceOp)
2960 0 : (*op)(gcmarker, rt->gcGrayRootsData);
2961 : }
2962 38436 : SliceBudget budget;
2963 38436 : gcmarker->drainMarkStack(budget);
2964 38436 : MarkWeakReferences(gcmarker);
2965 38436 : JS_ASSERT(gcmarker->isDrained());
2966 38436 : }
2967 :
2968 : #ifdef DEBUG
2969 : static void
2970 : ValidateIncrementalMarking(JSContext *cx);
2971 : #endif
2972 :
2973 : static void
2974 38427 : EndMarkPhase(JSContext *cx)
2975 : {
2976 38427 : JSRuntime *rt = cx->runtime;
2977 :
2978 : {
2979 76854 : gcstats::AutoPhase ap1(rt->gcStats, gcstats::PHASE_MARK);
2980 76854 : gcstats::AutoPhase ap2(rt->gcStats, gcstats::PHASE_MARK_OTHER);
2981 38427 : MarkGrayAndWeak(rt);
2982 : }
2983 :
2984 38427 : JS_ASSERT(rt->gcMarker.isDrained());
2985 :
2986 : #ifdef DEBUG
2987 38427 : if (rt->gcIncrementalState != NO_INCREMENTAL)
2988 9 : ValidateIncrementalMarking(cx);
2989 : #endif
2990 :
2991 : #ifdef DEBUG
2992 : /* Make sure that we didn't mark an object in another compartment */
2993 122385 : for (CompartmentsIter c(rt); !c.done(); c.next()) {
2994 84336 : JS_ASSERT_IF(!c->isCollecting() && c != rt->atomsCompartment,
2995 84336 : c->arenas.checkArenaListAllUnmarked());
2996 : }
2997 : #endif
2998 38427 : }
2999 :
3000 : #ifdef DEBUG
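/*
 * Debug-only check of incremental marking: save the current mark bitmaps and
 * weakmap list, redo the marking non-incrementally, and assert that every
 * cell marked black by the non-incremental pass was also marked black by the
 * incremental one. The incremental mark bits and the weakmap list are
 * restored afterwards.
 */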
3001 : static void
3002 9 : ValidateIncrementalMarking(JSContext *cx)
3003 : {
3004 : typedef HashMap<Chunk *, uintptr_t *, GCChunkHasher, SystemAllocPolicy> BitmapMap;
3005 18 : BitmapMap map;
3006 9 : if (!map.init())
3007 : return;
3008 :
3009 9 : JSRuntime *rt = cx->runtime;
3010 9 : GCMarker *gcmarker = &rt->gcMarker;
3011 :
3012 : /* Save existing mark bits. */
3013 27 : for (GCChunkSet::Range r(rt->gcChunkSet.all()); !r.empty(); r.popFront()) {
3014 18 : ChunkBitmap *bitmap = &r.front()->bitmap;
3015 18 : uintptr_t *entry = (uintptr_t *)js_malloc(sizeof(bitmap->bitmap));
3016 18 : if (!entry)
3017 : return;
3018 :
3019 18 : memcpy(entry, bitmap->bitmap, sizeof(bitmap->bitmap));
3020 18 : if (!map.putNew(r.front(), entry))
3021 : return;
3022 : }
3023 :
3024 : /* Save the existing weakmaps. */
3025 18 : WeakMapVector weakmaps;
3026 9 : if (!WeakMapBase::saveWeakMapList(rt, weakmaps))
3027 : return;
3028 :
3029 : /*
3030 : * After this point, the function should run to completion, so we shouldn't
3031 : * do anything fallible.
3032 : */
3033 :
3034 : /* Re-do all the marking, but non-incrementally. */
3035 9 : js::gc::State state = rt->gcIncrementalState;
3036 9 : rt->gcIncrementalState = NO_INCREMENTAL;
3037 :
3038 : /* As we're re-doing marking, we need to reset the weak map list. */
3039 9 : WeakMapBase::resetWeakMapList(rt);
3040 :
3041 9 : JS_ASSERT(gcmarker->isDrained());
3042 9 : gcmarker->reset();
3043 :
3044 27 : for (GCChunkSet::Range r(rt->gcChunkSet.all()); !r.empty(); r.popFront())
3045 18 : r.front()->bitmap.clear();
3046 :
3047 9 : MarkRuntime(gcmarker, true);
3048 9 : SliceBudget budget;
3049 9 : rt->gcMarker.drainMarkStack(budget);
3050 9 : MarkGrayAndWeak(rt);
3051 :
3052 : /* Now verify that we have the same mark bits as before. */
3053 27 : for (GCChunkSet::Range r(rt->gcChunkSet.all()); !r.empty(); r.popFront()) {
3054 18 : Chunk *chunk = r.front();
3055 18 : ChunkBitmap *bitmap = &chunk->bitmap;
3056 18 : uintptr_t *entry = map.lookup(r.front())->value;
3057 : ChunkBitmap incBitmap;
3058 :
3059 18 : memcpy(incBitmap.bitmap, entry, sizeof(incBitmap.bitmap));
3060 18 : js_free(entry);
3061 :
3062 4554 : for (size_t i = 0; i < ArenasPerChunk; i++) {
3063 4536 : if (chunk->decommittedArenas.get(i))
3064 0 : continue;
3065 4536 : Arena *arena = &chunk->arenas[i];
3066 4536 : if (!arena->aheader.allocated())
3067 4158 : continue;
3068 378 : if (!arena->aheader.compartment->isCollecting())
3069 0 : continue;
3070 378 : if (arena->aheader.allocatedDuringIncremental)
3071 0 : continue;
3072 :
3073 378 : AllocKind kind = arena->aheader.getAllocKind();
3074 378 : uintptr_t thing = arena->thingsStart(kind);
3075 378 : uintptr_t end = arena->thingsEnd();
3076 72432 : while (thing < end) {
3077 71676 : Cell *cell = (Cell *)thing;
3078 71676 : JS_ASSERT_IF(bitmap->isMarked(cell, BLACK), incBitmap.isMarked(cell, BLACK));
3079 71676 : thing += Arena::thingSize(kind);
3080 : }
3081 : }
3082 :
3083 18 : memcpy(bitmap->bitmap, incBitmap.bitmap, sizeof(incBitmap.bitmap));
3084 : }
3085 :
3086 : /* Restore the weak map list. */
3087 9 : WeakMapBase::resetWeakMapList(rt);
3088 9 : WeakMapBase::restoreWeakMapList(rt, weakmaps);
3089 :
3090 18 : rt->gcIncrementalState = state;
3091 : }
3092 : #endif
3093 :
3094 : static void
3095 38427 : SweepPhase(JSContext *cx, JSGCInvocationKind gckind)
3096 : {
3097 38427 : JSRuntime *rt = cx->runtime;
3098 :
3099 : /*
3100 : * Sweep phase.
3101 : *
3102 : * Finalize as we sweep, outside of rt->gcLock but with rt->gcRunning set
3103 : * so that any attempt to allocate a GC-thing from a finalizer will fail,
3104 : * rather than nest badly and leave the unmarked newborn to be swept.
3105 : *
3106 : * We first sweep atom state so we can use IsAboutToBeFinalized on
3107 : * JSString held in a hashtable to check if the hashtable entry can be
3108 : * freed. Note that even after the entry is freed, JSObject finalizers can
3109 : * continue to access the corresponding JSString* assuming that they are
3110 : * unique. This works since the atomization API must not be called during
3111 : * the GC.
3112 : */
3113 76854 : gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_SWEEP);
3114 :
3115 : #ifdef JS_THREADSAFE
3116 38427 : if (rt->hasContexts() && rt->gcHelperThread.prepareForBackgroundSweep())
3117 19666 : cx->gcBackgroundFree = &rt->gcHelperThread;
3118 : #endif
3119 :
3120 : /* Purge the ArenaLists before sweeping. */
3121 122124 : for (GCCompartmentsIter c(rt); !c.done(); c.next())
3122 83697 : c->arenas.purge();
3123 :
3124 : #ifdef JS_THREADSAFE
3125 38427 : FreeOp fop(rt, !!cx->gcBackgroundFree, false);
3126 : #else
3127 : FreeOp fop(rt, false, false);
3128 : #endif
3129 : {
3130 76854 : gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_FINALIZE_START);
3131 38427 : if (rt->gcFinalizeCallback)
3132 2 : rt->gcFinalizeCallback(&fop, JSFINALIZE_START);
3133 : }
3134 :
3135 : /* Finalize unreachable (key,value) pairs in all weak maps. */
3136 38427 : WeakMapBase::sweepAll(&rt->gcMarker);
3137 :
3138 38427 : SweepAtomState(rt);
3139 :
3140 : /* Collect watch points associated with unreachable objects. */
3141 38427 : WatchpointMap::sweepAll(rt);
3142 :
3143 : /* Detach unreachable debuggers and global objects from each other. */
3144 38427 : Debugger::sweepAll(&fop);
3145 :
3146 : {
3147 76854 : gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_SWEEP_COMPARTMENTS);
3148 :
3149 38427 : bool releaseTypes = ReleaseObservedTypes(rt);
3150 122124 : for (GCCompartmentsIter c(rt); !c.done(); c.next())
3151 83697 : c->sweep(&fop, releaseTypes);
3152 : }
3153 :
3154 : {
3155 76854 : gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_SWEEP_OBJECT);
3156 :
3157 : /*
3158 : * We finalize objects before other GC things to ensure that the object's
3159 : * finalizer can access the other things even if they will be freed.
3160 : */
3161 122124 : for (GCCompartmentsIter c(rt); !c.done(); c.next())
3162 83697 : c->arenas.finalizeObjects(&fop);
3163 : }
3164 :
3165 : {
3166 76854 : gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_SWEEP_STRING);
3167 122124 : for (GCCompartmentsIter c(rt); !c.done(); c.next())
3168 83697 : c->arenas.finalizeStrings(&fop);
3169 : }
3170 :
3171 : {
3172 76854 : gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_SWEEP_SCRIPT);
3173 122124 : for (GCCompartmentsIter c(rt); !c.done(); c.next())
3174 83697 : c->arenas.finalizeScripts(&fop);
3175 : }
3176 :
3177 : {
3178 76854 : gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_SWEEP_SHAPE);
3179 122124 : for (GCCompartmentsIter c(rt); !c.done(); c.next())
3180 83697 : c->arenas.finalizeShapes(&fop);
3181 : }
3182 :
3183 : #ifdef DEBUG
3184 38427 : PropertyTree::dumpShapes(rt);
3185 : #endif
3186 :
3187 : {
3188 76854 : gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_DESTROY);
3189 :
3190 : /*
3191 : * Sweep script filenames after sweeping functions in the generic loop
3192 : * above. In this way when a scripted function's finalizer destroys the
3193 : * script and calls rt->destroyScriptHook, the hook can still access the
3194 : * script's filename. See bug 323267.
3195 : */
3196 122124 : for (GCCompartmentsIter c(rt); !c.done(); c.next())
3197 83697 : SweepScriptFilenames(c);
3198 :
3199 : /*
3200 :          * This removes compartments from rt->compartments, so we do it last to make
3201 : * sure we don't miss sweeping any compartments.
3202 : */
3203 38427 : SweepCompartments(&fop, gckind);
3204 :
3205 : #ifndef JS_THREADSAFE
3206 : /*
3207 :          * Destroy arenas after we have finished sweeping so finalizers can safely
3208 : * use IsAboutToBeFinalized().
3209 : * This is done on the GCHelperThread if JS_THREADSAFE is defined.
3210 : */
3211 : ExpireChunksAndArenas(rt, gckind == GC_SHRINK);
3212 : #endif
3213 : }
3214 :
3215 : {
3216 76854 : gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_FINALIZE_END);
3217 38427 : if (rt->gcFinalizeCallback)
3218 2 : rt->gcFinalizeCallback(&fop, JSFINALIZE_END);
3219 : }
3220 :
3221 99861 : for (CompartmentsIter c(rt); !c.done(); c.next())
3222 61434 : c->setGCLastBytes(c->gcBytes, c->gcMallocAndFreeBytes, gckind);
3223 38427 : }
3224 :
3225 : /* Perform a mark-and-sweep GC over the compartments scheduled for collection. */
3226 : static void
3227 38418 : MarkAndSweep(JSContext *cx, JSGCInvocationKind gckind)
3228 : {
3229 38418 : JSRuntime *rt = cx->runtime;
3230 :
3231 76836 : AutoUnlockGC unlock(rt);
3232 :
3233 38418 : rt->gcMarker.start(rt);
3234 38418 : JS_ASSERT(!rt->gcMarker.callback);
3235 :
3236 38418 : BeginMarkPhase(rt);
3237 : {
3238 76836 : gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_MARK);
3239 38418 : SliceBudget budget;
3240 38418 : rt->gcMarker.drainMarkStack(budget);
3241 : }
3242 38418 : EndMarkPhase(cx);
3243 38418 : SweepPhase(cx, gckind);
3244 :
3245 38418 : rt->gcMarker.stop();
3246 38418 : }
3247 :
3248 : /*
3249 :  * This class should be used by any code that needs exclusive access to the
3250 : * heap in order to trace through it...
3251 : */
3252 : class AutoHeapSession {
3253 : public:
3254 : explicit AutoHeapSession(JSRuntime *rt);
3255 : ~AutoHeapSession();
3256 :
3257 : protected:
3258 : JSRuntime *runtime;
3259 :
3260 : private:
3261 : AutoHeapSession(const AutoHeapSession&) MOZ_DELETE;
3262 : void operator=(const AutoHeapSession&) MOZ_DELETE;
3263 : };
3264 :
3265 : /* ...while this class is to be used only for garbage collection. */
3266 : class AutoGCSession : AutoHeapSession {
3267 : public:
3268 : explicit AutoGCSession(JSRuntime *rt);
3269 : ~AutoGCSession();
3270 : };
3271 :
3272 : /* Start a new heap session. */
3273 42080 : AutoHeapSession::AutoHeapSession(JSRuntime *rt)
3274 42080 : : runtime(rt)
3275 : {
3276 42080 : JS_ASSERT(!rt->noGCOrAllocationCheck);
3277 42080 : JS_ASSERT(!rt->gcRunning);
3278 42080 : rt->gcRunning = true;
3279 42080 : }
3280 :
3281 42080 : AutoHeapSession::~AutoHeapSession()
3282 : {
3283 42080 : JS_ASSERT(runtime->gcRunning);
3284 42080 : runtime->gcRunning = false;
3285 42080 : }
3286 :
3287 38481 : AutoGCSession::AutoGCSession(JSRuntime *rt)
3288 38481 : : AutoHeapSession(rt)
3289 : {
3290 76962 : DebugOnly<bool> any = false;
3291 122619 : for (CompartmentsIter c(rt); !c.done(); c.next()) {
3292 84138 : if (c->isGCScheduled()) {
3293 83796 : c->setCollecting(true);
3294 83796 : any = true;
3295 : }
3296 : }
3297 38481 : JS_ASSERT(any);
3298 :
3299 38481 : runtime->gcIsNeeded = false;
3300 38481 : runtime->gcInterFrameGC = true;
3301 :
3302 38481 : runtime->gcNumber++;
3303 :
3304 38481 : runtime->resetGCMallocBytes();
3305 :
3306 : /* Clear gcMallocBytes for all compartments */
3307 122619 : for (CompartmentsIter c(runtime); !c.done(); c.next())
3308 84138 : c->resetGCMallocBytes();
3309 38481 : }
3310 :
3311 76962 : AutoGCSession::~AutoGCSession()
3312 : {
3313 99753 : for (GCCompartmentsIter c(runtime); !c.done(); c.next())
3314 61272 : c->setCollecting(false);
3315 :
3316 38481 : runtime->gcNextFullGCTime = PRMJ_Now() + GC_IDLE_FULL_SPAN;
3317 38481 : runtime->gcChunkAllocationSinceLastGC = false;
3318 :
3319 : #ifdef JS_GC_ZEAL
3320 : /* Keeping these around after a GC is dangerous. */
3321 38481 : runtime->gcSelectedForMarking.clearAndFree();
3322 : #endif
3323 38481 : }
3324 :
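/*
 * Abandon an in-progress incremental GC: clear the compartments' barrier
 * flags, reset and stop the marker, and return the state machine to
 * NO_INCREMENTAL, recording the reason in the GC stats.
 */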
3325 : static void
3326 38454 : ResetIncrementalGC(JSRuntime *rt, const char *reason)
3327 : {
3328 38454 : if (rt->gcIncrementalState == NO_INCREMENTAL)
3329 38427 : return;
3330 :
3331 135 : for (CompartmentsIter c(rt); !c.done(); c.next())
3332 108 : c->needsBarrier_ = false;
3333 :
3334 27 : rt->gcMarker.reset();
3335 27 : rt->gcMarker.stop();
3336 27 : rt->gcIncrementalState = NO_INCREMENTAL;
3337 :
3338 27 : JS_ASSERT(!rt->gcStrictCompartmentChecking);
3339 :
3340 27 : rt->gcStats.reset(reason);
3341 : }
3342 :
3343 : class AutoGCSlice {
3344 : public:
3345 : AutoGCSlice(JSContext *cx);
3346 : ~AutoGCSlice();
3347 :
3348 : private:
3349 : JSContext *context;
3350 : };
3351 :
3352 63 : AutoGCSlice::AutoGCSlice(JSContext *cx)
3353 63 : : context(cx)
3354 : {
3355 63 : JSRuntime *rt = context->runtime;
3356 :
3357 : /*
3358 : * During incremental GC, the compartment's active flag records whether
3359 : * there are stack frames active for any of its scripts. Normally this flag
3360 : * is set at the beginning of the mark phase. During incremental GC, we also
3361 : * set it at the start of every slice.
3362 : */
3363 63 : rt->stackSpace.markActiveCompartments();
3364 :
3365 180 : for (GCCompartmentsIter c(rt); !c.done(); c.next()) {
3366 : /* Clear this early so we don't do any write barriers during GC. */
3367 117 : if (rt->gcIncrementalState == MARK)
3368 54 : c->needsBarrier_ = false;
3369 : else
3370 63 : JS_ASSERT(!c->needsBarrier_);
3371 : }
3372 63 : }
3373 :
3374 63 : AutoGCSlice::~AutoGCSlice()
3375 : {
3376 63 : JSRuntime *rt = context->runtime;
3377 :
3378 180 : for (GCCompartmentsIter c(rt); !c.done(); c.next()) {
3379 117 : if (rt->gcIncrementalState == MARK) {
3380 99 : c->needsBarrier_ = true;
3381 99 : c->arenas.prepareForIncrementalGC(rt);
3382 : } else {
3383 18 : JS_ASSERT(rt->gcIncrementalState == NO_INCREMENTAL);
3384 18 : c->needsBarrier_ = false;
3385 : }
3386 : }
3387 63 : }
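/*
 * Illustrative sketch (editorial addition): the shape of a pre-write barrier
 * that the needsBarrier_ flag toggled above turns on and off. The real
 * barriers live elsewhere (in the heap pointer wrapper classes); this helper,
 * its name, and its exact marking call are simplified assumptions, not the
 * actual implementation.
 */
#if 0
static inline void
ExampleWriteBarrierPre(JSRuntime *rt, JSCompartment *comp, JSObject *old)
{
    /* Only pay for the barrier while an incremental GC is in progress. */
    if (!comp->needsBarrier())
        return;

    /*
     * The value being overwritten must be marked so that the incremental
     * marking invariants described at the top of this file are preserved.
     */
    if (old)
        MarkObjectUnbarriered(&rt->gcMarker, &old, "example pre-barrier");
}
#endif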
3388 :
3389 : class AutoCopyFreeListToArenas {
3390 : JSRuntime *rt;
3391 :
3392 : public:
3393 41950 : AutoCopyFreeListToArenas(JSRuntime *rt)
3394 41950 : : rt(rt) {
3395 137767 : for (CompartmentsIter c(rt); !c.done(); c.next())
3396 95817 : c->arenas.copyFreeListsToArenas();
3397 41950 : }
3398 :
3399 41950 : ~AutoCopyFreeListToArenas() {
3400 115243 : for (CompartmentsIter c(rt); !c.done(); c.next())
3401 73293 : c->arenas.clearFreeListsInArenas();
3402 41950 : }
3403 : };
3404 :
3405 : static void
3406 63 : IncrementalGCSlice(JSContext *cx, int64_t budget, JSGCInvocationKind gckind)
3407 : {
3408 63 : JSRuntime *rt = cx->runtime;
3409 :
3410 126 : AutoUnlockGC unlock(rt);
3411 126 : AutoGCSlice slice(cx);
3412 :
3413 63 : gc::State initialState = rt->gcIncrementalState;
3414 :
3415 63 : if (rt->gcIncrementalState == NO_INCREMENTAL) {
3416 36 : rt->gcIncrementalState = MARK_ROOTS;
3417 36 : rt->gcLastMarkSlice = false;
3418 : }
3419 :
3420 63 : if (rt->gcIncrementalState == MARK_ROOTS) {
3421 36 : rt->gcMarker.start(rt);
3422 36 : JS_ASSERT(IS_GC_MARKING_TRACER(&rt->gcMarker));
3423 :
3424 99 : for (GCCompartmentsIter c(rt); !c.done(); c.next()) {
3425 126 : gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_DISCARD_CODE);
3426 63 : c->discardJitCode(rt->defaultFreeOp());
3427 : }
3428 :
3429 36 : BeginMarkPhase(rt);
3430 :
3431 36 : rt->gcIncrementalState = MARK;
3432 : }
3433 :
3434 63 : if (rt->gcIncrementalState == MARK) {
3435 126 : gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_MARK);
3436 63 : SliceBudget sliceBudget(budget);
3437 :
3438 : /* If gray roots could not be buffered for delayed marking, we cannot yield: finish marking in this slice. */
3439 63 : if (!rt->gcMarker.hasBufferedGrayRoots())
3440 0 : sliceBudget.reset();
3441 :
3442 : #ifdef JS_GC_ZEAL
3443 63 : if (!rt->gcSelectedForMarking.empty()) {
3444 36 : for (JSObject **obj = rt->gcSelectedForMarking.begin();
3445 18 : obj != rt->gcSelectedForMarking.end(); obj++)
3446 : {
3447 9 : MarkObjectUnbarriered(&rt->gcMarker, obj, "selected obj");
3448 : }
3449 : }
3450 : #endif
3451 :
3452 63 : bool finished = rt->gcMarker.drainMarkStack(sliceBudget);
3453 :
3454 63 : if (finished) {
3455 9 : JS_ASSERT(rt->gcMarker.isDrained());
3456 9 : if (initialState == MARK && !rt->gcLastMarkSlice && budget != SliceBudget::Unlimited)
3457 0 : rt->gcLastMarkSlice = true;
3458 : else
3459 9 : rt->gcIncrementalState = SWEEP;
3460 : }
3461 : }
3462 :
3463 63 : if (rt->gcIncrementalState == SWEEP) {
3464 9 : EndMarkPhase(cx);
3465 9 : SweepPhase(cx, gckind);
3466 :
3467 9 : rt->gcMarker.stop();
3468 :
3469 : /* JIT code was already discarded during sweeping. */
3470 :
3471 9 : rt->gcIncrementalState = NO_INCREMENTAL;
3472 : }
3473 63 : }
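/*
 * Illustrative sketch (editorial addition): how a marking loop consumes a
 * SliceBudget like the one constructed in IncrementalGCSlice above. The
 * SliceBudget method names step() and isOverBudget() are assumptions about
 * the budget interface, not a quote of the real GCMarker::drainMarkStack.
 */
#if 0
static bool
ExampleDrainLoop(JSRuntime *rt, SliceBudget &budget)
{
    while (!rt->gcMarker.isDrained()) {
        /* ... pop one mark stack entry and push its children ... */
        budget.step();              /* account for one unit of work (assumed name) */
        if (budget.isOverBudget())  /* assumed name */
            return false;           /* over budget: yield and resume next slice */
    }
    return true;                    /* mark stack fully drained */
}
#endif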
3474 :
3475 : class IncrementalSafety
3476 : {
3477 : const char *reason_;
3478 :
3479 3243 : IncrementalSafety(const char *reason) : reason_(reason) {}
3480 :
3481 : public:
3482 3065 : static IncrementalSafety Safe() { return IncrementalSafety(NULL); }
3483 178 : static IncrementalSafety Unsafe(const char *reason) { return IncrementalSafety(reason); }
3484 :
3485 : typedef void (IncrementalSafety::* ConvertibleToBool)();
3486 0 : void nonNull() {}
3487 :
3488 3243 : operator ConvertibleToBool() const {
3489 3243 : return reason_ == NULL ? &IncrementalSafety::nonNull : 0;
3490 : }
3491 :
3492 0 : const char *reason() {
3493 0 : JS_ASSERT(reason_);
3494 0 : return reason_;
3495 : }
3496 : };
3497 :
3498 : static IncrementalSafety
3499 3243 : IsIncrementalGCSafe(JSRuntime *rt)
3500 : {
3501 3243 : if (rt->gcKeepAtoms)
3502 178 : return IncrementalSafety::Unsafe("gcKeepAtoms set");
3503 :
3504 13756 : for (CompartmentsIter c(rt); !c.done(); c.next()) {
3505 10691 : if (c->activeAnalysis)
3506 0 : return IncrementalSafety::Unsafe("activeAnalysis set");
3507 : }
3508 :
3509 3065 : if (!rt->gcIncrementalEnabled)
3510 0 : return IncrementalSafety::Unsafe("incremental permanently disabled");
3511 :
3512 3065 : return IncrementalSafety::Safe();
3513 : }
3514 :
3515 : static void
3516 241 : BudgetIncrementalGC(JSRuntime *rt, int64_t *budget)
3517 : {
3518 241 : IncrementalSafety safe = IsIncrementalGCSafe(rt);
3519 241 : if (!safe) {
3520 0 : ResetIncrementalGC(rt, safe.reason());
3521 0 : *budget = SliceBudget::Unlimited;
3522 0 : rt->gcStats.nonincremental(safe.reason());
3523 0 : return;
3524 : }
3525 :
3526 241 : if (rt->gcMode != JSGC_MODE_INCREMENTAL) {
3527 0 : ResetIncrementalGC(rt, "GC mode change");
3528 0 : *budget = SliceBudget::Unlimited;
3529 0 : rt->gcStats.nonincremental("GC mode");
3530 0 : return;
3531 : }
3532 :
3533 : #ifdef ANDROID
3534 : JS_ASSERT(rt->gcIncrementalState == NO_INCREMENTAL);
3535 : *budget = SliceBudget::Unlimited;
3536 : rt->gcStats.nonincremental("Android");
3537 : return;
3538 : #endif
3539 :
3540 403 : for (CompartmentsIter c(rt); !c.done(); c.next()) {
3541 376 : if (c->gcBytes > c->gcTriggerBytes) {
3542 0 : *budget = SliceBudget::Unlimited;
3543 0 : rt->gcStats.nonincremental("allocation trigger");
3544 0 : return;
3545 : }
3546 :
3547 376 : if (c->isCollecting() != c->needsBarrier()) {
3548 214 : ResetIncrementalGC(rt, "compartment change");
3549 214 : return;
3550 : }
3551 : }
3552 : }
3553 :
3554 : /*
3555 : * GC, repeatedly if necessary, until we think we have not created any new
3556 : * garbage. We disable inlining to ensure that the bottom of the stack with
3557 : * possible GC roots recorded in MarkRuntime excludes any pointers we use during
3558 : * the marking implementation.
3559 : */
3560 : static JS_NEVER_INLINE void
3561 38481 : GCCycle(JSContext *cx, bool incremental, int64_t budget, JSGCInvocationKind gckind)
3562 : {
3563 38481 : JSRuntime *rt = cx->runtime;
3564 :
3565 : #ifdef DEBUG
3566 122619 : for (CompartmentsIter c(rt); !c.done(); c.next())
3567 84138 : JS_ASSERT_IF(rt->gcMode == JSGC_MODE_GLOBAL, c->isGCScheduled());
3568 : #endif
3569 :
3570 : /* Recursive GC is a no-op. */
3571 38481 : if (rt->gcRunning)
3572 0 : return;
3573 :
3574 76962 : AutoGCSession gcsession(rt);
3575 :
3576 : /* Don't GC if we are reporting an OOM. */
3577 38481 : if (rt->inOOMReport)
3578 : return;
3579 :
3580 : #ifdef JS_THREADSAFE
3581 : /*
3582 : * As we are about to purge caches and clear the mark bits, we must wait
3583 : * for any background finalization to finish. We must also wait for
3584 : * background allocation to finish so that we can avoid taking the GC lock
3585 : * when manipulating the chunks during the GC.
3586 : */
3587 : {
3588 76962 : gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_WAIT_BACKGROUND_THREAD);
3589 :
3590 38481 : JS_ASSERT(!cx->gcBackgroundFree);
3591 38481 : rt->gcHelperThread.waitBackgroundSweepOrAllocEnd();
3592 : }
3593 : #endif
3594 :
3595 38481 : if (!incremental) {
3596 : /* If non-incremental GC was requested, reset incremental GC. */
3597 38240 : ResetIncrementalGC(rt, "requested");
3598 38240 : rt->gcStats.nonincremental("requested");
3599 : } else {
3600 241 : BudgetIncrementalGC(rt, &budget);
3601 : }
3602 :
3603 76962 : AutoCopyFreeListToArenas copy(rt);
3604 :
3605 38481 : if (budget == SliceBudget::Unlimited && rt->gcIncrementalState == NO_INCREMENTAL)
3606 38418 : MarkAndSweep(cx, gckind);
3607 : else
3608 63 : IncrementalGCSlice(cx, budget, gckind);
3609 :
3610 : #ifdef DEBUG
3611 38481 : if (rt->gcIncrementalState == NO_INCREMENTAL) {
3612 99861 : for (CompartmentsIter c(rt); !c.done(); c.next())
3613 61434 : JS_ASSERT(!c->needsBarrier_);
3614 : }
3615 : #endif
3616 : #ifdef JS_THREADSAFE
3617 38481 : if (rt->gcIncrementalState == NO_INCREMENTAL) {
3618 38427 : if (cx->gcBackgroundFree) {
3619 19666 : JS_ASSERT(cx->gcBackgroundFree == &rt->gcHelperThread);
3620 19666 : cx->gcBackgroundFree = NULL;
3621 19666 : rt->gcHelperThread.startBackgroundSweep(cx, gckind == GC_SHRINK);
3622 : }
3623 : }
3624 : #endif
3625 : }
3626 :
3627 : #ifdef JS_GC_ZEAL
3628 : static bool
3629 0 : IsDeterministicGCReason(gcreason::Reason reason)
3630 : {
3631 0 : if (reason > gcreason::DEBUG_GC && reason != gcreason::CC_FORCED)
3632 0 : return false;
3633 :
3634 0 : if (reason == gcreason::MAYBEGC)
3635 0 : return false;
3636 :
3637 0 : return true;
3638 : }
3639 : #endif
3640 :
3641 : static void
3642 38481 : Collect(JSContext *cx, bool incremental, int64_t budget,
3643 : JSGCInvocationKind gckind, gcreason::Reason reason)
3644 : {
3645 38481 : JSRuntime *rt = cx->runtime;
3646 38481 : JS_AbortIfWrongThread(rt);
3647 :
3648 : #ifdef JS_GC_ZEAL
3649 38481 : if (rt->gcDeterministicOnly && !IsDeterministicGCReason(reason))
3650 0 : return;
3651 : #endif
3652 :
3653 : JS_ASSERT_IF(!incremental || budget != SliceBudget::Unlimited, JSGC_INCREMENTAL);
3654 :
3655 : #ifdef JS_GC_ZEAL
3656 : bool restartVerify = cx->runtime->gcVerifyData &&
3657 130 : cx->runtime->gcZeal() == ZealVerifierValue &&
3658 38611 : reason != gcreason::CC_FORCED;
3659 :
3660 : struct AutoVerifyBarriers {
3661 : JSContext *cx;
3662 : bool restart;
3663 38481 : AutoVerifyBarriers(JSContext *cx, bool restart)
3664 38481 : : cx(cx), restart(restart)
3665 : {
3666 38481 : if (cx->runtime->gcVerifyData)
3667 130 : EndVerifyBarriers(cx);
3668 38481 : }
3669 38481 : ~AutoVerifyBarriers() {
3670 38481 : if (restart)
3671 130 : StartVerifyBarriers(cx);
3672 38481 : }
3673 76962 : } av(cx, restartVerify);
3674 : #endif
3675 :
3676 38481 : RecordNativeStackTopForGC(rt);
3677 :
3678 38481 : int compartmentCount = 0;
3679 38481 : int collectedCount = 0;
3680 122619 : for (CompartmentsIter c(rt); !c.done(); c.next()) {
3681 84138 : if (rt->gcMode == JSGC_MODE_GLOBAL)
3682 303 : c->scheduleGC();
3683 :
3684 : /* This is a heuristic to avoid resets. */
3685 84138 : if (rt->gcIncrementalState != NO_INCREMENTAL && c->needsBarrier())
3686 99 : c->scheduleGC();
3687 :
3688 84138 : compartmentCount++;
3689 84138 : if (c->isGCScheduled())
3690 83796 : collectedCount++;
3691 : }
3692 :
3693 76962 : gcstats::AutoGCSlice agc(rt->gcStats, collectedCount, compartmentCount, reason);
3694 :
3695 38481 : do {
3696 : /*
3697 : * Let the API user decide to defer a GC if it wants to (unless this
3698 : * is the last context). Invoke the callback regardless.
3699 : */
3700 38481 : if (rt->gcIncrementalState == NO_INCREMENTAL) {
3701 76854 : gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_GC_BEGIN);
3702 38427 : if (JSGCCallback callback = rt->gcCallback)
3703 0 : callback(rt, JSGC_BEGIN);
3704 : }
3705 :
3706 : {
3707 : /* Lock out other GC allocator and collector invocations. */
3708 76962 : AutoLockGC lock(rt);
3709 38481 : rt->gcPoke = false;
3710 38481 : GCCycle(cx, incremental, budget, gckind);
3711 : }
3712 :
3713 38481 : if (rt->gcIncrementalState == NO_INCREMENTAL) {
3714 76854 : gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_GC_END);
3715 38427 : if (JSGCCallback callback = rt->gcCallback)
3716 0 : callback(rt, JSGC_END);
3717 : }
3718 :
3719 : /*
3720 : * On shutdown, iterate until finalizers or the JSGC_END callback
3721 : * stop creating garbage.
3722 : */
3723 38481 : } while (!rt->hasContexts() && rt->gcPoke);
3724 : }
3725 :
3726 : namespace js {
3727 :
3728 : void
3729 38240 : GC(JSContext *cx, JSGCInvocationKind gckind, gcreason::Reason reason)
3730 : {
3731 38240 : Collect(cx, false, SliceBudget::Unlimited, gckind, reason);
3732 38240 : }
3733 :
3734 : void
3735 160 : GCSlice(JSContext *cx, JSGCInvocationKind gckind, gcreason::Reason reason)
3736 : {
3737 160 : Collect(cx, true, cx->runtime->gcSliceBudget, gckind, reason);
3738 160 : }
3739 :
3740 : void
3741 81 : GCDebugSlice(JSContext *cx, bool limit, int64_t objCount)
3742 : {
3743 81 : int64_t budget = limit ? SliceBudget::WorkBudget(objCount) : SliceBudget::Unlimited;
3744 81 : PrepareForDebugGC(cx->runtime);
3745 81 : Collect(cx, true, budget, GC_NORMAL, gcreason::API);
3746 81 : }
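/*
 * Illustrative sketch (editorial addition): how the three entry points above
 * differ. GC() forces a complete, non-incremental collection; GCSlice() runs
 * one slice bounded by the runtime's configured budget; GCDebugSlice() lets
 * tests bound a slice by an explicit amount of work. The calling function is
 * hypothetical.
 */
#if 0
static void
ExampleGCEntryPoints(JSContext *cx)
{
    GC(cx, GC_NORMAL, gcreason::API);            /* full, non-incremental GC */
    GCSlice(cx, GC_NORMAL, gcreason::API);       /* one budgeted slice */
    GCDebugSlice(cx, /* limit = */ true, 1000);  /* slice capped at 1000 work units */
}
#endif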
3747 :
3748 : /* Schedule a full GC unless a compartment will already be collected. */
3749 : void
3750 10862 : PrepareForDebugGC(JSRuntime *rt)
3751 : {
3752 34239 : for (CompartmentsIter c(rt); !c.done(); c.next()) {
3753 23530 : if (c->isGCScheduled())
3754 153 : return;
3755 : }
3756 :
3757 10709 : PrepareForFullGC(rt);
3758 : }
3759 :
3760 : void
3761 9 : ShrinkGCBuffers(JSRuntime *rt)
3762 : {
3763 18 : AutoLockGC lock(rt);
3764 9 : JS_ASSERT(!rt->gcRunning);
3765 : #ifndef JS_THREADSAFE
3766 : ExpireChunksAndArenas(rt, true);
3767 : #else
3768 9 : rt->gcHelperThread.startBackgroundShrink();
3769 : #endif
3770 9 : }
3771 :
3772 : void
3773 549 : TraceRuntime(JSTracer *trc)
3774 : {
3775 549 : JS_ASSERT(!IS_GC_MARKING_TRACER(trc));
3776 :
3777 : #ifdef JS_THREADSAFE
3778 : {
3779 549 : JSRuntime *rt = trc->runtime;
3780 549 : if (!rt->gcRunning) {
3781 1098 : AutoLockGC lock(rt);
3782 1098 : AutoHeapSession session(rt);
3783 :
3784 549 : rt->gcHelperThread.waitBackgroundSweepEnd();
3785 1098 : AutoUnlockGC unlock(rt);
3786 :
3787 1098 : AutoCopyFreeListToArenas copy(rt);
3788 549 : RecordNativeStackTopForGC(rt);
3789 549 : MarkRuntime(trc);
3790 : return;
3791 : }
3792 : }
3793 : #else
3794 : AutoCopyFreeListToArenas copy(trc->runtime);
3795 : RecordNativeStackTopForGC(trc->runtime);
3796 : #endif
3797 :
3798 : /*
3799 : * Calls from inside a normal GC, as well as recursive calls, are OK and do
3800 : * not require session setup.
3801 : */
3802 0 : MarkRuntime(trc);
3803 : }
3804 :
3805 : struct IterateArenaCallbackOp
3806 : {
3807 : JSRuntime *rt;
3808 : void *data;
3809 : IterateArenaCallback callback;
3810 : JSGCTraceKind traceKind;
3811 : size_t thingSize;
3812 0 : IterateArenaCallbackOp(JSRuntime *rt, void *data, IterateArenaCallback callback,
3813 : JSGCTraceKind traceKind, size_t thingSize)
3814 0 : : rt(rt), data(data), callback(callback), traceKind(traceKind), thingSize(thingSize)
3815 0 : {}
3816 0 : void operator()(Arena *arena) { (*callback)(rt, data, arena, traceKind, thingSize); }
3817 : };
3818 :
3819 : struct IterateCellCallbackOp
3820 : {
3821 : JSRuntime *rt;
3822 : void *data;
3823 : IterateCellCallback callback;
3824 : JSGCTraceKind traceKind;
3825 : size_t thingSize;
3826 0 : IterateCellCallbackOp(JSRuntime *rt, void *data, IterateCellCallback callback,
3827 : JSGCTraceKind traceKind, size_t thingSize)
3828 0 : : rt(rt), data(data), callback(callback), traceKind(traceKind), thingSize(thingSize)
3829 0 : {}
3830 0 : void operator()(Cell *cell) { (*callback)(rt, data, cell, traceKind, thingSize); }
3831 : };
3832 :
3833 : void
3834 0 : IterateCompartmentsArenasCells(JSRuntime *rt, void *data,
3835 : JSIterateCompartmentCallback compartmentCallback,
3836 : IterateArenaCallback arenaCallback,
3837 : IterateCellCallback cellCallback)
3838 : {
3839 0 : JS_ASSERT(!rt->gcRunning);
3840 :
3841 0 : AutoLockGC lock(rt);
3842 0 : AutoHeapSession session(rt);
3843 : #ifdef JS_THREADSAFE
3844 0 : rt->gcHelperThread.waitBackgroundSweepEnd();
3845 : #endif
3846 0 : AutoUnlockGC unlock(rt);
3847 :
3848 0 : AutoCopyFreeListToArenas copy(rt);
3849 0 : for (CompartmentsIter c(rt); !c.done(); c.next()) {
3850 0 : (*compartmentCallback)(rt, data, c);
3851 :
3852 0 : for (size_t thingKind = 0; thingKind != FINALIZE_LIMIT; thingKind++) {
3853 0 : JSGCTraceKind traceKind = MapAllocToTraceKind(AllocKind(thingKind));
3854 0 : size_t thingSize = Arena::thingSize(AllocKind(thingKind));
3855 0 : IterateArenaCallbackOp arenaOp(rt, data, arenaCallback, traceKind, thingSize);
3856 0 : IterateCellCallbackOp cellOp(rt, data, cellCallback, traceKind, thingSize);
3857 0 : ForEachArenaAndCell(c, AllocKind(thingKind), arenaOp, cellOp);
3858 : }
3859 : }
3860 0 : }
3861 :
3862 : void
3863 0 : IterateChunks(JSRuntime *rt, void *data, IterateChunkCallback chunkCallback)
3864 : {
3865 : /* :XXX: Any way to share this preamble with IterateCompartmentsArenasCells? */
3866 0 : JS_ASSERT(!rt->gcRunning);
3867 :
3868 0 : AutoLockGC lock(rt);
3869 0 : AutoHeapSession session(rt);
3870 : #ifdef JS_THREADSAFE
3871 0 : rt->gcHelperThread.waitBackgroundSweepEnd();
3872 : #endif
3873 0 : AutoUnlockGC unlock(rt);
3874 :
3875 0 : for (js::GCChunkSet::Range r = rt->gcChunkSet.all(); !r.empty(); r.popFront())
3876 0 : chunkCallback(rt, data, r.front());
3877 0 : }
3878 :
3879 : void
3880 0 : IterateCells(JSRuntime *rt, JSCompartment *compartment, AllocKind thingKind,
3881 : void *data, IterateCellCallback cellCallback)
3882 : {
3883 : /* :XXX: Any way to share this preamble with IterateCompartmentsArenasCells? */
3884 0 : JS_ASSERT(!rt->gcRunning);
3885 :
3886 0 : AutoLockGC lock(rt);
3887 0 : AutoHeapSession session(rt);
3888 : #ifdef JS_THREADSAFE
3889 0 : rt->gcHelperThread.waitBackgroundSweepEnd();
3890 : #endif
3891 0 : AutoUnlockGC unlock(rt);
3892 :
3893 0 : AutoCopyFreeListToArenas copy(rt);
3894 :
3895 0 : JSGCTraceKind traceKind = MapAllocToTraceKind(thingKind);
3896 0 : size_t thingSize = Arena::thingSize(thingKind);
3897 :
3898 0 : if (compartment) {
3899 0 : for (CellIterUnderGC i(compartment, thingKind); !i.done(); i.next())
3900 0 : cellCallback(rt, data, i.getCell(), traceKind, thingSize);
3901 : } else {
3902 0 : for (CompartmentsIter c(rt); !c.done(); c.next()) {
3903 0 : for (CellIterUnderGC i(c, thingKind); !i.done(); i.next())
3904 0 : cellCallback(rt, data, i.getCell(), traceKind, thingSize);
3905 : }
3906 : }
3907 0 : }
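/*
 * Illustrative sketch (editorial addition): a hypothetical callback that
 * counts the live cells of one AllocKind via IterateCells above. The callback
 * signature is reconstructed from the call sites in this file and should be
 * treated as an assumption rather than a verbatim copy of the public header.
 */
#if 0
static void
CountCellsExample(JSRuntime *rt, void *data, void *thing,
                  JSGCTraceKind traceKind, size_t thingSize)
{
    size_t *counter = static_cast<size_t *>(data);
    ++*counter;
}

static size_t
CountScriptsExample(JSRuntime *rt)
{
    size_t count = 0;
    /* A NULL compartment means "iterate every compartment"; see above. */
    IterateCells(rt, NULL, FINALIZE_SCRIPT, &count, CountCellsExample);
    return count;
}
#endif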
3908 :
3909 : namespace gc {
3910 :
3911 : JSCompartment *
3912 22524 : NewCompartment(JSContext *cx, JSPrincipals *principals)
3913 : {
3914 22524 : JSRuntime *rt = cx->runtime;
3915 22524 : JS_AbortIfWrongThread(rt);
3916 :
3917 22524 : JSCompartment *compartment = cx->new_<JSCompartment>(rt);
3918 22524 : if (compartment && compartment->init(cx)) {
3919 : // Any compartment with the trusted principals -- and there can be
3920 : // multiple -- is a system compartment.
3921 22524 : compartment->isSystemCompartment = principals && rt->trustedPrincipals() == principals;
3922 22524 : if (principals) {
3923 3 : compartment->principals = principals;
3924 3 : JS_HoldPrincipals(principals);
3925 : }
3926 :
3927 22524 : compartment->setGCLastBytes(8192, 8192, GC_NORMAL);
3928 :
3929 : /*
3930 : * Before reporting the OOM condition, |lock| needs to be cleaned up,
3931 : * hence the scoping.
3932 : */
3933 : {
3934 45048 : AutoLockGC lock(rt);
3935 22524 : if (rt->compartments.append(compartment))
3936 22524 : return compartment;
3937 : }
3938 :
3939 0 : js_ReportOutOfMemory(cx);
3940 : }
3941 0 : Foreground::delete_(compartment);
3942 0 : return NULL;
3943 : }
3944 :
3945 : void
3946 10664 : RunDebugGC(JSContext *cx)
3947 : {
3948 : #ifdef JS_GC_ZEAL
3949 10664 : PrepareForDebugGC(cx->runtime);
3950 10664 : RunLastDitchGC(cx, gcreason::DEBUG_GC);
3951 : #endif
3952 10664 : }
3953 :
3954 : void
3955 0 : SetDeterministicGC(JSContext *cx, bool enabled)
3956 : {
3957 : #ifdef JS_GC_ZEAL
3958 0 : JSRuntime *rt = cx->runtime;
3959 0 : rt->gcDeterministicOnly = enabled;
3960 : #endif
3961 0 : }
3962 :
3963 : #if defined(DEBUG) && defined(JSGC_ROOT_ANALYSIS) && !defined(JS_THREADSAFE)
3964 :
3965 : static void
3966 : CheckStackRoot(JSTracer *trc, uintptr_t *w)
3967 : {
3968 : /* Mark memory as defined for valgrind, as in MarkWordConservatively. */
3969 : #ifdef JS_VALGRIND
3970 : VALGRIND_MAKE_MEM_DEFINED(&w, sizeof(w));
3971 : #endif
3972 :
3973 : ConservativeGCTest test = MarkIfGCThingWord(trc, *w, DONT_MARK_THING);
3974 :
3975 : if (test == CGCT_VALID) {
3976 : JSContext *iter = NULL;
3977 : bool matched = false;
3978 : JSRuntime *rt = trc->runtime;
3979 : for (unsigned i = 0; i < THING_ROOT_COUNT; i++) {
3980 : Root<Cell*> *rooter = rt->thingGCRooters[i];
3981 : while (rooter) {
3982 : if (rooter->address() == (Cell **) w)
3983 : matched = true;
3984 : rooter = rooter->previous();
3985 : }
3986 : }
3987 : CheckRoot *check = rt->checkGCRooters;
3988 : while (check) {
3989 : if (check->contains(static_cast<uint8_t*>(w), sizeof(w)))
3990 : matched = true;
3991 : check = check->previous();
3992 : }
3993 : if (!matched) {
3994 : /*
3995 : * Only poison the last byte in the word. It is easy to get
3996 : * accidental collisions when a value that does not occupy a full
3997 : * word is used to overwrite a now-dead GC thing pointer. In this
3998 : * case we want to avoid damaging the smaller value.
3999 : */
4000 : PoisonPtr(w);
4001 : }
4002 : }
4003 : }
4004 :
4005 : static void
4006 : CheckStackRootsRange(JSTracer *trc, uintptr_t *begin, uintptr_t *end)
4007 : {
4008 : JS_ASSERT(begin <= end);
4009 : for (uintptr_t *i = begin; i != end; ++i)
4010 : CheckStackRoot(trc, i);
4011 : }
4012 :
4013 : void
4014 : CheckStackRoots(JSContext *cx)
4015 : {
4016 : AutoCopyFreeListToArenas copy(cx->runtime);
4017 :
4018 : JSTracer checker;
4019 : JS_TracerInit(&checker, cx, EmptyMarkCallback);
4020 :
4021 : ThreadData *td = JS_THREAD_DATA(cx);
4022 :
4023 : ConservativeGCThreadData *ctd = &td->conservativeGC;
4024 : ctd->recordStackTop();
4025 :
4026 : JS_ASSERT(ctd->hasStackToScan());
4027 : uintptr_t *stackMin, *stackEnd;
4028 : #if JS_STACK_GROWTH_DIRECTION > 0
4029 : stackMin = td->nativeStackBase;
4030 : stackEnd = ctd->nativeStackTop;
4031 : #else
4032 : stackMin = ctd->nativeStackTop + 1;
4033 : stackEnd = td->nativeStackBase;
4034 : #endif
4035 :
4036 : JS_ASSERT(stackMin <= stackEnd);
4037 : CheckStackRootsRange(&checker, stackMin, stackEnd);
4038 : CheckStackRootsRange(&checker, ctd->registerSnapshot.words,
4039 : ArrayEnd(ctd->registerSnapshot.words));
4040 : }
4041 :
4042 : #endif /* DEBUG && JSGC_ROOT_ANALYSIS && !JS_THREADSAFE */
4043 :
4044 : #ifdef JS_GC_ZEAL
4045 :
4046 : /*
4047 : * Write barrier verification
4048 : *
4049 : * The next few functions are for incremental write barrier verification. When
4050 : * StartVerifyBarriers is called, a snapshot is taken of all objects in the GC
4051 : * heap and saved in an explicit graph data structure. Later, EndVerifyBarriers
4052 : * traverses the heap again. Any pointer values that were in the snapshot and
4053 : * are no longer found must be marked; otherwise an assertion triggers. Note
4054 : * that we must not GC in between starting and finishing a verification phase.
4055 : *
4056 : * The VerifyBarriers function is a shorthand. It checks whether a verification
4057 : * phase is currently running; if not, it starts one, and otherwise it ends the
4058 : * current phase.
4059 : *
4060 : * The user can adjust the frequency of verifications, which causes
4061 : * MaybeVerifyBarriers to be a no-op for all but one out of every N calls.
4062 : * However, if the |always| parameter is true, it starts a new phase no matter what.
4063 : */
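/*
 * Illustrative sketch (editorial addition): the verification lifecycle the
 * comment above describes. The sequencing below is hypothetical; the real
 * drivers are the zeal machinery (ZealVerifierValue) and the call sites of
 * MaybeVerifyBarriers in the engine.
 */
#if 0
static void
ExampleVerificationCycle(JSContext *cx)
{
    StartVerifyBarriers(cx);   /* snapshot the heap, enable barriers */
    /* ... JS runs; write barriers record overwritten edges ... */
    EndVerifyBarriers(cx);     /* re-walk the heap and assert the invariant */

    /* Or, as done periodically under the verifier zeal mode: */
    MaybeVerifyBarriers(cx, /* always = */ false);
}
#endif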
4064 :
4065 : struct EdgeValue
4066 : {
4067 : void *thing;
4068 : JSGCTraceKind kind;
4069 : char *label;
4070 : };
4071 :
4072 : struct VerifyNode
4073 : {
4074 : void *thing;
4075 : JSGCTraceKind kind;
4076 : uint32_t count;
4077 : EdgeValue edges[1];
4078 : };
4079 :
4080 : typedef HashMap<void *, VerifyNode *, DefaultHasher<void *>, SystemAllocPolicy> NodeMap;
4081 :
4082 : /*
4083 : * The verifier data structures are simple. The entire graph is stored in a
4084 : * single block of memory. At the beginning is a VerifyNode for the root
4085 : * node. It is followed by a sequence of EdgeValues--the exact number is given
4086 : * in the node. After the edges come more nodes and their edges.
4087 : *
4088 : * The edgeptr and term fields are used to allocate out of the block of memory
4089 : * for the graph. If we run out of memory (i.e., if edgeptr goes beyond term),
4090 : * we just abandon the verification.
4091 : *
4092 : * The nodemap field is a hashtable that maps from the address of the GC thing
4093 : * to the VerifyNode that represents it.
4094 : */
4095 : struct VerifyTracer : JSTracer {
4096 : /* The gcNumber when the verification began. */
4097 : uint64_t number;
4098 :
4099 : /* This counts up to gcZealFrequency to decide whether to verify. */
4100 : uint32_t count;
4101 :
4102 : /* This graph represents the initial GC "snapshot". */
4103 : VerifyNode *curnode;
4104 : VerifyNode *root;
4105 : char *edgeptr;
4106 : char *term;
4107 : NodeMap nodemap;
4108 :
4109 1510 : VerifyTracer() : root(NULL) {}
4110 1510 : ~VerifyTracer() { js_free(root); }
4111 : };
4112 :
4113 : /*
4114 : * This function builds up the heap snapshot by adding edges to the current
4115 : * node.
4116 : */
4117 : static void
4118 18958972 : AccumulateEdge(JSTracer *jstrc, void **thingp, JSGCTraceKind kind)
4119 : {
4120 18958972 : VerifyTracer *trc = (VerifyTracer *)jstrc;
4121 :
4122 18958972 : trc->edgeptr += sizeof(EdgeValue);
4123 18958972 : if (trc->edgeptr >= trc->term) {
4124 0 : trc->edgeptr = trc->term;
4125 0 : return;
4126 : }
4127 :
4128 18958972 : VerifyNode *node = trc->curnode;
4129 18958972 : uint32_t i = node->count;
4130 :
4131 18958972 : node->edges[i].thing = *thingp;
4132 18958972 : node->edges[i].kind = kind;
4133 18958972 : node->edges[i].label = trc->debugPrinter ? NULL : (char *)trc->debugPrintArg;
4134 18958972 : node->count++;
4135 : }
4136 :
4137 : static VerifyNode *
4138 18960482 : MakeNode(VerifyTracer *trc, void *thing, JSGCTraceKind kind)
4139 : {
4140 37920964 : NodeMap::AddPtr p = trc->nodemap.lookupForAdd(thing);
4141 18960482 : if (!p) {
4142 11519472 : VerifyNode *node = (VerifyNode *)trc->edgeptr;
4143 11519472 : trc->edgeptr += sizeof(VerifyNode) - sizeof(EdgeValue);
4144 11519472 : if (trc->edgeptr >= trc->term) {
4145 0 : trc->edgeptr = trc->term;
4146 0 : return NULL;
4147 : }
4148 :
4149 11519472 : node->thing = thing;
4150 11519472 : node->count = 0;
4151 11519472 : node->kind = kind;
4152 11519472 : trc->nodemap.add(p, thing, node);
4153 11519472 : return node;
4154 : }
4155 7441010 : return NULL;
4156 : }
4157 :
4158 : static VerifyNode *
4160 21774610 : NextNode(VerifyNode *node)
4161 : {
4162 21774610 : if (node->count == 0)
4163 14613271 : return (VerifyNode *)((char *)node + sizeof(VerifyNode) - sizeof(EdgeValue));
4164 : else
4165 : return (VerifyNode *)((char *)node + sizeof(VerifyNode) +
4166 7161339 : sizeof(EdgeValue)*(node->count - 1));
4167 : }
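/*
 * Worked example (editorial addition): because VerifyNode ends in a
 * one-element edges[] array, a node with N > 0 edges occupies
 *     sizeof(VerifyNode) + (N - 1) * sizeof(EdgeValue)
 * bytes, while a node with no edges occupies
 *     sizeof(VerifyNode) - sizeof(EdgeValue)
 * bytes. NextNode above steps over whichever of these applies, and MakeNode
 * advances edgeptr by the zero-edge size when it allocates a fresh node;
 * AccumulateEdge then grows the current node by sizeof(EdgeValue) per edge.
 */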
4168 :
4169 : static void
4170 1640 : StartVerifyBarriers(JSContext *cx)
4171 : {
4172 1640 : JSRuntime *rt = cx->runtime;
4173 :
4174 1640 : if (rt->gcVerifyData || rt->gcIncrementalState != NO_INCREMENTAL)
4175 0 : return;
4176 :
4177 3280 : AutoLockGC lock(rt);
4178 3280 : AutoHeapSession session(rt);
4179 :
4180 1640 : if (!IsIncrementalGCSafe(rt))
4181 : return;
4182 :
4183 : #ifdef JS_THREADSAFE
4184 1510 : rt->gcHelperThread.waitBackgroundSweepOrAllocEnd();
4185 : #endif
4186 :
4187 3020 : AutoUnlockGC unlock(rt);
4188 :
4189 3020 : AutoCopyFreeListToArenas copy(rt);
4190 1510 : RecordNativeStackTopForGC(rt);
4191 :
4192 4430 : for (GCChunkSet::Range r(rt->gcChunkSet.all()); !r.empty(); r.popFront())
4193 2920 : r.front()->bitmap.clear();
4194 :
4195 6810 : for (CompartmentsIter c(rt); !c.done(); c.next())
4196 5300 : c->discardJitCode(rt->defaultFreeOp());
4197 :
4198 1510 : VerifyTracer *trc = new (js_malloc(sizeof(VerifyTracer))) VerifyTracer;
4199 :
4200 1510 : rt->gcNumber++;
4201 1510 : trc->number = rt->gcNumber;
4202 1510 : trc->count = 0;
4203 :
4204 1510 : JS_TracerInit(trc, rt, AccumulateEdge);
4205 :
4206 1510 : PurgeRuntime(trc);
4207 :
4208 1510 : const size_t size = 64 * 1024 * 1024;
4209 1510 : trc->root = (VerifyNode *)js_malloc(size);
4210 1510 : JS_ASSERT(trc->root);
4211 1510 : trc->edgeptr = (char *)trc->root;
4212 1510 : trc->term = trc->edgeptr + size;
4213 :
4214 1510 : trc->nodemap.init();
4215 :
4216 : /* Create the root node. */
4217 1510 : trc->curnode = MakeNode(trc, NULL, JSGCTraceKind(0));
4218 :
4219 : /* We want MarkRuntime to save the roots to gcSavedRoots. */
4220 1510 : rt->gcIncrementalState = MARK_ROOTS;
4221 :
4222 : /* Make all the roots be edges emanating from the root node. */
4223 1510 : MarkRuntime(trc);
4224 :
4225 1510 : VerifyNode *node = trc->curnode;
4226 1510 : if (trc->edgeptr == trc->term)
4227 0 : goto oom;
4228 :
4229 : /* For each edge, make a node for it if one doesn't already exist. */
4230 11522492 : while ((char *)node < trc->edgeptr) {
4231 30478444 : for (uint32_t i = 0; i < node->count; i++) {
4232 18958972 : EdgeValue &e = node->edges[i];
4233 18958972 : VerifyNode *child = MakeNode(trc, e.thing, e.kind);
4234 18958972 : if (child) {
4235 11517962 : trc->curnode = child;
4236 11517962 : JS_TraceChildren(trc, e.thing, e.kind);
4237 : }
4238 18958972 : if (trc->edgeptr == trc->term)
4239 0 : goto oom;
4240 : }
4241 :
4242 11519472 : node = NextNode(node);
4243 : }
4244 :
4245 1510 : rt->gcVerifyData = trc;
4246 1510 : rt->gcIncrementalState = MARK;
4247 1510 : rt->gcMarker.start(rt);
4248 6810 : for (CompartmentsIter c(rt); !c.done(); c.next()) {
4249 5300 : c->needsBarrier_ = true;
4250 5300 : c->arenas.prepareForIncrementalGC(rt);
4251 : }
4252 :
4253 : return;
4254 :
4255 : oom:
4256 0 : rt->gcIncrementalState = NO_INCREMENTAL;
4257 0 : trc->~VerifyTracer();
4258 1510 : js_free(trc);
4259 : }
4260 :
4261 : static void
4262 762 : MarkFromAutorooter(JSTracer *jstrc, void **thingp, JSGCTraceKind kind)
4263 : {
4264 762 : static_cast<Cell *>(*thingp)->markIfUnmarked();
4265 762 : }
4266 :
4267 : static bool
4268 2109 : IsMarkedOrAllocated(Cell *cell)
4269 : {
4270 2109 : return cell->isMarked() || cell->arenaHeader()->allocatedDuringIncremental;
4271 : }
4272 :
4273 : const static uint32_t MAX_VERIFIER_EDGES = 1000;
4274 :
4275 : /*
4276 : * This function is called by EndVerifyBarriers for every heap edge. If the edge
4277 : * already existed in the original snapshot, we "cancel it out" by overwriting
4278 : * it with NULL. EndVerifyBarriers later asserts that the remaining non-NULL
4279 : * edges (i.e., the ones from the original snapshot that must have been
4280 : * modified) must point to marked objects.
4281 : */
4282 : static void
4283 10812890 : CheckEdge(JSTracer *jstrc, void **thingp, JSGCTraceKind kind)
4284 : {
4285 10812890 : VerifyTracer *trc = (VerifyTracer *)jstrc;
4286 10812890 : VerifyNode *node = trc->curnode;
4287 :
4288 : /* Avoid n^2 behavior. */
4289 10812890 : if (node->count > MAX_VERIFIER_EDGES)
4290 0 : return;
4291 :
4292 50434420 : for (uint32_t i = 0; i < node->count; i++) {
4293 50432869 : if (node->edges[i].thing == *thingp) {
4294 10811339 : JS_ASSERT(node->edges[i].kind == kind);
4295 10811339 : node->edges[i].thing = NULL;
4296 10811339 : return;
4297 : }
4298 : }
4299 :
4300 : /*
4301 : * Anything that is reachable now should have been reachable before, or else
4302 : * it should be marked.
4303 : */
4304 1551 : NodeMap::Ptr p = trc->nodemap.lookup(*thingp);
4305 1551 : JS_ASSERT_IF(!p, IsMarkedOrAllocated(static_cast<Cell *>(*thingp)));
4306 : }
4307 :
4308 : static void
4309 6273212 : CheckReachable(JSTracer *jstrc, void **thingp, JSGCTraceKind kind)
4310 : {
4311 6273212 : VerifyTracer *trc = (VerifyTracer *)jstrc;
4312 6273212 : NodeMap::Ptr p = trc->nodemap.lookup(*thingp);
4313 6273212 : JS_ASSERT_IF(!p, IsMarkedOrAllocated(static_cast<Cell *>(*thingp)));
4314 6273212 : }
4315 :
4316 : static void
4317 1410 : EndVerifyBarriers(JSContext *cx)
4318 : {
4319 1410 : JSRuntime *rt = cx->runtime;
4320 :
4321 2820 : AutoLockGC lock(rt);
4322 2820 : AutoHeapSession session(rt);
4323 :
4324 : #ifdef JS_THREADSAFE
4325 1410 : rt->gcHelperThread.waitBackgroundSweepOrAllocEnd();
4326 : #endif
4327 :
4328 2820 : AutoUnlockGC unlock(rt);
4329 :
4330 2820 : AutoCopyFreeListToArenas copy(rt);
4331 1410 : RecordNativeStackTopForGC(rt);
4332 :
4333 1410 : VerifyTracer *trc = (VerifyTracer *)rt->gcVerifyData;
4334 :
4335 1410 : if (!trc)
4336 : return;
4337 :
4338 1410 : bool compartmentCreated = false;
4339 :
4340 : /* We need to disable barriers before tracing, which may invoke barriers. */
4341 6691 : for (CompartmentsIter c(rt); !c.done(); c.next()) {
4342 5281 : if (!c->needsBarrier_)
4343 81 : compartmentCreated = true;
4344 :
4345 5281 : c->needsBarrier_ = false;
4346 : }
4347 :
4348 : /*
4349 : * We need to bump gcNumber so that the methodjit knows that jitcode has
4350 : * been discarded.
4351 : */
4352 1410 : JS_ASSERT(trc->number == rt->gcNumber);
4353 1410 : rt->gcNumber++;
4354 :
4355 6691 : for (CompartmentsIter c(rt); !c.done(); c.next())
4356 5281 : c->discardJitCode(rt->defaultFreeOp());
4357 :
4358 1410 : rt->gcVerifyData = NULL;
4359 1410 : rt->gcIncrementalState = NO_INCREMENTAL;
4360 :
4361 1410 : JS_TracerInit(trc, rt, MarkFromAutorooter);
4362 :
4363 1410 : AutoGCRooter::traceAll(trc);
4364 :
4365 1410 : if (!compartmentCreated && IsIncrementalGCSafe(rt)) {
4366 : /*
4367 : * Verify that all the current roots were reachable previously, or else
4368 : * are marked.
4369 : */
4370 1314 : JS_TracerInit(trc, rt, CheckReachable);
4371 1314 : MarkRuntime(trc, true);
4372 :
4373 1314 : JS_TracerInit(trc, rt, CheckEdge);
4374 :
4375 : /* Start after the roots. */
4376 1314 : VerifyNode *node = NextNode(trc->root);
4377 10256452 : while ((char *)node < trc->edgeptr) {
4378 10253824 : trc->curnode = node;
4379 10253824 : JS_TraceChildren(trc, node->thing, node->kind);
4380 :
4381 10253824 : if (node->count <= MAX_VERIFIER_EDGES) {
4382 21065957 : for (uint32_t i = 0; i < node->count; i++) {
4383 10812133 : void *thing = node->edges[i].thing;
4384 10812133 : JS_ASSERT_IF(thing, IsMarkedOrAllocated(static_cast<Cell *>(thing)));
4385 : }
4386 : }
4387 :
4388 10253824 : node = NextNode(node);
4389 : }
4390 : }
4391 :
4392 1410 : rt->gcMarker.reset();
4393 1410 : rt->gcMarker.stop();
4394 :
4395 1410 : trc->~VerifyTracer();
4396 2820 : js_free(trc);
4397 : }
4398 :
4399 : void
4400 18761 : FinishVerifier(JSRuntime *rt)
4401 : {
4402 18761 : if (VerifyTracer *trc = (VerifyTracer *)rt->gcVerifyData) {
4403 100 : trc->~VerifyTracer();
4404 100 : js_free(trc);
4405 : }
4406 18761 : }
4407 :
4408 : void
4409 18 : VerifyBarriers(JSContext *cx)
4410 : {
4411 18 : JSRuntime *rt = cx->runtime;
4412 18 : if (rt->gcVerifyData)
4413 0 : EndVerifyBarriers(cx);
4414 : else
4415 18 : StartVerifyBarriers(cx);
4416 18 : }
4417 :
4418 : void
4419 -2051281982 : MaybeVerifyBarriers(JSContext *cx, bool always)
4420 : {
4421 -2051281982 : if (cx->runtime->gcZeal() != ZealVerifierValue) {
4422 -2051290904 : if (cx->runtime->gcVerifyData)
4423 18 : EndVerifyBarriers(cx);
4424 -2051290904 : return;
4425 : }
4426 :
4427 8922 : uint32_t freq = cx->runtime->gcZealFrequency;
4428 :
4429 8922 : JSRuntime *rt = cx->runtime;
4430 8922 : if (VerifyTracer *trc = (VerifyTracer *)rt->gcVerifyData) {
4431 8692 : if (++trc->count < freq && !always)
4432 7430 : return;
4433 :
4434 1262 : EndVerifyBarriers(cx);
4435 : }
4436 1492 : StartVerifyBarriers(cx);
4437 : }
4438 :
4439 : #endif /* JS_GC_ZEAL */
4440 :
4441 : } /* namespace gc */
4442 :
4443 0 : static void ReleaseAllJITCode(JSContext *cx)
4444 : {
4445 : #ifdef JS_METHODJIT
4446 0 : for (GCCompartmentsIter c(cx->runtime); !c.done(); c.next()) {
4447 0 : mjit::ClearAllFrames(c);
4448 0 : for (CellIter i(c, FINALIZE_SCRIPT); !i.done(); i.next()) {
4449 0 : JSScript *script = i.get<JSScript>();
4450 0 : mjit::ReleaseScriptCode(cx->runtime->defaultFreeOp(), script);
4451 : }
4452 : }
4453 : #endif
4454 0 : }
4455 :
4456 : /*
4457 : * There are three possible PCCount profiling states:
4458 : *
4459 : * 1. None: Neither scripts nor the runtime have count information.
4460 : * 2. Profile: Active scripts have count information, the runtime does not.
4461 : * 3. Query: Scripts do not have count information, the runtime does.
4462 : *
4463 : * When starting to profile scripts, counting begins immediately, with all JIT
4464 : * code discarded and recompiled with counts as necessary. Active interpreter
4465 : * frames will not begin profiling until they begin executing another script
4466 : * (via a call or return).
4467 : *
4468 : * The API functions below manage transitions between states according to
4469 : * the following table.
4470 : *
4471 : * Old State
4472 : * -------------------------
4473 : * Function None Profile Query
4474 : * --------
4475 : * StartPCCountProfiling Profile Profile Profile
4476 : * StopPCCountProfiling None Query Query
4477 : * PurgePCCounts None None None
4478 : */
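/*
 * Illustrative sketch (editorial addition): the state transitions from the
 * table above, exercised in order. Each call is an entry point defined below;
 * the sequencing itself is hypothetical.
 */
#if 0
static void
ExamplePCCountUsage(JSContext *cx)
{
    StartPCCountProfiling(cx);   /* None    -> Profile: scripts gain counts */
    /* ... run some scripts ... */
    StopPCCountProfiling(cx);    /* Profile -> Query: counts move to the runtime */
    /* ... inspect rt->scriptAndCountsVector ... */
    PurgePCCounts(cx);           /* Query   -> None: release the count data */
}
#endif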
4479 :
4480 : static void
4481 0 : ReleaseScriptCounts(JSContext *cx)
4482 : {
4483 0 : JSRuntime *rt = cx->runtime;
4484 0 : JS_ASSERT(rt->scriptAndCountsVector);
4485 :
4486 0 : ScriptAndCountsVector &vec = *rt->scriptAndCountsVector;
4487 :
4488 0 : for (size_t i = 0; i < vec.length(); i++)
4489 0 : vec[i].scriptCounts.destroy(cx);
4490 :
4491 0 : cx->delete_(rt->scriptAndCountsVector);
4492 0 : rt->scriptAndCountsVector = NULL;
4493 0 : }
4494 :
4495 : JS_FRIEND_API(void)
4496 0 : StartPCCountProfiling(JSContext *cx)
4497 : {
4498 0 : JSRuntime *rt = cx->runtime;
4499 :
4500 0 : if (rt->profilingScripts)
4501 0 : return;
4502 :
4503 0 : if (rt->scriptAndCountsVector)
4504 0 : ReleaseScriptCounts(cx);
4505 :
4506 0 : ReleaseAllJITCode(cx);
4507 :
4508 0 : rt->profilingScripts = true;
4509 : }
4510 :
4511 : JS_FRIEND_API(void)
4512 0 : StopPCCountProfiling(JSContext *cx)
4513 : {
4514 0 : JSRuntime *rt = cx->runtime;
4515 :
4516 0 : if (!rt->profilingScripts)
4517 0 : return;
4518 0 : JS_ASSERT(!rt->scriptAndCountsVector);
4519 :
4520 0 : ReleaseAllJITCode(cx);
4521 :
4522 0 : ScriptAndCountsVector *vec = cx->new_<ScriptAndCountsVector>(SystemAllocPolicy());
4523 0 : if (!vec)
4524 0 : return;
4525 :
4526 0 : for (GCCompartmentsIter c(rt); !c.done(); c.next()) {
4527 0 : for (CellIter i(c, FINALIZE_SCRIPT); !i.done(); i.next()) {
4528 0 : JSScript *script = i.get<JSScript>();
4529 0 : if (script->scriptCounts && script->types) {
4530 0 : ScriptAndCounts info;
4531 0 : info.script = script;
4532 0 : info.scriptCounts.steal(script->scriptCounts);
4533 0 : if (!vec->append(info))
4534 0 : info.scriptCounts.destroy(cx);
4535 : }
4536 : }
4537 : }
4538 :
4539 0 : rt->profilingScripts = false;
4540 0 : rt->scriptAndCountsVector = vec;
4541 : }
4542 :
4543 : JS_FRIEND_API(void)
4544 0 : PurgePCCounts(JSContext *cx)
4545 : {
4546 0 : JSRuntime *rt = cx->runtime;
4547 :
4548 0 : if (!rt->scriptAndCountsVector)
4549 0 : return;
4550 0 : JS_ASSERT(!rt->profilingScripts);
4551 :
4552 0 : ReleaseScriptCounts(cx);
4553 : }
4554 :
4555 : } /* namespace js */
4556 :
4557 : JS_PUBLIC_API(void)
4558 0 : JS_IterateCompartments(JSRuntime *rt, void *data,
4559 : JSIterateCompartmentCallback compartmentCallback)
4560 : {
4561 0 : JS_ASSERT(!rt->gcRunning);
4562 :
4563 0 : AutoLockGC lock(rt);
4564 0 : AutoHeapSession session(rt);
4565 : #ifdef JS_THREADSAFE
4566 0 : rt->gcHelperThread.waitBackgroundSweepOrAllocEnd();
4567 : #endif
4568 0 : AutoUnlockGC unlock(rt);
4569 :
4570 0 : for (CompartmentsIter c(rt); !c.done(); c.next())
4571 0 : (*compartmentCallback)(rt, data, c);
4572 0 : }
4573 :
4574 : #if JS_HAS_XML_SUPPORT
4575 : extern size_t sE4XObjectsCreated;
4576 :
4577 : JSXML *
4578 4729195 : js_NewGCXML(JSContext *cx)
4579 : {
4580 4729195 : if (!cx->runningWithTrustedPrincipals())
4581 4729075 : ++sE4XObjectsCreated;
4582 :
4583 4729195 : return NewGCThing<JSXML>(cx, js::gc::FINALIZE_XML, sizeof(JSXML));
4584 : }
4585 : #endif
4586 :
|