summary | refs | log | tree | commit | diff | stats
path: root/rhbz-911314.patch
diff options
context:
space:
mode:
Diffstat (limited to 'rhbz-911314.patch')
-rw-r--r--  rhbz-911314.patch  547
1 file changed, 547 insertions, 0 deletions
diff --git a/rhbz-911314.patch b/rhbz-911314.patch
new file mode 100644
index 0000000..4f1cfa0
--- /dev/null
+++ b/rhbz-911314.patch
@@ -0,0 +1,547 @@
+Use the runtime page size to control arena decommit (RHBZ#911314)
+Return success when decommit is disabled
+
+Author: Gustavo Luiz Duarte <gustavold@linux.vnet.ibm.com>
+Based on Terrence Cole's patch v0 on MOZ#840242
+
+Index: mozilla-release/js/src/gc/Heap.h
+===================================================================
+--- mozilla-release.orig/js/src/gc/Heap.h
++++ mozilla-release/js/src/gc/Heap.h
+@@ -800,7 +800,7 @@ struct Chunk
+
+ /* Search for a decommitted arena to allocate. */
+ unsigned findDecommittedArenaOffset();
+- ArenaHeader* fetchNextDecommittedArena();
++ ArenaHeader* fetchNextDecommittedArena(JSRuntime *rt);
+
+ public:
+ /* Unlink and return the freeArenasHead. */
+Index: mozilla-release/js/src/gc/Memory.cpp
+===================================================================
+--- mozilla-release.orig/js/src/gc/Memory.cpp
++++ mozilla-release/js/src/gc/Memory.cpp
+@@ -8,6 +8,7 @@
+ #include "mozilla/Assertions.h"
+
+ #include "jstypes.h"
++#include "jscntxt.h"
+
+ #include "js/Utility.h"
+ #include "gc/Memory.h"
+@@ -19,39 +20,34 @@ namespace gc {
+ extern const size_t PageSize;
+ extern const size_t ArenaSize;
+ static bool
+-DecommitEnabled()
++DecommitEnabled(JSRuntime *rt)
+ {
+- return PageSize == ArenaSize;
++ return rt->gcSystemPageSize == ArenaSize;
+ }
+
+ #if defined(XP_WIN)
+ #include "jswin.h"
+ #include <psapi.h>
+
+-static size_t AllocationGranularity = 0;
+-
+ void
+-InitMemorySubsystem()
++InitMemorySubsystem(JSRuntime *rt)
+ {
+ SYSTEM_INFO sysinfo;
+ GetSystemInfo(&sysinfo);
+- if (sysinfo.dwPageSize != PageSize) {
+- fprintf(stderr,"SpiderMonkey compiled with incorrect page size; please update js/public/HeapAPI.h.\n");
+- MOZ_CRASH();
+- }
+- AllocationGranularity = sysinfo.dwAllocationGranularity;
++ rt->gcSystemPageSize = sysinfo.dwPageSize;
++ rt->gcSystemAllocGranularity = sysinfo.dwAllocationGranularity;
+ }
+
+ void *
+-MapAlignedPages(size_t size, size_t alignment)
++MapAlignedPages(JSRuntime *rt, size_t size, size_t alignment)
+ {
+ JS_ASSERT(size >= alignment);
+ JS_ASSERT(size % alignment == 0);
+- JS_ASSERT(size % PageSize == 0);
+- JS_ASSERT(alignment % AllocationGranularity == 0);
++ JS_ASSERT(size % rt->gcSystemPageSize == 0);
++ JS_ASSERT(alignment % rt->gcSystemAllocGranularity == 0);
+
+ /* Special case: If we want allocation alignment, no further work is needed. */
+- if (alignment == AllocationGranularity) {
++ if (alignment == rt->gcSystemAllocGranularity) {
+ return VirtualAlloc(NULL, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
+ }
+
+@@ -75,7 +71,7 @@ MapAlignedPages(size_t size, size_t alig
+ if (!p)
+ return NULL;
+ void *chunkStart = (void *)(uintptr_t(p) + (alignment - (uintptr_t(p) % alignment)));
+- UnmapPages(p, size * 2);
++ UnmapPages(rt, p, size * 2);
+ p = VirtualAlloc(chunkStart, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
+
+ /* Failure here indicates a race with another thread, so try again. */
+@@ -86,26 +82,26 @@ MapAlignedPages(size_t size, size_t alig
+ }
+
+ void
+-UnmapPages(void *p, size_t size)
++UnmapPages(JSRuntime *rt, void *p, size_t size)
+ {
+ JS_ALWAYS_TRUE(VirtualFree(p, 0, MEM_RELEASE));
+ }
+
+ bool
+-MarkPagesUnused(void *p, size_t size)
++MarkPagesUnused(JSRuntime *rt, void *p, size_t size)
+ {
+- if (!DecommitEnabled())
+- return false;
++ if (!DecommitEnabled(rt))
++ return true;
+
+- JS_ASSERT(uintptr_t(p) % PageSize == 0);
++ JS_ASSERT(uintptr_t(p) % rt->gcSystemPageSize == 0);
+ LPVOID p2 = VirtualAlloc(p, size, MEM_RESET, PAGE_READWRITE);
+ return p2 == p;
+ }
+
+ bool
+-MarkPagesInUse(void *p, size_t size)
++MarkPagesInUse(JSRuntime *rt, void *p, size_t size)
+ {
+- JS_ASSERT(uintptr_t(p) % PageSize == 0);
++ JS_ASSERT(uintptr_t(p) % rt->gcSystemPageSize == 0);
+ return true;
+ }
+
+@@ -127,12 +123,13 @@ GetPageFaultCount()
+ #define OS2_MAX_RECURSIONS 16
+
+ void
+-InitMemorySubsystem()
++InitMemorySubsystem(JSRuntime *rt)
+ {
++ rt->gcSystemPageSize = rt->gcSystemAllocGranularity = ArenaSize;
+ }
+
+ void
+-UnmapPages(void *addr, size_t size)
++UnmapPages(JSRuntime *rt, void *addr, size_t size)
+ {
+ if (!DosFreeMem(addr))
+ return;
+@@ -153,7 +150,7 @@ UnmapPages(void *addr, size_t size)
+ }
+
+ static void *
+-MapAlignedPagesRecursively(size_t size, size_t alignment, int& recursions)
++MapAlignedPagesRecursively(JSRuntime *rt, size_t size, size_t alignment, int& recursions)
+ {
+ if (++recursions >= OS2_MAX_RECURSIONS)
+ return NULL;
+@@ -179,7 +176,7 @@ MapAlignedPagesRecursively(size_t size,
+ unsigned long rc = DosQueryMem(&(static_cast<char*>(tmp))[size],
+ &cb, &flags);
+ if (!rc && (flags & PAG_FREE) && cb >= filler) {
+- UnmapPages(tmp, 0);
++ UnmapPages(rt, tmp, 0);
+ if (DosAllocMem(&tmp, filler,
+ OBJ_ANY | PAG_COMMIT | PAG_READ | PAG_WRITE)) {
+ JS_ALWAYS_TRUE(DosAllocMem(&tmp, filler,
+@@ -187,19 +184,19 @@ MapAlignedPagesRecursively(size_t size,
+ }
+ }
+
+- void *p = MapAlignedPagesRecursively(size, alignment, recursions);
+- UnmapPages(tmp, 0);
++ void *p = MapAlignedPagesRecursively(rt, size, alignment, recursions);
++ UnmapPages(rt, tmp, 0);
+
+ return p;
+ }
+
+ void *
+-MapAlignedPages(size_t size, size_t alignment)
++MapAlignedPages(JSRuntime *rt, size_t size, size_t alignment)
+ {
+ JS_ASSERT(size >= alignment);
+ JS_ASSERT(size % alignment == 0);
+- JS_ASSERT(size % PageSize == 0);
+- JS_ASSERT(alignment % PageSize == 0);
++ JS_ASSERT(size % rt->gcSystemPageSize == 0);
++ JS_ASSERT(alignment % rt->gcSystemAllocGranularity == 0);
+
+ int recursions = -1;
+
+@@ -208,7 +205,7 @@ MapAlignedPages(size_t size, size_t alig
+ * of the right size by recursively allocating blocks of unaligned
+ * free memory until only an aligned allocation is possible.
+ */
+- void *p = MapAlignedPagesRecursively(size, alignment, recursions);
++ void *p = MapAlignedPagesRecursively(rt, size, alignment, recursions);
+ if (p)
+ return p;
+
+@@ -230,16 +227,15 @@ MapAlignedPages(size_t size, size_t alig
+ }
+
+ bool
+-MarkPagesUnused(void *p, size_t size)
+-{
+- JS_ASSERT(uintptr_t(p) % PageSize == 0);
++MarkPagesUnused(JSRuntime *rt, void *p, size_t size) {
++ JS_ASSERT(uintptr_t(p) % rt->gcSystemPageSize == 0);
+ return true;
+ }
+
+ bool
+-MarkPagesInUse(void *p, size_t size)
++MarkPagesInUse(JSRuntime *rt, void *p, size_t size)
+ {
+- JS_ASSERT(uintptr_t(p) % PageSize == 0);
++ JS_ASSERT(uintptr_t(p) % rt->gcSystemPageSize == 0);
+ return true;
+ }
+
+@@ -259,17 +255,18 @@ GetPageFaultCount()
+ #endif
+
+ void
+-InitMemorySubsystem()
++InitMemorySubsystem(JSRuntime *rt)
+ {
++ rt->gcSystemPageSize = rt->gcSystemAllocGranularity = size_t(sysconf(_SC_PAGESIZE));
+ }
+
+ void *
+-MapAlignedPages(size_t size, size_t alignment)
++MapAlignedPages(JSRuntime *rt, size_t size, size_t alignment)
+ {
+ JS_ASSERT(size >= alignment);
+ JS_ASSERT(size % alignment == 0);
+- JS_ASSERT(size % PageSize == 0);
+- JS_ASSERT(alignment % PageSize == 0);
++ JS_ASSERT(size % rt->gcSystemPageSize == 0);
++ JS_ASSERT(alignment % rt->gcSystemAllocGranularity == 0);
+
+ int prot = PROT_READ | PROT_WRITE;
+ int flags = MAP_PRIVATE | MAP_ANON | MAP_ALIGN | MAP_NOSYNC;
+@@ -281,22 +278,22 @@ MapAlignedPages(size_t size, size_t alig
+ }
+
+ void
+-UnmapPages(void *p, size_t size)
++UnmapPages(JSRuntime *rt, void *p, size_t size)
+ {
+ JS_ALWAYS_TRUE(0 == munmap((caddr_t)p, size));
+ }
+
+ bool
+-MarkPagesUnused(void *p, size_t size)
++MarkPagesUnused(JSRuntime *rt, void *p, size_t size)
+ {
+- JS_ASSERT(uintptr_t(p) % PageSize == 0);
++ JS_ASSERT(uintptr_t(p) % rt->gcSystemPageSize == 0);
+ return true;
+ }
+
+ bool
+-MarkPagesInUse(void *p, size_t size)
++MarkPagesInUse(JSRuntime *rt, void *p, size_t size)
+ {
+- JS_ASSERT(uintptr_t(p) % PageSize == 0);
++ JS_ASSERT(uintptr_t(p) % rt->gcSystemPageSize == 0);
+ return true;
+ }
+
+@@ -314,27 +311,24 @@ GetPageFaultCount()
+ #include <unistd.h>
+
+ void
+-InitMemorySubsystem()
++InitMemorySubsystem(JSRuntime *rt)
+ {
+- if (size_t(sysconf(_SC_PAGESIZE)) != PageSize) {
+- fprintf(stderr,"SpiderMonkey compiled with incorrect page size; please update js/public/HeapAPI.h.\n");
+- MOZ_CRASH();
+- }
++ rt->gcSystemPageSize = rt->gcSystemAllocGranularity = size_t(sysconf(_SC_PAGESIZE));
+ }
+
+ void *
+-MapAlignedPages(size_t size, size_t alignment)
++MapAlignedPages(JSRuntime *rt, size_t size, size_t alignment)
+ {
+ JS_ASSERT(size >= alignment);
+ JS_ASSERT(size % alignment == 0);
+- JS_ASSERT(size % PageSize == 0);
+- JS_ASSERT(alignment % PageSize == 0);
++ JS_ASSERT(size % rt->gcSystemPageSize == 0);
++ JS_ASSERT(alignment % rt->gcSystemAllocGranularity == 0);
+
+ int prot = PROT_READ | PROT_WRITE;
+ int flags = MAP_PRIVATE | MAP_ANON;
+
+ /* Special case: If we want page alignment, no further work is needed. */
+- if (alignment == PageSize) {
++ if (alignment == rt->gcSystemAllocGranularity) {
+ return mmap(NULL, size, prot, flags, -1, 0);
+ }
+
+@@ -360,26 +354,26 @@ MapAlignedPages(size_t size, size_t alig
+ }
+
+ void
+-UnmapPages(void *p, size_t size)
++UnmapPages(JSRuntime *rt, void *p, size_t size)
+ {
+ JS_ALWAYS_TRUE(0 == munmap(p, size));
+ }
+
+ bool
+-MarkPagesUnused(void *p, size_t size)
++MarkPagesUnused(JSRuntime *rt, void *p, size_t size)
+ {
+- if (!DecommitEnabled())
+- return false;
++ if (!DecommitEnabled(rt))
++ return true;
+
+- JS_ASSERT(uintptr_t(p) % PageSize == 0);
++ JS_ASSERT(uintptr_t(p) % rt->gcSystemPageSize == 0);
+ int result = madvise(p, size, MADV_DONTNEED);
+ return result != -1;
+ }
+
+ bool
+-MarkPagesInUse(void *p, size_t size)
++MarkPagesInUse(JSRuntime *rt, void *p, size_t size)
+ {
+- JS_ASSERT(uintptr_t(p) % PageSize == 0);
++ JS_ASSERT(uintptr_t(p) % rt->gcSystemPageSize == 0);
+ return true;
+ }
+
+Index: mozilla-release/js/src/gc/Memory.h
+===================================================================
+--- mozilla-release.orig/js/src/gc/Memory.h
++++ mozilla-release/js/src/gc/Memory.h
+@@ -16,20 +16,20 @@ namespace gc {
+
+ // Sanity check that our compiled configuration matches the currently running
+ // instance and initialize any runtime data needed for allocation.
+-void InitMemorySubsystem();
++void InitMemorySubsystem(JSRuntime *rt);
+
+ // Allocate or deallocate pages from the system with the given alignment.
+-void *MapAlignedPages(size_t size, size_t alignment);
+-void UnmapPages(void *p, size_t size);
++void *MapAlignedPages(JSRuntime *rt, size_t size, size_t alignment);
++void UnmapPages(JSRuntime *rt, void *p, size_t size);
+
+ // Tell the OS that the given pages are not in use, so they should not
+ // be written to a paging file. This may be a no-op on some platforms.
+-bool MarkPagesUnused(void *p, size_t size);
++bool MarkPagesUnused(JSRuntime *rt, void *p, size_t size);
+
+ // Undo |MarkPagesUnused|: tell the OS that the given pages are of interest
+ // and should be paged in and out normally. This may be a no-op on some
+ // platforms.
+-bool MarkPagesInUse(void *p, size_t size);
++bool MarkPagesInUse(JSRuntime *rt, void *p, size_t size);
+
+ // Returns #(hard faults) + #(soft faults)
+ size_t GetPageFaultCount();
+Index: mozilla-release/js/src/jsapi.cpp
+===================================================================
+--- mozilla-release.orig/js/src/jsapi.cpp
++++ mozilla-release/js/src/jsapi.cpp
+@@ -1105,8 +1105,6 @@ JS_NewRuntime(uint32_t maxbytes, JSUseHe
+ #undef MSG_DEF
+ #endif /* DEBUG */
+
+- InitMemorySubsystem();
+-
+ if (!js::TlsPerThreadData.init())
+ return NULL;
+
+Index: mozilla-release/js/src/jscntxt.h
+===================================================================
+--- mozilla-release.orig/js/src/jscntxt.h
++++ mozilla-release/js/src/jscntxt.h
+@@ -847,6 +847,15 @@ struct JSRuntime : js::RuntimeFriendFiel
+ /* Stack of thread-stack-allocated GC roots. */
+ js::AutoGCRooter *autoGCRooters;
+
++ /*
++ * The GC can only safely decommit memory when the page size of the
++ * running process matches the compiled arena size.
++ */
++ size_t gcSystemPageSize;
++
++ /* The OS allocation granularity may not match the page size. */
++ size_t gcSystemAllocGranularity;
++
+ /* Strong references on scripts held for PCCount profiling API. */
+ js::ScriptAndCountsVector *scriptAndCountsVector;
+
+Index: mozilla-release/js/src/jsgc.cpp
+===================================================================
+--- mozilla-release.orig/js/src/jsgc.cpp
++++ mozilla-release/js/src/jsgc.cpp
+@@ -486,13 +486,13 @@ FinalizeArenas(FreeOp *fop,
+ }
+
+ static inline Chunk *
+-AllocChunk() {
+- return static_cast<Chunk *>(MapAlignedPages(ChunkSize, ChunkSize));
++AllocChunk(JSRuntime *rt) {
++ return static_cast<Chunk *>(MapAlignedPages(rt, ChunkSize, ChunkSize));
+ }
+
+ static inline void
+-FreeChunk(Chunk *p) {
+- UnmapPages(static_cast<void *>(p), ChunkSize);
++FreeChunk(JSRuntime *rt, Chunk *p) {
++ UnmapPages(rt, static_cast<void *>(p), ChunkSize);
+ }
+
+ inline bool
+@@ -582,25 +582,25 @@ ChunkPool::expire(JSRuntime *rt, bool re
+ }
+
+ static void
+-FreeChunkList(Chunk *chunkListHead)
++FreeChunkList(JSRuntime *rt, Chunk *chunkListHead)
+ {
+ while (Chunk *chunk = chunkListHead) {
+ JS_ASSERT(!chunk->info.numArenasFreeCommitted);
+ chunkListHead = chunk->info.next;
+- FreeChunk(chunk);
++ FreeChunk(rt, chunk);
+ }
+ }
+
+ void
+ ChunkPool::expireAndFree(JSRuntime *rt, bool releaseAll)
+ {
+- FreeChunkList(expire(rt, releaseAll));
++ FreeChunkList(rt, expire(rt, releaseAll));
+ }
+
+ /* static */ Chunk *
+ Chunk::allocate(JSRuntime *rt)
+ {
+- Chunk *chunk = static_cast<Chunk *>(AllocChunk());
++ Chunk *chunk = static_cast<Chunk *>(AllocChunk(rt));
+
+ #ifdef JSGC_ROOT_ANALYSIS
+ // Our poison pointers are not guaranteed to be invalid on 64-bit
+@@ -613,7 +613,7 @@ Chunk::allocate(JSRuntime *rt)
+ // were marked as uncommitted, but it's a little complicated to avoid
+ // clobbering pre-existing unrelated mappings.
+ while (IsPoisonedPtr(chunk))
+- chunk = static_cast<Chunk *>(AllocChunk());
++ chunk = static_cast<Chunk *>(AllocChunk(rt));
+ #endif
+
+ if (!chunk)
+@@ -629,7 +629,7 @@ Chunk::release(JSRuntime *rt, Chunk *chu
+ {
+ JS_ASSERT(chunk);
+ chunk->prepareToBeFreed(rt);
+- FreeChunk(chunk);
++ FreeChunk(rt, chunk);
+ }
+
+ inline void
+@@ -745,7 +745,7 @@ Chunk::findDecommittedArenaOffset()
+ }
+
+ ArenaHeader *
+-Chunk::fetchNextDecommittedArena()
++Chunk::fetchNextDecommittedArena(JSRuntime *rt)
+ {
+ JS_ASSERT(info.numArenasFreeCommitted == 0);
+ JS_ASSERT(info.numArenasFree > 0);
+@@ -756,7 +756,7 @@ Chunk::fetchNextDecommittedArena()
+ decommittedArenas.unset(offset);
+
+ Arena *arena = &arenas[offset];
+- MarkPagesInUse(arena, ArenaSize);
++ MarkPagesInUse(rt, arena, ArenaSize);
+ arena->aheader.setAsNotAllocated();
+
+ return &arena->aheader;
+@@ -790,7 +790,7 @@ Chunk::allocateArena(JSCompartment *comp
+
+ ArenaHeader *aheader = JS_LIKELY(info.numArenasFreeCommitted > 0)
+ ? fetchNextFreeArena(rt)
+- : fetchNextDecommittedArena();
++ : fetchNextDecommittedArena(rt);
+ aheader->init(comp, thingKind);
+ if (JS_UNLIKELY(!hasAvailableArenas()))
+ removeFromAvailableList();
+@@ -893,6 +893,8 @@ static const int64_t JIT_SCRIPT_RELEASE_
+ JSBool
+ js_InitGC(JSRuntime *rt, uint32_t maxbytes)
+ {
++ InitMemorySubsystem(rt);
++
+ if (!rt->gcChunkSet.init(INITIAL_CHUNK_CAPACITY))
+ return false;
+
+@@ -2745,7 +2747,7 @@ DecommitArenasFromAvailableList(JSRuntim
+ Maybe<AutoUnlockGC> maybeUnlock;
+ if (!rt->isHeapBusy())
+ maybeUnlock.construct(rt);
+- ok = MarkPagesUnused(aheader->getArena(), ArenaSize);
++ ok = MarkPagesUnused(rt, aheader->getArena(), ArenaSize);
+ }
+
+ if (ok) {
+@@ -2813,7 +2815,7 @@ ExpireChunksAndArenas(JSRuntime *rt, boo
+ {
+ if (Chunk *toFree = rt->gcChunkPool.expire(rt, shouldShrink)) {
+ AutoUnlockGC unlock(rt);
+- FreeChunkList(toFree);
++ FreeChunkList(rt, toFree);
+ }
+
+ if (shouldShrink)
+Index: mozilla-release/js/public/HeapAPI.h
+===================================================================
+--- mozilla-release.orig/js/public/HeapAPI.h
++++ mozilla-release/js/public/HeapAPI.h
+@@ -11,24 +11,7 @@
+ namespace js {
+ namespace gc {
+
+-/*
+- * Page size must be static to support our arena pointer optimizations, so we
+- * are forced to support each platform with non-4096 pages as a special case.
+- * Note: The freelist supports a maximum arena shift of 15.
+- * Note: Do not use JS_CPU_SPARC here, this header is used outside JS.
+- */
+-#if (defined(SOLARIS) || defined(__FreeBSD__)) && \
+- (defined(__sparc) || defined(__sparcv9) || defined(__ia64))
+-const size_t PageShift = 13;
+-const size_t ArenaShift = PageShift;
+-#elif defined(__powerpc64__)
+-const size_t PageShift = 16;
+ const size_t ArenaShift = 12;
+-#else
+-const size_t PageShift = 12;
+-const size_t ArenaShift = PageShift;
+-#endif
+-const size_t PageSize = size_t(1) << PageShift;
+ const size_t ArenaSize = size_t(1) << ArenaShift;
+ const size_t ArenaMask = ArenaSize - 1;
+