Build via docker build -t memoryrepro -f .\Dockerfile .
Run via docker run --name memoryrepro -it --memory=3100mb --env COMPlus_gcServer=1 --rm memoryrepro
The app should eventually hit an OOM. The OOM does not occur with workstation GC.
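For the workstation GC comparison, the same image can be run with server GC turned off (COMPlus_gcServer=0 selects workstation GC; this run is for comparison only and is not part of the repro steps above):
Run via docker run --name memoryrepro -it --memory=3100mb --env COMPlus_gcServer=0 --rm memoryrepro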
Dockerfile:

#See https://aka.ms/containerfastmode to understand how Visual Studio uses this Dockerfile to build your images for faster debugging.
FROM mcr.microsoft.com/dotnet/runtime:6.0.0-rc.1-bullseye-slim AS base
WORKDIR /app

FROM mcr.microsoft.com/dotnet/sdk:6.0.100-rc.1-bullseye-slim AS build
WORKDIR /src
COPY ["memoryrepro.csproj", ""]
RUN dotnet restore "./memoryrepro.csproj"
COPY . .
WORKDIR "/src/."
RUN dotnet build "memoryrepro.csproj" -c Release -o /app/build

FROM build AS publish
RUN dotnet publish "memoryrepro.csproj" -c Release -o /app/publish

FROM base AS final
WORKDIR /app
COPY --from=publish /app/publish .
ENTRYPOINT ["dotnet", "memoryrepro.dll"]
memoryrepro.csproj:

<Project Sdk="Microsoft.NET.Sdk">
  <PropertyGroup>
    <OutputType>Exe</OutputType>
    <TargetFramework>net6.0</TargetFramework>
    <Nullable>enable</Nullable>
  </PropertyGroup>
</Project>
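As a side note, server GC could also be switched on in the project file instead of through the COMPlus_gcServer environment variable; a minimal sketch using the standard ServerGarbageCollection MSBuild property (not used in this repro) would be:

  <PropertyGroup>
    <ServerGarbageCollection>true</ServerGarbageCollection>
  </PropertyGroup>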
Program.cs:

using System;
using System.Buffers;
using System.Text.Json;
using System.Threading;
using System.Threading.Tasks;
var allocationSize = 4 * 1024; // Must be under the LOH threshold
var allocationBatchSize = 1024;
var allocationBatchInterval = TimeSpan.FromMilliseconds(75);
var ballastArraySize = 1024 * 1024;
var ballastCount = 2111; // How much memory to retain in the LOH

var ballast = new byte[ballastCount][];
for (int i = 0; i < ballastCount; i++)
{
    ballast[i] = new byte[ballastArraySize];
    TouchPage(ballast[i]);
}
Console.WriteLine("Ballast Allocated"); | |
var batchCount = 0; | |
try | |
{ | |
while (true) | |
{ | |
var tmp = new byte[allocationBatchSize][]; | |
for (int i = 0; i < allocationBatchSize; i++) | |
{ | |
tmp[i] = ArrayPool<byte>.Shared.Rent(allocationSize); | |
//var tmp = GC.AllocateUninitializedArray<byte>(allocationSize); | |
TouchPage(tmp[i]); | |
} | |
await Task.Delay(3); // This mimics the behavior of Pipe where the producer and consumer are on different threads | |
for (int i = 0; i < allocationBatchSize; i++) | |
{ | |
ArrayPool<byte>.Shared.Return(tmp[i]); | |
} | |
batchCount++; | |
Thread.Sleep(allocationBatchInterval); | |
if (batchCount % 10 == 0) | |
{ | |
var gcMemInfo = GC.GetGCMemoryInfo(); | |
Console.WriteLine(JsonSerializer.Serialize(new | |
{ | |
Gen0CollectionCount = GC.CollectionCount(0), | |
Gen1CollectionCount = GC.CollectionCount(1), | |
Gen2CollectionCount = GC.CollectionCount(2), | |
gcMemInfo.Compacted, | |
gcMemInfo.Concurrent, | |
gcMemInfo.Generation, | |
gcMemInfo.Index, | |
gcMemInfo.FragmentedBytes, | |
gcMemInfo.PromotedBytes, | |
gcMemInfo.FinalizationPendingCount, | |
gcMemInfo.HeapSizeBytes, | |
gcMemInfo.MemoryLoadBytes, | |
gcMemInfo.PauseTimePercentage, | |
gcMemInfo.PinnedObjectsCount, | |
gcMemInfo.TotalCommittedBytes, | |
gcMemInfo.TotalAvailableMemoryBytes, | |
gcMemInfo.HighMemoryLoadThresholdBytes | |
})); | |
if (batchCount % int.MaxValue == 0) // This is just to prevent the compiler from optimizing ballast out | |
{ | |
for (int i = 0; i < ballastCount; i++) | |
{ | |
TouchPage(ballast[i]); | |
} | |
} | |
} | |
} | |
} | |
catch (Exception ex) | |
{ | |
Console.WriteLine(ex.Message); | |
Console.WriteLine(ex.StackTrace); | |
} | |
void TouchPage(byte[] b)
{
    uint size = (uint)b.Length;
    const uint pageSize = 4096;
    uint numPages = size / pageSize;
    for (uint i = 0; i < numPages; i++)
    {
        b[i * pageSize] = (byte)(i % 256);
    }
}
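The Task.Delay(3) between renting and returning stands in for the Pipe-style hand-off mentioned in the comment, where the producer and consumer run on different threads. For context only (not part of the repro), a minimal System.IO.Pipelines producer/consumer sketch looks roughly like this; it assumes a separate program referencing the System.IO.Pipelines NuGet package, and the buffer size and iteration count are illustrative:

using System;
using System.IO.Pipelines;
using System.Threading.Tasks;

var pipe = new Pipe();

// Producer: writes into buffers the pipe rents from its pool.
var producer = Task.Run(async () =>
{
    for (int i = 0; i < 100; i++)
    {
        Memory<byte> buffer = pipe.Writer.GetMemory(4096);
        buffer.Span[0] = (byte)i;
        pipe.Writer.Advance(4096);
        await pipe.Writer.FlushAsync();
    }
    await pipe.Writer.CompleteAsync();
});

// Consumer: reads and releases those buffers on another thread.
var consumer = Task.Run(async () =>
{
    while (true)
    {
        ReadResult result = await pipe.Reader.ReadAsync();
        pipe.Reader.AdvanceTo(result.Buffer.End);
        if (result.IsCompleted) break;
    }
    await pipe.Reader.CompleteAsync();
});

await Task.WhenAll(producer, consumer);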