Because this is the algorithm for how it grows its capacity.
public override void Write(byte[] buffer, int offset, int count) {
    //... Removed error checking for example
    int i = _position + count;
    // Check for overflow
    if (i < 0)
        throw new IOException(Environment.GetResourceString("IO.IO_StreamTooLong"));
    if (i > _length) {
        bool mustZero = _position > _length;
        if (i > _capacity) {
            bool allocatedNewArray = EnsureCapacity(i);
            if (allocatedNewArray)
                mustZero = false;
        }
        if (mustZero)
            Array.Clear(_buffer, _length, i - _length);
        _length = i;
    }
    //...
}
private bool EnsureCapacity(int value) {
    // Check for overflow
    if (value < 0)
        throw new IOException(Environment.GetResourceString("IO.IO_StreamTooLong"));
    if (value > _capacity) {
        int newCapacity = value;
        if (newCapacity < 256)
            newCapacity = 256;
        if (newCapacity < _capacity * 2)
            newCapacity = _capacity * 2;
        Capacity = newCapacity;
        return true;
    }
    return false;
}
public virtual int Capacity
{
    //...
    set {
        //...
        // MemoryStream has this invariant: _origin > 0 => !expandable (see ctors)
        if (_expandable && value != _capacity) {
            if (value > 0) {
                byte[] newBuffer = new byte[value];
                if (_length > 0) Buffer.InternalBlockCopy(_buffer, 0, newBuffer, 0, _length);
                _buffer = newBuffer;
            }
            else {
                _buffer = null;
            }
            _capacity = value;
        }
    }
}
So every time you hit the capacity limit, it doubles the capacity. The reason it does this is that the Buffer.InternalBlockCopy operation is slow on large arrays, so if it had to resize frequently on every Write call, performance would drop significantly.
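To see that doubling in action, here is a minimal sketch of my own (not from the question or the MemoryStream source) that writes 100-byte chunks into a default MemoryStream and prints the capacity each time it jumps; with the reference-source logic quoted above it reports 256, 512, 1024, 2048, and so on:

using System;
using System.IO;

class CapacityDemo
{
    static void Main()
    {
        MemoryStream ms = new MemoryStream();
        byte[] chunk = new byte[100];
        int lastCapacity = -1;

        for (int i = 0; i < 50; i++)
        {
            ms.Write(chunk, 0, chunk.Length);
            if (ms.Capacity != lastCapacity)
            {
                // Capacity only changes when a Write crosses the current limit.
                Console.WriteLine("Length={0}, Capacity={1}", ms.Length, ms.Capacity);
                lastCapacity = ms.Capacity;
            }
        }
    }
}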
A couple of things you could do to improve performance: you can set the initial capacity to at least the size of your compressed array, and you can then grow the capacity by a factor smaller than 2.0 to reduce the amount of memory you use.
const double ResizeFactor = 1.25;

private byte[] Decompress(byte[] data)
{
    using (MemoryStream compressedStream = new MemoryStream(data))
    using (GZipStream zipStream = new GZipStream(compressedStream, CompressionMode.Decompress))
    using (MemoryStream resultStream = new MemoryStream((int)(data.Length * ResizeFactor))) // Set the initial size to the compressed size + 25%.
    {
        byte[] buffer = new byte[4096];
        int iCount = 0;
        while ((iCount = zipStream.Read(buffer, 0, buffer.Length)) > 0)
        {
            // Resize to 125% instead of 200%; if that is still not enough,
            // Write falls back to the internal doubling shown above.
            if (resultStream.Capacity < resultStream.Length + iCount)
                resultStream.Capacity = (int)(resultStream.Capacity * ResizeFactor);
            resultStream.Write(buffer, 0, iCount);
        }
        return resultStream.ToArray();
    }
}
If you wanted to, you could get even fancier with the resizing algorithm, for example resizing based on the compression ratio seen so far:
const double MinResizeFactor = 1.05;

private byte[] Decompress(byte[] data)
{
    using (MemoryStream compressedStream = new MemoryStream(data))
    using (GZipStream zipStream = new GZipStream(compressedStream, CompressionMode.Decompress))
    using (MemoryStream resultStream = new MemoryStream((int)(data.Length * MinResizeFactor))) // Set the initial size to the compressed size + the minimum resize factor.
    {
        byte[] buffer = new byte[4096];
        int iCount = 0;
        while ((iCount = zipStream.Read(buffer, 0, buffer.Length)) > 0)
        {
            if (resultStream.Capacity < resultStream.Length + iCount)
            {
                // The +1 prevents divide-by-zero errors; it may not be necessary in practice.
                double sizeRatio = ((double)resultStream.Position + iCount) / (compressedStream.Position + 1);
                // Resize to the minimum resize factor of the current capacity or the
                // compressed stream length times the compression ratio + min resize
                // factor, whichever is larger.
                resultStream.Capacity = (int)Math.Max(resultStream.Capacity * MinResizeFactor,
                                                      (sizeRatio + (MinResizeFactor - 1)) * compressedStream.Length);
            }
            resultStream.Write(buffer, 0, iCount);
        }
        return resultStream.ToArray();
    }
}
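For completeness, here is a small usage sketch; the Compress helper and the sample data are hypothetical, only there to show a round trip through Decompress (it assumes using System.IO and System.IO.Compression, same as the methods above):

// Hypothetical helper, not part of the original code; just produces GZip data to feed Decompress.
private byte[] Compress(byte[] data)
{
    using (MemoryStream output = new MemoryStream())
    {
        // leaveOpen = true so we can read the MemoryStream after the GZipStream is flushed and disposed.
        using (GZipStream zip = new GZipStream(output, CompressionMode.Compress, true))
        {
            zip.Write(data, 0, data.Length);
        }
        return output.ToArray();
    }
}

// Usage (hypothetical data):
// byte[] original = new byte[100000];          // some payload to compress
// byte[] compressed = Compress(original);
// byte[] restored = Decompress(compressed);
// System.Diagnostics.Debug.Assert(restored.Length == original.Length);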