in src/main/java/com/hadoop/compression/lzo/LzoDecompressor.java [282:330]
public synchronized int decompress(byte[] b, int off, int len)
    throws IOException {
  if (b == null) {
    throw new NullPointerException();
  }
  if (off < 0 || len < 0 || off > b.length - len) {
    throw new ArrayIndexOutOfBoundsException();
  }
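
  // Three cases follow: (1) the current block was stored uncompressed and is
  // copied straight out of the user buffer; (2) decompressed bytes are still
  // buffered from a previous call and are returned first; (3) the compressed
  // direct buffer is handed to the decompressor to produce fresh output.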
  int numBytes = 0;
  if (isCurrentBlockUncompressed()) {
    // The current block has been stored uncompressed, so just
    // copy directly from the input buffer.
    numBytes = Math.min(userBufLen, len);
    System.arraycopy(userBuf, userBufOff, b, off, numBytes);
    userBufOff += numBytes;
    userBufLen -= numBytes;
  } else {
    // Check if there is uncompressed data left over from a previous call.
    numBytes = uncompressedDirectBuf.remaining();
    if (numBytes > 0) {
      // get() advances the buffer's position, so anything beyond 'len'
      // stays buffered for the next call.
      numBytes = Math.min(numBytes, len);
      ((ByteBuffer)uncompressedDirectBuf).get(b, off, numBytes);
      return numBytes;
    }

    // Check if there is data to decompress.
    if (compressedDirectBufLen > 0) {
      // Re-initialize the LZO output direct buffer.
      uncompressedDirectBuf.rewind();
      uncompressedDirectBuf.limit(directBufferSize);

      // Decompress data.
      numBytes = decompressBytesDirect(strategy.getDecompressor());
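      // Expose only the bytes that decompressBytesDirect actually produced.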
      uncompressedDirectBuf.limit(numBytes);

      // Return at most 'len' bytes.
      numBytes = Math.min(numBytes, len);
      ((ByteBuffer)uncompressedDirectBuf).get(b, off, numBytes);
    }
  }

  // Set 'finished' if lzo has consumed all user-data.
  if (userBufLen <= 0) {
    finished = true;
  }

  return numBytes;
}
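
// Usage sketch (illustrative, not part of this file): how a caller might drive
// this Decompressor directly for a single LZO-compressed chunk that fits the
// decompressor's direct buffers. In practice the stream wrappers in
// org.apache.hadoop.io.compress perform this loop; the helper name, variable
// names, and buffer size below are assumptions for the example.
private static void drainTo(LzoDecompressor lzoDecompressor, byte[] chunk,
    java.io.OutputStream sink) throws IOException {
  lzoDecompressor.setInput(chunk, 0, chunk.length);
  byte[] out = new byte[64 * 1024];
  // finished() reports true only once the flag is set and the internal
  // uncompressed buffer is drained, so keep calling decompress() until then.
  while (!lzoDecompressor.finished()) {
    int n = lzoDecompressor.decompress(out, 0, out.length);
    if (n > 0) {
      sink.write(out, 0, n);  // consume the decompressed bytes
    }
  }
}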