in src/main/java/com/hadoop/compression/lzo/LzopInputStream.java [233:277]
  protected int decompress(byte[] b, int off, int len) throws IOException {
    if (eof) {
      return -1;
    }
    // Check if we are at the beginning of a block
    if (noUncompressedBytes == uncompressedBlockSize) {
      // Get original (uncompressed) data size of the next block
      try {
        byte[] tempBuf = new byte[4];
        uncompressedBlockSize = readInt(in, tempBuf, 4);
        noCompressedBytes += 4;
      } catch (EOFException e) {
        eof = true;
        return -1;
      }
      noUncompressedBytes = 0;
    }
    int n = 0;
    // Loop until the decompressor produces output for this block
    while ((n = decompressor.decompress(b, off, len)) == 0) {
      if (decompressor.finished() || decompressor.needsDictionary()) {
        if (noUncompressedBytes >= uncompressedBlockSize) {
          // Current block is exhausted and nothing more is expected; signal end of stream
          eof = true;
          return -1;
        }
      }
      if (decompressor.needsInput()) {
        // Feed the next chunk of compressed data to the decompressor
        try {
          getCompressedData();
        } catch (EOFException e) {
          eof = true;
          return -1;
        } catch (IOException e) {
          LOG.warn("IOException in getCompressedData; likely LZO corruption.", e);
          throw e;
        }
      }
    }
    // Note the no. of decompressed bytes read from 'current' block
    noUncompressedBytes += n;
    return n;
  }
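
The method above is not usually called directly; it is driven by read() on the stream a codec hands back. Below is a minimal usage sketch (not part of the original file), assuming the standard hadoop-lzo LzopCodec API; the input file name is hypothetical.

import java.io.FileInputStream;
import java.io.InputStream;
import org.apache.hadoop.conf.Configuration;
import com.hadoop.compression.lzo.LzopCodec;

public class LzopReadExample {
  public static void main(String[] args) throws Exception {
    LzopCodec codec = new LzopCodec();
    codec.setConf(new Configuration());
    // createInputStream returns a stream whose read() is served by decompress() above
    try (InputStream in = codec.createInputStream(new FileInputStream("data.lzo"))) {
      byte[] buf = new byte[64 * 1024];
      int n;
      while ((n = in.read(buf)) != -1) {
        System.out.write(buf, 0, n);
      }
      System.out.flush();
    }
  }
}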