in src/main/java/com/hadoop/compression/lzo/LzopOutputStream.java [190:215]
protected void compress() throws IOException {
  int len = compressor.compress(buffer, 0, buffer.length);
  if (len > 0) {
    // New LZO block: record the current output position in the index file.
    if (indexOut != null) {
      indexOut.writeLong(cout.bytesWritten);
    }
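    // (cout counts the bytes written to the underlying compressed stream, so
    // each index entry is the byte offset of a block; this is what lets
    // readers split a .lzo file without scanning it from the start.)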
    rawWriteInt((int) compressor.getBytesRead());

    // If the compressed output is at least as large as the input, the LZO
    // format says to write the uncompressed bytes rather than the compressed
    // ones. The decompressor can tell which happened because both lengths
    // get written to the stream.
    if (compressor.getBytesRead() <= compressor.getBytesWritten()) {
      // Compression did not shrink the block, so write the uncompressed bytes.
      byte[] uncompressed = ((LzoCompressor) compressor).uncompressedBytes();
      rawWriteInt(uncompressed.length);
      out.write(uncompressed, 0, uncompressed.length);
    } else {
      // Write out the compressed chunk.
      rawWriteInt(len);
      out.write(buffer, 0, len);
    }
  }
}
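For reference, here is a minimal sketch of how a reader could consume one block written by this method, assuming rawWriteInt emits big-endian 32-bit values as the lzop format specifies. The class, interface, and method names are hypothetical (this is not the project's actual decompressor), and the real reader additionally handles the lzop file header and optional per-block checksums, which are omitted here:

import java.io.DataInputStream;
import java.io.IOException;

final class LzopBlockReaderSketch {
  // Hypothetical decode hook; stands in for whichever LZO binding is in use.
  interface LzoDecode {
    byte[] decode(byte[] src, int uncompressedLen) throws IOException;
  }

  static byte[] readBlock(DataInputStream in, LzoDecode lzo) throws IOException {
    int uncompressedLen = in.readInt(); // first rawWriteInt: bytes fed to the compressor
    int storedLen = in.readInt();       // second rawWriteInt: length of the payload below
    byte[] stored = new byte[storedLen];
    in.readFully(stored);
    // Equal lengths mean the writer took the "store uncompressed" branch above.
    return storedLen == uncompressedLen ? stored : lzo.decode(stored, uncompressedLen);
  }
}

Because the uncompressed length is always written first, the equality test is enough to distinguish the two branches: the writer stores uncompressed.length (which equals getBytesRead()) in the raw case, and the strictly smaller len in the compressed case.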