public void write(byte[] b, int off, int len)

in src/main/java/com/hadoop/compression/lzo/LzopOutputStream.java [124:178]


  public void write(byte[] b, int off, int len) throws IOException {
    // TODO: LzopOutputStream used to inherit from BlockCompressorStream
    // but had a bug due to this inheritance chain. In order to fix the
    // bug we pulled down the implementation of the superclass, which
    // is overly general. Thus this function is not quite as succinct
    // as it could be, now that it's LZOP-specific.
    // See: https://github.com/toddlipcon/hadoop-lzo/commit/5fe6dd4736a73fa33b86656ce8aeb011e7f2046c

    // Sanity checks
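    // (these mirror the java.io.OutputStream#write(byte[], int, int) contract)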
    if (compressor.finished()) {
      throw new IOException("write beyond end of stream");
    }
    if (b == null) {
      throw new NullPointerException();
    } else if ((off < 0) || (off > b.length) || (len < 0) ||
               ((off + len) > b.length)) {
      throw new IndexOutOfBoundsException();
    } else if (len == 0) {
      return;
    }

    long limlen = compressor.getBytesRead();
    if (len + limlen > MAX_INPUT_SIZE && limlen > 0) {
      // Adding this segment would exceed the maximum size.
      // Flush data if we have it.
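      // finish() compresses and writes out everything buffered so far as a
      // complete block; reset() then clears the compressor's finished state
      // so it can accept new input.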
      finish();
      compressor.reset();
    }

    if (len > MAX_INPUT_SIZE) {
      // The data we're given exceeds the maximum size. Any data
      // we had has already been flushed, so we write out this chunk in segments
      // not exceeding the maximum size until it is exhausted.
      do {
        int bufLen = Math.min(len, MAX_INPUT_SIZE);

        compressor.setInput(b, off, bufLen);
        finish();
        compressor.reset();
        off += bufLen;
        len -= bufLen;
      } while (len > 0);
      return;
    }

    // Give data to the compressor
    compressor.setInput(b, off, len);
    if (!compressor.needsInput()) {
      // The compressor's internal buffer may be smaller than the maximum
      // block size, so keep flushing until it can accept more input.
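      // Each compress() call drains one buffer's worth of compressed
      // output to the underlying stream.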
      do {
        compress();
      } while (!compressor.needsInput());
    }
  }
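
For orientation, here is a minimal usage sketch of how bytes typically reach
this method. It is an illustration, not code from this repository: it assumes
the hadoop-lzo LzopCodec entry point and the native LZO libraries being
available at runtime, and the class name, file name, and payload below are
hypothetical.

  import java.io.FileOutputStream;
  import java.io.OutputStream;
  import java.nio.charset.StandardCharsets;

  import org.apache.hadoop.conf.Configuration;

  import com.hadoop.compression.lzo.LzopCodec;

  public class LzopWriteExample {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      LzopCodec codec = new LzopCodec();
      codec.setConf(conf); // the codec is Configurable and reads its settings from conf

      // "example.lzo" is a hypothetical output path.
      try (OutputStream out = codec.createOutputStream(new FileOutputStream("example.lzo"))) {
        byte[] payload = "hello, lzop".getBytes(StandardCharsets.UTF_8);
        // This call dispatches to the write(byte[], int, int) shown above;
        // closing the stream then finishes compression and writes the
        // trailing LZOP end-of-stream marker.
        out.write(payload, 0, payload.length);
      }
    }
  }

Note that closing the stream calls finish(), which is why write() guards
against writes after the compressor has finished: any bytes written past that
point would corrupt the LZOP framing.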