fn flush()

in src/storage/datapool/src/lib.rs [627:676]


    /// Persist the in-memory data region and a freshly built header to the
    /// backing file, then sync it to stable storage.
    ///
    /// File layout: the header occupies the first `HEADER_SIZE` bytes and the
    /// data pages follow immediately after. The header's checksum is a blake3
    /// hash computed over the header (with its checksum field still zeroed)
    /// followed by every data page, so a reader can validate the whole file.
    ///
    /// # Errors
    ///
    /// Returns any I/O error from seeking, writing, or syncing the file,
    /// including `ErrorKind::WriteZero` if the writer stops accepting bytes.
    fn flush(&mut self) -> Result<(), std::io::Error> {
        // initialize the hasher
        let mut hasher = blake3::Hasher::new();

        // prepare the header and stamp the user version
        let mut header = Header::new();
        header.set_user_version(self.user_version);

        // hash the header while its checksum field is still zero'd, so the
        // final checksum covers the header bytes themselves
        hasher.update(header.as_bytes());

        // calculate the number of data pages to be copied
        // NOTE(review): pages are sliced from `self.memory` starting at
        // offset 0 — assumes the mapped region begins at `file_data.start`;
        // confirm against the allocator.
        let data_pages = (self.file_data.end - self.file_data.start) / PAGE_SIZE;

        // write the data region to the file and hash it in one pass.
        // `write_all` retries short writes internally and — unlike the
        // previous manual retry loop — fails with `ErrorKind::WriteZero`
        // instead of spinning forever when `write` returns Ok(0).
        self.file.seek(SeekFrom::Start(HEADER_SIZE as u64))?;
        for page in 0..data_pages {
            let start = page * PAGE_SIZE;
            let end = start + PAGE_SIZE;
            let bytes = &self.memory.as_slice()[start..end];
            self.file.write_all(bytes)?;
            hasher.update(bytes);
        }

        // finalize the hash and stamp it into the header
        header.set_checksum(hasher.finalize());

        // write the completed header at the start of the file
        self.file.seek(SeekFrom::Start(0))?;
        self.file.write_all(header.as_bytes())?;

        // ensure everything reaches stable storage before reporting success
        self.file.sync_all()?;

        Ok(())
    }