def examples_via_schema()

in spotify_tensorflow/dataset.py


        @classmethod
        def examples_via_schema(cls,
                                file_pattern,  # type: str
                                schema_path,  # type: str
                                default_value=0,  # type: float
                                batch_size=128,  # type: int
                                compression_type=None,  # type: str
                                shuffle=True,  # type: bool
                                num_epochs=1,  # type: int
                                shuffle_buffer_size=10000,  # type: int
                                shuffle_seed=42,  # type: int
                                prefetch_buffer_size=1,  # type: int
                                reader_num_threads=1,  # type: int
                                parser_num_threads=2,  # type: int
                                sloppy_ordering=False,  # type: bool
                                drop_final_batch=False  # type: bool
                                ):
            # type: (...) -> Iterator[pd.DataFrame]
            """
            Read a TF dataset in batches; each batch is yielded as a Pandas DataFrame.

            :param file_pattern: List of files or patterns of file paths containing
                                 `Example` records. See `tf.gfile.Glob` for pattern rules
            :param schema_path: tf.metadata Schema path
            :param default_value: Default value used when a sparse feature is missing
            :param batch_size: Batch size; set it to the size of the dataset to read all data at once
            :param compression_type: TFRecord compression type, see `tf.data.TFRecordDataset` doc
            :param shuffle: see `tensorflow.contrib.data.make_batched_features_dataset` doc
            :param num_epochs: see `tensorflow.contrib.data.make_batched_features_dataset` doc
            :param shuffle_buffer_size: see `tensorflow.contrib.data.make_batched_features_dataset`
                                        doc
            :param shuffle_seed: see `tensorflow.contrib.data.make_batched_features_dataset` doc
            :param prefetch_buffer_size: see `tensorflow.contrib.data.make_batched_features_dataset`
                                         doc
            :param reader_num_threads: see `tensorflow.contrib.data.make_batched_features_dataset`
                                       doc
            :param parser_num_threads: see `tensorflow.contrib.data.make_batched_features_dataset`
                                       doc
            :param sloppy_ordering: see `tensorflow.contrib.data.make_batched_features_dataset` doc
            :param drop_final_batch: see `tensorflow.contrib.data.make_batched_features_dataset` doc

            :return: A Python generator yielding batches of data, each as a Pandas DataFrame
            """
            return cls._examples(file_pattern=file_pattern,
                                 schema_path=schema_path,
                                 default_value=default_value,
                                 batch_size=batch_size,
                                 compression_type=compression_type,
                                 shuffle=shuffle,
                                 num_epochs=num_epochs,
                                 shuffle_buffer_size=shuffle_buffer_size,
                                 shuffle_seed=shuffle_seed,
                                 prefetch_buffer_size=prefetch_buffer_size,
                                 reader_num_threads=reader_num_threads,
                                 parser_num_threads=parser_num_threads,
                                 sloppy_ordering=sloppy_ordering,
                                 drop_final_batch=drop_final_batch)
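
A minimal usage sketch. The bucket paths, file names, and the `Datasets.dataframe` access
path are assumptions for illustration; check the enclosing class in dataset.py for the
actual entry point exposed by the library:

    from spotify_tensorflow.dataset import Datasets

    # Hypothetical TFRecord files and tf.metadata schema produced upstream.
    file_pattern = "gs://my-bucket/training-data/part-*"
    schema_path = "gs://my-bucket/training-data/_schema.pb"

    # Iterate over the dataset one DataFrame at a time. With the defaults this is a
    # single pass (num_epochs=1) with deterministic shuffling (shuffle_seed=42).
    for batch_df in Datasets.dataframe.examples_via_schema(file_pattern,
                                                           schema_path,
                                                           batch_size=1024):
        print(batch_df.shape)
        print(batch_df.head())

Because the return value is a generator, batches are read lazily; a larger `batch_size`
trades memory for fewer, bigger DataFrames.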