in spotify_tensorflow/dataset.py
@classmethod
def _examples(cls,
              file_pattern,  # type: str
              schema_path=None,  # type: str
              feature_spec=None,  # type: Dict[str, Union[tf.FixedLenFeature, tf.VarLenFeature, tf.SparseFeature]]  # noqa: E501
              compression_type=None,  # type: str
              batch_size=128,  # type: int
              shuffle=True,  # type: bool
              num_epochs=1,  # type: int
              shuffle_buffer_size=10000,  # type: int
              shuffle_seed=None,  # type: int
              prefetch_buffer_size=1,  # type: int
              reader_num_threads=1,  # type: int
              parser_num_threads=2,  # type: int
              sloppy_ordering=False,  # type: bool
              drop_final_batch=False  # type: bool
              ):
    # type: (...) -> tf.data.Dataset
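    """Build a tf.data.Dataset of batched, parsed tf.Example records.

    Features are described either by a schema file (schema_path) or by an
    explicit feature_spec; when schema_path is given it takes precedence.
    All remaining arguments are forwarded to
    tf.contrib.data.make_batched_features_dataset.
    """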
    # A schema file, when provided, overrides any explicit feature_spec.
    if schema_path:
        feature_spec, _ = cls.parse_schema(schema_path)
    logger.debug("Will parse features from: `%s`, using features spec: %s",
                 file_pattern,
                 str(feature_spec))

    # Local import: make_batched_features_dataset lives in tf.contrib.data
    # on the TensorFlow 1.x line, so tensorflow.contrib is only touched when
    # this code path actually runs.
    from tensorflow.contrib.data import make_batched_features_dataset

    # reader_args is forwarded to the underlying reader (TFRecordDataset by
    # default), so a single-element list such as ["GZIP"] selects the
    # compression type of the input files.
    reader_args = [compression_type] if compression_type else None
    dataset = make_batched_features_dataset(file_pattern,
                                            batch_size=batch_size,
                                            features=feature_spec,
                                            reader_args=reader_args,
                                            num_epochs=num_epochs,
                                            shuffle=shuffle,
                                            shuffle_buffer_size=shuffle_buffer_size,
                                            shuffle_seed=shuffle_seed,
                                            prefetch_buffer_size=prefetch_buffer_size,
                                            reader_num_threads=reader_num_threads,
                                            parser_num_threads=parser_num_threads,
                                            sloppy_ordering=sloppy_ordering,
                                            drop_final_batch=drop_final_batch)
    return dataset
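
# Hypothetical usage sketch (not part of the original file). It assumes
# _examples is defined on the Datasets class in this module; the paths are
# placeholders for real TFRecord files and a schema file that parse_schema
# understands.
#
#   dataset = Datasets._examples("gs://my-bucket/training/part-*",
#                                schema_path="gs://my-bucket/training/_schema.pb",
#                                batch_size=256,
#                                num_epochs=10,
#                                compression_type="GZIP")
#   features = dataset.make_one_shot_iterator().get_next()
#   # `features` is a dict mapping feature names to batched (Sparse)Tensors.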