in basic_pitch/data/datasets/maestro.py
def process(self, element: List[str], *args: Tuple[Any, Any], **kwargs: Dict[str, Any]) -> List[Any]:
    # Imports are kept local to process() (a common pattern for Beam DoFns that run on remote workers).
    import tempfile

    import mirdata
    import numpy as np
    import sox

    from basic_pitch.constants import (
        AUDIO_N_CHANNELS,
        AUDIO_SAMPLE_RATE,
        FREQ_BINS_CONTOURS,
        FREQ_BINS_NOTES,
        ANNOTATION_HOP,
        N_FREQ_BINS_NOTES,
        N_FREQ_BINS_CONTOURS,
    )
    from basic_pitch.data import tf_example_serialization
logging.info(f"Processing {element}")
batch = []
for track_id in element:
track_remote = self.maestro_remote.track(track_id)
with tempfile.TemporaryDirectory() as local_tmp_dir:
maestro_local = mirdata.initialize("maestro", local_tmp_dir)
track_local = maestro_local.track(track_id)
for attribute in self.DOWNLOAD_ATTRIBUTES:
source = getattr(track_remote, attribute)
destination = getattr(track_local, attribute)
os.makedirs(os.path.dirname(destination), exist_ok=True)
with self.filesystem.open(source) as s, open(destination, "wb") as d:
# d.write(s.read())
for piece in read_in_chunks(s):
d.write(piece)
local_wav_path = f"{track_local.audio_path}_tmp.wav"
tfm = sox.Transformer()
tfm.rate(AUDIO_SAMPLE_RATE)
tfm.channels(AUDIO_N_CHANNELS)
tfm.build(track_local.audio_path, local_wav_path)
duration = sox.file_info.duration(local_wav_path)
time_scale = np.arange(0, duration + ANNOTATION_HOP, ANNOTATION_HOP)
n_time_frames = len(time_scale)
note_indices, note_values = track_local.notes.to_sparse_index(time_scale, "s", FREQ_BINS_NOTES, "hz")
onset_indices, onset_values = track_local.notes.to_sparse_index(
time_scale, "s", FREQ_BINS_NOTES, "hz", onsets_only=True
)
contour_indices, contour_values = track_local.notes.to_sparse_index(
time_scale, "s", FREQ_BINS_CONTOURS, "hz"
)
            batch.append(
                tf_example_serialization.to_transcription_tfexample(
                    track_local.track_id,
                    "maestro",
                    local_wav_path,
                    note_indices,
                    note_values,
                    onset_indices,
                    onset_values,
                    contour_indices,
                    contour_values,
                    (n_time_frames, N_FREQ_BINS_NOTES),
                    (n_time_frames, N_FREQ_BINS_CONTOURS),
                )
            )
    return [batch]
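
The method assumes a surrounding DoFn-style class (it uses self.maestro_remote, self.filesystem, and self.DOWNLOAD_ATTRIBUTES) plus a read_in_chunks helper, none of which appear in this excerpt. Below is a minimal sketch of how the pieces could fit together, assuming an Apache Beam pipeline, a simple chunked-read helper, and a class named MaestroToTfExample; run_sketch, batch_size, and the constructor arguments are illustrative assumptions, and the real wiring in basic_pitch.data may differ.

import apache_beam as beam
import mirdata


def read_in_chunks(file_object, chunk_size: int = 1024 * 1024):
    # Hypothetical helper matching the call above: yield fixed-size chunks until EOF.
    while True:
        data = file_object.read(chunk_size)
        if not data:
            break
        yield data


def run_sketch(source: str, destination: str, batch_size: int = 4) -> None:
    # Batch MAESTRO track ids, expand each batch into tf.Examples with the DoFn above,
    # and write the serialized protos to TFRecord shards.
    track_ids = list(mirdata.initialize("maestro", data_home=source).track_ids)
    batches = [track_ids[i : i + batch_size] for i in range(0, len(track_ids), batch_size)]

    with beam.Pipeline() as p:
        (
            p
            | "Create batches" >> beam.Create(batches)
            | "To tf.Example" >> beam.ParDo(MaestroToTfExample(source))  # class name and ctor assumed
            | "Flatten batches" >> beam.FlatMap(lambda batch: batch)
            # Assumes to_transcription_tfexample returns a tf.train.Example proto.
            | "Serialize" >> beam.Map(lambda example: example.SerializeToString())
            | "Write TFRecords" >> beam.io.WriteToTFRecord(destination)
        )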