in vireo/scala/jni/vireo/transform.cpp [45:142]
void JNICALL Java_com_twitter_vireo_transform_jni_Stitch_jniInit(JNIEnv* env, jobject stitch_obj, jobject audio_tracks_obj, jobject video_tracks_obj, jobject edit_boxes_per_track_obj) {
jni::ExceptionHandler::SafeExecuteFunction(env, [&]{
auto jni = new _JNIStitchStruct();
auto jni_stitch = jni::Wrap(env, stitch_obj);
jni_stitch.set<jlong>("jni", (jlong)jni);
// also keep local references to the sample jobjects so stitched output samples can be mapped back to their original Java objects
uint64_t index = 0;
vector<jobject> sample_objs;
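// The mapping trick used below, sketched as comments (illustrative only, not
// executed here; `i`, `fake`, `stitched_sample`, and `original` are
// placeholder names, not part of this file):
//
//   // encode, inside the per-sample lambdas: store the running index as a
//   // fake data pointer inside a dummy common::Data32
//   const uint8_t* fake = (const uint8_t*)i;
//   // decode, after transform::Stitch: read the index back out of the
//   // pointer and look up the original jobject
//   uint64_t i = (uint64_t)stitched_sample.nal().data();
//   jobject original = sample_objs[i];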
// collect input audio tracks
auto audio_tracks = jni::createVectorFromSeq<functional::Audio<decode::Sample>>(env, audio_tracks_obj, function<functional::Audio<decode::Sample>(jobject)>([env, &index, &sample_objs](jobject track_obj) -> functional::Audio<decode::Sample> {
vector<decode::Sample> samples = jni::createVectorFromMedia(env, track_obj, function<decode::Sample(jobject)>([env, &index, &sample_objs](jobject sample_obj) -> decode::Sample {
auto jni_sample = jni::Wrap(env, sample_obj);
int64_t pts = (int64_t)jni_sample.get<jlong>("pts");
int64_t dts = (int64_t)jni_sample.get<jlong>("dts");
bool keyframe = (bool)jni_sample.get<jboolean>("keyframe");
SampleType type = (SampleType)jni_sample.get<jbyte>("sampleType");
sample_objs.push_back(sample_obj);
const uint8_t* _index = (const uint8_t*)index++; // not a valid pointer: the sample's original index is smuggled through the data pointer of a dummy common::Data32 so it can be recovered after stitching
return (decode::Sample){ pts, dts, keyframe, type, [_index](){ return common::Data32(_index, 0, NULL); } };
}));
auto settings_obj = jni::Wrap(env, track_obj).get("settings", "Ljava/lang/Object;");
auto settings = jni::createAudioSettings(env, settings_obj);
return functional::Audio<decode::Sample>(samples, settings);
}));
// collect input video tracks (same per-sample handling as the audio path, but with video settings)
auto video_tracks = jni::createVectorFromSeq<functional::Video<decode::Sample>>(env, video_tracks_obj, function<functional::Video<decode::Sample>(jobject)>([env, &index, &sample_objs](jobject track_obj) -> functional::Video<decode::Sample> {
vector<decode::Sample> samples = jni::createVectorFromMedia(env, track_obj, function<decode::Sample(jobject)>([env, &index, &sample_objs](jobject sample_obj) -> decode::Sample {
auto jni_sample = jni::Wrap(env, sample_obj);
int64_t pts = (int64_t)jni_sample.get<jlong>("pts");
int64_t dts = (int64_t)jni_sample.get<jlong>("dts");
bool keyframe = (bool)jni_sample.get<jboolean>("keyframe");
SampleType type = (SampleType)jni_sample.get<jbyte>("sampleType");
sample_objs.push_back(sample_obj);
const uint8_t* _index = (const uint8_t*)index++; // not a valid pointer: see the audio path above; the original index is smuggled through a dummy common::Data32
return (decode::Sample){ pts, dts, keyframe, type, [_index](){ return common::Data32(_index, 0, NULL); } };
}));
auto settings_obj = jni::Wrap(env, track_obj).get("settings", "Ljava/lang/Object;");
auto settings = jni::createVideoSettings(env, settings_obj);
return functional::Video<decode::Sample>(samples, settings);
}));
// collect the input edit boxes
auto edit_boxes_per_track = jni::createVectorFromSeq<vector<common::EditBox>>(env, edit_boxes_per_track_obj, function<vector<common::EditBox>(jobject)>([env](jobject edit_boxes_obj) -> vector<common::EditBox> {
return jni::createVectorFromSeq<common::EditBox>(env, edit_boxes_obj, function<common::EditBox(jobject)>([env](jobject edit_box_obj) -> common::EditBox {
auto jni_edit_box = jni::Wrap(env, edit_box_obj);
return common::EditBox((int64_t)jni_edit_box.get<jlong>("startPts"),
(uint64_t)jni_edit_box.get<jlong>("durationPts"),
1.0f, // playback rate: normal (1x) speed
(SampleType)jni_edit_box.get<jbyte>("sampleType"));
}));
}));
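// Background note (an assumption, based on the MP4 edit lists that
// common::EditBox appears to model): an edit box exposes the media range
// [startPts, startPts + durationPts) at the given playback rate for one
// sample type.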
// stitch the collected tracks end to end
auto stitched = transform::Stitch(audio_tracks, video_tracks, edit_boxes_per_track);
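// stitched.audio_track / stitched.video_track hold the re-timed samples; each
// sample's nal() thunk still carries the smuggled input index, which the
// loops below use to find the matching Java object and update its pts/dts.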
// setup output audio track
for (auto sample: stitched.audio_track) {
uint64_t index = (uint64_t)sample.nal().data(); // recover the original sample index smuggled through the dummy data pointer
jobject sample_obj = sample_objs[index];
jni::Wrap jni_sample = jni::Wrap(env, sample_obj);
jni_sample.set<jlong>("pts", (int64_t)sample.pts);
jni_sample.set<jlong>("dts", (int64_t)sample.dts);
jni->jni_audio_samples.push_back(move(jni_sample));
}
jni->audio_edit_boxes.insert(jni->audio_edit_boxes.end(), stitched.audio_track.edit_boxes().begin(), stitched.audio_track.edit_boxes().end());
jni->audio_duration = stitched.audio_track.duration();
jni::Wrap jni_audio_track = jni::Wrap(env, jni_stitch.get("audioTrack", "Lcom/twitter/vireo/transform/Stitch$AudioTrack;"));
setAudioSettings(env, jni_audio_track, stitched.audio_track.settings());
jni_audio_track.set<jint>("b", (jint)jni->jni_audio_samples.size()); // "b" holds the output sample count on the Scala side
// setup output video track
for (auto sample: stitched.video_track) {
uint64_t index = (uint64_t)sample.nal().data(); // recover the original sample index smuggled through the dummy data pointer
jobject sample_obj = sample_objs[index];
jni::Wrap jni_sample = jni::Wrap(env, sample_obj);
jni_sample.set<jlong>("pts", (int64_t)sample.pts);
jni_sample.set<jlong>("dts", (int64_t)sample.dts);
jni->jni_video_samples.push_back(move(jni_sample));
}
jni->video_edit_boxes.insert(jni->video_edit_boxes.end(), stitched.video_track.edit_boxes().begin(), stitched.video_track.edit_boxes().end());
jni->video_duration = stitched.video_track.duration();
jni::Wrap jni_video_track = jni::Wrap(env, jni_stitch.get("videoTrack", "Lcom/twitter/vireo/transform/Stitch$VideoTrack;"));
setVideoSettings(env, jni_video_track, stitched.video_track.settings());
jni_video_track.set<jint>("b", (jint)jni->jni_video_samples.size()); // "b" holds the output sample count on the Scala side
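// The wrapped samples, edit boxes, and durations are retained in the
// _JNIStitchStruct, presumably for later JNI accessors to read; jniClose is
// expected to release them.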
}, [env, stitch_obj] { // on failure, release any native state allocated so far
Java_com_twitter_vireo_transform_jni_Stitch_jniClose(env, stitch_obj);
});
}