wave_color = Color((unsigned char)0, (unsigned char)123, (unsigned char)255, (unsigned char)255);
parentTrackedObject = nullptr;
parentClipObject = NULL;

if (reader && reader->info.metadata.count("rotate") > 0) {
    float rotate_metadata = strtof(reader->info.metadata["rotate"].c_str(), 0);
} catch (const std::exception& e) {}
Clip::Clip() : resampler(NULL), reader(NULL), allocated_reader(NULL), is_open(false)

Clip::Clip(ReaderBase* new_reader) : resampler(NULL), reader(new_reader), allocated_reader(NULL), is_open(false)

Clip::Clip(std::string path) : resampler(NULL), reader(NULL), allocated_reader(NULL), is_open(false)
    std::string ext = get_file_extension(path);
    std::transform(ext.begin(), ext.end(), ext.begin(), ::tolower);

    if (ext == "avi" || ext == "mov" || ext == "mkv" || ext == "mpg" || ext == "mpeg" || ext == "mp3" || ext == "mp4" || ext == "mts" ||
        ext == "ogg" || ext == "wav" || ext == "wmv" || ext == "webm" || ext == "vob" ||
        path.find("%") != std::string::npos)
    allocated_reader = reader;

    if (allocated_reader) {
        delete allocated_reader;
        allocated_reader = NULL;
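
// Editor's sketch ("video.mp4" is a hypothetical path): the two ownership modes
// implied by the constructors and destructor above. A path-constructed Clip
// allocates its own reader (tracked in allocated_reader, deleted in ~Clip()),
// while a Clip given a ReaderBase* only borrows it.
#include "Clip.h"
#include "FFmpegReader.h"

void ownership_modes() {
    {
        openshot::Clip owning("video.mp4");           // allocated_reader freed in ~Clip()
    }
    {
        openshot::FFmpegReader caller_owned("video.mp4");
        openshot::Clip borrowing(&caller_owned);      // allocated_reader stays NULL
    }                                                 // caller_owned outlives the Clip
}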
    if (parentTimeline) {
        std::shared_ptr<openshot::TrackedObjectBase> trackedObject = parentTimeline->GetTrackedObject(object_id);
        Clip* clipObject = parentTimeline->GetClip(object_id);

            parentClipObject = NULL;
        else if (clipObject) {
            parentTrackedObject = nullptr;

            parentTrackedObject = trackedObject;
            parentClipObject = clipObject;
    bool is_same_reader = false;
    if (new_reader && allocated_reader) {
        if (new_reader->Name() == "FrameMapper") {
            if (allocated_reader == clip_mapped_reader->Reader()) {
                is_same_reader = true;

    if (allocated_reader && !is_same_reader) {
        allocated_reader->Close();
        delete allocated_reader;
        allocated_reader = NULL;
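
// Editor's sketch (constructor arguments are illustrative): why the name check
// above special-cases "FrameMapper". A Timeline commonly hands the clip back its
// own reader wrapped in a FrameMapper; deleting allocated_reader in that case
// would leave the mapper holding a dangling pointer.
#include "Clip.h"
#include "FrameMapper.h"

void swap_in_mapper(openshot::Clip& clip) {
    using namespace openshot;
    // Wrap the clip's current reader; the mapper does not take ownership
    FrameMapper* mapper = new FrameMapper(clip.Reader(), Fraction(30, 1),
                                          PULLDOWN_NONE, 44100, 2, LAYOUT_STEREO);
    // Reader() detects the wrapped allocated_reader and keeps it alive
    clip.Reader(mapper);
}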
    throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");

    throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");

    if (is_open && reader) {

    throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");

    return GetFrame(NULL, clip_frame_number, NULL);
std::shared_ptr<Frame> Clip::GetFrame(std::shared_ptr<openshot::Frame> background_frame, int64_t clip_frame_number)
    return GetFrame(background_frame, clip_frame_number, NULL);

    throw ReaderClosed("The Clip is closed. Call Open() before calling this method.");

    std::shared_ptr<Frame> frame = NULL;
    frame = final_cache.GetFrame(clip_frame_number);
    frame = GetOrCreateFrame(clip_frame_number);

    int64_t timeline_frame_number = clip_frame_number;
    QSize timeline_size(frame->GetWidth(), frame->GetHeight());
    if (background_frame) {
        timeline_frame_number = background_frame->number;
        timeline_size.setWidth(background_frame->GetWidth());
        timeline_size.setHeight(background_frame->GetHeight());

    apply_timemapping(frame);
    apply_waveform(frame, timeline_size);
    apply_effects(frame, timeline_frame_number, options, true);
    apply_keyframes(frame, timeline_size);
    apply_effects(frame, timeline_frame_number, options, false);
    final_cache.Add(frame);

    if (!background_frame) {
        background_frame = std::make_shared<Frame>(frame->number, frame->GetWidth(), frame->GetHeight(),
                                                   "#00000000", frame->GetAudioSamplesCount(),
                                                   frame->GetAudioChannelsCount());

    apply_background(frame, background_frame);

    throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
    for (const auto& effect : effects) {
        if (effect->Id() == id) {

    if (!parentObjectId.empty() && (!parentClipObject && !parentTrackedObject)) {
    return parentClipObject;

    if (!parentObjectId.empty() && (!parentClipObject && !parentTrackedObject)) {
    return parentTrackedObject;
std::string Clip::get_file_extension(std::string path)
    return path.substr(path.find_last_of(".") + 1);
void Clip::apply_timemapping(std::shared_ptr<Frame> frame)
    throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");

    const std::lock_guard<std::recursive_mutex> lock(getFrameMutex);

    int64_t clip_frame_number = frame->number;
    int64_t new_frame_number = adjust_frame_number_minimum(time.GetLong(clip_frame_number));

    int source_sample_count = round(target_sample_count * fabs(delta));
    location.frame = new_frame_number;

    init_samples.clear();
    resampler->SetBuffer(&init_samples, 1.0);

    if (source_sample_count <= 0) {
        frame->AddAudioSilence(target_sample_count);

    source_samples->clear();

    int remaining_samples = source_sample_count;
    while (remaining_samples > 0) {
        std::shared_ptr<Frame> source_frame = GetOrCreateFrame(location.frame, false);
        int frame_sample_count = source_frame->GetAudioSamplesCount() - location.sample_start;
        if (frame_sample_count == 0) {
        if (remaining_samples - frame_sample_count >= 0) {
            for (int channel = 0; channel < source_frame->GetAudioChannelsCount(); channel++) {
                source_samples->addFrom(channel, source_pos, source_frame->GetAudioSamples(channel) + location.sample_start, frame_sample_count, 1.0f);
            remaining_samples -= frame_sample_count;
            source_pos += frame_sample_count;
        for (int channel = 0; channel < source_frame->GetAudioChannelsCount(); channel++) {
            source_samples->addFrom(channel, source_pos, source_frame->GetAudioSamples(channel) + location.sample_start, remaining_samples, 1.0f);
        remaining_samples = 0;
        source_pos += remaining_samples;

    frame->AddAudioSilence(target_sample_count);
    if (source_sample_count != target_sample_count) {
        double resample_ratio = double(source_sample_count) / double(target_sample_count);
        resampler->SetBuffer(source_samples, resample_ratio);
        frame->AddAudio(true, channel, 0, resampled_buffer->getReadPointer(channel, 0), std::min(resampled_buffer->getNumSamples(), target_sample_count), 1.0f);
    frame->AddAudio(true, channel, 0, source_samples->getReadPointer(channel, 0), target_sample_count, 1.0f);

    delete source_samples;
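
// Editor's sketch: the 'time' curve consumed above maps output (clip) frames to
// source frames. A hypothetical linear curve from (1,1) to (50,100) plays the
// source at roughly double speed, so apply_timemapping() gathers about twice the
// usual samples per frame and resamples them down to target_sample_count.
#include "Clip.h"

void double_speed(openshot::Clip& clip) {
    clip.time.AddPoint(1, 1, openshot::LINEAR);     // output frame 1 -> source frame 1
    clip.time.AddPoint(50, 100, openshot::LINEAR);  // output frame 50 -> source frame 100
}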
int64_t Clip::adjust_frame_number_minimum(int64_t frame_number)
    if (frame_number < 1)
std::shared_ptr<Frame> Clip::GetOrCreateFrame(int64_t number, bool enable_time)
    int64_t clip_frame_number = adjust_frame_number_minimum(number);
    clip_frame_number = adjust_frame_number_minimum(time.GetLong(clip_frame_number));

        "Clip::GetOrCreateFrame (from reader)",
        "number", number, "clip_frame_number", clip_frame_number);

    auto reader_frame = reader->GetFrame(clip_frame_number);
    reader_frame->number = number;

    auto reader_copy = std::make_shared<Frame>(*reader_frame.get());
    reader_copy->AddColor(QColor(Qt::transparent));
    reader_copy->AddAudioSilence(reader_copy->GetAudioSamplesCount());

        "Clip::GetOrCreateFrame (create blank)",
        "estimated_samples_in_frame", estimated_samples_in_frame);

    auto new_frame = std::make_shared<Frame>(
        number, reader->info.width, reader->info.height,
        "#000000", estimated_samples_in_frame, reader->info.channels);
    new_frame->AddAudioSilence(estimated_samples_in_frame);
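
// Editor's sketch: how an "estimated_samples_in_frame" value like the one above
// is typically derived. Frame::GetSamplesPerFrame() exists in libopenshot; the
// fps/sample-rate numbers here are illustrative. At 30000/1001 fps and 44100 Hz
// the count alternates between 1471 and 1472, which is why it is estimated per
// frame number rather than fixed.
#include "Frame.h"
#include <cstdint>

int estimate_samples(int64_t frame_number) {
    openshot::Fraction fps(30000, 1001);
    return openshot::Frame::GetSamplesPerFrame(frame_number, fps, 44100, 2);
}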
    root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame);
    root["position"] = add_property_json("Position", Position(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
    root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
    root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
    root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 30 * 60 * 60 * 48, true, requested_frame);

    root["waveform"] = add_property_json("Waveform", waveform, "int", "", NULL, 0, 1, false, requested_frame);
    root["parentObjectId"] = add_property_json("Parent", 0.0, "string", parentObjectId, NULL, -1, -1, false, requested_frame);

    if (parentClipObject)

        double timeline_frame_number = requested_frame + clip_start_position - clip_start_frame;

        float parentObject_location_x = parentClipObject->location_x.GetValue(timeline_frame_number);
        float parentObject_location_y = parentClipObject->location_y.GetValue(timeline_frame_number);
        float parentObject_scale_x = parentClipObject->scale_x.GetValue(timeline_frame_number);
        float parentObject_scale_y = parentClipObject->scale_y.GetValue(timeline_frame_number);
        float parentObject_shear_x = parentClipObject->shear_x.GetValue(timeline_frame_number);
        float parentObject_shear_y = parentClipObject->shear_y.GetValue(timeline_frame_number);
        float parentObject_rotation = parentClipObject->rotation.GetValue(timeline_frame_number);

        root["location_x"] = add_property_json("Location X", parentObject_location_x, "float", "", &location_x, -1.0, 1.0, false, requested_frame);
        root["location_y"] = add_property_json("Location Y", parentObject_location_y, "float", "", &location_y, -1.0, 1.0, false, requested_frame);
        root["scale_x"] = add_property_json("Scale X", parentObject_scale_x, "float", "", &scale_x, 0.0, 1.0, false, requested_frame);
        root["scale_y"] = add_property_json("Scale Y", parentObject_scale_y, "float", "", &scale_y, 0.0, 1.0, false, requested_frame);
        root["rotation"] = add_property_json("Rotation", parentObject_rotation, "float", "", &rotation, -360, 360, false, requested_frame);
        root["shear_x"] = add_property_json("Shear X", parentObject_shear_x, "float", "", &shear_x, -1.0, 1.0, false, requested_frame);
        root["shear_y"] = add_property_json("Shear Y", parentObject_shear_y, "float", "", &shear_y, -1.0, 1.0, false, requested_frame);

    return root.toStyledString();
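
// Editor's sketch (assumes jsoncpp, which libopenshot already uses): consuming
// the string returned by PropertiesJSON() above. Every key holds an object built
// by add_property_json() describing one user-editable property.
#include "Clip.h"
#include <json/json.h>
#include <iostream>
#include <sstream>

void list_property_names(openshot::Clip& clip) {
    Json::Value root;
    Json::CharReaderBuilder builder;
    std::string errs;
    std::istringstream stream(clip.PropertiesJSON(1));
    if (Json::parseFromStream(builder, stream, &root, &errs))
        for (const auto& key : root.getMemberNames())
            std::cout << key << "\n";  // id, position, start, end, duration, ...
}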
    root["parentObjectId"] = parentObjectId;
    root["scale"] = scale;
    root["waveform"] = waveform;

    root["effects"] = Json::Value(Json::arrayValue);
    for (auto existing_effect : effects)
        root["effects"].append(existing_effect->JsonValue());

    root["reader"] = Json::Value(Json::objectValue);

    catch (const std::exception& e)
        throw InvalidJSON("JSON is invalid (missing keys or invalid data types)");
    if (!root["parentObjectId"].isNull()) {
        parentObjectId = root["parentObjectId"].asString();
        if (parentObjectId.size() > 0 && parentObjectId != "") {
            parentTrackedObject = nullptr;
            parentClipObject = NULL;
    if (!root["gravity"].isNull())
    if (!root["scale"].isNull())
    if (!root["anchor"].isNull())
    if (!root["display"].isNull())
    if (!root["mixing"].isNull())
    if (!root["waveform"].isNull())
        waveform = root["waveform"].asBool();
    if (!root["scale_x"].isNull())
    if (!root["scale_y"].isNull())
    if (!root["location_x"].isNull())
    if (!root["location_y"].isNull())
    if (!root["alpha"].isNull())
    if (!root["rotation"].isNull())
    if (!root["time"].isNull())
    if (!root["volume"].isNull())
    if (!root["wave_color"].isNull())
    if (!root["shear_x"].isNull())
    if (!root["shear_y"].isNull())
    if (!root["origin_x"].isNull())
    if (!root["origin_y"].isNull())
    if (!root["channel_filter"].isNull())
    if (!root["channel_mapping"].isNull())
    if (!root["has_audio"].isNull())
    if (!root["has_video"].isNull())
    if (!root["perspective_c1_x"].isNull())
    if (!root["perspective_c1_y"].isNull())
    if (!root["perspective_c2_x"].isNull())
    if (!root["perspective_c2_y"].isNull())
    if (!root["perspective_c3_x"].isNull())
    if (!root["perspective_c3_y"].isNull())
    if (!root["perspective_c4_x"].isNull())
    if (!root["perspective_c4_y"].isNull())

    if (!root["effects"].isNull()) {
        for (const auto existing_effect : root["effects"]) {
            if (existing_effect.isNull()) {
            if (!existing_effect["type"].isNull()) {
                if ( (e = EffectInfo().CreateEffect(existing_effect["type"].asString()))) {

    if (!root["reader"].isNull())
        if (!root["reader"]["type"].isNull())
            bool already_open = false;
                already_open = reader->IsOpen();

            std::string type = root["reader"]["type"].asString();
            if (type == "FFmpegReader") {
            } else if (type == "QtImageReader") {
#ifdef USE_IMAGEMAGICK
            } else if (type == "ImageReader") {
                reader = new ImageReader(root["reader"]["path"].asString(), false);
            } else if (type == "TextReader") {
            } else if (type == "ChunkReader") {
            } else if (type == "DummyReader") {
            } else if (type == "Timeline") {

            allocated_reader = reader;

    final_cache.Clear();
void Clip::sort_effects()

    effects.push_back(effect);

    if (parentTimeline) {
        std::shared_ptr<TrackedObjectBBox> trackedObjectBBox = std::static_pointer_cast<TrackedObjectBBox>(trackedObject.second);
        trackedObjectBBox->ParentClip(this);

    final_cache.Clear();

    effects.remove(effect);
    final_cache.Clear();
void Clip::apply_background(std::shared_ptr<openshot::Frame> frame, std::shared_ptr<openshot::Frame> background_frame) {
    std::shared_ptr<QImage> background_canvas = background_frame->GetImage();
    QPainter painter(background_canvas.get());
    painter.setRenderHints(QPainter::Antialiasing | QPainter::SmoothPixmapTransform | QPainter::TextAntialiasing, true);
    painter.setCompositionMode(QPainter::CompositionMode_SourceOver);
    painter.drawImage(0, 0, *frame->GetImage());
    frame->AddImage(background_canvas);
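
// Editor's sketch: the same QPainter pattern as apply_background() above, shown
// in isolation. SourceOver alpha-blends the clip image over the existing canvas
// pixels rather than replacing them.
#include <QImage>
#include <QPainter>

void composite_over(QImage& canvas, const QImage& clip_image) {
    QPainter painter(&canvas);
    painter.setCompositionMode(QPainter::CompositionMode_SourceOver);
    painter.drawImage(0, 0, clip_image);  // clip pixels over background pixels
    painter.end();
}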
void Clip::apply_effects(std::shared_ptr<Frame> frame, int64_t timeline_frame_number, TimelineInfoStruct* options, bool before_keyframes)
    for (auto effect : effects)
        if (effect->info.apply_before_clip && before_keyframes) {
            effect->GetFrame(frame, frame->number);
        } else if (!effect->info.apply_before_clip && !before_keyframes) {
            effect->GetFrame(frame, frame->number);

    if (timeline != NULL && options != NULL) {
bool Clip::isNear(double a, double b)
    return fabs(a - b) < 0.000001;
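
// Editor's sketch: why the transform code below compares with isNear() rather
// than '=='. Keyframe math accumulates IEEE-754 error, so "zero" is rarely exact.
#include <cassert>
#include <cmath>

void isnear_example() {
    double r = 0.1 + 0.2 - 0.3;           // ~5.55e-17, not exactly 0.0
    bool exact = (r == 0.0);              // false: exact comparison misfires
    bool near = std::fabs(r) < 0.000001;  // true: same tolerance as isNear()
    assert(!exact && near);
}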
void Clip::apply_keyframes(std::shared_ptr<Frame> frame, QSize timeline_size) {
    if (!frame->has_image_data) {

    std::shared_ptr<QImage> source_image = frame->GetImage();
    std::shared_ptr<QImage> background_canvas = std::make_shared<QImage>(timeline_size.width(),
                                                                         timeline_size.height(),
                                                                         QImage::Format_RGBA8888_Premultiplied);
    background_canvas->fill(QColor(Qt::transparent));

    QTransform transform = get_transform(frame, background_canvas->width(), background_canvas->height());

    QPainter painter(background_canvas.get());
    painter.setRenderHints(QPainter::Antialiasing | QPainter::SmoothPixmapTransform | QPainter::TextAntialiasing, true);
    painter.setTransform(transform);
    painter.setCompositionMode(QPainter::CompositionMode_SourceOver);
    painter.drawImage(0, 0, *source_image);

    std::stringstream frame_number_str;
    frame_number_str << frame->number;
    painter.setPen(QColor("#ffffff"));
    painter.drawText(20, 20, QString(frame_number_str.str().c_str()));

    frame->AddImage(background_canvas);
void Clip::apply_waveform(std::shared_ptr<Frame> frame, QSize timeline_size) {
    std::shared_ptr<QImage> source_image = frame->GetImage();

        "frame->number", frame->number,
        "width", timeline_size.width(),
        "height", timeline_size.height());

    source_image = frame->GetWaveform(timeline_size.width(), timeline_size.height(), red, green, blue, alpha);
    frame->AddImage(source_image);
QSize Clip::scale_size(QSize source_size, ScaleType source_scale, int target_width, int target_height) {
    switch (source_scale)
        source_size.scale(target_width, target_height, Qt::KeepAspectRatio);
        source_size.scale(target_width, target_height, Qt::IgnoreAspectRatio);
        source_size.scale(target_width, target_height, Qt::KeepAspectRatioByExpanding);
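
// Editor's sketch (not part of Clip.cpp): what the three Qt scaling modes above
// produce for a 1920x1080 source fitted into a hypothetical 1280x1280 target.
// QSize::scale() uses integer math, so the expanded width truncates to 2275.
#include <QSize>
#include <cassert>

void scale_mode_examples() {
    QSize fit(1920, 1080), stretch(1920, 1080), crop(1920, 1080);
    fit.scale(1280, 1280, Qt::KeepAspectRatio);              // SCALE_FIT: letterbox
    stretch.scale(1280, 1280, Qt::IgnoreAspectRatio);        // SCALE_STRETCH: distort
    crop.scale(1280, 1280, Qt::KeepAspectRatioByExpanding);  // SCALE_CROP: overflow
    assert(fit == QSize(1280, 720));
    assert(stretch == QSize(1280, 1280));
    assert(crop == QSize(2275, 1280));
}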
QTransform Clip::get_transform(std::shared_ptr<Frame> frame, int width, int height)
    std::shared_ptr<QImage> source_image = frame->GetImage();

    unsigned char *pixels = source_image->bits();
    for (int pixel = 0, byte_index = 0; pixel < source_image->width() * source_image->height(); pixel++, byte_index += 4)
        pixels[byte_index + 0] *= alpha_value;
        pixels[byte_index + 1] *= alpha_value;
        pixels[byte_index + 2] *= alpha_value;
        pixels[byte_index + 3] *= alpha_value;

        "alpha_value", alpha_value,
        "frame->number", frame->number);

    QSize source_size = scale_size(source_image->size(), scale, width, height);

    float parentObject_location_x = 0.0;
    float parentObject_location_y = 0.0;
    float parentObject_scale_x = 1.0;
    float parentObject_scale_y = 1.0;
    float parentObject_shear_x = 0.0;
    float parentObject_shear_y = 0.0;
    float parentObject_rotation = 0.0;

    long parent_frame_number = frame->number + parent_start_offset;
    parentObject_location_x = parentClipObject->location_x.GetValue(parent_frame_number);
    parentObject_location_y = parentClipObject->location_y.GetValue(parent_frame_number);
    parentObject_scale_x = parentClipObject->scale_x.GetValue(parent_frame_number);
    parentObject_scale_y = parentClipObject->scale_y.GetValue(parent_frame_number);
    parentObject_shear_x = parentClipObject->shear_x.GetValue(parent_frame_number);
    parentObject_shear_y = parentClipObject->shear_y.GetValue(parent_frame_number);
    parentObject_rotation = parentClipObject->rotation.GetValue(parent_frame_number);

    Clip* parentClip = (Clip*) parentTrackedObject->ParentClip();
    long parent_frame_number = frame->number + parent_start_offset;
    std::map<std::string, float> trackedObjectProperties = parentTrackedObject->GetBoxValues(parent_frame_number);
    QSize parent_size = scale_size(QSize(parentClip->info.width, parentClip->info.height),
                                   parentClip->scale, width, height);
    int trackedWidth = trackedObjectProperties["w"] * trackedObjectProperties["sx"] * parent_size.width() *
                       parentClip->scale_x.GetValue(parent_frame_number);
    int trackedHeight = trackedObjectProperties["h"] * trackedObjectProperties["sy"] * parent_size.height() *
                        parentClip->scale_y.GetValue(parent_frame_number);
    source_size = scale_size(source_size, scale, trackedWidth, trackedHeight);
    parentObject_location_x = parentClip->location_x.GetValue(parent_frame_number) + ((trackedObjectProperties["cx"] - 0.5) * parentClip->scale_x.GetValue(parent_frame_number));
    parentObject_location_y = parentClip->location_y.GetValue(parent_frame_number) + ((trackedObjectProperties["cy"] - 0.5) * parentClip->scale_y.GetValue(parent_frame_number));
    parentObject_rotation = trackedObjectProperties["r"] + parentClip->rotation.GetValue(parent_frame_number);

    if (parentObject_scale_x != 0.0 && parentObject_scale_y != 0.0) {
        sx *= parentObject_scale_x;
        sy *= parentObject_scale_y;

    float scaled_source_width = source_size.width() * sx;
    float scaled_source_height = source_size.height() * sy;

    x = (width - scaled_source_width) / 2.0;    // GRAVITY_TOP (centered horizontally)
    x = width - scaled_source_width;            // GRAVITY_TOP_RIGHT
    y = (height - scaled_source_height) / 2.0;  // GRAVITY_LEFT (centered vertically)
    x = (width - scaled_source_width) / 2.0;    // GRAVITY_CENTER
    y = (height - scaled_source_height) / 2.0;
    x = width - scaled_source_width;            // GRAVITY_RIGHT
    y = (height - scaled_source_height) / 2.0;
    y = (height - scaled_source_height);        // GRAVITY_BOTTOM_LEFT
    x = (width - scaled_source_width) / 2.0;    // GRAVITY_BOTTOM
    y = (height - scaled_source_height);
    x = width - scaled_source_width;            // GRAVITY_BOTTOM_RIGHT
    y = (height - scaled_source_height);

        "Clip::get_transform (Gravity)",
        "frame->number", frame->number,
        "source_clip->gravity", gravity,
        "scaled_source_width", scaled_source_width,
        "scaled_source_height", scaled_source_height);

    QTransform transform;

    float shear_x_value = shear_x.GetValue(frame->number) + parentObject_shear_x;
    float shear_y_value = shear_y.GetValue(frame->number) + parentObject_shear_y;

        "Clip::get_transform (Build QTransform - if needed)",
        "frame->number", frame->number,
        "sx", sx, "sy", sy);

    if (!isNear(x, 0) || !isNear(y, 0)) {
        transform.translate(x, y);
    if (!isNear(r, 0) || !isNear(shear_x_value, 0) || !isNear(shear_y_value, 0)) {
        float origin_x_offset = (scaled_source_width * origin_x_value);
        float origin_y_offset = (scaled_source_height * origin_y_value);
        transform.translate(origin_x_offset, origin_y_offset);
        transform.rotate(r);
        transform.shear(shear_x_value, shear_y_value);
        transform.translate(-origin_x_offset, -origin_y_offset);

    float source_width_scale = (float(source_size.width()) / float(source_image->width())) * sx;
    float source_height_scale = (float(source_size.height()) / float(source_image->height())) * sy;
    if (!isNear(source_width_scale, 1.0) || !isNear(source_height_scale, 1.0)) {
        transform.scale(source_width_scale, source_height_scale);
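
// Editor's sketch: the pivot pattern used above, in isolation. Bracketing
// rotate()/shear() between translate(+origin) and translate(-origin) makes the
// rotation happen about the clip's origin point instead of its top-left corner.
#include <QTransform>

QTransform rotate_about(float x, float y, float r, float ox, float oy) {
    QTransform t;
    t.translate(x, y);      // gravity position on the canvas
    t.translate(ox, oy);    // move the pivot to the origin point
    t.rotate(r);            // rotate (and optionally shear) about the pivot
    t.translate(-ox, -oy);  // restore the pivot
    return t;
}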
int64_t Clip::adjust_timeline_framenumber(int64_t clip_frame_number) {
    int64_t frame_number = clip_frame_number + clip_start_position - clip_start_frame;
    return frame_number;
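
// Editor's sketch (assumed reconstruction of the surrounding lines, using the
// 1-based frame convention seen elsewhere in libopenshot): with a 30 fps clip
// positioned at 10.0s and trimmed to start at 2.0s, clip frame 61 (the first
// visible source frame) lands on timeline frame 301.
#include <cmath>
#include <cstdint>

int64_t to_timeline_frame(int64_t clip_frame, float position, float start, double fps) {
    int64_t clip_start_frame = (start * fps) + 1;                  // 2.0s @ 30fps -> 61
    int64_t clip_start_position = std::round(position * fps) + 1;  // 10.0s @ 30fps -> 301
    return clip_frame + clip_start_position - clip_start_frame;    // 61 -> 301
}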