OpenShot Library | libopenshot 0.3.3
Clip.cpp
/**
 * @file
 * @brief Source file for Clip class
 * @author Jonathan Thomas <jonathan@openshot.org>
 *
 * @ref License
 */

// Copyright (c) 2008-2019 OpenShot Studios, LLC
//
// SPDX-License-Identifier: LGPL-3.0-or-later

#include "Clip.h"

#include "AudioResampler.h"
#include "Exceptions.h"
#include "FFmpegReader.h"
#include "FrameMapper.h"
#include "QtImageReader.h"
#include "ChunkReader.h"
#include "DummyReader.h"
#include "Timeline.h"
#include "ZmqLogger.h"

#ifdef USE_IMAGEMAGICK
    #include "MagickUtilities.h"
    #include "ImageReader.h"
    #include "TextReader.h"
#endif

#include <Qt>

using namespace openshot;

// Init default settings for a clip
void Clip::init_settings()
{
    // Init clip settings
    Position(0.0);
    Layer(0);
    Start(0.0);
    ClipBase::End(0.0);
    gravity = GRAVITY_CENTER;
    scale = SCALE_FIT;
    anchor = ANCHOR_CANVAS;
    display = FRAME_DISPLAY_NONE;
    mixing = VOLUME_MIX_NONE;
    waveform = false;
    previous_properties = "";
    parentObjectId = "";

    // Init scale curves
    scale_x = Keyframe(1.0);
    scale_y = Keyframe(1.0);

    // Init location curves
    location_x = Keyframe(0.0);
    location_y = Keyframe(0.0);

    // Init alpha
    alpha = Keyframe(1.0);

    // Init time & volume
    time = Keyframe(1.0);
    volume = Keyframe(1.0);

    // Init audio waveform color
    wave_color = Color((unsigned char)0, (unsigned char)123, (unsigned char)255, (unsigned char)255);

    // Init shear and perspective curves
    shear_x = Keyframe(0.0);
    shear_y = Keyframe(0.0);
    origin_x = Keyframe(0.5);
    origin_y = Keyframe(0.5);
    perspective_c1_x = Keyframe(-1.0);
    perspective_c1_y = Keyframe(-1.0);
    perspective_c2_x = Keyframe(-1.0);
    perspective_c2_y = Keyframe(-1.0);
    perspective_c3_x = Keyframe(-1.0);
    perspective_c3_y = Keyframe(-1.0);
    perspective_c4_x = Keyframe(-1.0);
    perspective_c4_y = Keyframe(-1.0);

    // Init audio channel filter and mappings
    channel_filter = Keyframe(-1.0);
    channel_mapping = Keyframe(-1.0);

    // Init audio and video overrides
    has_audio = Keyframe(-1.0);
    has_video = Keyframe(-1.0);

    // Initialize the attached object and attached clip as null pointers
    parentTrackedObject = nullptr;
    parentClipObject = NULL;

    // Init reader info struct
    init_reader_settings();
}

// Init reader info details
void Clip::init_reader_settings() {
    if (reader) {
        // Init rotation (if any)
        init_reader_rotation();

        // Initialize info struct
        info = reader->info;

        // Init cache
        final_cache.SetMaxBytesFromInfo(8, info.width, info.height, info.sample_rate, info.channels);
    }
}

// Init reader's rotation (if any)
void Clip::init_reader_rotation() {
    // Don't init rotation if clip has keyframes
    if (rotation.GetCount() > 0)
        return;

    // Init rotation
    if (reader && reader->info.metadata.count("rotate") > 0) {
        // Use reader metadata rotation (if any)
        // This is typical with cell phone videos filmed in different orientations
        try {
            float rotate_metadata = strtof(reader->info.metadata["rotate"].c_str(), 0);
            rotation = Keyframe(rotate_metadata);
        } catch (const std::exception& e) {}
    }
    else
        // Default no rotation
        rotation = Keyframe(0.0);
}

// Default Constructor for a clip
Clip::Clip() : resampler(NULL), reader(NULL), allocated_reader(NULL), is_open(false)
{
    // Init all default settings
    init_settings();
}

// Constructor with reader
Clip::Clip(ReaderBase* new_reader) : resampler(NULL), reader(new_reader), allocated_reader(NULL), is_open(false)
{
    // Init all default settings
    init_settings();

    // Open and Close the reader (to set the duration of the clip)
    Open();
    Close();

    // Update duration and set parent
    if (reader) {
        ClipBase::End(reader->info.duration);
        reader->ParentClip(this);
        // Init reader info struct
        init_reader_settings();
    }
}

// Constructor with filepath
Clip::Clip(std::string path) : resampler(NULL), reader(NULL), allocated_reader(NULL), is_open(false)
{
    // Init all default settings
    init_settings();

    // Get file extension (and convert to lower case)
    std::string ext = get_file_extension(path);
    std::transform(ext.begin(), ext.end(), ext.begin(), ::tolower);

    // Determine if common video formats (or image sequences)
    if (ext=="avi" || ext=="mov" || ext=="mkv" || ext=="mpg" || ext=="mpeg" || ext=="mp3" || ext=="mp4" || ext=="mts" ||
        ext=="ogg" || ext=="wav" || ext=="wmv" || ext=="webm" || ext=="vob" || path.find("%") != std::string::npos)
    {
        try
        {
            // Open common video format
            reader = new openshot::FFmpegReader(path);

        } catch(...) { }
    }
    if (ext=="osp")
    {
        try
        {
            // Open OpenShot project file
            reader = new openshot::Timeline(path, true);

        } catch(...) { }
    }

    // If no video found, try each reader
    if (!reader)
    {
        try
        {
            // Try an image reader
            reader = new openshot::QtImageReader(path);

        } catch(...) {
            try
            {
                // Try a video reader
                reader = new openshot::FFmpegReader(path);

            } catch(...) { }
        }
    }

    // Update duration and set parent
    if (reader) {
        ClipBase::End(reader->info.duration);
        reader->ParentClip(this);
        allocated_reader = reader;
        // Init reader info struct
        init_reader_settings();
    }
}

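// Example (illustrative sketch, not part of this file): the path-based
// constructor above picks a reader automatically from the file extension;
// the file name "video.mp4" is an assumption for illustration.
//
//     openshot::Clip c("video.mp4");                       // FFmpegReader chosen
//     c.Open();                                            // open internal reader
//     std::shared_ptr<openshot::Frame> f = c.GetFrame(1);  // first frame
//     c.Close();
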
// Destructor
Clip::~Clip()
{
    // Delete the reader if clip created it
    if (allocated_reader) {
        delete allocated_reader;
        allocated_reader = NULL;
        reader = NULL;
    }

    // Close the resampler
    if (resampler) {
        delete resampler;
        resampler = NULL;
    }

    // Close clip
    Close();
}

// Attach clip to bounding box
void Clip::AttachToObject(std::string object_id)
{
    // Search for the tracked object on the timeline
    Timeline* parentTimeline = static_cast<Timeline *>(ParentTimeline());

    if (parentTimeline) {
        // Create a smart pointer to the tracked object from the timeline
        std::shared_ptr<openshot::TrackedObjectBase> trackedObject = parentTimeline->GetTrackedObject(object_id);
        Clip* clipObject = parentTimeline->GetClip(object_id);

        // Check for valid tracked object
        if (trackedObject){
            SetAttachedObject(trackedObject);
            parentClipObject = NULL;
        }
        else if (clipObject) {
            SetAttachedClip(clipObject);
            parentTrackedObject = nullptr;
        }
    }
}

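// Example (illustrative sketch, not part of this file): a clip can follow
// another clip's transform once both are on a timeline; the id "CLIP-A" is
// an assumption for illustration.
//
//     timeline.AddClip(&parent_clip);       // parent_clip.Id() == "CLIP-A"
//     timeline.AddClip(&child_clip);
//     child_clip.AttachToObject("CLIP-A");  // child now follows CLIP-A
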
// Set the pointer to the trackedObject this clip is attached to
void Clip::SetAttachedObject(std::shared_ptr<openshot::TrackedObjectBase> trackedObject){
    parentTrackedObject = trackedObject;
}

// Set the pointer to the clip this clip is attached to
void Clip::SetAttachedClip(Clip* clipObject){
    parentClipObject = clipObject;
}

// Set the current reader
void Clip::Reader(ReaderBase* new_reader)
{
    // Delete previously allocated reader (if not related to new reader)
    // FrameMappers that point to the same allocated reader are ignored
    bool is_same_reader = false;
    if (new_reader && allocated_reader) {
        if (new_reader->Name() == "FrameMapper") {
            // Determine if FrameMapper is pointing at the same allocated reader
            FrameMapper* clip_mapped_reader = static_cast<FrameMapper*>(new_reader);
            if (allocated_reader == clip_mapped_reader->Reader()) {
                is_same_reader = true;
            }
        }
    }
    // Clear existing allocated reader (if different)
    if (allocated_reader && !is_same_reader) {
        reader->Close();
        allocated_reader->Close();
        delete allocated_reader;
        reader = NULL;
        allocated_reader = NULL;
    }

    // set reader pointer
    reader = new_reader;

    // set parent
    if (reader) {
        reader->ParentClip(this);

        // Init reader info struct
        init_reader_settings();
    }
}

// Get the current reader
ReaderBase* Clip::Reader()
{
    if (reader)
        return reader;
    else
        // Throw error if reader not initialized
        throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
}

// Open the internal reader
void Clip::Open()
{
    if (reader)
    {
        // Open the reader
        reader->Open();
        is_open = true;

        // Copy Reader info to Clip
        info = reader->info;

        // Set some clip properties from the file reader
        if (end == 0.0)
            ClipBase::End(reader->info.duration);
    }
    else
        // Throw error if reader not initialized
        throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
}

// Close the internal reader
void Clip::Close()
{
    if (is_open && reader) {
        ZmqLogger::Instance()->AppendDebugMethod("Clip::Close");

        // Close the reader
        reader->Close();
    }

    // Clear cache
    final_cache.Clear();
    is_open = false;
}

// Get end position of clip (trim end of video), which can be affected by the time curve.
float Clip::End() const
{
    // if a time curve is present, use its length
    if (time.GetCount() > 1)
    {
        // Determine the FPS of this clip
        float fps = 24.0;
        if (reader)
            // file reader
            fps = reader->info.fps.ToFloat();
        else
            // Throw error if reader not initialized
            throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");

        return float(time.GetLength()) / fps;
    }
    else
        // just use the duration (as detected by the reader)
        return end;
}

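// Example (illustrative sketch, not part of this file): with a 24 fps reader,
// stretching the time curve to 480 points makes End() report the time-mapped
// length instead of the reader duration (all numbers are assumptions).
//
//     clip.time.AddPoint(1, 1);
//     clip.time.AddPoint(480, 240);  // 480 output frames cover 240 source frames
//     float len = clip.End();        // ~480 / 24.0 == ~20.0 seconds
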
// Override End() position
void Clip::End(float value) {
    ClipBase::End(value);
}

// Set associated Timeline pointer
void Clip::ParentTimeline(openshot::TimelineBase* new_timeline) {
    timeline = new_timeline;

    // Clear cache (it might have changed)
    final_cache.Clear();
}

// Create an openshot::Frame object for a specific frame number of this reader.
std::shared_ptr<Frame> Clip::GetFrame(int64_t clip_frame_number)
{
    // Call override of GetFrame
    return GetFrame(NULL, clip_frame_number, NULL);
}

// Create an openshot::Frame object for a specific frame number of this reader.
// NOTE: background_frame is ignored in this method (this method is only used by Effect classes)
std::shared_ptr<Frame> Clip::GetFrame(std::shared_ptr<openshot::Frame> background_frame, int64_t clip_frame_number)
{
    // Call override of GetFrame
    return GetFrame(background_frame, clip_frame_number, NULL);
}

// Use an existing openshot::Frame object and draw this Clip's frame onto it
std::shared_ptr<Frame> Clip::GetFrame(std::shared_ptr<openshot::Frame> background_frame, int64_t clip_frame_number, openshot::TimelineInfoStruct* options)
{
    // Check for open reader (or throw exception)
    if (!is_open)
        throw ReaderClosed("The Clip is closed. Call Open() before calling this method.");

    if (reader)
    {
        // Get frame object
        std::shared_ptr<Frame> frame = NULL;

        // Check cache
        frame = final_cache.GetFrame(clip_frame_number);
        if (!frame) {
            // Generate clip frame
            frame = GetOrCreateFrame(clip_frame_number);

            // Get frame size and frame #
            int64_t timeline_frame_number = clip_frame_number;
            QSize timeline_size(frame->GetWidth(), frame->GetHeight());
            if (background_frame) {
                // If a background frame is provided, use it instead
                timeline_frame_number = background_frame->number;
                timeline_size.setWidth(background_frame->GetWidth());
                timeline_size.setHeight(background_frame->GetHeight());
            }

            // Get time mapped frame object (used to increase speed, change direction, etc...)
            apply_timemapping(frame);

            // Apply waveform image (if any)
            apply_waveform(frame, timeline_size);

            // Apply effects BEFORE applying keyframes (if any local or global effects are used)
            apply_effects(frame, timeline_frame_number, options, true);

            // Apply keyframe / transforms to current clip image
            apply_keyframes(frame, timeline_size);

            // Apply effects AFTER applying keyframes (if any local or global effects are used)
            apply_effects(frame, timeline_frame_number, options, false);

            // Add final frame to cache (before flattening into background_frame)
            final_cache.Add(frame);
        }

        if (!background_frame) {
            // Create missing background_frame w/ transparent color (if needed)
            background_frame = std::make_shared<Frame>(frame->number, frame->GetWidth(), frame->GetHeight(),
                                                       "#00000000", frame->GetAudioSamplesCount(),
                                                       frame->GetAudioChannelsCount());
        }

        // Apply background canvas (i.e. flatten this image onto previous layer image)
        apply_background(frame, background_frame);

        // Return processed 'frame'
        return frame;
    }
    else
        // Throw error if reader not initialized
        throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
}

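// Example (illustrative sketch, not part of this file): compositing this
// clip's frame onto an existing layer image; the 1920x1080 background frame
// is an assumption for illustration.
//
//     auto bg = std::make_shared<openshot::Frame>(1, 1920, 1080, "#000000");
//     auto out = clip.GetFrame(bg, 1);  // time-map, waveform, effects,
//                                       // keyframes, then flatten onto bg
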
// Look up an effect by ID
openshot::EffectBase* Clip::GetEffect(const std::string& id)
{
    // Find the matching effect (if any)
    for (const auto& effect : effects) {
        if (effect->Id() == id) {
            return effect;
        }
    }
    return nullptr;
}

// Return the associated ParentClip (if any)
openshot::Clip* Clip::GetParentClip() {
    if (!parentObjectId.empty() && (!parentClipObject && !parentTrackedObject)) {
        // Attach parent clip OR object to this clip
        AttachToObject(parentObjectId);
    }
    return parentClipObject;
}

// Return the associated Parent Tracked Object (if any)
std::shared_ptr<openshot::TrackedObjectBase> Clip::GetParentTrackedObject() {
    if (!parentObjectId.empty() && (!parentClipObject && !parentTrackedObject)) {
        // Attach parent clip OR object to this clip
        AttachToObject(parentObjectId);
    }
    return parentTrackedObject;
}

// Get file extension
std::string Clip::get_file_extension(std::string path)
{
    // return last part of path
    return path.substr(path.find_last_of(".") + 1);
}

// Adjust the audio and image of a time mapped frame
void Clip::apply_timemapping(std::shared_ptr<Frame> frame)
{
    // Check for valid reader
    if (!reader)
        // Throw error if reader not initialized
        throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");

    // Check for a valid time map curve
    if (time.GetLength() > 1)
    {
        const std::lock_guard<std::recursive_mutex> lock(getFrameMutex);

        int64_t clip_frame_number = frame->number;
        int64_t new_frame_number = adjust_frame_number_minimum(time.GetLong(clip_frame_number));

        // create buffer
        juce::AudioBuffer<float> *source_samples = nullptr;

        // Get delta (difference from this frame to the next time mapped frame: Y value)
        double delta = time.GetDelta(clip_frame_number + 1);
        bool is_increasing = time.IsIncreasing(clip_frame_number + 1);

        // Determine length of source audio (in samples)
        // A delta of 1.0 == normal expected samples
        // A delta of 0.5 == 50% of normal expected samples
        // A delta of 2.0 == 200% of normal expected samples
        int target_sample_count = Frame::GetSamplesPerFrame(adjust_timeline_framenumber(clip_frame_number), Reader()->info.fps,
                                                            Reader()->info.sample_rate,
                                                            Reader()->info.channels);
        int source_sample_count = round(target_sample_count * fabs(delta));

        // Determine starting audio location
        AudioLocation location;
        if (previous_location.frame == 0 || abs(new_frame_number - previous_location.frame) > 2) {
            // No previous location OR gap detected
            location.frame = new_frame_number;
            location.sample_start = 0;

            // Create / Reset resampler
            // We don't want to interpolate between unrelated audio data
            if (resampler) {
                delete resampler;
            }
            // Init resampler with # channels from Reader (should match the timeline)
            resampler = new AudioResampler(Reader()->info.channels);

            // Allocate buffer of silence to initialize some data inside the resampler
            // To prevent it from becoming input limited
            juce::AudioBuffer<float> init_samples(Reader()->info.channels, 64);
            init_samples.clear();
            resampler->SetBuffer(&init_samples, 1.0);
            resampler->GetResampledBuffer();

        } else {
            // Use previous location
            location = previous_location;
        }

        if (source_sample_count <= 0) {
            // Add silence and bail (we don't need any samples)
            frame->AddAudioSilence(target_sample_count);
            return;
        }

        // Allocate a new sample buffer for these delta frames
        source_samples = new juce::AudioBuffer<float>(Reader()->info.channels, source_sample_count);
        source_samples->clear();

        // Determine ending audio location
        int remaining_samples = source_sample_count;
        int source_pos = 0;
        while (remaining_samples > 0) {
            std::shared_ptr<Frame> source_frame = GetOrCreateFrame(location.frame, false);
            int frame_sample_count = source_frame->GetAudioSamplesCount() - location.sample_start;

            if (frame_sample_count == 0) {
                // No samples found in source frame (fill with silence)
                if (is_increasing) {
                    location.frame++;
                } else {
                    location.frame--;
                }
                location.sample_start = 0;
                break;
            }
            if (remaining_samples - frame_sample_count >= 0) {
                // Use all frame samples & increment location
                for (int channel = 0; channel < source_frame->GetAudioChannelsCount(); channel++) {
                    source_samples->addFrom(channel, source_pos, source_frame->GetAudioSamples(channel) + location.sample_start, frame_sample_count, 1.0f);
                }
                if (is_increasing) {
                    location.frame++;
                } else {
                    location.frame--;
                }
                location.sample_start = 0;
                remaining_samples -= frame_sample_count;
                source_pos += frame_sample_count;

            } else {
                // Use just what is needed
                for (int channel = 0; channel < source_frame->GetAudioChannelsCount(); channel++) {
                    source_samples->addFrom(channel, source_pos, source_frame->GetAudioSamples(channel) + location.sample_start, remaining_samples, 1.0f);
                }
                location.sample_start += remaining_samples;
                source_pos += remaining_samples;
                remaining_samples = 0;
            }

        }

        // Resize audio for current frame object + fill with silence
        // We are about to clobber this with actual audio data (possibly resampled)
        frame->AddAudioSilence(target_sample_count);

        if (source_sample_count != target_sample_count) {
            // Resample audio (if needed)
            double resample_ratio = double(source_sample_count) / double(target_sample_count);
            resampler->SetBuffer(source_samples, resample_ratio);

            // Resample the data
            juce::AudioBuffer<float> *resampled_buffer = resampler->GetResampledBuffer();

            // Fill the frame with resampled data
            for (int channel = 0; channel < Reader()->info.channels; channel++) {
                // Add the resampled samples to the frame object
                frame->AddAudio(true, channel, 0, resampled_buffer->getReadPointer(channel, 0), std::min(resampled_buffer->getNumSamples(), target_sample_count), 1.0f);
            }
        } else {
            // Fill the frame
            for (int channel = 0; channel < Reader()->info.channels; channel++) {
                // Add the source samples to the frame object (no resampling needed)
                frame->AddAudio(true, channel, 0, source_samples->getReadPointer(channel, 0), target_sample_count, 1.0f);
            }
        }

        // Clean up
        delete source_samples;

        // Set previous location
        previous_location = location;
    }
}

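// Example (illustrative sketch, not part of this file): a 2x speed-up curve.
// Each output frame then consumes ~2 source frames (delta == 2.0), so the
// method above gathers ~200% of the normal sample count and resamples it
// back down to the target count (numbers are assumptions).
//
//     clip.time.AddPoint(1, 1);
//     clip.time.AddPoint(120, 240);  // 120 output frames span 240 source frames
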
// Adjust frame number minimum value
int64_t Clip::adjust_frame_number_minimum(int64_t frame_number)
{
    // Never return a frame number 0 or below
    if (frame_number < 1)
        return 1;
    else
        return frame_number;
}

// Get or generate a blank frame
std::shared_ptr<Frame> Clip::GetOrCreateFrame(int64_t number, bool enable_time)
{
    try {
        // Init to requested frame
        int64_t clip_frame_number = adjust_frame_number_minimum(number);

        // Adjust for time-mapping (if any)
        if (enable_time && time.GetLength() > 1) {
            clip_frame_number = adjust_frame_number_minimum(time.GetLong(clip_frame_number));
        }

        // Debug output
        ZmqLogger::Instance()->AppendDebugMethod(
            "Clip::GetOrCreateFrame (from reader)",
            "number", number, "clip_frame_number", clip_frame_number);

        // Attempt to get a frame (but this could fail if a reader has just been closed)
        auto reader_frame = reader->GetFrame(clip_frame_number);

        // Return real frame
        if (reader_frame) {
            // Override frame # (since time-mapping might have changed it)
            reader_frame->number = number;

            // Create a new copy of reader frame
            // This allows a clip to modify the pixels and audio of this frame without
            // changing the underlying reader's frame data
            auto reader_copy = std::make_shared<Frame>(*reader_frame.get());
            if (has_video.GetInt(number) == 0) {
                // No video, so add transparent pixels
                reader_copy->AddColor(QColor(Qt::transparent));
            }
            if (has_audio.GetInt(number) == 0 || number > reader->info.video_length) {
                // No audio, so include silence (also, mute audio if past end of reader)
                reader_copy->AddAudioSilence(reader_copy->GetAudioSamplesCount());
            }
            return reader_copy;
        }

    } catch (const ReaderClosed & e) {
        // ...
    } catch (const OutOfBoundsFrame & e) {
        // ...
    }

    // Estimate # of samples needed for this frame
    int estimated_samples_in_frame = Frame::GetSamplesPerFrame(number, reader->info.fps, reader->info.sample_rate, reader->info.channels);

    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod(
        "Clip::GetOrCreateFrame (create blank)",
        "number", number,
        "estimated_samples_in_frame", estimated_samples_in_frame);

    // Create blank frame
    auto new_frame = std::make_shared<Frame>(
        number, reader->info.width, reader->info.height,
        "#000000", estimated_samples_in_frame, reader->info.channels);
    new_frame->SampleRate(reader->info.sample_rate);
    new_frame->ChannelsLayout(reader->info.channel_layout);
    new_frame->AddAudioSilence(estimated_samples_in_frame);
    return new_frame;
}

// Generate JSON string of this object
std::string Clip::Json() const {

    // Return formatted string
    return JsonValue().toStyledString();
}

// Get all properties for a specific frame
std::string Clip::PropertiesJSON(int64_t requested_frame) const {

    // Generate JSON properties list
    Json::Value root;
    root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame);
    root["position"] = add_property_json("Position", Position(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
    root["layer"] = add_property_json("Track", Layer(), "int", "", NULL, 0, 20, false, requested_frame);
    root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
    root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
    root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 30 * 60 * 60 * 48, true, requested_frame);
    root["gravity"] = add_property_json("Gravity", gravity, "int", "", NULL, 0, 8, false, requested_frame);
    root["scale"] = add_property_json("Scale", scale, "int", "", NULL, 0, 3, false, requested_frame);
    root["display"] = add_property_json("Frame Number", display, "int", "", NULL, 0, 3, false, requested_frame);
    root["mixing"] = add_property_json("Volume Mixing", mixing, "int", "", NULL, 0, 2, false, requested_frame);
    root["waveform"] = add_property_json("Waveform", waveform, "int", "", NULL, 0, 1, false, requested_frame);
    root["parentObjectId"] = add_property_json("Parent", 0.0, "string", parentObjectId, NULL, -1, -1, false, requested_frame);

    // Add gravity choices (dropdown style)
    root["gravity"]["choices"].append(add_property_choice_json("Top Left", GRAVITY_TOP_LEFT, gravity));
    root["gravity"]["choices"].append(add_property_choice_json("Top Center", GRAVITY_TOP, gravity));
    root["gravity"]["choices"].append(add_property_choice_json("Top Right", GRAVITY_TOP_RIGHT, gravity));
    root["gravity"]["choices"].append(add_property_choice_json("Left", GRAVITY_LEFT, gravity));
    root["gravity"]["choices"].append(add_property_choice_json("Center", GRAVITY_CENTER, gravity));
    root["gravity"]["choices"].append(add_property_choice_json("Right", GRAVITY_RIGHT, gravity));
    root["gravity"]["choices"].append(add_property_choice_json("Bottom Left", GRAVITY_BOTTOM_LEFT, gravity));
    root["gravity"]["choices"].append(add_property_choice_json("Bottom Center", GRAVITY_BOTTOM, gravity));
    root["gravity"]["choices"].append(add_property_choice_json("Bottom Right", GRAVITY_BOTTOM_RIGHT, gravity));

    // Add scale choices (dropdown style)
    root["scale"]["choices"].append(add_property_choice_json("Crop", SCALE_CROP, scale));
    root["scale"]["choices"].append(add_property_choice_json("Best Fit", SCALE_FIT, scale));
    root["scale"]["choices"].append(add_property_choice_json("Stretch", SCALE_STRETCH, scale));
    root["scale"]["choices"].append(add_property_choice_json("None", SCALE_NONE, scale));

    // Add frame number display choices (dropdown style)
    root["display"]["choices"].append(add_property_choice_json("None", FRAME_DISPLAY_NONE, display));
    root["display"]["choices"].append(add_property_choice_json("Clip", FRAME_DISPLAY_CLIP, display));
    root["display"]["choices"].append(add_property_choice_json("Timeline", FRAME_DISPLAY_TIMELINE, display));
    root["display"]["choices"].append(add_property_choice_json("Both", FRAME_DISPLAY_BOTH, display));

    // Add volume mixing choices (dropdown style)
    root["mixing"]["choices"].append(add_property_choice_json("None", VOLUME_MIX_NONE, mixing));
    root["mixing"]["choices"].append(add_property_choice_json("Average", VOLUME_MIX_AVERAGE, mixing));
    root["mixing"]["choices"].append(add_property_choice_json("Reduce", VOLUME_MIX_REDUCE, mixing));

    // Add waveform choices (dropdown style)
    root["waveform"]["choices"].append(add_property_choice_json("Yes", true, waveform));
    root["waveform"]["choices"].append(add_property_choice_json("No", false, waveform));

    // Add the parentClipObject's properties
    if (parentClipObject)
    {
        // Convert Clip's frame position to Timeline's frame position
        long clip_start_position = round(Position() * info.fps.ToDouble()) + 1;
        long clip_start_frame = (Start() * info.fps.ToDouble()) + 1;
        double timeline_frame_number = requested_frame + clip_start_position - clip_start_frame;

        // Correct the parent Clip Object properties by the clip's reference system
        float parentObject_location_x = parentClipObject->location_x.GetValue(timeline_frame_number);
        float parentObject_location_y = parentClipObject->location_y.GetValue(timeline_frame_number);
        float parentObject_scale_x = parentClipObject->scale_x.GetValue(timeline_frame_number);
        float parentObject_scale_y = parentClipObject->scale_y.GetValue(timeline_frame_number);
        float parentObject_shear_x = parentClipObject->shear_x.GetValue(timeline_frame_number);
        float parentObject_shear_y = parentClipObject->shear_y.GetValue(timeline_frame_number);
        float parentObject_rotation = parentClipObject->rotation.GetValue(timeline_frame_number);

        // Add the parent Clip Object properties to JSON
        root["location_x"] = add_property_json("Location X", parentObject_location_x, "float", "", &location_x, -1.0, 1.0, false, requested_frame);
        root["location_y"] = add_property_json("Location Y", parentObject_location_y, "float", "", &location_y, -1.0, 1.0, false, requested_frame);
        root["scale_x"] = add_property_json("Scale X", parentObject_scale_x, "float", "", &scale_x, 0.0, 1.0, false, requested_frame);
        root["scale_y"] = add_property_json("Scale Y", parentObject_scale_y, "float", "", &scale_y, 0.0, 1.0, false, requested_frame);
        root["rotation"] = add_property_json("Rotation", parentObject_rotation, "float", "", &rotation, -360, 360, false, requested_frame);
        root["shear_x"] = add_property_json("Shear X", parentObject_shear_x, "float", "", &shear_x, -1.0, 1.0, false, requested_frame);
        root["shear_y"] = add_property_json("Shear Y", parentObject_shear_y, "float", "", &shear_y, -1.0, 1.0, false, requested_frame);
    }
    else
    {
        // Add this clip's own properties to JSON
        root["location_x"] = add_property_json("Location X", location_x.GetValue(requested_frame), "float", "", &location_x, -1.0, 1.0, false, requested_frame);
        root["location_y"] = add_property_json("Location Y", location_y.GetValue(requested_frame), "float", "", &location_y, -1.0, 1.0, false, requested_frame);
        root["scale_x"] = add_property_json("Scale X", scale_x.GetValue(requested_frame), "float", "", &scale_x, 0.0, 1.0, false, requested_frame);
        root["scale_y"] = add_property_json("Scale Y", scale_y.GetValue(requested_frame), "float", "", &scale_y, 0.0, 1.0, false, requested_frame);
        root["rotation"] = add_property_json("Rotation", rotation.GetValue(requested_frame), "float", "", &rotation, -360, 360, false, requested_frame);
        root["shear_x"] = add_property_json("Shear X", shear_x.GetValue(requested_frame), "float", "", &shear_x, -1.0, 1.0, false, requested_frame);
        root["shear_y"] = add_property_json("Shear Y", shear_y.GetValue(requested_frame), "float", "", &shear_y, -1.0, 1.0, false, requested_frame);
    }

    // Keyframes
    root["alpha"] = add_property_json("Alpha", alpha.GetValue(requested_frame), "float", "", &alpha, 0.0, 1.0, false, requested_frame);
    root["origin_x"] = add_property_json("Origin X", origin_x.GetValue(requested_frame), "float", "", &origin_x, 0.0, 1.0, false, requested_frame);
    root["origin_y"] = add_property_json("Origin Y", origin_y.GetValue(requested_frame), "float", "", &origin_y, 0.0, 1.0, false, requested_frame);
    root["volume"] = add_property_json("Volume", volume.GetValue(requested_frame), "float", "", &volume, 0.0, 1.0, false, requested_frame);
    root["time"] = add_property_json("Time", time.GetValue(requested_frame), "float", "", &time, 0.0, 30 * 60 * 60 * 48, false, requested_frame);
    root["channel_filter"] = add_property_json("Channel Filter", channel_filter.GetValue(requested_frame), "int", "", &channel_filter, -1, 10, false, requested_frame);
    root["channel_mapping"] = add_property_json("Channel Mapping", channel_mapping.GetValue(requested_frame), "int", "", &channel_mapping, -1, 10, false, requested_frame);
    root["has_audio"] = add_property_json("Enable Audio", has_audio.GetValue(requested_frame), "int", "", &has_audio, -1, 1.0, false, requested_frame);
    root["has_video"] = add_property_json("Enable Video", has_video.GetValue(requested_frame), "int", "", &has_video, -1, 1.0, false, requested_frame);

    // Add enable audio/video choices (dropdown style)
    root["has_audio"]["choices"].append(add_property_choice_json("Auto", -1, has_audio.GetValue(requested_frame)));
    root["has_audio"]["choices"].append(add_property_choice_json("Off", 0, has_audio.GetValue(requested_frame)));
    root["has_audio"]["choices"].append(add_property_choice_json("On", 1, has_audio.GetValue(requested_frame)));
    root["has_video"]["choices"].append(add_property_choice_json("Auto", -1, has_video.GetValue(requested_frame)));
    root["has_video"]["choices"].append(add_property_choice_json("Off", 0, has_video.GetValue(requested_frame)));
    root["has_video"]["choices"].append(add_property_choice_json("On", 1, has_video.GetValue(requested_frame)));

    root["wave_color"] = add_property_json("Wave Color", 0.0, "color", "", &wave_color.red, 0, 255, false, requested_frame);
    root["wave_color"]["red"] = add_property_json("Red", wave_color.red.GetValue(requested_frame), "float", "", &wave_color.red, 0, 255, false, requested_frame);
    root["wave_color"]["blue"] = add_property_json("Blue", wave_color.blue.GetValue(requested_frame), "float", "", &wave_color.blue, 0, 255, false, requested_frame);
    root["wave_color"]["green"] = add_property_json("Green", wave_color.green.GetValue(requested_frame), "float", "", &wave_color.green, 0, 255, false, requested_frame);

    // Return formatted string
    return root.toStyledString();
}

// Generate Json::Value for this object
Json::Value Clip::JsonValue() const {

    // Create root json object
    Json::Value root = ClipBase::JsonValue(); // get parent properties
    root["parentObjectId"] = parentObjectId;
    root["gravity"] = gravity;
    root["scale"] = scale;
    root["anchor"] = anchor;
    root["display"] = display;
    root["mixing"] = mixing;
    root["waveform"] = waveform;
    root["scale_x"] = scale_x.JsonValue();
    root["scale_y"] = scale_y.JsonValue();
    root["location_x"] = location_x.JsonValue();
    root["location_y"] = location_y.JsonValue();
    root["alpha"] = alpha.JsonValue();
    root["rotation"] = rotation.JsonValue();
    root["time"] = time.JsonValue();
    root["volume"] = volume.JsonValue();
    root["wave_color"] = wave_color.JsonValue();
    root["shear_x"] = shear_x.JsonValue();
    root["shear_y"] = shear_y.JsonValue();
    root["origin_x"] = origin_x.JsonValue();
    root["origin_y"] = origin_y.JsonValue();
    root["channel_filter"] = channel_filter.JsonValue();
    root["channel_mapping"] = channel_mapping.JsonValue();
    root["has_audio"] = has_audio.JsonValue();
    root["has_video"] = has_video.JsonValue();
    root["perspective_c1_x"] = perspective_c1_x.JsonValue();
    root["perspective_c1_y"] = perspective_c1_y.JsonValue();
    root["perspective_c2_x"] = perspective_c2_x.JsonValue();
    root["perspective_c2_y"] = perspective_c2_y.JsonValue();
    root["perspective_c3_x"] = perspective_c3_x.JsonValue();
    root["perspective_c3_y"] = perspective_c3_y.JsonValue();
    root["perspective_c4_x"] = perspective_c4_x.JsonValue();
    root["perspective_c4_y"] = perspective_c4_y.JsonValue();

    // Add array of effects
    root["effects"] = Json::Value(Json::arrayValue);

    // loop through effects
    for (auto existing_effect : effects)
    {
        root["effects"].append(existing_effect->JsonValue());
    }

    if (reader)
        root["reader"] = reader->JsonValue();
    else
        root["reader"] = Json::Value(Json::objectValue);

    // return JsonValue
    return root;
}

// Load JSON string into this object
void Clip::SetJson(const std::string value) {

    // Parse JSON string into JSON objects
    try
    {
        const Json::Value root = openshot::stringToJson(value);
        // Set all values that match
        SetJsonValue(root);
    }
    catch (const std::exception& e)
    {
        // Error parsing JSON (or missing keys)
        throw InvalidJSON("JSON is invalid (missing keys or invalid data types)");
    }
}

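// Example (illustrative sketch, not part of this file): round-tripping a clip
// through JSON with Json() and SetJson().
//
//     std::string json = clip.Json();  // serialize settings + keyframes
//     openshot::Clip restored;
//     restored.SetJson(json);          // rebuild an equivalent clip
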
// Load Json::Value into this object
void Clip::SetJsonValue(const Json::Value root) {

    // Set parent data
    ClipBase::SetJsonValue(root);

    // Set data from Json (if key is found)
    if (!root["parentObjectId"].isNull()){
        parentObjectId = root["parentObjectId"].asString();
        if (!parentObjectId.empty()){
            AttachToObject(parentObjectId);
        } else {
            parentTrackedObject = nullptr;
            parentClipObject = NULL;
        }
    }
    if (!root["gravity"].isNull())
        gravity = (GravityType) root["gravity"].asInt();
    if (!root["scale"].isNull())
        scale = (ScaleType) root["scale"].asInt();
    if (!root["anchor"].isNull())
        anchor = (AnchorType) root["anchor"].asInt();
    if (!root["display"].isNull())
        display = (FrameDisplayType) root["display"].asInt();
    if (!root["mixing"].isNull())
        mixing = (VolumeMixType) root["mixing"].asInt();
    if (!root["waveform"].isNull())
        waveform = root["waveform"].asBool();
    if (!root["scale_x"].isNull())
        scale_x.SetJsonValue(root["scale_x"]);
    if (!root["scale_y"].isNull())
        scale_y.SetJsonValue(root["scale_y"]);
    if (!root["location_x"].isNull())
        location_x.SetJsonValue(root["location_x"]);
    if (!root["location_y"].isNull())
        location_y.SetJsonValue(root["location_y"]);
    if (!root["alpha"].isNull())
        alpha.SetJsonValue(root["alpha"]);
    if (!root["rotation"].isNull())
        rotation.SetJsonValue(root["rotation"]);
    if (!root["time"].isNull())
        time.SetJsonValue(root["time"]);
    if (!root["volume"].isNull())
        volume.SetJsonValue(root["volume"]);
    if (!root["wave_color"].isNull())
        wave_color.SetJsonValue(root["wave_color"]);
    if (!root["shear_x"].isNull())
        shear_x.SetJsonValue(root["shear_x"]);
    if (!root["shear_y"].isNull())
        shear_y.SetJsonValue(root["shear_y"]);
    if (!root["origin_x"].isNull())
        origin_x.SetJsonValue(root["origin_x"]);
    if (!root["origin_y"].isNull())
        origin_y.SetJsonValue(root["origin_y"]);
    if (!root["channel_filter"].isNull())
        channel_filter.SetJsonValue(root["channel_filter"]);
    if (!root["channel_mapping"].isNull())
        channel_mapping.SetJsonValue(root["channel_mapping"]);
    if (!root["has_audio"].isNull())
        has_audio.SetJsonValue(root["has_audio"]);
    if (!root["has_video"].isNull())
        has_video.SetJsonValue(root["has_video"]);
    if (!root["perspective_c1_x"].isNull())
        perspective_c1_x.SetJsonValue(root["perspective_c1_x"]);
    if (!root["perspective_c1_y"].isNull())
        perspective_c1_y.SetJsonValue(root["perspective_c1_y"]);
    if (!root["perspective_c2_x"].isNull())
        perspective_c2_x.SetJsonValue(root["perspective_c2_x"]);
    if (!root["perspective_c2_y"].isNull())
        perspective_c2_y.SetJsonValue(root["perspective_c2_y"]);
    if (!root["perspective_c3_x"].isNull())
        perspective_c3_x.SetJsonValue(root["perspective_c3_x"]);
    if (!root["perspective_c3_y"].isNull())
        perspective_c3_y.SetJsonValue(root["perspective_c3_y"]);
    if (!root["perspective_c4_x"].isNull())
        perspective_c4_x.SetJsonValue(root["perspective_c4_x"]);
    if (!root["perspective_c4_y"].isNull())
        perspective_c4_y.SetJsonValue(root["perspective_c4_y"]);
    if (!root["effects"].isNull()) {

        // Clear existing effects
        effects.clear();

        // loop through effects
        for (const auto& existing_effect : root["effects"]) {
            // Skip NULL nodes
            if (existing_effect.isNull()) {
                continue;
            }

            // Create Effect
            EffectBase *e = NULL;
            if (!existing_effect["type"].isNull()) {

                // Create instance of effect
                if ( (e = EffectInfo().CreateEffect(existing_effect["type"].asString()))) {

                    // Load Json into Effect
                    e->SetJsonValue(existing_effect);

                    // Add Effect to Timeline
                    AddEffect(e);
                }
            }
        }
    }
    if (!root["reader"].isNull()) // does Json contain a reader?
    {
        if (!root["reader"]["type"].isNull()) // does the reader Json contain a 'type'?
        {
            // Close previous reader (if any)
            bool already_open = false;
            if (reader)
            {
                // Track if reader was open
                already_open = reader->IsOpen();

                // Close and delete existing allocated reader (if any)
                Reader(NULL);
            }

            // Create new reader (and load properties)
            std::string type = root["reader"]["type"].asString();

            if (type == "FFmpegReader") {

                // Create new reader
                reader = new openshot::FFmpegReader(root["reader"]["path"].asString(), false);
                reader->SetJsonValue(root["reader"]);

            } else if (type == "QtImageReader") {

                // Create new reader
                reader = new openshot::QtImageReader(root["reader"]["path"].asString(), false);
                reader->SetJsonValue(root["reader"]);

#ifdef USE_IMAGEMAGICK
            } else if (type == "ImageReader") {

                // Create new reader
                reader = new ImageReader(root["reader"]["path"].asString(), false);
                reader->SetJsonValue(root["reader"]);

            } else if (type == "TextReader") {

                // Create new reader
                reader = new TextReader();
                reader->SetJsonValue(root["reader"]);
#endif

            } else if (type == "ChunkReader") {

                // Create new reader
                reader = new openshot::ChunkReader(root["reader"]["path"].asString(), (ChunkVersion) root["reader"]["chunk_version"].asInt());
                reader->SetJsonValue(root["reader"]);

            } else if (type == "DummyReader") {

                // Create new reader
                reader = new openshot::DummyReader();
                reader->SetJsonValue(root["reader"]);

            } else if (type == "Timeline") {

                // Create new reader (always load from file again)
                // This prevents FrameMappers from being loaded by accident
                reader = new openshot::Timeline(root["reader"]["path"].asString(), true);
            }

            // mark as managed reader and set parent
            if (reader) {
                reader->ParentClip(this);
                allocated_reader = reader;
            }

            // Re-Open reader (if needed)
            if (reader && already_open) {
                reader->Open();
            }
        }
    }

    // Clear cache (it might have changed)
    final_cache.Clear();
}

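// Example (illustrative sketch, not part of this file): the minimal JSON shape
// SetJsonValue() expects when swapping in a new reader; the path is an
// assumption for illustration.
//
//     { "reader": { "type": "FFmpegReader", "path": "video.mp4" } }
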
// Sort effects by order
void Clip::sort_effects()
{
    // sort effects
    effects.sort(CompareClipEffects());
}

// Add an effect to the clip
void Clip::AddEffect(EffectBase* effect)
{
    // Set parent clip pointer
    effect->ParentClip(this);

    // Add effect to list
    effects.push_back(effect);

    // Sort effects
    sort_effects();

    // Get the parent timeline of this clip
    Timeline* parentTimeline = static_cast<Timeline *>(ParentTimeline());

    if (parentTimeline)
        effect->ParentTimeline(parentTimeline);

#ifdef USE_OPENCV
    // Add Tracked Object to Timeline
    if (effect->info.has_tracked_object){

        // Check if this clip has a parent timeline
        if (parentTimeline){

            effect->ParentTimeline(parentTimeline);

            // Iterate through effect's vector of Tracked Objects
            for (auto const& trackedObject : effect->trackedObjects){

                // Cast the Tracked Object as TrackedObjectBBox
                std::shared_ptr<TrackedObjectBBox> trackedObjectBBox = std::static_pointer_cast<TrackedObjectBBox>(trackedObject.second);

                // Set the Tracked Object's parent clip to this
                trackedObjectBBox->ParentClip(this);

                // Add the Tracked Object to the timeline
                parentTimeline->AddTrackedObject(trackedObjectBBox);
            }
        }
    }
#endif

    // Clear cache (it might have changed)
    final_cache.Clear();
}

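// Example (illustrative sketch, not part of this file): adding a stock effect;
// openshot::Negate is used here only as a simple example.
//
//     openshot::Negate negate;
//     clip.AddEffect(&negate);  // effects are re-sorted and the cache cleared
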
// Remove an effect from the clip
void Clip::RemoveEffect(EffectBase* effect)
{
    effects.remove(effect);

    // Clear cache (it might have changed)
    final_cache.Clear();
}

// Apply background image to the current clip image (i.e. flatten this image onto previous layer)
void Clip::apply_background(std::shared_ptr<openshot::Frame> frame, std::shared_ptr<openshot::Frame> background_frame) {
    // Add background canvas
    std::shared_ptr<QImage> background_canvas = background_frame->GetImage();
    QPainter painter(background_canvas.get());
    painter.setRenderHints(QPainter::Antialiasing | QPainter::SmoothPixmapTransform | QPainter::TextAntialiasing, true);

    // Composite a new layer onto the image
    painter.setCompositionMode(QPainter::CompositionMode_SourceOver);
    painter.drawImage(0, 0, *frame->GetImage());
    painter.end();

    // Add new QImage to frame
    frame->AddImage(background_canvas);
}

// Apply effects to the source frame (if any)
void Clip::apply_effects(std::shared_ptr<Frame> frame, int64_t timeline_frame_number, TimelineInfoStruct* options, bool before_keyframes)
{
    for (auto effect : effects)
    {
        // Apply the effect to this frame
        if (effect->info.apply_before_clip && before_keyframes) {
            effect->GetFrame(frame, frame->number);
        } else if (!effect->info.apply_before_clip && !before_keyframes) {
            effect->GetFrame(frame, frame->number);
        }
    }

    if (timeline != NULL && options != NULL) {
        // Apply global timeline effects (i.e. transitions & masks... if any)
        Timeline* timeline_instance = static_cast<Timeline*>(timeline);
        options->is_before_clip_keyframes = before_keyframes;
        timeline_instance->apply_effects(frame, timeline_frame_number, Layer(), options);
    }
}

// Compare 2 floating point numbers for equality
bool Clip::isNear(double a, double b)
{
    return fabs(a - b) < 0.000001;
}

// Apply keyframes to the source frame (if any)
void Clip::apply_keyframes(std::shared_ptr<Frame> frame, QSize timeline_size) {
    // Skip out if video was disabled or only an audio frame (no visualisation in use)
    if (!frame->has_image_data) {
        // Skip the rest of the image processing for performance reasons
        return;
    }

    // Get image from clip, and create transparent background image
    std::shared_ptr<QImage> source_image = frame->GetImage();
    std::shared_ptr<QImage> background_canvas = std::make_shared<QImage>(timeline_size.width(),
                                                                         timeline_size.height(),
                                                                         QImage::Format_RGBA8888_Premultiplied);
    background_canvas->fill(QColor(Qt::transparent));

    // Get transform from clip's keyframes
    QTransform transform = get_transform(frame, background_canvas->width(), background_canvas->height());

    // Load timeline's new frame image into a QPainter
    QPainter painter(background_canvas.get());
    painter.setRenderHints(QPainter::Antialiasing | QPainter::SmoothPixmapTransform | QPainter::TextAntialiasing, true);

    // Apply transform (translate, rotate, scale)
    painter.setTransform(transform);

    // Composite a new layer onto the image
    painter.setCompositionMode(QPainter::CompositionMode_SourceOver);
    painter.drawImage(0, 0, *source_image);

    if (timeline) {
        Timeline *t = static_cast<Timeline *>(timeline);

        // Draw frame #'s on top of image (if needed)
        if (display != FRAME_DISPLAY_NONE) {
            std::stringstream frame_number_str;
            switch (display) {
                case (FRAME_DISPLAY_NONE):
                    // This is only here to prevent unused-enum warnings
                    break;

                case (FRAME_DISPLAY_CLIP):
                    frame_number_str << frame->number;
                    break;

                case (FRAME_DISPLAY_TIMELINE):
                    frame_number_str << round((Position() - Start()) * t->info.fps.ToFloat()) + frame->number;
                    break;

                case (FRAME_DISPLAY_BOTH):
                    frame_number_str << round((Position() - Start()) * t->info.fps.ToFloat()) + frame->number << " (" << frame->number << ")";
                    break;
            }

            // Draw frame number on top of image
            painter.setPen(QColor("#ffffff"));
            painter.drawText(20, 20, QString(frame_number_str.str().c_str()));
        }
    }
    painter.end();

    // Add new QImage to frame
    frame->AddImage(background_canvas);
}

// Apply waveform image to the source frame (if any)
void Clip::apply_waveform(std::shared_ptr<Frame> frame, QSize timeline_size) {

    if (!Waveform()) {
        // Exit if no waveform is needed
        return;
    }

    // Get image from clip
    std::shared_ptr<QImage> source_image = frame->GetImage();

    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod("Clip::apply_waveform (Generate Waveform Image)",
        "frame->number", frame->number,
        "Waveform()", Waveform(),
        "width", timeline_size.width(),
        "height", timeline_size.height());

    // Get the color of the waveform
    int red = wave_color.red.GetInt(frame->number);
    int green = wave_color.green.GetInt(frame->number);
    int blue = wave_color.blue.GetInt(frame->number);
    int alpha = wave_color.alpha.GetInt(frame->number);

    // Generate Waveform Dynamically (the size of the timeline)
    source_image = frame->GetWaveform(timeline_size.width(), timeline_size.height(), red, green, blue, alpha);
    frame->AddImage(source_image);
}

// Scale a source size to a target size (given a specific scale-type)
QSize Clip::scale_size(QSize source_size, ScaleType source_scale, int target_width, int target_height) {
    switch (source_scale)
    {
        case (SCALE_FIT): {
            source_size.scale(target_width, target_height, Qt::KeepAspectRatio);
            break;
        }
        case (SCALE_STRETCH): {
            source_size.scale(target_width, target_height, Qt::IgnoreAspectRatio);
            break;
        }
        case (SCALE_CROP): {
            source_size.scale(target_width, target_height, Qt::KeepAspectRatioByExpanding);
            break;
        }
        case (SCALE_NONE): {
            // Keep the original source size (also prevents unused-enum warnings)
            break;
        }
    }

    return source_size;
}

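// Example (illustrative sketch, not part of this file): a 640x480 source
// scaled into a 1920x1080 target by each scale type.
//
//     SCALE_FIT     -> 1440x1080  (aspect kept, fits inside target)
//     SCALE_STRETCH -> 1920x1080  (aspect ignored)
//     SCALE_CROP    -> 1920x1440  (aspect kept, expands past target)
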
// Get QTransform from keyframes
QTransform Clip::get_transform(std::shared_ptr<Frame> frame, int width, int height)
{
    // Get image from clip
    std::shared_ptr<QImage> source_image = frame->GetImage();

    /* ALPHA & OPACITY */
    if (alpha.GetValue(frame->number) != 1.0)
    {
        float alpha_value = alpha.GetValue(frame->number);

        // Get source image's pixels
        unsigned char *pixels = source_image->bits();

        // Loop through pixels
        for (int pixel = 0, byte_index = 0; pixel < source_image->width() * source_image->height(); pixel++, byte_index += 4)
        {
            // Apply alpha to pixel values (since we use a premultiplied value, we must
            // multiply the alpha with all colors).
            pixels[byte_index + 0] *= alpha_value;
            pixels[byte_index + 1] *= alpha_value;
            pixels[byte_index + 2] *= alpha_value;
            pixels[byte_index + 3] *= alpha_value;
        }

        // Debug output
        ZmqLogger::Instance()->AppendDebugMethod("Clip::get_transform (Set Alpha & Opacity)",
            "alpha_value", alpha_value,
            "frame->number", frame->number);
    }

    /* RESIZE SOURCE IMAGE - based on scale type */
    QSize source_size = scale_size(source_image->size(), scale, width, height);

    // Initialize parent object's properties (Clip or Tracked Object)
    float parentObject_location_x = 0.0;
    float parentObject_location_y = 0.0;
    float parentObject_scale_x = 1.0;
    float parentObject_scale_y = 1.0;
    float parentObject_shear_x = 0.0;
    float parentObject_shear_y = 0.0;
    float parentObject_rotation = 0.0;

    // Get the parentClipObject properties
    if (GetParentClip()) {
        // Get the start trim position of the parent clip
        long parent_start_offset = parentClipObject->Start() * info.fps.ToDouble();
        long parent_frame_number = frame->number + parent_start_offset;

        // Get parent object's properties (Clip)
        parentObject_location_x = parentClipObject->location_x.GetValue(parent_frame_number);
        parentObject_location_y = parentClipObject->location_y.GetValue(parent_frame_number);
        parentObject_scale_x = parentClipObject->scale_x.GetValue(parent_frame_number);
        parentObject_scale_y = parentClipObject->scale_y.GetValue(parent_frame_number);
        parentObject_shear_x = parentClipObject->shear_x.GetValue(parent_frame_number);
        parentObject_shear_y = parentClipObject->shear_y.GetValue(parent_frame_number);
        parentObject_rotation = parentClipObject->rotation.GetValue(parent_frame_number);
    }

    // Get the parentTrackedObject properties
    if (GetParentTrackedObject()) {
        // Get the attached object's parent clip's properties
        Clip* parentClip = static_cast<Clip*>(parentTrackedObject->ParentClip());
        if (parentClip)
        {
            // Get the start trim position of the parent clip
            long parent_start_offset = parentClip->Start() * info.fps.ToDouble();
            long parent_frame_number = frame->number + parent_start_offset;

            // Access the parentTrackedObject's properties
            std::map<std::string, float> trackedObjectProperties = parentTrackedObject->GetBoxValues(parent_frame_number);

            // Get actual scaled parent size
            QSize parent_size = scale_size(QSize(parentClip->info.width, parentClip->info.height),
                                           parentClip->scale, width, height);

            // Get actual scaled tracked object size
            int trackedWidth = trackedObjectProperties["w"] * trackedObjectProperties["sx"] * parent_size.width() *
                               parentClip->scale_x.GetValue(parent_frame_number);
            int trackedHeight = trackedObjectProperties["h"] * trackedObjectProperties["sy"] * parent_size.height() *
                                parentClip->scale_y.GetValue(parent_frame_number);

            // Scale the clip source_size based on the actual tracked object size
            source_size = scale_size(source_size, scale, trackedWidth, trackedHeight);

            // Update parentObject's properties based on the tracked object's properties and parent clip's scale
            parentObject_location_x = parentClip->location_x.GetValue(parent_frame_number) + ((trackedObjectProperties["cx"] - 0.5) * parentClip->scale_x.GetValue(parent_frame_number));
            parentObject_location_y = parentClip->location_y.GetValue(parent_frame_number) + ((trackedObjectProperties["cy"] - 0.5) * parentClip->scale_y.GetValue(parent_frame_number));
            parentObject_rotation = trackedObjectProperties["r"] + parentClip->rotation.GetValue(parent_frame_number);
        }
    }

    /* GRAVITY LOCATION - Initialize X & Y to the correct values (before applying location curves) */
    float x = 0.0; // left
    float y = 0.0; // top

    // Adjust size for scale x and scale y
    float sx = scale_x.GetValue(frame->number); // percentage X scale
    float sy = scale_y.GetValue(frame->number); // percentage Y scale

    // Change clip's scale to parentObject's scale
    if (parentObject_scale_x != 0.0 && parentObject_scale_y != 0.0) {
        sx *= parentObject_scale_x;
        sy *= parentObject_scale_y;
    }

    float scaled_source_width = source_size.width() * sx;
    float scaled_source_height = source_size.height() * sy;

    switch (gravity)
    {
        case (GRAVITY_TOP_LEFT):
            // This is only here to prevent unused-enum warnings
            break;
        case (GRAVITY_TOP):
            x = (width - scaled_source_width) / 2.0; // center
            break;
        case (GRAVITY_TOP_RIGHT):
            x = width - scaled_source_width; // right
            break;
        case (GRAVITY_LEFT):
            y = (height - scaled_source_height) / 2.0; // center
            break;
        case (GRAVITY_CENTER):
            x = (width - scaled_source_width) / 2.0; // center
            y = (height - scaled_source_height) / 2.0; // center
            break;
        case (GRAVITY_RIGHT):
            x = width - scaled_source_width; // right
            y = (height - scaled_source_height) / 2.0; // center
            break;
        case (GRAVITY_BOTTOM_LEFT):
            y = (height - scaled_source_height); // bottom
            break;
        case (GRAVITY_BOTTOM):
            x = (width - scaled_source_width) / 2.0; // center
            y = (height - scaled_source_height); // bottom
            break;
        case (GRAVITY_BOTTOM_RIGHT):
            x = width - scaled_source_width; // right
            y = (height - scaled_source_height); // bottom
            break;
    }

    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod(
        "Clip::get_transform (Gravity)",
        "frame->number", frame->number,
        "source_clip->gravity", gravity,
        "scaled_source_width", scaled_source_width,
        "scaled_source_height", scaled_source_height);

    QTransform transform;

    /* LOCATION, ROTATION, AND SCALE */
    float r = rotation.GetValue(frame->number) + parentObject_rotation; // rotate in degrees
    x += width * (location_x.GetValue(frame->number) + parentObject_location_x); // move in percentage of final width
    y += height * (location_y.GetValue(frame->number) + parentObject_location_y); // move in percentage of final height
    float shear_x_value = shear_x.GetValue(frame->number) + parentObject_shear_x;
    float shear_y_value = shear_y.GetValue(frame->number) + parentObject_shear_y;
    float origin_x_value = origin_x.GetValue(frame->number);
    float origin_y_value = origin_y.GetValue(frame->number);

    // Transform source image (if needed)
    ZmqLogger::Instance()->AppendDebugMethod(
        "Clip::get_transform (Build QTransform - if needed)",
        "frame->number", frame->number,
        "x", x, "y", y,
        "r", r,
        "sx", sx, "sy", sy);

    if (!isNear(x, 0) || !isNear(y, 0)) {
        // TRANSLATE/MOVE CLIP
        transform.translate(x, y);
    }
    if (!isNear(r, 0) || !isNear(shear_x_value, 0) || !isNear(shear_y_value, 0)) {
        // ROTATE CLIP (around origin_x, origin_y)
        float origin_x_offset = (scaled_source_width * origin_x_value);
        float origin_y_offset = (scaled_source_height * origin_y_value);
        transform.translate(origin_x_offset, origin_y_offset);
        transform.rotate(r);
        transform.shear(shear_x_value, shear_y_value);
        transform.translate(-origin_x_offset, -origin_y_offset);
    }
    // SCALE CLIP (if needed)
    float source_width_scale = (float(source_size.width()) / float(source_image->width())) * sx;
    float source_height_scale = (float(source_size.height()) / float(source_image->height())) * sy;
    if (!isNear(source_width_scale, 1.0) || !isNear(source_height_scale, 1.0)) {
        transform.scale(source_width_scale, source_height_scale);
    }

    return transform;
}

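// Example (illustrative sketch, not part of this file): for GRAVITY_CENTER, a
// source scaled to 960x540 on a 1920x1080 canvas starts at
// x = (1920 - 960) / 2 = 480 and y = (1080 - 540) / 2 = 270, before the
// location_x / location_y offsets (fractions of canvas size) are added.
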
1523// Adjust frame number for Clip position and start (which can result in a different number)
1524int64_t Clip::adjust_timeline_framenumber(int64_t clip_frame_number) {
1525
1526 // Get clip position from parent clip (if any)
1527 float position = 0.0;
1528 float start = 0.0;
1529 Clip *parent = static_cast<Clip *>(ParentClip());
1530 if (parent) {
1531 position = parent->Position();
1532 start = parent->Start();
1533 }
1534
1535 // Adjust start frame and position based on parent clip.
1536 // This ensures the same frame # is used by mapped readers and clips,
1537 // when calculating samples per frame.
1538 // Thus, this prevents gaps and mismatches in # of samples.
1539 int64_t clip_start_frame = (start * info.fps.ToDouble()) + 1;
1540 int64_t clip_start_position = round(position * info.fps.ToDouble()) + 1;
1541 int64_t frame_number = clip_frame_number + clip_start_position - clip_start_frame;
1542
1543 return frame_number;
1544}
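As a worked example of the offset math above (hypothetical values, mirroring the formulas rather than calling the real method): a parent clip with Position() = 2.0 s and Start() = 0.5 s at 30 fps yields clip_start_frame = 16 and clip_start_position = 61, so every clip frame number is shifted forward by 45 frames.

#include <cmath>
#include <cstdint>

int64_t example_adjust(int64_t clip_frame_number)
{
    const double fps = 30.0;  // stand-in for info.fps.ToDouble()
    const int64_t clip_start_frame = static_cast<int64_t>(0.5 * fps) + 1;                // 16
    const int64_t clip_start_position = static_cast<int64_t>(std::round(2.0 * fps)) + 1; // 61
    return clip_frame_number + clip_start_position - clip_start_frame;                   // e.g. 1 -> 46
}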