/**
 * @file
 * @brief Source file for Clip class
 * @author Jonathan Thomas <jonathan@openshot.org>
 *
 * @ref License
 */

/* LICENSE
 *
 * Copyright (c) 2008-2019 OpenShot Studios, LLC
 * <http://www.openshotstudios.com/>. This file is part of
 * OpenShot Library (libopenshot), an open-source project dedicated to
 * delivering high quality video editing and animation solutions to the
 * world. For more information visit <http://www.openshot.org/>.
 *
 * OpenShot Library (libopenshot) is free software: you can redistribute it
 * and/or modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation, either version 3 of the
 * License, or (at your option) any later version.
 *
 * OpenShot Library (libopenshot) is distributed in the hope that it will be
 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with OpenShot Library. If not, see <http://www.gnu.org/licenses/>.
 */

#include "Clip.h"
#include "Exceptions.h"
#include "FFmpegReader.h"
#include "FrameMapper.h"
#ifdef USE_IMAGEMAGICK
    #include "ImageReader.h"
    #include "TextReader.h"
#endif
#include "QtImageReader.h"
#include "ChunkReader.h"
#include "DummyReader.h"
#include "Timeline.h"

using namespace openshot;

// Init default settings for a clip
void Clip::init_settings()
{
    // Init clip settings
    Position(0.0);
    Layer(0);
    Start(0.0);
    End(0.0);
    gravity = GRAVITY_CENTER;
    scale = SCALE_FIT;
    anchor = ANCHOR_CANVAS;
    display = FRAME_DISPLAY_NONE;
    mixing = VOLUME_MIX_NONE;
    waveform = false;
    previous_properties = "";
    parentObjectId = "";

    // Init scale curves
    scale_x = Keyframe(1.0);
    scale_y = Keyframe(1.0);

    // Init location curves
    location_x = Keyframe(0.0);
    location_y = Keyframe(0.0);

    // Init alpha
    alpha = Keyframe(1.0);

    // Init time & volume
    time = Keyframe(1.0);
    volume = Keyframe(1.0);

    // Init audio waveform color
    wave_color = Color((unsigned char)0, (unsigned char)123, (unsigned char)255, (unsigned char)255);

    // Init shear and perspective curves
    shear_x = Keyframe(0.0);
    shear_y = Keyframe(0.0);
    origin_x = Keyframe(0.5);
    origin_y = Keyframe(0.5);
    perspective_c1_x = Keyframe(-1.0);
    perspective_c1_y = Keyframe(-1.0);
    perspective_c2_x = Keyframe(-1.0);
    perspective_c2_y = Keyframe(-1.0);
    perspective_c3_x = Keyframe(-1.0);
    perspective_c3_y = Keyframe(-1.0);
    perspective_c4_x = Keyframe(-1.0);
    perspective_c4_y = Keyframe(-1.0);

    // Init audio channel filter and mappings
    channel_filter = Keyframe(-1.0);
    channel_mapping = Keyframe(-1.0);

    // Init audio and video overrides
    has_audio = Keyframe(-1.0);
    has_video = Keyframe(-1.0);

    // Initialize the attached object and attached clip as null pointers
    parentTrackedObject = nullptr;
    parentClipObject = NULL;

    // Init reader info struct
    init_reader_settings();
}

// Init reader info details
void Clip::init_reader_settings() {
    if (reader) {
        // Init rotation (if any)
        init_reader_rotation();

        // Initialize info struct
        info = reader->info;
    }
}

// Init reader's rotation (if any)
void Clip::init_reader_rotation() {
    // Don't init rotation if the clip already has rotation keyframes
    if (rotation.GetCount() > 0)
        return;

    // Init rotation
    if (reader && reader->info.metadata.count("rotate") > 0) {
        // Use reader metadata rotation (if any)
        // This is typical with cell phone videos filmed in different orientations
        try {
            float rotate_metadata = strtof(reader->info.metadata["rotate"].c_str(), 0);
            rotation = Keyframe(rotate_metadata);
        } catch (const std::exception& e) {}
    }
    else
        // Default no rotation
        rotation = Keyframe(0.0);
}

// Default Constructor for a clip
Clip::Clip() : resampler(NULL), reader(NULL), allocated_reader(NULL), is_open(false)
{
    // Init all default settings
    init_settings();
}

// Constructor with reader
Clip::Clip(ReaderBase* new_reader) : resampler(NULL), reader(new_reader), allocated_reader(NULL), is_open(false)
{
    // Init all default settings
    init_settings();

    // Open and Close the reader (to set the duration of the clip)
    Open();
    Close();

    // Update duration and set parent
    if (reader) {
        End(reader->info.duration);
        reader->ParentClip(this);
        // Init reader info struct
        init_reader_settings();
    }
}

// Constructor with filepath
Clip::Clip(std::string path) : resampler(NULL), reader(NULL), allocated_reader(NULL), is_open(false)
{
    // Init all default settings
    init_settings();

    // Get file extension (and convert to lower case)
    std::string ext = get_file_extension(path);
    std::transform(ext.begin(), ext.end(), ext.begin(), ::tolower);

    // Determine if common video formats
    if (ext=="avi" || ext=="mov" || ext=="mkv" || ext=="mpg" || ext=="mpeg" || ext=="mp3" || ext=="mp4" || ext=="mts" ||
        ext=="ogg" || ext=="wav" || ext=="wmv" || ext=="webm" || ext=="vob")
    {
        try
        {
            // Open common video format
            reader = new openshot::FFmpegReader(path);

        } catch(...) { }
    }
    if (ext=="osp")
    {
        try
        {
            // Open OpenShot project file (loads a Timeline as the reader)
            reader = new openshot::Timeline(path, true);

        } catch(...) { }
    }


    // If no video found, try each reader
    if (!reader)
    {
        try
        {
            // Try an image reader
            reader = new openshot::QtImageReader(path);

        } catch(...) {
            try
            {
                // Try a video reader
                reader = new openshot::FFmpegReader(path);

            } catch(...) { }
        }
    }

    // Update duration and set parent
    if (reader) {
        End(reader->info.duration);
        reader->ParentClip(this);
        allocated_reader = reader;
        // Init reader info struct
        init_reader_settings();
    }
}

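// Illustrative usage (a sketch; the filename is hypothetical): the filepath
// constructor above dispatches on the file extension, so a caller might write:
//
//     openshot::Clip c("video.mp4");  // "mp4" -> FFmpegReader
//     c.Open();                       // required before GetFrame()
//     std::shared_ptr<openshot::Frame> f = c.GetFrame(1);  // frames are 1-based
//     c.Close();
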
// Destructor
Clip::~Clip()
{
    // Delete the reader if clip created it
    if (allocated_reader) {
        delete allocated_reader;
        allocated_reader = NULL;
    }

    // Close the resampler
    if (resampler) {
        delete resampler;
        resampler = NULL;
    }
}

// Attach clip to bounding box
void Clip::AttachToObject(std::string object_id)
{
    // Search for the tracked object on the timeline
    Timeline* parentTimeline = (Timeline *) ParentTimeline();

    if (parentTimeline) {
        // Create a smart pointer to the tracked object from the timeline
        std::shared_ptr<openshot::TrackedObjectBase> trackedObject = parentTimeline->GetTrackedObject(object_id);
        Clip* clipObject = parentTimeline->GetClip(object_id);

        // Check for valid tracked object
        if (trackedObject){
            SetAttachedObject(trackedObject);
        }
        else if (clipObject) {
            SetAttachedClip(clipObject);
        }
    }
    return;
}

// Set the pointer to the trackedObject this clip is attached to
void Clip::SetAttachedObject(std::shared_ptr<openshot::TrackedObjectBase> trackedObject){
    parentTrackedObject = trackedObject;
    return;
}

// Set the pointer to the clip this clip is attached to
void Clip::SetAttachedClip(Clip* clipObject){
    parentClipObject = clipObject;
    return;
}

/// Set the current reader
void Clip::Reader(ReaderBase* new_reader)
{
    // set reader pointer
    reader = new_reader;

    // set parent
    reader->ParentClip(this);

    // Init reader info struct
    init_reader_settings();
}

/// Get the current reader
ReaderBase* Clip::Reader()
{
    if (reader)
        return reader;
    else
        // Throw error if reader not initialized
        throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
}

// Open the internal reader
void Clip::Open()
{
    if (reader)
    {
        // Open the reader
        reader->Open();
        is_open = true;

        // Copy Reader info to Clip
        info = reader->info;

        // Set some clip properties from the file reader
        if (end == 0.0)
            End(reader->info.duration);
    }
    else
        // Throw error if reader not initialized
        throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
}

// Close the internal reader
void Clip::Close()
{
    is_open = false;
    if (reader) {
        ZmqLogger::Instance()->AppendDebugMethod("Clip::Close");

        // Close the reader
        reader->Close();
    }
    else
        // Throw error if reader not initialized
        throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
}

// Get end position of clip (trim end of video), which can be affected by the time curve.
float Clip::End() const
{
    // if a time curve is present, use its length
    if (time.GetCount() > 1)
    {
        // Determine the FPS of this clip
        float fps = 24.0;
        if (reader)
            // file reader
            fps = reader->info.fps.ToFloat();
        else
            // Throw error if reader not initialized
            throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");

        return float(time.GetLength()) / fps;
    }
    else
        // just use the duration (as detected by the reader)
        return end;
}

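// Worked example (illustrative): with a time curve spanning 48 frames on a
// 24 fps reader, End() returns 48 / 24.0 == 2.0 seconds, regardless of the
// reader's full duration.
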
// Create an openshot::Frame object for a specific frame number of this reader.
std::shared_ptr<Frame> Clip::GetFrame(int64_t frame_number)
{
    // Check for open reader (or throw exception)
    if (!is_open)
        throw ReaderClosed("The Clip is closed. Call Open() before calling this method.");

    if (reader)
    {
        // Adjust out of bounds frame number
        frame_number = adjust_frame_number_minimum(frame_number);

        // Get the original frame and pass it to GetFrame overload
        std::shared_ptr<Frame> original_frame = GetOrCreateFrame(frame_number);
        return GetFrame(original_frame, frame_number, NULL);
    }
    else
        // Throw error if reader not initialized
        throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
}

// Create an openshot::Frame object for a specific frame number of this reader.
std::shared_ptr<Frame> Clip::GetFrame(std::shared_ptr<openshot::Frame> background_frame, int64_t frame_number)
{
    // Check for open reader (or throw exception)
    if (!is_open)
        throw ReaderClosed("The Clip is closed. Call Open() before calling this method.");

    if (reader)
    {
        // Adjust out of bounds frame number
        frame_number = adjust_frame_number_minimum(frame_number);

        // Get the original frame and pass it to GetFrame overload
        std::shared_ptr<Frame> original_frame = GetOrCreateFrame(frame_number);
        return GetFrame(original_frame, frame_number, NULL);
    }
    else
        // Throw error if reader not initialized
        throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
}

// Use an existing openshot::Frame object and draw this Clip's frame onto it
std::shared_ptr<Frame> Clip::GetFrame(std::shared_ptr<openshot::Frame> background_frame, int64_t frame_number, openshot::TimelineInfoStruct* options)
{
    // Check for open reader (or throw exception)
    if (!is_open)
        throw ReaderClosed("The Clip is closed. Call Open() before calling this method.");

    if (reader)
    {
        // Adjust out of bounds frame number
        frame_number = adjust_frame_number_minimum(frame_number);

        // Is a time map detected
        int64_t new_frame_number = frame_number;
        int64_t time_mapped_number = adjust_frame_number_minimum(time.GetLong(frame_number));
        if (time.GetLength() > 1)
            new_frame_number = time_mapped_number;

        // Now that we have re-mapped what frame number is needed, go and get the frame pointer
        std::shared_ptr<Frame> original_frame = GetOrCreateFrame(new_frame_number);

        // Get time mapped frame number (used to increase speed, change direction, etc...)
        // TODO: Handle variable # of samples, since this resamples audio for different speeds (only when time curve is set)
        get_time_mapped_frame(original_frame, new_frame_number);

        // Apply local effects to the frame (if any)
        apply_effects(original_frame);

        // Apply global timeline effects (i.e. transitions & masks... if any)
        if (timeline != NULL && options != NULL) {
            if (options->is_top_clip) {
                // Apply global timeline effects (only to top clip... if overlapping, pass in timeline frame number)
                Timeline* timeline_instance = (Timeline*) timeline;
                original_frame = timeline_instance->apply_effects(original_frame, background_frame->number, Layer());
            }
        }

        // Apply keyframe / transforms
        apply_keyframes(original_frame, background_frame->GetImage());

        // Return processed 'frame'
        return original_frame;
    }
    else
        // Throw error if reader not initialized
        throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
}

// Look up an effect by ID
openshot::EffectBase* Clip::GetEffect(const std::string& id)
{
    // Find the matching effect (if any)
    for (const auto& effect : effects) {
        if (effect->Id() == id) {
            return effect;
        }
    }
    return nullptr;
}

// Get file extension
std::string Clip::get_file_extension(std::string path)
{
    // return last part of path
    return path.substr(path.find_last_of(".") + 1);
}

// Reverse an audio buffer
void Clip::reverse_buffer(juce::AudioSampleBuffer* buffer)
{
    int number_of_samples = buffer->getNumSamples();
    int channels = buffer->getNumChannels();

    // Reverse array (create new buffer to hold the reversed version)
    juce::AudioSampleBuffer *reversed = new juce::AudioSampleBuffer(channels, number_of_samples);
    reversed->clear();

    for (int channel = 0; channel < channels; channel++)
    {
        int n=0;
        for (int s = number_of_samples - 1; s >= 0; s--, n++)
            reversed->getWritePointer(channel)[n] = buffer->getWritePointer(channel)[s];
    }

    // Copy the samples back to the original array
    buffer->clear();
    // Loop through channels, and get audio samples
    for (int channel = 0; channel < channels; channel++)
        // Get the audio samples for this channel
        buffer->addFrom(channel, 0, reversed->getReadPointer(channel), number_of_samples, 1.0f);

    delete reversed;
    reversed = NULL;
}

// Adjust the audio and image of a time mapped frame
void Clip::get_time_mapped_frame(std::shared_ptr<Frame> frame, int64_t frame_number)
{
    // Check for valid reader
    if (!reader)
        // Throw error if reader not initialized
        throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");

    // Check for a valid time map curve
    if (time.GetLength() > 1)
    {
        const GenericScopedLock<juce::CriticalSection> lock(getFrameCriticalSection);

        // create buffer and resampler
        juce::AudioSampleBuffer *samples = NULL;
        if (!resampler)
            resampler = new AudioResampler();

        // Get new frame number
        int new_frame_number = frame->number;

        // Get delta (difference from previous Y value)
        int delta = int(round(time.GetDelta(frame_number)));

        // Init audio vars
        int channels = reader->info.channels;
        int number_of_samples = GetOrCreateFrame(new_frame_number)->GetAudioSamplesCount();

        // Only resample audio if needed
        if (reader->info.has_audio) {
            // Determine if we are speeding up or slowing down
            if (time.GetRepeatFraction(frame_number).den > 1) {
                // SLOWING DOWN AUDIO
                // Resample data, and return new buffer pointer
                juce::AudioSampleBuffer *resampled_buffer = NULL;

                // SLOW DOWN audio (split audio)
                samples = new juce::AudioSampleBuffer(channels, number_of_samples);
                samples->clear();

                // Loop through channels, and get audio samples
                for (int channel = 0; channel < channels; channel++)
                    // Get the audio samples for this channel
                    samples->addFrom(channel, 0, GetOrCreateFrame(new_frame_number)->GetAudioSamples(channel),
                                     number_of_samples, 1.0f);

                // Reverse the samples (if needed)
                if (!time.IsIncreasing(frame_number))
                    reverse_buffer(samples);

                // Resample audio to be X times slower (where X is the denominator of the repeat fraction)
                resampler->SetBuffer(samples, 1.0 / time.GetRepeatFraction(frame_number).den);

                // Resample the data (since it's the 1st slice)
                resampled_buffer = resampler->GetResampledBuffer();

                // Just take the samples we need for the requested frame
                int start = (number_of_samples * (time.GetRepeatFraction(frame_number).num - 1));
                if (start > 0)
                    start -= 1;
                for (int channel = 0; channel < channels; channel++)
                    // Add new (slower) samples, to the frame object
                    frame->AddAudio(true, channel, 0, resampled_buffer->getReadPointer(channel, start),
                                    number_of_samples, 1.0f);

                // Clean up
                resampled_buffer = NULL;

            }
            else if (abs(delta) > 1 && abs(delta) < 100) {
                int start = 0;
                if (delta > 0) {
                    // SPEED UP (multiple frames of audio), as long as it's not more than X frames
                    int total_delta_samples = 0;
                    for (int delta_frame = new_frame_number - (delta - 1);
                         delta_frame <= new_frame_number; delta_frame++)
                        total_delta_samples += Frame::GetSamplesPerFrame(delta_frame, reader->info.fps,
                                                                         reader->info.sample_rate,
                                                                         reader->info.channels);

                    // Allocate a new sample buffer for these delta frames
                    samples = new juce::AudioSampleBuffer(channels, total_delta_samples);
                    samples->clear();

                    // Loop through each frame in this delta
                    for (int delta_frame = new_frame_number - (delta - 1);
                         delta_frame <= new_frame_number; delta_frame++) {
                        // buffer to hold delta samples
                        int number_of_delta_samples = GetOrCreateFrame(delta_frame)->GetAudioSamplesCount();
                        juce::AudioSampleBuffer *delta_samples = new juce::AudioSampleBuffer(channels,
                                                                                             number_of_delta_samples);
                        delta_samples->clear();

                        for (int channel = 0; channel < channels; channel++)
                            delta_samples->addFrom(channel, 0, GetOrCreateFrame(delta_frame)->GetAudioSamples(channel),
                                                   number_of_delta_samples, 1.0f);

                        // Reverse the samples (if needed)
                        if (!time.IsIncreasing(frame_number))
                            reverse_buffer(delta_samples);

                        // Copy the samples to the combined buffer
                        for (int channel = 0; channel < channels; channel++)
                            // Get the audio samples for this channel
                            samples->addFrom(channel, start, delta_samples->getReadPointer(channel),
                                             number_of_delta_samples, 1.0f);

                        // Clean up
                        delete delta_samples;
                        delta_samples = NULL;

                        // Increment start position
                        start += number_of_delta_samples;
                    }
                }
                else {
                    // SPEED UP in reverse (multiple frames of audio), as long as it's not more than X frames
                    int total_delta_samples = 0;
                    for (int delta_frame = new_frame_number - (delta + 1);
                         delta_frame >= new_frame_number; delta_frame--)
                        total_delta_samples += Frame::GetSamplesPerFrame(delta_frame, reader->info.fps,
                                                                         reader->info.sample_rate,
                                                                         reader->info.channels);

                    // Allocate a new sample buffer for these delta frames
                    samples = new juce::AudioSampleBuffer(channels, total_delta_samples);
                    samples->clear();

                    // Loop through each frame in this delta
                    for (int delta_frame = new_frame_number - (delta + 1);
                         delta_frame >= new_frame_number; delta_frame--) {
                        // buffer to hold delta samples
                        int number_of_delta_samples = GetOrCreateFrame(delta_frame)->GetAudioSamplesCount();
                        juce::AudioSampleBuffer *delta_samples = new juce::AudioSampleBuffer(channels,
                                                                                             number_of_delta_samples);
                        delta_samples->clear();

                        for (int channel = 0; channel < channels; channel++)
                            delta_samples->addFrom(channel, 0, GetOrCreateFrame(delta_frame)->GetAudioSamples(channel),
                                                   number_of_delta_samples, 1.0f);

                        // Reverse the samples (if needed)
                        if (!time.IsIncreasing(frame_number))
                            reverse_buffer(delta_samples);

                        // Copy the samples to the combined buffer
                        for (int channel = 0; channel < channels; channel++)
                            // Get the audio samples for this channel
                            samples->addFrom(channel, start, delta_samples->getReadPointer(channel),
                                             number_of_delta_samples, 1.0f);

                        // Clean up
                        delete delta_samples;
                        delta_samples = NULL;

                        // Increment start position
                        start += number_of_delta_samples;
                    }
                }

                // Resample audio to be X times faster (where X is the number of delta frames)
                resampler->SetBuffer(samples, float(start) / float(number_of_samples));

                // Resample data, and return new buffer pointer
                juce::AudioSampleBuffer *buffer = resampler->GetResampledBuffer();

                // Add the newly resized audio samples to the current frame
                for (int channel = 0; channel < channels; channel++)
                    // Add new (faster) samples, to the frame object
                    frame->AddAudio(true, channel, 0, buffer->getReadPointer(channel), number_of_samples, 1.0f);

                // Clean up
                buffer = NULL;
            }
            else {
                // Use the samples on this frame (but maybe reverse them if needed)
                samples = new juce::AudioSampleBuffer(channels, number_of_samples);
                samples->clear();

                // Loop through channels, and get audio samples
                for (int channel = 0; channel < channels; channel++)
                    // Get the audio samples for this channel
                    samples->addFrom(channel, 0, frame->GetAudioSamples(channel), number_of_samples, 1.0f);

                // reverse the samples
                if (!time.IsIncreasing(frame_number))
                    reverse_buffer(samples);

                // Add reversed samples to the frame object
                for (int channel = 0; channel < channels; channel++)
                    frame->AddAudio(true, channel, 0, samples->getReadPointer(channel), number_of_samples, 1.0f);

            }

            delete samples;
            samples = NULL;
        }
    }
}

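// Note: the two resampling ratios above mirror the time curve. Slowing down
// by a repeat-fraction denominator D passes 1.0/D to the resampler, which
// stretches one frame of source audio across D output frames; speeding up by
// a delta of N frames concatenates those N frames' samples ('start' samples
// in total) and resamples by start/number_of_samples (roughly N) to squeeze
// them into a single output frame.
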
// Adjust frame number minimum value
int64_t Clip::adjust_frame_number_minimum(int64_t frame_number)
{
    // Never return a frame number 0 or below
    if (frame_number < 1)
        return 1;
    else
        return frame_number;

}

// Get or generate a blank frame
std::shared_ptr<Frame> Clip::GetOrCreateFrame(int64_t number)
{
    try {
        // Debug output
        ZmqLogger::Instance()->AppendDebugMethod("Clip::GetOrCreateFrame (from reader)", "number", number);

        // Attempt to get a frame (but this could fail if a reader has just been closed)
        auto reader_frame = reader->GetFrame(number);

        // Return real frame
        if (reader_frame) {
            // Create a new copy of reader frame
            // This allows a clip to modify the pixels and audio of this frame without
            // changing the underlying reader's frame data
            auto reader_copy = std::make_shared<Frame>(*reader_frame.get());
            reader_copy->SampleRate(reader_frame->SampleRate());
            reader_copy->ChannelsLayout(reader_frame->ChannelsLayout());
            return reader_copy;
        }

    } catch (const ReaderClosed & e) {
        // ...
    } catch (const OutOfBoundsFrame & e) {
        // ...
    }

    // Estimate # of samples needed for this frame
    int estimated_samples_in_frame = Frame::GetSamplesPerFrame(number, reader->info.fps, reader->info.sample_rate, reader->info.channels);

    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod("Clip::GetOrCreateFrame (create blank)", "number", number, "estimated_samples_in_frame", estimated_samples_in_frame);

    // Create blank frame
    auto new_frame = std::make_shared<Frame>(
        number, reader->info.width, reader->info.height,
        "#000000", estimated_samples_in_frame, reader->info.channels);
    new_frame->SampleRate(reader->info.sample_rate);
    new_frame->ChannelsLayout(reader->info.channel_layout);
    new_frame->AddAudioSilence(estimated_samples_in_frame);
    return new_frame;
}

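// Note: the silent fallback above means a just-closed reader or an
// out-of-bounds request degrades to black video plus silence with the
// correct sample count and channel layout, rather than surfacing an
// exception to the caller.
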
// Generate JSON string of this object
std::string Clip::Json() const {

    // Return formatted string
    return JsonValue().toStyledString();
}

// Get all properties for a specific frame
std::string Clip::PropertiesJSON(int64_t requested_frame) const {

    // Generate JSON properties list
    Json::Value root;
    root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame);
    root["position"] = add_property_json("Position", Position(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
    root["layer"] = add_property_json("Track", Layer(), "int", "", NULL, 0, 20, false, requested_frame);
    root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
    root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
    root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 30 * 60 * 60 * 48, true, requested_frame);
    root["gravity"] = add_property_json("Gravity", gravity, "int", "", NULL, 0, 8, false, requested_frame);
    root["scale"] = add_property_json("Scale", scale, "int", "", NULL, 0, 3, false, requested_frame);
    root["display"] = add_property_json("Frame Number", display, "int", "", NULL, 0, 3, false, requested_frame);
    root["mixing"] = add_property_json("Volume Mixing", mixing, "int", "", NULL, 0, 2, false, requested_frame);
    root["waveform"] = add_property_json("Waveform", waveform, "int", "", NULL, 0, 1, false, requested_frame);
    if (!parentObjectId.empty()) {
        root["parentObjectId"] = add_property_json("Parent", 0.0, "string", parentObjectId, NULL, -1, -1, false, requested_frame);
    } else {
        root["parentObjectId"] = add_property_json("Parent", 0.0, "string", "", NULL, -1, -1, false, requested_frame);
    }
    // Add gravity choices (dropdown style)
    root["gravity"]["choices"].append(add_property_choice_json("Top Left", GRAVITY_TOP_LEFT, gravity));
    root["gravity"]["choices"].append(add_property_choice_json("Top Center", GRAVITY_TOP, gravity));
    root["gravity"]["choices"].append(add_property_choice_json("Top Right", GRAVITY_TOP_RIGHT, gravity));
    root["gravity"]["choices"].append(add_property_choice_json("Left", GRAVITY_LEFT, gravity));
    root["gravity"]["choices"].append(add_property_choice_json("Center", GRAVITY_CENTER, gravity));
    root["gravity"]["choices"].append(add_property_choice_json("Right", GRAVITY_RIGHT, gravity));
    root["gravity"]["choices"].append(add_property_choice_json("Bottom Left", GRAVITY_BOTTOM_LEFT, gravity));
    root["gravity"]["choices"].append(add_property_choice_json("Bottom Center", GRAVITY_BOTTOM, gravity));
    root["gravity"]["choices"].append(add_property_choice_json("Bottom Right", GRAVITY_BOTTOM_RIGHT, gravity));

    // Add scale choices (dropdown style)
    root["scale"]["choices"].append(add_property_choice_json("Crop", SCALE_CROP, scale));
    root["scale"]["choices"].append(add_property_choice_json("Best Fit", SCALE_FIT, scale));
    root["scale"]["choices"].append(add_property_choice_json("Stretch", SCALE_STRETCH, scale));
    root["scale"]["choices"].append(add_property_choice_json("None", SCALE_NONE, scale));

    // Add frame number display choices (dropdown style)
    root["display"]["choices"].append(add_property_choice_json("None", FRAME_DISPLAY_NONE, display));
    root["display"]["choices"].append(add_property_choice_json("Clip", FRAME_DISPLAY_CLIP, display));
    root["display"]["choices"].append(add_property_choice_json("Timeline", FRAME_DISPLAY_TIMELINE, display));
    root["display"]["choices"].append(add_property_choice_json("Both", FRAME_DISPLAY_BOTH, display));

    // Add volume mixing choices (dropdown style)
    root["mixing"]["choices"].append(add_property_choice_json("None", VOLUME_MIX_NONE, mixing));
    root["mixing"]["choices"].append(add_property_choice_json("Average", VOLUME_MIX_AVERAGE, mixing));
    root["mixing"]["choices"].append(add_property_choice_json("Reduce", VOLUME_MIX_REDUCE, mixing));

    // Add waveform choices (dropdown style)
    root["waveform"]["choices"].append(add_property_choice_json("Yes", true, waveform));
    root["waveform"]["choices"].append(add_property_choice_json("No", false, waveform));

    // Add the parentTrackedObject's properties
    if (parentTrackedObject)
    {
        // Convert Clip's frame position to Timeline's frame position
        long clip_start_position = round(Position() * info.fps.ToDouble()) + 1;
        long clip_start_frame = (Start() * info.fps.ToDouble()) + 1;
        double timeline_frame_number = requested_frame + clip_start_position - clip_start_frame;
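        // Worked example (illustrative): at 30 fps, Position() == 2.0s and
        // Start() == 0.5s give clip_start_position == 61 and
        // clip_start_frame == 16, so requested_frame 1 maps to timeline
        // frame 46.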

        // Get attached object's parent clip properties
        std::map< std::string, float > trackedObjectParentClipProperties = parentTrackedObject->GetParentClipProperties(timeline_frame_number);
        double parentObject_frame_number = trackedObjectParentClipProperties["frame_number"];
        // Get attached object properties
        std::map< std::string, float > trackedObjectProperties = parentTrackedObject->GetBoxValues(parentObject_frame_number);

        // Correct the parent Tracked Object properties by the clip's reference system
        float parentObject_location_x = trackedObjectProperties["cx"] - 0.5 + trackedObjectParentClipProperties["cx"];
        float parentObject_location_y = trackedObjectProperties["cy"] - 0.5 + trackedObjectParentClipProperties["cy"];
        float parentObject_scale_x = trackedObjectProperties["w"]*trackedObjectProperties["sx"];
        float parentObject_scale_y = trackedObjectProperties["h"]*trackedObjectProperties["sy"];
        float parentObject_rotation = trackedObjectProperties["r"] + trackedObjectParentClipProperties["r"];

        // Add the parent Tracked Object properties to JSON
        root["location_x"] = add_property_json("Location X", parentObject_location_x, "float", "", &location_x, -1.0, 1.0, false, requested_frame);
        root["location_y"] = add_property_json("Location Y", parentObject_location_y, "float", "", &location_y, -1.0, 1.0, false, requested_frame);
        root["scale_x"] = add_property_json("Scale X", parentObject_scale_x, "float", "", &scale_x, 0.0, 1.0, false, requested_frame);
        root["scale_y"] = add_property_json("Scale Y", parentObject_scale_y, "float", "", &scale_y, 0.0, 1.0, false, requested_frame);
        root["rotation"] = add_property_json("Rotation", parentObject_rotation, "float", "", &rotation, -360, 360, false, requested_frame);
        root["shear_x"] = add_property_json("Shear X", shear_x.GetValue(requested_frame), "float", "", &shear_x, -1.0, 1.0, false, requested_frame);
        root["shear_y"] = add_property_json("Shear Y", shear_y.GetValue(requested_frame), "float", "", &shear_y, -1.0, 1.0, false, requested_frame);
    }
    // Add the parentClipObject's properties
    else if (parentClipObject)
    {
        // Convert Clip's frame position to Timeline's frame position
        long clip_start_position = round(Position() * info.fps.ToDouble()) + 1;
        long clip_start_frame = (Start() * info.fps.ToDouble()) + 1;
        double timeline_frame_number = requested_frame + clip_start_position - clip_start_frame;

        // Correct the parent Clip Object properties by the clip's reference system
        float parentObject_location_x = parentClipObject->location_x.GetValue(timeline_frame_number);
        float parentObject_location_y = parentClipObject->location_y.GetValue(timeline_frame_number);
        float parentObject_scale_x = parentClipObject->scale_x.GetValue(timeline_frame_number);
        float parentObject_scale_y = parentClipObject->scale_y.GetValue(timeline_frame_number);
        float parentObject_shear_x = parentClipObject->shear_x.GetValue(timeline_frame_number);
        float parentObject_shear_y = parentClipObject->shear_y.GetValue(timeline_frame_number);
        float parentObject_rotation = parentClipObject->rotation.GetValue(timeline_frame_number);

        // Add the parent Clip Object properties to JSON
        root["location_x"] = add_property_json("Location X", parentObject_location_x, "float", "", &location_x, -1.0, 1.0, false, requested_frame);
        root["location_y"] = add_property_json("Location Y", parentObject_location_y, "float", "", &location_y, -1.0, 1.0, false, requested_frame);
        root["scale_x"] = add_property_json("Scale X", parentObject_scale_x, "float", "", &scale_x, 0.0, 1.0, false, requested_frame);
        root["scale_y"] = add_property_json("Scale Y", parentObject_scale_y, "float", "", &scale_y, 0.0, 1.0, false, requested_frame);
        root["rotation"] = add_property_json("Rotation", parentObject_rotation, "float", "", &rotation, -360, 360, false, requested_frame);
        root["shear_x"] = add_property_json("Shear X", parentObject_shear_x, "float", "", &shear_x, -1.0, 1.0, false, requested_frame);
        root["shear_y"] = add_property_json("Shear Y", parentObject_shear_y, "float", "", &shear_y, -1.0, 1.0, false, requested_frame);
    }
    else
    {
        // Add this clip's own properties to JSON
        root["location_x"] = add_property_json("Location X", location_x.GetValue(requested_frame), "float", "", &location_x, -1.0, 1.0, false, requested_frame);
        root["location_y"] = add_property_json("Location Y", location_y.GetValue(requested_frame), "float", "", &location_y, -1.0, 1.0, false, requested_frame);
        root["scale_x"] = add_property_json("Scale X", scale_x.GetValue(requested_frame), "float", "", &scale_x, 0.0, 1.0, false, requested_frame);
        root["scale_y"] = add_property_json("Scale Y", scale_y.GetValue(requested_frame), "float", "", &scale_y, 0.0, 1.0, false, requested_frame);
        root["rotation"] = add_property_json("Rotation", rotation.GetValue(requested_frame), "float", "", &rotation, -360, 360, false, requested_frame);
        root["shear_x"] = add_property_json("Shear X", shear_x.GetValue(requested_frame), "float", "", &shear_x, -1.0, 1.0, false, requested_frame);
        root["shear_y"] = add_property_json("Shear Y", shear_y.GetValue(requested_frame), "float", "", &shear_y, -1.0, 1.0, false, requested_frame);
    }

    // Keyframes
    root["alpha"] = add_property_json("Alpha", alpha.GetValue(requested_frame), "float", "", &alpha, 0.0, 1.0, false, requested_frame);
    root["origin_x"] = add_property_json("Origin X", origin_x.GetValue(requested_frame), "float", "", &origin_x, 0.0, 1.0, false, requested_frame);
    root["origin_y"] = add_property_json("Origin Y", origin_y.GetValue(requested_frame), "float", "", &origin_y, 0.0, 1.0, false, requested_frame);
    root["volume"] = add_property_json("Volume", volume.GetValue(requested_frame), "float", "", &volume, 0.0, 1.0, false, requested_frame);
    root["time"] = add_property_json("Time", time.GetValue(requested_frame), "float", "", &time, 0.0, 30 * 60 * 60 * 48, false, requested_frame);
    root["channel_filter"] = add_property_json("Channel Filter", channel_filter.GetValue(requested_frame), "int", "", &channel_filter, -1, 10, false, requested_frame);
    root["channel_mapping"] = add_property_json("Channel Mapping", channel_mapping.GetValue(requested_frame), "int", "", &channel_mapping, -1, 10, false, requested_frame);
    root["has_audio"] = add_property_json("Enable Audio", has_audio.GetValue(requested_frame), "int", "", &has_audio, -1, 1.0, false, requested_frame);
    root["has_video"] = add_property_json("Enable Video", has_video.GetValue(requested_frame), "int", "", &has_video, -1, 1.0, false, requested_frame);

    // Add enable audio/video choices (dropdown style)
    root["has_audio"]["choices"].append(add_property_choice_json("Auto", -1, has_audio.GetValue(requested_frame)));
    root["has_audio"]["choices"].append(add_property_choice_json("Off", 0, has_audio.GetValue(requested_frame)));
    root["has_audio"]["choices"].append(add_property_choice_json("On", 1, has_audio.GetValue(requested_frame)));
    root["has_video"]["choices"].append(add_property_choice_json("Auto", -1, has_video.GetValue(requested_frame)));
    root["has_video"]["choices"].append(add_property_choice_json("Off", 0, has_video.GetValue(requested_frame)));
    root["has_video"]["choices"].append(add_property_choice_json("On", 1, has_video.GetValue(requested_frame)));

    root["wave_color"] = add_property_json("Wave Color", 0.0, "color", "", &wave_color.red, 0, 255, false, requested_frame);
    root["wave_color"]["red"] = add_property_json("Red", wave_color.red.GetValue(requested_frame), "float", "", &wave_color.red, 0, 255, false, requested_frame);
    root["wave_color"]["blue"] = add_property_json("Blue", wave_color.blue.GetValue(requested_frame), "float", "", &wave_color.blue, 0, 255, false, requested_frame);
    root["wave_color"]["green"] = add_property_json("Green", wave_color.green.GetValue(requested_frame), "float", "", &wave_color.green, 0, 255, false, requested_frame);

    // Return formatted string
    return root.toStyledString();
}

// Generate Json::Value for this object
Json::Value Clip::JsonValue() const {

    // Create root json object
    Json::Value root = ClipBase::JsonValue(); // get parent properties
    root["parentObjectId"] = parentObjectId;
    root["gravity"] = gravity;
    root["scale"] = scale;
    root["anchor"] = anchor;
    root["display"] = display;
    root["mixing"] = mixing;
    root["waveform"] = waveform;
    root["scale_x"] = scale_x.JsonValue();
    root["scale_y"] = scale_y.JsonValue();
    root["location_x"] = location_x.JsonValue();
    root["location_y"] = location_y.JsonValue();
    root["alpha"] = alpha.JsonValue();
    root["rotation"] = rotation.JsonValue();
    root["time"] = time.JsonValue();
    root["volume"] = volume.JsonValue();
    root["wave_color"] = wave_color.JsonValue();
    root["shear_x"] = shear_x.JsonValue();
    root["shear_y"] = shear_y.JsonValue();
    root["origin_x"] = origin_x.JsonValue();
    root["origin_y"] = origin_y.JsonValue();
    root["channel_filter"] = channel_filter.JsonValue();
    root["channel_mapping"] = channel_mapping.JsonValue();
    root["has_audio"] = has_audio.JsonValue();
    root["has_video"] = has_video.JsonValue();
    root["perspective_c1_x"] = perspective_c1_x.JsonValue();
    root["perspective_c1_y"] = perspective_c1_y.JsonValue();
    root["perspective_c2_x"] = perspective_c2_x.JsonValue();
    root["perspective_c2_y"] = perspective_c2_y.JsonValue();
    root["perspective_c3_x"] = perspective_c3_x.JsonValue();
    root["perspective_c3_y"] = perspective_c3_y.JsonValue();
    root["perspective_c4_x"] = perspective_c4_x.JsonValue();
    root["perspective_c4_y"] = perspective_c4_y.JsonValue();

    // Add array of effects
    root["effects"] = Json::Value(Json::arrayValue);

    // loop through effects
    for (auto existing_effect : effects)
    {
        root["effects"].append(existing_effect->JsonValue());
    }

    if (reader)
        root["reader"] = reader->JsonValue();
    else
        root["reader"] = Json::Value(Json::objectValue);

    // return JsonValue
    return root;
}

// Load JSON string into this object
void Clip::SetJson(const std::string value) {

    // Parse JSON string into JSON objects
    try
    {
        const Json::Value root = openshot::stringToJson(value);
        // Set all values that match
        SetJsonValue(root);
    }
    catch (const std::exception& e)
    {
        // Error parsing JSON (or missing keys)
        throw InvalidJSON("JSON is invalid (missing keys or invalid data types)");
    }
}

// Load Json::Value into this object
void Clip::SetJsonValue(const Json::Value root) {

    // Set parent data
    ClipBase::SetJsonValue(root);

    // Set data from Json (if key is found)
    if (!root["parentObjectId"].isNull()){
        parentObjectId = root["parentObjectId"].asString();
        if (!parentObjectId.empty()){
            AttachToObject(parentObjectId);
        } else{
            parentTrackedObject = nullptr;
            parentClipObject = NULL;
        }
    }
    if (!root["gravity"].isNull())
        gravity = (GravityType) root["gravity"].asInt();
    if (!root["scale"].isNull())
        scale = (ScaleType) root["scale"].asInt();
    if (!root["anchor"].isNull())
        anchor = (AnchorType) root["anchor"].asInt();
    if (!root["display"].isNull())
        display = (FrameDisplayType) root["display"].asInt();
    if (!root["mixing"].isNull())
        mixing = (VolumeMixType) root["mixing"].asInt();
    if (!root["waveform"].isNull())
        waveform = root["waveform"].asBool();
    if (!root["scale_x"].isNull())
        scale_x.SetJsonValue(root["scale_x"]);
    if (!root["scale_y"].isNull())
        scale_y.SetJsonValue(root["scale_y"]);
    if (!root["location_x"].isNull())
        location_x.SetJsonValue(root["location_x"]);
    if (!root["location_y"].isNull())
        location_y.SetJsonValue(root["location_y"]);
    if (!root["alpha"].isNull())
        alpha.SetJsonValue(root["alpha"]);
    if (!root["rotation"].isNull())
        rotation.SetJsonValue(root["rotation"]);
    if (!root["time"].isNull())
        time.SetJsonValue(root["time"]);
    if (!root["volume"].isNull())
        volume.SetJsonValue(root["volume"]);
    if (!root["wave_color"].isNull())
        wave_color.SetJsonValue(root["wave_color"]);
    if (!root["shear_x"].isNull())
        shear_x.SetJsonValue(root["shear_x"]);
    if (!root["shear_y"].isNull())
        shear_y.SetJsonValue(root["shear_y"]);
    if (!root["origin_x"].isNull())
        origin_x.SetJsonValue(root["origin_x"]);
    if (!root["origin_y"].isNull())
        origin_y.SetJsonValue(root["origin_y"]);
    if (!root["channel_filter"].isNull())
        channel_filter.SetJsonValue(root["channel_filter"]);
    if (!root["channel_mapping"].isNull())
        channel_mapping.SetJsonValue(root["channel_mapping"]);
    if (!root["has_audio"].isNull())
        has_audio.SetJsonValue(root["has_audio"]);
    if (!root["has_video"].isNull())
        has_video.SetJsonValue(root["has_video"]);
    if (!root["perspective_c1_x"].isNull())
        perspective_c1_x.SetJsonValue(root["perspective_c1_x"]);
    if (!root["perspective_c1_y"].isNull())
        perspective_c1_y.SetJsonValue(root["perspective_c1_y"]);
    if (!root["perspective_c2_x"].isNull())
        perspective_c2_x.SetJsonValue(root["perspective_c2_x"]);
    if (!root["perspective_c2_y"].isNull())
        perspective_c2_y.SetJsonValue(root["perspective_c2_y"]);
    if (!root["perspective_c3_x"].isNull())
        perspective_c3_x.SetJsonValue(root["perspective_c3_x"]);
    if (!root["perspective_c3_y"].isNull())
        perspective_c3_y.SetJsonValue(root["perspective_c3_y"]);
    if (!root["perspective_c4_x"].isNull())
        perspective_c4_x.SetJsonValue(root["perspective_c4_x"]);
    if (!root["perspective_c4_y"].isNull())
        perspective_c4_y.SetJsonValue(root["perspective_c4_y"]);
    if (!root["effects"].isNull()) {

        // Clear existing effects
        effects.clear();

        // loop through effects
        for (const auto existing_effect : root["effects"]) {
            // Create Effect
            EffectBase *e = NULL;
            if (!existing_effect["type"].isNull()) {

                // Create instance of effect
                if ( (e = EffectInfo().CreateEffect(existing_effect["type"].asString()))) {

                    // Load Json into Effect
                    e->SetJsonValue(existing_effect);

                    // Add Effect to Clip
                    AddEffect(e);
                }
            }
        }
    }
    if (!root["reader"].isNull()) // does Json contain a reader?
    {
        if (!root["reader"]["type"].isNull()) // does the reader Json contain a 'type'?
        {
            // Close previous reader (if any)
            bool already_open = false;
            if (reader)
            {
                // Track if reader was open
                already_open = reader->IsOpen();

                // Close and delete existing reader (if any)
                reader->Close();
                delete reader;
                reader = NULL;
            }

            // Create new reader (and load properties)
            std::string type = root["reader"]["type"].asString();

            if (type == "FFmpegReader") {

                // Create new reader
                reader = new openshot::FFmpegReader(root["reader"]["path"].asString(), false);
                reader->SetJsonValue(root["reader"]);

            } else if (type == "QtImageReader") {

                // Create new reader
                reader = new openshot::QtImageReader(root["reader"]["path"].asString(), false);
                reader->SetJsonValue(root["reader"]);

#ifdef USE_IMAGEMAGICK
            } else if (type == "ImageReader") {

                // Create new reader
                reader = new ImageReader(root["reader"]["path"].asString(), false);
                reader->SetJsonValue(root["reader"]);

            } else if (type == "TextReader") {

                // Create new reader
                reader = new TextReader();
                reader->SetJsonValue(root["reader"]);
#endif

            } else if (type == "ChunkReader") {

                // Create new reader
                reader = new openshot::ChunkReader(root["reader"]["path"].asString(), (ChunkVersion) root["reader"]["chunk_version"].asInt());
                reader->SetJsonValue(root["reader"]);

            } else if (type == "DummyReader") {

                // Create new reader
                reader = new openshot::DummyReader();
                reader->SetJsonValue(root["reader"]);

            } else if (type == "Timeline") {

                // Create new reader (always load from file again)
                // This prevents FrameMappers from being loaded by accident
                reader = new openshot::Timeline(root["reader"]["path"].asString(), true);
            }

            // mark as managed reader and set parent
            if (reader) {
                reader->ParentClip(this);
                allocated_reader = reader;
            }

            // Re-Open reader (if needed)
            if (already_open)
                reader->Open();

        }
    }
}

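// Illustrative JSON shape accepted by SetJson() / SetJsonValue() (a sketch;
// the path and values are hypothetical, and most keys are optional):
//
//     {
//       "position": 0.0, "layer": 0, "start": 0.0, "end": 10.0,
//       "reader": { "type": "FFmpegReader", "path": "video.mp4" },
//       "effects": []
//     }
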
// Sort effects by order
void Clip::sort_effects()
{
    // sort clips
    effects.sort(CompareClipEffects());
}

// Add an effect to the clip
void Clip::AddEffect(EffectBase* effect)
{
    // Set parent clip pointer
    effect->ParentClip(this);

    // Add effect to list
    effects.push_back(effect);

    // Sort effects
    sort_effects();

    // Get the parent timeline of this clip
    Timeline* parentTimeline = (Timeline *) ParentTimeline();

    if (parentTimeline)
        effect->ParentTimeline(parentTimeline);

#ifdef USE_OPENCV
    // Add Tracked Object to Timeline
    if (effect->info.has_tracked_object){

        // Check if this clip has a parent timeline
        if (parentTimeline){

            effect->ParentTimeline(parentTimeline);

            // Iterate through effect's vector of Tracked Objects
            for (auto const& trackedObject : effect->trackedObjects){

                // Cast the Tracked Object as TrackedObjectBBox
                std::shared_ptr<TrackedObjectBBox> trackedObjectBBox = std::static_pointer_cast<TrackedObjectBBox>(trackedObject.second);

                // Set the Tracked Object's parent clip to this
                trackedObjectBBox->ParentClip(this);

                // Add the Tracked Object to the timeline
                parentTimeline->AddTrackedObject(trackedObjectBBox);
            }
        }
    }
#endif

    // Clear cache
    cache.Clear();
}

// Remove an effect from the clip
void Clip::RemoveEffect(EffectBase* effect)
{
    effects.remove(effect);
}

// Apply effects to the source frame (if any)
void Clip::apply_effects(std::shared_ptr<Frame> frame)
{
    // Find Effects at this position and layer
    for (auto effect : effects)
    {
        // Apply the effect to this frame
        frame = effect->GetFrame(frame, frame->number);

    } // end effect loop
}

// Compare 2 floating point numbers for equality
bool Clip::isEqual(double a, double b)
{
    return fabs(a - b) < 0.000001;
}

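// Example (illustrative): isEqual(0.1 + 0.2, 0.3) returns true; the rounding
// error (about 5.6e-17) is far below the 1e-6 tolerance.
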
// Apply keyframes to the source frame (if any)
void Clip::apply_keyframes(std::shared_ptr<Frame> frame, std::shared_ptr<QImage> background_canvas) {
    // Skip out if video was disabled or this is only an audio frame (no visualisation in use)
    if (has_video.GetInt(frame->number) == 0 ||
        (!Waveform() && !Reader()->info.has_video))
        // Skip the rest of the image processing for performance reasons
        return;

    // Get image from clip
    std::shared_ptr<QImage> source_image = frame->GetImage();

    /* REPLACE IMAGE WITH WAVEFORM IMAGE (IF NEEDED) */
    if (Waveform())
    {
        // Debug output
        ZmqLogger::Instance()->AppendDebugMethod("Clip::apply_keyframes (Generate Waveform Image)", "frame->number", frame->number, "Waveform()", Waveform());

        // Get the color of the waveform
        int red = wave_color.red.GetInt(frame->number);
        int green = wave_color.green.GetInt(frame->number);
        int blue = wave_color.blue.GetInt(frame->number);
        int alpha = wave_color.alpha.GetInt(frame->number);

        // Generate Waveform Dynamically (the size of the timeline)
        source_image = frame->GetWaveform(background_canvas->width(), background_canvas->height(), red, green, blue, alpha);
    }

    // Size of final image
    int width = background_canvas->width();
    int height = background_canvas->height();

    // Get transform from clip's keyframes
    QTransform transform = get_transform(frame, width, height);

    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod("Clip::apply_keyframes (Transform: Composite Image Layer: Prepare)", "frame->number", frame->number);

    // Load timeline's new frame image into a QPainter
    QPainter painter(background_canvas.get());
    painter.setRenderHints(QPainter::Antialiasing | QPainter::SmoothPixmapTransform | QPainter::TextAntialiasing, true);

    // Apply transform (translate, rotate, scale)
    painter.setTransform(transform);

    // Composite a new layer onto the image
    painter.setCompositionMode(QPainter::CompositionMode_SourceOver);
    painter.drawImage(0, 0, *source_image);

    if (timeline) {
        Timeline *t = (Timeline *) timeline;

        // Draw frame #'s on top of image (if needed)
        if (display != FRAME_DISPLAY_NONE) {
            std::stringstream frame_number_str;
            switch (display) {
                case (FRAME_DISPLAY_NONE):
                    // This is only here to prevent unused-enum warnings
                    break;

                case (FRAME_DISPLAY_CLIP):
                    frame_number_str << frame->number;
                    break;

                case (FRAME_DISPLAY_TIMELINE):
                    frame_number_str << (position * t->info.fps.ToFloat()) + frame->number;
                    break;

                case (FRAME_DISPLAY_BOTH):
                    frame_number_str << (position * t->info.fps.ToFloat()) + frame->number << " (" << frame->number << ")";
                    break;
            }

            // Draw frame number on top of image
            painter.setPen(QColor("#ffffff"));
            painter.drawText(20, 20, QString(frame_number_str.str().c_str()));
        }
    }
    painter.end();

    // Add new QImage to frame
    frame->AddImage(background_canvas);
}

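// Note: the compositing order above is fixed: optional waveform substitution,
// then the keyframe transform, then a SourceOver draw onto the background
// canvas, and finally the optional frame-number overlay.
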
// Build the transform for this clip's keyframes (and pre-apply alpha)
QTransform Clip::get_transform(std::shared_ptr<Frame> frame, int width, int height)
{
    // Get image from clip
    std::shared_ptr<QImage> source_image = frame->GetImage();

    /* ALPHA & OPACITY */
    if (alpha.GetValue(frame->number) != 1.0)
    {
        float alpha_value = alpha.GetValue(frame->number);

        // Get source image's pixels
        unsigned char *pixels = source_image->bits();

        // Loop through pixels
        for (int pixel = 0, byte_index=0; pixel < source_image->width() * source_image->height(); pixel++, byte_index+=4)
        {
            // Apply alpha to pixel values (since we use a premultiplied value, we must
            // multiply the alpha with all colors).
            pixels[byte_index + 0] *= alpha_value;
            pixels[byte_index + 1] *= alpha_value;
            pixels[byte_index + 2] *= alpha_value;
            pixels[byte_index + 3] *= alpha_value;
        }

        // Debug output
        ZmqLogger::Instance()->AppendDebugMethod("Clip::get_transform (Set Alpha & Opacity)", "alpha_value", alpha_value, "frame->number", frame->number);
    }

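    // Worked example (illustrative): with alpha_value == 0.5, a premultiplied
    // ARGB pixel (255, 128, 64, 255) becomes (127, 64, 32, 127) above; the
    // color channels must scale together with the alpha channel.
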
    /* RESIZE SOURCE IMAGE - based on scale type */
    QSize source_size = source_image->size();

    // Apply stretch scale to correctly fit the bounding-box
    if (parentTrackedObject){
        scale = SCALE_STRETCH;
    }

    switch (scale)
    {
        case (SCALE_FIT): {
            source_size.scale(width, height, Qt::KeepAspectRatio);

            // Debug output
            ZmqLogger::Instance()->AppendDebugMethod("Clip::get_transform (Scale: SCALE_FIT)", "frame->number", frame->number, "source_width", source_size.width(), "source_height", source_size.height());
            break;
        }
        case (SCALE_STRETCH): {
            source_size.scale(width, height, Qt::IgnoreAspectRatio);

            // Debug output
            ZmqLogger::Instance()->AppendDebugMethod("Clip::get_transform (Scale: SCALE_STRETCH)", "frame->number", frame->number, "source_width", source_size.width(), "source_height", source_size.height());
            break;
        }
        case (SCALE_CROP): {
            source_size.scale(width, height, Qt::KeepAspectRatioByExpanding);

            // Debug output
            ZmqLogger::Instance()->AppendDebugMethod("Clip::get_transform (Scale: SCALE_CROP)", "frame->number", frame->number, "source_width", source_size.width(), "source_height", source_size.height());
            break;
        }
        case (SCALE_NONE): {
            // Image is already the original size (i.e. no scaling mode) relative
            // to the preview window size (i.e. timeline / preview ratio). No further
            // scaling is needed here.
            // Debug output
            ZmqLogger::Instance()->AppendDebugMethod("Clip::get_transform (Scale: SCALE_NONE)", "frame->number", frame->number, "source_width", source_size.width(), "source_height", source_size.height());
            break;
        }
    }

1392  // Initialize parent object's properties (Clip or Tracked Object)
1393  float parentObject_location_x = 0.0;
1394  float parentObject_location_y = 0.0;
1395  float parentObject_scale_x = 1.0;
1396  float parentObject_scale_y = 1.0;
1397  float parentObject_shear_x = 0.0;
1398  float parentObject_shear_y = 0.0;
1399  float parentObject_rotation = 0.0;
1400 
1401  // Get the parentClipObject properties
1402  if (parentClipObject){
1403 
1404  // Convert Clip's frame position to Timeline's frame position
1405  long clip_start_position = round(Position() * info.fps.ToDouble()) + 1;
1406  long clip_start_frame = (Start() * info.fps.ToDouble()) + 1;
1407  double timeline_frame_number = frame->number + clip_start_position - clip_start_frame;
1408 
1409  // Get parent object's properties (Clip)
1410  parentObject_location_x = parentClipObject->location_x.GetValue(timeline_frame_number);
1411  parentObject_location_y = parentClipObject->location_y.GetValue(timeline_frame_number);
1412  parentObject_scale_x = parentClipObject->scale_x.GetValue(timeline_frame_number);
1413  parentObject_scale_y = parentClipObject->scale_y.GetValue(timeline_frame_number);
1414  parentObject_shear_x = parentClipObject->shear_x.GetValue(timeline_frame_number);
1415  parentObject_shear_y = parentClipObject->shear_y.GetValue(timeline_frame_number);
1416  parentObject_rotation = parentClipObject->rotation.GetValue(timeline_frame_number);
1417  }
1418 
1419  // Get the parentTrackedObject properties
1420  if (parentTrackedObject){
1421 
1422  // Convert Clip's frame position to Timeline's frame position
1423  long clip_start_position = round(Position() * info.fps.ToDouble()) + 1;
1424  long clip_start_frame = (Start() * info.fps.ToDouble()) + 1;
1425  double timeline_frame_number = frame->number + clip_start_position - clip_start_frame;
1426 
1427  // Get parentTrackedObject's parent clip's properties
1428  std::map<std::string, float> trackedObjectParentClipProperties = parentTrackedObject->GetParentClipProperties(timeline_frame_number);
1429 
1430  // Get the attached object's parent clip's properties
1431  if (!trackedObjectParentClipProperties.empty())
1432  {
1433  // Get parent object's properties (Tracked Object)
1434  float parentObject_frame_number = trackedObjectParentClipProperties["frame_number"];
1435 
1436  // Access the parentTrackedObject's properties
1437  std::map<std::string, float> trackedObjectProperties = parentTrackedObject->GetBoxValues(parentObject_frame_number);
1438 
1439  // Get the Tracked Object's properties and correct them by the clip's reference system
1440  parentObject_location_x = trackedObjectProperties["cx"] - 0.5 + trackedObjectParentClipProperties["location_x"];
1441  parentObject_location_y = trackedObjectProperties["cy"] - 0.5 + trackedObjectParentClipProperties["location_y"];
1442  parentObject_scale_x = trackedObjectProperties["w"] * trackedObjectProperties["sx"];
1443  parentObject_scale_y = trackedObjectProperties["h"] * trackedObjectProperties["sy"];
1444  parentObject_rotation = trackedObjectProperties["r"] + trackedObjectParentClipProperties["rotation"];
1445  }
1446  else
1447  {
1448  // Access the parentTrackedObject's properties
1449  std::map<std::string, float> trackedObjectProperties = parentTrackedObject->GetBoxValues(timeline_frame_number);
1450 
1451  // Get the Tracked Object's properties and correct them by the clip's reference system
1452  parentObject_location_x = trackedObjectProperties["cx"] - 0.5;
1453  parentObject_location_y = trackedObjectProperties["cy"] - 0.5;
1454  parentObject_scale_x = trackedObjectProperties["w"] * trackedObjectProperties["sx"];
1455  parentObject_scale_y = trackedObjectProperties["h"] * trackedObjectProperties["sy"];
1456  parentObject_rotation = trackedObjectProperties["r"];
1457  }
1458  }
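  // Note: cx/cy above are assumed to be the tracked box center in normalized
  // [0..1] frame coordinates (and w/h its normalized size), so subtracting 0.5
  // re-expresses the center as an offset from the frame middle. For example, a
  // hypothetical cx = 0.75 yields parentObject_location_x = +0.25, i.e. a
  // quarter of the frame width to the right.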
1459 
1460  /* GRAVITY LOCATION - Initialize X & Y to the correct values (before applying location curves) */
1461  float x = 0.0; // left
1462  float y = 0.0; // top
1463 
1464  // Adjust size for scale x and scale y
1465  float sx = scale_x.GetValue(frame->number); // percentage X scale
1466  float sy = scale_y.GetValue(frame->number); // percentage Y scale
1467 
1468  // Change clip's scale to parentObject's scale
1469  if(parentObject_scale_x != 0.0 && parentObject_scale_y != 0.0){
1470  sx *= parentObject_scale_x;
1471  sy *= parentObject_scale_y;
1472  }
1473 
1474  float scaled_source_width = source_size.width() * sx;
1475  float scaled_source_height = source_size.height() * sy;
1476 
1477  switch (gravity)
1478  {
1479  case (GRAVITY_TOP_LEFT):
1480  // This is only here to prevent unused-enum warnings
1481  break;
1482  case (GRAVITY_TOP):
1483  x = (width - scaled_source_width) / 2.0; // center
1484  break;
1485  case (GRAVITY_TOP_RIGHT):
1486  x = width - scaled_source_width; // right
1487  break;
1488  case (GRAVITY_LEFT):
1489  y = (height - scaled_source_height) / 2.0; // center
1490  break;
1491  case (GRAVITY_CENTER):
1492  x = (width - scaled_source_width) / 2.0; // center
1493  y = (height - scaled_source_height) / 2.0; // center
1494  break;
1495  case (GRAVITY_RIGHT):
1496  x = width - scaled_source_width; // right
1497  y = (height - scaled_source_height) / 2.0; // center
1498  break;
1499  case (GRAVITY_BOTTOM_LEFT):
1500  y = (height - scaled_source_height); // bottom
1501  break;
1502  case (GRAVITY_BOTTOM):
1503  x = (width - scaled_source_width) / 2.0; // center
1504  y = (height - scaled_source_height); // bottom
1505  break;
1506  case (GRAVITY_BOTTOM_RIGHT):
1507  x = width - scaled_source_width; // right
1508  y = (height - scaled_source_height); // bottom
1509  break;
1510  }
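  // Illustrative example (hypothetical numbers): on a 1280x720 canvas with a
  // scaled source of 640x360, the offsets computed above are
  //   GRAVITY_CENTER       -> x = (1280 - 640) / 2 = 320, y = (720 - 360) / 2 = 180
  //   GRAVITY_BOTTOM_RIGHT -> x = 1280 - 640 = 640,       y = 720 - 360 = 360
  //   GRAVITY_TOP_LEFT     -> x = 0, y = 0 (the initialized defaults)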
1511 
1512  // Debug output
1513  ZmqLogger::Instance()->AppendDebugMethod("Clip::get_transform (Gravity)", "frame->number", frame->number, "source_clip->gravity", gravity, "scaled_source_width", scaled_source_width, "scaled_source_height", scaled_source_height);
1514 
1515  QTransform transform;
1516 
1517  /* LOCATION, ROTATION, AND SCALE */
1518  float r = rotation.GetValue(frame->number) + parentObject_rotation; // rotate in degrees
1519  x += (width * (location_x.GetValue(frame->number) + parentObject_location_x)); // move in percentage of final width
1520  y += (height * (location_y.GetValue(frame->number) + parentObject_location_y)); // move in percentage of final height
1521  float shear_x_value = shear_x.GetValue(frame->number) + parentObject_shear_x;
1522  float shear_y_value = shear_y.GetValue(frame->number) + parentObject_shear_y;
1523  float origin_x_value = origin_x.GetValue(frame->number);
1524  float origin_y_value = origin_y.GetValue(frame->number);
1525 
1526  // Transform source image (if needed)
1527  ZmqLogger::Instance()->AppendDebugMethod("Clip::get_transform (Build QTransform - if needed)", "frame->number", frame->number, "x", x, "y", y, "r", r, "sx", sx, "sy", sy);
1528 
1529  if (!isEqual(x, 0) || !isEqual(y, 0)) {
1530  // TRANSLATE/MOVE CLIP
1531  transform.translate(x, y);
1532  }
1533  if (!isEqual(r, 0) || !isEqual(shear_x_value, 0) || !isEqual(shear_y_value, 0)) {
1534  // ROTATE CLIP (around origin_x, origin_y)
1535  float origin_x_offset = (scaled_source_width * origin_x_value);
1536  float origin_y_offset = (scaled_source_height * origin_y_value);
1537  transform.translate(origin_x_offset, origin_y_offset);
1538  transform.rotate(r);
1539  transform.shear(shear_x_value, shear_y_value);
1540  transform.translate(-origin_x_offset, -origin_y_offset);
1541  }
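  // The translate/rotate/shear/translate sequence above is the usual
  // "transform about a pivot" pattern: QTransform applies later calls to
  // source points first, so each point is shifted by (-offset), rotated and
  // sheared around the transform origin, then shifted back, i.e. the rotation
  // and shear pivot on the keyframed origin point. A sketch with hypothetical
  // numbers, rotating 90 degrees about the center of a 640x360 source:
  //
  //   transform.translate(320, 180);
  //   transform.rotate(90);
  //   transform.translate(-320, -180);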
1542  // SCALE CLIP (if needed)
1543  float source_width_scale = (float(source_size.width()) / float(source_image->width())) * sx;
1544  float source_height_scale = (float(source_size.height()) / float(source_image->height())) * sy;
1545  if (!isEqual(source_width_scale, 1.0) || !isEqual(source_height_scale, 1.0)) {
1546  transform.scale(source_width_scale, source_height_scale);
1547  }
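  // Illustrative example (hypothetical numbers): if the raw source_image is
  // 1920px wide, source_size was scaled down to 1280px, and sx = 0.5, then
  //   source_width_scale = (1280.0 / 1920.0) * 0.5 = 0.333...
  // i.e. a single QTransform::scale() both fits the raw image to the canvas
  // and applies the keyframed scale.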
1548 
1549  return transform;
1550 }
Header file for ChunkReader class.
Header file for Clip class.
Header file for DummyReader class.
Header file for all Exception classes.
Header file for FFmpegReader class.
Header file for the FrameMapper class.
Header file for ImageReader class.
Header file for QtImageReader class.
Header file for TextReader class.
Header file for Timeline class.
This class is used to resample audio data for many sequential frames.
juce::AudioSampleBuffer * GetResampledBuffer()
Get the resampled audio buffer.
void SetBuffer(juce::AudioSampleBuffer *new_buffer, double sample_rate, double new_sample_rate)
Sets the audio buffer and key settings.
void Clear()
Clear the cache of all frames.
This class reads a special chunk-formatted file, which can be easily shared in a distributed environm...
Definition: ChunkReader.h:98
float Start() const
Get start position (in seconds) of clip (trim start of video)
Definition: ClipBase.h:110
float start
The position in seconds to start playing (used to trim the beginning of a clip)
Definition: ClipBase.h:56
float Duration() const
Get the length of this clip (in seconds)
Definition: ClipBase.h:112
std::string Id() const
Get the Id of this clip object.
Definition: ClipBase.h:107
virtual Json::Value JsonValue() const =0
Generate Json::Value for this object.
Definition: ClipBase.cpp:36
Json::Value add_property_choice_json(std::string name, int value, int selected_value) const
Generate JSON choice for a property (dropdown properties)
Definition: ClipBase.cpp:104
CacheMemory cache
Definition: ClipBase.h:68
int Layer() const
Get layer of clip on timeline (lower number is covered by higher numbers)
Definition: ClipBase.h:109
openshot::TimelineBase * ParentTimeline()
Get the associated Timeline pointer (if any)
Definition: ClipBase.h:113
virtual void SetJsonValue(const Json::Value root)=0
Load Json::Value into this object.
Definition: ClipBase.cpp:52
openshot::TimelineBase * timeline
Pointer to the parent timeline instance (if any)
Definition: ClipBase.h:59
float Position() const
Get position on timeline (in seconds)
Definition: ClipBase.h:108
float position
The position on the timeline where this clip should start playing.
Definition: ClipBase.h:54
float end
The position in seconds to end playing (used to trim the ending of a clip)
Definition: ClipBase.h:57
std::string previous_properties
This string contains the previous JSON properties.
Definition: ClipBase.h:58
Json::Value add_property_json(std::string name, float value, std::string type, std::string memo, const Keyframe *keyframe, float min_value, float max_value, bool readonly, int64_t requested_frame) const
Generate JSON for a property.
Definition: ClipBase.cpp:68
This class represents a clip (used to arrange readers on the timeline)
Definition: Clip.h:109
void SetAttachedObject(std::shared_ptr< openshot::TrackedObjectBase > trackedObject)
Set the pointer to the trackedObject this clip is attached to.
Definition: Clip.cpp:267
openshot::Keyframe scale_x
Curve representing the horizontal scaling in percent (0 to 1)
Definition: Clip.h:305
openshot::Keyframe location_y
Curve representing the relative Y position in percent based on the gravity (-1 to 1)
Definition: Clip.h:308
openshot::Keyframe shear_x
Curve representing X shear angle in degrees (-45.0=left, 45.0=right)
Definition: Clip.h:313
openshot::Keyframe perspective_c4_x
Curves representing X for coordinate 4.
Definition: Clip.h:332
openshot::AnchorType anchor
The anchor determines what parent a clip should snap to.
Definition: Clip.h:176
juce::CriticalSection getFrameCriticalSection
Section lock for multiple threads.
Definition: Clip.h:112
openshot::VolumeMixType mixing
What strategy should be followed when mixing audio with other clips.
Definition: Clip.h:178
void Open() override
Open the internal reader.
Definition: Clip.cpp:302
openshot::Keyframe rotation
Curve representing the rotation (0 to 360)
Definition: Clip.h:312
openshot::Keyframe channel_filter
A number representing an audio channel to filter (clears all other channels)
Definition: Clip.h:336
openshot::FrameDisplayType display
The format to display the frame number (if any)
Definition: Clip.h:177
void init_reader_rotation()
Update default rotation from reader.
Definition: Clip.cpp:123
Clip()
Default Constructor.
Definition: Clip.cpp:143
openshot::Keyframe perspective_c1_x
Curves representing X for coordinate 1.
Definition: Clip.h:326
float End() const
Get end position (in seconds) of clip (trim end of video), which can be affected by the time curve.
Definition: Clip.cpp:338
void AttachToObject(std::string object_id)
Attach clip to Tracked Object or to another Clip.
Definition: Clip.cpp:245
std::string Json() const override
Generate JSON string of this object.
Definition: Clip.cpp:752
openshot::EffectBase * GetEffect(const std::string &id)
Look up an effect by ID.
Definition: Clip.cpp:450
void SetJsonValue(const Json::Value root) override
Load Json::Value into this object.
Definition: Clip.cpp:982
std::shared_ptr< openshot::Frame > GetFrame(int64_t frame_number) override
Get an openshot::Frame object for a specific frame number of this clip. The image size and number of ...
Definition: Clip.cpp:360
openshot::Keyframe alpha
Curve representing the alpha (1 to 0)
Definition: Clip.h:309
openshot::Keyframe has_audio
An optional override to determine if this clip has audio (-1=undefined, 0=no, 1=yes)
Definition: Clip.h:340
openshot::Keyframe perspective_c3_x
Curves representing X for coordinate 3.
Definition: Clip.h:330
void init_reader_settings()
Init reader info details.
Definition: Clip.cpp:112
openshot::Keyframe perspective_c1_y
Curves representing Y for coordinate 1.
Definition: Clip.h:327
Json::Value JsonValue() const override
Generate Json::Value for this object.
Definition: Clip.cpp:909
void SetAttachedClip(Clip *clipObject)
Set the pointer to the clip this clip is attached to.
Definition: Clip.cpp:273
openshot::Keyframe perspective_c4_y
Curves representing Y for coordinate 4.
Definition: Clip.h:333
openshot::Keyframe time
Curve representing the frames over time to play (used for speed and direction of video)
Definition: Clip.h:319
bool Waveform()
Get the waveform property of this clip.
Definition: Clip.h:301
openshot::GravityType gravity
The gravity of a clip determines where it snaps to its parent.
Definition: Clip.h:174
openshot::Keyframe perspective_c3_y
Curves representing Y for coordinate 3.
Definition: Clip.h:331
void AddEffect(openshot::EffectBase *effect)
Add an effect to the clip.
Definition: Clip.cpp:1169
void Close() override
Close the internal reader.
Definition: Clip.cpp:323
virtual ~Clip()
Destructor.
Definition: Clip.cpp:229
openshot::Keyframe perspective_c2_y
Curves representing Y for coordinate 2.
Definition: Clip.h:329
openshot::Keyframe volume
Curve representing the volume (0 to 1)
Definition: Clip.h:320
openshot::Keyframe shear_y
Curve representing Y shear angle in degrees (-45.0=down, 45.0=up)
Definition: Clip.h:314
openshot::Keyframe scale_y
Curve representing the vertical scaling in percent (0 to 1)
Definition: Clip.h:306
openshot::ReaderBase * Reader()
Get the current reader.
Definition: Clip.cpp:292
void RemoveEffect(openshot::EffectBase *effect)
Remove an effect from the clip.
Definition: Clip.cpp:1216
openshot::Keyframe channel_mapping
A number representing an audio channel to output (only works when filtering a channel)
Definition: Clip.h:337
openshot::Keyframe has_video
An optional override to determine if this clip has video (-1=undefined, 0=no, 1=yes)
Definition: Clip.h:341
std::string PropertiesJSON(int64_t requested_frame) const override
Definition: Clip.cpp:759
openshot::Color wave_color
Curve representing the color of the audio wave form.
Definition: Clip.h:323
void init_settings()
Init default settings for a clip.
Definition: Clip.cpp:47
openshot::Keyframe perspective_c2_x
Curves representing X for coordinate 2.
Definition: Clip.h:328
openshot::ScaleType scale
The scale determines how a clip should be resized to fit its parent.
Definition: Clip.h:175
openshot::Keyframe location_x
Curve representing the relative X position in percent based on the gravity (-1 to 1)
Definition: Clip.h:307
openshot::Keyframe origin_x
Curve representing X origin point (0.0=0% (left), 1.0=100% (right))
Definition: Clip.h:315
void SetJson(const std::string value) override
Load JSON string into this object.
Definition: Clip.cpp:965
openshot::Keyframe origin_y
Curve representing Y origin point (0.0=0% (top), 1.0=100% (bottom))
Definition: Clip.h:316
This class represents a color (used on the timeline and clips)
Definition: Color.h:45
openshot::Keyframe blue
Curve representing the blue value (0 - 255)
Definition: Color.h:50
openshot::Keyframe red
Curve representing the red value (0 - 255)
Definition: Color.h:48
openshot::Keyframe green
Curve representing the green value (0 - 255)
Definition: Color.h:49
void SetJsonValue(const Json::Value root)
Load Json::Value into this object.
Definition: Color.cpp:138
openshot::Keyframe alpha
Curve representing the alpha value (0 - 255)
Definition: Color.h:51
Json::Value JsonValue() const
Generate Json::Value for this object.
Definition: Color.cpp:107
This class is used as a simple, dummy reader, which can be very useful when writing unit tests....
Definition: DummyReader.h:104
This abstract class is the base class, used by all effects in libopenshot.
Definition: EffectBase.h:71
openshot::ClipBase * ParentClip()
Parent clip object of this effect (which can be unparented and NULL)
Definition: EffectBase.cpp:188
virtual void SetJsonValue(const Json::Value root)
Load Json::Value into this object.
Definition: EffectBase.cpp:127
EffectInfoStruct info
Information about the current effect.
Definition: EffectBase.h:87
std::map< int, std::shared_ptr< openshot::TrackedObjectBase > > trackedObjects
Map of Tracked Object's by their indices (used by Effects that track objects on clips)
Definition: EffectBase.h:84
This class returns a listing of all effects supported by libopenshot.
Definition: EffectInfo.h:48
This class uses the FFmpeg libraries, to open video files and audio files, and return openshot::Frame...
Definition: FFmpegReader.h:93
int num
Numerator for the fraction.
Definition: Fraction.h:50
float ToFloat()
Return this fraction as a float (i.e. 1/2 = 0.5)
Definition: Fraction.cpp:54
double ToDouble() const
Return this fraction as a double (i.e. 1/2 = 0.5)
Definition: Fraction.cpp:59
int den
Denominator for the fraction.
Definition: Fraction.h:51
int GetSamplesPerFrame(openshot::Fraction fps, int sample_rate, int channels)
Calculate the # of samples per video frame (for the current frame number)
Definition: Frame.cpp:536
This class uses the ImageMagick++ libraries, to open image files, and return openshot::Frame objects ...
Definition: ImageReader.h:74
Exception for invalid JSON.
Definition: Exceptions.h:206
A Keyframe is a collection of Point instances, which is used to vary a number or property over time.
Definition: KeyFrame.h:72
int GetInt(int64_t index) const
Get the rounded INT value at a specific index.
Definition: KeyFrame.cpp:292
void SetJsonValue(const Json::Value root)
Load Json::Value into this object.
Definition: KeyFrame.cpp:368
double GetDelta(int64_t index) const
Get the change in Y value (from the previous Y value)
Definition: KeyFrame.cpp:491
int64_t GetLength() const
Definition: KeyFrame.cpp:509
Fraction GetRepeatFraction(int64_t index) const
Get the fraction that represents how many times this value is repeated in the curve.
Definition: KeyFrame.cpp:388
int64_t GetLong(int64_t index) const
Get the rounded LONG value at a specific index.
Definition: KeyFrame.cpp:297
double GetValue(int64_t index) const
Get the value at a specific index.
Definition: KeyFrame.cpp:268
Json::Value JsonValue() const
Generate Json::Value for this object.
Definition: KeyFrame.cpp:335
bool IsIncreasing(int index) const
Get the direction of the curve at a specific index (increasing or decreasing)
Definition: KeyFrame.cpp:302
int64_t GetCount() const
Get the number of points in the keyframe's curve.
Definition: KeyFrame.cpp:516
Exception for frames that are out of bounds.
Definition: Exceptions.h:286
This class uses the Qt library, to open image files, and return openshot::Frame objects containing th...
Definition: QtImageReader.h:68
This abstract class is the base class, used by all readers in libopenshot.
Definition: ReaderBase.h:98
virtual bool IsOpen()=0
Determine if reader is open or closed.
openshot::ReaderInfo info
Information about the current media file.
Definition: ReaderBase.h:111
virtual void SetJsonValue(const Json::Value root)=0
Load Json::Value into this object.
Definition: ReaderBase.cpp:171
virtual Json::Value JsonValue() const =0
Generate Json::Value for this object.
Definition: ReaderBase.cpp:116
virtual std::shared_ptr< openshot::Frame > GetFrame(int64_t number)=0
virtual void Open()=0
Open the reader (and start consuming resources, such as images or video files)
openshot::ClipBase * ParentClip()
Parent clip object of this reader (which can be unparented and NULL)
Definition: ReaderBase.cpp:254
virtual void Close()=0
Close the reader (and any resources it was consuming)
Exception when a reader is closed, and a frame is requested.
Definition: Exceptions.h:338
This class uses the ImageMagick++ libraries, to create frames with "Text", and return openshot::Frame...
Definition: TextReader.h:86
This class represents a timeline.
Definition: Timeline.h:168
void AddTrackedObject(std::shared_ptr< openshot::TrackedObjectBase > trackedObject)
Add to the tracked_objects map a pointer to a tracked object (TrackedObjectBBox)
Definition: Timeline.cpp:247
std::shared_ptr< openshot::TrackedObjectBase > GetTrackedObject(std::string id) const
Return tracked object pointer by its id.
Definition: Timeline.cpp:265
openshot::Clip * GetClip(const std::string &id)
Look up a single clip by ID.
Definition: Timeline.cpp:404
std::shared_ptr< openshot::Frame > apply_effects(std::shared_ptr< openshot::Frame > frame, int64_t timeline_frame_number, int layer)
Apply global/timeline effects to the source frame (if any)
Definition: Timeline.cpp:537
void AppendDebugMethod(std::string method_name, std::string arg1_name="", float arg1_value=-1.0, std::string arg2_name="", float arg2_value=-1.0, std::string arg3_name="", float arg3_value=-1.0, std::string arg4_name="", float arg4_value=-1.0, std::string arg5_name="", float arg5_value=-1.0, std::string arg6_name="", float arg6_value=-1.0)
Append debug information.
Definition: ZmqLogger.cpp:190
static ZmqLogger * Instance()
Create or get an instance of this logger singleton (invoke the class with this method)
Definition: ZmqLogger.cpp:52
This namespace is the default namespace for all code in the openshot library.
Definition: Compressor.h:47
AnchorType
This enumeration determines what parent a clip should be aligned to.
Definition: Enums.h:62
@ ANCHOR_CANVAS
Anchor the clip to the canvas.
Definition: Enums.h:63
ChunkVersion
This enumeration allows the user to choose which version of the chunk they would like (low,...
Definition: ChunkReader.h:69
GravityType
This enumeration determines how clips are aligned to their parent container.
Definition: Enums.h:39
@ GRAVITY_TOP_LEFT
Align clip to the top left of its parent.
Definition: Enums.h:40
@ GRAVITY_LEFT
Align clip to the left of its parent (middle aligned)
Definition: Enums.h:43
@ GRAVITY_TOP_RIGHT
Align clip to the top right of its parent.
Definition: Enums.h:42
@ GRAVITY_RIGHT
Align clip to the right of its parent (middle aligned)
Definition: Enums.h:45
@ GRAVITY_BOTTOM_LEFT
Align clip to the bottom left of its parent.
Definition: Enums.h:46
@ GRAVITY_BOTTOM
Align clip to the bottom center of its parent.
Definition: Enums.h:47
@ GRAVITY_TOP
Align clip to the top center of its parent.
Definition: Enums.h:41
@ GRAVITY_BOTTOM_RIGHT
Align clip to the bottom right of its parent.
Definition: Enums.h:48
@ GRAVITY_CENTER
Align clip to the center of its parent (middle aligned)
Definition: Enums.h:44
ScaleType
This enumeration determines how clips are scaled to fit their parent container.
Definition: Enums.h:53
@ SCALE_FIT
Scale the clip until either height or width fills the canvas (with no cropping)
Definition: Enums.h:55
@ SCALE_STRETCH
Scale the clip until both height and width fill the canvas (distort to fit)
Definition: Enums.h:56
@ SCALE_CROP
Scale the clip until both height and width fill the canvas (cropping the overlap)
Definition: Enums.h:54
@ SCALE_NONE
Do not scale the clip.
Definition: Enums.h:57
VolumeMixType
This enumeration determines the strategy when mixing audio with other clips.
Definition: Enums.h:78
@ VOLUME_MIX_AVERAGE
Evenly divide the overlapping clips' volume keyframes, so that the sum does not exceed 100%.
Definition: Enums.h:80
@ VOLUME_MIX_NONE
Do not apply any volume mixing adjustments. Just add the samples together.
Definition: Enums.h:79
@ VOLUME_MIX_REDUCE
Reduce volume by about 25%, and then mix (louder, but could cause pops if the sum exceeds 100%)
Definition: Enums.h:81
FrameDisplayType
This enumeration determines the display format of the clip's frame number (if any)....
Definition: Enums.h:69
@ FRAME_DISPLAY_CLIP
Display the clip's internal frame number.
Definition: Enums.h:71
@ FRAME_DISPLAY_TIMELINE
Display the timeline's frame number.
Definition: Enums.h:72
@ FRAME_DISPLAY_BOTH
Display both the clip's and timeline's frame number.
Definition: Enums.h:73
@ FRAME_DISPLAY_NONE
Do not display the frame number.
Definition: Enums.h:70
const Json::Value stringToJson(const std::string value)
Definition: Json.cpp:34
bool has_tracked_object
Determines if this effect tracks objects through the clip.
Definition: EffectBase.h:60
float duration
Length of time (in seconds)
Definition: ReaderBase.h:65
int width
The width of the video (in pixels)
Definition: ReaderBase.h:68
int channels
The number of audio channels used in the audio stream.
Definition: ReaderBase.h:83
openshot::Fraction fps
Frames per second, as a fraction (i.e. 24/1 = 24 fps)
Definition: ReaderBase.h:70
int height
The height of the video (in pixels)
Definition: ReaderBase.h:67
std::map< std::string, std::string > metadata
An optional map/dictionary of metadata for this reader.
Definition: ReaderBase.h:87
openshot::ChannelLayout channel_layout
The channel layout (mono, stereo, 5 point surround, etc...)
Definition: ReaderBase.h:84
bool has_video
Determines if this file has a video stream.
Definition: ReaderBase.h:62
bool has_audio
Determines if this file has an audio stream.
Definition: ReaderBase.h:63
int sample_rate
The number of audio samples per second (44100 is a common sample rate)
Definition: ReaderBase.h:82
This struct contains info about the current Timeline clip instance.
Definition: TimelineBase.h:47
bool is_top_clip
Is clip on top (if overlapping another clip)
Definition: TimelineBase.h:48