/**
 * @file
 * @brief Source file for Clip class
 * @author Jonathan Thomas <jonathan@openshot.org>
 */

// Copyright (c) 2008-2019 OpenShot Studios, LLC
//
// SPDX-License-Identifier: LGPL-3.0-or-later

#include "Clip.h"

#include "AudioResampler.h"
#include "Exceptions.h"
#include "FFmpegReader.h"
#include "FrameMapper.h"
#include "QtImageReader.h"
#include "ChunkReader.h"
#include "DummyReader.h"
#include "Timeline.h"
#include "ZmqLogger.h"

#ifdef USE_IMAGEMAGICK
    #include "MagickUtilities.h"
    #include "ImageReader.h"
    #include "TextReader.h"
#endif

#include <Qt>
#include <algorithm>
#include <cmath>
#include <cstdlib>
#include <memory>
#include <sstream>

using namespace openshot;

// Init default settings for a clip
void Clip::init_settings()
{
    // Init clip settings
    Position(0.0);
    Layer(0);
    Start(0.0);
    ClipBase::End(0.0);
    gravity = GRAVITY_CENTER;
    scale = SCALE_FIT;
    anchor = ANCHOR_CANVAS;
    display = FRAME_DISPLAY_NONE;
    mixing = VOLUME_MIX_NONE;
    waveform = false;
    previous_location.frame = 0;
    previous_location.sample_start = 0;
    parentObjectId = "";

    // Init scale curves
    scale_x = Keyframe(1.0);
    scale_y = Keyframe(1.0);

    // Init location curves
    location_x = Keyframe(0.0);
    location_y = Keyframe(0.0);

    // Init alpha
    alpha = Keyframe(1.0);

    // Init time & volume
    time = Keyframe(1.0);
    volume = Keyframe(1.0);

    // Init audio waveform color
    wave_color = Color((unsigned char)0, (unsigned char)123, (unsigned char)255, (unsigned char)255);

    // Init shear and perspective curves
    shear_x = Keyframe(0.0);
    shear_y = Keyframe(0.0);
    origin_x = Keyframe(0.5);
    origin_y = Keyframe(0.5);
    perspective_c1_x = Keyframe(-1.0);
    perspective_c1_y = Keyframe(-1.0);
    perspective_c2_x = Keyframe(-1.0);
    perspective_c2_y = Keyframe(-1.0);
    perspective_c3_x = Keyframe(-1.0);
    perspective_c3_y = Keyframe(-1.0);
    perspective_c4_x = Keyframe(-1.0);
    perspective_c4_y = Keyframe(-1.0);

    // Init audio channel filter and mappings
    channel_filter = Keyframe(-1.0);
    channel_mapping = Keyframe(-1.0);

    // Init audio and video overrides
    has_audio = Keyframe(-1.0);
    has_video = Keyframe(-1.0);

    // Initialize the attached object and attached clip as null pointers
    parentTrackedObject = nullptr;
    parentClipObject = NULL;

    // Init reader info struct
    init_reader_settings();
}

// Init reader info details
void Clip::init_reader_settings() {
    if (reader) {
        // Init rotation (if any)
        init_reader_rotation();

        // Initialize info struct
        info = reader->info;

        // Init cache
        final_cache.SetMaxBytesFromInfo(8, info.width, info.height, info.sample_rate, info.channels);
    }
}

// Init reader rotation (if any)
void Clip::init_reader_rotation() {
    // Don't init rotation if clip already has keyframes.
    if (rotation.GetCount() > 0)
        return;

    // A valid reader is required for the dimension math below
    if (!reader)
        return;

    // Get rotation from metadata (if any)
    float rotate_angle = 0.0f;
    if (reader->info.metadata.count("rotate") > 0) {
        try {
            rotate_angle = strtof(reader->info.metadata["rotate"].c_str(), nullptr);
        } catch (const std::exception& e) {
            // Leave rotate_angle at default 0.0f
        }
    }
    rotation = Keyframe(rotate_angle);

    // Compute uniform scale factors for rotated video.
    // reader->info.width and reader->info.height are the clip's natural dimensions.
    float w = static_cast<float>(reader->info.width);
    float h = static_cast<float>(reader->info.height);
    float rad = rotate_angle * M_PI / 180.0f;

    // Calculate the dimensions of the bounding box for the rotated clip.
    float new_width = fabs(w * cos(rad)) + fabs(h * sin(rad));
    float new_height = fabs(w * sin(rad)) + fabs(h * cos(rad));

    // To have the rotated clip appear the same size as the unrotated clip,
    // compute a uniform scale factor S that brings the bounding box back to (w, h).
    float uniform_scale = std::min(w / new_width, h / new_height);

    // Set scale keyframes uniformly.
    scale_x = Keyframe(uniform_scale);
    scale_y = Keyframe(uniform_scale);
}

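// Worked example of the bounding-box math above (a sketch, not library code):
// for a 1920x1080 clip rotated 90 degrees, new_width = 1080 and new_height = 1920,
// so uniform_scale = min(1920/1080, 1080/1920) = 0.5625. The rotated clip is
// scaled to 56.25% so its bounding box still fits the original 1920x1080 frame.
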
// Default Constructor for a clip
Clip::Clip() : resampler(NULL), reader(NULL), allocated_reader(NULL), is_open(false)
{
    // Init all default settings
    init_settings();
}

// Constructor with reader
Clip::Clip(ReaderBase* new_reader) : resampler(NULL), reader(new_reader), allocated_reader(NULL), is_open(false)
{
    // Init all default settings
    init_settings();

    // Open and Close the reader (to set the duration of the clip)
    Open();
    Close();

    // Update duration and set parent
    if (reader) {
        ClipBase::End(reader->info.duration);
        reader->ParentClip(this);
        // Init reader info struct
        init_reader_settings();
    }
}

// Constructor with filepath
Clip::Clip(std::string path) : resampler(NULL), reader(NULL), allocated_reader(NULL), is_open(false)
{
    // Init all default settings
    init_settings();

    // Get file extension (and convert to lower case)
    std::string ext = get_file_extension(path);
    std::transform(ext.begin(), ext.end(), ext.begin(), ::tolower);

    // Determine if common video formats (or image sequences)
    if (ext=="avi" || ext=="mov" || ext=="mkv" || ext=="mpg" || ext=="mpeg" || ext=="mp3" || ext=="mp4" || ext=="mts" ||
        ext=="ogg" || ext=="wav" || ext=="wmv" || ext=="webm" || ext=="vob" || ext=="gif" || path.find("%") != std::string::npos)
    {
        try
        {
            // Open common video format
            reader = new openshot::FFmpegReader(path);

        } catch(...) { }
    }
    if (ext=="osp")
    {
        try
        {
            // Open OpenShot JSON project file
            reader = new openshot::Timeline(path, true);

        } catch(...) { }
    }

    // If no video found, try each reader
    if (!reader)
    {
        try
        {
            // Try an image reader
            reader = new openshot::QtImageReader(path);

        } catch(...) {
            try
            {
                // Try a video reader
                reader = new openshot::FFmpegReader(path);

            } catch(...) { }
        }
    }

    // Update duration and set parent
    if (reader) {
        ClipBase::End(reader->info.duration);
        reader->ParentClip(this);
        allocated_reader = reader;
        // Init reader info struct
        init_reader_settings();
    }
}

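// Usage sketch (file names are hypothetical): the extension checks above pick
// the reader type automatically.
//
//     openshot::Clip video("movie.mp4");     // FFmpegReader
//     openshot::Clip image("photo.png");     // QtImageReader (with FFmpeg fallback)
//     openshot::Clip project("project.osp"); // Timeline (OpenShot project)
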
// Destructor
Clip::~Clip()
{
    // Delete the reader if clip created it
    if (allocated_reader) {
        delete allocated_reader;
        allocated_reader = NULL;
        reader = NULL;
    }

    // Close the resampler
    if (resampler) {
        delete resampler;
        resampler = NULL;
    }

    // Close clip
    Close();
}

// Attach clip to bounding box
void Clip::AttachToObject(std::string object_id)
{
    // Search for the tracked object on the timeline
    Timeline* parentTimeline = static_cast<Timeline *>(ParentTimeline());

    if (parentTimeline) {
        // Create a smart pointer to the tracked object from the timeline
        std::shared_ptr<openshot::TrackedObjectBase> trackedObject = parentTimeline->GetTrackedObject(object_id);
        Clip* clipObject = parentTimeline->GetClip(object_id);

        // Check for valid tracked object
        if (trackedObject){
            SetAttachedObject(trackedObject);
            parentClipObject = NULL;
        }
        else if (clipObject) {
            SetAttachedClip(clipObject);
            parentTrackedObject = nullptr;
        }
    }
}

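// Usage sketch (ids are hypothetical): attaching requires a parent timeline,
// since the id is resolved via Timeline::GetTrackedObject / Timeline::GetClip.
//
//     clip.ParentTimeline(&timeline);
//     clip.AttachToObject("tracked-object-or-clip-id");
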
// Set the pointer to the trackedObject this clip is attached to
void Clip::SetAttachedObject(std::shared_ptr<openshot::TrackedObjectBase> trackedObject){
    parentTrackedObject = trackedObject;
}

// Set the pointer to the clip this clip is attached to
void Clip::SetAttachedClip(Clip* clipObject){
    parentClipObject = clipObject;
}

// Set the current reader
void Clip::Reader(ReaderBase* new_reader)
{
    // Delete previously allocated reader (if not related to new reader)
    // FrameMappers that point to the same allocated reader are ignored
    bool is_same_reader = false;
    if (new_reader && allocated_reader) {
        if (new_reader->Name() == "FrameMapper") {
            // Determine if FrameMapper is pointing at the same allocated reader
            FrameMapper* clip_mapped_reader = static_cast<FrameMapper*>(new_reader);
            if (allocated_reader == clip_mapped_reader->Reader()) {
                is_same_reader = true;
            }
        }
    }
    // Clear existing allocated reader (if different)
    if (allocated_reader && !is_same_reader) {
        reader->Close();
        allocated_reader->Close();
        delete allocated_reader;
        reader = NULL;
        allocated_reader = NULL;
    }

    // set reader pointer
    reader = new_reader;

    // set parent
    if (reader) {
        reader->ParentClip(this);

        // Init reader info struct
        init_reader_settings();
    }
}

// Get the current reader
ReaderBase* Clip::Reader()
{
    if (reader)
        return reader;
    else
        // Throw error if reader not initialized
        throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
}

// Open the internal reader
void Clip::Open()
{
    if (reader)
    {
        // Open the reader
        reader->Open();
        is_open = true;

        // Copy Reader info to Clip
        info = reader->info;

        // Set some clip properties from the file reader
        if (end == 0.0)
            ClipBase::End(reader->info.duration);
    }
    else
        // Throw error if reader not initialized
        throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
}

// Close the internal reader
void Clip::Close()
{
    if (is_open && reader) {
        ZmqLogger::Instance()->AppendDebugMethod("Clip::Close");

        // Close the reader
        reader->Close();
    }

    // Clear cache
    final_cache.Clear();
    is_open = false;
}

// Get end position of clip (trim end of video), which can be affected by the time curve
float Clip::End() const
{
    // if a time curve is present, use its length
    if (time.GetCount() > 1)
    {
        // Determine the FPS of this clip
        float fps = 24.0;
        if (reader)
            // file reader
            fps = reader->info.fps.ToFloat();
        else
            // Throw error if reader not initialized
            throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");

        return float(time.GetLength()) / fps;
    }
    else
        // just use the duration (as detected by the reader)
        return end;
}

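// Example (sketch): with a time curve producing 120 output frames on a 24 fps
// reader, End() returns 120 / 24.0 = 5.0 seconds, regardless of the reader's
// own duration.
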
// Override End() position
void Clip::End(float value) {
    ClipBase::End(value);
}

// Set associated Timeline pointer
void Clip::ParentTimeline(openshot::TimelineBase* new_timeline) {
    timeline = new_timeline;

    // Clear cache (it might have changed)
    final_cache.Clear();
}

// Create an openshot::Frame object for a specific frame number of this reader.
std::shared_ptr<Frame> Clip::GetFrame(int64_t clip_frame_number)
{
    // Call override of GetFrame
    return GetFrame(NULL, clip_frame_number, NULL);
}

// Create an openshot::Frame object for a specific frame number of this reader.
// NOTE: background_frame is ignored in this method (this method is only used by Effect classes)
std::shared_ptr<Frame> Clip::GetFrame(std::shared_ptr<openshot::Frame> background_frame, int64_t clip_frame_number)
{
    // Call override of GetFrame
    return GetFrame(background_frame, clip_frame_number, NULL);
}

// Use an existing openshot::Frame object and draw this Clip's frame onto it
std::shared_ptr<Frame> Clip::GetFrame(std::shared_ptr<openshot::Frame> background_frame, int64_t clip_frame_number, openshot::TimelineInfoStruct* options)
{
    // Check for open reader (or throw exception)
    if (!is_open)
        throw ReaderClosed("The Clip is closed. Call Open() before calling this method.");

    if (reader)
    {
        // Check cache for a previously rendered frame
        std::shared_ptr<Frame> frame = final_cache.GetFrame(clip_frame_number);
        if (!frame) {
            // Generate clip frame
            frame = GetOrCreateFrame(clip_frame_number);

            // Get frame size and frame #
            int64_t timeline_frame_number = clip_frame_number;
            QSize timeline_size(frame->GetWidth(), frame->GetHeight());
            if (background_frame) {
                // If a background frame is provided, use its number and size instead
                timeline_frame_number = background_frame->number;
                timeline_size.setWidth(background_frame->GetWidth());
                timeline_size.setHeight(background_frame->GetHeight());
            }

            // Get time mapped frame object (used to increase speed, change direction, etc...)
            apply_timemapping(frame);

            // Apply waveform image (if any)
            apply_waveform(frame, timeline_size);

            // Apply effects BEFORE applying keyframes (if any local or global effects are used)
            apply_effects(frame, timeline_frame_number, options, true);

            // Apply keyframe / transforms to current clip image
            apply_keyframes(frame, timeline_size);

            // Apply effects AFTER applying keyframes (if any local or global effects are used)
            apply_effects(frame, timeline_frame_number, options, false);

            // Add final frame to cache (before flattening into background_frame)
            final_cache.Add(frame);
        }

        if (!background_frame) {
            // Create missing background_frame w/ transparent color (if needed)
            background_frame = std::make_shared<Frame>(frame->number, frame->GetWidth(), frame->GetHeight(),
                                                       "#00000000", frame->GetAudioSamplesCount(),
                                                       frame->GetAudioChannelsCount());
        }

        // Apply background canvas (i.e. flatten this image onto previous layer image)
        apply_background(frame, background_frame);

        // Return processed 'frame'
        return frame;
    }
    else
        // Throw error if reader not initialized
        throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
}

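// Usage sketch (path is hypothetical): the single-argument overload composites
// the clip onto a new transparent canvas of the frame's own size.
//
//     openshot::Clip clip("video.mp4");
//     clip.Open();
//     std::shared_ptr<openshot::Frame> f = clip.GetFrame(1);
//     clip.Close();
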
// Look up an effect by ID
openshot::EffectBase* Clip::GetEffect(const std::string& id)
{
    // Find the matching effect (if any)
    for (const auto& effect : effects) {
        if (effect->Id() == id) {
            return effect;
        }
    }
    return nullptr;
}

// Return the associated ParentClip (if any)
openshot::Clip* Clip::GetParentClip() {
    if (!parentObjectId.empty() && (!parentClipObject && !parentTrackedObject)) {
        // Attach parent clip OR object to this clip
        AttachToObject(parentObjectId);
    }
    return parentClipObject;
}

// Return the associated Parent Tracked Object (if any)
std::shared_ptr<openshot::TrackedObjectBase> Clip::GetParentTrackedObject() {
    if (!parentObjectId.empty() && (!parentClipObject && !parentTrackedObject)) {
        // Attach parent clip OR object to this clip
        AttachToObject(parentObjectId);
    }
    return parentTrackedObject;
}

// Get file extension
std::string Clip::get_file_extension(std::string path)
{
    // return last part of path
    return path.substr(path.find_last_of(".") + 1);
}

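// Note: if the path contains no '.', find_last_of() returns std::string::npos,
// npos + 1 wraps to 0, and the whole path is returned (e.g. "README" -> "README").
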
// Adjust the audio and image of a time mapped frame
void Clip::apply_timemapping(std::shared_ptr<Frame> frame)
{
    // Check for valid reader
    if (!reader)
        // Throw error if reader not initialized
        throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");

    // Check for a valid time map curve
    if (time.GetLength() > 1)
    {
        const std::lock_guard<std::recursive_mutex> lock(getFrameMutex);

        int64_t clip_frame_number = frame->number;
        int64_t new_frame_number = adjust_frame_number_minimum(time.GetLong(clip_frame_number));

        // create buffer
        juce::AudioBuffer<float> *source_samples = nullptr;

        // Get delta (difference from this frame to the next time mapped frame: Y value)
        double delta = time.GetDelta(clip_frame_number + 1);
        bool is_increasing = time.IsIncreasing(clip_frame_number + 1);

        // Determine length of source audio (in samples)
        // A delta of 1.0 == normal expected samples
        // A delta of 0.5 == 50% of normal expected samples
        // A delta of 2.0 == 200% of normal expected samples
        int target_sample_count = Frame::GetSamplesPerFrame(adjust_timeline_framenumber(clip_frame_number), Reader()->info.fps,
                                                            Reader()->info.sample_rate,
                                                            Reader()->info.channels);
        int source_sample_count = round(target_sample_count * fabs(delta));

        // Determine starting audio location
        AudioLocation location;
        if (previous_location.frame == 0 || std::abs(new_frame_number - previous_location.frame) > 2) {
            // No previous location OR gap detected
            location.frame = new_frame_number;
            location.sample_start = 0;

            // Create / Reset resampler
            // We don't want to interpolate between unrelated audio data
            if (resampler) {
                delete resampler;
            }
            // Init resampler with # channels from Reader (should match the timeline)
            resampler = new AudioResampler(Reader()->info.channels);

            // Allocate buffer of silence to initialize some data inside the resampler
            // To prevent it from becoming input limited
            juce::AudioBuffer<float> init_samples(Reader()->info.channels, 64);
            init_samples.clear();
            resampler->SetBuffer(&init_samples, 1.0);
            resampler->GetResampledBuffer();

        } else {
            // Use previous location
            location = previous_location;
        }

        if (source_sample_count <= 0) {
            // Add silence and bail (we don't need any samples)
            frame->AddAudioSilence(target_sample_count);
            return;
        }

        // Allocate a new sample buffer for these delta frames
        source_samples = new juce::AudioBuffer<float>(Reader()->info.channels, source_sample_count);
        source_samples->clear();

        // Gather samples, advancing (or rewinding) the audio location as we go
        int remaining_samples = source_sample_count;
        int source_pos = 0;
        while (remaining_samples > 0) {
            std::shared_ptr<Frame> source_frame = GetOrCreateFrame(location.frame, false);
            int frame_sample_count = source_frame->GetAudioSamplesCount() - location.sample_start;

            if (frame_sample_count == 0) {
                // No samples left in source frame (the gap will remain silence)
                if (is_increasing) {
                    location.frame++;
                } else {
                    location.frame--;
                }
                location.sample_start = 0;
                break;
            }
            if (remaining_samples - frame_sample_count >= 0) {
                // Use all frame samples & increment location
                for (int channel = 0; channel < source_frame->GetAudioChannelsCount(); channel++) {
                    source_samples->addFrom(channel, source_pos, source_frame->GetAudioSamples(channel) + location.sample_start, frame_sample_count, 1.0f);
                }
                if (is_increasing) {
                    location.frame++;
                } else {
                    location.frame--;
                }
                location.sample_start = 0;
                remaining_samples -= frame_sample_count;
                source_pos += frame_sample_count;

            } else {
                // Use just what is needed
                for (int channel = 0; channel < source_frame->GetAudioChannelsCount(); channel++) {
                    source_samples->addFrom(channel, source_pos, source_frame->GetAudioSamples(channel) + location.sample_start, remaining_samples, 1.0f);
                }
                location.sample_start += remaining_samples;
                source_pos += remaining_samples;
                remaining_samples = 0;
            }

        }

        // Resize audio for current frame object + fill with silence
        // We are about to overwrite this with actual audio data (possibly resampled)
        frame->AddAudioSilence(target_sample_count);

        if (source_sample_count != target_sample_count) {
            // Resample audio (if needed)
            double resample_ratio = double(source_sample_count) / double(target_sample_count);
            resampler->SetBuffer(source_samples, resample_ratio);

            // Resample the data
            juce::AudioBuffer<float> *resampled_buffer = resampler->GetResampledBuffer();

            // Fill the frame with resampled data
            for (int channel = 0; channel < Reader()->info.channels; channel++) {
                // Add the time-stretched samples to the frame object
                frame->AddAudio(true, channel, 0, resampled_buffer->getReadPointer(channel, 0), std::min(resampled_buffer->getNumSamples(), target_sample_count), 1.0f);
            }
        } else {
            // Fill the frame with the gathered samples (no resampling needed)
            for (int channel = 0; channel < Reader()->info.channels; channel++) {
                frame->AddAudio(true, channel, 0, source_samples->getReadPointer(channel, 0), target_sample_count, 1.0f);
            }
        }

        // Clean up
        delete source_samples;

        // Set previous location
        previous_location = location;
    }
}

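// Example of the delta math above (a sketch): at 30 fps with 1470 samples per
// frame (44100 Hz / 30), a time-curve delta of 0.5 gathers ~735 source samples,
// which the resampler then stretches back to 1470 output samples (slower
// playback, lower pitch).
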
// Adjust frame number minimum value
int64_t Clip::adjust_frame_number_minimum(int64_t frame_number)
{
    // Never return a frame number 0 or below
    if (frame_number < 1)
        return 1;
    else
        return frame_number;
}

// Get or generate a blank frame
std::shared_ptr<Frame> Clip::GetOrCreateFrame(int64_t number, bool enable_time)
{
    try {
        // Init to requested frame
        int64_t clip_frame_number = adjust_frame_number_minimum(number);

        // Adjust for time-mapping (if any)
        if (enable_time && time.GetLength() > 1) {
            clip_frame_number = adjust_frame_number_minimum(time.GetLong(clip_frame_number));
        }

        // Debug output
        ZmqLogger::Instance()->AppendDebugMethod(
            "Clip::GetOrCreateFrame (from reader)",
            "number", number, "clip_frame_number", clip_frame_number);

        // Attempt to get a frame (but this could fail if a reader has just been closed)
        auto reader_frame = reader->GetFrame(clip_frame_number);

        // Return real frame
        if (reader_frame) {
            // Override frame # (since time-mapping might have changed it)
            reader_frame->number = number;

            // Create a new copy of reader frame
            // This allows a clip to modify the pixels and audio of this frame without
            // changing the underlying reader's frame data
            auto reader_copy = std::make_shared<Frame>(*reader_frame.get());
            if (has_video.GetInt(number) == 0) {
                // No video, so add transparent pixels
                reader_copy->AddColor(QColor(Qt::transparent));
            }
            if (has_audio.GetInt(number) == 0 || number > reader->info.video_length) {
                // No audio, so include silence (also, mute audio if past end of reader)
                reader_copy->AddAudioSilence(reader_copy->GetAudioSamplesCount());
            }
            return reader_copy;
        }

    } catch (const ReaderClosed & e) {
        // ...
    } catch (const OutOfBoundsFrame & e) {
        // ...
    }

    // Estimate # of samples needed for this frame
    int estimated_samples_in_frame = Frame::GetSamplesPerFrame(number, reader->info.fps, reader->info.sample_rate, reader->info.channels);

    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod(
        "Clip::GetOrCreateFrame (create blank)",
        "number", number,
        "estimated_samples_in_frame", estimated_samples_in_frame);

    // Create blank frame
    auto new_frame = std::make_shared<Frame>(
        number, reader->info.width, reader->info.height,
        "#000000", estimated_samples_in_frame, reader->info.channels);
    new_frame->SampleRate(reader->info.sample_rate);
    new_frame->ChannelsLayout(reader->info.channel_layout);
    new_frame->AddAudioSilence(estimated_samples_in_frame);
    return new_frame;
}

// Generate JSON string of this object
std::string Clip::Json() const {

    // Return formatted string
    return JsonValue().toStyledString();
}

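// Round-trip sketch: the JSON produced here can be loaded back via SetJson(),
// which throws openshot::InvalidJSON on malformed input.
//
//     std::string serialized = clip.Json();
//     openshot::Clip copy;
//     copy.SetJson(serialized);
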
// Get all properties for a specific frame
std::string Clip::PropertiesJSON(int64_t requested_frame) const {

    // Generate JSON properties list
    Json::Value root;
    root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame);
    root["position"] = add_property_json("Position", Position(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
    root["layer"] = add_property_json("Track", Layer(), "int", "", NULL, 0, 20, false, requested_frame);
    root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
    root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
    root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 30 * 60 * 60 * 48, true, requested_frame);
    root["gravity"] = add_property_json("Gravity", gravity, "int", "", NULL, 0, 8, false, requested_frame);
    root["scale"] = add_property_json("Scale", scale, "int", "", NULL, 0, 3, false, requested_frame);
    root["display"] = add_property_json("Frame Number", display, "int", "", NULL, 0, 3, false, requested_frame);
    root["mixing"] = add_property_json("Volume Mixing", mixing, "int", "", NULL, 0, 2, false, requested_frame);
    root["waveform"] = add_property_json("Waveform", waveform, "int", "", NULL, 0, 1, false, requested_frame);
    root["parentObjectId"] = add_property_json("Parent", 0.0, "string", parentObjectId, NULL, -1, -1, false, requested_frame);

    // Add gravity choices (dropdown style)
    root["gravity"]["choices"].append(add_property_choice_json("Top Left", GRAVITY_TOP_LEFT, gravity));
    root["gravity"]["choices"].append(add_property_choice_json("Top Center", GRAVITY_TOP, gravity));
    root["gravity"]["choices"].append(add_property_choice_json("Top Right", GRAVITY_TOP_RIGHT, gravity));
    root["gravity"]["choices"].append(add_property_choice_json("Left", GRAVITY_LEFT, gravity));
    root["gravity"]["choices"].append(add_property_choice_json("Center", GRAVITY_CENTER, gravity));
    root["gravity"]["choices"].append(add_property_choice_json("Right", GRAVITY_RIGHT, gravity));
    root["gravity"]["choices"].append(add_property_choice_json("Bottom Left", GRAVITY_BOTTOM_LEFT, gravity));
    root["gravity"]["choices"].append(add_property_choice_json("Bottom Center", GRAVITY_BOTTOM, gravity));
    root["gravity"]["choices"].append(add_property_choice_json("Bottom Right", GRAVITY_BOTTOM_RIGHT, gravity));

    // Add scale choices (dropdown style)
    root["scale"]["choices"].append(add_property_choice_json("Crop", SCALE_CROP, scale));
    root["scale"]["choices"].append(add_property_choice_json("Best Fit", SCALE_FIT, scale));
    root["scale"]["choices"].append(add_property_choice_json("Stretch", SCALE_STRETCH, scale));
    root["scale"]["choices"].append(add_property_choice_json("None", SCALE_NONE, scale));

    // Add frame number display choices (dropdown style)
    root["display"]["choices"].append(add_property_choice_json("None", FRAME_DISPLAY_NONE, display));
    root["display"]["choices"].append(add_property_choice_json("Clip", FRAME_DISPLAY_CLIP, display));
    root["display"]["choices"].append(add_property_choice_json("Timeline", FRAME_DISPLAY_TIMELINE, display));
    root["display"]["choices"].append(add_property_choice_json("Both", FRAME_DISPLAY_BOTH, display));

    // Add volume mixing choices (dropdown style)
    root["mixing"]["choices"].append(add_property_choice_json("None", VOLUME_MIX_NONE, mixing));
    root["mixing"]["choices"].append(add_property_choice_json("Average", VOLUME_MIX_AVERAGE, mixing));
    root["mixing"]["choices"].append(add_property_choice_json("Reduce", VOLUME_MIX_REDUCE, mixing));

    // Add waveform choices (dropdown style)
    root["waveform"]["choices"].append(add_property_choice_json("Yes", true, waveform));
    root["waveform"]["choices"].append(add_property_choice_json("No", false, waveform));

    // Add the parentClipObject's properties
    if (parentClipObject)
    {
        // Convert Clip's frame position to Timeline's frame position
        long clip_start_position = round(Position() * info.fps.ToDouble()) + 1;
        long clip_start_frame = (Start() * info.fps.ToDouble()) + 1;
        double timeline_frame_number = requested_frame + clip_start_position - clip_start_frame;

        // Correct the parent Clip Object properties by the clip's reference system
        float parentObject_location_x = parentClipObject->location_x.GetValue(timeline_frame_number);
        float parentObject_location_y = parentClipObject->location_y.GetValue(timeline_frame_number);
        float parentObject_scale_x = parentClipObject->scale_x.GetValue(timeline_frame_number);
        float parentObject_scale_y = parentClipObject->scale_y.GetValue(timeline_frame_number);
        float parentObject_shear_x = parentClipObject->shear_x.GetValue(timeline_frame_number);
        float parentObject_shear_y = parentClipObject->shear_y.GetValue(timeline_frame_number);
        float parentObject_rotation = parentClipObject->rotation.GetValue(timeline_frame_number);

        // Add the parent Clip Object properties to JSON
        root["location_x"] = add_property_json("Location X", parentObject_location_x, "float", "", &location_x, -1.0, 1.0, false, requested_frame);
        root["location_y"] = add_property_json("Location Y", parentObject_location_y, "float", "", &location_y, -1.0, 1.0, false, requested_frame);
        root["scale_x"] = add_property_json("Scale X", parentObject_scale_x, "float", "", &scale_x, 0.0, 1.0, false, requested_frame);
        root["scale_y"] = add_property_json("Scale Y", parentObject_scale_y, "float", "", &scale_y, 0.0, 1.0, false, requested_frame);
        root["rotation"] = add_property_json("Rotation", parentObject_rotation, "float", "", &rotation, -360, 360, false, requested_frame);
        root["shear_x"] = add_property_json("Shear X", parentObject_shear_x, "float", "", &shear_x, -1.0, 1.0, false, requested_frame);
        root["shear_y"] = add_property_json("Shear Y", parentObject_shear_y, "float", "", &shear_y, -1.0, 1.0, false, requested_frame);
    }
    else
    {
        // Add this clip's own properties to JSON
        root["location_x"] = add_property_json("Location X", location_x.GetValue(requested_frame), "float", "", &location_x, -1.0, 1.0, false, requested_frame);
        root["location_y"] = add_property_json("Location Y", location_y.GetValue(requested_frame), "float", "", &location_y, -1.0, 1.0, false, requested_frame);
        root["scale_x"] = add_property_json("Scale X", scale_x.GetValue(requested_frame), "float", "", &scale_x, 0.0, 1.0, false, requested_frame);
        root["scale_y"] = add_property_json("Scale Y", scale_y.GetValue(requested_frame), "float", "", &scale_y, 0.0, 1.0, false, requested_frame);
        root["rotation"] = add_property_json("Rotation", rotation.GetValue(requested_frame), "float", "", &rotation, -360, 360, false, requested_frame);
        root["shear_x"] = add_property_json("Shear X", shear_x.GetValue(requested_frame), "float", "", &shear_x, -1.0, 1.0, false, requested_frame);
        root["shear_y"] = add_property_json("Shear Y", shear_y.GetValue(requested_frame), "float", "", &shear_y, -1.0, 1.0, false, requested_frame);
    }

    // Keyframes
    root["alpha"] = add_property_json("Alpha", alpha.GetValue(requested_frame), "float", "", &alpha, 0.0, 1.0, false, requested_frame);
    root["origin_x"] = add_property_json("Origin X", origin_x.GetValue(requested_frame), "float", "", &origin_x, 0.0, 1.0, false, requested_frame);
    root["origin_y"] = add_property_json("Origin Y", origin_y.GetValue(requested_frame), "float", "", &origin_y, 0.0, 1.0, false, requested_frame);
    root["volume"] = add_property_json("Volume", volume.GetValue(requested_frame), "float", "", &volume, 0.0, 1.0, false, requested_frame);
    root["time"] = add_property_json("Time", time.GetValue(requested_frame), "float", "", &time, 0.0, 30 * 60 * 60 * 48, false, requested_frame);
    root["channel_filter"] = add_property_json("Channel Filter", channel_filter.GetValue(requested_frame), "int", "", &channel_filter, -1, 10, false, requested_frame);
    root["channel_mapping"] = add_property_json("Channel Mapping", channel_mapping.GetValue(requested_frame), "int", "", &channel_mapping, -1, 10, false, requested_frame);
    root["has_audio"] = add_property_json("Enable Audio", has_audio.GetValue(requested_frame), "int", "", &has_audio, -1, 1.0, false, requested_frame);
    root["has_video"] = add_property_json("Enable Video", has_video.GetValue(requested_frame), "int", "", &has_video, -1, 1.0, false, requested_frame);

    // Add enable audio/video choices (dropdown style)
    root["has_audio"]["choices"].append(add_property_choice_json("Auto", -1, has_audio.GetValue(requested_frame)));
    root["has_audio"]["choices"].append(add_property_choice_json("Off", 0, has_audio.GetValue(requested_frame)));
    root["has_audio"]["choices"].append(add_property_choice_json("On", 1, has_audio.GetValue(requested_frame)));
    root["has_video"]["choices"].append(add_property_choice_json("Auto", -1, has_video.GetValue(requested_frame)));
    root["has_video"]["choices"].append(add_property_choice_json("Off", 0, has_video.GetValue(requested_frame)));
    root["has_video"]["choices"].append(add_property_choice_json("On", 1, has_video.GetValue(requested_frame)));

    root["wave_color"] = add_property_json("Wave Color", 0.0, "color", "", &wave_color.red, 0, 255, false, requested_frame);
    root["wave_color"]["red"] = add_property_json("Red", wave_color.red.GetValue(requested_frame), "float", "", &wave_color.red, 0, 255, false, requested_frame);
    root["wave_color"]["blue"] = add_property_json("Blue", wave_color.blue.GetValue(requested_frame), "float", "", &wave_color.blue, 0, 255, false, requested_frame);
    root["wave_color"]["green"] = add_property_json("Green", wave_color.green.GetValue(requested_frame), "float", "", &wave_color.green, 0, 255, false, requested_frame);
    root["wave_color"]["alpha"] = add_property_json("Alpha", wave_color.alpha.GetValue(requested_frame), "float", "", &wave_color.alpha, 0, 255, false, requested_frame);

    // Return formatted string
    return root.toStyledString();
}

// Generate Json::Value for this object
Json::Value Clip::JsonValue() const {

    // Create root json object
    Json::Value root = ClipBase::JsonValue(); // get parent properties
    root["parentObjectId"] = parentObjectId;
    root["gravity"] = gravity;
    root["scale"] = scale;
    root["anchor"] = anchor;
    root["display"] = display;
    root["mixing"] = mixing;
    root["waveform"] = waveform;
    root["scale_x"] = scale_x.JsonValue();
    root["scale_y"] = scale_y.JsonValue();
    root["location_x"] = location_x.JsonValue();
    root["location_y"] = location_y.JsonValue();
    root["alpha"] = alpha.JsonValue();
    root["rotation"] = rotation.JsonValue();
    root["time"] = time.JsonValue();
    root["volume"] = volume.JsonValue();
    root["wave_color"] = wave_color.JsonValue();
    root["shear_x"] = shear_x.JsonValue();
    root["shear_y"] = shear_y.JsonValue();
    root["origin_x"] = origin_x.JsonValue();
    root["origin_y"] = origin_y.JsonValue();
    root["channel_filter"] = channel_filter.JsonValue();
    root["channel_mapping"] = channel_mapping.JsonValue();
    root["has_audio"] = has_audio.JsonValue();
    root["has_video"] = has_video.JsonValue();
    root["perspective_c1_x"] = perspective_c1_x.JsonValue();
    root["perspective_c1_y"] = perspective_c1_y.JsonValue();
    root["perspective_c2_x"] = perspective_c2_x.JsonValue();
    root["perspective_c2_y"] = perspective_c2_y.JsonValue();
    root["perspective_c3_x"] = perspective_c3_x.JsonValue();
    root["perspective_c3_y"] = perspective_c3_y.JsonValue();
    root["perspective_c4_x"] = perspective_c4_x.JsonValue();
    root["perspective_c4_y"] = perspective_c4_y.JsonValue();

    // Add array of effects
    root["effects"] = Json::Value(Json::arrayValue);

    // loop through effects
    for (auto existing_effect : effects)
    {
        root["effects"].append(existing_effect->JsonValue());
    }

    if (reader)
        root["reader"] = reader->JsonValue();
    else
        root["reader"] = Json::Value(Json::objectValue);

    // return JsonValue
    return root;
}

// Load JSON string into this object
void Clip::SetJson(const std::string value) {

    // Parse JSON string into JSON objects
    try
    {
        const Json::Value root = openshot::stringToJson(value);
        // Set all values that match
        SetJsonValue(root);
    }
    catch (const std::exception& e)
    {
        // Error parsing JSON (or missing keys)
        throw InvalidJSON("JSON is invalid (missing keys or invalid data types)");
    }
}

// Load Json::Value into this object
void Clip::SetJsonValue(const Json::Value root) {

    // Set parent data
    ClipBase::SetJsonValue(root);

    // Set data from Json (if key is found)
    if (!root["parentObjectId"].isNull()){
        parentObjectId = root["parentObjectId"].asString();
        if (!parentObjectId.empty()){
            AttachToObject(parentObjectId);
        } else {
            parentTrackedObject = nullptr;
            parentClipObject = NULL;
        }
    }
    if (!root["gravity"].isNull())
        gravity = (GravityType) root["gravity"].asInt();
    if (!root["scale"].isNull())
        scale = (ScaleType) root["scale"].asInt();
    if (!root["anchor"].isNull())
        anchor = (AnchorType) root["anchor"].asInt();
    if (!root["display"].isNull())
        display = (FrameDisplayType) root["display"].asInt();
    if (!root["mixing"].isNull())
        mixing = (VolumeMixType) root["mixing"].asInt();
    if (!root["waveform"].isNull())
        waveform = root["waveform"].asBool();
    if (!root["scale_x"].isNull())
        scale_x.SetJsonValue(root["scale_x"]);
    if (!root["scale_y"].isNull())
        scale_y.SetJsonValue(root["scale_y"]);
    if (!root["location_x"].isNull())
        location_x.SetJsonValue(root["location_x"]);
    if (!root["location_y"].isNull())
        location_y.SetJsonValue(root["location_y"]);
    if (!root["alpha"].isNull())
        alpha.SetJsonValue(root["alpha"]);
    if (!root["rotation"].isNull())
        rotation.SetJsonValue(root["rotation"]);
    if (!root["time"].isNull())
        time.SetJsonValue(root["time"]);
    if (!root["volume"].isNull())
        volume.SetJsonValue(root["volume"]);
    if (!root["wave_color"].isNull())
        wave_color.SetJsonValue(root["wave_color"]);
    if (!root["shear_x"].isNull())
        shear_x.SetJsonValue(root["shear_x"]);
    if (!root["shear_y"].isNull())
        shear_y.SetJsonValue(root["shear_y"]);
    if (!root["origin_x"].isNull())
        origin_x.SetJsonValue(root["origin_x"]);
    if (!root["origin_y"].isNull())
        origin_y.SetJsonValue(root["origin_y"]);
    if (!root["channel_filter"].isNull())
        channel_filter.SetJsonValue(root["channel_filter"]);
    if (!root["channel_mapping"].isNull())
        channel_mapping.SetJsonValue(root["channel_mapping"]);
    if (!root["has_audio"].isNull())
        has_audio.SetJsonValue(root["has_audio"]);
    if (!root["has_video"].isNull())
        has_video.SetJsonValue(root["has_video"]);
    if (!root["perspective_c1_x"].isNull())
        perspective_c1_x.SetJsonValue(root["perspective_c1_x"]);
    if (!root["perspective_c1_y"].isNull())
        perspective_c1_y.SetJsonValue(root["perspective_c1_y"]);
    if (!root["perspective_c2_x"].isNull())
        perspective_c2_x.SetJsonValue(root["perspective_c2_x"]);
    if (!root["perspective_c2_y"].isNull())
        perspective_c2_y.SetJsonValue(root["perspective_c2_y"]);
    if (!root["perspective_c3_x"].isNull())
        perspective_c3_x.SetJsonValue(root["perspective_c3_x"]);
    if (!root["perspective_c3_y"].isNull())
        perspective_c3_y.SetJsonValue(root["perspective_c3_y"]);
    if (!root["perspective_c4_x"].isNull())
        perspective_c4_x.SetJsonValue(root["perspective_c4_x"]);
    if (!root["perspective_c4_y"].isNull())
        perspective_c4_y.SetJsonValue(root["perspective_c4_y"]);
    if (!root["effects"].isNull()) {

        // Clear existing effects
        effects.clear();

        // loop through effects
        for (const auto& existing_effect : root["effects"]) {
            // Skip NULL nodes
            if (existing_effect.isNull()) {
                continue;
            }

            // Create Effect
            EffectBase *e = NULL;
            if (!existing_effect["type"].isNull()) {

                // Create instance of effect
                if ( (e = EffectInfo().CreateEffect(existing_effect["type"].asString()))) {

                    // Load Json into Effect
                    e->SetJsonValue(existing_effect);

                    // Add Effect to this Clip
                    AddEffect(e);
                }
            }
        }
    }
    if (!root["reader"].isNull()) // does Json contain a reader?
    {
        if (!root["reader"]["type"].isNull()) // does the reader Json contain a 'type'?
        {
            // Close previous reader (if any)
            bool already_open = false;
            if (reader)
            {
                // Track if reader was open
                already_open = reader->IsOpen();

                // Close and delete existing allocated reader (if any)
                Reader(NULL);
            }

            // Create new reader (and load properties)
            std::string type = root["reader"]["type"].asString();

            if (type == "FFmpegReader") {

                // Create new reader
                reader = new openshot::FFmpegReader(root["reader"]["path"].asString(), false);
                reader->SetJsonValue(root["reader"]);

            } else if (type == "QtImageReader") {

                // Create new reader
                reader = new openshot::QtImageReader(root["reader"]["path"].asString(), false);
                reader->SetJsonValue(root["reader"]);

#ifdef USE_IMAGEMAGICK
            } else if (type == "ImageReader") {

                // Create new reader
                reader = new ImageReader(root["reader"]["path"].asString(), false);
                reader->SetJsonValue(root["reader"]);

            } else if (type == "TextReader") {

                // Create new reader
                reader = new TextReader();
                reader->SetJsonValue(root["reader"]);
#endif

            } else if (type == "ChunkReader") {

                // Create new reader
                reader = new openshot::ChunkReader(root["reader"]["path"].asString(), (ChunkVersion) root["reader"]["chunk_version"].asInt());
                reader->SetJsonValue(root["reader"]);

            } else if (type == "DummyReader") {

                // Create new reader
                reader = new openshot::DummyReader();
                reader->SetJsonValue(root["reader"]);

            } else if (type == "Timeline") {

                // Create new reader (always load from file again)
                // This prevents FrameMappers from being loaded by accident
                reader = new openshot::Timeline(root["reader"]["path"].asString(), true);
            }

            // mark as managed reader and set parent
            if (reader) {
                reader->ParentClip(this);
                allocated_reader = reader;
            }

            // Re-Open reader (if needed)
            if (reader && already_open) {
                reader->Open();
            }
        }
    }

    // Clear cache (it might have changed)
    final_cache.Clear();
}

// Sort effects by order
void Clip::sort_effects()
{
    // sort effects
    effects.sort(CompareClipEffects());
}

// Add an effect to the clip
void Clip::AddEffect(EffectBase* effect)
{
    // Set parent clip pointer
    effect->ParentClip(this);

    // Add effect to list
    effects.push_back(effect);

    // Sort effects
    sort_effects();

    // Get the parent timeline of this clip
    Timeline* parentTimeline = static_cast<Timeline *>(ParentTimeline());

    if (parentTimeline)
        effect->ParentTimeline(parentTimeline);

#ifdef USE_OPENCV
    // Add Tracked Object to Timeline
    if (effect->info.has_tracked_object){

        // Check if this clip has a parent timeline
        if (parentTimeline){

            effect->ParentTimeline(parentTimeline);

            // Iterate through effect's vector of Tracked Objects
            for (auto const& trackedObject : effect->trackedObjects){

                // Cast the Tracked Object as TrackedObjectBBox
                std::shared_ptr<TrackedObjectBBox> trackedObjectBBox = std::static_pointer_cast<TrackedObjectBBox>(trackedObject.second);

                // Set the Tracked Object's parent clip to this
                trackedObjectBBox->ParentClip(this);

                // Add the Tracked Object to the timeline
                parentTimeline->AddTrackedObject(trackedObjectBBox);
            }
        }
    }
#endif

    // Clear cache (it might have changed)
    final_cache.Clear();
}

// Remove an effect from the clip
void Clip::RemoveEffect(EffectBase* effect)
{
    effects.remove(effect);

    // Clear cache (it might have changed)
    final_cache.Clear();
}

// Apply background image to the current clip image (i.e. flatten this image onto previous layer)
void Clip::apply_background(std::shared_ptr<openshot::Frame> frame, std::shared_ptr<openshot::Frame> background_frame) {
    // Add background canvas
    std::shared_ptr<QImage> background_canvas = background_frame->GetImage();
    QPainter painter(background_canvas.get());
    painter.setRenderHints(QPainter::Antialiasing | QPainter::SmoothPixmapTransform | QPainter::TextAntialiasing, true);

    // Composite a new layer onto the image
    painter.setCompositionMode(QPainter::CompositionMode_SourceOver);
    painter.drawImage(0, 0, *frame->GetImage());
    painter.end();

    // Add new QImage to frame
    frame->AddImage(background_canvas);
}

// Apply effects to the source frame (if any)
void Clip::apply_effects(std::shared_ptr<Frame> frame, int64_t timeline_frame_number, TimelineInfoStruct* options, bool before_keyframes)
{
    for (auto effect : effects)
    {
        // Apply the effect to this frame
        if (effect->info.apply_before_clip && before_keyframes) {
            effect->GetFrame(frame, frame->number);
        } else if (!effect->info.apply_before_clip && !before_keyframes) {
            effect->GetFrame(frame, frame->number);
        }
    }

    if (timeline != NULL && options != NULL) {
        // Apply global timeline effects (i.e. transitions & masks... if any)
        Timeline* timeline_instance = static_cast<Timeline*>(timeline);
        options->is_before_clip_keyframes = before_keyframes;
        timeline_instance->apply_effects(frame, timeline_frame_number, Layer(), options);
    }
}

// Compare 2 floating point numbers for equality
bool Clip::isNear(double a, double b)
{
    return fabs(a - b) < 0.000001;
}

// Apply keyframes to the source frame (if any)
void Clip::apply_keyframes(std::shared_ptr<Frame> frame, QSize timeline_size) {
    // Skip out if video is disabled or this is an audio-only frame (no visualisation in use)
    if (!frame->has_image_data) {
        // Skip the rest of the image processing for performance reasons
        return;
    }

    // Get image from clip, and create transparent background image
    std::shared_ptr<QImage> source_image = frame->GetImage();
    std::shared_ptr<QImage> background_canvas = std::make_shared<QImage>(timeline_size.width(),
                                                                         timeline_size.height(),
                                                                         QImage::Format_RGBA8888_Premultiplied);
    background_canvas->fill(QColor(Qt::transparent));

    // Get transform from clip's keyframes
    QTransform transform = get_transform(frame, background_canvas->width(), background_canvas->height());

    // Load timeline's new frame image into a QPainter
    QPainter painter(background_canvas.get());
    painter.setRenderHints(QPainter::Antialiasing | QPainter::SmoothPixmapTransform | QPainter::TextAntialiasing, true);

    // Apply transform (translate, rotate, scale)
    painter.setTransform(transform);

    // Composite a new layer onto the image
    painter.setCompositionMode(QPainter::CompositionMode_SourceOver);
    painter.drawImage(0, 0, *source_image);

    if (timeline) {
        Timeline *t = static_cast<Timeline *>(timeline);

        // Draw frame #'s on top of image (if needed)
        if (display != FRAME_DISPLAY_NONE) {
            std::stringstream frame_number_str;
            switch (display) {
                case (FRAME_DISPLAY_NONE):
                    // This is only here to prevent unused-enum warnings
                    break;

                case (FRAME_DISPLAY_CLIP):
                    frame_number_str << frame->number;
                    break;

                case (FRAME_DISPLAY_TIMELINE):
                    frame_number_str << round((Position() - Start()) * t->info.fps.ToFloat()) + frame->number;
                    break;

                case (FRAME_DISPLAY_BOTH):
                    frame_number_str << round((Position() - Start()) * t->info.fps.ToFloat()) + frame->number << " (" << frame->number << ")";
                    break;
            }

            // Draw frame number on top of image
            painter.setPen(QColor("#ffffff"));
            painter.drawText(20, 20, QString(frame_number_str.str().c_str()));
        }
    }
    painter.end();

    // Add new QImage to frame
    frame->AddImage(background_canvas);
}

// Apply waveform image to the source frame (if needed)
void Clip::apply_waveform(std::shared_ptr<Frame> frame, QSize timeline_size) {

    if (!Waveform()) {
        // Exit if no waveform is needed
        return;
    }

    // Get image from clip
    std::shared_ptr<QImage> source_image = frame->GetImage();

    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod("Clip::apply_waveform (Generate Waveform Image)",
        "frame->number", frame->number,
        "Waveform()", Waveform(),
        "width", timeline_size.width(),
        "height", timeline_size.height());

    // Get the color of the waveform
    int red = wave_color.red.GetInt(frame->number);
    int green = wave_color.green.GetInt(frame->number);
    int blue = wave_color.blue.GetInt(frame->number);
    int alpha = wave_color.alpha.GetInt(frame->number);

    // Generate Waveform Dynamically (the size of the timeline)
    source_image = frame->GetWaveform(timeline_size.width(), timeline_size.height(), red, green, blue, alpha);
    frame->AddImage(source_image);
}

// Scale a source size to a target size (given a specific scale-type)
QSize Clip::scale_size(QSize source_size, ScaleType source_scale, int target_width, int target_height) {
    switch (source_scale)
    {
        case (SCALE_FIT): {
            source_size.scale(target_width, target_height, Qt::KeepAspectRatio);
            break;
        }
        case (SCALE_STRETCH): {
            source_size.scale(target_width, target_height, Qt::IgnoreAspectRatio);
            break;
        }
        case (SCALE_CROP): {
            source_size.scale(target_width, target_height, Qt::KeepAspectRatioByExpanding);
            break;
        }
        case (SCALE_NONE): {
            // Keep the original size
            break;
        }
    }

    return source_size;
}

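// Worked example (sketch): scaling a 2000x1000 source to a 500x500 target gives
// 500x250 for SCALE_FIT, 500x500 for SCALE_STRETCH, and 1000x500 for SCALE_CROP
// (expanded so both dimensions cover the target; the overflow is clipped when drawn).
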
1344 // Get QTransform from keyframes
1345 QTransform Clip::get_transform(std::shared_ptr<Frame> frame, int width, int height)
1346 {
1347  // Get image from clip
1348  std::shared_ptr<QImage> source_image = frame->GetImage();
1349 
1350  /* ALPHA & OPACITY */
1351  if (alpha.GetValue(frame->number) != 1.0)
1352  {
1353  float alpha_value = alpha.GetValue(frame->number);
1354 
1355  // Get source image's pixels
1356  unsigned char *pixels = source_image->bits();
1357 
1358  // Loop through pixels
1359  for (int pixel = 0, byte_index=0; pixel < source_image->width() * source_image->height(); pixel++, byte_index+=4)
1360  {
1361  // Apply alpha to pixel values (since we use a premultiplied value, we must
1362  // multiply the alpha with all colors).
1363  pixels[byte_index + 0] *= alpha_value;
1364  pixels[byte_index + 1] *= alpha_value;
1365  pixels[byte_index + 2] *= alpha_value;
1366  pixels[byte_index + 3] *= alpha_value;
1367  }
1368 
1369  // Debug output
1370  ZmqLogger::Instance()->AppendDebugMethod("Clip::get_transform (Set Alpha & Opacity)",
1371  "alpha_value", alpha_value,
1372  "frame->number", frame->number);
1373  }
1374 
1375  /* RESIZE SOURCE IMAGE - based on scale type */
1376  QSize source_size = scale_size(source_image->size(), scale, width, height);
1377 
1378  // Initialize parent object's properties (Clip or Tracked Object)
1379  float parentObject_location_x = 0.0;
1380  float parentObject_location_y = 0.0;
1381  float parentObject_scale_x = 1.0;
1382  float parentObject_scale_y = 1.0;
1383  float parentObject_shear_x = 0.0;
1384  float parentObject_shear_y = 0.0;
1385  float parentObject_rotation = 0.0;
1386 
1387  // Get the parentClipObject properties
1388  if (GetParentClip()){
1389  // Get the start trim position of the parent clip
1390  long parent_start_offset = parentClipObject->Start() * info.fps.ToDouble();
1391  long parent_frame_number = frame->number + parent_start_offset;
1392 
1393  // Get parent object's properties (Clip)
1394  parentObject_location_x = parentClipObject->location_x.GetValue(parent_frame_number);
1395  parentObject_location_y = parentClipObject->location_y.GetValue(parent_frame_number);
1396  parentObject_scale_x = parentClipObject->scale_x.GetValue(parent_frame_number);
1397  parentObject_scale_y = parentClipObject->scale_y.GetValue(parent_frame_number);
1398  parentObject_shear_x = parentClipObject->shear_x.GetValue(parent_frame_number);
1399  parentObject_shear_y = parentClipObject->shear_y.GetValue(parent_frame_number);
1400  parentObject_rotation = parentClipObject->rotation.GetValue(parent_frame_number);
1401  }
1402 
1403  // Get the parentTrackedObject properties
1404  if (GetParentTrackedObject()){
1405  // Get the attached object's parent clip's properties
1406  Clip* parentClip = (Clip*) parentTrackedObject->ParentClip();
1407  if (parentClip)
1408  {
1409  // Get the start trim position of the parent clip
1410  long parent_start_offset = parentClip->Start() * info.fps.ToDouble();
1411  long parent_frame_number = frame->number + parent_start_offset;
1412 
1413  // Access the parentTrackedObject's properties
1414  std::map<std::string, float> trackedObjectProperties = parentTrackedObject->GetBoxValues(parent_frame_number);
1415 
1416  // Get actual scaled parent size
1417  QSize parent_size = scale_size(QSize(parentClip->info.width, parentClip->info.height),
1418  parentClip->scale, width, height);
1419 
1420  // Get actual scaled tracked object size
1421  int trackedWidth = trackedObjectProperties["w"] * trackedObjectProperties["sx"] * parent_size.width() *
1422  parentClip->scale_x.GetValue(parent_frame_number);
1423  int trackedHeight = trackedObjectProperties["h"] * trackedObjectProperties["sy"] * parent_size.height() *
1424  parentClip->scale_y.GetValue(parent_frame_number);
1425 
1426  // Scale the clip source_size based on the actual tracked object size
1427  source_size = scale_size(source_size, scale, trackedWidth, trackedHeight);
1428 
1429  // Update parentObject's properties based on the tracked object's properties and parent clip's scale
1430  parentObject_location_x = parentClip->location_x.GetValue(parent_frame_number) + ((trackedObjectProperties["cx"] - 0.5) * parentClip->scale_x.GetValue(parent_frame_number));
1431  parentObject_location_y = parentClip->location_y.GetValue(parent_frame_number) + ((trackedObjectProperties["cy"] - 0.5) * parentClip->scale_y.GetValue(parent_frame_number));
1432  parentObject_rotation = trackedObjectProperties["r"] + parentClip->rotation.GetValue(parent_frame_number);
1433  }
1434  }
1435 
1436  /* GRAVITY LOCATION - Initialize X & Y to the correct values (before applying location curves) */
1437  float x = 0.0; // left
1438  float y = 0.0; // top
1439 
1440  // Adjust size for scale x and scale y
1441  float sx = scale_x.GetValue(frame->number); // percentage X scale
1442  float sy = scale_y.GetValue(frame->number); // percentage Y scale
1443 
1444  // Change clip's scale to parentObject's scale
1445  if(parentObject_scale_x != 0.0 && parentObject_scale_y != 0.0){
1446  sx*= parentObject_scale_x;
1447  sy*= parentObject_scale_y;
1448  }
1449 
1450  float scaled_source_width = source_size.width() * sx;
1451  float scaled_source_height = source_size.height() * sy;
1452 
1453  switch (gravity)
1454  {
1455  case (GRAVITY_TOP_LEFT):
1456  // This is only here to prevent unused-enum warnings
1457  break;
1458  case (GRAVITY_TOP):
1459  x = (width - scaled_source_width) / 2.0; // center
1460  break;
1461  case (GRAVITY_TOP_RIGHT):
1462  x = width - scaled_source_width; // right
1463  break;
1464  case (GRAVITY_LEFT):
1465  y = (height - scaled_source_height) / 2.0; // center
1466  break;
1467  case (GRAVITY_CENTER):
1468  x = (width - scaled_source_width) / 2.0; // center
1469  y = (height - scaled_source_height) / 2.0; // center
1470  break;
1471  case (GRAVITY_RIGHT):
1472  x = width - scaled_source_width; // right
1473  y = (height - scaled_source_height) / 2.0; // center
1474  break;
1475  case (GRAVITY_BOTTOM_LEFT):
1476  y = (height - scaled_source_height); // bottom
1477  break;
1478  case (GRAVITY_BOTTOM):
1479  x = (width - scaled_source_width) / 2.0; // center
1480  y = (height - scaled_source_height); // bottom
1481  break;
1482  case (GRAVITY_BOTTOM_RIGHT):
1483  x = width - scaled_source_width; // right
1484  y = (height - scaled_source_height); // bottom
1485  break;
1486  }
1487 
 1488  // Debug output
 1489  ZmqLogger::Instance()->AppendDebugMethod(
 1490  "Clip::get_transform (Gravity)",
 1491  "frame->number", frame->number,
 1492  "source_clip->gravity", gravity,
 1493  "scaled_source_width", scaled_source_width,
 1494  "scaled_source_height", scaled_source_height);
1495 
1496  QTransform transform;
1497 
1498  /* LOCATION, ROTATION, AND SCALE */
1499  float r = rotation.GetValue(frame->number) + parentObject_rotation; // rotate in degrees
1500  x += width * (location_x.GetValue(frame->number) + parentObject_location_x); // move in percentage of final width
1501  y += height * (location_y.GetValue(frame->number) + parentObject_location_y); // move in percentage of final height
1502  float shear_x_value = shear_x.GetValue(frame->number) + parentObject_shear_x;
1503  float shear_y_value = shear_y.GetValue(frame->number) + parentObject_shear_y;
1504  float origin_x_value = origin_x.GetValue(frame->number);
1505  float origin_y_value = origin_y.GetValue(frame->number);
1506 
 1507  // Transform source image (if needed)
 1508  ZmqLogger::Instance()->AppendDebugMethod(
 1509  "Clip::get_transform (Build QTransform - if needed)",
 1510  "frame->number", frame->number,
 1511  "x", x, "y", y,
 1512  "r", r,
 1513  "sx", sx, "sy", sy);
1514 
1515  if (!isNear(x, 0) || !isNear(y, 0)) {
1516  // TRANSLATE/MOVE CLIP
1517  transform.translate(x, y);
1518  }
1519  if (!isNear(r, 0) || !isNear(shear_x_value, 0) || !isNear(shear_y_value, 0)) {
1520  // ROTATE CLIP (around origin_x, origin_y)
1521  float origin_x_offset = (scaled_source_width * origin_x_value);
1522  float origin_y_offset = (scaled_source_height * origin_y_value);
1523  transform.translate(origin_x_offset, origin_y_offset);
1524  transform.rotate(r);
1525  transform.shear(shear_x_value, shear_y_value);
 1526  transform.translate(-origin_x_offset, -origin_y_offset);
1527  }
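      // Editorial note: translating to the origin offset, rotating/shearing, and then
      // translating back pivots the rotation and shear around the clip's origin point
      // (origin_x/origin_y, expressed as fractions of the scaled source size) rather
      // than around the image's top-left corner.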
1528  // SCALE CLIP (if needed)
1529  float source_width_scale = (float(source_size.width()) / float(source_image->width())) * sx;
1530  float source_height_scale = (float(source_size.height()) / float(source_image->height())) * sy;
1531  if (!isNear(source_width_scale, 1.0) || !isNear(source_height_scale, 1.0)) {
1532  transform.scale(source_width_scale, source_height_scale);
1533  }
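      // Editorial note: the scale step runs only when the combined scale differs from
      // 1.0, skipping a needless identity transform when the source image already
      // matches the requested source_size.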
1534 
1535  return transform;
1536 }
1537 
1538 // Adjust frame number for Clip position and start (which can result in a different number)
1539 int64_t Clip::adjust_timeline_framenumber(int64_t clip_frame_number) {
1540 
1541  // Get clip position from parent clip (if any)
1542  float position = 0.0;
1543  float start = 0.0;
1544  Clip *parent = static_cast<Clip *>(ParentClip());
1545  if (parent) {
1546  position = parent->Position();
1547  start = parent->Start();
1548  }
1549 
 1550  // Adjust the start frame and position based on the parent clip.
 1551  // This ensures the same frame number is used by mapped readers and clips
 1552  // when calculating samples per frame, preventing gaps and mismatches in
 1553  // the number of samples.
1554  int64_t clip_start_frame = (start * info.fps.ToDouble()) + 1;
1555  int64_t clip_start_position = round(position * info.fps.ToDouble()) + 1;
1556  int64_t frame_number = clip_frame_number + clip_start_position - clip_start_frame;
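      // Worked example (hypothetical values): with parent position = 2.0s, start = 0.5s,
      // and 30 fps: clip_start_frame = (0.5 * 30) + 1 = 16, clip_start_position =
      // round(2.0 * 30) + 1 = 61, so frame_number = clip_frame_number + 61 - 16.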
1557 
1558  return frame_number;
1559 }