#ifdef USE_IMAGEMAGICK
wave_color = Color((unsigned char)0, (unsigned char)123, (unsigned char)255, (unsigned char)255);

parentTrackedObject = nullptr;
parentClipObject = NULL;

// Init reader's rotation (if any)
if (reader && reader->info.metadata.count("rotate") > 0) {
    try {
        float rotate_metadata = strtof(reader->info.metadata["rotate"].c_str(), 0);
        rotation = Keyframe(rotate_metadata);
    } catch (const std::exception& e) {}
}
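// Some cameras and phones record orientation as a "rotate" metadata tag; the
// block above parses it to seed the clip's default rotation keyframe, and the
// empty catch silently ignores values that fail to parse.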
Clip::Clip() : resampler(NULL), reader(NULL), allocated_reader(NULL), is_open(false)

Clip::Clip(ReaderBase* new_reader) : resampler(NULL), reader(new_reader), allocated_reader(NULL), is_open(false)

Clip::Clip(std::string path) : resampler(NULL), reader(NULL), allocated_reader(NULL), is_open(false)

std::string ext = get_file_extension(path);
std::transform(ext.begin(), ext.end(), ext.begin(), ::tolower);
if (ext=="avi" || ext=="mov" || ext=="mkv" || ext=="mpg" || ext=="mpeg" || ext=="mp3" || ext=="mp4" || ext=="mts" ||
    ext=="ogg" || ext=="wav" || ext=="wmv" || ext=="webm" || ext=="vob")
allocated_reader = reader;

// Destructor: free the reader only if this Clip allocated it
if (allocated_reader) {
    delete allocated_reader;
    allocated_reader = NULL;
}
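// Ownership note: allocated_reader only ever points at readers this Clip
// constructed itself; the destructor logic above deletes those, while a
// reader supplied externally through Reader(*reader) is never freed here.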
if (parentTimeline) {
    // Look up the attachment target on the parent timeline by id
    std::shared_ptr<openshot::TrackedObjectBase> trackedObject = parentTimeline->GetTrackedObject(object_id);
    Clip* clipObject = parentTimeline->GetClip(object_id);

    if (trackedObject) {
        SetAttachedObject(trackedObject);
    }
    else if (clipObject) {
        SetAttachedClip(clipObject);
    }
}

// SetAttachedObject: store the tracked object this clip follows
parentTrackedObject = trackedObject;

// SetAttachedClip: store the clip this clip is attached to
parentClipObject = clipObject;
bool is_same_reader = false;
if (new_reader && allocated_reader) {
    if (new_reader->Name() == "FrameMapper") {
        // Determine if the FrameMapper wraps the reader we already own
        FrameMapper* clip_mapped_reader = static_cast<FrameMapper*>(new_reader);
        if (allocated_reader == clip_mapped_reader->Reader()) {
            is_same_reader = true;
        }
    }
}

// Clean up the previously allocated reader (if not still in use)
if (allocated_reader && !is_same_reader) {
    allocated_reader->Close();
    delete allocated_reader;
    allocated_reader = NULL;
}
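// Rationale: if the incoming reader is a FrameMapper wrapped around the
// reader this Clip already owns, deleting allocated_reader would leave the
// mapper holding a dangling pointer, so the check above skips the delete.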
throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");

throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");

throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");

throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
// GetFrame (by frame number)
if (!is_open)
    throw ReaderClosed("The Clip is closed. Call Open() before calling this method.");

if (reader) {
    // Adjust out-of-bounds frame numbers (below 1)
    frame_number = adjust_frame_number_minimum(frame_number);

    // Get the original frame and composite it (no background, no options)
    std::shared_ptr<Frame> original_frame = GetOrCreateFrame(frame_number);
    return GetFrame(original_frame, frame_number, NULL);
}
else
    throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
std::shared_ptr<Frame> Clip::GetFrame(std::shared_ptr<openshot::Frame> background_frame, int64_t frame_number)
{
    if (!is_open)
        throw ReaderClosed("The Clip is closed. Call Open() before calling this method.");

    if (reader) {
        frame_number = adjust_frame_number_minimum(frame_number);
        std::shared_ptr<Frame> original_frame = GetOrCreateFrame(frame_number);
        return GetFrame(original_frame, frame_number, NULL);
    }
    else
        throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
}
// GetFrame (with background frame and options): the main render path
if (!is_open)
    throw ReaderClosed("The Clip is closed. Call Open() before calling this method.");

if (reader) {
    frame_number = adjust_frame_number_minimum(frame_number);

    // Apply the "time" curve (if any) to remap the requested frame number
    int64_t new_frame_number = frame_number;
    int64_t time_mapped_number = adjust_frame_number_minimum(time.GetLong(frame_number));
    new_frame_number = time_mapped_number;

    // Get the frame, remapping its audio when a time curve is active
    std::shared_ptr<Frame> original_frame = GetOrCreateFrame(new_frame_number);
    get_time_mapped_frame(original_frame, new_frame_number);

    // Keep the originally requested frame number on the returned frame
    original_frame->number = frame_number;

    // Apply this clip's own effects
    apply_effects(original_frame);

    // Apply timeline-level effects (e.g. transitions), if attached to a timeline
    if (timeline != NULL && options != NULL) {
        Timeline* timeline_instance = static_cast<Timeline*>(timeline);
        original_frame = timeline_instance->apply_effects(original_frame, background_frame->number, Layer());
    }

    // Apply keyframes and composite the result onto the background image
    apply_keyframes(original_frame, background_frame->GetImage());

    return original_frame;
}
else
    throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
// GetEffect: find a single effect by ID
for (const auto& effect : effects) {
    if (effect->Id() == id) {
        return effect;
    }
}
std::string Clip::get_file_extension(std::string path)
{
    return path.substr(path.find_last_of(".") + 1);
}
void Clip::reverse_buffer(juce::AudioBuffer<float>* buffer)
{
    int number_of_samples = buffer->getNumSamples();
    int channels = buffer->getNumChannels();

    // Copy the samples into a temporary buffer, in reverse order
    juce::AudioBuffer<float>* reversed = new juce::AudioBuffer<float>(channels, number_of_samples);
    for (int channel = 0; channel < channels; channel++)
    {
        int n = 0;
        for (int s = number_of_samples - 1; s >= 0; s--, n++)
            reversed->getWritePointer(channel)[n] = buffer->getWritePointer(channel)[s];
    }

    // Copy the reversed samples back into the original (cleared) buffer
    buffer->clear();
    for (int channel = 0; channel < channels; channel++)
        buffer->addFrom(channel, 0, reversed->getReadPointer(channel), number_of_samples, 1.0f);

    delete reversed;
}
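// Minimal usage sketch (assumes JUCE; not part of the original file):
//   juce::AudioBuffer<float> buf(2, 1024);  // 2 channels, 1024 samples
//   /* ...fill buf... */
//   reverse_buffer(&buf);                   // samples now run back-to-front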
void Clip::get_time_mapped_frame(std::shared_ptr<Frame> frame, int64_t frame_number)
{
    if (!reader)
        throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");

    // Keep frame access thread-safe
    const std::lock_guard<std::recursive_mutex> lock(getFrameMutex);
    int new_frame_number = frame->number;

    int number_of_samples = GetOrCreateFrame(new_frame_number)->GetAudioSamplesCount();

    // Copy this frame's audio into the working buffer
    for (int channel = 0; channel < channels; channel++)
        samples->addFrom(channel, 0, GetOrCreateFrame(new_frame_number)->GetAudioSamples(channel),
                         number_of_samples, 1.0f);

    // Reverse the samples (if needed)
    reverse_buffer(samples);

    // Copy the resampled audio back onto the frame
    for (int channel = 0; channel < channels; channel++)
        frame->AddAudio(true, channel, 0, resampled_buffer->getReadPointer(channel, start),
                        number_of_samples, 1.0f);

    // Drop our pointer (the buffer itself is managed by the resampler)
    resampled_buffer = nullptr;
    else if (abs(delta) > 1 && abs(delta) < 100) {

        // Count the total samples covered by the skipped-ahead frames
        int total_delta_samples = 0;
        for (int delta_frame = new_frame_number - (delta - 1);
             delta_frame <= new_frame_number; delta_frame++)
            total_delta_samples += GetOrCreateFrame(delta_frame)->GetAudioSamplesCount();

        // Collect each skipped frame's audio into the working buffer
        for (int delta_frame = new_frame_number - (delta - 1);
             delta_frame <= new_frame_number; delta_frame++) {

            int number_of_delta_samples = GetOrCreateFrame(delta_frame)->GetAudioSamplesCount();
            juce::AudioBuffer<float>* delta_samples = new juce::AudioBuffer<float>(channels,
                                                                                   number_of_delta_samples);
            delta_samples->clear();

            for (int channel = 0; channel < channels; channel++)
                delta_samples->addFrom(channel, 0, GetOrCreateFrame(delta_frame)->GetAudioSamples(channel),
                                       number_of_delta_samples, 1.0f);

            // Reverse the samples (if needed)
            reverse_buffer(delta_samples);

            for (int channel = 0; channel < channels; channel++)
                samples->addFrom(channel, start, delta_samples->getReadPointer(channel),
                                 number_of_delta_samples, 1.0f);

            delete delta_samples;
            delta_samples = nullptr;

            // Advance the write position in the combined buffer
            start += number_of_delta_samples;
        }
        // Rewind: count the total samples covered by the skipped-back frames
        int total_delta_samples = 0;
        for (int delta_frame = new_frame_number - (delta + 1);
             delta_frame >= new_frame_number; delta_frame--)
            total_delta_samples += GetOrCreateFrame(delta_frame)->GetAudioSamplesCount();

        for (int delta_frame = new_frame_number - (delta + 1);
             delta_frame >= new_frame_number; delta_frame--) {

            int number_of_delta_samples = GetOrCreateFrame(delta_frame)->GetAudioSamplesCount();
            juce::AudioBuffer<float>* delta_samples = new juce::AudioBuffer<float>(channels,
                                                                                   number_of_delta_samples);
            delta_samples->clear();

            for (int channel = 0; channel < channels; channel++)
                delta_samples->addFrom(channel, 0, GetOrCreateFrame(delta_frame)->GetAudioSamples(channel),
                                       number_of_delta_samples, 1.0f);

            // Reverse the samples (if needed)
            reverse_buffer(delta_samples);

            for (int channel = 0; channel < channels; channel++)
                samples->addFrom(channel, start, delta_samples->getReadPointer(channel),
                                 number_of_delta_samples, 1.0f);

            delete delta_samples;
            delta_samples = NULL;

            start += number_of_delta_samples;
        }
    // Resample the combined samples to fit a single frame's duration
    resampler->SetBuffer(samples, float(start) / float(number_of_samples));
    juce::AudioBuffer<float>* buffer = resampler->GetResampledBuffer();

    for (int channel = 0; channel < channels; channel++)
        frame->AddAudio(true, channel, 0, buffer->getReadPointer(channel), number_of_samples, 1.0f);

    // Reverse-only case: copy the frame's samples, reverse, and put them back
    for (int channel = 0; channel < channels; channel++)
        samples->addFrom(channel, 0, frame->GetAudioSamples(channel), number_of_samples, 1.0f);

    reverse_buffer(samples);

    for (int channel = 0; channel < channels; channel++)
        frame->AddAudio(true, channel, 0, samples->getReadPointer(channel), number_of_samples, 1.0f);
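// Summary of the time-mapping above: the frames spanned by the "time"
// curve's delta are gathered into one buffer, reversed when the curve runs
// backwards, and resampled so they fit the duration of a single output
// frame; e.g. a 2x speed-up squeezes two frames' worth of samples into one.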
int64_t Clip::adjust_frame_number_minimum(int64_t frame_number)
{
    // Never allow a frame number below 1
    if (frame_number < 1)
        return 1;

    return frame_number;
}
std::shared_ptr<Frame> Clip::GetOrCreateFrame(int64_t number)
{
    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod(
        "Clip::GetOrCreateFrame (from reader)", "number", number);

    // Attempt to get the frame from the reader
    auto reader_frame = reader->GetFrame(number);

    // Copy the frame, so the reader's cached frame is never modified
    auto reader_copy = std::make_shared<Frame>(*reader_frame.get());

    // Replace image with transparency / audio with silence (done when the
    // corresponding stream is disabled for this clip)
    reader_copy->AddColor(QColor(Qt::transparent));
    reader_copy->AddAudioSilence(reader_copy->GetAudioSamplesCount());

    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod(
        "Clip::GetOrCreateFrame (create blank)",
        "estimated_samples_in_frame", estimated_samples_in_frame);

    // Create a blank frame (black image, silent audio)
    auto new_frame = std::make_shared<Frame>(
        number, reader->info.width, reader->info.height,
        "#000000", estimated_samples_in_frame, reader->info.channels);
    new_frame->AddAudioSilence(estimated_samples_in_frame);
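// Fallback behavior: when the reader cannot supply the requested frame, a
// black, silent frame with the reader's dimensions and estimated sample
// count is created instead, so callers always receive a usable frame.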
root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame);
root["position"] = add_property_json("Position", Position(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 30 * 60 * 60 * 48, true, requested_frame);

root["waveform"] = add_property_json("Waveform", waveform, "int", "", NULL, 0, 1, false, requested_frame);

if (!parentObjectId.empty()) {
    root["parentObjectId"] = add_property_json("Parent", 0.0, "string", parentObjectId, NULL, -1, -1, false, requested_frame);
} else {
    root["parentObjectId"] = add_property_json("Parent", 0.0, "string", "", NULL, -1, -1, false, requested_frame);
}
if (parentTrackedObject)
{
    // Convert the clip's frame number into a timeline frame number
    double timeline_frame_number = requested_frame + clip_start_position - clip_start_frame;

    // Get the tracked object's parent clip properties at that frame
    std::map<std::string, float> trackedObjectParentClipProperties = parentTrackedObject->GetParentClipProperties(timeline_frame_number);
    double parentObject_frame_number = trackedObjectParentClipProperties["frame_number"];

    // Get the tracked object's bounding-box values at that frame
    std::map<std::string, float> trackedObjectProperties = parentTrackedObject->GetBoxValues(parentObject_frame_number);

    // Combine the box values with the parent clip's own properties
    float parentObject_location_x = trackedObjectProperties["cx"] - 0.5 + trackedObjectParentClipProperties["cx"];
    float parentObject_location_y = trackedObjectProperties["cy"] - 0.5 + trackedObjectParentClipProperties["cy"];
    float parentObject_scale_x = trackedObjectProperties["w"] * trackedObjectProperties["sx"];
    float parentObject_scale_y = trackedObjectProperties["h"] * trackedObjectProperties["sy"];
    float parentObject_rotation = trackedObjectProperties["r"] + trackedObjectParentClipProperties["r"];

    root["location_x"] = add_property_json("Location X", parentObject_location_x, "float", "", &location_x, -1.0, 1.0, false, requested_frame);
    root["location_y"] = add_property_json("Location Y", parentObject_location_y, "float", "", &location_y, -1.0, 1.0, false, requested_frame);
    root["scale_x"] = add_property_json("Scale X", parentObject_scale_x, "float", "", &scale_x, 0.0, 1.0, false, requested_frame);
    root["scale_y"] = add_property_json("Scale Y", parentObject_scale_y, "float", "", &scale_y, 0.0, 1.0, false, requested_frame);
    root["rotation"] = add_property_json("Rotation", parentObject_rotation, "float", "", &rotation, -360, 360, false, requested_frame);
}
else if (parentClipObject)
{
    // Convert the clip's frame number into a timeline frame number
    double timeline_frame_number = requested_frame + clip_start_position - clip_start_frame;

    // Sample the parent clip's keyframes at that frame
    float parentObject_location_x = parentClipObject->location_x.GetValue(timeline_frame_number);
    float parentObject_location_y = parentClipObject->location_y.GetValue(timeline_frame_number);
    float parentObject_scale_x = parentClipObject->scale_x.GetValue(timeline_frame_number);
    float parentObject_scale_y = parentClipObject->scale_y.GetValue(timeline_frame_number);
    float parentObject_shear_x = parentClipObject->shear_x.GetValue(timeline_frame_number);
    float parentObject_shear_y = parentClipObject->shear_y.GetValue(timeline_frame_number);
    float parentObject_rotation = parentClipObject->rotation.GetValue(timeline_frame_number);

    root["location_x"] = add_property_json("Location X", parentObject_location_x, "float", "", &location_x, -1.0, 1.0, false, requested_frame);
    root["location_y"] = add_property_json("Location Y", parentObject_location_y, "float", "", &location_y, -1.0, 1.0, false, requested_frame);
    root["scale_x"] = add_property_json("Scale X", parentObject_scale_x, "float", "", &scale_x, 0.0, 1.0, false, requested_frame);
    root["scale_y"] = add_property_json("Scale Y", parentObject_scale_y, "float", "", &scale_y, 0.0, 1.0, false, requested_frame);
    root["rotation"] = add_property_json("Rotation", parentObject_rotation, "float", "", &rotation, -360, 360, false, requested_frame);
    root["shear_x"] = add_property_json("Shear X", parentObject_shear_x, "float", "", &shear_x, -1.0, 1.0, false, requested_frame);
    root["shear_y"] = add_property_json("Shear Y", parentObject_shear_y, "float", "", &shear_y, -1.0, 1.0, false, requested_frame);
}
return root.toStyledString();
root["parentObjectId"] = parentObjectId;
root["scale"] = scale;
root["waveform"] = waveform;

root["effects"] = Json::Value(Json::arrayValue);
for (auto existing_effect : effects)
    root["effects"].append(existing_effect->JsonValue());

root["reader"] = Json::Value(Json::objectValue);
catch (const std::exception& e)
{
    throw InvalidJSON("JSON is invalid (missing keys or invalid data types)");
}
if (!root["parentObjectId"].isNull()) {
    parentObjectId = root["parentObjectId"].asString();
    if (parentObjectId.size() > 0 && parentObjectId != "") {
        AttachToObject(parentObjectId);
    } else {
        parentTrackedObject = nullptr;
        parentClipObject = NULL;
    }
}
if (!root["gravity"].isNull())
    gravity = (GravityType) root["gravity"].asInt();
if (!root["scale"].isNull())
    scale = (ScaleType) root["scale"].asInt();
if (!root["anchor"].isNull())
    anchor = (AnchorType) root["anchor"].asInt();
if (!root["display"].isNull())
    display = (FrameDisplayType) root["display"].asInt();
if (!root["mixing"].isNull())
    mixing = (VolumeMixType) root["mixing"].asInt();
if (!root["waveform"].isNull())
    waveform = root["waveform"].asBool();
if (!root["scale_x"].isNull())
    scale_x.SetJsonValue(root["scale_x"]);
if (!root["scale_y"].isNull())
    scale_y.SetJsonValue(root["scale_y"]);
if (!root["location_x"].isNull())
    location_x.SetJsonValue(root["location_x"]);
if (!root["location_y"].isNull())
    location_y.SetJsonValue(root["location_y"]);
if (!root["alpha"].isNull())
    alpha.SetJsonValue(root["alpha"]);
if (!root["rotation"].isNull())
    rotation.SetJsonValue(root["rotation"]);
if (!root["time"].isNull())
    time.SetJsonValue(root["time"]);
if (!root["volume"].isNull())
    volume.SetJsonValue(root["volume"]);
if (!root["wave_color"].isNull())
    wave_color.SetJsonValue(root["wave_color"]);
if (!root["shear_x"].isNull())
    shear_x.SetJsonValue(root["shear_x"]);
if (!root["shear_y"].isNull())
    shear_y.SetJsonValue(root["shear_y"]);
if (!root["origin_x"].isNull())
    origin_x.SetJsonValue(root["origin_x"]);
if (!root["origin_y"].isNull())
    origin_y.SetJsonValue(root["origin_y"]);
if (!root["channel_filter"].isNull())
    channel_filter.SetJsonValue(root["channel_filter"]);
if (!root["channel_mapping"].isNull())
    channel_mapping.SetJsonValue(root["channel_mapping"]);
if (!root["has_audio"].isNull())
    has_audio.SetJsonValue(root["has_audio"]);
if (!root["has_video"].isNull())
    has_video.SetJsonValue(root["has_video"]);
if (!root["perspective_c1_x"].isNull())
    perspective_c1_x.SetJsonValue(root["perspective_c1_x"]);
if (!root["perspective_c1_y"].isNull())
    perspective_c1_y.SetJsonValue(root["perspective_c1_y"]);
if (!root["perspective_c2_x"].isNull())
    perspective_c2_x.SetJsonValue(root["perspective_c2_x"]);
if (!root["perspective_c2_y"].isNull())
    perspective_c2_y.SetJsonValue(root["perspective_c2_y"]);
if (!root["perspective_c3_x"].isNull())
    perspective_c3_x.SetJsonValue(root["perspective_c3_x"]);
if (!root["perspective_c3_y"].isNull())
    perspective_c3_y.SetJsonValue(root["perspective_c3_y"]);
if (!root["perspective_c4_x"].isNull())
    perspective_c4_x.SetJsonValue(root["perspective_c4_x"]);
if (!root["perspective_c4_y"].isNull())
    perspective_c4_y.SetJsonValue(root["perspective_c4_y"]);
if (!root["effects"].isNull()) {

    // Loop through the effects array
    for (const auto existing_effect : root["effects"]) {

        if (!existing_effect["type"].isNull()) {

            // Create the matching effect type (if supported)
            EffectBase* e = NULL;
            if ( (e = EffectInfo().CreateEffect(existing_effect["type"].asString()))) {
if (!root["reader"].isNull())
{
    if (!root["reader"]["type"].isNull())
    {
        // Track whether the current reader (if any) is open
        bool already_open = false;
        if (reader)
            already_open = reader->IsOpen();

        // Create a new reader matching the JSON "type"
        std::string type = root["reader"]["type"].asString();

        if (type == "FFmpegReader") {
            // ...
        } else if (type == "QtImageReader") {
            // ...
#ifdef USE_IMAGEMAGICK
        } else if (type == "ImageReader") {
            // Create image reader (without validating the image)
            reader = new ImageReader(root["reader"]["path"].asString(), false);
        } else if (type == "TextReader") {
            // ...
#endif
        } else if (type == "ChunkReader") {
            // ...
        } else if (type == "DummyReader") {
            // ...
        } else if (type == "Timeline") {
            // ...
        }

        // The Clip owns (and must later free) the reader it just created
        allocated_reader = reader;
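// Usage sketch (assumption; a minimal JSON string, not from this file):
//   Clip clip;
//   clip.SetJson("{\"reader\": {\"type\": \"FFmpegReader\", \"path\": \"video.mp4\"}}");
// SetJsonValue() then rebuilds the matching reader, as shown above.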
void Clip::sort_effects()

// AddEffect
effects.push_back(effect);

if (parentTimeline) {
    // Register the effect's tracked objects with this clip
    std::shared_ptr<TrackedObjectBBox> trackedObjectBBox = std::static_pointer_cast<TrackedObjectBBox>(trackedObject.second);
    trackedObjectBBox->ParentClip(this);
}

// RemoveEffect
effects.remove(effect);
void Clip::apply_effects(std::shared_ptr<Frame> frame)
{
    // Run each effect on the frame, in order
    for (auto effect : effects)
    {
        frame = effect->GetFrame(frame, frame->number);
    }
}

bool Clip::isEqual(double a, double b)
{
    return fabs(a - b) < 0.000001;
}
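// Example: 0.1 + 0.2 == 0.3 is false in double arithmetic (the sum is
// 0.30000000000000004), but isEqual(0.1 + 0.2, 0.3) is true, since the
// difference (~5.6e-17) is far below the 1e-6 tolerance.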
void Clip::apply_keyframes(std::shared_ptr<Frame> frame, std::shared_ptr<QImage> background_canvas) {

    // Get the clip's image for this frame
    std::shared_ptr<QImage> source_image = frame->GetImage();

    // When the clip is displayed as a waveform, render the waveform instead
    // (red, green, blue, alpha describe the waveform color)
    ZmqLogger::Instance()->AppendDebugMethod(
        "Clip::get_transform (Generate Waveform Image)",
        "frame->number", frame->number,
        "background_canvas->width()", background_canvas->width(),
        "background_canvas->height()", background_canvas->height());

    source_image = frame->GetWaveform(background_canvas->width(), background_canvas->height(),
                                      red, green, blue, alpha);
    frame->AddImage(source_image);
    // Build the transform for this frame and canvas size
    QTransform transform = get_transform(frame, background_canvas->width(), background_canvas->height());

    ZmqLogger::Instance()->AppendDebugMethod(
        "Clip::ApplyKeyframes (Transform: Composite Image Layer: Prepare)",
        "frame->number", frame->number,
        "background_canvas->width()", background_canvas->width(),
        "background_canvas->height()", background_canvas->height());

    // Composite the transformed source image onto the background canvas
    QPainter painter(background_canvas.get());
    painter.setRenderHints(QPainter::Antialiasing | QPainter::SmoothPixmapTransform | QPainter::TextAntialiasing, true);

    painter.setTransform(transform);

    painter.setCompositionMode(QPainter::CompositionMode_SourceOver);
    painter.drawImage(0, 0, *source_image);

    // Optionally draw the frame number on top of the image
    std::stringstream frame_number_str;
    frame_number_str << frame->number;

    painter.setPen(QColor("#ffffff"));
    painter.drawText(20, 20, QString(frame_number_str.str().c_str()));

    frame->AddImage(background_canvas);
}
QTransform Clip::get_transform(std::shared_ptr<Frame> frame, int width, int height)
{
    std::shared_ptr<QImage> source_image = frame->GetImage();

    // Multiply each pixel's channels by the alpha keyframe value
    float alpha_value = alpha.GetValue(frame->number);
    unsigned char *pixels = source_image->bits();

    for (int pixel = 0, byte_index = 0; pixel < source_image->width() * source_image->height(); pixel++, byte_index += 4)
    {
        pixels[byte_index + 0] *= alpha_value;
        pixels[byte_index + 1] *= alpha_value;
        pixels[byte_index + 2] *= alpha_value;
        pixels[byte_index + 3] *= alpha_value;
    }

    ZmqLogger::Instance()->AppendDebugMethod(
        "Clip::get_transform (Set Alpha & Opacity)",
        "alpha_value", alpha_value,
        "frame->number", frame->number);
    QSize source_size = source_image->size();

    // When attached to a tracked object, stretch into its bounding box
    if (parentTrackedObject) {
        scale = SCALE_STRETCH;
    }

    switch (scale)
    {
        case (SCALE_FIT): {
            source_size.scale(width, height, Qt::KeepAspectRatio);

            ZmqLogger::Instance()->AppendDebugMethod(
                "Clip::get_transform (Scale: SCALE_FIT)",
                "frame->number", frame->number,
                "source_width", source_size.width(),
                "source_height", source_size.height());
            break;
        }
        case (SCALE_STRETCH): {
            source_size.scale(width, height, Qt::IgnoreAspectRatio);

            ZmqLogger::Instance()->AppendDebugMethod(
                "Clip::get_transform (Scale: SCALE_STRETCH)",
                "frame->number", frame->number,
                "source_width", source_size.width(),
                "source_height", source_size.height());
            break;
        }
        case (SCALE_CROP): {
            source_size.scale(width, height, Qt::KeepAspectRatioByExpanding);

            ZmqLogger::Instance()->AppendDebugMethod(
                "Clip::get_transform (Scale: SCALE_CROP)",
                "frame->number", frame->number,
                "source_width", source_size.width(),
                "source_height", source_size.height());
            break;
        }
        case (SCALE_NONE): {
            // Leave the source size unchanged
            ZmqLogger::Instance()->AppendDebugMethod(
                "Clip::get_transform (Scale: SCALE_NONE)",
                "frame->number", frame->number,
                "source_width", source_size.width(),
                "source_height", source_size.height());
            break;
        }
    }
    float parentObject_location_x = 0.0;
    float parentObject_location_y = 0.0;
    float parentObject_scale_x = 1.0;
    float parentObject_scale_y = 1.0;
    float parentObject_shear_x = 0.0;
    float parentObject_shear_y = 0.0;
    float parentObject_rotation = 0.0;
    if (parentClipObject) {

        // Convert the clip's frame number into a timeline frame number
        double timeline_frame_number = frame->number + clip_start_position - clip_start_frame;

        // Sample the parent clip's keyframes at that frame
        parentObject_location_x = parentClipObject->location_x.GetValue(timeline_frame_number);
        parentObject_location_y = parentClipObject->location_y.GetValue(timeline_frame_number);
        parentObject_scale_x = parentClipObject->scale_x.GetValue(timeline_frame_number);
        parentObject_scale_y = parentClipObject->scale_y.GetValue(timeline_frame_number);
        parentObject_shear_x = parentClipObject->shear_x.GetValue(timeline_frame_number);
        parentObject_shear_y = parentClipObject->shear_y.GetValue(timeline_frame_number);
        parentObject_rotation = parentClipObject->rotation.GetValue(timeline_frame_number);
    }
    if (parentTrackedObject) {

        // Convert the clip's frame number into a timeline frame number
        double timeline_frame_number = frame->number + clip_start_position - clip_start_frame;

        // Get the tracked object's parent clip properties (if any)
        std::map<std::string, float> trackedObjectParentClipProperties = parentTrackedObject->GetParentClipProperties(timeline_frame_number);

        if (!trackedObjectParentClipProperties.empty())
        {
            // Get the frame number on the tracked object's parent clip
            float parentObject_frame_number = trackedObjectParentClipProperties["frame_number"];

            // Get the tracked object's box values on that frame
            std::map<std::string, float> trackedObjectProperties = parentTrackedObject->GetBoxValues(parentObject_frame_number);

            // Combine the box values with the parent clip's properties
            parentObject_location_x = trackedObjectProperties["cx"] - 0.5 + trackedObjectParentClipProperties["location_x"];
            parentObject_location_y = trackedObjectProperties["cy"] - 0.5 + trackedObjectParentClipProperties["location_y"];
            parentObject_scale_x = trackedObjectProperties["w"] * trackedObjectProperties["sx"];
            parentObject_scale_y = trackedObjectProperties["h"] * trackedObjectProperties["sy"];
            parentObject_rotation = trackedObjectProperties["r"] + trackedObjectParentClipProperties["rotation"];
        }
        else
        {
            // No parent clip: use the tracked object's box values directly
            std::map<std::string, float> trackedObjectProperties = parentTrackedObject->GetBoxValues(timeline_frame_number);

            parentObject_location_x = trackedObjectProperties["cx"] - 0.5;
            parentObject_location_y = trackedObjectProperties["cy"] - 0.5;
            parentObject_scale_x = trackedObjectProperties["w"] * trackedObjectProperties["sx"];
            parentObject_scale_y = trackedObjectProperties["h"] * trackedObjectProperties["sy"];
            parentObject_rotation = trackedObjectProperties["r"];
        }
    }
    // Scale keyframes for this frame
    float sx = scale_x.GetValue(frame->number);
    float sy = scale_y.GetValue(frame->number);

    if (parentObject_scale_x != 0.0 && parentObject_scale_y != 0.0) {
        sx *= parentObject_scale_x;
        sy *= parentObject_scale_y;
    }

    float scaled_source_width = source_size.width() * sx;
    float scaled_source_height = source_size.height() * sy;
    float x = 0.0;
    float y = 0.0;

    switch (gravity)
    {
        case GRAVITY_TOP_LEFT:
            // x and y stay at 0 (this is the default)
            break;
        case GRAVITY_TOP:
            x = (width - scaled_source_width) / 2.0;
            break;
        case GRAVITY_TOP_RIGHT:
            x = width - scaled_source_width;
            break;
        case GRAVITY_LEFT:
            y = (height - scaled_source_height) / 2.0;
            break;
        case GRAVITY_CENTER:
            x = (width - scaled_source_width) / 2.0;
            y = (height - scaled_source_height) / 2.0;
            break;
        case GRAVITY_RIGHT:
            x = width - scaled_source_width;
            y = (height - scaled_source_height) / 2.0;
            break;
        case GRAVITY_BOTTOM_LEFT:
            y = (height - scaled_source_height);
            break;
        case GRAVITY_BOTTOM:
            x = (width - scaled_source_width) / 2.0;
            y = (height - scaled_source_height);
            break;
        case GRAVITY_BOTTOM_RIGHT:
            x = width - scaled_source_width;
            y = (height - scaled_source_height);
            break;
    }
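    // Worked example: on a 1920-wide canvas with a 960-pixel-wide scaled
    // source, GRAVITY_CENTER gives x = (1920 - 960) / 2 = 480, while
    // GRAVITY_TOP_RIGHT gives x = 1920 - 960 = 960.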
1565 "Clip::get_transform (Gravity)",
1566 "frame->number", frame->number,
1567 "source_clip->gravity",
gravity,
1568 "scaled_source_width", scaled_source_width,
1569 "scaled_source_height", scaled_source_height);
    QTransform transform;

    // Shear and rotation keyframes (combined with any parent object's values)
    float shear_x_value = shear_x.GetValue(frame->number) + parentObject_shear_x;
    float shear_y_value = shear_y.GetValue(frame->number) + parentObject_shear_y;
    float r = rotation.GetValue(frame->number) + parentObject_rotation;

    ZmqLogger::Instance()->AppendDebugMethod(
        "Clip::get_transform (Build QTransform - if needed)",
        "frame->number", frame->number,
        "sx", sx, "sy", sy);
    if (!isEqual(x, 0) || !isEqual(y, 0)) {
        // TRANSLATE/MOVE the clip to its gravity position
        transform.translate(x, y);
    }
    if (!isEqual(r, 0) || !isEqual(shear_x_value, 0) || !isEqual(shear_y_value, 0)) {
        // ROTATE and SHEAR around the clip's origin point
        float origin_x_value = origin_x.GetValue(frame->number);
        float origin_y_value = origin_y.GetValue(frame->number);
        float origin_x_offset = (scaled_source_width * origin_x_value);
        float origin_y_offset = (scaled_source_height * origin_y_value);
        transform.translate(origin_x_offset, origin_y_offset);
        transform.rotate(r);
        transform.shear(shear_x_value, shear_y_value);
        transform.translate(-origin_x_offset, -origin_y_offset);
    }

    // SCALE the source pixels to their final size
    float source_width_scale = (float(source_size.width()) / float(source_image->width())) * sx;
    float source_height_scale = (float(source_size.height()) / float(source_image->height())) * sy;
    if (!isEqual(source_width_scale, 1.0) || !isEqual(source_height_scale, 1.0)) {
        transform.scale(source_width_scale, source_height_scale);
    }

    return transform;
}
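// Note on the rotate/shear block above: it is sandwiched between
// translate(+origin) and translate(-origin) so the rotation and shear pivot
// around the clip's origin point (origin_x/origin_y) rather than the
// top-left corner of the image.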