// Default wave color: blue (#007bff)
wave_color = Color((unsigned char)0, (unsigned char)123, (unsigned char)255, (unsigned char)255);

// No parent object by default
parentTrackedObject = nullptr;
parentClipObject = NULL;

// Read the optional "rotate" metadata from the reader (common in phone-recorded video)
if (reader && reader->info.metadata.count("rotate") > 0) {
	// Parse the rotation value; parse failures are ignored
	float rotate_metadata = strtof(reader->info.metadata["rotate"].c_str(), 0);
} catch (const std::exception& e) {}
Clip::Clip() : resampler(NULL), reader(NULL), allocated_reader(NULL), is_open(false)

Clip::Clip(ReaderBase* new_reader) : resampler(NULL), reader(new_reader), allocated_reader(NULL), is_open(false)

Clip::Clip(std::string path) : resampler(NULL), reader(NULL), allocated_reader(NULL), is_open(false)
// Guess the reader type from the (lower-cased) file extension
std::string ext = get_file_extension(path);
std::transform(ext.begin(), ext.end(), ext.begin(), ::tolower);

// Known video/audio extensions, or an image-sequence pattern containing "%"
if (ext == "avi" || ext == "mov" || ext == "mkv" || ext == "mpg" || ext == "mpeg" || ext == "mp3" || ext == "mp4" || ext == "mts" ||
	ext == "ogg" || ext == "wav" || ext == "wmv" || ext == "webm" || ext == "vob" || path.find("%") != std::string::npos)
allocated_reader = reader;

if (allocated_reader) {
	delete allocated_reader;
	allocated_reader = NULL;
if (parentTimeline) {
	std::shared_ptr<openshot::TrackedObjectBase> trackedObject = parentTimeline->GetTrackedObject(object_id);
	Clip* clipObject = parentTimeline->GetClip(object_id);

	else if (clipObject) {

parentTrackedObject = trackedObject;

parentClipObject = clipObject;
bool is_same_reader = false;
if (new_reader && allocated_reader) {
	if (new_reader->Name() == "FrameMapper") {
		if (allocated_reader == clip_mapped_reader->Reader()) {
			is_same_reader = true;

if (allocated_reader && !is_same_reader) {
	allocated_reader->Close();
	delete allocated_reader;
	allocated_reader = NULL;
throw ReaderClosed("No Reader has been initialized for this Clip.  Call Reader(*reader) before calling this method.");

throw ReaderClosed("No Reader has been initialized for this Clip.  Call Reader(*reader) before calling this method.");

if (is_open && reader) {

throw ReaderClosed("No Reader has been initialized for this Clip.  Call Reader(*reader) before calling this method.");

return GetFrame(NULL, clip_frame_number, NULL);
std::shared_ptr<Frame> Clip::GetFrame(std::shared_ptr<openshot::Frame> background_frame, int64_t clip_frame_number)
	return GetFrame(background_frame, clip_frame_number, NULL);

throw ReaderClosed("The Clip is closed.  Call Open() before calling this method.");

// Check the final cache first
std::shared_ptr<Frame> frame = NULL;
frame = final_cache.GetFrame(clip_frame_number);
	"Clip::GetFrame (Cached frame found)",
	"requested_frame", clip_frame_number);

// Not cached: build the frame from the reader
frame = GetOrCreateFrame(clip_frame_number);

// Create a transparent background frame if the caller did not supply one
if (!background_frame) {
	background_frame = std::make_shared<Frame>(clip_frame_number, frame->GetWidth(), frame->GetHeight(),
	                                           "#00000000", frame->GetAudioSamplesCount(),
	                                           frame->GetAudioChannelsCount());

// Apply the time curve, waveform, effects, and keyframes, then cache the result
apply_timemapping(frame);
apply_waveform(frame, background_frame);
apply_effects(frame);
if (timeline != NULL && options != NULL) {
apply_keyframes(frame, background_frame);
final_cache.Add(frame);

throw ReaderClosed("No Reader has been initialized for this Clip.  Call Reader(*reader) before calling this method.");
for (const auto& effect : effects) {
	if (effect->Id() == id) {

std::string Clip::get_file_extension(std::string path)
	return path.substr(path.find_last_of(".") + 1);
int number_of_samples = buffer->getNumSamples();
int channels = buffer->getNumChannels();

// Copy every channel back-to-front into the scratch buffer
for (int channel = 0; channel < channels; channel++)
	for (int s = number_of_samples - 1; s >= 0; s--, n++)
		reversed->getWritePointer(channel)[n] = buffer->getWritePointer(channel)[s];

// Write the reversed samples back into the original buffer
for (int channel = 0; channel < channels; channel++)
	buffer->addFrom(channel, 0, reversed->getReadPointer(channel), number_of_samples, 1.0f);
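The reverse_buffer fragment above flips the audio through a temporary buffer. The same effect can be had in place; a minimal sketch, assuming a juce::AudioBuffer<float> (the helper name is hypothetical, and the JUCE header path varies by build setup):

    #include <algorithm>

    void reverse_in_place(juce::AudioBuffer<float>& buffer) {
        for (int channel = 0; channel < buffer.getNumChannels(); ++channel) {
            float* data = buffer.getWritePointer(channel);
            // Flip this channel's sample order in place
            std::reverse(data, data + buffer.getNumSamples());
        }
    }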
void Clip::apply_timemapping(std::shared_ptr<Frame> frame)
	throw ReaderClosed("No Reader has been initialized for this Clip.  Call Reader(*reader) before calling this method.");

	// Lock while remapping this frame's audio
	const std::lock_guard<std::recursive_mutex> lock(getFrameMutex);

	int64_t clip_frame_number = frame->number;
	int64_t new_frame_number = adjust_frame_number_minimum(time.GetLong(clip_frame_number));

	// The faster the playback (larger |delta|), the more source samples are needed
	int source_sample_count = round(target_sample_count * fabs(delta));
	location.frame = new_frame_number;

	init_samples.clear();
	resampler->SetBuffer(&init_samples, 1.0);
if (source_sample_count <= 0) {
	// Nothing to pull from the source: output silence
	frame->AddAudioSilence(target_sample_count);

source_samples->clear();

// Gather source samples, walking frame by frame until enough are collected
int remaining_samples = source_sample_count;
while (remaining_samples > 0) {
	std::shared_ptr<Frame> source_frame = GetOrCreateFrame(location.frame, false);
	int frame_sample_count = source_frame->GetAudioSamplesCount() - location.sample_start;
	if (frame_sample_count == 0) {
	if (remaining_samples - frame_sample_count >= 0) {
		// Consume the rest of this source frame
		for (int channel = 0; channel < source_frame->GetAudioChannelsCount(); channel++) {
			source_samples->addFrom(channel, source_pos, source_frame->GetAudioSamples(channel) + location.sample_start, frame_sample_count, 1.0f);
		remaining_samples -= frame_sample_count;
		source_pos += frame_sample_count;
	// Consume only what is still needed from this source frame
	for (int channel = 0; channel < source_frame->GetAudioChannelsCount(); channel++) {
		source_samples->addFrom(channel, source_pos, source_frame->GetAudioSamples(channel) + location.sample_start, remaining_samples, 1.0f);
	remaining_samples = 0;
	source_pos += remaining_samples; // no-op: remaining_samples was just zeroed

frame->AddAudioSilence(target_sample_count);

if (source_sample_count != target_sample_count) {
	// Resample the gathered audio up/down to the target count
	double resample_ratio = double(source_sample_count) / double(target_sample_count);
	resampler->SetBuffer(source_samples, resample_ratio);

	frame->AddAudio(true, channel, 0, resampled_buffer->getReadPointer(channel, 0), std::min(resampled_buffer->getNumSamples(), target_sample_count), 1.0f);

frame->AddAudio(true, channel, 0, source_samples->getReadPointer(channel, 0), target_sample_count, 1.0f);

delete source_samples;
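A worked example of the sample-count math above, with hypothetical numbers (a 30 fps reader at 44100 Hz, time curve running at double speed):

    int target_sample_count = 44100 / 30;        // 1470 samples per output frame
    double delta = 2.0;                          // time curve advances 2 source frames per clip frame
    int source_sample_count = round(target_sample_count * fabs(delta));                // 2940
    double resample_ratio = double(source_sample_count) / double(target_sample_count); // 2.0

A ratio of 2.0 tells the resampler to squeeze two frames' worth of source audio into one output frame, which is what keeps fast-motion audio in sync with the remapped video.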
int64_t Clip::adjust_frame_number_minimum(int64_t frame_number)
	if (frame_number < 1)

std::shared_ptr<Frame> Clip::GetOrCreateFrame(int64_t number, bool enable_time)
	int64_t clip_frame_number = adjust_frame_number_minimum(number);
	clip_frame_number = adjust_frame_number_minimum(time.GetLong(clip_frame_number));

	"Clip::GetOrCreateFrame (from reader)",
	"number", number, "clip_frame_number", clip_frame_number);

	// Ask the reader for the frame, then restamp it with the clip's frame number
	auto reader_frame = reader->GetFrame(clip_frame_number);
	reader_frame->number = number;

	// Work on a copy, so the reader's cached frame is never modified
	auto reader_copy = std::make_shared<Frame>(*reader_frame.get());
	reader_copy->AddColor(QColor(Qt::transparent));
	reader_copy->AddAudioSilence(reader_copy->GetAudioSamplesCount());

	"Clip::GetOrCreateFrame (create blank)",
	"estimated_samples_in_frame", estimated_samples_in_frame);

	// Fall back to a blank (black, silent) frame; size arguments elided in this listing
	auto new_frame = std::make_shared<Frame>(
		"#000000", estimated_samples_in_frame, reader->info.channels);
	new_frame->AddAudioSilence(estimated_samples_in_frame);
root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame);
root["position"] = add_property_json("Position", Position(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 30 * 60 * 60 * 48, true, requested_frame);
root["waveform"] = add_property_json("Waveform", waveform, "int", "", NULL, 0, 1, false, requested_frame);
root["parentObjectId"] = add_property_json("Parent", 0.0, "string", parentObjectId, NULL, -1, -1, false, requested_frame);
if (parentTrackedObject && parentClipObject)
	// Convert the clip frame number into a timeline frame number
	double timeline_frame_number = requested_frame + clip_start_position - clip_start_frame;

	// Pull the parent clip's properties and the tracked object's box for that frame
	std::map<std::string, float> trackedObjectParentClipProperties = parentTrackedObject->GetParentClipProperties(timeline_frame_number);
	double parentObject_frame_number = trackedObjectParentClipProperties["frame_number"];
	std::map<std::string, float> trackedObjectProperties = parentTrackedObject->GetBoxValues(parentObject_frame_number);

	float parentObject_location_x = trackedObjectProperties["cx"] - 0.5 + trackedObjectParentClipProperties["cx"];
	float parentObject_location_y = trackedObjectProperties["cy"] - 0.5 + trackedObjectParentClipProperties["cy"];
	float parentObject_scale_x = trackedObjectProperties["w"] * trackedObjectProperties["sx"];
	float parentObject_scale_y = trackedObjectProperties["h"] * trackedObjectProperties["sy"];
	float parentObject_rotation = trackedObjectProperties["r"] + trackedObjectParentClipProperties["r"];

	root["location_x"] = add_property_json("Location X", parentObject_location_x, "float", "", &location_x, -1.0, 1.0, false, requested_frame);
	root["location_y"] = add_property_json("Location Y", parentObject_location_y, "float", "", &location_y, -1.0, 1.0, false, requested_frame);
	root["scale_x"] = add_property_json("Scale X", parentObject_scale_x, "float", "", &scale_x, 0.0, 1.0, false, requested_frame);
	root["scale_y"] = add_property_json("Scale Y", parentObject_scale_y, "float", "", &scale_y, 0.0, 1.0, false, requested_frame);
	root["rotation"] = add_property_json("Rotation", parentObject_rotation, "float", "", &rotation, -360, 360, false, requested_frame);
else if (parentClipObject)
	double timeline_frame_number = requested_frame + clip_start_position - clip_start_frame;

	float parentObject_location_x = parentClipObject->location_x.GetValue(timeline_frame_number);
	float parentObject_location_y = parentClipObject->location_y.GetValue(timeline_frame_number);
	float parentObject_scale_x = parentClipObject->scale_x.GetValue(timeline_frame_number);
	float parentObject_scale_y = parentClipObject->scale_y.GetValue(timeline_frame_number);
	float parentObject_shear_x = parentClipObject->shear_x.GetValue(timeline_frame_number);
	float parentObject_shear_y = parentClipObject->shear_y.GetValue(timeline_frame_number);
	float parentObject_rotation = parentClipObject->rotation.GetValue(timeline_frame_number);

	root["location_x"] = add_property_json("Location X", parentObject_location_x, "float", "", &location_x, -1.0, 1.0, false, requested_frame);
	root["location_y"] = add_property_json("Location Y", parentObject_location_y, "float", "", &location_y, -1.0, 1.0, false, requested_frame);
	root["scale_x"] = add_property_json("Scale X", parentObject_scale_x, "float", "", &scale_x, 0.0, 1.0, false, requested_frame);
	root["scale_y"] = add_property_json("Scale Y", parentObject_scale_y, "float", "", &scale_y, 0.0, 1.0, false, requested_frame);
	root["rotation"] = add_property_json("Rotation", parentObject_rotation, "float", "", &rotation, -360, 360, false, requested_frame);
	root["shear_x"] = add_property_json("Shear X", parentObject_shear_x, "float", "", &shear_x, -1.0, 1.0, false, requested_frame);
	root["shear_y"] = add_property_json("Shear Y", parentObject_shear_y, "float", "", &shear_y, -1.0, 1.0, false, requested_frame);
return root.toStyledString();

root["parentObjectId"] = parentObjectId;
root["scale"] = scale;
root["waveform"] = waveform;

root["effects"] = Json::Value(Json::arrayValue);
for (auto existing_effect : effects)
	root["effects"].append(existing_effect->JsonValue());

root["reader"] = Json::Value(Json::objectValue);
catch (const std::exception& e)
	throw InvalidJSON("JSON is invalid (missing keys or invalid data types)");
if (!root["parentObjectId"].isNull()){
	parentObjectId = root["parentObjectId"].asString();
	if (parentObjectId.size() > 0 && parentObjectId != ""){
	parentTrackedObject = nullptr;
	parentClipObject = NULL;
if (!root["gravity"].isNull())
if (!root["scale"].isNull())
if (!root["anchor"].isNull())
if (!root["display"].isNull())
if (!root["mixing"].isNull())
if (!root["waveform"].isNull())
	waveform = root["waveform"].asBool();
if (!root["scale_x"].isNull())
if (!root["scale_y"].isNull())
if (!root["location_x"].isNull())
if (!root["location_y"].isNull())
if (!root["alpha"].isNull())
if (!root["rotation"].isNull())
if (!root["time"].isNull())
if (!root["volume"].isNull())
if (!root["wave_color"].isNull())
if (!root["shear_x"].isNull())
if (!root["shear_y"].isNull())
if (!root["origin_x"].isNull())
if (!root["origin_y"].isNull())
if (!root["channel_filter"].isNull())
if (!root["channel_mapping"].isNull())
if (!root["has_audio"].isNull())
if (!root["has_video"].isNull())
if (!root["perspective_c1_x"].isNull())
if (!root["perspective_c1_y"].isNull())
if (!root["perspective_c2_x"].isNull())
if (!root["perspective_c2_y"].isNull())
if (!root["perspective_c3_x"].isNull())
if (!root["perspective_c3_y"].isNull())
if (!root["perspective_c4_x"].isNull())
if (!root["perspective_c4_y"].isNull())
if (!root["effects"].isNull()) {
	for (const auto existing_effect : root["effects"]) {
		if (!existing_effect["type"].isNull()) {
			if ( (e = EffectInfo().CreateEffect(existing_effect["type"].asString()))) {
if (!root["reader"].isNull())
	if (!root["reader"]["type"].isNull())
		// Remember whether the current reader was open, so the new one can match
		bool already_open = false;
		already_open = reader->IsOpen();

		// Re-create the reader from its JSON "type"
		std::string type = root["reader"]["type"].asString();
		if (type == "FFmpegReader") {
		} else if (type == "QtImageReader") {
#ifdef USE_IMAGEMAGICK
		} else if (type == "ImageReader") {
			reader = new ImageReader(root["reader"]["path"].asString(), false);
		} else if (type == "TextReader") {
		} else if (type == "ChunkReader") {
		} else if (type == "DummyReader") {
		} else if (type == "Timeline") {

		// The clip owns this reader and must delete it later
		allocated_reader = reader;

final_cache.Clear();
void Clip::sort_effects()

effects.push_back(effect);

if (parentTimeline){
	// Register each of the effect's tracked objects with the timeline
	std::shared_ptr<TrackedObjectBBox> trackedObjectBBox = std::static_pointer_cast<TrackedObjectBBox>(trackedObject.second);
	trackedObjectBBox->ParentClip(this);

final_cache.Clear();

effects.remove(effect);
final_cache.Clear();
void Clip::apply_effects(std::shared_ptr<Frame> frame)
	for (auto effect : effects)
		frame = effect->GetFrame(frame, frame->number);

bool Clip::isEqual(double a, double b)
	return fabs(a - b) < 0.000001;
void Clip::apply_keyframes(std::shared_ptr<Frame> frame, std::shared_ptr<Frame> background_frame) {
	if (!frame->has_image_data) {

	std::shared_ptr<QImage> source_image = frame->GetImage();
	std::shared_ptr<QImage> background_canvas = background_frame->GetImage();

	// Build the transform for this frame, relative to the background canvas
	QTransform transform = get_transform(frame, background_canvas->width(), background_canvas->height());

	"Clip::ApplyKeyframes (Transform: Composite Image Layer: Prepare)",
	"frame->number", frame->number,
	"background_canvas->width()", background_canvas->width(),
	"background_canvas->height()", background_canvas->height());

	// Composite the transformed source image onto the background canvas
	QPainter painter(background_canvas.get());
	painter.setRenderHints(QPainter::Antialiasing | QPainter::SmoothPixmapTransform | QPainter::TextAntialiasing, true);
	painter.setTransform(transform);
	painter.setCompositionMode(QPainter::CompositionMode_SourceOver);
	painter.drawImage(0, 0, *source_image);

	// Optionally draw the frame number on top
	std::stringstream frame_number_str;
	frame_number_str << frame->number;
	painter.setPen(QColor("#ffffff"));
	painter.drawText(20, 20, QString(frame_number_str.str().c_str()));

	frame->AddImage(background_canvas);
void Clip::apply_waveform(std::shared_ptr<Frame> frame, std::shared_ptr<Frame> background_frame) {
	std::shared_ptr<QImage> source_image = frame->GetImage();
	std::shared_ptr<QImage> background_canvas = background_frame->GetImage();

	"Clip::apply_waveform (Generate Waveform Image)",
	"frame->number", frame->number,
	"background_canvas->width()", background_canvas->width(),
	"background_canvas->height()", background_canvas->height());

	// Render the audio waveform (tinted by the wave_color curves) as this frame's image
	source_image = frame->GetWaveform(background_canvas->width(), background_canvas->height(), red, green, blue, alpha);
	frame->AddImage(source_image);
QTransform Clip::get_transform(std::shared_ptr<Frame> frame, int width, int height)
	std::shared_ptr<QImage> source_image = frame->GetImage();

	// Scale every byte (color and alpha) of every pixel by the alpha keyframe value
	unsigned char *pixels = source_image->bits();
	for (int pixel = 0, byte_index = 0; pixel < source_image->width() * source_image->height(); pixel++, byte_index += 4)
	{
		pixels[byte_index + 0] *= alpha_value;
		pixels[byte_index + 1] *= alpha_value;
		pixels[byte_index + 2] *= alpha_value;
		pixels[byte_index + 3] *= alpha_value;
	}

	"Clip::get_transform (Set Alpha & Opacity)",
	"alpha_value", alpha_value,
	"frame->number", frame->number);
QSize source_size = source_image->size();

// An attached tracked object changes how scaling is handled
if (parentTrackedObject){

// SCALE_FIT: fill width or height without cropping
source_size.scale(width, height, Qt::KeepAspectRatio);
	"Clip::get_transform (Scale: SCALE_FIT)",
	"frame->number", frame->number,
	"source_width", source_size.width(),
	"source_height", source_size.height());

// SCALE_STRETCH: fill both dimensions, distorting to fit
source_size.scale(width, height, Qt::IgnoreAspectRatio);
	"Clip::get_transform (Scale: SCALE_STRETCH)",
	"frame->number", frame->number,
	"source_width", source_size.width(),
	"source_height", source_size.height());

// SCALE_CROP: fill both dimensions, cropping the overlap
source_size.scale(width, height, Qt::KeepAspectRatioByExpanding);
	"Clip::get_transform (Scale: SCALE_CROP)",
	"frame->number", frame->number,
	"source_width", source_size.width(),
	"source_height", source_size.height());

// SCALE_NONE: keep the source size
	"Clip::get_transform (Scale: SCALE_NONE)",
	"frame->number", frame->number,
	"source_width", source_size.width(),
	"source_height", source_size.height());
float parentObject_location_x = 0.0;
float parentObject_location_y = 0.0;
float parentObject_scale_x = 1.0;
float parentObject_scale_y = 1.0;
float parentObject_shear_x = 0.0;
float parentObject_shear_y = 0.0;
float parentObject_rotation = 0.0;

if (parentClipObject){
	double timeline_frame_number = frame->number + clip_start_position - clip_start_frame;

	parentObject_location_x = parentClipObject->location_x.GetValue(timeline_frame_number);
	parentObject_location_y = parentClipObject->location_y.GetValue(timeline_frame_number);
	parentObject_scale_x = parentClipObject->scale_x.GetValue(timeline_frame_number);
	parentObject_scale_y = parentClipObject->scale_y.GetValue(timeline_frame_number);
	parentObject_shear_x = parentClipObject->shear_x.GetValue(timeline_frame_number);
	parentObject_shear_y = parentClipObject->shear_y.GetValue(timeline_frame_number);
	parentObject_rotation = parentClipObject->rotation.GetValue(timeline_frame_number);
if (parentTrackedObject){
	double timeline_frame_number = frame->number + clip_start_position - clip_start_frame;

	std::map<std::string, float> trackedObjectParentClipProperties =
		parentTrackedObject->GetParentClipProperties(timeline_frame_number);

	// If the tracked object has a parent clip, combine both sets of properties
	if (!trackedObjectParentClipProperties.empty())
		float parentObject_frame_number = trackedObjectParentClipProperties["frame_number"];
		std::map<std::string, float> trackedObjectProperties = parentTrackedObject->GetBoxValues(parentObject_frame_number);

		parentObject_location_x = trackedObjectProperties["cx"] - 0.5 + trackedObjectParentClipProperties["location_x"];
		parentObject_location_y = trackedObjectProperties["cy"] - 0.5 + trackedObjectParentClipProperties["location_y"];
		parentObject_scale_x = trackedObjectProperties["w"] * trackedObjectProperties["sx"];
		parentObject_scale_y = trackedObjectProperties["h"] * trackedObjectProperties["sy"];
		parentObject_rotation = trackedObjectProperties["r"] + trackedObjectParentClipProperties["rotation"];

	// Otherwise use the tracked object's box values alone
	std::map<std::string, float> trackedObjectProperties = parentTrackedObject->GetBoxValues(timeline_frame_number);

	parentObject_location_x = trackedObjectProperties["cx"] - 0.5;
	parentObject_location_y = trackedObjectProperties["cy"] - 0.5;
	parentObject_scale_x = trackedObjectProperties["w"] * trackedObjectProperties["sx"];
	parentObject_scale_y = trackedObjectProperties["h"] * trackedObjectProperties["sy"];
	parentObject_rotation = trackedObjectProperties["r"];
if (parentObject_scale_x != 0.0 && parentObject_scale_y != 0.0){
	sx *= parentObject_scale_x;
	sy *= parentObject_scale_y;

float scaled_source_width = source_size.width() * sx;
float scaled_source_height = source_size.height() * sy;

// Position the scaled source according to the gravity setting
x = (width - scaled_source_width) / 2.0;       // GRAVITY_TOP (centered horizontally)

x = width - scaled_source_width;               // GRAVITY_TOP_RIGHT

y = (height - scaled_source_height) / 2.0;     // GRAVITY_LEFT (centered vertically)

x = (width - scaled_source_width) / 2.0;       // GRAVITY_CENTER
y = (height - scaled_source_height) / 2.0;

x = width - scaled_source_width;               // GRAVITY_RIGHT
y = (height - scaled_source_height) / 2.0;

y = (height - scaled_source_height);           // GRAVITY_BOTTOM_LEFT

x = (width - scaled_source_width) / 2.0;       // GRAVITY_BOTTOM
y = (height - scaled_source_height);

x = width - scaled_source_width;               // GRAVITY_BOTTOM_RIGHT
y = (height - scaled_source_height);

"Clip::get_transform (Gravity)",
"frame->number", frame->number,
"source_clip->gravity", gravity,
"scaled_source_width", scaled_source_width,
"scaled_source_height", scaled_source_height);
QTransform transform;

float shear_x_value = shear_x.GetValue(frame->number) + parentObject_shear_x;
float shear_y_value = shear_y.GetValue(frame->number) + parentObject_shear_y;

"Clip::get_transform (Build QTransform - if needed)",
"frame->number", frame->number,
"sx", sx, "sy", sy);

// 1) Move to the gravity position
if (!isEqual(x, 0) || !isEqual(y, 0)) {
	transform.translate(x, y);

// 2) Rotate and shear around the origin point
if (!isEqual(r, 0) || !isEqual(shear_x_value, 0) || !isEqual(shear_y_value, 0)) {
	float origin_x_offset = (scaled_source_width * origin_x_value);
	float origin_y_offset = (scaled_source_height * origin_y_value);
	transform.translate(origin_x_offset, origin_y_offset);
	transform.rotate(r);
	transform.shear(shear_x_value, shear_y_value);
	transform.translate(-origin_x_offset, -origin_y_offset);

// 3) Scale the source image
float source_width_scale = (float(source_size.width()) / float(source_image->width())) * sx;
float source_height_scale = (float(source_size.height()) / float(source_image->height())) * sy;
if (!isEqual(source_width_scale, 1.0) || !isEqual(source_height_scale, 1.0)) {
	transform.scale(source_width_scale, source_height_scale);
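The three steps above always compose in the same order: gravity translation, then rotation/shear about the origin point, then scale. A stand-alone Qt sketch of that pattern (function name and all parameter values hypothetical):

    #include <QTransform>

    QTransform build_transform(float x, float y, float r,
                               float origin_x_offset, float origin_y_offset,
                               float shear_x, float shear_y,
                               float sx, float sy) {
        QTransform t;
        t.translate(x, y);                                // gravity position
        t.translate(origin_x_offset, origin_y_offset);    // move pivot to the origin point
        t.rotate(r);                                      // rotate about that pivot
        t.shear(shear_x, shear_y);
        t.translate(-origin_x_offset, -origin_y_offset);  // move pivot back
        t.scale(sx, sy);                                  // finally, scale the source
        return t;
    }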
int64_t Clip::adjust_timeline_framenumber(int64_t clip_frame_number) {
	int64_t frame_number = clip_frame_number + clip_start_position - clip_start_frame;
	return frame_number;
Header file for AudioResampler class.
Header file for ChunkReader class.
Header file for Clip class.
Header file for DummyReader class.
Header file for all Exception classes.
Header file for FFmpegReader class.
Header file for the FrameMapper class.
Header file for ImageReader class.
Header file for MagickUtilities (IM6/IM7 compatibility overlay)
Header file for QtImageReader class.
Header file for TextReader class.
Header file for Timeline class.
Header file for ZeroMQ-based Logger class.
This class is used to resample audio data for many sequential frames.
void SetBuffer(juce::AudioBuffer< float > *new_buffer, double sample_rate, double new_sample_rate)
Sets the audio buffer and key settings.
juce::AudioBuffer< float > * GetResampledBuffer()
Get the resampled audio buffer.
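A minimal usage sketch of this resampler API, using only the two signatures above (buffer size, rates, and the default constructor are assumptions):

    juce::AudioBuffer<float> samples(2, 2940);         // 2 channels of gathered source audio
    openshot::AudioResampler resampler;
    resampler.SetBuffer(&samples, 2940.0, 1470.0);     // map 2940 input samples toward 1470
    juce::AudioBuffer<float>* out = resampler.GetResampledBuffer();

Note that Clip::apply_timemapping above calls a two-argument SetBuffer(buffer, ratio) overload; the three-argument form is the one documented here.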
void SetMaxBytesFromInfo(int64_t number_of_frames, int width, int height, int sample_rate, int channels)
Set maximum bytes to a different amount based on a ReaderInfo struct.
void Add(std::shared_ptr< openshot::Frame > frame)
Add a Frame to the cache.
std::shared_ptr< openshot::Frame > GetFrame(int64_t frame_number)
Get a frame from the cache.
void Clear()
Clear the cache of all frames.
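These three calls are the whole caching pattern used by Clip::GetFrame above; a condensed sketch (not the verbatim method):

    std::shared_ptr<openshot::Frame> frame = final_cache.GetFrame(clip_frame_number);
    if (!frame) {
        frame = GetOrCreateFrame(clip_frame_number);   // build from the reader
        // ... time mapping, waveform, effects, keyframes ...
        final_cache.Add(frame);                        // reuse on the next request
    }
    return frame;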
This class reads a special chunk-formatted file, which can be easily shared in a distributed environm...
float Start() const
Get start position (in seconds) of clip (trim start of video)
float start
The position in seconds to start playing (used to trim the beginning of a clip)
float Duration() const
Get the length of this clip (in seconds)
virtual float End() const
Get end position (in seconds) of clip (trim end of video)
std::string Id() const
Get the Id of this clip object.
virtual Json::Value JsonValue() const =0
Generate Json::Value for this object.
Json::Value add_property_choice_json(std::string name, int value, int selected_value) const
Generate JSON choice for a property (dropdown properties)
int Layer() const
Get layer of clip on timeline (lower number is covered by higher numbers)
virtual void SetJsonValue(const Json::Value root)=0
Load Json::Value into this object.
openshot::TimelineBase * timeline
Pointer to the parent timeline instance (if any)
float Position() const
Get position on timeline (in seconds)
virtual openshot::TimelineBase * ParentTimeline()
Get the associated Timeline pointer (if any)
std::string id
ID Property for all derived Clip and Effect classes.
float position
The position on the timeline where this clip should start playing.
float end
The position in seconds to end playing (used to trim the ending of a clip)
std::string previous_properties
This string contains the previous JSON properties.
Json::Value add_property_json(std::string name, float value, std::string type, std::string memo, const Keyframe *keyframe, float min_value, float max_value, bool readonly, int64_t requested_frame) const
Generate JSON for a property.
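The Clip::PropertiesJSON fragments above call this helper in two ways: plain values pass NULL for the keyframe, while keyframed properties pass the curve's address so callers can expose its points. Both lines below are taken from the listing:

    root["position"] = add_property_json("Position", Position(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
    root["rotation"] = add_property_json("Rotation", parentObject_rotation, "float", "", &rotation, -360, 360, false, requested_frame);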
This class represents a clip (used to arrange readers on the timeline)
void SetAttachedObject(std::shared_ptr< openshot::TrackedObjectBase > trackedObject)
Set the pointer to the trackedObject this clip is attached to.
openshot::Keyframe scale_x
Curve representing the horizontal scaling in percent (0 to 1)
openshot::Keyframe location_y
Curve representing the relative Y position in percent based on the gravity (-1 to 1)
openshot::Keyframe shear_x
Curve representing X shear angle in degrees (-45.0=left, 45.0=right)
openshot::Keyframe perspective_c4_x
Curves representing X for coordinate 4.
openshot::AnchorType anchor
The anchor determines what parent a clip should snap to.
openshot::VolumeMixType mixing
What strategy should be followed when mixing audio with other clips.
void Open() override
Open the internal reader.
openshot::Keyframe rotation
Curve representing the rotation (0 to 360)
openshot::Keyframe channel_filter
A number representing an audio channel to filter (clears all other channels)
openshot::FrameDisplayType display
The format to display the frame number (if any)
void init_reader_rotation()
Update default rotation from reader.
Clip()
Default Constructor.
openshot::Keyframe perspective_c1_x
Curves representing X for coordinate 1.
void AttachToObject(std::string object_id)
Attach clip to Tracked Object or to another Clip.
std::string Json() const override
Generate JSON string of this object.
openshot::EffectBase * GetEffect(const std::string &id)
Look up an effect by ID.
void SetJsonValue(const Json::Value root) override
Load Json::Value into this object.
openshot::Keyframe alpha
Curve representing the alpha (1 to 0)
openshot::Keyframe has_audio
An optional override to determine if this clip has audio (-1=undefined, 0=no, 1=yes)
openshot::Keyframe perspective_c3_x
Curves representing X for coordinate 3.
void init_reader_settings()
Init reader info details.
openshot::Keyframe perspective_c1_y
Curves representing Y for coordinate 1.
Json::Value JsonValue() const override
Generate Json::Value for this object.
void SetAttachedClip(Clip *clipObject)
Set the pointer to the clip this clip is attached to.
openshot::TimelineBase * ParentTimeline() override
Get the associated Timeline pointer (if any)
openshot::Keyframe perspective_c4_y
Curves representing Y for coordinate 4.
openshot::Keyframe time
Curve representing the frames over time to play (used for speed and direction of video)
bool Waveform()
Get the waveform property of this clip.
openshot::GravityType gravity
The gravity of a clip determines where it snaps to its parent.
AudioLocation previous_location
Previous time-mapped audio location.
openshot::Keyframe perspective_c3_y
Curves representing Y for coordinate 3.
void AddEffect(openshot::EffectBase *effect)
Add an effect to the clip.
void Close() override
Close the internal reader.
virtual ~Clip()
Destructor.
openshot::Keyframe perspective_c2_y
Curves representing Y for coordinate 2.
openshot::Keyframe volume
Curve representing the volume (0 to 1)
openshot::Keyframe shear_y
Curve representing Y shear angle in degrees (-45.0=down, 45.0=up)
openshot::Keyframe scale_y
Curve representing the vertical scaling in percent (0 to 1)
float End() const override
Get end position (in seconds) of clip (trim end of video), which can be affected by the time curve.
std::shared_ptr< openshot::Frame > GetFrame(int64_t clip_frame_number) override
Get an openshot::Frame object for a specific frame number of this clip. The image size and number of ...
openshot::ReaderBase * Reader()
Get the current reader.
void RemoveEffect(openshot::EffectBase *effect)
Remove an effect from the clip.
openshot::Keyframe channel_mapping
A number representing an audio channel to output (only works when filtering a channel)
openshot::Keyframe has_video
An optional override to determine if this clip has video (-1=undefined, 0=no, 1=yes)
std::string PropertiesJSON(int64_t requested_frame) const override
openshot::Color wave_color
Curve representing the color of the audio wave form.
void init_settings()
Init default settings for a clip.
openshot::Keyframe perspective_c2_x
Curves representing X for coordinate 2.
openshot::ScaleType scale
The scale determines how a clip should be resized to fit its parent.
openshot::Keyframe location_x
Curve representing the relative X position in percent based on the gravity (-1 to 1)
openshot::Keyframe origin_x
Curve representing X origin point (0.0=0% (left), 1.0=100% (right))
std::recursive_mutex getFrameMutex
Mutex for multiple threads.
void SetJson(const std::string value) override
Load JSON string into this object.
openshot::Keyframe origin_y
Curve representing Y origin point (0.0=0% (top), 1.0=100% (bottom))
This class represents a color (used on the timeline and clips)
openshot::Keyframe blue
Curve representing the blue value (0 - 255)
openshot::Keyframe red
Curve representing the red value (0 - 255)
openshot::Keyframe green
Curve representing the green value (0 - 255)
void SetJsonValue(const Json::Value root)
Load Json::Value into this object.
openshot::Keyframe alpha
Curve representing the alpha value (0 - 255)
Json::Value JsonValue() const
Generate Json::Value for this object.
This class is used as a simple, dummy reader, which can be very useful when writing unit tests....
This abstract class is the base class, used by all effects in libopenshot.
openshot::ClipBase * ParentClip()
Parent clip object of this effect (which can be unparented and NULL)
virtual void SetJsonValue(const Json::Value root)
Load Json::Value into this object.
EffectInfoStruct info
Information about the current effect.
std::map< int, std::shared_ptr< openshot::TrackedObjectBase > > trackedObjects
Map of Tracked Object's by their indices (used by Effects that track objects on clips)
This class returns a listing of all effects supported by libopenshot.
This class uses the FFmpeg libraries, to open video files and audio files, and return openshot::Frame...
float ToFloat()
Return this fraction as a float (i.e. 1/2 = 0.5)
double ToDouble() const
Return this fraction as a double (i.e. 1/2 = 0.5)
This class creates a mapping between 2 different frame rates, applying a specific pull-down technique...
ReaderBase * Reader()
Get the current reader.
int GetSamplesPerFrame(openshot::Fraction fps, int sample_rate, int channels)
Calculate the # of samples per video frame (for the current frame number)
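A worked example of the calculation, assuming 24/1 fps and a 44100 Hz stream (numbers hypothetical):

    openshot::Fraction fps(24, 1);
    double exact = 44100 / fps.ToDouble();   // 1837.5 samples per video frame

Since 1837.5 is not an integer, per-frame counts have to vary (e.g. 1837 samples on some frames, 1838 on others) so that rounding error does not accumulate over the length of the clip.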
This class uses the ImageMagick++ libraries, to open image files, and return openshot::Frame objects ...
Exception for invalid JSON.
A Keyframe is a collection of Point instances, which is used to vary a number or property over time.
int GetInt(int64_t index) const
Get the rounded INT value at a specific index.
void SetJsonValue(const Json::Value root)
Load Json::Value into this object.
double GetDelta(int64_t index) const
Get the change in Y value (from the previous Y value)
int64_t GetLength() const
int64_t GetLong(int64_t index) const
Get the rounded LONG value at a specific index.
double GetValue(int64_t index) const
Get the value at a specific index.
Json::Value JsonValue() const
Generate Json::Value for this object.
bool IsIncreasing(int index) const
Get the direction of the curve at a specific index (increasing or decreasing)
int64_t GetCount() const
Get the number of points in this keyframe.
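A small sketch of the Keyframe API above; AddPoint is assumed from libopenshot and does not appear in this listing:

    openshot::Keyframe curve;
    curve.AddPoint(1, 0.0);         // (frame, value)
    curve.AddPoint(100, 1.0);
    double v = curve.GetValue(50);  // interpolated value between 0.0 and 1.0
    int64_t n = curve.GetCount();   // 2 points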
Exception for frames that are out of bounds.
This class uses the Qt library, to open image files, and return openshot::Frame objects containing th...
This abstract class is the base class, used by all readers in libopenshot.
virtual bool IsOpen()=0
Determine if reader is open or closed.
virtual std::string Name()=0
Return the type name of the class.
openshot::ReaderInfo info
Information about the current media file.
virtual void SetJsonValue(const Json::Value root)=0
Load Json::Value into this object.
virtual Json::Value JsonValue() const =0
Generate Json::Value for this object.
virtual void Open()=0
Open the reader (and start consuming resources, such as images or video files)
virtual std::shared_ptr< openshot::Frame > GetFrame(int64_t number)=0
openshot::ClipBase * ParentClip()
Parent clip object of this reader (which can be unparented and NULL)
virtual void Close()=0
Close the reader (and any resources it was consuming)
Exception when a reader is closed, and a frame is requested.
This class uses the ImageMagick++ libraries, to create frames with "Text", and return openshot::Frame...
This class represents a timeline (used for building generic timeline implementations)
This class represents a timeline.
void AddTrackedObject(std::shared_ptr< openshot::TrackedObjectBase > trackedObject)
Add to the tracked_objects map a pointer to a tracked object (TrackedObjectBBox)
std::shared_ptr< openshot::TrackedObjectBase > GetTrackedObject(std::string id) const
Return a tracked object pointer by its ID.
openshot::Clip * GetClip(const std::string &id)
Look up a single clip by ID.
std::shared_ptr< openshot::Frame > apply_effects(std::shared_ptr< openshot::Frame > frame, int64_t timeline_frame_number, int layer)
Apply global/timeline effects to the source frame (if any)
void AppendDebugMethod(std::string method_name, std::string arg1_name="", float arg1_value=-1.0, std::string arg2_name="", float arg2_value=-1.0, std::string arg3_name="", float arg3_value=-1.0, std::string arg4_name="", float arg4_value=-1.0, std::string arg5_name="", float arg5_value=-1.0, std::string arg6_name="", float arg6_value=-1.0)
Append debug information.
static ZmqLogger * Instance()
Create or get an instance of this logger singleton (invoke the class with this method)
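The debug strings scattered through the Clip.cpp fragments above all route through this singleton, e.g. (arguments taken from the GetFrame fragment):

    openshot::ZmqLogger::Instance()->AppendDebugMethod(
        "Clip::GetFrame (Cached frame found)",
        "requested_frame", clip_frame_number);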
This namespace is the default namespace for all code in the openshot library.
AnchorType
This enumeration determines what parent a clip should be aligned to.
@ ANCHOR_CANVAS
Anchor the clip to the canvas.
ChunkVersion
This enumeration allows the user to choose which version of the chunk they would like (low,...
GravityType
This enumeration determines how clips are aligned to their parent container.
@ GRAVITY_TOP_LEFT
Align clip to the top left of its parent.
@ GRAVITY_LEFT
Align clip to the left of its parent (middle aligned)
@ GRAVITY_TOP_RIGHT
Align clip to the top right of its parent.
@ GRAVITY_RIGHT
Align clip to the right of its parent (middle aligned)
@ GRAVITY_BOTTOM_LEFT
Align clip to the bottom left of its parent.
@ GRAVITY_BOTTOM
Align clip to the bottom center of its parent.
@ GRAVITY_TOP
Align clip to the top center of its parent.
@ GRAVITY_BOTTOM_RIGHT
Align clip to the bottom right of its parent.
@ GRAVITY_CENTER
Align clip to the center of its parent (middle aligned)
ScaleType
This enumeration determines how clips are scaled to fit their parent container.
@ SCALE_FIT
Scale the clip until either height or width fills the canvas (with no cropping)
@ SCALE_STRETCH
Scale the clip until both height and width fill the canvas (distort to fit)
@ SCALE_CROP
Scale the clip until both height and width fill the canvas (cropping the overlap)
@ SCALE_NONE
Do not scale the clip.
VolumeMixType
This enumeration determines the strategy when mixing audio with other clips.
@ VOLUME_MIX_AVERAGE
Evenly divide the overlapping clips volume keyframes, so that the sum does not exceed 100%.
@ VOLUME_MIX_NONE
Do not apply any volume mixing adjustments. Just add the samples together.
@ VOLUME_MIX_REDUCE
Reduce volume by about 25%, and then mix (louder, but could cause pops if the sum exceeds 100%)
FrameDisplayType
This enumeration determines the display format of the clip's frame number (if any)....
@ FRAME_DISPLAY_CLIP
Display the clip's internal frame number.
@ FRAME_DISPLAY_TIMELINE
Display the timeline's frame number.
@ FRAME_DISPLAY_BOTH
Display both the clip's and timeline's frame number.
@ FRAME_DISPLAY_NONE
Do not display the frame number.
const Json::Value stringToJson(const std::string value)
This struct holds the associated video frame and starting sample # for an audio packet.
bool has_tracked_object
Determines if this effect tracks objects through the clip.
float duration
Length of time (in seconds)
int width
The width of the video (in pixels)
int channels
The number of audio channels used in the audio stream.
openshot::Fraction fps
Frames per second, as a fraction (i.e. 24/1 = 24 fps)
int height
The height of the video (in pixels)
int64_t video_length
The number of frames in the video stream.
std::map< std::string, std::string > metadata
An optional map/dictionary of metadata for this reader.
openshot::ChannelLayout channel_layout
The channel layout (mono, stereo, 5 point surround, etc...)
int sample_rate
The number of audio samples per second (44100 is a common sample rate)
This struct contains info about the current Timeline clip instance.
bool is_top_clip
Is clip on top (if overlapping another clip)