OpenShot Library | libopenshot  0.2.7
ObjectDetection.cpp
/**
 * @file
 * @brief Source file for Object Detection effect class
 * @author Jonathan Thomas <jonathan@openshot.org>
 * @author Brenno Caldato <brenno.caldato@outlook.com>
 *
 * @ref License
 */

/* LICENSE
 *
 * Copyright (c) 2008-2019 OpenShot Studios, LLC
 * <http://www.openshotstudios.com/>. This file is part of
 * OpenShot Library (libopenshot), an open-source project dedicated to
 * delivering high quality video editing and animation solutions to the
 * world. For more information visit <http://www.openshot.org/>.
 *
 * OpenShot Library (libopenshot) is free software: you can redistribute it
 * and/or modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation, either version 3 of the
 * License, or (at your option) any later version.
 *
 * OpenShot Library (libopenshot) is distributed in the hope that it will be
 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with OpenShot Library. If not, see <http://www.gnu.org/licenses/>.
 */

#include <algorithm>
#include <fstream>
#include <iostream>
#include <sstream>

#include "effects/ObjectDetection.h"
#include "effects/Tracker.h"
#include "Exceptions.h"
#include "Timeline.h"

#include <QImage>
#include <QPainter>
#include <QRectF>

using namespace std;
using namespace openshot;

/// Blank constructor, useful when using Json to load the effect properties
ObjectDetection::ObjectDetection(std::string clipObDetectDataPath)
{
    // Init effect properties
    init_effect_details();

    // Try to load the tracker data from protobuf
    LoadObjDetectdData(clipObDetectDataPath);

    // Initialize the selected object index as the first object index,
    // guarding against an empty map (e.g. when the protobuf file failed to load)
    selectedObjectIndex = trackedObjects.empty() ? 0 : trackedObjects.begin()->first;
}

// Default constructor
ObjectDetection::ObjectDetection()
{
    // Init effect properties
    init_effect_details();

    // No tracked objects are loaded yet, so fall back to index 0;
    // dereferencing begin() on an empty map would be undefined behavior
    selectedObjectIndex = trackedObjects.empty() ? 0 : trackedObjects.begin()->first;
}

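/* A minimal construction sketch. The path below is hypothetical; the string
 * constructor calls LoadObjDetectdData() itself, while the default constructor
 * leaves the effect empty until SetJson()/SetJsonValue() configures it.
 *
 * @code
 * // Load detections saved by the object-detection pipeline (hypothetical path)
 * ObjectDetection detector("/tmp/detections.pbdata");
 *
 * // Or start empty and configure through JSON later
 * ObjectDetection empty_detector;
 * @endcode
 */
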
// Init effect settings
void ObjectDetection::init_effect_details()
{
    // Initialize the values of the EffectInfo struct
    InitEffectInfo();

    // Set the effect info
    info.class_name = "Object Detector";
    info.name = "Object Detector";
    info.description = "Detect objects through the video.";
    info.has_audio = false;
    info.has_video = true;
    info.has_tracked_object = true;
}

// This method is required for all derived classes of EffectBase, and returns a
// modified openshot::Frame object
std::shared_ptr<Frame> ObjectDetection::GetFrame(std::shared_ptr<Frame> frame, int64_t frame_number)
{
    // Get the frame's image
    cv::Mat cv_image = frame->GetImageCV();

    // Check if the frame image isn't empty
    if (cv_image.empty()) {
        return frame;
    }

    // Qt rectangles that will hold the positions of the bounding boxes
    std::vector<QRectF> boxRects;
    // Images of the TrackedObjects' child clips
    std::vector<std::shared_ptr<QImage>> childClipImages;

    // Check if track data exists for the requested frame
    if (detectionsData.find(frame_number) != detectionsData.end()) {
        float fw = cv_image.size().width;
        float fh = cv_image.size().height;

        DetectionData detections = detectionsData[frame_number];
        for (size_t i = 0; i < detections.boxes.size(); i++) {

            // Skip boxes with confidence below the threshold
            if (detections.confidences.at(i) < confidence_threshold) {
                continue;
            }

            // Display only the selected classes
            if (display_classes.size() > 0 &&
                std::find(display_classes.begin(), display_classes.end(),
                          classNames[detections.classIds.at(i)]) == display_classes.end()) {
                continue;
            }

            // Get the object id
            int objectId = detections.objectIds.at(i);

            // Search for the object in the trackedObjects map
            auto trackedObject_it = trackedObjects.find(objectId);
            if (trackedObject_it == trackedObjects.end()) {
                // No tracked object with this id; skip the detection
                continue;
            }

            // Cast the object as TrackedObjectBBox
            std::shared_ptr<TrackedObjectBBox> trackedObject =
                std::static_pointer_cast<TrackedObjectBBox>(trackedObject_it->second);

            // Check if the tracked object has data for this frame
            if (trackedObject->Contains(frame_number) &&
                trackedObject->visible.GetValue(frame_number) == 1)
            {
                // Get the bounding box of the given frame
                BBox trackedBox = trackedObject->GetBox(frame_number);
                // display_box_text uses 0 = On and 1 = Off, hence the negation
                bool draw_text = !display_box_text.GetValue(frame_number);
                std::vector<int> stroke_rgba = trackedObject->stroke.GetColorRGBA(frame_number);
                int stroke_width = trackedObject->stroke_width.GetValue(frame_number);
                float stroke_alpha = trackedObject->stroke_alpha.GetValue(frame_number);
                std::vector<int> bg_rgba = trackedObject->background.GetColorRGBA(frame_number);
                float bg_alpha = trackedObject->background_alpha.GetValue(frame_number);

                // Alternative rotated-rectangle path (currently disabled); it would
                // draw through DrawRectangleRGBA() instead of drawPred():
                // cv::RotatedRect box(cv::Point2f((int)(trackedBox.cx*fw), (int)(trackedBox.cy*fh)),
                //                     cv::Size2f((int)(trackedBox.width*fw), (int)(trackedBox.height*fh)),
                //                     (int)(trackedBox.angle));
                // DrawRectangleRGBA(cv_image, box, bg_rgba, bg_alpha, 1, true);
                // DrawRectangleRGBA(cv_image, box, stroke_rgba, stroke_alpha, stroke_width, false);

                // Convert the normalized, center-based box into a pixel-space
                // rectangle anchored at its top-left corner
                cv::Rect2d box(
                    (int)((trackedBox.cx - trackedBox.width / 2) * fw),
                    (int)((trackedBox.cy - trackedBox.height / 2) * fh),
                    (int)(trackedBox.width * fw),
                    (int)(trackedBox.height * fh)
                );

                // If the Draw Box property is off, make the box invisible
                // (an alpha of 1.0 means fully transparent in drawPred below)
                if (trackedObject->draw_box.GetValue(frame_number) == 0)
                {
                    bg_alpha = 1.0;
                    stroke_alpha = 1.0;
                }

                // Draw the filled background first, then the outline on top of it
                drawPred(detections.classIds.at(i), detections.confidences.at(i),
                         box, cv_image, detections.objectIds.at(i), bg_rgba, bg_alpha, 1, true, draw_text);
                drawPred(detections.classIds.at(i), detections.confidences.at(i),
                         box, cv_image, detections.objectIds.at(i), stroke_rgba, stroke_alpha, stroke_width, false, draw_text);

                // Get the detected object's child clip
                if (trackedObject->ChildClipId() != "") {
                    // Cast the parent timeline of this effect
                    Timeline* parentTimeline = (Timeline*) ParentTimeline();
                    if (parentTimeline) {
                        // Get the tracked object's child clip
                        Clip* childClip = parentTimeline->GetClip(trackedObject->ChildClipId());

                        if (childClip) {
                            // Get the image of the child clip for this frame
                            std::shared_ptr<Frame> f(new Frame(1, frame->GetWidth(), frame->GetHeight(), "#00000000"));
                            std::shared_ptr<Frame> childClipFrame = childClip->GetFrame(f, frame_number);
                            childClipImages.push_back(childClipFrame->GetImage());

                            // Set the Qt rectangle with the bounding-box properties
                            QRectF boxRect;
                            boxRect.setRect((int)((trackedBox.cx - trackedBox.width / 2) * fw),
                                            (int)((trackedBox.cy - trackedBox.height / 2) * fh),
                                            (int)(trackedBox.width * fw),
                                            (int)(trackedBox.height * fh));
                            boxRects.push_back(boxRect);
                        }
                    }
                }
            }
        }
    }

    // Update the Qt image with the new OpenCV frame
    frame->SetImageCV(cv_image);

    // Compose the child clip images inside the bounding boxes
    if (boxRects.size() > 0) {
        // Get the frame image
        QImage frameImage = *(frame->GetImage());
        QPainter painter(&frameImage);
        // Note: the loop index was uninitialized in the original source; it must start at 0
        for (size_t i = 0; i < boxRects.size(); i++) {
            // Draw the child clip image inside the bounding box
            painter.drawImage(boxRects[i], *childClipImages[i],
                              QRectF(0, 0, frameImage.size().width(), frameImage.size().height()));
        }
        painter.end();
        // Set the frame image as the composed image
        frame->AddImage(std::make_shared<QImage>(frameImage));
    }

    return frame;
}

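/* Coordinate sketch for the conversion used in GetFrame() above: a BBox stores
 * its center (cx, cy) and size (width, height) normalized to [0, 1], so the
 * pixel-space top-left corner is recovered with the frame dimensions:
 *
 * @code
 * // fw/fh are the frame's width/height in pixels
 * int left = (int)((trackedBox.cx - trackedBox.width  / 2) * fw);
 * int top  = (int)((trackedBox.cy - trackedBox.height / 2) * fh);
 * int w    = (int)(trackedBox.width  * fw);
 * int h    = (int)(trackedBox.height * fh);
 * @endcode
 */
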
void ObjectDetection::DrawRectangleRGBA(cv::Mat &frame_image, cv::RotatedRect box, std::vector<int> color, float alpha,
                                        int thickness, bool is_background)
{
    // Get the bounding box vertices
    cv::Point2f vertices2f[4];
    box.points(vertices2f);

    // TODO: take a rectangle of frame_image by reference and draw on top of that to improve speed;
    // select the min enclosing rectangle to draw on a small portion of the image
    // cv::Rect rect = box.boundingRect();
    // cv::Mat image = frame_image(rect);

    if (is_background) {
        cv::Mat overlayFrame;
        frame_image.copyTo(overlayFrame);

        // Draw the bounding box background; fillConvexPoly needs integer points
        cv::Point vertices[4];
        for (int i = 0; i < 4; ++i) {
            vertices[i] = vertices2f[i];
        }

        cv::fillConvexPoly(overlayFrame, vertices, 4, cv::Scalar(color[2], color[1], color[0]), cv::LINE_AA);
        // Blend the overlay back into the frame; alpha = 0 is opaque, alpha = 1 is invisible
        cv::addWeighted(overlayFrame, 1 - alpha, frame_image, alpha, 0, frame_image);
    }
    else {
        cv::Mat overlayFrame;
        frame_image.copyTo(overlayFrame);

        // Draw the bounding box outline, one edge at a time
        for (int i = 0; i < 4; i++)
        {
            cv::line(overlayFrame, vertices2f[i], vertices2f[(i + 1) % 4],
                     cv::Scalar(color[2], color[1], color[0]), thickness, cv::LINE_AA);
        }

        // Blend the overlay back into the frame
        cv::addWeighted(overlayFrame, 1 - alpha, frame_image, alpha, 0, frame_image);
    }
}

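/* The opacity trick used above, isolated: drawing on a copy and blending it
 * back with cv::addWeighted() means alpha = 0 yields a fully opaque shape and
 * alpha = 1 leaves the original frame untouched.
 *
 * @code
 * cv::Mat overlay;
 * frame_image.copyTo(overlay);
 * cv::circle(overlay, cv::Point(100, 100), 40, cv::Scalar(0, 0, 255), cv::FILLED); // any drawing call
 * cv::addWeighted(overlay, 1 - alpha, frame_image, alpha, 0, frame_image);
 * @endcode
 */
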
void ObjectDetection::drawPred(int classId, float conf, cv::Rect2d box, cv::Mat& frame, int objectNumber, std::vector<int> color,
                               float alpha, int thickness, bool is_background, bool display_text)
{
    if (is_background) {
        cv::Mat overlayFrame;
        frame.copyTo(overlayFrame);

        // Draw a filled rectangle for the bounding box background
        cv::rectangle(overlayFrame, box, cv::Scalar(color[2], color[1], color[0]), cv::FILLED);

        // Blend the overlay back into the frame; alpha = 0 is opaque, alpha = 1 is invisible
        cv::addWeighted(overlayFrame, 1 - alpha, frame, alpha, 0, frame);
    }
    else {
        cv::Mat overlayFrame;
        frame.copyTo(overlayFrame);

        // Draw the bounding box outline
        cv::rectangle(overlayFrame, box, cv::Scalar(color[2], color[1], color[0]), thickness);

        if (display_text) {
            // Get the label for the class name and its confidence
            std::string label = cv::format("%.2f", conf);
            if (!classNames.empty())
            {
                CV_Assert(classId < (int)classNames.size());
                label = classNames[classId] + ":" + label;
            }

            // Display the label at the top of the bounding box
            int baseLine;
            cv::Size labelSize = cv::getTextSize(label, cv::FONT_HERSHEY_SIMPLEX, 0.5, 1, &baseLine);

            double left = box.x;
            // Keep the label inside the image when the box touches the top edge
            double top = std::max((int)box.y, labelSize.height);

            cv::rectangle(overlayFrame,
                          cv::Point(left, top - round(1.025 * labelSize.height)),
                          cv::Point(left + round(1.025 * labelSize.width), top + baseLine),
                          cv::Scalar(color[2], color[1], color[0]), cv::FILLED);
            cv::putText(overlayFrame, label, cv::Point(left + 1, top),
                        cv::FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(0, 0, 0), 1);
        }
        // Blend the overlay back into the frame
        cv::addWeighted(overlayFrame, 1 - alpha, frame, alpha, 0, frame);
    }
}

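/* Label placement sketch: cv::getTextSize() reports the rendered text's size
 * and baseline, which drawPred() uses to size the filled label background
 * (the label string here is illustrative only):
 *
 * @code
 * int baseLine = 0;
 * cv::Size labelSize = cv::getTextSize("car:0.87", cv::FONT_HERSHEY_SIMPLEX, 0.5, 1, &baseLine);
 * // the background spans roughly labelSize.width x (labelSize.height + baseLine) pixels
 * @endcode
 */
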
// Load protobuf data file
bool ObjectDetection::LoadObjDetectdData(std::string inputFilePath)
{
    // Create tracker message
    pb_objdetect::ObjDetect objMessage;

    // Read the existing tracker message
    std::fstream input(inputFilePath, std::ios::in | std::ios::binary);
    if (!input) {
        std::cerr << "Failed to open protobuf data file: " << inputFilePath << std::endl;
        return false;
    }
    if (!objMessage.ParseFromIstream(&input)) {
        std::cerr << "Failed to parse protobuf message." << std::endl;
        return false;
    }

    // Make sure classNames, detectionsData and trackedObjects are empty
    classNames.clear();
    detectionsData.clear();
    trackedObjects.clear();

    // Seed the generator so the same random colors are produced on every load
    std::srand(1);
    // Get all class names and assign a color to each of them
    for (int i = 0; i < objMessage.classnames_size(); i++)
    {
        classNames.push_back(objMessage.classnames(i));
        classesColor.push_back(cv::Scalar(std::rand() % 205 + 50, std::rand() % 205 + 50, std::rand() % 205 + 50));
    }

    // Iterate over all frames of the saved message
    for (int i = 0; i < objMessage.frame_size(); i++)
    {
        // Create protobuf message reader
        const pb_objdetect::Frame& pbFrameData = objMessage.frame(i);

        // Get frame Id
        size_t id = pbFrameData.id();

        // Load bounding box data
        const google::protobuf::RepeatedPtrField<pb_objdetect::Frame_Box>& pBox = pbFrameData.bounding_box();

        // Construct data vectors related to detections in the current frame
        std::vector<int> classIds;
        std::vector<float> confidences;
        std::vector<cv::Rect_<float>> boxes;
        std::vector<int> objectIds;

        // Iterate through the detected objects (j, to avoid shadowing the frame index)
        for (int j = 0; j < pbFrameData.bounding_box_size(); j++)
        {
            // Get bounding box coordinates (top-left corner, normalized)
            float x = pBox.Get(j).x();
            float y = pBox.Get(j).y();
            float w = pBox.Get(j).w();
            float h = pBox.Get(j).h();
            // Get class Id (which will be assigned to a class name)
            int classId = pBox.Get(j).classid();
            // Get prediction confidence
            float confidence = pBox.Get(j).confidence();

            // Get the object Id
            int objectId = pBox.Get(j).objectid();

            // Search for the object id on the trackedObjects map
            auto trackedObject = trackedObjects.find(objectId);
            // Check if the object already exists on the map
            if (trackedObject != trackedObjects.end())
            {
                // Add a new BBox to it, converting the top-left corner to the box center
                trackedObject->second->AddBox(id, x + (w / 2), y + (h / 2), w, h, 0.0);
            }
            else
            {
                // There is no tracked object with that id, so insert a new one
                TrackedObjectBBox trackedObj((int)classesColor[classId](0), (int)classesColor[classId](1),
                                             (int)classesColor[classId](2), (int)0);
                trackedObj.AddBox(id, x + (w / 2), y + (h / 2), w, h, 0.0);

                std::shared_ptr<TrackedObjectBBox> trackedObjPtr = std::make_shared<TrackedObjectBBox>(trackedObj);
                ClipBase* parentClip = this->ParentClip();
                trackedObjPtr->ParentClip(parentClip);

                // Create a temp ID. This ID is necessary to initialize the object_id Json list;
                // it will be replaced by the one created in the UI
                trackedObjPtr->Id(std::to_string(objectId));
                trackedObjects.insert({objectId, trackedObjPtr});
            }

            // Create an OpenCV rectangle with the bounding box info
            cv::Rect_<float> box(x, y, w, h);

            // Push back data into vectors
            boxes.push_back(box);
            classIds.push_back(classId);
            confidences.push_back(confidence);
            objectIds.push_back(objectId);
        }

        // Assign data to the object detector map
        detectionsData[id] = DetectionData(classIds, confidences, boxes, id, objectIds);
    }

    // Delete all global objects allocated by libprotobuf.
    // Note: libprotobuf does not support being used again after this call,
    // so data files cannot be reloaded within the same process.
    google::protobuf::ShutdownProtobufLibrary();

    return true;
}

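/* Loading sketch. The file name is hypothetical; a false return means the file
 * could not be opened or the protobuf message failed to parse.
 *
 * @code
 * ObjectDetection detector;
 * if (!detector.LoadObjDetectdData("/tmp/detections.pbdata"))
 *     std::cerr << "No detection data loaded" << std::endl;
 * @endcode
 */
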
// Get the indexes and IDs of all visible objects in the given frame
std::string ObjectDetection::GetVisibleObjects(int64_t frame_number) const
{
    // Initialize the JSON objects
    Json::Value root;
    root["visible_objects_index"] = Json::Value(Json::arrayValue);
    root["visible_objects_id"] = Json::Value(Json::arrayValue);

    // Check if track data exists for the requested frame
    if (detectionsData.find(frame_number) == detectionsData.end()) {
        return root.toStyledString();
    }
    DetectionData detections = detectionsData.at(frame_number);

    // Iterate through the tracked objects
    for (size_t i = 0; i < detections.boxes.size(); i++) {
        // Skip boxes with confidence below the threshold
        if (detections.confidences.at(i) < confidence_threshold) {
            continue;
        }

        // Display only the selected classes
        if (display_classes.size() > 0 &&
            std::find(display_classes.begin(), display_classes.end(),
                      classNames[detections.classIds.at(i)]) == display_classes.end()) {
            continue;
        }

        int objectId = detections.objectIds.at(i);
        // Search for the object in the trackedObjects map, skipping unknown ids
        auto trackedObject = trackedObjects.find(objectId);
        if (trackedObject == trackedObjects.end()) {
            continue;
        }

        // Get the tracked object JSON properties for this frame
        Json::Value trackedObjectJSON = trackedObject->second->PropertiesJSON(frame_number);

        if (trackedObjectJSON["visible"]["value"].asBool() &&
            trackedObject->second->ExactlyContains(frame_number)) {
            // Save the object's index and ID if it's visible in this frame
            root["visible_objects_index"].append(trackedObject->first);
            root["visible_objects_id"].append(trackedObject->second->Id());
        }
    }

    return root.toStyledString();
}

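/* Reading the result, as a sketch: GetVisibleObjects() returns a styled JSON
 * string with two parallel arrays (frame number 42 is illustrative):
 *
 * @code
 * const Json::Value visible = openshot::stringToJson(detector.GetVisibleObjects(42));
 * for (Json::ArrayIndex i = 0; i < visible["visible_objects_id"].size(); i++)
 *     std::cout << visible["visible_objects_id"][i].asString() << std::endl;
 * @endcode
 */
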
// Generate JSON string of this object
std::string ObjectDetection::Json() const {

    // Return formatted string
    return JsonValue().toStyledString();
}

// Generate Json::Value for this object
Json::Value ObjectDetection::JsonValue() const {

    // Create root json object
    Json::Value root = EffectBase::JsonValue(); // get parent properties
    root["type"] = info.class_name;
    root["protobuf_data_path"] = protobuf_data_path;
    root["selected_object_index"] = selectedObjectIndex;
    root["confidence_threshold"] = confidence_threshold;
    root["display_box_text"] = display_box_text.JsonValue();

    // Add tracked objects' IDs to root
    Json::Value objects;
    for (auto const& trackedObject : trackedObjects) {
        Json::Value trackedObjectJSON = trackedObject.second->JsonValue();
        // add object json
        objects[trackedObject.second->Id()] = trackedObjectJSON;
    }
    root["objects"] = objects;

    // return JsonValue
    return root;
}

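/* Shape of the JSON produced above (sketch; the values are illustrative only):
 *
 * @code
 * {
 *   "type": "Object Detector",
 *   "protobuf_data_path": "/tmp/detections.pbdata",
 *   "selected_object_index": 0,
 *   "confidence_threshold": 0.5,
 *   "display_box_text": { ... },              // serialized Keyframe
 *   "objects": { "<object id>": { ... } }     // one entry per tracked object
 * }
 * @endcode
 */
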
// Load JSON string into this object
void ObjectDetection::SetJson(const std::string value) {

    // Parse JSON string into JSON objects
    try
    {
        const Json::Value root = openshot::stringToJson(value);
        // Set all values that match
        SetJsonValue(root);
    }
    catch (const std::exception&)
    {
        // Error parsing JSON (or missing keys)
        throw InvalidJSON("JSON is invalid (missing keys or invalid data types)");
    }
}

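/* A configuration sketch; both keys below are handled by SetJsonValue():
 *
 * @code
 * ObjectDetection detector;
 * detector.SetJson("{\"confidence_threshold\": 0.6, \"class_filter\": \"car,person\"}");
 * @endcode
 */
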
// Load Json::Value into this object
void ObjectDetection::SetJsonValue(const Json::Value root) {
    // Set parent data
    EffectBase::SetJsonValue(root);

    // Set data from Json (if key is found and the path was not already set)
    if (!root["protobuf_data_path"].isNull() && protobuf_data_path.size() <= 1) {
        protobuf_data_path = root["protobuf_data_path"].asString();

        if (!LoadObjDetectdData(protobuf_data_path)) {
            // Reset the path before throwing, so a failed load is not retained
            protobuf_data_path = "";
            throw InvalidFile("Invalid protobuf data path", "");
        }
    }

    // Set the selected object index
    if (!root["selected_object_index"].isNull())
        selectedObjectIndex = root["selected_object_index"].asInt();

    if (!root["confidence_threshold"].isNull())
        confidence_threshold = root["confidence_threshold"].asFloat();

    if (!root["display_box_text"].isNull())
        display_box_text.SetJsonValue(root["display_box_text"]);

    if (!root["class_filter"].isNull()) {
        class_filter = root["class_filter"].asString();

        // Parse the comma-separated class list into display_classes
        std::stringstream ss(class_filter);
        display_classes.clear();
        while (ss.good())
        {
            std::string substr;
            std::getline(ss, substr, ',');
            display_classes.push_back(substr);
        }
    }

    if (!root["objects"].isNull()) {
        for (auto const& trackedObject : trackedObjects) {
            std::string obj_id = std::to_string(trackedObject.first);
            if (!root["objects"][obj_id].isNull()) {
                trackedObject.second->SetJsonValue(root["objects"][obj_id]);
            }
        }
    }

    // Set the tracked objects' ids
    if (!root["objects_id"].isNull()) {
        for (auto const& trackedObject : trackedObjects) {
            Json::Value trackedObjectJSON;
            trackedObjectJSON["box_id"] = root["objects_id"][trackedObject.first].asString();
            trackedObject.second->SetJsonValue(trackedObjectJSON);
        }
    }
}

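/* Sketch of the "objects_id" remapping handled above: the UI passes an array of
 * its own IDs, indexed by the detector's numeric object id, and each tracked
 * object stores the value under "box_id" (the array content is hypothetical):
 *
 * @code
 * { "objects_id": ["ui-object-0", "ui-object-1"] }
 * @endcode
 */
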
// Get all properties for a specific frame
std::string ObjectDetection::PropertiesJSON(int64_t requested_frame) const {

    // Generate JSON properties list
    Json::Value root;

    Json::Value objects;
    if (trackedObjects.count(selectedObjectIndex) != 0) {
        auto selectedObject = trackedObjects.at(selectedObjectIndex);
        if (selectedObject) {
            Json::Value trackedObjectJSON = selectedObject->PropertiesJSON(requested_frame);
            // add object json
            objects[selectedObject->Id()] = trackedObjectJSON;
        }
    }
    root["objects"] = objects;

    root["selected_object_index"] = add_property_json("Selected Object", selectedObjectIndex, "int", "", NULL, 0, 200, false, requested_frame);
    root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame);
    root["position"] = add_property_json("Position", Position(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
    root["layer"] = add_property_json("Track", Layer(), "int", "", NULL, 0, 20, false, requested_frame);
    root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
    root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
    root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 1000 * 60 * 30, true, requested_frame);
    root["confidence_threshold"] = add_property_json("Confidence Threshold", confidence_threshold, "float", "", NULL, 0, 1, false, requested_frame);
    root["class_filter"] = add_property_json("Class Filter", 0.0, "string", class_filter, NULL, -1, -1, false, requested_frame);

    root["display_box_text"] = add_property_json("Draw Box Text", display_box_text.GetValue(requested_frame), "int", "", &display_box_text, 0, 1.0, false, requested_frame);
    root["display_box_text"]["choices"].append(add_property_choice_json("Off", 1, display_box_text.GetValue(requested_frame)));
    root["display_box_text"]["choices"].append(add_property_choice_json("On", 0, display_box_text.GetValue(requested_frame)));

    // Return formatted string
    return root.toStyledString();
}
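
/* Property sketch: the returned JSON drives the editor UI. Note that for
 * "display_box_text" the choice values are inverted (Off = 1, On = 0), which is
 * why GetFrame() negates the keyframe value before drawing labels.
 *
 * @code
 * const Json::Value props = openshot::stringToJson(detector.PropertiesJSON(1));
 * float threshold = props["confidence_threshold"]["value"].asFloat();
 * @endcode
 */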