FfmpegWriter.cpp
1 /*
2  * Copyright (C) 2006-2018 Istituto Italiano di Tecnologia (IIT)
3  * Copyright (C) 2006-2010 RobotCub Consortium
4  * All rights reserved.
5  *
6  * This software may be modified and distributed under the terms of the
7  * BSD-3-Clause license. See the accompanying LICENSE file for details.
8  */
9 
10 /*
11  * Most of this file is from the output_example.c of ffmpeg -
12  * copyright/copypolicy statement follows --
13  */
14 
15 /*
16  * Libavformat API example: Output a media file in any supported
17  * libavformat format. The default codecs are used.
18  *
19  * Copyright (c) 2003 Fabrice Bellard
20  *
21  * Permission is hereby granted, free of charge, to any person obtaining a copy
22  * of this software and associated documentation files (the "Software"), to deal
23  * in the Software without restriction, including without limitation the rights
24  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
25  * copies of the Software, and to permit persons to whom the Software is
26  * furnished to do so, subject to the following conditions:
27  *
28  * The above copyright notice and this permission notice shall be included in
29  * all copies or substantial portions of the Software.
30  *
31  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
32  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
33  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
34  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
35  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
36  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
37  * THE SOFTWARE.
38  */
39 
40 
41 #include "FfmpegWriter.h"
42 #include "ffmpeg_api.h"
43 
44 #include <yarp/os/all.h>
45 #include <yarp/sig/all.h>
46 
47 #include <cstdio>
48 #include <cstdlib>
49 #include <cstring>
50 #include <cmath>
51 
52 #ifndef M_PI
53 #define M_PI 3.1415926535897931
54 #endif
55 
56 #define STREAM_FRAME_RATE 25 /* 25 images/s */
57 #define STREAM_PIX_FMT AV_PIX_FMT_YUV420P /* default pix_fmt */
58 #define STREAM_PIX_WORK AV_PIX_FMT_RGB24
59 
60 using namespace yarp::os;
61 using namespace yarp::dev;
62 using namespace yarp::sig;
63 using namespace yarp::sig::file;
64 
65 
66 /**************************************************************/
67 /* audio output */
68 
69 float t, tincr, tincr2;
70 
71 int16_t *samples;
72 int samples_size;
73 int samples_at;
74 int samples_channels;
75 
76 uint8_t *audio_outbuf;
77 int audio_outbuf_size;
78 int audio_input_frame_size;
79 
80 /*
81  * add an audio output stream
82  */
83 static AVStream *add_audio_stream(AVFormatContext *oc, AVCodecID codec_id)
84 {
85  AVCodecContext *c;
86  AVStream *st;
87 
88  st = avformat_new_stream(oc, NULL);
89  if (!st) {
90  fprintf(stderr, "Could not alloc stream\n");
91  ::exit(1);
92  }
93 
94  c = st->codec;
95  c->codec_id = codec_id;
96  c->codec_type = AVMEDIA_TYPE_AUDIO;
97 
98  /* put sample parameters */
99  c->bit_rate = 64000;
100  c->sample_rate = 44100;
101  c->channels = 2;
102  return st;
103 }
104 
105 static void open_audio(AVFormatContext *oc, AVStream *st)
106 {
107  printf("Opening audio stream\n");
108  AVCodecContext *c;
109  AVCodec *codec;
110 
111  c = st->codec;
112 
113  /* find the audio encoder */
114  codec = avcodec_find_encoder(c->codec_id);
115  if (!codec) {
116  fprintf(stderr, "audio codec not found\n");
117  ::exit(1);
118  }
119 
120  /* open it */
121  if (avcodec_open2(c, codec, nullptr) < 0) {
122  fprintf(stderr, "could not open codec\n");
123  ::exit(1);
124  }
125 
126  /* init signal generator */
127  t = 0;
128  tincr = 2 * M_PI * 110.0 / c->sample_rate;
129  /* increment frequency by 110 Hz per second */
130  tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;
131 
132  audio_outbuf_size = 10000;
133  audio_outbuf = (uint8_t*)av_malloc(audio_outbuf_size);
134 
135  /* ugly hack for PCM codecs (will be removed ASAP with new PCM
136  support) to compute the input frame size in samples */
137  if (c->frame_size <= 1) {
138  audio_input_frame_size = audio_outbuf_size / c->channels;
139  switch(st->codec->codec_id) {
140  case AV_CODEC_ID_PCM_S16LE:
141  case AV_CODEC_ID_PCM_S16BE:
142  case AV_CODEC_ID_PCM_U16LE:
143  case AV_CODEC_ID_PCM_U16BE:
144  audio_input_frame_size >>= 1;
145  break;
146  default:
147  break;
148  }
149  } else {
150  audio_input_frame_size = c->frame_size;
151  }
152  samples_size = audio_input_frame_size;
153  samples_at = 0;
154  samples_channels = c->channels;
155  samples = (int16_t*)av_malloc(samples_size*2*samples_channels);
156 
157 
158  printf("FRAME SIZE is %d / samples size is %d\n",
159  c->frame_size,
160  samples_size);
161  ::exit(1);
162 }
163 
164 /* prepare a 16 bit dummy audio frame of 'frame_size' samples and
165  'nb_channels' channels */
166 static void get_audio_frame(int16_t *samples, int frame_size, int nb_channels)
167 {
168  int j, i, v;
169  int16_t *q;
170 
171  q = samples;
172  for(j=0;j<frame_size;j++) {
173  v = (int)(sin(t) * 10000);
174  for(i = 0; i < nb_channels; i++)
175  *q++ = v;
176  t += tincr;
177  tincr += tincr2;
178  }
179 }
180 
181 static void make_audio_frame(AVCodecContext *c, AVFrame * &frame,
182  void *&samples) {
183  frame = av_frame_alloc();
184  if (!frame) {
185  fprintf(stderr, "Could not allocate audio frame\n");
186  ::exit(1);
187  }
188  frame->nb_samples = c->frame_size;
189  frame->format = c->sample_fmt;
190  frame->channel_layout = c->channel_layout;
191  int buffer_size = av_samples_get_buffer_size(nullptr, c->channels,
192  c->frame_size,
193  c->sample_fmt, 0);
194  if (buffer_size < 0) {
195  fprintf(stderr, "Could not get sample buffer size\n");
196  ::exit(1);
197  }
198  samples = av_malloc(buffer_size);
199  if (!samples) {
200  fprintf(stderr, "Could not allocate %d bytes for samples buffer\n",
201  buffer_size);
202  ::exit(1);
203  }
204  /* setup the data pointers in the AVFrame */
205  int ret = avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt,
206  (const uint8_t*)samples, buffer_size, 0);
207  if (ret < 0) {
208  fprintf(stderr, "Could not setup audio frame\n");
209  ::exit(1);
210  }
211 }
212 
213 static void write_audio_frame(AVFormatContext *oc, AVStream *st)
214 {
215  AVCodecContext *c;
216  AVPacket pkt;
217  av_init_packet(&pkt);
218 
219  c = st->codec;
220 
221  get_audio_frame(samples, audio_input_frame_size, c->channels);
222 
223  AVFrame *frame;
224  void *samples;
225  make_audio_frame(c,frame,samples);
226  AVPacket tmp;
227  int got_packet = 0;
228  av_init_packet(&tmp);
229  tmp.data = audio_outbuf;
230  tmp.size = audio_outbuf_size;
231  pkt.size = avcodec_encode_audio2(c, &tmp, frame, &got_packet);
232  if (tmp.side_data_elems > 0) {
233  for (int i = 0; i < tmp.side_data_elems; i++) {
234  av_free(tmp.side_data[i].data);
235  }
236  av_freep(&tmp.side_data);
237  tmp.side_data_elems = 0;
238  }
239  av_freep(&samples);
240  av_frame_free(&frame);
241 
242  pkt.pts= av_rescale_q(c->coded_frame->pts, c->time_base, st->time_base);
243  pkt.flags |= AV_PKT_FLAG_KEY;
244  pkt.stream_index= st->index;
245  pkt.data= audio_outbuf;
246 
247  /* write the compressed frame in the media file */
248  if (av_write_frame(oc, &pkt) != 0) {
249  fprintf(stderr, "Error while writing audio frame\n");
250  ::exit(1);
251  } else {
252  printf("Wrote some audio\n");
253  }
254 }
255 
256 static void write_audio_frame(AVFormatContext *oc, AVStream *st, Sound& snd)
257 {
258  printf("Preparing to write audio (%d left over)\n", samples_at);
259  AVCodecContext *c;
260  int key = 1;
261 
262  c = st->codec;
263 
264  size_t at = 0;
265  while (at<snd.getSamples()) {
266 
267  int avail = samples_size - samples_at;
268  int remain = snd.getSamples() - at;
269  int chan = snd.getChannels();
270  if (remain<avail) { avail = remain; }
271  for (int i=0; i<avail; i++) {
272  int offset = samples_at*samples_channels;
273  for (int j=0; j<samples_channels; j++) {
274  samples[offset+j] = snd.get(at,j%chan);
275  }
276  samples_at++;
277  at++;
278  }
279  avail = samples_size - samples_at;
280 
281  if (avail==0) {
282  AVPacket pkt;
283  av_init_packet(&pkt);
284 
285 
286  AVFrame *frame;
287  void *samples;
288  make_audio_frame(c,frame,samples);
289  AVPacket tmp;
290  int got_packet = 0;
291  av_init_packet(&tmp);
292  tmp.data = audio_outbuf;
293  tmp.size = audio_outbuf_size;
294  pkt.size = avcodec_encode_audio2(c, &tmp, frame, &got_packet);
295  if (tmp.side_data_elems > 0) {
296  for (int i = 0; i < tmp.side_data_elems; i++) {
297  av_free(tmp.side_data[i].data);
298  }
299  av_freep(&tmp.side_data);
300  tmp.side_data_elems = 0;
301  }
302  av_freep(&samples);
303  av_frame_free(&frame);
304 
305  pkt.pts= av_rescale_q(c->coded_frame->pts, c->time_base,
306  st->time_base);
307  pkt.dts = pkt.pts;
308  //printf("(%d)", pkt.size);
309  if (key) {
310  pkt.flags |= AV_PKT_FLAG_KEY;
311  key = 0;
312  }
313  pkt.stream_index= st->index;
314  pkt.data = audio_outbuf;
315  pkt.duration = 0;
316 
317 
318  /* write the compressed frame in the media file */
319  printf("+");
320  fflush(stdout);
321  if (av_write_frame(oc, &pkt) != 0) {
322  fprintf(stderr, "Error while writing audio frame\n");
323  ::exit(1);
324  } else {
325  printf(".");
326  }
327  samples_at = 0;
328  }
329  }
330  printf(" wrote audio\n");
331 }
332 
333 static void close_audio(AVFormatContext *oc, AVStream *st)
334 {
335  avcodec_close(st->codec);
336 
337  av_free(samples);
338  av_free(audio_outbuf);
339 }
340 
341 /**************************************************************/
342 /* video output */
343 
344 
345 /* add a video output stream */
346 static AVStream *add_video_stream(AVFormatContext *oc, AVCodecID codec_id,
347  int w, int h, int framerate)
348 {
349  AVCodecContext *c;
350  AVStream *st;
351 
352  st = avformat_new_stream(oc, NULL);
353  if (!st) {
354  fprintf(stderr, "Could not alloc stream\n");
355  ::exit(1);
356  }
357 
358  c = st->codec;
359  c->codec_id = codec_id;
360  c->codec_type = AVMEDIA_TYPE_VIDEO;
361 
362  /* put sample parameters */
363  c->bit_rate = 400000;
364  /* resolution must be a multiple of two */
365  c->width = w;
366  c->height = h;
367  /* time base: this is the fundamental unit of time (in seconds) in terms
368  of which frame timestamps are represented. for fixed-fps content,
369  timebase should be 1/framerate and timestamp increments should be
370  identically 1. */
371  c->time_base.den = framerate;
372  c->time_base.num = 1;
373  c->gop_size = 12; /* emit one intra frame every twelve frames at most */
374  c->pix_fmt = STREAM_PIX_FMT;
375  if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
376  /* just for testing, we also add B frames */
377  c->max_b_frames = 2;
378  }
379  if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO){
380  /* needed to avoid using macroblocks in which some coeffs overflow.
381  This doesn't happen with normal video, it just happens here as the
382  motion of the chroma plane doesn't match the luma plane. */
383  c->mb_decision=2;
384  }
385  // some formats want stream headers to be separate
386  if(!strcmp(oc->oformat->name, "mp4") || !strcmp(oc->oformat->name, "mov") || !strcmp(oc->oformat->name, "3gp"))
387  c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
388 
389 
390  return st;
391 }
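
The time_base comment above is easier to follow with concrete numbers. With a 1/25 time base (25 images/s), each frame is one codec tick; av_rescale_q(), which write_video_frame() and write_audio_frame() later apply to every packet, converts those ticks into the stream's own time base. A minimal stand-alone sketch, not part of this file, with the 1/90000 stream time base assumed purely for illustration:

extern "C" {
#include <libavutil/rational.h>
#include <libavutil/mathematics.h>
}
#include <cstdint>
#include <cstdio>

int main() {
    AVRational codec_tb  = {1, 25};     // one tick per frame at 25 fps
    AVRational stream_tb = {1, 90000};  // example stream time base (assumed)
    for (int64_t frame = 0; frame < 3; frame++) {
        // a * bq / cq, rounded: frame * (1/25) / (1/90000) = frame * 3600
        int64_t pts = av_rescale_q(frame, codec_tb, stream_tb);
        printf("frame %lld -> stream pts %lld\n", (long long)frame, (long long)pts);
    }
    return 0;
}

It prints 0, 3600 and 7200: each 1/25 s frame advances the stream pts by 90000/25 = 3600 ticks.
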
392 
393 static AVFrame *alloc_picture(int pix_fmt, int width, int height)
394 {
395  AVFrame *picture;
396  uint8_t *picture_buf;
397  int size;
398 
399  picture = av_frame_alloc();
400  if (!picture)
401  return nullptr;
402  size = avpicture_get_size((AVPixelFormat)pix_fmt, width, height);
403  picture_buf = (uint8_t*)av_malloc(size);
404  if (!picture_buf) {
405  av_free(picture);
406  return nullptr;
407  }
408  avpicture_fill((AVPicture *)picture, picture_buf,
409  (AVPixelFormat)pix_fmt, width, height);
410  return picture;
411 }
412 
413 void FfmpegWriter::open_video(AVFormatContext *oc, AVStream *st)
414 {
415  printf("Opening video stream\n");
416  AVCodec *codec;
417  AVCodecContext *c;
418 
419  c = st->codec;
420 
421  /* find the video encoder */
422  codec = avcodec_find_encoder(c->codec_id);
423  if (!codec) {
424  fprintf(stderr, "video codec not found\n");
425  ::exit(1);
426  }
427 
428  /* open the codec */
429  if (avcodec_open2(c, codec, nullptr) < 0) {
430  fprintf(stderr, "could not open codec\n");
431  ::exit(1);
432  }
433 
434  video_outbuf = nullptr;
435  /* allocate output buffer */
436  /* XXX: API change will be done */
437  /* buffers passed into lav* can be allocated any way you prefer,
438  as long as they're aligned enough for the architecture, and
439  they're freed appropriately (such as using av_free for buffers
440  allocated with av_malloc) */
441  video_outbuf_size = 200000;
442  video_outbuf = (uint8_t*)av_malloc(video_outbuf_size);
443 
444  /* allocate the encoded raw picture */
445  picture = alloc_picture(c->pix_fmt, c->width, c->height);
446  if (!picture) {
447  fprintf(stderr, "Could not allocate picture\n");
448  ::exit(1);
449  }
450 
451  /* if the output format is not RGB24, then a temporary RGB24
452  picture is needed too. It is filled from the YARP image and then
453  converted to the required output format */
454  tmp_picture = nullptr;
455  if (c->pix_fmt != AV_PIX_FMT_RGB24) {
456  tmp_picture = alloc_picture(AV_PIX_FMT_RGB24, c->width, c->height);
457  if (!tmp_picture) {
458  fprintf(stderr, "Could not allocate temporary picture\n");
459  ::exit(1);
460  }
461  }
462 }
463 
464 static void fill_rgb_image(AVFrame *pict, int frame_index, int width,
465  int height, ImageOf<PixelRgb>& img)
466 {
467  int x, y;
468 
469  for(y=0;y<height;y++) {
470  for(x=0;x<width;x++) {
471  int base = y*(width*3);
472  pict->data[0][base + x*3] = img.safePixel(x,y).r;
473  pict->data[0][base + x*3+1] = img.safePixel(x,y).g;
474  pict->data[0][base + x*3+2] = img.safePixel(x,y).b;
475  }
476  }
477 }
478 
479 
480 void FfmpegWriter::write_video_frame(AVFormatContext *oc, AVStream *st,
481  ImageOf<PixelRgb>& img)
482 {
483  int out_size, ret;
484  AVCodecContext *c;
485 
486  c = st->codec;
487 
488  if (c->pix_fmt != AV_PIX_FMT_RGB24) {
489  fill_rgb_image(tmp_picture, frame_count, c->width, c->height, img);
490  stable_img_convert((AVPicture *)picture, c->pix_fmt,
491  (AVPicture *)tmp_picture, AV_PIX_FMT_RGB24,
492  c->width, c->height);
493  } else {
494  fill_rgb_image(picture, frame_count, c->width, c->height, img);
495  }
496 
497  /* encode the image */
498  AVPacket tmp;
499  int got_packet = 0;
500  av_init_packet(&tmp);
501  tmp.data = video_outbuf;
502  tmp.size = video_outbuf_size;
503  out_size = avcodec_encode_video2(c, &tmp, picture, &got_packet);
504  if (tmp.side_data_elems > 0) {
505  for (int i = 0; i < tmp.side_data_elems; i++) {
506  av_free(tmp.side_data[i].data);
507  }
508  av_freep(&tmp.side_data);
509  tmp.side_data_elems = 0;
510  }
511  /* if zero size, it means the image was buffered */
512  if (out_size > 0) {
513  AVPacket pkt;
514  av_init_packet(&pkt);
515 
516  pkt.pts= av_rescale_q(c->coded_frame->pts, c->time_base, st->time_base);
517  if(c->coded_frame->key_frame)
518  pkt.flags |= AV_PKT_FLAG_KEY;
519  pkt.stream_index= st->index;
520  pkt.data= video_outbuf;
521  pkt.size= out_size;
522 
523  /*
524  static int x = 0;
525  printf("%ld / %ld : %ld / %ld --> %d\n",
526  (long int) c->time_base.num,
527  (long int) c->time_base.den,
528  (long int) st->time_base.num,
529  (long int) st->time_base.den,
530  x);
531  pkt.pts = x;
532  x++;
533  */
534 
535  /* write the compressed frame in the media file */
536  ret = av_write_frame(oc, &pkt);
537  } else {
538  ret = 0;
539  }
540 
541  if (ret != 0) {
542  fprintf(stderr, "Error while writing video frame\n");
543  ::exit(1);
544  }
545  frame_count++;
546 }
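
A portability note for readers building this against newer FFmpeg: the AVStream::codec field and avcodec_encode_video2()/avcodec_encode_audio2(), used throughout this file, were deprecated upstream and later removed. The sketch below shows the replacement send/receive flow; it is not the code this plugin uses, and 'enc' stands for an already configured and opened AVCodecContext:

extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
}

int encode_and_write(AVCodecContext *enc, AVFrame *pic,
                     AVFormatContext *oc, AVStream *st)
{
    // pic == nullptr puts the encoder into draining (flush) mode
    int ret = avcodec_send_frame(enc, pic);
    if (ret < 0) return ret;

    AVPacket *pkt = av_packet_alloc();
    if (!pkt) return AVERROR(ENOMEM);

    // one submitted frame may produce zero, one or several packets
    while ((ret = avcodec_receive_packet(enc, pkt)) >= 0) {
        // rescale from the codec time base to the stream time base,
        // which this file does by hand with av_rescale_q()
        av_packet_rescale_ts(pkt, enc->time_base, st->time_base);
        pkt->stream_index = st->index;
        ret = av_write_frame(oc, pkt);   // same muxing call the code above uses
        av_packet_unref(pkt);
        if (ret < 0) break;
    }
    av_packet_free(&pkt);

    // EAGAIN means "no packet ready yet", AVERROR_EOF means "fully flushed"
    return (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ? 0 : ret;
}

The explicit receive loop is the main difference from the one-call-one-packet API above; it also removes the need for the fixed video_outbuf/audio_outbuf staging buffers.
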
547 
548 void FfmpegWriter::close_video(AVFormatContext *oc, AVStream *st)
549 {
550  avcodec_close(st->codec);
551  av_free(picture->data[0]);
552  av_free(picture);
553  if (tmp_picture) {
554  av_free(tmp_picture->data[0]);
555  av_free(tmp_picture);
556  }
557  av_free(video_outbuf);
558 }
559 
560 
561 
562 
563 /**************************************************************/
564 /* YARP adaptation */
565 
566 bool FfmpegWriter::open(yarp::os::Searchable & config) {
567 // printf("ffmpeg libavcodec version number %d.%d.%d\n", LIBAVCODEC_VERSION_MAJOR,
568 // LIBAVCODEC_VERSION_MINOR,
569 // LIBAVCODEC_VERSION_MICRO);
570 
571  ready = false;
572  savedConfig.fromString(config.toString());
573 
574  // open if possible; if not, it will be done later
575  return delayedOpen(config);
576 }
577 
578 
579 bool FfmpegWriter::delayedOpen(yarp::os::Searchable & config) {
580  //printf("DELAYED OPEN %s\n", config.toString().c_str());
581 
582  int w = config.check("width",Value(0),
583  "width of image (must be even)").asInt32();
584  int h = config.check("height",Value(0),
585  "height of image (must be even)").asInt32();
586  int framerate = config.check("framerate",Value(30),
587  "baseline images per second").asInt32();
588 
589  int sample_rate = 0;
590  int channels = 0;
591  bool audio = config.check("audio","should audio be included");
592  if (audio) {
593  sample_rate = config.check("sample_rate",Value(44100),
594  "audio samples per second").asInt32();
595  channels = config.check("channels",Value(1),
596  "number of audio channels").asInt32();
597  }
598 
599  filename = config.check("out",Value("movie.avi"),
600  "name of movie to write").asString();
601 
602  delayed = false;
603  if (w<=0||h<=0) {
604  delayed = true;
605  return true;
606  }
607  ready = true;
608 
609  /* initialize libavcodec, and register all codecs and formats */
610  av_register_all();
611 
612  /* auto detect the output format from the name. default is
613  mpeg. */
614  fmt = av_guess_format(nullptr, filename.c_str(), nullptr);
615  if (!fmt) {
616  printf("Could not deduce output format from file extension: using MPEG.\n");
617  fmt = av_guess_format("mpeg", nullptr, nullptr);
618  }
619  if (!fmt) {
620  fprintf(stderr, "Could not find suitable output format\n");
621  ::exit(1);
622  }
623 
624  /* allocate the output media context */
625  oc = avformat_alloc_context();
626  if (!oc) {
627  fprintf(stderr, "Memory error\n");
628  ::exit(1);
629  }
630  oc->oformat = fmt;
631  snprintf(oc->filename, sizeof(oc->filename), "%s", filename.c_str());
632 
633  /* add the audio and video streams using the default format codecs
634  and initialize the codecs */
635  video_st = nullptr;
636  audio_st = nullptr;
637  if (fmt->video_codec != AV_CODEC_ID_NONE) {
638  video_st = add_video_stream(oc, fmt->video_codec, w, h, framerate);
639  }
640 
641  if (audio) {
642  printf("Adding audio %dx%d\n", sample_rate, channels);
643  if (fmt->audio_codec != AV_CODEC_ID_NONE) {
644  audio_st = add_audio_stream(oc, fmt->audio_codec);
645  if (audio_st!=nullptr) {
646  AVCodecContext *c = audio_st->codec;
647  c->sample_rate = sample_rate;
648  c->channels = channels;
649  } else {
650  printf("Failed to add audio\n");
651  }
652  } else {
653  printf("No audio codec available\n");
654  }
655  } else {
656  printf("Skipping audio\n");
657  }
658 
659  av_dump_format(oc, 0, filename.c_str(), 1);
660 
661  /* now that all the parameters are set, we can open the audio and
662  video codecs and allocate the necessary encode buffers */
663  if (video_st) {
664  open_video(oc, video_st);
665  }
666  if (audio_st) {
667  open_audio(oc, audio_st);
668  }
669 
670  /* open the output file, if needed */
671  if (!(fmt->flags & AVFMT_NOFILE)) {
672  if (avio_open(&oc->pb, filename.c_str(), AVIO_FLAG_WRITE) < 0) {
673  fprintf(stderr, "Could not open '%s'\n", filename.c_str());
674  ::exit(1);
675  }
676  }
677 
678  /* write the stream header, if any */
679  avformat_write_header(oc, NULL);
680 
681  return true;
682 }
683 
684 bool FfmpegWriter::close() {
685  if (!isOk()) { return false; }
686 
687  /* close each codec */
688  if (video_st)
689  close_video(oc, video_st);
690  if (audio_st)
691  close_audio(oc, audio_st);
692 
693  /* write the trailer, if any */
694  av_write_trailer(oc);
695 
696  /* free the streams */
697  for(unsigned int i = 0; i < oc->nb_streams; i++) {
698  av_freep(&oc->streams[i]->codec);
699  av_freep(&oc->streams[i]);
700  }
701 
702  if (!(fmt->flags & AVFMT_NOFILE)) {
703  /* close the output file */
704  avio_close(oc->pb);
705  }
706 
707  /* free the format context */
708  av_free(oc);
709 
710  printf("Closed media file %s\n", filename.c_str());
711 
712  return true;
713 }
714 
715 bool FfmpegWriter::putImage(yarp::sig::ImageOf<yarp::sig::PixelRgb> & image) {
716  if (delayed) {
717  savedConfig.put("width",Value((int)image.width()));
718  savedConfig.put("height",Value((int)image.height()));
719  }
720  if (!isOk()) { return false; }
721 
722  /* compute current audio and video time */
723  if (audio_st)
724  audio_pts = (double)av_stream_get_end_pts(audio_st) * audio_st->time_base.num / audio_st->time_base.den;
725  else
726  audio_pts = 0.0;
727 
728  if (video_st)
729  video_pts = (double)av_stream_get_end_pts(video_st) * video_st->time_base.num / video_st->time_base.den;
730  else
731  video_pts = 0.0;
732 
733  if (!(audio_st||video_st))
734  return false;
735 
736  /* write interleaved audio and video frames */
737  if (!video_st || (video_st && audio_st && audio_pts < video_pts)) {
738  write_audio_frame(oc, audio_st);
739  } else {
740  write_video_frame(oc, video_st, image);
741  }
742 
743  return true;
744 }
745 
746 
747 
748 bool FfmpegWriter::putAudioVisual(yarp::sig::ImageOf<yarp::sig::PixelRgb>& image,
749  yarp::sig::Sound& sound) {
750  if (delayed) {
751  savedConfig.put("width",Value((int)image.width()));
752  savedConfig.put("height",Value((int)image.height()));
753  savedConfig.put("sample_rate",Value((int)sound.getFrequency()));
754  savedConfig.put("channels",Value((int)sound.getChannels()));
755  savedConfig.put("audio",Value(1));
756  }
757  if (!isOk()) { return false; }
758 
759  /* write interleaved audio and video frames */
760  write_video_frame(oc, video_st, image);
761  write_audio_frame(oc, audio_st, sound);
762  return true;
763 }
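
To see the configuration handling above end to end, the writer can be driven directly. A minimal usage sketch, not part of this file: the option names match the ones delayedOpen() checks, while the resolution, frame count and test pattern are invented for the example.

#include "FfmpegWriter.h"
#include <yarp/os/Property.h>
#include <yarp/sig/Image.h>

int main()
{
    yarp::os::Property cfg;
    cfg.put("out", "movie.avi");   // name of movie to write
    cfg.put("width", 320);         // must be even
    cfg.put("height", 240);        // must be even
    cfg.put("framerate", 25);

    FfmpegWriter writer;
    if (!writer.open(cfg)) {
        return 1;
    }

    yarp::sig::ImageOf<yarp::sig::PixelRgb> img;
    img.resize(320, 240);
    for (int frame = 0; frame < 100; frame++) {
        // fill a moving gradient so successive frames differ
        for (size_t y = 0; y < img.height(); y++) {
            for (size_t x = 0; x < img.width(); x++) {
                img.pixel(x, y) = yarp::sig::PixelRgb((x + frame) % 256, y % 256, 0);
            }
        }
        writer.putImage(img);
    }
    writer.close();
    return 0;
}

Because width and height are supplied up front, delayedOpen() completes immediately; leaving them out should also work, since putImage() records the first image's size into the saved configuration for the delayed open.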