Wed Jan 8 2020 09:49:46

Asterisk developer's documentation


console_video.c
Go to the documentation of this file.
1 /*
2  * Asterisk -- An open source telephony toolkit.
3  *
4  * Copyright 2007-2008, Marta Carbone, Sergio Fadda, Luigi Rizzo
5  *
6  * See http://www.asterisk.org for more information about
7  * the Asterisk project. Please do not directly contact
8  * any of the maintainers of this project for assistance;
9  * the project provides a web site, mailing lists and IRC
10  * channels for your use.
11  *
12  * This program is free software, distributed under the terms of
13  * the GNU General Public License Version 2. See the LICENSE file
14  * at the top of the source tree.
15  */
16 
17 /*
18  * Experimental support for video sessions. We use SDL for rendering, ffmpeg
19  * as the codec library for encoding and decoding, and Video4Linux and X11
20  * to generate the local video stream.
21  *
22  * If one of these pieces is not available, either at compile time or at
23  * runtime, we do our best to run without it. Of course, no codec library
24  * means we can only deal with raw data, no SDL means we cannot do rendering,
25  * no V4L or X11 means we cannot generate data (but in principle we could
26  * stream from or record to a file).
27  *
28  * We need a recent (2007.07.12 or newer) version of ffmpeg to avoid warnings.
29  * Older versions might give 'deprecated' messages during compilation,
30  * thus not compiling in AST_DEVMODE, or don't have swscale, in which case
31  * you can try to compile #defining OLD_FFMPEG here.
32  *
33  * $Revision: 369001 $
34  */
35 
36 //#define DROP_PACKETS 5 /* if set, drop this % of video packets */
37 //#define OLD_FFMPEG 1 /* set for old ffmpeg with no swscale */
38 
39 /*** MODULEINFO
40  <support_level>extended</support_level>
41  ***/
42 
43 #include "asterisk.h"
44 ASTERISK_FILE_VERSION(__FILE__, "$Revision: 369001 $")
45 #include <sys/ioctl.h>
46 #include "asterisk/cli.h"
47 #include "asterisk/file.h"
48 #include "asterisk/channel.h"
49 
50 #include "console_video.h"
51 
52 /*
53 The code is structured as follows.
54 
55 When a new console channel is created, we call console_video_start()
56 to initialize SDL, the source, and the encoder/ decoder for the
57 formats in use (XXX the latter two should be done later, once the
58 codec negotiation is complete). Also, a thread is created to handle
59 the video source and generate frames.
60 
61 While communication is on, the local source is generated by the
62 video thread, which wakes up periodically, generates frames and
63 enqueues them in chan->readq. Incoming rtp frames are passed to
64 console_write_video(), decoded and passed to SDL for display.
65 
66 For as unfortunate and confusing as it can be, we need to deal with a
67 number of different video representations (size, codec/pixel format,
68 codec parameters), as follows:
69 
70  loc_src is the data coming from the camera/X11/etc.
71  The format is typically constrained by the video source.
72 
73  enc_in is the input required by the encoder.
74  Typically constrained in size by the encoder type.
75 
76  enc_out is the bitstream transmitted over RTP.
77  Typically negotiated while the call is established.
78 
79  loc_dpy is the format used to display the local video source.
80  Depending on user preferences this can have the same size as
81  loc_src_fmt, or enc_in_fmt, or thumbnail size (e.g. PiP output)
82 
83  dec_in is the incoming RTP bitstream. Negotiated
84  during call establishment, it is not necessarily the same as
85  enc_in_fmt
86 
87  dec_out the output of the decoder.
88  The format is whatever the other side sends, and the
89  buffer is allocated by avcodec_decode_... so we only
90  copy the data here.
91 
92  rem_dpy the format used to display the remote stream
93 
94  src_dpy is the format used to display the local video source streams
95  The number of these fbuf_t is determined at run time, with dynamic allocation
96 
97 We store the format info together with the buffer storing the data.
98 As a future optimization, a format/buffer may reference another one
99 if the formats are equivalent. This will save some unnecessary format
100 conversion.
101 
102 
103 In order to handle video you need to add to sip.conf (and presumably
104 iax.conf too) the following:
105 
106  [general](+)
107  videosupport=yes
108  allow=h263 ; this or other video formats
109  allow=h263p ; this or other video formats
110 
111  */
112 
113 /*
114  * Codecs are absolutely necessary or we cannot do anything.
115  * SDL is optional (used for rendering only), so that we can still
 116  * stream video without displaying it.
117  */
118 #if !defined(HAVE_VIDEO_CONSOLE) || !defined(HAVE_FFMPEG)
119 /* stubs if required pieces are missing */
/* Stub: video support is not compiled in, so incoming video frames
 * are accepted and silently dropped. */
int console_write_video(struct ast_channel *chan, struct ast_frame *f)
{
	return 0;	/* writing video not supported */
}
124 
/* Stub: no video CLI commands are available in voice-only builds. */
int console_video_cli(struct video_desc *env, const char *var, int fd)
{
	return 1;	/* nothing matched */
}
129 
/* Stub: no video configuration keywords are recognized in voice-only builds. */
int console_video_config(struct video_desc **penv, const char *var, const char *val)
{
	return 1;	/* no configuration */
}
134 
/* Stub: tell the user that video was requested but is not available. */
void console_video_start(struct video_desc *env, struct ast_channel *owner)
{
	ast_log(LOG_NOTICE, "voice only, console video support not present\n");
}
139 
/* Stub: nothing to clean up when video support is not compiled in. */
void console_video_uninit(struct video_desc *env)
{
}
143 
/* Stub: there is no gui, so it can never have been started manually. */
int get_gui_startup(struct video_desc* env)
{
	return 0;	/* no gui here */
}
148 
150 
151 #else /* defined(HAVE_FFMPEG) && defined(HAVE_SDL) */
152 
153 /*! The list of video formats we support. */
154 int console_video_formats =
157 
158 
159 
160 /* function to scale and encode buffers */
161 static void my_scale(struct fbuf_t *in, AVPicture *p_in,
162  struct fbuf_t *out, AVPicture *p_out);
163 
/*
 * One entry in the table of video capture devices built from the
 * "videodevice=" lines in oss.conf; holds the grabber bound to the
 * device and the device's runtime state.
 */
struct video_device {
	char *name;			/* name of the device,
					 * allocated dynamically (see fill_table function) */
	struct grab_desc *grabber;	/* the grabber for the device type (NULL while closed) */
	void *grabber_data;		/* device's private data structure */
	struct fbuf_t *dev_buf;		/* buffer for incoming data */
	struct timeval last_frame;	/* when did we read the last frame ? */
	int status_index;		/* status of the device (source), a bitmask built
					 * from the IS_ON, IS_PRIMARY and IS_SECONDARY
					 * constants; it is also the index of the status
					 * message in the src_msgs array in console_gui.c */
};
180 
181 struct video_codec_desc; /* forward declaration */
/*
 * Descriptor of the local source, made of the following pieces:
 *  + configuration info (geometry, device name, fps...). These are read
 *    from the config file and copied here before calling video_out_init();
 *  + the frame buffer (buf) and source pixel format, allocated at init time;
 *  + the encoding and RTP info, including timestamps to generate
 *    frames at the correct rate;
 *  + source-specific info, i.e. fd for /dev/video, dpy-image for x11, etc,
 *    filled in by grabber_open; part of the source-specific information is in
 *    the device table (devices member), the rest is shared;
 * NOTE: loc_src.data == NULL means the rest of the struct is invalid, and
 * the video source is not available.
 */
struct video_out_desc {
	/* video device support.
	 * videodevice and geometry are read from the config file.
	 * At the right time we try to open it and allocate a buffer.
	 * If we are successful, webcam_bufsize > 0 and we can read.
	 */
	/* all the following is config file info copied from the parent */
	int fps;			/* frame rate of the local source */
	int bitrate;			/* target encoder bitrate, bits per second */
	int qmin;			/* minimum quantizer passed to the encoder */

	int sendvideo;			/* nonzero if the local stream is transmitted */

	struct fbuf_t loc_src_geometry;	/* local source geometry only (from config file) */
	struct fbuf_t enc_out;		/* encoder output buffer, allocated in video_out_init() */

	struct video_codec_desc *enc;	/* encoder */
	void *enc_ctx;			/* encoding context (actually an AVCodecContext *) */
	AVCodec *codec;			/* ffmpeg encoder; only a reference, never freed */
	AVFrame *enc_in_frame;		/* enc_in mapped into avcodec format. */
					/* The initial part of AVFrame is an AVPicture */
	int mtu;			/* max size of an outgoing video fragment */

	/* Table of devices specified with "videodevice=" in oss.conf.
	 * Static size as we have a limited number of entries.
	 */
	struct video_device devices[MAX_VIDEO_SOURCES];
	int device_num;			/* number of devices in table */
	int device_primary;		/* index of the current primary device in the table */
	int device_secondary;		/* index of the current secondary device in the table */

	int picture_in_picture;		/* is the PiP mode activated? 0 = NO | 1 = YES */

	/* these are the coordinates of the picture inside the picture
	 * (visible if PiP mode is active); they are relative to a
	 * containing buffer with cif geometry */
	int pip_x;
	int pip_y;
};
233 
/*
 * The overall descriptor, with room for config info, video source and
 * received data descriptors, SDL info, etc.
 * This should be globally visible to all modules (grabber, vcodecs, gui)
 * and contain all configuration info.
 */
struct video_desc {
	char codec_name[64];		/* the codec we use */

	int stayopen;			/* set if the gui was started manually (not by a call) */
	pthread_t vthread;		/* video thread */
	ast_mutex_t dec_lock;		/* sync decoder and video thread */
	int shutdown;			/* set to ask vthread to terminate */
	struct ast_channel *owner;	/* owner channel */


	struct fbuf_t enc_in;		/* encoder input buffer, allocated in video_out_init() */

	char keypad_file[256];		/* image for the keypad */
	char keypad_font[256];		/* font for the keypad */

	char sdl_videodriver[256];	/* if set, overrides the SDL_VIDEODRIVER environment */

	struct fbuf_t rem_dpy;		/* display remote video, no buffer (it is in win[WIN_REMOTE].bmp) */
	struct fbuf_t loc_dpy;		/* display local source, no buffer (managed by SDL in bmp[1]) */

	/* geometry of the thumbnails for all video sources. */
	struct fbuf_t src_dpy[MAX_VIDEO_SOURCES]; /* no buffer allocated here */

	int frame_freeze;		/* flag to freeze the incoming frame */

	/* local information for grabbers, codecs, gui */
	struct gui_info *gui;		/* gui state; NULL means no rendering */
	struct video_dec_desc *in;	/* remote (incoming) video descriptor */
	struct video_out_desc out;	/* local (outgoing) video descriptor */
};
270 
271 static AVPicture *fill_pict(struct fbuf_t *b, AVPicture *p);
272 
273 void fbuf_free(struct fbuf_t *b)
274 {
275  struct fbuf_t x = *b;
276 
277  if (b->data && b->size)
278  ast_free(b->data);
279  memset(b, '\0', sizeof(*b));
280  /* restore some fields */
281  b->w = x.w;
282  b->h = x.h;
283  b->pix_fmt = x.pix_fmt;
284 }
285 
286 /* return the status of env->stayopen to chan_oss, as the latter
287  * does not have access to fields of struct video_desc
288  */
289 int get_gui_startup(struct video_desc* env)
290 {
291  return env ? env->stayopen : 0;
292 }
293 
#if 0
/* helper function to print the amount of memory used by the process.
 * Useful to track memory leaks; unfortunately this code is OS-specific
 * (it shells out to ps(1) via system()) so we keep it commented out.
 */
static int
used_mem(const char *msg)
{
	char in[128];

	pid_t pid = getpid();
	/* ask ps for the virtual and resident set size of this process */
	sprintf(in, "ps -o vsz= -o rss= %d", pid);
	ast_log(LOG_WARNING, "used mem (vsize, rss) %s ", msg);
	system(in);
	return 0;
}
#endif
311 
312 #include "vcodecs.c"
313 #include "console_gui.c"
314 
315 /*! \brief Try to open video sources, return 0 on success, 1 on error
316  * opens all video sources found in the oss.conf configuration files.
317  * Saves the grabber and the datas in the device table (in the devices field
318  * of the descriptor referenced by v).
319  * Initializes the device_primary and device_secondary
320  * fields of v with the first devices that was
321  * successfully opened.
322  *
323  * \param v = video out environment descriptor
324  *
325  * returns 0 on success, 1 on error
326 */
327 static int grabber_open(struct video_out_desc *v)
328 {
329  struct grab_desc *g;
330  void *g_data;
331  int i, j;
332 
333  /* for each device in the device table... */
334  for (i = 0; i < v->device_num; i++) {
335  /* device already open */
336  if (v->devices[i].grabber)
337  continue;
338  /* for each type of grabber supported... */
339  for (j = 0; (g = console_grabbers[j]); j++) {
340  /* the grabber is opened and the informations saved in the device table */
341  g_data = g->open(v->devices[i].name, &v->loc_src_geometry, v->fps);
342  if (!g_data)
343  continue;
344  v->devices[i].grabber = g;
345  v->devices[i].grabber_data = g_data;
346  v->devices[i].status_index |= IS_ON;
347  }
348  }
349  /* the first working device is selected as the primary one and the secondary one */
350  for (i = 0; i < v->device_num; i++) {
351  if (!v->devices[i].grabber)
352  continue;
353  v->device_primary = i;
354  v->device_secondary = i;
355  return 0; /* source found */
356  }
357  return 1; /* no source found */
358 }
359 
360 
361 /*! \brief complete a buffer from the specified local video source.
362  * Called by get_video_frames(), in turn called by the video thread.
363  *
364  * \param dev = video environment descriptor
365  * \param fps = frame per seconds, for every device
366  *
367  * returns:
368  * - NULL on falure
369  * - reference to the device buffer on success
370  */
371 static struct fbuf_t *grabber_read(struct video_device *dev, int fps)
372 {
373  struct timeval now = ast_tvnow();
374 
375  if (dev->grabber == NULL) /* not initialized */
376  return NULL;
377 
378  /* the last_frame field in this row of the device table (dev)
379  is always initialized, it is set during the parsing of the config
380  file, and never unset, function fill_device_table(). */
381  /* check if it is time to read */
382  if (ast_tvdiff_ms(now, dev->last_frame) < 1000/fps)
383  return NULL; /* too early */
384  dev->last_frame = now; /* XXX actually, should correct for drift */
385  return dev->grabber->read(dev->grabber_data);
386 }
387 
388 /*! \brief handler run when dragging with the left button on
389  * the local source window - the effect is to move the offset
390  * of the captured area.
391  */
392 static void grabber_move(struct video_device *dev, int dx, int dy)
393 {
394  if (dev->grabber && dev->grabber->move) {
395  dev->grabber->move(dev->grabber_data, dx, dy);
396  }
397 }
398 
399 /*
400  * Map the codec name to the library. If not recognised, use a default.
401  * This is useful in the output path where we decide by name, presumably.
402  */
403 static struct video_codec_desc *map_config_video_format(char *name)
404 {
405  int i;
406 
407  for (i = 0; supported_codecs[i]; i++)
408  if (!strcasecmp(name, supported_codecs[i]->name))
409  break;
410  if (supported_codecs[i] == NULL) {
411  ast_log(LOG_WARNING, "Cannot find codec for '%s'\n", name);
412  i = 0;
413  strcpy(name, supported_codecs[i]->name);
414  }
415  ast_log(LOG_WARNING, "Using codec '%s'\n", name);
416  return supported_codecs[i];
417 }
418 
419 
/*! \brief uninitialize the descriptor for local video stream.
 * Releases the encoder context and input frame, frees the encoder
 * buffers and closes every open grabber device.
 * Always returns -1, so error paths can simply write
 * "return video_out_uninit(env);".
 */
static int video_out_uninit(struct video_desc *env)
{
	struct video_out_desc *v = &env->out;
	int i; /* integer variable used as iterator */

	/* XXX this should be a codec callback */
	if (v->enc_ctx) {
		AVCodecContext *enc_ctx = (AVCodecContext *)v->enc_ctx;
		avcodec_close(enc_ctx);
		av_free(enc_ctx);
		v->enc_ctx = NULL;
	}
	if (v->enc_in_frame) {
		av_free(v->enc_in_frame);
		v->enc_in_frame = NULL;
	}
	v->codec = NULL;	/* nothing to free, this is only a reference */
	/* release the buffers */
	fbuf_free(&env->enc_in);
	fbuf_free(&v->enc_out);
	/* close the grabbers */
	for (i = 0; i < v->device_num; i++) {
		if (v->devices[i].grabber){
			v->devices[i].grabber_data =
				v->devices[i].grabber->close(v->devices[i].grabber_data);
			v->devices[i].grabber = NULL;
			/* dev_buf is already freed by grabber->close() */
			v->devices[i].dev_buf = NULL;
		}
		v->devices[i].status_index = 0;
	}
	v->picture_in_picture = 0;	/* leave PiP mode */
	env->frame_freeze = 0;
	return -1;
}
456 
457 /*
458  * Initialize the encoder for the local source:
459  * - enc_ctx, codec, enc_in_frame are used by ffmpeg for encoding;
460  * - enc_out is used to store the encoded frame (to be sent)
461  * - mtu is used to determine the max size of video fragment
462  * NOTE: we enter here with the video source already open.
463  */
464 static int video_out_init(struct video_desc *env)
465 {
466  int codec;
467  int size;
468  struct fbuf_t *enc_in;
469  struct video_out_desc *v = &env->out;
470 
471  v->enc_ctx = NULL;
472  v->codec = NULL;
473  v->enc_in_frame = NULL;
474  v->enc_out.data = NULL;
475 
476  codec = map_video_format(v->enc->format, CM_WR);
477  v->codec = avcodec_find_encoder(codec);
478  if (!v->codec) {
479  ast_log(LOG_WARNING, "Cannot find the encoder for format %d\n",
480  codec);
481  return -1; /* error, but nothing to undo yet */
482  }
483 
484  v->mtu = 1400; /* set it early so the encoder can use it */
485 
486  /* allocate the input buffer for encoding.
487  * ffmpeg only supports PIX_FMT_YUV420P for the encoding.
488  */
489  enc_in = &env->enc_in;
490  enc_in->pix_fmt = PIX_FMT_YUV420P;
491  enc_in->size = (enc_in->w * enc_in->h * 3)/2;
492  enc_in->data = ast_calloc(1, enc_in->size);
493  if (!enc_in->data) {
494  ast_log(LOG_WARNING, "Cannot allocate encoder input buffer\n");
495  return video_out_uninit(env);
496  }
497  /* construct an AVFrame that points into buf_in */
498  v->enc_in_frame = avcodec_alloc_frame();
499  if (!v->enc_in_frame) {
500  ast_log(LOG_WARNING, "Unable to allocate the encoding video frame\n");
501  return video_out_uninit(env);
502  }
503 
504  /* parameters for PIX_FMT_YUV420P */
505  size = enc_in->w * enc_in->h;
506  v->enc_in_frame->data[0] = enc_in->data;
507  v->enc_in_frame->data[1] = v->enc_in_frame->data[0] + size;
508  v->enc_in_frame->data[2] = v->enc_in_frame->data[1] + size/4;
509  v->enc_in_frame->linesize[0] = enc_in->w;
510  v->enc_in_frame->linesize[1] = enc_in->w/2;
511  v->enc_in_frame->linesize[2] = enc_in->w/2;
512 
513  /* now setup the parameters for the encoder.
514  * XXX should be codec-specific
515  */
516  {
517  AVCodecContext *enc_ctx = avcodec_alloc_context();
518  v->enc_ctx = enc_ctx;
519  enc_ctx->pix_fmt = enc_in->pix_fmt;
520  enc_ctx->width = enc_in->w;
521  enc_ctx->height = enc_in->h;
522  /* XXX rtp_callback ?
523  * rtp_mode so ffmpeg inserts as many start codes as possible.
524  */
525  enc_ctx->rtp_mode = 1;
526  enc_ctx->rtp_payload_size = v->mtu / 2; // mtu/2
527  enc_ctx->bit_rate = v->bitrate;
528  enc_ctx->bit_rate_tolerance = enc_ctx->bit_rate/2;
529  enc_ctx->qmin = v->qmin; /* should be configured */
530  enc_ctx->time_base = (AVRational){1, v->fps};
531  enc_ctx->gop_size = v->fps*5; // emit I frame every 5 seconds
532 
533  v->enc->enc_init(v->enc_ctx);
534 
535  if (avcodec_open(enc_ctx, v->codec) < 0) {
536  ast_log(LOG_WARNING, "Unable to initialize the encoder %d\n", codec);
537  av_free(enc_ctx);
538  v->enc_ctx = NULL;
539  return video_out_uninit(env);
540  }
541  }
542  /*
543  * Allocate enough for the encoded bitstream. As we are compressing,
544  * we hope that the output is never larger than the input size.
545  */
546  v->enc_out.data = ast_calloc(1, enc_in->size);
547  v->enc_out.size = enc_in->size;
548  v->enc_out.used = 0;
549 
550  return 0;
551 }
552 
/*! \brief possibly uninitialize the video console.
 * Called at the end of a call, should reset the 'owner' field,
 * then possibly terminate the video thread if the gui has
 * not been started manually.
 * In practice, signal the thread and give it a bit of time to
 * complete, giving up if it gets stuck. Because uninit
 * is called from hangup with the channel locked, and the thread
 * uses the chan lock, we need to unlock here. This is unsafe,
 * and we should really use refcounts for the channels.
 */
void console_video_uninit(struct video_desc *env)
{
	int i, t = 100; /* initial wait is shorter, then we make it longer */
	if (env->stayopen == 0) {	/* gui opened by a call, do the shutdown */
		env->shutdown = 1;
		/* poll while the shutdown flag is still set -- presumably the
		 * video thread clears it when it exits (TODO confirm in
		 * video_thread). The channel lock is dropped around the sleep
		 * so the thread is not blocked on it. */
		for (i=0; env->shutdown && i < 10; i++) {
			if (env->owner)
				ast_channel_unlock(env->owner);
			usleep(t);
			t = 1000000;	/* after the first pass, wait up to 1s per iteration */
			if (env->owner)
				ast_channel_lock(env->owner);
		}
		env->vthread = NULL;
	}
	env->owner = NULL; /* this is unconditional */
}
580 
/*! fill an AVPicture from our fbuf info, as it is required by
 * the image conversion routines in ffmpeg. Note that the pointers
 * are recalculated if the fbuf has an offset (and so represents a picture in picture)
 * XXX This depends on the format.
 */
static AVPicture *fill_pict(struct fbuf_t *b, AVPicture *p)
{
	/* provide defaults for commonly used formats */
	int l4 = b->w * b->h/4; /* size of U or V frame */
	int len = b->w;		/* Y linesize, bytes */
	int luv = b->w/2;	/* U/V linesize, bytes; 0 means a packed format */
	int sample_size = 1;	/* bytes per pixel in the first plane */

	memset(p, '\0', sizeof(*p));
	switch (b->pix_fmt) {
	case PIX_FMT_RGB555:
	case PIX_FMT_RGB565:
		sample_size = 2;
		luv = 0;	/* packed: everything in the first plane */
		break;
	case PIX_FMT_RGBA32:
		sample_size = 4;
		luv = 0;
		break;
	case PIX_FMT_YUYV422:	/* Packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr */
		sample_size = 2;	/* all data in first plane, probably */
		luv = 0;
		break;
	}
	len *= sample_size;

	p->data[0] = b->data;
	p->linesize[0] = len;
	/* these are only valid for component (planar) images:
	 * U starts after the Y plane (4*l4 bytes), V after U (one more l4) */
	p->data[1] = luv ? b->data + 4*l4 : b->data+len;
	p->data[2] = luv ? b->data + 5*l4 : b->data+len;
	p->linesize[1] = luv;
	p->linesize[2] = luv;

	/* add the offsets to the pointers previously calculated,
	   it is necessary for the picture in picture mode */
	p->data[0] += len*b->win_y + b->win_x*sample_size;
	if (luv) {
		/* chroma planes are subsampled 2x, so halve the offsets */
		p->data[1] += luv*(b->win_y/2) + (b->win_x/2) * sample_size;
		p->data[2] += luv*(b->win_y/2) + (b->win_x/2) * sample_size;
	}
	return p;
}
629 
630 /*! convert/scale between an input and an output format.
631  * Old version of ffmpeg only have img_convert, which does not rescale.
632  * New versions use sws_scale which does both.
633  */
634 static void my_scale(struct fbuf_t *in, AVPicture *p_in,
635  struct fbuf_t *out, AVPicture *p_out)
636 {
637  AVPicture my_p_in, my_p_out;
638  int eff_w=out->w, eff_h=out->h;
639 
640  if (p_in == NULL)
641  p_in = fill_pict(in, &my_p_in);
642  if (p_out == NULL)
643  p_out = fill_pict(out, &my_p_out);
644 
645  /*if win_w is different from zero then we must change
646  the size of the scaled buffer (the position is already
647  encoded into the out parameter)*/
648  if (out->win_w) { /* picture in picture enabled */
649  eff_w=out->win_w;
650  eff_h=out->win_h;
651  }
652 #ifdef OLD_FFMPEG
653  /* XXX img_convert is deprecated, and does not do rescaling, PiP not supported */
654  img_convert(p_out, out->pix_fmt,
655  p_in, in->pix_fmt, in->w, in->h);
656 #else /* XXX replacement */
657  {
658  struct SwsContext *convert_ctx;
659 
660  convert_ctx = sws_getContext(in->w, in->h, in->pix_fmt,
661  eff_w, eff_h, out->pix_fmt,
662  SWS_BICUBIC, NULL, NULL, NULL);
663  if (convert_ctx == NULL) {
664  ast_log(LOG_ERROR, "FFMPEG::convert_cmodel : swscale context initialization failed\n");
665  return;
666  }
667  if (0)
668  ast_log(LOG_WARNING, "in %d %dx%d out %d %dx%d\n",
669  in->pix_fmt, in->w, in->h, out->pix_fmt, eff_w, eff_h);
670  sws_scale(convert_ctx,
671  p_in->data, p_in->linesize,
672  in->w, in->h, /* src slice */
673  p_out->data, p_out->linesize);
674 
675  sws_freeContext(convert_ctx);
676  }
677 #endif /* XXX replacement */
678 }
679 
680 struct video_desc *get_video_desc(struct ast_channel *c);
681 
/*
 * This function is called (by asterisk) for each video packet
 * coming from the network (the 'in' path) that needs to be processed.
 * We need to reconstruct the entire video frame before we can decode it.
 * After a video packet is received we have to:
 * - extract the bitstream with pre_process_data()
 * - append the bitstream to a buffer
 * - if the fragment is the last (RTP Marker) we decode it with decode_video()
 * - after the decoding is completed we display the decoded frame with show_frame()
 */
int console_write_video(struct ast_channel *chan, struct ast_frame *f);
int console_write_video(struct ast_channel *chan, struct ast_frame *f)
{
	struct video_desc *env = get_video_desc(chan);
	struct video_dec_desc *v = env->in;

	if (!env->gui)	/* no gui, no rendering */
		return 0;
	if (v == NULL)	/* first packet: create the decoder (mask off the RTP marker bit) */
		env->in = v = dec_init(f->subclass & ~1);
	if (v == NULL) {
		/* This is not fatal, but we won't have incoming video */
		ast_log(LOG_WARNING, "Cannot initialize input decoder\n");
		return 0;
	}

	if (v->dec_in_cur == NULL)	/* no buffer for incoming frames, drop */
		return 0;
#if defined(DROP_PACKETS) && DROP_PACKETS > 0
	/* Simulate lost packets */
	if ((random() % 10000) <= 100*DROP_PACKETS) {
		ast_log(LOG_NOTICE, "Packet lost [%d]\n", f->seqno);
		return 0;
	}
#endif
	if (v->discard) {
		/*
		 * In discard mode, drop packets until we find one with
		 * the RTP marker set (which is the end of frame).
		 * Note that the RTP marker flag is sent as the LSB of the
		 * subclass, which is a bitmask of formats. The low bit is
		 * normally used for audio so there is no interference.
		 */
		if (f->subclass & 0x01) {
			/* end of frame seen: reset the reassembly buffer and
			 * resume normal processing from the next packet */
			v->dec_in_cur->used = 0;
			v->dec_in_cur->ebit = 0;
			v->next_seq = f->seqno + 1;	/* wrap at 16 bit */
			v->discard = 0;
			ast_log(LOG_WARNING, "out of discard mode, frame %d\n", f->seqno);
		}
		return 0;
	}

	/*
	 * Only in-order fragments will be accepted. Remember seqno
	 * has 16 bit so there is wraparound. Also, ideally we could
	 * accept a bit of reordering, but at the moment we don't.
	 */
	if (v->next_seq != f->seqno) {
		ast_log(LOG_WARNING, "discarding frame out of order, %d %d\n",
			v->next_seq, f->seqno);
		v->discard = 1;
		return 0;
	}
	v->next_seq++;

	if (f->data.ptr == NULL || f->datalen < 2) {
		ast_log(LOG_WARNING, "empty video frame, discard\n");
		return 0;
	}
	/* append this fragment's payload to the current reassembly buffer */
	if (v->d_callbacks->dec_decap(v->dec_in_cur, f->data.ptr, f->datalen)) {
		ast_log(LOG_WARNING, "error in dec_decap, enter discard\n");
		v->discard = 1;
	}
	if (f->subclass & 0x01) {	// RTP Marker
		/* prepare to decode: advance the buffer so the video thread knows. */
		struct fbuf_t *tmp = v->dec_in_cur;	/* store current pointer */
		ast_mutex_lock(&env->dec_lock);
		if (++v->dec_in_cur == &v->dec_in[N_DEC_IN])	/* advance to next, circular */
			v->dec_in_cur = &v->dec_in[0];
		if (v->dec_in_dpy == NULL) {	/* we were not displaying anything, so set it */
			v->dec_in_dpy = tmp;
		} else if (v->dec_in_dpy == v->dec_in_cur) {	/* current slot is busy */
			v->dec_in_cur = NULL;
		}
		ast_mutex_unlock(&env->dec_lock);
	}
	return 0;
}
771 
772 
/*! \brief refreshes the buffers of all the devices by calling
 * grabber_read on each device in the device table.
 * It encodes the primary source buffer; if the picture in picture mode
 * is enabled it also encodes the secondary source buffer (into the
 * buffer to split). The encoded buffer is split to build the local and
 * the remote view.
 * Return a list of ast_frame representing the video fragments.
 * The head pointer is returned by the function, the tail pointer
 * is returned as an argument.
 *
 * \param env = video environment descriptor
 * \param tail = tail pointer (practically a return value)
 */
static struct ast_frame *get_video_frames(struct video_desc *env, struct ast_frame **tail)
{
	struct video_out_desc *v = &env->out;
	struct ast_frame *dummy;	/* used when the caller does not want the tail */
	struct fbuf_t *loc_src_primary = NULL, *p_read;
	int i;
	/* if no device was found in the config file */
	if (!env->out.device_num)
		return NULL;
	/* every time this function is called we refresh the buffers of every device,
	   updating the private device buffer in the device table */
	for (i = 0; i < env->out.device_num; i++) {
		p_read = grabber_read(&env->out.devices[i], env->out.fps);
		/* it is used only if different from NULL, we maintain the last good buffer otherwise */
		if (p_read)
			env->out.devices[i].dev_buf = p_read;
	}
	/* select the primary device buffer as the one to encode */
	loc_src_primary = env->out.devices[env->out.device_primary].dev_buf;
	/* loc_src_primary can be NULL if the device has been turned off during
	   execution or it is read too early */
	if (loc_src_primary) {
		/* Scale the video for the encoder, then use it for local rendering
		   so we will see the same as the remote party */
		my_scale(loc_src_primary, NULL, &env->enc_in, NULL);
	}
	if (env->out.picture_in_picture) { /* the picture in picture mode is enabled */
		struct fbuf_t *loc_src_secondary;
		/* read from the secondary source */
		loc_src_secondary = env->out.devices[env->out.device_secondary].dev_buf;
		if (loc_src_secondary) {
			/* the PiP occupies a third of the frame at (pip_x, pip_y) */
			env->enc_in.win_x = env->out.pip_x;
			env->enc_in.win_y = env->out.pip_y;
			env->enc_in.win_w = env->enc_in.w/3;
			env->enc_in.win_h = env->enc_in.h/3;
			/* scale to the correct geometry and insert the
			   picture in picture into the enc_in buffer */
			my_scale(loc_src_secondary, NULL, &env->enc_in, NULL);
			/* return to normal parameters (not picture in picture) */
			env->enc_in.win_x = 0;
			env->enc_in.win_y = 0;
			env->enc_in.win_w = 0;
			env->enc_in.win_h = 0;
		}
		else {
			/* loc_src_secondary can be NULL if the device has been turned off during
			   execution or it is read too early */
			env->out.picture_in_picture = 0; /* disable picture in picture */
		}
	}
	show_frame(env, WIN_LOCAL);	/* local rendering */
	for (i = 0; i < env->out.device_num; i++)
		show_frame(env, i+WIN_SRC1);	/* render every source device in a thumbnail */
	if (tail == NULL)
		tail = &dummy;
	*tail = NULL;
	/* if no reason for encoding, do not encode */
	if (!env->owner || !loc_src_primary || !v->sendvideo)
		return NULL;
	if (v->enc_out.data == NULL) {
		static volatile int a = 0;
		if (a++ < 2)	/* rate-limit the warning to the first two occurrences */
			ast_log(LOG_WARNING, "fail, no encoder output buffer\n");
		return NULL;
	}
	v->enc->enc_run(v);
	return v->enc->enc_encap(&v->enc_out, v->mtu, tail);
}
853 
/*
 * Helper thread to periodically poll the video sources and enqueue the
 * generated frames directed to the remote party to the channel's queue.
 * Using a separate thread also helps because the encoding can be
 * computationally expensive so we don't want to starve the main thread.
 *
 * Lifecycle: spawned (detached) by console_video_start(); loops roughly
 * 20 times per second until env->shutdown is set, then tears down the
 * decoder, the output path and the GUI before exiting.
 */
static void *video_thread(void *arg)
{
	struct video_desc *env = arg;
	int count = 0;			/* loop counter, used to refresh the caption every 10 iterations */
	char save_display[128] = "";	/* saved DISPLAY value, restored after SDL_Init */
	int i; /* integer variable used as iterator */

	/* if sdl_videodriver is set, override the environment. Also,
	 * if it contains 'console' override DISPLAY around the call to SDL_Init
	 * so we use the console as opposed to the x11 version of aalib
	 */
	if (!ast_strlen_zero(env->sdl_videodriver)) { /* override */
		const char *s = getenv("DISPLAY");
		setenv("SDL_VIDEODRIVER", env->sdl_videodriver, 1);
		if (s && !strcasecmp(env->sdl_videodriver, "aalib-console")) {
			ast_copy_string(save_display, s, sizeof(save_display));
			unsetenv("DISPLAY");
		}
	}
	sdl_setup(env);
	if (!ast_strlen_zero(save_display)) {
		setenv("DISPLAY", save_display, 1);
	}

	ast_mutex_init(&env->dec_lock);	/* used to sync decoder and renderer */

	if (grabber_open(&env->out)) {
		ast_log(LOG_WARNING, "cannot open local video source\n");
	}

	if (env->out.device_num) {
		/* the primary device initially acts as both primary and secondary source */
		env->out.devices[env->out.device_primary].status_index |= IS_PRIMARY | IS_SECONDARY;
	}

	/* even if no device is connected, we must call video_out_init,
	 * as some of the data structures it initializes are
	 * used in get_video_frames()
	 */
	video_out_init(env);

	/* Writes intial status of the sources. */
	if (env->gui) {
		for (i = 0; i < env->out.device_num; i++) {
			print_message(env->gui->thumb_bd_array[i].board,
				src_msgs[env->out.devices[i].status_index]);
		}
	}

	for (;;) {
		struct timespec t = { 0, 50000000 };	/* XXX 20 times/sec */
		struct ast_frame *p, *f;
		struct ast_channel *chan;
		int fd;
		char *caption = NULL, buf[160];

		/* determine if video format changed */
		if (count++ % 10 == 0) {
			if (env->out.sendvideo && env->out.devices) {
				/* e.g. "/dev/video0 h263 352x288 @ 15fps 65kbps" */
				snprintf(buf, sizeof(buf), "%s %s %dx%d @@ %dfps %dkbps",
					env->out.devices[env->out.device_primary].name, env->codec_name,
					env->enc_in.w, env->enc_in.h,
					env->out.fps, env->out.bitrate / 1000);
			} else {
				sprintf(buf, "hold");
			}
			caption = buf;
		}

		/* manage keypad events */
		/* XXX here we should always check for events,
		 * otherwise the drag will not work */
		if (env->gui)
			eventhandler(env, caption);

		/* sleep for a while */
		nanosleep(&t, NULL);

		if (env->in) {
			struct video_dec_desc *v = env->in;

			/*
			 * While there is something to display, call the decoder and free
			 * the buffer, possibly enabling the receiver to store new data.
			 */
			while (v->dec_in_dpy) {
				struct fbuf_t *tmp = v->dec_in_dpy;	/* store current pointer */

				/* decode the frame, but show it only if not frozen */
				if (v->d_callbacks->dec_run(v, tmp) && !env->frame_freeze)
					show_frame(env, WIN_REMOTE);
				tmp->used = 0;	/* mark buffer as free */
				tmp->ebit = 0;
				/* dec_lock protects the dec_in[] ring shared with the receiver */
				ast_mutex_lock(&env->dec_lock);
				if (++v->dec_in_dpy == &v->dec_in[N_DEC_IN])	/* advance to next, circular */
					v->dec_in_dpy = &v->dec_in[0];

				if (v->dec_in_cur == NULL)	/* receiver was idle, enable it... */
					v->dec_in_cur = tmp;	/* using the slot just freed */
				else if (v->dec_in_dpy == v->dec_in_cur) /* this was the last slot */
					v->dec_in_dpy = NULL;	/* nothing more to display */
				ast_mutex_unlock(&env->dec_lock);
			}
		}

		if (env->shutdown)
			break;
		f = get_video_frames(env, &p);	/* read and display; f = head, p = tail of the chain */
		if (!f)
			continue;
		chan = env->owner;
		if (chan == NULL) {
			/* drop the chain of frames, nobody uses them */
			while (f) {
				struct ast_frame *g = AST_LIST_NEXT(f, frame_list);
				ast_frfree(f);
				f = g;
			}
			continue;
		}
		fd = chan->alertpipe[1];
		ast_channel_lock(chan);

		/* AST_LIST_INSERT_TAIL is only good for one frame, cannot use here */
		if (chan->readq.first == NULL) {
			chan->readq.first = f;
		} else {
			chan->readq.last->frame_list.next = f;
		}
		chan->readq.last = p;
		/*
		 * more or less same as ast_queue_frame, but extra
		 * write on the alertpipe to signal frames.
		 */
		if (fd > -1) {
			int blah = 1, l = sizeof(blah);
			/* one token per queued frame, so the channel wakes up once per frame */
			for (p = f; p; p = AST_LIST_NEXT(p, frame_list)) {
				if (write(fd, &blah, l) != l)
					ast_log(LOG_WARNING, "Unable to write to alert pipe on %s, frametype/subclass %d/%d: %s!\n",
						chan->name, f->frametype, f->subclass, strerror(errno));
			}
		}
		ast_channel_unlock(chan);
	}
	/* thread terminating, here could call the uninit */
	/* uninitialize the local and remote video environments */
	env->in = dec_uninit(env->in);
	video_out_uninit(env);

	if (env->gui)
		env->gui = cleanup_sdl(env->gui, env->out.device_num);
	ast_mutex_destroy(&env->dec_lock);
	env->shutdown = 0;	/* acknowledge the shutdown request */
	return NULL;
}
1014 
1015 static void copy_geometry(struct fbuf_t *src, struct fbuf_t *dst)
1016 {
1017  if (dst->w == 0)
1018  dst->w = src->w;
1019  if (dst->h == 0)
1020  dst->h = src->h;
1021 }
1022 
1023 /*! initialize the video environment.
1024  * Apart from the formats (constant) used by sdl and the codec,
1025  * we use enc_in as the basic geometry.
1026  */
1027 static void init_env(struct video_desc *env)
1028 {
1029  struct fbuf_t *c = &(env->out.loc_src_geometry); /* local source */
1030  struct fbuf_t *ei = &(env->enc_in); /* encoder input */
1031  struct fbuf_t *ld = &(env->loc_dpy); /* local display */
1032  struct fbuf_t *rd = &(env->rem_dpy); /* remote display */
1033  int i; /* integer working as iterator */
1034 
1035  c->pix_fmt = PIX_FMT_YUV420P; /* default - camera format */
1036  ei->pix_fmt = PIX_FMT_YUV420P; /* encoder input */
1037  if (ei->w == 0 || ei->h == 0) {
1038  ei->w = 352;
1039  ei->h = 288;
1040  }
1041  ld->pix_fmt = rd->pix_fmt = PIX_FMT_YUV420P; /* sdl format */
1042  /* inherit defaults */
1043  copy_geometry(ei, c); /* camera inherits from encoder input */
1044  copy_geometry(ei, rd); /* remote display inherits from encoder input */
1045  copy_geometry(rd, ld); /* local display inherits from remote display */
1046 
1047  /* fix the size of buffers for small windows */
1048  for (i = 0; i < env->out.device_num; i++) {
1049  env->src_dpy[i].pix_fmt = PIX_FMT_YUV420P;
1050  env->src_dpy[i].w = SRC_WIN_W;
1051  env->src_dpy[i].h = SRC_WIN_H;
1052  }
1053  /* now we set the default coordinates for the picture in picture
1054  frames inside the env_in buffers, those can be changed by dragging the
1055  picture in picture with left click */
1056  env->out.pip_x = ei->w - ei->w/3;
1057  env->out.pip_y = ei->h - ei->h/3;
1058 }
1059 
1060 /*!
1061  * The first call to the video code, called by oss_new() or similar.
1062  * Here we initialize the various components we use, namely SDL for display,
1063  * ffmpeg for encoding/decoding, and a local video source.
1064  * We do our best to progress even if some of the components are not
1065  * available.
1066  */
1067 void console_video_start(struct video_desc *env, struct ast_channel *owner)
1068 {
1069  ast_log(LOG_WARNING, "env %p chan %p\n", env, owner);
1070  if (env == NULL) /* video not initialized */
1071  return;
1072  env->owner = owner; /* work even if no owner is specified */
1073  if (env->vthread)
1074  return; /* already initialized, nothing to do */
1075  init_env(env);
1076  env->out.enc = map_config_video_format(env->codec_name);
1077 
1078  ast_log(LOG_WARNING, "start video out %s %dx%d\n",
1079  env->codec_name, env->enc_in.w, env->enc_in.h);
1080  /*
1081  * Register all codecs supported by the ffmpeg library.
1082  * We only need to do it once, but probably doesn't
1083  * harm to do it multiple times.
1084  */
1085  avcodec_init();
1086  avcodec_register_all();
1087  av_log_set_level(AV_LOG_ERROR); /* only report errors */
1088 
1089  if (env->out.fps == 0) {
1090  env->out.fps = 15;
1091  ast_log(LOG_WARNING, "fps unset, forcing to %d\n", env->out.fps);
1092  }
1093  if (env->out.bitrate == 0) {
1094  env->out.bitrate = 65000;
1095  ast_log(LOG_WARNING, "bitrate unset, forcing to %d\n", env->out.bitrate);
1096  }
1097  /* create the thread as detached so memory is freed on termination */
1099  NULL, video_thread, env);
1100 }
1101 
1102 /*
1103  * Parse a geometry string, accepting also common names for the formats.
1104  * Trick: if we have a leading > or < and a numeric geometry,
1105  * return the larger or smaller one.
1106  * E.g. <352x288 gives the smaller one, 320x240
1107  */
1108 static int video_geom(struct fbuf_t *b, const char *s)
1109 {
1110  int w = 0, h = 0;
1111 
1112  static struct {
1113  const char *s; int w; int h;
1114  } *fp, formats[] = {
1115  {"16cif", 1408, 1152 },
1116  {"xga", 1024, 768 },
1117  {"4cif", 704, 576 },
1118  {"vga", 640, 480 },
1119  {"cif", 352, 288 },
1120  {"qvga", 320, 240 },
1121  {"qcif", 176, 144 },
1122  {"sqcif", 128, 96 },
1123  {NULL, 0, 0 },
1124  };
1125  if (*s == '<' || *s == '>')
1126  sscanf(s+1,"%dx%d", &w, &h);
1127  for (fp = formats; fp->s; fp++) {
1128  if (*s == '>') { /* look for a larger one */
1129  if (fp->w <= w) {
1130  if (fp > formats)
1131  fp--; /* back one step if possible */
1132  break;
1133  }
1134  } else if (*s == '<') { /* look for a smaller one */
1135  if (fp->w < w)
1136  break;
1137  } else if (!strcasecmp(s, fp->s)) { /* look for a string */
1138  break;
1139  }
1140  }
1141  if (*s == '<' && fp->s == NULL) /* smallest */
1142  fp--;
1143  if (fp->s) {
1144  b->w = fp->w;
1145  b->h = fp->h;
1146  } else if (sscanf(s, "%dx%d", &b->w, &b->h) != 2) {
1147  ast_log(LOG_WARNING, "Invalid video_size %s, using 352x288\n", s);
1148  b->w = 352;
1149  b->h = 288;
1150  }
1151  return 0;
1152 }
1153 
1154 
1155 /*! \brief add an entry to the video_device table,
1156  * ignoring duplicate names.
1157  * The table is a static array of 9 elements.
1158  * The last_frame field of each entry of the table is initialized to
1159  * the current time (we need a value inside this field, on stop of the
1160  * GUI the last_frame value is not changed, to avoid checking if it is 0 we
1161  * set the initial value on current time) XXX
1162  *
1163  * PARAMETERS:
1164  * \param devices_p = pointer to the table of devices
1165  * \param device_num_p = pointer to the number of devices
1166  * \param s = name of the new device to insert
1167  *
1168  * returns 0 on success, 1 on error
1169  */
1170 static int device_table_fill(struct video_device *devices, int *device_num_p, const char *s)
1171 {
1172  int i;
1173  struct video_device *p;
1174 
1175  /* with the current implementation, we support a maximum of 9 devices.*/
1176  if (*device_num_p >= 9)
1177  return 0; /* more devices will be ignored */
1178  /* ignore duplicate names */
1179  for (i = 0; i < *device_num_p; i++) {
1180  if (!strcmp(devices[i].name, s))
1181  return 0;
1182  }
1183  /* inserts the new video device */
1184  p = &devices[*device_num_p];
1185  /* XXX the string is allocated but NEVER deallocated,
1186  the good time to do that is when the module is unloaded, now we skip the problem */
1187  p->name = ast_strdup(s); /* copy the name */
1188  /* other fields initially NULL */
1189  p->grabber = NULL;
1190  p->grabber_data = NULL;
1191  p->dev_buf = NULL;
1192  p->last_frame = ast_tvnow();
1193  p->status_index = 0;
1194  (*device_num_p)++; /* one device added */
1195  return 0;
1196 }
1197 
1198 /* extend ast_cli with video commands. Called by console_video_config */
1199 int console_video_cli(struct video_desc *env, const char *var, int fd)
1200 {
1201  if (env == NULL)
1202  return 1; /* unrecognised */
1203 
1204  if (!strcasecmp(var, "videodevice")) {
1205  ast_cli(fd, "videodevice is [%s]\n", env->out.devices[env->out.device_primary].name);
1206  } else if (!strcasecmp(var, "videocodec")) {
1207  ast_cli(fd, "videocodec is [%s]\n", env->codec_name);
1208  } else if (!strcasecmp(var, "sendvideo")) {
1209  ast_cli(fd, "sendvideo is [%s]\n", env->out.sendvideo ? "on" : "off");
1210  } else if (!strcasecmp(var, "video_size")) {
1211  int in_w = 0, in_h = 0;
1212  if (env->in) {
1213  in_w = env->in->dec_out.w;
1214  in_h = env->in->dec_out.h;
1215  }
1216  ast_cli(fd, "sizes: video %dx%d camera %dx%d local %dx%d remote %dx%d in %dx%d\n",
1217  env->enc_in.w, env->enc_in.h,
1218  env->out.loc_src_geometry.w, env->out.loc_src_geometry.h,
1219  env->loc_dpy.w, env->loc_dpy.h,
1220  env->rem_dpy.w, env->rem_dpy.h,
1221  in_w, in_h);
1222  } else if (!strcasecmp(var, "bitrate")) {
1223  ast_cli(fd, "bitrate is [%d]\n", env->out.bitrate);
1224  } else if (!strcasecmp(var, "qmin")) {
1225  ast_cli(fd, "qmin is [%d]\n", env->out.qmin);
1226  } else if (!strcasecmp(var, "fps")) {
1227  ast_cli(fd, "fps is [%d]\n", env->out.fps);
1228  } else if (!strcasecmp(var, "startgui")) {
1229  env->stayopen = 1;
1230  console_video_start(env, NULL);
1231  } else if (!strcasecmp(var, "stopgui") && env->stayopen != 0) {
1232  env->stayopen = 0;
1233  if (env->gui && env->owner)
1234  ast_cli_command(-1, "console hangup");
1235  else /* not in a call */
1236  console_video_uninit(env);
1237  } else {
1238  return 1; /* unrecognised */
1239  }
1240  return 0; /* recognised */
1241 }
1242 
/*! parse config command for video support.
 * Allocates and initializes the video descriptor the first time a video
 * option is seen.
 * \param penv address of the video descriptor pointer (allocated on demand)
 * \param var  configuration variable name
 * \param val  configuration variable value
 * \return 0 when the variable was recognised, 1 otherwise (or on error).
 */
int console_video_config(struct video_desc **penv,
	const char *var, const char *val)
{
	struct video_desc *env;

	if (penv == NULL) {
		ast_log(LOG_WARNING, "bad argument penv=NULL\n");
		return 1;	/* error */
	}
	/* allocate the video descriptor first time we get here */
	env = *penv;
	if (env == NULL) {
		env = *penv = ast_calloc(1, sizeof(struct video_desc));
		if (env == NULL) {
			ast_log(LOG_WARNING, "fail to allocate video_desc\n");
			return 1;	/* error */

		}
		/* set default values - 0's are already there */
		env->out.device_primary = 0;
		env->out.device_secondary = 0;
		env->out.fps = 5;
		env->out.bitrate = 65000;
		env->out.sendvideo = 1;
		env->out.qmin = 3;
		env->out.device_num = 0;
	}
	/* the CV_* macros (see config.h) build an if/else chain inside a
	 * do { } while (0) block: each matching entry runs its action and
	 * breaks out, landing on the final 'return 0' after CV_END; if no
	 * entry matches, execution falls through to 'return 1' below. */
	CV_START(var, val);
	CV_F("videodevice", device_table_fill(env->out.devices, &env->out.device_num, val));
	CV_BOOL("sendvideo", env->out.sendvideo);
	CV_F("video_size", video_geom(&env->enc_in, val));
	CV_F("camera_size", video_geom(&env->out.loc_src_geometry, val));
	CV_F("local_size", video_geom(&env->loc_dpy, val));
	CV_F("remote_size", video_geom(&env->rem_dpy, val));
	CV_STR("keypad", env->keypad_file);
	CV_F("region", keypad_cfg_read(env->gui, val));
	CV_UINT("startgui", env->stayopen);	/* enable gui at startup */
	CV_STR("keypad_font", env->keypad_font);
	CV_STR("sdl_videodriver", env->sdl_videodriver);
	CV_UINT("fps", env->out.fps);
	CV_UINT("bitrate", env->out.bitrate);
	CV_UINT("qmin", env->out.qmin);
	CV_STR("videocodec", env->codec_name);
	return 1;	/* nothing found */

	CV_END;		/* the 'nothing found' case */
	return 0;		/* found something */
}
1292 
1293 #endif /* video support */
struct ast_frame * last
Definition: channel.h:820
union ast_frame_subclass subclass
Definition: frame.h:146
static void sdl_setup(struct video_desc *env)
[re]set the main sdl window, useful in case of resize. We can tell the first from subsequent calls fr...
Definition: console_gui.c:1219
#define ast_channel_lock(chan)
Definition: channel.h:2466
Main Channel structure associated with a channel.
Definition: channel.h:742
Asterisk main include file. File version handling, generic pbx functions.
#define N_DEC_IN
Definition: vcodecs.c:92
static struct gui_info * cleanup_sdl(struct gui_info *gui, int device_num)
free the resources in struct gui_info and the descriptor itself. Return NULL so we can assign the val...
Definition: console_gui.c:185
int seqno
Definition: frame.h:172
#define ast_strdup(a)
Definition: astmm.h:109
Definition: ast_expr2.c:325
void fbuf_free(struct fbuf_t *)
static struct video_codec_desc * supported_codecs[]
Definition: vcodecs.c:1144
#define AST_FORMAT_H261
Definition: frame.h:280
#define CV_STR(__x, __dst)
Definition: config.h:744
void * ptr
Definition: frame.h:160
struct fbuf_t dec_in[N_DEC_IN]
Definition: vcodecs.c:95
#define LOG_WARNING
Definition: logger.h:144
struct ast_frame::@173 frame_list
static void dummy(char *unused,...)
Definition: chan_unistim.c:188
#define CV_UINT(__x, __dst)
Definition: config.h:743
#define var
Definition: ast_expr2f.c:606
struct video_codec_desc * d_callbacks
Definition: vcodecs.c:85
int pix_fmt
Definition: console_video.h:69
#define AST_LIST_NEXT(elm, field)
Returns the next entry in the list after the given entry.
Definition: linkedlists.h:438
int used
Definition: console_video.h:63
#define AST_FORMAT_H263_PLUS
Definition: frame.h:285
void console_video_start(struct video_desc *env, struct ast_channel *owner)
struct timeval ast_tvnow(void)
Returns current timeval. Meant to replace calls to gettimeofday().
Definition: time.h:142
#define ast_mutex_lock(a)
Definition: lock.h:155
int64_t ast_tvdiff_ms(struct timeval end, struct timeval start)
Computes the difference (in milliseconds) between two struct timeval instances.
Definition: time.h:90
int alertpipe[2]
Definition: channel.h:851
Generic File Format Support. Should be included by clients of the file handling routines. File service providers should instead include mod_format.h.
int console_video_formats
void *(* open)(const char *name, struct fbuf_t *geom, int fps)
Definition: console_video.h:82
void ast_cli(int fd, const char *fmt,...)
Definition: cli.c:105
struct grab_desc * console_grabbers[]
uint16_t next_seq
Definition: vcodecs.c:90
#define ast_pthread_create_detached_background(a, b, c, d)
Definition: utils.h:431
int console_write_video(struct ast_channel *chan, struct ast_frame *f)
static void show_frame(struct video_desc *env, int out)
Definition: console_gui.c:259
static int video_geom(struct fbuf_t *b, const char *s)
int win_x
Definition: console_video.h:71
static enum CodecID map_video_format(uint32_t ast_format, int rw)
map an asterisk format into an ffmpeg one
Definition: vcodecs.c:1133
int console_video_config(struct video_desc **penv, const char *var, const char *val)
int win_h
Definition: console_video.h:74
#define CV_END
close a variable parsing block
Definition: config.h:733
General Asterisk PBX channel definitions.
#define CV_START(__in_var, __in_val)
the macro to open a block for variable parsing
Definition: config.h:727
const char * src
Definition: frame.h:158
static force_inline int attribute_pure ast_strlen_zero(const char *s)
Definition: strings.h:63
int datalen
Definition: frame.h:148
int win_y
Definition: console_video.h:72
#define MAX_VIDEO_SOURCES
Definition: console_video.h:51
static struct video_dec_desc * dec_init(uint32_t the_ast_format)
Definition: vcodecs.c:1203
ast_cli_command
calling arguments for new-style handlers.
Definition: cli.h:145
struct fbuf_t * dec_in_dpy
Definition: vcodecs.c:94
#define AST_FORMAT_H263
Definition: frame.h:283
struct fbuf_t * dec_in_cur
Definition: vcodecs.c:93
#define IS_PRIMARY
Definition: console_gui.c:235
#define CV_BOOL(__x, __dst)
helper macros to assign the value to a BOOL, UINT, static string and dynamic string ...
Definition: config.h:742
#define LOG_ERROR
Definition: logger.h:155
#define SRC_WIN_H
Definition: console_video.h:47
int setenv(const char *name, const char *value, int overwrite)
uint8_t * data
Definition: console_video.h:60
static int len(struct ast_channel *chan, const char *cmd, char *data, char *buf, size_t buflen)
Definition: file.c:65
const ast_string_field name
Definition: channel.h:787
char * src_msgs[]
Definition: console_gui.c:239
void ast_log(int level, const char *file, int line, const char *function, const char *fmt,...)
Used for sending a log message This is the standard logger function. Probably the only way you will i...
Definition: logger.c:1207
#define LOG_NOTICE
Definition: logger.h:133
int win_w
Definition: console_video.h:73
#define ast_channel_unlock(chan)
Definition: channel.h:2467
int errno
static const char name[]
#define ast_free(a)
Definition: astmm.h:97
static struct video_dec_desc * dec_uninit(struct video_dec_desc *v)
uninitialize the descriptor for remote video stream
Definition: vcodecs.c:1171
static struct ast_format f[]
Definition: format_g726.c:181
static int keypad_cfg_read(struct gui_info *gui, const char *val)
read a keypad entry line in the format reset token circle xc yc diameter token circle xc yc x1 y1 h #...
Definition: console_gui.c:1561
int console_video_cli(struct video_desc *env, const char *var, int fd)
int ebit
Definition: console_video.h:64
#define IS_ON
Definition: console_gui.c:237
static void grabber_move(struct video_device *, int dx, int dy)
#define CV_F(__pattern, __body)
call a generic function if the name matches.
Definition: config.h:736
static void eventhandler(struct video_desc *env, const char *caption)
refresh the screen, and also grab a bunch of events.
Definition: console_gui.c:953
#define SRC_WIN_W
Definition: console_video.h:46
void console_video_uninit(struct video_desc *env)
Standard Command Line Interface.
#define ast_calloc(a, b)
Definition: astmm.h:82
int get_gui_startup(struct video_desc *env)
void ast_copy_string(char *dst, const char *src, size_t size)
Size-limited null-terminating string copy.
Definition: strings.h:223
int print_message(struct board *b, const char *s)
struct ast_frame * next
Definition: frame.h:164
Data structure associated with a single frame of data.
Definition: frame.h:142
enum ast_frame_type frametype
Definition: frame.h:144
struct ast_frame * first
Definition: channel.h:820
#define ast_mutex_init(pmutex)
Definition: lock.h:152
#define ast_frfree(fr)
Definition: frame.h:583
#define ast_mutex_destroy(a)
Definition: lock.h:154
#define AST_FORMAT_MP4_VIDEO
Definition: frame.h:289
#define AST_FORMAT_H264
Definition: frame.h:287
#define IS_SECONDARY
Definition: console_gui.c:236
decoder_decap_f dec_decap
Definition: vcodecs.c:66
union ast_frame::@172 data
struct ast_channel::@156 readq
struct video_desc * get_video_desc(struct ast_channel *c)
return the pointer to the video descriptor
Definition: chan_oss.c:309
int unsetenv(const char *name)
int size
Definition: console_video.h:62
Structure for mutex and tracking information.
Definition: lock.h:121
#define ASTERISK_FILE_VERSION(file, version)
Register/unregister a source code file with the core.
Definition: asterisk.h:180
#define ast_mutex_unlock(a)
Definition: lock.h:156
decoder_decode_f dec_run
Definition: vcodecs.c:67