avplay.c
1 /*
2  * avplay : Simple Media Player based on the Libav libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of Libav.
6  *
7  * Libav is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * Libav is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with Libav; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include <stdint.h>
27 
28 #include "libavutil/avstring.h"
29 #include "libavutil/colorspace.h"
30 #include "libavutil/mathematics.h"
31 #include "libavutil/pixdesc.h"
32 #include "libavutil/imgutils.h"
33 #include "libavutil/dict.h"
34 #include "libavutil/parseutils.h"
35 #include "libavutil/samplefmt.h"
36 #include "libavutil/time.h"
37 #include "libavformat/avformat.h"
38 #include "libavdevice/avdevice.h"
39 #include "libswscale/swscale.h"
40 #include "libavresample/avresample.h"
41 #include "libavutil/opt.h"
42 #include "libavcodec/avfft.h"
43 
44 #if CONFIG_AVFILTER
45 # include "libavfilter/avfilter.h"
46 # include "libavfilter/buffersink.h"
47 # include "libavfilter/buffersrc.h"
48 #endif
49 
50 #include "cmdutils.h"
51 
52 #include <SDL.h>
53 #include <SDL_thread.h>
54 
55 #ifdef __MINGW32__
56 #undef main /* We don't want SDL to override our main() */
57 #endif
58 
59 #include <assert.h>
60 
61 const char program_name[] = "avplay";
62 const int program_birth_year = 2003;
63 
64 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
65 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
66 #define MIN_FRAMES 5
67 
68 /* SDL audio buffer size, in samples. Should be small to have precise
69  A/V sync as SDL does not have hardware buffer fullness info. */
70 #define SDL_AUDIO_BUFFER_SIZE 1024
71 
72 /* no AV sync correction is done if below the AV sync threshold */
73 #define AV_SYNC_THRESHOLD 0.01
74 /* no AV correction is done if too big error */
75 #define AV_NOSYNC_THRESHOLD 10.0
76 
77 #define FRAME_SKIP_FACTOR 0.05
78 
79 /* maximum audio speed change to get correct sync */
80 #define SAMPLE_CORRECTION_PERCENT_MAX 10
81 
82 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
83 #define AUDIO_DIFF_AVG_NB 20
84 
85 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
86 #define SAMPLE_ARRAY_SIZE (2 * 65536)
87 
88 static int64_t sws_flags = SWS_BICUBIC;
89 
90 typedef struct PacketQueue {
91  AVPacketList *first_pkt, *last_pkt;
92  int nb_packets;
93  int size;
94  int abort_request;
95  SDL_mutex *mutex;
96  SDL_cond *cond;
97 } PacketQueue;
98 
99 #define VIDEO_PICTURE_QUEUE_SIZE 2
100 #define SUBPICTURE_QUEUE_SIZE 4
101 
102 typedef struct VideoPicture {
103  double pts; // presentation timestamp for this picture
104  double target_clock; // av_gettime() time at which this should be displayed ideally
105  int64_t pos; // byte position in file
106  SDL_Overlay *bmp;
107  int width, height; /* source height & width */
108  int allocated;
109  int reallocate;
110  enum AVPixelFormat pix_fmt;
111 
112  AVRational sar;
113 } VideoPicture;
114 
115 typedef struct SubPicture {
116  double pts; /* presentation time stamp for this picture */
117  AVSubtitle sub;
118 } SubPicture;
119 
120 enum {
121  AV_SYNC_AUDIO_MASTER, /* default choice */
122  AV_SYNC_VIDEO_MASTER,
123  AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
124 };
125 
126 typedef struct VideoState {
127  SDL_Thread *parse_tid;
128  SDL_Thread *video_tid;
129  SDL_Thread *refresh_tid;
133  int paused;
135  int seek_req;
136  int seek_flags;
137  int64_t seek_pos;
138  int64_t seek_rel;
141 
143 
145  double external_clock; /* external clock base */
147 
148  double audio_clock;
149  double audio_diff_cum; /* used for AV difference average computation */
159  unsigned int audio_buf_size; /* in bytes */
160  int audio_buf_index; /* in bytes */
172 
173  int show_audio; /* if true, display audio samples */
180  int xpos;
181 
182  SDL_Thread *subtitle_tid;
189  SDL_mutex *subpq_mutex;
190  SDL_cond *subpq_cond;
191 
192  double frame_timer;
195  double video_clock; // pts of last decoded frame / predicted pts of next decoded frame
199  double video_current_pts; // current displayed pts (different from video_clock if frame fifos are used)
200  double video_current_pts_drift; // video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
201  int64_t video_current_pos; // current displayed file pos
204  SDL_mutex *pictq_mutex;
205  SDL_cond *pictq_cond;
206 #if !CONFIG_AVFILTER
207  struct SwsContext *img_convert_ctx;
208 #endif
209 
210  // QETimer *video_timer;
211  char filename[1024];
213 
215 
216 #if CONFIG_AVFILTER
217  AVFilterContext *in_video_filter; // the first filter in the video chain
218  AVFilterContext *out_video_filter; // the last filter in the video chain
219 #endif
220 
221  float skip_frames;
223  int refresh;
224 } VideoState;
225 
226 /* options specified by the user */
228 static const char *input_filename;
229 static const char *window_title;
230 static int fs_screen_width;
231 static int fs_screen_height;
232 static int screen_width = 0;
233 static int screen_height = 0;
234 static int audio_disable;
235 static int video_disable;
236 static int wanted_stream[AVMEDIA_TYPE_NB] = {
237  [AVMEDIA_TYPE_AUDIO] = -1,
238  [AVMEDIA_TYPE_VIDEO] = -1,
239  [AVMEDIA_TYPE_SUBTITLE] = -1,
240 };
241 static int seek_by_bytes = -1;
242 static int display_disable;
243 static int show_status = 1;
244 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
245 static int64_t start_time = AV_NOPTS_VALUE;
246 static int64_t duration = AV_NOPTS_VALUE;
247 static int step = 0;
248 static int workaround_bugs = 1;
249 static int fast = 0;
250 static int genpts = 0;
251 static int idct = FF_IDCT_AUTO;
252 static enum AVDiscard skip_frame = AVDISCARD_DEFAULT;
253 static enum AVDiscard skip_idct = AVDISCARD_DEFAULT;
254 static enum AVDiscard skip_loop_filter = AVDISCARD_DEFAULT;
255 static int error_concealment = 3;
256 static int decoder_reorder_pts = -1;
257 static int autoexit;
258 static int exit_on_keydown;
259 static int exit_on_mousedown;
260 static int loop = 1;
261 static int framedrop = 1;
262 static int infinite_buffer = 0;
263 
264 static int rdftspeed = 20;
265 #if CONFIG_AVFILTER
266 static char *vfilters = NULL;
267 #endif
268 
269 /* current context */
270 static int is_full_screen;
271 static VideoState *cur_stream;
272 static int64_t audio_callback_time;
273 
274 static AVPacket flush_pkt;
275 
276 #define FF_ALLOC_EVENT (SDL_USEREVENT)
277 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
278 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
279 
280 static SDL_Surface *screen;
281 
282 static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
283 
284 /* packet queue handling */
285 static void packet_queue_init(PacketQueue *q)
286 {
287  memset(q, 0, sizeof(PacketQueue));
288  q->mutex = SDL_CreateMutex();
289  q->cond = SDL_CreateCond();
290  packet_queue_put(q, &flush_pkt);
291 }
292 
293 static void packet_queue_flush(PacketQueue *q)
294 {
295  AVPacketList *pkt, *pkt1;
296 
297  SDL_LockMutex(q->mutex);
298  for (pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
299  pkt1 = pkt->next;
300  av_free_packet(&pkt->pkt);
301  av_freep(&pkt);
302  }
303  q->last_pkt = NULL;
304  q->first_pkt = NULL;
305  q->nb_packets = 0;
306  q->size = 0;
307  SDL_UnlockMutex(q->mutex);
308 }
309 
310 static void packet_queue_end(PacketQueue *q)
311 {
312  packet_queue_flush(q);
313  SDL_DestroyMutex(q->mutex);
314  SDL_DestroyCond(q->cond);
315 }
316 
317 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
318 {
319  AVPacketList *pkt1;
320 
321  /* duplicate the packet */
322  if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
323  return -1;
324 
325  pkt1 = av_malloc(sizeof(AVPacketList));
326  if (!pkt1)
327  return -1;
328  pkt1->pkt = *pkt;
329  pkt1->next = NULL;
330 
331 
332  SDL_LockMutex(q->mutex);
333 
334  if (!q->last_pkt)
335 
336  q->first_pkt = pkt1;
337  else
338  q->last_pkt->next = pkt1;
339  q->last_pkt = pkt1;
340  q->nb_packets++;
341  q->size += pkt1->pkt.size + sizeof(*pkt1);
342  /* XXX: should duplicate packet data in DV case */
343  SDL_CondSignal(q->cond);
344 
345  SDL_UnlockMutex(q->mutex);
346  return 0;
347 }
348 
349 static void packet_queue_abort(PacketQueue *q)
350 {
351  SDL_LockMutex(q->mutex);
352 
353  q->abort_request = 1;
354 
355  SDL_CondSignal(q->cond);
356 
357  SDL_UnlockMutex(q->mutex);
358 }
359 
360 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
361 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
362 {
363  AVPacketList *pkt1;
364  int ret;
365 
366  SDL_LockMutex(q->mutex);
367 
368  for (;;) {
369  if (q->abort_request) {
370  ret = -1;
371  break;
372  }
373 
374  pkt1 = q->first_pkt;
375  if (pkt1) {
376  q->first_pkt = pkt1->next;
377  if (!q->first_pkt)
378  q->last_pkt = NULL;
379  q->nb_packets--;
380  q->size -= pkt1->pkt.size + sizeof(*pkt1);
381  *pkt = pkt1->pkt;
382  av_free(pkt1);
383  ret = 1;
384  break;
385  } else if (!block) {
386  ret = 0;
387  break;
388  } else {
389  SDL_CondWait(q->cond, q->mutex);
390  }
391  }
392  SDL_UnlockMutex(q->mutex);
393  return ret;
394 }
395 
396 static inline void fill_rectangle(SDL_Surface *screen,
397  int x, int y, int w, int h, int color)
398 {
399  SDL_Rect rect;
400  rect.x = x;
401  rect.y = y;
402  rect.w = w;
403  rect.h = h;
404  SDL_FillRect(screen, &rect, color);
405 }
406 
407 #define ALPHA_BLEND(a, oldp, newp, s)\
408 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
409 
410 #define RGBA_IN(r, g, b, a, s)\
411 {\
412  unsigned int v = ((const uint32_t *)(s))[0];\
413  a = (v >> 24) & 0xff;\
414  r = (v >> 16) & 0xff;\
415  g = (v >> 8) & 0xff;\
416  b = v & 0xff;\
417 }
418 
419 #define YUVA_IN(y, u, v, a, s, pal)\
420 {\
421  unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
422  a = (val >> 24) & 0xff;\
423  y = (val >> 16) & 0xff;\
424  u = (val >> 8) & 0xff;\
425  v = val & 0xff;\
426 }
427 
428 #define YUVA_OUT(d, y, u, v, a)\
429 {\
430  ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
431 }
432 
433 
434 #define BPP 1
435 
436 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
437 {
438  int wrap, wrap3, width2, skip2;
439  int y, u, v, a, u1, v1, a1, w, h;
440  uint8_t *lum, *cb, *cr;
441  const uint8_t *p;
442  const uint32_t *pal;
443  int dstx, dsty, dstw, dsth;
444 
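 /* NOTE (added, as I read the code below): the subtitle rectangle is palettized;
  * 'pal' maps 8-bit indices to AYUV entries (already converted to YCrCb by the
  * caller). Pixels are blended two at a time horizontally and two rows at a time
  * vertically, so each 4:2:0 chroma sample receives the average of up to four
  * alpha-weighted contributions (hence a1 >> 2 and the shift passed to ALPHA_BLEND). */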
445  dstw = av_clip(rect->w, 0, imgw);
446  dsth = av_clip(rect->h, 0, imgh);
447  dstx = av_clip(rect->x, 0, imgw - dstw);
448  dsty = av_clip(rect->y, 0, imgh - dsth);
449  lum = dst->data[0] + dsty * dst->linesize[0];
450  cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
451  cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
452 
453  width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
454  skip2 = dstx >> 1;
455  wrap = dst->linesize[0];
456  wrap3 = rect->pict.linesize[0];
457  p = rect->pict.data[0];
458  pal = (const uint32_t *)rect->pict.data[1]; /* Now in YCrCb! */
459 
460  if (dsty & 1) {
461  lum += dstx;
462  cb += skip2;
463  cr += skip2;
464 
465  if (dstx & 1) {
466  YUVA_IN(y, u, v, a, p, pal);
467  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
468  cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
469  cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
470  cb++;
471  cr++;
472  lum++;
473  p += BPP;
474  }
475  for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
476  YUVA_IN(y, u, v, a, p, pal);
477  u1 = u;
478  v1 = v;
479  a1 = a;
480  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
481 
482  YUVA_IN(y, u, v, a, p + BPP, pal);
483  u1 += u;
484  v1 += v;
485  a1 += a;
486  lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
487  cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
488  cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
489  cb++;
490  cr++;
491  p += 2 * BPP;
492  lum += 2;
493  }
494  if (w) {
495  YUVA_IN(y, u, v, a, p, pal);
496  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
497  cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
498  cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
499  p++;
500  lum++;
501  }
502  p += wrap3 - dstw * BPP;
503  lum += wrap - dstw - dstx;
504  cb += dst->linesize[1] - width2 - skip2;
505  cr += dst->linesize[2] - width2 - skip2;
506  }
507  for (h = dsth - (dsty & 1); h >= 2; h -= 2) {
508  lum += dstx;
509  cb += skip2;
510  cr += skip2;
511 
512  if (dstx & 1) {
513  YUVA_IN(y, u, v, a, p, pal);
514  u1 = u;
515  v1 = v;
516  a1 = a;
517  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
518  p += wrap3;
519  lum += wrap;
520  YUVA_IN(y, u, v, a, p, pal);
521  u1 += u;
522  v1 += v;
523  a1 += a;
524  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
525  cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
526  cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
527  cb++;
528  cr++;
529  p += -wrap3 + BPP;
530  lum += -wrap + 1;
531  }
532  for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
533  YUVA_IN(y, u, v, a, p, pal);
534  u1 = u;
535  v1 = v;
536  a1 = a;
537  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
538 
539  YUVA_IN(y, u, v, a, p + BPP, pal);
540  u1 += u;
541  v1 += v;
542  a1 += a;
543  lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
544  p += wrap3;
545  lum += wrap;
546 
547  YUVA_IN(y, u, v, a, p, pal);
548  u1 += u;
549  v1 += v;
550  a1 += a;
551  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
552 
553  YUVA_IN(y, u, v, a, p + BPP, pal);
554  u1 += u;
555  v1 += v;
556  a1 += a;
557  lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
558 
559  cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
560  cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
561 
562  cb++;
563  cr++;
564  p += -wrap3 + 2 * BPP;
565  lum += -wrap + 2;
566  }
567  if (w) {
568  YUVA_IN(y, u, v, a, p, pal);
569  u1 = u;
570  v1 = v;
571  a1 = a;
572  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
573  p += wrap3;
574  lum += wrap;
575  YUVA_IN(y, u, v, a, p, pal);
576  u1 += u;
577  v1 += v;
578  a1 += a;
579  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
580  cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
581  cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
582  cb++;
583  cr++;
584  p += -wrap3 + BPP;
585  lum += -wrap + 1;
586  }
587  p += wrap3 + (wrap3 - dstw * BPP);
588  lum += wrap + (wrap - dstw - dstx);
589  cb += dst->linesize[1] - width2 - skip2;
590  cr += dst->linesize[2] - width2 - skip2;
591  }
592  /* handle odd height */
593  if (h) {
594  lum += dstx;
595  cb += skip2;
596  cr += skip2;
597 
598  if (dstx & 1) {
599  YUVA_IN(y, u, v, a, p, pal);
600  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
601  cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
602  cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
603  cb++;
604  cr++;
605  lum++;
606  p += BPP;
607  }
608  for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
609  YUVA_IN(y, u, v, a, p, pal);
610  u1 = u;
611  v1 = v;
612  a1 = a;
613  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
614 
615  YUVA_IN(y, u, v, a, p + BPP, pal);
616  u1 += u;
617  v1 += v;
618  a1 += a;
619  lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
620  cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
621  cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
622  cb++;
623  cr++;
624  p += 2 * BPP;
625  lum += 2;
626  }
627  if (w) {
628  YUVA_IN(y, u, v, a, p, pal);
629  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
630  cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
631  cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
632  }
633  }
634 }
635 
636 static void free_subpicture(SubPicture *sp)
637 {
638  avsubtitle_free(&sp->sub);
639 }
640 
641 static void video_image_display(VideoState *is)
642 {
643  VideoPicture *vp;
644  SubPicture *sp;
645  AVPicture pict;
646  float aspect_ratio;
647  int width, height, x, y;
648  SDL_Rect rect;
649  int i;
650 
651  vp = &is->pictq[is->pictq_rindex];
652  if (vp->bmp) {
653 #if CONFIG_AVFILTER
654  if (!vp->sar.num)
655  aspect_ratio = 0;
656  else
657  aspect_ratio = av_q2d(vp->sar);
658 #else
659 
660  /* XXX: use variable in the frame */
661  if (is->video_st->sample_aspect_ratio.num)
662  aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
663  else if (is->video_st->codec->sample_aspect_ratio.num)
664  aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
665  else
666  aspect_ratio = 0;
667 #endif
668  if (aspect_ratio <= 0.0)
669  aspect_ratio = 1.0;
670  aspect_ratio *= (float)vp->width / (float)vp->height;
671 
672  if (is->subtitle_st)
673  {
674  if (is->subpq_size > 0)
675  {
676  sp = &is->subpq[is->subpq_rindex];
677 
678  if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
679  {
680  SDL_LockYUVOverlay (vp->bmp);
681 
682  pict.data[0] = vp->bmp->pixels[0];
683  pict.data[1] = vp->bmp->pixels[2];
684  pict.data[2] = vp->bmp->pixels[1];
685 
686  pict.linesize[0] = vp->bmp->pitches[0];
687  pict.linesize[1] = vp->bmp->pitches[2];
688  pict.linesize[2] = vp->bmp->pitches[1];
689 
690  for (i = 0; i < sp->sub.num_rects; i++)
691  blend_subrect(&pict, sp->sub.rects[i],
692  vp->bmp->w, vp->bmp->h);
693 
694  SDL_UnlockYUVOverlay (vp->bmp);
695  }
696  }
697  }
698 
699 
700  /* XXX: we suppose the screen has a 1.0 pixel ratio */
701  height = is->height;
702  width = ((int)rint(height * aspect_ratio)) & ~1;
703  if (width > is->width) {
704  width = is->width;
705  height = ((int)rint(width / aspect_ratio)) & ~1;
706  }
707  x = (is->width - width) / 2;
708  y = (is->height - height) / 2;
709  is->no_background = 0;
710  rect.x = is->xleft + x;
711  rect.y = is->ytop + y;
712  rect.w = width;
713  rect.h = height;
714  SDL_DisplayYUVOverlay(vp->bmp, &rect);
715  }
716 }
717 
718 /* get the current audio output buffer size, in bytes. With SDL, we
719  cannot have precise information */
720 static int audio_write_get_buf_size(VideoState *is)
721 {
722  return is->audio_buf_size - is->audio_buf_index;
723 }
724 
725 static inline int compute_mod(int a, int b)
726 {
727  a = a % b;
728  if (a >= 0)
729  return a;
730  else
731  return a + b;
732 }
733 
734 static void video_audio_display(VideoState *s)
735 {
736  int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
737  int ch, channels, h, h2, bgcolor, fgcolor;
738  int64_t time_diff;
739  int rdft_bits, nb_freq;
740 
741  for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
742  ;
743  nb_freq = 1 << (rdft_bits - 1);
744 
745  /* compute display index : center on currently output samples */
746  channels = s->sdl_channels;
747  nb_display_channels = channels;
748  if (!s->paused) {
749  int data_used = s->show_audio == 1 ? s->width : (2 * nb_freq);
750  n = 2 * channels;
751  delay = audio_write_get_buf_size(s);
752  delay /= n;
753 
754  /* to be more precise, we take into account the time spent since
755  the last buffer computation */
756  if (audio_callback_time) {
757  time_diff = av_gettime() - audio_callback_time;
758  delay -= (time_diff * s->sdl_sample_rate) / 1000000;
759  }
760 
761  delay += 2 * data_used;
762  if (delay < data_used)
763  delay = data_used;
764 
765  i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
766  if (s->show_audio == 1) {
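 /* NOTE (added, as I read the loop below): it scans up to the last 1000 samples
  * for a display start point: (b ^ c) < 0 means the two probed samples straddle a
  * zero crossing, and the score a - d picks the crossing with the largest amplitude
  * swing, so the waveform stays roughly phase-aligned from one refresh to the next. */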
767  h = INT_MIN;
768  for (i = 0; i < 1000; i += channels) {
769  int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
770  int a = s->sample_array[idx];
771  int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
772  int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
773  int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
774  int score = a - d;
775  if (h < score && (b ^ c) < 0) {
776  h = score;
777  i_start = idx;
778  }
779  }
780  }
781 
782  s->last_i_start = i_start;
783  } else {
784  i_start = s->last_i_start;
785  }
786 
787  bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
788  if (s->show_audio == 1) {
789  fill_rectangle(screen,
790  s->xleft, s->ytop, s->width, s->height,
791  bgcolor);
792 
793  fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
794 
795  /* total height for one channel */
796  h = s->height / nb_display_channels;
797  /* graph height / 2 */
798  h2 = (h * 9) / 20;
799  for (ch = 0; ch < nb_display_channels; ch++) {
800  i = i_start + ch;
801  y1 = s->ytop + ch * h + (h / 2); /* position of center line */
802  for (x = 0; x < s->width; x++) {
803  y = (s->sample_array[i] * h2) >> 15;
804  if (y < 0) {
805  y = -y;
806  ys = y1 - y;
807  } else {
808  ys = y1;
809  }
810  fill_rectangle(screen,
811  s->xleft + x, ys, 1, y,
812  fgcolor);
813  i += channels;
814  if (i >= SAMPLE_ARRAY_SIZE)
815  i -= SAMPLE_ARRAY_SIZE;
816  }
817  }
818 
819  fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
820 
821  for (ch = 1; ch < nb_display_channels; ch++) {
822  y = s->ytop + ch * h;
823  fill_rectangle(screen,
824  s->xleft, y, s->width, 1,
825  fgcolor);
826  }
827  SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
828  } else {
829  nb_display_channels= FFMIN(nb_display_channels, 2);
830  if (rdft_bits != s->rdft_bits) {
831  av_rdft_end(s->rdft);
832  av_free(s->rdft_data);
833  s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
834  s->rdft_bits = rdft_bits;
835  s->rdft_data = av_malloc(4 * nb_freq * sizeof(*s->rdft_data));
836  }
837  {
838  FFTSample *data[2];
839  for (ch = 0; ch < nb_display_channels; ch++) {
840  data[ch] = s->rdft_data + 2 * nb_freq * ch;
841  i = i_start + ch;
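 /* NOTE (added): the RDFT input is the most recent 2*nb_freq samples of this
  * channel, weighted by a parabolic (Welch) window 1 - w*w centred on the
  * buffer to reduce spectral leakage before av_rdft_calc(). */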
842  for (x = 0; x < 2 * nb_freq; x++) {
843  double w = (x-nb_freq) * (1.0 / nb_freq);
844  data[ch][x] = s->sample_array[i] * (1.0 - w * w);
845  i += channels;
846  if (i >= SAMPLE_ARRAY_SIZE)
847  i -= SAMPLE_ARRAY_SIZE;
848  }
849  av_rdft_calc(s->rdft, data[ch]);
850  }
851  /* This is the least efficient way to do this; we could of course
852  * access it directly, but it is more than fast enough. */
853  for (y = 0; y < s->height; y++) {
854  double w = 1 / sqrt(nb_freq);
855  int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
856  int b = (nb_display_channels == 2 ) ? sqrt(w * sqrt(data[1][2 * y + 0] * data[1][2 * y + 0]
857  + data[1][2 * y + 1] * data[1][2 * y + 1])) : a;
858  a = FFMIN(a, 255);
859  b = FFMIN(b, 255);
860  fgcolor = SDL_MapRGB(screen->format, a, b, (a + b) / 2);
861 
862  fill_rectangle(screen,
863  s->xpos, s->height-y, 1, 1,
864  fgcolor);
865  }
866  }
867  SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
868  s->xpos++;
869  if (s->xpos >= s->width)
870  s->xpos= s->xleft;
871  }
872 }
873 
874 static int video_open(VideoState *is)
875 {
876  int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
877  int w,h;
878 
879  if (is_full_screen) flags |= SDL_FULLSCREEN;
880  else flags |= SDL_RESIZABLE;
881 
882  if (is_full_screen && fs_screen_width) {
883  w = fs_screen_width;
884  h = fs_screen_height;
885  } else if (!is_full_screen && screen_width) {
886  w = screen_width;
887  h = screen_height;
888 #if CONFIG_AVFILTER
889  } else if (is->out_video_filter && is->out_video_filter->inputs[0]) {
890  w = is->out_video_filter->inputs[0]->w;
891  h = is->out_video_filter->inputs[0]->h;
892 #else
893  } else if (is->video_st && is->video_st->codec->width) {
894  w = is->video_st->codec->width;
895  h = is->video_st->codec->height;
896 #endif
897  } else {
898  w = 640;
899  h = 480;
900  }
901  if (screen && is->width == screen->w && screen->w == w
902  && is->height== screen->h && screen->h == h)
903  return 0;
904 
905 #if defined(__APPLE__) && !SDL_VERSION_ATLEAST(1, 2, 14)
906  /* setting bits_per_pixel = 0 or 32 causes blank video on OS X and older SDL */
907  screen = SDL_SetVideoMode(w, h, 24, flags);
908 #else
909  screen = SDL_SetVideoMode(w, h, 0, flags);
910 #endif
911  if (!screen) {
912  fprintf(stderr, "SDL: could not set video mode - exiting\n");
913  return -1;
914  }
915  if (!window_title)
916  window_title = input_filename;
917  SDL_WM_SetCaption(window_title, window_title);
918 
919  is->width = screen->w;
920  is->height = screen->h;
921 
922  return 0;
923 }
924 
925 /* display the current picture, if any */
926 static void video_display(VideoState *is)
927 {
928  if (!screen)
929  video_open(cur_stream);
930  if (is->audio_st && is->show_audio)
931  video_audio_display(is);
932  else if (is->video_st)
933  video_image_display(is);
934 }
935 
936 static int refresh_thread(void *opaque)
937 {
938  VideoState *is= opaque;
939  while (!is->abort_request) {
940  SDL_Event event;
941  event.type = FF_REFRESH_EVENT;
942  event.user.data1 = opaque;
943  if (!is->refresh) {
944  is->refresh = 1;
945  SDL_PushEvent(&event);
946  }
947  av_usleep(is->audio_st && is->show_audio ? rdftspeed * 1000 : 5000); // FIXME ideally we should wait for the correct time, but SDL's event passing is so slow it would be silly
948  }
949  return 0;
950 }
951 
952 /* get the current audio clock value */
953 static double get_audio_clock(VideoState *is)
954 {
955  double pts;
956  int hw_buf_size, bytes_per_sec;
957  pts = is->audio_clock;
958  hw_buf_size = audio_write_get_buf_size(is);
959  bytes_per_sec = 0;
960  if (is->audio_st) {
961  bytes_per_sec = is->sdl_sample_rate * is->sdl_channels *
962  av_get_bytes_per_sample(is->sdl_sample_fmt);
963  }
964  if (bytes_per_sec)
965  pts -= (double)hw_buf_size / bytes_per_sec;
966  return pts;
967 }
968 
969 /* get the current video clock value */
970 static double get_video_clock(VideoState *is)
971 {
972  if (is->paused) {
973  return is->video_current_pts;
974  } else {
975  return is->video_current_pts_drift + av_gettime() / 1000000.0;
976  }
977 }
978 
979 /* get the current external clock value */
980 static double get_external_clock(VideoState *is)
981 {
982  int64_t ti;
983  ti = av_gettime();
984  return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
985 }
986 
987 /* get the current master clock value */
988 static double get_master_clock(VideoState *is)
989 {
990  double val;
991 
992  if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
993  if (is->video_st)
994  val = get_video_clock(is);
995  else
996  val = get_audio_clock(is);
997  } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
998  if (is->audio_st)
999  val = get_audio_clock(is);
1000  else
1001  val = get_video_clock(is);
1002  } else {
1003  val = get_external_clock(is);
1004  }
1005  return val;
1006 }
1007 
1008 /* seek in the stream */
1009 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1010 {
1011  if (!is->seek_req) {
1012  is->seek_pos = pos;
1013  is->seek_rel = rel;
1014  is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1015  if (seek_by_bytes)
1016  is->seek_flags |= AVSEEK_FLAG_BYTE;
1017  is->seek_req = 1;
1018  }
1019 }
1020 
1021 /* pause or resume the video */
1022 static void stream_pause(VideoState *is)
1023 {
1024  if (is->paused) {
1025  is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1026  if (is->read_pause_return != AVERROR(ENOSYS)) {
1027  is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1028  }
1029  is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1030  }
1031  is->paused = !is->paused;
1032 }
1033 
1034 static double compute_target_time(double frame_current_pts, VideoState *is)
1035 {
1036  double delay, sync_threshold, diff;
1037 
1038  /* compute nominal delay */
1039  delay = frame_current_pts - is->frame_last_pts;
1040  if (delay <= 0 || delay >= 10.0) {
1041  /* if incorrect delay, use previous one */
1042  delay = is->frame_last_delay;
1043  } else {
1044  is->frame_last_delay = delay;
1045  }
1046  is->frame_last_pts = frame_current_pts;
1047 
1048  /* update delay to follow master synchronisation source */
1049  if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1050  is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1051  /* if video is slave, we try to correct big delays by
1052  duplicating or deleting a frame */
1053  diff = get_video_clock(is) - get_master_clock(is);
1054 
1055  /* skip or repeat frame. We take into account the
1056  delay to compute the threshold. I still don't know
1057  if it is the best guess */
1058  sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1059  if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1060  if (diff <= -sync_threshold)
1061  delay = 0;
1062  else if (diff >= sync_threshold)
1063  delay = 2 * delay;
1064  }
1065  }
1066  is->frame_timer += delay;
1067 
1068  av_dlog(NULL, "video: delay=%0.3f pts=%0.3f A-V=%f\n",
1069  delay, frame_current_pts, -diff);
1070 
1071  return is->frame_timer;
1072 }
1073 
1074 /* called to display each frame */
1075 static void video_refresh_timer(void *opaque)
1076 {
1077  VideoState *is = opaque;
1078  VideoPicture *vp;
1079 
1080  SubPicture *sp, *sp2;
1081 
1082  if (is->video_st) {
1083 retry:
1084  if (is->pictq_size == 0) {
1085  // nothing to do, no picture to display in the queue
1086  } else {
1087  double time = av_gettime() / 1000000.0;
1088  double next_target;
1089  /* dequeue the picture */
1090  vp = &is->pictq[is->pictq_rindex];
1091 
1092  if (time < vp->target_clock)
1093  return;
1094  /* update current video pts */
1095  is->video_current_pts = vp->pts;
1096  is->video_current_pts_drift = is->video_current_pts - time;
1097  is->video_current_pos = vp->pos;
1098  if (is->pictq_size > 1) {
1099  VideoPicture *nextvp = &is->pictq[(is->pictq_rindex + 1) % VIDEO_PICTURE_QUEUE_SIZE];
1100  assert(nextvp->target_clock >= vp->target_clock);
1101  next_target= nextvp->target_clock;
1102  } else {
1103  next_target = vp->target_clock + is->video_clock - vp->pts; // FIXME pass durations cleanly
1104  }
1105  if (framedrop && time > next_target) {
1106  is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
1107  if (is->pictq_size > 1 || time > next_target + 0.5) {
1108  /* update queue size and signal for next picture */
1109  if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1110  is->pictq_rindex = 0;
1111 
1112  SDL_LockMutex(is->pictq_mutex);
1113  is->pictq_size--;
1114  SDL_CondSignal(is->pictq_cond);
1115  SDL_UnlockMutex(is->pictq_mutex);
1116  goto retry;
1117  }
1118  }
1119 
1120  if (is->subtitle_st) {
1121  if (is->subtitle_stream_changed) {
1122  SDL_LockMutex(is->subpq_mutex);
1123 
1124  while (is->subpq_size) {
1125  free_subpicture(&is->subpq[is->subpq_rindex]);
1126 
1127  /* update queue size and signal for next picture */
1128  if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1129  is->subpq_rindex = 0;
1130 
1131  is->subpq_size--;
1132  }
1133  is->subtitle_stream_changed = 0;
1134 
1135  SDL_CondSignal(is->subpq_cond);
1136  SDL_UnlockMutex(is->subpq_mutex);
1137  } else {
1138  if (is->subpq_size > 0) {
1139  sp = &is->subpq[is->subpq_rindex];
1140 
1141  if (is->subpq_size > 1)
1142  sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1143  else
1144  sp2 = NULL;
1145 
1146  if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1147  || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1148  {
1149  free_subpicture(sp);
1150 
1151  /* update queue size and signal for next picture */
1152  if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1153  is->subpq_rindex = 0;
1154 
1155  SDL_LockMutex(is->subpq_mutex);
1156  is->subpq_size--;
1157  SDL_CondSignal(is->subpq_cond);
1158  SDL_UnlockMutex(is->subpq_mutex);
1159  }
1160  }
1161  }
1162  }
1163 
1164  /* display picture */
1165  if (!display_disable)
1166  video_display(is);
1167 
1168  /* update queue size and signal for next picture */
1169  if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1170  is->pictq_rindex = 0;
1171 
1172  SDL_LockMutex(is->pictq_mutex);
1173  is->pictq_size--;
1174  SDL_CondSignal(is->pictq_cond);
1175  SDL_UnlockMutex(is->pictq_mutex);
1176  }
1177  } else if (is->audio_st) {
1178  /* draw the next audio frame */
1179 
1180  /* if only audio stream, then display the audio bars (better
1181  than nothing, just to test the implementation) */
1182 
1183  /* display picture */
1184  if (!display_disable)
1185  video_display(is);
1186  }
1187  if (show_status) {
1188  static int64_t last_time;
1189  int64_t cur_time;
1190  int aqsize, vqsize, sqsize;
1191  double av_diff;
1192 
1193  cur_time = av_gettime();
1194  if (!last_time || (cur_time - last_time) >= 30000) {
1195  aqsize = 0;
1196  vqsize = 0;
1197  sqsize = 0;
1198  if (is->audio_st)
1199  aqsize = is->audioq.size;
1200  if (is->video_st)
1201  vqsize = is->videoq.size;
1202  if (is->subtitle_st)
1203  sqsize = is->subtitleq.size;
1204  av_diff = 0;
1205  if (is->audio_st && is->video_st)
1206  av_diff = get_audio_clock(is) - get_video_clock(is);
1207  printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64" \r",
1208  get_master_clock(is), av_diff, FFMAX(is->skip_frames - 1, 0), aqsize / 1024,
1209  vqsize / 1024, sqsize, is->pts_ctx.num_faulty_dts, is->pts_ctx.num_faulty_pts);
1210  fflush(stdout);
1211  last_time = cur_time;
1212  }
1213  }
1214 }
1215 
1216 static void stream_close(VideoState *is)
1217 {
1218  VideoPicture *vp;
1219  int i;
1220  /* XXX: use a special url_shutdown call to abort parse cleanly */
1221  is->abort_request = 1;
1222  SDL_WaitThread(is->parse_tid, NULL);
1223  SDL_WaitThread(is->refresh_tid, NULL);
1224 
1225  /* free all pictures */
1226  for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1227  vp = &is->pictq[i];
1228  if (vp->bmp) {
1229  SDL_FreeYUVOverlay(vp->bmp);
1230  vp->bmp = NULL;
1231  }
1232  }
1233  SDL_DestroyMutex(is->pictq_mutex);
1234  SDL_DestroyCond(is->pictq_cond);
1235  SDL_DestroyMutex(is->subpq_mutex);
1236  SDL_DestroyCond(is->subpq_cond);
1237 #if !CONFIG_AVFILTER
1238  if (is->img_convert_ctx)
1239  sws_freeContext(is->img_convert_ctx);
1240 #endif
1241  av_free(is);
1242 }
1243 
1244 static void do_exit(void)
1245 {
1246  if (cur_stream) {
1247  stream_close(cur_stream);
1248  cur_stream = NULL;
1249  }
1250  uninit_opts();
1251  avformat_network_deinit();
1252  if (show_status)
1253  printf("\n");
1254  SDL_Quit();
1255  av_log(NULL, AV_LOG_QUIET, "");
1256  exit(0);
1257 }
1258 
1259 /* allocate a picture (needs to be done in the main thread to avoid
1260  potential locking problems) */
1261 static void alloc_picture(void *opaque)
1262 {
1263  VideoState *is = opaque;
1264  VideoPicture *vp;
1265 
1266  vp = &is->pictq[is->pictq_windex];
1267 
1268  if (vp->bmp)
1269  SDL_FreeYUVOverlay(vp->bmp);
1270 
1271 #if CONFIG_AVFILTER
1272  vp->width = is->out_video_filter->inputs[0]->w;
1273  vp->height = is->out_video_filter->inputs[0]->h;
1274  vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1275 #else
1276  vp->width = is->video_st->codec->width;
1277  vp->height = is->video_st->codec->height;
1278  vp->pix_fmt = is->video_st->codec->pix_fmt;
1279 #endif
1280 
1281  vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1282  SDL_YV12_OVERLAY,
1283  screen);
1284  if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1285  /* SDL allocates a buffer smaller than requested if the video
1286  * overlay hardware is unable to support the requested size. */
1287  fprintf(stderr, "Error: the video system does not support an image\n"
1288  "size of %dx%d pixels. Try using -vf \"scale=w:h\"\n"
1289  "to reduce the image size.\n", vp->width, vp->height );
1290  do_exit();
1291  }
1292 
1293  SDL_LockMutex(is->pictq_mutex);
1294  vp->allocated = 1;
1295  SDL_CondSignal(is->pictq_cond);
1296  SDL_UnlockMutex(is->pictq_mutex);
1297 }
1298 
1299 /* The 'pts' parameter is the dts of the packet / pts of the frame and
1300  * guessed if not known. */
1301 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
1302 {
1303  VideoPicture *vp;
1304 #if CONFIG_AVFILTER
1305  AVPicture pict_src;
1306 #else
1307  int dst_pix_fmt = AV_PIX_FMT_YUV420P;
1308 #endif
1309  /* wait until we have space to put a new picture */
1310  SDL_LockMutex(is->pictq_mutex);
1311 
1312  if (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1313  is->skip_frames = FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0 - FRAME_SKIP_FACTOR));
1314 
1315  while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1316  !is->videoq.abort_request) {
1317  SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1318  }
1319  SDL_UnlockMutex(is->pictq_mutex);
1320 
1321  if (is->videoq.abort_request)
1322  return -1;
1323 
1324  vp = &is->pictq[is->pictq_windex];
1325 
1326  vp->sar = src_frame->sample_aspect_ratio;
1327 
1328  /* alloc or resize hardware picture buffer */
1329  if (!vp->bmp || vp->reallocate ||
1330 #if CONFIG_AVFILTER
1331  vp->width != is->out_video_filter->inputs[0]->w ||
1332  vp->height != is->out_video_filter->inputs[0]->h) {
1333 #else
1334  vp->width != is->video_st->codec->width ||
1335  vp->height != is->video_st->codec->height) {
1336 #endif
1337  SDL_Event event;
1338 
1339  vp->allocated = 0;
1340  vp->reallocate = 0;
1341 
1342  /* the allocation must be done in the main thread to avoid
1343  locking problems */
1344  event.type = FF_ALLOC_EVENT;
1345  event.user.data1 = is;
1346  SDL_PushEvent(&event);
1347 
1348  /* wait until the picture is allocated */
1349  SDL_LockMutex(is->pictq_mutex);
1350  while (!vp->allocated && !is->videoq.abort_request) {
1351  SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1352  }
1353  SDL_UnlockMutex(is->pictq_mutex);
1354 
1355  if (is->videoq.abort_request)
1356  return -1;
1357  }
1358 
1359  /* if the frame is not skipped, then display it */
1360  if (vp->bmp) {
1361  AVPicture pict = { { 0 } };
1362 
1363  /* get a pointer on the bitmap */
1364  SDL_LockYUVOverlay (vp->bmp);
1365 
1366  pict.data[0] = vp->bmp->pixels[0];
1367  pict.data[1] = vp->bmp->pixels[2];
1368  pict.data[2] = vp->bmp->pixels[1];
1369 
1370  pict.linesize[0] = vp->bmp->pitches[0];
1371  pict.linesize[1] = vp->bmp->pitches[2];
1372  pict.linesize[2] = vp->bmp->pitches[1];
1373 
1374 #if CONFIG_AVFILTER
1375  pict_src.data[0] = src_frame->data[0];
1376  pict_src.data[1] = src_frame->data[1];
1377  pict_src.data[2] = src_frame->data[2];
1378 
1379  pict_src.linesize[0] = src_frame->linesize[0];
1380  pict_src.linesize[1] = src_frame->linesize[1];
1381  pict_src.linesize[2] = src_frame->linesize[2];
1382 
1383  // FIXME use direct rendering
1384  av_picture_copy(&pict, &pict_src,
1385  vp->pix_fmt, vp->width, vp->height);
1386 #else
1387  av_opt_get_int(sws_opts, "sws_flags", 0, &sws_flags);
1388  is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1389  vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1390  dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1391  if (!is->img_convert_ctx) {
1392  fprintf(stderr, "Cannot initialize the conversion context\n");
1393  exit(1);
1394  }
1395  sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1396  0, vp->height, pict.data, pict.linesize);
1397 #endif
1398  /* update the bitmap content */
1399  SDL_UnlockYUVOverlay(vp->bmp);
1400 
1401  vp->pts = pts;
1402  vp->pos = pos;
1403 
1404  /* now we can update the picture count */
1405  if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1406  is->pictq_windex = 0;
1407  SDL_LockMutex(is->pictq_mutex);
1408  vp->target_clock = compute_target_time(vp->pts, is);
1409 
1410  is->pictq_size++;
1411  SDL_UnlockMutex(is->pictq_mutex);
1412  }
1413  return 0;
1414 }
1415 
1416 /* Compute the exact PTS for the picture if it is omitted in the stream.
1417  * The 'pts1' parameter is the dts of the packet / pts of the frame. */
1418 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1419 {
1420  double frame_delay, pts;
1421  int ret;
1422 
1423  pts = pts1;
1424 
1425  if (pts != 0) {
1426  /* update video clock with pts, if present */
1427  is->video_clock = pts;
1428  } else {
1429  pts = is->video_clock;
1430  }
1431  /* update video clock for next frame */
1432  frame_delay = av_q2d(is->video_st->codec->time_base);
1433  /* for MPEG2, the frame can be repeated, so we update the
1434  clock accordingly */
1435  frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1436  is->video_clock += frame_delay;
1437 
1438  ret = queue_picture(is, src_frame, pts, pos);
1439  av_frame_unref(src_frame);
1440  return ret;
1441 }
1442 
1443 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1444 {
1445  int got_picture, i;
1446 
1447  if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1448  return -1;
1449 
1450  if (pkt->data == flush_pkt.data) {
1451  avcodec_flush_buffers(is->video_st->codec);
1452 
1453  SDL_LockMutex(is->pictq_mutex);
1454  // Make sure there are no long delay timers (ideally we should just flush the queue, but that's harder)
1455  for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1456  is->pictq[i].target_clock= 0;
1457  }
1458  while (is->pictq_size && !is->videoq.abort_request) {
1459  SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1460  }
1461  is->video_current_pos = -1;
1462  SDL_UnlockMutex(is->pictq_mutex);
1463 
1464  init_pts_correction(&is->pts_ctx);
1465  is->frame_last_pts = AV_NOPTS_VALUE;
1466  is->frame_last_delay = 0;
1467  is->frame_timer = (double)av_gettime() / 1000000.0;
1468  is->skip_frames = 1;
1469  is->skip_frames_index = 0;
1470  return 0;
1471  }
1472 
1473  avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt);
1474 
1475  if (got_picture) {
1476  if (decoder_reorder_pts == -1) {
1477  *pts = guess_correct_pts(&is->pts_ctx, frame->pkt_pts, frame->pkt_dts);
1478  } else if (decoder_reorder_pts) {
1479  *pts = frame->pkt_pts;
1480  } else {
1481  *pts = frame->pkt_dts;
1482  }
1483 
1484  if (*pts == AV_NOPTS_VALUE) {
1485  *pts = 0;
1486  }
1487  if (is->video_st->sample_aspect_ratio.num) {
1488  frame->sample_aspect_ratio = is->video_st->sample_aspect_ratio;
1489  }
1490 
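 /* NOTE (added): skip_frames acts as a decode-to-display ratio; it grows above 1
  * when the player falls behind (see video_refresh_timer). The index accumulates
  * one per decoded frame, and only frames that push it past skip_frames are
  * returned; the rest are unreferenced and dropped below. */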
1491  is->skip_frames_index += 1;
1492  if (is->skip_frames_index >= is->skip_frames) {
1493  is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
1494  return 1;
1495  }
1496  av_frame_unref(frame);
1497  }
1498  return 0;
1499 }
1500 
1501 #if CONFIG_AVFILTER
1502 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1503 {
1504  char sws_flags_str[128];
1505  char buffersrc_args[256];
1506  int ret;
1507  AVFilterContext *filt_src = NULL, *filt_out = NULL, *filt_format;
1508  AVCodecContext *codec = is->video_st->codec;
1509 
1510  snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%"PRId64, sws_flags);
1511  graph->scale_sws_opts = av_strdup(sws_flags_str);
1512 
1513  snprintf(buffersrc_args, sizeof(buffersrc_args), "%d:%d:%d:%d:%d:%d:%d",
1514  codec->width, codec->height, codec->pix_fmt,
1515  is->video_st->time_base.num, is->video_st->time_base.den,
1516  codec->sample_aspect_ratio.num, codec->sample_aspect_ratio.den);
1517 
1518 
1519  if ((ret = avfilter_graph_create_filter(&filt_src,
1520  avfilter_get_by_name("buffer"),
1521  "src", buffersrc_args, NULL,
1522  graph)) < 0)
1523  return ret;
1524  if ((ret = avfilter_graph_create_filter(&filt_out,
1525  avfilter_get_by_name("buffersink"),
1526  "out", NULL, NULL, graph)) < 0)
1527  return ret;
1528 
1529  if ((ret = avfilter_graph_create_filter(&filt_format,
1530  avfilter_get_by_name("format"),
1531  "format", "yuv420p", NULL, graph)) < 0)
1532  return ret;
1533  if ((ret = avfilter_link(filt_format, 0, filt_out, 0)) < 0)
1534  return ret;
1535 
1536 
1537  if (vfilters) {
1538  AVFilterInOut *outputs = avfilter_inout_alloc();
1539  AVFilterInOut *inputs = avfilter_inout_alloc();
1540 
1541  outputs->name = av_strdup("in");
1542  outputs->filter_ctx = filt_src;
1543  outputs->pad_idx = 0;
1544  outputs->next = NULL;
1545 
1546  inputs->name = av_strdup("out");
1547  inputs->filter_ctx = filt_format;
1548  inputs->pad_idx = 0;
1549  inputs->next = NULL;
1550 
1551  if ((ret = avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL)) < 0)
1552  return ret;
1553  } else {
1554  if ((ret = avfilter_link(filt_src, 0, filt_format, 0)) < 0)
1555  return ret;
1556  }
1557 
1558  if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1559  return ret;
1560 
1561  is->in_video_filter = filt_src;
1562  is->out_video_filter = filt_out;
1563 
1564  return ret;
1565 }
1566 
1567 #endif /* CONFIG_AVFILTER */
1568 
1569 static int video_thread(void *arg)
1570 {
1571  AVPacket pkt = { 0 };
1572  VideoState *is = arg;
1573  AVFrame *frame = av_frame_alloc();
1574  int64_t pts_int;
1575  double pts;
1576  int ret;
1577 
1578 #if CONFIG_AVFILTER
1579  AVFilterGraph *graph = avfilter_graph_alloc();
1580  AVFilterContext *filt_out = NULL, *filt_in = NULL;
1581  int last_w = is->video_st->codec->width;
1582  int last_h = is->video_st->codec->height;
1583 
1584  if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1585  goto the_end;
1586  filt_in = is->in_video_filter;
1587  filt_out = is->out_video_filter;
1588 #endif
1589 
1590  for (;;) {
1591 #if CONFIG_AVFILTER
1592  AVRational tb;
1593 #endif
1594  while (is->paused && !is->videoq.abort_request)
1595  SDL_Delay(10);
1596 
1597  av_free_packet(&pkt);
1598 
1599  ret = get_video_frame(is, frame, &pts_int, &pkt);
1600  if (ret < 0)
1601  goto the_end;
1602 
1603  if (!ret)
1604  continue;
1605 
1606 #if CONFIG_AVFILTER
1607  if ( last_w != is->video_st->codec->width
1608  || last_h != is->video_st->codec->height) {
1609  av_dlog(NULL, "Changing size %dx%d -> %dx%d\n", last_w, last_h,
1610  is->video_st->codec->width, is->video_st->codec->height);
1611  avfilter_graph_free(&graph);
1612  graph = avfilter_graph_alloc();
1613  if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1614  goto the_end;
1615  filt_in = is->in_video_filter;
1616  filt_out = is->out_video_filter;
1617  last_w = is->video_st->codec->width;
1618  last_h = is->video_st->codec->height;
1619  }
1620 
1621  frame->pts = pts_int;
1622  ret = av_buffersrc_add_frame(filt_in, frame);
1623  if (ret < 0)
1624  goto the_end;
1625 
1626  while (ret >= 0) {
1627  ret = av_buffersink_get_frame(filt_out, frame);
1628  if (ret < 0) {
1629  ret = 0;
1630  break;
1631  }
1632 
1633  pts_int = frame->pts;
1634  tb = filt_out->inputs[0]->time_base;
1635  if (av_cmp_q(tb, is->video_st->time_base)) {
1636  av_unused int64_t pts1 = pts_int;
1637  pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1638  av_dlog(NULL, "video_thread(): "
1639  "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1640  tb.num, tb.den, pts1,
1641  is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1642  }
1643  pts = pts_int * av_q2d(is->video_st->time_base);
1644  ret = output_picture2(is, frame, pts, 0);
1645  }
1646 #else
1647  pts = pts_int * av_q2d(is->video_st->time_base);
1648  ret = output_picture2(is, frame, pts, pkt.pos);
1649 #endif
1650 
1651  if (ret < 0)
1652  goto the_end;
1653 
1654 
1655  if (step)
1656  if (cur_stream)
1657  stream_pause(cur_stream);
1658  }
1659  the_end:
1660 #if CONFIG_AVFILTER
1661  av_freep(&vfilters);
1662  avfilter_graph_free(&graph);
1663 #endif
1664  av_free_packet(&pkt);
1665  av_frame_free(&frame);
1666  return 0;
1667 }
1668 
1669 static int subtitle_thread(void *arg)
1670 {
1671  VideoState *is = arg;
1672  SubPicture *sp;
1673  AVPacket pkt1, *pkt = &pkt1;
1674  int got_subtitle;
1675  double pts;
1676  int i, j;
1677  int r, g, b, y, u, v, a;
1678 
1679  for (;;) {
1680  while (is->paused && !is->subtitleq.abort_request) {
1681  SDL_Delay(10);
1682  }
1683  if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1684  break;
1685 
1686  if (pkt->data == flush_pkt.data) {
1687  avcodec_flush_buffers(is->subtitle_st->codec);
1688  continue;
1689  }
1690  SDL_LockMutex(is->subpq_mutex);
1691  while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1692  !is->subtitleq.abort_request) {
1693  SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1694  }
1695  SDL_UnlockMutex(is->subpq_mutex);
1696 
1697  if (is->subtitleq.abort_request)
1698  return 0;
1699 
1700  sp = &is->subpq[is->subpq_windex];
1701 
1702  /* NOTE: ipts is the PTS of the _first_ picture beginning in
1703  this packet, if any */
1704  pts = 0;
1705  if (pkt->pts != AV_NOPTS_VALUE)
1706  pts = av_q2d(is->subtitle_st->time_base) * pkt->pts;
1707 
1708  avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
1709  &got_subtitle, pkt);
1710 
1711  if (got_subtitle && sp->sub.format == 0) {
1712  sp->pts = pts;
1713 
1714  for (i = 0; i < sp->sub.num_rects; i++)
1715  {
1716  for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1717  {
1718  RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1719  y = RGB_TO_Y_CCIR(r, g, b);
1720  u = RGB_TO_U_CCIR(r, g, b, 0);
1721  v = RGB_TO_V_CCIR(r, g, b, 0);
1722  YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1723  }
1724  }
1725 
1726  /* now we can update the picture count */
1727  if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1728  is->subpq_windex = 0;
1729  SDL_LockMutex(is->subpq_mutex);
1730  is->subpq_size++;
1731  SDL_UnlockMutex(is->subpq_mutex);
1732  }
1733  av_free_packet(pkt);
1734  }
1735  return 0;
1736 }
1737 
1738 /* copy samples for viewing in editor window */
1739 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1740 {
1741  int size, len;
1742 
1743  size = samples_size / sizeof(short);
1744  while (size > 0) {
1745  len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1746  if (len > size)
1747  len = size;
1748  memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1749  samples += len;
1750  is->sample_array_index += len;
1751  if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1752  is->sample_array_index = 0;
1753  size -= len;
1754  }
1755 }
1756 
1757 /* return the new audio buffer size (samples can be added or deleted
1758  to get better sync if video or external master clock) */
1759 static int synchronize_audio(VideoState *is, short *samples,
1760  int samples_size1, double pts)
1761 {
1762  int n, samples_size;
1763  double ref_clock;
1764 
1765  n = is->sdl_channels * av_get_bytes_per_sample(is->sdl_sample_fmt);
1766  samples_size = samples_size1;
1767 
1768  /* if not master, then we try to remove or add samples to correct the clock */
1769  if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1770  is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1771  double diff, avg_diff;
1772  int wanted_size, min_size, max_size, nb_samples;
1773 
1774  ref_clock = get_master_clock(is);
1775  diff = get_audio_clock(is) - ref_clock;
1776 
1777  if (diff < AV_NOSYNC_THRESHOLD) {
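 /* NOTE (added): audio_diff_cum is an exponentially weighted sum of A-V
  * differences; with audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB)
  * (set in stream_component_open), the most recent AUDIO_DIFF_AVG_NB samples
  * carry about 99% of the weight, and multiplying by (1 - coef) further down
  * normalizes the geometric series into an average. */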
1778  is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1779  if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1780  /* not enough measures to have a correct estimate */
1781  is->audio_diff_avg_count++;
1782  } else {
1783  /* estimate the A-V difference */
1784  avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1785 
1786  if (fabs(avg_diff) >= is->audio_diff_threshold) {
1787  wanted_size = samples_size + ((int)(diff * is->sdl_sample_rate) * n);
1788  nb_samples = samples_size / n;
1789 
1790  min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1791  max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1792  if (wanted_size < min_size)
1793  wanted_size = min_size;
1794  else if (wanted_size > max_size)
1795  wanted_size = max_size;
1796 
1797  /* add or remove samples to correct the sync */
1798  if (wanted_size < samples_size) {
1799  /* remove samples */
1800  samples_size = wanted_size;
1801  } else if (wanted_size > samples_size) {
1802  uint8_t *samples_end, *q;
1803  int nb;
1804 
1805  /* add samples */
1806  nb = wanted_size - samples_size;
1807  samples_end = (uint8_t *)samples + samples_size - n;
1808  q = samples_end + n;
1809  while (nb > 0) {
1810  memcpy(q, samples_end, n);
1811  q += n;
1812  nb -= n;
1813  }
1814  samples_size = wanted_size;
1815  }
1816  }
1817  av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1818  diff, avg_diff, samples_size - samples_size1,
1819  is->audio_clock, is->video_clock, is->audio_diff_threshold);
1820  }
1821  } else {
1822  /* too big difference : may be initial PTS errors, so
1823  reset A-V filter */
1824  is->audio_diff_avg_count = 0;
1825  is->audio_diff_cum = 0;
1826  }
1827  }
1828 
1829  return samples_size;
1830 }
1831 
1832 /* decode one audio frame and return its uncompressed size */
1833 static int audio_decode_frame(VideoState *is, double *pts_ptr)
1834 {
1835  AVPacket *pkt_temp = &is->audio_pkt_temp;
1836  AVPacket *pkt = &is->audio_pkt;
1837  AVCodecContext *dec = is->audio_st->codec;
1838  int n, len1, data_size, got_frame;
1839  double pts;
1840  int new_packet = 0;
1841  int flush_complete = 0;
1842 
1843  for (;;) {
1844  /* NOTE: the audio packet can contain several frames */
1845  while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet)) {
1846  int resample_changed, audio_resample;
1847 
1848  if (!is->frame) {
1849  if (!(is->frame = av_frame_alloc()))
1850  return AVERROR(ENOMEM);
1851  }
1852 
1853  if (flush_complete)
1854  break;
1855  new_packet = 0;
1856  len1 = avcodec_decode_audio4(dec, is->frame, &got_frame, pkt_temp);
1857  if (len1 < 0) {
1858  /* if error, we skip the frame */
1859  pkt_temp->size = 0;
1860  break;
1861  }
1862 
1863  pkt_temp->data += len1;
1864  pkt_temp->size -= len1;
1865 
1866  if (!got_frame) {
1867  /* stop sending empty packets if the decoder is finished */
1868  if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
1869  flush_complete = 1;
1870  continue;
1871  }
1872  data_size = av_samples_get_buffer_size(NULL, dec->channels,
1873  is->frame->nb_samples,
1874  is->frame->format, 1);
1875 
1876  audio_resample = is->frame->format != is->sdl_sample_fmt ||
1877  is->frame->channel_layout != is->sdl_channel_layout ||
1878  is->frame->sample_rate != is->sdl_sample_rate;
1879 
1880  resample_changed = is->frame->format != is->resample_sample_fmt ||
1881  is->frame->channel_layout != is->resample_channel_layout ||
1882  is->frame->sample_rate != is->resample_sample_rate;
1883 
1884  if ((!is->avr && audio_resample) || resample_changed) {
1885  int ret;
1886  if (is->avr)
1887  avresample_close(is->avr);
1888  else if (audio_resample) {
1889  is->avr = avresample_alloc_context();
1890  if (!is->avr) {
1891  fprintf(stderr, "error allocating AVAudioResampleContext\n");
1892  break;
1893  }
1894  }
1895  if (audio_resample) {
1896  av_opt_set_int(is->avr, "in_channel_layout", is->frame->channel_layout, 0);
1897  av_opt_set_int(is->avr, "in_sample_fmt", is->frame->format, 0);
1898  av_opt_set_int(is->avr, "in_sample_rate", is->frame->sample_rate, 0);
1899  av_opt_set_int(is->avr, "out_channel_layout", is->sdl_channel_layout, 0);
1900  av_opt_set_int(is->avr, "out_sample_fmt", is->sdl_sample_fmt, 0);
1901  av_opt_set_int(is->avr, "out_sample_rate", is->sdl_sample_rate, 0);
1902 
1903  if ((ret = avresample_open(is->avr)) < 0) {
1904  fprintf(stderr, "error initializing libavresample\n");
1905  break;
1906  }
1907  }
1908  is->resample_sample_fmt = is->frame->format;
1909  is->resample_channel_layout = is->frame->channel_layout;
1910  is->resample_sample_rate = is->frame->sample_rate;
1911  }
1912 
1913  if (audio_resample) {
1914  void *tmp_out;
1915  int out_samples, out_size, out_linesize;
1916  int osize = av_get_bytes_per_sample(is->sdl_sample_fmt);
1917  int nb_samples = is->frame->nb_samples;
1918 
1919  out_size = av_samples_get_buffer_size(&out_linesize,
1920  is->sdl_channels,
1921  nb_samples,
1922  is->sdl_sample_fmt, 0);
1923  tmp_out = av_realloc(is->audio_buf1, out_size);
1924  if (!tmp_out)
1925  return AVERROR(ENOMEM);
1926  is->audio_buf1 = tmp_out;
1927 
1928  out_samples = avresample_convert(is->avr,
1929  &is->audio_buf1,
1930  out_linesize, nb_samples,
1931  is->frame->data,
1932  is->frame->linesize[0],
1933  is->frame->nb_samples);
1934  if (out_samples < 0) {
1935  fprintf(stderr, "avresample_convert() failed\n");
1936  break;
1937  }
1938  is->audio_buf = is->audio_buf1;
1939  data_size = out_samples * osize * is->sdl_channels;
1940  } else {
1941  is->audio_buf = is->frame->data[0];
1942  }
1943 
1944  /* if no pts, then compute it */
1945  pts = is->audio_clock;
1946  *pts_ptr = pts;
1947  n = is->sdl_channels * av_get_bytes_per_sample(is->sdl_sample_fmt);
1948  is->audio_clock += (double)data_size /
1949  (double)(n * is->sdl_sample_rate);
1950 #ifdef DEBUG
1951  {
1952  static double last_clock;
1953  printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
1954  is->audio_clock - last_clock,
1955  is->audio_clock, pts);
1956  last_clock = is->audio_clock;
1957  }
1958 #endif
1959  return data_size;
1960  }
1961 
1962  /* free the current packet */
1963  if (pkt->data)
1964  av_free_packet(pkt);
1965  memset(pkt_temp, 0, sizeof(*pkt_temp));
1966 
1967  if (is->paused || is->audioq.abort_request) {
1968  return -1;
1969  }
1970 
1971  /* read next packet */
1972  if ((new_packet = packet_queue_get(&is->audioq, pkt, 1)) < 0)
1973  return -1;
1974 
1975  if (pkt->data == flush_pkt.data) {
1976  avcodec_flush_buffers(dec);
1977  flush_complete = 0;
1978  }
1979 
1980  *pkt_temp = *pkt;
1981 
1982  /* update the audio clock with the pts, if available */
1983  if (pkt->pts != AV_NOPTS_VALUE) {
1984  is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
1985  }
1986  }
1987 }
1988 
1989 /* prepare a new audio buffer */
1990 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
1991 {
1992  VideoState *is = opaque;
1993  int audio_size, len1;
1994  double pts;
1995 
1996  audio_callback_time = av_gettime();
1997 
1998  while (len > 0) {
1999  if (is->audio_buf_index >= is->audio_buf_size) {
2000  audio_size = audio_decode_frame(is, &pts);
2001  if (audio_size < 0) {
2002  /* if error, just output silence */
2003  is->audio_buf = is->silence_buf;
2004  is->audio_buf_size = sizeof(is->silence_buf);
2005  } else {
2006  if (is->show_audio)
2007  update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2008  audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2009  pts);
2010  is->audio_buf_size = audio_size;
2011  }
2012  is->audio_buf_index = 0;
2013  }
2014  len1 = is->audio_buf_size - is->audio_buf_index;
2015  if (len1 > len)
2016  len1 = len;
2017  memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2018  len -= len1;
2019  stream += len1;
2020  is->audio_buf_index += len1;
2021  }
2022 }
2023 
2024 /* open a given stream. Return 0 if OK */
2025 static int stream_component_open(VideoState *is, int stream_index)
2026 {
2027  AVFormatContext *ic = is->ic;
2028  AVCodecContext *avctx;
2029  AVCodec *codec;
2030  SDL_AudioSpec wanted_spec, spec;
2031  AVDictionary *opts;
2032  AVDictionaryEntry *t = NULL;
2033 
2034  if (stream_index < 0 || stream_index >= ic->nb_streams)
2035  return -1;
2036  avctx = ic->streams[stream_index]->codec;
2037 
2038  opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], NULL);
2039 
2040  codec = avcodec_find_decoder(avctx->codec_id);
2041  avctx->workaround_bugs = workaround_bugs;
2042  avctx->idct_algo = idct;
2043  avctx->skip_frame = skip_frame;
2044  avctx->skip_idct = skip_idct;
2045  avctx->skip_loop_filter = skip_loop_filter;
2046  avctx->error_concealment = error_concealment;
2047 
2048  if (fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2049 
2050  if (!av_dict_get(opts, "threads", NULL, 0))
2051  av_dict_set(&opts, "threads", "auto", 0);
2052  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO)
2053  av_dict_set(&opts, "refcounted_frames", "1", 0);
2054  if (!codec ||
2055  avcodec_open2(avctx, codec, &opts) < 0)
2056  return -1;
2057  if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2058  av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2059  return AVERROR_OPTION_NOT_FOUND;
2060  }
2061 
2062  /* prepare audio output */
2063  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2064  is->sdl_sample_rate = avctx->sample_rate;
2065 
2066  if (!avctx->channel_layout)
2068  if (!avctx->channel_layout) {
2069  fprintf(stderr, "unable to guess channel layout\n");
2070  return -1;
2071  }
2072  if (avctx->channels == 1)
2074  else
2077 
2078  wanted_spec.format = AUDIO_S16SYS;
2079  wanted_spec.freq = is->sdl_sample_rate;
2080  wanted_spec.channels = is->sdl_channels;
2081  wanted_spec.silence = 0;
2082  wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2083  wanted_spec.callback = sdl_audio_callback;
2084  wanted_spec.userdata = is;
2085  if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2086  fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2087  return -1;
2088  }
2089  is->audio_hw_buf_size = spec.size;
2093  is->resample_sample_rate = avctx->sample_rate;
2094  }
2095 
2096  ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2097  switch (avctx->codec_type) {
2098  case AVMEDIA_TYPE_AUDIO:
2099  is->audio_stream = stream_index;
2100  is->audio_st = ic->streams[stream_index];
2101  is->audio_buf_size = 0;
2102  is->audio_buf_index = 0;
2103 
2104  /* init averaging filter */
2105  is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2106  is->audio_diff_avg_count = 0;
2107  /* since we do not have a precise enough audio fifo fullness,
2108  we correct audio sync only if larger than this threshold */
2110 
2111  memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2112  packet_queue_init(&is->audioq);
2113  SDL_PauseAudio(0);
2114  break;
2115  case AVMEDIA_TYPE_VIDEO:
2116  is->video_stream = stream_index;
2117  is->video_st = ic->streams[stream_index];
2118 
2119  packet_queue_init(&is->videoq);
2120  is->video_tid = SDL_CreateThread(video_thread, is);
2121  break;
2122  case AVMEDIA_TYPE_SUBTITLE:
2123  is->subtitle_stream = stream_index;
2124  is->subtitle_st = ic->streams[stream_index];
2126 
2127  is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2128  break;
2129  default:
2130  break;
2131  }
2132  return 0;
2133 }
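stream_component_open() boils down to finding a decoder for the stream's codec ID and opening it with an options dictionary. A reduced sketch of that core step, using the same pre-AVCodecParameters API as this file (the helper name is hypothetical):

/* Open a decoder for one stream, passing per-codec options as an AVDictionary. */
static int open_decoder_for_stream(AVFormatContext *ic, int stream_index)
{
    AVCodecContext *avctx = ic->streams[stream_index]->codec;
    AVCodec *codec        = avcodec_find_decoder(avctx->codec_id);
    AVDictionary *opts    = NULL;
    int ret;

    if (!codec)
        return AVERROR_DECODER_NOT_FOUND;

    av_dict_set(&opts, "threads", "auto", 0);  /* let the decoder pick a thread count */
    ret = avcodec_open2(avctx, codec, &opts);  /* consumes recognized options */
    av_dict_free(&opts);
    return ret;
}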
2134 
2135 static void stream_component_close(VideoState *is, int stream_index)
2136 {
2137  AVFormatContext *ic = is->ic;
2138  AVCodecContext *avctx;
2139 
2140  if (stream_index < 0 || stream_index >= ic->nb_streams)
2141  return;
2142  avctx = ic->streams[stream_index]->codec;
2143 
2144  switch (avctx->codec_type) {
2145  case AVMEDIA_TYPE_AUDIO:
2146  packet_queue_abort(&is->audioq);
2147 
2148  SDL_CloseAudio();
2149 
2150  packet_queue_end(&is->audioq);
2151  av_free_packet(&is->audio_pkt);
2152  if (is->avr)
2153  avresample_free(&is->avr);
2154  av_freep(&is->audio_buf1);
2155  is->audio_buf = NULL;
2156  av_frame_free(&is->frame);
2157 
2158  if (is->rdft) {
2159  av_rdft_end(is->rdft);
2160  av_freep(&is->rdft_data);
2161  is->rdft = NULL;
2162  is->rdft_bits = 0;
2163  }
2164  break;
2165  case AVMEDIA_TYPE_VIDEO:
2166  packet_queue_abort(&is->videoq);
2167 
2168  /* note: we also signal this mutex to make sure we deblock the
2169  video thread in all cases */
2170  SDL_LockMutex(is->pictq_mutex);
2171  SDL_CondSignal(is->pictq_cond);
2172  SDL_UnlockMutex(is->pictq_mutex);
2173 
2174  SDL_WaitThread(is->video_tid, NULL);
2175 
2176  packet_queue_end(&is->videoq);
2177  break;
2178  case AVMEDIA_TYPE_SUBTITLE:
2180 
2181  /* note: we also signal this mutex to make sure we deblock the
2182  subtitle thread in all cases */
2183  SDL_LockMutex(is->subpq_mutex);
2184  is->subtitle_stream_changed = 1;
2185 
2186  SDL_CondSignal(is->subpq_cond);
2187  SDL_UnlockMutex(is->subpq_mutex);
2188 
2189  SDL_WaitThread(is->subtitle_tid, NULL);
2190 
2192  break;
2193  default:
2194  break;
2195  }
2196 
2197  ic->streams[stream_index]->discard = AVDISCARD_ALL;
2198  avcodec_close(avctx);
2199  switch (avctx->codec_type) {
2200  case AVMEDIA_TYPE_AUDIO:
2201  is->audio_st = NULL;
2202  is->audio_stream = -1;
2203  break;
2204  case AVMEDIA_TYPE_VIDEO:
2205  is->video_st = NULL;
2206  is->video_stream = -1;
2207  break;
2208  case AVMEDIA_TYPE_SUBTITLE:
2209  is->subtitle_st = NULL;
2210  is->subtitle_stream = -1;
2211  break;
2212  default:
2213  break;
2214  }
2215 }
2216 
2217 /* since we have only one decoding thread, we can use a global
2218  variable instead of a thread local variable */
2220 
2221 static int decode_interrupt_cb(void *ctx)
2222 {
2223  return global_video_state && global_video_state->abort_request;
2224 }
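decode_thread() installs this callback through AVFormatContext.interrupt_callback so that blocking network I/O can be aborted on shutdown. A self-contained sketch of the same wiring, with hypothetical names around the standard libavformat API:

/* Open an input with an interrupt callback; a non-zero return from the
 * callback makes blocking I/O inside libavformat bail out early. */
static volatile int abort_flag;

static int my_interrupt_cb(void *opaque)
{
    (void)opaque;
    return abort_flag;                         /* set from another thread to cancel I/O */
}

static int open_with_interrupt(AVFormatContext **ic, const char *url)
{
    *ic = avformat_alloc_context();
    if (!*ic)
        return AVERROR(ENOMEM);
    (*ic)->interrupt_callback.callback = my_interrupt_cb;
    (*ic)->interrupt_callback.opaque   = NULL;
    return avformat_open_input(ic, url, NULL, NULL);
}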
2225 
2226 /* this thread gets the stream from the disk or the network */
2227 static int decode_thread(void *arg)
2228 {
2229  VideoState *is = arg;
2230  AVFormatContext *ic = NULL;
2231  int err, i, ret;
2232  int st_index[AVMEDIA_TYPE_NB];
2233  AVPacket pkt1, *pkt = &pkt1;
2234  int eof = 0;
2235  int pkt_in_play_range = 0;
2236  AVDictionaryEntry *t;
2237  AVDictionary **opts;
2238  int orig_nb_streams;
2239 
2240  memset(st_index, -1, sizeof(st_index));
2241  is->video_stream = -1;
2242  is->audio_stream = -1;
2243  is->subtitle_stream = -1;
2244 
2245  global_video_state = is;
2246 
2247  ic = avformat_alloc_context();
2249  err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2250  if (err < 0) {
2251  print_error(is->filename, err);
2252  ret = -1;
2253  goto fail;
2254  }
2256  av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2258  goto fail;
2259  }
2260  is->ic = ic;
2261 
2262  if (genpts)
2263  ic->flags |= AVFMT_FLAG_GENPTS;
2264 
2266  orig_nb_streams = ic->nb_streams;
2267 
2268  err = avformat_find_stream_info(ic, opts);
2269  if (err < 0) {
2270  fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2271  ret = -1;
2272  goto fail;
2273  }
2274  for (i = 0; i < orig_nb_streams; i++)
2275  av_dict_free(&opts[i]);
2276  av_freep(&opts);
2277 
2278  if (ic->pb)
2279  ic->pb->eof_reached = 0; // FIXME hack: avplay should probably not use url_feof() to test for the end
2280 
2281  if (seek_by_bytes < 0)
2283 
2284  /* if a seek was requested, execute it */
2285  if (start_time != AV_NOPTS_VALUE) {
2286  int64_t timestamp;
2287 
2288  timestamp = start_time;
2289  /* add the stream start time */
2290  if (ic->start_time != AV_NOPTS_VALUE)
2291  timestamp += ic->start_time;
2292  ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2293  if (ret < 0) {
2294  fprintf(stderr, "%s: could not seek to position %0.3f\n",
2295  is->filename, (double)timestamp / AV_TIME_BASE);
2296  }
2297  }
2298 
2299  for (i = 0; i < ic->nb_streams; i++)
2300  ic->streams[i]->discard = AVDISCARD_ALL;
2301  if (!video_disable)
2302  st_index[AVMEDIA_TYPE_VIDEO] =
2305  if (!audio_disable)
2306  st_index[AVMEDIA_TYPE_AUDIO] =
2309  st_index[AVMEDIA_TYPE_VIDEO],
2310  NULL, 0);
2311  if (!video_disable)
2312  st_index[AVMEDIA_TYPE_SUBTITLE] =
2315  (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2316  st_index[AVMEDIA_TYPE_AUDIO] :
2317  st_index[AVMEDIA_TYPE_VIDEO]),
2318  NULL, 0);
2319  if (show_status) {
2320  av_dump_format(ic, 0, is->filename, 0);
2321  }
2322 
2323  /* open the streams */
2324  if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2325  stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2326  }
2327 
2328  ret = -1;
2329  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2330  ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2331  }
2332  is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2333  if (ret < 0) {
2334  if (!display_disable)
2335  is->show_audio = 2;
2336  }
2337 
2338  if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2339  stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2340  }
2341 
2342  if (is->video_stream < 0 && is->audio_stream < 0) {
2343  fprintf(stderr, "%s: could not open codecs\n", is->filename);
2344  ret = -1;
2345  goto fail;
2346  }
2347 
2348  for (;;) {
2349  if (is->abort_request)
2350  break;
2351  if (is->paused != is->last_paused) {
2352  is->last_paused = is->paused;
2353  if (is->paused)
2354  is->read_pause_return = av_read_pause(ic);
2355  else
2356  av_read_play(ic);
2357  }
2358 #if CONFIG_RTSP_DEMUXER
2359  if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2360  /* wait 10 ms to avoid trying to get another packet */
2361  /* XXX: horrible */
2362  SDL_Delay(10);
2363  continue;
2364  }
2365 #endif
2366  if (is->seek_req) {
2367  int64_t seek_target = is->seek_pos;
2368  int64_t seek_min = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2369  int64_t seek_max = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2370 // FIXME the +-2 is because rounding is not done in the correct direction when
2371 // generating the seek_pos/seek_rel variables
2372 
2373  ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2374  if (ret < 0) {
2375  fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2376  } else {
2377  if (is->audio_stream >= 0) {
2378  packet_queue_flush(&is->audioq);
2379  packet_queue_put(&is->audioq, &flush_pkt);
2380  }
2381  if (is->subtitle_stream >= 0) {
2383  packet_queue_put(&is->subtitleq, &flush_pkt);
2384  }
2385  if (is->video_stream >= 0) {
2386  packet_queue_flush(&is->videoq);
2387  packet_queue_put(&is->videoq, &flush_pkt);
2388  }
2389  }
2390  is->seek_req = 0;
2391  eof = 0;
2392  }
2393 
2394  /* if the queues are full, no need to read more */
2395  if (!infinite_buffer &&
2396  (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2397  || ( (is->audioq .size > MIN_AUDIOQ_SIZE || is->audio_stream < 0)
2398  && (is->videoq .nb_packets > MIN_FRAMES || is->video_stream < 0)
2399  && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream < 0)))) {
2400  /* wait 10 ms */
2401  SDL_Delay(10);
2402  continue;
2403  }
2404  if (eof) {
2405  if (is->video_stream >= 0) {
2406  av_init_packet(pkt);
2407  pkt->data = NULL;
2408  pkt->size = 0;
2409  pkt->stream_index = is->video_stream;
2410  packet_queue_put(&is->videoq, pkt);
2411  }
2412  if (is->audio_stream >= 0 &&
2414  av_init_packet(pkt);
2415  pkt->data = NULL;
2416  pkt->size = 0;
2417  pkt->stream_index = is->audio_stream;
2418  packet_queue_put(&is->audioq, pkt);
2419  }
2420  SDL_Delay(10);
2421  if (is->audioq.size + is->videoq.size + is->subtitleq.size == 0) {
2422  if (loop != 1 && (!loop || --loop)) {
2423  stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2424  } else if (autoexit) {
2425  ret = AVERROR_EOF;
2426  goto fail;
2427  }
2428  }
2429  continue;
2430  }
2431  ret = av_read_frame(ic, pkt);
2432  if (ret < 0) {
2433  if (ret == AVERROR_EOF || (ic->pb && ic->pb->eof_reached))
2434  eof = 1;
2435  if (ic->pb && ic->pb->error)
2436  break;
2437  SDL_Delay(100); /* wait for user event */
2438  continue;
2439  }
2440  /* check if packet is in play range specified by user, then queue, otherwise discard */
2441  pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2442  (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2443  av_q2d(ic->streams[pkt->stream_index]->time_base) -
2444  (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
2445  <= ((double)duration / 1000000);
2446  if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2447  packet_queue_put(&is->audioq, pkt);
2448  } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2449  packet_queue_put(&is->videoq, pkt);
2450  } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2451  packet_queue_put(&is->subtitleq, pkt);
2452  } else {
2453  av_free_packet(pkt);
2454  }
2455  }
2456  /* wait until the end */
2457  while (!is->abort_request) {
2458  SDL_Delay(100);
2459  }
2460 
2461  ret = 0;
2462  fail:
2463  /* disable interrupting */
2464  global_video_state = NULL;
2465 
2466  /* close each stream */
2467  if (is->audio_stream >= 0)
2469  if (is->video_stream >= 0)
2471  if (is->subtitle_stream >= 0)
2473  if (is->ic) {
2474  avformat_close_input(&is->ic);
2475  }
2476 
2477  if (ret != 0) {
2478  SDL_Event event;
2479 
2480  event.type = FF_QUIT_EVENT;
2481  event.user.data1 = is;
2482  SDL_PushEvent(&event);
2483  }
2484  return 0;
2485 }
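Stripped of queue management, pause handling and seeking, the heart of decode_thread() is a plain av_read_frame() loop that routes packets by stream index. A minimal sketch of that loop (hypothetical helper, same era of the libavformat API as this file):

/* Read packets until EOF and route them by stream index; a real player would
 * push matching packets onto the audio/video/subtitle queues instead. */
static int demux_loop(AVFormatContext *ic, int audio_idx, int video_idx)
{
    AVPacket pkt;
    int ret;

    while ((ret = av_read_frame(ic, &pkt)) >= 0) {
        if (pkt.stream_index == audio_idx || pkt.stream_index == video_idx) {
            /* hand the packet to a queue here */
        }
        av_free_packet(&pkt);                  /* pre-refcounted-AVPacket API, as above */
    }
    return ret == AVERROR_EOF ? 0 : ret;
}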
2486 
2487 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2488 {
2489  VideoState *is;
2490 
2491  is = av_mallocz(sizeof(VideoState));
2492  if (!is)
2493  return NULL;
2494  av_strlcpy(is->filename, filename, sizeof(is->filename));
2495  is->iformat = iformat;
2496  is->ytop = 0;
2497  is->xleft = 0;
2498 
2499  /* start video display */
2500  is->pictq_mutex = SDL_CreateMutex();
2501  is->pictq_cond = SDL_CreateCond();
2502 
2503  is->subpq_mutex = SDL_CreateMutex();
2504  is->subpq_cond = SDL_CreateCond();
2505 
2506  is->av_sync_type = av_sync_type;
2507  is->parse_tid = SDL_CreateThread(decode_thread, is);
2508  if (!is->parse_tid) {
2509  av_free(is);
2510  return NULL;
2511  }
2512  return is;
2513 }
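stream_open() only allocates state and hands the heavy lifting to decode_thread() via SDL_CreateThread(). As a reminder of the SDL 1.2 threading API it relies on, a tiny sketch with illustrative names:

/* Spawn a worker with SDL 1.2's two-argument SDL_CreateThread() and join it. */
static int worker(void *arg)
{
    (void)arg;                                 /* decode_thread() runs here in avplay */
    return 0;
}

static int run_and_join(void *state)
{
    int status;
    SDL_Thread *tid = SDL_CreateThread(worker, state);
    if (!tid)
        return -1;
    SDL_WaitThread(tid, &status);              /* joined the way stream_component_close() joins its threads */
    return status;
}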
2514 
2516 {
2517  AVFormatContext *ic = is->ic;
2518  int start_index, stream_index;
2519  AVStream *st;
2520 
2521  if (codec_type == AVMEDIA_TYPE_VIDEO)
2522  start_index = is->video_stream;
2523  else if (codec_type == AVMEDIA_TYPE_AUDIO)
2524  start_index = is->audio_stream;
2525  else
2526  start_index = is->subtitle_stream;
2527  if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2528  return;
2529  stream_index = start_index;
2530  for (;;) {
2531  if (++stream_index >= is->ic->nb_streams)
2532  {
2533  if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2534  {
2535  stream_index = -1;
2536  goto the_end;
2537  } else
2538  stream_index = 0;
2539  }
2540  if (stream_index == start_index)
2541  return;
2542  st = ic->streams[stream_index];
2543  if (st->codec->codec_type == codec_type) {
2544  /* check that parameters are OK */
2545  switch (codec_type) {
2546  case AVMEDIA_TYPE_AUDIO:
2547  if (st->codec->sample_rate != 0 &&
2548  st->codec->channels != 0)
2549  goto the_end;
2550  break;
2551  case AVMEDIA_TYPE_VIDEO:
2552  case AVMEDIA_TYPE_SUBTITLE:
2553  goto the_end;
2554  default:
2555  break;
2556  }
2557  }
2558  }
2559  the_end:
2560  stream_component_close(is, start_index);
2561  stream_component_open(is, stream_index);
2562 }
2563 
2564 
2565 static void toggle_full_screen(void)
2566 {
2567 #if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
2568  /* OS X needs to empty the picture_queue */
2569  int i;
2570  for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++)
2571  cur_stream->pictq[i].reallocate = 1;
2572 #endif
2574  video_open(cur_stream);
2575 }
2576 
2577 static void toggle_pause(void)
2578 {
2579  if (cur_stream)
2580  stream_pause(cur_stream);
2581  step = 0;
2582 }
2583 
2584 static void step_to_next_frame(void)
2585 {
2586  if (cur_stream) {
2587  /* if the stream is paused, unpause it, then step */
2588  if (cur_stream->paused)
2589  stream_pause(cur_stream);
2590  }
2591  step = 1;
2592 }
2593 
2594 static void toggle_audio_display(void)
2595 {
2596  if (cur_stream) {
2597  int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2598  cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2600  cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2601  bgcolor);
2602  SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2603  }
2604 }
2605 
2606 static void seek_chapter(VideoState *is, int incr)
2607 {
2608  int64_t pos = get_master_clock(is) * AV_TIME_BASE;
2609  int i;
2610 
2611  if (!is->ic->nb_chapters)
2612  return;
2613 
2614  /* find the current chapter */
2615  for (i = 0; i < is->ic->nb_chapters; i++) {
2616  AVChapter *ch = is->ic->chapters[i];
2617  if (av_compare_ts(pos, AV_TIME_BASE_Q, ch->start, ch->time_base) < 0) {
2618  i--;
2619  break;
2620  }
2621  }
2622 
2623  i += incr;
2624  i = FFMAX(i, 0);
2625  if (i >= is->ic->nb_chapters)
2626  return;
2627 
2628  av_log(NULL, AV_LOG_VERBOSE, "Seeking to chapter %d.\n", i);
2629  stream_seek(is, av_rescale_q(is->ic->chapters[i]->start, is->ic->chapters[i]->time_base,
2630  AV_TIME_BASE_Q), 0, 0);
2631 }
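The only subtlety in seek_chapter() is the time-base conversion: chapter start times are stored in each chapter's own time base and must be rescaled to AV_TIME_BASE units before seeking. A one-function sketch of that conversion (hypothetical helper name):

/* Convert a chapter's start time into AV_TIME_BASE (microsecond) units. */
static int64_t chapter_start_us(const AVFormatContext *ic, int idx)
{
    const AVChapter *ch = ic->chapters[idx];   /* caller guarantees idx < nb_chapters */
    return av_rescale_q(ch->start, ch->time_base, AV_TIME_BASE_Q);
}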
2632 
2633 /* handle an event sent by the GUI */
2634 static void event_loop(void)
2635 {
2636  SDL_Event event;
2637  double incr, pos, frac;
2638 
2639  for (;;) {
2640  double x;
2641  SDL_WaitEvent(&event);
2642  switch (event.type) {
2643  case SDL_KEYDOWN:
2644  if (exit_on_keydown) {
2645  do_exit();
2646  break;
2647  }
2648  switch (event.key.keysym.sym) {
2649  case SDLK_ESCAPE:
2650  case SDLK_q:
2651  do_exit();
2652  break;
2653  case SDLK_f:
2655  break;
2656  case SDLK_p:
2657  case SDLK_SPACE:
2658  toggle_pause();
2659  break;
2660  case SDLK_s: // S: Step to next frame
2662  break;
2663  case SDLK_a:
2664  if (cur_stream)
2666  break;
2667  case SDLK_v:
2668  if (cur_stream)
2670  break;
2671  case SDLK_t:
2672  if (cur_stream)
2674  break;
2675  case SDLK_w:
2677  break;
2678  case SDLK_PAGEUP:
2679  seek_chapter(cur_stream, 1);
2680  break;
2681  case SDLK_PAGEDOWN:
2682  seek_chapter(cur_stream, -1);
2683  break;
2684  case SDLK_LEFT:
2685  incr = -10.0;
2686  goto do_seek;
2687  case SDLK_RIGHT:
2688  incr = 10.0;
2689  goto do_seek;
2690  case SDLK_UP:
2691  incr = 60.0;
2692  goto do_seek;
2693  case SDLK_DOWN:
2694  incr = -60.0;
2695  do_seek:
2696  if (cur_stream) {
2697  if (seek_by_bytes) {
2698  if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos >= 0) {
2699  pos = cur_stream->video_current_pos;
2700  } else if (cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos >= 0) {
2701  pos = cur_stream->audio_pkt.pos;
2702  } else
2703  pos = avio_tell(cur_stream->ic->pb);
2704  if (cur_stream->ic->bit_rate)
2705  incr *= cur_stream->ic->bit_rate / 8.0;
2706  else
2707  incr *= 180000.0;
2708  pos += incr;
2709  stream_seek(cur_stream, pos, incr, 1);
2710  } else {
2711  pos = get_master_clock(cur_stream);
2712  pos += incr;
2713  stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2714  }
2715  }
2716  break;
2717  default:
2718  break;
2719  }
2720  break;
2721  case SDL_MOUSEBUTTONDOWN:
2722  if (exit_on_mousedown) {
2723  do_exit();
2724  break;
2725  }
2726  case SDL_MOUSEMOTION:
2727  if (event.type == SDL_MOUSEBUTTONDOWN) {
2728  x = event.button.x;
2729  } else {
2730  if (event.motion.state != SDL_PRESSED)
2731  break;
2732  x = event.motion.x;
2733  }
2734  if (cur_stream) {
2735  if (seek_by_bytes || cur_stream->ic->duration <= 0) {
2736  uint64_t size = avio_size(cur_stream->ic->pb);
2737  stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2738  } else {
2739  int64_t ts;
2740  int ns, hh, mm, ss;
2741  int tns, thh, tmm, tss;
2742  tns = cur_stream->ic->duration / 1000000LL;
2743  thh = tns / 3600;
2744  tmm = (tns % 3600) / 60;
2745  tss = (tns % 60);
2746  frac = x / cur_stream->width;
2747  ns = frac * tns;
2748  hh = ns / 3600;
2749  mm = (ns % 3600) / 60;
2750  ss = (ns % 60);
2751  fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
2752  hh, mm, ss, thh, tmm, tss);
2753  ts = frac * cur_stream->ic->duration;
2754  if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2755  ts += cur_stream->ic->start_time;
2756  stream_seek(cur_stream, ts, 0, 0);
2757  }
2758  }
2759  break;
2760  case SDL_VIDEORESIZE:
2761  if (cur_stream) {
2762  screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2763  SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2764  screen_width = cur_stream->width = event.resize.w;
2765  screen_height = cur_stream->height = event.resize.h;
2766  }
2767  break;
2768  case SDL_QUIT:
2769  case FF_QUIT_EVENT:
2770  do_exit();
2771  break;
2772  case FF_ALLOC_EVENT:
2773  video_open(event.user.data1);
2774  alloc_picture(event.user.data1);
2775  break;
2776  case FF_REFRESH_EVENT:
2777  video_refresh_timer(event.user.data1);
2778  cur_stream->refresh = 0;
2779  break;
2780  default:
2781  break;
2782  }
2783  }
2784 }
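The do_seek path above turns a +/-10 s or +/-60 s increment into a stream_seek() call, which eventually reaches avformat_seek_file(). Ignoring the byte-seeking branch, the conversion can be expressed directly as in this sketch (hypothetical names, not the player's own helper):

/* Seek relative to the current master clock position, given in seconds. */
static int seek_relative(AVFormatContext *ic, double cur_pos_sec, double incr_sec)
{
    int64_t target = (int64_t)((cur_pos_sec + incr_sec) * AV_TIME_BASE);
    return avformat_seek_file(ic, -1, INT64_MIN, target, INT64_MAX, 0);
}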
2785 
2786 static int opt_frame_size(void *optctx, const char *opt, const char *arg)
2787 {
2789  "Option '%s' has been removed, use private format options instead\n", opt);
2790  return AVERROR(EINVAL);
2791 }
2792 
2793 static int opt_width(void *optctx, const char *opt, const char *arg)
2794 {
2795  screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2796  return 0;
2797 }
2798 
2799 static int opt_height(void *optctx, const char *opt, const char *arg)
2800 {
2801  screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2802  return 0;
2803 }
2804 
2805 static int opt_format(void *optctx, const char *opt, const char *arg)
2806 {
2807  file_iformat = av_find_input_format(arg);
2808  if (!file_iformat) {
2809  fprintf(stderr, "Unknown input format: %s\n", arg);
2810  return AVERROR(EINVAL);
2811  }
2812  return 0;
2813 }
2814 
2815 static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
2816 {
2818  "Option '%s' has been removed, use private format options instead\n", opt);
2819  return AVERROR(EINVAL);
2820 }
2821 
2822 static int opt_sync(void *optctx, const char *opt, const char *arg)
2823 {
2824  if (!strcmp(arg, "audio"))
2826  else if (!strcmp(arg, "video"))
2828  else if (!strcmp(arg, "ext"))
2830  else {
2831  fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2832  exit(1);
2833  }
2834  return 0;
2835 }
2836 
2837 static int opt_seek(void *optctx, const char *opt, const char *arg)
2838 {
2839  start_time = parse_time_or_die(opt, arg, 1);
2840  return 0;
2841 }
2842 
2843 static int opt_duration(void *optctx, const char *opt, const char *arg)
2844 {
2845  duration = parse_time_or_die(opt, arg, 1);
2846  return 0;
2847 }
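Both opt_seek() and opt_duration() rely on parse_time_or_die() from cmdutils, which is built on av_parse_time(). The underlying conversion of an "-ss"/"-t" argument into microseconds looks roughly like this sketch (hypothetical helper; only av_parse_time() is real API):

/* Parse a duration string such as "1:30" or "90" into microseconds. */
static int64_t parse_position(const char *str)
{
    int64_t us = 0;
    if (av_parse_time(&us, str, 1) < 0)        /* 1 = parse as a duration */
        return AV_NOPTS_VALUE;
    return us;                                 /* "1:30" -> 90000000 */
}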
2848 
2849 static const OptionDef options[] = {
2850 #include "cmdutils_common_opts.h"
2851  { "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
2852  { "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
2853  { "s", HAS_ARG | OPT_VIDEO, { .func_arg = opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
2854  { "fs", OPT_BOOL, { &is_full_screen }, "force full screen" },
2855  { "an", OPT_BOOL, { &audio_disable }, "disable audio" },
2856  { "vn", OPT_BOOL, { &video_disable }, "disable video" },
2857  { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_number" },
2858  { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_number" },
2859  { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_number" },
2860  { "ss", HAS_ARG, { .func_arg = opt_seek }, "seek to a given position in seconds", "pos" },
2861  { "t", HAS_ARG, { .func_arg = opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
2862  { "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
2863  { "nodisp", OPT_BOOL, { &display_disable }, "disable graphical display" },
2864  { "f", HAS_ARG, { .func_arg = opt_format }, "force format", "fmt" },
2865  { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_frame_pix_fmt }, "set pixel format", "format" },
2866  { "stats", OPT_BOOL | OPT_EXPERT, { &show_status }, "show status", "" },
2867  { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, { &workaround_bugs }, "workaround bugs", "" },
2868  { "fast", OPT_BOOL | OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
2869  { "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" },
2870  { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
2871  { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_loop_filter }, "", "" },
2872  { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_frame }, "", "" },
2873  { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_idct }, "", "" },
2874  { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, { &idct }, "set idct algo", "algo" },
2875  { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, { &error_concealment }, "set error concealment options", "bit_mask" },
2876  { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
2877  { "autoexit", OPT_BOOL | OPT_EXPERT, { &autoexit }, "exit at the end", "" },
2878  { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
2879  { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
2880  { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
2881  { "framedrop", OPT_BOOL | OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
2882  { "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
2883  { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
2884 #if CONFIG_AVFILTER
2885  { "vf", OPT_STRING | HAS_ARG, { &vfilters }, "video filters", "filter list" },
2886 #endif
2887  { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
2888  { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { opt_default }, "generic catch all option", "" },
2889  { "i", 0, { NULL }, "avconv compatibility dummy option", ""},
2890  { NULL, },
2891 };
2892 
2893 static void show_usage(void)
2894 {
2895  printf("Simple media player\n");
2896  printf("usage: %s [options] input_file\n", program_name);
2897  printf("\n");
2898 }
2899 
2900 void show_help_default(const char *opt, const char *arg)
2901 {
2903  show_usage();
2904  show_help_options(options, "Main options:", 0, OPT_EXPERT, 0);
2905  show_help_options(options, "Advanced options:", OPT_EXPERT, 0, 0);
2906  printf("\n");
2909 #if !CONFIG_AVFILTER
2911 #endif
2912  printf("\nWhile playing:\n"
2913  "q, ESC quit\n"
2914  "f toggle full screen\n"
2915  "p, SPC pause\n"
2916  "a cycle audio channel\n"
2917  "v cycle video channel\n"
2918  "t cycle subtitle channel\n"
2919  "w show audio waves\n"
2920  "s activate frame-step mode\n"
2921  "left/right seek backward/forward 10 seconds\n"
2922  "down/up seek backward/forward 1 minute\n"
2923  "mouse click seek to percentage in file corresponding to fraction of width\n"
2924  );
2925 }
2926 
2927 static void opt_input_file(void *optctx, const char *filename)
2928 {
2929  if (input_filename) {
2930  fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
2931  filename, input_filename);
2932  exit(1);
2933  }
2934  if (!strcmp(filename, "-"))
2935  filename = "pipe:";
2936  input_filename = filename;
2937 }
2938 
2939 /* Called from main() */
2940 int main(int argc, char **argv)
2941 {
2942  int flags;
2943 
2945  parse_loglevel(argc, argv, options);
2946 
2947  /* register all codecs, demuxers and protocols */
2949 #if CONFIG_AVDEVICE
2951 #endif
2952 #if CONFIG_AVFILTER
2954 #endif
2955  av_register_all();
2957 
2958  init_opts();
2959 
2960  show_banner();
2961 
2962  parse_options(NULL, argc, argv, options, opt_input_file);
2963 
2964  if (!input_filename) {
2965  show_usage();
2966  fprintf(stderr, "An input file must be specified\n");
2967  fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
2968  exit(1);
2969  }
2970 
2971  if (display_disable) {
2972  video_disable = 1;
2973  }
2974  flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
2975 #if !defined(__MINGW32__) && !defined(__APPLE__)
2976  flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
2977 #endif
2978  if (SDL_Init (flags)) {
2979  fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
2980  exit(1);
2981  }
2982 
2983  if (!display_disable) {
2984  const SDL_VideoInfo *vi = SDL_GetVideoInfo();
2985  fs_screen_width = vi->current_w;
2986  fs_screen_height = vi->current_h;
2987  }
2988 
2989  SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
2990  SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
2991  SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
2992 
2993  av_init_packet(&flush_pkt);
2994  flush_pkt.data = (uint8_t *)&flush_pkt;
2995 
2996  cur_stream = stream_open(input_filename, file_iformat);
2997 
2998  event_loop();
2999 
3000  /* never returns */
3001 
3002  return 0;
3003 }
unsigned int nb_chapters
Number of chapters in AVChapter array.
Definition: avformat.h:1119
SDL_Overlay * bmp
Definition: avplay.c:106
uint64_t av_get_default_channel_layout(int nb_channels)
Return default channel layout for a given number of channels.
AVPacketList * first_pkt
Definition: avplay.c:91
int64_t num_faulty_dts
Number of incorrect PTS values so far.
Definition: cmdutils.h:477
const struct AVCodec * codec
Definition: avcodec.h:1059
void * av_malloc(size_t size)
Allocate a block of size bytes with alignment suitable for all memory accesses (including vectors if ...
Definition: mem.c:62
int width
Definition: avplay.c:212
#define OPT_EXPERT
Definition: cmdutils.h:144
#define CODEC_FLAG2_FAST
Allow non spec compliant speedup tricks.
Definition: avcodec.h:664
int64_t avio_size(AVIOContext *s)
Get the filesize.
Definition: aviobuf.c:241
int size
int linesize[AV_NUM_DATA_POINTERS]
number of bytes per line
Definition: avcodec.h:3028
void av_free_packet(AVPacket *pkt)
Free a packet.
Definition: avpacket.c:243
AVStream * subtitle_st
Definition: avplay.c:185
This structure describes decoded (raw) audio or video data.
Definition: frame.h:135
double target_clock
Definition: avplay.c:104
static double rint(double x)
Definition: libm.h:130
int x
top left corner of pict, undefined when pict is not set
Definition: avcodec.h:3059
int64_t num_faulty_pts
Definition: cmdutils.h:476
#define SWS_BICUBIC
Definition: swscale.h:59
AVIOInterruptCB interrupt_callback
Custom interrupt callbacks for the I/O layer.
Definition: avformat.h:1163
double frame_timer
Definition: avplay.c:192
#define OPT_VIDEO
Definition: cmdutils.h:146
VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE]
Definition: avplay.c:202
static int subtitle_thread(void *arg)
Definition: avplay.c:1669
static int64_t sws_flags
Definition: avplay.c:88
misc image utilities
AVFilterGraph * avfilter_graph_alloc(void)
Allocate a filter graph.
Definition: avfiltergraph.c:71
static const AVFilterPad outputs[]
Definition: af_ashowinfo.c:232
Main libavfilter public API header.
int64_t pos
byte position in stream, -1 if unknown
Definition: avcodec.h:998
int avformat_open_input(AVFormatContext **ps, const char *filename, AVInputFormat *fmt, AVDictionary **options)
Open an input stream and read the header.
Definition: utils.c:247
AVRational sar
Definition: avplay.c:112
Memory buffer source API.
static int workaround_bugs
Definition: avplay.c:248
void show_banner(void)
Print the program banner to stderr.
Definition: cmdutils.c:815
static void free_subpicture(SubPicture *sp)
Definition: avplay.c:636
int avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx)
Check validity and configure all the links and formats in the graph.
int seek_flags
Definition: avplay.c:136
struct AVFilterInOut * next
next input/input in the list, NULL if this is the last
Definition: avfilter.h:1102
static void video_display(VideoState *is)
Definition: avplay.c:926
static int show_status
Definition: avplay.c:243
#define OPT_AUDIO
Definition: cmdutils.h:147
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown)
Definition: avformat.h:769
#define CONFIG_AVFILTER
Definition: config.h:358
int num
numerator
Definition: rational.h:44
int repeat_pict
When decoding, this signals how much the picture must be delayed.
Definition: frame.h:315
int nb_colors
number of colors in pict, undefined when pict is not set
Definition: avcodec.h:3063
static void stream_component_close(VideoState *is, int stream_index)
Definition: avplay.c:2135
int size
Definition: avcodec.h:974
static void toggle_pause(void)
Definition: avplay.c:2577
double audio_diff_cum
Definition: avplay.c:149
Various defines for YUV<->RGB conversion.
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel...
Definition: avcodec.h:1429
void avfilter_graph_free(AVFilterGraph **graph)
Free a graph, destroy its links, and set *graph to NULL.
AVInputFormat * iformat
Definition: avplay.c:130
enum AVMediaType codec_type
Definition: rtp.c:36
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
Definition: avplay.c:361
int paused
Definition: avplay.c:133
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1254
double video_current_pts_drift
Definition: avplay.c:200
int abort_request
Definition: avplay.c:94
static int av_cmp_q(AVRational a, AVRational b)
Compare two rationals.
Definition: rational.h:55
SDL_cond * subpq_cond
Definition: avplay.c:190
static int audio_write_get_buf_size(VideoState *is)
Definition: avplay.c:720
unsigned num_rects
Definition: avcodec.h:3087
static void packet_queue_abort(PacketQueue *q)
Definition: avplay.c:349
double video_clock
Definition: avplay.c:195
void avdevice_register_all(void)
Initialize libavdevice and register all the input and output devices.
Definition: alldevices.c:41
double audio_diff_threshold
Definition: avplay.c:151
enum AVPixelFormat pix_fmt
Definition: avplay.c:110
static void video_image_display(VideoState *is)
Definition: avplay.c:641
int pictq_rindex
Definition: avplay.c:203
SDL_Thread * parse_tid
Definition: avplay.c:127
av_dlog(ac->avr,"%d samples - audio_convert: %s to %s (%s)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt), use_generic?ac->func_descr_generic:ac->func_descr)
int av_usleep(unsigned usec)
Sleep for a period of time.
Definition: time.c:54
discard all
Definition: avcodec.h:568
const AVClass * sws_get_class(void)
Get the AVClass for swsContext.
Definition: options.c:71
#define AV_CH_LAYOUT_STEREO
static void packet_queue_flush(PacketQueue *q)
Definition: avplay.c:293
AVStream * audio_st
Definition: avplay.c:153
void avcodec_register_all(void)
Register all the codecs, parsers and bitstream filters which were enabled at configuration time...
Definition: allcodecs.c:68
int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, int *got_sub_ptr, AVPacket *avpkt)
Decode a subtitle message.
Definition: utils.c:1677
PtsCorrectionContext pts_ctx
Definition: avplay.c:214
int av_dup_packet(AVPacket *pkt)
Definition: avpacket.c:190
four components are given, that's all.
Definition: avcodec.h:3026
const int program_birth_year
program birth year, defined by the program for show_banner()
Definition: avplay.c:62
AVCodec.
Definition: avcodec.h:2796
AVDictionary * filter_codec_opts(AVDictionary *opts, enum AVCodecID codec_id, AVFormatContext *s, AVStream *st, AVCodec *codec)
Filter out options for given codec.
Definition: cmdutils.c:1589
void init_pts_correction(PtsCorrectionContext *ctx)
Reset the state of the PtsCorrectionContext.
Definition: cmdutils.c:1445
static int64_t duration
Definition: avplay.c:246
void avresample_free(AVAudioResampleContext **avr)
Free AVAudioResampleContext and associated AVOption values.
Definition: utils.c:278
char * scale_sws_opts
sws options to use for the auto-inserted scale filters
Definition: avfilter.h:951
AVStream * video_st
Definition: avplay.c:197
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avcodec.h:1175
static AVInputFormat * file_iformat
Definition: avplay.c:227
void av_freep(void *arg)
Free a memory block which has been allocated with av_malloc(z)() or av_realloc() and set the pointer ...
Definition: mem.c:198
struct SwsContext * img_convert_ctx
Definition: avplay.c:207
AVSubtitleRect ** rects
Definition: avcodec.h:3088
static void opt_input_file(void *optctx, const char *filename)
Definition: avplay.c:2927
void av_picture_copy(AVPicture *dst, const AVPicture *src, enum AVPixelFormat pix_fmt, int width, int height)
Copy image src to dst.
Definition: avpicture.c:119
Format I/O context.
Definition: avformat.h:922
#define FF_REFRESH_EVENT
Definition: avplay.c:277
enum AVDiscard skip_frame
Definition: avcodec.h:2727
memory buffer sink API
const AVClass * avcodec_get_class(void)
Get the AVClass for AVCodecContext.
Definition: options.c:217
static int opt_format(void *optctx, const char *opt, const char *arg)
Definition: avplay.c:2805
int av_sync_type
Definition: avplay.c:144
#define AV_LOG_QUIET
Print no output.
Definition: log.h:105
int w
width of pict, undefined when pict is not set
Definition: avcodec.h:3061
AVFilterLink ** inputs
array of pointers to input links
Definition: avfilter.h:571
#define SAMPLE_ARRAY_SIZE
Definition: avplay.c:86
Public dictionary API.
double audio_diff_avg_coef
Definition: avplay.c:150
int avfilter_link(AVFilterContext *src, unsigned srcpad, AVFilterContext *dst, unsigned dstpad)
Link two filters together.
Definition: avfilter.c:75
int rdft_bits
Definition: avplay.c:178
int size
Definition: avplay.c:93
int subtitle_stream_changed
Definition: avplay.c:184
void log_callback_help(void *ptr, int level, const char *fmt, va_list vl)
Trivial log callback.
Definition: cmdutils.c:82
uint8_t
double pts
Definition: avplay.c:116
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:57
int opt_default(void *optctx, const char *opt, const char *arg)
Fallback for options that are not explicitly handled, these will be parsed through AVOptions...
Definition: cmdutils.c:433
static void stream_cycle_channel(VideoState *is, int codec_type)
Definition: avplay.c:2515
static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
Definition: avplay.c:2815
double external_clock
Definition: avplay.c:145
static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
Definition: avplay.c:1443
AVOptions.
int flags
Can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_SHOW_IDS, AVFMT_GENERIC_INDEX, AVFMT_TS_DISCONT, AVFMT_NOBINSEARCH, AVFMT_NOGENSEARCH, AVFMT_NO_BYTE_SEEK.
Definition: avformat.h:539
#define HAS_ARG
Definition: cmdutils.h:142
int audio_hw_buf_size
Definition: avplay.c:155
static double get_video_clock(VideoState *is)
Definition: avplay.c:970
uint8_t * data[AV_NUM_DATA_POINTERS]
Definition: avcodec.h:3027
static int wanted_stream[AVMEDIA_TYPE_NB]
Definition: avplay.c:236
AVPacket pkt
Definition: avformat.h:1260
static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
Definition: avplay.c:1009
static int decode_thread(void *arg)
Definition: avplay.c:2227
static int seek_by_bytes
Definition: avplay.c:241
#define b
Definition: input.c:52
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:211
#define AVFMT_FLAG_GENPTS
Generate missing pts even if it requires parsing future frames.
Definition: avformat.h:1034
#define RGBA_IN(r, g, b, a, s)
Definition: avplay.c:410
void init_opts(void)
Initialize the cmdutils option system, in particular allocate the *_opts contexts.
Definition: cmdutils.c:63
static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
Definition: avplay.c:1418
AVStream ** streams
A list of all streams in the file.
Definition: avformat.h:990
void avfilter_register_all(void)
Initialize the filter system.
Definition: allfilters.c:39
AVFormatContext * avformat_alloc_context(void)
Allocate an AVFormatContext.
Definition: options.c:98
int avfilter_graph_create_filter(AVFilterContext **filt_ctx, const AVFilter *filt, const char *name, const char *args, void *opaque, AVFilterGraph *graph_ctx)
Create and add a filter instance into an existing graph.
static void stream_close(VideoState *is)
Definition: avplay.c:1216
const char data[16]
Definition: mxf.c:70
static double av_q2d(AVRational a)
Convert rational to double.
Definition: rational.h:69
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
Definition: dict.c:38
int flags
Flags modifying the (de)muxer behaviour.
Definition: avformat.h:1033
static int audio_disable
Definition: avplay.c:234
uint8_t * data
Definition: avcodec.h:973
void parse_options(void *optctx, int argc, char **argv, const OptionDef *options, void(*parse_arg_function)(void *, const char *))
Definition: cmdutils.c:333
static int flags
Definition: log.c:44
int avformat_network_init(void)
Do global initialization of network components.
Definition: utils.c:2950
#define AVERROR_EOF
End of file.
Definition: error.h:51
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:139
static const OptionDef options[]
Definition: avplay.c:2849
float skip_frames
Definition: avplay.c:221
static void seek_chapter(VideoState *is, int incr)
Definition: avplay.c:2606
int width
Definition: avplay.c:107
const AVClass * avformat_get_class(void)
Get the AVClass for AVFormatContext.
Definition: options.c:114
void parse_loglevel(int argc, char **argv, const OptionDef *options)
Find the '-loglevel' option in the command line args and apply it.
Definition: cmdutils.c:423
external api for the swscale stuff
int h
height of pict, undefined when pict is not set
Definition: avcodec.h:3062
static av_always_inline int64_t avio_tell(AVIOContext *s)
ftell() equivalent for AVIOContext.
Definition: avio.h:219
void show_help_options(const OptionDef *options, const char *msg, int req_flags, int rej_flags, int alt_flags)
Print help for all options matching specified flags.
Definition: cmdutils.c:135
static void stream_pause(VideoState *is)
Definition: avplay.c:1022
int subpq_windex
Definition: avplay.c:188
static int genpts
Definition: avplay.c:250
static AVPacket flush_pkt
Definition: avplay.c:274
float skip_frames_index
Definition: avplay.c:222
static VideoState * stream_open(const char *filename, AVInputFormat *iformat)
Definition: avplay.c:2487
#define AV_OPT_FLAG_ENCODING_PARAM
a generic parameter which can be set by the user for muxing or encoding
Definition: opt.h:264
void avresample_close(AVAudioResampleContext *avr)
Close AVAudioResampleContext.
Definition: utils.c:262
#define FF_IDCT_AUTO
Definition: avcodec.h:2474
PacketQueue videoq
Definition: avplay.c:198
int subpq_rindex
Definition: avplay.c:188
#define r
Definition: input.c:51
AVDictionary ** setup_find_stream_info_opts(AVFormatContext *s, AVDictionary *codec_opts)
Setup AVCodecContext options for avformat_find_stream_info().
Definition: cmdutils.c:1645
AVDictionary * format_opts
Definition: cmdutils.c:59
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:129
void av_dump_format(AVFormatContext *ic, int index, const char *url, int is_output)
Print detailed information about the input or output format, such as duration, bitrate, streams, container, programs, metadata, side data, codec and time base.
Definition: dump.c:395
Main libavdevice API header.
int av_find_best_stream(AVFormatContext *ic, enum AVMediaType type, int wanted_stream_nb, int related_stream, AVCodec **decoder_ret, int flags)
Find the "best" stream in the file.
Definition: utils.c:2373
int avcodec_close(AVCodecContext *avctx)
Close a given AVCodecContext and free all the data associated with it (but not the AVCodecContext its...
Definition: utils.c:1710
int audio_diff_avg_count
Definition: avplay.c:152
static int64_t start_time
Definition: avplay.c:245
int ytop
Definition: avplay.c:212
static void packet_queue_end(PacketQueue *q)
Definition: avplay.c:310
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:123
void av_free(void *ptr)
Free a memory block which has been allocated with av_malloc(z)() or av_realloc(). ...
Definition: mem.c:186
int seek_req
Definition: avplay.c:135
int(* callback)(void *)
Definition: avio.h:52
int main(int argc, char **argv)
Definition: avplay.c:2940
int read_pause_return
Definition: avplay.c:139
#define AVFMT_TS_DISCONT
Format allows timestamp discontinuities.
Definition: avformat.h:418
static void show_usage(void)
Definition: avplay.c:2893
#define CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: avcodec.h:713
int y
top left corner of pict, undefined when pict is not set
Definition: avcodec.h:3060
static int decode_interrupt_cb(void *ctx)
Definition: avplay.c:2221
#define AVERROR(e)
Definition: error.h:43
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:69
RDFTContext * rdft
Definition: avplay.c:177
int error_concealment
error concealment flags
Definition: avcodec.h:2353
static int fast
Definition: avplay.c:249
g
Definition: yuv2rgb.c:535
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
Definition: avplay.c:1301
int capabilities
Codec capabilities.
Definition: avcodec.h:2815
static int framedrop
Definition: avplay.c:261
int resample_sample_rate
Definition: avplay.c:169
int av_read_play(AVFormatContext *s)
Start playing a network-based stream (e.g.
Definition: utils.c:2427
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values. ...
Definition: dict.c:170
int av_get_channel_layout_nb_channels(uint64_t channel_layout)
Return the number of channels in the channel layout.
int av_opt_set_int(void *obj, const char *name, int64_t val, int search_flags)
Definition: opt.c:268
static enum AVDiscard skip_idct
Definition: avplay.c:253
AVChapter ** chapters
Definition: avformat.h:1120
#define wrap(func)
Definition: neontest.h:62
#define AV_LOG_SKIP_REPEATED
Skip repeated messages, this requires the user app to use av_log() instead of (f)printf as the 2 woul...
Definition: log.h:265
uint8_t silence_buf[SDL_AUDIO_BUFFER_SIZE]
Definition: avplay.c:156
void av_log(void *avcl, int level, const char *fmt,...)
Definition: log.c:168
static int64_t audio_callback_time
Definition: avplay.c:272
int video_stream
Definition: avplay.c:196
int xpos
Definition: avplay.c:180
int subpq_size
Definition: avplay.c:188
#define FFMAX(a, b)
Definition: common.h:55
float FFTSample
Definition: avfft.h:35
size_t av_strlcpy(char *dst, const char *src, size_t size)
Copy the string src to dst, but no more than size - 1 bytes, and null-terminate dst.
Definition: avstring.c:81
double audio_clock
Definition: avplay.c:148
#define AV_NOSYNC_THRESHOLD
Definition: avplay.c:75
uint64_t channel_layout
Audio channel layout.
Definition: avcodec.h:1852
int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b)
Compare 2 timestamps each in its own timebases.
Definition: mathematics.c:134
struct SwsContext * sws_getCachedContext(struct SwsContext *context, int srcW, int srcH, enum AVPixelFormat srcFormat, int dstW, int dstH, enum AVPixelFormat dstFormat, int flags, SwsFilter *srcFilter, SwsFilter *dstFilter, const double *param)
Check if context can be reused, otherwise reallocate a new one.
Definition: utils.c:1730
void av_rdft_calc(RDFTContext *s, FFTSample *data)
uint32_t end_display_time
Definition: avcodec.h:3086
int64_t guess_correct_pts(PtsCorrectionContext *ctx, int64_t reordered_pts, int64_t dts)
Attempt to guess proper monotonic timestamps for decoded video frames which might have incorrect time...
Definition: cmdutils.c:1451
AVCodecContext * codec
Codec context associated with this stream.
Definition: avformat.h:718
uint64_t channel_layout
Channel layout of the audio data.
Definition: frame.h:381
static int fs_screen_width
Definition: avplay.c:230
const AVFilter * avfilter_get_by_name(const char *name)
Get a filter definition matching the given name.
Definition: avfilter.c:283
static int refresh_thread(void *opaque)
Definition: avplay.c:936
static int screen_height
Definition: avplay.c:233
unsigned int nb_streams
Number of elements in AVFormatContext.streams.
Definition: avformat.h:978
double frame_last_pts
Definition: avplay.c:193
int sdl_channels
Definition: avplay.c:165
static void toggle_audio_display(void)
Definition: avplay.c:2594
int av_read_pause(AVFormatContext *s)
Pause a network-based stream (e.g.
Definition: utils.c:2436
SDL_mutex * mutex
Definition: avplay.c:95
int show_audio
Definition: avplay.c:173
AVInputFormat * av_find_input_format(const char *short_name)
Find AVInputFormat based on the short name of the input format.
Definition: format.c:162
static int autoexit
Definition: avplay.c:257
char filename[1024]
input or output filename
Definition: avformat.h:998
AVPicture pict
data+linesize for the bitmap of this subtitle.
Definition: avcodec.h:3069
#define AV_TIME_BASE
Internal time base represented as integer.
Definition: avutil.h:234
external API header
#define FFMIN(a, b)
Definition: common.h:57
int reallocate
Definition: avplay.c:109
void av_log_set_callback(void(*callback)(void *, int, const char *, va_list))
Set the logging callback.
Definition: log.c:200
SDL_Thread * subtitle_tid
Definition: avplay.c:182
int width
picture width / height.
Definition: avcodec.h:1224
uint64_t resample_channel_layout
Definition: avplay.c:168
int idct_algo
IDCT algorithm, see FF_IDCT_* below.
Definition: avcodec.h:2473
int nb_packets
Definition: avplay.c:92
#define AV_SYNC_THRESHOLD
Definition: avplay.c:73
static int synchronize_audio(VideoState *is, short *samples, int samples_size1, double pts)
Definition: avplay.c:1759
#define MIN_FRAMES
Definition: avplay.c:66
static void fill_rectangle(SDL_Surface *screen, int x, int y, int w, int h, int color)
Definition: avplay.c:396
void sws_freeContext(struct SwsContext *swsContext)
Free the swscaler context swsContext.
Definition: utils.c:1666
int last_i_start
Definition: avplay.c:176
uint16_t format
Definition: avcodec.h:3084
char filename[1024]
Definition: avplay.c:211
#define OPT_INT64
Definition: cmdutils.h:151
Definition: avfft.h:72
void av_rdft_end(RDFTContext *s)
RDFTContext * av_rdft_init(int nbits, enum RDFTransformType trans)
Set up a real FFT.
SDL_mutex * subpq_mutex
Definition: avplay.c:189
AVSubtitle sub
Definition: avplay.c:117
int64_t external_clock_time
Definition: avplay.c:146
static int video_open(VideoState *is)
Definition: avplay.c:874
static void video_audio_display(VideoState *s)
Definition: avplay.c:734
enum AVSampleFormat resample_sample_fmt
Definition: avplay.c:167
SDL_Thread * video_tid
Definition: avplay.c:128
int16_t sample_array[SAMPLE_ARRAY_SIZE]
Definition: avplay.c:174
AVFilterContext * filter_ctx
filter context associated to this input/output
Definition: avfilter.h:1096
#define BPP
Definition: avplay.c:434
if(ac->has_optimized_func)
static int opt_sync(void *optctx, const char *opt, const char *arg)
Definition: avplay.c:2822
static void update_sample_display(VideoState *is, short *samples, int samples_size)
Definition: avplay.c:1739
int64_t av_gettime(void)
Get the current time in microseconds.
Definition: time.c:37
Stream structure.
Definition: avformat.h:699
void avcodec_flush_buffers(AVCodecContext *avctx)
Reset the internal decoder state / flush internal buffers.
Definition: utils.c:1987
A linked-list of the inputs/outputs of the filter chain.
Definition: avfilter.h:1091
int64_t video_current_pos
Definition: avplay.c:201
#define FF_ALLOC_EVENT
Definition: avplay.c:276
#define MIN_AUDIOQ_SIZE
Definition: avplay.c:65
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:186
int av_opt_get_int(void *obj, const char *name, int search_flags, int64_t *out_val)
Definition: opt.c:393
int avformat_network_deinit(void)
Undo the initialization done by avformat_network_init.
Definition: utils.c:2962
static int stream_component_open(VideoState *is, int stream_index)
Definition: avplay.c:2025
NULL
Definition: eval.c:55
static SDL_Surface * screen
Definition: avplay.c:280
static int width
Definition: utils.c:156
const char program_name[]
program name, defined by the program for show_version().
Definition: avplay.c:61
enum AVMediaType codec_type
Definition: avcodec.h:1058
#define SUBPICTURE_QUEUE_SIZE
Definition: avplay.c:100
AVFrame * frame
Definition: avplay.c:171
double pts
Definition: avplay.c:103
static void do_exit(void)
Definition: avplay.c:1244
AVPacketList * last_pkt
Definition: avplay.c:91
AVSampleFormat
Audio Sample Formats.
Definition: samplefmt.h:61
enum AVCodecID codec_id
Definition: avcodec.h:1067
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:240
char * av_strdup(const char *s)
Duplicate the string s.
Definition: mem.c:213
int sample_rate
samples per second
Definition: avcodec.h:1791
enum AVDiscard skip_idct
Definition: avcodec.h:2720
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:153
AVIOContext * pb
I/O context.
Definition: avformat.h:964
int last_paused
Definition: avplay.c:134
FFT functions.
main external API structure.
Definition: avcodec.h:1050
AVCodec * avcodec_find_decoder(enum AVCodecID id)
Find a registered decoder with a matching codec ID.
Definition: utils.c:1780
int pictq_size
Definition: avplay.c:203
SDL_mutex * pictq_mutex
Definition: avplay.c:204
static double get_external_clock(VideoState *is)
Definition: avplay.c:980
static int opt_width(void *optctx, const char *opt, const char *arg)
Definition: avplay.c:2793
#define YUVA_OUT(d, y, u, v, a)
Definition: avplay.c:428
static int video_thread(void *arg)
Definition: avplay.c:1569
void avsubtitle_free(AVSubtitle *sub)
Free all allocated data in the given subtitle struct.
Definition: utils.c:1691
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:206
static void toggle_full_screen(void)
Definition: avplay.c:2565
int sws_scale(struct SwsContext *c, const uint8_t *const srcSlice[], const int srcStride[], int srcSliceY, int srcSliceH, uint8_t *const dst[], const int dstStride[])
Scale the image slice in srcSlice and put the resulting scaled slice in the image in dst...
#define ALPHA_BLEND(a, oldp, newp, s)
Definition: avplay.c:407
SubPicture subpq[SUBPICTURE_QUEUE_SIZE]
Definition: avplay.c:187
int avfilter_graph_parse(AVFilterGraph *graph, const char *filters, AVFilterInOut *inputs, AVFilterInOut *outputs, void *log_ctx)
Add a graph described by a string to a graph.
Definition: graphparser.c:451
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:68
static int opt_seek(void *optctx, const char *opt, const char *arg)
Definition: avplay.c:2837
int sample_rate
Sample rate of the audio data.
Definition: frame.h:376
static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
Definition: avplay.c:436
static const AVFilterPad inputs[]
Definition: af_ashowinfo.c:221
int pad_idx
index of the filt_ctx pad to use for linking
Definition: avfilter.h:1099
PacketQueue audioq
Definition: avplay.c:154
int av_samples_get_buffer_size(int *linesize, int nb_channels, int nb_samples, enum AVSampleFormat sample_fmt, int align)
Get the required buffer size for the given audio parameters.
Definition: samplefmt.c:108
int64_t seek_pos
Definition: avplay.c:137
rational number numerator/denominator
Definition: rational.h:43
static int is_full_screen
Definition: avplay.c:270
AVAudioResampleContext * avr
Definition: avplay.c:170
#define AV_OPT_FLAG_DECODING_PARAM
a generic parameter which can be set by the user for demuxing or decoding
Definition: opt.h:265
int64_t parse_time_or_die(const char *context, const char *timestr, int is_duration)
Parse a string specifying a time and return its corresponding value as a number of microseconds...
Definition: cmdutils.c:123
#define OPT_STRING
Definition: cmdutils.h:145
SDL_cond * cond
Definition: avplay.c:96
#define AVSEEK_FLAG_BYTE
seeking based on position in bytes
Definition: avformat.h:1610
struct SwsContext * sws_opts
Definition: cmdutils.c:58
discard useless packets like 0 size packets in avi
Definition: avcodec.h:564
int avresample_convert(AVAudioResampleContext *avr, uint8_t **output, int out_plane_size, int out_samples, uint8_t **input, int in_plane_size, int in_samples)
Convert input samples and write them to the output FIFO.
Definition: utils.c:330
static int decoder_reorder_pts
Definition: avplay.c:256
#define AUDIO_DIFF_AVG_NB
Definition: avplay.c:83
static const char * input_filename
Definition: avplay.c:228
static int step
Definition: avplay.c:247
int avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: utils.c:982
#define FRAME_SKIP_FACTOR
Definition: avplay.c:77
static int infinite_buffer
Definition: avplay.c:262
static double compute_target_time(double frame_current_pts, VideoState *is)
Definition: avplay.c:1034
void show_help_default(const char *opt, const char *arg)
Per-avtool specific help handler.
Definition: avplay.c:2900
static double get_audio_clock(VideoState *is)
Definition: avplay.c:953
int error
contains the error code or 0 if no error happened
Definition: avio.h:102
misc parsing utilities
#define SAMPLE_CORRECTION_PERCENT_MAX
Definition: avplay.c:80
int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
Add a frame to the buffer source.
Definition: buffersrc.c:92
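Feeding one decoded frame into the graph is a single call (buffersrc_ctx and frame are assumed); the matching pull loop is sketched under av_buffersink_get_frame() further down.

    if (av_buffersrc_add_frame(buffersrc_ctx, frame) < 0)
        fprintf(stderr, "error while feeding the filter graph\n");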
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
Return the next frame of a stream.
Definition: utils.c:989
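A typical demux loop sketch in the spirit of avplay's decode thread; ic, video_stream and is->videoq stand in for the real VideoState fields and are assumptions here.

    AVPacket pkt;
    while (av_read_frame(ic, &pkt) >= 0) {
        if (pkt.stream_index == video_stream)
            packet_queue_put(&is->videoq, &pkt);   /* hand the packet to the video decoder thread */
        else
            av_free_packet(&pkt);                  /* not wanted: release it immediately */
    }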
int audio_stream
Definition: avplay.c:142
static int audio_decode_frame(VideoState *is, double *pts_ptr)
Definition: avplay.c:1833
int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
Seek to timestamp ts.
Definition: utils.c:1542
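For example, jumping to the one-minute mark with stream_index -1 (timestamps in AV_TIME_BASE units) and no min/max constraint; a sketch only.

    int64_t target = 60 * (int64_t)AV_TIME_BASE;   /* 60 seconds */
    if (avformat_seek_file(ic, -1, INT64_MIN, target, INT64_MAX, 0) < 0)
        fprintf(stderr, "seek failed\n");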
static int opt_duration(void *optctx, const char *opt, const char *arg)
Definition: avplay.c:2843
int64_t pkt_pts
PTS copied from the AVPacket that was decoded to produce this frame.
Definition: frame.h:216
double parse_number_or_die(const char *context, const char *numstr, int type, double min, double max)
Parse a string and return its corresponding value as a double.
Definition: cmdutils.c:102
char * name
unique name for this input/output in the list
Definition: avfilter.h:1093
int sdl_sample_rate
Definition: avplay.c:166
#define RGB_TO_U_CCIR(r1, g1, b1, shift)
Definition: colorspace.h:103
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:283
static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
Definition: avplay.c:317
int64_t start_time
Position of the first frame of the component, in AV_TIME_BASE fractional seconds. ...
Definition: avformat.h:1007
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:141
int av_get_bytes_per_sample(enum AVSampleFormat sample_fmt)
Return number of bytes per sample.
Definition: samplefmt.c:95
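For example:

    int bps = av_get_bytes_per_sample(AV_SAMPLE_FMT_S16);   /* yields 2 */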
enum AVSampleFormat sdl_sample_fmt
Definition: avplay.c:163
#define RGB_TO_V_CCIR(r1, g1, b1, shift)
Definition: colorspace.h:107
int height
Definition: gxfenc.c:72
int sample_array_index
Definition: avplay.c:175
int64_t start
Definition: avformat.h:908
static VideoState * global_video_state
Definition: avplay.c:2219
#define MAX_QUEUE_SIZE
Definition: avplay.c:64
static int loop
Definition: avplay.c:260
static int exit_on_keydown
Definition: avplay.c:258
#define OPT_BOOL
Definition: cmdutils.h:143
int64_t pkt_dts
DTS copied from the AVPacket that triggered returning this frame.
Definition: frame.h:221
AVAudioResampleContext * avresample_alloc_context(void)
Allocate AVAudioResampleContext and set options.
Definition: options.c:96
static double get_master_clock(VideoState *is)
Definition: avplay.c:988
Main libavformat public API header.
void print_error(const char *filename, int err)
Print an error message to stderr, indicating filename and a human readable description of the error c...
Definition: cmdutils.c:758
int pictq_windex
Definition: avplay.c:203
int64_t pos
Definition: avplay.c:105
#define FF_QUIT_EVENT
Definition: avplay.c:278
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:65
#define OPT_INT
Definition: cmdutils.h:148
AVFilterInOut * avfilter_inout_alloc(void)
Allocate a single AVFilterInOut entry.
Definition: graphparser.c:174
AVDictionary * codec_opts
Definition: cmdutils.c:59
int height
Definition: avplay.c:107
struct AVPacketList * next
Definition: avformat.h:1261
uint8_t * audio_buf
Definition: avplay.c:157
void * av_realloc(void *ptr, size_t size)
Allocate or reallocate a block of memory.
Definition: mem.c:117
static AVInputFormat * iformat
Definition: avprobe.c:53
static const char * window_title
Definition: avplay.c:229
#define YUVA_IN(y, u, v, a, s, pal)
Definition: avplay.c:419
int allocated
Definition: avplay.c:108
int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
Read packets of a media file to get stream information.
Definition: utils.c:2052
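A typical open-and-probe sequence, paired with avformat_close_input() on failure; filename is an assumption and the headers are the ones avplay.c already includes.

    AVFormatContext *ic = NULL;
    int err = avformat_open_input(&ic, filename, NULL, NULL);
    if (err < 0) {
        print_error(filename, err);
        return -1;
    }
    if (avformat_find_stream_info(ic, NULL) < 0) {
        fprintf(stderr, "%s: could not find codec parameters\n", filename);
        avformat_close_input(&ic);
        return -1;
    }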
int64_t start_time
Decoding: pts of the first frame of the stream, in stream time base.
Definition: avformat.h:749
signed 16 bits
Definition: samplefmt.h:64
int audio_buf_index
Definition: avplay.c:160
uint8_t * audio_buf1
Definition: avplay.c:158
static int av_sync_type
Definition: avplay.c:244
static int compute_mod(int a, int b)
Definition: avplay.c:725
uint32_t start_display_time
Definition: avcodec.h:3085
static VideoState * cur_stream
Definition: avplay.c:271
FFTSample * rdft_data
Definition: avplay.c:179
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
Definition: avplay.c:1990
int workaround_bugs
Work around bugs in encoders which sometimes cannot be detected automatically.
Definition: avcodec.h:2307
AVRational time_base
time base in which the start/end timestamps are specified
Definition: avformat.h:907
static int fs_screen_height
Definition: avplay.c:231
double frame_last_delay
Definition: avplay.c:194
void av_init_packet(AVPacket *pkt)
Initialize optional fields of a packet with default values.
Definition: avpacket.c:47
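Note that only the optional fields are defaulted; data and size are left untouched and must be set by the caller, e.g.:

    AVPacket pkt;
    av_init_packet(&pkt);   /* pts, dts, flags, ... get default values */
    pkt.data = NULL;        /* data/size are NOT touched by av_init_packet() */
    pkt.size = 0;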
enum AVDiscard skip_loop_filter
Definition: avcodec.h:2713
char * key
Definition: dict.h:75
int den
denominator
Definition: rational.h:45
void uninit_opts(void)
Uninitialize the cmdutils option system, in particular free the *_opts contexts and their contents...
Definition: cmdutils.c:71
PacketQueue subtitleq
Definition: avplay.c:186
static const uint8_t color[]
Definition: log.c:55
SDL_Thread * refresh_tid
Definition: avplay.c:129
struct AVInputFormat * iformat
The input container format.
Definition: avformat.h:934
void avformat_close_input(AVFormatContext **s)
Close an opened input AVFormatContext.
Definition: utils.c:2499
AVPacket audio_pkt
Definition: avplay.c:162
#define RGB_TO_Y_CCIR(r, g, b)
Definition: colorspace.h:99
static void event_loop(void)
Definition: avplay.c:2634
#define AVERROR_OPTION_NOT_FOUND
Option not found.
Definition: error.h:56
int eof_reached
True if end of file (EOF) has been reached.
Definition: avio.h:96
static void video_refresh_timer(void *opaque)
Definition: avplay.c:1075
int len
int channels
number of audio channels
Definition: avcodec.h:1792
static int error_concealment
Definition: avplay.c:255
void av_log_set_flags(int arg)
Definition: log.c:195
uint64_t sdl_channel_layout
Definition: avplay.c:164
int abort_request
Definition: avplay.c:132
static int opt_height(void *optctx, const char *opt, const char *arg)
Definition: avplay.c:2799
static int opt_frame_size(void *optctx, const char *opt, const char *arg)
Definition: avplay.c:2786
int height
Definition: avplay.c:212
void show_help_children(const AVClass *class, int flags)
Show help for all options with given flags in class and all its children.
Definition: cmdutils.c:164
int avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture, int *got_picture_ptr, AVPacket *avpkt)
Decode the video frame of size avpkt->size from avpkt->data into picture.
Definition: utils.c:1574
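A decode-call sketch; dec (the stream's AVCodecContext) and pkt are assumed, and got_picture reports whether frame holds a complete picture.

    AVFrame *frame = av_frame_alloc();
    int got_picture = 0;
    int ret = avcodec_decode_video2(dec, frame, &got_picture, &pkt);
    if (ret >= 0 && got_picture) {
        /* frame->data[] / frame->linesize[] now describe the decoded picture */
    }
    av_frame_free(&frame);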
int flags2
CODEC_FLAG2_*.
Definition: avcodec.h:1151
static void step_to_next_frame(void)
Definition: avplay.c:2584
AVDiscard
Definition: avcodec.h:560
An instance of a filter.
Definition: avfilter.h:563
#define VIDEO_PICTURE_QUEUE_SIZE
Definition: avplay.c:99
AVPacket audio_pkt_temp
Definition: avplay.c:161
int bit_rate
Total stream bitrate in bit/s, 0 if not available.
Definition: avformat.h:1024
int64_t duration
Duration of the stream, in AV_TIME_BASE fractional seconds.
Definition: avformat.h:1017
int refresh
Definition: avplay.c:223
const char * name
A comma-separated list of short names for the format.
Definition: avformat.h:525
#define AV_DICT_IGNORE_SUFFIX
Definition: dict.h:62
static int exit_on_mousedown
Definition: avplay.c:259
#define SDL_AUDIO_BUFFER_SIZE
Definition: avplay.c:70
static int rdftspeed
Definition: avplay.c:264
int xleft
Definition: avplay.c:212
int stream_index
Definition: avcodec.h:975
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avformat.h:741
static int display_disable
Definition: avplay.c:242
static enum AVDiscard skip_loop_filter
Definition: avplay.c:254
SDL_cond * pictq_cond
Definition: avplay.c:205
int subtitle_stream
Definition: avplay.c:183
unsigned int audio_buf_size
Definition: avplay.c:159
int64_t seek_rel
Definition: avplay.c:138
static int video_disable
Definition: avplay.c:235
#define AV_CH_LAYOUT_MONO
enum AVDiscard discard
Selects which packets can be discarded at will and do not need to be demuxed.
Definition: avformat.h:762
int attribute_align_arg av_buffersink_get_frame(AVFilterContext *ctx, AVFrame *frame)
Get a frame with filtered data from sink and put it in frame.
Definition: buffersink.c:62
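The pull side matching the av_buffersrc_add_frame() sketch above; the loop ends when the sink returns AVERROR(EAGAIN) or AVERROR_EOF (buffersink_ctx is assumed).

    AVFrame *filt_frame = av_frame_alloc();
    while (av_buffersink_get_frame(buffersink_ctx, filt_frame) >= 0) {
        /* consume the filtered frame here (display it, queue it, ...) */
        av_frame_unref(filt_frame);   /* drop the references before reusing the frame */
    }
    av_frame_free(&filt_frame);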
static int screen_width
Definition: avplay.c:232
AVPixelFormat
Pixel format.
Definition: pixfmt.h:63
This structure stores compressed data.
Definition: avcodec.h:950
void av_register_all(void)
Initialize libavformat and register all the muxers, demuxers and protocols.
Definition: allformats.c:51
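Called once at program start, before any other libavformat use; avplay additionally registers the capture devices when libavdevice is enabled (sketch):

    av_register_all();            /* all muxers, demuxers and protocols */
#if CONFIG_AVDEVICE
    avdevice_register_all();      /* capture/playback devices */
#endif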
int avresample_open(AVAudioResampleContext *avr)
Initialize AVAudioResampleContext.
Definition: utils.c:36
static void alloc_picture(void *opaque)
Definition: avplay.c:1261
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:179
void * av_mallocz(size_t size)
Allocate a block of size bytes with alignment suitable for all memory accesses (including vectors if ...
Definition: mem.c:205
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: avcodec.h:966
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:228
#define av_unused
Definition: attributes.h:86
static enum AVDiscard skip_frame
Definition: avplay.c:252
double video_current_pts
Definition: avplay.c:199
static void packet_queue_init(PacketQueue *q)
Definition: avplay.c:285
static int idct
Definition: avplay.c:251
AVFormatContext * ic
Definition: avplay.c:140
int no_background
Definition: avplay.c:131
int avcodec_decode_audio4(AVCodecContext *avctx, AVFrame *frame, int *got_frame_ptr, AVPacket *avpkt)
Decode the audio frame of size avpkt->size from avpkt->data into frame.
Definition: utils.c:1630
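A sketch mirroring the shape of audio_decode_frame(): decode one packet, then measure the decoded payload with av_samples_get_buffer_size(); dec, frame and pkt are assumed.

    int got_frame = 0;
    int len = avcodec_decode_audio4(dec, frame, &got_frame, &pkt);
    if (len >= 0 && got_frame) {
        int data_size = av_samples_get_buffer_size(NULL, dec->channels,
                                                   frame->nb_samples,
                                                   dec->sample_fmt, 1);
        /* data_size bytes of raw audio are available in frame->data[] (per plane if planar) */
    }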
static int16_t block[64]
Definition: dct-test.c:88