/* Header names reconstructed from the APIs used below:
   old FFmpeg (avcodec/avformat/swscale), SDL 1.2 and fixed-function OpenGL. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <SDL.h>
#include <SDL_opengl.h>

#ifdef __MINGW32__
#undef main /* Prevents SDL from overriding main() */
#endif

int main(int argc, char *argv[])
{
    AVFormatContext *pFormatCtx;
    int i, j, k, videoStream;
    AVCodecContext *pCodecCtx;
    AVCodec *pCodec;
    AVFrame *pFrame;
    AVPacket packet;
    int frameFinished;
    struct SwsContext *img_convert_ctx;
    float aspect_ratio;
    int frmcnt = 0;
    int DispList;

    SDL_Surface *screen;
    SDL_Event event;

    if (argc < 2)
    {
        fprintf(stderr, "Usage: test <file>\n");
        exit(1);
    }

    // Register all formats and codecs
    av_register_all();

    if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER))
    {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }

    // Open video file
    if (av_open_input_file(&pFormatCtx, argv[1], NULL, 0, NULL) != 0)
        return -1; // Couldn't open file

    // Retrieve stream information
    if (av_find_stream_info(pFormatCtx) < 0)
        return -1; // Couldn't find stream information

    // Dump information about file onto standard error
    dump_format(pFormatCtx, 0, argv[1], 0);

    // Find the first video stream
    videoStream = -1;
    for (i=0; i<pFormatCtx->nb_streams; i++)
        if (pFormatCtx->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO)
        {
            videoStream = i;
            break;
        }
    if (videoStream == -1)
        return -1; // Didn't find a video stream

    // Get a pointer to the codec context for the video stream
    pCodecCtx = pFormatCtx->streams[videoStream]->codec;

    // Find the decoder for the video stream
    pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
    if (pCodec == NULL)
    {
        fprintf(stderr, "Unsupported codec!\n");
        return -1; // Codec not found
    }

    // Open codec
    pCodecCtx->thread_count = 2; // Use 2 processor cores if available
    if (avcodec_open(pCodecCtx, pCodec) < 0)
        return -1; // Could not open codec

    // Allocate video frame
    pFrame = avcodec_alloc_frame();

    // Calculate aspect ratio
    if (pCodecCtx->sample_aspect_ratio.num == 0)
        aspect_ratio = 0.0;
    else
        aspect_ratio = av_q2d(pCodecCtx->sample_aspect_ratio) *
                       pCodecCtx->width / pCodecCtx->height;
    if (aspect_ratio <= 0.0)
        aspect_ratio = (float)pCodecCtx->width / (float)pCodecCtx->height;
    fprintf(stderr, "Aspect ratio = %f by %d\n",
            pCodecCtx->height * aspect_ratio, pCodecCtx->height);

    int disp_height = pCodecCtx->height; //1080;
    int disp_width = disp_height * aspect_ratio;

    // Make a screen to put our video
#ifndef __DARWIN__
    screen = SDL_SetVideoMode(disp_width, disp_height, 0, SDL_OPENGL);
#else
    screen = SDL_SetVideoMode(disp_width, disp_height, 24, 0);
#endif
    if (!screen)
    {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        exit(1);
    }

    glViewport(0, 0, disp_width, disp_height);
    // Map OpenGL coordinates to window pixels. The projection matrix is
    // selected explicitly so the per-field glLoadIdentity() in the decode
    // loop resets only the modelview matrix and cannot wipe out the ortho.
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    glOrtho(0, disp_width, 0, disp_height, -1, 1);
    glMatrixMode(GL_MODELVIEW);
    // fprintf(stderr, "OpenGL extensions:\n%s\n", glGetString(GL_EXTENSIONS));

    /*
    // Find maximum supported texture size
    for (i=0x400; i<=0x08000000; i+=0x400)
    {
        glTexImage1D(GL_PROXY_TEXTURE_1D, 0, 3, i, 0,
                     GL_RGB, GL_UNSIGNED_BYTE, NULL);
        glGetTexLevelParameteriv(GL_PROXY_TEXTURE_1D, 0, GL_TEXTURE_WIDTH, &j);
        if (j == 0)
        {
            fprintf(stderr, "Max 1D texture size = %d\n", i);
            break;
        }
    }
    */

    // Clear screen
    glClearColor(0.0f, 0.3f, 0.0f, 0.0f);
    glClear(GL_COLOR_BUFFER_BIT);
    glDisable(GL_DEPTH_TEST);

    // Allocate buffers to put our video fields
    AVPicture fyuv, frgb;

    // Determine required buffer size and allocate buffer
    // Field of YUV
    int numBytes = avpicture_get_size(pCodecCtx->pix_fmt, pCodecCtx->width,
                                      pCodecCtx->height/2); // Size of 1 field of YUV
    avpicture_fill(&fyuv, (uint8_t *)av_malloc(numBytes*sizeof(uint8_t)),
                   pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height/2);
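    /*
     * Sizing note: for 4:2:0 input a field buffer holds width x height/2 luma
     * samples plus two width/2 x height/4 chroma planes, i.e. 1.5 bytes per
     * field pixel. As an illustration (example figures, not from the source),
     * a 720x576 frame gives 720*288*1.5 = 311,040 bytes per YUV field, while
     * the RGB24 field allocated next needs 720*288*3 = 622,080 bytes.
     */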
    // Field of RGB24
    numBytes = avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width,
                                  pCodecCtx->height/2); // Size of 1 field of RGB24
    avpicture_fill(&frgb, (uint8_t *)av_malloc(numBytes*sizeof(uint8_t)),
                   PIX_FMT_RGB24, pCodecCtx->width, pCodecCtx->height/2);

    // Convert from 1 field of YUV to 1 field of RGB24
    img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height/2,
                                     pCodecCtx->pix_fmt,
                                     pCodecCtx->width, pCodecCtx->height/2,
                                     PIX_FMT_RGB24,
                                     (SWS_CPU_CAPS_MMX2 | SWS_POINT),
                                     NULL, NULL, NULL);
    if (img_convert_ctx == NULL)
    {
        fprintf(stderr, "Cannot initialize the conversion context!\n");
        return -1;
    }

    // Setup for texture method (support required for non power-of-2 texture maps)
    glEnable(GL_TEXTURE_2D);
    glTexEnvi(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE);
    // Interpolation required for horizontal access, so careful positioning of texture required
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);

    // Texture is 1 rgb video field
    glTexImage2D(GL_TEXTURE_2D, 0, 3, pCodecCtx->width, pCodecCtx->height/2,
                 0, GL_RGB, GL_UNSIGNED_BYTE, frgb.data[0]);

    DispList = glGenLists(4); // Reserve space for display lists

    // Top field scan line display list. (The bodies of these two lists are a
    // reconstruction: each field line is drawn as one textured horizontal
    // line, top field on even display rows and bottom field on odd rows,
    // sampling the texture at texel centres for the GL_LINEAR filtering
    // configured above.)
    int linecnt = 0;
    glNewList(DispList+0, GL_COMPILE);
    glBegin(GL_LINES);
    for (i=0; i<pCodecCtx->height/2; i++)
    {
        float ty = ((float)i + 0.5f) / (float)(pCodecCtx->height/2);
        glTexCoord2f(0.0f, ty); glVertex2i(0, disp_height - 1 - 2*i);
        glTexCoord2f(1.0f, ty); glVertex2i(disp_width, disp_height - 1 - 2*i);
        linecnt++;
    }
    glEnd();
    glEndList();

    // Bottom field scan line display list
    glNewList(DispList+1, GL_COMPILE);
    glBegin(GL_LINES);
    for (i=0; i<pCodecCtx->height/2; i++)
    {
        float ty = ((float)i + 0.5f) / (float)(pCodecCtx->height/2);
        glTexCoord2f(0.0f, ty); glVertex2i(0, disp_height - 2 - 2*i);
        glTexCoord2f(1.0f, ty); glVertex2i(disp_width, disp_height - 2 - 2*i);
        linecnt++;
    }
    glEnd();
    glEndList();

    // Read packets, decode frames and display them one field at a time
    while (av_read_frame(pFormatCtx, &packet) >= 0)
    {
        // Is this a packet from the video stream?
        if (packet.stream_index == videoStream)
        {
            // Decode video frame
            avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);

            // Did we get a video frame?
            if (frameFinished)
            {
                frmcnt++;
                AVPicture psrc, pdst;

                // Display fields one at a time
                for (j=0; j<2; j++) // Need support for bottom field first video source
                {
                    // Point to decoded frame (source) and yuv field buffer (destination)
                    for (i=0; i<3; i++)
                    {
                        if (j == 0)
                            psrc.data[i] = pFrame->data[i];
                        else
                            psrc.data[i] = pFrame->data[i] + pFrame->linesize[i];
                        psrc.linesize[i] = pFrame->linesize[i];
                        pdst.data[i] = fyuv.data[i];
                        pdst.linesize[i] = fyuv.linesize[i];
                    }

                    // Extract 1 video field (every other line of each plane)
                    for (i=j; i<pCodecCtx->height; i+=4)
                    {
                        // 2 lines of Y
                        memcpy(pdst.data[0], psrc.data[0], psrc.linesize[0]);
                        pdst.data[0] += pdst.linesize[0];
                        psrc.data[0] += (psrc.linesize[0] << 1);
                        memcpy(pdst.data[0], psrc.data[0], psrc.linesize[0]);
                        pdst.data[0] += pdst.linesize[0];
                        psrc.data[0] += (psrc.linesize[0] << 1);
                        // 1 line of U (corresponds to 2 lines of Y)
                        memcpy(pdst.data[1], psrc.data[1], psrc.linesize[1]);
                        pdst.data[1] += pdst.linesize[1];
                        psrc.data[1] += (psrc.linesize[1] << 1);
                        // 1 line of V (corresponds to 2 lines of Y)
                        memcpy(pdst.data[2], psrc.data[2], psrc.linesize[2]);
                        pdst.data[2] += pdst.linesize[2];
                        psrc.data[2] += (psrc.linesize[2] << 1);
                    }

                    // Convert the field into RGB format that OpenGL uses
                    sws_scale(img_convert_ctx, (const uint8_t**)fyuv.data,
                              fyuv.linesize, 0, pCodecCtx->height/2,
                              frgb.data, frgb.linesize);

                    // First field markers: force the first 10 pixels (30 bytes)
                    // of every 8th line to white so the top field is visible
                    if (j == 0)
                    {
                        for (i=0; i<pCodecCtx->height/2; i+=8)
                        {
                            for (k=0; k<30; k++)
                            {
                                *(frgb.data[0] + frgb.linesize[0]*i + k) = 255;
                            }
                        }
                    }

                    glLoadIdentity();

                    // Display using texture map method
                    glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0,
                                    pCodecCtx->width, pCodecCtx->height/2,
                                    GL_RGB, GL_UNSIGNED_BYTE, frgb.data[0]);
                    glCallList(DispList+j);

                    // Display the framebuffer
                    SDL_GL_SwapBuffers();
                    // SDL_Delay(500);
                }
                // SDL_Delay(2000);
            }
        }

        // Free the packet that was allocated by av_read_frame
        av_free_packet(&packet);

        // Only examine event.type when an event is actually pending,
        // otherwise it is read uninitialized
        if (SDL_PollEvent(&event))
        {
            switch (event.type)
            {
            case SDL_QUIT:
                SDL_Quit();
                exit(0);
                break;
            default:
                break;
            }
        }
    }
    fprintf(stderr, "Frame count %d\n", frmcnt);

    // Free the YUV frame
    av_free(pFrame);

    // Close the codec
    avcodec_close(pCodecCtx);
    // Close the video file
    av_close_input_file(pFormatCtx);

    return 0;
}
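/*
 * Build sketch (assumptions, not part of the original listing): the code uses
 * the old FFmpeg API (av_open_input_file, avcodec_open, dump_format) together
 * with SDL 1.2 and fixed-function OpenGL, so a matching era of libraries is
 * required. On Linux, something like
 *
 *   gcc -o test test.c $(pkg-config --cflags --libs libavformat libavcodec libswscale sdl) -lGL
 *
 * should work, where "test.c" is whatever name this file is saved under.
 * Run it as:
 *
 *   ./test <file>
 */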