
FFmpeg: generating an H264 video in C++. I am converting a series of images into an H264-encoded MP4 with the FFmpeg library (a recent Windows build) in Visual Studio 2012 on Windows.

Below is my code. Everything appears to work and the video file is created. However, I can only play it in VLC after renaming the extension to ".h264", and the codec information then shows "H264 - MPEG-4 AVC (part 10) (h264)". Other MP4 videos downloaded from the web instead show "H264 - MPEG-4 AVC (part 10) (avc1)". I don't understand what is going wrong. I searched a lot and some answers suggested prepending the SPS and PPS:

const uint8_t sps[] = { 0x00, 0x00, 0x00, 0x01, 0x67, 0x42, 0x00, 
0x0a, 0xf8, 0x41, 0xa2 }; 
const uint8_t pps[] = { 0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 
0x38, 0x80 }; 

So I wrote the values above into the video file before adding the image stream, but with no luck. Can anyone help with this? Thanks in advance.

const uint8_t sps[] = { 0x00, 0x00, 0x00, 0x01, 0x67, 0x42, 0x00, 
0x0a, 0xf8, 0x41, 0xa2 }; 
const uint8_t pps[] = { 0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 
0x38, 0x80 }; 
const uint8_t slice_header[] = { 0x00, 0x00, 0x00, 0x01, 0x05, 0x88, 
0x84, 0x21, 0xa0 }; 
const uint8_t macroblock_header[] = { 0x0d, 0x00 }; 

const uint8_t spspps[] = { 0x00, 0x00, 0x00, 0x01, 0x67, 0x42, 0x00, 0x0a, 0xf8, 0x41, 0xa2, 
          0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x38, 0x80 
         }; 

int ff_load_image(uint8_t *data[4], int linesize[4], 
     int *w, int *h, enum PixelFormat *pix_fmt, 
     const char *filename, void *log_ctx) 
    { 
     AVInputFormat *iformat = NULL; 
     AVFormatContext *format_ctx = NULL; 
     AVCodec *codec=NULL; 
     AVCodecContext *codec_ctx=NULL; 
     AVFrame *frame=NULL; 
     int frame_decoded, ret = 0; 
     AVPacket pkt; 
     av_init_packet(&pkt); 
     pkt.data = NULL; 
     pkt.size = 0;   /* so av_free_packet() below is safe on early error paths */ 

     av_register_all(); 

     iformat = av_find_input_format("image2"); 
     if ((ret = avformat_open_input(&format_ctx, filename, iformat, NULL)) < 0) { 
      return ret; 
     } 

     codec_ctx = format_ctx->streams[0]->codec; 
     codec = avcodec_find_decoder(codec_ctx->codec_id); 
     if (!codec) { 
      ret = AVERROR(EINVAL); 
      goto end; 
     } 

     if ((ret = avcodec_open2(codec_ctx, codec, NULL)) < 0) { 
      goto end; 
     } 

     //if (!(frame = avcodec_alloc_frame())) { 
     if (!(frame = av_frame_alloc())) { 
      ret = AVERROR(ENOMEM); 
      goto end; 
     } 

     ret = av_read_frame(format_ctx, &pkt); 
     if (ret < 0) { 
      goto end; 
     } 

     ret = avcodec_decode_video2(codec_ctx, frame, &frame_decoded, &pkt); 
     if (ret < 0 || !frame_decoded) { 
      goto end; 
     } 
     ret = 0; 

     *w  = frame->width; 
     *h  = frame->height; 
     *pix_fmt = (PixelFormat)frame->format; 

     if ((ret = av_image_alloc(data, linesize, *w, *h, (AVPixelFormat)*pix_fmt, 16)) < 0) 
      goto end; 
     ret = 0; 

     av_image_copy(data, linesize, (const uint8_t **)frame->data, frame->linesize, (AVPixelFormat)*pix_fmt, *w, *h); 

end: 
     if(codec_ctx) { avcodec_close(codec_ctx); } 
     if(format_ctx) { avformat_close_input(&format_ctx); } 
     if(frame) { av_frame_free(&frame); }   /* av_freep() alone would leak the frame's internals */ 
     av_free_packet(&pkt); 
       return ret; 
    } 

    int load_image_into_frame(AVFrame *frame, const char *filename) 
    { 
     int retval = -1, res; 
     struct SwsContext *sws_ctx = NULL;   /* was 'static', which risked freeing a stale context on a later call */ 
     uint8_t *image_data[4]; 
     int linesize[4]; 
     int source_width, source_height; 
     enum PixelFormat source_fmt; 

     res = ff_load_image(image_data, linesize, &source_width, &source_height, &source_fmt, filename, NULL); 

     /* note: if the source format already matches the frame's format, nothing 
       is copied into the frame here */ 
     if (source_fmt != frame->format) { 
      sws_ctx = sws_getContext(source_width, source_height, (AVPixelFormat)source_fmt, 
       frame->width, frame->height, (AVPixelFormat)frame->format, 
       sws_flags, NULL, NULL, NULL); 

      sws_scale(sws_ctx, 
       (const uint8_t * const *)image_data, linesize, 
       0, frame->height, frame->data, frame->linesize); 
     } 

     retval = 0; 
error: 
     av_freep(&image_data[0]); 
     sws_freeContext(sws_ctx); 
     return retval; 
    } 

    int write_frame_to_file(FILE *file, AVFrame *frame, AVCodecContext *codec_context, AVPacket *pkt) { 
     int res, got_output; 
     av_init_packet(pkt); 
     pkt->data = NULL; 
     pkt->size = 0; 

     /* advance the timestamp by 30 ticks per frame; with time_base = 1/30 that 
       is one second per frame (an increment of 1 would give 30 fps) */ 
     frame->pts += 30; 

     res = avcodec_encode_video2(codec_context, pkt, frame, &got_output); 

     if (got_output) { 

      fwrite(pkt->data, 1, pkt->size, file); 
      av_free_packet(pkt); 
     } 
     return 0; 
error: 
     return -1; 
    } 

    int write_image_to_file(FILE *file, const char *filename, int count, AVFrame *frame, AVCodecContext *codec_context, AVPacket *pkt) { 
     int res, i; 
     res = load_image_into_frame(frame, filename); 

     for (i = 0; i < count; i++) { 

      res = write_frame_to_file(file, frame, codec_context, pkt); 
     } 

     return 0; 
error: 
     return -1; 
    } 

    int write_delayed_frames_to_file(FILE *file, AVFrame *frame, AVCodecContext *codec_context, AVPacket *pkt) { 
     int res, got_output; 

     for (got_output = 1; got_output;) { 
      res = avcodec_encode_video2(codec_context, pkt, NULL, &got_output); 

      if (got_output) { 
       fwrite(pkt->data, 1, pkt->size, file); 
       av_free_packet(pkt); 
      } 
     } 

     return 0; 
error: 
     return -1; 
    } 

    AVCodecContext *get_codec_context(int width, int height, int fps) 
    { 
     int res; 
     avcodec_register_all(); 

     AVCodec *codec; 
     AVCodecContext *codec_context = NULL; 

     codec = avcodec_find_encoder(AV_CODEC_ID_H264); 

     codec_context = avcodec_alloc_context3(codec); 

     codec_context->bit_rate = 441000; 
     codec_context->width = width; 
     codec_context->height = height; 
     AVRational time_base = {1, fps}; 
     codec_context->time_base = time_base; 
     codec_context->gop_size = 10; 
     codec_context->max_b_frames=1; 
     codec_context->pix_fmt = AV_PIX_FMT_YUV420P;   

     res = avcodec_open2(codec_context, codec, NULL); 

     return codec_context; 
error: 
     return NULL; 
    } 

    AVFrame *get_av_frame(AVCodecContext *codec_context) { 
     int res; 
     AVFrame *frame; 

     frame = av_frame_alloc(); 
     frame->height = codec_context->height; 
     frame->width = codec_context->width; 
     frame->format = codec_context->pix_fmt; 
     frame->pts = 0; 

     res = av_image_alloc(frame->data, frame->linesize, frame->width, frame->height, (AVPixelFormat)frame->format, 1); 

     return frame; 
error: 
     return NULL; 
    } 

    int main(int argc, char **argv) 
    { 
     const char *filename = "result video\\test.mp4"; 
     FILE *file=NULL; 
     int res, retval=-1; 
     AVCodecContext *codec_context= NULL; 
     AVFrame *frame=NULL; 
     AVPacket pkt; 
     uint8_t endcode[] = { 0, 0, 1, 0xb7 }; 

     codec_context = get_codec_context(1920, 1080, 30); 

     file = fopen(filename, "wb"); 
     //check(file != NULL, "could not open destination file %s", filename); 

     frame = get_av_frame(codec_context);   

     //fwrite(sps, 1, sizeof(sps), file); 
     //fwrite(pps, 1, sizeof(pps), file); 

     /*codec_context->extradata = (uint8_t *)malloc(sizeof(uint8_t) * sizeof(spspps)); 

     for(unsigned int index = 0; index < sizeof(spspps); index++) 
     { 
      codec_context->extradata[index] = spspps[index]; 
     } 

     codec_context->extradata_size = (int)sizeof(spspps);*/ 

     codec_context->flags |= CODEC_FLAG_GLOBAL_HEADER;   // note: setting this after avcodec_open2() has no effect, and global headers only matter when muxing into a container 

     int i, frames= 51; 
     for (i = 0; i < frames; i++) { 
      std::stringstream ss; 
      ss<<"\\frames\\out"<<(i + 1)<<".jpg"; 
      res = write_image_to_file(file, ss.str().c_str(), 3, frame, codec_context, &pkt); 
     } 


     res = write_delayed_frames_to_file(file, frame, codec_context, &pkt); 
     fwrite(endcode, 1, sizeof(endcode), file); 

     retval = 0; 
error: 
     if (file) 
      fclose(file); 
     if (codec_context) { 
      avcodec_close(codec_context); 
      av_free(codec_context); 
     } 
     if (frame) { 
      av_freep(&frame->data[0]); 
      av_free(frame); 
     } 
     return retval; 
    } 

Answer


You are not using an AVFormatContext for your encoder output, which as far as I can see is why you cannot produce a proper .mp4 file.

You should:

  1. Use an AVFormatContext.
  2. Set an AVOutputFormat on AVFormatContext->oformat directly, or use a helper such as avformat_alloc_output_context2().
  3. Optionally set any flags you need and call avio_open2() to open the output resource.
  4. Call avformat_write_header().

I won't paste full code here; there are plenty of examples online that you can find by searching for these function names together with the keywords "ffmpeg encoding". A rough sketch of the muxing steps is given below.
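
As a minimal, untested sketch of those four steps (the function names and structure here are mine, not the poster's code, and they target the same FFmpeg API generation as the question, i.e. the avcodec_encode_video2 era): the encoded packets go to av_interleaved_write_frame() instead of fwrite(), and the stream is finished with av_write_trailer(). Note that CODEC_FLAG_GLOBAL_HEADER must be set on the encoder before avcodec_open2() so the muxer receives the SPS/PPS as extradata, which is what produces the "avc1" tag.

extern "C" { 
#include <libavformat/avformat.h> 
#include <libavcodec/avcodec.h> 
} 

// Create an mp4 muxer with one video stream that mirrors the encoder settings. 
AVFormatContext *open_mp4_muxer(const char *filename, AVCodecContext *codec_context, AVStream **out_stream) 
{ 
    AVFormatContext *fmt_ctx = NULL; 

    // 1./2. AVFormatContext with an AVOutputFormat guessed from the file name. 
    avformat_alloc_output_context2(&fmt_ctx, NULL, NULL, filename); 

    AVStream *stream = avformat_new_stream(fmt_ctx, NULL); 
    stream->time_base = codec_context->time_base; 
    avcodec_copy_context(stream->codec, codec_context);   // old-style API, matching the question's FFmpeg version 

    // 3. Open the output resource. 
    if (!(fmt_ctx->oformat->flags & AVFMT_NOFILE)) 
        avio_open2(&fmt_ctx->pb, filename, AVIO_FLAG_WRITE, NULL, NULL); 

    // 4. Write the container header (this is where the extradata ends up). 
    avformat_write_header(fmt_ctx, NULL); 

    *out_stream = stream; 
    return fmt_ctx; 
} 

// For every packet produced by avcodec_encode_video2(), rescale the timestamps 
// and hand it to the muxer instead of calling fwrite(). 
void mux_packet(AVFormatContext *fmt_ctx, AVStream *stream, AVCodecContext *codec_context, AVPacket *pkt) 
{ 
    av_packet_rescale_ts(pkt, codec_context->time_base, stream->time_base); 
    pkt->stream_index = stream->index; 
    av_interleaved_write_frame(fmt_ctx, pkt); 
} 

void close_mp4_muxer(AVFormatContext *fmt_ctx) 
{ 
    av_write_trailer(fmt_ctx); 
    if (!(fmt_ctx->oformat->flags & AVFMT_NOFILE)) 
        avio_closep(&fmt_ctx->pb); 
    avformat_free_context(fmt_ctx); 
} 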


Thanks for the reply :-). Your hints were very helpful. I found some examples like you suggested, but they don't let me get the encoded data into a buffer; I am supposed to call av_interleaved_write_frame(), which writes the AVPacket to the output file internally. My requirement is to obtain the video in a buffer rather than writing it directly to a file. –
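
Regarding getting the muxed video into a memory buffer instead of a file: the usual approach is to give the AVFormatContext a custom AVIOContext created with avio_alloc_context() and a write callback. A rough sketch under that assumption (the names below are mine and untested) follows; note that the default MP4 muxer wants a seekable output to patch the moov box, so with a write-only callback you generally need fragmented MP4 (e.g. the "movflags" option "frag_keyframe+empty_moov") or a seek callback as well.

extern "C" { 
#include <libavformat/avformat.h> 
} 
#include <vector> 

struct MemoryBuffer { 
    std::vector<uint8_t> data;   // grows as the muxer writes 
}; 

// Called by libavformat whenever it wants to emit bytes. 
static int write_to_memory(void *opaque, uint8_t *buf, int buf_size) 
{ 
    MemoryBuffer *mem = static_cast<MemoryBuffer *>(opaque); 
    mem->data.insert(mem->data.end(), buf, buf + buf_size); 
    return buf_size; 
} 

// Build an in-memory AVIOContext for a muxer context created without avio_open2(). 
AVIOContext *make_memory_io(MemoryBuffer *mem) 
{ 
    const int io_buf_size = 4096; 
    uint8_t *io_buf = (uint8_t *)av_malloc(io_buf_size); 
    // write_flag = 1, no read or seek callbacks 
    return avio_alloc_context(io_buf, io_buf_size, 1, mem, NULL, write_to_memory, NULL); 
} 

// Usage (sketch): fmt_ctx->pb = make_memory_io(&mem); then call 
// avformat_write_header(), av_interleaved_write_frame() and av_write_trailer() 
// as usual; afterwards mem.data holds the container bytes. 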