Microsoft Media Foundation - decoding an H264 sample

2010-09-21 337 views

I am using the Microsoft Media Foundation sample MFCaptureToFile to capture H264 frames from my webcam and write them to a file. I am then trying to decode the captured frames using an IMFTransform to get at the underlying image (YUV, BMP, whatever).

However, the ProcessInput method never returns MF_E_NOTACCEPTING, and the ProcessOutput method always returns MF_E_TRANSFORM_NEED_MORE_INPUT.

I basically read each frame and call ProcessInput on it.

Any ideas? Can someone modify the MFCaptureToFile sample to show me how it's done? I am doing all my processing in CCapture::OnReadSample.

Any help would be much appreciated!

Ido


Hey, did you ever get this working? I'm facing pretty much the same problem. – thunderbird 2015-03-14 02:48:36

Answers


The Microsoft H264 decoder MFT is a bit special: it buffers a large number of samples internally. (That is also why it is unsuitable for live scenarios; the internal buffering introduces roughly a second of latency.) I think you have to feed it at least one full GOP before you will receive any output samples. Give that a try.
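In practice that means driving the MFT in a loop: feed input until it pushes back, pull whatever output is available, then feed more. A minimal sketch of the pattern, assuming pDecoderTransform is an already-configured decoder; GetNextH264Sample and DrainAvailableOutput are hypothetical placeholders for your own input and output handling:

IMFSample *pInSample = NULL;
while ((pInSample = GetNextH264Sample()) != NULL)   // hypothetical source of encoded samples
{
    HRESULT hr = pDecoderTransform->ProcessInput(0, pInSample, 0);
    if (hr == MF_E_NOTACCEPTING)
    {
        // The decoder has buffered as much input as it will take; pull output
        // (ProcessOutput until MF_E_TRANSFORM_NEED_MORE_INPUT), then retry.
        DrainAvailableOutput(pDecoderTransform);    // hypothetical
        hr = pDecoderTransform->ProcessInput(0, pInSample, 0);
    }
    pInSample->Release();
}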


I've been able to successfully use the MF H264 decoder MFT to decode frames stored in an .mp4 file into raw YUV. The full code sample is available here.

The key pieces are creating the H264 decoder MFT and then supplying it with samples. I've included code snippets for both steps below.
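For context, the snippets assume roughly the following headers, libraries and declarations. This is a minimal sketch only: the CHECK_HR macro is a stand-in for the one in the full sample, and a few variables used later (pFileVideoMediaType, the VIDEO_SAMPLE_WIDTH/HEIGHT constants, the output file stream) are omitted.

#include <stdio.h>
#include <fstream>
#include <mfapi.h>
#include <mfreadwrite.h>
#include <mftransform.h>
#include <mferror.h>
#include <wmcodecdsp.h>      // CLSID_CMSH264DecoderMFT

#pragma comment(lib, "mfplat.lib")
#pragma comment(lib, "mfreadwrite.lib")
#pragma comment(lib, "mfuuid.lib")
#pragma comment(lib, "wmcodecdspuuid.lib")

// Stand-in for the sample's error-check macro (assumes a done: label).
#define CHECK_HR(hr, msg) if (FAILED(hr)) { printf(msg); goto done; }

IUnknown *spDecTransformUnk = NULL;
IMFTransform *pDecoderTransform = NULL;
IMFMediaType *pDecInputMediaType = NULL, *pDecOutputMediaType = NULL;
MFT_OUTPUT_STREAM_INFO StreamInfo = { 0 };
MFT_OUTPUT_DATA_BUFFER outputDataBuffer = { 0 };
DWORD mftStatus = 0, processOutputStatus = 0, mftOutFlags = 0;
HRESULT mftProcessOutput = S_OK;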

// Create H.264 decoder. 
CHECK_HR(CoCreateInstance(CLSID_CMSH264DecoderMFT, NULL, CLSCTX_INPROC_SERVER, 
    IID_IUnknown, (void**)&spDecTransformUnk), "Failed to create H264 decoder MFT.\n"); 

CHECK_HR(spDecTransformUnk->QueryInterface(IID_PPV_ARGS(&pDecoderTransform)), "Failed to get IMFTransform interface from H264 decoder MFT object.\n"); 

CHECK_HR(MFCreateMediaType(&pDecInputMediaType), "Failed to create decoder input media type.\n"); 
CHECK_HR(pFileVideoMediaType->CopyAllItems(pDecInputMediaType), "Error copying media type attributes to decoder input media type.\n"); 
CHECK_HR(pDecoderTransform->SetInputType(0, pDecInputMediaType, 0), "Failed to set input media type on H.264 decoder MFT.\n"); 

CHECK_HR(MFCreateMediaType(&pDecOutputMediaType), "Failed to create decoder output media type.\n"); 
pDecOutputMediaType->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video); 
pDecOutputMediaType->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_IYUV); 
CHECK_HR(MFSetAttributeSize(pDecOutputMediaType, MF_MT_FRAME_SIZE, VIDEO_SAMPLE_WIDTH, VIDEO_SAMPLE_HEIGHT), "Failed to set frame size on H264 MFT out type.\n"); 
CHECK_HR(MFSetAttributeRatio(pDecOutputMediaType, MF_MT_FRAME_RATE, 30, 1), "Failed to set frame rate on H264 MFT out type.\n"); 
CHECK_HR(MFSetAttributeRatio(pDecOutputMediaType, MF_MT_PIXEL_ASPECT_RATIO, 1, 1), "Failed to set aspect ratio on H264 MFT out type.\n"); 
pDecOutputMediaType->SetUINT32(MF_MT_INTERLACE_MODE, MFVideoInterlace_Progressive); // Progressive frames (value 2). 

CHECK_HR(pDecoderTransform->SetOutputType(0, pDecOutputMediaType, 0), "Failed to set output media type on H.264 decoder MFT.\n"); 

CHECK_HR(pDecoderTransform->GetInputStatus(0, &mftStatus), "Failed to get input status from H.264 decoder MFT.\n"); 
if (MFT_INPUT_STATUS_ACCEPT_DATA != mftStatus) { 
    printf("H.264 decoder MFT is not accepting data.\n"); 
    goto done; 
} 

CHECK_HR(pDecoderTransform->ProcessMessage(MFT_MESSAGE_COMMAND_FLUSH, NULL), "Failed to process FLUSH command on H.264 decoder MFT.\n"); 
CHECK_HR(pDecoderTransform->ProcessMessage(MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, NULL), "Failed to process BEGIN_STREAMING command on H.264 decoder MFT.\n"); 
CHECK_HR(pDecoderTransform->ProcessMessage(MFT_MESSAGE_NOTIFY_START_OF_STREAM, NULL), "Failed to process START_OF_STREAM command on H.264 decoder MFT.\n"); 

Once the decoder has been created, you can get the H264 encoded frames from somewhere, and then you need to pass them to the MFT created above.
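In the full sample the source variables used below (srcBuf, srcByteBuffer, llVideoTimeStamp and so on) come from an IMFSourceReader over the .mp4 file. A minimal sketch of that step, assuming pReader is an IMFSourceReader already created with MFCreateSourceReaderFromURL:

IMFSample *pVideoSample = NULL; 
IMFMediaBuffer *srcBuf = NULL; 
DWORD streamIndex = 0, flags = 0; 
LONGLONG llVideoTimeStamp = 0, llSampleDuration = 0; 
byte *srcByteBuffer = NULL; 
DWORD srcBuffCurrLen = 0, srcBuffMaxLen = 0, srcBufLength = 0; 

// Pull the next compressed H264 sample from the file. 
CHECK_HR(pReader->ReadSample(MF_SOURCE_READER_FIRST_VIDEO_STREAM, 0, &streamIndex, &flags, &llVideoTimeStamp, &pVideoSample), "Error reading video sample.\n"); 
CHECK_HR(pVideoSample->GetSampleDuration(&llSampleDuration), "Error getting sample duration.\n"); 
CHECK_HR(pVideoSample->ConvertToContiguousBuffer(&srcBuf), "Error converting to contiguous buffer.\n"); 
CHECK_HR(srcBuf->GetCurrentLength(&srcBufLength), "Error getting source buffer length.\n"); 
CHECK_HR(srcBuf->Lock(&srcByteBuffer, &srcBuffMaxLen, &srcBuffCurrLen), "Error locking source buffer.\n"); 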

CHECK_HR(MFCreateSample(&reConstructedVideoSample), "Failed to create re-constructed video sample.\n"); 
CHECK_HR(MFCreateMemoryBuffer(srcBufLength, &reConstructedBuffer), "Failed to create memory buffer.\n"); 
CHECK_HR(reConstructedVideoSample->AddBuffer(reConstructedBuffer), "Failed to add buffer to re-constructed sample.\n"); 
CHECK_HR(reConstructedVideoSample->SetSampleTime(llVideoTimeStamp), "Error setting the recon video sample time.\n"); 
CHECK_HR(reConstructedVideoSample->SetSampleDuration(llSampleDuration), "Error setting recon video sample duration.\n"); 

byte *reconByteBuffer; 
DWORD reconBuffCurrLen = 0; 
DWORD reconBuffMaxLen = 0; 
CHECK_HR(reConstructedBuffer->Lock(&reconByteBuffer, &reconBuffMaxLen, &reconBuffCurrLen), "Error locking recon buffer.\n"); 
memcpy(reconByteBuffer, srcByteBuffer, srcBuffCurrLen); 
CHECK_HR(reConstructedBuffer->Unlock(), "Error unlocking recon buffer.\n"); 
CHECK_HR(reConstructedBuffer->SetCurrentLength(srcBuffCurrLen), "Failed to set current length on re-constructed buffer.\n"); 

CHECK_HR(srcBuf->Unlock(), "Error unlocking source buffer.\n"); 

CHECK_HR(pDecoderTransform->ProcessInput(0, reConstructedVideoSample, 0), "The H264 decoder ProcessInput call failed.\n"); 

CHECK_HR(pDecoderTransform->GetOutputStatus(&mftOutFlags), "H264 MFT GetOutputStatus failed.\n"); 

// Note: GetOutputStatus does not reliably report MFT_OUTPUT_STATUS_SAMPLE_READY
// for this decoder, so simply attempt ProcessOutput and check the result for
// MF_E_TRANSFORM_NEED_MORE_INPUT.
CHECK_HR(pDecoderTransform->GetOutputStreamInfo(0, &StreamInfo), "Failed to get output stream info from H264 MFT.\n"); 

while (true) 
{ 
    CHECK_HR(MFCreateSample(&mftOutSample), "Failed to create MF sample.\n"); 
    CHECK_HR(MFCreateMemoryBuffer(StreamInfo.cbSize, &pBuffer), "Failed to create memory buffer.\n"); 
    CHECK_HR(mftOutSample->AddBuffer(pBuffer), "Failed to add buffer to sample.\n"); 
    outputDataBuffer.dwStreamID = 0; 
    outputDataBuffer.dwStatus = 0; 
    outputDataBuffer.pEvents = NULL; 
    outputDataBuffer.pSample = mftOutSample; 

    mftProcessOutput = pDecoderTransform->ProcessOutput(0, 1, &outputDataBuffer, &processOutputStatus); 

    if (mftProcessOutput != MF_E_TRANSFORM_NEED_MORE_INPUT) 
    { 
        // ToDo: These two lines are not right. Need to work out where to get
        // the timestamp and duration from the H264 decoder MFT.
        CHECK_HR(outputDataBuffer.pSample->SetSampleTime(llVideoTimeStamp), "Error setting YUV sample time.\n"); 
        CHECK_HR(outputDataBuffer.pSample->SetSampleDuration(llSampleDuration), "Error setting YUV sample duration.\n"); 

        IMFMediaBuffer *buf = NULL; 
        DWORD bufLength; 
        CHECK_HR(mftOutSample->ConvertToContiguousBuffer(&buf), "ConvertToContiguousBuffer failed.\n"); 
        CHECK_HR(buf->GetCurrentLength(&bufLength), "Get buffer length failed.\n"); 

        printf("Writing sample %i, sample time %I64d, sample duration %I64d, sample size %i.\n", sampleCount, llVideoTimeStamp, llSampleDuration, bufLength); 

        // Write the raw YUV frame out, then unlock and release the buffer. 
        byte *byteBuffer; 
        DWORD buffCurrLen = 0; 
        DWORD buffMaxLen = 0; 
        CHECK_HR(buf->Lock(&byteBuffer, &buffMaxLen, &buffCurrLen), "Error locking output buffer.\n"); 
        outputBuffer.write((char *)byteBuffer, bufLength); 
        outputBuffer.flush(); 
        CHECK_HR(buf->Unlock(), "Error unlocking output buffer.\n"); 
        buf->Release(); 
    } 
    else { 
        break; 
    } 

    mftOutSample->Release(); 
}
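Finally, on the MF_E_TRANSFORM_NEED_MORE_INPUT result the questioner keeps seeing: the decoder returns it until its internal buffer has enough data (see the GOP point in the other answer). At end of stream you can force the remaining buffered frames out with a drain. A minimal sketch, using the same pDecoderTransform as above:

// No more input is coming; ask the decoder to flush out its buffered frames. 
CHECK_HR(pDecoderTransform->ProcessMessage(MFT_MESSAGE_NOTIFY_END_OF_STREAM, NULL), "Failed to notify end of stream.\n"); 
CHECK_HR(pDecoderTransform->ProcessMessage(MFT_MESSAGE_COMMAND_DRAIN, NULL), "Failed to issue drain command.\n"); 

// Then run the same ProcessOutput loop as above until it returns 
// MF_E_TRANSFORM_NEED_MORE_INPUT, at which point the decoder is empty. 
// If ProcessOutput instead returns MF_E_TRANSFORM_STREAM_CHANGE, re-negotiate 
// the output type (GetOutputAvailableType/SetOutputType) and continue. 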