2012-04-06

I am building video chat on Android and I want to stream from FFmpeg over RTSP or RTMP; for now I am making a first attempt with RTSP. The problem is that av_write_frame or av_interleaved_write_frame does not work properly, or simply crashes. My guesses so far: the AudioRecord sample format does not match the FFmpeg settings, the frame size FFmpeg receives does not match what Android AudioRecord delivers, or the native AAC encoding in FFmpeg is misconfigured.

So here is the code. AudioRecorder: http://pastebin.com/iWtB3Jhy

package com.curtis.broadcaster.Publisher; 

import android.app.Activity; 
import android.graphics.Bitmap; 
import android.media.AudioFormat; 
import android.media.AudioRecord; 
import android.media.AudioRecord.OnRecordPositionUpdateListener; 
import android.media.MediaRecorder; 
import android.os.Bundle; 
import android.util.Log; 

public class Publisher extends Activity { 
    private int mAudioBufferSize; 
    private int mAudioBufferSampleSize; 
    private AudioRecord mAudioRecord; 
    private boolean inRecordMode = false; 
    private short[] audioBuffer; 
    private String Tag = "Publisher/Publisher.java"; 

    public void onCreate(Bundle savedInstanceState) { 
     Log.i(Tag, "|| onCreate()"); 
     super.onCreate(savedInstanceState); 
     initAudioRecord(); 
     Log.i(Tag, "-- End onCreate()"); 
    } 

    @Override 
    public void onResume() { 
     Log.i(Tag, "|| onResume()"); 
     super.onResume(); 
     inRecordMode = true; 
     Thread t = new Thread(new Runnable() { 

      public void run() { 
       Log.i(Tag, "|| Run Threat t"); 
       getSamples(); 
       Log.i(Tag, "-- End Threat t"); 
      } 
     }); 
     t.start(); 
     Log.i(Tag, "-- End onResume()"); 
    } 

    protected void onPause() { 
     Log.i(Tag, "|| Run onPause()"); 
     inRecordMode = false; 
     super.onPause(); 
     Log.i(Tag, "-- End onPause()"); 
    } 

    @Override 
    protected void onDestroy() { 
     Log.i(Tag, "|| Run onDestroy()"); 
     if (mAudioRecord != null) { 
      mAudioRecord.release(); 
      Log.i(Tag + " onDestroy", "mAudioRecord.release()"); 
     } 
     jniStopAll(); 
     super.onDestroy(); 
     android.os.Process.killProcess(android.os.Process.myPid()); 
     Log.i(Tag, "-- End onDestroy()"); 
    } 

    public OnRecordPositionUpdateListener mListener = new OnRecordPositionUpdateListener() { 

     public void onPeriodicNotification(AudioRecord recorder) { 
      Log.i(Tag + " mListener(onPeriodicNotification)", "time is " 
        + System.currentTimeMillis()); 
      jniSetAudioSample(audioBuffer); 
     // audioBuffer = new short[mAudioBufferSampleSize]; 
     } 

     public void onMarkerReached(AudioRecord recorder) { 
      Log.i(Tag + " mListener(onMarkerReached)", 
        "time is " + System.currentTimeMillis()); 
      inRecordMode = false; 
      recorder.stop(); 
      Log.i(Tag, "recorder.stop()"); 
     } 
    }; 

    private void initAudioRecord() { 
     try { 
      jniCheck(); 
      int sampleRate = 44100; 
      int channelConfig = AudioFormat.CHANNEL_IN_MONO; 
      int audioFormat = AudioFormat.ENCODING_PCM_16BIT; 
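      // getMinBufferSize() returns a size in bytes; the code doubles it for 
      // headroom and halves it again to get the length in 16-bit samples. 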
      mAudioBufferSize = 2 * AudioRecord.getMinBufferSize(sampleRate, 
        channelConfig, audioFormat); 
      mAudioBufferSampleSize = mAudioBufferSize/2; 
      Log.i(Tag, "Buffer Size " + mAudioBufferSize); 
      Log.i(Tag, "new AudioRecord begin"); 

      mAudioRecord = new AudioRecord(MediaRecorder.AudioSource.MIC, 
        sampleRate, channelConfig, audioFormat, mAudioBufferSize); 
      Log.i(Tag, "new AudioRecord end"); 

      jniInitFFMpeg(); 
     } catch (IllegalArgumentException e) { 
      Log.i(Tag, "initAudioRecord go Errors"); 
      e.printStackTrace(); 
     } 

     // mAudioRecord.setNotificationMarkerPosition(10000); 
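      // Raise onPeriodicNotification after every 1024 recorded frames. 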
     mAudioRecord.setPositionNotificationPeriod(1024); 
     mAudioRecord.setRecordPositionUpdateListener(mListener); 

     int audioRecordState = mAudioRecord.getState(); 
     if (audioRecordState != AudioRecord.STATE_INITIALIZED) { 
      finish(); 
     } 

    } 

    private void getSamples() { 
     Log.i(Tag, "|| getSamples()"); 
     if (mAudioRecord == null) 
      return; 

     audioBuffer = new short[mAudioBufferSampleSize]; 
     mAudioRecord.startRecording(); 
     int audioRecordingState = mAudioRecord.getRecordingState(); 
     if (audioRecordingState != AudioRecord.RECORDSTATE_RECORDING) { 
      finish(); 
     } 
     while (inRecordMode) { 
      int samplesRead = mAudioRecord.read(audioBuffer, 0, 
        mAudioBufferSampleSize); 
      Log.i(Tag, "getSamples >>SamplesRead : " + samplesRead); 
     } 
     mAudioRecord.stop(); 
     Log.i(Tag, "mAudioRecord.stop()"); 
    } 

    private native void jniCheck(); 

    private native void jniInitFFMpeg(); 

    private native void jniSetAudioSample(short[] audioBuffer); 

    private native void jniStopAll(); 

    static { 
     System.loadLibrary("ffmpeg"); 
     System.loadLibrary("testerv4"); 

    } 

} 

FFMPEG JNI http://pastebin.com/hgPva35b

#include <jni.h> 
#include <android/log.h> 
#include <android/bitmap.h> 

#include <stdlib.h> 
#include <stdio.h> 
#include <string.h> 
#include <math.h> 
#include <sys/time.h> 
#include "libavformat/rtsp.h" 

#include <libavutil/mathematics.h> 
#include <libavformat/avformat.h> 
#include <libavcodec/avcodec.h> 
#include <libswscale/swscale.h> 

#undef exit 
/* Log System */ 
#define LOG_TAG "FFMPEGSample - v4a" 
#define DEBUG_TAG "FFMPEG-AUDIO PART" 
#define LOGI(...) __android_log_print(ANDROID_LOG_INFO,LOG_TAG,__VA_ARGS__) 
#define LOGE(...) __android_log_print(ANDROID_LOG_ERROR,LOG_TAG,__VA_ARGS__) 

/* 5 seconds stream duration */ 
#define STREAM_DURATION 5.0 
#define STREAM_FRAME_RATE 25 /* 25 images/s */ 
#define STREAM_NB_FRAMES ((int)(STREAM_DURATION * STREAM_FRAME_RATE)) 
#define STREAM_PIX_FMT  PIX_FMT_YUV420P /* default pix_fmt */ 
#define VIDEO_CODEC_ID  CODEC_ID_FLV1 
#define AUDIO_CODEC_ID  CODEC_ID_AAC 

static int sws_flags = SWS_BICUBIC; 
int mode = 1; //1 = only audio, 2 = only video, 3 = both video and audio 

AVFormatContext *avForCtx; 
//AVFormatContext *oc; 
AVStream *audio_st, *video_st; 
double audio_pts, video_pts; 
int frameCount, audioFrameCount, start; 
char *url; 

/*Audio Declare*/ 
float t, tincr, tincr2; 
int16_t *samples; 
uint8_t *audio_outbuf; 
int audio_outbuf_size; 
int audio_input_frame_size; 

AVFormatContext *createAVFormatContext(); 
AVStream *add_audio_stream(AVFormatContext *oc, enum CodecID codec_id); 
void open_video(AVFormatContext *oc, AVStream *st); 
void open_audio(AVFormatContext *oc, AVStream *st); 
AVStream *add_video_stream(AVFormatContext *oc, enum CodecID codec_id); 
void write_audio_frame(AVFormatContext *oc, AVStream *st); 
void write_video_frame(AVFormatContext *oc, AVStream *st); 
void init(); 
void setAudioSample(unsigned char *inSample[]); 
void stopAll(); 

/*/////////////////////////////////JNI Bridge////////////////////////////////////// */ 
void Java_com_curtis_broadcaster_Publisher_Publisher_jniCheck(JNIEnv* env, 
     jobject this) { 
    LOGI("[email protected] JNI work fine @-"); 
} 
void Java_com_curtis_broadcaster_Publisher_Publisher_jniInitFFMpeg(JNIEnv* env, 
     jobject this) { 
    LOGI("[email protected] Init Encorder @-"); 

    /* initialize libavcodec, and register all codecs and formats */ 
    avcodec_init(); 
    avcodec_register_all(); 
    av_register_all(); 
    avformat_network_init(); //ERROR 


    /* allocate the output media context */ 
    avForCtx = createAVFormatContext(); 
    frameCount = 1; 
    audioFrameCount = 1; 
    start = 0; 

    /* add the audio and video streams using the default format codecs 
    and initialize the codecs */ 
    video_st = NULL; 
    audio_st = NULL; 
    if (mode == 1 || mode == 3) { 
     audio_st = add_audio_stream(avForCtx, AUDIO_CODEC_ID); 
     LOGI("(Init Encorder) - addAudioStream"); 
    } 
    if (mode == 2 || mode == 3) { 
     video_st = add_video_stream(avForCtx, VIDEO_CODEC_ID); 
     LOGI("(Init Encorder) - addVideoStream"); 

    } 

    // av_dump_format(avForCtx, 0, "rtsp://192.168.1.104/live/live", 1); 
    LOGI("(Init Encoder) - Waiting to call open_*"); 

    if (audio_st) { 
     open_audio(avForCtx, audio_st); 
     LOGI("(Init Encoder) - open_audio"); 
    } 

    if (video_st) { 
     open_video(avForCtx, video_st); 
     LOGI("(Init Encoder) - open_video"); 
    } 

    av_write_header(avForCtx); 
    LOGI("@- Finish Init Encoder -@"); 

} 

void Java_com_curtis_broadcaster_Publisher_Publisher_jniSetAudioSample(
     JNIEnv* env, jobject this, unsigned char *inSample[]) { 
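    // NOTE: the Java side passes a short[] (a jshortArray); casting the raw 
    // array reference to int16_t* below bypasses GetShortArrayElements(). 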
    if (audio_st) { 
     LOGI("[email protected] Start setAudioSample @-"); 
     samples = (int16_t *) inSample; 

     write_audio_frame(avForCtx, audio_st); 
     LOGI("[email protected] Finish setAudioSample @-"); 
    } 
} 

void Java_com_curtis_broadcaster_Publisher_Publisher_jniStopAll(JNIEnv* env, 
     jobject this) { 
    LOGI("[email protected] Stopping All @-"); 
    //close_audio(avForCtx, audio_st); 
    //close_video(avForCtx, video_st); 
    LOGI("[email protected] Stopped All @-"); 
} 
/*/////////////////////////////END JNI Bridge////////////////////////////////////// */ 

/* New Added Coding */ 
AVFormatContext *createAVFormatContext() { 
    LOGI("[email protected] - [email protected]"); 

    AVFormatContext *ctx = avformat_alloc_context(); 
    // ctx->oformat = av_guess_format("flv", "rtmp://192.168.1.104/live/live", 
    //  NULL); 
    // ctx->oformat = av_guess_format("flv", NULL, NULL); 

    //if (!av_guess_format("flv", NULL, NULL)) { 

    //LOGI("-flv Can not Guess Format-"); 
    //} 

    ctx->oformat = av_guess_format("rtsp", NULL, NULL); 

    if (!av_guess_format("rtsp", NULL, NULL)) { 

     LOGI("-flv Can not Guess Format-"); 
    } 

    /* 
    LOGI("%d",avformat_alloc_output_context2(&ctx, ctx->oformat, "flv", 
    "rtmp://192.168.1.104/live/live")); 
    if (!ctx) { 
    LOGI("[email protected]_alloc_output_context2 [email protected]"); 
    }*/ 
    // LOGI("flv %d",avformat_alloc_output_context2(&ctx, ctx->oformat, "flv", 
    // "rtmp://192.168.1.104/live/live")); 
    // LOGI("rtmp %d",avformat_alloc_output_context2(&ctx, ctx->oformat, "rtmp", 
    // "rtmp://192.168.1.104/live/live")); 
    // LOGI("mpeg4 %d",avformat_alloc_output_context2(&ctx, ctx->oformat, "mpeg4", 
    // "rtmp://192.168.1.104/live/live")); 
    // LOGI("NULL %d",avformat_alloc_output_context2(&ctx, ctx->oformat, NULL, 
    // "rtmp://192.168.1.104/live/live")); 
    avformat_alloc_output_context2(&ctx, ctx->oformat, "sdp", 
      "rtsp://192.168.1.104:1935/live/live"); 

    if (!ctx) { 
     LOGI("[email protected]_alloc_output_context2 [email protected]"); 
    } 

    LOGI("[email protected] - [email protected]"); 

    return ctx; 
} 

/**************************************************************/ 
/* audio output */ 

/* 
* add an audio output stream 
*/ 
AVStream *add_audio_stream(AVFormatContext *oc, enum CodecID codec_id) { 
    LOGI("[email protected] - [email protected]"); 

    AVCodecContext *c; 
    AVStream *st = avformat_new_stream(oc, avcodec_find_encoder(codec_id)); 

    if (!st) { 
     LOGI("[email protected]_audio_stream - Could not alloc [email protected]"); 
     exit(1); 
    } 
    st->id = 1; 

    c = st->codec; 
    c->codec_id = AUDIO_CODEC_ID; 
    c->codec_type = AVMEDIA_TYPE_AUDIO; 

    /* put sample parameters */ 
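    /* NOTE: AV_SAMPLE_FMT_FLT asks the encoder for float samples, while the 
     Java side captures ENCODING_PCM_16BIT (signed 16-bit integer) audio. */ 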
    c->sample_fmt = AV_SAMPLE_FMT_FLT; 
    //c->sample_fmt = AV_SAMPLE_FMT_S16; 
    c->bit_rate = 100000; 
    c->sample_rate = 44100; 
    c->channels = 1; 

    // some formats want stream headers to be separate 
    if (oc->oformat->flags & AVFMT_GLOBALHEADER) 
     c->flags |= CODEC_FLAG_GLOBAL_HEADER; 
    LOGI("[email protected] - [email protected]"); 

    return st; 
} 

void open_audio(AVFormatContext *oc, AVStream *st) { 
    LOGI("@- open_audio [email protected]"); 

    AVCodecContext *c; 
    AVCodec *codec; 

    c = st->codec; 
    c->strict_std_compliance = -2; 
    /* find the audio encoder */ 
    codec = avcodec_find_encoder(c->codec_id); 
    if (!codec) { 
     LOGI("@- open_audio E:codec not [email protected]"); 
     exit(1); 
    } 

    /* open it */ 
    int open_ret = avcodec_open(c, codec); 
    if (open_ret < 0) { 
     LOGI("avcodec_open returned %d", open_ret); 
     LOGI("@- open_audio E:could not open codec -@"); 
     exit(1); 
    } 

    /* init signal generator */ 
    t = 0; 
    tincr = 2 * M_PI * 110.0/c->sample_rate; 
    /* increment frequency by 110 Hz per second */ 
    tincr2 = 2 * M_PI * 110.0/c->sample_rate/c->sample_rate; 

    audio_outbuf_size = 10000; 
    audio_outbuf = av_malloc(audio_outbuf_size); 

    /* ugly hack for PCM codecs (will be removed ASAP with new PCM 
    support) to compute the input frame size in samples */ 
    if (c->frame_size <= 1) { 
     audio_input_frame_size = audio_outbuf_size/c->channels; 
     switch (st->codec->codec_id) { 
     case CODEC_ID_PCM_S16LE: 
     case CODEC_ID_PCM_S16BE: 
     case CODEC_ID_PCM_U16LE: 
     case CODEC_ID_PCM_U16BE: 
      audio_input_frame_size >>= 1; 
      break; 
     default: 
      break; 
     } 
    } else { 
     audio_input_frame_size = c->frame_size; 
    } 
    LOGI("audio_input_frame_size : %d",audio_input_frame_size); 
    samples = av_malloc(audio_input_frame_size * 2 * c->channels); 
    LOGI("@- Close open_audio [email protected]"); 

} 

/* prepare a 16 bit dummy audio frame of 'frame_size' samples and 
'nb_channels' channels */ 
void get_audio_frame(int16_t *samples, int frame_size, int nb_channels) { 
    LOGI("@- get_audio_frame [email protected]"); 

    int j, i, v; 
    int16_t *q; 

    q = samples; 
    for (j = 0; j < frame_size; j++) { 
     v = (int) (sin(t) * 10000); 
     for (i = 0; i < nb_channels; i++) 
      *q++ = v; 
     t += tincr; 
     tincr += tincr2; 
     LOGI("@- audio_frame Looping [email protected]"); 
    } 
    LOGI("@- CLOSE get_audio_frame [email protected]"); 

} 

void write_audio_frame(AVFormatContext *oc, AVStream *st) { 
    LOGI("@- write_audio_frame [email protected]"); 

    AVCodecContext *c; 
    AVPacket pkt; 
    av_init_packet(&pkt); 

    c = st->codec; 

    //get_audio_frame(samples, audio_input_frame_size, c->channels); 
    LOGI("@- write_audio_frame : got frame from get_audio_frame [email protected]"); 

    pkt.size 
      = avcodec_encode_audio(c, audio_outbuf, audio_outbuf_size, samples); 
    LOGI("%d",pkt.size); 

    if (c->coded_frame && c->coded_frame->pts != AV_NOPTS_VALUE) 
     pkt.pts 
       = av_rescale_q(c->coded_frame->pts, c->time_base, st->time_base); 
    LOGI("%d",pkt.pts); 

    pkt.flags |= AV_PKT_FLAG_KEY; 
    pkt.stream_index = st->index; 
    pkt.data = audio_outbuf; 
    LOGI("Finish PKT"); 

    /* write the compressed frame in the media file */ 
    // if (av_interleaved_write_frame(oc, &pkt) != 0) { 
    // LOGI("@- write_audio_frame E:Error while writing audio frame [email protected]"); 
    // exit(1); 
    // } 

    if (av_interleaved_write_frame(oc, &pkt) != 0) { 
     LOGI("Error while writing audio frame %d\n", audioFrameCount); 
    } else { 
     LOGI("Writing Audio Frame %d", audioFrameCount); 
    } 

    LOGI("@- CLOSE write_audio_frame [email protected]"); 
    audioFrameCount++; 
    av_free_packet(&pkt); 
} 

void close_audio(AVFormatContext *oc, AVStream *st) { 
    avcodec_close(st->codec); 

    av_free(samples); 
    av_free(audio_outbuf); 
} 

/**************************************************************/ 
/* video output */ 

AVFrame *picture, *tmp_picture; 
uint8_t *video_outbuf; 
int frame_count, video_outbuf_size; 

/* add a video output stream */ 
AVStream *add_video_stream(AVFormatContext *oc, enum CodecID codec_id) { 
    AVCodecContext *c; 
    AVStream *st; 
    AVCodec *codec; 

    st = avformat_new_stream(oc, NULL); 
    if (!st) { 
     fprintf(stderr, "Could not alloc stream\n"); 
     exit(1); 
    } 

    c = st->codec; 

    /* find the video encoder */ 
    codec = avcodec_find_encoder(codec_id); 
    if (!codec) { 
     fprintf(stderr, "codec not found\n"); 
     exit(1); 
    } 
    avcodec_get_context_defaults3(c, codec); 

    c->codec_id = codec_id; 

    /* put sample parameters */ 
    c->bit_rate = 400000; 
    /* resolution must be a multiple of two */ 
    c->width = 352; 
    c->height = 288; 
    /* time base: this is the fundamental unit of time (in seconds) in terms 
    of which frame timestamps are represented. for fixed-fps content, 
    timebase should be 1/framerate and timestamp increments should be 
    identically 1. */ 
    c->time_base.den = STREAM_FRAME_RATE; 
    c->time_base.num = 1; 
    c->gop_size = 12; /* emit one intra frame every twelve frames at most */ 
    c->pix_fmt = STREAM_PIX_FMT; 
    if (c->codec_id == CODEC_ID_MPEG2VIDEO) { 
     /* just for testing, we also add B frames */ 
     c->max_b_frames = 2; 
    } 
    if (c->codec_id == CODEC_ID_MPEG1VIDEO) { 
     /* Needed to avoid using macroblocks in which some coeffs overflow. 
     This does not happen with normal video, it just happens here as 
     the motion of the chroma plane does not match the luma plane. */ 
     c->mb_decision = 2; 
    } 
    // some formats want stream headers to be separate 
    if (oc->oformat->flags & AVFMT_GLOBALHEADER) 
     c->flags |= CODEC_FLAG_GLOBAL_HEADER; 

    return st; 
} 

AVFrame *alloc_picture(enum PixelFormat pix_fmt, int width, int height) { 
    AVFrame * picture; 
    uint8_t *picture_buf; 
    int size; 

    picture = avcodec_alloc_frame(); 
    if (!picture) 
     return NULL; 
    size = avpicture_get_size(pix_fmt, width, height); 
    picture_buf = av_malloc(size); 
    if (!picture_buf) { 
     av_free(picture); 
     return NULL; 
    } 
    avpicture_fill((AVPicture *) picture, picture_buf, pix_fmt, width, height); 
    return picture; 
} 

void open_video(AVFormatContext *oc, AVStream *st) { 
    AVCodec *codec; 
    AVCodecContext *c; 

    c = st->codec; 

    /* find the video encoder */ 
    codec = avcodec_find_encoder(c->codec_id); 
    if (!codec) { 
     fprintf(stderr, "codec not found\n"); 
     exit(1); 
    } 

    /* open the codec */ 
    if (avcodec_open(c, codec) < 0) { 
     fprintf(stderr, "could not open codec\n"); 
     exit(1); 
    } 

    video_outbuf = NULL; 
    if (!(oc->oformat->flags & AVFMT_RAWPICTURE)) { 
     /* allocate output buffer */ 
     /* XXX: API change will be done */ 
     /* buffers passed into lav* can be allocated any way you prefer, 
     as long as they're aligned enough for the architecture, and 
     they're freed appropriately (such as using av_free for buffers 
     allocated with av_malloc) */ 
     video_outbuf_size = 200000; 
     video_outbuf = av_malloc(video_outbuf_size); 
    } 

    /* allocate the encoded raw picture */ 
    picture = alloc_picture(c->pix_fmt, c->width, c->height); 
    if (!picture) { 
     fprintf(stderr, "Could not allocate picture\n"); 
     exit(1); 
    } 

    /* if the output format is not YUV420P, then a temporary YUV420P 
    picture is needed too. It is then converted to the required 
    output format */ 
    tmp_picture = NULL; 
    if (c->pix_fmt != PIX_FMT_YUV420P) { 
     tmp_picture = alloc_picture(PIX_FMT_YUV420P, c->width, c->height); 
     if (!tmp_picture) { 
      fprintf(stderr, "Could not allocate temporary picture\n"); 
      exit(1); 
     } 
    } 
} 

/* prepare a dummy image */ 
void fill_yuv_image(AVFrame *pict, int frame_index, int width, int height) { 
    int x, y, i; 

    i = frame_index; 

    /* Y */ 
    for (y = 0; y < height; y++) { 
     for (x = 0; x < width; x++) { 
      pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3; 
     } 
    } 

    /* Cb and Cr */ 
    for (y = 0; y < height/2; y++) { 
     for (x = 0; x < width/2; x++) { 
      pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2; 
      pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5; 
     } 
    } 
} 

void write_video_frame(AVFormatContext *oc, AVStream *st) { 
    int out_size, ret; 
    AVCodecContext *c; 
    static struct SwsContext *img_convert_ctx = NULL; 

    c = st->codec; 

    if (frame_count >= STREAM_NB_FRAMES) { 
     /* no more frame to compress. The codec has a latency of a few 
     frames if using B frames, so we get the last frames by 
     passing the same picture again */ 
    } else { 
     if (c->pix_fmt != PIX_FMT_YUV420P) { 
      /* as we only generate a YUV420P picture, we must convert it 
      to the codec pixel format if needed */ 
      if (img_convert_ctx == NULL) { 
       img_convert_ctx = sws_getContext(c->width, c->height, 
         PIX_FMT_YUV420P, c->width, c->height, c->pix_fmt, 
         sws_flags, NULL, NULL, NULL); 
       if (img_convert_ctx == NULL) { 
        fprintf(stderr, 
          "Cannot initialize the conversion context\n"); 
        exit(1); 
       } 
      } 
      fill_yuv_image(tmp_picture, frame_count, c->width, c->height); 
      sws_scale(img_convert_ctx, tmp_picture->data, 
        tmp_picture->linesize, 0, c->height, picture->data, 
        picture->linesize); 
     } else { 
      fill_yuv_image(picture, frame_count, c->width, c->height); 
     } 
    } 

    if (oc->oformat->flags & AVFMT_RAWPICTURE) { 
     /* raw video case. The API will change slightly in the near 
     future for that. */ 
     AVPacket pkt; 
     av_init_packet(&pkt); 

     pkt.flags |= AV_PKT_FLAG_KEY; 
     pkt.stream_index = st->index; 
     pkt.data = (uint8_t *) picture; 
     pkt.size = sizeof(AVPicture); 

     ret = av_interleaved_write_frame(oc, &pkt); 
    } else { 
     /* encode the image */ 
     out_size = avcodec_encode_video(c, video_outbuf, video_outbuf_size, 
       picture); 
     /* if zero size, it means the image was buffered */ 
     if (out_size > 0) { 
      AVPacket pkt; 
      av_init_packet(&pkt); 

      if (c->coded_frame->pts != AV_NOPTS_VALUE) 
       pkt.pts = av_rescale_q(c->coded_frame->pts, c->time_base, 
         st->time_base); 
      if (c->coded_frame->key_frame) 
       pkt.flags |= AV_PKT_FLAG_KEY; 
      pkt.stream_index = st->index; 
      pkt.data = video_outbuf; 
      pkt.size = out_size; 

      /* write the compressed frame in the media file */ 
      ret = av_interleaved_write_frame(oc, &pkt); 
     } else { 
      ret = 0; 
     } 
    } 
    if (ret != 0) { 
     fprintf(stderr, "Error while writing video frame\n"); 
     exit(1); 
    } 
    frame_count++; 
} 

void close_video(AVFormatContext *oc, AVStream *st) { 
    avcodec_close(st->codec); 
    av_free(picture->data[0]); 
    av_free(picture); 
    if (tmp_picture) { 
     av_free(tmp_picture->data[0]); 
     av_free(tmp_picture); 
    } 
    av_free(video_outbuf); 
} 

The Android manifest is set up and everything initializes. Please give me some ideas. Some log output is at http://pastebin.com/uPD5LyH2

Answers


I know it may be too late to answer this question, but just in case it helps you or someone else who stumbles over the same problem in the future, here is a workaround.

I have been working on a similar project, but the difference is that I chose to compile a native ffmpeg binary and talk to it through the Java Process API, instead of using JNI with a compiled native FFmpeg shared library.

FFmpeg needs hints about the nature of the incoming data. The audio frames created by AudioRecord are PCM 16-bit encoded, and you do not seem to tell FFmpeg the format of the incoming audio stream.

The command given to ffmpeg can look like this:

ffmpeg -f s16le -acodec pcm_s16le -i - -acodec <output-file-codec> <rtsp-stream-address> 

The audio data received from the audio source is then written to the input stream of the ffmpeg process.
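As a minimal sketch of that approach — assuming an ffmpeg binary has already been unpacked to some ffmpegPath and marked executable; the class name, the AAC/RTSP output arguments, and the helper methods are illustrative, not from the original answer:

import java.io.IOException;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;

public class FFmpegPcmPipe {
    private final Process ffmpeg;
    private final OutputStream ffmpegIn;

    public FFmpegPcmPipe(String ffmpegPath, String rtspUrl) throws IOException {
        // -f s16le declares the stdin data as raw signed 16-bit LE PCM;
        // sample rate and channel count must match the AudioRecord settings.
        ffmpeg = new ProcessBuilder(ffmpegPath,
                "-f", "s16le", "-ar", "44100", "-ac", "1",
                "-i", "-",
                "-acodec", "aac", "-f", "rtsp", rtspUrl)
                .redirectErrorStream(true)
                .start();
        ffmpegIn = ffmpeg.getOutputStream();
    }

    /** Converts a block of AudioRecord samples to bytes and feeds ffmpeg. */
    public void write(short[] samples, int count) throws IOException {
        byte[] bytes = new byte[count * 2];
        ByteBuffer.wrap(bytes).order(ByteOrder.LITTLE_ENDIAN)
                .asShortBuffer().put(samples, 0, count);
        ffmpegIn.write(bytes);
    }

    public void close() throws IOException, InterruptedException {
        ffmpegIn.close();   // EOF on stdin lets ffmpeg finish the stream
        ffmpeg.waitFor();
    }
}

In getSamples(), each successful mAudioRecord.read() would then be followed by a call to write(audioBuffer, samplesRead).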

The audio data can also be piped to ffmpeg. ParcelFileDescriptor.createPipe() can be used to create a pipe on the Android platform, and the -i - part of the command line is replaced by -i pipe:<fd>, where fd is the file descriptor of the read side of the created pipe.
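A sketch of the pipe variant, under the same assumptions as above; whether the read-side descriptor actually survives into the ffmpeg child process depends on the platform's close-on-exec behaviour, so treat this as an outline rather than a drop-in replacement:

import android.os.ParcelFileDescriptor;
import java.io.IOException;
import java.io.OutputStream;

public class FFmpegPipeLauncher {
    /**
     * Launches ffmpeg reading raw PCM from pipe:<fd> instead of stdin and
     * returns the write side of the pipe for the capture thread.
     */
    public static OutputStream launch(String ffmpegPath, String rtspUrl)
            throws IOException {
        ParcelFileDescriptor[] pipe = ParcelFileDescriptor.createPipe();
        int readFd = pipe[0].getFd(); // ffmpeg reads from pipe:<readFd>

        new ProcessBuilder(ffmpegPath,
                "-f", "s16le", "-ar", "44100", "-ac", "1",
                "-i", "pipe:" + readFd,
                "-acodec", "aac", "-f", "rtsp", rtspUrl)
                .redirectErrorStream(true)
                .start();

        // The capture thread writes its little-endian PCM bytes here.
        return new ParcelFileDescriptor.AutoCloseOutputStream(pipe[1]);
    }
}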

I would rather suggest accessing ffmpeg through its command-line interface than through JNI, because the command-line interface is well documented, and running it at a debug log level can also help when tracking down problems.