好吧,我已經寫了一些代碼,應該打開和音頻輸出設備,並且每當有實際的PCM數據可用時,我會調用Sound_WriteFrame()並堆積更多的數據播放。使用GStreamer播放原始PCM數據
數據是原始的,沒有標題,所以當我調用Sound_Open()時,我傳遞這些信息讓GStreamer知道將會到達什麼樣的數據。
此代碼不起作用 - 坦率地說,我不知道我在做什麼做,我發現GStreamer有點難以合作,我希望最終可以改變。
我正在使用GStreamer 1.0。幫助表示讚賞。
#include <gstreamer-1.0/gst/gst.h>
#include <gstreamer-1.0/gst/gstelement.h>
#include <gtk/gtk.h>
#include <stdlib.h>
#include <string.h>
typedef struct _sound_t {
GstElement *source, *sink, *pipeline;
GMemoryInputStream *giostream;
GstPad *sourcepad;
GstCaps *gcaps;
int bDeviceOpen;
} SOUND_CTX;
int Sound_Close(SOUND_CTX *p){
printf("Closing\n");
gst_element_set_state(p->pipeline, GST_STATE_NULL);
return 1;
}
void Sound_SetState(SOUND_CTX *p, GstState state){
GstStateChangeReturn r = gst_element_set_state(p->pipeline, state);
switch(r){
case GST_STATE_CHANGE_FAILURE: printf("GST_STATE_CHANGE_FAILURE\n"); break;
case GST_STATE_CHANGE_SUCCESS: printf("GST_STATE_CHANGE_SUCCESS\n"); break;
case GST_STATE_CHANGE_ASYNC: printf("GST_STATE_CHANGE_ASYNC\n"); break;
case GST_STATE_CHANGE_NO_PREROLL: printf("GST_STATE_CHANGE_NO_PREROLL\n"); break;
default: printf("Unknown state\n"); break;
}
return;
}
int Sound_Open(SOUND_CTX *p, int nSamplesPerSec, int nChannels){
p->source = gst_element_factory_make("giostreamsrc", "source");
p->giostream = G_MEMORY_INPUT_STREAM(g_memory_input_stream_new());
g_object_set(G_OBJECT(p->source), "stream", G_INPUT_STREAM(p->giostream), NULL);
p->sourcepad = gst_element_get_static_pad(p->source, "src");
p->gcaps = gst_caps_new_simple(
"audio/x-raw",
"rate", G_TYPE_INT, nSamplesPerSec,
"channels", G_TYPE_INT, nChannels,
"width", G_TYPE_INT, 16,
"depth", G_TYPE_INT, 16,
"signed", G_TYPE_BOOLEAN, TRUE,
NULL
);
gst_pad_set_caps(p->sourcepad, p->gcaps);
gst_object_unref(p->sourcepad);
p->sink = gst_element_factory_make("alsasink", "sink");
p->pipeline = gst_pipeline_new("pipeline_name");
gst_bin_add_many(GST_BIN(p->pipeline), p->source, p->sink, NULL);
gst_element_link_many(p->source, p->sink, NULL);
Sound_SetState(p, GST_STATE_PLAYING);
return 1;
}
int Sound_WriteFrame(SOUND_CTX *p, void *lpData, unsigned int size){
g_memory_input_stream_add_data(
G_MEMORY_INPUT_STREAM(p->giostream),
lpData, size, NULL
);
return 0;
}
int timer_callback(const void *data){
g_main_loop_quit((GMainLoop *)data);
return FALSE;
}
int main(int argc, char *argv[]){
gst_init(&argc, &argv);
GMainLoop *loop = NULL;
SOUND_CTX a;
memset(&a, 0x00, sizeof(SOUND_CTX));
if(Sound_Open(&a, 44100, 2)){
FILE *handle;
unsigned char tmp[4096];
if((handle = fopen("test.pcm", "rb")) != NULL){
while(fread(tmp, 1, sizeof(tmp), handle) == sizeof(tmp)){
Sound_WriteFrame(&a, tmp, sizeof(tmp));
}
fclose(handle);
}
}
loop = g_main_loop_new(NULL, FALSE);
g_timeout_add(5500, (GSourceFunc)timer_callback, loop);
g_main_loop_run(loop);
Sound_Close(&a);
g_main_loop_unref(loop);
return 0;
}
好了,所以這裏是一個更新的來源,然而有沒有聲音出來的揚聲器
#include <gstreamer-1.0/gst/gst.h>
#include <gstreamer-1.0/gst/gstelement.h>
#include <gstreamer-1.0/gst/app/gstappsrc.h>
#include <gtk/gtk.h>
#include <stdlib.h>
#include <string.h>
typedef struct _sound_t {
GstElement *sink, *pipeline;
GstAppSrc *src;
GstCaps *pcm_caps;
} SOUND_CTX;
int Sound_Close(SOUND_CTX *p){
printf("Closing\n");
gst_element_set_state(p->pipeline, GST_STATE_NULL);
return 1;
}
void Sound_SetState(SOUND_CTX *p, GstState state){
GstStateChangeReturn r = gst_element_set_state(p->pipeline, state);
switch(r){
case GST_STATE_CHANGE_FAILURE: printf("GST_STATE_CHANGE_FAILURE\n"); break;
case GST_STATE_CHANGE_SUCCESS: printf("GST_STATE_CHANGE_SUCCESS\n"); break;
case GST_STATE_CHANGE_ASYNC: printf("GST_STATE_CHANGE_ASYNC\n"); break;
case GST_STATE_CHANGE_NO_PREROLL: printf("GST_STATE_CHANGE_NO_PREROLL\n"); break;
default: printf("Unknown state\n"); break;
}
return;
}
int Sound_Open(SOUND_CTX *p, int nSamplesPerSec, int nChannels){
p->pipeline = gst_pipeline_new("pipeline_name");
p->sink = gst_element_factory_make("alsasink", "sink");
p->src = (GstAppSrc*) gst_element_factory_make("appsrc", "source");
gst_app_src_set_stream_type(p->src, GST_APP_STREAM_TYPE_STREAM);
// I am hardcoding the format, channels, and rate for now
p->pcm_caps = gst_caps_from_string("audio/x-raw,format=S16LE,rate=44100,channels=2");
gst_app_src_set_caps(p->src, p->pcm_caps);
gst_bin_add_many(GST_BIN(p->pipeline), (GstElement*)p->src, p->sink, NULL);
gst_element_link_many((GstElement*)p->src, p->sink, NULL);
Sound_SetState(p, GST_STATE_PLAYING);
return 1;
}
int Sound_WriteFrame(SOUND_CTX *p, void *lpData, unsigned int size){
GstBuffer *buf = NULL;
void *lpHeapData = NULL;
if((lpHeapData = g_malloc(size)) == NULL) return 0;
memcpy(lpHeapData, lpData, size);
buf = gst_buffer_new_wrapped(lpHeapData, size);
if(buf == NULL){
g_free(lpHeapData);
return 0;
}
gst_app_src_push_buffer(p->src, buf);
return 0;
}
int timer_callback(const void *data){
g_main_loop_quit((GMainLoop *)data);
return FALSE;
}
int main(int argc, char *argv[]){
gst_init(&argc, &argv);
GMainLoop *loop = NULL;
SOUND_CTX a;
memset(&a, 0x00, sizeof(SOUND_CTX));
if(Sound_Open(&a, 44100, 2)){
FILE *handle;
unsigned char tmp[4096];
if((handle = fopen("test.pcm", "rb")) != NULL){
while(fread(tmp, 1, sizeof(tmp), handle) == sizeof(tmp)){
Sound_WriteFrame(&a, tmp, sizeof(tmp));
}
fclose(handle);
}
}
loop = g_main_loop_new(NULL, FALSE);
g_timeout_add(5500, (GSourceFunc)timer_callback, loop);
g_main_loop_run(loop);
Sound_Close(&a);
g_main_loop_unref(loop);
return 0;
}
你可以使用audioparse做包信息gst-launch-1.0 filesrc location = test.pcm! audioparse raw-format = s16le channels = 2 rate = 44100 format = raw! alsasink – chub
我想你誤解了這個問題。文件'test.pcm'僅用於演示Sound_WriteFrame()的使用。甚至可能沒有本地文件。 – SimonB
我想你誤解了我的答案;-),[audioparse/rawaudioparse](https://gstreamer.freedesktop.org/data/doc/gstreamer/head/gst-plugins-bad-plugins/html/gst-plugins-bad -plugins-rawaudioparse.html#GstRawAudioParse)確保數據流正確的緩衝區大小 – chub