Play WebM streamed over HTTP using GStreamer’s souphttpsrc


The pipeline below receives WebM video using souphttpsrc and plays it

gst-launch souphttpsrc location=http://127.0.0.1:9001 ! matroskademux ! vp8dec ! ffmpegcolorspace ! ximagesink

For further details, check the manual page for souphttpsrc, or the gst-inspect output for the element, e.g. gst-inspect souphttpsrc.
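If you need the same stream inside an application rather than on the command line, the pipeline can be built from the same description string using gst_parse_launch. Here is a minimal sketch, with error handling omitted for brevity:

#include <gst/gst.h>

int
main (int argc, char *argv[])
{
  GstElement *pipeline;
  GMainLoop *loop;

  gst_init (&argc, &argv);
  loop = g_main_loop_new (NULL, FALSE);

  // build the pipeline from the same description used with gst-launch
  pipeline = gst_parse_launch (
      "souphttpsrc location=http://127.0.0.1:9001 ! matroskademux "
      "! vp8dec ! ffmpegcolorspace ! ximagesink", NULL);

  gst_element_set_state (pipeline, GST_STATE_PLAYING);
  g_main_loop_run (loop);

  gst_element_set_state (pipeline, GST_STATE_NULL);
  gst_object_unref (pipeline);
  return 0;
}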


Read and write raw PCM using GStreamer


Embedded developers frequently need to encode or decode raw PCM audio. In this post I show some GStreamer pipelines that can help with that task.

Convert WAV to PCM

gst-launch filesrc location=file.wav ! wavparse ! audioresample ! audioconvert ! audio/x-raw-int, rate=8000, channels=1, endianness=4321, width=16, depth=16, signed=true ! filesink location=file.pcm

For bulk conversion

ls *.wav | xargs -i -n 1 gst-launch filesrc location='{}' ! wavparse ! audioresample ! audioconvert ! audio/x-raw-int, rate=8000, channels=1, endianness=4321, width=16, depth=16, signed=true ! filesink location='{}'.pcm

Convert PCM to WAV

gst-launch filesrc location=file.pcm ! audio/x-raw-int, rate=8000, channels=1, endianness=4321, width=16, depth=16, signed=true ! audioconvert ! audio/x-raw-int, rate=8000, channels=1, endianness=1234, width=16, depth=16, signed=true ! wavenc ! filesink location=file.wav

Play PCM

gst-launch filesrc location=file.pcm ! audio/x-raw-int, rate=8000, channels=1, endianness=4321, width=16, depth=16, signed=true ! pulsesink

Use xxd to create C array of PCM data

xxd -i file.pcm > voice.c
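xxd -i derives the array name from the file name, replacing non-alphanumeric characters with underscores, so file.pcm becomes an array named file_pcm along with a file_pcm_len length variable. A minimal sketch of how the generated data might be used, assuming you compile it together with voice.c (copy.pcm is an arbitrary output name):

#include <stdio.h>

// declarations matching what xxd -i generates in voice.c
extern unsigned char file_pcm[];
extern unsigned int file_pcm_len;

int
main (void)
{
  // write the embedded samples back out, just to show the data is usable
  FILE *out = fopen ("copy.pcm", "wb");
  fwrite (file_pcm, 1, file_pcm_len, out);
  fclose (out);
  return 0;
}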

Link dynamic pads of demuxer


Demuxers do not have any source pads until they receive buffers to parse. As data becomes available, source pads are added dynamically, based on the streams found in it.

The pad-added signal

The pad-added signal can be used to attach new elements to the pipeline when a new pad gets added. Use the g_signal_connect function to listen for pad-added. In the callback function, you can add new elements to the pipeline and link them to the demuxer based on the name of the pad. If the pad name starts with audio, for instance, you can link the elements for audio playback. The state of these new elements needs to be set to GST_STATE_PLAYING.

Here’s how you can register a callback for pad-added

g_signal_connect (demux, "pad-added", (GCallback)demux_pad_added, NULL);

Here’s a sample callback function for the matroskademux element

void
demux_pad_added (GstElement* demux, GstPad* pad, gpointer user_data)
{
  char* name;
  GstPad *sinkpad;
  GstElement *tee, *sink;

  name = gst_pad_get_name(pad);

  if (strncmp(name, "audio", 5) == 0)
  {
    // link the audio src pad of the demuxer to the sink pad of the audio tee
    tee = gst_element_factory_make ("tee", "audiotee");
    sink = gst_element_factory_make ("fakesink", "audiosink");
    gst_bin_add_many (GST_BIN (pipeline), tee, sink, NULL);
    gst_element_link (tee, sink);
    sinkpad = gst_element_get_static_pad(tee, "sink");
    gst_pad_link(pad, sinkpad);
    gst_object_unref (sinkpad);
    gst_element_set_state(tee, GST_STATE_PLAYING);
    gst_element_set_state(sink, GST_STATE_PLAYING);
    g_print ("Linked pad %s of demuxer\n", name);
  }
  else if (strncmp(name, "video", 5) == 0)
  {
    // link the video src pad of the demuxer to the sink pad of the video tee
    tee = gst_element_factory_make ("tee", "videotee");
    sink = gst_element_factory_make ("fakesink", "videosink");
    gst_bin_add_many (GST_BIN (pipeline), tee, sink, NULL);
    gst_element_link (tee, sink);
    sinkpad = gst_element_get_static_pad(tee, "sink");
    gst_pad_link(pad, sinkpad);
    gst_object_unref (sinkpad);
    gst_element_set_state(tee, GST_STATE_PLAYING);
    gst_element_set_state (sink, GST_STATE_PLAYING);
    g_print ("Linked pad %s of demuxer\n", name);
  }

  g_free (name);
}

The no-more-pads signal

Another signal that can be used is no-more-pads. You can check whether your version of GStreamer provides it by using gst-inspect, e.g. gst-inspect avidemux. In the callback of that signal you can link new elements to the demuxer using gst_element_link_filtered, calling the function once for each stream's caps. The caps parameter required by the function can be created using gst_caps_new_simple, e.g. gst_caps_new_simple ("video/x-vp8", NULL). Again, the state of these new elements needs to be set to GST_STATE_PLAYING.

Here’s how you can register a callback for no-more-pads

  g_signal_connect (demux, "no-more-pads", (GCallback)demux_no_more_pads, NULL);

Here’s a sample callback function.

void
demux_no_more_pads (GstElement* demux, gpointer user_data)
{
  GstCaps *caps;
  GstElement *tee, *sink;

  tee = gst_element_factory_make ("tee", "videotee");
  sink = gst_element_factory_make ("fakesink", "videosink");
  gst_bin_add_many (GST_BIN (pipeline), tee, sink, NULL);
  gst_element_link (tee, sink);
  caps = gst_caps_new_simple ("video/x-vp8", NULL);
  gst_element_link_filtered (demux, tee, caps);
  gst_caps_unref (caps);
  gst_element_set_state(tee, GST_STATE_PLAYING);
  gst_element_set_state(sink, GST_STATE_PLAYING);

  tee = gst_element_factory_make ("tee", "audiotee");
  sink = gst_element_factory_make ("fakesink", "audiosink");
  gst_bin_add_many (GST_BIN (pipeline), tee, sink, NULL);
  gst_element_link (tee, sink);
  caps = gst_caps_new_simple ("audio/x-vorbis", NULL);
  gst_element_link_filtered (demux, tee, caps);
  gst_caps_unref (caps);
  gst_element_set_state(tee, GST_STATE_PLAYING);
  gst_element_set_state(sink, GST_STATE_PLAYING);
}

Debugging

As usual, if you need to troubleshoot issues with your pipeline, try setting the environment variable GST_DEBUG to 5. GStreamer and its elements will print copious amounts of information as they execute. The output can also be restricted to specific debug categories, e.g. GST_DEBUG=souphttpsrc:5.

export GST_DEBUG=5

Video streaming using jpeg encoding


Here’s an example of a GStreamer pipeline that produces a less CPU-intensive, low-latency video stream using JPEG encoding. Vorbis audio is muxed, along with the video, into a Matroska stream. I have tested this on Ubuntu 11.04.

gst-launch v4l2src decimate=3 ! video/x-raw-yuv,width=320,height=240 ! jpegenc ! queue2 ! m. alsasrc device=hw:2,0 ! audioconvert ! vorbisenc ! queue2 ! m. matroskamux name=m streamable=true ! tcpclientsink host=localhost port=9002

A server can stream it with a content type of video/x-matroska. Most browsers will not play it directly, but external plugins can be used.

Adjusting attributes of v4l2src and vp8enc elements for video conferencing


Video conferencing is real-time in nature, and the default encoding parameters of GStreamer's vp8enc element are not always appropriate for it. Let us start with the following pipeline

gst-launch v4l2src ! video/x-raw-rgb,width=320,height=240 ! ffmpegcolorspace ! vp8enc ! vp8dec ! ffmpegcolorspace ! ximagesink sync=false

The CPU usage, on a PandaBoard with Ubuntu 11.04, is close to 100% (since there are 2 cores, that translates to 50%).

Now, modify the pipeline as follows

gst-launch v4l2src decimate=3 ! video/x-raw-rgb,width=320,height=240 ! ffmpegcolorspace ! vp8enc speed=2 max-latency=2 quality=5.0 max-keyframe-distance=3 threads=5 ! vp8dec ! ffmpegcolorspace ! ximagesink sync=false

Note the decimate attribute of the v4l2src element, and the attributes speed, max-latency, max-keyframe-distance, threads and quality of the vp8enc element. With these changes the CPU usage drops to 40% and the video playback is closer to real time.

Using the fdsink element of GStreamer


The fdsink element is useful because it can be used to write data directly to a socket. In this post, we'll see how to set up a listener for client connections and stream directly to the client socket using fdsink.

Listen for incoming connections

The functions below set up a server socket to listen for incoming client connections. Once a client connects, we send the appropriate HTTP headers, and call the function that will stream data to the client socket using fdsink. The make_socket function comes from the GNU libc manual; a sketch of it follows the server code below.

gpointer
client_thread(gpointer data)
{
  int BUF_SIZE = 256;
  char buffer[BUF_SIZE+1];
  int client = GPOINTER_TO_INT(data);
  int ret;

  ret = read(client, buffer, BUF_SIZE);

  while(ret > 0)  // stop on error or end of stream
  {
    buffer[ret] = 0;
    g_print("%s", buffer);
    if (ret > 3 && strncmp(buffer, "GET", 3) == 0)
    {
      send(client, "HTTP/1.0 200 OK\r\n", 17, 0);
      send(client, "Connection: close\r\n", 19, 0);
      send(client, "Content-Type: video/webm\r\n", 26, 0);
      send(client, "\r\n", 2, 0);

      //... create pipeline with fdsink
    }

    ret = read(client, buffer, BUF_SIZE);
  }

  return NULL;
}

gpointer
server_thread(gpointer data)
{
  int sock, client;
  struct sockaddr_in addr;
  socklen_t size;

  g_print("Server thread started\n");

  sock = make_socket(9001);

  while(1)
  {
    if (listen (sock, 1) < 0)
    {
      g_printerr ("listener failed");
      exit (EXIT_FAILURE);
    }
    size = sizeof(addr);
    client = accept(sock, (struct sockaddr *)&addr, &size);

    if (client < 0)
    {
      g_printerr ("accept failed");
      continue;
    }

    g_print("connect from host %s, port %d.\n",
      inet_ntoa(addr.sin_addr),
      ntohs(addr.sin_port));

    g_thread_create(client_thread, GINT_TO_POINTER(client), TRUE, NULL);
  }
}
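For reference, a make_socket along the lines of the example in the GNU libc manual could look like the sketch below (it additionally needs <stdio.h>, <stdlib.h> and <stdint.h>):

int
make_socket (uint16_t port)
{
  int sock;
  struct sockaddr_in name;

  // create the TCP socket
  sock = socket (PF_INET, SOCK_STREAM, 0);
  if (sock < 0)
  {
    perror ("socket");
    exit (EXIT_FAILURE);
  }

  // bind it to the given port on all interfaces
  name.sin_family = AF_INET;
  name.sin_port = htons (port);
  name.sin_addr.s_addr = htonl (INADDR_ANY);
  if (bind (sock, (struct sockaddr *) &name, sizeof (name)) < 0)
  {
    perror ("bind");
    exit (EXIT_FAILURE);
  }

  return sock;
}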

Create listener in its own thread

The server above can be executed in its own thread (we use GLib) thus

  sthread = g_thread_create(server_thread, NULL, TRUE, NULL);

Use fdsink to stream to socket

The following code snippet demonstrates how fdsink can be set up

  sink = gst_element_factory_make ("fdsink", NULL);
  g_object_set (G_OBJECT (sink), "fd", client, NULL);
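Putting it together, here is a minimal sketch of a pipeline that could be created in the client thread once the HTTP headers have been sent. The use of gst_parse_launch, the test source and the element name clientsink are my own choices for illustration, not the original code:

  GstElement *pipeline, *sink;

  // a live test source, VP8 encoded and muxed as streamable WebM;
  // the fdsink is named so its fd property can be set afterwards
  pipeline = gst_parse_launch (
      "videotestsrc is-live=true ! vp8enc ! webmmux streamable=true "
      "! fdsink name=clientsink", NULL);

  sink = gst_bin_get_by_name (GST_BIN (pipeline), "clientsink");
  g_object_set (G_OBJECT (sink), "fd", client, NULL);
  gst_object_unref (sink);

  gst_element_set_state (pipeline, GST_STATE_PLAYING);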

Handling client removal in a dynamic pipeline

A client can disconnect without warning, and fdsink does not provide any mechanism to handle such a situation; the whole pipeline can end if a single client disconnects. Luckily, multifdsink can be used in such a scenario, as it handles client disconnection more gracefully. The num-fds property can be polled to detect that there are no pending clients. Create a multifdsink thus

  sink = gst_element_factory_make ("multifdsink", NULL);

After starting the pipeline, add a new socket fd thus

  g_signal_emit_by_name(sink, "add", client);
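The num-fds property can then be read periodically, for example from a GLib timeout, to find out whether any clients remain connected. A minimal sketch (check_clients is a name of my own choosing):

static gboolean
check_clients (gpointer data)
{
  GstElement *sink = GST_ELEMENT (data);
  guint num_fds = 0;

  // read the current number of client fds from multifdsink
  g_object_get (G_OBJECT (sink), "num-fds", &num_fds, NULL);
  if (num_fds == 0)
    g_print ("no clients connected\n");

  return TRUE; // keep the timeout running
}

Register it with g_timeout_add_seconds (1, check_clients, sink) after the pipeline starts.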

The multifdsink element has a bug that causes 100% CPU usage; this has been fixed in version 0.10.33 of GStreamer.

Headers

The following headers contain the declarations required to compile the code above

#include <gst/gst.h>
#include <glib.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

That’s all there is to it.

GStreamer pipeline with Tee


The tee element is useful to branch a data flow so that it can be fed to multiple elements. In this post, we'll use the tee element to split live, encoded, test video and audio sources, mux the output as live WebM, and stream the result using the tcpclientsink element. This procedure can be repeated several times to stream to multiple clients, the only limits being CPU usage and network bandwidth. By encoding only once, we avoid taxing the CPU; encoding is the most intensive operation it has to perform. The code presented below has been tested with GStreamer 0.10.32.

Creating a pipeline with Tee

Example C code that creates a dynamic GStreamer pipeline using tee follows

  GstElement *pipeline, *videosrc, *colorspace, *videoenc,
    *videotee, *audiosrc, *conv, *audioenc, *audiotee;

  // Create elements
  pipeline = gst_pipeline_new ("tcp-streamer");
  videosrc = gst_element_factory_make ("videotestsrc", "videosrc");
  colorspace = gst_element_factory_make ("ffmpegcolorspace", "colorspace");
  videoenc = gst_element_factory_make ("vp8enc", "videoenc");
  videotee = gst_element_factory_make ("tee", "videotee");
  audiosrc = gst_element_factory_make ("autoaudiosrc", "audiosrc");
  conv = gst_element_factory_make ("audioconvert", "converter");
  audioenc = gst_element_factory_make ("vorbisenc", "audioenc");
  audiotee = gst_element_factory_make ("tee", "audiotee");

  if (!pipeline || !videosrc || !colorspace || !videoenc
    || !videotee || !audiosrc || !conv || !audioenc || !audiotee) {
    g_printerr ("One element could not be created.\n");
    return NULL;
  }

  // set the properties of elements
  g_object_set (G_OBJECT (videosrc), "horizontal-speed", 1, NULL);
  g_object_set (G_OBJECT (videosrc), "is-live", 1, NULL);
  g_object_set (G_OBJECT (videoenc), "speed", 2, NULL);

  // add all elements to the pipeline
  gst_bin_add_many (GST_BIN (pipeline),
    videosrc, colorspace, videoenc, videotee, audiosrc, conv,
    audioenc, audiotee, NULL);

  // link the elements together
  gst_element_link_many (videosrc, colorspace, videoenc,
    videotee, NULL);
  gst_element_link_many (audiosrc, conv, audioenc,
    audiotee, NULL);

Branching from a Tee on a running Pipeline

We create a sub-pipeline using a bin. Creating a new branch from the tee, on a running pipeline, can be achieved thus

  GstElement *bin, *videoq, *audioq, *muxer, *sink,
    *videotee, *audiotee;

  GstPad *sinkpadvideo, *srcpadvideo, *sinkpadaudio, *srcpadaudio;

  bin = gst_bin_new (NULL);
  videoq = gst_element_factory_make ("queue2", NULL);
  audioq = gst_element_factory_make ("queue2", NULL);
  muxer = gst_element_factory_make ("webmmux", NULL);
  sink = gst_element_factory_make ("tcpclientsink", NULL);

  if (!bin || !videoq || !audioq || !muxer || !sink) {
    g_printerr ("One element could not be created.\n");
    return FALSE;
  }

  g_object_set (G_OBJECT (muxer), "streamable", 1, NULL);

  g_object_set (G_OBJECT (sink), "port", port,
    "host", "localhost", NULL);

  gst_bin_add_many (GST_BIN (bin), videoq, audioq,
    muxer, sink, NULL);

  // link src pad of video queue to sink pad of muxer
  srcpadvideo = gst_element_get_static_pad(videoq, "src");
  sinkpadvideo = gst_element_get_request_pad(muxer, "video_%d");
  gst_pad_link(srcpadvideo, sinkpadvideo);

  // link src pad of audio queue to sink pad of muxer
  srcpadaudio = gst_element_get_static_pad(audioq, "src");
  sinkpadaudio = gst_element_get_request_pad(muxer, "audio_%d");
  gst_pad_link(srcpadaudio, sinkpadaudio);

  gst_element_link(muxer, sink);

  // Create ghost pads on the bin and link to queues
  sinkpadvideo = gst_element_get_static_pad(videoq, "sink");
  gst_element_add_pad(bin, gst_ghost_pad_new("videosink", sinkpadvideo));
  gst_object_unref(GST_OBJECT(sinkpadvideo));
  sinkpadaudio = gst_element_get_static_pad(audioq, "sink");
  gst_element_add_pad(bin, gst_ghost_pad_new("audiosink", sinkpadaudio));
  gst_object_unref(GST_OBJECT(sinkpadaudio));

  // set the new bin to PAUSED so it can preroll
  gst_element_set_state(bin, GST_STATE_PAUSED);

  // Request source pads from tee and sink pads from bin
  videotee = gst_bin_get_by_name (GST_BIN(pipeline), "videotee");
  srcpadvideo = gst_element_get_request_pad(videotee, "src%d");
  sinkpadvideo = gst_element_get_pad(bin, "videosink");
  audiotee = gst_bin_get_by_name (GST_BIN(pipeline), "audiotee");
  srcpadaudio = gst_element_get_request_pad(audiotee, "src%d");
  sinkpadaudio = gst_element_get_pad(bin, "audiosink");

  // Link src pad of tees to sink pads of bin
  gst_bin_add(GST_BIN(pipeline), bin);
  gst_pad_link(srcpadvideo, sinkpadvideo);
  gst_pad_link(srcpadaudio, sinkpadaudio);

  gst_element_set_state (pipeline, GST_STATE_PLAYING);

Removing the branch from a running pipeline

The following code illustrates how to remove the sub-pipeline.

  //gst_element_set_state (pipeline, GST_STATE_PAUSED);
  // pause pipeline if no more bins left
  gst_element_set_state (bin, GST_STATE_NULL);

  gst_pad_unlink(srcpadvideo, sinkpadvideo);
  gst_pad_unlink(srcpadaudio, sinkpadaudio);

  gst_element_remove_pad(videotee, srcpadvideo);
  gst_element_remove_pad(audiotee, srcpadaudio);

  gst_bin_remove(GST_BIN(pipeline), bin);

  //gst_element_set_state (pipeline, GST_STATE_PLAYING);
  // resume pipeline if there are bins left

For the curious, I cache the above pointers in a GHashTable using port number as the key.
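A minimal sketch of that bookkeeping, with a struct and names of my own choosing, could look like this:

typedef struct
{
  GstElement *bin;
  GstPad *srcpadvideo, *sinkpadvideo;
  GstPad *srcpadaudio, *sinkpadaudio;
} ClientBranch;

// key: port number, value: the branch serving that port
GHashTable *branches;

  branches = g_hash_table_new_full (g_direct_hash, g_direct_equal,
      NULL, g_free);

  // after linking a new branch
  ClientBranch *branch = g_new0 (ClientBranch, 1);
  branch->bin = bin;
  branch->srcpadvideo = srcpadvideo;
  branch->sinkpadvideo = sinkpadvideo;
  branch->srcpadaudio = srcpadaudio;
  branch->sinkpadaudio = sinkpadaudio;
  g_hash_table_insert (branches, GINT_TO_POINTER (port), branch);

  // later, when the client on that port goes away
  branch = g_hash_table_lookup (branches, GINT_TO_POINTER (port));
  if (branch != NULL)
  {
    // ...unlink and remove the branch as shown above...
    g_hash_table_remove (branches, GINT_TO_POINTER (port));
  }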