Update
This commit is contained in:
@@ -0,0 +1,242 @@
|
||||
'''
|
||||
Element that transforms audio samples to video frames representing
|
||||
the waveform.
|
||||
|
||||
Requires matplotlib, numpy and numpy_ringbuffer
|
||||
|
||||
Example pipeline:
|
||||
|
||||
gst-launch-1.0 audiotestsrc ! audioplot window-duration=0.01 ! videoconvert ! autovideosink
|
||||
'''
|
||||
|
||||
import gi
|
||||
|
||||
gi.require_version('Gst', '1.0')
|
||||
gi.require_version('GstBase', '1.0')
|
||||
gi.require_version('GstAudio', '1.0')
|
||||
gi.require_version('GstVideo', '1.0')
|
||||
|
||||
from gi.repository import Gst, GLib, GObject, GstBase, GstAudio, GstVideo
|
||||
|
||||
try:
|
||||
import numpy as np
|
||||
import matplotlib.patheffects as pe
|
||||
from numpy_ringbuffer import RingBuffer
|
||||
from matplotlib import pyplot as plt
|
||||
from matplotlib.backends.backend_agg import FigureCanvasAgg
|
||||
except ImportError:
|
||||
Gst.error('audioplot requires numpy, numpy_ringbuffer and matplotlib')
|
||||
raise
|
||||
|
||||
|
||||
Gst.init(None)

# All raw audio formats this GStreamer build supports, parsed out of the
# "{ S8, U8, ... }" caps-list string exposed by GstAudio.
AUDIO_FORMATS = [f.strip() for f in
                 GstAudio.AUDIO_FORMATS_ALL.strip('{ }').split(',')]

# Sink (input) caps: interleaved raw audio in any supported format, at any
# rate and channel count.
ICAPS = Gst.Caps(Gst.Structure('audio/x-raw',
                               format=Gst.ValueList(AUDIO_FORMATS),
                               layout='interleaved',
                               rate = Gst.IntRange(range(1, GLib.MAXINT)),
                               channels = Gst.IntRange(range(1, GLib.MAXINT))))

# Src (output) caps: ARGB video at any resolution and framerate; the
# concrete values are chosen in do_fixate_caps.
OCAPS = Gst.Caps(Gst.Structure('video/x-raw',
                               format='ARGB',
                               width=Gst.IntRange(range(1, GLib.MAXINT)),
                               height=Gst.IntRange(range(1, GLib.MAXINT)),
                               framerate=Gst.FractionRange(Gst.Fraction(1, 1),
                                                           Gst.Fraction(GLib.MAXINT, 1))))

# Defaults: sliding-window length (seconds) for the "window-duration"
# property, and the video geometry/framerate used when fixating caps.
DEFAULT_WINDOW_DURATION = 1.0
DEFAULT_WIDTH = 640
DEFAULT_HEIGHT = 480
DEFAULT_FRAMERATE_NUM = 25
DEFAULT_FRAMERATE_DENOM = 1
|
||||
|
||||
|
||||
class AudioPlotFilter(GstBase.BaseTransform):
    """BaseTransform that renders incoming audio as a waveform video stream.

    Incoming samples are converted to S32, thinned by averaging, and kept
    in a ring buffer spanning ``window-duration`` seconds.  One ARGB video
    frame is drawn with matplotlib for every ``samplesperbuffer`` consumed
    samples.
    """

    __gstmetadata__ = ('AudioPlotFilter','Filter', \
                       'Plot audio waveforms', 'Mathieu Duponchelle')

    # Static pad templates: ARGB video out (src), interleaved audio in (sink).
    __gsttemplates__ = (Gst.PadTemplate.new("src",
                                            Gst.PadDirection.SRC,
                                            Gst.PadPresence.ALWAYS,
                                            OCAPS),
                        Gst.PadTemplate.new("sink",
                                            Gst.PadDirection.SINK,
                                            Gst.PadPresence.ALWAYS,
                                            ICAPS))

    __gproperties__ = {
        "window-duration": (float,
                            "Window Duration",
                            "Duration of the sliding window, in seconds",
                            0.01,
                            100.0,
                            DEFAULT_WINDOW_DURATION,
                            GObject.ParamFlags.READWRITE
                           )
    }

    def __init__(self):
        GstBase.BaseTransform.__init__(self)
        # Sliding-window length in seconds; read in do_set_caps to size
        # the ring buffer.
        self.window_duration = DEFAULT_WINDOW_DURATION

    def do_get_property(self, prop):
        """GObject property getter ('window-duration' only)."""
        if prop.name == 'window-duration':
            return self.window_duration
        else:
            raise AttributeError('unknown property %s' % prop.name)

    def do_set_property(self, prop, value):
        """GObject property setter ('window-duration' only)."""
        if prop.name == 'window-duration':
            self.window_duration = value
        else:
            raise AttributeError('unknown property %s' % prop.name)

    def do_transform(self, inbuf, outbuf):
        """Draw the current ring-buffer contents into *outbuf* as ARGB.

        The first call creates the matplotlib line artist; subsequent
        calls only update its y-data and blit over the cached background,
        which is much cheaper than a full redraw.
        """
        if not self.h:
            self.h, = self.ax.plot(np.array(self.ringbuffer),
                                   lw=0.5,
                                   color='k',
                                   path_effects=[pe.Stroke(linewidth=1.0,
                                                           foreground='g'),
                                                 pe.Normal()])
        else:
            self.h.set_ydata(np.array(self.ringbuffer))

        # Restore the cached empty background, redraw only the line.
        self.fig.canvas.restore_region(self.background)
        self.ax.draw_artist(self.h)
        self.fig.canvas.blit(self.ax.bbox)

        # Raw ARGB bytes of the rendered figure, matching the ARGB caps.
        s = self.agg.tostring_argb()

        outbuf.fill(0, s)
        outbuf.pts = self.next_time
        outbuf.duration = self.frame_duration

        self.next_time += self.frame_duration

        return Gst.FlowReturn.OK

    def __append(self, data):
        """Thin *data* by averaging runs of ``thinning_factor`` samples and
        push the result into the ring buffer."""
        arr = np.array(data)
        # Drop the trailing remainder so the array reshapes evenly.
        end = self.thinning_factor * int(len(arr) / self.thinning_factor)
        arr = np.mean(arr[:end].reshape(-1, self.thinning_factor), 1)
        self.ringbuffer.extend(arr)

    def do_generate_output(self):
        """Consume queued audio and emit at most one video frame per call.

        BaseTransform calls this repeatedly for the same queued buffer;
        ``buf_offset`` tracks how much of it has been consumed so a single
        input buffer can yield several output frames.
        """
        inbuf = self.queued_buf
        _, info = inbuf.map(Gst.MapFlags.READ)
        # Convert the negotiated input format to S32 so the bytes can be
        # viewed as native ints below.
        res, data = self.converter.convert(GstAudio.AudioConverterFlags.NONE,
                                           info.data)
        data = memoryview(data).cast('i')

        nsamples = len(data) - self.buf_offset

        if nsamples == 0:
            # Input fully consumed: reset the per-buffer offset and wait
            # for the next input buffer.
            self.buf_offset = 0
            inbuf.unmap(info)
            return Gst.FlowReturn.OK, None

        if self.cur_offset + nsamples < self.next_offset:
            # Not enough samples yet for the next frame; buffer them all.
            self.__append(data[self.buf_offset:])
            self.buf_offset = 0
            self.cur_offset += nsamples
            inbuf.unmap(info)
            return Gst.FlowReturn.OK, None

        # Take exactly enough samples to reach the next frame boundary.
        consumed = self.next_offset - self.cur_offset

        self.__append(data[self.buf_offset:self.buf_offset + consumed])
        inbuf.unmap(info)

        _, outbuf = GstBase.BaseTransform.do_prepare_output_buffer(self, inbuf)

        ret = self.do_transform(inbuf, outbuf)

        self.next_offset += self.samplesperbuffer

        self.cur_offset += consumed
        self.buf_offset += consumed

        return ret, outbuf

    def do_transform_caps(self, direction, caps, filter_):
        """Map audio caps on one side to video caps on the other."""
        if direction == Gst.PadDirection.SRC:
            res = ICAPS
        else:
            res = OCAPS

        if filter_:
            res = res.intersect(filter_)

        return res

    def do_fixate_caps(self, direction, caps, othercaps):
        """Fixate output video caps towards the default size/framerate."""
        if direction == Gst.PadDirection.SRC:
            return othercaps.fixate()
        else:
            so = othercaps.get_structure(0).copy()
            so.fixate_field_nearest_fraction("framerate",
                                             DEFAULT_FRAMERATE_NUM,
                                             DEFAULT_FRAMERATE_DENOM)
            so.fixate_field_nearest_int("width", DEFAULT_WIDTH)
            so.fixate_field_nearest_int("height", DEFAULT_HEIGHT)
            ret = Gst.Caps.new_empty()
            ret.append_structure(so)
            return ret.fixate()

    def do_set_caps(self, icaps, ocaps):
        """Set up the audio converter, the matplotlib figure, and all
        bookkeeping derived from the negotiated input/output caps."""
        in_info = GstAudio.AudioInfo()
        in_info.from_caps(icaps)
        out_info = GstVideo.VideoInfo()
        out_info.from_caps(ocaps)

        # Always convert incoming audio to S32 regardless of input format.
        self.convert_info = GstAudio.AudioInfo()
        self.convert_info.set_format(GstAudio.AudioFormat.S32,
                                     in_info.rate,
                                     in_info.channels,
                                     in_info.position)
        self.converter = GstAudio.AudioConverter.new(GstAudio.AudioConverterFlags.NONE,
                                                     in_info,
                                                     self.convert_info,
                                                     None)

        # Size the figure so the Agg canvas matches the video resolution.
        self.fig = plt.figure()
        dpi = self.fig.get_dpi()
        self.fig.patch.set_alpha(0.3)
        self.fig.set_size_inches(out_info.width / float(dpi),
                                 out_info.height / float(dpi))
        self.ax = plt.Axes(self.fig, [0., 0., 1., 1.])
        self.fig.add_axes(self.ax)
        self.ax.set_axis_off()
        # Full signed 32-bit range, matching the converted S32 samples.
        self.ax.set_ylim((GLib.MININT, GLib.MAXINT))
        self.agg = self.fig.canvas.switch_backends(FigureCanvasAgg)
        self.h = None

        samplesperwindow = int(in_info.rate * in_info.channels * self.window_duration)
        # Average this many samples per plotted point, roughly one point
        # per output pixel column.
        self.thinning_factor = max(int(samplesperwindow / out_info.width - 1), 1)

        cap = int(samplesperwindow / self.thinning_factor)
        self.ax.set_xlim([0, cap])
        self.ringbuffer = RingBuffer(capacity=cap)
        self.ringbuffer.extend([0.0] * cap)
        self.frame_duration = Gst.util_uint64_scale_int(Gst.SECOND,
                                                        out_info.fps_d,
                                                        out_info.fps_n)
        self.next_time = self.frame_duration

        # Cache the empty background for the fast blit path in do_transform.
        self.agg.draw()
        self.background = self.fig.canvas.copy_from_bbox(self.ax.bbox)

        # Number of audio samples consumed per produced video frame.
        self.samplesperbuffer = Gst.util_uint64_scale_int(in_info.rate * in_info.channels,
                                                          out_info.fps_d,
                                                          out_info.fps_n)
        self.next_offset = self.samplesperbuffer
        self.cur_offset = 0
        self.buf_offset = 0

        return True
|
||||
|
||||
GObject.type_register(AudioPlotFilter)
# Factory tuple picked up by the gst-python plugin loader; exposes the
# element under the name "audioplot".
__gstelementfactory__ = ("audioplot", Gst.Rank.NONE, AudioPlotFilter)
|
||||
@@ -0,0 +1,53 @@
|
||||
#!/usr/bin/python3
|
||||
# exampleTransform.py
|
||||
# 2019 Daniel Klamt <graphics@pengutronix.de>
|
||||
|
||||
# Inverts a grayscale image in place, requires numpy.
|
||||
#
|
||||
# gst-launch-1.0 videotestsrc ! ExampleTransform ! videoconvert ! xvimagesink
|
||||
|
||||
import gi
|
||||
gi.require_version('Gst', '1.0')
|
||||
gi.require_version('GstBase', '1.0')
|
||||
gi.require_version('GstVideo', '1.0')
|
||||
|
||||
from gi.repository import Gst, GObject, GstBase, GstVideo
|
||||
|
||||
import numpy as np
|
||||
|
||||
Gst.init(None)
|
||||
FIXED_CAPS = Gst.Caps.from_string('video/x-raw,format=GRAY8,width=[1,2147483647],height=[1,2147483647]')
|
||||
|
||||
class ExampleTransform(GstBase.BaseTransform):
    """In-place transform that inverts every pixel of a GRAY8 frame."""

    __gstmetadata__ = ('ExampleTransform Python','Transform',
                      'example gst-python element that can modify the buffer gst-launch-1.0 videotestsrc ! ExampleTransform ! videoconvert ! xvimagesink', 'dkl')

    # Same fixed GRAY8 caps on both pads: the element never changes format.
    __gsttemplates__ = (Gst.PadTemplate.new("src",
                                            Gst.PadDirection.SRC,
                                            Gst.PadPresence.ALWAYS,
                                            FIXED_CAPS),
                        Gst.PadTemplate.new("sink",
                                            Gst.PadDirection.SINK,
                                            Gst.PadPresence.ALWAYS,
                                            FIXED_CAPS))

    def do_set_caps(self, incaps, outcaps):
        """Remember the negotiated frame geometry for later buffer mapping."""
        structure = incaps.get_structure(0)
        self.width = structure.get_int("width").value
        self.height = structure.get_int("height").value
        return True

    def do_transform_ip(self, buf):
        """Invert the buffer's grayscale pixels in place."""
        try:
            with buf.map(Gst.MapFlags.READ | Gst.MapFlags.WRITE) as info:
                # Zero-copy 2-D uint8 view over the mapped buffer memory.
                frame = np.ndarray(shape = (self.height, self.width),
                                   dtype = np.uint8,
                                   buffer = info.data)
                # bitwise_not is the documented alias of invert; writing
                # through out= modifies the mapped memory directly.
                np.bitwise_not(frame, out=frame)
                return Gst.FlowReturn.OK
        except Gst.MapError as e:
            Gst.error("Mapping error: %s" % e)
            return Gst.FlowReturn.ERROR
|
||||
|
||||
GObject.type_register(ExampleTransform)
# Factory tuple picked up by the gst-python plugin loader; exposes the
# element under the name "ExampleTransform".
__gstelementfactory__ = ("ExampleTransform", Gst.Rank.NONE, ExampleTransform)
|
||||
@@ -0,0 +1,42 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- Mode: Python -*-
|
||||
# vi:si:et:sw=4:sts=4:ts=4
|
||||
|
||||
# identity.py
|
||||
# 2016 Marianna S. Buschle <msb@qtec.com>
|
||||
#
|
||||
# Simple identity element in python
|
||||
#
|
||||
# You can run the example from the source doing from gst-python/:
|
||||
#
|
||||
# $ export GST_PLUGIN_PATH=$GST_PLUGIN_PATH:$PWD/plugin:$PWD/examples/plugins
|
||||
# $ GST_DEBUG=python:4 gst-launch-1.0 fakesrc num-buffers=10 ! identity_py ! fakesink
|
||||
|
||||
import gi
|
||||
gi.require_version('GstBase', '1.0')
|
||||
|
||||
from gi.repository import Gst, GObject, GstBase
|
||||
Gst.init(None)
|
||||
|
||||
#
|
||||
# Simple Identity element created entirely in python
|
||||
#
|
||||
class Identity(GstBase.BaseTransform):
    """Pass-through element: logs each buffer's timestamp, changes nothing."""

    __gstmetadata__ = ('Identity Python','Transform', \
                       'Simple identity element written in python', 'Marianna S. Buschle')

    # ANY caps on both pads: the element accepts and forwards everything.
    __gsttemplates__ = (Gst.PadTemplate.new("src",
                                            Gst.PadDirection.SRC,
                                            Gst.PadPresence.ALWAYS,
                                            Gst.Caps.new_any()),
                        Gst.PadTemplate.new("sink",
                                            Gst.PadDirection.SINK,
                                            Gst.PadPresence.ALWAYS,
                                            Gst.Caps.new_any()))

    def do_transform_ip(self, buffer):
        """Log the buffer timestamp at INFO level and pass it through."""
        message = "timestamp(buffer):%s" % (Gst.TIME_ARGS(buffer.pts))
        Gst.info(message)
        return Gst.FlowReturn.OK
|
||||
|
||||
GObject.type_register(Identity)
# Factory tuple picked up by the gst-python plugin loader; exposes the
# element under the name "identity_py".
__gstelementfactory__ = ("identity_py", Gst.Rank.NONE, Identity)
|
||||
@@ -0,0 +1,104 @@
|
||||
'''
|
||||
Simple mixer element, accepts 320 x 240 RGBA at 30 fps
|
||||
on any number of sinkpads.
|
||||
|
||||
Requires PIL (Python Imaging Library)
|
||||
|
||||
Example pipeline:
|
||||
|
||||
gst-launch-1.0 py_videomixer name=mixer ! videoconvert ! autovideosink \
|
||||
videotestsrc ! mixer. \
|
||||
videotestsrc pattern=ball ! mixer. \
|
||||
videotestsrc pattern=snow ! mixer.
|
||||
'''
|
||||
|
||||
import gi
|
||||
|
||||
gi.require_version('Gst', '1.0')
|
||||
gi.require_version('GstBase', '1.0')
|
||||
gi.require_version('GObject', '2.0')
|
||||
|
||||
from gi.repository import Gst, GObject, GstBase
|
||||
|
||||
Gst.init(None)

try:
    from PIL import Image
except ImportError:
    # Fail loudly at load time so the missing dependency is obvious.
    Gst.error('py_videomixer requires PIL')
    raise

# Completely fixed input / output: 320x240 RGBA at 30 fps on every pad,
# so no scaling or format conversion is needed while blending.
ICAPS = Gst.Caps(Gst.Structure('video/x-raw',
                               format='RGBA',
                               width=320,
                               height=240,
                               framerate=Gst.Fraction(30, 1)))

OCAPS = Gst.Caps(Gst.Structure('video/x-raw',
                               format='RGBA',
                               width=320,
                               height=240,
                               framerate=Gst.Fraction(30, 1)))
|
||||
|
||||
class BlendData:
    """Mutable accumulator handed to mix_buffers for one aggregate cycle."""

    def __init__(self, outimg):
        self.eos = True        # flips to False once any pad supplies a buffer
        self.pts = 0           # timestamp taken from the last mixed buffer
        self.outimg = outimg   # running blend result
|
||||
|
||||
class Videomixer(GstBase.Aggregator):
    """Blend fixed 320x240 RGBA buffers from any number of sink pads.

    Each aggregation cycle folds every pad's queued frame into a single
    output buffer using a 50% alpha blend per pad.
    """

    __gstmetadata__ = ('Videomixer','Video/Mixer', \
                       'Python video mixer', 'Mathieu Duponchelle')

    # Request sink pads (sink_%u) so any number of inputs can be attached;
    # one always-present src pad.
    __gsttemplates__ = (
        Gst.PadTemplate.new_with_gtype("sink_%u",
                                       Gst.PadDirection.SINK,
                                       Gst.PadPresence.REQUEST,
                                       ICAPS,
                                       GstBase.AggregatorPad.__gtype__),
        Gst.PadTemplate.new_with_gtype("src",
                                       Gst.PadDirection.SRC,
                                       Gst.PadPresence.ALWAYS,
                                       OCAPS,
                                       GstBase.AggregatorPad.__gtype__)
    )

    def mix_buffers(self, agg, pad, bdata):
        """Blend *pad*'s queued buffer into ``bdata.outimg``.

        Returns True so foreach_sink_pad continues to the next pad.
        """
        # NOTE(review): pop_buffer() looks like it could return None when
        # a pad has nothing queued — confirm against the Aggregator docs.
        buf = pad.pop_buffer()
        _, info = buf.map(Gst.MapFlags.READ)

        # Wrap the mapped bytes as a PIL image without copying the pixels.
        img = Image.frombuffer('RGBA', (320, 240), info.data, "raw", 'RGBA', 0, 1)

        # 50/50 blend of the accumulated image with this pad's frame.
        bdata.outimg = Image.blend(bdata.outimg, img, alpha=0.5)
        bdata.pts = buf.pts

        buf.unmap(info)

        # At least one pad produced data, so we are not at EOS.
        bdata.eos = False

        return True

    def do_aggregate(self, timeout):
        """Produce one blended output buffer from all sink pads."""
        # Start from a fully transparent frame each cycle.
        outimg = Image.new('RGBA', (320, 240), 0x00000000)

        bdata = BlendData(outimg)

        self.foreach_sink_pad(self.mix_buffers, bdata)

        data = bdata.outimg.tobytes()

        outbuf = Gst.Buffer.new_allocate(None, len(data), None)
        outbuf.fill(0, data)
        outbuf.pts = bdata.pts
        self.finish_buffer (outbuf)

        # We are EOS when no pad was ready to be aggregated,
        # this would obviously not work for live
        if bdata.eos:
            return Gst.FlowReturn.EOS

        return Gst.FlowReturn.OK
|
||||
|
||||
GObject.type_register(Videomixer)
# Factory tuple picked up by the gst-python plugin loader; exposes the
# element under the name "py_videomixer".
__gstelementfactory__ = ("py_videomixer", Gst.Rank.NONE, Videomixer)
|
||||
@@ -0,0 +1,193 @@
|
||||
'''
|
||||
Element that generates a sine audio wave with the specified frequency
|
||||
|
||||
Requires numpy
|
||||
|
||||
Example pipeline:
|
||||
|
||||
gst-launch-1.0 py_audiotestsrc ! autoaudiosink
|
||||
'''
|
||||
|
||||
import gi
|
||||
|
||||
gi.require_version('Gst', '1.0')
|
||||
gi.require_version('GstBase', '1.0')
|
||||
gi.require_version('GstAudio', '1.0')
|
||||
|
||||
from gi.repository import Gst, GLib, GObject, GstBase, GstAudio
|
||||
|
||||
try:
|
||||
import numpy as np
|
||||
except ImportError:
|
||||
Gst.error('py_audiotestsrc requires numpy')
|
||||
raise
|
||||
|
||||
# Fixed output format: stereo interleaved 32-bit float at 44.1 kHz.
OCAPS = Gst.Caps.from_string (
        'audio/x-raw, format=F32LE, layout=interleaved, rate=44100, channels=2')

# Default number of samples generated per buffer (per channel).
SAMPLESPERBUFFER = 1024

# Property defaults: tone frequency (Hz), output gain, mute flag and
# whether the source behaves as live.
DEFAULT_FREQ = 440
DEFAULT_VOLUME = 0.8
DEFAULT_MUTE = False
DEFAULT_IS_LIVE = False
|
||||
|
||||
class AudioTestSrc(GstBase.BaseSrc):
    """BaseSrc that synthesizes a stereo sine wave with numpy.

    The output format is fixed by OCAPS (F32LE, interleaved, 44100 Hz,
    2 channels); frequency, volume, mute and liveness are exposed as
    GObject properties.
    """

    __gstmetadata__ = ('CustomSrc','Src', \
                       'Custom test src element', 'Mathieu Duponchelle')

    __gproperties__ = {
        "freq": (int,
                 "Frequency",
                 "Frequency of test signal",
                 1,
                 GLib.MAXINT,
                 DEFAULT_FREQ,
                 GObject.ParamFlags.READWRITE
                ),
        "volume": (float,
                   "Volume",
                   "Volume of test signal",
                   0.0,
                   1.0,
                   DEFAULT_VOLUME,
                   GObject.ParamFlags.READWRITE
                  ),
        "mute": (bool,
                 "Mute",
                 "Mute the test signal",
                 DEFAULT_MUTE,
                 GObject.ParamFlags.READWRITE
                ),
        "is-live": (bool,
                    "Is live",
                    "Whether to act as a live source",
                    DEFAULT_IS_LIVE,
                    GObject.ParamFlags.READWRITE
                   ),
    }

    __gsttemplates__ = Gst.PadTemplate.new("src",
                                           Gst.PadDirection.SRC,
                                           Gst.PadPresence.ALWAYS,
                                           OCAPS)

    def __init__(self):
        GstBase.BaseSrc.__init__(self)
        # Filled in from the negotiated caps in do_set_caps.
        self.info = GstAudio.AudioInfo()

        self.freq = DEFAULT_FREQ
        self.volume = DEFAULT_VOLUME
        self.mute = DEFAULT_MUTE

        self.set_live(DEFAULT_IS_LIVE)
        # Buffer offsets/segments are expressed in time, not bytes.
        self.set_format(Gst.Format.TIME)

    def do_set_caps(self, caps):
        """Cache negotiated audio info and size buffers accordingly."""
        self.info.from_caps(caps)
        self.set_blocksize(self.info.bpf * SAMPLESPERBUFFER)
        return True

    def do_get_property(self, prop):
        """GObject property getter."""
        if prop.name == 'freq':
            return self.freq
        elif prop.name == 'volume':
            return self.volume
        elif prop.name == 'mute':
            return self.mute
        elif prop.name == 'is-live':
            return self.is_live
        else:
            raise AttributeError('unknown property %s' % prop.name)

    def do_set_property(self, prop, value):
        """GObject property setter; 'is-live' is forwarded to BaseSrc."""
        if prop.name == 'freq':
            self.freq = value
        elif prop.name == 'volume':
            self.volume = value
        elif prop.name == 'mute':
            self.mute = value
        elif prop.name == 'is-live':
            self.set_live(value)
        else:
            raise AttributeError('unknown property %s' % prop.name)

    def do_start (self):
        """Reset all generation counters before streaming starts."""
        self.next_sample = 0      # absolute sample index of the next buffer
        self.next_byte = 0        # absolute byte offset of the next buffer
        self.next_time = 0        # running timestamp in nanoseconds
        self.accumulator = 0      # phase accumulator for the sine wave
        self.generate_samples_per_buffer = SAMPLESPERBUFFER

        return True

    def do_gst_base_src_query(self, query):
        """Answer LATENCY queries ourselves; delegate everything else."""
        if query.type == Gst.QueryType.LATENCY:
            # Minimum latency is one buffer's worth of samples.
            latency = Gst.util_uint64_scale_int(self.generate_samples_per_buffer,
                                                Gst.SECOND, self.info.rate)
            is_live = self.is_live
            query.set_latency(is_live, latency, Gst.CLOCK_TIME_NONE)
            res = True
        else:
            res = GstBase.BaseSrc.do_query(self, query)
        return res

    def do_get_times(self, buf):
        """Return (start, end) sync times for *buf*.

        When live, the buffer's own pts/duration bound the sync window;
        otherwise CLOCK_TIME_NONE disables clock synchronisation.
        """
        end = 0
        start = 0
        if self.is_live:
            ts = buf.pts
            if ts != Gst.CLOCK_TIME_NONE:
                duration = buf.duration
                if duration != Gst.CLOCK_TIME_NONE:
                    end = ts + duration
                start = ts
        else:
            start = Gst.CLOCK_TIME_NONE
            end = Gst.CLOCK_TIME_NONE

        return start, end

    def do_fill(self, offset, length, buf):
        """Fill *buf* with the next chunk of sine samples and timestamp it."""
        if length == -1:
            # No requested size: produce the default buffer length.
            samples = SAMPLESPERBUFFER
        else:
            samples = int(length / self.info.bpf)

        self.generate_samples_per_buffer = samples

        bytes_ = samples * self.info.bpf

        # Bookkeeping for where the stream will be after this buffer.
        next_sample = self.next_sample + samples
        next_byte = self.next_byte + bytes_
        next_time = Gst.util_uint64_scale_int(next_sample, Gst.SECOND, self.info.rate)

        try:
            with buf.map(Gst.MapFlags.WRITE) as info:
                # Zero-copy float32 view over the mapped buffer memory.
                array = np.ndarray(shape = self.info.channels * samples, dtype = np.float32, buffer = info.data)
                if not self.mute:
                    # Repeat each sample index once per channel so all
                    # channels carry the same sine value (interleaved).
                    r = np.repeat(np.arange(self.accumulator, self.accumulator + samples),
                                  self.info.channels)
                    np.sin(2 * np.pi * r * self.freq / self.info.rate, out=array)
                    array *= self.volume
                else:
                    array[:] = 0
        except Exception as e:
            Gst.error("Mapping error: %s" % e)
            return (Gst.FlowReturn.ERROR, None)

        buf.offset = self.next_sample
        buf.offset_end = next_sample
        buf.pts = self.next_time
        buf.duration = next_time - self.next_time

        self.next_time = next_time
        self.next_sample = next_sample
        self.next_byte = next_byte
        self.accumulator += samples
        # NOTE(review): rate / freq is a float, so this modulo can drift
        # the phase slightly when freq does not divide the rate — confirm
        # this is acceptable for a test source.
        self.accumulator %= self.info.rate / self.freq

        return (Gst.FlowReturn.OK, buf)
|
||||
|
||||
|
||||
__gstelementfactory__ = ("py_audiotestsrc", Gst.Rank.NONE, AudioTestSrc)
|
||||
@@ -0,0 +1,39 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- Mode: Python -*-
|
||||
# vi:si:et:sw=4:sts=4:ts=4
|
||||
|
||||
# sinkelement.py
|
||||
# (c) 2005 Edward Hervey <edward@fluendo.com>
|
||||
# (c) 2007 Jan Schmidt <jan@fluendo.com>
|
||||
# Licensed under LGPL
|
||||
#
|
||||
# Small test application to show how to write a sink element
|
||||
# in 20 lines in python and place into the gstreamer registry
|
||||
# so it can be autoplugged or used from parse_launch.
|
||||
#
|
||||
# You can run the example from the source doing from gst-python/:
|
||||
#
|
||||
# $ export GST_PLUGIN_PATH=$GST_PLUGIN_PATH:$PWD/plugin:$PWD/examples/plugins
|
||||
# $ GST_DEBUG=python:4 gst-launch-1.0 fakesrc num-buffers=10 ! mysink
|
||||
|
||||
from gi.repository import Gst, GObject, GstBase
|
||||
Gst.init(None)
|
||||
|
||||
#
|
||||
# Simple Sink element created entirely in python
|
||||
#
|
||||
class MySink(GstBase.BaseSink):
    """Minimal sink: logs each rendered buffer's timestamp, then drops it."""

    __gstmetadata__ = ('CustomSink','Sink', \
                       'Custom test sink element', 'Edward Hervey')

    # Accept anything: the buffers are never inspected beyond their pts.
    __gsttemplates__ = Gst.PadTemplate.new("sink",
                                           Gst.PadDirection.SINK,
                                           Gst.PadPresence.ALWAYS,
                                           Gst.Caps.new_any())

    def do_render(self, buffer):
        """Log the buffer timestamp at INFO level and report success."""
        stamp = Gst.TIME_ARGS(buffer.pts)
        Gst.info("timestamp(buffer):%s" % (stamp,))
        return Gst.FlowReturn.OK
|
||||
|
||||
GObject.type_register(MySink)
# Factory tuple picked up by the gst-python plugin loader; exposes the
# element under the name "mysink".
__gstelementfactory__ = ("mysink", Gst.Rank.NONE, MySink)
|
||||
Reference in New Issue
Block a user