webrtc: Replace dsp and echoprobe by gst webrtcdsp and webrtcechoprobe

Igor Sharonov 2024-04-15 12:21:10 +03:00 committed by Maxim Logaev
parent cd5e5db816
commit c438592ab0
14 changed files with 61 additions and 380 deletions

View file

@@ -9,8 +9,8 @@ jobs:
       fetch-depth: 0
     - run: sudo apt-get update
     - run: sudo apt-get remove libunwind-14-dev
-    - run: sudo apt-get install -y build-essential gettext cmake valac libgee-0.8-dev libsqlite3-dev libgtk-4-dev libnotify-dev libgpgme-dev libsoup2.4-dev libgcrypt20-dev libqrencode-dev libnice-dev libgstreamer1.0-dev libgstreamer-plugins-base1.0-dev libsrtp2-dev libwebrtc-audio-processing-dev libadwaita-1-dev libsignal-protocol-c-dev libcanberra-dev
+    - run: sudo apt-get install -y build-essential gettext cmake valac libgee-0.8-dev libsqlite3-dev libgtk-4-dev libnotify-dev libgpgme-dev libsoup2.4-dev libgcrypt20-dev libqrencode-dev libnice-dev libgstreamer1.0-dev libgstreamer-plugins-base1.0-dev libsrtp2-dev libadwaita-1-dev libsignal-protocol-c-dev libcanberra-dev
-    - run: ./configure --release --no-debug --with-tests --enable-plugin=notification-sound --prefix=/usr
+    - run: ./configure --release --no-debug --with-tests --enable-plugin=notification-sound --prefix=/usr --without-webrtc
     - run: cmake --build build
     - run: cmake --build build --target=test
     - name: Build DEB-package
@@ -38,8 +38,8 @@ jobs:
       fetch-depth: 0
     - run: sudo apt-get update
     - run: sudo apt-get remove libunwind-14-dev
-    - run: sudo apt-get install -y build-essential gettext libadwaita-1-dev libcanberra-dev libgcrypt20-dev libgee-0.8-dev libgpgme-dev libgstreamer-plugins-base1.0-dev libgstreamer1.0-dev libgtk-4-dev libnice-dev libnotify-dev libqrencode-dev libsignal-protocol-c-dev libsoup2.4-dev libsqlite3-dev libsrtp2-dev libwebrtc-audio-processing-dev meson valac
+    - run: sudo apt-get install -y build-essential gettext libadwaita-1-dev libcanberra-dev libgcrypt20-dev libgee-0.8-dev libgpgme-dev libgstreamer-plugins-base1.0-dev libgstreamer1.0-dev libgtk-4-dev libnice-dev libnotify-dev libqrencode-dev libsignal-protocol-c-dev libsoup2.4-dev libsqlite3-dev libsrtp2-dev meson valac
-    - run: meson setup build -Dcrypto-backend=auto -Dplugin-ice=enabled -Duse-soup2=true
+    - run: meson setup build -Duse-soup2=true -Dplugin-rtp-webrtc-audio-processing=disabled
     - run: meson compile -C build
     - run: meson test -C build
   build-flatpak:

View file

@@ -12,6 +12,13 @@ endif ()
 include(CTest)
+option(PLUGIN_RTP_WEBRTC_AUDIO_PROCESSING "Use WebRTC audio processing" ON)
+
+# https://gitlab.kitware.com/cmake/cmake/-/issues/19804
+if (WIN32)
+    list(APPEND CMAKE_FIND_LIBRARY_SUFFIXES .dll)
+endif()
+
 # Prepare Plugins
 set(DEFAULT_PLUGINS omemo;openpgp;http-files;ice;rtp)
 if (WIN32)
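The new cache option can also be toggled directly on a plain CMake invocation, which may be useful when bypassing `./configure`. This is an illustrative command, not part of the commit; it assumes an in-tree `build` directory:

    # Skip the GstWebrtcDsp lookup, e.g. when gst-plugins-bad is not installed.
    cmake -B build -DPLUGIN_RTP_WEBRTC_AUDIO_PROCESSING=OFF
    cmake --build build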

View file

@@ -74,7 +74,7 @@ Make sure to install all [dependencies](https://github.com/dino/dino/wiki/Build#
 If you want to use `meson` build system, follow the next instructions:

-    meson setup build -Dcrypto-backend=gnutls -Dplugin-ice=enabled
+    meson setup build
     meson configure --prefix $PWD/build/install --libdir lib build
     meson compile -C build
     meson install -C build
@@ -85,6 +85,12 @@ If your `nice` library depends on `libsoup-2.4` (consider `ldd` output for the `
 Skip `meson configure` step, if you want to install the program globally.
 You can specify any convenient directory in the option `--prefix` where the program will be installed.
+
+If the `webrtcdsp` GStreamer plugin is not available on your system (check by running `gst-inspect-1.0 webrtcdsp`), pass an extra argument:
+* `--without-webrtcdsp` for `./configure`;
+* `-Dplugin-rtp-webrtc-audio-processing=disabled` for `meson`;
+* `-DPLUGIN_RTP_WEBRTC_AUDIO_PROCESSING=OFF` for `cmake`.
+
 In addition, there is a git version of this package for **Arch Linux** on [AUR](https://aur.archlinux.org/packages/dino-plus-git)

 Build on Windows (x86_64)
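A minimal shell sketch of the check described in the README hunk above (not part of the commit; it assumes a POSIX shell and `gst-inspect-1.0` on PATH):

    # If gst-inspect-1.0 can load the webrtcdsp element, build with voice processing;
    # otherwise fall back to --without-webrtcdsp.
    if gst-inspect-1.0 webrtcdsp >/dev/null 2>&1; then
        ./configure
    else
        ./configure --without-webrtcdsp
    fi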

View file

@@ -74,7 +74,6 @@ prepare()
         mingw64/mingw-w64-x86_64-nsis \
         mingw64/mingw-w64-x86_64-libsignal-protocol-c \
         mingw64/mingw-w64-x86_64-icu \
-        mingw64/mingw-w64-x86_64-webrtc-audio-processing \
         mingw64/mingw-w64-x86_64-meson \
         git \
         make \

View file

@@ -0,0 +1,11 @@
find_library(GstWebrtcDsp_LIBRARY gstwebrtcdsp PATH_SUFFIXES gstreamer-1.0)
if(GstWebrtcDsp_LIBRARY)
    find_package(Gst)
    set(GstWebrtcDsp_VERSION ${Gst_VERSION})
endif()

include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(GstWebrtcDsp
    REQUIRED_VARS GstWebrtcDsp_LIBRARY
    VERSION_VAR GstWebrtcDsp_VERSION)

View file

@@ -1,12 +0,0 @@
include(PkgConfigWithFallback)
find_pkg_config_with_fallback(WebRTCAudioProcessing
    PKG_CONFIG_NAME webrtc-audio-processing
    LIB_NAMES webrtc_audio_processing
    INCLUDE_NAMES webrtc/modules/audio_processing/include/audio_processing.h
    INCLUDE_DIR_SUFFIXES webrtc-audio-processing webrtc_audio_processing
)

include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(WebRTCAudioProcessing
    REQUIRED_VARS WebRTCAudioProcessing_LIBRARY
    VERSION_VAR WebRTCAudioProcessing_VERSION)

configure
View file

@@ -1,7 +1,7 @@
 #!/bin/sh
 OPTS=`getopt -o "h" --long \
-help,fetch-only,no-debug,disable-fast-vapi,with-tests,release,with-libsoup3,\
+help,fetch-only,no-debug,disable-fast-vapi,with-tests,release,with-libsoup3,without-webrtcdsp,\
 enable-plugin:,disable-plugin:,\
 prefix:,program-prefix:,exec-prefix:,lib-suffix:,\
 bindir:,libdir:,includedir:,datadir:,\
@@ -21,6 +21,7 @@ DISABLE_FAST_VAPI=
 LIB_SUFFIX=
 NO_DEBUG=
 USE_SOUP3=
+PLUGIN_RTP_WEBRTC_AUDIO_PROCESSING=yes
 EXEC_PREFIX=
 BINDIR=
@@ -54,6 +55,7 @@ Configuration:
     --no-debug                 Build without debug symbols
     --release                  Configure to build an optimized release version
     --with-libsoup3            Build with libsoup-3.0
+    --without-webrtcdsp        Build without WebRTC audio processing
     --with-tests               Also build tests.

 Plugin configuration:
@@ -109,6 +111,7 @@ while true; do
         --valac-flags ) VALACFLAGS="$2"; shift; shift ;;
         --lib-suffix ) LIB_SUFFIX="$2"; shift; shift ;;
         --with-libsoup3 ) USE_SOUP3=yes; shift ;;
+        --without-webrtcdsp ) PLUGIN_RTP_WEBRTC_AUDIO_PROCESSING=no; shift ;;
         --disable-fast-vapi ) DISABLE_FAST_VAPI=yes; shift ;;
         --no-debug ) NO_DEBUG=yes; shift ;;
         --release ) BUILD_TYPE=RelWithDebInfo; shift ;;
@@ -219,6 +222,7 @@ cmake -G "$cmake_type" \
     -DBIN_INSTALL_DIR="$BINDIR" \
     -DINCLUDE_INSTALL_DIR="$INCLUDEDIR" \
     -DLIB_INSTALL_DIR="$LIBDIR" \
+    -DPLUGIN_RTP_WEBRTC_AUDIO_PROCESSING="$PLUGIN_RTP_WEBRTC_AUDIO_PROCESSING" \
     -Wno-dev \
     .. || exit 9
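A short usage sketch of the new switch (the flag name and the forwarded CMake cache variable come from the hunks above; the surrounding invocation is illustrative):

    # Configure without the webrtcdsp-based voice processor, then build as usual.
    # configure forwards the choice to CMake as -DPLUGIN_RTP_WEBRTC_AUDIO_PROCESSING=no.
    ./configure --release --without-webrtcdsp
    cmake --build build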

View file

@@ -58,6 +58,8 @@ else
   libsoup_version = '3.0'
 endif

+cc = meson.get_compiler('c')
+
 dep_gdk_pixbuf = dependency('gdk-pixbuf-2.0')
 dep_gee = dependency('gee-0.8')
 dep_gio = dependency('gio-2.0')
@@ -81,9 +83,12 @@ dep_libsrtp2 = dependency('libsrtp2', disabler: true, required: plugin_crypto)
 dep_libsignal_protocol_c = dependency('libsignal-protocol-c', version: ['>=2.3.2', '<2.3.4'], disabler: true, required: get_option('plugin-omemo'))
 dep_libsoup = dependency('libsoup-@0@'.format(libsoup_version), disabler: true, required: get_option('plugin-http-files'))
 dep_nice = dependency('nice', version: '>=0.1.15', disabler: true, required: get_option('plugin-ice'))
-dep_m = meson.get_compiler('c').find_library('m', required: false)
+dep_m = cc.find_library('m', required: false)
 dep_sqlite3 = dependency('sqlite3', version: '>=3.24')
-dep_webrtc_audio_processing = dependency('webrtc-audio-processing', version: ['>=0.2', '<0.4'], required: get_option('plugin-rtp-webrtc-audio-processing'))
+dep_gstreamer_bad = dependency('gstreamer-plugins-bad-1.0', disabler: true, required: get_option('plugin-rtp-webrtc-audio-processing'))
+gstpluginsdir = dep_gstreamer_bad.get_variable('pluginsdir')
+dep_webrtcdsp = cc.find_library('gstwebrtcdsp', dirs: gstpluginsdir, disabler: true, required: get_option('plugin-rtp-webrtc-audio-processing'))

 prog_git = find_program('git', required: false)
 prog_python = python.find_installation()
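The replacement check above no longer looks for a webrtc-audio-processing pkg-config module; instead it asks gstreamer-plugins-bad-1.0 for its plugin directory and searches it for the gstwebrtcdsp library. A quick way to inspect what that lookup will see (a sketch; paths differ per distribution):

    # Directory that meson's cc.find_library() will search for libgstwebrtcdsp:
    pkg-config --variable=pluginsdir gstreamer-plugins-bad-1.0
    # Check whether the library (and hence the webrtcdsp/webrtcechoprobe elements) is present:
    ls "$(pkg-config --variable=pluginsdir gstreamer-plugins-bad-1.0)" | grep -i gstwebrtcdsp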

View file

@@ -1,5 +1,13 @@
 find_package(GLib ${GLib_GLOBAL_VERSION} REQUIRED)
-find_package(WebRTCAudioProcessing 0.2)
+
+set(RTP_DEFINITIONS)
+set(RTP_EXTRA_OPTIONS)
+if(PLUGIN_RTP_WEBRTC_AUDIO_PROCESSING)
+    set(EXTRA_RTP_PACKAGES GstWebrtcDsp)
+    list(APPEND RTP_DEFINITIONS WITH_VOICE_PROCESSOR)
+endif()
+
 find_packages(RTP_PACKAGES REQUIRED
     Gee
     GLib
@@ -11,11 +19,9 @@ find_packages(RTP_PACKAGES REQUIRED
     GstAudio
     GstRtp
     GstVideo
+    ${EXTRA_RTP_PACKAGES}
 )

-set(RTP_DEFINITIONS)
-set(RTP_EXTRA_OPTIONS)
-
 if(GstRtp_VERSION VERSION_GREATER_EQUAL "1.16")
     set(RTP_DEFINITIONS ${RTP_DEFINITIONS} GST_1_16)
 endif()
@@ -52,20 +58,6 @@ if(RTP_ENABLE_MSDK)
     set(RTP_DEFINITIONS ${RTP_DEFINITIONS} ENABLE_MSDK)
 endif()

-if(WebRTCAudioProcessing_VERSION GREATER "0.4")
-    message(STATUS "Ignoring WebRTCAudioProcessing, only versions < 0.4 supported so far")
-    unset(WebRTCAudioProcessing_FOUND)
-endif()
-
-if(WebRTCAudioProcessing_FOUND)
-    set(RTP_DEFINITIONS ${RTP_DEFINITIONS} WITH_VOICE_PROCESSOR)
-    set(RTP_VOICE_PROCESSOR_VALA src/voice_processor.vala)
-    set(RTP_VOICE_PROCESSOR_CXX src/voice_processor_native.cpp)
-    set(RTP_VOICE_PROCESSOR_LIB webrtc-audio-processing)
-else()
-    message(STATUS "WebRTCAudioProcessing not found, build without voice pre-processing!")
-endif()
-
 vala_precompile(RTP_VALA_C
     SOURCES
     src/codec_util.vala

View file

@@ -30,12 +30,8 @@ c_args = [
 vala_args = [
     '--vapidir', meson.current_source_dir() / 'vapi',
 ]
-if dep_webrtc_audio_processing.found()
-    dependencies += [dep_webrtc_audio_processing]
-    sources += files(
-        'src/voice_processor.vala',
-        'src/voice_processor_native.cpp',
-    )
+if dep_webrtcdsp.found()
+    dependencies += [dep_webrtcdsp]
     vala_args += ['-D', 'WITH_VOICE_PROCESSOR']
 endif
 if dep_gstreamer_rtp.version() == 'unknown' or dep_gstreamer_rtp.version().version_compare('>=1.16')

View file

@@ -475,13 +475,12 @@ public class Dino.Plugins.Rtp.Device : MediaDevice, Object {
         Gst.Bin bin = new Gst.Bin("voiceprocessorbin");
         Gst.Element converter = Gst.ElementFactory.make("audioconvert", @"dsp_convert_$id");
         Gst.Element resampler = Gst.ElementFactory.make("audioresample", @"dsp_resmaple_$id");
-        Gst.Element voiceproc = new VoiceProcessor(plugin.echoprobe as EchoProbe, element as Gst.Audio.StreamVolume);
-        voiceproc.name = @"dsp_$id";
+        Gst.Element voiceproc = Gst.ElementFactory.make("webrtcdsp", @"dsp_$id");
         bin.add_many(converter, resampler, voiceproc);
         converter.link(resampler);
         resampler.link(voiceproc);
+        voiceproc.@set("probe", "webrtcechoprobe0");
         Gst.Pad sink_pad = bin.find_unlinked_pad(Gst.PadDirection.SINK);
         Gst.Pad src_pad = bin.find_unlinked_pad(Gst.PadDirection.SRC);

View file

@@ -59,13 +59,14 @@ public class Dino.Plugins.Rtp.Plugin : RootInterface, VideoCallPlugin, Object {
     }

 #if WITH_VOICE_PROCESSOR
-    private Gst.Element create_echo_bin(Gst.Element element) {
+    private Gst.Element create_echo_probe() {
         Gst.Bin bin = new Gst.Bin("echoprobebin");
         Gst.Element converter = Gst.ElementFactory.make("audioconvert", "echo_convert_");
         Gst.Element resampler = Gst.ElementFactory.make("audioresample", "echo_resample_");
-        bin.add_many(element, converter, resampler);
-        element.link(converter);
+        Gst.Element webrtcechoprobe = Gst.ElementFactory.make("webrtcechoprobe", "webrtcechoprobe0");
+        bin.add_many(webrtcechoprobe, converter, resampler);
+        webrtcechoprobe.link(converter);
         converter.link(resampler);
         Gst.Pad sink_pad = bin.find_unlinked_pad(Gst.PadDirection.SINK);
@@ -100,11 +101,8 @@ public class Dino.Plugins.Rtp.Plugin : RootInterface, VideoCallPlugin, Object {
 #if WITH_VOICE_PROCESSOR
         // Audio echo probe
-        echoprobe = new EchoProbe();
-        if (echoprobe != null) {
-            echoprobe = create_echo_bin(echoprobe);
-            pipe.add(echoprobe);
-        }
+        echoprobe = create_echo_probe();
+        pipe.add(echoprobe);
 #endif

         // Pipeline
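Taken together, the two Vala hunks above move voice processing onto stock GStreamer elements: the capture bin now contains a webrtcdsp instance whose "probe" property points at the webrtcechoprobe named "webrtcechoprobe0" in the playback path. The same pairing can be tried outside Dino with a plain gst-launch-1.0 pipeline; this is only an illustrative sketch (it assumes autoaudiosrc/autoaudiosink and an installed gst-plugins-bad), not code from this commit:

    # Playback branch: whatever goes to the speakers also passes through the echo probe.
    # Capture branch: webrtcdsp cancels that far-end signal from the microphone input,
    # locating the probe by name ("webrtcechoprobe0", the first and default instance).
    gst-launch-1.0 \
        audiotestsrc is-live=true ! audioconvert ! audioresample ! webrtcechoprobe ! autoaudiosink \
        autoaudiosrc ! audioconvert ! audioresample ! webrtcdsp probe=webrtcechoprobe0 ! fakesink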

View file

@@ -1,176 +0,0 @@
using Gst;
namespace Dino.Plugins.Rtp {
public static extern Buffer adjust_to_running_time(Base.Transform transform, Buffer buf);
}
public class Dino.Plugins.Rtp.EchoProbe : Audio.Filter {
private static StaticPadTemplate sink_template = {"sink", PadDirection.SINK, PadPresence.ALWAYS, {null, "audio/x-raw,rate=48000,channels=1,layout=interleaved,format=S16LE"}};
private static StaticPadTemplate src_template = {"src", PadDirection.SRC, PadPresence.ALWAYS, {null, "audio/x-raw,rate=48000,channels=1,layout=interleaved,format=S16LE"}};
public Audio.Info audio_info { get; private set; }
public signal void on_new_buffer(Buffer buffer);
private uint period_samples;
private uint period_size;
private Base.Adapter adapter = new Base.Adapter();
static construct {
add_static_pad_template(sink_template);
add_static_pad_template(src_template);
set_static_metadata("Acoustic Echo Canceller probe", "Generic/Audio", "Gathers playback buffers for echo cancellation", "Dino Team <contact@dino.im>");
}
construct {
set_passthrough(true);
}
public override bool setup(Audio.Info info) {
audio_info = info;
period_samples = info.rate / 100; // 10ms buffers
period_size = period_samples * info.bpf;
return true;
}
public override FlowReturn transform_ip(Buffer buf) {
lock (adapter) {
adapter.push(adjust_to_running_time(this, buf));
while (adapter.available() > period_size) {
on_new_buffer(adapter.take_buffer(period_size));
}
}
return FlowReturn.OK;
}
public override bool stop() {
adapter.clear();
return true;
}
}
public class Dino.Plugins.Rtp.VoiceProcessor : Audio.Filter {
private static StaticPadTemplate sink_template = {"sink", PadDirection.SINK, PadPresence.ALWAYS, {null, "audio/x-raw,rate=48000,channels=1,layout=interleaved,format=S16LE"}};
private static StaticPadTemplate src_template = {"src", PadDirection.SRC, PadPresence.ALWAYS, {null, "audio/x-raw,rate=48000,channels=1,layout=interleaved,format=S16LE"}};
public Audio.Info audio_info { get; private set; }
private ulong process_outgoing_buffer_handler_id;
private uint adjust_delay_timeout_id;
private uint period_samples;
private uint period_size;
private Base.Adapter adapter = new Base.Adapter();
private EchoProbe? echo_probe;
private Audio.StreamVolume? stream_volume;
private ClockTime last_reverse;
private void* native;
static construct {
add_static_pad_template(sink_template);
add_static_pad_template(src_template);
set_static_metadata("Voice Processor (AGC, AEC, filters, etc.)", "Generic/Audio", "Pre-processes voice with WebRTC Audio Processing Library", "Dino Team <contact@dino.im>");
}
construct {
set_passthrough(false);
}
public VoiceProcessor(EchoProbe? echo_probe = null, Audio.StreamVolume? stream_volume = null) {
this.echo_probe = echo_probe;
this.stream_volume = stream_volume;
}
private static extern void* init_native(int stream_delay);
private static extern void setup_native(void* native);
private static extern void destroy_native(void* native);
private static extern void analyze_reverse_stream(void* native, Audio.Info info, Buffer buffer);
private static extern void process_stream(void* native, Audio.Info info, Buffer buffer);
private static extern void adjust_stream_delay(void* native);
private static extern void notify_gain_level(void* native, int gain_level);
private static extern int get_suggested_gain_level(void* native);
private static extern bool get_stream_has_voice(void* native);
public override bool setup(Audio.Info info) {
debug("VoiceProcessor.setup(%s)", info.to_caps().to_string());
audio_info = info;
period_samples = info.rate / 100; // 10ms buffers
period_size = period_samples * info.bpf;
adapter.clear();
setup_native(native);
return true;
}
public override bool start() {
native = init_native(150);
if (process_outgoing_buffer_handler_id == 0 && echo_probe != null) {
process_outgoing_buffer_handler_id = echo_probe.on_new_buffer.connect(process_outgoing_buffer);
}
if (stream_volume == null && sinkpad.get_peer() != null && sinkpad.get_peer().get_parent_element() is Audio.StreamVolume) {
stream_volume = sinkpad.get_peer().get_parent_element() as Audio.StreamVolume;
}
return true;
}
private bool adjust_delay() {
if (native != null) {
adjust_stream_delay(native);
return Source.CONTINUE;
} else {
adjust_delay_timeout_id = 0;
return Source.REMOVE;
}
}
private void process_outgoing_buffer(Buffer buffer) {
if (buffer.pts != uint64.MAX) {
last_reverse = buffer.pts;
}
analyze_reverse_stream(native, echo_probe.audio_info, buffer);
if (adjust_delay_timeout_id == 0 && echo_probe != null) {
adjust_delay_timeout_id = Timeout.add(1000, adjust_delay);
}
}
public override FlowReturn submit_input_buffer(bool is_discont, Buffer input) {
lock (adapter) {
if (is_discont) {
adapter.clear();
}
adapter.push(adjust_to_running_time(this, input));
}
return FlowReturn.OK;
}
public override FlowReturn generate_output(out Buffer output_buffer) {
lock (adapter) {
if (adapter.available() >= period_size) {
output_buffer = (Gst.Buffer) adapter.take_buffer(period_size).make_writable();
int old_gain_level = 0;
if (stream_volume != null) {
old_gain_level = (int) (stream_volume.get_volume(Audio.StreamVolumeFormat.LINEAR) * 255.0);
notify_gain_level(native, old_gain_level);
}
process_stream(native, audio_info, output_buffer);
if (stream_volume != null) {
int new_gain_level = get_suggested_gain_level(native);
if (old_gain_level != new_gain_level) {
debug("Gain: %i -> %i", old_gain_level, new_gain_level);
stream_volume.set_volume(Audio.StreamVolumeFormat.LINEAR, ((double)new_gain_level) / 255.0);
}
}
}
}
return FlowReturn.OK;
}
public override bool stop() {
if (process_outgoing_buffer_handler_id != 0) {
echo_probe.disconnect(process_outgoing_buffer_handler_id);
process_outgoing_buffer_handler_id = 0;
}
if (adjust_delay_timeout_id != 0) {
Source.remove(adjust_delay_timeout_id);
adjust_delay_timeout_id = 0;
}
adapter.clear();
destroy_native(native);
native = null;
return true;
}
}

View file

@@ -1,148 +0,0 @@
#include <algorithm>
#include <gst/gst.h>
#include <gst/audio/audio.h>
#include <webrtc/modules/audio_processing/include/audio_processing.h>
#include <webrtc/modules/interface/module_common_types.h>
#include <webrtc/system_wrappers/include/trace.h>
#define SAMPLE_RATE 48000
#define SAMPLE_CHANNELS 1
struct _DinoPluginsRtpVoiceProcessorNative {
webrtc::AudioProcessing *apm;
gint stream_delay;
gint last_median;
gint last_poor_delays;
};
extern "C" void *dino_plugins_rtp_adjust_to_running_time(GstBaseTransform *transform, GstBuffer *buffer) {
GstBuffer *copy = gst_buffer_copy(buffer);
GST_BUFFER_PTS(copy) = gst_segment_to_running_time(&transform->segment, GST_FORMAT_TIME, GST_BUFFER_PTS(buffer));
return copy;
}
extern "C" void *dino_plugins_rtp_voice_processor_init_native(gint stream_delay) {
_DinoPluginsRtpVoiceProcessorNative *native = new _DinoPluginsRtpVoiceProcessorNative();
webrtc::Config config;
config.Set<webrtc::ExtendedFilter>(new webrtc::ExtendedFilter(true));
config.Set<webrtc::ExperimentalAgc>(new webrtc::ExperimentalAgc(true, 85));
native->apm = webrtc::AudioProcessing::Create(config);
native->stream_delay = stream_delay;
native->last_median = 0;
native->last_poor_delays = 0;
return native;
}
extern "C" void dino_plugins_rtp_voice_processor_setup_native(void *native_ptr) {
_DinoPluginsRtpVoiceProcessorNative *native = (_DinoPluginsRtpVoiceProcessorNative *) native_ptr;
webrtc::AudioProcessing *apm = native->apm;
webrtc::ProcessingConfig pconfig;
pconfig.streams[webrtc::ProcessingConfig::kInputStream] =
webrtc::StreamConfig(SAMPLE_RATE, SAMPLE_CHANNELS, false);
pconfig.streams[webrtc::ProcessingConfig::kOutputStream] =
webrtc::StreamConfig(SAMPLE_RATE, SAMPLE_CHANNELS, false);
pconfig.streams[webrtc::ProcessingConfig::kReverseInputStream] =
webrtc::StreamConfig(SAMPLE_RATE, SAMPLE_CHANNELS, false);
pconfig.streams[webrtc::ProcessingConfig::kReverseOutputStream] =
webrtc::StreamConfig(SAMPLE_RATE, SAMPLE_CHANNELS, false);
apm->Initialize(pconfig);
apm->high_pass_filter()->Enable(true);
apm->echo_cancellation()->enable_drift_compensation(false);
apm->echo_cancellation()->set_suppression_level(webrtc::EchoCancellation::kModerateSuppression);
apm->echo_cancellation()->enable_delay_logging(true);
apm->echo_cancellation()->Enable(true);
apm->noise_suppression()->set_level(webrtc::NoiseSuppression::kModerate);
apm->noise_suppression()->Enable(true);
apm->gain_control()->set_analog_level_limits(0, 255);
apm->gain_control()->set_mode(webrtc::GainControl::kAdaptiveAnalog);
apm->gain_control()->set_target_level_dbfs(3);
apm->gain_control()->set_compression_gain_db(9);
apm->gain_control()->enable_limiter(true);
apm->gain_control()->Enable(true);
apm->voice_detection()->set_likelihood(webrtc::VoiceDetection::Likelihood::kLowLikelihood);
apm->voice_detection()->Enable(true);
}
extern "C" void
dino_plugins_rtp_voice_processor_analyze_reverse_stream(void *native_ptr, GstAudioInfo *info, GstBuffer *buffer) {
_DinoPluginsRtpVoiceProcessorNative *native = (_DinoPluginsRtpVoiceProcessorNative *) native_ptr;
webrtc::StreamConfig config(SAMPLE_RATE, SAMPLE_CHANNELS, false);
webrtc::AudioProcessing *apm = native->apm;
GstMapInfo map;
gst_buffer_map(buffer, &map, GST_MAP_READ);
webrtc::AudioFrame frame;
frame.num_channels_ = info->channels;
frame.sample_rate_hz_ = info->rate;
frame.samples_per_channel_ = gst_buffer_get_size(buffer) / info->bpf;
memcpy(frame.data_, map.data, frame.samples_per_channel_ * info->bpf);
int err = apm->AnalyzeReverseStream(&frame);
if (err < 0) g_warning("voice_processor_native.cpp: ProcessReverseStream %i", err);
gst_buffer_unmap(buffer, &map);
}
extern "C" void dino_plugins_rtp_voice_processor_notify_gain_level(void *native_ptr, gint gain_level) {
_DinoPluginsRtpVoiceProcessorNative *native = (_DinoPluginsRtpVoiceProcessorNative *) native_ptr;
webrtc::AudioProcessing *apm = native->apm;
apm->gain_control()->set_stream_analog_level(gain_level);
}
extern "C" gint dino_plugins_rtp_voice_processor_get_suggested_gain_level(void *native_ptr) {
_DinoPluginsRtpVoiceProcessorNative *native = (_DinoPluginsRtpVoiceProcessorNative *) native_ptr;
webrtc::AudioProcessing *apm = native->apm;
return apm->gain_control()->stream_analog_level();
}
extern "C" bool dino_plugins_rtp_voice_processor_get_stream_has_voice(void *native_ptr) {
_DinoPluginsRtpVoiceProcessorNative *native = (_DinoPluginsRtpVoiceProcessorNative *) native_ptr;
webrtc::AudioProcessing *apm = native->apm;
return apm->voice_detection()->stream_has_voice();
}
extern "C" void dino_plugins_rtp_voice_processor_adjust_stream_delay(void *native_ptr) {
_DinoPluginsRtpVoiceProcessorNative *native = (_DinoPluginsRtpVoiceProcessorNative *) native_ptr;
webrtc::AudioProcessing *apm = native->apm;
int median, std, poor_delays;
float fraction_poor_delays;
apm->echo_cancellation()->GetDelayMetrics(&median, &std, &fraction_poor_delays);
poor_delays = (int)(fraction_poor_delays * 100.0);
if (fraction_poor_delays < 0 || (native->last_median == median && native->last_poor_delays == poor_delays)) return;
g_debug("voice_processor_native.cpp: Stream delay metrics: median=%i std=%i poor_delays=%i%%", median, std, poor_delays);
native->last_median = median;
native->last_poor_delays = poor_delays;
if (poor_delays > 90) {
native->stream_delay = std::min(std::max(0, native->stream_delay + std::min(48, std::max(median, -48))), 384);
g_debug("voice_processor_native.cpp: set stream_delay=%i", native->stream_delay);
}
}
extern "C" void
dino_plugins_rtp_voice_processor_process_stream(void *native_ptr, GstAudioInfo *info, GstBuffer *buffer) {
_DinoPluginsRtpVoiceProcessorNative *native = (_DinoPluginsRtpVoiceProcessorNative *) native_ptr;
webrtc::StreamConfig config(SAMPLE_RATE, SAMPLE_CHANNELS, false);
webrtc::AudioProcessing *apm = native->apm;
GstMapInfo map;
gst_buffer_map(buffer, &map, GST_MAP_READWRITE);
webrtc::AudioFrame frame;
frame.num_channels_ = info->channels;
frame.sample_rate_hz_ = info->rate;
frame.samples_per_channel_ = info->rate / 100;
memcpy(frame.data_, map.data, frame.samples_per_channel_ * info->bpf);
apm->set_stream_delay_ms(native->stream_delay);
int err = apm->ProcessStream(&frame);
if (err >= 0) memcpy(map.data, frame.data_, frame.samples_per_channel_ * info->bpf);
if (err < 0) g_warning("voice_processor_native.cpp: ProcessStream %i", err);
gst_buffer_unmap(buffer, &map);
}
extern "C" void dino_plugins_rtp_voice_processor_destroy_native(void *native_ptr) {
_DinoPluginsRtpVoiceProcessorNative *native = (_DinoPluginsRtpVoiceProcessorNative *) native_ptr;
delete native;
}