Merge branch 'master' into vr

This commit is contained in:
Martin Pulec
2020-10-05 14:40:32 +02:00
59 changed files with 1575 additions and 692 deletions

View File

@@ -100,9 +100,6 @@ elif [ x"$1" = x"--tool" ]; then
TOOL=$2
shift 2
$DIR/bin/$TOOL "$@"
elif [ x"$1" = x"--gui" ]; then
shift
$DIR/bin/uv-qt --with-uv $DIR/uv-wrapper.sh "$@"
elif [ x"$1" = x"-h" -o x"$1" = x"--help" ]; then
usage
exit 0

View File

@@ -1,11 +1,15 @@
#!/bin/sh -eu
# Checks libstdc++ ABI version
# Checks libc/libstdc++ ABI version
# see also https://gcc.gnu.org/onlinedocs/libstdc++/manual/abi.html
GLIBCXX_MAX=$1
CXX_MAX=$2
shift 2
## @todo
## consider removing semver.sh and utilize sort -V to compare
GLIBC_MAX=$1
GLIBCXX_MAX=$2
CXX_MAX=$3
shift 3
SEMVER_CMP=$(dirname $0)/utils/semver.sh
@@ -15,17 +19,24 @@ if [ ! -x $SEMVER_CMP ]; then
fi
while test $# -gt 0; do
if [ -f $1 ]; then
if [ ! -f $1 ]; then
shift
continue
fi
GLIBCXX_CUR=$(nm $1 | sed -n 's/.*GLIBCXX_\([0-9.]*\).*/\1/p' | sort -n | tail -n 1)
CXX_CUR=$(nm $1 | sed -n 's/.*CXXABI_\([0-9.]*\).*/\1/p' | sort -n | tail -n 1)
GLIBC_CUR=$(ldd -r -v $1 | sed -n 's/.*(GLIBC_\([0-9.]*\)).*/\1/p' | sort -V | tail -n 1)
## @todo
## perhaps use ldd as well for the remaining 2?
GLIBCXX_CUR=$(nm $1 | sed -n 's/.*GLIBCXX_\([0-9.]*\).*/\1/p' | sort -V | tail -n 1)
CXX_CUR=$(nm $1 | sed -n 's/.*CXXABI_\([0-9.]*\).*/\1/p' | sort -V | tail -n 1)
if [ -n "$GLIBC_CUR" -a "$($SEMVER_CMP $GLIBC_CUR $GLIBC_MAX)" -gt 0 ]; then
echo "$1: GLIBC $GLIBC_CUR ($GLIBC_MAX required)" 1>&2
exit 1
fi
if [ -n "$GLIBCXX_CUR" -a "$($SEMVER_CMP $GLIBCXX_CUR $GLIBCXX_MAX)" -gt 0 ]; then
echo "$1: GLIBCXX $GLIBCXX_CUR ($GLIBCXX_MAX required)" 1>&2
exit 1
fi
if [ -n "$CXX_CUR" "$($SEMVER_CMP $CXX_CUR $CXX_MAX)" -gt 0 ]; then
if [ -n "$CXX_CUR" -a "$($SEMVER_CMP $CXX_CUR $CXX_MAX)" -gt 0 ]; then
echo "$1: CXX $CXX_CUR ($CXX_MAX required)" 1>&2
exit 1
fi

View File

@@ -101,9 +101,9 @@ jobs:
- name: make distcheck
run: make distcheck
- name: check libstdc++ ABI
run: .github/scripts/Linux/check_cxx_abi.sh 3.4.21 1.3.9 bin/* lib/ultragrid/*
run: .github/scripts/Linux/check_abi.sh 2.23 3.4.21 1.3.9 bin/* lib/ultragrid/*
- name: Create AppImage
run: APPIMAGE=`data/scripts/Linux-AppImage/create-appimage.sh` && mv $APPIMAGE UltraGrid-$VERSION-x86_64.AppImage
run: APPIMAGE=`data/scripts/Linux-AppImage/create-appimage.sh https://github.com/$GITHUB_REPOSITORY/releases/download/$TAG/UltraGrid-$VERSION-x86_64.AppImage.zsync` && mv $APPIMAGE UltraGrid-$VERSION-x86_64.AppImage
- name: Upload NDI Build
id: upload-ndi
if: github.ref == 'refs/heads/ndi-build'

View File

@@ -472,6 +472,7 @@ gui/QT/uv-qt$(APPEXT): $(wildcard $(srcdir)/gui/QT/*.cpp $(srcdir)/gui/QT/*.hpp)
TEST_OBJS = $(COMMON_OBJS) \
@TEST_OBJS@ \
test/codec_conversions_test.o \
test/ff_codec_conversions_test.o \
test/get_framerate_test.o \
test/video_desc_test.o \
test/test_bitstream.o \
@@ -509,6 +510,7 @@ check: tests
distcheck:
$(TARGET)
$(TARGET) --capabilities
$(TARGET) --list-modules
[ -z "$(GUI_EXE)" ] || $(GUI_EXE) -h
# -------------------------------------------------------------------------------------------------

View File

@@ -68,7 +68,11 @@ if [ -n "$appimage_key" ]; then
fi
wget --no-verbose https://github.com/AppImage/AppImageKit/releases/download/12/appimagetool-x86_64.AppImage -O appimagetool && chmod 755 appimagetool
./appimagetool --sign --comp gzip -u "zsync|https://github.com/${GITHUB_REPOSITORY-CESNET/UltraGrid}/releases/download/nightly/UltraGrid-nightly-x86_64.AppImage.zsync" $APPDIR $APPNAME
UPDATE_INFORMATION=
if [ $# -ge 1 ]; then
UPDATE_INFORMATION="-u zsync|$1"
fi
./appimagetool --sign --comp gzip $UPDATE_INFORMATION $APPDIR $APPNAME
)
echo $APPNAME

Submodule gpujpeg updated: 2842002b22...d277a02f51

View File

@@ -288,7 +288,7 @@ std::string Settings::getLaunchParams() const{
std::string Settings::getPreviewParams() const{
std::string out;
out += " --capture-filter preview";
out += " --capture-filter preview,every:0";
out += getOption("video.source").getLaunchOption();
out += " -d preview";
out += getOption("audio.source").getLaunchOption();

View File

@@ -6,7 +6,7 @@
<rect>
<x>0</x>
<y>0</y>
<width>1018</width>
<width>1062</width>
<height>711</height>
</rect>
</property>
@@ -17,14 +17,7 @@
<string/>
</property>
<widget class="QWidget" name="centralwidget">
<layout class="QVBoxLayout" name="verticalLayout_6" stretch="0,6,0,0">
<item>
<widget class="QLabel" name="previewLabel">
<property name="text">
<string>Preview</string>
</property>
</widget>
</item>
<layout class="QVBoxLayout" name="verticalLayout_6" stretch="6,0,0">
<item>
<layout class="QHBoxLayout" name="horizontalLayout">
<item>
@@ -37,8 +30,8 @@
</property>
<property name="sizeHint" stdset="0">
<size>
<width>0</width>
<height>0</height>
<width>13</width>
<height>13</height>
</size>
</property>
</spacer>
@@ -48,33 +41,71 @@
<property name="orientation">
<enum>Qt::Horizontal</enum>
</property>
<widget class="PreviewWidget" name="capturePreview" native="true">
<property name="sizePolicy">
<sizepolicy hsizetype="Expanding" vsizetype="Expanding">
<horstretch>0</horstretch>
<verstretch>0</verstretch>
</sizepolicy>
<widget class="QFrame" name="frame">
<property name="frameShape">
<enum>QFrame::StyledPanel</enum>
</property>
<property name="minimumSize">
<size>
<width>64</width>
<height>64</height>
</size>
<property name="frameShadow">
<enum>QFrame::Raised</enum>
</property>
<layout class="QVBoxLayout" name="verticalLayout">
<item>
<widget class="PreviewWidget" name="capturePreview" native="true">
<property name="sizePolicy">
<sizepolicy hsizetype="Expanding" vsizetype="Expanding">
<horstretch>0</horstretch>
<verstretch>0</verstretch>
</sizepolicy>
</property>
<property name="minimumSize">
<size>
<width>64</width>
<height>64</height>
</size>
</property>
</widget>
</item>
<item>
<widget class="QLabel" name="previewLabel">
<property name="text">
<string>Local</string>
</property>
</widget>
</item>
</layout>
</widget>
<widget class="PreviewWidget" name="displayPreview" native="true">
<property name="sizePolicy">
<sizepolicy hsizetype="Expanding" vsizetype="Expanding">
<horstretch>0</horstretch>
<verstretch>0</verstretch>
</sizepolicy>
<widget class="QFrame" name="frame_2">
<property name="frameShape">
<enum>QFrame::StyledPanel</enum>
</property>
<property name="minimumSize">
<size>
<width>64</width>
<height>64</height>
</size>
<property name="frameShadow">
<enum>QFrame::Raised</enum>
</property>
<layout class="QVBoxLayout" name="verticalLayout_2">
<item>
<widget class="PreviewWidget" name="displayPreview" native="true">
<property name="sizePolicy">
<sizepolicy hsizetype="Expanding" vsizetype="Expanding">
<horstretch>0</horstretch>
<verstretch>0</verstretch>
</sizepolicy>
</property>
<property name="minimumSize">
<size>
<width>64</width>
<height>64</height>
</size>
</property>
</widget>
</item>
<item>
<widget class="QLabel" name="label">
<property name="text">
<string>Remote</string>
</property>
</widget>
</item>
</layout>
</widget>
</widget>
</item>
@@ -386,6 +417,9 @@
<property name="text">
<string extracomment="Target IP or host"/>
</property>
<property name="placeholderText">
<string>localhost</string>
</property>
</widget>
</item>
<item row="1" column="0">
@@ -501,8 +535,8 @@
<rect>
<x>0</x>
<y>0</y>
<width>1018</width>
<height>22</height>
<width>1062</width>
<height>24</height>
</rect>
</property>
<property name="contextMenuPolicy">

View File

@@ -683,7 +683,7 @@ static void *audio_receiver_thread(void *arg)
dec_state = (struct audio_decoder *) calloc(1, sizeof(struct audio_decoder));
if (get_commandline_param("low-latency-audio")) {
pbuf_set_playout_delay(cp->playout_buffer, 0.005);
pbuf_set_playout_delay(cp->playout_buffer, strcmp(get_commandline_param("low-latency-audio"), "ultra") == 0 ? 0.001 :0.005);
}
assert(dec_state != NULL);
cp->decoder_state = dec_state;

View File

@@ -146,7 +146,7 @@ static void * audio_cap_alsa_init(const char *cfg)
gettimeofday(&s->start_time, NULL);
s->frame.bps = audio_capture_bps;
s->frame.sample_rate = audio_capture_sample_rate;
s->min_device_channels = s->frame.ch_count = audio_capture_channels;
s->min_device_channels = s->frame.ch_count = audio_capture_channels > 0 ? audio_capture_channels : DEFAULT_AUDIO_CAPTURE_CHANNELS;
s->tmp_data = NULL;
/* Set period size to 128 frames or more. */
@@ -218,7 +218,7 @@ static void * audio_cap_alsa_init(const char *cfg)
if (!snd_pcm_hw_params_test_access(s->handle, params, SND_PCM_ACCESS_RW_INTERLEAVED)) {
s->non_interleaved = false;
} else if (!snd_pcm_hw_params_test_access(s->handle, params, SND_PCM_ACCESS_RW_NONINTERLEAVED)) {
if (audio_capture_channels > 1) {
if (s->frame.ch_count > 1) {
log_msg(LOG_LEVEL_ERROR, MOD_NAME "Non-interleaved mode "
"available only when capturing mono!\n");
goto error;
@@ -339,7 +339,7 @@ static struct audio_frame *audio_cap_alsa_read(void *state)
}
if (s->non_interleaved) {
assert(audio_capture_channels == 1);
assert(s->frame.ch_count == 1);
discard_data = (char *) alloca(s->frames * s->frame.bps * (s->min_device_channels-1));
for (unsigned int i = 1; i < s->min_device_channels; ++i) {
read_ptr[i] = discard_data + (i - 1) * s->frames * s->frame.bps;

View File

@@ -276,7 +276,7 @@ static void * audio_cap_ca_init(const char *cfg)
s->boss_waiting = FALSE;
s->data_ready = FALSE;
s->frame.bps = audio_capture_bps ? audio_capture_bps : 2;
s->frame.ch_count = audio_capture_channels;
s->frame.ch_count = audio_capture_channels > 0 ? audio_capture_channels : DEFAULT_AUDIO_CAPTURE_CHANNELS;
double rate;
size = sizeof(double);

View File

@@ -206,14 +206,14 @@ static void * audio_cap_jack_init(const char *cfg)
i = 0;
while(ports[i]) i++;
if(i < (int) audio_capture_channels) {
s->frame.ch_count = audio_capture_channels > 0 ? audio_capture_channels : DEFAULT_AUDIO_CAPTURE_CHANNELS;
if (i < s->frame.ch_count) {
log_msg(LOG_LEVEL_ERROR, MOD_NAME "Requested channel count %d not found (matching pattern %s).\n",
audio_capture_channels, cfg);
s->frame.ch_count, cfg);
goto release_client;
}
s->frame.ch_count = audio_capture_channels;
s->frame.bps = 4;
if (audio_capture_sample_rate) {
log_msg(LOG_LEVEL_WARNING, "[JACK capture] Ignoring user specified sample rate!\n");

View File

@@ -219,11 +219,10 @@ static void * audio_cap_portaudio_init(const char *cfg)
return NULL;
}
if((int) audio_capture_channels <= device_info->maxInputChannels) {
inputParameters.channelCount = audio_capture_channels;
} else {
inputParameters.channelCount = s->frame.ch_count = audio_capture_channels > 0 ? audio_capture_channels : MAX(device_info->maxInputChannels, DEFAULT_AUDIO_CAPTURE_CHANNELS);
if (s->frame.ch_count > device_info->maxInputChannels) {
fprintf(stderr, MODULE_NAME "Requested %d input channels, device offers only %d.\n",
audio_capture_channels,
s->frame.ch_count,
device_info->maxInputChannels);
free(s);
return NULL;
@@ -258,7 +257,6 @@ static void * audio_cap_portaudio_init(const char *cfg)
return NULL;
}
s->frame.ch_count = inputParameters.channelCount;
s->frame.max_size = (s->frame.bps * s->frame.ch_count) * s->frame.sample_rate / 1000 * BUF_MS;
s->frame.data = (char*)malloc(s->frame.max_size);

View File

@@ -166,31 +166,26 @@ void sdi_capture_new_incoming_frame(void *state, struct audio_frame *frame)
unique_lock<mutex> lk(s->lock);
if(
s->audio_frame[FRAME_CAPTURE].bps != frame->bps ||
if (s->audio_frame[FRAME_CAPTURE].bps != frame->bps ||
s->audio_frame[FRAME_CAPTURE].ch_count != frame->ch_count ||
s->audio_frame[FRAME_CAPTURE].sample_rate != frame->sample_rate
) {
s->audio_frame[FRAME_CAPTURE].sample_rate != frame->sample_rate) {
s->audio_frame[FRAME_CAPTURE].bps = frame->bps;
s->audio_frame[FRAME_CAPTURE].ch_count = frame->ch_count;
s->audio_frame[FRAME_CAPTURE].sample_rate = frame->sample_rate;
s->audio_frame[FRAME_CAPTURE].data_len = 0;
s->audio_frame[FRAME_CAPTURE].max_size = frame->bps * frame->ch_count * frame->sample_rate / 1000L * MAX_BUF_SIZE_MS;
s->audio_frame[FRAME_CAPTURE].data = static_cast<char *>(malloc(s->audio_frame[FRAME_CAPTURE].max_size));
}
int needed_size = frame->data_len + s->audio_frame[FRAME_CAPTURE].data_len;
if (needed_size > frame->bps * frame->ch_count * frame->sample_rate / 1000l * MAX_BUF_SIZE_MS) {
fprintf(stderr, "[SDI] Maximal audio buffer length %ld ms exceeded! Dropping samples.\n",
MAX_BUF_SIZE_MS);
} else {
if (needed_size > (int) s->audio_frame[FRAME_CAPTURE].max_size) {
free(s->audio_frame[FRAME_CAPTURE].data);
s->audio_frame[FRAME_CAPTURE].max_size = needed_size;
s->audio_frame[FRAME_CAPTURE].data = (char *) malloc(needed_size);
}
memcpy(s->audio_frame[FRAME_CAPTURE].data + s->audio_frame[FRAME_CAPTURE].data_len,
frame->data, frame->data_len);
s->audio_frame[FRAME_CAPTURE].data_len += frame->data_len;
int len = frame->data_len;
if (len + s->audio_frame[FRAME_CAPTURE].data_len > s->audio_frame[FRAME_CAPTURE].max_size) {
LOG(LOG_LEVEL_WARNING) << "[SDI] Maximal audio buffer length " << MAX_BUF_SIZE_MS << " ms exceeded! Dropping "
<< len - (s->audio_frame[FRAME_CAPTURE].max_size - s->audio_frame[FRAME_CAPTURE].data_len) << " samples.\n";
len = s->audio_frame[FRAME_CAPTURE].max_size - s->audio_frame[FRAME_CAPTURE].data_len;
}
memcpy(s->audio_frame[FRAME_CAPTURE].data + s->audio_frame[FRAME_CAPTURE].data_len,
frame->data, len);
s->audio_frame[FRAME_CAPTURE].data_len += len;
lk.unlock();
s->audio_frame_ready_cv.notify_one();

View File

@@ -220,7 +220,7 @@ static void * audio_cap_testcard_init(const char *cfg)
case EBU:
case SILENCE:
{
s->audio.ch_count = audio_capture_channels;
s->audio.ch_count = audio_capture_channels > 0 ? audio_capture_channels : DEFAULT_AUDIO_CAPTURE_CHANNELS;
s->audio.sample_rate = audio_capture_sample_rate ? audio_capture_sample_rate :
DEFAULT_AUDIO_SAMPLE_RATE;
s->audio.bps = audio_capture_bps ? audio_capture_bps : DEFAULT_AUDIO_BPS;

View File

@@ -265,7 +265,7 @@ static void * audio_cap_wasapi_init(const char *cfg)
// get the mixer format
THROW_IF_FAILED(s->pAudioClient->GetMixFormat(&pwfx));
// set our preferences
if (audio_capture_channels) {
if (audio_capture_channels != 0) {
pwfx->nChannels = audio_capture_channels;
}
if (audio_capture_sample_rate) {
@@ -327,9 +327,8 @@ static void * audio_cap_wasapi_init(const char *cfg)
} catch (ug_runtime_error &e) {
LOG(LOG_LEVEL_ERROR) << MOD_NAME << e.what() << "\n";
if (audio_capture_channels != DEFAULT_AUDIO_CAPTURE_CHANNELS) {
LOG(LOG_LEVEL_INFO) << MOD_NAME << "Maybe wrong number of channels? Default: "
<< DEFAULT_AUDIO_CAPTURE_CHANNELS << ", requested: " << audio_capture_channels << "\n";
if (audio_capture_channels != 0) {
LOG(LOG_LEVEL_WARNING) << MOD_NAME << "Maybe wrong number of channels? Try using default.";
}
CoUninitialize();
delete s;

View File

@@ -56,8 +56,8 @@ extern "C" {
#include <libavutil/mem.h>
}
#include <array>
#include <memory>
#include <string>
#include <type_traits>
#include <unordered_map>
#include <vector>
@@ -69,6 +69,7 @@ extern "C" {
#include "utils/resource_manager.h"
#define MAGIC 0xb135ca11
#define LOW_LATENCY_AUDIOENC_FRAME_DURATION 2.5
#if LIBAVCODEC_VERSION_MAJOR < 54
#define AV_CODEC_ID_AAC CODEC_ID_AAC
@@ -132,10 +133,25 @@ struct libavcodec_codec_state {
bool context_initialized;
audio_codec_direction_t direction;
};
#if defined __GNUC__ && ! defined __clang__ && defined _GLIBCXX_HAVE_BUILTIN_IS_AGGREGATE
#ifdef __cpp_lib_is_aggregate
static_assert(is_aggregate_v<libavcodec_codec_state>, "ensure aggregate to allow aggregate initialization");
#endif
/**
* @todo
* Remove and use the global print_libav_error. Dependencies need to be resolved first.
*/
static void print_libav_audio_error(int verbosity, const char *msg, int rc) {
char errbuf[1024];
av_strerror(rc, errbuf, sizeof(errbuf));
log_msg(verbosity, "%s: %s\n", msg, errbuf);
}
#define STR_HELPER(x) #x
#define STR(x) STR_HELPER(x)
ADD_TO_PARAM("audioenc-frame-duration", "* audioenc-frame-duration=<ms>\n"
" Sets audio encoder frame duration (in ms), default is " STR(LOW_LATENCY_AUDIOENC_FRAME_DURATION) " ms for low-latency-audio\n");
/**
* Initializates selected audio codec
* @param audio_codec requested audio codec
@@ -148,6 +164,10 @@ static_assert(is_aggregate_v<libavcodec_codec_state>, "ensure aggregate to allow
static void *libavcodec_init(audio_codec_t audio_codec, audio_codec_direction_t direction, bool silent,
int bitrate)
{
if (log_level >= LOG_LEVEL_VERBOSE) {
av_log_set_level(AV_LOG_VERBOSE);
}
enum AVCodecID codec_id = AV_CODEC_ID_NONE;
auto it = mapping.find(audio_codec);
@@ -187,11 +207,11 @@ static void *libavcodec_init(audio_codec_t audio_codec, audio_codec_direction_t
}
delete s;
return NULL;
} else {
if (!silent) {
LOG(LOG_LEVEL_NOTICE) << MOD_NAME << "Using audio " <<
(direction == AUDIO_CODER ? "en"s : "de"s) << "coder: " << s->codec->name << "\n";
}
}
if (!silent) {
LOG(LOG_LEVEL_NOTICE) << MOD_NAME << "Using audio " <<
(direction == AUDIO_CODER ? "en"s : "de"s) << "coder: " << s->codec->name << "\n";
}
s->libav_global_lock = rm_acquire_shared_lock(LAVCD_LOCK_NAME);
@@ -203,7 +223,6 @@ static void *libavcodec_init(audio_codec_t audio_codec, audio_codec_direction_t
delete s;
return NULL;
}
s->codec_ctx->strict_std_compliance = -2;
s->bitrate = bitrate;
@@ -239,9 +258,12 @@ static bool reinitialize_coder(struct libavcodec_codec_state *s, struct audio_de
{
cleanup_common(s);
pthread_mutex_lock(s->libav_global_lock);
avcodec_close(s->codec_ctx);
pthread_mutex_unlock(s->libav_global_lock);
s->codec_ctx = avcodec_alloc_context3(s->codec);
if (s->codec_ctx == nullptr) { // not likely :)
LOG(LOG_LEVEL_ERROR) << MOD_NAME << "Could not allocate audio codec context\n";
return false;
}
s->codec_ctx->strict_std_compliance = -2;
/* put sample parameters */
if (s->bitrate > 0) {
@@ -300,10 +322,32 @@ static bool reinitialize_coder(struct libavcodec_codec_state *s, struct audio_de
s->codec_ctx->channel_layout = AV_CH_LAYOUT_MONO;
#endif
if (s->codec->id == AV_CODEC_ID_OPUS) {
if (int ret = av_opt_set(s->codec_ctx->priv_data, "application", "lowdelay", 0)) {
print_libav_audio_error(LOG_LEVEL_WARNING, "Could not set OPUS low delay app type", ret);
}
}
if (s->direction == AUDIO_CODER && (commandline_params.find("low-latency-audio"s) != commandline_params.end()
|| commandline_params.find("audioenc-frame-duration"s) != commandline_params.end())) {
double frame_duration = commandline_params.find("audioenc-frame-duration"s) == commandline_params.end() ?
LOW_LATENCY_AUDIOENC_FRAME_DURATION : stof(commandline_params.at("audioenc-frame-duration"s), nullptr);
if (s->codec->id == AV_CODEC_ID_OPUS) {
string frame_duration_str{to_string(frame_duration)};
int ret = av_opt_set(s->codec_ctx->priv_data, "frame_duration", frame_duration_str.c_str(), 0);
if (ret != 0) {
print_libav_audio_error(LOG_LEVEL_ERROR, "Could not set OPUS frame duration", ret);
}
}
if (s->codec->id == AV_CODEC_ID_FLAC) {
s->codec_ctx->frame_size = desc.sample_rate * frame_duration / std::chrono::milliseconds::period::den;
}
}
pthread_mutex_lock(s->libav_global_lock);
/* open it */
if (avcodec_open2(s->codec_ctx, s->codec, NULL) < 0) {
fprintf(stderr, "Could not open codec\n");
if (int ret = avcodec_open2(s->codec_ctx, s->codec, nullptr)) {
print_libav_audio_error(LOG_LEVEL_ERROR, "Could not open codec", ret);
pthread_mutex_unlock(s->libav_global_lock);
return false;
}
@@ -322,10 +366,7 @@ static bool reinitialize_coder(struct libavcodec_codec_state *s, struct audio_de
int ret = av_frame_get_buffer(s->av_frame, 0);
if (ret != 0) {
array<char, ERR_MSG_BUF_LEN> errbuf{};
av_strerror(ret, errbuf.data(), errbuf.size());
LOG(LOG_LEVEL_ERROR) << MOD_NAME << "Could not allocate audio data buffers: "
<< errbuf.data() << " (" << ret << ")\n";
print_libav_audio_error(LOG_LEVEL_ERROR, "Could not allocate audio data buffers", ret);
return false;
}
@@ -342,9 +383,12 @@ static bool reinitialize_decoder(struct libavcodec_codec_state *s, struct audio_
{
cleanup_common(s);
pthread_mutex_lock(s->libav_global_lock);
avcodec_close(s->codec_ctx);
pthread_mutex_unlock(s->libav_global_lock);
s->codec_ctx = avcodec_alloc_context3(s->codec);
if (s->codec_ctx == nullptr) { // not likely :)
LOG(LOG_LEVEL_ERROR) << MOD_NAME << "Could not allocate audio codec context\n";
return false;
}
s->codec_ctx->strict_std_compliance = -2;
s->codec_ctx->channels = 1;
@@ -442,15 +486,10 @@ static audio_channel *libavcodec_compress(void *state, audio_channel * channel)
s->output_channel.duration += s->codec_ctx->frame_size / (double) s->output_channel.sample_rate;
}
if (ret != AVERROR(EAGAIN) && ret != 0) {
char errbuf[1024];
av_strerror(ret, errbuf, sizeof(errbuf));
log_msg(LOG_LEVEL_WARNING, "Receive packet error: %s %d\n", errbuf, ret);
print_libav_audio_error(LOG_LEVEL_WARNING, "Receive packet error", ret);
}
} else {
array<char, ERR_MSG_BUF_LEN> errbuf{};
av_strerror(ret, errbuf.data(), errbuf.size());
LOG(LOG_LEVEL_WARNING) << MOD_NAME "Error encoding frame: " << errbuf.data() << " (" << ret << ")\n";
print_libav_audio_error(LOG_LEVEL_ERROR, "Error encoding frame", ret);
return {};
}
#else
@@ -460,10 +499,7 @@ static audio_channel *libavcodec_compress(void *state, audio_channel * channel)
int ret = avcodec_encode_audio2(s->codec_ctx, &pkt, s->av_frame,
&got_packet);
if(ret) {
char errbuf[1024];
av_strerror(ret, errbuf, sizeof(errbuf));
fprintf(stderr, "Warning: unable to compress audio: %s\n",
errbuf);
print_libav_audio_error(LOG_MSG_WARNING, MOD_NAME "Warning: unable to compress audio", ret);
}
if(got_packet) {
s->output_channel.data_len += pkt.size;
@@ -638,6 +674,11 @@ static void cleanup_common(struct libavcodec_codec_state *s)
#endif
}
pthread_mutex_lock(s->libav_global_lock);
avcodec_close(s->codec_ctx);
avcodec_free_context(&s->codec_ctx);
pthread_mutex_unlock(s->libav_global_lock);
s->context_initialized = false;
}
@@ -648,11 +689,6 @@ static void libavcodec_done(void *state)
cleanup_common(s);
pthread_mutex_lock(s->libav_global_lock);
avcodec_close(s->codec_ctx);
avcodec_free_context(&s->codec_ctx);
pthread_mutex_unlock(s->libav_global_lock);
rm_release_shared_lock(LAVCD_LOCK_NAME);
av_frame_free(&s->av_frame);

View File

@@ -71,6 +71,7 @@ using rang::fg;
using rang::style;
using std::cout;
constexpr int DEFAULT_BUFLEN_MS = 50;
#define NO_DATA_STOP_SEC 2
#define MOD_NAME "[CoreAudio play.] "
@@ -189,7 +190,7 @@ static int audio_play_ca_reconfigure(void *state, struct audio_desc desc)
}
{
int buf_len_ms = 200; // 200 ms by default
int buf_len_ms = DEFAULT_BUFLEN_MS;
if (get_commandline_param("audio-buffer-len")) {
buf_len_ms = atoi(get_commandline_param("audio-buffer-len"));
assert(buf_len_ms > 0 && buf_len_ms < 10000);

View File

@@ -66,6 +66,7 @@ static void usage() {
printf("every usage:\n");
printf("\tevery:numerator[/denominator]\n\n");
printf("Example: every:2 - every second frame will be dropped\n");
printf("The special case every:0 can be used to discard all frames\n");
}
static int init(struct module *parent, const char *cfg, void **state)
@@ -83,7 +84,7 @@ static int init(struct module *parent, const char *cfg, void **state)
if(strchr(cfg, '/')) {
denom = atoi(strchr(cfg, '/') + 1);
}
if (denom > n) {
if (denom > n && n != 0) {
log_msg(LOG_LEVEL_ERROR, "Currently, numerator has to be greater "
"(or equal, which, however, has a little use) than denominator.\n");
return -1;
@@ -117,6 +118,11 @@ static struct video_frame *filter(void *state, struct video_frame *in)
{
struct state_every *s = state;
if (s->num == 0) {
VIDEO_FRAME_DISPOSE(in);
return NULL;
}
s->current = (s->current + 1) % s->num;
if (s->current >= s->denom) {

View File

@@ -84,7 +84,7 @@
using rang::style;
using namespace std;
unsigned int audio_capture_channels = DEFAULT_AUDIO_CAPTURE_CHANNELS;
unsigned int audio_capture_channels = 0;
unsigned int audio_capture_bps = 0;
unsigned int audio_capture_sample_rate = 0;
@@ -541,8 +541,9 @@ ADD_TO_PARAM("audio-cap-frames", "* audio-cap-frames=<f>\n"
" Sets number of audio frames captured at once (CoreAudio)\n");
ADD_TO_PARAM("audio-disable-adaptive-buffer", "* audio-disable-adaptive-buffer\n"
" Disables audio adaptive playback buffer (CoreAudio/JACK)\n");
ADD_TO_PARAM("low-latency-audio", "* low-latency-audio\n"
" Try to reduce audio latency at the expense of worse reliability\n");
ADD_TO_PARAM("low-latency-audio", "* low-latency-audio[=ultra]\n"
" Try to reduce audio latency at the expense of worse reliability\n"
" Add ultra for even more aggressive setting.\n");
ADD_TO_PARAM("window-title", "* window-title=<title>\n"
" Use alternative window title (SDL/GL only)\n");

View File

@@ -83,7 +83,8 @@ void error(int status);
void exit_uv(int status);
#define DEFAULT_AUDIO_CAPTURE_CHANNELS 1
extern unsigned int audio_capture_channels;
extern unsigned int audio_capture_channels; ///< user-specified chan. count, if zero, module should choose
///< best/native or DEFAULT_AUDIO_CAPTURE_CHANNELS
extern unsigned int audio_capture_bps; // user-specified bps, if zero, module should choose
// best bps by itself
extern unsigned int audio_capture_sample_rate; // user-specified sample rate, if zero, module should

View File

@@ -308,7 +308,12 @@ void list_modules(enum library_class cls, int abi_version, bool full) {
}
}
void list_all_modules() {
/**
* @retval false if there occurs some problem opening one or more modules, true otherwise
*/
bool list_all_modules() {
bool ret = true;
for (auto cls_it = library_class_info.begin(); cls_it != library_class_info.end();
++cls_it) {
cout << cls_it->second.class_name << "\n";
@@ -322,12 +327,15 @@ void list_all_modules() {
}
if (!lib_errors.empty()) {
ret = false;
cout << rang::style::bold << rang::fg::red << "Errors:\n" << rang::fg::reset << rang::style::reset;
for (auto && item : lib_errors) {
cout << "\t" << rang::fg::red << item.first << rang::fg::reset << "\n\t\t" << item.second << "\n";
}
cout << "\n";
}
return ret;
}
map<string, const void *> get_libraries_for_class(enum library_class cls, int abi_version, bool include_hidden)

View File

@@ -86,7 +86,7 @@ enum library_class {
const void *load_library(const char *name, enum library_class, int abi_version);
void register_library(const char *name, const void *info, enum library_class, int abi_version, int hidden);
void list_modules(enum library_class, int abi_version, bool full);
void list_all_modules();
bool list_all_modules();
#ifdef __cplusplus
}
#endif

View File

@@ -47,7 +47,9 @@
#include "config_win32.h"
#endif // HAVE_CONFIG_H
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#ifdef HAVE_SWSCALE
#include <libswscale/swscale.h>
@@ -78,6 +80,12 @@
#define HAVE_12_AND_14_PLANAR_COLORSPACES 1
#endif
#ifdef WORDS_BIGENDIAN
#define BYTE_SWAP(x) (3 - x)
#else
#define BYTE_SWAP(x) x
#endif
//
// UG <-> FFMPEG format translations
//
@@ -185,6 +193,56 @@ bool libav_codec_has_extradata(codec_t codec) {
//
// uv_to_av_convert conversions
//
//
/** @brief Color space coefficients - RGB full range to YCbCr BT.709 limited range
*
* RGB should use SDI full range [1<<(depth-8)..255<<(depth-8)-1], see [limits]
*
* Scaled by 1<<COMP_BASE, footroom 16/255, headroom 235/255 (luma), 240/255 (chroma); limits [2^(depth-8)..255*2^(depth-8)-1]
* matrix Y = [ 0.182586, 0.614231, 0.062007; -0.100643, -0.338572, 0.4392157; 0.4392157, -0.398942, -0.040274 ]
* * [coefficients]: https://gist.github.com/yohhoy/dafa5a47dade85d8b40625261af3776a "Rec. 709 coefficients"
* * [limits]: https://tech.ebu.ch/docs/r/r103.pdf "SDI limits"
* @todo
* Use these transformations in all conversions.
* @{
*/
#define FULL_FOOT(depth) (1<<((depth)-8))
#define FULL_HEAD(depth) ((255<<((depth)-8))-1)
#define CLAMP_FULL(val, depth) MIN(FULL_HEAD(depth), MAX((val), FULL_FOOT(depth)))
typedef int32_t comp_type_t; // int32_t provides much better performance than int_fast32_t
#define COMP_BASE (sizeof(comp_type_t) == 4 ? 15 : 18) // computation will be less precise when comp_type_t is 32 bit
static_assert(sizeof(comp_type_t) * 8 >= COMP_BASE + 17, "comp_type_t not wide enough (we are computing in up to 16 bits!)");
static const comp_type_t y_r = (0.2126*219/255) * (1<<COMP_BASE);
static const comp_type_t y_g = (0.7152*219/255) * (1<<COMP_BASE);
static const comp_type_t y_b = (0.0722*219/255) * (1<<COMP_BASE);
static const comp_type_t cb_r = (-0.2126/1.8556*224/255) * (1<<COMP_BASE);
static const comp_type_t cb_g = (-0.7152/1.8556*224/255) * (1<<COMP_BASE);
static const comp_type_t cb_b = ((1-0.0722)/1.8556*224/255) * (1<<COMP_BASE);
static const comp_type_t cr_r = ((1-0.2126)/1.5748*224/255) * (1<<COMP_BASE);
static const comp_type_t cr_g = (-0.7152/1.5748*224/255) * (1<<COMP_BASE);
static const comp_type_t cr_b = (-0.0722/1.5748*224/255) * (1<<COMP_BASE);
#define RGB_TO_Y_709_SCALED(r, g, b) ((r) * y_r + (g) * y_g + (b) * y_b)
#define RGB_TO_CB_709_SCALED(r, g, b) ((r) * cb_r + (g) * cb_g + (b) * cb_b)
#define RGB_TO_CR_709_SCALED(r, g, b) ((r) * cr_r + (g) * cr_g + (b) * cr_b)
// matrix Y1^-1 = inv(Y)
static const comp_type_t y_scale = 1.164383 * (1<<COMP_BASE); // precomputed value, Y multiplier is same for all channels
//static const comp_type_t r_y = 1; // during computation already contained in y_scale
//static const comp_type_t r_cb = 0;
static const comp_type_t r_cr = 1.792741 * (1<<COMP_BASE);
//static const comp_type_t g_y = 1;
static const comp_type_t g_cb = -0.213249 * (1<<COMP_BASE);
static const comp_type_t g_cr = -0.532909 * (1<<COMP_BASE);
//static const comp_type_t b_y = 1;
static const comp_type_t b_cb = 2.112402 * (1<<COMP_BASE);
//static const comp_type_t b_cr = 0;
#define YCBCR_TO_R_709_SCALED(y, cb, cr) ((y) /* * r_y */ /* + (cb) * r_cb */ + (cr) * r_cr)
#define YCBCR_TO_G_709_SCALED(y, cb, cr) ((y) /* * g_y */ + (cb) * g_cb + (cr) * g_cr)
#define YCBCR_TO_B_709_SCALED(y, cb, cr) ((y) /* * b_y */ + (cb) * b_cb /* + (cr) * b_cr */)
/// @}
#define FORMAT_RGBA(r, g, b, depth) (CLAMP_FULL((r), (depth)) << rgb_shift[R] | CLAMP_FULL((g), (depth)) << rgb_shift[G] | CLAMP_FULL((b), (depth)) << rgb_shift[B])
static void uyvy_to_yuv420p(AVFrame * __restrict out_frame, unsigned char * __restrict in_data, int width, int height)
{
int y;
@@ -640,6 +698,158 @@ static void r10k_to_yuv422p10le(AVFrame * __restrict out_frame, unsigned char *
}
}
/**
* Converts to yuv444p 10/12/14 le
*/
static inline void r10k_to_yuv444pXXle(int depth, AVFrame * __restrict out_frame, unsigned char * __restrict in_data, int width, int height)
{
const int src_linesize = vc_get_linesize(width, R10k);
for(int y = 0; y < height; y++) {
uint16_t *dst_y = (uint16_t *) (out_frame->data[0] + out_frame->linesize[0] * y);
uint16_t *dst_cb = (uint16_t *) (out_frame->data[1] + out_frame->linesize[1] * y);
uint16_t *dst_cr = (uint16_t *) (out_frame->data[2] + out_frame->linesize[2] * y);
unsigned char *src = in_data + y * src_linesize;
OPTIMIZED_FOR(int x = 0; x < width; x++){
comp_type_t r = src[0] << 2 | src[1] >> 6;
comp_type_t g = (src[1] & 0x3F ) << 4 | src[2] >> 4;
comp_type_t b = (src[2] & 0x0F) << 6 | src[3] >> 2;
comp_type_t res_y = (RGB_TO_Y_709_SCALED(r, g, b) >> (COMP_BASE+10-depth)) + (1<<(depth-4));
comp_type_t res_cb = (RGB_TO_CB_709_SCALED(r, g, b) >> (COMP_BASE+10-depth)) + (1<<(depth-1));
comp_type_t res_cr = (RGB_TO_CR_709_SCALED(r, g, b) >> (COMP_BASE+10-depth)) + (1<<(depth-1));
*dst_y++ = MIN(MAX(res_y, 1<<(depth-4)), 235 * (1<<(depth-8)));
*dst_cb++ = MIN(MAX(res_cb, 1<<(depth-4)), 240 * (1<<(depth-8)));
*dst_cr++ = MIN(MAX(res_cr, 1<<(depth-4)), 240 * (1<<(depth-8)));
src += 4;
}
}
}
static void r10k_to_yuv444p10le(AVFrame * __restrict out_frame, unsigned char * __restrict in_data, int width, int height)
{
r10k_to_yuv444pXXle(10, out_frame, in_data, width, height);
}
static void r10k_to_yuv444p12le(AVFrame * __restrict out_frame, unsigned char * __restrict in_data, int width, int height)
{
r10k_to_yuv444pXXle(12, out_frame, in_data, width, height);
}
static void r10k_to_yuv444p16le(AVFrame * __restrict out_frame, unsigned char * __restrict in_data, int width, int height)
{
r10k_to_yuv444pXXle(16, out_frame, in_data, width, height);
}
// RGB full range to YCbCr bt. 709 limited range
/**
 * @brief Converts R12L (packed 12-bit RGB, full range) to planar limited-range
 *        BT.709 YCbCr 4:4:4 with little-endian samples of the given depth.
 *
 * R12L packs 8 pixels into 36 bytes; the loop below unpacks one such group
 * per iteration (the `// 0` .. `// 7` comments mark the pixel being emitted).
 * BYTE_SWAP compensates for the 32-bit-word byte order of the packing.
 *
 * @param depth output sample bit depth (callers pass 10, 12 or 16)
 */
static inline void r12l_to_yuv444pXXle(int depth, AVFrame * __restrict out_frame, unsigned char * __restrict in_data, int width, int height)
{
// converts current r/g/b to YCbCr, rescales from 12-bit to `depth` bits,
// adds limited-range offsets and clamps, advancing the plane pointers
// NOTE(review): the macro is not #undef'd after use
#define WRITE_RES \
        res_y = (RGB_TO_Y_709_SCALED(r, g, b) >> (COMP_BASE+12-depth)) + (1<<(depth-4));\
        res_cb = (RGB_TO_CB_709_SCALED(r, g, b) >> (COMP_BASE+12-depth)) + (1<<(depth-1));\
        res_cr = (RGB_TO_CR_709_SCALED(r, g, b) >> (COMP_BASE+12-depth)) + (1<<(depth-1));\
        *dst_y++ = MIN(MAX(res_y, 1<<(depth-4)), 235 * (1<<(depth-8)));\
        *dst_cb++ = MIN(MAX(res_cb, 1<<(depth-4)), 240 * (1<<(depth-8)));\
        *dst_cr++ = MIN(MAX(res_cr, 1<<(depth-4)), 240 * (1<<(depth-8)));
        const int src_linesize = vc_get_linesize(width, R12L);
        for (int y = 0; y < height; ++y) {
                unsigned char *src = in_data + y * src_linesize;
                uint16_t *dst_y = (uint16_t *) (out_frame->data[0] + out_frame->linesize[0] * y);
                uint16_t *dst_cb = (uint16_t *) (out_frame->data[1] + out_frame->linesize[1] * y);
                uint16_t *dst_cr = (uint16_t *) (out_frame->data[2] + out_frame->linesize[2] * y);
                // 8 pixels (one 36-byte packing group) per iteration
                OPTIMIZED_FOR (int x = 0; x < width; x += 8) {
                        comp_type_t r = 0;
                        comp_type_t g = 0;
                        comp_type_t b = 0;
                        comp_type_t res_y = 0;
                        comp_type_t res_cb = 0;
                        comp_type_t res_cr = 0;

                        r = src[BYTE_SWAP(0)];
                        r |= (src[BYTE_SWAP(1)] & 0xF) << 8;
                        g = src[BYTE_SWAP(2)] << 4 | src[BYTE_SWAP(1)] >> 4; // g0
                        b = src[BYTE_SWAP(3)];
                        src += 4;
                        b |= (src[BYTE_SWAP(0)] & 0xF) << 8;
                        WRITE_RES // 0
                        r = src[BYTE_SWAP(1)] << 4 | src[BYTE_SWAP(0)] >> 4; // r1
                        g = src[BYTE_SWAP(2)];
                        g |= (src[BYTE_SWAP(3)] & 0xF) << 8;
                        b = src[BYTE_SWAP(3)] >> 4;
                        src += 4;
                        b |= src[BYTE_SWAP(0)] << 4; // b1
                        WRITE_RES // 1
                        r = src[BYTE_SWAP(1)];
                        r |= (src[BYTE_SWAP(2)] & 0xF) << 8;
                        g = src[BYTE_SWAP(3)] << 4 | src[BYTE_SWAP(2)] >> 4; // g2
                        src += 4;
                        b = src[BYTE_SWAP(0)];
                        b |= (src[BYTE_SWAP(1)] & 0xF) << 8;
                        WRITE_RES // 2
                        r = src[BYTE_SWAP(2)] << 4 | src[BYTE_SWAP(1)] >> 4; // r3
                        g = src[BYTE_SWAP(3)];
                        src += 4;
                        g |= (src[BYTE_SWAP(0)] & 0xF) << 8;
                        b = src[BYTE_SWAP(1)] << 4 | src[BYTE_SWAP(0)] >> 4; // b3
                        WRITE_RES // 3
                        r = src[BYTE_SWAP(2)];
                        r |= (src[BYTE_SWAP(3)] & 0xF) << 8;
                        g = src[BYTE_SWAP(3)] >> 4;
                        src += 4;
                        g |= src[BYTE_SWAP(0)] << 4; // g4
                        b = src[BYTE_SWAP(1)];
                        b |= (src[BYTE_SWAP(2)] & 0xF) << 8;
                        WRITE_RES // 4
                        r = src[BYTE_SWAP(3)] << 4 | src[BYTE_SWAP(2)] >> 4; // r5
                        src += 4;
                        g = src[BYTE_SWAP(0)];
                        g |= (src[BYTE_SWAP(1)] & 0xF) << 8;
                        b = src[BYTE_SWAP(2)] << 4 | src[BYTE_SWAP(1)] >> 4; // b5
                        WRITE_RES // 5
                        r = src[BYTE_SWAP(3)];
                        src += 4;
                        r |= (src[BYTE_SWAP(0)] & 0xF) << 8;
                        g = src[BYTE_SWAP(1)] << 4 | src[BYTE_SWAP(0)] >> 4; // g6
                        b = src[BYTE_SWAP(2)];
                        b |= (src[BYTE_SWAP(3)] & 0xF) << 8;
                        WRITE_RES // 6
                        r = src[BYTE_SWAP(3)] >> 4;
                        src += 4;
                        r |= src[BYTE_SWAP(0)] << 4; // r7
                        g = src[BYTE_SWAP(1)];
                        g |= (src[BYTE_SWAP(2)] & 0xF) << 8;
                        b = src[BYTE_SWAP(3)] << 4 | src[BYTE_SWAP(2)] >> 4; // b7
                        WRITE_RES // 7
                        src += 4;
                }
        }
}
/// @brief R12L -> 10-bit planar YCbCr 4:4:4 (see r12l_to_yuv444pXXle)
static void r12l_to_yuv444p10le(AVFrame * __restrict out_frame, unsigned char * __restrict in_data, int width, int height)
{
        r12l_to_yuv444pXXle(10, out_frame, in_data, width, height);
}

/// @brief R12L -> 12-bit planar YCbCr 4:4:4 (see r12l_to_yuv444pXXle)
static void r12l_to_yuv444p12le(AVFrame * __restrict out_frame, unsigned char * __restrict in_data, int width, int height)
{
        r12l_to_yuv444pXXle(12, out_frame, in_data, width, height);
}

/// @brief R12L -> 16-bit planar YCbCr 4:4:4 (see r12l_to_yuv444pXXle)
static void r12l_to_yuv444p16le(AVFrame * __restrict out_frame, unsigned char * __restrict in_data, int width, int height)
{
        r12l_to_yuv444pXXle(16, out_frame, in_data, width, height);
}
static void rgb_to_bgr0(AVFrame * __restrict out_frame, unsigned char * __restrict in_data, int width, int height)
{
int src_linesize = vc_get_linesize(width, RGB);
@@ -897,6 +1107,141 @@ static void gbrp10le_to_r10k(char * __restrict dst_buffer, AVFrame * __restrict
}
}
/**
 * @brief Converts limited-range BT.709 planar YCbCr 4:4:4 (10/12/16-bit LE
 *        samples, per `depth`) to packed R10k (10-bit RGB).
 *
 * @param depth     bit depth of the source samples (callers pass 10, 12, 16)
 * @param rgb_shift unused - R10k has a fixed component layout
 */
static void yuv444pXXle_to_r10k(int depth, char * __restrict dst_buffer, AVFrame * __restrict frame,
                int width, int height, int pitch, int * __restrict rgb_shift)
{
        UNUSED(rgb_shift);
        for (int y = 0; y < height; ++y) {
                uint16_t *src_y = (uint16_t *) (frame->data[0] + frame->linesize[0] * y);
                uint16_t *src_cb = (uint16_t *) (frame->data[1] + frame->linesize[1] * y);
                uint16_t *src_cr = (uint16_t *) (frame->data[2] + frame->linesize[2] * y);
                unsigned char *dst = (unsigned char *) dst_buffer + y * pitch;
                OPTIMIZED_FOR (int x = 0; x < width; ++x) {
                        // NOTE: this `y` deliberately shadows the row counter; it is the
                        // offset-removed, y_scale-multiplied luma fed to the macros below
                        comp_type_t y = (y_scale * (*src_y++ - (1<<(depth-4))));
                        comp_type_t cr = *src_cr++ - (1<<(depth-1));
                        comp_type_t cb = *src_cb++ - (1<<(depth-1));
                        // shift by (COMP_BASE-10+depth) rescales the 2^COMP_BASE-scaled
                        // result from `depth`-bit input down to the 10-bit output scale
                        comp_type_t r = YCBCR_TO_R_709_SCALED(y, cb, cr) >> (COMP_BASE-10+depth);
                        comp_type_t g = YCBCR_TO_G_709_SCALED(y, cb, cr) >> (COMP_BASE-10+depth);
                        comp_type_t b = YCBCR_TO_B_709_SCALED(y, cb, cr) >> (COMP_BASE-10+depth);
                        // r g b is now on 10 bit scale
                        r = CLAMP_FULL(r, 10);
                        g = CLAMP_FULL(g, 10);
                        b = CLAMP_FULL(b, 10);
                        // pack 3x 10 bits into 4 bytes, MSB first
                        *dst++ = r >> 2;
                        *dst++ = (r & 0x3) << 6 | g >> 4;
                        *dst++ = (g & 0xF) << 4 | b >> 6;
                        *dst++ = (b & 0x3F) << 2;
                }
        }
}
/// @brief 10-bit planar YCbCr 4:4:4 -> R10k (see yuv444pXXle_to_r10k)
static void yuv444p10le_to_r10k(char * __restrict dst_buffer, AVFrame * __restrict frame,
                int width, int height, int pitch, int * __restrict rgb_shift)
{
        yuv444pXXle_to_r10k(10, dst_buffer, frame, width, height, pitch, rgb_shift);
}

/// @brief 12-bit planar YCbCr 4:4:4 -> R10k (see yuv444pXXle_to_r10k)
static void yuv444p12le_to_r10k(char * __restrict dst_buffer, AVFrame * __restrict frame,
                int width, int height, int pitch, int * __restrict rgb_shift)
{
        yuv444pXXle_to_r10k(12, dst_buffer, frame, width, height, pitch, rgb_shift);
}

/// @brief 16-bit planar YCbCr 4:4:4 -> R10k (see yuv444pXXle_to_r10k)
static void yuv444p16le_to_r10k(char * __restrict dst_buffer, AVFrame * __restrict frame,
                int width, int height, int pitch, int * __restrict rgb_shift)
{
        yuv444pXXle_to_r10k(16, dst_buffer, frame, width, height, pitch, rgb_shift);
}
/**
 * @brief Converts limited-range BT.709 planar YCbCr 4:4:4 (10/12/16-bit LE
 *        samples, per `depth`) to packed R12L (12-bit RGB).
 *
 * Eight pixels are converted per iteration and packed into one 36-byte R12L
 * group; BYTE_SWAP compensates for the 32-bit-word byte order of the packing.
 *
 * @param depth     bit depth of the source samples (callers pass 10, 12, 16)
 * @param rgb_shift unused - R12L has a fixed component layout
 */
static void yuv444pXXle_to_r12l(int depth, char * __restrict dst_buffer, AVFrame * __restrict frame,
                int width, int height, int pitch, int * __restrict rgb_shift)
{
        UNUSED(rgb_shift);
        for (int y = 0; y < height; ++y) {
                uint16_t *src_y = (uint16_t *) (frame->data[0] + frame->linesize[0] * y);
                uint16_t *src_cb = (uint16_t *) (frame->data[1] + frame->linesize[1] * y);
                uint16_t *src_cr = (uint16_t *) (frame->data[2] + frame->linesize[2] * y);
                unsigned char *dst = (unsigned char *) dst_buffer + y * pitch;
                OPTIMIZED_FOR (int x = 0; x < width; x += 8) {
                        comp_type_t r[8];
                        comp_type_t g[8];
                        comp_type_t b[8];
                        // convert 8 pixels to full-range 12-bit RGB first
                        OPTIMIZED_FOR (int j = 0; j < 8; ++j) {
                                // `y` shadows the row counter - it is the scaled luma for the macros
                                comp_type_t y = (y_scale * (*src_y++ - (1<<(depth-4))));
                                comp_type_t cr = *src_cr++ - (1<<(depth-1));
                                comp_type_t cb = *src_cb++ - (1<<(depth-1));
                                comp_type_t rr = YCBCR_TO_R_709_SCALED(y, cb, cr) >> (COMP_BASE-12+depth);
                                comp_type_t gg = YCBCR_TO_G_709_SCALED(y, cb, cr) >> (COMP_BASE-12+depth);
                                comp_type_t bb = YCBCR_TO_B_709_SCALED(y, cb, cr) >> (COMP_BASE-12+depth);
                                r[j] = CLAMP_FULL(rr, 12);
                                g[j] = CLAMP_FULL(gg, 12);
                                b[j] = CLAMP_FULL(bb, 12);
                        }
                        // pack the 8 RGB triplets into the 36-byte R12L group
                        dst[BYTE_SWAP(0)] = r[0] & 0xff;
                        dst[BYTE_SWAP(1)] = (g[0] & 0xf) << 4 | r[0] >> 8;
                        dst[BYTE_SWAP(2)] = g[0] >> 4;
                        dst[BYTE_SWAP(3)] = b[0] & 0xff;
                        dst[4 + BYTE_SWAP(0)] = (r[1] & 0xf) << 4 | b[0] >> 8;
                        dst[4 + BYTE_SWAP(1)] = r[1] >> 4;
                        dst[4 + BYTE_SWAP(2)] = g[1] & 0xff;
                        dst[4 + BYTE_SWAP(3)] = (b[1] & 0xf) << 4 | g[1] >> 8;
                        dst[8 + BYTE_SWAP(0)] = b[1] >> 4;
                        dst[8 + BYTE_SWAP(1)] = r[2] & 0xff;
                        dst[8 + BYTE_SWAP(2)] = (g[2] & 0xf) << 4 | r[2] >> 8;
                        dst[8 + BYTE_SWAP(3)] = g[2] >> 4;
                        dst[12 + BYTE_SWAP(0)] = b[2] & 0xff;
                        dst[12 + BYTE_SWAP(1)] = (r[3] & 0xf) << 4 | b[2] >> 8;
                        dst[12 + BYTE_SWAP(2)] = r[3] >> 4;
                        dst[12 + BYTE_SWAP(3)] = g[3] & 0xff;
                        dst[16 + BYTE_SWAP(0)] = (b[3] & 0xf) << 4 | g[3] >> 8;
                        dst[16 + BYTE_SWAP(1)] = b[3] >> 4;
                        dst[16 + BYTE_SWAP(2)] = r[4] & 0xff;
                        dst[16 + BYTE_SWAP(3)] = (g[4] & 0xf) << 4 | r[4] >> 8;
                        dst[20 + BYTE_SWAP(0)] = g[4] >> 4;
                        dst[20 + BYTE_SWAP(1)] = b[4] & 0xff;
                        dst[20 + BYTE_SWAP(2)] = (r[5] & 0xf) << 4 | b[4] >> 8;
                        dst[20 + BYTE_SWAP(3)] = r[5] >> 4;;
                        dst[24 + BYTE_SWAP(0)] = g[5] & 0xff;
                        dst[24 + BYTE_SWAP(1)] = (b[5] & 0xf) << 4 | g[5] >> 8;
                        dst[24 + BYTE_SWAP(2)] = b[5] >> 4;
                        dst[24 + BYTE_SWAP(3)] = r[6] & 0xff;
                        dst[28 + BYTE_SWAP(0)] = (g[6] & 0xf) << 4 | r[6] >> 8;
                        dst[28 + BYTE_SWAP(1)] = g[6] >> 4;
                        dst[28 + BYTE_SWAP(2)] = b[6] & 0xff;
                        dst[28 + BYTE_SWAP(3)] = (r[7] & 0xf) << 4 | b[6] >> 8;
                        dst[32 + BYTE_SWAP(0)] = r[7] >> 4;
                        dst[32 + BYTE_SWAP(1)] = g[7] & 0xff;
                        dst[32 + BYTE_SWAP(2)] = (b[7] & 0xf) << 4 | g[7] >> 8;
                        dst[32 + BYTE_SWAP(3)] = b[7] >> 4;
                        dst += 36;
                }
        }
}
/// @brief 10-bit planar YCbCr 4:4:4 -> R12L (see yuv444pXXle_to_r12l)
static void yuv444p10le_to_r12l(char * __restrict dst_buffer, AVFrame * __restrict frame,
                int width, int height, int pitch, int * __restrict rgb_shift)
{
        yuv444pXXle_to_r12l(10, dst_buffer, frame, width, height, pitch, rgb_shift);
}

/// @brief 12-bit planar YCbCr 4:4:4 -> R12L (see yuv444pXXle_to_r12l)
static void yuv444p12le_to_r12l(char * __restrict dst_buffer, AVFrame * __restrict frame,
                int width, int height, int pitch, int * __restrict rgb_shift)
{
        yuv444pXXle_to_r12l(12, dst_buffer, frame, width, height, pitch, rgb_shift);
}

/// @brief 16-bit planar YCbCr 4:4:4 -> R12L (see yuv444pXXle_to_r12l)
static void yuv444p16le_to_r12l(char * __restrict dst_buffer, AVFrame * __restrict frame,
                int width, int height, int pitch, int * __restrict rgb_shift)
{
        yuv444pXXle_to_r12l(16, dst_buffer, frame, width, height, pitch, rgb_shift);
}
static void gbrp10le_to_rgb(char * __restrict dst_buffer, AVFrame * __restrict frame,
int width, int height, int pitch, int * __restrict rgb_shift)
{
@@ -931,12 +1276,6 @@ static void gbrp10le_to_rgba(char * __restrict dst_buffer, AVFrame * __restrict
}
}
#ifdef WORDS_BIGENDIAN
/// Maps a byte index inside a 32-bit word to its in-memory position.
/// Fixed: the macro parameter is now parenthesized so that expression
/// arguments (e.g. BYTE_SWAP(i + 1)) expand correctly on big-endian builds.
#define BYTE_SWAP(x) (3 - (x))
#else
#define BYTE_SWAP(x) (x)
#endif
#ifdef HAVE_12_AND_14_PLANAR_COLORSPACES
static void gbrp12le_to_r12l(char * __restrict dst_buffer, AVFrame * __restrict frame,
int width, int height, int pitch, int * __restrict rgb_shift)
@@ -1343,24 +1682,29 @@ static inline void nv12_to_rgb(char * __restrict dst_buffer, AVFrame * __restric
unsigned char *dst = (unsigned char *) dst_buffer + pitch * y;
OPTIMIZED_FOR (int x = 0; x < width / 2; ++x) {
int cb = *src_cbcr++ - 128;
int cr = *src_cbcr++ - 128;
int y = *src_y++ << 16;
int r = 75700 * cr;
int g = -26864 * cb - 38050 * cr;
int b = 133176 * cb;
*dst++ = MIN(MAX(r + y, 0), (1<<24) - 1) >> 16;
*dst++ = MIN(MAX(g + y, 0), (1<<24) - 1) >> 16;
*dst++ = MIN(MAX(b + y, 0), (1<<24) - 1) >> 16;
comp_type_t cb = *src_cbcr++ - 128;
comp_type_t cr = *src_cbcr++ - 128;
comp_type_t y = *src_y++ * y_scale;
comp_type_t r = r_cr * cr;
comp_type_t g = g_cb * cb + g_cr * cr;
comp_type_t b = b_cb * cb;
if (rgba) {
*dst++ = 255;
*((uint32_t *) dst) = FORMAT_RGBA((r + y) >> COMP_BASE, (g + y) >> COMP_BASE, (b + y) >> COMP_BASE, 8);
dst += 4;
} else {
*dst++ = CLAMP_FULL((r + y) >> COMP_BASE, 8);
*dst++ = CLAMP_FULL((g + y) >> COMP_BASE, 8);
*dst++ = CLAMP_FULL((b + y) >> COMP_BASE, 8);
}
y = *src_y++ << 16;
*dst++ = MIN(MAX(r + y, 0), (1<<24) - 1) >> 16;
*dst++ = MIN(MAX(g + y, 0), (1<<24) - 1) >> 16;
*dst++ = MIN(MAX(b + y, 0), (1<<24) - 1) >> 16;
y = *src_y++ * y_scale;
if (rgba) {
*dst++ = 255;
*((uint32_t *) dst) = FORMAT_RGBA((r + y) >> COMP_BASE, (g + y) >> COMP_BASE, (b + y) >> COMP_BASE, 8);
dst += 4;
} else {
*dst++ = CLAMP_FULL((r + y) >> COMP_BASE, 8);
*dst++ = CLAMP_FULL((g + y) >> COMP_BASE, 8);
*dst++ = CLAMP_FULL((b + y) >> COMP_BASE, 8);
}
}
}
@@ -1379,105 +1723,67 @@ static void nv12_to_rgb32(char * __restrict dst_buffer, AVFrame * __restrict in_
}
/**
* Changes pixel format from planar YUV 422 to packed RGB/A.
* Changes pixel format from planar 8-bit YUV to packed RGB/A.
* Color space is assumed ITU-T Rec. 609. YUV is expected to be full scale (aka in JPEG).
*/
/**
 * @brief Converts full-range planar 8-bit YCbCr 4:2:2 to packed RGB/RGBA.
 *
 * Two horizontally adjacent pixels share one Cb/Cr pair. The multipliers are
 * fixed-point YCbCr->RGB coefficients scaled by 2^16 (per the color-space
 * comment preceding this function). Alpha, when requested, is set to 255.
 *
 * @param rgb_shift unused here - output order is fixed R, G, B[, A]
 */
static inline void yuv422p_to_rgb(char * __restrict dst_buffer, AVFrame * __restrict in_frame,
                int width, int height, int pitch, int * __restrict rgb_shift, bool rgba)
{
        UNUSED(rgb_shift);
        for(int y = 0; y < height; ++y) {
                unsigned char *src_y = (unsigned char *) in_frame->data[0] + in_frame->linesize[0] * y;
                unsigned char *src_cb = (unsigned char *) in_frame->data[1] + in_frame->linesize[1] * y;
                unsigned char *src_cr = (unsigned char *) in_frame->data[2] + in_frame->linesize[2] * y;
                unsigned char *dst = (unsigned char *) dst_buffer + pitch * y;
                OPTIMIZED_FOR (int x = 0; x < width / 2; ++x) {
                        int cb = *src_cb++ - 128;
                        int cr = *src_cr++ - 128;
                        // `y` shadows the row counter: luma scaled by 2^16
                        int y = *src_y++ << 16;
                        int r = 75700 * cr;
                        int g = -26864 * cb - 38050 * cr;
                        int b = 133176 * cb;
                        // first pixel of the pair - clamp to [0, 2^24) then drop the scale
                        *dst++ = MIN(MAX(r + y, 0), (1<<24) - 1) >> 16;
                        *dst++ = MIN(MAX(g + y, 0), (1<<24) - 1) >> 16;
                        *dst++ = MIN(MAX(b + y, 0), (1<<24) - 1) >> 16;
                        if (rgba) {
                                *dst++ = 255;
                        }
                        // second pixel reuses the chroma contribution
                        y = *src_y++ << 16;
                        *dst++ = MIN(MAX(r + y, 0), (1<<24) - 1) >> 16;
                        *dst++ = MIN(MAX(g + y, 0), (1<<24) - 1) >> 16;
                        *dst++ = MIN(MAX(b + y, 0), (1<<24) - 1) >> 16;
                        if (rgba) {
                                *dst++ = 255;
                        }
                }
        }
}
/// @brief 8-bit planar YCbCr 4:2:2 -> packed 24-bit RGB
/// NOTE(review): merge leftover - another definition of yuv422p_to_rgb24
/// (delegating to yuv8p_to_rgb) exists later in this file; one of the two
/// duplicate definitions must be removed or the TU will not link/compile.
static void yuv422p_to_rgb24(char * __restrict dst_buffer, AVFrame * __restrict in_frame,
                int width, int height, int pitch, int * __restrict rgb_shift)
{
        yuv422p_to_rgb(dst_buffer, in_frame, width, height, pitch, rgb_shift, false);
}

/// @brief 8-bit planar YCbCr 4:2:2 -> packed 32-bit RGBA
/// NOTE(review): duplicate of the yuv8p_to_rgb-based definition below - see above.
static void yuv422p_to_rgb32(char * __restrict dst_buffer, AVFrame * __restrict in_frame,
                int width, int height, int pitch, int * __restrict rgb_shift)
{
        yuv422p_to_rgb(dst_buffer, in_frame, width, height, pitch, rgb_shift, true);
}
/**
* Changes pixel format from planar YUV 420 to packed RGB/A.
* Color space is assumed ITU-T Rec. 609. YUV is expected to be full scale (aka in JPEG).
*/
static inline void yuv420p_to_rgb(char * __restrict dst_buffer, AVFrame * __restrict in_frame,
static inline void yuv8p_to_rgb(int subsampling, char * __restrict dst_buffer, AVFrame * __restrict in_frame,
int width, int height, int pitch, int * __restrict rgb_shift, bool rgba)
{
UNUSED(rgb_shift);
UNUSED(subsampling);
for(int y = 0; y < height / 2; ++y) {
unsigned char *src_y1 = (unsigned char *) in_frame->data[0] + in_frame->linesize[0] * y * 2;
unsigned char *src_y2 = (unsigned char *) in_frame->data[0] + in_frame->linesize[0] * (y * 2 + 1);
unsigned char *src_cb = (unsigned char *) in_frame->data[1] + in_frame->linesize[1] * y;
unsigned char *src_cr = (unsigned char *) in_frame->data[2] + in_frame->linesize[2] * y;
unsigned char *dst1 = (unsigned char *) dst_buffer + pitch * (y * 2);
unsigned char *dst2 = (unsigned char *) dst_buffer + pitch * (y * 2 + 1);
unsigned char *src_cb1;
unsigned char *src_cr1;
unsigned char *src_cb2;
unsigned char *src_cr2;
if (subsampling == 420) {
src_cb1 = (unsigned char *) in_frame->data[1] + in_frame->linesize[1] * y;
src_cr1 = (unsigned char *) in_frame->data[2] + in_frame->linesize[2] * y;
} else {
src_cb1 = (unsigned char *) in_frame->data[1] + in_frame->linesize[1] * (y * 2);
src_cr1 = (unsigned char *) in_frame->data[2] + in_frame->linesize[2] * (y * 2);
src_cb2 = (unsigned char *) in_frame->data[1] + in_frame->linesize[1] * (y * 2 + 1);
src_cr2 = (unsigned char *) in_frame->data[2] + in_frame->linesize[2] * (y * 2 + 1);
}
#define WRITE_RES_YUV8P_TO_RGB(DST) if (rgba) {\
*((uint32_t *) DST) = FORMAT_RGBA((r + y) >> COMP_BASE, (g + y) >> COMP_BASE, (b + y) >> COMP_BASE, 8);\
DST += 4;\
} else {\
*DST++ = CLAMP_FULL((r + y) >> COMP_BASE, 8);\
*DST++ = CLAMP_FULL((g + y) >> COMP_BASE, 8);\
*DST++ = CLAMP_FULL((b + y) >> COMP_BASE, 8);\
}\
OPTIMIZED_FOR (int x = 0; x < width / 2; ++x) {
int cb = *src_cb++ - 128;
int cr = *src_cr++ - 128;
int y = *src_y1++ << 16;
int r = 75700 * cr;
int g = -26864 * cb - 38050 * cr;
int b = 133176 * cb;
*dst1++ = MIN(MAX(r + y, 0), (1<<24) - 1) >> 16;
*dst1++ = MIN(MAX(g + y, 0), (1<<24) - 1) >> 16;
*dst1++ = MIN(MAX(b + y, 0), (1<<24) - 1) >> 16;
if (rgba) {
*dst1++ = 255;
}
y = *src_y1++ << 16;
*dst1++ = MIN(MAX(r + y, 0), (1<<24) - 1) >> 16;
*dst1++ = MIN(MAX(g + y, 0), (1<<24) - 1) >> 16;
*dst1++ = MIN(MAX(b + y, 0), (1<<24) - 1) >> 16;
if (rgba) {
*dst1++ = 255;
}
y = *src_y2++ << 16;
*dst2++ = MIN(MAX(r + y, 0), (1<<24) - 1) >> 16;
*dst2++ = MIN(MAX(g + y, 0), (1<<24) - 1) >> 16;
*dst2++ = MIN(MAX(b + y, 0), (1<<24) - 1) >> 16;
if (rgba) {
*dst2++ = 255;
}
y = *src_y2++ << 16;
*dst2++ = MIN(MAX(r + y, 0), (1<<24) - 1) >> 16;
*dst2++ = MIN(MAX(g + y, 0), (1<<24) - 1) >> 16;
*dst2++ = MIN(MAX(b + y, 0), (1<<24) - 1) >> 16;
if (rgba) {
*dst2++ = 255;
comp_type_t cb = *src_cb1++ - 128;
comp_type_t cr = *src_cr1++ - 128;
comp_type_t y = *src_y1++ * y_scale;
comp_type_t r = r_cr * cr;
comp_type_t g = g_cb * cb + g_cr * cr;
comp_type_t b = b_cb * cb;
WRITE_RES_YUV8P_TO_RGB(dst1)
y = *src_y1++ * y_scale;
WRITE_RES_YUV8P_TO_RGB(dst1)
if (subsampling == 422) {
cb = *src_cb2++ - 128;
cr = *src_cr2++ - 128;
r = r_cr * cr;
g = g_cb * cb + g_cr * cr;
b = b_cb * cb;
}
y = *src_y2++ * y_scale;
WRITE_RES_YUV8P_TO_RGB(dst2)
y = *src_y2++ * y_scale;
WRITE_RES_YUV8P_TO_RGB(dst2)
}
}
}
@@ -1485,15 +1791,28 @@ static inline void yuv420p_to_rgb(char * __restrict dst_buffer, AVFrame * __rest
/**
 * @brief Converts planar 8-bit YCbCr 4:2:0 to packed 24-bit RGB.
 *
 * Fixed: the body previously contained both a stale call to the removed
 * yuv420p_to_rgb() and the new yuv8p_to_rgb() call (merge leftover), which
 * either fails to compile or converts the frame twice. Only the delegation
 * to the common 8-bit planar converter remains.
 */
static void yuv420p_to_rgb24(char * __restrict dst_buffer, AVFrame * __restrict in_frame,
                int width, int height, int pitch, int * __restrict rgb_shift)
{
        yuv8p_to_rgb(420, dst_buffer, in_frame, width, height, pitch, rgb_shift, false);
}
/**
 * @brief Converts planar 8-bit YCbCr 4:2:0 to packed 32-bit RGBA.
 *
 * Fixed: removed the stale merge-leftover call to the no-longer-existing
 * yuv420p_to_rgb() that preceded the yuv8p_to_rgb() delegation.
 */
static void yuv420p_to_rgb32(char * __restrict dst_buffer, AVFrame * __restrict in_frame,
                int width, int height, int pitch, int * __restrict rgb_shift)
{
        yuv8p_to_rgb(420, dst_buffer, in_frame, width, height, pitch, rgb_shift, true);
}
/// @brief 8-bit planar YCbCr 4:2:2 -> packed 24-bit RGB (via yuv8p_to_rgb)
/// NOTE(review): a legacy duplicate definition of this function (delegating
/// to yuv422p_to_rgb) still exists earlier in this file and must be removed.
static void yuv422p_to_rgb24(char * __restrict dst_buffer, AVFrame * __restrict in_frame,
                int width, int height, int pitch, int * __restrict rgb_shift)
{
        yuv8p_to_rgb(422, dst_buffer, in_frame, width, height, pitch, rgb_shift, false);
}

/// @brief 8-bit planar YCbCr 4:2:2 -> packed 32-bit RGBA (via yuv8p_to_rgb)
/// NOTE(review): legacy duplicate definition exists earlier in this file - see above.
static void yuv422p_to_rgb32(char * __restrict dst_buffer, AVFrame * __restrict in_frame,
                int width, int height, int pitch, int * __restrict rgb_shift)
{
        yuv8p_to_rgb(422, dst_buffer, in_frame, width, height, pitch, rgb_shift, true);
}
/**
* Changes pixel format from planar YUV 444 to packed RGB/A.
* Color space is assumed ITU-T Rec. 609. YUV is expected to be full scale (aka in JPEG).
@@ -1511,15 +1830,17 @@ static inline void yuv444p_to_rgb(char * __restrict dst_buffer, AVFrame * __rest
OPTIMIZED_FOR (int x = 0; x < width; ++x) {
int cb = *src_cb++ - 128;
int cr = *src_cr++ - 128;
int y = *src_y++ << 16;
int r = 75700 * cr;
int g = -26864 * cb - 38050 * cr;
int b = 133176 * cb;
*dst++ = MIN(MAX(r + y, 0), (1<<24) - 1) >> 16;
*dst++ = MIN(MAX(g + y, 0), (1<<24) - 1) >> 16;
*dst++ = MIN(MAX(b + y, 0), (1<<24) - 1) >> 16;
int y = *src_y++ << COMP_BASE;
int r = r_cr * cr;
int g = g_cb * cb + g_cr * cr;
int b = b_cb * cb;
if (rgba) {
*dst++ = 255;
*((uint32_t *) dst) = (MIN(MAX((r + y) >> COMP_BASE, 1), 254) << rgb_shift[R] | MIN(MAX((g + y) >> COMP_BASE, 1), 254) << rgb_shift[G] | MIN(MAX((b + y) >> COMP_BASE, 1), 254) << rgb_shift[B]);
dst += 4;
} else {
*dst++ = MIN(MAX((r + y) >> COMP_BASE, 1), 254);
*dst++ = MIN(MAX((g + y) >> COMP_BASE, 1), 254);
*dst++ = MIN(MAX((b + y) >> COMP_BASE, 1), 254);
}
}
}
@@ -1795,69 +2116,91 @@ static void yuv444p10le_to_uyvy(char * __restrict dst_buffer, AVFrame * __restri
}
}
static inline void yuv420p10le_to_rgb(char * __restrict dst_buffer, AVFrame * __restrict in_frame,
/**
 * @brief Converts limited-range BT.709 planar 10-bit YCbCr (4:2:0 or 4:2:2)
 *        to packed 8-bit RGB (out_bit_depth 24) or RGBA-style 32-bit words
 *        composed via rgb_shift (out_bit_depth 32).
 *
 * Processes two rows per iteration: for 4:2:0 both rows share one chroma
 * line, for 4:2:2 each row has its own chroma line. Two horizontal pixels
 * always share one Cb/Cr pair.
 *
 * @param subsampling 420 or 422 (asserted)
 * @param rgb_shift   per-component bit positions, used only for out_bit_depth 32
 */
static inline void yuvp10le_to_rgb(int subsampling, char * __restrict dst_buffer, AVFrame * __restrict frame,
                int width, int height, int pitch, int * __restrict rgb_shift, int out_bit_depth)
{
        assert(subsampling == 422 || subsampling == 420);
        for (int y = 0; y < height / 2; ++y) {
                uint16_t * __restrict src_y1 = (uint16_t *) (frame->data[0] + frame->linesize[0] * 2 * y);
                uint16_t * __restrict src_y2 = (uint16_t *) (frame->data[0] + frame->linesize[0] * (2 * y + 1));
                uint16_t * __restrict src_cb1;
                uint16_t * __restrict src_cr1;
                uint16_t * __restrict src_cb2;
                uint16_t * __restrict src_cr2;
                if (subsampling == 420) {
                        // one chroma line serves both luma lines
                        src_cb1 = src_cb2 = (uint16_t *) (frame->data[1] + frame->linesize[1] * y);
                        src_cr1 = src_cr2 = (uint16_t *) (frame->data[2] + frame->linesize[2] * y);
                } else {
                        src_cb1 = (uint16_t *) (frame->data[1] + frame->linesize[1] * (2 * y));
                        src_cb2 = (uint16_t *) (frame->data[1] + frame->linesize[1] * (2 * y + 1));
                        src_cr1 = (uint16_t *) (frame->data[2] + frame->linesize[2] * (2 * y));
                        src_cr2 = (uint16_t *) (frame->data[2] + frame->linesize[2] * (2 * y + 1));
                }
                unsigned char *dst1 = (unsigned char *) dst_buffer + (2 * y) * pitch;
                unsigned char *dst2 = (unsigned char *) dst_buffer + (2 * y + 1) * pitch;

                OPTIMIZED_FOR (int x = 0; x < width / 2; ++x) {
                        // chroma contribution shared by the 2x2 (or 2x1) pixel group;
                        // 1<<9 is the 10-bit chroma mid-point, ">> (COMP_BASE + 2)"
                        // rescales from 10-bit working scale to 8-bit output
                        comp_type_t cr = *src_cr1++ - (1<<9);
                        comp_type_t cb = *src_cb1++ - (1<<9);
                        comp_type_t rr = YCBCR_TO_R_709_SCALED(0, cb, cr) >> (COMP_BASE + 2);
                        comp_type_t gg = YCBCR_TO_G_709_SCALED(0, cb, cr) >> (COMP_BASE + 2);
                        comp_type_t bb = YCBCR_TO_B_709_SCALED(0, cb, cr) >> (COMP_BASE + 2);

// adds luma to the precomputed chroma terms, clamps to 8 bits and emits
// either three bytes (RGB) or one shifted 32-bit word (rgb_shift layout)
# define WRITE_RES_YUV10P_TO_RGB(Y, DST) {\
                comp_type_t r = Y + rr;\
                comp_type_t g = Y + gg;\
                comp_type_t b = Y + bb;\
                r = CLAMP_FULL(r, 8);\
                g = CLAMP_FULL(g, 8);\
                b = CLAMP_FULL(b, 8);\
                if (out_bit_depth == 32) {\
                        *((uint32_t *) DST) = (r << rgb_shift[R] | g << rgb_shift[G] | b << rgb_shift[B]);\
                        DST += 4;\
                } else {\
                        *DST++ = r;\
                        *DST++ = g;\
                        *DST++ = b;\
                }\
        }

                        // 1<<6 == 64 is the 10-bit limited-range luma offset
                        comp_type_t y1 = (y_scale * (*src_y1++ - (1<<6))) >> (COMP_BASE + 2);
                        WRITE_RES_YUV10P_TO_RGB(y1, dst1)
                        comp_type_t y11 = (y_scale * (*src_y1++ - (1<<6))) >> (COMP_BASE + 2);
                        WRITE_RES_YUV10P_TO_RGB(y11, dst1)
                        if (subsampling == 422) {
                                // second row has its own chroma samples
                                cr = *src_cr2++ - (1<<9);
                                cb = *src_cb2++ - (1<<9);
                                rr = YCBCR_TO_R_709_SCALED(0, cb, cr) >> (COMP_BASE + 2);
                                gg = YCBCR_TO_G_709_SCALED(0, cb, cr) >> (COMP_BASE + 2);
                                bb = YCBCR_TO_B_709_SCALED(0, cb, cr) >> (COMP_BASE + 2);
                        }
                        comp_type_t y2 = (y_scale * (*src_y2++ - (1<<6))) >> (COMP_BASE + 2);
                        WRITE_RES_YUV10P_TO_RGB(y2, dst2)
                        comp_type_t y22 = (y_scale * (*src_y2++ - (1<<6))) >> (COMP_BASE + 2);
                        WRITE_RES_YUV10P_TO_RGB(y22, dst2)
                }
        }
}
/// Generates thin wrappers named yuv<subs>p10le_to_rgb<out_bit_depth>
/// (e.g. yuv420p10le_to_rgb24) that delegate to yuvp10le_to_rgb with the
/// corresponding subsampling and output depth.
#define MAKE_YUV_TO_RGB_FUNCTION_NAME(subs, out_bit_depth) yuv ## subs ## p10le_to_rgb ## out_bit_depth

#define MAKE_YUV_TO_RGB_FUNCTION(subs, out_bit_depth) static void MAKE_YUV_TO_RGB_FUNCTION_NAME(subs, out_bit_depth)(char * __restrict dst_buffer, AVFrame * __restrict in_frame,\
                int width, int height, int pitch, int * __restrict rgb_shift) {\
        yuvp10le_to_rgb(subs, dst_buffer, in_frame, width, height, pitch, rgb_shift, out_bit_depth);\
}

MAKE_YUV_TO_RGB_FUNCTION(420, 24)
MAKE_YUV_TO_RGB_FUNCTION(420, 32)
MAKE_YUV_TO_RGB_FUNCTION(422, 24)
MAKE_YUV_TO_RGB_FUNCTION(422, 32)
/**
 * @brief Converts 10-bit planar YCbCr 4:4:4 (yuv444p10le) to packed RGB/RGBA.
 *
 * Two-step conversion: the frame is first repacked to 8-bit UYVY and then
 * expanded to RGB(A) line by line with the vc_copyline decoders.
 *
 * Fixed: the intermediate step previously called yuv420p10le_to_uyvy, which
 * reads a 4:4:4 frame as if it were 4:2:0 (wrong chroma sampling); it now
 * uses the matching yuv444p10le_to_uyvy. A malloc failure check was added.
 *
 * @param rgba when true emit RGBA (alpha handled by the copyline decoder)
 */
static inline void yuv444p10le_to_rgb(char * __restrict dst_buffer, AVFrame * __restrict in_frame,
                int width, int height, int pitch, int * __restrict rgb_shift, bool rgba)
{
        decoder_t decoder = rgba ? vc_copylineUYVYtoRGBA : vc_copylineUYVYtoRGB;
        int linesize = vc_get_linesize(width, rgba ? RGBA : RGB);
        char *tmp = malloc(vc_get_linesize(width, UYVY) * height);
        if (tmp == NULL) {
                return; // allocation failed - leave destination untouched
        }
        char *uyvy = tmp;
        yuv444p10le_to_uyvy(uyvy, in_frame, width, height, vc_get_linesize(width, UYVY), rgb_shift);
        for (int i = 0; i < height; i++) {
                decoder((unsigned char *) dst_buffer, (unsigned char *) uyvy, linesize,
                                rgb_shift[R], rgb_shift[G], rgb_shift[B]);
                uyvy += vc_get_linesize(width, UYVY);
                dst_buffer += pitch;
        }
        free(tmp);
}
/// @brief 10-bit planar YCbCr 4:2:0 -> packed 24-bit RGB
/// NOTE(review): merge leftover - these wrappers call yuv420p10le_to_rgb,
/// which is not defined in this file anymore (superseded by yuvp10le_to_rgb),
/// and they collide with the MAKE_YUV_TO_RGB_FUNCTION(420, ...) generated
/// functions of the same names; they should be deleted.
static inline void yuv420p10le_to_rgb24(char * __restrict dst_buffer, AVFrame * __restrict in_frame,
                int width, int height, int pitch, int * __restrict rgb_shift)
{
        yuv420p10le_to_rgb(dst_buffer, in_frame, width, height, pitch, rgb_shift, false);
}

/// @brief 10-bit planar YCbCr 4:2:0 -> packed 32-bit RGBA
/// NOTE(review): merge leftover - see the note on yuv420p10le_to_rgb24 above.
static inline void yuv420p10le_to_rgb32(char * __restrict dst_buffer, AVFrame * __restrict in_frame,
                int width, int height, int pitch, int * __restrict rgb_shift)
{
        yuv420p10le_to_rgb(dst_buffer, in_frame, width, height, pitch, rgb_shift, true);
}
/**
 * @brief Converts 10-bit planar YCbCr 4:2:2 (yuv422p10le) to packed RGB/RGBA
 *        via an intermediate 8-bit UYVY buffer and the vc_copyline decoders.
 *
 * NOTE(review): merge leftover - superseded by yuvp10le_to_rgb(422, ...).
 * Also note malloc() is unchecked here.
 */
static void yuv422p10le_to_rgb(char * __restrict dst_buffer, AVFrame * __restrict in_frame,
                int width, int height, int pitch, int * __restrict rgb_shift, bool rgba)
{
        decoder_t decoder = rgba ? vc_copylineUYVYtoRGBA : vc_copylineUYVYtoRGB;
        int linesize = vc_get_linesize(width, rgba ? RGBA : RGB);
        char *tmp = malloc(vc_get_linesize(width, UYVY) * height);
        char *uyvy = tmp;
        yuv422p10le_to_uyvy(uyvy, in_frame, width, height, vc_get_linesize(width, UYVY), rgb_shift);
        for (int i = 0; i < height; i++) {
                decoder((unsigned char *) dst_buffer, (unsigned char *) uyvy, linesize,
                                rgb_shift[R], rgb_shift[G], rgb_shift[B]);
                uyvy += vc_get_linesize(width, UYVY);
                dst_buffer += pitch;
        }
        free(tmp);
}
/// @brief 10-bit planar YCbCr 4:2:2 -> packed 24-bit RGB
/// NOTE(review): merge leftover - collides with the
/// MAKE_YUV_TO_RGB_FUNCTION(422, ...) generated functions of the same names;
/// these wrappers (and yuv422p10le_to_rgb) should be deleted.
static inline void yuv422p10le_to_rgb24(char * __restrict dst_buffer, AVFrame * __restrict in_frame,
                int width, int height, int pitch, int * __restrict rgb_shift)
{
        yuv422p10le_to_rgb(dst_buffer, in_frame, width, height, pitch, rgb_shift, false);
}

/// @brief 10-bit planar YCbCr 4:2:2 -> packed 32-bit RGBA
/// NOTE(review): merge leftover - see the note on yuv422p10le_to_rgb24 above.
static inline void yuv422p10le_to_rgb32(char * __restrict dst_buffer, AVFrame * __restrict in_frame,
                int width, int height, int pitch, int * __restrict rgb_shift)
{
        yuv422p10le_to_rgb(dst_buffer, in_frame, width, height, pitch, rgb_shift, true);
}
static inline void yuv444p10le_to_rgb24(char * __restrict dst_buffer, AVFrame * __restrict in_frame,
int width, int height, int pitch, int * __restrict rgb_shift)
{
UNUSED(rgb_shift);
for (int y = 0; y < height; y++) {
uint16_t *src_y = (uint16_t *)(void *)(in_frame->data[0] + in_frame->linesize[0] * y);
uint16_t *src_cb = (uint16_t *)(void *)(in_frame->data[1] + in_frame->linesize[1] * y);
@@ -1865,39 +2208,34 @@ static inline void yuv444p10le_to_rgb24(char * __restrict dst_buffer, AVFrame *
uint8_t *dst = (uint8_t *)(void *)(dst_buffer + y * pitch);
OPTIMIZED_FOR (int x = 0; x < width; ++x) {
int cb = (*src_cb++ >> 2) - 128;
int cr = (*src_cr++ >> 2) - 128;
int y = (*src_y++ >> 2) << 16;
int r = 75700 * cr;
int g = -26864 * cb - 38050 * cr;
int b = 133176 * cb;
*dst++ = MIN(MAX(r + y, 0), (1<<24) - 1) >> 16;
*dst++ = MIN(MAX(g + y, 0), (1<<24) - 1) >> 16;
*dst++ = MIN(MAX(b + y, 0), (1<<24) - 1) >> 16;
comp_type_t cb = (*src_cb++ >> 2) - 128;
comp_type_t cr = (*src_cr++ >> 2) - 128;
comp_type_t y = (*src_y++ >> 2) * y_scale;
comp_type_t r = r_cr * cr;
comp_type_t g = g_cb * cb + g_cr * cr;
comp_type_t b = b_cb * cb;
if (rgba) {
*(uint32_t *)(void *) dst = FORMAT_RGBA((r + y) >> COMP_BASE, (g + y) >> COMP_BASE, (b + y) >> COMP_BASE, 8);
dst += 4;
} else {
*dst++ = CLAMP_FULL((r + y) >> COMP_BASE, 8);
*dst++ = CLAMP_FULL((g + y) >> COMP_BASE, 8);
*dst++ = CLAMP_FULL((b + y) >> COMP_BASE, 8);
}
}
}
}
/// @brief 10-bit planar YCbCr 4:4:4 -> packed 24-bit RGB (see yuv444p10le_to_rgb)
static inline void yuv444p10le_to_rgb24(char * __restrict dst_buffer, AVFrame * __restrict in_frame,
                int width, int height, int pitch, int * __restrict rgb_shift)
{
        yuv444p10le_to_rgb(dst_buffer, in_frame, width, height, pitch, rgb_shift, false);
}
/**
 * @brief Converts 10-bit planar YCbCr 4:4:4 to packed 32-bit RGBA-style
 *        output (component positions given by rgb_shift).
 *
 * Fixed: the body contained both a leftover legacy per-pixel conversion loop
 * and the delegating call to yuv444p10le_to_rgb (merge artifact), so the
 * frame was converted twice with inconsistent coefficients. Only the
 * delegation remains, mirroring yuv444p10le_to_rgb24.
 */
static inline void yuv444p10le_to_rgb32(char * __restrict dst_buffer, AVFrame * __restrict in_frame,
                int width, int height, int pitch, int * __restrict rgb_shift)
{
        yuv444p10le_to_rgb(dst_buffer, in_frame, width, height, pitch, rgb_shift, true);
}
static void p010le_to_v210(char * __restrict dst_buffer, AVFrame * __restrict in_frame,
@@ -2031,34 +2369,65 @@ const struct uv_to_av_conversion *get_uv_to_av_conversions() {
* conversions below the others.
*/
static const struct uv_to_av_conversion uv_to_av_conversions[] = {
{ v210, AV_PIX_FMT_YUV420P10LE, v210_to_yuv420p10le },
{ v210, AV_PIX_FMT_YUV422P10LE, v210_to_yuv422p10le },
{ v210, AV_PIX_FMT_YUV444P10LE, v210_to_yuv444p10le },
{ v210, AV_PIX_FMT_YUV444P16LE, v210_to_yuv444p16le },
{ v210, AV_PIX_FMT_YUV420P10LE, AVCOL_SPC_BT709, AVCOL_RANGE_MPEG, v210_to_yuv420p10le },
{ v210, AV_PIX_FMT_YUV422P10LE, AVCOL_SPC_BT709, AVCOL_RANGE_MPEG, v210_to_yuv422p10le },
{ v210, AV_PIX_FMT_YUV444P10LE, AVCOL_SPC_BT709, AVCOL_RANGE_MPEG, v210_to_yuv444p10le },
{ v210, AV_PIX_FMT_YUV444P16LE, AVCOL_SPC_BT709, AVCOL_RANGE_MPEG, v210_to_yuv444p16le },
{ R10k, AV_PIX_FMT_YUV444P10LE, AVCOL_SPC_BT709, AVCOL_RANGE_MPEG, r10k_to_yuv444p10le },
{ R10k, AV_PIX_FMT_YUV444P12LE, AVCOL_SPC_BT709, AVCOL_RANGE_MPEG, r10k_to_yuv444p12le },
{ R10k, AV_PIX_FMT_YUV444P16LE, AVCOL_SPC_BT709, AVCOL_RANGE_MPEG, r10k_to_yuv444p16le },
{ R12L, AV_PIX_FMT_YUV444P16LE, AVCOL_SPC_BT709, AVCOL_RANGE_MPEG, r12l_to_yuv444p10le },
{ R12L, AV_PIX_FMT_YUV444P16LE, AVCOL_SPC_BT709, AVCOL_RANGE_MPEG, r12l_to_yuv444p12le },
{ R12L, AV_PIX_FMT_YUV444P16LE, AVCOL_SPC_BT709, AVCOL_RANGE_MPEG, r12l_to_yuv444p16le },
#if LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(55, 15, 100) // FFMPEG commit c2869b4640f
{ v210, AV_PIX_FMT_P010LE, v210_to_p010le },
{ v210, AV_PIX_FMT_P010LE, AVCOL_SPC_BT709, AVCOL_RANGE_MPEG, v210_to_p010le },
#endif
{ UYVY, AV_PIX_FMT_YUV422P, uyvy_to_yuv422p },
{ UYVY, AV_PIX_FMT_YUVJ422P, uyvy_to_yuv422p },
{ UYVY, AV_PIX_FMT_YUV420P, uyvy_to_yuv420p },
{ UYVY, AV_PIX_FMT_YUVJ420P, uyvy_to_yuv420p },
{ UYVY, AV_PIX_FMT_NV12, uyvy_to_nv12 },
{ UYVY, AV_PIX_FMT_YUV444P, uyvy_to_yuv444p },
{ UYVY, AV_PIX_FMT_YUVJ444P, uyvy_to_yuv444p },
{ RGB, AV_PIX_FMT_BGR0, rgb_to_bgr0 },
{ RGB, AV_PIX_FMT_GBRP, rgb_to_gbrp },
{ RGBA, AV_PIX_FMT_GBRP, rgba_to_gbrp },
{ R10k, AV_PIX_FMT_BGR0, r10k_to_bgr0 },
{ R10k, AV_PIX_FMT_GBRP10LE, r10k_to_gbrp10le },
{ R10k, AV_PIX_FMT_YUV422P10LE, r10k_to_yuv422p10le },
{ UYVY, AV_PIX_FMT_YUV422P, AVCOL_SPC_BT709, AVCOL_RANGE_MPEG, uyvy_to_yuv422p },
{ UYVY, AV_PIX_FMT_YUVJ422P, AVCOL_SPC_BT709, AVCOL_RANGE_MPEG, uyvy_to_yuv422p },
{ UYVY, AV_PIX_FMT_YUV420P, AVCOL_SPC_BT709, AVCOL_RANGE_MPEG, uyvy_to_yuv420p },
{ UYVY, AV_PIX_FMT_YUVJ420P, AVCOL_SPC_BT709, AVCOL_RANGE_MPEG, uyvy_to_yuv420p },
{ UYVY, AV_PIX_FMT_NV12, AVCOL_SPC_BT709, AVCOL_RANGE_MPEG, uyvy_to_nv12 },
{ UYVY, AV_PIX_FMT_YUV444P, AVCOL_SPC_BT709, AVCOL_RANGE_MPEG, uyvy_to_yuv444p },
{ UYVY, AV_PIX_FMT_YUVJ444P, AVCOL_SPC_BT709, AVCOL_RANGE_MPEG, uyvy_to_yuv444p },
{ RGB, AV_PIX_FMT_BGR0, AVCOL_SPC_RGB, AVCOL_RANGE_JPEG, rgb_to_bgr0 },
{ RGB, AV_PIX_FMT_GBRP, AVCOL_SPC_RGB, AVCOL_RANGE_JPEG, rgb_to_gbrp },
{ RGBA, AV_PIX_FMT_GBRP, AVCOL_SPC_RGB, AVCOL_RANGE_JPEG, rgba_to_gbrp },
{ R10k, AV_PIX_FMT_BGR0, AVCOL_SPC_RGB, AVCOL_RANGE_JPEG, r10k_to_bgr0 },
{ R10k, AV_PIX_FMT_GBRP10LE, AVCOL_SPC_RGB, AVCOL_RANGE_JPEG, r10k_to_gbrp10le },
{ R10k, AV_PIX_FMT_YUV422P10LE, AVCOL_SPC_BT709, AVCOL_RANGE_MPEG, r10k_to_yuv422p10le },
#ifdef HAVE_12_AND_14_PLANAR_COLORSPACES
{ R12L, AV_PIX_FMT_GBRP12LE, r12l_to_gbrp12le },
{ R12L, AV_PIX_FMT_GBRP12LE, AVCOL_SPC_RGB, AVCOL_RANGE_JPEG, r12l_to_gbrp12le },
#endif
{ 0, 0, 0 }
{ 0, 0, 0, 0, 0 }
};
return uv_to_av_conversions;
}
/**
 * @brief Looks up the UltraGrid -> libav pixel format conversion routine.
 *
 * @param uv_codec source UltraGrid codec
 * @param av_codec destination libav pixel format
 * @return matching conversion callback, or NULL when no conversion is known
 */
pixfmt_callback_t get_uv_to_av_conversion(codec_t uv_codec, int av_codec) {
        const struct uv_to_av_conversion *c = get_uv_to_av_conversions();
        while (c->func != 0) {
                if (c->src == uv_codec && c->dst == av_codec) {
                        return c->func;
                }
                ++c;
        }
        return NULL;
}
/**
 * @brief Fills in the destination colorspace and range that the registered
 *        conversion for the given (UltraGrid codec, libav pixfmt) pair emits.
 *
 * The output parameters are left untouched when no matching conversion is
 * found in the table.
 */
void get_av_pixfmt_details(codec_t uv_codec, int av_codec, enum AVColorSpace *colorspace, enum AVColorRange *color_range)
{
        const struct uv_to_av_conversion *c = get_uv_to_av_conversions();
        for ( ; c->func != 0; ++c) {
                if (c->src != uv_codec || c->dst != av_codec) {
                        continue;
                }
                *colorspace = c->colorspace;
                *color_range = c->color_range;
                return;
        }
}
/**
* @brief returns list of available conversion. Terminated by uv_to_av_conversion::uv_codec == VIDEO_CODEC_NONE
*/
@@ -2076,8 +2445,10 @@ const struct av_to_uv_conversion *get_av_to_uv_conversions() {
{AV_PIX_FMT_YUV444P10LE, v210, yuv444p10le_to_v210, true},
{AV_PIX_FMT_YUV444P16LE, v210, yuv444p16le_to_v210, true},
{AV_PIX_FMT_YUV444P10LE, UYVY, yuv444p10le_to_uyvy, false},
{AV_PIX_FMT_YUV444P10LE, R10k, yuv444p10le_to_r10k, false},
{AV_PIX_FMT_YUV444P10LE, RGB, yuv444p10le_to_rgb24, false},
{AV_PIX_FMT_YUV444P10LE, RGBA, yuv444p10le_to_rgb32, false},
{AV_PIX_FMT_YUV444P16LE, R12L, yuv444p10le_to_r12l, false},
#if LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(55, 15, 100) // FFMPEG commit c2869b4640f
{AV_PIX_FMT_P010LE, v210, p010le_to_v210, true},
{AV_PIX_FMT_P010LE, UYVY, p010le_to_uyvy, true},
@@ -2095,7 +2466,9 @@ const struct av_to_uv_conversion *get_av_to_uv_conversions() {
{AV_PIX_FMT_YUV444P, UYVY, yuv444p_to_uyvy, true},
{AV_PIX_FMT_YUV444P, RGB, yuv444p_to_rgb24, false},
{AV_PIX_FMT_YUV444P, RGBA, yuv444p_to_rgb32, false},
// 8-bit YUV (JPEG color range)
// 8-bit YUV - this should be supposedly full range JPEG but lavd decoder doesn't honor
// GPUJPEG's SPIFF header indicating YUV BT.709 limited range. The YUVJ pixel formats
// are detected only for GPUJPEG generated JPEGs.
{AV_PIX_FMT_YUVJ420P, v210, yuv420p_to_v210, false},
{AV_PIX_FMT_YUVJ420P, UYVY, yuv420p_to_uyvy, true},
{AV_PIX_FMT_YUVJ420P, RGB, yuv420p_to_rgb24, false},
@@ -2112,6 +2485,12 @@ const struct av_to_uv_conversion *get_av_to_uv_conversions() {
{AV_PIX_FMT_NV12, UYVY, nv12_to_uyvy, true},
{AV_PIX_FMT_NV12, RGB, nv12_to_rgb24, false},
{AV_PIX_FMT_NV12, RGB, nv12_to_rgb32, false},
// 12-bit YUV
{AV_PIX_FMT_YUV444P12LE, R10k, yuv444p12le_to_r10k, false},
{AV_PIX_FMT_YUV444P16LE, R12L, yuv444p12le_to_r12l, false},
// 16-bit YUV
{AV_PIX_FMT_YUV444P16LE, R10k, yuv444p16le_to_r10k, false},
{AV_PIX_FMT_YUV444P16LE, R12L, yuv444p16le_to_r12l, false},
// RGB
{AV_PIX_FMT_GBRP, RGB, gbrp_to_rgb, true},
{AV_PIX_FMT_GBRP, RGBA, gbrp_to_rgba, true},

View File

@@ -189,9 +189,16 @@ typedef uv_to_av_convert *pixfmt_callback_t;
struct uv_to_av_conversion {
codec_t src;
enum AVPixelFormat dst;
pixfmt_callback_t func;
enum AVColorSpace colorspace; ///< destination colorspace
enum AVColorRange color_range; ///< destination color range
pixfmt_callback_t func; ///< conversion function
};
const struct uv_to_av_conversion *get_uv_to_av_conversions(void);
pixfmt_callback_t get_uv_to_av_conversion(codec_t uv_codec, int av_codec);
/**
* Returns AV format details for given pair UV,AV codec (must be unique then)
*/
void get_av_pixfmt_details(codec_t uv_codec, int av_codec, enum AVColorSpace *colorspace, enum AVColorRange *color_range);
typedef void av_to_uv_convert(char * __restrict dst_buffer, AVFrame * __restrict in_frame, int width, int height, int pitch, int * __restrict rgb_shift);
typedef av_to_uv_convert *av_to_uv_convert_p;

View File

@@ -271,14 +271,18 @@ static void crash_signal_handler(int sig)
*ptr++ = message1[i];
}
#ifndef WIN32
*ptr++ = ' '; *ptr++ = '(';
for (size_t i = 0; i < sizeof sys_siglist[sig] - 1; ++i) {
if (sys_siglist[sig][i] == '\0') {
break;
#if __GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 32)
const char *sig_desc = sigdescr_np(sig);
#else
const char *sig_desc = sys_siglist[sig];
#endif
if (sig_desc != NULL) {
*ptr++ = ' '; *ptr++ = '(';
for (size_t i = 0; sig_desc[i] != '\0'; ++i) {
*ptr++ = sig_desc[i];
}
*ptr++ = sys_siglist[sig][i];
*ptr++ = ')';
}
*ptr++ = ')';
#endif
const char message2[] = ".\n\nPlease send a bug report to address ";
for (size_t i = 0; i < sizeof message2 - 1; ++i) {
@@ -619,7 +623,7 @@ static bool parse_params(char *optarg)
return true;
}
#define EXIT(retval) { common_cleanup(init); return retval; }
#define EXIT(expr) { int rc = expr; common_cleanup(init); return rc; }
int main(int argc, char *argv[])
{
@@ -1068,8 +1072,7 @@ int main(int argc, char *argv[])
video_offset = atoi(optarg) < 0 ? abs(atoi(optarg)) : 0;
break;
case OPT_LIST_MODULES:
list_all_modules();
EXIT(EXIT_SUCCESS);
EXIT(list_all_modules() ? EXIT_SUCCESS : EXIT_FAILURE);
case OPT_START_PAUSED:
start_paused = true;
break;

View File

@@ -64,17 +64,22 @@
#endif
#include <algorithm>
#include <array>
#include <condition_variable>
#include <chrono>
#include <mutex>
#include <queue>
#include <string>
#include <utility> // std::swap
using std::array;
using std::condition_variable;
using std::max;
using std::mutex;
using std::queue;
using std::string;
using std::swap;
using std::to_string;
using std::unique_lock;
#define DEFAULT_MAX_UDP_READER_QUEUE_LEN (1920/3*8*1080/1152) //< 10-bit FullHD frame divided by 1280 MTU packets (minus headers)
@@ -85,6 +90,9 @@ static void *udp_reader(void *arg);
#define IPv4 4
#define IPv6 6
constexpr int ERRBUF_SIZE = 255;
const constexpr char *MOD_NAME = "[RTP UDP] ";
#ifdef WIN2K_IPV6
const struct in6_addr in6addr_any = { IN6ADDR_ANY_INIT };
#endif
@@ -204,9 +212,9 @@ static void udp_clean_async_state(socket_udp *s);
void socket_error(const char *msg, ...)
{
char buffer[255];
uint32_t blen = sizeof(buffer) / sizeof(buffer[0]);
va_list ap;
array<char, ERRBUF_SIZE> buffer{};
array<char, ERRBUF_SIZE> strerror_buf{"unknown"};
#ifdef WIN32
#define WSERR(x) {#x,x}
@@ -221,33 +229,45 @@ void socket_error(const char *msg, ...)
WSERR(WSAENOTCONN), WSERR(WSAENOTSOCK), WSERR(WSAEOPNOTSUPP),
WSERR(WSAESHUTDOWN), WSERR(WSAEWOULDBLOCK), WSERR(WSAEMSGSIZE),
WSERR(WSAEHOSTUNREACH), WSERR(WSAECONNABORTED),
WSERR(WSAECONNRESET),
WSERR(WSAECONNRESET), WSERR(WSAEADDRINUSE),
WSERR(WSAEADDRNOTAVAIL), WSERR(WSAEAFNOSUPPORT),
WSERR(WSAEDESTADDRREQ),
WSERR(WSAENETUNREACH), WSERR(WSAETIMEDOUT), WSERR(WSAENOPROTOOPT),
WSERR(0)
};
int i, e = WSAGetLastError();
i = 0;
int i = 0;
int e = WSAGetLastError();
if (e == WSAECONNRESET) {
return;
}
while (ws_errs[i].errno_code && ws_errs[i].errno_code != e) {
i++;
}
va_start(ap, msg);
_vsnprintf(buffer, blen, msg, ap);
_vsnprintf(buffer.data(), buffer.size(), static_cast<const char *>(msg), ap);
va_end(ap);
if (e != WSAECONNRESET)
log_msg(LOG_LEVEL_ERROR, "ERROR: %s, (%d - %s)\n", msg, e, ws_errs[i].errname);
if (i == 0) { // let system format the error message
FormatMessage (FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, // flags
NULL, // lpsource
e, // message id
MAKELANGID (LANG_NEUTRAL, SUBLANG_DEFAULT), // languageid
strerror_buf.data(), // output buffer
strerror_buf.size(), // size of msgbuf, bytes
NULL); // va_list of arguments
}
const char *errname = i == 0 ? strerror_buf.data() : ws_errs[i].errname;
LOG(LOG_LEVEL_ERROR) << "ERROR: " << buffer.data() << ", (" << e << " - " << errname << ")\n";
#else
va_start(ap, msg);
vsnprintf(buffer, blen, msg, ap);
vsnprintf(buffer.data(), buffer.size(), static_cast<const char *>(msg), ap);
va_end(ap);
char strerror_buf[255] = "";
#if ! defined _POSIX_C_SOURCE || (_POSIX_C_SOURCE >= 200112L && ! _GNU_SOURCE)
strerror_r(errno, strerror_buf, sizeof strerror_buf); // XSI version
log_msg(LOG_LEVEL_ERROR, "%s: %s\n", buffer, strerror_buf);
strerror_r(errno, strerror_buf.data(), strerror_buf.size()); // XSI version
LOG(LOG_LEVEL_ERROR) << buffer.data() << ": " << strerror_buf.data() << "\n";
#else // GNU strerror_r version
log_msg(LOG_LEVEL_ERROR, "%s: %s\n", buffer, strerror_r(errno, strerror_buf, sizeof strerror_buf));
LOG(LOG_LEVEL_ERROR) << buffer.data() << ": " << strerror_r(errno, strerror_buf.data(), strerror_buf.size()) << "\n";
#endif
#endif
}
@@ -1517,64 +1537,70 @@ bool udp_is_ipv6(socket_udp *s)
/**
* @retval 0 success
* @retval -1 failed
* @retval -2 incorrect service or hostname (not a port number)
* @retval -1 port pair is not free
* @retval -2 another error
*/
int udp_port_pair_is_free(const char *addr, int force_ip_version, int even_port)
int udp_port_pair_is_free(int force_ip_version, int even_port)
{
struct sockaddr *sin;
struct addrinfo hints, *res0;
int err;
memset(&hints, 0, sizeof(hints));
hints.ai_family = force_ip_version ? (force_ip_version == 6 ? AF_INET6 : AF_INET) : AF_UNSPEC;
struct addrinfo hints{};
struct addrinfo *res0 = nullptr;
hints.ai_family = force_ip_version == 4 ? AF_INET : AF_INET6;
hints.ai_flags = AI_NUMERICSERV | AI_PASSIVE;
hints.ai_socktype = SOCK_DGRAM;
char tx_port_str[7];
sprintf(tx_port_str, "%u", 5004);
if ((err = getaddrinfo(addr, tx_port_str, &hints, &res0)) != 0) {
string tx_port_str = to_string(5004);
if (int err = getaddrinfo(nullptr, tx_port_str.c_str(), &hints, &res0)) {
/* We should probably try to do a DNS lookup on the name */
/* here, but I'm trying to get the basics going first... */
log_msg(LOG_LEVEL_VERBOSE, "getaddrinfo: %s\n", gai_strerror(err));
return err == EAI_NONAME ? -2 : -1;
} else {
sin = res0->ai_addr;
LOG(LOG_LEVEL_ERROR) << MOD_NAME << static_cast<const char *>(__func__) << " getaddrinfo: " << gai_strerror(err) << "\n";
return -2;
}
for (int i = 0; i < 2; ++i) {
struct sockaddr *sin = res0->ai_addr;
fd_t fd;
if (sin->sa_family == AF_INET6) {
struct sockaddr_in6 *s_in6 = (struct sockaddr_in6 *) sin;
int ipv6only = 0;
s_in6->sin6_port = htons(even_port + i);
s_in6->sin6_addr = in6addr_any;
fd = socket(AF_INET6, SOCK_DGRAM, 0);
if (fd != INVALID_SOCKET) {
if (SETSOCKOPT
(fd, IPPROTO_IPV6, IPV6_V6ONLY, (char *)&ipv6only,
sizeof(ipv6only)) != 0) {
socket_error("setsockopt IPV6_V6ONLY");
socket_error("%s - setsockopt IPV6_V6ONLY", static_cast<const char *>(__func__));
CLOSESOCKET(fd);
freeaddrinfo(res0);
return -1;
return -2;
}
}
} else {
struct sockaddr_in *s_in = (struct sockaddr_in *) sin;
s_in->sin_addr.s_addr = INADDR_ANY;
s_in->sin_port = htons(even_port + i);
fd = socket(AF_INET, SOCK_DGRAM, 0);
}
if (fd == INVALID_SOCKET) {
socket_error("Unable to initialize socket");
socket_error("%s - unable to initialize socket", static_cast<const char *>(__func__));
freeaddrinfo(res0);
return -1;
return -2;
}
if (bind(fd, (struct sockaddr *) sin, res0->ai_addrlen) != 0) {
int ret = 0;
#ifdef _WIN32
if (WSAGetLastError() == WSAEADDRINUSE) {
#else
if (errno == EADDRINUSE) {
#endif
ret = -1;
} else {
ret = -2;
socket_error("%s - cannot bind", static_cast<const char *>(__func__));
}
freeaddrinfo(res0);
CLOSESOCKET(fd);
return -1;
return ret;
}
CLOSESOCKET(fd);

View File

@@ -97,7 +97,7 @@ int udp_fd_isset_r(socket_udp *s, struct udp_fd_r *);
int udp_recv_data(socket_udp * s, char **buffer);
bool udp_not_empty(socket_udp *s, struct timeval *timeout);
int udp_port_pair_is_free(const char *addr, int force_ip_version, int even_port);
int udp_port_pair_is_free(int force_ip_version, int even_port);
bool udp_is_ipv6(socket_udp *s);
void socket_error(const char *msg, ...);

View File

@@ -1113,13 +1113,12 @@ struct rtp *rtp_init_if(const char *addr, const char *iface,
if (rx_port == 0) {
for (int i = 1<<15; i < 1<<16; i += 2) {
// this stuff is not atomic. but... it cannot be done in this way, either
int ret = udp_port_pair_is_free(addr, force_ip_version, i);
int ret = udp_port_pair_is_free(force_ip_version, i);
if (ret == 0) {
rx_port = i;
break;
}
if (ret == -2) {
log_msg(LOG_LEVEL_ERROR, "Name or service is not known!\n");
free(session);
return NULL;
}

View File

@@ -713,9 +713,10 @@ AJAStatus vidcap_state_aja::SetupAudio (void)
#endif
mMaxAudioChannels = ::NTV2DeviceGetMaxAudioChannels (mDeviceID);
if (mMaxAudioChannels < (int) *aja_audio_capture_channels) {
mAudio.ch_count = *aja_audio_capture_channels > 0 ? *aja_audio_capture_channels : DEFAULT_AUDIO_CAPTURE_CHANNELS;
if (mMaxAudioChannels < mAudio.ch_count) {
LOG(LOG_LEVEL_ERROR) << MOD_NAME "Invalid number of capture channels requested. Requested " <<
*aja_audio_capture_channels << ", maximum " << mMaxAudioChannels << endl;
mAudio.ch_count << ", maximum " << mMaxAudioChannels << endl;
return AJA_STATUS_FAIL;
}
if (!mDevice.SetNumberAudioChannels (mMaxAudioChannels, NTV2InputSourceToAudioSystem(mInputSource))) {
@@ -747,7 +748,6 @@ AJAStatus vidcap_state_aja::SetupAudio (void)
mAudio.bps = 4;
mAudio.sample_rate = 48000;
mAudio.data = (char *) malloc(NTV2_AUDIOSIZE_MAX);
mAudio.ch_count = *aja_audio_capture_channels;
mAudio.max_size = NTV2_AUDIOSIZE_MAX;
#ifndef _MSC_VER

View File

@@ -377,9 +377,10 @@ static bool setup_audio(struct vidcap_bluefish444_state *s, unsigned int flags)
{
memset(&s->objHancDecode, 0, sizeof(s->objHancDecode));
s->audio.ch_count = audio_capture_channels > 0 ? audio_capture_channels : DEFAULT_AUDIO_CAPTURE_CHANNELS;
s->objHancDecode.audio_ch_required_mask = 0;
/* MONO_CHANNEL_9 and _10 are used for analog output */
switch(audio_capture_channels) {
switch (s->audio.ch_count) {
case 16:
s->objHancDecode.audio_ch_required_mask |= MONO_CHANNEL_18;
// fall through
@@ -444,7 +445,6 @@ static bool setup_audio(struct vidcap_bluefish444_state *s, unsigned int flags)
}
s->audio.bps = 2;
s->audio.ch_count = audio_capture_channels;
s->audio.sample_rate = 48000; // perhaps the driver does not support different
s->audio.max_size = 4*4096*16;

View File

@@ -9,7 +9,7 @@
* Dalibor Matura <255899@mail.muni.cz>
* Ian Wesley-Smith <iwsmith@cct.lsu.edu>
*
* Copyright (c) 2005-2019 CESNET z.s.p.o.
* Copyright (c) 2005-2020 CESNET z.s.p.o.
*
* Redistribution and use in source and binary forms, with or without
* modification, is permitted provided that the following conditions
@@ -52,6 +52,7 @@
#include "config_unix.h"
#include "config_win32.h"
#include <algorithm>
#include <cassert>
#include <condition_variable>
#include <chrono>
@@ -63,7 +64,6 @@
#include <set>
#include <string>
#include <vector>
#include <algorithm>
#include "blackmagic_common.h"
#include "audio/audio.h"
@@ -302,7 +302,7 @@ public:
bmdAudioSampleRate48kHz,
s->audio.bps == 2 ? bmdAudioSampleType16bitInteger :
bmdAudioSampleType32bitInteger,
audio_capture_channels == 1 ? 2 : audio_capture_channels); // BMD isn't able to grab single channel
max(s->audio.ch_count, 2)); // BMD isn't able to grab single channel
}
//deckLinkInput->SetCallback(s->state[i].delegate);
deckLinkInput->FlushStreams();
@@ -1085,7 +1085,7 @@ vidcap_decklink_init(struct vidcap_params *params, void **state)
log_msg(LOG_LEVEL_WARNING, "[Decklink] Ignoring unsupported sample rate!\n");
}
s->audio.sample_rate = 48000;
s->audio.ch_count = audio_capture_channels;
s->audio.ch_count = audio_capture_channels > 0 ? audio_capture_channels : DEFAULT_AUDIO_CAPTURE_CHANNELS;
s->audio.max_size = (s->audio.sample_rate / 10) * s->audio.ch_count * s->audio.bps;
s->audio.data = (char *) malloc(s->audio.max_size);
} else {
@@ -1354,12 +1354,12 @@ vidcap_decklink_init(struct vidcap_params *params, void **state)
fprintf(stderr, "[Decklink capture] Unable to set audio input!!! Please check if it is OK. Continuing anyway.\n");
}
if (audio_capture_channels != 1 &&
audio_capture_channels != 2 &&
audio_capture_channels != 8 &&
audio_capture_channels != 16) {
if (s->audio.ch_count != 1 &&
s->audio.ch_count != 2 &&
s->audio.ch_count != 8 &&
s->audio.ch_count != 16) {
fprintf(stderr, "[DeckLink] Decklink cannot grab %d audio channels. "
"Only 1, 2, 8 or 16 are possible.", audio_capture_channels);
"Only 1, 2, 8 or 16 are possible.", s->audio.ch_count);
goto error;
}
if (s->audio_consumer_levels != -1) {
@@ -1372,7 +1372,7 @@ vidcap_decklink_init(struct vidcap_params *params, void **state)
CALL_AND_CHECK(deckLinkInput->EnableAudioInput(
bmdAudioSampleRate48kHz,
s->audio.bps == 2 ? bmdAudioSampleType16bitInteger : bmdAudioSampleType32bitInteger,
audio_capture_channels == 1 ? 2 : audio_capture_channels),
max(s->audio.ch_count, 2)), // capture at least 2
"EnableAudioInput",
"Decklink audio capture initialized sucessfully: " << audio_desc_from_frame(&s->audio));
}
@@ -1541,7 +1541,7 @@ static audio_frame *process_new_audio_packets(struct vidcap_decklink_state *s) {
void *audioFrame = nullptr;
audioPacket->GetBytes(&audioFrame);
if(audio_capture_channels == 1) { // there are actually 2 channels grabbed
if (s->audio.ch_count == 1) { // there are actually 2 channels grabbed
if (s->audio.data_len + audioPacket->GetSampleFrameCount() * 1U * s->audio.bps <= static_cast<unsigned>(s->audio.max_size)) {
demux_channel(s->audio.data + s->audio.data_len, static_cast<char *>(audioFrame), s->audio.bps, min<int64_t>(audioPacket->GetSampleFrameCount() * 2 /* channels */ * s->audio.bps, INT_MAX), 2 /* channels (originally) */, 0 /* we want first channel */);
s->audio.data_len = min<int64_t>(s->audio.data_len + audioPacket->GetSampleFrameCount() * 1 * s->audio.bps, INT_MAX);
@@ -1549,9 +1549,9 @@ static audio_frame *process_new_audio_packets(struct vidcap_decklink_state *s) {
LOG(LOG_LEVEL_WARNING) << "[DeckLink] Audio frame too small!\n";
}
} else {
if (s->audio.data_len + audioPacket->GetSampleFrameCount() * audio_capture_channels * s->audio.bps <= static_cast<unsigned>(s->audio.max_size)) {
memcpy(s->audio.data + s->audio.data_len, audioFrame, audioPacket->GetSampleFrameCount() * audio_capture_channels * s->audio.bps);
s->audio.data_len = min<int64_t>(s->audio.data_len + audioPacket->GetSampleFrameCount() * audio_capture_channels * s->audio.bps, INT_MAX);
if (s->audio.data_len + audioPacket->GetSampleFrameCount() * s->audio.ch_count * s->audio.bps <= static_cast<unsigned>(s->audio.max_size)) {
memcpy(s->audio.data + s->audio.data_len, audioFrame, audioPacket->GetSampleFrameCount() * s->audio.ch_count * s->audio.bps);
s->audio.data_len = min<int64_t>(s->audio.data_len + audioPacket->GetSampleFrameCount() * s->audio.ch_count * s->audio.bps, INT_MAX);
} else {
LOG(LOG_LEVEL_WARNING) << "[DeckLink] Audio frame too small!\n";
}

View File

@@ -35,35 +35,34 @@
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "host.h"
#include "config.h"
#include "config_unix.h"
#include "config_win32.h"
#include "debug.h"
#include "lib_common.h"
#include "video.h"
#include "video_capture.h"
#include "video_capture_params.h"
#include "tv.h"
#include <algorithm>
#include <fcntl.h>
#include <semaphore.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifndef WIN32
#include <sys/ioctl.h>
#include <sys/poll.h>
#endif
#include <sys/stat.h>
#include <sys/time.h>
#include <unistd.h>
#include "audio/audio.h"
#include "audio/utils.h"
#include "debug.h"
#include "deltacast_common.hpp"
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <string.h>
#include <sys/stat.h>
#ifndef WIN32
#include <sys/poll.h>
#include <sys/ioctl.h>
#endif
#include <sys/time.h>
#include <semaphore.h>
#include "host.h"
#include "lib_common.h"
#include "tv.h"
#include "video.h"
#include "video_capture.h"
#include "video_capture_params.h"
using namespace std;
@@ -279,21 +278,17 @@ static bool wait_for_channel(struct vidcap_deltacast_state *s)
}
if ((s->initialize_flags & VIDCAP_FLAG_AUDIO_EMBEDDED) == 0u) {
if(audio_capture_channels != 1 &&
audio_capture_channels != 2) {
s->audio_frame.ch_count = audio_capture_channels > 0 ? audio_capture_channels : max(DEFAULT_AUDIO_CAPTURE_CHANNELS, 2);
if (s->audio_frame.ch_count != 1 &&
s->audio_frame.ch_count != 2) {
log_msg(LOG_LEVEL_ERROR, "[DELTACAST capture] Unable to handle channel count other than 1 or 2.\n");
throw delta_init_exception();
}
s->audio_frame.bps = 3;
s->audio_frame.sample_rate = 48000;
s->audio_frame.ch_count = audio_capture_channels;
memset(&s->AudioInfo, 0, sizeof(VHD_AUDIOINFO));
s->pAudioChn = &s->AudioInfo.pAudioGroups[0].pAudioChannels[0];
if(audio_capture_channels == 1) {
s->pAudioChn->Mode = s->AudioInfo.pAudioGroups[0].pAudioChannels[1].Mode=VHD_AM_MONO;
} else if(audio_capture_channels == 2) {
s->pAudioChn->Mode = s->AudioInfo.pAudioGroups[0].pAudioChannels[1].Mode=VHD_AM_STEREO;
} else abort();
s->pAudioChn->Mode = s->AudioInfo.pAudioGroups[0].pAudioChannels[1].Mode= s->audio_frame.ch_count == 1 ? VHD_AM_MONO : VHD_AM_STEREO;
s->pAudioChn->BufferFormat = s->AudioInfo.pAudioGroups[0].pAudioChannels[1].BufferFormat=VHD_AF_24;
/* Get the biggest audio frame size */

View File

@@ -482,17 +482,17 @@ static int vidcap_dvs_init(struct vidcap_params *params, void **state)
if (res != SV_OK) {
goto error;
}
if(audio_capture_channels != 2 && audio_capture_channels != 1) {
s->audio.ch_count = audio_capture_channels > 0 ? audio_capture_channels : DEFAULT_AUDIO_CAPTURE_CHANNELS;
if (s->audio.ch_count != 2 && s->audio.ch_count != 1) {
fprintf(stderr, "[DVS cap.] Invalid channel count %d. "
"Currently only 1 or 2 channels are supported.\n",
audio_capture_channels);
s->audio.ch_count);
goto error;
}
res = sv_option(s->sv, SV_OPTION_AUDIOCHANNELS, 1); // one pair
if (res != SV_OK) {
goto error;
}
s->audio.ch_count = audio_capture_channels;
sv_query(s->sv, SV_QUERY_AUDIOBITS, 0, &i);
s->audio.bps = i / 8;
@@ -504,7 +504,7 @@ static int vidcap_dvs_init(struct vidcap_params *params, void **state)
s->audio_bufs[0] = malloc(s->audio.sample_rate * 2 * s->audio.bps);
s->audio_bufs[1] = malloc(s->audio.sample_rate * 2 * s->audio.bps);
if(audio_capture_channels == 1) {
if (s->audio.ch_count == 1) {
// data need to be demultiplexed
s->audio.max_size = s->audio.sample_rate * s->audio.bps;
s->audio.data = (char *) malloc(s->audio.max_size);

View File

@@ -80,6 +80,7 @@
#include "video.h"
#include "video_capture.h"
static const double AUDIO_RATIO = 1.05; ///< at this ratio the audio frame can be longer than the video frame
#define MAGIC to_fourcc('u', 'g', 'l', 'f')
#define MOD_NAME "[File cap.] "
@@ -101,7 +102,7 @@ struct vidcap_state_lavf_decoder {
bool use_audio;
int video_stream_idx, audio_stream_idx;
int64_t last_vid_pts;
int64_t last_vid_pts; ///< last played PTS, if PTS == PTS_NO_VALUE, DTS is stored instead
struct video_desc video_desc;
@@ -185,10 +186,14 @@ static void vidcap_file_write_audio(struct vidcap_state_lavf_decoder *s, AVFrame
s->audio_frame.data_len += plane_count * bps * s->aud_ctx->frame_size;
} else {
int data_size = av_samples_get_buffer_size(NULL, s->audio_frame.ch_count,
s->aud_ctx->frame_size,
frame->nb_samples,
s->aud_ctx->sample_fmt, 1);
append_audio_frame(&s->audio_frame, (char *) frame->data[0],
data_size);
if (data_size < 0) {
print_libav_error(LOG_LEVEL_WARNING, MOD_NAME " av_samples_get_buffer_size", data_size);
} else {
append_audio_frame(&s->audio_frame, (char *) frame->data[0],
data_size);
}
}
pthread_mutex_unlock(&s->audio_frame_lock);
}
@@ -262,10 +267,21 @@ static void *vidcap_file_worker(void *state) {
}
CHECK_FF(ret, FAIL_WORKER); // check the retval of av_read_frame for error other than EOF
log_msg(LOG_LEVEL_DEBUG, MOD_NAME "received %s packet, ID %d, pos %" PRId64 ", size %d\n",
AVRational tb = s->fmt_ctx->streams[pkt.stream_index]->time_base;
char pts_val[128] = "NO VALUE";
if (pkt.pts != AV_NOPTS_VALUE) {
snprintf(pts_val, sizeof pts_val, "%" PRId64, pkt.pts);
}
char dts_val[128] = "NO VALUE";
if (pkt.dts != AV_NOPTS_VALUE) {
snprintf(dts_val, sizeof dts_val, "%" PRId64, pkt.dts);
}
log_msg(LOG_LEVEL_DEBUG, MOD_NAME "received %s packet, ID %d, pos %f (pts %s, dts %s), size %d\n",
av_get_media_type_string(
s->fmt_ctx->streams[pkt.stream_index]->codecpar->codec_type),
pkt.stream_index, pkt.pos, pkt.size);
pkt.stream_index, (double) (pkt.pts == AV_NOPTS_VALUE ? pkt.dts : pkt.pts)
* tb.num / tb.den, pts_val, dts_val, pkt.size);
if (pkt.stream_index == s->audio_stream_idx) {
ret = avcodec_send_packet(s->aud_ctx, &pkt);
@@ -288,7 +304,7 @@ static void *vidcap_file_worker(void *state) {
}
av_frame_free(&frame);
} else if (pkt.stream_index == s->video_stream_idx) {
s->last_vid_pts = pkt.pts;
s->last_vid_pts = pkt.pts == AV_NOPTS_VALUE ? pkt.dts : pkt.pts;
struct video_frame *out;
if (s->no_decode) {
out = vf_alloc_desc(s->video_desc);
@@ -300,6 +316,9 @@ static void *vidcap_file_worker(void *state) {
} else {
AVFrame * frame = av_frame_alloc();
int got_frame = 0;
struct timeval t0;
gettimeofday(&t0, NULL);
ret = avcodec_send_packet(s->vid_ctx, &pkt);
if (ret == 0 || ret == AVERROR(EAGAIN)) {
ret = avcodec_receive_frame(s->vid_ctx, frame);
@@ -307,9 +326,12 @@ static void *vidcap_file_worker(void *state) {
got_frame = 1;
}
}
struct timeval t1;
gettimeofday(&t1, NULL);
if (ret != 0) {
print_decoder_error(MOD_NAME, ret);
}
log_msg(LOG_LEVEL_VERBOSE, MOD_NAME "Video decompress duration: %f\n", tv_diff(t1, t0));
if (ret < 0 || !got_frame) {
if (ret < 0) {
@@ -389,6 +411,9 @@ static AVCodecContext *vidcap_file_open_dec_ctx(AVCodec *dec, AVStream *st) {
if (!dec_ctx) {
return NULL;
}
dec_ctx->thread_count = 0; // means auto for most codecs
dec_ctx->thread_type = FF_THREAD_SLICE;
/* Copy codec parameters from input stream to output codec context */
if (avcodec_parameters_to_context(dec_ctx, st->codecpar) < 0) {
log_msg(LOG_LEVEL_ERROR, MOD_NAME "Unable to copy parameters\n");
@@ -465,17 +490,16 @@ static int vidcap_file_init(struct vidcap_params *params, void **state) {
/* open input file, and allocate format context */
if ((rc = avformat_open_input(&s->fmt_ctx, s->src_filename, NULL, NULL)) < 0) {
snprintf(errbuf, sizeof errbuf, MOD_NAME "Could not open source file %s: ", s->src_filename);
snprintf(errbuf, sizeof errbuf, MOD_NAME "Could not open source file %s", s->src_filename);
}
/* retrieve stream information */
if (rc >= 0 && (rc = avformat_find_stream_info(s->fmt_ctx, NULL)) < 0) {
snprintf(errbuf, sizeof errbuf, MOD_NAME "Could not find stream information: \n");
snprintf(errbuf, sizeof errbuf, MOD_NAME "Could not find stream information");
}
if (rc < 0) {
av_strerror(rc, errbuf + strlen(errbuf), sizeof errbuf - strlen(errbuf));
log_msg(LOG_LEVEL_ERROR, "%s\n", errbuf);
print_libav_error(LOG_LEVEL_ERROR, errbuf, rc);
vidcap_file_common_cleanup(s);
return VIDCAP_INIT_FAIL;
}
@@ -550,6 +574,8 @@ static int vidcap_file_init(struct vidcap_params *params, void **state) {
s->video_desc.interlacing = PROGRESSIVE; /// @todo other modes
}
log_msg(LOG_LEVEL_VERBOSE, MOD_NAME "Capturing audio idx %d, video idx %d\n", s->audio_stream_idx, s->video_stream_idx);
s->last_vid_pts = s->fmt_ctx->streams[s->video_stream_idx]->start_time;
playback_register_keyboard_ctl(&s->mod);
@@ -577,6 +603,28 @@ static void vidcap_file_dispose_audio(struct audio_frame *f) {
free(f);
}
static struct audio_frame *get_audio(struct vidcap_state_lavf_decoder *s, double video_fps) {
pthread_mutex_lock(&s->audio_frame_lock);
struct audio_frame *ret = (struct audio_frame *) malloc(sizeof(struct audio_frame));
memcpy(ret, &s->audio_frame, sizeof *ret);
// capture more data to ensure the buffer won't grow - it is capped with actually read
// data, still. Moreover there number of audio samples per video frame period may not
// be integer. It shouldn't be much, however, not to confuse adaptible audio buffer.
ret->max_size =
ret->data_len = MIN((int) (AUDIO_RATIO * ret->sample_rate / video_fps) * ret->bps * ret->ch_count , s->audio_frame.data_len);
ret->data = (char *) malloc(ret->max_size);
memcpy(ret->data, s->audio_frame.data, ret->data_len);
s->audio_frame.data_len -= ret->data_len;
memmove(s->audio_frame.data, s->audio_frame.data + ret->data_len, s->audio_frame.data_len);
ret->dispose = vidcap_file_dispose_audio;
pthread_mutex_unlock(&s->audio_frame_lock);
return ret;
}
static struct video_frame *vidcap_file_grab(void *state, struct audio_frame **audio) {
struct vidcap_state_lavf_decoder *s = (struct vidcap_state_lavf_decoder *) state;
@@ -597,11 +645,7 @@ static struct video_frame *vidcap_file_grab(void *state, struct audio_frame **au
pthread_mutex_unlock(&s->lock);
pthread_cond_signal(&s->frame_consumed);
pthread_mutex_lock(&s->audio_frame_lock);
*audio = audio_frame_copy(&s->audio_frame, false);
(*audio)->dispose = vidcap_file_dispose_audio;
s->audio_frame.data_len = 0;
pthread_mutex_unlock(&s->audio_frame_lock);
*audio = get_audio(s, out->fps);
struct timeval t;
do {
@@ -630,7 +674,7 @@ static const struct video_capture_info vidcap_file_info = {
vidcap_file_init,
vidcap_file_done,
vidcap_file_grab,
false
true
};
REGISTER_MODULE(file, &vidcap_file_info, LIBRARY_CLASS_VIDEO_CAPTURE, VIDEO_CAPTURE_ABI_VERSION);

View File

@@ -49,6 +49,7 @@
#endif
#include <Processing.NDI.Lib.h>
#include <algorithm>
#include <array>
#include <chrono>
#include <iostream>
@@ -70,8 +71,11 @@
#include "video.h"
#include "video_capture.h"
static constexpr const char *MOD_NAME = "[NDI] ";
using std::array;
using std::cout;
using std::max;
using std::string;
using std::chrono::duration_cast;
using std::chrono::steady_clock;
@@ -199,22 +203,27 @@ static void vidcap_ndi_done(void *state)
static void audio_append(struct vidcap_state_ndi *s, NDIlib_audio_frame_v2_t *frame)
{
struct audio_desc d{4, frame->sample_rate, static_cast<int>(audio_capture_channels), AC_PCM};
struct audio_desc d{4, frame->sample_rate, static_cast<int>(audio_capture_channels > 0 ? audio_capture_channels : frame->no_channels), AC_PCM};
if (!audio_desc_eq(d, audio_desc_from_audio_frame(&s->audio[s->audio_buf_idx]))) {
free(s->audio[s->audio_buf_idx].data);
s->audio[s->audio_buf_idx].bps = 4;
s->audio[s->audio_buf_idx].sample_rate = frame->sample_rate;
s->audio[s->audio_buf_idx].ch_count = audio_capture_channels;
s->audio[s->audio_buf_idx].ch_count = d.ch_count;
s->audio[s->audio_buf_idx].data_len = 0;
s->audio[s->audio_buf_idx].max_size =
4 * audio_capture_channels * frame->sample_rate / 5; // 200 ms
4 * d.ch_count * frame->sample_rate / 5; // 200 ms
s->audio[s->audio_buf_idx].data = static_cast<char *>(malloc(s->audio[s->audio_buf_idx].max_size));
}
if (frame->no_channels > s->audio[s->audio_buf_idx].ch_count) {
LOG(LOG_LEVEL_WARNING) << MOD_NAME << "Requested " << s->audio[s->audio_buf_idx].ch_count << " channels, stream has only "
<< frame->no_channels << "!\n";
}
for (int i = 0; i < frame->no_samples; ++i) {
float *in = frame->p_data + i;
int32_t *out = (int32_t *) s->audio[s->audio_buf_idx].data + i * audio_capture_channels;
for (int j = 0; j < static_cast<int>(audio_capture_channels); ++j) {
int32_t *out = (int32_t *) s->audio[s->audio_buf_idx].data + i * d.ch_count;
for (int j = 0; j < max(d.ch_count, frame->no_channels); ++j) {
if (s->audio[s->audio_buf_idx].data_len >= s->audio[s->audio_buf_idx].max_size) {
LOG(LOG_LEVEL_WARNING) << "[NDI] Audio frame too small!\n";
return;

View File

@@ -50,6 +50,7 @@
#include "tv.h"
#include "audio/audio.h"
#include "audio/audio.h"
#include <stdio.h>
@@ -60,8 +61,10 @@
#include <Carbon/Carbon.h>
#define MAX_DISPLAY_COUNT 10
#define MOD_NAME "[screen cap mac] "
/* prototypes of functions defined in this module */
static void show_help(void);
static void vidcap_screen_osx_done(void *state);
@@ -70,9 +73,24 @@ static void show_help()
{
printf("Screen capture\n");
printf("Usage\n");
printf("\t-t screen[:fps=<fps>][:codec=<c>]\n");
printf("\t-t screen[:fps=<fps>][:codec=<c>][:display=<d>]\n");
printf("\t\t<fps> - preferred grabbing fps (otherwise unlimited)\n");
printf("\t\t <c> - requested codec to capture (RGB /default/ or RGBA)\n");
printf("\t\t <d> - display ID or \"primary\" or \"secondary\"\n");
printf("\n\nAvailable displays:\n");
CGDirectDisplayID screens[MAX_DISPLAY_COUNT];
uint32_t count = 0;
CGGetOnlineDisplayList(sizeof screens / sizeof screens[0], screens, &count);
for (unsigned int i = 0; i < count; ++i) {
char flags[128];
strcpy(flags, CGDisplayIsMain(screens[i]) ? "primary" : "secondary");
if (CGDisplayIsBuiltin(screens[i])) {
strncat(flags, ", builtin", sizeof flags - strlen(flags) - 1);
}
printf("\tID %u) %s\n", screens[i], flags);
}
}
struct vidcap_screen_osx_state {
@@ -88,14 +106,19 @@ struct vidcap_screen_osx_state {
bool initialized;
};
static void initialize(struct vidcap_screen_osx_state *s) {
s->display = CGMainDisplayID();
static bool initialize(struct vidcap_screen_osx_state *s) {
CGImageRef image = CGDisplayCreateImage(s->display);
if (image == NULL) {
log_msg(LOG_LEVEL_ERROR, MOD_NAME "Unable create image (wrong display ID?)\n");
return false;
}
s->desc.width = CGImageGetWidth(image);
s->desc.height = CGImageGetHeight(image);
CFRelease(image);
s->video_frame_pool = video_frame_pool_init(s->desc, 2);
return true;
}
static struct vidcap_type * vidcap_screen_osx_probe(bool verbose, void (**deleter)(void *))
@@ -161,14 +184,50 @@ static int vidcap_screen_osx_init(struct vidcap_params *params, void **state)
s->desc.fps = 30;
s->desc.interlacing = PROGRESSIVE;
if(vidcap_params_get_fmt(params)) {
s->display = CGMainDisplayID();
if (vidcap_params_get_fmt(params) && strlen(vidcap_params_get_fmt(params)) > 0) {
if (strcmp(vidcap_params_get_fmt(params), "help") == 0) {
show_help();
return VIDCAP_INIT_NOERR;
} else if (strncasecmp(vidcap_params_get_fmt(params), "fps=", strlen("fps=")) == 0) {
s->desc.fps = atof(vidcap_params_get_fmt(params) + strlen("fps="));
} else if (strncasecmp(vidcap_params_get_fmt(params), "codec=", strlen("codec=")) == 0) {
s->desc.color_spec = get_codec_from_name(vidcap_params_get_fmt(params) + strlen("codec="));
}
char *fmt = alloca(strlen(vidcap_params_get_fmt(params) + 1));
strcpy(fmt, vidcap_params_get_fmt(params));
char *save_ptr = NULL;
char *item = NULL;
while ((item = strtok_r(fmt, ":", &save_ptr)) != NULL) {
if (strncasecmp(item, "fps=", strlen("fps=")) == 0) {
s->desc.fps = atof(item + strlen("fps="));
} else if (strncasecmp(item, "codec=", strlen("codec=")) == 0) {
s->desc.color_spec = get_codec_from_name(item + strlen("codec="));
} else if (strncasecmp(item, "display=", strlen("display=")) == 0) {
char *display = item + strlen("display=");
if (strcasecmp(display, "secondary") == 0) {
CGDirectDisplayID screens[MAX_DISPLAY_COUNT];
uint32_t count = 0;
CGGetOnlineDisplayList(sizeof screens / sizeof screens[0], screens, &count);
uint32_t i = 0;
for (; i < count; ++i) {
if (!CGDisplayIsMain(screens[i])) {
s->display = screens[i];
break;
}
}
if (i == count) {
log_msg(LOG_LEVEL_ERROR, MOD_NAME "No secondary scren found!\n");
vidcap_screen_osx_done(s);
return VIDCAP_INIT_FAIL;
}
} if (strcasecmp(display, "primary") != 0) { // primary was already set
s->display = atol(display);
}
} else {
log_msg(LOG_LEVEL_ERROR, MOD_NAME "Unrecognized option \"%s\"\n", item);
vidcap_screen_osx_done(s);
return VIDCAP_INIT_FAIL;
}
fmt = NULL;
}
}
@@ -205,8 +264,10 @@ static struct video_frame * vidcap_screen_osx_grab(void *state, struct audio_fra
struct vidcap_screen_osx_state *s = (struct vidcap_screen_osx_state *) state;
if (!s->initialized) {
initialize(s);
s->initialized = true;
s->initialized = initialize(s);
if (!s->initialized) {
return NULL;
}
}
struct video_frame *frame = video_frame_pool_get_disposable_frame(s->video_frame_pool);

View File

@@ -47,6 +47,7 @@
#include "config_win32.h"
#endif /* HAVE_CONFIG_H */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
@@ -55,12 +56,21 @@
#include "host.h"
#include "lib_common.h"
#include "utils/color_out.h"
#include "utils/hresult.h"
#include "video.h"
#include "video_capture.h"
#include "video_capture_params.h"
#define MOD_NAME "[screen win] "
extern const struct video_capture_info vidcap_dshow_info;
struct vidcap_screen_win_state {
HMODULE screen_cap_lib;
bool filter_registered;
void *dshow_state;
};
static void show_help()
{
printf("Screen capture\n");
@@ -69,7 +79,6 @@ static void show_help()
color_out(COLOR_OUT_BOLD, "[:width=<w>][:height=<h>][:fps=<f>]\n");
}
static struct vidcap_type * vidcap_screen_win_probe(bool verbose, void (**deleter)(void *))
{
struct vidcap_type* vt;
@@ -106,7 +115,7 @@ static bool set_key(const char *key, int val)
}
}
DWORD val_dword = val;
if (RegSetValueExA(hKey, key, 0L, REG_DWORD, &val_dword, sizeof val_dword) != ERROR_SUCCESS) {
if (RegSetValueExA(hKey, key, 0L, REG_DWORD, (BYTE *) &val_dword, sizeof val_dword) != ERROR_SUCCESS) {
return false;
}
@@ -161,9 +170,33 @@ static bool vidcap_screen_win_process_params(const char *fmt)
return true;
}
typedef HRESULT __stdcall (*func)();
#define CHECK_NOT_NULL_EX(cmd, err_action) do { if ((cmd) == NULL) { log_msg(LOG_LEVEL_ERROR, "[screen] %s\n", #cmd); err_action; } } while(0)
#define CHECK_NOT_NULL(cmd) CHECK_NOT_NULL_EX(cmd, return VIDCAP_INIT_FAIL);
#define CHECK_NOT_NULL_EX(cmd, err_action) do { if ((cmd) == NULL) { log_msg(LOG_LEVEL_ERROR, MOD_NAME "%s\n", #cmd); err_action; } } while(0)
#define CHECK_NOT_NULL(cmd) CHECK_NOT_NULL_EX(cmd, return);
// Releases everything vidcap_screen_win_init may have acquired, in reverse
// order of acquisition: the wrapped DirectShow capture state, the registered
// screen-capture-recorder DirectShow filter, the loaded DLL, and finally the
// state struct itself. Safe to call on a partially-initialized state (each
// step is guarded), which is why init error paths funnel through here.
static void cleanup(struct vidcap_screen_win_state *s) {
assert(s != NULL);
if (s->dshow_state) {
// Tear down the underlying dshow capture this module delegates to.
vidcap_dshow_info.done(s->dshow_state);
}
if (s->filter_registered) {
func unregister_filter = NULL;
// CHECK_NOT_NULL only logs on failure here; the NULL test below still
// guards the call, so a missing export degrades gracefully.
CHECK_NOT_NULL(unregister_filter = (func)(void *) GetProcAddress(s->screen_cap_lib, "DllUnregisterServer"));
if (unregister_filter != NULL) {
unregister_filter();
}
}
if (s->screen_cap_lib) {
// Unload the library only after unregistering the filter it provides.
FreeLibrary(s->screen_cap_lib);
}
free(s);
}
#undef CHECK_NOT_NULL
#define CHECK_NOT_NULL(cmd) CHECK_NOT_NULL_EX(cmd, cleanup(s));
static int vidcap_screen_win_init(struct vidcap_params *params, void **state)
{
const char *cfg = vidcap_params_get_fmt(params);
@@ -177,39 +210,41 @@ static int vidcap_screen_win_init(struct vidcap_params *params, void **state)
return VIDCAP_INIT_FAIL;
}
HMODULE mod;
CHECK_NOT_NULL(mod = LoadLibraryA("screen-capture-recorder-x64.dll"));
typedef void (*func)();
struct vidcap_screen_win_state *s = calloc(1, sizeof *s);
CHECK_NOT_NULL(s->screen_cap_lib = LoadLibraryA("screen-capture-recorder-x64.dll"));
func register_filter;
CHECK_NOT_NULL(register_filter = (func) GetProcAddress(mod, "DllRegisterServer"));
register_filter();
FreeLibrary(mod);
CHECK_NOT_NULL(register_filter = (func)(void *) GetProcAddress(s->screen_cap_lib, "DllRegisterServer"));
HRESULT res = register_filter();
if (FAILED(res)) {
log_msg(LOG_LEVEL_ERROR, MOD_NAME "Register failed: %s\n", hresult_to_str(res));
cleanup(s);
return VIDCAP_INIT_FAIL;
}
s->filter_registered = true;
struct vidcap_params *params_dshow = vidcap_params_allocate();
vidcap_params_set_device(params_dshow, "dshow:device=screen-capture-recorder");
int ret = vidcap_dshow_info.init(params_dshow, state);
int ret = vidcap_dshow_info.init(params_dshow, &s->dshow_state);
if (ret != 0) {
log_msg(LOG_LEVEL_ERROR, MOD_NAME "DirectShow init failed: %d\n", ret);
cleanup(s);
return VIDCAP_INIT_FAIL;
}
vidcap_params_free_struct(params_dshow);
*state = s;
return ret;
}
#undef CHECK_NOT_NULL
#define CHECK_NOT_NULL(cmd) CHECK_NOT_NULL_EX(cmd, return);
static void vidcap_screen_win_done(void *state)
{
vidcap_dshow_info.done(state);
HMODULE mod;
CHECK_NOT_NULL(mod = LoadLibraryA("screen-capture-recorder-x64.dll"));
typedef void (*func)();
func unregister_filter;
CHECK_NOT_NULL(unregister_filter = (func) GetProcAddress(mod, "DllUnregisterServer"));
unregister_filter();
FreeLibrary(mod);
cleanup(state);
}
static struct video_frame * vidcap_screen_win_grab(void *state, struct audio_frame **audio)
{
return vidcap_dshow_info.grab(state, audio);
struct vidcap_screen_win_state *s = state;
return vidcap_dshow_info.grab(s->dshow_state, audio);
}
static const struct video_capture_info vidcap_screen_win_info = {

View File

@@ -85,7 +85,7 @@
#define AUDIO_BPS 2
#define BUFFER_SEC 1
#define AUDIO_BUFFER_SIZE (AUDIO_SAMPLE_RATE * AUDIO_BPS * \
audio_capture_channels * BUFFER_SEC)
s->audio.ch_count * BUFFER_SEC)
#define MOD_NAME "[testcard] "
constexpr video_desc default_format = { 1920, 1080, UYVY, 25.0, INTERLACED_MERGED, 1 };
constexpr size_t headroom = 128; // headroom for cases when dst color_spec has wider block size
@@ -316,10 +316,17 @@ static int configure_audio(struct testcard_state *s)
Mix_Music *music;
ssize_t bytes_written = 0l;
s->audio_data = (char *) calloc(1, AUDIO_BUFFER_SIZE /* 1 sec */);
s->audio_start = 0;
s->audio_end = 0;
s->audio.bps = AUDIO_BPS;
s->audio.ch_count = audio_capture_channels > 0 ? audio_capture_channels : DEFAULT_AUDIO_CAPTURE_CHANNELS;
s->audio.sample_rate = AUDIO_SAMPLE_RATE;
SDL_Init(SDL_INIT_AUDIO);
if( Mix_OpenAudio( AUDIO_SAMPLE_RATE, AUDIO_S16LSB,
audio_capture_channels, 4096 ) == -1 ) {
s->audio.ch_count, 4096 ) == -1 ) {
fprintf(stderr,"[testcard] error initalizing sound\n");
return -1;
}
@@ -340,13 +347,6 @@ static int configure_audio(struct testcard_state *s)
close(fd);
music = Mix_LoadMUS(filename);
s->audio_data = (char *) calloc(1, AUDIO_BUFFER_SIZE /* 1 sec */);
s->audio_start = 0;
s->audio_end = 0;
s->audio.bps = AUDIO_BPS;
s->audio.ch_count = audio_capture_channels;
s->audio.sample_rate = AUDIO_SAMPLE_RATE;
// register grab as a postmix processor
if(!Mix_RegisterEffect(MIX_CHANNEL_POST, grab_audio, NULL, s)) {
printf("[testcard] Mix_RegisterEffect: %s\n", Mix_GetError());

View File

@@ -67,7 +67,7 @@
#define AUDIO_BPS 2
#define BUFFER_SEC 1
#define AUDIO_BUFFER_SIZE (AUDIO_SAMPLE_RATE * AUDIO_BPS * \
audio_capture_channels * BUFFER_SEC)
s->audio.ch_count * BUFFER_SEC)
void * vidcap_testcard2_thread(void *args);
void rgb2yuv422(unsigned char *in, unsigned int width, unsigned int height);
@@ -100,22 +100,19 @@ struct testcard_state2 {
static int configure_audio(struct testcard_state2 *s)
{
int i;
s->audio.bps = AUDIO_BPS;
s->audio.ch_count = audio_capture_channels > 0 ? audio_capture_channels : DEFAULT_AUDIO_CAPTURE_CHANNELS;
s->audio.sample_rate = AUDIO_SAMPLE_RATE;
s->audio_silence = calloc(1, AUDIO_BUFFER_SIZE /* 1 sec */);
s->audio_tone = calloc(1, AUDIO_BUFFER_SIZE /* 1 sec */);
short int * data = (short int *) s->audio_tone;
for( i=0; i < (int) AUDIO_BUFFER_SIZE/2; i+=2 )
for(int i=0; i < (int) AUDIO_BUFFER_SIZE/2; i+=2 )
{
data[i] = data[i+1] = (float) sin( ((double)i/(double)200) * M_PI * 2. ) * SHRT_MAX;
}
s->audio.bps = AUDIO_BPS;
s->audio.ch_count = audio_capture_channels;
s->audio.sample_rate = AUDIO_SAMPLE_RATE;
printf("[testcard2] playing audio\n");
return 0;
@@ -499,7 +496,7 @@ static void grab_audio(struct testcard_state2 *s)
s->audio_remained = (seconds + s->audio_remained) * AUDIO_SAMPLE_RATE - s->audio.data_len;
s->audio_remained /= AUDIO_SAMPLE_RATE;
s->audio.data_len *= audio_capture_channels * AUDIO_BPS;
s->audio.data_len *= s->audio.ch_count * AUDIO_BPS;
s->last_audio_time = curr_time;
}

View File

@@ -2542,4 +2542,14 @@ void codec_get_planes_subsampling(codec_t pix_fmt, int *sub) {
}
}
// Returns true if the pixel format is a planar 4:2:0 format.
// NOTE(review): plane_info appears to hold per-plane subsampling factors in
// pairs — plane 0 at full resolution (1,1) and the two chroma planes halved
// in both dimensions (2,2) — matching codec_get_planes_subsampling above;
// confirm against the pixfmt_plane_info table definition.
bool codec_is_420(codec_t pix_fmt)
{
return pixfmt_plane_info[pix_fmt].plane_info[0] == 1 &&
pixfmt_plane_info[pix_fmt].plane_info[1] == 1 &&
pixfmt_plane_info[pix_fmt].plane_info[2] == 2 &&
pixfmt_plane_info[pix_fmt].plane_info[3] == 2 &&
pixfmt_plane_info[pix_fmt].plane_info[4] == 2 &&
pixfmt_plane_info[pix_fmt].plane_info[5] == 2;
}
/* vim: set expandtab sw=8: */

View File

@@ -96,6 +96,7 @@ int get_pf_block_size(codec_t codec) ATTRIBUTE(const);
int vc_get_linesize(unsigned int width, codec_t codec) ATTRIBUTE(const);
size_t vc_get_datalen(unsigned int width, unsigned int height, codec_t codec) ATTRIBUTE(const);
void codec_get_planes_subsampling(codec_t pix_fmt, int *sub);
bool codec_is_420(codec_t pix_fmt);
int codec_is_a_rgb(codec_t codec) ATTRIBUTE(const);
bool codec_is_in_set(codec_t codec, codec_t *set) ATTRIBUTE(const);
int codec_is_const_size(codec_t codec) ATTRIBUTE(const);

View File

@@ -52,6 +52,7 @@
#include "utils/video_frame_pool.h"
#include "video.h"
#include <algorithm>
#include <initializer_list>
#include <libgpujpeg/gpujpeg_encoder.h>
#include <libgpujpeg/gpujpeg_version.h>
@@ -62,25 +63,14 @@
#include <set>
#include <vector>
#if LIBGPUJPEG_API_VERSION < 11
#error "GPUJPEG API 10 or more requested!"
#endif
#define MOD_NAME "[GPUJPEG enc.] "
using namespace std;
#if LIBGPUJPEG_API_VERSION >= 7
#define GJ_RGBA_SUPP 1
#else
#define GJ_RGBA_SUPP 0
#endif
// compat
#if LIBGPUJPEG_API_VERSION <= 2
#define GPUJPEG_444_U8_P012 GPUJPEG_4_4_4
#define GPUJPEG_422_U8_P1020 GPUJPEG_4_2_2
#endif
#if LIBGPUJPEG_API_VERSION < 7
#define GPUJPEG_YCBCR_JPEG GPUJPEG_YCBCR_BT601_256LVLS
#endif
namespace {
struct state_video_compress_gpujpeg;
@@ -156,9 +146,7 @@ public:
int m_quality;
bool m_force_interleaved = false;
int m_subsampling = 0; // 444, 422 or 420; 0 -> autoselect
codec_t m_use_internal_codec = VIDEO_CODEC_NONE; // RGB or UYVY,
// VIDEO_CODEC_NONE
// if no preferrence
enum gpujpeg_color_space m_use_internal_codec = GPUJPEG_NONE; // requested internal codec
synchronized_queue<shared_ptr<struct video_frame>, 1> m_out_queue; ///< queue for compressed frames
mutex m_occupancy_lock;
@@ -261,7 +249,7 @@ bool encoder_state::configure_with(struct video_desc desc)
compressed_desc.color_spec = JPEG;
if (IS_I420(desc.color_spec)) {
if (m_parent_state->m_use_internal_codec == RGB ||
if ((m_parent_state->m_use_internal_codec != GPUJPEG_NONE && m_parent_state->m_use_internal_codec != GPUJPEG_YCBCR_BT709) ||
(m_parent_state->m_subsampling != 0 && m_parent_state->m_subsampling != 420)) {
log_msg(LOG_LEVEL_ERROR, MOD_NAME "Converting from planar pixel formats is "
"possible only without subsampling/color space change.\n");
@@ -297,7 +285,7 @@ bool encoder_state::configure_with(struct video_desc desc)
m_encoder_param.restart_interval = codec_is_a_rgb(m_enc_input_codec) ? 8 : 4;
}
m_encoder_param.verbose = 0;
m_encoder_param.verbose = max<int>(0, log_level - LOG_LEVEL_INFO);
m_encoder_param.segment_info = 1;
/* LUMA */
@@ -315,10 +303,11 @@ bool encoder_state::configure_with(struct video_desc desc)
m_encoder_param.sampling_factor[2].vertical = 1;
m_encoder_param.interleaved = (codec_is_a_rgb(m_enc_input_codec) && !m_parent_state->m_force_interleaved) ? 0 : 1;
if (m_parent_state->m_use_internal_codec == RGB ||
(codec_is_a_rgb(m_enc_input_codec) && !m_parent_state->m_use_internal_codec)) {
m_encoder_param.color_space_internal = GPUJPEG_RGB;
if (m_parent_state->m_use_internal_codec == GPUJPEG_NONE) {
m_encoder_param.color_space_internal = codec_is_a_rgb(m_enc_input_codec)
? GPUJPEG_RGB : GPUJPEG_YCBCR_BT709;
} else {
m_encoder_param.color_space_internal = m_parent_state->m_use_internal_codec;
}
gpujpeg_image_set_default_parameters(&m_param_image);
@@ -327,16 +316,13 @@ bool encoder_state::configure_with(struct video_desc desc)
m_param_image.height = desc.height;
m_param_image.comp_count = 3;
m_param_image.color_space = codec_is_a_rgb(m_enc_input_codec) ? GPUJPEG_RGB : (IS_I420(desc.color_spec) ? GPUJPEG_YCBCR_JPEG : GPUJPEG_YCBCR_BT709);
m_param_image.color_space = codec_is_a_rgb(m_enc_input_codec) ? GPUJPEG_RGB : GPUJPEG_YCBCR_BT709;
#if LIBGPUJPEG_API_VERSION > 2
switch (m_enc_input_codec) {
case CUDA_I420:
case I420: m_param_image.pixel_format = GPUJPEG_420_U8_P0P1P2; break;
case RGB: m_param_image.pixel_format = GPUJPEG_444_U8_P012; break;
#if GJ_RGBA_SUPP == 1
case RGBA: m_param_image.pixel_format = GPUJPEG_444_U8_P012Z; break;
#endif
case UYVY: m_param_image.pixel_format = GPUJPEG_422_U8_P1020; break;
default:
log_msg(LOG_LEVEL_FATAL, MOD_NAME "Unexpected codec: %s\n",
@@ -344,10 +330,6 @@ bool encoder_state::configure_with(struct video_desc desc)
abort();
}
m_encoder = gpujpeg_encoder_create(NULL);
#else
m_param_image.sampling_factor = m_enc_input_codec == RGB ? GPUJPEG_4_4_4 : GPUJPEG_4_2_2;
m_encoder = gpujpeg_encoder_create(&m_encoder_param, &m_param_image);
#endif
int data_len = desc.width * desc.height * 3;
m_pool.reconfigure(compressed_desc, data_len);
@@ -392,15 +374,14 @@ bool state_video_compress_gpujpeg::parse_fmt(char *fmt)
m_quality = atoi(tok + strlen("restart="));
} else if (strcasecmp(tok, "interleaved") == 0) {
m_force_interleaved = true;
} else if (strcasecmp(tok, "YUV") == 0) {
m_use_internal_codec = UYVY;
} else if (strcasecmp(tok, "Y601") == 0) {
m_use_internal_codec = GPUJPEG_YCBCR_BT601;
} else if (strcasecmp(tok, "Y601full") == 0) {
m_use_internal_codec = GPUJPEG_YCBCR_BT601_256LVLS;
} else if (strcasecmp(tok, "Y709") == 0) {
m_use_internal_codec = GPUJPEG_YCBCR_BT709;
} else if (strcasecmp(tok, "RGB") == 0) {
#if LIBGPUJPEG_API_VERSION >= 4
m_use_internal_codec = RGB;
#else
log_msg(LOG_LEVEL_ERROR, "[GPUJPEG] Cannot use RGB as an internal colorspace (old GPUJPEG).\n");
return false;
#endif
m_use_internal_codec = GPUJPEG_RGB;
} else if (strstr(tok, "subsampling=") == tok) {
m_subsampling = atoi(tok + strlen("subsampling="));
assert(set<int>({444, 422, 420}).count(m_subsampling) == 1);
@@ -468,17 +449,15 @@ state_video_compress_gpujpeg *state_video_compress_gpujpeg::create(struct module
struct module * gpujpeg_compress_init(struct module *parent, const char *opts)
{
#if LIBGPUJPEG_API_VERSION >= 7
if (gpujpeg_version() != LIBGPUJPEG_API_VERSION) {
LOG(LOG_LEVEL_WARNING) << "GPUJPEG API version mismatch! (" <<
gpujpeg_version() << " vs " << LIBGPUJPEG_API_VERSION << ")\n";
}
#endif
struct state_video_compress_gpujpeg *s;
if(opts && strcmp(opts, "help") == 0) {
cout << "GPUJPEG comperssion usage:\n";
cout << "\t" << BOLD(RED("-c GPUJPEG") << "[:<quality>[:<restart_interval>]][:interleaved][:RGB|:YUV][:subsampling=<sub>]\n");
cout << "\t" << BOLD(RED("-c GPUJPEG") << "[:<quality>[:<restart_interval>]][:interleaved][:RGB|Y601|Y601full|Y709]][:subsampling=<sub>]\n");
cout << "where\n";
cout << BOLD("\tquality\n") <<
"\t\tJPEG quality coefficient [0..100] - more is better\n";
@@ -493,9 +472,8 @@ struct module * gpujpeg_compress_init(struct module *parent, const char *opts)
"\t\tNon-interleaved has slightly better performance for RGB at the\n"
"\t\texpense of worse compatibility. Therefore this option may be\n"
"\t\tenabled safely.\n";
cout << BOLD("\tRGB|YUV\n") <<
"\t\tforce RGB or YUV as an internal JPEG color space (otherwise\n"
"\t\tsource color space is kept).\n";
cout << BOLD("\tRGB|Y601|Y601full|Y709\n") <<
"\t\tforce internal JPEG color space (otherwise source color space is kept).\n";
cout << BOLD("\t<sub>\n") <<
"\t\tUse specified JPEG subsampling (444, 422 or 420).\n";
cout << "\n";
@@ -584,11 +562,7 @@ shared_ptr<video_frame> encoder_state::compress_step(shared_ptr<video_frame> tx)
} else {
gpujpeg_encoder_input_set_image(&encoder_input, jpeg_enc_input_data);
}
#if LIBGPUJPEG_API_VERSION <= 2
ret = gpujpeg_encoder_encode(m_encoder, &encoder_input, &compressed, &size);
#else
ret = gpujpeg_encoder_encode(m_encoder, &m_encoder_param, &m_param_image, &encoder_input, &compressed, &size);
#endif
if(ret != 0) {
return {};

View File

@@ -262,14 +262,14 @@ static void print_codec_info(AVCodecID id, char *buf, size_t buflen)
if (strlen(enc) || strlen(dec)) {
strncat(buf, " (", buflen - strlen(buf) - 1);
if (strlen(enc)) {
strncat(buf, "enc:", buflen - strlen(buf) - 1);
strncat(buf, "encoders:", buflen - strlen(buf) - 1);
strncat(buf, enc, buflen - strlen(buf) - 1);
}
if (strlen(dec)) {
if (strlen(enc)) {
strncat(buf, ", ", buflen - strlen(buf) - 1);
}
strncat(buf, "dec:", buflen - strlen(buf) - 1);
strncat(buf, "decoders:", buflen - strlen(buf) - 1);
strncat(buf, dec, buflen - strlen(buf) - 1);
}
strncat(buf, ")", buflen - strlen(buf) - 1);
@@ -277,7 +277,7 @@ static void print_codec_info(AVCodecID id, char *buf, size_t buflen)
#elif LIBAVCODEC_VERSION_MAJOR >= 54
const AVCodec *codec;
if ((codec = avcodec_find_encoder(id))) {
strncpy(buf, " (enc:", buflen - 1);
strncpy(buf, " (encoders:", buflen - 1);
buf[buflen - 1] = '\0';
do {
if (av_codec_is_encoder(codec) && codec->id == id) {
@@ -293,7 +293,7 @@ static void print_codec_info(AVCodecID id, char *buf, size_t buflen)
} else {
strncat(buf, " (", buflen - strlen(buf) - 1);
}
strncat(buf, "dec:", buflen - strlen(buf) - 1);
strncat(buf, "decoders:", buflen - strlen(buf) - 1);
do {
if (av_codec_is_decoder(codec) && codec->id == id) {
strncat(buf, " ", buflen - strlen(buf) - 1);
@@ -841,10 +841,10 @@ static list<enum AVPixelFormat> get_available_pix_fmts(struct video_desc in_desc
bool is_rgb = codec_is_a_rgb(in_desc.color_spec);
int preferred_subsampling = requested_subsampling;
if (requested_subsampling == 0) {
if (in_desc.interlacing == INTERLACED_MERGED) {
preferred_subsampling = 422;
} else {
if (codec_is_420(in_desc.color_spec)) { /// @todo perhaps better would be take the subs. directly
preferred_subsampling = 420;
} else {
preferred_subsampling = 422;
}
}
// sort
@@ -937,7 +937,7 @@ static bool try_open_codec(struct state_video_compress_libav *s,
AVPixelFormat &pix_fmt,
struct video_desc desc,
codec_t ug_codec,
AVCodec *codec)
const AVCodec *codec)
{
// avcodec_alloc_context3 allocates context and sets default value
s->codec_ctx = avcodec_alloc_context3(codec);
@@ -967,6 +967,13 @@ static bool try_open_codec(struct state_video_compress_libav *s,
pix_fmt = AV_PIX_FMT_NV12;
}
#endif
if (const AVPixFmtDescriptor * desc = av_pix_fmt_desc_get(pix_fmt)) { // defaults
s->codec_ctx->colorspace = (desc->flags & AV_PIX_FMT_FLAG_RGB) != 0U ? AVCOL_SPC_RGB : AVCOL_SPC_BT709;
s->codec_ctx->color_range = (desc->flags & AV_PIX_FMT_FLAG_RGB) != 0U ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG;
}
get_av_pixfmt_details(ug_codec, pix_fmt, &s->codec_ctx->colorspace, &s->codec_ctx->color_range);
/* open it */
pthread_mutex_lock(s->lavcd_global_lock);
if (avcodec_open2(s->codec_ctx, codec, NULL) < 0) {
@@ -976,8 +983,8 @@ static bool try_open_codec(struct state_video_compress_libav *s,
pthread_mutex_unlock(s->lavcd_global_lock);
return false;
}
pthread_mutex_unlock(s->lavcd_global_lock);
return true;
}

View File

@@ -52,20 +52,8 @@
#include <stdlib.h>
#include "lib_common.h"
#if LIBGPUJPEG_API_VERSION >= 7
#define GJ_RGBA_SUPP 1
#else
#define GJ_RGBA_SUPP 0
#endif
// compat
#if LIBGPUJPEG_API_VERSION <= 2
#define GPUJPEG_444_U8_P012 GPUJPEG_4_4_4
#define GPUJPEG_422_U8_P1020 GPUJPEG_4_2_2
#endif
#if LIBGPUJPEG_API_VERSION < 9
#define GPUJPEG_444_U8_P012A GPUJPEG_444_U8_P012Z
#if LIBGPUJPEG_API_VERSION < 11
#error "GPUJPEG API 11 or more requested!"
#endif
#define MOD_NAME "[GPUJPEG dec.] "
@@ -85,27 +73,36 @@ static int configure_with(struct state_decompress_gpujpeg *s, struct video_desc
{
s->desc = desc;
#if LIBGPUJPEG_API_VERSION <= 2
s->decoder = gpujpeg_decoder_create();
#else
s->decoder = gpujpeg_decoder_create(NULL);
#endif
if(!s->decoder) {
return FALSE;
}
// setting verbosity - a bit tricky now, gpujpeg_decoder_init needs to be called with some "valid" data
// otherwise, parameter setting is unneeded - it is done automaticaly by the image
struct gpujpeg_parameters param;
gpujpeg_set_default_parameters(&param);
param.verbose = MAX(0, log_level - LOG_LEVEL_INFO);
struct gpujpeg_image_parameters param_image;
gpujpeg_image_set_default_parameters(&param_image);
param_image.width = desc.width; // size must be non-zero in order the init to succeed
param_image.height = desc.height;
param_image.color_space = GPUJPEG_YCBCR_BT709; // assume now BT.709 as default - this is mainly applicable for FFmpeg-encoded
// JPEGs that doesn't indicate explicitly color spec (no JFIF marker, only CS=ITU601
// for BT.601 limited range - not enabled by UG encoder because FFmpeg emits it also for 709)
int rc = gpujpeg_decoder_init(s->decoder, &param, &param_image);
assert(rc == 0);
switch (s->out_codec) {
case I420:
gpujpeg_decoder_set_output_format(s->decoder, GPUJPEG_YCBCR_JPEG,
gpujpeg_decoder_set_output_format(s->decoder, GPUJPEG_YCBCR_BT709,
GPUJPEG_420_U8_P0P1P2);
break;
case RGBA:
#if GJ_RGBA_SUPP == 1
gpujpeg_decoder_set_output_format(s->decoder, GPUJPEG_RGB,
s->out_codec == RGBA && s->rshift == 0 && s->gshift == 8 && s->bshift == 16 && vc_get_linesize(desc.width, RGBA) == s->pitch ?
GPUJPEG_444_U8_P012A : GPUJPEG_444_U8_P012);
break;
#endif
case RGB:
gpujpeg_decoder_set_output_format(s->decoder, GPUJPEG_RGB,
GPUJPEG_444_U8_P012);
@@ -125,15 +122,12 @@ static int configure_with(struct state_decompress_gpujpeg *s, struct video_desc
static void * gpujpeg_decompress_init(void)
{
#if LIBGPUJPEG_API_VERSION >= 7
if (gpujpeg_version() != LIBGPUJPEG_API_VERSION) {
log_msg(LOG_LEVEL_WARNING, "GPUJPEG API version mismatch! (%d vs %d)\n",
gpujpeg_version(), LIBGPUJPEG_API_VERSION);
}
#endif
struct state_decompress_gpujpeg *s;
s = (struct state_decompress_gpujpeg *) calloc(1, sizeof(struct state_decompress_gpujpeg));
struct state_decompress_gpujpeg *s = (struct state_decompress_gpujpeg *) calloc(1, sizeof(struct state_decompress_gpujpeg));
int ret;
printf("Initializing CUDA device %d...\n", cuda_devices[0]);
@@ -176,17 +170,10 @@ static int gpujpeg_decompress_reconfigure(void *state, struct video_desc desc,
}
}
#if LIBGPUJPEG_API_VERSION >= 4
static decompress_status gpujpeg_probe_internal_codec(unsigned char *buffer, size_t len, codec_t *internal_codec) {
*internal_codec = VIDEO_CODEC_NONE;
struct gpujpeg_image_parameters params = { 0 };
#if LIBGPUJPEG_API_VERSION >= 6
if (gpujpeg_decoder_get_image_info(buffer, len, &params, NULL) != 0) {
#elif LIBGPUJPEG_API_VERSION >= 5
if (gpujpeg_reader_get_image_info(buffer, len, &params, NULL) != 0) {
#else
if (gpujpeg_decoder_get_image_info(buffer, len, &params) != 0) {
#endif
if (gpujpeg_decoder_get_image_info(buffer, len, &params, NULL, MAX(0, log_level - LOG_LEVEL_INFO)) != 0) {
log_msg(LOG_LEVEL_WARNING, MOD_NAME "probe - cannot get image info!\n");
return DECODER_GOT_FRAME;
}
@@ -203,11 +190,7 @@ static decompress_status gpujpeg_probe_internal_codec(unsigned char *buffer, siz
case GPUJPEG_YCBCR_BT601:
case GPUJPEG_YCBCR_BT601_256LVLS:
case GPUJPEG_YCBCR_BT709:
#if LIBGPUJPEG_API_VERSION < 8
*internal_codec = UYVY;
#else
*internal_codec = params.pixel_format == GPUJPEG_420_U8_P0P1P2 ? I420 : UYVY;
#endif
break;
default:
log_msg(LOG_LEVEL_WARNING, MOD_NAME "probe - unhandled color space: %s\n",
@@ -218,7 +201,6 @@ static decompress_status gpujpeg_probe_internal_codec(unsigned char *buffer, siz
log_msg(LOG_LEVEL_VERBOSE, "JPEG color space: %s\n", gpujpeg_color_space_get_name(params.color_space));
return DECODER_GOT_CODEC;
}
#endif
static decompress_status gpujpeg_decompress(void *state, unsigned char *dst, unsigned char *buffer,
unsigned int src_len, int frame_seq, struct video_frame_callbacks *callbacks, codec_t *internal_codec)
@@ -231,11 +213,7 @@ static decompress_status gpujpeg_decompress(void *state, unsigned char *dst, uns
int linesize;
if (s->out_codec == VIDEO_CODEC_NONE) {
#if LIBGPUJPEG_API_VERSION >= 4
return gpujpeg_probe_internal_codec(buffer, src_len, internal_codec);
#else
assert("Old GPUJPEG, cannot probe!" && 0);
#endif
}
linesize = vc_get_linesize(s->desc.width, s->out_codec);
@@ -243,9 +221,7 @@ static decompress_status gpujpeg_decompress(void *state, unsigned char *dst, uns
gpujpeg_set_device(cuda_devices[0]);
if (s->pitch == linesize && (s->out_codec == UYVY || s->out_codec == RGB
#if GJ_RGBA_SUPP == 1
|| (s->out_codec == RGBA && s->rshift == 0 && s->gshift == 8 && s->bshift == 16)
#endif
)) {
gpujpeg_decoder_output_set_custom(&decoder_output, dst);
//int data_decompressed_size = decoder_output.data_size;
@@ -315,38 +291,33 @@ static void gpujpeg_decompress_done(void *state)
static const struct decode_from_to *gpujpeg_decompress_get_decoders() {
static const struct decode_from_to ret[] = {
#if LIBGPUJPEG_API_VERSION >= 4
{ JPEG, VIDEO_CODEC_NONE, VIDEO_CODEC_NONE, 50 },
#endif
{ JPEG, VIDEO_CODEC_NONE, VIDEO_CODEC_NONE, 50 }, // for probe
{ JPEG, RGB, RGB, 300 },
{ JPEG, RGB, RGBA, 300 + (1 - GJ_RGBA_SUPP) * 50 }, // 300 when GJ support RGBA natively,
// 350 when using CPU conversion
{ JPEG, RGB, RGBA, 300 },
{ JPEG, UYVY, UYVY, 300 },
{ JPEG, I420, I420, 300 },
{ JPEG, I420, UYVY, 500 },
{ JPEG, RGB, UYVY, 700 },
{ JPEG, UYVY, RGB, 700 },
{ JPEG, UYVY, RGBA, 700 + (1 - GJ_RGBA_SUPP) * 50},
{ JPEG, UYVY, RGBA, 700 },
{ JPEG, VIDEO_CODEC_NONE, RGB, 900 },
{ JPEG, VIDEO_CODEC_NONE, UYVY, 900 },
{ JPEG, VIDEO_CODEC_NONE, RGBA, 900 + (1 - GJ_RGBA_SUPP) * 50},
#if LIBGPUJPEG_API_VERSION > 6
{ JPEG, VIDEO_CODEC_NONE, RGBA, 900 },
// decoding from FFmpeg MJPG has lower priority than libavcodec
// decoder because those files doesn't has much independent
// segments (1 per MCU row -> 68 for HD) -> lavd may be better
{ MJPG, VIDEO_CODEC_NONE, VIDEO_CODEC_NONE, 90 },
{ MJPG, RGB, RGB, 600 },
{ MJPG, RGB, RGBA, 600 + (1 - GJ_RGBA_SUPP) * 50 },
{ MJPG, RGB, RGBA, 600 },
{ MJPG, UYVY, UYVY, 600 },
{ MJPG, I420, I420, 600 },
{ MJPG, I420, UYVY, 700 },
{ MJPG, RGB, UYVY, 800 },
{ MJPG, UYVY, RGB, 800 },
{ MJPG, UYVY, RGBA, 800 + (1 - GJ_RGBA_SUPP) * 50},
{ MJPG, UYVY, RGBA, 800 },
{ MJPG, VIDEO_CODEC_NONE, RGB, 920 },
{ MJPG, VIDEO_CODEC_NONE, UYVY, 920 },
{ MJPG, VIDEO_CODEC_NONE, RGBA, 920 + (1 - GJ_RGBA_SUPP) * 50},
#endif
{ MJPG, VIDEO_CODEC_NONE, RGBA, 920 },
{ VIDEO_CODEC_NONE, VIDEO_CODEC_NONE, VIDEO_CODEC_NONE, 0 },
};
return ret;

View File

@@ -607,6 +607,7 @@ static bool lavd_sws_convert(struct state_libavcodec_decompress_sws *sws, enum A
static int change_pixfmt(AVFrame *frame, unsigned char *dst, int av_codec, codec_t out_codec, int width, int height,
int pitch, int rgb_shift[static restrict 3], struct state_libavcodec_decompress_sws *sws) {
av_to_uv_convert_p convert = NULL;
for (const struct av_to_uv_conversion *c = get_av_to_uv_conversions(); c->uv_codec != VIDEO_CODEC_NONE; c++) {
if (c->av_codec == av_codec && c->uv_codec == out_codec) {
convert = c->convert;
@@ -724,7 +725,8 @@ static decompress_status libavcodec_decompress(void *state, unsigned char *dst,
}
if(got_frame) {
log_msg(LOG_LEVEL_DEBUG, "[lavd] Decompressing %c frame took %f sec.\n", av_get_picture_type_char(s->frame->pict_type), tv_diff(t1, t0));
struct timeval t3;
gettimeofday(&t3, NULL);
s->frame->opaque = callbacks;
/* Skip the frame if this is not an I-frame
@@ -758,6 +760,9 @@ static decompress_status libavcodec_decompress(void *state, unsigned char *dst,
res = DECODER_GOT_FRAME;
}
}
struct timeval t4;
gettimeofday(&t4, NULL);
log_msg(LOG_LEVEL_DEBUG, MOD_NAME "Decompressing %c frame took %f sec, pixfmt change %f s.\n", av_get_picture_type_char(s->frame->pict_type), tv_diff(t1, t0), tv_diff(t4, t3));
}
if (len <= 0) {

View File

@@ -103,8 +103,8 @@ void list_video_display_devices(bool full)
/**
* @brief Initializes video display.
* @param[in] id video display identifier that will be initialized
* @param[in] fmt command-line entered format string
* @param[in] requested_display video display module name, not NULL
* @param[in] fmt command-line entered format string, not NULL
* @param[in] flags bit sum of @ref display_flags
* @param[out] state output display state. Defined only if initialization was successful.
* @retval 0 if sucessful
@@ -114,6 +114,8 @@ void list_video_display_devices(bool full)
int initialize_video_display(struct module *parent, const char *requested_display,
const char *fmt, unsigned int flags, const char *postprocess, struct display **out)
{
assert (requested_display != NULL && fmt != NULL && out != NULL);
if (postprocess && (strcmp(postprocess, "help") == 0 || strcmp(postprocess, "fullhelp") == 0)) {
show_vo_postprocess_help(strcmp(postprocess, "fullhelp") == 0);
return 1;

View File

@@ -174,8 +174,8 @@ extern int display_init_noerr;
void list_video_display_devices(bool full);
int initialize_video_display(struct module *parent,
const char *requested_display, const char *fmt, unsigned int flags,
const char *postprocess, struct display **out);
/* not_null */ const char *requested_display, /* not_null */ const char *fmt,
unsigned int flags, const char *postprocess, /* not_null */ struct display **out);
bool display_needs_mainloop(struct display *d);
void display_run(struct display *d);
void display_join(struct display *d);

View File

@@ -139,7 +139,7 @@ static void *display_aggregate_init(struct module *parent, const char *fmt, unsi
while((item = strtok_r(tmp, "#", &save_ptr))) {
char *device;
char *config = strdup(item);
char *device_cfg = NULL;
const char *device_cfg = "";
unsigned int dev_flags = 0u;
device = config;
if(strchr(config, ':')) {
@@ -398,5 +398,5 @@ static const struct video_display_info display_aggregate_info = {
DISPLAY_DOESNT_NEED_MAINLOOP,
};
REGISTER_MODULE(aggregate, &display_aggregate_info, LIBRARY_CLASS_VIDEO_DISPLAY, VIDEO_DISPLAY_ABI_VERSION);
REGISTER_HIDDEN_MODULE(aggregate, &display_aggregate_info, LIBRARY_CLASS_VIDEO_DISPLAY, VIDEO_DISPLAY_ABI_VERSION);

View File

@@ -717,5 +717,5 @@ static const struct video_display_info display_conference_info = {
DISPLAY_DOESNT_NEED_MAINLOOP,
};
REGISTER_MODULE(conference, &display_conference_info, LIBRARY_CLASS_VIDEO_DISPLAY, VIDEO_DISPLAY_ABI_VERSION);
REGISTER_HIDDEN_MODULE(conference, &display_conference_info, LIBRARY_CLASS_VIDEO_DISPLAY, VIDEO_DISPLAY_ABI_VERSION);

View File

@@ -1741,7 +1741,9 @@ void HDRMetadata::Init(const string &fmt) {
auto opts = unique_ptr<char []>(new char [fmt.size() + 1]);
strcpy(opts.get(), fmt.c_str());
char *save_ptr = nullptr;
string mode = strtok_r(opts.get(), ",", &save_ptr);
char *mode_c = strtok_r(opts.get(), ",", &save_ptr);
assert(mode_c != nullptr);
string mode = mode_c;
std::for_each(std::begin(mode), std::end(mode), [](char& c) {
c = static_cast<char>(std::toupper(static_cast<unsigned char>(c)));
});

View File

@@ -58,6 +58,7 @@
using namespace std;
static constexpr unsigned int IN_QUEUE_MAX_BUFFER_LEN = 5;
static constexpr const char *MOD_NAME = "[multiplier] ";
static constexpr int SKIP_FIRST_N_FRAMES_IN_STREAM = 5;
struct sub_display {
@@ -102,8 +103,6 @@ static void *display_multiplier_init(struct module *parent, const char *fmt, uns
{
struct state_multiplier *s;
char *fmt_copy = NULL;
const char *requested_display = NULL;
const char *cfg = NULL;
s = new state_multiplier();
@@ -113,7 +112,7 @@ static void *display_multiplier_init(struct module *parent, const char *fmt, uns
delete s;
return &display_init_noerr;
}
if (isdigit(fmt[0])) { // fork
struct state_multiplier *orig;
sscanf(fmt, "%p", &orig);
@@ -133,9 +132,9 @@ static void *display_multiplier_init(struct module *parent, const char *fmt, uns
char *saveptr;
for(char *token = strtok_r(fmt_copy, "#", &saveptr); token; token = strtok_r(NULL, "#", &saveptr)){
requested_display = token;
printf("%s\n", token);
cfg = NULL;
LOG(LOG_LEVEL_VERBOSE) << MOD_NAME << "Initializing display " << token << "\n";
const char *requested_display = token;
const char *cfg = "";
char *delim = strchr(token, ':');
if (delim) {
*delim = '\0';
@@ -347,5 +346,5 @@ static const struct video_display_info display_multiplier_info = {
display_multiplier_needs_mainloop,
};
REGISTER_MODULE(multiplier, &display_multiplier_info, LIBRARY_CLASS_VIDEO_DISPLAY, VIDEO_DISPLAY_ABI_VERSION);
REGISTER_HIDDEN_MODULE(multiplier, &display_multiplier_info, LIBRARY_CLASS_VIDEO_DISPLAY, VIDEO_DISPLAY_ABI_VERSION);

View File

@@ -282,5 +282,5 @@ static const struct video_display_info display_pipe_info = {
DISPLAY_DOESNT_NEED_MAINLOOP,
};
REGISTER_MODULE(pipe, &display_pipe_info, LIBRARY_CLASS_VIDEO_DISPLAY, VIDEO_DISPLAY_ABI_VERSION);
REGISTER_HIDDEN_MODULE(pipe, &display_pipe_info, LIBRARY_CLASS_VIDEO_DISPLAY, VIDEO_DISPLAY_ABI_VERSION);

View File

@@ -292,5 +292,5 @@ static const struct video_display_info display_preview_info = {
DISPLAY_DOESNT_NEED_MAINLOOP,
};
REGISTER_MODULE(preview, &display_preview_info, LIBRARY_CLASS_VIDEO_DISPLAY, VIDEO_DISPLAY_ABI_VERSION);
REGISTER_HIDDEN_MODULE(preview, &display_preview_info, LIBRARY_CLASS_VIDEO_DISPLAY, VIDEO_DISPLAY_ABI_VERSION);

View File

@@ -51,6 +51,7 @@
#include <cinttypes>
#include <condition_variable>
#include <chrono>
#include <iostream>
#include <list>
#include <map>
#include <memory>
@@ -124,29 +125,31 @@ static void *display_run_worker(void *arg) {
static void *display_proxy_init(struct module *parent, const char *fmt, unsigned int flags)
{
struct state_proxy *s;
char *fmt_copy = NULL;
const char *requested_display = "gl";
const char *cfg = NULL;
const char *requested_display = "";
const char *cfg = "";
int ret;
s = new state_proxy();
if (fmt == nullptr || strlen(fmt) == 0 || "help"s == fmt) {
cout << "Proxy is a helper display to combine (blend) multiple incoming streams.\n"
"Please do not use directly, intended for internal purposes!\n";
return nullptr;
}
if (fmt && strlen(fmt) > 0) {
if (isdigit(fmt[0])) { // fork
struct state_proxy *orig;
sscanf(fmt, "%p", &orig);
s->common = orig->common;
return s;
} else {
fmt_copy = strdup(fmt);
requested_display = fmt_copy;
char *delim = strchr(fmt_copy, ':');
if (delim) {
*delim = '\0';
cfg = delim + 1;
}
}
auto *s = new state_proxy();
if (isdigit(fmt[0]) != 0) { // fork
struct state_proxy *orig = nullptr;
sscanf(fmt, "%p", &orig);
s->common = orig->common;
return s;
}
fmt_copy = strdup(fmt);
requested_display = fmt_copy;
char *delim = strchr(fmt_copy, ':');
if (delim != nullptr) {
*delim = '\0';
cfg = delim + 1;
}
s->common = shared_ptr<state_proxy_common>(new state_proxy_common());
ret = initialize_video_display(parent, requested_display, cfg, flags, NULL, &s->common->real_display);
@@ -426,5 +429,5 @@ static const struct video_display_info display_proxy_info = {
DISPLAY_DOESNT_NEED_MAINLOOP,
};
REGISTER_MODULE(proxy, &display_proxy_info, LIBRARY_CLASS_VIDEO_DISPLAY, VIDEO_DISPLAY_ABI_VERSION);
REGISTER_HIDDEN_MODULE(proxy, &display_proxy_info, LIBRARY_CLASS_VIDEO_DISPLAY, VIDEO_DISPLAY_ABI_VERSION);

View File

@@ -0,0 +1,202 @@
#ifdef HAVE_CONFIG_H
#include "config.h"
#include "config_unix.h"
#include "config_win32.h"
#endif
#if defined HAVE_CPPUNIT && defined HAVE_LAVC
#include <algorithm>
#include <array>
#include <cppunit/config/SourcePrefix.h>
#include <iostream>
#include <random>
#include <string>
#include <vector>
#include "ff_codec_conversions_test.h"
#include "libavcodec_common.h"
#include "tv.h"
#include "video_capture/testcard_common.h"
#include "video_codec.h"
using std::array;
using std::copy;
using std::cout;
using std::default_random_engine;
using std::max;
using std::to_string;
using std::vector;
// Registers the fixture into the 'registry'
CPPUNIT_TEST_SUITE_REGISTRATION( ff_codec_conversions_test );
// Out-of-line constructor/destructor for the fixture. The class owns no
// resources, so the compiler-generated special members are sufficient.
ff_codec_conversions_test::ff_codec_conversions_test() = default;

ff_codec_conversions_test::~ff_codec_conversions_test() = default;
// CppUnit per-test setup hook — this fixture keeps no shared state,
// so there is nothing to initialize.
void
ff_codec_conversions_test::setUp()
{
}
// CppUnit per-test teardown hook — nothing to release.
void
ff_codec_conversions_test::tearDown()
{
}
#define TIMER(t) struct timeval t{}; gettimeofday(&(t), nullptr)
void
ff_codec_conversions_test::test_yuv444p16le_from_to_r10k()
{
using namespace std::string_literals;
constexpr int width = 1920;
constexpr int height = 1080;
vector <unsigned char> rgba_buf(width * height * 4);
/// @todo Use 10-bit natively
auto test_pattern = [&] {
vector <unsigned char> r10k_buf(width * height * 4);
copy(rgba_buf.begin(), rgba_buf.end(), r10k_buf.begin());
toR10k(r10k_buf.data(), width, height);
AVFrame frame;
frame.format = AV_PIX_FMT_YUV444P16LE;
frame.width = width;
frame.height = height;
/* the image can be allocated by any means and av_image_alloc() is
* just the most convenient way if av_malloc() is to be used */
assert(av_image_alloc(frame.data, frame.linesize,
width, height, (AVPixelFormat) frame.format, 32) >= 0);
auto from_conv = get_uv_to_av_conversion(R10k, frame.format);
auto to_conv = get_av_to_uv_conversion(frame.format, R10k);
assert(to_conv != nullptr && from_conv != nullptr);
TIMER(t0);
from_conv(&frame, r10k_buf.data(), width, height);
TIMER(t1);
to_conv(reinterpret_cast<char*>(r10k_buf.data()), &frame, width, height, vc_get_linesize(width, R10k), nullptr);
TIMER(t2);
if (getenv("PERF") != nullptr) {
cout << "test_yuv444p16le_from_to_r10k: duration - enc " << tv_diff(t1, t0) << ", dec " <<tv_diff(t2, t1) << "\n";
}
av_freep(frame.data);
vector <unsigned char> rgba_buf_res(width * height * 4);
vc_copyliner10k(rgba_buf_res.data(), r10k_buf.data(), height * vc_get_linesize(width, RGBA), 0, 8, 16);
int max_diff = 0;
for (size_t i = 0; i < width * height; ++i) {
for (int j = 0; j < 3; ++j) {
max_diff = max<int>(max_diff, abs(rgba_buf[4 * i + j] - rgba_buf_res[4 * i + j]));
//fprintf(stderr, "%d %d\n", (int) rgba_buf[4 * i + j], (int) rgba_buf_res[4 * i + j]);
}
//fprintf(stderr, "R in 10 bits = %d\n", (int) (r10k_buf[4 * i] << 2) + (r10k_buf[4 * i + 1] >> 6));
}
if (getenv("DEBUG_DUMP") != nullptr) {
FILE *out = fopen("out.rgba","w");
fwrite(rgba_buf_res.data(), width * height * 4, 1, out);
fclose(out);
}
CPPUNIT_ASSERT_MESSAGE("Maximal allowed difference 1, found "s + to_string(max_diff), max_diff <= 1);
};
int i = 0;
for_each(rgba_buf.begin(), rgba_buf.end(), [&](unsigned char & c) { c = (i++ / 4) % 0x100; });
test_pattern();
array<unsigned char, 4> pattern{ 0xFFU, 0, 0, 0xFFU };
for_each(rgba_buf.begin(), rgba_buf.end(), [&](unsigned char & c) { c = pattern[i++ % 4]; });
test_pattern();
default_random_engine rand_gen;
for_each(rgba_buf.begin(), rgba_buf.end(), [&](unsigned char & c) { c = rand_gen() % 0x100; });
test_pattern();
}
void
ff_codec_conversions_test::test_yuv444p16le_from_to_r12l()
{
using namespace std::string_literals;
constexpr int width = 1920;
constexpr int height = 1080;
vector <unsigned char> rgb_buf(width * height * 3);
/// @todo Use 12-bit natively
auto test_pattern = [&] {
vector <unsigned char> r12l_buf(vc_get_datalen(width, height, R12L));
decoder_t rgb_to_r12l = get_decoder_from_to(RGB, R12L, true);
rgb_to_r12l(r12l_buf.data(), rgb_buf.data(), vc_get_datalen(width, height, R12L), 0, 8, 16);
AVFrame frame;
frame.format = AV_PIX_FMT_YUV444P16LE;
frame.width = width;
frame.height = height;
/* the image can be allocated by any means and av_image_alloc() is
* just the most convenient way if av_malloc() is to be used */
assert(av_image_alloc(frame.data, frame.linesize,
width, height, (AVPixelFormat) frame.format, 32) >= 0);
auto from_conv = get_uv_to_av_conversion(R12L, frame.format);
auto to_conv = get_av_to_uv_conversion(frame.format, R12L);
assert(to_conv != nullptr && from_conv != nullptr);
TIMER(t0);
from_conv(&frame, r12l_buf.data(), width, height);
TIMER(t1);
to_conv(reinterpret_cast<char*>(r12l_buf.data()), &frame, width, height, vc_get_linesize(width, R12L), nullptr);
TIMER(t2);
if (getenv("PERF") != nullptr) {
cout << "test_yuv444p16le_from_to_r12l: duration - enc " << tv_diff(t1, t0) << ", dec " <<tv_diff(t2, t1) << "\n";
}
av_freep(frame.data);
vector <unsigned char> rgb_buf_res(width * height * 3);
decoder_t r12l_to_rgb = get_decoder_from_to(R12L, RGB, true);
r12l_to_rgb(rgb_buf_res.data(), r12l_buf.data(), vc_get_datalen(width, height, RGB), 0, 8, 16);
int max_diff = 0;
for (size_t i = 0; i < width * height; ++i) {
for (int j = 0; j < 3; ++j) {
max_diff = max<int>(max_diff, abs(rgb_buf[3 * i + j] - rgb_buf_res[3 * i + j]));
}
}
if (getenv("DEBUG_DUMP") != nullptr) {
FILE *out = fopen("out.rgb","w");
fwrite(rgb_buf_res.data(), width * height * 3, 1, out);
fclose(out);
}
CPPUNIT_ASSERT_MESSAGE("Maximal allowed difference 1, found "s + to_string(max_diff), max_diff <= 1);
};
int i = 0;
array<unsigned char, 3> pattern{ 0xFFU, 0, 0 };
for_each(rgb_buf.begin(), rgb_buf.end(), [&](unsigned char & c) { c = pattern[i++ % 3]; });
test_pattern();
for_each(rgb_buf.begin(), rgb_buf.end(), [&](unsigned char & c) { c = (i++ / 3) % 0x100; });
test_pattern();
default_random_engine rand_gen;
for_each(rgb_buf.begin(), rgb_buf.end(), [&](unsigned char & c) { c = rand_gen() % 0x100; });
test_pattern();
}
#endif // defined HAVE_CPPUNIT && HAVE_LAVC

View File

@@ -0,0 +1,29 @@
#ifndef FF_CODEC_CONVERSIONS_TEST_H_277D34B0_7056_45BF_9A47_EA2AD1DEA846
#define FF_CODEC_CONVERSIONS_TEST_H_277D34B0_7056_45BF_9A47_EA2AD1DEA846
#include "config.h"
#ifdef HAVE_LAVC
#include <cppunit/extensions/HelperMacros.h>
/**
 * CppUnit fixture exercising round-trip pixel-format conversions between
 * UltraGrid codecs (R10k, R12L) and FFmpeg's AV_PIX_FMT_YUV444P16LE.
 */
class ff_codec_conversions_test : public CPPUNIT_NS::TestFixture
{
        // register the individual test cases with CppUnit
        CPPUNIT_TEST_SUITE( ff_codec_conversions_test );
        CPPUNIT_TEST( test_yuv444p16le_from_to_r10k );
        CPPUNIT_TEST( test_yuv444p16le_from_to_r12l );
        CPPUNIT_TEST_SUITE_END();
public:
        ff_codec_conversions_test();
        ~ff_codec_conversions_test();
        void setUp();    // per-test initialization (currently empty)
        void tearDown(); // per-test cleanup (currently empty)
        void test_yuv444p16le_from_to_r10k(); // R10k <-> yuv444p16le round trip
        void test_yuv444p16le_from_to_r12l(); // R12L <-> yuv444p16le round trip
};
#endif // defined HAVE_LAVC
#endif // defined FF_CODEC_CONVERSIONS_TEST_H_277D34B0_7056_45BF_9A47_EA2AD1DEA846

View File

@@ -47,6 +47,7 @@
#include <cppunit/extensions/TestFactoryRegistry.h>
#include <cppunit/ui/text/TestRunner.h>
#endif
#include <iostream>
#include "debug.h"
#include "host.h"
@@ -65,6 +66,8 @@ extern "C" {
#include "test_video_display.h"
}
using std::clog;
#define TEST_AV_HW 1
/* These globals should be fixed in the future as well */
@@ -84,13 +87,9 @@ void exit_uv(int status)
exit(status);
}
int main(int argc, char **argv)
static bool run_standard_tests()
{
bool success = true;
struct init_data *init;
if ((init = common_preinit(argc, argv)) == NULL) {
return 2;
}
if (test_bitstream() != 0)
success = false;
@@ -118,8 +117,13 @@ int main(int argc, char **argv)
success = false;
#endif
return success;
}
static bool run_unit_tests()
{
#ifdef HAVE_CPPUNIT
printf("Running CppUnit tests:\n");
std::clog << "Running CppUnit tests:\n";
// Get the top level suite from the registry
CPPUNIT_NS::Test *suite = CPPUNIT_NS::TestFactoryRegistry::getRegistry().makeTest();
@@ -131,10 +135,36 @@ int main(int argc, char **argv)
runner.setOutputter( new CPPUNIT_NS::CompilerOutputter( &runner.result(),
CPPUNIT_NS::stdCOut() ) );
// Run the test.
success = runner.run() && success;
#else
printf("CppUnit was not found, skipping CppUnit tests!\n");
return runner.run();
#endif
std::clog << "CppUnit was not found, skipping CppUnit tests!\n";
return true;
}
int main(int argc, char **argv)
{
struct init_data *init = nullptr;
if ((init = common_preinit(argc, argv)) == nullptr) {
return 2;
}
bool run_standard = true;
bool run_unit = true;
if (argc == 2) {
run_standard = run_unit = false;
if (strcmp("unit", argv[1]) == 0) {
run_unit = true;
}
if (strcmp("standard", argv[1]) == 0) {
run_standard = true;
}
if (strcmp("all", argv[1]) == 0) {
run_standard = run_unit = true;
}
}
bool success = (run_standard ? run_standard_tests() : true);
success = (run_unit ? run_unit_tests() : true) && success;
common_cleanup(init);

View File

@@ -49,7 +49,7 @@ int test_video_display(void)
printf
("Testing video hardware detection ......................................... ");
if (initialize_video_display(NULL, "none", NULL, 0, NULL, &d) != 0) {
if (initialize_video_display(NULL, "none", "", 0, NULL, &d) != 0) {
printf("FAIL\n");
printf(" Cannot intiialize dummy device\n");
return 1;