Refactor camera code to support different camera handlers

I split this off of the SDL camera PR due to rebase conflicts.
It can also be seen as a partial integration test.
Megamouse 2026-01-01 07:42:25 +01:00
parent cfbc416521
commit ae72de3881
15 changed files with 496 additions and 355 deletions

View file

@@ -36,32 +36,38 @@ void cfg_camera::save() const
}
}
cfg_camera::camera_setting cfg_camera::get_camera_setting(std::string_view camera, bool& success)
cfg_camera::camera_setting cfg_camera::get_camera_setting(std::string_view handler, std::string_view camera, bool& success)
{
camera_setting setting;
const std::string value = cameras.get_value(camera);
camera_setting setting {};
const std::string value = cameras.get_value(fmt::format("%s-%s", handler, camera));
success = !value.empty();
if (success)
{
setting.from_string(cameras.get_value(camera));
setting.from_string(value);
}
return setting;
}
void cfg_camera::set_camera_setting(const std::string& camera, const camera_setting& setting)
void cfg_camera::set_camera_setting(std::string_view handler, std::string_view camera, const camera_setting& setting)
{
if (handler.empty())
{
camera_log.error("String '%s' cannot be used as handler key.", handler);
return;
}
if (camera.empty())
{
camera_log.error("String '%s' cannot be used as camera key.", camera);
return;
}
cameras.set_value(camera, setting.to_string());
cameras.set_value(fmt::format("%s-%s", handler, camera), setting.to_string());
}
std::string cfg_camera::camera_setting::to_string() const
{
return fmt::format("%d,%d,%f,%f,%d", width, height, min_fps, max_fps, format);
return fmt::format("%d,%d,%f,%f,%d,%d", width, height, min_fps, max_fps, format, colorspace);
}
void cfg_camera::camera_setting::from_string(std::string_view text)
@@ -102,16 +108,19 @@ void cfg_camera::camera_setting::from_string(std::string_view text)
return true;
};
if (!to_integer(::at32(list, 0), width) ||
!to_integer(::at32(list, 1), height) ||
!to_double(::at32(list, 2), min_fps) ||
!to_double(::at32(list, 3), max_fps) ||
!to_integer(::at32(list, 4), format))
usz pos = 0;
if (!to_integer(::at32(list, pos++), width) ||
!to_integer(::at32(list, pos++), height) ||
!to_double(::at32(list, pos++), min_fps) ||
!to_double(::at32(list, pos++), max_fps) ||
!to_integer(::at32(list, pos++), format) ||
!to_integer(::at32(list, pos++), colorspace))
{
width = 0;
height = 0;
min_fps = 0;
max_fps = 0;
format = 0;
colorspace = 0;
}
}
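For reference, here is a minimal round-trip sketch of the extended value format (hypothetical values; it assumes the cfg_camera declarations from camera_config.h shown in the next file):

#include "stdafx.h"
#include "Emu/Io/camera_config.h"

// Sketch only: serialize and re-parse one setting with the new sixth field.
static void camera_setting_roundtrip_sketch()
{
    cfg_camera::camera_setting setting {};
    setting.width = 640;
    setting.height = 480;
    setting.min_fps = 30.0;
    setting.max_fps = 60.0;
    setting.format = 2; // CELL_CAMERA_RAW8
    setting.colorspace = 0;

    // Yields six comma-separated fields, e.g. "640,480,30.000000,60.000000,2,0"
    // (the exact float spelling depends on the fmt implementation).
    const std::string text = setting.to_string();

    cfg_camera::camera_setting parsed {};
    parsed.from_string(text); // a parse failure zeroes every member, including colorspace
}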

View file

@@ -15,18 +15,20 @@ struct cfg_camera final : cfg::node
double min_fps = 0;
double max_fps = 0;
int format = 0;
int colorspace = 0;
static constexpr u32 member_count = 5;
static constexpr u32 member_count = 6;
std::string to_string() const;
void from_string(std::string_view text);
};
camera_setting get_camera_setting(std::string_view camera, bool& success);
void set_camera_setting(const std::string& camera, const camera_setting& setting);
camera_setting get_camera_setting(std::string_view handler, std::string_view camera, bool& success);
void set_camera_setting(std::string_view handler, std::string_view camera, const camera_setting& setting);
const std::string path;
cfg::map_entry cameras{ this, "Cameras" }; // <camera>: <width>,<height>,<min_fps>,<max_fps>,<format>
cfg::map_entry cameras{ this, "Cameras" }; // <handler-camera>: <width>,<height>,<min_fps>,<max_fps>,<format>,<colorspace>
};
extern cfg_camera g_cfg_camera;
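Settings are now keyed by handler and camera id combined, so the same device can carry separate entries per handler. A hedged usage sketch of the new accessors (hypothetical camera id; the handler string is whatever the camera_handler formatter produces):

#include "stdafx.h"
#include "Emu/Io/camera_config.h"
#include "Emu/system_config_types.h" // camera_handler

// Sketch only: read-modify-write a per-handler entry.
static void camera_config_usage_sketch()
{
    const std::string handler = fmt::format("%s", camera_handler::qt); // handler part of the key
    const std::string camera_id = "usb-cam-0";                         // hypothetical device id

    bool success = false;
    cfg_camera::camera_setting setting = g_cfg_camera.get_camera_setting(handler, camera_id, success);

    if (!success)
    {
        // No entry yet under the combined "<handler>-<camera>" key
        setting.width = 640;
        setting.height = 480;
    }

    // Stored as fmt::format("%s-%s", handler, camera_id) in the "Cameras" map
    g_cfg_camera.set_camera_setting(handler, camera_id, setting);
}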

View file

@@ -0,0 +1,245 @@
#include "stdafx.h"
#include "camera_video_sink.h"
#include "Emu/Cell/Modules/cellCamera.h"
#include "Emu/system_config.h"
LOG_CHANNEL(camera_log, "Camera");
camera_video_sink::camera_video_sink(bool front_facing)
: m_front_facing(front_facing)
{
}
camera_video_sink::~camera_video_sink()
{
}
bool camera_video_sink::present(u32 src_width, u32 src_height, u32 src_pitch, u32 src_bytes_per_pixel, std::function<const u8*(u32)> src_line_ptr)
{
ensure(!!src_line_ptr);
const u64 new_size = m_bytesize;
image_buffer& image_buffer = m_image_buffer[m_write_index];
// Reset buffer if necessary
if (image_buffer.data.size() != new_size)
{
image_buffer.data.clear();
}
// Create buffer if necessary
if (image_buffer.data.empty() && new_size > 0)
{
image_buffer.data.resize(new_size);
image_buffer.width = m_width;
image_buffer.height = m_height;
}
if (!image_buffer.data.empty() && src_width && src_height)
{
// Convert image to proper layout
// TODO: check if pixel format and bytes per pixel match and convert if necessary
// TODO: implement or improve more conversions
const u32 width = std::min<u32>(image_buffer.width, src_width);
const u32 height = std::min<u32>(image_buffer.height, src_height);
switch (m_format)
{
case CELL_CAMERA_RAW8: // The game seems to expect BGGR
{
// Let's use a very simple algorithm to convert the image to raw BGGR
u8* dst = image_buffer.data.data();
for (u32 y = 0; y < height; y++)
{
const u8* src = src_line_ptr(y);
const u8* srcu = src_line_ptr(std::max<s32>(0, y - 1));
const u8* srcd = src_line_ptr(std::min(height - 1, y + 1));
const bool is_top_pixel = (y % 2) == 0;
// We apply gaussian blur to get better demosaicing results later when debayering again
const auto blurred = [&](s32 x, s32 c)
{
const s32 i = x * 4 + c;
const s32 il = std::max(0, x - 1) * 4 + c;
const s32 ir = std::min<s32>(width - 1, x + 1) * 4 + c;
const s32 sum =
srcu[i] +
src[il] + 4 * src[i] + src[ir] +
srcd[i];
return static_cast<u8>(std::clamp((sum + 4) / 8, 0, 255));
};
// Split loops (roughly twice the performance by removing one condition)
if (is_top_pixel)
{
for (u32 x = 0; x < width; x++, dst++)
{
const bool is_left_pixel = (x % 2) == 0;
if (is_left_pixel)
{
*dst = blurred(x, 2); // Blue
}
else
{
*dst = blurred(x, 1); // Green
}
}
}
else
{
for (u32 x = 0; x < width; x++, dst++)
{
const bool is_left_pixel = (x % 2) == 0;
if (is_left_pixel)
{
*dst = blurred(x, 1); // Green
}
else
{
*dst = blurred(x, 0); // Red
}
}
}
}
break;
}
//case CELL_CAMERA_YUV422:
case CELL_CAMERA_Y0_U_Y1_V:
case CELL_CAMERA_V_Y1_U_Y0:
{
// Simple RGB to Y0_U_Y1_V conversion from stackoverflow.
constexpr s32 yuv_bytes_per_pixel = 2;
const s32 yuv_pitch = image_buffer.width * yuv_bytes_per_pixel;
const s32 y0_offset = (m_format == CELL_CAMERA_Y0_U_Y1_V) ? 0 : 3;
const s32 u_offset = (m_format == CELL_CAMERA_Y0_U_Y1_V) ? 1 : 2;
const s32 y1_offset = (m_format == CELL_CAMERA_Y0_U_Y1_V) ? 2 : 1;
const s32 v_offset = (m_format == CELL_CAMERA_Y0_U_Y1_V) ? 3 : 0;
for (u32 y = 0; y < height; y++)
{
const u8* src = src_line_ptr(y);
u8* yuv_row_ptr = &image_buffer.data[y * yuv_pitch];
for (u32 x = 0; x < width - 1; x += 2, src += 8)
{
const f32 r1 = src[0];
const f32 g1 = src[1];
const f32 b1 = src[2];
const f32 r2 = src[4];
const f32 g2 = src[5];
const f32 b2 = src[6];
const f32 y0 = (0.257f * r1) + (0.504f * g1) + (0.098f * b1) + 16.0f;
const f32 u = -(0.148f * r1) - (0.291f * g1) + (0.439f * b1) + 128.0f;
const f32 v = (0.439f * r1) - (0.368f * g1) - (0.071f * b1) + 128.0f;
const f32 y1 = (0.257f * r2) + (0.504f * g2) + (0.098f * b2) + 16.0f;
const s32 yuv_index = x * yuv_bytes_per_pixel;
yuv_row_ptr[yuv_index + y0_offset] = static_cast<u8>(std::clamp(y0, 0.0f, 255.0f));
yuv_row_ptr[yuv_index + u_offset] = static_cast<u8>(std::clamp( u, 0.0f, 255.0f));
yuv_row_ptr[yuv_index + y1_offset] = static_cast<u8>(std::clamp(y1, 0.0f, 255.0f));
yuv_row_ptr[yuv_index + v_offset] = static_cast<u8>(std::clamp( v, 0.0f, 255.0f));
}
}
break;
}
case CELL_CAMERA_JPG:
case CELL_CAMERA_RGBA:
case CELL_CAMERA_RAW10:
case CELL_CAMERA_YUV420:
case CELL_CAMERA_FORMAT_UNKNOWN:
default:
const u32 bytes_per_line = src_bytes_per_pixel * src_width;
if (src_pitch == bytes_per_line)
{
std::memcpy(image_buffer.data.data(), src_line_ptr(0), std::min<usz>(image_buffer.data.size(), src_height * bytes_per_line));
}
else
{
for (u32 y = 0, pos = 0; y < src_height && pos < image_buffer.data.size(); y++, pos += bytes_per_line)
{
std::memcpy(&image_buffer.data[pos], src_line_ptr(y), std::min<usz>(image_buffer.data.size() - pos, bytes_per_line));
}
}
break;
}
}
camera_log.trace("Wrote image to video surface. index=%d, m_frame_number=%d, width=%d, height=%d, bytesize=%d",
m_write_index, m_frame_number.load(), m_width, m_height, m_bytesize);
// Toggle write/read index
std::lock_guard lock(m_mutex);
image_buffer.frame_number = m_frame_number++;
m_write_index = read_index();
return true;
}
void camera_video_sink::set_format(s32 format, u32 bytesize)
{
camera_log.notice("Setting format: format=%d, bytesize=%d", format, bytesize);
m_format = format;
m_bytesize = bytesize;
}
void camera_video_sink::set_resolution(u32 width, u32 height)
{
camera_log.notice("Setting resolution: width=%d, height=%d", width, height);
m_width = width;
m_height = height;
}
void camera_video_sink::set_mirrored(bool mirrored)
{
camera_log.notice("Setting mirrored: mirrored=%d", mirrored);
m_mirrored = mirrored;
}
u64 camera_video_sink::frame_number() const
{
return m_frame_number.load();
}
void camera_video_sink::get_image(u8* buf, u64 size, u32& width, u32& height, u64& frame_number, u64& bytes_read)
{
// Lock read buffer
std::lock_guard lock(m_mutex);
const image_buffer& image_buffer = m_image_buffer[read_index()];
width = image_buffer.width;
height = image_buffer.height;
frame_number = image_buffer.frame_number;
// Copy to out buffer
if (buf && !image_buffer.data.empty())
{
bytes_read = std::min<u64>(image_buffer.data.size(), size);
std::memcpy(buf, image_buffer.data.data(), bytes_read);
if (image_buffer.data.size() != size)
{
camera_log.error("Buffer size mismatch: in=%d, out=%d. Cropping to incoming size. Please contact a developer.", size, image_buffer.data.size());
}
}
else
{
bytes_read = 0;
}
}
u32 camera_video_sink::read_index() const
{
// The read buffer index cannot be the same as the write index
return (m_write_index + 1u) % ::narrow<u32>(m_image_buffer.size());
}
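The Y0_U_Y1_V branch above uses the studio-range BT.601 coefficients. As a sanity check, here is a standalone snippet (not part of the PR) that converts one pure-white pixel, which should land at Y ≈ 235 and U = V = 128:

#include <algorithm>
#include <cstdio>

// Standalone illustration of the coefficients used above (studio-range BT.601).
int main()
{
    const float r = 255.f, g = 255.f, b = 255.f;
    const float y = (0.257f * r) + (0.504f * g) + (0.098f * b) + 16.0f;   // ~235.0
    const float u = -(0.148f * r) - (0.291f * g) + (0.439f * b) + 128.0f; // 128.0
    const float v = (0.439f * r) - (0.368f * g) - (0.071f * b) + 128.0f;  // 128.0
    std::printf("Y=%u U=%u V=%u\n",
        static_cast<unsigned>(std::clamp(y, 0.f, 255.f)),
        static_cast<unsigned>(std::clamp(u, 0.f, 255.f)),
        static_cast<unsigned>(std::clamp(v, 0.f, 255.f)));
}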

View file

@@ -0,0 +1,43 @@
#pragma once
#include <mutex>
class camera_video_sink
{
public:
camera_video_sink(bool front_facing);
virtual ~camera_video_sink();
void set_format(s32 format, u32 bytesize);
void set_resolution(u32 width, u32 height);
void set_mirrored(bool mirrored);
u64 frame_number() const;
bool present(u32 src_width, u32 src_height, u32 src_pitch, u32 src_bytes_per_pixel, std::function<const u8*(u32)> src_line_ptr);
void get_image(u8* buf, u64 size, u32& width, u32& height, u64& frame_number, u64& bytes_read);
protected:
u32 read_index() const;
bool m_front_facing = false;
bool m_mirrored = false; // Set by cellCamera
s32 m_format = 2; // CELL_CAMERA_RAW8, set by cellCamera
u32 m_bytesize = 0;
u32 m_width = 640;
u32 m_height = 480;
std::mutex m_mutex;
atomic_t<u64> m_frame_number{0};
u32 m_write_index{0};
struct image_buffer
{
u64 frame_number = 0;
u32 width = 0;
u32 height = 0;
std::vector<u8> data;
};
std::array<image_buffer, 2> m_image_buffer;
};
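The base class double-buffers frames: present() fills m_image_buffer[m_write_index] and then toggles the index under the mutex, so get_image() always reads the slot that is not being written. A standalone sketch of the index arithmetic (behavior inferred from the code above, simplified and single-threaded):

#include <array>
#include <cstdint>
#include <cassert>

int main()
{
    std::array<int, 2> slots{};    // stands in for m_image_buffer
    std::uint32_t write_index = 0; // m_write_index

    const auto read_index = [&]() { return (write_index + 1u) % slots.size(); };

    for (int frame = 0; frame < 4; frame++)
    {
        slots[write_index] = frame;           // present(): fill the write slot
        write_index = read_index();           // then publish it by toggling the index
        assert(slots[read_index()] == frame); // get_image(): the reader now sees the new frame
    }
}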

View file

@@ -190,6 +190,7 @@
<ItemGroup>
<ClCompile Include="display_sleep_control.cpp" />
<ClCompile Include="gamemode_control.cpp" />
<ClCompile Include="Input\camera_video_sink.cpp" />
<ClCompile Include="Input\dualsense_pad_handler.cpp" />
<ClCompile Include="Input\gui_pad_thread.cpp" />
<ClCompile Include="Input\hid_pad_handler.cpp" />
@@ -944,6 +945,7 @@
<ClInclude Include="Input\basic_mouse_handler.h" />
<ClInclude Include="display_sleep_control.h" />
<ClInclude Include="gamemode_control.h" />
<ClInclude Include="Input\camera_video_sink.h" />
<ClInclude Include="Input\ds3_pad_handler.h" />
<ClInclude Include="Input\ds4_pad_handler.h" />
<ClInclude Include="Input\dualsense_pad_handler.h" />

View file

@@ -1245,6 +1245,9 @@
<ClCompile Include="rpcs3qt\clans_settings_dialog.cpp">
<Filter>Gui\rpcn</Filter>
</ClCompile>
<ClCompile Include="Input\camera_video_sink.cpp">
<Filter>Io\camera</Filter>
</ClCompile>
</ItemGroup>
<ItemGroup>
<ClInclude Include="Input\ds4_pad_handler.h">
@@ -1475,6 +1478,9 @@
<ClInclude Include="rpcs3qt\custom_tree_widget.h">
<Filter>Gui\widgets</Filter>
</ClInclude>
<ClInclude Include="Input\camera_video_sink.h">
<Filter>Io\camera</Filter>
</ClInclude>
</ItemGroup>
<ItemGroup>
<ClInclude Include="resource.h">

View file

@@ -146,6 +146,7 @@ add_library(rpcs3_ui STATIC
../Input/basic_keyboard_handler.cpp
../Input/basic_mouse_handler.cpp
../Input/camera_video_sink.cpp
../Input/ds3_pad_handler.cpp
../Input/ds4_pad_handler.cpp
../Input/dualsense_pad_handler.cpp

View file

@@ -3,11 +3,14 @@
#include "ui_camera_settings_dialog.h"
#include "permissions.h"
#include "Emu/Io/camera_config.h"
#include "Emu/System.h"
#include "Emu/system_config.h"
#include <QCameraDevice>
#include <QMediaDevices>
#include <QMessageBox>
#include <QPushButton>
#include <QVideoSink>
LOG_CHANNEL(camera_log, "Camera");
@@ -61,15 +64,13 @@ camera_settings_dialog::camera_settings_dialog(QWidget* parent)
{
ui->setupUi(this);
setAttribute(Qt::WA_DeleteOnClose);
load_config();
for (const QCameraDevice& camera_info : QMediaDevices::videoInputs())
{
if (camera_info.isNull()) continue;
ui->combo_camera->addItem(camera_info.description(), QVariant::fromValue(camera_info));
camera_log.notice("Found camera: '%s'", camera_info.description());
}
ui->combo_handlers->addItem("Qt", QVariant::fromValue(static_cast<int>(camera_handler::qt)));
connect(ui->combo_handlers, &QComboBox::currentIndexChanged, this, &camera_settings_dialog::handle_handler_change);
connect(ui->combo_camera, &QComboBox::currentIndexChanged, this, &camera_settings_dialog::handle_camera_change);
connect(ui->combo_settings, &QComboBox::currentIndexChanged, this, &camera_settings_dialog::handle_settings_change);
connect(ui->buttonBox, &QDialogButtonBox::clicked, [this](QAbstractButton* button)
@@ -85,33 +86,132 @@ camera_settings_dialog::camera_settings_dialog(QWidget* parent)
}
});
if (ui->combo_camera->count() == 0)
{
ui->combo_camera->setEnabled(false);
ui->combo_settings->setEnabled(false);
ui->buttonBox->button(QDialogButtonBox::Save)->setEnabled(false);
ui->buttonBox->button(QDialogButtonBox::Apply)->setEnabled(false);
}
else
const int handler_index = ui->combo_handlers->findData(static_cast<int>(g_cfg.io.camera.get()));
ui->combo_handlers->setCurrentIndex(std::max(0, handler_index));
}
camera_settings_dialog::~camera_settings_dialog()
{
reset_cameras();
}
void camera_settings_dialog::enable_combos()
{
const bool is_enabled = ui->combo_camera->count() > 0;
ui->combo_camera->setEnabled(is_enabled);
ui->combo_settings->setEnabled(is_enabled);
ui->buttonBox->button(QDialogButtonBox::Save)->setEnabled(is_enabled);
ui->buttonBox->button(QDialogButtonBox::Apply)->setEnabled(is_enabled);
if (is_enabled)
{
// TODO: show camera ID somewhere
ui->combo_camera->setCurrentIndex(0);
}
}
camera_settings_dialog::~camera_settings_dialog()
void camera_settings_dialog::reset_cameras()
{
m_media_capture_session.reset();
m_camera.reset();
}
void camera_settings_dialog::handle_handler_change(int index)
{
reset_cameras();
if (index < 0 || !ui->combo_handlers->itemData(index).canConvert<int>())
{
ui->combo_settings->clear();
ui->combo_camera->clear();
enable_combos();
return;
}
m_handler = static_cast<camera_handler>(ui->combo_handlers->itemData(index).value<int>());
ui->combo_settings->blockSignals(true);
ui->combo_camera->blockSignals(true);
ui->combo_settings->clear();
ui->combo_camera->clear();
switch (m_handler)
{
case camera_handler::qt:
{
for (const QCameraDevice& camera_info : QMediaDevices::videoInputs())
{
if (camera_info.isNull()) continue;
ui->combo_camera->addItem(camera_info.description(), QVariant::fromValue(camera_info));
camera_log.notice("Found camera: '%s'", camera_info.description());
}
break;
}
default:
fmt::throw_exception("Unexpected camera handler %d", static_cast<int>(m_handler));
}
ui->combo_settings->blockSignals(false);
ui->combo_camera->blockSignals(false);
enable_combos();
}
void camera_settings_dialog::handle_camera_change(int index)
{
if (index < 0 || !ui->combo_camera->itemData(index).canConvert<QCameraDevice>())
if (index < 0)
{
ui->combo_settings->clear();
return;
}
const QCameraDevice camera_info = ui->combo_camera->itemData(index).value<QCameraDevice>();
reset_cameras();
switch (m_handler)
{
case camera_handler::qt:
handle_qt_camera_change(ui->combo_camera->itemData(index));
break;
default:
fmt::throw_exception("Unexpected camera handler %d", static_cast<int>(m_handler));
}
}
void camera_settings_dialog::handle_settings_change(int index)
{
if (index < 0)
{
return;
}
if (!gui::utils::check_camera_permission(this,
[this, index](){ handle_settings_change(index); },
[this](){ QMessageBox::warning(this, tr("Camera permissions denied!"), tr("RPCS3 has no permissions to access cameras on this device.")); }))
{
return;
}
switch (m_handler)
{
case camera_handler::qt:
handle_qt_settings_change(ui->combo_settings->itemData(index));
break;
default:
fmt::throw_exception("Unexpected camera handler %d", static_cast<int>(m_handler));
}
}
void camera_settings_dialog::handle_qt_camera_change(const QVariant& item_data)
{
if (!item_data.canConvert<QCameraDevice>())
{
ui->combo_settings->clear();
return;
}
const QCameraDevice camera_info = item_data.value<QCameraDevice>();
if (camera_info.isNull())
{
@@ -119,10 +219,10 @@ void camera_settings_dialog::handle_camera_change(int index)
return;
}
m_camera.reset(new QCamera(camera_info));
m_media_capture_session.reset(new QMediaCaptureSession(nullptr));
m_camera = std::make_unique<QCamera>(camera_info);
m_media_capture_session = std::make_unique<QMediaCaptureSession>(nullptr);
m_media_capture_session->setCamera(m_camera.get());
m_media_capture_session->setVideoSink(ui->videoWidget->videoSink());
m_media_capture_session->setVideoOutput(ui->videoWidget);
if (!m_camera->isAvailable())
{
@@ -173,14 +273,14 @@ void camera_settings_dialog::handle_camera_change(int index)
int index = 0;
bool success = false;
const std::string key = camera_info.id().toStdString();
cfg_camera::camera_setting cfg_setting = g_cfg_camera.get_camera_setting(key, success);
cfg_camera::camera_setting cfg_setting = g_cfg_camera.get_camera_setting(fmt::format("%s", camera_handler::qt), key, success);
if (success)
{
camera_log.notice("Found config entry for camera \"%s\"", key);
// Select matching drowdown entry
const double epsilon = 0.001;
// Select matching dropdown entry
constexpr double epsilon = 0.001;
for (int i = 0; i < ui->combo_settings->count(); i++)
{
@@ -202,19 +302,10 @@ void camera_settings_dialog::handle_camera_change(int index)
ui->combo_settings->setCurrentIndex(std::max<int>(0, index));
ui->combo_settings->setEnabled(true);
// Update config to match user interface outcome
const QCameraFormat setting = ui->combo_settings->currentData().value<QCameraFormat>();
cfg_setting.width = setting.resolution().width();
cfg_setting.height = setting.resolution().height();
cfg_setting.min_fps = setting.minFrameRate();
cfg_setting.max_fps = setting.maxFrameRate();
cfg_setting.format = static_cast<int>(setting.pixelFormat());
g_cfg_camera.set_camera_setting(key, cfg_setting);
}
}
void camera_settings_dialog::handle_settings_change(int index)
void camera_settings_dialog::handle_qt_settings_change(const QVariant& item_data)
{
if (!m_camera)
{
@@ -227,28 +318,22 @@ void camera_settings_dialog::handle_settings_change(int index)
return;
}
if (!gui::utils::check_camera_permission(this,
[this, index](){ handle_settings_change(index); },
[this](){ QMessageBox::warning(this, tr("Camera permissions denied!"), tr("RPCS3 has no permissions to access cameras on this device.")); }))
if (item_data.canConvert<QCameraFormat>() && ui->combo_camera->currentData().canConvert<QCameraDevice>())
{
return;
}
if (index >= 0 && ui->combo_settings->itemData(index).canConvert<QCameraFormat>() && ui->combo_camera->currentData().canConvert<QCameraDevice>())
{
const QCameraFormat setting = ui->combo_settings->itemData(index).value<QCameraFormat>();
const QCameraFormat setting = item_data.value<QCameraFormat>();
if (!setting.isNull())
{
m_camera->setCameraFormat(setting);
}
cfg_camera::camera_setting cfg_setting;
cfg_camera::camera_setting cfg_setting {};
cfg_setting.width = setting.resolution().width();
cfg_setting.height = setting.resolution().height();
cfg_setting.min_fps = setting.minFrameRate();
cfg_setting.max_fps = setting.maxFrameRate();
cfg_setting.format = static_cast<int>(setting.pixelFormat());
g_cfg_camera.set_camera_setting(ui->combo_camera->currentData().value<QCameraDevice>().id().toStdString(), cfg_setting);
cfg_setting.colorspace = 0;
g_cfg_camera.set_camera_setting(fmt::format("%s", camera_handler::qt), ui->combo_camera->currentData().value<QCameraDevice>().id().toStdString(), cfg_setting);
}
m_camera->start();

View file

@@ -1,5 +1,7 @@
#pragma once
#include "Emu/system_config_types.h"
#include <QCamera>
#include <QDialog>
#include <QMediaCaptureSession>
@@ -18,14 +20,22 @@
virtual ~camera_settings_dialog();
private Q_SLOTS:
void handle_handler_change(int index);
void handle_camera_change(int index);
void handle_settings_change(int index);
private:
void enable_combos();
void reset_cameras();
void load_config();
void save_config();
void handle_qt_camera_change(const QVariant& item_data);
void handle_qt_settings_change(const QVariant& item_data);
std::unique_ptr<Ui::camera_settings_dialog> ui;
std::unique_ptr<QCamera> m_camera;
std::unique_ptr<QMediaCaptureSession> m_media_capture_session;
camera_handler m_handler = camera_handler::qt;
};

View file

@@ -15,7 +15,23 @@
</property>
<layout class="QVBoxLayout" name="mainLayout" stretch="0,1,0">
<item>
<layout class="QHBoxLayout" name="settingsLayout" stretch="1,2">
<layout class="QHBoxLayout" name="settingsLayout" stretch="0,1,2">
<item>
<widget class="QGroupBox" name="gbHandler">
<property name="title">
<string>Handler</string>
</property>
<layout class="QVBoxLayout" name="handler_layout">
<item>
<widget class="QComboBox" name="combo_handlers">
<property name="placeholderText">
<string>No handlers found</string>
</property>
</widget>
</item>
</layout>
</widget>
</item>
<item>
<widget class="QGroupBox" name="gbCamera">
<property name="title">
@@ -75,10 +91,10 @@
<item>
<widget class="QDialogButtonBox" name="buttonBox">
<property name="orientation">
<enum>Qt::Horizontal</enum>
<enum>Qt::Orientation::Horizontal</enum>
</property>
<property name="standardButtons">
<set>QDialogButtonBox::Apply|QDialogButtonBox::Cancel|QDialogButtonBox::Save</set>
<set>QDialogButtonBox::StandardButton::Apply|QDialogButtonBox::StandardButton::Cancel|QDialogButtonBox::StandardButton::Save</set>
</property>
</widget>
</item>

View file

@@ -2905,8 +2905,8 @@ void main_window::CreateConnects()
connect(ui->confCamerasAct, &QAction::triggered, this, [this]()
{
camera_settings_dialog dlg(this);
dlg.exec();
camera_settings_dialog* dlg = new camera_settings_dialog(this);
dlg->open();
});
const auto open_rpcn_settings = [this]

View file

@@ -16,8 +16,8 @@ movie_item_base::~movie_item_base()
void movie_item_base::init_pointers()
{
m_icon_loading_aborted.reset(new atomic_t<bool>(false));
m_size_on_disk_loading_aborted.reset(new atomic_t<bool>(false));
m_icon_loading_aborted = std::make_shared<atomic_t<bool>>(false);
m_size_on_disk_loading_aborted = std::make_shared<atomic_t<bool>>(false);
}
void movie_item_base::call_icon_load_func(int index)

View file

@@ -341,14 +341,14 @@ void qt_camera_handler::update_camera_settings()
// Load selected settings from config file
bool success = false;
cfg_camera::camera_setting cfg_setting = g_cfg_camera.get_camera_setting(camera_id, success);
cfg_camera::camera_setting cfg_setting = g_cfg_camera.get_camera_setting(fmt::format("%s", camera_handler::qt), camera_id, success);
if (success)
{
camera_log.notice("Found config entry for camera \"%s\" (m_camera_id='%s')", camera_id, m_camera_id);
// List all available settings and choose the proper value if possible.
const double epsilon = 0.001;
constexpr double epsilon = 0.001;
success = false;
for (const QCameraFormat& supported_setting : m_camera->cameraDevice().videoFormats())
{

View file

@@ -1,7 +1,6 @@
#include "stdafx.h"
#include "qt_camera_video_sink.h"
#include "Emu/Cell/Modules/cellCamera.h"
#include "Emu/system_config.h"
#include <QtConcurrent>
@@ -9,7 +8,7 @@
LOG_CHANNEL(camera_log, "Camera");
qt_camera_video_sink::qt_camera_video_sink(bool front_facing, QObject *parent)
: QVideoSink(parent), m_front_facing(front_facing)
: camera_video_sink(front_facing), QVideoSink(parent)
{
connect(this, &QVideoSink::videoFrameChanged, this, &qt_camera_video_sink::present);
}
@@ -36,6 +35,8 @@ bool qt_camera_video_sink::present(const QVideoFrame& frame)
// Get image. This usually also converts the image to ARGB32.
QImage image = frame.toImage();
const u32 width = image.isNull() ? 0 : static_cast<u32>(image.width());
const u32 height = image.isNull() ? 0 : static_cast<u32>(image.height());
if (image.isNull())
{
@@ -44,7 +45,7 @@ bool qt_camera_video_sink::present(const QVideoFrame& frame)
else
{
// Scale image if necessary
if (m_width > 0 && m_height > 0 && m_width != static_cast<u32>(image.width()) && m_height != static_cast<u32>(image.height()))
if (m_width > 0 && m_height > 0 && m_width != width && m_height != height)
{
image = image.scaled(m_width, m_height, Qt::AspectRatioMode::IgnoreAspectRatio, Qt::SmoothTransformation);
}
@@ -87,254 +88,10 @@ bool qt_camera_video_sink::present(const QVideoFrame& frame)
}
}
const u64 new_size = m_bytesize;
image_buffer& image_buffer = m_image_buffer[m_write_index];
// Reset buffer if necessary
if (image_buffer.data.size() != new_size)
{
image_buffer.data.clear();
}
// Create buffer if necessary
if (image_buffer.data.empty() && new_size > 0)
{
image_buffer.data.resize(new_size);
image_buffer.width = m_width;
image_buffer.height = m_height;
}
if (!image_buffer.data.empty() && !image.isNull())
{
// Convert image to proper layout
// TODO: check if pixel format and bytes per pixel match and convert if necessary
// TODO: implement or improve more conversions
const u32 width = std::min<u32>(image_buffer.width, image.width());
const u32 height = std::min<u32>(image_buffer.height, image.height());
switch (m_format)
{
case CELL_CAMERA_RAW8: // The game seems to expect BGGR
{
// Let's use a very simple algorithm to convert the image to raw BGGR
const auto convert_to_bggr = [this, &image_buffer, &image, width, height](u32 y_begin, u32 y_end)
{
u8* dst = &image_buffer.data[image_buffer.width * y_begin];
for (u32 y = y_begin; y < height && y < y_end; y++)
{
const u8* src = image.constScanLine(y);
const u8* srcu = image.constScanLine(std::max<s32>(0, y - 1));
const u8* srcd = image.constScanLine(std::min(height - 1, y + 1));
const bool is_top_pixel = (y % 2) == 0;
// We apply gaussian blur to get better demosaicing results later when debayering again
const auto blurred = [&](s32 x, s32 c)
{
const s32 i = x * 4 + c;
const s32 il = std::max(0, x - 1) * 4 + c;
const s32 ir = std::min<s32>(width - 1, x + 1) * 4 + c;
const s32 sum =
srcu[i] +
src[il] + 4 * src[i] + src[ir] +
srcd[i];
return static_cast<u8>(std::clamp((sum + 4) / 8, 0, 255));
};
// Split loops (roughly twice the performance by removing one condition)
if (is_top_pixel)
{
for (u32 x = 0; x < width; x++, dst++)
{
const bool is_left_pixel = (x % 2) == 0;
if (is_left_pixel)
{
*dst = blurred(x, 2); // Blue
}
else
{
*dst = blurred(x, 1); // Green
}
}
}
else
{
for (u32 x = 0; x < width; x++, dst++)
{
const bool is_left_pixel = (x % 2) == 0;
if (is_left_pixel)
{
*dst = blurred(x, 1); // Green
}
else
{
*dst = blurred(x, 0); // Red
}
}
}
}
};
// Use a multithreaded workload. The faster we get this done, the better.
constexpr u32 thread_count = 4;
const u32 lines_per_thread = std::ceil(image_buffer.height / static_cast<double>(thread_count));
u32 y_begin = 0;
u32 y_end = lines_per_thread;
QFutureSynchronizer<void> synchronizer;
for (u32 i = 0; i < thread_count; i++)
{
synchronizer.addFuture(QtConcurrent::run(convert_to_bggr, y_begin, y_end));
y_begin = y_end;
y_end += lines_per_thread;
}
synchronizer.waitForFinished();
break;
}
//case CELL_CAMERA_YUV422:
case CELL_CAMERA_Y0_U_Y1_V:
case CELL_CAMERA_V_Y1_U_Y0:
{
// Simple RGB to Y0_U_Y1_V conversion from stackoverflow.
const auto convert_to_yuv422 = [&image_buffer, &image, width, height, format = m_format](u32 y_begin, u32 y_end)
{
constexpr s32 yuv_bytes_per_pixel = 2;
const s32 yuv_pitch = image_buffer.width * yuv_bytes_per_pixel;
const s32 y0_offset = (format == CELL_CAMERA_Y0_U_Y1_V) ? 0 : 3;
const s32 u_offset = (format == CELL_CAMERA_Y0_U_Y1_V) ? 1 : 2;
const s32 y1_offset = (format == CELL_CAMERA_Y0_U_Y1_V) ? 2 : 1;
const s32 v_offset = (format == CELL_CAMERA_Y0_U_Y1_V) ? 3 : 0;
for (u32 y = y_begin; y < height && y < y_end; y++)
{
const u8* src = image.constScanLine(y);
u8* yuv_row_ptr = &image_buffer.data[y * yuv_pitch];
for (u32 x = 0; x < width - 1; x += 2, src += 8)
{
const f32 r1 = src[0];
const f32 g1 = src[1];
const f32 b1 = src[2];
const f32 r2 = src[4];
const f32 g2 = src[5];
const f32 b2 = src[6];
const s32 y0 = (0.257f * r1) + (0.504f * g1) + (0.098f * b1) + 16.0f;
const s32 u = -(0.148f * r1) - (0.291f * g1) + (0.439f * b1) + 128.0f;
const s32 v = (0.439f * r1) - (0.368f * g1) - (0.071f * b1) + 128.0f;
const s32 y1 = (0.257f * r2) + (0.504f * g2) + (0.098f * b2) + 16.0f;
const s32 yuv_index = x * yuv_bytes_per_pixel;
yuv_row_ptr[yuv_index + y0_offset] = static_cast<u8>(std::clamp(y0, 0, 255));
yuv_row_ptr[yuv_index + u_offset] = static_cast<u8>(std::clamp( u, 0, 255));
yuv_row_ptr[yuv_index + y1_offset] = static_cast<u8>(std::clamp(y1, 0, 255));
yuv_row_ptr[yuv_index + v_offset] = static_cast<u8>(std::clamp( v, 0, 255));
}
}
};
// Use a multithreaded workload. The faster we get this done, the better.
constexpr u32 thread_count = 4;
const u32 lines_per_thread = std::ceil(image_buffer.height / static_cast<double>(thread_count));
u32 y_begin = 0;
u32 y_end = lines_per_thread;
QFutureSynchronizer<void> synchronizer;
for (u32 i = 0; i < thread_count; i++)
{
synchronizer.addFuture(QtConcurrent::run(convert_to_yuv422, y_begin, y_end));
y_begin = y_end;
y_end += lines_per_thread;
}
synchronizer.waitForFinished();
break;
}
case CELL_CAMERA_JPG:
case CELL_CAMERA_RGBA:
case CELL_CAMERA_RAW10:
case CELL_CAMERA_YUV420:
case CELL_CAMERA_FORMAT_UNKNOWN:
default:
std::memcpy(image_buffer.data.data(), image.constBits(), std::min<usz>(image_buffer.data.size(), image.height() * image.bytesPerLine()));
break;
}
}
camera_video_sink::present(width, height, image.bytesPerLine(), 4, [&image](u32 y){ return image.constScanLine(y); });
// Unmap frame memory
tmp.unmap();
camera_log.trace("Wrote image to video surface. index=%d, m_frame_number=%d, width=%d, height=%d, bytesize=%d",
m_write_index, m_frame_number.load(), m_width, m_height, m_bytesize);
// Toggle write/read index
std::lock_guard lock(m_mutex);
image_buffer.frame_number = m_frame_number++;
m_write_index = read_index();
return true;
}
void qt_camera_video_sink::set_format(s32 format, u32 bytesize)
{
camera_log.notice("Setting format: format=%d, bytesize=%d", format, bytesize);
m_format = format;
m_bytesize = bytesize;
}
void qt_camera_video_sink::set_resolution(u32 width, u32 height)
{
camera_log.notice("Setting resolution: width=%d, height=%d", width, height);
m_width = width;
m_height = height;
}
void qt_camera_video_sink::set_mirrored(bool mirrored)
{
camera_log.notice("Setting mirrored: mirrored=%d", mirrored);
m_mirrored = mirrored;
}
u64 qt_camera_video_sink::frame_number() const
{
return m_frame_number.load();
}
void qt_camera_video_sink::get_image(u8* buf, u64 size, u32& width, u32& height, u64& frame_number, u64& bytes_read)
{
// Lock read buffer
std::lock_guard lock(m_mutex);
const image_buffer& image_buffer = m_image_buffer[read_index()];
width = image_buffer.width;
height = image_buffer.height;
frame_number = image_buffer.frame_number;
// Copy to out buffer
if (buf && !image_buffer.data.empty())
{
bytes_read = std::min<u64>(image_buffer.data.size(), size);
std::memcpy(buf, image_buffer.data.data(), bytes_read);
if (image_buffer.data.size() != size)
{
camera_log.error("Buffer size mismatch: in=%d, out=%d. Cropping to incoming size. Please contact a developer.", size, image_buffer.data.size());
}
}
else
{
bytes_read = 0;
}
}
u32 qt_camera_video_sink::read_index() const
{
// The read buffer index cannot be the same as the write index
return (m_write_index + 1u) % ::narrow<u32>(m_image_buffer.size());
}

View file

@@ -1,50 +1,15 @@
#pragma once
#include "util/atomic.hpp"
#include "util/types.hpp"
#include "Input/camera_video_sink.h"
#include <QVideoFrame>
#include <QVideoSink>
#include <QImage>
#include <array>
#include <mutex>
class qt_camera_video_sink final : public QVideoSink
class qt_camera_video_sink final : public camera_video_sink, public QVideoSink
{
public:
qt_camera_video_sink(bool front_facing, QObject *parent = nullptr);
virtual ~qt_camera_video_sink();
bool present(const QVideoFrame& frame);
void set_format(s32 format, u32 bytesize);
void set_resolution(u32 width, u32 height);
void set_mirrored(bool mirrored);
u64 frame_number() const;
void get_image(u8* buf, u64 size, u32& width, u32& height, u64& frame_number, u64& bytes_read);
private:
u32 read_index() const;
bool m_front_facing = false;
bool m_mirrored = false; // Set by cellCamera
s32 m_format = 2; // CELL_CAMERA_RAW8, set by cellCamera
u32 m_bytesize = 0;
u32 m_width = 640;
u32 m_height = 480;
std::mutex m_mutex;
atomic_t<u64> m_frame_number{0};
u32 m_write_index{0};
struct image_buffer
{
u64 frame_number = 0;
u32 width = 0;
u32 height = 0;
std::vector<u8> data;
};
std::array<image_buffer, 2> m_image_buffer;
};
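With the Qt-specific sink reduced to a thin adapter, other handlers (such as the SDL one mentioned in the commit message) only need to derive from camera_video_sink and hand present() a per-line pointer callback. A hypothetical sketch, not part of this PR:

#include "Input/camera_video_sink.h"
#include "util/types.hpp"
#include <vector>

// Hypothetical non-Qt backend: reuse the shared conversion/buffering by deriving
// from camera_video_sink and handing present() a callback that returns a pointer
// to one source line of the incoming RGBA frame.
class dummy_camera_video_sink final : public camera_video_sink
{
public:
    dummy_camera_video_sink() : camera_video_sink(true /* front facing */) {}

    void push_rgba_frame(const std::vector<u8>& rgba, u32 width, u32 height)
    {
        const u32 pitch = width * 4;
        present(width, height, pitch, 4, [&](u32 y) { return rgba.data() + y * pitch; });
    }
};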