Diffstat (limited to 'src')
-rw-r--r--  src/android/camera_capabilities.cpp | 50
-rw-r--r--  src/android/camera_device.cpp | 53
-rw-r--r--  src/android/camera_hal_manager.cpp | 6
-rw-r--r--  src/android/jpeg/exif.cpp | 13
-rw-r--r--  src/android/jpeg/exif.h | 5
-rw-r--r--  src/android/jpeg/post_processor_jpeg.cpp | 2
-rw-r--r--  src/cam/capture-script.yaml | 29
-rw-r--r--  src/cam/capture_script.cpp | 229
-rw-r--r--  src/cam/capture_script.h | 10
-rw-r--r--  src/cam/drm.cpp | 2
-rw-r--r--  src/cam/drm.h | 1
-rw-r--r--  src/cam/kms_sink.cpp | 116
-rw-r--r--  src/cam/kms_sink.h | 13
-rw-r--r--  src/cam/main.cpp | 11
-rw-r--r--  src/cam/meson.build | 10
-rw-r--r--  src/cam/sdl_sink.cpp | 36
-rw-r--r--  src/cam/sdl_texture.cpp | 6
-rw-r--r--  src/cam/sdl_texture.h | 11
-rw-r--r--  src/cam/sdl_texture_mjpg.cpp | 72
-rw-r--r--  src/cam/sdl_texture_mjpg.h | 8
-rw-r--r--  src/cam/sdl_texture_yuv.cpp | 33
-rw-r--r--  src/cam/sdl_texture_yuv.h | 26
-rw-r--r--  src/cam/sdl_texture_yuyv.cpp | 20
-rw-r--r--  src/cam/sdl_texture_yuyv.h | 17
-rw-r--r--  src/cam/stream_options.cpp | 9
-rw-r--r--  src/cam/stream_options.h | 2
-rw-r--r--  src/gstreamer/gstlibcamera-utils.cpp | 201
-rw-r--r--  src/gstreamer/gstlibcamera-utils.h | 4
-rw-r--r--  src/gstreamer/gstlibcamerapad.cpp | 35
-rw-r--r--  src/gstreamer/gstlibcamerapad.h | 6
-rw-r--r--  src/gstreamer/gstlibcamerapool.cpp | 7
-rw-r--r--  src/gstreamer/gstlibcamerapool.h | 2
-rw-r--r--  src/gstreamer/gstlibcamerasrc.cpp | 311
-rw-r--r--  src/gstreamer/meson.build | 2
-rw-r--r--  src/ipa/ipu3/algorithms/af.cpp | 2
-rw-r--r--  src/ipa/ipu3/algorithms/agc.cpp | 4
-rw-r--r--  src/ipa/ipu3/algorithms/algorithm.h | 8
-rw-r--r--  src/ipa/ipu3/algorithms/awb.cpp | 2
-rw-r--r--  src/ipa/ipu3/algorithms/blc.cpp | 2
-rw-r--r--  src/ipa/ipu3/algorithms/tone_mapping.cpp | 2
-rw-r--r--  src/ipa/ipu3/data/meson.build | 8
-rw-r--r--  src/ipa/ipu3/data/uncalibrated.yaml | 11
-rw-r--r--  src/ipa/ipu3/ipu3.cpp | 70
-rw-r--r--  src/ipa/ipu3/meson.build | 1
-rw-r--r--  src/ipa/ipu3/module.h | 27
-rw-r--r--  src/ipa/libipa/algorithm.cpp | 96
-rw-r--r--  src/ipa/libipa/algorithm.h | 78
-rw-r--r--  src/ipa/libipa/histogram.cpp | 2
-rw-r--r--  src/ipa/libipa/histogram.h | 2
-rw-r--r--  src/ipa/libipa/libipa.cpp | 22
-rw-r--r--  src/ipa/libipa/meson.build | 6
-rw-r--r--  src/ipa/libipa/module.cpp | 126
-rw-r--r--  src/ipa/libipa/module.h | 124
-rw-r--r--  src/ipa/meson.build | 10
-rw-r--r--  src/ipa/raspberrypi/cam_helper.cpp | 99
-rw-r--r--  src/ipa/raspberrypi/cam_helper.h | 127
-rw-r--r--  src/ipa/raspberrypi/cam_helper.hpp | 123
-rw-r--r--  src/ipa/raspberrypi/cam_helper_imx219.cpp | 40
-rw-r--r--  src/ipa/raspberrypi/cam_helper_imx290.cpp | 36
-rw-r--r--  src/ipa/raspberrypi/cam_helper_imx296.cpp | 28
-rw-r--r--  src/ipa/raspberrypi/cam_helper_imx477.cpp | 81
-rw-r--r--  src/ipa/raspberrypi/cam_helper_imx519.cpp | 76
-rw-r--r--  src/ipa/raspberrypi/cam_helper_ov5647.cpp | 48
-rw-r--r--  src/ipa/raspberrypi/cam_helper_ov9281.cpp | 32
-rw-r--r--  src/ipa/raspberrypi/controller/agc_algorithm.h | 31
-rw-r--r--  src/ipa/raspberrypi/controller/agc_algorithm.hpp | 32
-rw-r--r--  src/ipa/raspberrypi/controller/agc_status.h | 48
-rw-r--r--  src/ipa/raspberrypi/controller/algorithm.cpp | 27
-rw-r--r--  src/ipa/raspberrypi/controller/algorithm.h | 64
-rw-r--r--  src/ipa/raspberrypi/controller/algorithm.hpp | 60
-rw-r--r--  src/ipa/raspberrypi/controller/alsc_status.h | 26
-rw-r--r--  src/ipa/raspberrypi/controller/awb_algorithm.h | 23
-rw-r--r--  src/ipa/raspberrypi/controller/awb_algorithm.hpp | 23
-rw-r--r--  src/ipa/raspberrypi/controller/awb_status.h | 24
-rw-r--r--  src/ipa/raspberrypi/controller/black_level_status.h | 18
-rw-r--r--  src/ipa/raspberrypi/controller/camera_mode.h | 56
-rw-r--r--  src/ipa/raspberrypi/controller/ccm_algorithm.h | 21
-rw-r--r--  src/ipa/raspberrypi/controller/ccm_algorithm.hpp | 21
-rw-r--r--  src/ipa/raspberrypi/controller/ccm_status.h | 12
-rw-r--r--  src/ipa/raspberrypi/controller/contrast_algorithm.h | 22
-rw-r--r--  src/ipa/raspberrypi/controller/contrast_algorithm.hpp | 22
-rw-r--r--  src/ipa/raspberrypi/controller/contrast_status.h | 20
-rw-r--r--  src/ipa/raspberrypi/controller/controller.cpp | 142
-rw-r--r--  src/ipa/raspberrypi/controller/controller.h | 60
-rw-r--r--  src/ipa/raspberrypi/controller/controller.hpp | 54
-rw-r--r--  src/ipa/raspberrypi/controller/denoise_algorithm.h (renamed from src/ipa/raspberrypi/controller/denoise_algorithm.hpp) | 12
-rw-r--r--  src/ipa/raspberrypi/controller/denoise_status.h | 16
-rw-r--r--  src/ipa/raspberrypi/controller/device_status.cpp | 23
-rw-r--r--  src/ipa/raspberrypi/controller/device_status.h | 22
-rw-r--r--  src/ipa/raspberrypi/controller/dpc_status.h | 14
-rw-r--r--  src/ipa/raspberrypi/controller/focus_status.h | 20
-rw-r--r--  src/ipa/raspberrypi/controller/geq_status.h | 12
-rw-r--r--  src/ipa/raspberrypi/controller/histogram.cpp | 46
-rw-r--r--  src/ipa/raspberrypi/controller/histogram.h | 48
-rw-r--r--  src/ipa/raspberrypi/controller/histogram.hpp | 44
-rw-r--r--  src/ipa/raspberrypi/controller/lux_status.h | 28
-rw-r--r--  src/ipa/raspberrypi/controller/metadata.h (renamed from src/ipa/raspberrypi/controller/metadata.hpp) | 40
-rw-r--r--  src/ipa/raspberrypi/controller/noise_status.h | 16
-rw-r--r--  src/ipa/raspberrypi/controller/pwl.cpp | 200
-rw-r--r--  src/ipa/raspberrypi/controller/pwl.h | 127
-rw-r--r--  src/ipa/raspberrypi/controller/pwl.hpp | 112
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/agc.cpp | 1171
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/agc.h | 141
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/agc.hpp | 139
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/alsc.cpp | 961
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/alsc.h | 110
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/alsc.hpp | 106
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/awb.cpp | 866
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/awb.h | 191
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/awb.hpp | 179
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/black_level.cpp | 47
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/black_level.h | 30
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/black_level.hpp | 30
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/ccm.cpp | 162
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/ccm.h (renamed from src/ipa/raspberrypi/controller/rpi/ccm.hpp) | 24
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/contrast.cpp | 196
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/contrast.h | 52
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/contrast.hpp | 50
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/dpc.cpp | 42
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/dpc.h | 32
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/dpc.hpp | 32
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/focus.cpp | 18
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/focus.h (renamed from src/ipa/raspberrypi/controller/rpi/focus.hpp) | 12
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/geq.cpp | 80
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/geq.h | 34
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/geq.hpp | 34
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/lux.cpp | 111
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/lux.h | 45
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/lux.hpp | 43
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/noise.cpp | 73
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/noise.h | 32
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/noise.hpp | 32
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/sdn.cpp | 55
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/sdn.h | 32
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/sdn.hpp | 32
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/sharpen.cpp | 79
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/sharpen.h | 34
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/sharpen.hpp | 34
-rw-r--r--  src/ipa/raspberrypi/controller/sharpen_algorithm.h | 21
-rw-r--r--  src/ipa/raspberrypi/controller/sharpen_algorithm.hpp | 21
-rw-r--r--  src/ipa/raspberrypi/controller/sharpen_status.h | 22
-rw-r--r--  src/ipa/raspberrypi/data/imx219.json | 824
-rw-r--r--  src/ipa/raspberrypi/data/imx219_noir.json | 686
-rw-r--r--  src/ipa/raspberrypi/data/imx290.json | 326
-rw-r--r--  src/ipa/raspberrypi/data/imx296.json | 369
-rw-r--r--  src/ipa/raspberrypi/data/imx378.json | 677
-rw-r--r--  src/ipa/raspberrypi/data/imx477.json | 881
-rw-r--r--  src/ipa/raspberrypi/data/imx477_noir.json | 734
-rw-r--r--  src/ipa/raspberrypi/data/imx519.json | 677
-rw-r--r--  src/ipa/raspberrypi/data/ov5647.json | 824
-rw-r--r--  src/ipa/raspberrypi/data/ov5647_noir.json | 686
-rw-r--r--  src/ipa/raspberrypi/data/ov9281.json | 195
-rw-r--r--  src/ipa/raspberrypi/data/se327m12.json | 683
-rw-r--r--  src/ipa/raspberrypi/data/uncalibrated.json | 180
-rw-r--r--  src/ipa/raspberrypi/md_parser.h (renamed from src/ipa/raspberrypi/md_parser.hpp) | 50
-rw-r--r--  src/ipa/raspberrypi/md_parser_smia.cpp | 112
-rw-r--r--  src/ipa/raspberrypi/meson.build | 1
-rw-r--r--  src/ipa/raspberrypi/raspberrypi.cpp | 366
-rw-r--r--  src/ipa/rkisp1/algorithms/agc.cpp | 2
-rw-r--r--  src/ipa/rkisp1/algorithms/agc.h | 2
-rw-r--r--  src/ipa/rkisp1/algorithms/algorithm.h | 10
-rw-r--r--  src/ipa/rkisp1/algorithms/awb.cpp | 40
-rw-r--r--  src/ipa/rkisp1/algorithms/awb.h | 4
-rw-r--r--  src/ipa/rkisp1/algorithms/blc.cpp | 51
-rw-r--r--  src/ipa/rkisp1/algorithms/blc.h | 14
-rw-r--r--  src/ipa/rkisp1/algorithms/cproc.cpp | 97
-rw-r--r--  src/ipa/rkisp1/algorithms/cproc.h | 30
-rw-r--r--  src/ipa/rkisp1/algorithms/dpcc.cpp | 254
-rw-r--r--  src/ipa/rkisp1/algorithms/dpcc.h | 31
-rw-r--r--  src/ipa/rkisp1/algorithms/dpf.cpp | 258
-rw-r--r--  src/ipa/rkisp1/algorithms/dpf.h | 36
-rw-r--r--  src/ipa/rkisp1/algorithms/filter.cpp | 201
-rw-r--r--  src/ipa/rkisp1/algorithms/filter.h | 30
-rw-r--r--  src/ipa/rkisp1/algorithms/gsl.cpp | 149
-rw-r--r--  src/ipa/rkisp1/algorithms/gsl.h | 34
-rw-r--r--  src/ipa/rkisp1/algorithms/lsc.cpp | 188
-rw-r--r--  src/ipa/rkisp1/algorithms/lsc.h | 39
-rw-r--r--  src/ipa/rkisp1/algorithms/meson.build | 6
-rw-r--r--  src/ipa/rkisp1/data/imx219.yaml | 13
-rw-r--r--  src/ipa/rkisp1/data/meson.build | 10
-rw-r--r--  src/ipa/rkisp1/data/ov5640.yaml | 175
-rw-r--r--  src/ipa/rkisp1/data/uncalibrated.yaml | 8
-rw-r--r--  src/ipa/rkisp1/ipa_context.cpp | 63
-rw-r--r--  src/ipa/rkisp1/ipa_context.h | 25
-rw-r--r--  src/ipa/rkisp1/meson.build | 1
-rw-r--r--  src/ipa/rkisp1/module.h | 27
-rw-r--r--  src/ipa/rkisp1/rkisp1.cpp | 90
-rw-r--r--  src/libcamera/base/backtrace.cpp | 12
-rw-r--r--  src/libcamera/base/meson.build | 4
-rw-r--r--  src/libcamera/base/utils.cpp | 21
-rw-r--r--  src/libcamera/bayer_format.cpp | 2
-rw-r--r--  src/libcamera/camera.cpp | 67
-rw-r--r--  src/libcamera/camera_manager.cpp | 2
-rw-r--r--  src/libcamera/camera_sensor.cpp | 4
-rw-r--r--  src/libcamera/color_space.cpp | 429
-rw-r--r--  src/libcamera/control_ids.yaml | 16
-rw-r--r--  src/libcamera/control_serializer.cpp | 28
-rw-r--r--  src/libcamera/controls.cpp | 32
-rw-r--r--  src/libcamera/delayed_controls.cpp | 14
-rw-r--r--  src/libcamera/device_enumerator.cpp | 2
-rw-r--r--  src/libcamera/formats.cpp | 344
-rw-r--r--  src/libcamera/formats.yaml | 6
-rw-r--r--  src/libcamera/ipa_manager.cpp | 5
-rw-r--r--  src/libcamera/media_device.cpp | 16
-rw-r--r--  src/libcamera/meson.build | 18
-rw-r--r--  src/libcamera/pipeline/ipu3/cio2.cpp | 2
-rw-r--r--  src/libcamera/pipeline/ipu3/imgu.cpp | 2
-rw-r--r--  src/libcamera/pipeline/ipu3/ipu3.cpp | 64
-rw-r--r--  src/libcamera/pipeline/raspberrypi/dma_heaps.cpp | 2
-rw-r--r--  src/libcamera/pipeline/raspberrypi/dma_heaps.h | 2
-rw-r--r--  src/libcamera/pipeline/raspberrypi/raspberrypi.cpp | 157
-rw-r--r--  src/libcamera/pipeline/raspberrypi/rpi_stream.cpp | 2
-rw-r--r--  src/libcamera/pipeline/raspberrypi/rpi_stream.h | 3
-rw-r--r--  src/libcamera/pipeline/rkisp1/rkisp1.cpp | 159
-rw-r--r--  src/libcamera/pipeline/rkisp1/rkisp1_path.cpp | 6
-rw-r--r--  src/libcamera/pipeline/simple/converter.cpp | 14
-rw-r--r--  src/libcamera/pipeline/simple/simple.cpp | 175
-rw-r--r--  src/libcamera/pipeline/uvcvideo/uvcvideo.cpp | 217
-rw-r--r--  src/libcamera/pipeline/vimc/vimc.cpp | 10
-rw-r--r--  src/libcamera/pipeline_handler.cpp | 67
-rw-r--r--  src/libcamera/property_ids.yaml | 6
-rw-r--r--  src/libcamera/pub_key.cpp | 50
-rw-r--r--  src/libcamera/request.cpp | 15
-rw-r--r--  src/libcamera/transform.cpp | 2
-rw-r--r--  src/libcamera/v4l2_device.cpp | 45
-rw-r--r--  src/libcamera/v4l2_pixelformat.cpp | 48
-rw-r--r--  src/libcamera/v4l2_subdevice.cpp | 354
-rw-r--r--  src/libcamera/v4l2_videodevice.cpp | 97
-rw-r--r--  src/libcamera/yaml_parser.cpp | 352
-rw-r--r--  src/meson.build | 3
-rwxr-xr-x  src/py/cam/cam.py | 8
-rwxr-xr-x  src/py/examples/simple-cam.py | 5
-rwxr-xr-x  src/py/examples/simple-capture.py | 12
-rwxr-xr-x  src/py/examples/simple-continuous-capture.py | 5
-rw-r--r--  src/py/libcamera/meson.build | 5
-rw-r--r--  src/py/libcamera/py_camera_manager.cpp | 131
-rw-r--r--  src/py/libcamera/py_camera_manager.h | 45
-rw-r--r--  src/py/libcamera/py_helpers.cpp | 97
-rw-r--r--  src/py/libcamera/py_helpers.h | 13
-rw-r--r--  src/py/libcamera/py_main.cpp | 205
-rw-r--r--  src/py/libcamera/py_main.h | 14
-rw-r--r--  src/qcam/assets/shader/YUV_2_planes.frag | 29
-rw-r--r--  src/qcam/assets/shader/YUV_3_planes.frag | 27
-rw-r--r--  src/qcam/assets/shader/YUV_packed.frag | 17
-rw-r--r--  src/qcam/assets/shader/bayer_8.frag | 3
-rw-r--r--  src/qcam/cam_select_dialog.cpp | 111
-rw-r--r--  src/qcam/cam_select_dialog.h | 47
-rw-r--r--  src/qcam/dng_writer.cpp | 45
-rw-r--r--  src/qcam/dng_writer.h | 2
-rw-r--r--  src/qcam/format_converter.cpp | 4
-rw-r--r--  src/qcam/main_window.cpp | 83
-rw-r--r--  src/qcam/main_window.h | 29
-rw-r--r--  src/qcam/meson.build | 2
-rw-r--r--  src/qcam/viewfinder.h | 2
-rw-r--r--  src/qcam/viewfinder_gl.cpp | 84
-rw-r--r--  src/qcam/viewfinder_gl.h | 3
-rw-r--r--  src/qcam/viewfinder_qt.cpp | 14
-rw-r--r--  src/qcam/viewfinder_qt.h | 1
-rw-r--r--  src/v4l2/v4l2_camera_proxy.cpp | 6
259 files changed, 14598 insertions, 9128 deletions
diff --git a/src/android/camera_capabilities.cpp b/src/android/camera_capabilities.cpp
index 6f197eb8..64bd8dde 100644
--- a/src/android/camera_capabilities.cpp
+++ b/src/android/camera_capabilities.cpp
@@ -31,13 +31,20 @@ namespace {
/*
* \var camera3Resolutions
- * \brief The list of image resolutions defined as mandatory to be supported by
- * the Android Camera3 specification
+ * \brief The list of image resolutions commonly supported by Android
+ *
+ * The following are defined as mandatory to be supported by the Android
+ * Camera3 specification: (320x240), (640x480), (1280x720), (1920x1080).
+ *
+ * The following 4:3 resolutions are defined as optional, but commonly
+ * supported by Android devices: (1280x960), (1600x1200).
*/
const std::vector<Size> camera3Resolutions = {
{ 320, 240 },
{ 640, 480 },
{ 1280, 720 },
+ { 1280, 960 },
+ { 1600, 1200 },
{ 1920, 1080 }
};
@@ -492,8 +499,8 @@ int CameraCapabilities::initializeStreamConfigurations()
/*
* Build the list of supported image resolutions.
*
- * The resolutions listed in camera3Resolution are mandatory to be
- * supported, up to the camera maximum resolution.
+ * The resolutions listed in camera3Resolutions are supported, up to the
+ * camera maximum resolution.
*
* Augment the list by adding resolutions calculated from the camera
* maximum one.
@@ -687,6 +694,14 @@ int CameraCapabilities::initializeStreamConfigurations()
minFrameDuration = minFrameDurationCap;
}
+ /*
+ * Calculate FPS as CTS does and adjust the minimum
+ * frame duration accordingly: see
+ * Camera2SurfaceViewTestCase.java:getSuitableFpsRangeForDuration()
+ */
+ minFrameDuration =
+ 1e9 / static_cast<unsigned int>(floor(1e9 / minFrameDuration + 0.05f));
+
streamConfigurations_.push_back({
res, androidFormat, minFrameDuration, maxFrameDuration,
});
@@ -1042,18 +1057,18 @@ int CameraCapabilities::initializeStaticMetadata()
/* Sensor static metadata. */
std::array<int32_t, 2> pixelArraySize;
{
- const Size &size = properties.get(properties::PixelArraySize);
+ const Size &size = properties.get(properties::PixelArraySize).value_or(Size{});
pixelArraySize[0] = size.width;
pixelArraySize[1] = size.height;
staticMetadata_->addEntry(ANDROID_SENSOR_INFO_PIXEL_ARRAY_SIZE,
pixelArraySize);
}
- if (properties.contains(properties::UnitCellSize)) {
- const Size &cellSize = properties.get<Size>(properties::UnitCellSize);
+ const auto &cellSize = properties.get<Size>(properties::UnitCellSize);
+ if (cellSize) {
std::array<float, 2> physicalSize{
- cellSize.width * pixelArraySize[0] / 1e6f,
- cellSize.height * pixelArraySize[1] / 1e6f
+ cellSize->width * pixelArraySize[0] / 1e6f,
+ cellSize->height * pixelArraySize[1] / 1e6f
};
staticMetadata_->addEntry(ANDROID_SENSOR_INFO_PHYSICAL_SIZE,
physicalSize);
@@ -1061,7 +1076,7 @@ int CameraCapabilities::initializeStaticMetadata()
{
const Span<const Rectangle> &rects =
- properties.get(properties::PixelArrayActiveAreas);
+ properties.get(properties::PixelArrayActiveAreas).value_or(Span<const Rectangle>{});
std::vector<int32_t> data{
static_cast<int32_t>(rects[0].x),
static_cast<int32_t>(rects[0].y),
@@ -1079,11 +1094,10 @@ int CameraCapabilities::initializeStaticMetadata()
sensitivityRange);
/* Report the color filter arrangement if the camera reports it. */
- if (properties.contains(properties::draft::ColorFilterArrangement)) {
- uint8_t filterArr = properties.get(properties::draft::ColorFilterArrangement);
+ const auto &filterArr = properties.get(properties::draft::ColorFilterArrangement);
+ if (filterArr)
staticMetadata_->addEntry(ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT,
- filterArr);
- }
+ *filterArr);
const auto &exposureInfo = controlsInfo.find(&controls::ExposureTime);
if (exposureInfo != controlsInfo.end()) {
@@ -1287,12 +1301,10 @@ int CameraCapabilities::initializeStaticMetadata()
* recording profile. Inspecting the Intel IPU3 HAL
* implementation confirms this but no reference has been found
* in the metadata documentation.
- *
- * Calculate FPS as CTS does: see
- * Camera2SurfaceViewTestCase.java:getSuitableFpsRangeForDuration()
*/
- unsigned int fps = static_cast<unsigned int>
- (floor(1e9 / entry.minFrameDurationNsec + 0.05f));
+ unsigned int fps =
+ static_cast<unsigned int>(floor(1e9 / entry.minFrameDurationNsec));
+
if (entry.androidFormat != HAL_PIXEL_FORMAT_BLOB && fps < 30)
continue;
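As a worked example of the CTS-style rounding introduced above (illustrative numbers, not taken from the patch): a mode with minFrameDuration = 33366600 ns gives 1e9 / 33366600 ≈ 29.97 fps, floor(29.97 + 0.05) = 30, and the adjusted duration becomes 1e9 / 30 ≈ 33333333 ns, i.e. an exact 30 fps entry. Without the 0.05 bias the same mode would round down to 29 fps and be dropped for non-BLOB formats by the fps < 30 check just above.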
diff --git a/src/android/camera_device.cpp b/src/android/camera_device.cpp
index 8c039fb9..b20e389b 100644
--- a/src/android/camera_device.cpp
+++ b/src/android/camera_device.cpp
@@ -305,9 +305,9 @@ int CameraDevice::initialize(const CameraConfigData *cameraConfigData)
*/
const ControlList &properties = camera_->properties();
- if (properties.contains(properties::Location)) {
- int32_t location = properties.get(properties::Location);
- switch (location) {
+ const auto &location = properties.get(properties::Location);
+ if (location) {
+ switch (*location) {
case properties::CameraLocationFront:
facing_ = CAMERA_FACING_FRONT;
break;
@@ -355,9 +355,9 @@ int CameraDevice::initialize(const CameraConfigData *cameraConfigData)
* value for clockwise direction as required by the Android orientation
* metadata.
*/
- if (properties.contains(properties::Rotation)) {
- int rotation = properties.get(properties::Rotation);
- orientation_ = (360 - rotation) % 360;
+ const auto &rotation = properties.get(properties::Rotation);
+ if (rotation) {
+ orientation_ = (360 - *rotation) % 360;
if (cameraConfigData && cameraConfigData->rotation != -1 &&
orientation_ != cameraConfigData->rotation) {
LOG(HAL, Warning)
@@ -1181,7 +1181,8 @@ void CameraDevice::requestComplete(Request *request)
* as soon as possible, earlier than request completion time.
*/
uint64_t sensorTimestamp = static_cast<uint64_t>(request->metadata()
- .get(controls::SensorTimestamp));
+ .get(controls::SensorTimestamp)
+ .value_or(0));
notifyShutter(descriptor->frameNumber_, sensorTimestamp);
LOG(HAL, Debug) << "Request " << request->cookie() << " completed with "
@@ -1560,29 +1561,27 @@ CameraDevice::getResultMetadata(const Camera3RequestDescriptor &descriptor) cons
rolling_shutter_skew);
/* Add metadata tags reported by libcamera. */
- const int64_t timestamp = metadata.get(controls::SensorTimestamp);
+ const int64_t timestamp = metadata.get(controls::SensorTimestamp).value_or(0);
resultMetadata->addEntry(ANDROID_SENSOR_TIMESTAMP, timestamp);
- if (metadata.contains(controls::draft::PipelineDepth)) {
- uint8_t pipeline_depth =
- metadata.get<int32_t>(controls::draft::PipelineDepth);
+ const auto &pipelineDepth = metadata.get(controls::draft::PipelineDepth);
+ if (pipelineDepth)
resultMetadata->addEntry(ANDROID_REQUEST_PIPELINE_DEPTH,
- pipeline_depth);
- }
+ *pipelineDepth);
- if (metadata.contains(controls::ExposureTime)) {
- int64_t exposure = metadata.get(controls::ExposureTime) * 1000ULL;
- resultMetadata->addEntry(ANDROID_SENSOR_EXPOSURE_TIME, exposure);
- }
+ const auto &exposureTime = metadata.get(controls::ExposureTime);
+ if (exposureTime)
+ resultMetadata->addEntry(ANDROID_SENSOR_EXPOSURE_TIME,
+ *exposureTime * 1000ULL);
- if (metadata.contains(controls::FrameDuration)) {
- int64_t duration = metadata.get(controls::FrameDuration) * 1000;
+ const auto &frameDuration = metadata.get(controls::FrameDuration);
+ if (frameDuration)
resultMetadata->addEntry(ANDROID_SENSOR_FRAME_DURATION,
- duration);
- }
+ *frameDuration * 1000);
- if (metadata.contains(controls::ScalerCrop)) {
- Rectangle crop = metadata.get(controls::ScalerCrop);
+ const auto &scalerCrop = metadata.get(controls::ScalerCrop);
+ if (scalerCrop) {
+ const Rectangle &crop = *scalerCrop;
int32_t cropRect[] = {
crop.x, crop.y, static_cast<int32_t>(crop.width),
static_cast<int32_t>(crop.height),
@@ -1590,12 +1589,10 @@ CameraDevice::getResultMetadata(const Camera3RequestDescriptor &descriptor) cons
resultMetadata->addEntry(ANDROID_SCALER_CROP_REGION, cropRect);
}
- if (metadata.contains(controls::draft::TestPatternMode)) {
- const int32_t testPatternMode =
- metadata.get(controls::draft::TestPatternMode);
+ const auto &testPatternMode = metadata.get(controls::draft::TestPatternMode);
+ if (testPatternMode)
resultMetadata->addEntry(ANDROID_SENSOR_TEST_PATTERN_MODE,
- testPatternMode);
- }
+ *testPatternMode);
/*
* Return the result metadata pack even is not valid: get() will return
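The camera_device.cpp and camera_hal_manager.cpp changes all follow the same pattern: ControlList::get() now returns a std::optional<> in a single lookup instead of requiring a separate contains() check. A minimal sketch of caller code before and after the API change (processExposure() is a hypothetical placeholder, not part of libcamera or of this patch):

	/* Before: presence check followed by an unchecked get(). */
	if (metadata.contains(controls::ExposureTime))
		processExposure(metadata.get(controls::ExposureTime));

	/* After: a single lookup returning std::optional<int32_t>. */
	const auto &exposure = metadata.get(controls::ExposureTime);
	if (exposure)
		processExposure(*exposure);

	/* Or, when a fallback value is acceptable. */
	processExposure(metadata.get(controls::ExposureTime).value_or(0));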
diff --git a/src/android/camera_hal_manager.cpp b/src/android/camera_hal_manager.cpp
index 5f7bfe26..7512cc4e 100644
--- a/src/android/camera_hal_manager.cpp
+++ b/src/android/camera_hal_manager.cpp
@@ -228,11 +228,7 @@ void CameraHalManager::cameraRemoved(std::shared_ptr<Camera> cam)
int32_t CameraHalManager::cameraLocation(const Camera *cam)
{
- const ControlList &properties = cam->properties();
- if (!properties.contains(properties::Location))
- return -1;
-
- return properties.get(properties::Location);
+ return cam->properties().get(properties::Location).value_or(-1);
}
CameraDevice *CameraHalManager::cameraDeviceFromHalId(unsigned int id)
diff --git a/src/android/jpeg/exif.cpp b/src/android/jpeg/exif.cpp
index 3220b458..6b1d0f1f 100644
--- a/src/android/jpeg/exif.cpp
+++ b/src/android/jpeg/exif.cpp
@@ -430,16 +430,13 @@ void Exif::setOrientation(int orientation)
setShort(EXIF_IFD_0, EXIF_TAG_ORIENTATION, value);
}
-/*
- * The thumbnail data should remain valid until the Exif object is destroyed.
- * Failing to do so, might result in no thumbnail data being set even after a
- * call to Exif::setThumbnail().
- */
-void Exif::setThumbnail(Span<const unsigned char> thumbnail,
+void Exif::setThumbnail(std::vector<unsigned char> &&thumbnail,
Compression compression)
{
- data_->data = const_cast<unsigned char *>(thumbnail.data());
- data_->size = thumbnail.size();
+ thumbnailData_ = std::move(thumbnail);
+
+ data_->data = thumbnailData_.data();
+ data_->size = thumbnailData_.size();
setShort(EXIF_IFD_0, EXIF_TAG_COMPRESSION, compression);
}
diff --git a/src/android/jpeg/exif.h b/src/android/jpeg/exif.h
index 2ff8fb78..e68716f3 100644
--- a/src/android/jpeg/exif.h
+++ b/src/android/jpeg/exif.h
@@ -10,6 +10,7 @@
#include <chrono>
#include <string>
#include <time.h>
+#include <vector>
#include <libexif/exif-data.h>
@@ -60,7 +61,7 @@ public:
void setOrientation(int orientation);
void setSize(const libcamera::Size &size);
- void setThumbnail(libcamera::Span<const unsigned char> thumbnail,
+ void setThumbnail(std::vector<unsigned char> &&thumbnail,
Compression compression);
void setTimestamp(time_t timestamp, std::chrono::milliseconds msec);
@@ -106,4 +107,6 @@ private:
unsigned char *exifData_;
unsigned int size_;
+
+ std::vector<unsigned char> thumbnailData_;
};
diff --git a/src/android/jpeg/post_processor_jpeg.cpp b/src/android/jpeg/post_processor_jpeg.cpp
index d72ebc3c..0cf56716 100644
--- a/src/android/jpeg/post_processor_jpeg.cpp
+++ b/src/android/jpeg/post_processor_jpeg.cpp
@@ -166,7 +166,7 @@ void PostProcessorJpeg::process(Camera3RequestDescriptor::StreamBuffer *streamBu
std::vector<unsigned char> thumbnail;
generateThumbnail(source, thumbnailSize, quality, &thumbnail);
if (!thumbnail.empty())
- exif.setThumbnail(thumbnail, Exif::Compression::JPEG);
+ exif.setThumbnail(std::move(thumbnail), Exif::Compression::JPEG);
}
resultMetadata->addEntry(ANDROID_JPEG_THUMBNAIL_SIZE, data, 2);
diff --git a/src/cam/capture-script.yaml b/src/cam/capture-script.yaml
index 6a749bc6..7118865e 100644
--- a/src/cam/capture-script.yaml
+++ b/src/cam/capture-script.yaml
@@ -4,6 +4,19 @@
#
# A capture script allows to associate a list of controls and their values
# to frame numbers.
+#
+# The script allows defining a list of frames associated with controls
+# and an optional list of properties that can control the script behaviour.
+
+# properties:
+# # Repeat the controls every 'idx' frames.
+# - loop: idx
+#
+# # List of frame number with associated a list of controls to be applied
+# frames:
+# - frame-number:
+# Control1: value1
+# Control2: value2
# \todo Formally define the capture script structure with a schema
@@ -12,10 +25,16 @@
# libcamera::controls:: enumeration
# - Controls not supported by the camera currently operated are ignored
# - Frame numbers shall be monotonically incrementing, gaps are allowed
+# - If a loop limit is specified, frame numbers in the 'frames' list shall be
+# less than the loop control
+
+# Example: Turn brightness up and down every 460 frames
+
+properties:
+ - loop: 460
-# Example:
frames:
- - 1:
+ - 0:
Brightness: 0.0
- 40:
@@ -44,3 +63,9 @@ frames:
- 340:
Brightness: -0.8
+
+ - 380:
+ Brightness: -0.4
+
+ - 420:
+ Brightness: -0.2
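With the loop property set to 460, the script now repeats indefinitely: as implemented in the capture_script.cpp change below, the lookup index is frame % 460, so frame 840 maps to entry 380 (Brightness: -0.4) and frame 920 wraps back to entry 0. This is also why frame numbers in the list must stay below the loop value.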
diff --git a/src/cam/capture_script.cpp b/src/cam/capture_script.cpp
index 9f22d5f7..5a27361c 100644
--- a/src/cam/capture_script.cpp
+++ b/src/cam/capture_script.cpp
@@ -15,7 +15,7 @@ using namespace libcamera;
CaptureScript::CaptureScript(std::shared_ptr<Camera> camera,
const std::string &fileName)
- : camera_(camera), valid_(false)
+ : camera_(camera), loop_(0), valid_(false)
{
FILE *fh = fopen(fileName.c_str(), "r");
if (!fh) {
@@ -44,8 +44,13 @@ CaptureScript::CaptureScript(std::shared_ptr<Camera> camera,
const ControlList &CaptureScript::frameControls(unsigned int frame)
{
static ControlList controls{};
+ unsigned int idx = frame;
- auto it = frameControls_.find(frame);
+ /* If we loop, repeat the controls every 'loop_' frames. */
+ if (loop_)
+ idx = frame % loop_;
+
+ auto it = frameControls_.find(idx);
if (it == frameControls_.end())
return controls;
@@ -149,8 +154,14 @@ int CaptureScript::parseScript(FILE *script)
std::string section = eventScalarValue(event);
- if (section == "frames") {
- parseFrames();
+ if (section == "properties") {
+ ret = parseProperties();
+ if (ret)
+ return ret;
+ } else if (section == "frames") {
+ ret = parseFrames();
+ if (ret)
+ return ret;
} else {
std::cerr << "Unsupported section '" << section << "'"
<< std::endl;
@@ -159,6 +170,65 @@ int CaptureScript::parseScript(FILE *script)
}
}
+int CaptureScript::parseProperty()
+{
+ EventPtr event = nextEvent(YAML_MAPPING_START_EVENT);
+ if (!event)
+ return -EINVAL;
+
+ std::string prop = parseScalar();
+ if (prop.empty())
+ return -EINVAL;
+
+ if (prop == "loop") {
+ event = nextEvent();
+ if (!event)
+ return -EINVAL;
+
+ std::string value = eventScalarValue(event);
+ if (value.empty())
+ return -EINVAL;
+
+ loop_ = atoi(value.c_str());
+ if (!loop_) {
+ std::cerr << "Invalid loop limit '" << loop_ << "'"
+ << std::endl;
+ return -EINVAL;
+ }
+ } else {
+ std::cerr << "Unsupported property '" << prop << "'" << std::endl;
+ return -EINVAL;
+ }
+
+ event = nextEvent(YAML_MAPPING_END_EVENT);
+ if (!event)
+ return -EINVAL;
+
+ return 0;
+}
+
+int CaptureScript::parseProperties()
+{
+ EventPtr event = nextEvent(YAML_SEQUENCE_START_EVENT);
+ if (!event)
+ return -EINVAL;
+
+ while (1) {
+ if (event->type == YAML_SEQUENCE_END_EVENT)
+ return 0;
+
+ int ret = parseProperty();
+ if (ret)
+ return ret;
+
+ event = nextEvent();
+ if (!event)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
int CaptureScript::parseFrames()
{
EventPtr event = nextEvent(YAML_SEQUENCE_START_EVENT);
@@ -189,6 +259,12 @@ int CaptureScript::parseFrame(EventPtr event)
return -EINVAL;
unsigned int frameId = atoi(key.c_str());
+ if (loop_ && frameId >= loop_) {
+ std::cerr
+ << "Frame id (" << frameId << ") shall be smaller than"
+ << "loop limit (" << loop_ << ")" << std::endl;
+ return -EINVAL;
+ }
event = nextEvent(YAML_MAPPING_START_EVENT);
if (!event)
@@ -232,12 +308,15 @@ int CaptureScript::parseControl(EventPtr event, ControlList &controls)
return -EINVAL;
}
- std::string value = parseScalar();
- if (value.empty())
+ const ControlId *controlId = it->second;
+
+ ControlValue val = unpackControl(controlId);
+ if (val.isNone()) {
+ std::cerr << "Error unpacking control '" << name << "'"
+ << std::endl;
return -EINVAL;
+ }
- const ControlId *controlId = it->second;
- ControlValue val = unpackControl(controlId, value);
controls.set(controlId->id(), val);
return 0;
@@ -252,6 +331,104 @@ std::string CaptureScript::parseScalar()
return eventScalarValue(event);
}
+ControlValue CaptureScript::parseRectangles()
+{
+ std::vector<libcamera::Rectangle> rectangles;
+
+ std::vector<std::vector<std::string>> arrays = parseArrays();
+ if (arrays.empty())
+ return {};
+
+ for (const std::vector<std::string> &values : arrays) {
+ if (values.size() != 4) {
+ std::cerr << "Error parsing Rectangle: expected "
+ << "array with 4 parameters" << std::endl;
+ return {};
+ }
+
+ Rectangle rect = unpackRectangle(values);
+ rectangles.push_back(rect);
+ }
+
+ ControlValue controlValue;
+ controlValue.set(Span<const Rectangle>(rectangles));
+
+ return controlValue;
+}
+
+std::vector<std::vector<std::string>> CaptureScript::parseArrays()
+{
+ EventPtr event = nextEvent(YAML_SEQUENCE_START_EVENT);
+ if (!event)
+ return {};
+
+ event = nextEvent();
+ if (!event)
+ return {};
+
+ std::vector<std::vector<std::string>> valueArrays;
+
+ /* Parse single array. */
+ if (event->type == YAML_SCALAR_EVENT) {
+ std::string firstValue = eventScalarValue(event);
+ if (firstValue.empty())
+ return {};
+
+ std::vector<std::string> remaining = parseSingleArray();
+
+ std::vector<std::string> values = { firstValue };
+ values.insert(std::end(values),
+ std::begin(remaining), std::end(remaining));
+ valueArrays.push_back(values);
+
+ return valueArrays;
+ }
+
+ /* Parse array of arrays. */
+ while (1) {
+ switch (event->type) {
+ case YAML_SEQUENCE_START_EVENT: {
+ std::vector<std::string> values = parseSingleArray();
+ valueArrays.push_back(values);
+ break;
+ }
+ case YAML_SEQUENCE_END_EVENT:
+ return valueArrays;
+ default:
+ return {};
+ }
+
+ event = nextEvent();
+ if (!event)
+ return {};
+ }
+}
+
+std::vector<std::string> CaptureScript::parseSingleArray()
+{
+ std::vector<std::string> values;
+
+ while (1) {
+ EventPtr event = nextEvent();
+ if (!event)
+ return {};
+
+ switch (event->type) {
+ case YAML_SCALAR_EVENT: {
+ std::string value = eventScalarValue(event);
+ if (value.empty())
+ return {};
+ values.push_back(value);
+ break;
+ }
+ case YAML_SEQUENCE_END_EVENT:
+ return values;
+ default:
+ return {};
+ }
+ }
+}
+
void CaptureScript::unpackFailure(const ControlId *id, const std::string &repr)
{
static const std::map<unsigned int, const char *> typeNames = {
@@ -277,9 +454,24 @@ void CaptureScript::unpackFailure(const ControlId *id, const std::string &repr)
<< typeName << " control " << id->name() << std::endl;
}
-ControlValue CaptureScript::unpackControl(const ControlId *id,
- const std::string &repr)
+ControlValue CaptureScript::unpackControl(const ControlId *id)
{
+ /* Parse complex types. */
+ switch (id->type()) {
+ case ControlTypeRectangle:
+ return parseRectangles();
+ case ControlTypeSize:
+ /* \todo Parse Sizes. */
+ return {};
+ default:
+ break;
+ }
+
+ /* Parse basic types represented by a single scalar. */
+ const std::string repr = parseScalar();
+ if (repr.empty())
+ return {};
+
ControlValue value{};
switch (id->type()) {
@@ -324,13 +516,20 @@ ControlValue CaptureScript::unpackControl(const ControlId *id,
value.set<std::string>(repr);
break;
}
- case ControlTypeRectangle:
- /* \todo Parse rectangles. */
- break;
- case ControlTypeSize:
- /* \todo Parse Sizes. */
+ default:
+ std::cerr << "Unsupported control type" << std::endl;
break;
}
return value;
}
+
+libcamera::Rectangle CaptureScript::unpackRectangle(const std::vector<std::string> &strVec)
+{
+ int x = strtol(strVec[0].c_str(), NULL, 10);
+ int y = strtol(strVec[1].c_str(), NULL, 10);
+ unsigned int width = strtoul(strVec[2].c_str(), NULL, 10);
+ unsigned int height = strtoul(strVec[3].c_str(), NULL, 10);
+
+ return Rectangle(x, y, width, height);
+}
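With the parseRectangles()/parseArrays() additions above, a Rectangle control can be written in the script either as a single [x, y, width, height] array or, for array controls, as a list of such arrays. A minimal sketch of a frame entry (ScalerCrop is only an example of a Rectangle-typed control; any rectangle control supported by the camera is parsed the same way):

	frames:
	  - 0:
	      ScalerCrop: [ 100, 200, 1280, 720 ]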
diff --git a/src/cam/capture_script.h b/src/cam/capture_script.h
index 8b4f8f62..7a0ddebb 100644
--- a/src/cam/capture_script.h
+++ b/src/cam/capture_script.h
@@ -40,6 +40,7 @@ private:
std::map<unsigned int, libcamera::ControlList> frameControls_;
std::shared_ptr<libcamera::Camera> camera_;
yaml_parser_t parser_;
+ unsigned int loop_;
bool valid_;
EventPtr nextEvent(yaml_event_type_t expectedType = YAML_NO_EVENT);
@@ -49,14 +50,19 @@ private:
int parseScript(FILE *script);
+ int parseProperties();
+ int parseProperty();
int parseFrames();
int parseFrame(EventPtr event);
int parseControl(EventPtr event, libcamera::ControlList &controls);
std::string parseScalar();
+ libcamera::ControlValue parseRectangles();
+ std::vector<std::vector<std::string>> parseArrays();
+ std::vector<std::string> parseSingleArray();
void unpackFailure(const libcamera::ControlId *id,
const std::string &repr);
- libcamera::ControlValue unpackControl(const libcamera::ControlId *id,
- const std::string &repr);
+ libcamera::ControlValue unpackControl(const libcamera::ControlId *id);
+ libcamera::Rectangle unpackRectangle(const std::vector<std::string> &strVec);
};
diff --git a/src/cam/drm.cpp b/src/cam/drm.cpp
index fbfc0a59..b0602c94 100644
--- a/src/cam/drm.cpp
+++ b/src/cam/drm.cpp
@@ -377,6 +377,8 @@ int AtomicRequest::commit(unsigned int flags)
drmFlags |= DRM_MODE_ATOMIC_ALLOW_MODESET;
if (flags & FlagAsync)
drmFlags |= DRM_MODE_PAGE_FLIP_EVENT | DRM_MODE_ATOMIC_NONBLOCK;
+ if (flags & FlagTestOnly)
+ drmFlags |= DRM_MODE_ATOMIC_TEST_ONLY;
return drmModeAtomicCommit(dev_->fd(), request_, drmFlags, this);
}
diff --git a/src/cam/drm.h b/src/cam/drm.h
index 655a7509..ebaea04d 100644
--- a/src/cam/drm.h
+++ b/src/cam/drm.h
@@ -251,6 +251,7 @@ public:
enum Flags {
FlagAllowModeset = (1 << 0),
FlagAsync = (1 << 1),
+ FlagTestOnly = (1 << 2),
};
AtomicRequest(Device *dev);
diff --git a/src/cam/kms_sink.cpp b/src/cam/kms_sink.cpp
index 37a3bd50..17e2fa69 100644
--- a/src/cam/kms_sink.cpp
+++ b/src/cam/kms_sink.cpp
@@ -284,6 +284,94 @@ int KMSSink::stop()
return FrameSink::stop();
}
+bool KMSSink::testModeSet(DRM::FrameBuffer *drmBuffer,
+ const libcamera::Rectangle &src,
+ const libcamera::Rectangle &dst)
+{
+ DRM::AtomicRequest drmRequest{ &dev_ };
+
+ drmRequest.addProperty(connector_, "CRTC_ID", crtc_->id());
+
+ drmRequest.addProperty(crtc_, "ACTIVE", 1);
+ drmRequest.addProperty(crtc_, "MODE_ID", mode_->toBlob(&dev_));
+
+ drmRequest.addProperty(plane_, "CRTC_ID", crtc_->id());
+ drmRequest.addProperty(plane_, "FB_ID", drmBuffer->id());
+ drmRequest.addProperty(plane_, "SRC_X", src.x << 16);
+ drmRequest.addProperty(plane_, "SRC_Y", src.y << 16);
+ drmRequest.addProperty(plane_, "SRC_W", src.width << 16);
+ drmRequest.addProperty(plane_, "SRC_H", src.height << 16);
+ drmRequest.addProperty(plane_, "CRTC_X", dst.x);
+ drmRequest.addProperty(plane_, "CRTC_Y", dst.y);
+ drmRequest.addProperty(plane_, "CRTC_W", dst.width);
+ drmRequest.addProperty(plane_, "CRTC_H", dst.height);
+
+ return !drmRequest.commit(DRM::AtomicRequest::FlagAllowModeset |
+ DRM::AtomicRequest::FlagTestOnly);
+}
+
+bool KMSSink::setupComposition(DRM::FrameBuffer *drmBuffer)
+{
+ /*
+ * Test composition options, from most to least desirable, to select the
+ * best one.
+ */
+ const libcamera::Rectangle framebuffer{ size_ };
+ const libcamera::Rectangle display{ 0, 0, mode_->hdisplay, mode_->vdisplay };
+
+ /* 1. Scale the frame buffer to full screen, preserving aspect ratio. */
+ libcamera::Rectangle src = framebuffer;
+ libcamera::Rectangle dst = display.size().boundedToAspectRatio(framebuffer.size())
+ .centeredTo(display.center());
+
+ if (testModeSet(drmBuffer, src, dst)) {
+ std::cout << "KMS: full-screen scaled output, square pixels"
+ << std::endl;
+ src_ = src;
+ dst_ = dst;
+ return true;
+ }
+
+ /*
+ * 2. Scale the frame buffer to full screen, without preserving aspect
+ * ratio.
+ */
+ src = framebuffer;
+ dst = display;
+
+ if (testModeSet(drmBuffer, src, dst)) {
+ std::cout << "KMS: full-screen scaled output, non-square pixels"
+ << std::endl;
+ src_ = src;
+ dst_ = dst;
+ return true;
+ }
+
+ /* 3. Center the frame buffer on the display. */
+ src = display.size().centeredTo(framebuffer.center()).boundedTo(framebuffer);
+ dst = framebuffer.size().centeredTo(display.center()).boundedTo(display);
+
+ if (testModeSet(drmBuffer, src, dst)) {
+ std::cout << "KMS: centered output" << std::endl;
+ src_ = src;
+ dst_ = dst;
+ return true;
+ }
+
+ /* 4. Align the frame buffer on the top-left of the display. */
+ src = framebuffer.boundedTo(display);
+ dst = display.boundedTo(framebuffer);
+
+ if (testModeSet(drmBuffer, src, dst)) {
+ std::cout << "KMS: top-left aligned output" << std::endl;
+ src_ = src;
+ dst_ = dst;
+ return true;
+ }
+
+ return false;
+}
+
bool KMSSink::processRequest(libcamera::Request *camRequest)
{
/*
@@ -301,35 +389,41 @@ bool KMSSink::processRequest(libcamera::Request *camRequest)
DRM::FrameBuffer *drmBuffer = iter->second.get();
unsigned int flags = DRM::AtomicRequest::FlagAsync;
- DRM::AtomicRequest *drmRequest = new DRM::AtomicRequest(&dev_);
+ std::unique_ptr<DRM::AtomicRequest> drmRequest =
+ std::make_unique<DRM::AtomicRequest>(&dev_);
drmRequest->addProperty(plane_, "FB_ID", drmBuffer->id());
if (!active_ && !queued_) {
/* Enable the display pipeline on the first frame. */
+ if (!setupComposition(drmBuffer)) {
+ std::cerr << "Failed to setup composition" << std::endl;
+ return true;
+ }
+
drmRequest->addProperty(connector_, "CRTC_ID", crtc_->id());
drmRequest->addProperty(crtc_, "ACTIVE", 1);
drmRequest->addProperty(crtc_, "MODE_ID", mode_->toBlob(&dev_));
drmRequest->addProperty(plane_, "CRTC_ID", crtc_->id());
- drmRequest->addProperty(plane_, "SRC_X", 0 << 16);
- drmRequest->addProperty(plane_, "SRC_Y", 0 << 16);
- drmRequest->addProperty(plane_, "SRC_W", size_.width << 16);
- drmRequest->addProperty(plane_, "SRC_H", size_.height << 16);
- drmRequest->addProperty(plane_, "CRTC_X", 0);
- drmRequest->addProperty(plane_, "CRTC_Y", 0);
- drmRequest->addProperty(plane_, "CRTC_W", size_.width);
- drmRequest->addProperty(plane_, "CRTC_H", size_.height);
+ drmRequest->addProperty(plane_, "SRC_X", src_.x << 16);
+ drmRequest->addProperty(plane_, "SRC_Y", src_.y << 16);
+ drmRequest->addProperty(plane_, "SRC_W", src_.width << 16);
+ drmRequest->addProperty(plane_, "SRC_H", src_.height << 16);
+ drmRequest->addProperty(plane_, "CRTC_X", dst_.x);
+ drmRequest->addProperty(plane_, "CRTC_Y", dst_.y);
+ drmRequest->addProperty(plane_, "CRTC_W", dst_.width);
+ drmRequest->addProperty(plane_, "CRTC_H", dst_.height);
flags |= DRM::AtomicRequest::FlagAllowModeset;
}
- pending_ = std::make_unique<Request>(drmRequest, camRequest);
+ pending_ = std::make_unique<Request>(std::move(drmRequest), camRequest);
std::lock_guard<std::mutex> lock(lock_);
if (!queued_) {
- int ret = drmRequest->commit(flags);
+ int ret = pending_->drmRequest_->commit(flags);
if (ret < 0) {
std::cerr
<< "Failed to commit atomic request: "
diff --git a/src/cam/kms_sink.h b/src/cam/kms_sink.h
index 4a0a872c..76c4e611 100644
--- a/src/cam/kms_sink.h
+++ b/src/cam/kms_sink.h
@@ -38,8 +38,9 @@ private:
class Request
{
public:
- Request(DRM::AtomicRequest *drmRequest, libcamera::Request *camRequest)
- : drmRequest_(drmRequest), camRequest_(camRequest)
+ Request(std::unique_ptr<DRM::AtomicRequest> drmRequest,
+ libcamera::Request *camRequest)
+ : drmRequest_(std::move(drmRequest)), camRequest_(camRequest)
{
}
@@ -49,6 +50,11 @@ private:
int selectPipeline(const libcamera::PixelFormat &format);
int configurePipeline(const libcamera::PixelFormat &format);
+ bool testModeSet(DRM::FrameBuffer *drmBuffer,
+ const libcamera::Rectangle &src,
+ const libcamera::Rectangle &dst);
+ bool setupComposition(DRM::FrameBuffer *drmBuffer);
+
void requestComplete(DRM::AtomicRequest *request);
DRM::Device dev_;
@@ -62,6 +68,9 @@ private:
libcamera::Size size_;
unsigned int stride_;
+ libcamera::Rectangle src_;
+ libcamera::Rectangle dst_;
+
std::map<libcamera::FrameBuffer *, std::unique_ptr<DRM::FrameBuffer>> buffers_;
std::mutex lock_;
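As a worked example of the first composition option tried by setupComposition() (illustrative numbers): with a 1440x1080 frame buffer on a 1920x1080 mode, display.size().boundedToAspectRatio(framebuffer.size()) yields 1440x1080 and centeredTo(display.center()) places it at (240, 0), so the image is pillar-boxed with square pixels. If that test-only commit were rejected, option 2 would stretch the same buffer to the full 1920x1080 with non-square pixels.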
diff --git a/src/cam/main.cpp b/src/cam/main.cpp
index 79875ed7..53c2ffde 100644
--- a/src/cam/main.cpp
+++ b/src/cam/main.cpp
@@ -300,8 +300,9 @@ std::string CamApp::cameraName(const Camera *camera)
* Construct the name from the camera location, model and ID. The model
* is only used if the location isn't present or is set to External.
*/
- if (props.contains(properties::Location)) {
- switch (props.get(properties::Location)) {
+ const auto &location = props.get(properties::Location);
+ if (location) {
+ switch (*location) {
case properties::CameraLocationFront:
addModel = false;
name = "Internal front camera ";
@@ -316,12 +317,14 @@ std::string CamApp::cameraName(const Camera *camera)
}
}
- if (addModel && props.contains(properties::Model)) {
+ if (addModel) {
/*
* If the camera location is not availble use the camera model
* to build the camera name.
*/
- name = "'" + props.get(properties::Model) + "' ";
+ const auto &model = props.get(properties::Model);
+ if (model)
+ name = "'" + *model + "' ";
}
name += "(" + camera->id() + ")";
diff --git a/src/cam/meson.build b/src/cam/meson.build
index 5957ce14..8259239f 100644
--- a/src/cam/meson.build
+++ b/src/cam/meson.build
@@ -24,8 +24,8 @@ cam_sources = files([
cam_cpp_args = []
libdrm = dependency('libdrm', required : false)
+libjpeg = dependency('libjpeg', required : false)
libsdl2 = dependency('SDL2', required : false)
-libsdl2_image = dependency('SDL2_image', required : false)
if libdrm.found()
cam_cpp_args += [ '-DHAVE_KMS' ]
@@ -40,11 +40,11 @@ if libsdl2.found()
cam_sources += files([
'sdl_sink.cpp',
'sdl_texture.cpp',
- 'sdl_texture_yuyv.cpp'
+ 'sdl_texture_yuv.cpp',
])
- if libsdl2_image.found()
- cam_cpp_args += ['-DHAVE_SDL_IMAGE']
+ if libjpeg.found()
+ cam_cpp_args += ['-DHAVE_LIBJPEG']
cam_sources += files([
'sdl_texture_mjpg.cpp'
])
@@ -57,8 +57,8 @@ cam = executable('cam', cam_sources,
libcamera_public,
libdrm,
libevent,
+ libjpeg,
libsdl2,
- libsdl2_image,
libyaml,
],
cpp_args : cam_cpp_args,
diff --git a/src/cam/sdl_sink.cpp b/src/cam/sdl_sink.cpp
index f8e3e95d..ee177227 100644
--- a/src/cam/sdl_sink.cpp
+++ b/src/cam/sdl_sink.cpp
@@ -21,10 +21,10 @@
#include "event_loop.h"
#include "image.h"
-#ifdef HAVE_SDL_IMAGE
+#ifdef HAVE_LIBJPEG
#include "sdl_texture_mjpg.h"
#endif
-#include "sdl_texture_yuyv.h"
+#include "sdl_texture_yuv.h"
using namespace libcamera;
@@ -62,13 +62,18 @@ int SDLSink::configure(const libcamera::CameraConfiguration &config)
rect_.h = cfg.size.height;
switch (cfg.pixelFormat) {
-#ifdef HAVE_SDL_IMAGE
+#ifdef HAVE_LIBJPEG
case libcamera::formats::MJPEG:
texture_ = std::make_unique<SDLTextureMJPG>(rect_);
break;
#endif
+#if SDL_VERSION_ATLEAST(2, 0, 16)
+ case libcamera::formats::NV12:
+ texture_ = std::make_unique<SDLTextureNV12>(rect_, cfg.stride);
+ break;
+#endif
case libcamera::formats::YUYV:
- texture_ = std::make_unique<SDLTextureYUYV>(rect_);
+ texture_ = std::make_unique<SDLTextureYUYV>(rect_, cfg.stride);
break;
default:
std::cerr << "Unsupported pixel format "
@@ -185,16 +190,23 @@ void SDLSink::renderBuffer(FrameBuffer *buffer)
{
Image *image = mappedBuffers_[buffer].get();
- /* \todo Implement support for multi-planar formats. */
- const FrameMetadata::Plane &meta = buffer->metadata().planes()[0];
+ std::vector<Span<const uint8_t>> planes;
+ unsigned int i = 0;
- Span<uint8_t> data = image->data(0);
- if (meta.bytesused > data.size())
- std::cerr << "payload size " << meta.bytesused
- << " larger than plane size " << data.size()
- << std::endl;
+ planes.reserve(buffer->metadata().planes().size());
+
+ for (const FrameMetadata::Plane &meta : buffer->metadata().planes()) {
+ Span<uint8_t> data = image->data(i);
+ if (meta.bytesused > data.size())
+ std::cerr << "payload size " << meta.bytesused
+ << " larger than plane size " << data.size()
+ << std::endl;
+
+ planes.push_back(data);
+ i++;
+ }
- texture_->update(data);
+ texture_->update(planes);
SDL_RenderClear(renderer_);
SDL_RenderCopy(renderer_, texture_->get(), nullptr, nullptr);
diff --git a/src/cam/sdl_texture.cpp b/src/cam/sdl_texture.cpp
index 2ca2add2..e9040bc5 100644
--- a/src/cam/sdl_texture.cpp
+++ b/src/cam/sdl_texture.cpp
@@ -9,9 +9,9 @@
#include <iostream>
-SDLTexture::SDLTexture(const SDL_Rect &rect, SDL_PixelFormatEnum pixelFormat,
- const int pitch)
- : ptr_(nullptr), rect_(rect), pixelFormat_(pixelFormat), pitch_(pitch)
+SDLTexture::SDLTexture(const SDL_Rect &rect, uint32_t pixelFormat,
+ const int stride)
+ : ptr_(nullptr), rect_(rect), pixelFormat_(pixelFormat), stride_(stride)
{
}
diff --git a/src/cam/sdl_texture.h b/src/cam/sdl_texture.h
index 90974798..6ccd85ea 100644
--- a/src/cam/sdl_texture.h
+++ b/src/cam/sdl_texture.h
@@ -7,6 +7,8 @@
#pragma once
+#include <vector>
+
#include <SDL2/SDL.h>
#include "image.h"
@@ -14,16 +16,15 @@
class SDLTexture
{
public:
- SDLTexture(const SDL_Rect &rect, SDL_PixelFormatEnum pixelFormat,
- const int pitch);
+ SDLTexture(const SDL_Rect &rect, uint32_t pixelFormat, const int stride);
virtual ~SDLTexture();
int create(SDL_Renderer *renderer);
- virtual void update(const libcamera::Span<uint8_t> &data) = 0;
+ virtual void update(const std::vector<libcamera::Span<const uint8_t>> &data) = 0;
SDL_Texture *get() const { return ptr_; }
protected:
SDL_Texture *ptr_;
const SDL_Rect rect_;
- const SDL_PixelFormatEnum pixelFormat_;
- const int pitch_;
+ const uint32_t pixelFormat_;
+ const int stride_;
};
diff --git a/src/cam/sdl_texture_mjpg.cpp b/src/cam/sdl_texture_mjpg.cpp
index 69e99ad3..da958e03 100644
--- a/src/cam/sdl_texture_mjpg.cpp
+++ b/src/cam/sdl_texture_mjpg.cpp
@@ -7,19 +7,77 @@
#include "sdl_texture_mjpg.h"
-#include <SDL2/SDL_image.h>
+#include <iostream>
+#include <setjmp.h>
+#include <stdio.h>
+
+#include <jpeglib.h>
using namespace libcamera;
+struct JpegErrorManager : public jpeg_error_mgr {
+ JpegErrorManager()
+ {
+ jpeg_std_error(this);
+ error_exit = errorExit;
+ output_message = outputMessage;
+ }
+
+ static void errorExit(j_common_ptr cinfo)
+ {
+ JpegErrorManager *self =
+ static_cast<JpegErrorManager *>(cinfo->err);
+ longjmp(self->escape_, 1);
+ }
+
+ static void outputMessage([[maybe_unused]] j_common_ptr cinfo)
+ {
+ }
+
+ jmp_buf escape_;
+};
+
SDLTextureMJPG::SDLTextureMJPG(const SDL_Rect &rect)
- : SDLTexture(rect, SDL_PIXELFORMAT_RGB24, 0)
+ : SDLTexture(rect, SDL_PIXELFORMAT_RGB24, rect.w * 3),
+ rgb_(std::make_unique<unsigned char[]>(stride_ * rect.h))
{
}
-void SDLTextureMJPG::update(const Span<uint8_t> &data)
+int SDLTextureMJPG::decompress(Span<const uint8_t> data)
+{
+ struct jpeg_decompress_struct cinfo;
+
+ JpegErrorManager errorManager;
+ if (setjmp(errorManager.escape_)) {
+ /* libjpeg found an error */
+ jpeg_destroy_decompress(&cinfo);
+ std::cerr << "JPEG decompression error" << std::endl;
+ return -EINVAL;
+ }
+
+ cinfo.err = &errorManager;
+ jpeg_create_decompress(&cinfo);
+
+ jpeg_mem_src(&cinfo, data.data(), data.size());
+
+ jpeg_read_header(&cinfo, TRUE);
+
+ jpeg_start_decompress(&cinfo);
+
+ for (int i = 0; cinfo.output_scanline < cinfo.output_height; ++i) {
+ JSAMPROW rowptr = rgb_.get() + i * stride_;
+ jpeg_read_scanlines(&cinfo, &rowptr, 1);
+ }
+
+ jpeg_finish_decompress(&cinfo);
+
+ jpeg_destroy_decompress(&cinfo);
+
+ return 0;
+}
+
+void SDLTextureMJPG::update(const std::vector<libcamera::Span<const uint8_t>> &data)
{
- SDL_RWops *bufferStream = SDL_RWFromMem(data.data(), data.size());
- SDL_Surface *frame = IMG_Load_RW(bufferStream, 0);
- SDL_UpdateTexture(ptr_, nullptr, frame->pixels, frame->pitch);
- SDL_FreeSurface(frame);
+ decompress(data[0]);
+ SDL_UpdateTexture(ptr_, nullptr, rgb_.get(), stride_);
}
diff --git a/src/cam/sdl_texture_mjpg.h b/src/cam/sdl_texture_mjpg.h
index b103f801..814ca79a 100644
--- a/src/cam/sdl_texture_mjpg.h
+++ b/src/cam/sdl_texture_mjpg.h
@@ -13,5 +13,11 @@ class SDLTextureMJPG : public SDLTexture
{
public:
SDLTextureMJPG(const SDL_Rect &rect);
- void update(const libcamera::Span<uint8_t> &data) override;
+
+ void update(const std::vector<libcamera::Span<const uint8_t>> &data) override;
+
+private:
+ int decompress(libcamera::Span<const uint8_t> data);
+
+ std::unique_ptr<unsigned char[]> rgb_;
};
diff --git a/src/cam/sdl_texture_yuv.cpp b/src/cam/sdl_texture_yuv.cpp
new file mode 100644
index 00000000..b29c3b93
--- /dev/null
+++ b/src/cam/sdl_texture_yuv.cpp
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2022, Ideas on Board Oy
+ *
+ * sdl_texture_yuv.cpp - SDL YUV Textures
+ */
+
+#include "sdl_texture_yuv.h"
+
+using namespace libcamera;
+
+#if SDL_VERSION_ATLEAST(2, 0, 16)
+SDLTextureNV12::SDLTextureNV12(const SDL_Rect &rect, unsigned int stride)
+ : SDLTexture(rect, SDL_PIXELFORMAT_NV12, stride)
+{
+}
+
+void SDLTextureNV12::update(const std::vector<libcamera::Span<const uint8_t>> &data)
+{
+ SDL_UpdateNVTexture(ptr_, &rect_, data[0].data(), stride_,
+ data[1].data(), stride_);
+}
+#endif
+
+SDLTextureYUYV::SDLTextureYUYV(const SDL_Rect &rect, unsigned int stride)
+ : SDLTexture(rect, SDL_PIXELFORMAT_YUY2, stride)
+{
+}
+
+void SDLTextureYUYV::update(const std::vector<libcamera::Span<const uint8_t>> &data)
+{
+ SDL_UpdateTexture(ptr_, &rect_, data[0].data(), stride_);
+}
diff --git a/src/cam/sdl_texture_yuv.h b/src/cam/sdl_texture_yuv.h
new file mode 100644
index 00000000..310e4e50
--- /dev/null
+++ b/src/cam/sdl_texture_yuv.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2022, Ideas on Board Oy
+ *
+ * sdl_texture_yuv.h - SDL YUV Textures
+ */
+
+#pragma once
+
+#include "sdl_texture.h"
+
+#if SDL_VERSION_ATLEAST(2, 0, 16)
+class SDLTextureNV12 : public SDLTexture
+{
+public:
+ SDLTextureNV12(const SDL_Rect &rect, unsigned int stride);
+ void update(const std::vector<libcamera::Span<const uint8_t>> &data) override;
+};
+#endif
+
+class SDLTextureYUYV : public SDLTexture
+{
+public:
+ SDLTextureYUYV(const SDL_Rect &rect, unsigned int stride);
+ void update(const std::vector<libcamera::Span<const uint8_t>> &data) override;
+};
diff --git a/src/cam/sdl_texture_yuyv.cpp b/src/cam/sdl_texture_yuyv.cpp
deleted file mode 100644
index cc161b2c..00000000
--- a/src/cam/sdl_texture_yuyv.cpp
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2022, Ideas on Board Oy
- *
- * sdl_texture_yuyv.cpp - SDL Texture YUYV
- */
-
-#include "sdl_texture_yuyv.h"
-
-using namespace libcamera;
-
-SDLTextureYUYV::SDLTextureYUYV(const SDL_Rect &rect)
- : SDLTexture(rect, SDL_PIXELFORMAT_YUY2, 4 * ((rect.w + 1) / 2))
-{
-}
-
-void SDLTextureYUYV::update(const Span<uint8_t> &data)
-{
- SDL_UpdateTexture(ptr_, &rect_, data.data(), pitch_);
-}
diff --git a/src/cam/sdl_texture_yuyv.h b/src/cam/sdl_texture_yuyv.h
deleted file mode 100644
index 9f7c72f0..00000000
--- a/src/cam/sdl_texture_yuyv.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2022, Ideas on Board Oy
- *
- * sdl_texture_yuyv.h - SDL Texture YUYV
- */
-
-#pragma once
-
-#include "sdl_texture.h"
-
-class SDLTextureYUYV : public SDLTexture
-{
-public:
- SDLTextureYUYV(const SDL_Rect &rect);
- void update(const libcamera::Span<uint8_t> &data) override;
-};
diff --git a/src/cam/stream_options.cpp b/src/cam/stream_options.cpp
index 150bd27c..3a5625f5 100644
--- a/src/cam/stream_options.cpp
+++ b/src/cam/stream_options.cpp
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * Copyright (C) 2020, Raspberry Pi (Trading) Ltd.
+ * Copyright (C) 2020, Raspberry Pi Ltd
*
* stream_options.cpp - Helper to parse options for streams
*/
@@ -8,6 +8,8 @@
#include <iostream>
+#include <libcamera/color_space.h>
+
using namespace libcamera;
StreamKeyValueParser::StreamKeyValueParser()
@@ -21,6 +23,8 @@ StreamKeyValueParser::StreamKeyValueParser()
ArgumentRequired);
addOption("pixelformat", OptionString, "Pixel format name",
ArgumentRequired);
+ addOption("colorspace", OptionString, "Color space",
+ ArgumentRequired);
}
KeyValueParser::Options StreamKeyValueParser::parse(const char *arguments)
@@ -96,6 +100,9 @@ int StreamKeyValueParser::updateConfiguration(CameraConfiguration *config,
if (opts.isSet("pixelformat"))
cfg.pixelFormat = PixelFormat::fromString(opts["pixelformat"].toString());
+
+ if (opts.isSet("colorspace"))
+ cfg.colorSpace = ColorSpace::fromString(opts["colorspace"].toString());
}
return 0;
diff --git a/src/cam/stream_options.h b/src/cam/stream_options.h
index d235b77f..35e4e7c0 100644
--- a/src/cam/stream_options.h
+++ b/src/cam/stream_options.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * Copyright (C) 2020, Raspberry Pi (Trading) Ltd.
+ * Copyright (C) 2020, Raspberry Pi Ltd
*
* stream_options.h - Helper to parse options for streams
*/
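The new colorspace key is parsed with ColorSpace::fromString(), so it accepts the canonical libcamera color space names. A hypothetical cam invocation (the value Rec709 is assumed to be one of the names recognised by fromString(); the other keys come from the existing parser):

	cam --camera 1 --stream width=1920,height=1080,pixelformat=NV12,colorspace=Rec709 --capture=10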
diff --git a/src/gstreamer/gstlibcamera-utils.cpp b/src/gstreamer/gstlibcamera-utils.cpp
index 3f242286..244a4a79 100644
--- a/src/gstreamer/gstlibcamera-utils.cpp
+++ b/src/gstreamer/gstlibcamera-utils.cpp
@@ -19,9 +19,21 @@ static struct {
/* Compressed */
{ GST_VIDEO_FORMAT_ENCODED, formats::MJPEG },
- /* RGB */
+ /* RGB16 */
+ { GST_VIDEO_FORMAT_RGB16, formats::RGB565 },
+
+ /* RGB24 */
{ GST_VIDEO_FORMAT_RGB, formats::BGR888 },
{ GST_VIDEO_FORMAT_BGR, formats::RGB888 },
+
+ /* RGB32 */
+ { GST_VIDEO_FORMAT_BGRx, formats::XRGB8888 },
+ { GST_VIDEO_FORMAT_RGBx, formats::XBGR8888 },
+ { GST_VIDEO_FORMAT_xBGR, formats::RGBX8888 },
+ { GST_VIDEO_FORMAT_xRGB, formats::BGRX8888 },
+ { GST_VIDEO_FORMAT_BGRA, formats::ARGB8888 },
+ { GST_VIDEO_FORMAT_RGBA, formats::ABGR8888 },
+ { GST_VIDEO_FORMAT_ABGR, formats::RGBA8888 },
{ GST_VIDEO_FORMAT_ARGB, formats::BGRA8888 },
/* YUV Semiplanar */
@@ -45,6 +57,154 @@ static struct {
/* \todo NV42 is used in libcamera but is not mapped in GStreamer yet. */
};
+static GstVideoColorimetry
+colorimetry_from_colorspace(const ColorSpace &colorSpace)
+{
+ GstVideoColorimetry colorimetry;
+
+ switch (colorSpace.primaries) {
+ case ColorSpace::Primaries::Raw:
+ colorimetry.primaries = GST_VIDEO_COLOR_PRIMARIES_UNKNOWN;
+ break;
+ case ColorSpace::Primaries::Smpte170m:
+ colorimetry.primaries = GST_VIDEO_COLOR_PRIMARIES_SMPTE170M;
+ break;
+ case ColorSpace::Primaries::Rec709:
+ colorimetry.primaries = GST_VIDEO_COLOR_PRIMARIES_BT709;
+ break;
+ case ColorSpace::Primaries::Rec2020:
+ colorimetry.primaries = GST_VIDEO_COLOR_PRIMARIES_BT2020;
+ break;
+ }
+
+ switch (colorSpace.transferFunction) {
+ case ColorSpace::TransferFunction::Linear:
+ colorimetry.transfer = GST_VIDEO_TRANSFER_GAMMA10;
+ break;
+ case ColorSpace::TransferFunction::Srgb:
+ colorimetry.transfer = GST_VIDEO_TRANSFER_SRGB;
+ break;
+ case ColorSpace::TransferFunction::Rec709:
+ colorimetry.transfer = GST_VIDEO_TRANSFER_BT709;
+ break;
+ }
+
+ switch (colorSpace.ycbcrEncoding) {
+ case ColorSpace::YcbcrEncoding::None:
+ colorimetry.matrix = GST_VIDEO_COLOR_MATRIX_RGB;
+ break;
+ case ColorSpace::YcbcrEncoding::Rec601:
+ colorimetry.matrix = GST_VIDEO_COLOR_MATRIX_BT601;
+ break;
+ case ColorSpace::YcbcrEncoding::Rec709:
+ colorimetry.matrix = GST_VIDEO_COLOR_MATRIX_BT709;
+ break;
+ case ColorSpace::YcbcrEncoding::Rec2020:
+ colorimetry.matrix = GST_VIDEO_COLOR_MATRIX_BT2020;
+ break;
+ }
+
+ switch (colorSpace.range) {
+ case ColorSpace::Range::Full:
+ colorimetry.range = GST_VIDEO_COLOR_RANGE_0_255;
+ break;
+ case ColorSpace::Range::Limited:
+ colorimetry.range = GST_VIDEO_COLOR_RANGE_16_235;
+ break;
+ }
+
+ return colorimetry;
+}
+
+static std::optional<ColorSpace>
+colorspace_from_colorimetry(const GstVideoColorimetry &colorimetry)
+{
+ std::optional<ColorSpace> colorspace = ColorSpace::Raw;
+
+ switch (colorimetry.primaries) {
+ case GST_VIDEO_COLOR_PRIMARIES_UNKNOWN:
+ /* Unknown primaries map to raw colorspace in gstreamer */
+ return ColorSpace::Raw;
+ case GST_VIDEO_COLOR_PRIMARIES_SMPTE170M:
+ colorspace->primaries = ColorSpace::Primaries::Smpte170m;
+ break;
+ case GST_VIDEO_COLOR_PRIMARIES_BT709:
+ colorspace->primaries = ColorSpace::Primaries::Rec709;
+ break;
+ case GST_VIDEO_COLOR_PRIMARIES_BT2020:
+ colorspace->primaries = ColorSpace::Primaries::Rec2020;
+ break;
+ default:
+ GST_WARNING("Colorimetry primaries %d not mapped in gstlibcamera",
+ colorimetry.primaries);
+ return std::nullopt;
+ }
+
+ switch (colorimetry.transfer) {
+ /* Transfer function mappings inspired from v4l2src plugin */
+ case GST_VIDEO_TRANSFER_GAMMA18:
+ case GST_VIDEO_TRANSFER_GAMMA20:
+ case GST_VIDEO_TRANSFER_GAMMA22:
+ case GST_VIDEO_TRANSFER_GAMMA28:
+ GST_WARNING("GAMMA 18, 20, 22, 28 transfer functions not supported");
+ /* fallthrough */
+ case GST_VIDEO_TRANSFER_GAMMA10:
+ colorspace->transferFunction = ColorSpace::TransferFunction::Linear;
+ break;
+ case GST_VIDEO_TRANSFER_SRGB:
+ colorspace->transferFunction = ColorSpace::TransferFunction::Srgb;
+ break;
+#if GST_CHECK_VERSION(1, 18, 0)
+ case GST_VIDEO_TRANSFER_BT601:
+ case GST_VIDEO_TRANSFER_BT2020_10:
+#endif
+ case GST_VIDEO_TRANSFER_BT2020_12:
+ case GST_VIDEO_TRANSFER_BT709:
+ colorspace->transferFunction = ColorSpace::TransferFunction::Rec709;
+ break;
+ default:
+ GST_WARNING("Colorimetry transfer function %d not mapped in gstlibcamera",
+ colorimetry.transfer);
+ return std::nullopt;
+ }
+
+ switch (colorimetry.matrix) {
+ case GST_VIDEO_COLOR_MATRIX_RGB:
+ colorspace->ycbcrEncoding = ColorSpace::YcbcrEncoding::None;
+ break;
+ /* FCC is about the same as BT601 with fewer digits */
+ case GST_VIDEO_COLOR_MATRIX_FCC:
+ case GST_VIDEO_COLOR_MATRIX_BT601:
+ colorspace->ycbcrEncoding = ColorSpace::YcbcrEncoding::Rec601;
+ break;
+ case GST_VIDEO_COLOR_MATRIX_BT709:
+ colorspace->ycbcrEncoding = ColorSpace::YcbcrEncoding::Rec709;
+ break;
+ case GST_VIDEO_COLOR_MATRIX_BT2020:
+ colorspace->ycbcrEncoding = ColorSpace::YcbcrEncoding::Rec2020;
+ break;
+ default:
+ GST_WARNING("Colorimetry matrix %d not mapped in gstlibcamera",
+ colorimetry.matrix);
+ return std::nullopt;
+ }
+
+ switch (colorimetry.range) {
+ case GST_VIDEO_COLOR_RANGE_0_255:
+ colorspace->range = ColorSpace::Range::Full;
+ break;
+ case GST_VIDEO_COLOR_RANGE_16_235:
+ colorspace->range = ColorSpace::Range::Limited;
+ break;
+ default:
+ GST_WARNING("Colorimetry range %d not mapped in gstlibcamera",
+ colorimetry.range);
+ return std::nullopt;
+ }
+
+ return colorspace;
+}
+
static GstVideoFormat
pixel_format_to_gst_format(const PixelFormat &format)
{
@@ -139,6 +299,18 @@ gst_libcamera_stream_configuration_to_caps(const StreamConfiguration &stream_cfg
"width", G_TYPE_INT, stream_cfg.size.width,
"height", G_TYPE_INT, stream_cfg.size.height,
nullptr);
+
+ if (stream_cfg.colorSpace) {
+ GstVideoColorimetry colorimetry = colorimetry_from_colorspace(stream_cfg.colorSpace.value());
+ gchar *colorimetry_str = gst_video_colorimetry_to_string(&colorimetry);
+
+ if (colorimetry_str)
+ gst_structure_set(s, "colorimetry", G_TYPE_STRING, colorimetry_str, nullptr);
+ else
+ g_error("Got invalid colorimetry from ColorSpace: %s",
+ ColorSpace::toString(stream_cfg.colorSpace).c_str());
+ }
+
gst_caps_append_structure(caps, s);
return caps;
@@ -222,18 +394,33 @@ gst_libcamera_configure_stream_from_caps(StreamConfiguration &stream_cfg,
gst_structure_get_int(s, "height", &height);
stream_cfg.size.width = width;
stream_cfg.size.height = height;
+
+ /* Configure colorimetry */
+ if (gst_structure_has_field(s, "colorimetry")) {
+ const gchar *colorimetry_str = gst_structure_get_string(s, "colorimetry");
+ GstVideoColorimetry colorimetry;
+
+ if (!gst_video_colorimetry_from_string(&colorimetry, colorimetry_str))
+ g_critical("Invalid colorimetry %s", colorimetry_str);
+
+ stream_cfg.colorSpace = colorspace_from_colorimetry(colorimetry);
+ }
}
-void
-gst_libcamera_resume_task(GstTask *task)
+#if !GST_CHECK_VERSION(1, 17, 1)
+gboolean
+gst_task_resume(GstTask *task)
{
/* We only want to resume the task if it's paused. */
GLibLocker lock(GST_OBJECT(task));
- if (GST_TASK_STATE(task) == GST_TASK_PAUSED) {
- GST_TASK_STATE(task) = GST_TASK_STARTED;
- GST_TASK_SIGNAL(task);
- }
+ if (GST_TASK_STATE(task) != GST_TASK_PAUSED)
+ return FALSE;
+
+ GST_TASK_STATE(task) = GST_TASK_STARTED;
+ GST_TASK_SIGNAL(task);
+ return TRUE;
}
+#endif
G_LOCK_DEFINE_STATIC(cm_singleton_lock);
static std::weak_ptr<CameraManager> cm_singleton_ptr;
diff --git a/src/gstreamer/gstlibcamera-utils.h b/src/gstreamer/gstlibcamera-utils.h
index d54f1588..164189a2 100644
--- a/src/gstreamer/gstlibcamera-utils.h
+++ b/src/gstreamer/gstlibcamera-utils.h
@@ -18,7 +18,9 @@ GstCaps *gst_libcamera_stream_formats_to_caps(const libcamera::StreamFormats &fo
GstCaps *gst_libcamera_stream_configuration_to_caps(const libcamera::StreamConfiguration &stream_cfg);
void gst_libcamera_configure_stream_from_caps(libcamera::StreamConfiguration &stream_cfg,
GstCaps *caps);
-void gst_libcamera_resume_task(GstTask *task);
+#if !GST_CHECK_VERSION(1, 17, 1)
+gboolean gst_task_resume(GstTask *task);
+#endif
std::shared_ptr<libcamera::CameraManager> gst_libcamera_get_camera_manager(int &ret);
/**
diff --git a/src/gstreamer/gstlibcamerapad.cpp b/src/gstreamer/gstlibcamerapad.cpp
index c00e81c8..87b4057a 100644
--- a/src/gstreamer/gstlibcamerapad.cpp
+++ b/src/gstreamer/gstlibcamerapad.cpp
@@ -18,7 +18,6 @@ struct _GstLibcameraPad {
GstPad parent;
StreamRole role;
GstLibcameraPool *pool;
- GQueue pending_buffers;
GstClockTime latency;
};
@@ -156,40 +155,6 @@ gst_libcamera_pad_get_stream(GstPad *pad)
}
void
-gst_libcamera_pad_queue_buffer(GstPad *pad, GstBuffer *buffer)
-{
- auto *self = GST_LIBCAMERA_PAD(pad);
- GLibLocker lock(GST_OBJECT(self));
-
- g_queue_push_head(&self->pending_buffers, buffer);
-}
-
-GstFlowReturn
-gst_libcamera_pad_push_pending(GstPad *pad)
-{
- auto *self = GST_LIBCAMERA_PAD(pad);
- GstBuffer *buffer;
-
- {
- GLibLocker lock(GST_OBJECT(self));
- buffer = GST_BUFFER(g_queue_pop_tail(&self->pending_buffers));
- }
-
- if (!buffer)
- return GST_FLOW_OK;
-
- return gst_pad_push(pad, buffer);
-}
-
-bool
-gst_libcamera_pad_has_pending(GstPad *pad)
-{
- auto *self = GST_LIBCAMERA_PAD(pad);
- GLibLocker lock(GST_OBJECT(self));
- return self->pending_buffers.length > 0;
-}
-
-void
gst_libcamera_pad_set_latency(GstPad *pad, GstClockTime latency)
{
auto *self = GST_LIBCAMERA_PAD(pad);
diff --git a/src/gstreamer/gstlibcamerapad.h b/src/gstreamer/gstlibcamerapad.h
index 20769517..103ee57a 100644
--- a/src/gstreamer/gstlibcamerapad.h
+++ b/src/gstreamer/gstlibcamerapad.h
@@ -25,10 +25,4 @@ void gst_libcamera_pad_set_pool(GstPad *pad, GstLibcameraPool *pool);
libcamera::Stream *gst_libcamera_pad_get_stream(GstPad *pad);
-void gst_libcamera_pad_queue_buffer(GstPad *pad, GstBuffer *buffer);
-
-GstFlowReturn gst_libcamera_pad_push_pending(GstPad *pad);
-
-bool gst_libcamera_pad_has_pending(GstPad *pad);
-
void gst_libcamera_pad_set_latency(GstPad *pad, GstClockTime latency);
diff --git a/src/gstreamer/gstlibcamerapool.cpp b/src/gstreamer/gstlibcamerapool.cpp
index 1fde4213..0c2be43c 100644
--- a/src/gstreamer/gstlibcamerapool.cpp
+++ b/src/gstreamer/gstlibcamerapool.cpp
@@ -134,13 +134,6 @@ gst_libcamera_pool_get_stream(GstLibcameraPool *self)
return self->stream;
}
-Stream *
-gst_libcamera_buffer_get_stream(GstBuffer *buffer)
-{
- auto *self = (GstLibcameraPool *)buffer->pool;
- return self->stream;
-}
-
FrameBuffer *
gst_libcamera_buffer_get_frame_buffer(GstBuffer *buffer)
{
diff --git a/src/gstreamer/gstlibcamerapool.h b/src/gstreamer/gstlibcamerapool.h
index 05795d21..ce3bf60b 100644
--- a/src/gstreamer/gstlibcamerapool.h
+++ b/src/gstreamer/gstlibcamerapool.h
@@ -25,6 +25,4 @@ GstLibcameraPool *gst_libcamera_pool_new(GstLibcameraAllocator *allocator,
libcamera::Stream *gst_libcamera_pool_get_stream(GstLibcameraPool *self);
-libcamera::Stream *gst_libcamera_buffer_get_stream(GstBuffer *buffer);
-
libcamera::FrameBuffer *gst_libcamera_buffer_get_frame_buffer(GstBuffer *buffer);
diff --git a/src/gstreamer/gstlibcamerasrc.cpp b/src/gstreamer/gstlibcamerasrc.cpp
index 46fd02d2..16d70fea 100644
--- a/src/gstreamer/gstlibcamerasrc.cpp
+++ b/src/gstreamer/gstlibcamerasrc.cpp
@@ -32,8 +32,11 @@
#include <queue>
#include <vector>
+#include <libcamera/base/mutex.h>
+
#include <libcamera/camera.h>
#include <libcamera/camera_manager.h>
+#include <libcamera/control_ids.h>
#include <gst/base/base.h>
@@ -51,15 +54,18 @@ struct RequestWrap {
RequestWrap(std::unique_ptr<Request> request);
~RequestWrap();
- void attachBuffer(GstBuffer *buffer);
+ void attachBuffer(Stream *stream, GstBuffer *buffer);
GstBuffer *detachBuffer(Stream *stream);
std::unique_ptr<Request> request_;
std::map<Stream *, GstBuffer *> buffers_;
+
+ GstClockTime latency_;
+ GstClockTime pts_;
};
RequestWrap::RequestWrap(std::unique_ptr<Request> request)
- : request_(std::move(request))
+ : request_(std::move(request)), latency_(0), pts_(GST_CLOCK_TIME_NONE)
{
}
@@ -71,10 +77,9 @@ RequestWrap::~RequestWrap()
}
}
-void RequestWrap::attachBuffer(GstBuffer *buffer)
+void RequestWrap::attachBuffer(Stream *stream, GstBuffer *buffer)
{
FrameBuffer *fb = gst_libcamera_buffer_get_frame_buffer(buffer);
- Stream *stream = gst_libcamera_buffer_get_stream(buffer);
request_->addBuffer(stream, fb);
@@ -107,11 +112,30 @@ struct GstLibcameraSrcState {
std::shared_ptr<CameraManager> cm_;
std::shared_ptr<Camera> cam_;
std::unique_ptr<CameraConfiguration> config_;
- std::vector<GstPad *> srcpads_;
- std::queue<std::unique_ptr<RequestWrap>> requests_;
+
+ std::vector<GstPad *> srcpads_; /* Protected by stream_lock */
+
+ /*
+ * Contention on this lock_ must be minimized, as it has to be taken in
+ * the realtime-sensitive requestCompleted() handler to protect
+ * queuedRequests_ and completedRequests_.
+ *
+ * stream_lock must be taken before lock_ in contexts where both locks
+ * need to be taken. In particular, this means that the lock_ must not
+ * be held while calling into other graph elements (e.g. when calling
+ * gst_pad_query()).
+ */
+ Mutex lock_;
+ std::queue<std::unique_ptr<RequestWrap>> queuedRequests_
+ LIBCAMERA_TSA_GUARDED_BY(lock_);
+ std::queue<std::unique_ptr<RequestWrap>> completedRequests_
+ LIBCAMERA_TSA_GUARDED_BY(lock_);
+
guint group_id_;
+ int queueRequest();
void requestCompleted(Request *request);
+ int processRequest();
};
struct _GstLibcameraSrc {
@@ -148,15 +172,59 @@ GstStaticPadTemplate request_src_template = {
"src_%u", GST_PAD_SRC, GST_PAD_REQUEST, TEMPLATE_CAPS
};
+/* Must be called with stream_lock held. */
+int GstLibcameraSrcState::queueRequest()
+{
+ std::unique_ptr<Request> request = cam_->createRequest();
+ if (!request)
+ return -ENOMEM;
+
+ std::unique_ptr<RequestWrap> wrap =
+ std::make_unique<RequestWrap>(std::move(request));
+
+ for (GstPad *srcpad : srcpads_) {
+ Stream *stream = gst_libcamera_pad_get_stream(srcpad);
+ GstLibcameraPool *pool = gst_libcamera_pad_get_pool(srcpad);
+ GstBuffer *buffer;
+ GstFlowReturn ret;
+
+ ret = gst_buffer_pool_acquire_buffer(GST_BUFFER_POOL(pool),
+ &buffer, nullptr);
+ if (ret != GST_FLOW_OK) {
+ /*
+ * RequestWrap has ownership of the request, and we
+ * won't be queueing this one due to lack of buffers.
+ */
+ return -ENOBUFS;
+ }
+
+ wrap->attachBuffer(stream, buffer);
+ }
+
+ GST_TRACE_OBJECT(src_, "Requesting buffers");
+ cam_->queueRequest(wrap->request_.get());
+
+ {
+ MutexLocker locker(lock_);
+ queuedRequests_.push(std::move(wrap));
+ }
+
+ /* The RequestWrap will be deleted in the completion handler. */
+ return 0;
+}
+
void
GstLibcameraSrcState::requestCompleted(Request *request)
{
- GLibLocker lock(GST_OBJECT(src_));
-
GST_DEBUG_OBJECT(src_, "buffers are ready");
- std::unique_ptr<RequestWrap> wrap = std::move(requests_.front());
- requests_.pop();
+ std::unique_ptr<RequestWrap> wrap;
+
+ {
+ MutexLocker locker(lock_);
+ wrap = std::move(queuedRequests_.front());
+ queuedRequests_.pop();
+ }
g_return_if_fail(wrap->request_.get() == request);
@@ -165,23 +233,61 @@ GstLibcameraSrcState::requestCompleted(Request *request)
return;
}
- GstBuffer *buffer;
+ if (GST_ELEMENT_CLOCK(src_)) {
+ int64_t timestamp = request->metadata().get(controls::SensorTimestamp).value_or(0);
+
+ GstClockTime gst_base_time = GST_ELEMENT(src_)->base_time;
+ GstClockTime gst_now = gst_clock_get_time(GST_ELEMENT_CLOCK(src_));
+ /* \todo Need to expose which reference clock the timestamp relates to. */
+ GstClockTime sys_now = g_get_monotonic_time() * 1000;
+
+ /* Deduced from: sys_now - sys_base_time == gst_now - gst_base_time */
+ GstClockTime sys_base_time = sys_now - (gst_now - gst_base_time);
+ wrap->pts_ = timestamp - sys_base_time;
+ wrap->latency_ = sys_now - timestamp;
+ }
+
+ {
+ MutexLocker locker(lock_);
+ completedRequests_.push(std::move(wrap));
+ }
+
+ gst_task_resume(src_->task);
+}
+
+/* Must be called with stream_lock held. */
+int GstLibcameraSrcState::processRequest()
+{
+ std::unique_ptr<RequestWrap> wrap;
+ int err = 0;
+
+ {
+ MutexLocker locker(lock_);
+
+ if (!completedRequests_.empty()) {
+ wrap = std::move(completedRequests_.front());
+ completedRequests_.pop();
+ }
+
+ if (completedRequests_.empty())
+ err = -ENOBUFS;
+ }
+
+ if (!wrap)
+ return -ENOBUFS;
+
+ GstFlowReturn ret = GST_FLOW_OK;
+ gst_flow_combiner_reset(src_->flow_combiner);
+
for (GstPad *srcpad : srcpads_) {
Stream *stream = gst_libcamera_pad_get_stream(srcpad);
- buffer = wrap->detachBuffer(stream);
+ GstBuffer *buffer = wrap->detachBuffer(stream);
FrameBuffer *fb = gst_libcamera_buffer_get_frame_buffer(buffer);
- if (GST_ELEMENT_CLOCK(src_)) {
- GstClockTime gst_base_time = GST_ELEMENT(src_)->base_time;
- GstClockTime gst_now = gst_clock_get_time(GST_ELEMENT_CLOCK(src_));
- /* \todo Need to expose which reference clock the timestamp relates to. */
- GstClockTime sys_now = g_get_monotonic_time() * 1000;
-
- /* Deduced from: sys_now - sys_base_time == gst_now - gst_base_time */
- GstClockTime sys_base_time = sys_now - (gst_now - gst_base_time);
- GST_BUFFER_PTS(buffer) = fb->metadata().timestamp - sys_base_time;
- gst_libcamera_pad_set_latency(srcpad, sys_now - fb->metadata().timestamp);
+ if (GST_CLOCK_TIME_IS_VALID(wrap->pts_)) {
+ GST_BUFFER_PTS(buffer) = wrap->pts_;
+ gst_libcamera_pad_set_latency(srcpad, wrap->latency_);
} else {
GST_BUFFER_PTS(buffer) = 0;
}
@@ -189,10 +295,26 @@ GstLibcameraSrcState::requestCompleted(Request *request)
GST_BUFFER_OFFSET(buffer) = fb->metadata().sequence;
GST_BUFFER_OFFSET_END(buffer) = fb->metadata().sequence;
- gst_libcamera_pad_queue_buffer(srcpad, buffer);
+ ret = gst_pad_push(srcpad, buffer);
+ ret = gst_flow_combiner_update_pad_flow(src_->flow_combiner,
+ srcpad, ret);
}
- gst_libcamera_resume_task(this->src_->task);
+ if (ret != GST_FLOW_OK) {
+ if (ret == GST_FLOW_EOS) {
+ g_autoptr(GstEvent) eos = gst_event_new_eos();
+ guint32 seqnum = gst_util_seqnum_next();
+ gst_event_set_seqnum(eos, seqnum);
+ for (GstPad *srcpad : srcpads_)
+ gst_pad_push_event(srcpad, gst_event_ref(eos));
+ } else if (ret != GST_FLOW_FLUSHING) {
+ GST_ELEMENT_FLOW_ERROR(src_, ret);
+ }
+
+ return -EPIPE;
+ }
+
+ return err;
}
static bool
@@ -262,87 +384,72 @@ gst_libcamera_src_task_run(gpointer user_data)
GstLibcameraSrc *self = GST_LIBCAMERA_SRC(user_data);
GstLibcameraSrcState *state = self->state;
- std::unique_ptr<Request> request = state->cam_->createRequest();
- if (!request) {
+ /*
+ * Start by pausing the task. The task may also get resumed by the
+ * buffer-notify signal when new buffers are queued back to the pool,
+ * or by the request completion handler when a new request has
+ * completed. Both will resume the task after adding the buffers or
+ * request to their respective lists, which are checked below to decide
+ * if the task needs to be resumed for another iteration. This is thus
+ * guaranteed to be race-free; the lock taken by gst_task_pause() and
+ * gst_task_resume() serves as a memory barrier.
+ */
+ gst_task_pause(self->task);
+
+ bool doResume = false;
+
+ /*
+ * Create and queue one request. If no buffers are available the
+ * function returns -ENOBUFS, which we ignore here as that's not a
+ * fatal error.
+ */
+ int ret = state->queueRequest();
+ switch (ret) {
+ case 0:
+ /*
+ * The request was successfully queued; there may be enough
+ * buffers left to create another one, so resume the task to
+ * give it another try.
+ */
+ doResume = true;
+ break;
+
+ case -ENOMEM:
GST_ELEMENT_ERROR(self, RESOURCE, NO_SPACE_LEFT,
("Failed to allocate request for camera '%s'.",
state->cam_->id().c_str()),
("libcamera::Camera::createRequest() failed"));
gst_task_stop(self->task);
return;
- }
-
- std::unique_ptr<RequestWrap> wrap =
- std::make_unique<RequestWrap>(std::move(request));
-
- for (GstPad *srcpad : state->srcpads_) {
- GstLibcameraPool *pool = gst_libcamera_pad_get_pool(srcpad);
- GstBuffer *buffer;
- GstFlowReturn ret;
-
- ret = gst_buffer_pool_acquire_buffer(GST_BUFFER_POOL(pool),
- &buffer, nullptr);
- if (ret != GST_FLOW_OK) {
- /*
- * RequestWrap has ownership of the request, and we
- * won't be queueing this one due to lack of buffers.
- */
- wrap.release();
- break;
- }
- wrap->attachBuffer(buffer);
+ case -ENOBUFS:
+ default:
+ break;
}
- if (wrap) {
- GLibLocker lock(GST_OBJECT(self));
- GST_TRACE_OBJECT(self, "Requesting buffers");
- state->cam_->queueRequest(wrap->request_.get());
- state->requests_.push(std::move(wrap));
+ /*
+ * Process one completed request, if available, and record if further
+ * requests are ready for processing.
+ */
+ ret = state->processRequest();
+ switch (ret) {
+ case 0:
+ /* Another completed request is available, resume the task. */
+ doResume = true;
+ break;
- /* The RequestWrap will be deleted in the completion handler. */
- }
+ case -EPIPE:
+ gst_task_stop(self->task);
+ return;
- GstFlowReturn ret = GST_FLOW_OK;
- gst_flow_combiner_reset(self->flow_combiner);
- for (GstPad *srcpad : state->srcpads_) {
- ret = gst_libcamera_pad_push_pending(srcpad);
- ret = gst_flow_combiner_update_pad_flow(self->flow_combiner,
- srcpad, ret);
+ case -ENOBUFS:
+ default:
+ break;
}
- {
- if (ret != GST_FLOW_OK) {
- if (ret == GST_FLOW_EOS) {
- g_autoptr(GstEvent) eos = gst_event_new_eos();
- guint32 seqnum = gst_util_seqnum_next();
- gst_event_set_seqnum(eos, seqnum);
- for (GstPad *srcpad : state->srcpads_)
- gst_pad_push_event(srcpad, gst_event_ref(eos));
- } else if (ret != GST_FLOW_FLUSHING) {
- GST_ELEMENT_FLOW_ERROR(self, ret);
- }
- gst_task_stop(self->task);
- return;
- }
-
- /*
- * Here we need to decide if we want to pause. This needs to
- * happen in lock step with the callback thread which may want
- * to resume the task and might push pending buffers.
- */
- GLibLocker lock(GST_OBJECT(self));
- bool do_pause = true;
- for (GstPad *srcpad : state->srcpads_) {
- if (gst_libcamera_pad_has_pending(srcpad)) {
- do_pause = false;
- break;
- }
- }
-
- if (do_pause)
- gst_task_pause(self->task);
- }
+ /* Resume the task for another iteration if needed. */
+ if (doResume)
+ gst_task_resume(self->task);
}
static void
@@ -453,7 +560,7 @@ gst_libcamera_src_task_enter(GstTask *task, [[maybe_unused]] GThread *thread,
GstLibcameraPool *pool = gst_libcamera_pool_new(self->allocator,
stream_cfg.stream());
g_signal_connect_swapped(pool, "buffer-notify",
- G_CALLBACK(gst_libcamera_resume_task), task);
+ G_CALLBACK(gst_task_resume), task);
gst_libcamera_pad_set_pool(srcpad, pool);
gst_flow_combiner_add_pad(self->flow_combiner, srcpad);
@@ -491,8 +598,16 @@ gst_libcamera_src_task_leave([[maybe_unused]] GstTask *task,
state->cam_->stop();
- for (GstPad *srcpad : state->srcpads_)
- gst_libcamera_pad_set_pool(srcpad, nullptr);
+ {
+ MutexLocker locker(state->lock_);
+ state->completedRequests_ = {};
+ }
+
+ {
+ GLibRecLocker locker(&self->stream_lock);
+ for (GstPad *srcpad : state->srcpads_)
+ gst_libcamera_pad_set_pool(srcpad, nullptr);
+ }
g_clear_object(&self->allocator);
g_clear_pointer(&self->flow_combiner,
@@ -631,7 +746,7 @@ gst_libcamera_src_init(GstLibcameraSrc *self)
gst_task_set_lock(self->task, &self->stream_lock);
state->srcpads_.push_back(gst_pad_new_from_template(templ, "src"));
- gst_element_add_pad(GST_ELEMENT(self), state->srcpads_[0]);
+ gst_element_add_pad(GST_ELEMENT(self), state->srcpads_.back());
/* C-style friend. */
state->src_ = self;
@@ -651,7 +766,7 @@ gst_libcamera_src_request_new_pad(GstElement *element, GstPadTemplate *templ,
g_object_ref_sink(pad);
if (gst_element_add_pad(element, pad)) {
- GLibLocker lock(GST_OBJECT(self));
+ GLibRecLocker lock(&self->stream_lock);
self->state->srcpads_.push_back(reinterpret_cast<GstPad *>(g_object_ref(pad)));
} else {
GST_ELEMENT_ERROR(element, STREAM, FAILED,
@@ -671,7 +786,7 @@ gst_libcamera_src_release_pad(GstElement *element, GstPad *pad)
GST_DEBUG_OBJECT(self, "Pad %" GST_PTR_FORMAT " being released", pad);
{
- GLibLocker lock(GST_OBJECT(self));
+ GLibRecLocker lock(&self->stream_lock);
std::vector<GstPad *> &pads = self->state->srcpads_;
auto begin_iterator = pads.begin();
auto end_iterator = pads.end();
diff --git a/src/gstreamer/meson.build b/src/gstreamer/meson.build
index 77c79140..eda246d7 100644
--- a/src/gstreamer/meson.build
+++ b/src/gstreamer/meson.build
@@ -42,7 +42,7 @@ endif
libcamera_gst = shared_library('gstlibcamera',
libcamera_gst_sources,
cpp_args : libcamera_gst_cpp_args,
- dependencies : [libcamera_public, gstvideo_dep, gstallocator_dep],
+ dependencies : [libcamera_private, gstvideo_dep, gstallocator_dep],
install: true,
install_dir : '@0@/gstreamer-1.0'.format(get_option('libdir')),
)
diff --git a/src/ipa/ipu3/algorithms/af.cpp b/src/ipa/ipu3/algorithms/af.cpp
index d07521a0..4835a034 100644
--- a/src/ipa/ipu3/algorithms/af.cpp
+++ b/src/ipa/ipu3/algorithms/af.cpp
@@ -450,6 +450,8 @@ void Af::process(IPAContext &context, [[maybe_unused]] IPAFrameContext *frameCon
}
}
+REGISTER_IPA_ALGORITHM(Af, "Af")
+
} /* namespace ipa::ipu3::algorithms */
} /* namespace libcamera */
diff --git a/src/ipa/ipu3/algorithms/agc.cpp b/src/ipa/ipu3/algorithms/agc.cpp
index f16be534..ed4809d9 100644
--- a/src/ipa/ipu3/algorithms/agc.cpp
+++ b/src/ipa/ipu3/algorithms/agc.cpp
@@ -229,7 +229,7 @@ void Agc::computeExposure(IPAContext &context, IPAFrameContext *frameContext,
/*
* Filter the exposure.
- * \todo: estimate if we need to desaturate
+ * \todo estimate if we need to desaturate
*/
exposureValue = filterExposure(exposureValue);
@@ -363,6 +363,8 @@ void Agc::process(IPAContext &context, [[maybe_unused]] IPAFrameContext *frameCo
frameCount_++;
}
+REGISTER_IPA_ALGORITHM(Agc, "Agc")
+
} /* namespace ipa::ipu3::algorithms */
} /* namespace libcamera */
diff --git a/src/ipa/ipu3/algorithms/algorithm.h b/src/ipa/ipu3/algorithms/algorithm.h
index 234b2bd7..ae134a94 100644
--- a/src/ipa/ipu3/algorithms/algorithm.h
+++ b/src/ipa/ipu3/algorithms/algorithm.h
@@ -7,19 +7,15 @@
#pragma once
-#include <libcamera/ipa/ipu3_ipa_interface.h>
-
#include <libipa/algorithm.h>
-#include "ipa_context.h"
+#include "module.h"
namespace libcamera {
namespace ipa::ipu3 {
-using Algorithm = libcamera::ipa::Algorithm<IPAContext, IPAFrameContext,
- IPAConfigInfo, ipu3_uapi_params,
- ipu3_uapi_stats_3a>;
+using Algorithm = libcamera::ipa::Algorithm<Module>;
} /* namespace ipa::ipu3 */
diff --git a/src/ipa/ipu3/algorithms/awb.cpp b/src/ipa/ipu3/algorithms/awb.cpp
index 70426722..b658ee54 100644
--- a/src/ipa/ipu3/algorithms/awb.cpp
+++ b/src/ipa/ipu3/algorithms/awb.cpp
@@ -483,6 +483,8 @@ void Awb::prepare(IPAContext &context, ipu3_uapi_params *params)
params->use.acc_ccm = 1;
}
+REGISTER_IPA_ALGORITHM(Awb, "Awb")
+
} /* namespace ipa::ipu3::algorithms */
} /* namespace libcamera */
diff --git a/src/ipa/ipu3/algorithms/blc.cpp b/src/ipa/ipu3/algorithms/blc.cpp
index 78ab7bff..c561aa85 100644
--- a/src/ipa/ipu3/algorithms/blc.cpp
+++ b/src/ipa/ipu3/algorithms/blc.cpp
@@ -62,6 +62,8 @@ void BlackLevelCorrection::prepare([[maybe_unused]] IPAContext &context,
params->use.obgrid_param = 1;
}
+REGISTER_IPA_ALGORITHM(BlackLevelCorrection, "BlackLevelCorrection")
+
} /* namespace ipa::ipu3::algorithms */
} /* namespace libcamera */
diff --git a/src/ipa/ipu3/algorithms/tone_mapping.cpp b/src/ipa/ipu3/algorithms/tone_mapping.cpp
index f86e79b2..49a5558b 100644
--- a/src/ipa/ipu3/algorithms/tone_mapping.cpp
+++ b/src/ipa/ipu3/algorithms/tone_mapping.cpp
@@ -105,6 +105,8 @@ void ToneMapping::process(IPAContext &context, [[maybe_unused]] IPAFrameContext
context.activeState.toneMapping.gamma = gamma_;
}
+REGISTER_IPA_ALGORITHM(ToneMapping, "ToneMapping")
+
} /* namespace ipa::ipu3::algorithms */
} /* namespace libcamera */
diff --git a/src/ipa/ipu3/data/meson.build b/src/ipa/ipu3/data/meson.build
new file mode 100644
index 00000000..1f50b630
--- /dev/null
+++ b/src/ipa/ipu3/data/meson.build
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: CC0-1.0
+
+conf_files = files([
+ 'uncalibrated.yaml',
+])
+
+install_data(conf_files,
+ install_dir : ipa_data_dir / 'ipu3')
diff --git a/src/ipa/ipu3/data/uncalibrated.yaml b/src/ipa/ipu3/data/uncalibrated.yaml
new file mode 100644
index 00000000..794ab3ed
--- /dev/null
+++ b/src/ipa/ipu3/data/uncalibrated.yaml
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: CC0-1.0
+%YAML 1.1
+---
+version: 1
+algorithms:
+ - Af:
+ - Agc:
+ - Awb:
+ - BlackLevelCorrection:
+ - ToneMapping:
+...
diff --git a/src/ipa/ipu3/ipu3.cpp b/src/ipa/ipu3/ipu3.cpp
index 2f6bb672..e3c1e816 100644
--- a/src/ipa/ipu3/ipu3.cpp
+++ b/src/ipa/ipu3/ipu3.cpp
@@ -18,6 +18,7 @@
#include <linux/intel-ipu3.h>
#include <linux/v4l2-controls.h>
+#include <libcamera/base/file.h>
#include <libcamera/base/log.h>
#include <libcamera/base/utils.h>
@@ -29,6 +30,7 @@
#include <libcamera/request.h>
#include "libcamera/internal/mapped_framebuffer.h"
+#include "libcamera/internal/yaml_parser.h"
#include "algorithms/af.h"
#include "algorithms/agc.h"
@@ -71,7 +73,7 @@ namespace ipa::ipu3 {
*
* At initialisation time, a CameraSensorHelper is instantiated to support
* camera-specific calculations, while the default controls are computed, and
- * the algorithms are constructed and placed in an ordered list.
+ * the algorithms are instantiated from the tuning data file.
*
* The IPU3 ImgU operates with a grid layout to divide the overall frame into
* rectangular cells of pixels. When the IPA is configured, we determine the
@@ -92,12 +94,14 @@ namespace ipa::ipu3 {
* fillParamsBuffer() call.
*
* The individual algorithms are split into modular components that are called
- * iteratively to allow them to process statistics from the ImgU in a defined
- * order.
+ * iteratively to allow them to process statistics from the ImgU in the order
+ * defined in the tuning data file.
*
- * The current implementation supports three core algorithms:
- * - Automatic white balance (AWB)
+ * The current implementation supports five core algorithms:
+ *
+ * - Auto focus (AF)
* - Automatic gain and exposure control (AGC)
+ * - Automatic white balance (AWB)
* - Black level correction (BLC)
* - Tone mapping (Gamma)
*
@@ -128,7 +132,7 @@ namespace ipa::ipu3 {
* sensor-specific tuning to adapt for Black Level compensation (BLC), Lens
* shading correction (SHD) and Color correction (CCM).
*/
-class IPAIPU3 : public IPAIPU3Interface
+class IPAIPU3 : public IPAIPU3Interface, public Module
{
public:
int init(const IPASettings &settings,
@@ -150,6 +154,10 @@ public:
void processStatsBuffer(const uint32_t frame, const int64_t frameTimestamp,
const uint32_t bufferId,
const ControlList &sensorControls) override;
+
+protected:
+ std::string logPrefix() const override;
+
private:
void updateControls(const IPACameraSensorInfo &sensorInfo,
const ControlInfoMap &sensorControls,
@@ -171,13 +179,15 @@ private:
/* Interface to the Camera Helper */
std::unique_ptr<CameraSensorHelper> camHelper_;
- /* Maintain the algorithms used by the IPA */
- std::list<std::unique_ptr<ipa::ipu3::Algorithm>> algorithms_;
-
/* Local parameter storage */
struct IPAContext context_;
};
+std::string IPAIPU3::logPrefix() const
+{
+ return "ipu3";
+}
+
/**
* \brief Compute IPASessionConfiguration using the sensor information and the
* sensor V4L2 controls
@@ -316,12 +326,36 @@ int IPAIPU3::init(const IPASettings &settings,
context_.configuration = {};
context_.configuration.sensor.lineDuration = sensorInfo.lineLength * 1.0s / sensorInfo.pixelRate;
- /* Construct our Algorithms */
- algorithms_.push_back(std::make_unique<algorithms::Af>());
- algorithms_.push_back(std::make_unique<algorithms::Agc>());
- algorithms_.push_back(std::make_unique<algorithms::Awb>());
- algorithms_.push_back(std::make_unique<algorithms::BlackLevelCorrection>());
- algorithms_.push_back(std::make_unique<algorithms::ToneMapping>());
+ /* Load the tuning data file. */
+ File file(settings.configurationFile);
+ if (!file.open(File::OpenModeFlag::ReadOnly)) {
+ int ret = file.error();
+ LOG(IPAIPU3, Error)
+ << "Failed to open configuration file "
+ << settings.configurationFile << ": " << strerror(-ret);
+ return ret;
+ }
+
+ std::unique_ptr<libcamera::YamlObject> data = YamlParser::parse(file);
+ if (!data)
+ return -EINVAL;
+
+ unsigned int version = (*data)["version"].get<uint32_t>(0);
+ if (version != 1) {
+ LOG(IPAIPU3, Error)
+ << "Invalid tuning file version " << version;
+ return -EINVAL;
+ }
+
+ if (!data->contains("algorithms")) {
+ LOG(IPAIPU3, Error)
+ << "Tuning file doesn't contain any algorithm";
+ return -EINVAL;
+ }
+
+ int ret = createAlgorithms(context_, (*data)["algorithms"]);
+ if (ret)
+ return ret;
/* Initialize controls. */
updateControls(sensorInfo, sensorControls, ipaControls);
@@ -470,7 +504,7 @@ int IPAIPU3::configure(const IPAConfigInfo &configInfo,
/* Update the IPASessionConfiguration using the sensor settings. */
updateSessionConfiguration(sensorCtrls_);
- for (auto const &algo : algorithms_) {
+ for (auto const &algo : algorithms()) {
int ret = algo->configure(context_, configInfo);
if (ret)
return ret;
@@ -538,7 +572,7 @@ void IPAIPU3::fillParamsBuffer(const uint32_t frame, const uint32_t bufferId)
*/
params->use = {};
- for (auto const &algo : algorithms_)
+ for (auto const &algo : algorithms())
algo->prepare(context_, params);
paramsBufferReady.emit(frame);
@@ -581,7 +615,7 @@ void IPAIPU3::processStatsBuffer(const uint32_t frame,
int32_t vBlank = context_.configuration.sensor.defVBlank;
ControlList ctrls(controls::controls);
- for (auto const &algo : algorithms_)
+ for (auto const &algo : algorithms())
algo->process(context_, &frameContext, stats);
setControls(frame);
diff --git a/src/ipa/ipu3/meson.build b/src/ipa/ipu3/meson.build
index 3194111a..658e7c9b 100644
--- a/src/ipa/ipu3/meson.build
+++ b/src/ipa/ipu3/meson.build
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: CC0-1.0
subdir('algorithms')
+subdir('data')
ipa_name = 'ipa_ipu3'
diff --git a/src/ipa/ipu3/module.h b/src/ipa/ipu3/module.h
new file mode 100644
index 00000000..d94fc459
--- /dev/null
+++ b/src/ipa/ipu3/module.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2022, Ideas On Board
+ *
+ * module.h - IPU3 IPA Module
+ */
+
+#pragma once
+
+#include <linux/intel-ipu3.h>
+
+#include <libcamera/ipa/ipu3_ipa_interface.h>
+
+#include <libipa/module.h>
+
+#include "ipa_context.h"
+
+namespace libcamera {
+
+namespace ipa::ipu3 {
+
+using Module = ipa::Module<IPAContext, IPAFrameContext, IPAConfigInfo,
+ ipu3_uapi_params, ipu3_uapi_stats_3a>;
+
+} /* namespace ipa::ipu3 */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/algorithm.cpp b/src/ipa/libipa/algorithm.cpp
index cce2ed62..38200e57 100644
--- a/src/ipa/libipa/algorithm.cpp
+++ b/src/ipa/libipa/algorithm.cpp
@@ -19,14 +19,35 @@ namespace ipa {
/**
* \class Algorithm
* \brief The base class for all IPA algorithms
- * \tparam Context The type of shared IPA context
- * \tparam Config The type of the IPA configuration data
- * \tparam Params The type of the ISP specific parameters
- * \tparam Stats The type of the IPA statistics and ISP results
- *
- * The Algorithm class defines a standard interface for IPA algorithms. By
- * abstracting algorithms, it makes possible the implementation of generic code
- * to manage algorithms regardless of their specific type.
+ * \tparam Module The IPA module type for this class of algorithms
+ *
+ * The Algorithm class defines a standard interface for IPA algorithms
+ * compatible with the \a Module. By abstracting algorithms, it makes possible
+ * the implementation of generic code to manage algorithms regardless of their
+ * specific type.
+ *
+ * To specialize the Algorithm class template, an IPA module shall specialize
+ * the Module class template with module-specific context and configuration
+ * types, and pass the specialized Module class as the \a Module template
+ * argument.
+ */
+
+/**
+ * \typedef Algorithm::Module
+ * \brief The IPA module type for this class of algorithms
+ */
+
+/**
+ * \fn Algorithm::init()
+ * \brief Initialize the Algorithm with tuning data
+ * \param[in] context The shared IPA context
+ * \param[in] tuningData The tuning data for the algorithm
+ *
+ * This function is called once, when the IPA module is initialized, to
+ * initialize the algorithm. The \a tuningData YamlObject contains the tuning
+ * data for the algorithm.
+ *
+ * \return 0 if successful, an error code otherwise
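+ *
+ * A minimal override could look like the following sketch for an IPU3-style
+ * module; the algorithm name and the "gain" tuning key are purely
+ * illustrative:
+ *
+ * \code
+ * int MyAlgorithm::init(IPAContext &context, const YamlObject &tuningData)
+ * {
+ *     gain_ = tuningData["gain"].get<double>(1.0);
+ *     return 0;
+ * }
+ * \endcode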
*/
/**
@@ -61,6 +82,22 @@ namespace ipa {
*/
/**
+ * \fn Algorithm::queueRequest()
+ * \brief Provide control values to the algorithm
+ * \param[in] context The shared IPA context
+ * \param[in] frame The frame number to apply the control values
+ * \param[in] controls The list of user controls
+ *
+ * This function is called for each request queued to the camera. It provides
+ * the controls stored in the request to the algorithm. The \a frame number
+ * is the Request sequence number and identifies the frame on which the
+ * controls are intended to take effect.
+ *
+ * Algorithms shall read the applicable controls and store their value for later
+ * use during frame processing.
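+ *
+ * A minimal sketch of an override for an IPU3-style module, where the
+ * algorithm name and the stored member are purely illustrative:
+ *
+ * \code
+ * void MyAlgorithm::queueRequest(IPAContext &context, const uint32_t frame,
+ *                                const ControlList &controls)
+ * {
+ *     const auto &exposure = controls.get(controls::ExposureTime);
+ *     if (exposure)
+ *         manualExposure_ = *exposure;
+ * }
+ * \endcode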
+ */
+
+/**
* \fn Algorithm::process()
* \brief Process ISP statistics, and run algorithm operations
* \param[in] context The shared IPA context
@@ -91,6 +128,49 @@ namespace ipa {
* such that the algorithms use up to date state as required.
*/
+/**
+ * \class AlgorithmFactory
+ * \brief Registration of Algorithm classes and creation of instances
+ * \tparam _Algorithm The algorithm class type for this factory
+ *
+ * To facilitate instantiation of Algorithm classes, the AlgorithmFactory class
+ * implements auto-registration of algorithms with the IPA Module class. Each
+ * Algorithm subclass shall register itself using the REGISTER_IPA_ALGORITHM()
+ * macro, which will create a corresponding instance of an AlgorithmFactory and
+ * register it with the IPA Module.
+ */
+
+/**
+ * \fn AlgorithmFactory::AlgorithmFactory()
+ * \brief Construct an algorithm factory
+ * \param[in] name Name of the algorithm class
+ *
+ * Creating an instance of the factory automatically registers it with the IPA
+ * Module class, enabling creation of algorithm instances through
+ * Module::createAlgorithm().
+ *
+ * The factory \a name identifies the algorithm and shall be unique.
+ */
+
+/**
+ * \fn AlgorithmFactory::create()
+ * \brief Create an instance of the Algorithm corresponding to the factory
+ * \return A pointer to a newly constructed instance of the Algorithm subclass
+ * corresponding to the factory
+ */
+
+/**
+ * \def REGISTER_IPA_ALGORITHM
+ * \brief Register an algorithm with the IPA module
+ * \param[in] algorithm Class name of Algorithm derived class to register
+ * \param[in] name Name of the algorithm
+ *
+ * Register an Algorithm subclass with the IPA module to make it available for
+ * instantiation through Module::createAlgorithm(). The \a name identifies the
+ * algorithm and must be unique across all algorithms registered for the IPA
+ * module.
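+ *
+ * The macro is typically placed next to the algorithm implementation, as in:
+ *
+ * \code
+ * REGISTER_IPA_ALGORITHM(Agc, "Agc")
+ * \endcode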
+ */
+
} /* namespace ipa */
} /* namespace libcamera */
diff --git a/src/ipa/libipa/algorithm.h b/src/ipa/libipa/algorithm.h
index 032a05b5..ccc659a6 100644
--- a/src/ipa/libipa/algorithm.h
+++ b/src/ipa/libipa/algorithm.h
@@ -6,35 +6,95 @@
*/
#pragma once
+#include <memory>
+#include <string>
+
+#include <libcamera/controls.h>
+
namespace libcamera {
+class YamlObject;
+
namespace ipa {
-template<typename Context, typename FrameContext, typename Config,
- typename Params, typename Stats>
+template<typename _Module>
class Algorithm
{
public:
+ using Module = _Module;
+
virtual ~Algorithm() {}
- virtual int configure([[maybe_unused]] Context &context,
- [[maybe_unused]] const Config &configInfo)
+ virtual int init([[maybe_unused]] typename Module::Context &context,
+ [[maybe_unused]] const YamlObject &tuningData)
+ {
+ return 0;
+ }
+
+ virtual int configure([[maybe_unused]] typename Module::Context &context,
+ [[maybe_unused]] const typename Module::Config &configInfo)
{
return 0;
}
- virtual void prepare([[maybe_unused]] Context &context,
- [[maybe_unused]] Params *params)
+ virtual void prepare([[maybe_unused]] typename Module::Context &context,
+ [[maybe_unused]] typename Module::Params *params)
+ {
+ }
+
+ virtual void queueRequest([[maybe_unused]] typename Module::Context &context,
+ [[maybe_unused]] const uint32_t frame,
+ [[maybe_unused]] const ControlList &controls)
+ {
+ }
+
+ virtual void process([[maybe_unused]] typename Module::Context &context,
+ [[maybe_unused]] typename Module::FrameContext *frameContext,
+ [[maybe_unused]] const typename Module::Stats *stats)
+ {
+ }
+};
+
+template<typename _Module>
+class AlgorithmFactoryBase
+{
+public:
+ AlgorithmFactoryBase(const char *name)
+ : name_(name)
+ {
+ _Module::registerAlgorithm(this);
+ }
+
+ virtual ~AlgorithmFactoryBase() = default;
+
+ const std::string &name() const { return name_; }
+
+ virtual std::unique_ptr<Algorithm<_Module>> create() const = 0;
+
+private:
+ std::string name_;
+};
+
+template<typename _Algorithm>
+class AlgorithmFactory : public AlgorithmFactoryBase<typename _Algorithm::Module>
+{
+public:
+ AlgorithmFactory(const char *name)
+ : AlgorithmFactoryBase<typename _Algorithm::Module>(name)
{
}
- virtual void process([[maybe_unused]] Context &context,
- [[maybe_unused]] FrameContext *frameContext,
- [[maybe_unused]] const Stats *stats)
+ ~AlgorithmFactory() = default;
+
+ std::unique_ptr<Algorithm<typename _Algorithm::Module>> create() const override
{
+ return std::make_unique<_Algorithm>();
}
};
+#define REGISTER_IPA_ALGORITHM(algorithm, name) \
+static AlgorithmFactory<algorithm> global_##algorithm##Factory(name);
+
} /* namespace ipa */
} /* namespace libcamera */
diff --git a/src/ipa/libipa/histogram.cpp b/src/ipa/libipa/histogram.cpp
index d8ad1c89..69b46177 100644
--- a/src/ipa/libipa/histogram.cpp
+++ b/src/ipa/libipa/histogram.cpp
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
+ * Copyright (C) 2019, Raspberry Pi Ltd
*
* histogram.cpp - histogram calculations
*/
diff --git a/src/ipa/libipa/histogram.h b/src/ipa/libipa/histogram.h
index 164d4603..05bb4b80 100644
--- a/src/ipa/libipa/histogram.h
+++ b/src/ipa/libipa/histogram.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
+ * Copyright (C) 2019, Raspberry Pi Ltd
*
* histogram.h - histogram calculation interface
*/
diff --git a/src/ipa/libipa/libipa.cpp b/src/ipa/libipa/libipa.cpp
deleted file mode 100644
index 08bc3541..00000000
--- a/src/ipa/libipa/libipa.cpp
+++ /dev/null
@@ -1,22 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2021, Ideas On Board
- *
- * libipa.cpp - libipa interface
- */
-
-namespace libcamera {
-
-/**
- * \brief The IPA namespace
- *
- * The IPA namespace groups all types specific to IPA modules. It serves as the
- * top-level namespace for the IPA library libipa, and also contains
- * module-specific namespaces for IPA modules.
- */
-namespace ipa {
-
-} /* namespace ipa */
-
-} /* namespace libcamera */
-
diff --git a/src/ipa/libipa/meson.build b/src/ipa/libipa/meson.build
index 161cc5a1..fb894bc6 100644
--- a/src/ipa/libipa/meson.build
+++ b/src/ipa/libipa/meson.build
@@ -3,13 +3,15 @@
libipa_headers = files([
'algorithm.h',
'camera_sensor_helper.h',
- 'histogram.h'
+ 'histogram.h',
+ 'module.h',
])
libipa_sources = files([
+ 'algorithm.cpp',
'camera_sensor_helper.cpp',
'histogram.cpp',
- 'libipa.cpp',
+ 'module.cpp',
])
libipa_includes = include_directories('..')
diff --git a/src/ipa/libipa/module.cpp b/src/ipa/libipa/module.cpp
new file mode 100644
index 00000000..77352104
--- /dev/null
+++ b/src/ipa/libipa/module.cpp
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2022, Ideas On Board
+ *
+ * module.cpp - IPA Module
+ */
+
+#include "module.h"
+
+/**
+ * \file module.h
+ * \brief IPA Module common interface
+ */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(IPAModuleAlgo)
+
+/**
+ * \brief The IPA namespace
+ *
+ * The IPA namespace groups all types specific to IPA modules. It serves as the
+ * top-level namespace for the IPA library libipa, and also contains
+ * module-specific namespaces for IPA modules.
+ */
+namespace ipa {
+
+/**
+ * \class Module
+ * \brief The base class for all IPA modules
+ * \tparam Context The type of the shared IPA context
+ * \tparam FrameContext The type of the frame context
+ * \tparam Config The type of the IPA configuration data
+ * \tparam Params The type of the ISP specific parameters
+ * \tparam Stats The type of the IPA statistics and ISP results
+ *
+ * The Module class template defines a standard internal interface between IPA
+ * modules and libipa.
+ *
+ * While IPA modules are platform-specific, many of their internal functions are
+ * conceptually similar, even if they take different types of platform-specific
+ * parameters. For instance, IPA modules could share code that instantiates,
+ * initializes and runs algorithms if it wasn't for the fact that the format
+ * of ISP parameters or statistics passed to the related functions is
+ * device-dependent.
+ *
+ * To enable a shared implementation of those common tasks in libipa, the Module
+ * class template defines a standard internal interface between IPA modules and
+ * libipa. The template parameters specify the types of module-dependent data.
+ * IPA modules shall create a specialization of the Module class template in
+ * their namespace, and use it to specialize other classes of libipa, such as
+ * the Algorithm class.
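+ *
+ * As an illustration, the IPU3 IPA module specializes the template as:
+ *
+ * \code
+ * using Module = ipa::Module<IPAContext, IPAFrameContext, IPAConfigInfo,
+ *                            ipu3_uapi_params, ipu3_uapi_stats_3a>;
+ * \endcode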
+ */
+
+/**
+ * \typedef Module::Context
+ * \brief The type of the shared IPA context
+ */
+
+/**
+ * \typedef Module::FrameContext
+ * \brief The type of the frame context
+ */
+
+/**
+ * \typedef Module::Config
+ * \brief The type of the IPA configuration data
+ */
+
+/**
+ * \typedef Module::Params
+ * \brief The type of the ISP specific parameters
+ */
+
+/**
+ * \typedef Module::Stats
+ * \brief The type of the IPA statistics and ISP results
+ */
+
+/**
+ * \fn Module::algorithms()
+ * \brief Retrieve the list of instantiated algorithms
+ * \return The list of instantiated algorithms
+ */
+
+/**
+ * \fn Module::createAlgorithms()
+ * \brief Create algorithms from YAML configuration data
+ * \param[in] context The IPA context
+ * \param[in] algorithms Algorithms configuration data as a parsed YamlObject
+ *
+ * This function iterates over the list of \a algorithms parsed from the YAML
+ * configuration file, and instantiates and initializes the corresponding
+ * algorithms. The configuration data is expected to be correct; any error
+ * causes the function to fail and return immediately.
+ *
+ * \return 0 on success, or a negative error code on failure
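+ *
+ * A typical call site locates the "algorithms" node of the parsed tuning file
+ * and passes it to this function, as the IPU3 IPA module does:
+ *
+ * \code
+ * int ret = createAlgorithms(context_, (*data)["algorithms"]);
+ * if (ret)
+ *     return ret;
+ * \endcode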
+ */
+
+/**
+ * \fn Module::registerAlgorithm()
+ * \brief Add an algorithm factory class to the list of available algorithms
+ * \param[in] factory Factory to use to construct the algorithm
+ *
+ * This function registers an algorithm factory. It is meant to be called by the
+ * AlgorithmFactory constructor only.
+ */
+
+/**
+ * \fn Module::createAlgorithm(const std::string &name)
+ * \brief Create an instance of an Algorithm by name
+ * \param[in] name The algorithm name
+ *
+ * This function is the entry point to algorithm instantiation for the IPA
+ * module. It creates and returns an instance of an algorithm identified by its
+ * \a name. If no such algorithm exists, the function returns nullptr.
+ *
+ * To make an algorithm available to the IPA module, it shall be registered with
+ * the REGISTER_IPA_ALGORITHM() macro.
+ *
+ * \return A new instance of the Algorithm subclass corresponding to the \a name
+ */
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/module.h b/src/ipa/libipa/module.h
new file mode 100644
index 00000000..4149a353
--- /dev/null
+++ b/src/ipa/libipa/module.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2022, Ideas On Board
+ *
+ * module.h - IPA module
+ */
+
+#pragma once
+
+#include <list>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/utils.h>
+
+#include "libcamera/internal/yaml_parser.h"
+
+#include "algorithm.h"
+
+namespace libcamera {
+
+LOG_DECLARE_CATEGORY(IPAModuleAlgo)
+
+namespace ipa {
+
+template<typename _Context, typename _FrameContext, typename _Config,
+ typename _Params, typename _Stats>
+class Module : public Loggable
+{
+public:
+ using Context = _Context;
+ using FrameContext = _FrameContext;
+ using Config = _Config;
+ using Params = _Params;
+ using Stats = _Stats;
+
+ virtual ~Module() {}
+
+ const std::list<std::unique_ptr<Algorithm<Module>>> &algorithms() const
+ {
+ return algorithms_;
+ }
+
+ int createAlgorithms(Context &context, const YamlObject &algorithms)
+ {
+ const auto &list = algorithms.asList();
+
+ for (const auto &[i, algo] : utils::enumerate(list)) {
+ if (!algo.isDictionary()) {
+ LOG(IPAModuleAlgo, Error)
+ << "Invalid YAML syntax for algorithm " << i;
+ algorithms_.clear();
+ return -EINVAL;
+ }
+
+ int ret = createAlgorithm(context, algo);
+ if (ret) {
+ algorithms_.clear();
+ return ret;
+ }
+ }
+
+ return 0;
+ }
+
+ static void registerAlgorithm(AlgorithmFactoryBase<Module> *factory)
+ {
+ factories().push_back(factory);
+ }
+
+private:
+ int createAlgorithm(Context &context, const YamlObject &data)
+ {
+ const auto &[name, algoData] = *data.asDict().begin();
+ std::unique_ptr<Algorithm<Module>> algo = createAlgorithm(name);
+ if (!algo) {
+ LOG(IPAModuleAlgo, Error)
+ << "Algorithm '" << name << "' not found";
+ return -EINVAL;
+ }
+
+ int ret = algo->init(context, algoData);
+ if (ret) {
+ LOG(IPAModuleAlgo, Error)
+ << "Algorithm '" << name << "' failed to initialize";
+ return ret;
+ }
+
+ LOG(IPAModuleAlgo, Debug)
+ << "Instantiated algorithm '" << name << "'";
+
+ algorithms_.push_back(std::move(algo));
+ return 0;
+ }
+
+ static std::unique_ptr<Algorithm<Module>> createAlgorithm(const std::string &name)
+ {
+ for (const AlgorithmFactoryBase<Module> *factory : factories()) {
+ if (factory->name() == name)
+ return factory->create();
+ }
+
+ return nullptr;
+ }
+
+ static std::vector<AlgorithmFactoryBase<Module> *> &factories()
+ {
+ /*
+ * The static factories vector is defined inside the function to ensure
+ * it gets initialized on first use, without any dependency on
+ * link order.
+ */
+ static std::vector<AlgorithmFactoryBase<Module> *> factories;
+ return factories;
+ }
+
+ std::list<std::unique_ptr<Algorithm<Module>>> algorithms_;
+};
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/meson.build b/src/ipa/meson.build
index e15a8a06..76ad5b44 100644
--- a/src/ipa/meson.build
+++ b/src/ipa/meson.build
@@ -28,6 +28,15 @@ ipa_names = []
ipa_modules = get_option('ipas')
+# Tests require the vimc IPA, similarly to the vimc pipeline handler, for
+# their execution. Include it automatically when tests are enabled.
+if get_option('test') and 'vimc' not in ipa_modules
+ message('Enabling vimc IPA to support tests')
+ ipa_modules += ['vimc']
+endif
+
+enabled_ipa_modules = []
+
# The ipa-sign-install.sh script which uses the ipa_names variable will itself
# prepend MESON_INSTALL_DESTDIR_PREFIX to each ipa module name, therefore we
# must not include the prefix string here.
@@ -35,6 +44,7 @@ foreach pipeline : pipelines
if ipa_modules.contains(pipeline)
subdir(pipeline)
ipa_names += ipa_install_dir / ipa_name + '.so'
+ enabled_ipa_modules += pipeline
endif
endforeach
diff --git a/src/ipa/raspberrypi/cam_helper.cpp b/src/ipa/raspberrypi/cam_helper.cpp
index 74179399..cac8f39e 100644
--- a/src/ipa/raspberrypi/cam_helper.cpp
+++ b/src/ipa/raspberrypi/cam_helper.cpp
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
+ * Copyright (C) 2019, Raspberry Pi Ltd
*
* cam_helper.cpp - helper information for different sensors
*/
@@ -13,8 +13,8 @@
#include "libcamera/internal/v4l2_videodevice.h"
-#include "cam_helper.hpp"
-#include "md_parser.hpp"
+#include "cam_helper.h"
+#include "md_parser.h"
using namespace RPiController;
using namespace libcamera;
@@ -24,16 +24,16 @@ namespace libcamera {
LOG_DECLARE_CATEGORY(IPARPI)
}
-static std::map<std::string, CamHelperCreateFunc> cam_helpers;
+static std::map<std::string, CamHelperCreateFunc> camHelpers;
-CamHelper *CamHelper::Create(std::string const &cam_name)
+CamHelper *CamHelper::create(std::string const &camName)
{
/*
* CamHelpers get registered by static RegisterCamHelper
* initialisers.
*/
- for (auto &p : cam_helpers) {
- if (cam_name.find(p.first) != std::string::npos)
+ for (auto &p : camHelpers) {
+ if (camName.find(p.first) != std::string::npos)
return p.second();
}
@@ -50,35 +50,35 @@ CamHelper::~CamHelper()
{
}
-void CamHelper::Prepare(Span<const uint8_t> buffer,
+void CamHelper::prepare(Span<const uint8_t> buffer,
Metadata &metadata)
{
parseEmbeddedData(buffer, metadata);
}
-void CamHelper::Process([[maybe_unused]] StatisticsPtr &stats,
+void CamHelper::process([[maybe_unused]] StatisticsPtr &stats,
[[maybe_unused]] Metadata &metadata)
{
}
-uint32_t CamHelper::ExposureLines(const Duration exposure) const
+uint32_t CamHelper::exposureLines(const Duration exposure) const
{
assert(initialized_);
- return exposure / mode_.line_length;
+ return exposure / mode_.lineLength;
}
-Duration CamHelper::Exposure(uint32_t exposure_lines) const
+Duration CamHelper::exposure(uint32_t exposureLines) const
{
assert(initialized_);
- return exposure_lines * mode_.line_length;
+ return exposureLines * mode_.lineLength;
}
-uint32_t CamHelper::GetVBlanking(Duration &exposure,
+uint32_t CamHelper::getVBlanking(Duration &exposure,
Duration minFrameDuration,
Duration maxFrameDuration) const
{
uint32_t frameLengthMin, frameLengthMax, vblank;
- uint32_t exposureLines = ExposureLines(exposure);
+ uint32_t exposureLines = CamHelper::exposureLines(exposure);
assert(initialized_);
@@ -86,15 +86,15 @@ uint32_t CamHelper::GetVBlanking(Duration &exposure,
* minFrameDuration and maxFrameDuration are clamped by the caller
* based on the limits for the active sensor mode.
*/
- frameLengthMin = minFrameDuration / mode_.line_length;
- frameLengthMax = maxFrameDuration / mode_.line_length;
+ frameLengthMin = minFrameDuration / mode_.lineLength;
+ frameLengthMax = maxFrameDuration / mode_.lineLength;
/*
* Limit the exposure to the maximum frame duration requested, and
* re-calculate if it has been clipped.
*/
exposureLines = std::min(frameLengthMax - frameIntegrationDiff_, exposureLines);
- exposure = Exposure(exposureLines);
+ exposure = CamHelper::exposure(exposureLines);
/* Limit the vblank to the range allowed by the frame length limits. */
vblank = std::clamp(exposureLines + frameIntegrationDiff_,
@@ -102,34 +102,35 @@ uint32_t CamHelper::GetVBlanking(Duration &exposure,
return vblank;
}
-void CamHelper::SetCameraMode(const CameraMode &mode)
+void CamHelper::setCameraMode(const CameraMode &mode)
{
mode_ = mode;
if (parser_) {
- parser_->SetBitsPerPixel(mode.bitdepth);
- parser_->SetLineLengthBytes(0); /* We use SetBufferSize. */
+ parser_->reset();
+ parser_->setBitsPerPixel(mode.bitdepth);
+ parser_->setLineLengthBytes(0); /* We use setBufferSize(). */
}
initialized_ = true;
}
-void CamHelper::GetDelays(int &exposure_delay, int &gain_delay,
- int &vblank_delay) const
+void CamHelper::getDelays(int &exposureDelay, int &gainDelay,
+ int &vblankDelay) const
{
/*
* These values are correct for many sensors. Other sensors will
* need to over-ride this function.
*/
- exposure_delay = 2;
- gain_delay = 1;
- vblank_delay = 2;
+ exposureDelay = 2;
+ gainDelay = 1;
+ vblankDelay = 2;
}
-bool CamHelper::SensorEmbeddedDataPresent() const
+bool CamHelper::sensorEmbeddedDataPresent() const
{
return false;
}
-double CamHelper::GetModeSensitivity([[maybe_unused]] const CameraMode &mode) const
+double CamHelper::getModeSensitivity([[maybe_unused]] const CameraMode &mode) const
{
/*
* Most sensors have the same sensitivity in every mode, but this
@@ -140,7 +141,7 @@ double CamHelper::GetModeSensitivity([[maybe_unused]] const CameraMode &mode) co
return 1.0;
}
-unsigned int CamHelper::HideFramesStartup() const
+unsigned int CamHelper::hideFramesStartup() const
{
/*
* The number of frames when a camera first starts that shouldn't be
@@ -149,19 +150,19 @@ unsigned int CamHelper::HideFramesStartup() const
return 0;
}
-unsigned int CamHelper::HideFramesModeSwitch() const
+unsigned int CamHelper::hideFramesModeSwitch() const
{
/* After a mode switch, many sensors return valid frames immediately. */
return 0;
}
-unsigned int CamHelper::MistrustFramesStartup() const
+unsigned int CamHelper::mistrustFramesStartup() const
{
/* Many sensors return a single bad frame on start-up. */
return 1;
}
-unsigned int CamHelper::MistrustFramesModeSwitch() const
+unsigned int CamHelper::mistrustFramesModeSwitch() const
{
/* Many sensors return valid metadata immediately. */
return 0;
@@ -176,42 +177,44 @@ void CamHelper::parseEmbeddedData(Span<const uint8_t> buffer,
if (buffer.empty())
return;
- if (parser_->Parse(buffer, registers) != MdParser::Status::OK) {
+ if (parser_->parse(buffer, registers) != MdParser::Status::OK) {
LOG(IPARPI, Error) << "Embedded data buffer parsing failed";
return;
}
- PopulateMetadata(registers, parsedMetadata);
- metadata.Merge(parsedMetadata);
+ populateMetadata(registers, parsedMetadata);
+ metadata.merge(parsedMetadata);
/*
- * Overwrite the exposure/gain values in the existing DeviceStatus with
- * values from the parsed embedded buffer. Fetch it first in case any
- * other fields were set meaningfully.
+ * Overwrite the exposure/gain, frame length and sensor temperature values
+ * in the existing DeviceStatus with values from the parsed embedded buffer.
+ * Fetch it first in case any other fields were set meaningfully.
*/
DeviceStatus deviceStatus, parsedDeviceStatus;
- if (metadata.Get("device.status", deviceStatus) ||
- parsedMetadata.Get("device.status", parsedDeviceStatus)) {
+ if (metadata.get("device.status", deviceStatus) ||
+ parsedMetadata.get("device.status", parsedDeviceStatus)) {
LOG(IPARPI, Error) << "DeviceStatus not found";
return;
}
- deviceStatus.shutter_speed = parsedDeviceStatus.shutter_speed;
- deviceStatus.analogue_gain = parsedDeviceStatus.analogue_gain;
- deviceStatus.frame_length = parsedDeviceStatus.frame_length;
+ deviceStatus.shutterSpeed = parsedDeviceStatus.shutterSpeed;
+ deviceStatus.analogueGain = parsedDeviceStatus.analogueGain;
+ deviceStatus.frameLength = parsedDeviceStatus.frameLength;
+ if (parsedDeviceStatus.sensorTemperature)
+ deviceStatus.sensorTemperature = parsedDeviceStatus.sensorTemperature;
LOG(IPARPI, Debug) << "Metadata updated - " << deviceStatus;
- metadata.Set("device.status", deviceStatus);
+ metadata.set("device.status", deviceStatus);
}
-void CamHelper::PopulateMetadata([[maybe_unused]] const MdParser::RegisterMap &registers,
+void CamHelper::populateMetadata([[maybe_unused]] const MdParser::RegisterMap &registers,
[[maybe_unused]] Metadata &metadata) const
{
}
-RegisterCamHelper::RegisterCamHelper(char const *cam_name,
- CamHelperCreateFunc create_func)
+RegisterCamHelper::RegisterCamHelper(char const *camName,
+ CamHelperCreateFunc createFunc)
{
- cam_helpers[std::string(cam_name)] = create_func;
+ camHelpers[std::string(camName)] = createFunc;
}
diff --git a/src/ipa/raspberrypi/cam_helper.h b/src/ipa/raspberrypi/cam_helper.h
new file mode 100644
index 00000000..70d62719
--- /dev/null
+++ b/src/ipa/raspberrypi/cam_helper.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * cam_helper.h - helper class providing camera information
+ */
+#pragma once
+
+#include <memory>
+#include <string>
+
+#include <libcamera/base/span.h>
+#include <libcamera/base/utils.h>
+
+#include "camera_mode.h"
+#include "controller/controller.h"
+#include "controller/metadata.h"
+#include "md_parser.h"
+
+#include "libcamera/internal/v4l2_videodevice.h"
+
+namespace RPiController {
+
+/*
+ * The CamHelper class provides a number of facilities that anyone trying
+ * to drive a camera will need to know, but which are not provided by the
+ * standard driver framework. Specifically, it provides:
+ *
+ * A "CameraMode" structure to describe extra information about the chosen
+ * mode of the driver. For example, how it is cropped from the full sensor
+ * area, how it is scaled, whether pixels are averaged compared to the full
+ * resolution.
+ *
+ * The ability to convert between number of lines of exposure and actual
+ * exposure time, and to convert between the sensor's gain codes and actual
+ * gains.
+ *
+ * A function to return the number of frames of delay between updating exposure,
+ * analogue gain and vblanking, and for the changes to take effect. For many
+ * sensors these take the values 2, 1 and 2 respectively, but sensors that are
+ * different will need to over-ride the default function provided.
+ *
+ * A function to query if the sensor outputs embedded data that can be parsed.
+ *
+ * A function to return the sensitivity of a given camera mode.
+ *
+ * A parser to parse the embedded data buffers provided by some sensors (for
+ * example, the imx219 does; the ov5647 doesn't). This allows us to know for
+ * sure the exposure and gain of the frame we're looking at. CamHelper
+ * provides functions for converting analogue gains to and from the sensor's
+ * native gain codes.
+ *
+ * Finally, a set of functions that determine how to handle the vagaries of
+ * different camera modules on start-up or when switching modes. Some
+ * modules may produce one or more frames that are not yet correctly exposed,
+ * or where the metadata may be suspect. We have the following functions:
+ * HideFramesStartup(): Tell the pipeline handler not to return this many
+ * frames at start-up. This can also be used to hide initial frames
+ * while the AGC and other algorithms are sorting themselves out.
+ * HideFramesModeSwitch(): Tell the pipeline handler not to return this
+ * many frames after a mode switch (other than start-up). Some sensors
+ * may produce invalid frames after a mode switch; others may not.
+ * MistrustFramesStartup(): At start-up a sensor may return frames for
+ * which we should not run any control algorithms (for example, metadata
+ * may be invalid).
+ * MistrustFramesModeSwitch(): The number of frames, after a mode switch
+ * (other than start-up), for which control algorithms should not run
+ * (for example, metadata may be unreliable).
+ */
+
+class CamHelper
+{
+public:
+ static CamHelper *create(std::string const &camName);
+ CamHelper(std::unique_ptr<MdParser> parser, unsigned int frameIntegrationDiff);
+ virtual ~CamHelper();
+ void setCameraMode(const CameraMode &mode);
+ virtual void prepare(libcamera::Span<const uint8_t> buffer,
+ Metadata &metadata);
+ virtual void process(StatisticsPtr &stats, Metadata &metadata);
+ virtual uint32_t exposureLines(libcamera::utils::Duration exposure) const;
+ virtual libcamera::utils::Duration exposure(uint32_t exposureLines) const;
+ virtual uint32_t getVBlanking(libcamera::utils::Duration &exposure,
+ libcamera::utils::Duration minFrameDuration,
+ libcamera::utils::Duration maxFrameDuration) const;
+ virtual uint32_t gainCode(double gain) const = 0;
+ virtual double gain(uint32_t gainCode) const = 0;
+ virtual void getDelays(int &exposureDelay, int &gainDelay,
+ int &vblankDelay) const;
+ virtual bool sensorEmbeddedDataPresent() const;
+ virtual double getModeSensitivity(const CameraMode &mode) const;
+ virtual unsigned int hideFramesStartup() const;
+ virtual unsigned int hideFramesModeSwitch() const;
+ virtual unsigned int mistrustFramesStartup() const;
+ virtual unsigned int mistrustFramesModeSwitch() const;
+
+protected:
+ void parseEmbeddedData(libcamera::Span<const uint8_t> buffer,
+ Metadata &metadata);
+ virtual void populateMetadata(const MdParser::RegisterMap &registers,
+ Metadata &metadata) const;
+
+ std::unique_ptr<MdParser> parser_;
+ CameraMode mode_;
+
+private:
+ bool initialized_;
+ /*
+ * Smallest difference between the frame length and integration time,
+ * in units of lines.
+ */
+ unsigned int frameIntegrationDiff_;
+};
+
+/*
+ * This is for registering camera helpers with the system, so that the
+ * CamHelper::Create function picks them up automatically.
+ */
+
+typedef CamHelper *(*CamHelperCreateFunc)();
+struct RegisterCamHelper
+{
+ RegisterCamHelper(char const *camName,
+ CamHelperCreateFunc createFunc);
+};
+
+} /* namespace RPiController */
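
The header above documents both the helper interface and the RegisterCamHelper hook. As a hedged illustration of how the pieces fit together, a derived helper only needs to override the two pure-virtual gain conversions and register itself through a file-scope RegisterCamHelper object, whose constructor records the factory in the camHelpers map at program start-up. The sensor name "foo", its gain law and frameIntegrationDiff below are invented for the example; this is a sketch of the pattern, not a real driver:

/* SPDX-License-Identifier: BSD-2-Clause */
/* cam_helper_foo.cpp - hypothetical helper following the pattern above */

#include "cam_helper.h"

using namespace RPiController;

class CamHelperFoo : public CamHelper
{
public:
	CamHelperFoo();
	uint32_t gainCode(double gain) const override;
	double gain(uint32_t gainCode) const override;

private:
	/* Invented value: smallest frame length / integration time gap, in lines. */
	static constexpr int frameIntegrationDiff = 4;
};

CamHelperFoo::CamHelperFoo()
	: CamHelper({}, frameIntegrationDiff)	/* no embedded-data parser */
{
}

/* Invented linear gain law: code = 16 * gain. */
uint32_t CamHelperFoo::gainCode(double gain) const
{
	return static_cast<uint32_t>(gain * 16.0);
}

double CamHelperFoo::gain(uint32_t gainCode) const
{
	return static_cast<double>(gainCode) / 16.0;
}

static CamHelper *create()
{
	return new CamHelperFoo();
}

/* File-scope object: its constructor runs at start-up and records the factory. */
static RegisterCamHelper reg("foo", &create);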
diff --git a/src/ipa/raspberrypi/cam_helper.hpp b/src/ipa/raspberrypi/cam_helper.hpp
deleted file mode 100644
index 300f8f8a..00000000
--- a/src/ipa/raspberrypi/cam_helper.hpp
+++ /dev/null
@@ -1,123 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
- *
- * cam_helper.hpp - helper class providing camera information
- */
-#pragma once
-
-#include <memory>
-#include <string>
-
-#include <libcamera/base/span.h>
-#include <libcamera/base/utils.h>
-
-#include "camera_mode.h"
-#include "controller/controller.hpp"
-#include "controller/metadata.hpp"
-#include "md_parser.hpp"
-
-#include "libcamera/internal/v4l2_videodevice.h"
-
-namespace RPiController {
-
-// The CamHelper class provides a number of facilities that anyone trying
-// to drive a camera will need to know, but which are not provided by the
-// standard driver framework. Specifically, it provides:
-//
-// A "CameraMode" structure to describe extra information about the chosen
-// mode of the driver. For example, how it is cropped from the full sensor
-// area, how it is scaled, whether pixels are averaged compared to the full
-// resolution.
-//
-// The ability to convert between number of lines of exposure and actual
-// exposure time, and to convert between the sensor's gain codes and actual
-// gains.
-//
-// A function to return the number of frames of delay between updating exposure,
-// analogue gain and vblanking, and for the changes to take effect. For many
-// sensors these take the values 2, 1 and 2 respectively, but sensors that are
-// different will need to over-ride the default function provided.
-//
-// A function to query if the sensor outputs embedded data that can be parsed.
-//
-// A function to return the sensitivity of a given camera mode.
-//
-// A parser to parse the embedded data buffers provided by some sensors (for
-// example, the imx219 does; the ov5647 doesn't). This allows us to know for
-// sure the exposure and gain of the frame we're looking at. CamHelper
-// provides functions for converting analogue gains to and from the sensor's
-// native gain codes.
-//
-// Finally, a set of functions that determine how to handle the vagaries of
-// different camera modules on start-up or when switching modes. Some
-// modules may produce one or more frames that are not yet correctly exposed,
-// or where the metadata may be suspect. We have the following functions:
-// HideFramesStartup(): Tell the pipeline handler not to return this many
-// frames at start-up. This can also be used to hide initial frames
-// while the AGC and other algorithms are sorting themselves out.
-// HideFramesModeSwitch(): Tell the pipeline handler not to return this
-// many frames after a mode switch (other than start-up). Some sensors
-// may produce innvalid frames after a mode switch; others may not.
-// MistrustFramesStartup(): At start-up a sensor may return frames for
-// which we should not run any control algorithms (for example, metadata
-// may be invalid).
-// MistrustFramesModeSwitch(): The number of frames, after a mode switch
-// (other than start-up), for which control algorithms should not run
-// (for example, metadata may be unreliable).
-
-class CamHelper
-{
-public:
- static CamHelper *Create(std::string const &cam_name);
- CamHelper(std::unique_ptr<MdParser> parser, unsigned int frameIntegrationDiff);
- virtual ~CamHelper();
- void SetCameraMode(const CameraMode &mode);
- virtual void Prepare(libcamera::Span<const uint8_t> buffer,
- Metadata &metadata);
- virtual void Process(StatisticsPtr &stats, Metadata &metadata);
- virtual uint32_t ExposureLines(libcamera::utils::Duration exposure) const;
- virtual libcamera::utils::Duration Exposure(uint32_t exposure_lines) const;
- virtual uint32_t GetVBlanking(libcamera::utils::Duration &exposure,
- libcamera::utils::Duration minFrameDuration,
- libcamera::utils::Duration maxFrameDuration) const;
- virtual uint32_t GainCode(double gain) const = 0;
- virtual double Gain(uint32_t gain_code) const = 0;
- virtual void GetDelays(int &exposure_delay, int &gain_delay,
- int &vblank_delay) const;
- virtual bool SensorEmbeddedDataPresent() const;
- virtual double GetModeSensitivity(const CameraMode &mode) const;
- virtual unsigned int HideFramesStartup() const;
- virtual unsigned int HideFramesModeSwitch() const;
- virtual unsigned int MistrustFramesStartup() const;
- virtual unsigned int MistrustFramesModeSwitch() const;
-
-protected:
- void parseEmbeddedData(libcamera::Span<const uint8_t> buffer,
- Metadata &metadata);
- virtual void PopulateMetadata(const MdParser::RegisterMap &registers,
- Metadata &metadata) const;
-
- std::unique_ptr<MdParser> parser_;
- CameraMode mode_;
-
-private:
- bool initialized_;
- /*
- * Smallest difference between the frame length and integration time,
- * in units of lines.
- */
- unsigned int frameIntegrationDiff_;
-};
-
-// This is for registering camera helpers with the system, so that the
-// CamHelper::Create function picks them up automatically.
-
-typedef CamHelper *(*CamHelperCreateFunc)();
-struct RegisterCamHelper
-{
- RegisterCamHelper(char const *cam_name,
- CamHelperCreateFunc create_func);
-};
-
-} // namespace RPi
diff --git a/src/ipa/raspberrypi/cam_helper_imx219.cpp b/src/ipa/raspberrypi/cam_helper_imx219.cpp
index a3caab71..7ded07a2 100644
--- a/src/ipa/raspberrypi/cam_helper_imx219.cpp
+++ b/src/ipa/raspberrypi/cam_helper_imx219.cpp
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
+ * Copyright (C) 2019, Raspberry Pi Ltd
*
* cam_helper_imx219.cpp - camera helper for imx219 sensor
*/
@@ -16,9 +16,9 @@
*/
#define ENABLE_EMBEDDED_DATA 0
-#include "cam_helper.hpp"
+#include "cam_helper.h"
#if ENABLE_EMBEDDED_DATA
-#include "md_parser.hpp"
+#include "md_parser.h"
#endif
using namespace RPiController;
@@ -39,10 +39,10 @@ class CamHelperImx219 : public CamHelper
{
public:
CamHelperImx219();
- uint32_t GainCode(double gain) const override;
- double Gain(uint32_t gain_code) const override;
- unsigned int MistrustFramesModeSwitch() const override;
- bool SensorEmbeddedDataPresent() const override;
+ uint32_t gainCode(double gain) const override;
+ double gain(uint32_t gainCode) const override;
+ unsigned int mistrustFramesModeSwitch() const override;
+ bool sensorEmbeddedDataPresent() const override;
private:
/*
@@ -51,7 +51,7 @@ private:
*/
static constexpr int frameIntegrationDiff = 4;
- void PopulateMetadata(const MdParser::RegisterMap &registers,
+ void populateMetadata(const MdParser::RegisterMap &registers,
Metadata &metadata) const override;
};
@@ -64,17 +64,17 @@ CamHelperImx219::CamHelperImx219()
{
}
-uint32_t CamHelperImx219::GainCode(double gain) const
+uint32_t CamHelperImx219::gainCode(double gain) const
{
return (uint32_t)(256 - 256 / gain);
}
-double CamHelperImx219::Gain(uint32_t gain_code) const
+double CamHelperImx219::gain(uint32_t gainCode) const
{
- return 256.0 / (256 - gain_code);
+ return 256.0 / (256 - gainCode);
}
-unsigned int CamHelperImx219::MistrustFramesModeSwitch() const
+unsigned int CamHelperImx219::mistrustFramesModeSwitch() const
{
/*
* For reasons unknown, we do occasionally get a bogus metadata frame
@@ -84,26 +84,26 @@ unsigned int CamHelperImx219::MistrustFramesModeSwitch() const
return 1;
}
-bool CamHelperImx219::SensorEmbeddedDataPresent() const
+bool CamHelperImx219::sensorEmbeddedDataPresent() const
{
return ENABLE_EMBEDDED_DATA;
}
-void CamHelperImx219::PopulateMetadata(const MdParser::RegisterMap &registers,
+void CamHelperImx219::populateMetadata(const MdParser::RegisterMap &registers,
Metadata &metadata) const
{
DeviceStatus deviceStatus;
- deviceStatus.shutter_speed = Exposure(registers.at(expHiReg) * 256 + registers.at(expLoReg));
- deviceStatus.analogue_gain = Gain(registers.at(gainReg));
- deviceStatus.frame_length = registers.at(frameLengthHiReg) * 256 + registers.at(frameLengthLoReg);
+ deviceStatus.shutterSpeed = exposure(registers.at(expHiReg) * 256 + registers.at(expLoReg));
+ deviceStatus.analogueGain = gain(registers.at(gainReg));
+ deviceStatus.frameLength = registers.at(frameLengthHiReg) * 256 + registers.at(frameLengthLoReg);
- metadata.Set("device.status", deviceStatus);
+ metadata.set("device.status", deviceStatus);
}
-static CamHelper *Create()
+static CamHelper *create()
{
return new CamHelperImx219();
}
-static RegisterCamHelper reg("imx219", &Create);
+static RegisterCamHelper reg("imx219", &create);
diff --git a/src/ipa/raspberrypi/cam_helper_imx290.cpp b/src/ipa/raspberrypi/cam_helper_imx290.cpp
index 871c1f8e..25f23d53 100644
--- a/src/ipa/raspberrypi/cam_helper_imx290.cpp
+++ b/src/ipa/raspberrypi/cam_helper_imx290.cpp
@@ -1,13 +1,13 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/*
- * Copyright (C) 2021, Raspberry Pi (Trading) Limited
+ * Copyright (C) 2021, Raspberry Pi Ltd
*
* cam_helper_imx290.cpp - camera helper for imx290 sensor
*/
#include <math.h>
-#include "cam_helper.hpp"
+#include "cam_helper.h"
using namespace RPiController;
@@ -15,11 +15,11 @@ class CamHelperImx290 : public CamHelper
{
public:
CamHelperImx290();
- uint32_t GainCode(double gain) const override;
- double Gain(uint32_t gain_code) const override;
- void GetDelays(int &exposure_delay, int &gain_delay,
- int &vblank_delay) const override;
- unsigned int HideFramesModeSwitch() const override;
+ uint32_t gainCode(double gain) const override;
+ double gain(uint32_t gainCode) const override;
+ void getDelays(int &exposureDelay, int &gainDelay,
+ int &vblankDelay) const override;
+ unsigned int hideFramesModeSwitch() const override;
private:
/*
@@ -34,34 +34,34 @@ CamHelperImx290::CamHelperImx290()
{
}
-uint32_t CamHelperImx290::GainCode(double gain) const
+uint32_t CamHelperImx290::gainCode(double gain) const
{
int code = 66.6667 * log10(gain);
return std::max(0, std::min(code, 0xf0));
}
-double CamHelperImx290::Gain(uint32_t gain_code) const
+double CamHelperImx290::gain(uint32_t gainCode) const
{
- return pow(10, 0.015 * gain_code);
+ return pow(10, 0.015 * gainCode);
}
-void CamHelperImx290::GetDelays(int &exposure_delay, int &gain_delay,
- int &vblank_delay) const
+void CamHelperImx290::getDelays(int &exposureDelay, int &gainDelay,
+ int &vblankDelay) const
{
- exposure_delay = 2;
- gain_delay = 2;
- vblank_delay = 2;
+ exposureDelay = 2;
+ gainDelay = 2;
+ vblankDelay = 2;
}
-unsigned int CamHelperImx290::HideFramesModeSwitch() const
+unsigned int CamHelperImx290::hideFramesModeSwitch() const
{
/* After a mode switch, we seem to get 1 bad frame. */
return 1;
}
-static CamHelper *Create()
+static CamHelper *create()
{
return new CamHelperImx290();
}
-static RegisterCamHelper reg("imx290", &Create);
+static RegisterCamHelper reg("imx290", &create);
diff --git a/src/ipa/raspberrypi/cam_helper_imx296.cpp b/src/ipa/raspberrypi/cam_helper_imx296.cpp
index a1a771cb..ab1d157a 100644
--- a/src/ipa/raspberrypi/cam_helper_imx296.cpp
+++ b/src/ipa/raspberrypi/cam_helper_imx296.cpp
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/*
- * Copyright (C) 2020, Raspberry Pi (Trading) Limited
+ * Copyright (C) 2020, Raspberry Pi Ltd
*
* cam_helper_imx296.cpp - Camera helper for IMX296 sensor
*/
@@ -9,7 +9,7 @@
#include <cmath>
#include <stddef.h>
-#include "cam_helper.hpp"
+#include "cam_helper.h"
using namespace RPiController;
using libcamera::utils::Duration;
@@ -19,10 +19,10 @@ class CamHelperImx296 : public CamHelper
{
public:
CamHelperImx296();
- uint32_t GainCode(double gain) const override;
- double Gain(uint32_t gain_code) const override;
- uint32_t ExposureLines(Duration exposure) const override;
- Duration Exposure(uint32_t exposure_lines) const override;
+ uint32_t gainCode(double gain) const override;
+ double gain(uint32_t gainCode) const override;
+ uint32_t exposureLines(Duration exposure) const override;
+ Duration exposure(uint32_t exposureLines) const override;
private:
static constexpr uint32_t maxGainCode = 239;
@@ -40,30 +40,30 @@ CamHelperImx296::CamHelperImx296()
{
}
-uint32_t CamHelperImx296::GainCode(double gain) const
+uint32_t CamHelperImx296::gainCode(double gain) const
{
uint32_t code = 20 * std::log10(gain) * 10;
return std::min(code, maxGainCode);
}
-double CamHelperImx296::Gain(uint32_t gain_code) const
+double CamHelperImx296::gain(uint32_t gainCode) const
{
- return std::pow(10.0, gain_code / 200.0);
+ return std::pow(10.0, gainCode / 200.0);
}
-uint32_t CamHelperImx296::ExposureLines(Duration exposure) const
+uint32_t CamHelperImx296::exposureLines(Duration exposure) const
{
return (exposure - 14.26us) / timePerLine;
}
-Duration CamHelperImx296::Exposure(uint32_t exposure_lines) const
+Duration CamHelperImx296::exposure(uint32_t exposureLines) const
{
- return exposure_lines * timePerLine + 14.26us;
+ return exposureLines * timePerLine + 14.26us;
}
-static CamHelper *Create()
+static CamHelper *create()
{
return new CamHelperImx296();
}
-static RegisterCamHelper reg("imx296", &Create);
+static RegisterCamHelper reg("imx296", &create);
diff --git a/src/ipa/raspberrypi/cam_helper_imx477.cpp b/src/ipa/raspberrypi/cam_helper_imx477.cpp
index 338fdc0c..aa306d66 100644
--- a/src/ipa/raspberrypi/cam_helper_imx477.cpp
+++ b/src/ipa/raspberrypi/cam_helper_imx477.cpp
@@ -1,10 +1,11 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/*
- * Copyright (C) 2020, Raspberry Pi (Trading) Limited
+ * Copyright (C) 2020, Raspberry Pi Ltd
*
* cam_helper_imx477.cpp - camera helper for imx477 sensor
*/
+#include <algorithm>
#include <assert.h>
#include <cmath>
#include <stddef.h>
@@ -13,8 +14,8 @@
#include <libcamera/base/log.h>
-#include "cam_helper.hpp"
-#include "md_parser.hpp"
+#include "cam_helper.h"
+#include "md_parser.h"
using namespace RPiController;
using namespace libcamera;
@@ -34,21 +35,22 @@ constexpr uint32_t gainHiReg = 0x0204;
constexpr uint32_t gainLoReg = 0x0205;
constexpr uint32_t frameLengthHiReg = 0x0340;
constexpr uint32_t frameLengthLoReg = 0x0341;
+constexpr uint32_t temperatureReg = 0x013a;
constexpr std::initializer_list<uint32_t> registerList =
- { expHiReg, expLoReg, gainHiReg, gainLoReg, frameLengthHiReg, frameLengthLoReg };
+ { expHiReg, expLoReg, gainHiReg, gainLoReg, frameLengthHiReg, frameLengthLoReg, temperatureReg };
class CamHelperImx477 : public CamHelper
{
public:
CamHelperImx477();
- uint32_t GainCode(double gain) const override;
- double Gain(uint32_t gain_code) const override;
- void Prepare(libcamera::Span<const uint8_t> buffer, Metadata &metadata) override;
- uint32_t GetVBlanking(Duration &exposure, Duration minFrameDuration,
+ uint32_t gainCode(double gain) const override;
+ double gain(uint32_t gainCode) const override;
+ void prepare(libcamera::Span<const uint8_t> buffer, Metadata &metadata) override;
+ uint32_t getVBlanking(Duration &exposure, Duration minFrameDuration,
Duration maxFrameDuration) const override;
- void GetDelays(int &exposure_delay, int &gain_delay,
- int &vblank_delay) const override;
- bool SensorEmbeddedDataPresent() const override;
+ void getDelays(int &exposureDelay, int &gainDelay,
+ int &vblankDelay) const override;
+ bool sensorEmbeddedDataPresent() const override;
private:
/*
@@ -61,7 +63,7 @@ private:
/* Largest long exposure scale factor given as a left shift on the frame length. */
static constexpr int longExposureShiftMax = 7;
- void PopulateMetadata(const MdParser::RegisterMap &registers,
+ void populateMetadata(const MdParser::RegisterMap &registers,
Metadata &metadata) const override;
};
@@ -70,22 +72,22 @@ CamHelperImx477::CamHelperImx477()
{
}
-uint32_t CamHelperImx477::GainCode(double gain) const
+uint32_t CamHelperImx477::gainCode(double gain) const
{
return static_cast<uint32_t>(1024 - 1024 / gain);
}
-double CamHelperImx477::Gain(uint32_t gain_code) const
+double CamHelperImx477::gain(uint32_t gainCode) const
{
- return 1024.0 / (1024 - gain_code);
+ return 1024.0 / (1024 - gainCode);
}
-void CamHelperImx477::Prepare(libcamera::Span<const uint8_t> buffer, Metadata &metadata)
+void CamHelperImx477::prepare(libcamera::Span<const uint8_t> buffer, Metadata &metadata)
{
MdParser::RegisterMap registers;
DeviceStatus deviceStatus;
- if (metadata.Get("device.status", deviceStatus)) {
+ if (metadata.get("device.status", deviceStatus)) {
LOG(IPARPI, Error) << "DeviceStatus not found from DelayedControls";
return;
}
@@ -103,27 +105,27 @@ void CamHelperImx477::Prepare(libcamera::Span<const uint8_t> buffer, Metadata &m
* Otherwise, all values are updated with what is reported in the
* embedded data.
*/
- if (deviceStatus.frame_length > frameLengthMax) {
+ if (deviceStatus.frameLength > frameLengthMax) {
DeviceStatus parsedDeviceStatus;
- metadata.Get("device.status", parsedDeviceStatus);
- parsedDeviceStatus.shutter_speed = deviceStatus.shutter_speed;
- parsedDeviceStatus.frame_length = deviceStatus.frame_length;
- metadata.Set("device.status", parsedDeviceStatus);
+ metadata.get("device.status", parsedDeviceStatus);
+ parsedDeviceStatus.shutterSpeed = deviceStatus.shutterSpeed;
+ parsedDeviceStatus.frameLength = deviceStatus.frameLength;
+ metadata.set("device.status", parsedDeviceStatus);
LOG(IPARPI, Debug) << "Metadata updated for long exposure: "
<< parsedDeviceStatus;
}
}
-uint32_t CamHelperImx477::GetVBlanking(Duration &exposure,
+uint32_t CamHelperImx477::getVBlanking(Duration &exposure,
Duration minFrameDuration,
Duration maxFrameDuration) const
{
uint32_t frameLength, exposureLines;
unsigned int shift = 0;
- frameLength = mode_.height + CamHelper::GetVBlanking(exposure, minFrameDuration,
+ frameLength = mode_.height + CamHelper::getVBlanking(exposure, minFrameDuration,
maxFrameDuration);
/*
* Check if the frame length calculated needs to be setup for long
@@ -142,42 +144,43 @@ uint32_t CamHelperImx477::GetVBlanking(Duration &exposure,
if (shift) {
/* Account for any rounding in the scaled frame length value. */
frameLength <<= shift;
- exposureLines = ExposureLines(exposure);
+ exposureLines = CamHelperImx477::exposureLines(exposure);
exposureLines = std::min(exposureLines, frameLength - frameIntegrationDiff);
- exposure = Exposure(exposureLines);
+ exposure = CamHelperImx477::exposure(exposureLines);
}
return frameLength - mode_.height;
}
-void CamHelperImx477::GetDelays(int &exposure_delay, int &gain_delay,
- int &vblank_delay) const
+void CamHelperImx477::getDelays(int &exposureDelay, int &gainDelay,
+ int &vblankDelay) const
{
- exposure_delay = 2;
- gain_delay = 2;
- vblank_delay = 3;
+ exposureDelay = 2;
+ gainDelay = 2;
+ vblankDelay = 3;
}
-bool CamHelperImx477::SensorEmbeddedDataPresent() const
+bool CamHelperImx477::sensorEmbeddedDataPresent() const
{
return true;
}
-void CamHelperImx477::PopulateMetadata(const MdParser::RegisterMap &registers,
+void CamHelperImx477::populateMetadata(const MdParser::RegisterMap &registers,
Metadata &metadata) const
{
DeviceStatus deviceStatus;
- deviceStatus.shutter_speed = Exposure(registers.at(expHiReg) * 256 + registers.at(expLoReg));
- deviceStatus.analogue_gain = Gain(registers.at(gainHiReg) * 256 + registers.at(gainLoReg));
- deviceStatus.frame_length = registers.at(frameLengthHiReg) * 256 + registers.at(frameLengthLoReg);
+ deviceStatus.shutterSpeed = exposure(registers.at(expHiReg) * 256 + registers.at(expLoReg));
+ deviceStatus.analogueGain = gain(registers.at(gainHiReg) * 256 + registers.at(gainLoReg));
+ deviceStatus.frameLength = registers.at(frameLengthHiReg) * 256 + registers.at(frameLengthLoReg);
+ deviceStatus.sensorTemperature = std::clamp<int8_t>(registers.at(temperatureReg), -20, 80);
- metadata.Set("device.status", deviceStatus);
+ metadata.set("device.status", deviceStatus);
}
-static CamHelper *Create()
+static CamHelper *create()
{
return new CamHelperImx477();
}
-static RegisterCamHelper reg("imx477", &Create);
+static RegisterCamHelper reg("imx477", &create);
diff --git a/src/ipa/raspberrypi/cam_helper_imx519.cpp b/src/ipa/raspberrypi/cam_helper_imx519.cpp
index eaf24982..54e104e7 100644
--- a/src/ipa/raspberrypi/cam_helper_imx519.cpp
+++ b/src/ipa/raspberrypi/cam_helper_imx519.cpp
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/*
* Based on cam_helper_imx477.cpp
- * Copyright (C) 2020, Raspberry Pi (Trading) Limited
+ * Copyright (C) 2020, Raspberry Pi Ltd
*
* cam_helper_imx519.cpp - camera helper for imx519 sensor
* Copyright (C) 2021, Arducam Technology co., Ltd.
@@ -15,8 +15,8 @@
#include <libcamera/base/log.h>
-#include "cam_helper.hpp"
-#include "md_parser.hpp"
+#include "cam_helper.h"
+#include "md_parser.h"
using namespace RPiController;
using namespace libcamera;
@@ -43,14 +43,14 @@ class CamHelperImx519 : public CamHelper
{
public:
CamHelperImx519();
- uint32_t GainCode(double gain) const override;
- double Gain(uint32_t gain_code) const override;
- void Prepare(libcamera::Span<const uint8_t> buffer, Metadata &metadata) override;
- uint32_t GetVBlanking(Duration &exposure, Duration minFrameDuration,
+ uint32_t gainCode(double gain) const override;
+ double gain(uint32_t gainCode) const override;
+ void prepare(libcamera::Span<const uint8_t> buffer, Metadata &metadata) override;
+ uint32_t getVBlanking(Duration &exposure, Duration minFrameDuration,
Duration maxFrameDuration) const override;
- void GetDelays(int &exposure_delay, int &gain_delay,
- int &vblank_delay) const override;
- bool SensorEmbeddedDataPresent() const override;
+ void getDelays(int &exposureDelay, int &gainDelay,
+ int &vblankDelay) const override;
+ bool sensorEmbeddedDataPresent() const override;
private:
/*
@@ -63,7 +63,7 @@ private:
/* Largest long exposure scale factor given as a left shift on the frame length. */
static constexpr int longExposureShiftMax = 7;
- void PopulateMetadata(const MdParser::RegisterMap &registers,
+ void populateMetadata(const MdParser::RegisterMap &registers,
Metadata &metadata) const override;
};
@@ -72,22 +72,22 @@ CamHelperImx519::CamHelperImx519()
{
}
-uint32_t CamHelperImx519::GainCode(double gain) const
+uint32_t CamHelperImx519::gainCode(double gain) const
{
return static_cast<uint32_t>(1024 - 1024 / gain);
}
-double CamHelperImx519::Gain(uint32_t gain_code) const
+double CamHelperImx519::gain(uint32_t gainCode) const
{
- return 1024.0 / (1024 - gain_code);
+ return 1024.0 / (1024 - gainCode);
}
-void CamHelperImx519::Prepare(libcamera::Span<const uint8_t> buffer, Metadata &metadata)
+void CamHelperImx519::prepare(libcamera::Span<const uint8_t> buffer, Metadata &metadata)
{
MdParser::RegisterMap registers;
DeviceStatus deviceStatus;
- if (metadata.Get("device.status", deviceStatus)) {
+ if (metadata.get("device.status", deviceStatus)) {
LOG(IPARPI, Error) << "DeviceStatus not found from DelayedControls";
return;
}
@@ -105,27 +105,27 @@ void CamHelperImx519::Prepare(libcamera::Span<const uint8_t> buffer, Metadata &m
* Otherwise, all values are updated with what is reported in the
* embedded data.
*/
- if (deviceStatus.frame_length > frameLengthMax) {
+ if (deviceStatus.frameLength > frameLengthMax) {
DeviceStatus parsedDeviceStatus;
- metadata.Get("device.status", parsedDeviceStatus);
- parsedDeviceStatus.shutter_speed = deviceStatus.shutter_speed;
- parsedDeviceStatus.frame_length = deviceStatus.frame_length;
- metadata.Set("device.status", parsedDeviceStatus);
+ metadata.get("device.status", parsedDeviceStatus);
+ parsedDeviceStatus.shutterSpeed = deviceStatus.shutterSpeed;
+ parsedDeviceStatus.frameLength = deviceStatus.frameLength;
+ metadata.set("device.status", parsedDeviceStatus);
LOG(IPARPI, Debug) << "Metadata updated for long exposure: "
<< parsedDeviceStatus;
}
}
-uint32_t CamHelperImx519::GetVBlanking(Duration &exposure,
+uint32_t CamHelperImx519::getVBlanking(Duration &exposure,
Duration minFrameDuration,
Duration maxFrameDuration) const
{
uint32_t frameLength, exposureLines;
unsigned int shift = 0;
- frameLength = mode_.height + CamHelper::GetVBlanking(exposure, minFrameDuration,
+ frameLength = mode_.height + CamHelper::getVBlanking(exposure, minFrameDuration,
maxFrameDuration);
/*
* Check if the frame length calculated needs to be setup for long
@@ -144,42 +144,42 @@ uint32_t CamHelperImx519::GetVBlanking(Duration &exposure,
if (shift) {
/* Account for any rounding in the scaled frame length value. */
frameLength <<= shift;
- exposureLines = ExposureLines(exposure);
+ exposureLines = CamHelperImx519::exposureLines(exposure);
exposureLines = std::min(exposureLines, frameLength - frameIntegrationDiff);
- exposure = Exposure(exposureLines);
+ exposure = CamHelperImx519::exposure(exposureLines);
}
return frameLength - mode_.height;
}
-void CamHelperImx519::GetDelays(int &exposure_delay, int &gain_delay,
- int &vblank_delay) const
+void CamHelperImx519::getDelays(int &exposureDelay, int &gainDelay,
+ int &vblankDelay) const
{
- exposure_delay = 2;
- gain_delay = 2;
- vblank_delay = 3;
+ exposureDelay = 2;
+ gainDelay = 2;
+ vblankDelay = 3;
}
-bool CamHelperImx519::SensorEmbeddedDataPresent() const
+bool CamHelperImx519::sensorEmbeddedDataPresent() const
{
return true;
}
-void CamHelperImx519::PopulateMetadata(const MdParser::RegisterMap &registers,
+void CamHelperImx519::populateMetadata(const MdParser::RegisterMap &registers,
Metadata &metadata) const
{
DeviceStatus deviceStatus;
- deviceStatus.shutter_speed = Exposure(registers.at(expHiReg) * 256 + registers.at(expLoReg));
- deviceStatus.analogue_gain = Gain(registers.at(gainHiReg) * 256 + registers.at(gainLoReg));
- deviceStatus.frame_length = registers.at(frameLengthHiReg) * 256 + registers.at(frameLengthLoReg);
+ deviceStatus.shutterSpeed = exposure(registers.at(expHiReg) * 256 + registers.at(expLoReg));
+ deviceStatus.analogueGain = gain(registers.at(gainHiReg) * 256 + registers.at(gainLoReg));
+ deviceStatus.frameLength = registers.at(frameLengthHiReg) * 256 + registers.at(frameLengthLoReg);
- metadata.Set("device.status", deviceStatus);
+ metadata.set("device.status", deviceStatus);
}
-static CamHelper *Create()
+static CamHelper *create()
{
return new CamHelperImx519();
}
-static RegisterCamHelper reg("imx519", &Create);
+static RegisterCamHelper reg("imx519", &create);
diff --git a/src/ipa/raspberrypi/cam_helper_ov5647.cpp b/src/ipa/raspberrypi/cam_helper_ov5647.cpp
index 702c2d07..04fb725d 100644
--- a/src/ipa/raspberrypi/cam_helper_ov5647.cpp
+++ b/src/ipa/raspberrypi/cam_helper_ov5647.cpp
@@ -1,13 +1,13 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
+ * Copyright (C) 2019, Raspberry Pi Ltd
*
* cam_helper_ov5647.cpp - camera information for ov5647 sensor
*/
#include <assert.h>
-#include "cam_helper.hpp"
+#include "cam_helper.h"
using namespace RPiController;
@@ -15,14 +15,14 @@ class CamHelperOv5647 : public CamHelper
{
public:
CamHelperOv5647();
- uint32_t GainCode(double gain) const override;
- double Gain(uint32_t gain_code) const override;
- void GetDelays(int &exposure_delay, int &gain_delay,
- int &vblank_delay) const override;
- unsigned int HideFramesStartup() const override;
- unsigned int HideFramesModeSwitch() const override;
- unsigned int MistrustFramesStartup() const override;
- unsigned int MistrustFramesModeSwitch() const override;
+ uint32_t gainCode(double gain) const override;
+ double gain(uint32_t gainCode) const override;
+ void getDelays(int &exposureDelay, int &gainDelay,
+ int &vblankDelay) const override;
+ unsigned int hideFramesStartup() const override;
+ unsigned int hideFramesModeSwitch() const override;
+ unsigned int mistrustFramesStartup() const override;
+ unsigned int mistrustFramesModeSwitch() const override;
private:
/*
@@ -42,29 +42,29 @@ CamHelperOv5647::CamHelperOv5647()
{
}
-uint32_t CamHelperOv5647::GainCode(double gain) const
+uint32_t CamHelperOv5647::gainCode(double gain) const
{
return static_cast<uint32_t>(gain * 16.0);
}
-double CamHelperOv5647::Gain(uint32_t gain_code) const
+double CamHelperOv5647::gain(uint32_t gainCode) const
{
- return static_cast<double>(gain_code) / 16.0;
+ return static_cast<double>(gainCode) / 16.0;
}
-void CamHelperOv5647::GetDelays(int &exposure_delay, int &gain_delay,
- int &vblank_delay) const
+void CamHelperOv5647::getDelays(int &exposureDelay, int &gainDelay,
+ int &vblankDelay) const
{
/*
* We run this sensor in a mode where the gain delay is bumped up to
* 2. It seems to be the only way to make the delays "predictable".
*/
- exposure_delay = 2;
- gain_delay = 2;
- vblank_delay = 2;
+ exposureDelay = 2;
+ gainDelay = 2;
+ vblankDelay = 2;
}
-unsigned int CamHelperOv5647::HideFramesStartup() const
+unsigned int CamHelperOv5647::hideFramesStartup() const
{
/*
* On startup, we get a couple of under-exposed frames which
@@ -73,7 +73,7 @@ unsigned int CamHelperOv5647::HideFramesStartup() const
return 2;
}
-unsigned int CamHelperOv5647::HideFramesModeSwitch() const
+unsigned int CamHelperOv5647::hideFramesModeSwitch() const
{
/*
* After a mode switch, we get a couple of under-exposed frames which
@@ -82,7 +82,7 @@ unsigned int CamHelperOv5647::HideFramesModeSwitch() const
return 2;
}
-unsigned int CamHelperOv5647::MistrustFramesStartup() const
+unsigned int CamHelperOv5647::mistrustFramesStartup() const
{
/*
* First couple of frames are under-exposed and are no good for control
@@ -91,7 +91,7 @@ unsigned int CamHelperOv5647::MistrustFramesStartup() const
return 2;
}
-unsigned int CamHelperOv5647::MistrustFramesModeSwitch() const
+unsigned int CamHelperOv5647::mistrustFramesModeSwitch() const
{
/*
* First couple of frames are under-exposed even after a simple
@@ -100,9 +100,9 @@ unsigned int CamHelperOv5647::MistrustFramesModeSwitch() const
return 2;
}
-static CamHelper *Create()
+static CamHelper *create()
{
return new CamHelperOv5647();
}
-static RegisterCamHelper reg("ov5647", &Create);
+static RegisterCamHelper reg("ov5647", &create);
diff --git a/src/ipa/raspberrypi/cam_helper_ov9281.cpp b/src/ipa/raspberrypi/cam_helper_ov9281.cpp
index 9de868c3..66f56a31 100644
--- a/src/ipa/raspberrypi/cam_helper_ov9281.cpp
+++ b/src/ipa/raspberrypi/cam_helper_ov9281.cpp
@@ -1,13 +1,13 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/*
- * Copyright (C) 2021, Raspberry Pi (Trading) Limited
+ * Copyright (C) 2021, Raspberry Pi Ltd
*
* cam_helper_ov9281.cpp - camera information for ov9281 sensor
*/
#include <assert.h>
-#include "cam_helper.hpp"
+#include "cam_helper.h"
using namespace RPiController;
@@ -15,10 +15,10 @@ class CamHelperOv9281 : public CamHelper
{
public:
CamHelperOv9281();
- uint32_t GainCode(double gain) const override;
- double Gain(uint32_t gain_code) const override;
- void GetDelays(int &exposure_delay, int &gain_delay,
- int &vblank_delay) const override;
+ uint32_t gainCode(double gain) const override;
+ double gain(uint32_t gainCode) const override;
+ void getDelays(int &exposureDelay, int &gainDelay,
+ int &vblankDelay) const override;
private:
/*
@@ -38,28 +38,28 @@ CamHelperOv9281::CamHelperOv9281()
{
}
-uint32_t CamHelperOv9281::GainCode(double gain) const
+uint32_t CamHelperOv9281::gainCode(double gain) const
{
return static_cast<uint32_t>(gain * 16.0);
}
-double CamHelperOv9281::Gain(uint32_t gain_code) const
+double CamHelperOv9281::gain(uint32_t gainCode) const
{
- return static_cast<double>(gain_code) / 16.0;
+ return static_cast<double>(gainCode) / 16.0;
}
-void CamHelperOv9281::GetDelays(int &exposure_delay, int &gain_delay,
- int &vblank_delay) const
+void CamHelperOv9281::getDelays(int &exposureDelay, int &gainDelay,
+ int &vblankDelay) const
{
/* The driver appears to behave as follows: */
- exposure_delay = 2;
- gain_delay = 2;
- vblank_delay = 2;
+ exposureDelay = 2;
+ gainDelay = 2;
+ vblankDelay = 2;
}
-static CamHelper *Create()
+static CamHelper *create()
{
return new CamHelperOv9281();
}
-static RegisterCamHelper reg("ov9281", &Create);
+static RegisterCamHelper reg("ov9281", &create);
diff --git a/src/ipa/raspberrypi/controller/agc_algorithm.h b/src/ipa/raspberrypi/controller/agc_algorithm.h
new file mode 100644
index 00000000..3a91444c
--- /dev/null
+++ b/src/ipa/raspberrypi/controller/agc_algorithm.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * agc_algorithm.h - AGC/AEC control algorithm interface
+ */
+#pragma once
+
+#include <libcamera/base/utils.h>
+
+#include "algorithm.h"
+
+namespace RPiController {
+
+class AgcAlgorithm : public Algorithm
+{
+public:
+ AgcAlgorithm(Controller *controller) : Algorithm(controller) {}
+ /* An AGC algorithm must provide the following: */
+ virtual unsigned int getConvergenceFrames() const = 0;
+ virtual void setEv(double ev) = 0;
+ virtual void setFlickerPeriod(libcamera::utils::Duration flickerPeriod) = 0;
+ virtual void setFixedShutter(libcamera::utils::Duration fixedShutter) = 0;
+ virtual void setMaxShutter(libcamera::utils::Duration maxShutter) = 0;
+ virtual void setFixedAnalogueGain(double fixedAnalogueGain) = 0;
+ virtual void setMeteringMode(std::string const &meteringModeName) = 0;
+ virtual void setExposureMode(std::string const &exposureModeName) = 0;
+ virtual void setConstraintMode(std::string const &constraintModeName) = 0;
+};
+
+} /* namespace RPiController */
diff --git a/src/ipa/raspberrypi/controller/agc_algorithm.hpp b/src/ipa/raspberrypi/controller/agc_algorithm.hpp
deleted file mode 100644
index 61595ea2..00000000
--- a/src/ipa/raspberrypi/controller/agc_algorithm.hpp
+++ /dev/null
@@ -1,32 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
- *
- * agc_algorithm.hpp - AGC/AEC control algorithm interface
- */
-#pragma once
-
-#include <libcamera/base/utils.h>
-
-#include "algorithm.hpp"
-
-namespace RPiController {
-
-class AgcAlgorithm : public Algorithm
-{
-public:
- AgcAlgorithm(Controller *controller) : Algorithm(controller) {}
- // An AGC algorithm must provide the following:
- virtual unsigned int GetConvergenceFrames() const = 0;
- virtual void SetEv(double ev) = 0;
- virtual void SetFlickerPeriod(libcamera::utils::Duration flicker_period) = 0;
- virtual void SetFixedShutter(libcamera::utils::Duration fixed_shutter) = 0;
- virtual void SetMaxShutter(libcamera::utils::Duration max_shutter) = 0;
- virtual void SetFixedAnalogueGain(double fixed_analogue_gain) = 0;
- virtual void SetMeteringMode(std::string const &metering_mode_name) = 0;
- virtual void SetExposureMode(std::string const &exposure_mode_name) = 0;
- virtual void
- SetConstraintMode(std::string const &contraint_mode_name) = 0;
-};
-
-} // namespace RPiController
diff --git a/src/ipa/raspberrypi/controller/agc_status.h b/src/ipa/raspberrypi/controller/agc_status.h
index 20cb1b62..6abf09d9 100644
--- a/src/ipa/raspberrypi/controller/agc_status.h
+++ b/src/ipa/raspberrypi/controller/agc_status.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
+ * Copyright (C) 2019, Raspberry Pi Ltd
*
* agc_status.h - AGC/AEC control algorithm status
*/
@@ -8,34 +8,30 @@
#include <libcamera/base/utils.h>
-// The AGC algorithm should post the following structure into the image's
-// "agc.status" metadata.
-
-#ifdef __cplusplus
-extern "C" {
-#endif
+/*
+ * The AGC algorithm should post the following structure into the image's
+ * "agc.status" metadata.
+ */
-// Note: total_exposure_value will be reported as zero until the algorithm has
-// seen statistics and calculated meaningful values. The contents should be
-// ignored until then.
+/*
+ * Note: total_exposure_value will be reported as zero until the algorithm has
+ * seen statistics and calculated meaningful values. The contents should be
+ * ignored until then.
+ */
struct AgcStatus {
- libcamera::utils::Duration total_exposure_value; // value for all exposure and gain for this image
- libcamera::utils::Duration target_exposure_value; // (unfiltered) target total exposure AGC is aiming for
- libcamera::utils::Duration shutter_time;
- double analogue_gain;
- char exposure_mode[32];
- char constraint_mode[32];
- char metering_mode[32];
+ libcamera::utils::Duration totalExposureValue; /* value for all exposure and gain for this image */
+ libcamera::utils::Duration targetExposureValue; /* (unfiltered) target total exposure AGC is aiming for */
+ libcamera::utils::Duration shutterTime;
+ double analogueGain;
+ char exposureMode[32];
+ char constraintMode[32];
+ char meteringMode[32];
double ev;
- libcamera::utils::Duration flicker_period;
- int floating_region_enable;
- libcamera::utils::Duration fixed_shutter;
- double fixed_analogue_gain;
- double digital_gain;
+ libcamera::utils::Duration flickerPeriod;
+ int floatingRegionEnable;
+ libcamera::utils::Duration fixedShutter;
+ double fixedAnalogueGain;
+ double digitalGain;
int locked;
};
-
-#ifdef __cplusplus
-}
-#endif
diff --git a/src/ipa/raspberrypi/controller/algorithm.cpp b/src/ipa/raspberrypi/controller/algorithm.cpp
index 43ad0a2b..6d91ee29 100644
--- a/src/ipa/raspberrypi/controller/algorithm.cpp
+++ b/src/ipa/raspberrypi/controller/algorithm.cpp
@@ -1,44 +1,47 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
+ * Copyright (C) 2019, Raspberry Pi Ltd
*
* algorithm.cpp - ISP control algorithms
*/
-#include "algorithm.hpp"
+#include "algorithm.h"
using namespace RPiController;
-void Algorithm::Read([[maybe_unused]] boost::property_tree::ptree const &params)
+int Algorithm::read([[maybe_unused]] const libcamera::YamlObject &params)
{
+ return 0;
}
-void Algorithm::Initialise() {}
+void Algorithm::initialise()
+{
+}
-void Algorithm::SwitchMode([[maybe_unused]] CameraMode const &camera_mode,
+void Algorithm::switchMode([[maybe_unused]] CameraMode const &cameraMode,
[[maybe_unused]] Metadata *metadata)
{
}
-void Algorithm::Prepare([[maybe_unused]] Metadata *image_metadata)
+void Algorithm::prepare([[maybe_unused]] Metadata *imageMetadata)
{
}
-void Algorithm::Process([[maybe_unused]] StatisticsPtr &stats,
- [[maybe_unused]] Metadata *image_metadata)
+void Algorithm::process([[maybe_unused]] StatisticsPtr &stats,
+ [[maybe_unused]] Metadata *imageMetadata)
{
}
-// For registering algorithms with the system:
+/* For registering algorithms with the system: */
static std::map<std::string, AlgoCreateFunc> algorithms;
-std::map<std::string, AlgoCreateFunc> const &RPiController::GetAlgorithms()
+std::map<std::string, AlgoCreateFunc> const &RPiController::getAlgorithms()
{
return algorithms;
}
RegisterAlgorithm::RegisterAlgorithm(char const *name,
- AlgoCreateFunc create_func)
+ AlgoCreateFunc createFunc)
{
- algorithms[std::string(name)] = create_func;
+ algorithms[std::string(name)] = createFunc;
}
diff --git a/src/ipa/raspberrypi/controller/algorithm.h b/src/ipa/raspberrypi/controller/algorithm.h
new file mode 100644
index 00000000..cbbb13ba
--- /dev/null
+++ b/src/ipa/raspberrypi/controller/algorithm.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * algorithm.h - ISP control algorithm interface
+ */
+#pragma once
+
+/*
+ * All algorithms should be derived from this class and made available to the
+ * Controller.
+ */
+
+#include <string>
+#include <memory>
+#include <map>
+
+#include "libcamera/internal/yaml_parser.h"
+
+#include "controller.h"
+
+namespace RPiController {
+
+/* This defines the basic interface for all control algorithms. */
+
+class Algorithm
+{
+public:
+ Algorithm(Controller *controller)
+ : controller_(controller), paused_(false)
+ {
+ }
+ virtual ~Algorithm() = default;
+ virtual char const *name() const = 0;
+ virtual bool isPaused() const { return paused_; }
+ virtual void pause() { paused_ = true; }
+ virtual void resume() { paused_ = false; }
+ virtual int read(const libcamera::YamlObject &params);
+ virtual void initialise();
+ virtual void switchMode(CameraMode const &cameraMode, Metadata *metadata);
+ virtual void prepare(Metadata *imageMetadata);
+ virtual void process(StatisticsPtr &stats, Metadata *imageMetadata);
+ Metadata &getGlobalMetadata() const
+ {
+ return controller_->getGlobalMetadata();
+ }
+
+private:
+ Controller *controller_;
+ bool paused_;
+};
+
+/*
+ * This code is for automatic registration of Front End algorithms with the
+ * system.
+ */
+
+typedef Algorithm *(*AlgoCreateFunc)(Controller *controller);
+struct RegisterAlgorithm {
+ RegisterAlgorithm(char const *name, AlgoCreateFunc createFunc);
+};
+std::map<std::string, AlgoCreateFunc> const &getAlgorithms();
+
+} /* namespace RPiController */
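
The substantive change in this header is the configuration entry point: the boost::property_tree Read() of the deleted file becomes int read(const libcamera::YamlObject &params), so algorithms parse the tuning file through libcamera's own YAML parser and can report failure. A hedged sketch of what a derived algorithm's read() might look like, assuming the YamlObject contains()/get<>() accessors used elsewhere in libcamera; the algorithm name, the "strength" parameter and its default are illustrative, not taken from the patch:

#include <cerrno>

#include "algorithm.h"

using namespace RPiController;

class ExampleAlgorithm : public Algorithm
{
public:
	ExampleAlgorithm(Controller *controller)
		: Algorithm(controller), strength_(1.0)
	{
	}

	char const *name() const override { return "example.algorithm"; }

	int read(const libcamera::YamlObject &params) override
	{
		/* Unlike the old void Read(), errors can now be reported. */
		if (!params.contains("strength"))
			return -EINVAL;
		strength_ = params["strength"].get<double>(1.0);
		return 0;
	}

private:
	double strength_;
};

/* Same self-registration idiom as the RegisterAlgorithm struct above. */
static Algorithm *create(Controller *controller)
{
	return new ExampleAlgorithm(controller);
}
static RegisterAlgorithm reg("example.algorithm", &create);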
diff --git a/src/ipa/raspberrypi/controller/algorithm.hpp b/src/ipa/raspberrypi/controller/algorithm.hpp
deleted file mode 100644
index 5123c87b..00000000
--- a/src/ipa/raspberrypi/controller/algorithm.hpp
+++ /dev/null
@@ -1,60 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
- *
- * algorithm.hpp - ISP control algorithm interface
- */
-#pragma once
-
-// All algorithms should be derived from this class and made available to the
-// Controller.
-
-#include <string>
-#include <memory>
-#include <map>
-
-#include "controller.hpp"
-
-#include <boost/property_tree/ptree.hpp>
-
-namespace RPiController {
-
-// This defines the basic interface for all control algorithms.
-
-class Algorithm
-{
-public:
- Algorithm(Controller *controller)
- : controller_(controller), paused_(false)
- {
- }
- virtual ~Algorithm() = default;
- virtual char const *Name() const = 0;
- virtual bool IsPaused() const { return paused_; }
- virtual void Pause() { paused_ = true; }
- virtual void Resume() { paused_ = false; }
- virtual void Read(boost::property_tree::ptree const &params);
- virtual void Initialise();
- virtual void SwitchMode(CameraMode const &camera_mode, Metadata *metadata);
- virtual void Prepare(Metadata *image_metadata);
- virtual void Process(StatisticsPtr &stats, Metadata *image_metadata);
- Metadata &GetGlobalMetadata() const
- {
- return controller_->GetGlobalMetadata();
- }
-
-private:
- Controller *controller_;
- bool paused_;
-};
-
-// This code is for automatic registration of Front End algorithms with the
-// system.
-
-typedef Algorithm *(*AlgoCreateFunc)(Controller *controller);
-struct RegisterAlgorithm {
- RegisterAlgorithm(char const *name, AlgoCreateFunc create_func);
-};
-std::map<std::string, AlgoCreateFunc> const &GetAlgorithms();
-
-} // namespace RPiController
diff --git a/src/ipa/raspberrypi/controller/alsc_status.h b/src/ipa/raspberrypi/controller/alsc_status.h
index d3f57971..e5aa7e37 100644
--- a/src/ipa/raspberrypi/controller/alsc_status.h
+++ b/src/ipa/raspberrypi/controller/alsc_status.h
@@ -1,27 +1,21 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
+ * Copyright (C) 2019, Raspberry Pi Ltd
*
* alsc_status.h - ALSC (auto lens shading correction) control algorithm status
*/
#pragma once
-// The ALSC algorithm should post the following structure into the image's
-// "alsc.status" metadata.
-
-#ifdef __cplusplus
-extern "C" {
-#endif
+/*
+ * The ALSC algorithm should post the following structure into the image's
+ * "alsc.status" metadata.
+ */
-#define ALSC_CELLS_X 16
-#define ALSC_CELLS_Y 12
+constexpr unsigned int AlscCellsX = 16;
+constexpr unsigned int AlscCellsY = 12;
struct AlscStatus {
- double r[ALSC_CELLS_Y][ALSC_CELLS_X];
- double g[ALSC_CELLS_Y][ALSC_CELLS_X];
- double b[ALSC_CELLS_Y][ALSC_CELLS_X];
+ double r[AlscCellsY][AlscCellsX];
+ double g[AlscCellsY][AlscCellsX];
+ double b[AlscCellsY][AlscCellsX];
};
-
-#ifdef __cplusplus
-}
-#endif
diff --git a/src/ipa/raspberrypi/controller/awb_algorithm.h b/src/ipa/raspberrypi/controller/awb_algorithm.h
new file mode 100644
index 00000000..48e08b60
--- /dev/null
+++ b/src/ipa/raspberrypi/controller/awb_algorithm.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * awb_algorithm.h - AWB control algorithm interface
+ */
+#pragma once
+
+#include "algorithm.h"
+
+namespace RPiController {
+
+class AwbAlgorithm : public Algorithm
+{
+public:
+ AwbAlgorithm(Controller *controller) : Algorithm(controller) {}
+ /* An AWB algorithm must provide the following: */
+ virtual unsigned int getConvergenceFrames() const = 0;
+ virtual void setMode(std::string const &modeName) = 0;
+ virtual void setManualGains(double manualR, double manualB) = 0;
+};
+
+} /* namespace RPiController */
diff --git a/src/ipa/raspberrypi/controller/awb_algorithm.hpp b/src/ipa/raspberrypi/controller/awb_algorithm.hpp
deleted file mode 100644
index 96f88afc..00000000
--- a/src/ipa/raspberrypi/controller/awb_algorithm.hpp
+++ /dev/null
@@ -1,23 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
- *
- * awb_algorithm.hpp - AWB control algorithm interface
- */
-#pragma once
-
-#include "algorithm.hpp"
-
-namespace RPiController {
-
-class AwbAlgorithm : public Algorithm
-{
-public:
- AwbAlgorithm(Controller *controller) : Algorithm(controller) {}
- // An AWB algorithm must provide the following:
- virtual unsigned int GetConvergenceFrames() const = 0;
- virtual void SetMode(std::string const &mode_name) = 0;
- virtual void SetManualGains(double manual_r, double manual_b) = 0;
-};
-
-} // namespace RPiController
diff --git a/src/ipa/raspberrypi/controller/awb_status.h b/src/ipa/raspberrypi/controller/awb_status.h
index 46d7c842..dd5a79e3 100644
--- a/src/ipa/raspberrypi/controller/awb_status.h
+++ b/src/ipa/raspberrypi/controller/awb_status.h
@@ -1,26 +1,20 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
+ * Copyright (C) 2019, Raspberry Pi Ltd
*
* awb_status.h - AWB control algorithm status
*/
#pragma once
-// The AWB algorithm places its results into both the image and global metadata,
-// under the tag "awb.status".
-
-#ifdef __cplusplus
-extern "C" {
-#endif
+/*
+ * The AWB algorithm places its results into both the image and global metadata,
+ * under the tag "awb.status".
+ */
struct AwbStatus {
char mode[32];
- double temperature_K;
- double gain_r;
- double gain_g;
- double gain_b;
+ double temperatureK;
+ double gainR;
+ double gainG;
+ double gainB;
};
-
-#ifdef __cplusplus
-}
-#endif
diff --git a/src/ipa/raspberrypi/controller/black_level_status.h b/src/ipa/raspberrypi/controller/black_level_status.h
index d085f64b..fd5e4ccb 100644
--- a/src/ipa/raspberrypi/controller/black_level_status.h
+++ b/src/ipa/raspberrypi/controller/black_level_status.h
@@ -1,23 +1,15 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
+ * Copyright (C) 2019, Raspberry Pi Ltd
*
* black_level_status.h - black level control algorithm status
*/
#pragma once
-// The "black level" algorithm stores the black levels to use.
-
-#ifdef __cplusplus
-extern "C" {
-#endif
+/* The "black level" algorithm stores the black levels to use. */
struct BlackLevelStatus {
- uint16_t black_level_r; // out of 16 bits
- uint16_t black_level_g;
- uint16_t black_level_b;
+ uint16_t blackLevelR; /* out of 16 bits */
+ uint16_t blackLevelG;
+ uint16_t blackLevelB;
};
-
-#ifdef __cplusplus
-}
-#endif
diff --git a/src/ipa/raspberrypi/controller/camera_mode.h b/src/ipa/raspberrypi/controller/camera_mode.h
index e2b82828..a6ccf8c1 100644
--- a/src/ipa/raspberrypi/controller/camera_mode.h
+++ b/src/ipa/raspberrypi/controller/camera_mode.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/*
- * Copyright (C) 2019-2020, Raspberry Pi (Trading) Limited
+ * Copyright (C) 2019-2020, Raspberry Pi Ltd
*
* camera_mode.h - description of a particular operating mode of a sensor
*/
@@ -10,41 +10,33 @@
#include <libcamera/base/utils.h>
-// Description of a "camera mode", holding enough information for control
-// algorithms to adapt their behaviour to the different modes of the camera,
-// including binning, scaling, cropping etc.
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#define CAMERA_MODE_NAME_LEN 32
+/*
+ * Description of a "camera mode", holding enough information for control
+ * algorithms to adapt their behaviour to the different modes of the camera,
+ * including binning, scaling, cropping etc.
+ */
struct CameraMode {
- // bit depth of the raw camera output
+ /* bit depth of the raw camera output */
uint32_t bitdepth;
- // size in pixels of frames in this mode
+ /* size in pixels of frames in this mode */
uint16_t width, height;
- // size of full resolution uncropped frame ("sensor frame")
- uint16_t sensor_width, sensor_height;
- // binning factor (1 = no binning, 2 = 2-pixel binning etc.)
- uint8_t bin_x, bin_y;
- // location of top left pixel in the sensor frame
- uint16_t crop_x, crop_y;
- // scaling factor (so if uncropped, width*scale_x is sensor_width)
- double scale_x, scale_y;
- // scaling of the noise compared to the native sensor mode
- double noise_factor;
- // line time
- libcamera::utils::Duration line_length;
- // any camera transform *not* reflected already in the camera tuning
+ /* size of full resolution uncropped frame ("sensor frame") */
+ uint16_t sensorWidth, sensorHeight;
+ /* binning factor (1 = no binning, 2 = 2-pixel binning etc.) */
+ uint8_t binX, binY;
+ /* location of top left pixel in the sensor frame */
+ uint16_t cropX, cropY;
+ /* scaling factor (so if uncropped, width*scaleX is sensorWidth) */
+ double scaleX, scaleY;
+ /* scaling of the noise compared to the native sensor mode */
+ double noiseFactor;
+ /* line time */
+ libcamera::utils::Duration lineLength;
+ /* any camera transform *not* reflected already in the camera tuning */
libcamera::Transform transform;
- // minimum and maximum fame lengths in units of lines
- uint32_t min_frame_length, max_frame_length;
- // sensitivity of this mode
+ /* minimum and maximum frame lengths in units of lines */
+ uint32_t minFrameLength, maxFrameLength;
+ /* sensitivity of this mode */
double sensitivity;
};
-
-#ifdef __cplusplus
-}
-#endif
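
The renamed CameraMode fields describe how a mode's pixels relate to the full sensor array; the comment on scaleX/scaleY implies the rough mapping sketched below. A small self-contained illustration, with invented numbers loosely resembling a 2x2-binned half-resolution mode and the mapping itself offered as an interpretation rather than anything stated in the patch:

#include <cstdint>
#include <iostream>

int main()
{
	/* Invented example mode: 2x2 binned, no extra crop. */
	uint16_t width = 2028, height = 1520;
	uint16_t cropX = 0, cropY = 0;
	double scaleX = 2.0, scaleY = 2.0;

	/* A pixel (x, y) in this mode lands at roughly this sensor-frame position. */
	uint16_t x = 1014, y = 760;
	double sensorX = cropX + x * scaleX;
	double sensorY = cropY + y * scaleY;

	/* With no crop, width * scaleX recovers the sensor width (4056 here). */
	std::cout << sensorX << "," << sensorY
		  << " sensorWidth=" << width * scaleX << "\n";
	return 0;
}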
diff --git a/src/ipa/raspberrypi/controller/ccm_algorithm.h b/src/ipa/raspberrypi/controller/ccm_algorithm.h
new file mode 100644
index 00000000..e2c4d771
--- /dev/null
+++ b/src/ipa/raspberrypi/controller/ccm_algorithm.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * ccm_algorithm.h - CCM (colour correction matrix) control algorithm interface
+ */
+#pragma once
+
+#include "algorithm.h"
+
+namespace RPiController {
+
+class CcmAlgorithm : public Algorithm
+{
+public:
+ CcmAlgorithm(Controller *controller) : Algorithm(controller) {}
+ /* A CCM algorithm must provide the following: */
+ virtual void setSaturation(double saturation) = 0;
+};
+
+} /* namespace RPiController */
diff --git a/src/ipa/raspberrypi/controller/ccm_algorithm.hpp b/src/ipa/raspberrypi/controller/ccm_algorithm.hpp
deleted file mode 100644
index 33d0e30d..00000000
--- a/src/ipa/raspberrypi/controller/ccm_algorithm.hpp
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
- *
- * ccm_algorithm.hpp - CCM (colour correction matrix) control algorithm interface
- */
-#pragma once
-
-#include "algorithm.hpp"
-
-namespace RPiController {
-
-class CcmAlgorithm : public Algorithm
-{
-public:
- CcmAlgorithm(Controller *controller) : Algorithm(controller) {}
- // A CCM algorithm must provide the following:
- virtual void SetSaturation(double saturation) = 0;
-};
-
-} // namespace RPiController
diff --git a/src/ipa/raspberrypi/controller/ccm_status.h b/src/ipa/raspberrypi/controller/ccm_status.h
index 7e41dd1f..5e28ee7c 100644
--- a/src/ipa/raspberrypi/controller/ccm_status.h
+++ b/src/ipa/raspberrypi/controller/ccm_status.h
@@ -1,22 +1,14 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
+ * Copyright (C) 2019, Raspberry Pi Ltd
*
* ccm_status.h - CCM (colour correction matrix) control algorithm status
*/
#pragma once
-// The "ccm" algorithm generates an appropriate colour matrix.
-
-#ifdef __cplusplus
-extern "C" {
-#endif
+/* The "ccm" algorithm generates an appropriate colour matrix. */
struct CcmStatus {
double matrix[9];
double saturation;
};
-
-#ifdef __cplusplus
-}
-#endif
diff --git a/src/ipa/raspberrypi/controller/contrast_algorithm.h b/src/ipa/raspberrypi/controller/contrast_algorithm.h
new file mode 100644
index 00000000..ce17a4f9
--- /dev/null
+++ b/src/ipa/raspberrypi/controller/contrast_algorithm.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * contrast_algorithm.h - contrast (gamma) control algorithm interface
+ */
+#pragma once
+
+#include "algorithm.h"
+
+namespace RPiController {
+
+class ContrastAlgorithm : public Algorithm
+{
+public:
+ ContrastAlgorithm(Controller *controller) : Algorithm(controller) {}
+ /* A contrast algorithm must provide the following: */
+ virtual void setBrightness(double brightness) = 0;
+ virtual void setContrast(double contrast) = 0;
+};
+
+} /* namespace RPiController */
diff --git a/src/ipa/raspberrypi/controller/contrast_algorithm.hpp b/src/ipa/raspberrypi/controller/contrast_algorithm.hpp
deleted file mode 100644
index 7f03bba5..00000000
--- a/src/ipa/raspberrypi/controller/contrast_algorithm.hpp
+++ /dev/null
@@ -1,22 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
- *
- * contrast_algorithm.hpp - contrast (gamma) control algorithm interface
- */
-#pragma once
-
-#include "algorithm.hpp"
-
-namespace RPiController {
-
-class ContrastAlgorithm : public Algorithm
-{
-public:
- ContrastAlgorithm(Controller *controller) : Algorithm(controller) {}
- // A contrast algorithm must provide the following:
- virtual void SetBrightness(double brightness) = 0;
- virtual void SetContrast(double contrast) = 0;
-};
-
-} // namespace RPiController
diff --git a/src/ipa/raspberrypi/controller/contrast_status.h b/src/ipa/raspberrypi/controller/contrast_status.h
index d7edd4e9..ef2a7c68 100644
--- a/src/ipa/raspberrypi/controller/contrast_status.h
+++ b/src/ipa/raspberrypi/controller/contrast_status.h
@@ -1,19 +1,17 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
+ * Copyright (C) 2019, Raspberry Pi Ltd
*
* contrast_status.h - contrast (gamma) control algorithm status
*/
#pragma once
-// The "contrast" algorithm creates a gamma curve, optionally doing a little bit
-// of contrast stretching based on the AGC histogram.
-
-#ifdef __cplusplus
-extern "C" {
-#endif
+/*
+ * The "contrast" algorithm creates a gamma curve, optionally doing a little bit
+ * of contrast stretching based on the AGC histogram.
+ */
-#define CONTRAST_NUM_POINTS 33
+constexpr unsigned int ContrastNumPoints = 33;
struct ContrastPoint {
uint16_t x;
@@ -21,11 +19,7 @@ struct ContrastPoint {
};
struct ContrastStatus {
- struct ContrastPoint points[CONTRAST_NUM_POINTS];
+ struct ContrastPoint points[ContrastNumPoints];
double brightness;
double contrast;
};
-
-#ifdef __cplusplus
-}
-#endif
diff --git a/src/ipa/raspberrypi/controller/controller.cpp b/src/ipa/raspberrypi/controller/controller.cpp
index d3433ad2..f4892786 100644
--- a/src/ipa/raspberrypi/controller/controller.cpp
+++ b/src/ipa/raspberrypi/controller/controller.cpp
@@ -1,17 +1,19 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
+ * Copyright (C) 2019, Raspberry Pi Ltd
*
* controller.cpp - ISP controller
*/
+#include <assert.h>
+
+#include <libcamera/base/file.h>
#include <libcamera/base/log.h>
-#include "algorithm.hpp"
-#include "controller.hpp"
+#include "libcamera/internal/yaml_parser.h"
-#include <boost/property_tree/json_parser.hpp>
-#include <boost/property_tree/ptree.hpp>
+#include "algorithm.h"
+#include "controller.h"
using namespace RPiController;
using namespace libcamera;
@@ -19,85 +21,125 @@ using namespace libcamera;
LOG_DEFINE_CATEGORY(RPiController)
Controller::Controller()
- : switch_mode_called_(false) {}
-
-Controller::Controller(char const *json_filename)
- : switch_mode_called_(false)
+ : switchModeCalled_(false)
{
- Read(json_filename);
- Initialise();
}
Controller::~Controller() {}
-void Controller::Read(char const *filename)
+int Controller::read(char const *filename)
{
- boost::property_tree::ptree root;
- boost::property_tree::read_json(filename, root);
- for (auto const &key_and_value : root) {
- Algorithm *algo = CreateAlgorithm(key_and_value.first.c_str());
- if (algo) {
- algo->Read(key_and_value.second);
- algorithms_.push_back(AlgorithmPtr(algo));
- } else
- LOG(RPiController, Warning)
- << "No algorithm found for \"" << key_and_value.first << "\"";
+ File file(filename);
+ if (!file.open(File::OpenModeFlag::ReadOnly)) {
+ LOG(RPiController, Warning)
+ << "Failed to open tuning file '" << filename << "'";
+ return -EINVAL;
+ }
+
+ std::unique_ptr<YamlObject> root = YamlParser::parse(file);
+ double version = (*root)["version"].get<double>(1.0);
+
+ if (version < 2.0) {
+ LOG(RPiController, Warning)
+ << "This format of the tuning file will be deprecated soon!"
+ << " Please use the convert_tuning.py utility to update to version 2.0.";
+
+ for (auto const &[key, value] : root->asDict()) {
+ int ret = createAlgorithm(key, value);
+ if (ret)
+ return ret;
+ }
+ } else if (version < 3.0) {
+ if (!root->contains("algorithms")) {
+ LOG(RPiController, Error)
+ << "Tuning file " << filename
+ << " does not have an \"algorithms\" list!";
+ return -EINVAL;
+ }
+
+ for (auto const &rootAlgo : (*root)["algorithms"].asList())
+ for (auto const &[key, value] : rootAlgo.asDict()) {
+ int ret = createAlgorithm(key, value);
+ if (ret)
+ return ret;
+ }
+ } else {
+ LOG(RPiController, Error)
+ << "Unrecognised version " << version
+ << " for the tuning file " << filename;
+ return -EINVAL;
}
+
+ return 0;
}
-Algorithm *Controller::CreateAlgorithm(char const *name)
+int Controller::createAlgorithm(const std::string &name, const YamlObject &params)
{
- auto it = GetAlgorithms().find(std::string(name));
- return it != GetAlgorithms().end() ? (*it->second)(this) : nullptr;
+ auto it = getAlgorithms().find(name);
+ if (it == getAlgorithms().end()) {
+ LOG(RPiController, Warning)
+ << "No algorithm found for \"" << name << "\"";
+ return 0;
+ }
+
+ Algorithm *algo = (*it->second)(this);
+ int ret = algo->read(params);
+ if (ret)
+ return ret;
+
+ algorithms_.push_back(AlgorithmPtr(algo));
+ return 0;
}
-void Controller::Initialise()
+void Controller::initialise()
{
for (auto &algo : algorithms_)
- algo->Initialise();
+ algo->initialise();
}
-void Controller::SwitchMode(CameraMode const &camera_mode, Metadata *metadata)
+void Controller::switchMode(CameraMode const &cameraMode, Metadata *metadata)
{
for (auto &algo : algorithms_)
- algo->SwitchMode(camera_mode, metadata);
- switch_mode_called_ = true;
+ algo->switchMode(cameraMode, metadata);
+ switchModeCalled_ = true;
}
-void Controller::Prepare(Metadata *image_metadata)
+void Controller::prepare(Metadata *imageMetadata)
{
- assert(switch_mode_called_);
+ assert(switchModeCalled_);
for (auto &algo : algorithms_)
- if (!algo->IsPaused())
- algo->Prepare(image_metadata);
+ if (!algo->isPaused())
+ algo->prepare(imageMetadata);
}
-void Controller::Process(StatisticsPtr stats, Metadata *image_metadata)
+void Controller::process(StatisticsPtr stats, Metadata *imageMetadata)
{
- assert(switch_mode_called_);
+ assert(switchModeCalled_);
for (auto &algo : algorithms_)
- if (!algo->IsPaused())
- algo->Process(stats, image_metadata);
+ if (!algo->isPaused())
+ algo->process(stats, imageMetadata);
}
-Metadata &Controller::GetGlobalMetadata()
+Metadata &Controller::getGlobalMetadata()
{
- return global_metadata_;
+ return globalMetadata_;
}
-Algorithm *Controller::GetAlgorithm(std::string const &name) const
+Algorithm *Controller::getAlgorithm(std::string const &name) const
{
- // The passed name must be the entire algorithm name, or must match the
- // last part of it with a period (.) just before.
- size_t name_len = name.length();
+ /*
+ * The passed name must be the entire algorithm name, or must match the
+ * last part of it with a period (.) just before.
+ */
+ size_t nameLen = name.length();
for (auto &algo : algorithms_) {
- char const *algo_name = algo->Name();
- size_t algo_name_len = strlen(algo_name);
- if (algo_name_len >= name_len &&
+ char const *algoName = algo->name();
+ size_t algoNameLen = strlen(algoName);
+ if (algoNameLen >= nameLen &&
strcasecmp(name.c_str(),
- algo_name + algo_name_len - name_len) == 0 &&
- (name_len == algo_name_len ||
- algo_name[algo_name_len - name_len - 1] == '.'))
+ algoName + algoNameLen - nameLen) == 0 &&
+ (nameLen == algoNameLen ||
+ algoName[algoNameLen - nameLen - 1] == '.'))
return algo.get();
}
return nullptr;
diff --git a/src/ipa/raspberrypi/controller/controller.h b/src/ipa/raspberrypi/controller/controller.h
new file mode 100644
index 00000000..3e1e0517
--- /dev/null
+++ b/src/ipa/raspberrypi/controller/controller.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * controller.h - ISP controller interface
+ */
+#pragma once
+
+/*
+ * The Controller is simply a container for collecting together a number of
+ * "control algorithms" (such as AWB etc.) and for running them all in a
+ * convenient manner.
+ */
+
+#include <vector>
+#include <string>
+
+#include <linux/bcm2835-isp.h>
+
+#include "libcamera/internal/yaml_parser.h"
+
+#include "camera_mode.h"
+#include "device_status.h"
+#include "metadata.h"
+
+namespace RPiController {
+
+class Algorithm;
+typedef std::unique_ptr<Algorithm> AlgorithmPtr;
+typedef std::shared_ptr<bcm2835_isp_stats> StatisticsPtr;
+
+/*
+ * The Controller holds a pointer to some global_metadata, which is how
+ * different controllers and control algorithms within them can exchange
+ * information. The Prepare function returns a pointer to metadata for this
+ * specific image, which should be passed on to the Process function.
+ */
+
+class Controller
+{
+public:
+ Controller();
+ ~Controller();
+ int read(char const *filename);
+ void initialise();
+ void switchMode(CameraMode const &cameraMode, Metadata *metadata);
+ void prepare(Metadata *imageMetadata);
+ void process(StatisticsPtr stats, Metadata *imageMetadata);
+ Metadata &getGlobalMetadata();
+ Algorithm *getAlgorithm(std::string const &name) const;
+
+protected:
+ int createAlgorithm(const std::string &name, const libcamera::YamlObject &params);
+
+ Metadata globalMetadata_;
+ std::vector<AlgorithmPtr> algorithms_;
+ bool switchModeCalled_;
+};
+
+} /* namespace RPiController */
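
A minimal sketch of how a caller might drive the reworked Controller: read() now returns an error code instead of relying on boost exceptions, tuning files below version 2.0 keep the flat dictionary layout while version 2.0 requires an "algorithms" list, and getAlgorithm() accepts either a full name or its dotted suffix. The runController() helper and its arguments are illustrative assumptions, not part of this change.

	#include "controller.h"

	using namespace RPiController;

	int runController(const char *tuningFile, const CameraMode &mode, StatisticsPtr stats)
	{
		Controller controller;

		int ret = controller.read(tuningFile);	/* 0 on success, negative error code otherwise */
		if (ret)
			return ret;

		controller.initialise();

		Metadata metadata;
		controller.switchMode(mode, &metadata);

		/* Full names and dotted suffixes both resolve the same algorithm. */
		Algorithm *agc = controller.getAlgorithm("rpi.agc");
		Algorithm *same = controller.getAlgorithm("agc");
		(void)agc; (void)same;

		/* Per frame: prepare() before the ISP runs, process() once statistics arrive. */
		controller.prepare(&metadata);
		controller.process(stats, &metadata);

		return 0;
	}
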
diff --git a/src/ipa/raspberrypi/controller/controller.hpp b/src/ipa/raspberrypi/controller/controller.hpp
deleted file mode 100644
index 3b50ae77..00000000
--- a/src/ipa/raspberrypi/controller/controller.hpp
+++ /dev/null
@@ -1,54 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
- *
- * controller.hpp - ISP controller interface
- */
-#pragma once
-
-// The Controller is simply a container for a collecting together a number of
-// "control algorithms" (such as AWB etc.) and for running them all in a
-// convenient manner.
-
-#include <vector>
-#include <string>
-
-#include <linux/bcm2835-isp.h>
-
-#include "camera_mode.h"
-#include "device_status.h"
-#include "metadata.hpp"
-
-namespace RPiController {
-
-class Algorithm;
-typedef std::unique_ptr<Algorithm> AlgorithmPtr;
-typedef std::shared_ptr<bcm2835_isp_stats> StatisticsPtr;
-
-// The Controller holds a pointer to some global_metadata, which is how
-// different controllers and control algorithms within them can exchange
-// information. The Prepare function returns a pointer to metadata for this
-// specific image, and which should be passed on to the Process function.
-
-class Controller
-{
-public:
- Controller();
- Controller(char const *json_filename);
- ~Controller();
- Algorithm *CreateAlgorithm(char const *name);
- void Read(char const *filename);
- void Initialise();
- void SwitchMode(CameraMode const &camera_mode, Metadata *metadata);
- void Prepare(Metadata *image_metadata);
- void Process(StatisticsPtr stats, Metadata *image_metadata);
- Metadata &GetGlobalMetadata();
- Algorithm *GetAlgorithm(std::string const &name) const;
-
-protected:
- Metadata global_metadata_;
- std::vector<AlgorithmPtr> algorithms_;
- bool switch_mode_called_;
-};
-
-} // namespace RPiController
diff --git a/src/ipa/raspberrypi/controller/denoise_algorithm.hpp b/src/ipa/raspberrypi/controller/denoise_algorithm.h
index 39fcd7e9..52009ba9 100644
--- a/src/ipa/raspberrypi/controller/denoise_algorithm.hpp
+++ b/src/ipa/raspberrypi/controller/denoise_algorithm.h
@@ -1,12 +1,12 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/*
- * Copyright (C) 2021, Raspberry Pi (Trading) Limited
+ * Copyright (C) 2021, Raspberry Pi Ltd
*
- * denoise.hpp - Denoise control algorithm interface
+ * denoise.h - Denoise control algorithm interface
*/
#pragma once
-#include "algorithm.hpp"
+#include "algorithm.h"
namespace RPiController {
@@ -16,8 +16,8 @@ class DenoiseAlgorithm : public Algorithm
{
public:
DenoiseAlgorithm(Controller *controller) : Algorithm(controller) {}
- // A Denoise algorithm must provide the following:
- virtual void SetMode(DenoiseMode mode) = 0;
+ /* A Denoise algorithm must provide the following: */
+ virtual void setMode(DenoiseMode mode) = 0;
};
-} // namespace RPiController
+} /* namespace RPiController */
diff --git a/src/ipa/raspberrypi/controller/denoise_status.h b/src/ipa/raspberrypi/controller/denoise_status.h
index 67a3c361..f6b9ee29 100644
--- a/src/ipa/raspberrypi/controller/denoise_status.h
+++ b/src/ipa/raspberrypi/controller/denoise_status.h
@@ -1,24 +1,16 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/*
- * Copyright (C) 2019-2021, Raspberry Pi (Trading) Limited
+ * Copyright (C) 2019-2021, Raspberry Pi Ltd
*
* denoise_status.h - Denoise control algorithm status
*/
#pragma once
-// This stores the parameters required for Denoise.
-
-#ifdef __cplusplus
-extern "C" {
-#endif
+/* This stores the parameters required for Denoise. */
struct DenoiseStatus {
- double noise_constant;
- double noise_slope;
+ double noiseConstant;
+ double noiseSlope;
double strength;
unsigned int mode;
};
-
-#ifdef __cplusplus
-}
-#endif
diff --git a/src/ipa/raspberrypi/controller/device_status.cpp b/src/ipa/raspberrypi/controller/device_status.cpp
index f052ea8b..2360a77b 100644
--- a/src/ipa/raspberrypi/controller/device_status.cpp
+++ b/src/ipa/raspberrypi/controller/device_status.cpp
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/*
- * Copyright (C) 2021, Raspberry Pi (Trading) Limited
+ * Copyright (C) 2021, Raspberry Pi Ltd
*
* device_status.cpp - device (image sensor) status
*/
@@ -10,12 +10,21 @@ using namespace libcamera; /* for the Duration operator<< overload */
std::ostream &operator<<(std::ostream &out, const DeviceStatus &d)
{
- out << "Exposure: " << d.shutter_speed
- << " Frame length: " << d.frame_length
- << " Gain: " << d.analogue_gain
- << " Aperture: " << d.aperture
- << " Lens: " << d.lens_position
- << " Flash: " << d.flash_intensity;
+ out << "Exposure: " << d.shutterSpeed
+ << " Frame length: " << d.frameLength
+ << " Gain: " << d.analogueGain;
+
+ if (d.aperture)
+ out << " Aperture: " << *d.aperture;
+
+ if (d.lensPosition)
+ out << " Lens: " << *d.lensPosition;
+
+ if (d.flashIntensity)
+ out << " Flash: " << *d.flashIntensity;
+
+ if (d.sensorTemperature)
+ out << " Temperature: " << *d.sensorTemperature;
return out;
}
diff --git a/src/ipa/raspberrypi/controller/device_status.h b/src/ipa/raspberrypi/controller/device_status.h
index c4a5d9c8..8f74e21b 100644
--- a/src/ipa/raspberrypi/controller/device_status.h
+++ b/src/ipa/raspberrypi/controller/device_status.h
@@ -1,12 +1,13 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/*
- * Copyright (C) 2019-2021, Raspberry Pi (Trading) Limited
+ * Copyright (C) 2019-2021, Raspberry Pi Ltd
*
* device_status.h - device (image sensor) status
*/
#pragma once
#include <iostream>
+#include <optional>
#include <libcamera/base/utils.h>
@@ -17,23 +18,24 @@
struct DeviceStatus {
DeviceStatus()
- : shutter_speed(std::chrono::seconds(0)), frame_length(0),
- analogue_gain(0.0), lens_position(0.0), aperture(0.0),
- flash_intensity(0.0)
+ : shutterSpeed(std::chrono::seconds(0)), frameLength(0),
+ analogueGain(0.0)
{
}
friend std::ostream &operator<<(std::ostream &out, const DeviceStatus &d);
/* time shutter is open */
- libcamera::utils::Duration shutter_speed;
+ libcamera::utils::Duration shutterSpeed;
/* frame length given in number of lines */
- uint32_t frame_length;
- double analogue_gain;
+ uint32_t frameLength;
+ double analogueGain;
/* 1.0/distance-in-metres, or 0 if unknown */
- double lens_position;
+ std::optional<double> lensPosition;
/* 1/f so that brightness quadruples when this doubles, or 0 if unknown */
- double aperture;
+ std::optional<double> aperture;
/* proportional to brightness with 0 = no flash, 1 = maximum flash */
- double flash_intensity;
+ std::optional<double> flashIntensity;
+ /* Sensor reported temperature value (in degrees) */
+ std::optional<double> sensorTemperature;
};
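
The optional fields let camera helpers report only what the sensor actually provides, and the stream operator above skips anything left unset. A small illustrative sketch (all values made up):

	#include <iostream>

	#include "device_status.h"

	using namespace std::chrono_literals;

	void deviceStatusExample()
	{
		DeviceStatus status;
		status.shutterSpeed = 10ms;
		status.frameLength = 1200;
		status.analogueGain = 2.0;
		status.sensorTemperature = 45.0;	/* lensPosition, aperture and flashIntensity stay unset */

		std::cout << status << std::endl;	/* prints the temperature, omits the unset fields */
	}
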
diff --git a/src/ipa/raspberrypi/controller/dpc_status.h b/src/ipa/raspberrypi/controller/dpc_status.h
index a3ec2762..46d0cf34 100644
--- a/src/ipa/raspberrypi/controller/dpc_status.h
+++ b/src/ipa/raspberrypi/controller/dpc_status.h
@@ -1,21 +1,13 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
+ * Copyright (C) 2019, Raspberry Pi Ltd
*
* dpc_status.h - DPC (defective pixel correction) control algorithm status
*/
#pragma once
-// The "DPC" algorithm sets defective pixel correction strength.
-
-#ifdef __cplusplus
-extern "C" {
-#endif
+/* The "DPC" algorithm sets defective pixel correction strength. */
struct DpcStatus {
- int strength; // 0 = "off", 1 = "normal", 2 = "strong"
+ int strength; /* 0 = "off", 1 = "normal", 2 = "strong" */
};
-
-#ifdef __cplusplus
-}
-#endif
diff --git a/src/ipa/raspberrypi/controller/focus_status.h b/src/ipa/raspberrypi/controller/focus_status.h
index ace2fe2c..8b74e598 100644
--- a/src/ipa/raspberrypi/controller/focus_status.h
+++ b/src/ipa/raspberrypi/controller/focus_status.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/*
- * Copyright (C) 2020, Raspberry Pi (Trading) Limited
+ * Copyright (C) 2020, Raspberry Pi Ltd
*
* focus_status.h - focus measurement status
*/
@@ -8,19 +8,13 @@
#include <linux/bcm2835-isp.h>
-// The focus algorithm should post the following structure into the image's
-// "focus.status" metadata. Recall that it's only reporting focus (contrast)
-// measurements, it's not driving any kind of auto-focus algorithm!
-
-#ifdef __cplusplus
-extern "C" {
-#endif
+/*
+ * The focus algorithm should post the following structure into the image's
+ * "focus.status" metadata. Recall that it's only reporting focus (contrast)
+ * measurements, it's not driving any kind of auto-focus algorithm!
+ */
struct FocusStatus {
unsigned int num;
- uint32_t focus_measures[FOCUS_REGIONS];
+ uint32_t focusMeasures[FOCUS_REGIONS];
};
-
-#ifdef __cplusplus
-}
-#endif
diff --git a/src/ipa/raspberrypi/controller/geq_status.h b/src/ipa/raspberrypi/controller/geq_status.h
index 07fd5f03..2d749fc9 100644
--- a/src/ipa/raspberrypi/controller/geq_status.h
+++ b/src/ipa/raspberrypi/controller/geq_status.h
@@ -1,22 +1,14 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
+ * Copyright (C) 2019, Raspberry Pi Ltd
*
* geq_status.h - GEQ (green equalisation) control algorithm status
*/
#pragma once
-// The "GEQ" algorithm calculates the green equalisation thresholds
-
-#ifdef __cplusplus
-extern "C" {
-#endif
+/* The "GEQ" algorithm calculates the green equalisation thresholds */
struct GeqStatus {
uint16_t offset;
double slope;
};
-
-#ifdef __cplusplus
-}
-#endif
diff --git a/src/ipa/raspberrypi/controller/histogram.cpp b/src/ipa/raspberrypi/controller/histogram.cpp
index 9916b3ed..16a9207f 100644
--- a/src/ipa/raspberrypi/controller/histogram.cpp
+++ b/src/ipa/raspberrypi/controller/histogram.cpp
@@ -1,42 +1,42 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
+ * Copyright (C) 2019, Raspberry Pi Ltd
*
* histogram.cpp - histogram calculations
*/
#include <math.h>
#include <stdio.h>
-#include "histogram.hpp"
+#include "histogram.h"
using namespace RPiController;
-uint64_t Histogram::CumulativeFreq(double bin) const
+uint64_t Histogram::cumulativeFreq(double bin) const
{
if (bin <= 0)
return 0;
- else if (bin >= Bins())
- return Total();
+ else if (bin >= bins())
+ return total();
int b = (int)bin;
return cumulative_[b] +
(bin - b) * (cumulative_[b + 1] - cumulative_[b]);
}
-double Histogram::Quantile(double q, int first, int last) const
+double Histogram::quantile(double q, int first, int last) const
{
if (first == -1)
first = 0;
if (last == -1)
last = cumulative_.size() - 2;
assert(first <= last);
- uint64_t items = q * Total();
- while (first < last) // binary search to find the right bin
+ uint64_t items = q * total();
+ while (first < last) /* binary search to find the right bin */
{
int middle = (first + last) / 2;
if (cumulative_[middle + 1] > items)
- last = middle; // between first and middle
+ last = middle; /* between first and middle */
else
- first = middle + 1; // after middle
+ first = middle + 1; /* after middle */
}
assert(items >= cumulative_[first] && items <= cumulative_[last + 1]);
double frac = cumulative_[first + 1] == cumulative_[first] ? 0
@@ -45,20 +45,20 @@ double Histogram::Quantile(double q, int first, int last) const
return first + frac;
}
-double Histogram::InterQuantileMean(double q_lo, double q_hi) const
+double Histogram::interQuantileMean(double qLo, double qHi) const
{
- assert(q_hi > q_lo);
- double p_lo = Quantile(q_lo);
- double p_hi = Quantile(q_hi, (int)p_lo);
- double sum_bin_freq = 0, cumul_freq = 0;
- for (double p_next = floor(p_lo) + 1.0; p_next <= ceil(p_hi);
- p_lo = p_next, p_next += 1.0) {
- int bin = floor(p_lo);
+ assert(qHi > qLo);
+ double pLo = quantile(qLo);
+ double pHi = quantile(qHi, (int)pLo);
+ double sumBinFreq = 0, cumulFreq = 0;
+ for (double pNext = floor(pLo) + 1.0; pNext <= ceil(pHi);
+ pLo = pNext, pNext += 1.0) {
+ int bin = floor(pLo);
double freq = (cumulative_[bin + 1] - cumulative_[bin]) *
- (std::min(p_next, p_hi) - p_lo);
- sum_bin_freq += bin * freq;
- cumul_freq += freq;
+ (std::min(pNext, pHi) - pLo);
+ sumBinFreq += bin * freq;
+ cumulFreq += freq;
}
- // add 0.5 to give an average for bin mid-points
- return sum_bin_freq / cumul_freq + 0.5;
+ /* add 0.5 to give an average for bin mid-points */
+ return sumBinFreq / cumulFreq + 0.5;
}
diff --git a/src/ipa/raspberrypi/controller/histogram.h b/src/ipa/raspberrypi/controller/histogram.h
new file mode 100644
index 00000000..66a68b08
--- /dev/null
+++ b/src/ipa/raspberrypi/controller/histogram.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * histogram.h - histogram calculation interface
+ */
+#pragma once
+
+#include <stdint.h>
+#include <vector>
+#include <cassert>
+
+/*
+ * A simple histogram class, for use in particular to find "quantiles" and
+ * averages between "quantiles".
+ */
+
+namespace RPiController {
+
+class Histogram
+{
+public:
+ template<typename T> Histogram(T *histogram, int num)
+ {
+ assert(num);
+ cumulative_.reserve(num + 1);
+ cumulative_.push_back(0);
+ for (int i = 0; i < num; i++)
+ cumulative_.push_back(cumulative_.back() +
+ histogram[i]);
+ }
+ uint32_t bins() const { return cumulative_.size() - 1; }
+ uint64_t total() const { return cumulative_[cumulative_.size() - 1]; }
+ /* Cumulative frequency up to a (fractional) point in a bin. */
+ uint64_t cumulativeFreq(double bin) const;
+ /*
+ * Return the (fractional) bin of the point q (0 <= q <= 1) through the
+ * histogram. Optionally provide limits to help.
+ */
+ double quantile(double q, int first = -1, int last = -1) const;
+ /* Return the average histogram bin value between the two quantiles. */
+ double interQuantileMean(double qLo, double qHi) const;
+
+private:
+ std::vector<uint64_t> cumulative_;
+};
+
+} /* namespace RPiController */
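
The renamed methods behave exactly as before; a tiny worked example on a four-bin histogram (data made up for illustration):

	#include "histogram.h"

	using namespace RPiController;

	void histogramExample()
	{
		uint32_t bins[] = { 1, 3, 4, 2 };
		Histogram h(bins, 4);

		uint64_t half = h.cumulativeFreq(2.0);		/* counts in bins 0 and 1 -> 4 */
		double median = h.quantile(0.5);		/* fractional bin containing the median */
		double mid = h.interQuantileMean(0.25, 0.75);	/* mean bin between the quartiles */
		(void)half; (void)median; (void)mid;
	}
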
diff --git a/src/ipa/raspberrypi/controller/histogram.hpp b/src/ipa/raspberrypi/controller/histogram.hpp
deleted file mode 100644
index 90f5ac78..00000000
--- a/src/ipa/raspberrypi/controller/histogram.hpp
+++ /dev/null
@@ -1,44 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
- *
- * histogram.hpp - histogram calculation interface
- */
-#pragma once
-
-#include <stdint.h>
-#include <vector>
-#include <cassert>
-
-// A simple histogram class, for use in particular to find "quantiles" and
-// averages between "quantiles".
-
-namespace RPiController {
-
-class Histogram
-{
-public:
- template<typename T> Histogram(T *histogram, int num)
- {
- assert(num);
- cumulative_.reserve(num + 1);
- cumulative_.push_back(0);
- for (int i = 0; i < num; i++)
- cumulative_.push_back(cumulative_.back() +
- histogram[i]);
- }
- uint32_t Bins() const { return cumulative_.size() - 1; }
- uint64_t Total() const { return cumulative_[cumulative_.size() - 1]; }
- // Cumulative frequency up to a (fractional) point in a bin.
- uint64_t CumulativeFreq(double bin) const;
- // Return the (fractional) bin of the point q (0 <= q <= 1) through the
- // histogram. Optionally provide limits to help.
- double Quantile(double q, int first = -1, int last = -1) const;
- // Return the average histogram bin value between the two quantiles.
- double InterQuantileMean(double q_lo, double q_hi) const;
-
-private:
- std::vector<uint64_t> cumulative_;
-};
-
-} // namespace RPiController
diff --git a/src/ipa/raspberrypi/controller/lux_status.h b/src/ipa/raspberrypi/controller/lux_status.h
index 8ccfd933..5eb9faac 100644
--- a/src/ipa/raspberrypi/controller/lux_status.h
+++ b/src/ipa/raspberrypi/controller/lux_status.h
@@ -1,29 +1,23 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
+ * Copyright (C) 2019, Raspberry Pi Ltd
*
* lux_status.h - Lux control algorithm status
*/
#pragma once
-// The "lux" algorithm looks at the (AGC) histogram statistics of the frame and
-// estimates the current lux level of the scene. It does this by a simple ratio
-// calculation comparing to a reference image that was taken in known conditions
-// with known statistics and a properly measured lux level. There is a slight
-// problem with aperture, in that it may be variable without the system knowing
-// or being aware of it. In this case an external application may set a
-// "current_aperture" value if it wishes, which would be used in place of the
-// (presumably meaningless) value in the image metadata.
-
-#ifdef __cplusplus
-extern "C" {
-#endif
+/*
+ * The "lux" algorithm looks at the (AGC) histogram statistics of the frame and
+ * estimates the current lux level of the scene. It does this by a simple ratio
+ * calculation comparing to a reference image that was taken in known conditions
+ * with known statistics and a properly measured lux level. There is a slight
+ * problem with aperture, in that it may be variable without the system knowing
+ * or being aware of it. In this case an external application may set a
+ * "current_aperture" value if it wishes, which would be used in place of the
+ * (presumably meaningless) value in the image metadata.
+ */
struct LuxStatus {
double lux;
double aperture;
};
-
-#ifdef __cplusplus
-}
-#endif
diff --git a/src/ipa/raspberrypi/controller/metadata.hpp b/src/ipa/raspberrypi/controller/metadata.h
index 51e576cf..0f7ebfaf 100644
--- a/src/ipa/raspberrypi/controller/metadata.hpp
+++ b/src/ipa/raspberrypi/controller/metadata.h
@@ -1,12 +1,12 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/*
- * Copyright (C) 2019-2021, Raspberry Pi (Trading) Limited
+ * Copyright (C) 2019-2021, Raspberry Pi Ltd
*
- * metadata.hpp - general metadata class
+ * metadata.h - general metadata class
*/
#pragma once
-// A simple class for carrying arbitrary metadata, for example about an image.
+/* A simple class for carrying arbitrary metadata, for example about an image. */
#include <any>
#include <map>
@@ -22,26 +22,26 @@ public:
Metadata(Metadata const &other)
{
- std::scoped_lock other_lock(other.mutex_);
+ std::scoped_lock otherLock(other.mutex_);
data_ = other.data_;
}
Metadata(Metadata &&other)
{
- std::scoped_lock other_lock(other.mutex_);
+ std::scoped_lock otherLock(other.mutex_);
data_ = std::move(other.data_);
other.data_.clear();
}
template<typename T>
- void Set(std::string const &tag, T const &value)
+ void set(std::string const &tag, T const &value)
{
std::scoped_lock lock(mutex_);
data_[tag] = value;
}
template<typename T>
- int Get(std::string const &tag, T &value) const
+ int get(std::string const &tag, T &value) const
{
std::scoped_lock lock(mutex_);
auto it = data_.find(tag);
@@ -51,7 +51,7 @@ public:
return 0;
}
- void Clear()
+ void clear()
{
std::scoped_lock lock(mutex_);
data_.clear();
@@ -72,17 +72,19 @@ public:
return *this;
}
- void Merge(Metadata &other)
+ void merge(Metadata &other)
{
std::scoped_lock lock(mutex_, other.mutex_);
data_.merge(other.data_);
}
template<typename T>
- T *GetLocked(std::string const &tag)
+ T *getLocked(std::string const &tag)
{
- // This allows in-place access to the Metadata contents,
- // for which you should be holding the lock.
+ /*
+ * This allows in-place access to the Metadata contents,
+ * for which you should be holding the lock.
+ */
auto it = data_.find(tag);
if (it == data_.end())
return nullptr;
@@ -90,15 +92,17 @@ public:
}
template<typename T>
- void SetLocked(std::string const &tag, T const &value)
+ void setLocked(std::string const &tag, T const &value)
{
- // Use this only if you're holding the lock yourself.
+ /* Use this only if you're holding the lock yourself. */
data_[tag] = value;
}
- // Note: use of (lowercase) lock and unlock means you can create scoped
- // locks with the standard lock classes.
- // e.g. std::lock_guard<RPiController::Metadata> lock(metadata)
+ /*
+ * Note: use of (lowercase) lock and unlock means you can create scoped
+ * locks with the standard lock classes.
+ * e.g. std::lock_guard<RPiController::Metadata> lock(metadata)
+ */
void lock() { mutex_.lock(); }
void unlock() { mutex_.unlock(); }
@@ -107,4 +111,4 @@ private:
std::map<std::string, std::any> data_;
};
-} // namespace RPiController
+} /* namespace RPiController */
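
The Metadata rename keeps the same thread-safe semantics; the sketch below shows the renamed accessors and the scoped-lock idiom mentioned in the comment above. The "lux.estimate" tag is purely illustrative.

	#include <mutex>

	#include "metadata.h"

	using namespace RPiController;

	void metadataExample(Metadata &metadata)
	{
		metadata.set("lux.estimate", 400.0);

		double lux = 0.0;
		int ret = metadata.get("lux.estimate", lux);	/* 0 on success, non-zero if the tag is absent */
		(void)ret;

		/* lock()/unlock() let standard scoped locks guard in-place access via getLocked(). */
		{
			std::lock_guard<Metadata> lock(metadata);
			double *luxPtr = metadata.getLocked<double>("lux.estimate");
			if (luxPtr)
				*luxPtr *= 2.0;
		}
	}
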
diff --git a/src/ipa/raspberrypi/controller/noise_status.h b/src/ipa/raspberrypi/controller/noise_status.h
index 8439a402..da194f71 100644
--- a/src/ipa/raspberrypi/controller/noise_status.h
+++ b/src/ipa/raspberrypi/controller/noise_status.h
@@ -1,22 +1,14 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
+ * Copyright (C) 2019, Raspberry Pi Ltd
*
* noise_status.h - Noise control algorithm status
*/
#pragma once
-// The "noise" algorithm stores an estimate of the noise profile for this image.
-
-#ifdef __cplusplus
-extern "C" {
-#endif
+/* The "noise" algorithm stores an estimate of the noise profile for this image. */
struct NoiseStatus {
- double noise_constant;
- double noise_slope;
+ double noiseConstant;
+ double noiseSlope;
};
-
-#ifdef __cplusplus
-}
-#endif
diff --git a/src/ipa/raspberrypi/controller/pwl.cpp b/src/ipa/raspberrypi/controller/pwl.cpp
index 130c820b..c59f5fa1 100644
--- a/src/ipa/raspberrypi/controller/pwl.cpp
+++ b/src/ipa/raspberrypi/controller/pwl.cpp
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
+ * Copyright (C) 2019, Raspberry Pi Ltd
*
* pwl.cpp - piecewise linear functions
*/
@@ -8,40 +8,52 @@
#include <cassert>
#include <stdexcept>
-#include "pwl.hpp"
+#include "pwl.h"
using namespace RPiController;
-void Pwl::Read(boost::property_tree::ptree const &params)
+int Pwl::read(const libcamera::YamlObject &params)
{
- for (auto it = params.begin(); it != params.end(); it++) {
- double x = it->second.get_value<double>();
- assert(it == params.begin() || x > points_.back().x);
- it++;
- double y = it->second.get_value<double>();
- points_.push_back(Point(x, y));
+ if (!params.size() || params.size() % 2)
+ return -EINVAL;
+
+ const auto &list = params.asList();
+
+ for (auto it = list.begin(); it != list.end(); it++) {
+ auto x = it->get<double>();
+ if (!x)
+ return -EINVAL;
+ if (it != list.begin() && *x <= points_.back().x)
+ return -EINVAL;
+
+ auto y = (++it)->get<double>();
+ if (!y)
+ return -EINVAL;
+
+ points_.push_back(Point(*x, *y));
}
- assert(points_.size() >= 2);
+
+ return 0;
}
-void Pwl::Append(double x, double y, const double eps)
+void Pwl::append(double x, double y, const double eps)
{
if (points_.empty() || points_.back().x + eps < x)
points_.push_back(Point(x, y));
}
-void Pwl::Prepend(double x, double y, const double eps)
+void Pwl::prepend(double x, double y, const double eps)
{
if (points_.empty() || points_.front().x - eps > x)
points_.insert(points_.begin(), Point(x, y));
}
-Pwl::Interval Pwl::Domain() const
+Pwl::Interval Pwl::domain() const
{
return Interval(points_[0].x, points_[points_.size() - 1].x);
}
-Pwl::Interval Pwl::Range() const
+Pwl::Interval Pwl::range() const
{
double lo = points_[0].y, hi = lo;
for (auto &p : points_)
@@ -49,18 +61,16 @@ Pwl::Interval Pwl::Range() const
return Interval(lo, hi);
}
-bool Pwl::Empty() const
+bool Pwl::empty() const
{
return points_.empty();
}
-double Pwl::Eval(double x, int *span_ptr, bool update_span) const
+double Pwl::eval(double x, int *spanPtr, bool updateSpan) const
{
- int span = findSpan(x, span_ptr && *span_ptr != -1
- ? *span_ptr
- : points_.size() / 2 - 1);
- if (span_ptr && update_span)
- *span_ptr = span;
+ int span = findSpan(x, spanPtr && *spanPtr != -1 ? *spanPtr : points_.size() / 2 - 1);
+ if (spanPtr && updateSpan)
+ *spanPtr = span;
return points_[span].y +
(x - points_[span].x) * (points_[span + 1].y - points_[span].y) /
(points_[span + 1].x - points_[span].x);
@@ -68,133 +78,145 @@ double Pwl::Eval(double x, int *span_ptr, bool update_span) const
int Pwl::findSpan(double x, int span) const
{
- // Pwls are generally small, so linear search may well be faster than
- // binary, though could review this if large PWls start turning up.
- int last_span = points_.size() - 2;
- // some algorithms may call us with span pointing directly at the last
- // control point
- span = std::max(0, std::min(last_span, span));
- while (span < last_span && x >= points_[span + 1].x)
+ /*
+ * Pwls are generally small, so linear search may well be faster than
+	 * binary, though we could review this if large Pwls start turning up.
+ */
+ int lastSpan = points_.size() - 2;
+ /*
+ * some algorithms may call us with span pointing directly at the last
+ * control point
+ */
+ span = std::max(0, std::min(lastSpan, span));
+ while (span < lastSpan && x >= points_[span + 1].x)
span++;
while (span && x < points_[span].x)
span--;
return span;
}
-Pwl::PerpType Pwl::Invert(Point const &xy, Point &perp, int &span,
+Pwl::PerpType Pwl::invert(Point const &xy, Point &perp, int &span,
const double eps) const
{
assert(span >= -1);
- bool prev_off_end = false;
+ bool prevOffEnd = false;
for (span = span + 1; span < (int)points_.size() - 1; span++) {
- Point span_vec = points_[span + 1] - points_[span];
- double t = ((xy - points_[span]) % span_vec) / span_vec.Len2();
- if (t < -eps) // off the start of this span
+ Point spanVec = points_[span + 1] - points_[span];
+ double t = ((xy - points_[span]) % spanVec) / spanVec.len2();
+ if (t < -eps) /* off the start of this span */
{
if (span == 0) {
perp = points_[span];
return PerpType::Start;
- } else if (prev_off_end) {
+ } else if (prevOffEnd) {
perp = points_[span];
return PerpType::Vertex;
}
- } else if (t > 1 + eps) // off the end of this span
+ } else if (t > 1 + eps) /* off the end of this span */
{
if (span == (int)points_.size() - 2) {
perp = points_[span + 1];
return PerpType::End;
}
- prev_off_end = true;
- } else // a true perpendicular
+ prevOffEnd = true;
+ } else /* a true perpendicular */
{
- perp = points_[span] + span_vec * t;
+ perp = points_[span] + spanVec * t;
return PerpType::Perpendicular;
}
}
return PerpType::None;
}
-Pwl Pwl::Inverse(bool *true_inverse, const double eps) const
+Pwl Pwl::inverse(bool *trueInverse, const double eps) const
{
bool appended = false, prepended = false, neither = false;
Pwl inverse;
for (Point const &p : points_) {
- if (inverse.Empty())
- inverse.Append(p.y, p.x, eps);
+ if (inverse.empty())
+ inverse.append(p.y, p.x, eps);
else if (std::abs(inverse.points_.back().x - p.y) <= eps ||
std::abs(inverse.points_.front().x - p.y) <= eps)
/* do nothing */;
else if (p.y > inverse.points_.back().x) {
- inverse.Append(p.y, p.x, eps);
+ inverse.append(p.y, p.x, eps);
appended = true;
} else if (p.y < inverse.points_.front().x) {
- inverse.Prepend(p.y, p.x, eps);
+ inverse.prepend(p.y, p.x, eps);
prepended = true;
} else
neither = true;
}
- // This is not a proper inverse if we found ourselves putting points
- // onto both ends of the inverse, or if there were points that couldn't
- // go on either.
- if (true_inverse)
- *true_inverse = !(neither || (appended && prepended));
+ /*
+ * This is not a proper inverse if we found ourselves putting points
+ * onto both ends of the inverse, or if there were points that couldn't
+ * go on either.
+ */
+ if (trueInverse)
+ *trueInverse = !(neither || (appended && prepended));
return inverse;
}
-Pwl Pwl::Compose(Pwl const &other, const double eps) const
+Pwl Pwl::compose(Pwl const &other, const double eps) const
{
- double this_x = points_[0].x, this_y = points_[0].y;
- int this_span = 0, other_span = other.findSpan(this_y, 0);
- Pwl result({ { this_x, other.Eval(this_y, &other_span, false) } });
- while (this_span != (int)points_.size() - 1) {
- double dx = points_[this_span + 1].x - points_[this_span].x,
- dy = points_[this_span + 1].y - points_[this_span].y;
+ double thisX = points_[0].x, thisY = points_[0].y;
+ int thisSpan = 0, otherSpan = other.findSpan(thisY, 0);
+ Pwl result({ { thisX, other.eval(thisY, &otherSpan, false) } });
+ while (thisSpan != (int)points_.size() - 1) {
+ double dx = points_[thisSpan + 1].x - points_[thisSpan].x,
+ dy = points_[thisSpan + 1].y - points_[thisSpan].y;
if (abs(dy) > eps &&
- other_span + 1 < (int)other.points_.size() &&
- points_[this_span + 1].y >=
- other.points_[other_span + 1].x + eps) {
- // next control point in result will be where this
- // function's y reaches the next span in other
- this_x = points_[this_span].x +
- (other.points_[other_span + 1].x -
- points_[this_span].y) * dx / dy;
- this_y = other.points_[++other_span].x;
- } else if (abs(dy) > eps && other_span > 0 &&
- points_[this_span + 1].y <=
- other.points_[other_span - 1].x - eps) {
- // next control point in result will be where this
- // function's y reaches the previous span in other
- this_x = points_[this_span].x +
- (other.points_[other_span + 1].x -
- points_[this_span].y) * dx / dy;
- this_y = other.points_[--other_span].x;
+ otherSpan + 1 < (int)other.points_.size() &&
+ points_[thisSpan + 1].y >=
+ other.points_[otherSpan + 1].x + eps) {
+ /*
+ * next control point in result will be where this
+ * function's y reaches the next span in other
+ */
+ thisX = points_[thisSpan].x +
+ (other.points_[otherSpan + 1].x -
+ points_[thisSpan].y) *
+ dx / dy;
+ thisY = other.points_[++otherSpan].x;
+ } else if (abs(dy) > eps && otherSpan > 0 &&
+ points_[thisSpan + 1].y <=
+ other.points_[otherSpan - 1].x - eps) {
+ /*
+ * next control point in result will be where this
+ * function's y reaches the previous span in other
+ */
+ thisX = points_[thisSpan].x +
+ (other.points_[otherSpan + 1].x -
+ points_[thisSpan].y) *
+ dx / dy;
+ thisY = other.points_[--otherSpan].x;
} else {
- // we stay in the same span in other
- this_span++;
- this_x = points_[this_span].x,
- this_y = points_[this_span].y;
+ /* we stay in the same span in other */
+ thisSpan++;
+ thisX = points_[thisSpan].x,
+ thisY = points_[thisSpan].y;
}
- result.Append(this_x, other.Eval(this_y, &other_span, false),
+ result.append(thisX, other.eval(thisY, &otherSpan, false),
eps);
}
return result;
}
-void Pwl::Map(std::function<void(double x, double y)> f) const
+void Pwl::map(std::function<void(double x, double y)> f) const
{
for (auto &pt : points_)
f(pt.x, pt.y);
}
-void Pwl::Map2(Pwl const &pwl0, Pwl const &pwl1,
+void Pwl::map2(Pwl const &pwl0, Pwl const &pwl1,
std::function<void(double x, double y0, double y1)> f)
{
int span0 = 0, span1 = 0;
double x = std::min(pwl0.points_[0].x, pwl1.points_[0].x);
- f(x, pwl0.Eval(x, &span0, false), pwl1.Eval(x, &span1, false));
+ f(x, pwl0.eval(x, &span0, false), pwl1.eval(x, &span1, false));
while (span0 < (int)pwl0.points_.size() - 1 ||
span1 < (int)pwl1.points_.size() - 1) {
if (span0 == (int)pwl0.points_.size() - 1)
@@ -205,28 +227,28 @@ void Pwl::Map2(Pwl const &pwl0, Pwl const &pwl1,
x = pwl1.points_[++span1].x;
else
x = pwl0.points_[++span0].x;
- f(x, pwl0.Eval(x, &span0, false), pwl1.Eval(x, &span1, false));
+ f(x, pwl0.eval(x, &span0, false), pwl1.eval(x, &span1, false));
}
}
-Pwl Pwl::Combine(Pwl const &pwl0, Pwl const &pwl1,
+Pwl Pwl::combine(Pwl const &pwl0, Pwl const &pwl1,
std::function<double(double x, double y0, double y1)> f,
const double eps)
{
Pwl result;
- Map2(pwl0, pwl1, [&](double x, double y0, double y1) {
- result.Append(x, f(x, y0, y1), eps);
+ map2(pwl0, pwl1, [&](double x, double y0, double y1) {
+ result.append(x, f(x, y0, y1), eps);
});
return result;
}
-void Pwl::MatchDomain(Interval const &domain, bool clip, const double eps)
+void Pwl::matchDomain(Interval const &domain, bool clip, const double eps)
{
int span = 0;
- Prepend(domain.start, Eval(clip ? points_[0].x : domain.start, &span),
+ prepend(domain.start, eval(clip ? points_[0].x : domain.start, &span),
eps);
span = points_.size() - 2;
- Append(domain.end, Eval(clip ? points_.back().x : domain.end, &span),
+ append(domain.end, eval(clip ? points_.back().x : domain.end, &span),
eps);
}
@@ -237,7 +259,7 @@ Pwl &Pwl::operator*=(double d)
return *this;
}
-void Pwl::Debug(FILE *fp) const
+void Pwl::debug(FILE *fp) const
{
fprintf(fp, "Pwl {\n");
for (auto &p : points_)
diff --git a/src/ipa/raspberrypi/controller/pwl.h b/src/ipa/raspberrypi/controller/pwl.h
new file mode 100644
index 00000000..aacf6039
--- /dev/null
+++ b/src/ipa/raspberrypi/controller/pwl.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * pwl.h - piecewise linear functions interface
+ */
+#pragma once
+
+#include <functional>
+#include <math.h>
+#include <vector>
+
+#include "libcamera/internal/yaml_parser.h"
+
+namespace RPiController {
+
+class Pwl
+{
+public:
+ struct Interval {
+ Interval(double _start, double _end)
+ : start(_start), end(_end)
+ {
+ }
+ double start, end;
+ bool contains(double value)
+ {
+ return value >= start && value <= end;
+ }
+ double clip(double value)
+ {
+ return value < start ? start
+ : (value > end ? end : value);
+ }
+ double len() const { return end - start; }
+ };
+ struct Point {
+ Point() : x(0), y(0) {}
+ Point(double _x, double _y)
+ : x(_x), y(_y) {}
+ double x, y;
+ Point operator-(Point const &p) const
+ {
+ return Point(x - p.x, y - p.y);
+ }
+ Point operator+(Point const &p) const
+ {
+ return Point(x + p.x, y + p.y);
+ }
+ double operator%(Point const &p) const
+ {
+ return x * p.x + y * p.y;
+ }
+ Point operator*(double f) const { return Point(x * f, y * f); }
+ Point operator/(double f) const { return Point(x / f, y / f); }
+ double len2() const { return x * x + y * y; }
+ double len() const { return sqrt(len2()); }
+ };
+ Pwl() {}
+ Pwl(std::vector<Point> const &points) : points_(points) {}
+ int read(const libcamera::YamlObject &params);
+ void append(double x, double y, const double eps = 1e-6);
+ void prepend(double x, double y, const double eps = 1e-6);
+ Interval domain() const;
+ Interval range() const;
+ bool empty() const;
+ /*
+ * Evaluate Pwl, optionally supplying an initial guess for the
+ * "span". The "span" may be optionally be updated. If you want to know
+ * the "span" value but don't have an initial guess you can set it to
+ * -1.
+ */
+ double eval(double x, int *spanPtr = nullptr,
+ bool updateSpan = true) const;
+ /*
+ * Find perpendicular closest to xy, starting from span+1 so you can
+ * call it repeatedly to check for multiple closest points (set span to
+ * -1 on the first call). Also returns "pseudo" perpendiculars; see
+ * PerpType enum.
+ */
+ enum class PerpType {
+ None, /* no perpendicular found */
+ Start, /* start of Pwl is closest point */
+ End, /* end of Pwl is closest point */
+ Vertex, /* vertex of Pwl is closest point */
+ Perpendicular /* true perpendicular found */
+ };
+ PerpType invert(Point const &xy, Point &perp, int &span,
+ const double eps = 1e-6) const;
+ /*
+ * Compute the inverse function. Indicate if it is a proper (true)
+ * inverse, or only a best effort (e.g. input was non-monotonic).
+ */
+ Pwl inverse(bool *trueInverse = nullptr, const double eps = 1e-6) const;
+ /* Compose two Pwls together, doing "this" first and "other" after. */
+ Pwl compose(Pwl const &other, const double eps = 1e-6) const;
+ /* Apply function to (x,y) values at every control point. */
+ void map(std::function<void(double x, double y)> f) const;
+ /*
+ * Apply function to (x, y0, y1) values wherever either Pwl has a
+ * control point.
+ */
+ static void map2(Pwl const &pwl0, Pwl const &pwl1,
+ std::function<void(double x, double y0, double y1)> f);
+ /*
+ * Combine two Pwls, meaning we create a new Pwl where the y values are
+ * given by running f wherever either has a knot.
+ */
+ static Pwl
+ combine(Pwl const &pwl0, Pwl const &pwl1,
+ std::function<double(double x, double y0, double y1)> f,
+ const double eps = 1e-6);
+ /*
+ * Make "this" match (at least) the given domain. Any extension my be
+ * clipped or linear.
+ */
+ void matchDomain(Interval const &domain, bool clip = true,
+ const double eps = 1e-6);
+ Pwl &operator*=(double d);
+ void debug(FILE *fp = stdout) const;
+
+private:
+ int findSpan(double x, int span) const;
+ std::vector<Point> points_;
+};
+
+} /* namespace RPiController */
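
For illustration, the renamed Pwl can be built directly from control points; read() additionally accepts a flat YAML list of alternating x, y values and now returns -EINVAL on malformed input instead of asserting. The values below are made up:

	#include "pwl.h"

	using namespace RPiController;

	void pwlExample()
	{
		/* Control points are (x, y) pairs; x must be strictly increasing. */
		Pwl gamma({ { 0.0, 0.0 }, { 1024.0, 512.0 }, { 4096.0, 1024.0 } });

		double y = gamma.eval(2048.0);		/* linear interpolation, ~682.7 */
		Pwl::Interval domain = gamma.domain();	/* [0, 4096] */

		bool trueInverse;
		Pwl inv = gamma.inverse(&trueInverse);	/* monotonic input, so a proper inverse */
		double x = inv.eval(512.0);		/* back to 1024 */

		(void)y; (void)domain; (void)x;
	}
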
diff --git a/src/ipa/raspberrypi/controller/pwl.hpp b/src/ipa/raspberrypi/controller/pwl.hpp
deleted file mode 100644
index 484672f6..00000000
--- a/src/ipa/raspberrypi/controller/pwl.hpp
+++ /dev/null
@@ -1,112 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
- *
- * pwl.hpp - piecewise linear functions interface
- */
-#pragma once
-
-#include <math.h>
-#include <vector>
-
-#include <boost/property_tree/ptree.hpp>
-
-namespace RPiController {
-
-class Pwl
-{
-public:
- struct Interval {
- Interval(double _start, double _end) : start(_start), end(_end)
- {
- }
- double start, end;
- bool Contains(double value)
- {
- return value >= start && value <= end;
- }
- double Clip(double value)
- {
- return value < start ? start
- : (value > end ? end : value);
- }
- double Len() const { return end - start; }
- };
- struct Point {
- Point() : x(0), y(0) {}
- Point(double _x, double _y) : x(_x), y(_y) {}
- double x, y;
- Point operator-(Point const &p) const
- {
- return Point(x - p.x, y - p.y);
- }
- Point operator+(Point const &p) const
- {
- return Point(x + p.x, y + p.y);
- }
- double operator%(Point const &p) const
- {
- return x * p.x + y * p.y;
- }
- Point operator*(double f) const { return Point(x * f, y * f); }
- Point operator/(double f) const { return Point(x / f, y / f); }
- double Len2() const { return x * x + y * y; }
- double Len() const { return sqrt(Len2()); }
- };
- Pwl() {}
- Pwl(std::vector<Point> const &points) : points_(points) {}
- void Read(boost::property_tree::ptree const &params);
- void Append(double x, double y, const double eps = 1e-6);
- void Prepend(double x, double y, const double eps = 1e-6);
- Interval Domain() const;
- Interval Range() const;
- bool Empty() const;
- // Evaluate Pwl, optionally supplying an initial guess for the
- // "span". The "span" may be optionally be updated. If you want to know
- // the "span" value but don't have an initial guess you can set it to
- // -1.
- double Eval(double x, int *span_ptr = nullptr,
- bool update_span = true) const;
- // Find perpendicular closest to xy, starting from span+1 so you can
- // call it repeatedly to check for multiple closest points (set span to
- // -1 on the first call). Also returns "pseudo" perpendiculars; see
- // PerpType enum.
- enum class PerpType {
- None, // no perpendicular found
- Start, // start of Pwl is closest point
- End, // end of Pwl is closest point
- Vertex, // vertex of Pwl is closest point
- Perpendicular // true perpendicular found
- };
- PerpType Invert(Point const &xy, Point &perp, int &span,
- const double eps = 1e-6) const;
- // Compute the inverse function. Indicate if it is a proper (true)
- // inverse, or only a best effort (e.g. input was non-monotonic).
- Pwl Inverse(bool *true_inverse = nullptr, const double eps = 1e-6) const;
- // Compose two Pwls together, doing "this" first and "other" after.
- Pwl Compose(Pwl const &other, const double eps = 1e-6) const;
- // Apply function to (x,y) values at every control point.
- void Map(std::function<void(double x, double y)> f) const;
- // Apply function to (x, y0, y1) values wherever either Pwl has a
- // control point.
- static void Map2(Pwl const &pwl0, Pwl const &pwl1,
- std::function<void(double x, double y0, double y1)> f);
- // Combine two Pwls, meaning we create a new Pwl where the y values are
- // given by running f wherever either has a knot.
- static Pwl
- Combine(Pwl const &pwl0, Pwl const &pwl1,
- std::function<double(double x, double y0, double y1)> f,
- const double eps = 1e-6);
- // Make "this" match (at least) the given domain. Any extension my be
- // clipped or linear.
- void MatchDomain(Interval const &domain, bool clip = true,
- const double eps = 1e-6);
- Pwl &operator*=(double d);
- void Debug(FILE *fp = stdout) const;
-
-private:
- int findSpan(double x, int span) const;
- std::vector<Point> points_;
-};
-
-} // namespace RPiController
diff --git a/src/ipa/raspberrypi/controller/rpi/agc.cpp b/src/ipa/raspberrypi/controller/rpi/agc.cpp
index f6a9cb0a..bd54a639 100644
--- a/src/ipa/raspberrypi/controller/rpi/agc.cpp
+++ b/src/ipa/raspberrypi/controller/rpi/agc.cpp
@@ -1,11 +1,13 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
+ * Copyright (C) 2019, Raspberry Pi Ltd
*
* agc.cpp - AGC/AEC control algorithm
*/
+#include <algorithm>
#include <map>
+#include <tuple>
#include <linux/bcm2835-isp.h>
@@ -13,11 +15,11 @@
#include "../awb_status.h"
#include "../device_status.h"
-#include "../histogram.hpp"
+#include "../histogram.h"
#include "../lux_status.h"
-#include "../metadata.hpp"
+#include "../metadata.h"
-#include "agc.hpp"
+#include "agc.h"
using namespace RPiController;
using namespace libcamera;
@@ -28,410 +30,486 @@ LOG_DEFINE_CATEGORY(RPiAgc)
#define NAME "rpi.agc"
-#define PIPELINE_BITS 13 // seems to be a 13-bit pipeline
+static constexpr unsigned int PipelineBits = 13; /* seems to be a 13-bit pipeline */
-void AgcMeteringMode::Read(boost::property_tree::ptree const &params)
+int AgcMeteringMode::read(const libcamera::YamlObject &params)
{
- int num = 0;
- for (auto &p : params.get_child("weights")) {
- if (num == AGC_STATS_SIZE)
- throw std::runtime_error("AgcConfig: too many weights");
- weights[num++] = p.second.get_value<double>();
+ const YamlObject &yamlWeights = params["weights"];
+ if (yamlWeights.size() != AgcStatsSize) {
+ LOG(RPiAgc, Error) << "AgcMeteringMode: Incorrect number of weights";
+ return -EINVAL;
}
- if (num != AGC_STATS_SIZE)
- throw std::runtime_error("AgcConfig: insufficient weights");
+
+ unsigned int num = 0;
+ for (const auto &p : yamlWeights.asList()) {
+ auto value = p.get<double>();
+ if (!value)
+ return -EINVAL;
+ weights[num++] = *value;
+ }
+
+ return 0;
}
-static std::string
-read_metering_modes(std::map<std::string, AgcMeteringMode> &metering_modes,
- boost::property_tree::ptree const &params)
+static std::tuple<int, std::string>
+readMeteringModes(std::map<std::string, AgcMeteringMode> &metering_modes,
+ const libcamera::YamlObject &params)
{
std::string first;
- for (auto &p : params) {
- AgcMeteringMode metering_mode;
- metering_mode.Read(p.second);
- metering_modes[p.first] = std::move(metering_mode);
+ int ret;
+
+ for (const auto &[key, value] : params.asDict()) {
+ AgcMeteringMode meteringMode;
+ ret = meteringMode.read(value);
+ if (ret)
+ return { ret, {} };
+
+ metering_modes[key] = std::move(meteringMode);
if (first.empty())
- first = p.first;
+ first = key;
}
- return first;
-}
-static int read_list(std::vector<double> &list,
- boost::property_tree::ptree const &params)
-{
- for (auto &p : params)
- list.push_back(p.second.get_value<double>());
- return list.size();
+ return { 0, first };
}
-static int read_list(std::vector<Duration> &list,
- boost::property_tree::ptree const &params)
+int AgcExposureMode::read(const libcamera::YamlObject &params)
{
- for (auto &p : params)
- list.push_back(p.second.get_value<double>() * 1us);
- return list.size();
-}
+ auto value = params["shutter"].getList<double>();
+ if (!value)
+ return -EINVAL;
+ std::transform(value->begin(), value->end(), std::back_inserter(shutter),
+ [](double v) { return v * 1us; });
-void AgcExposureMode::Read(boost::property_tree::ptree const &params)
-{
- int num_shutters = read_list(shutter, params.get_child("shutter"));
- int num_ags = read_list(gain, params.get_child("gain"));
- if (num_shutters < 2 || num_ags < 2)
- throw std::runtime_error(
- "AgcConfig: must have at least two entries in exposure profile");
- if (num_shutters != num_ags)
- throw std::runtime_error(
- "AgcConfig: expect same number of exposure and gain entries in exposure profile");
+ value = params["gain"].getList<double>();
+ if (!value)
+ return -EINVAL;
+ gain = std::move(*value);
+
+ if (shutter.size() < 2 || gain.size() < 2) {
+ LOG(RPiAgc, Error)
+ << "AgcExposureMode: must have at least two entries in exposure profile";
+ return -EINVAL;
+ }
+
+ if (shutter.size() != gain.size()) {
+ LOG(RPiAgc, Error)
+ << "AgcExposureMode: expect same number of exposure and gain entries in exposure profile";
+ return -EINVAL;
+ }
+
+ return 0;
}
-static std::string
-read_exposure_modes(std::map<std::string, AgcExposureMode> &exposure_modes,
- boost::property_tree::ptree const &params)
+static std::tuple<int, std::string>
+readExposureModes(std::map<std::string, AgcExposureMode> &exposureModes,
+ const libcamera::YamlObject &params)
{
std::string first;
- for (auto &p : params) {
- AgcExposureMode exposure_mode;
- exposure_mode.Read(p.second);
- exposure_modes[p.first] = std::move(exposure_mode);
+ int ret;
+
+ for (const auto &[key, value] : params.asDict()) {
+ AgcExposureMode exposureMode;
+ ret = exposureMode.read(value);
+ if (ret)
+ return { ret, {} };
+
+ exposureModes[key] = std::move(exposureMode);
if (first.empty())
- first = p.first;
+ first = key;
}
- return first;
+
+ return { 0, first };
}
-void AgcConstraint::Read(boost::property_tree::ptree const &params)
+int AgcConstraint::read(const libcamera::YamlObject &params)
{
- std::string bound_string = params.get<std::string>("bound", "");
- transform(bound_string.begin(), bound_string.end(),
- bound_string.begin(), ::toupper);
- if (bound_string != "UPPER" && bound_string != "LOWER")
- throw std::runtime_error(
- "AGC constraint type should be UPPER or LOWER");
- bound = bound_string == "UPPER" ? Bound::UPPER : Bound::LOWER;
- q_lo = params.get<double>("q_lo");
- q_hi = params.get<double>("q_hi");
- Y_target.Read(params.get_child("y_target"));
+ std::string boundString = params["bound"].get<std::string>("");
+ transform(boundString.begin(), boundString.end(),
+ boundString.begin(), ::toupper);
+ if (boundString != "UPPER" && boundString != "LOWER") {
+ LOG(RPiAgc, Error) << "AGC constraint type should be UPPER or LOWER";
+ return -EINVAL;
+ }
+ bound = boundString == "UPPER" ? Bound::UPPER : Bound::LOWER;
+
+ auto value = params["q_lo"].get<double>();
+ if (!value)
+ return -EINVAL;
+ qLo = *value;
+
+ value = params["q_hi"].get<double>();
+ if (!value)
+ return -EINVAL;
+ qHi = *value;
+
+ return yTarget.read(params["y_target"]);
}
-static AgcConstraintMode
-read_constraint_mode(boost::property_tree::ptree const &params)
+static std::tuple<int, AgcConstraintMode>
+readConstraintMode(const libcamera::YamlObject &params)
{
AgcConstraintMode mode;
- for (auto &p : params) {
+ int ret;
+
+ for (const auto &p : params.asList()) {
AgcConstraint constraint;
- constraint.Read(p.second);
+ ret = constraint.read(p);
+ if (ret)
+ return { ret, {} };
+
mode.push_back(std::move(constraint));
}
- return mode;
+
+ return { 0, mode };
}
-static std::string read_constraint_modes(
- std::map<std::string, AgcConstraintMode> &constraint_modes,
- boost::property_tree::ptree const &params)
+static std::tuple<int, std::string>
+readConstraintModes(std::map<std::string, AgcConstraintMode> &constraintModes,
+ const libcamera::YamlObject &params)
{
std::string first;
- for (auto &p : params) {
- constraint_modes[p.first] = read_constraint_mode(p.second);
+ int ret;
+
+ for (const auto &[key, value] : params.asDict()) {
+ std::tie(ret, constraintModes[key]) = readConstraintMode(value);
+ if (ret)
+ return { ret, {} };
+
if (first.empty())
- first = p.first;
+ first = key;
}
- return first;
+
+ return { 0, first };
}
-void AgcConfig::Read(boost::property_tree::ptree const &params)
+int AgcConfig::read(const libcamera::YamlObject &params)
{
LOG(RPiAgc, Debug) << "AgcConfig";
- default_metering_mode = read_metering_modes(
- metering_modes, params.get_child("metering_modes"));
- default_exposure_mode = read_exposure_modes(
- exposure_modes, params.get_child("exposure_modes"));
- default_constraint_mode = read_constraint_modes(
- constraint_modes, params.get_child("constraint_modes"));
- Y_target.Read(params.get_child("y_target"));
- speed = params.get<double>("speed", 0.2);
- startup_frames = params.get<uint16_t>("startup_frames", 10);
- convergence_frames = params.get<unsigned int>("convergence_frames", 6);
- fast_reduce_threshold =
- params.get<double>("fast_reduce_threshold", 0.4);
- base_ev = params.get<double>("base_ev", 1.0);
- // Start with quite a low value as ramping up is easier than ramping down.
- default_exposure_time = params.get<double>("default_exposure_time", 1000) * 1us;
- default_analogue_gain = params.get<double>("default_analogue_gain", 1.0);
+ int ret;
+
+ std::tie(ret, defaultMeteringMode) =
+ readMeteringModes(meteringModes, params["metering_modes"]);
+ if (ret)
+ return ret;
+ std::tie(ret, defaultExposureMode) =
+ readExposureModes(exposureModes, params["exposure_modes"]);
+ if (ret)
+ return ret;
+ std::tie(ret, defaultConstraintMode) =
+ readConstraintModes(constraintModes, params["constraint_modes"]);
+ if (ret)
+ return ret;
+
+ ret = yTarget.read(params["y_target"]);
+ if (ret)
+ return ret;
+
+ speed = params["speed"].get<double>(0.2);
+ startupFrames = params["startup_frames"].get<uint16_t>(10);
+ convergenceFrames = params["convergence_frames"].get<unsigned int>(6);
+ fastReduceThreshold = params["fast_reduce_threshold"].get<double>(0.4);
+ baseEv = params["base_ev"].get<double>(1.0);
+
+ /* Start with quite a low value as ramping up is easier than ramping down. */
+ defaultExposureTime = params["default_exposure_time"].get<double>(1000) * 1us;
+ defaultAnalogueGain = params["default_analogue_gain"].get<double>(1.0);
+
+ return 0;
}
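/*
 * Illustrative sketch (editorial, not part of the patch): the parsing idiom
 * used by AgcConfig::read() above, reduced to plain std::optional so it is
 * self-contained. Required keys yield an empty value when missing and the
 * reader fails with -EINVAL; optional keys take a default; multi-valued
 * readers return a { ret, value } tuple that callers unpack with std::tie.
 * ToyConfig and the parameter names are hypothetical stand-ins.
 */
#include <cerrno>
#include <optional>
#include <tuple>

struct ToyConfig {
	double qLo;
	double speed;
};

static std::tuple<int, ToyConfig> readToyConfig(std::optional<double> qLo,
						std::optional<double> speed)
{
	ToyConfig config{};

	/* Required key: a missing value is an error, exactly like params["q_lo"]. */
	if (!qLo)
		return { -EINVAL, {} };
	config.qLo = *qLo;

	/* Optional key with a default, like params["speed"].get<double>(0.2). */
	config.speed = speed.value_or(0.2);

	return { 0, config };
}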
Agc::ExposureValues::ExposureValues()
- : shutter(0s), analogue_gain(0),
- total_exposure(0s), total_exposure_no_dg(0s)
+ : shutter(0s), analogueGain(0),
+ totalExposure(0s), totalExposureNoDG(0s)
{
}
Agc::Agc(Controller *controller)
- : AgcAlgorithm(controller), metering_mode_(nullptr),
- exposure_mode_(nullptr), constraint_mode_(nullptr),
- frame_count_(0), lock_count_(0),
- last_target_exposure_(0s), last_sensitivity_(0.0),
- ev_(1.0), flicker_period_(0s),
- max_shutter_(0s), fixed_shutter_(0s), fixed_analogue_gain_(0.0)
+ : AgcAlgorithm(controller), meteringMode_(nullptr),
+ exposureMode_(nullptr), constraintMode_(nullptr),
+ frameCount_(0), lockCount_(0),
+ lastTargetExposure_(0s), lastSensitivity_(0.0),
+ ev_(1.0), flickerPeriod_(0s),
+ maxShutter_(0s), fixedShutter_(0s), fixedAnalogueGain_(0.0)
{
memset(&awb_, 0, sizeof(awb_));
- // Setting status_.total_exposure_value_ to zero initially tells us
- // it's not been calculated yet (i.e. Process hasn't yet run).
+ /*
+ * Setting status_.totalExposureValue to zero initially tells us
+ * it's not been calculated yet (i.e. process() hasn't yet run).
+ */
memset(&status_, 0, sizeof(status_));
status_.ev = ev_;
}
-char const *Agc::Name() const
+char const *Agc::name() const
{
return NAME;
}
-void Agc::Read(boost::property_tree::ptree const &params)
+int Agc::read(const libcamera::YamlObject &params)
{
LOG(RPiAgc, Debug) << "Agc";
- config_.Read(params);
- // Set the config's defaults (which are the first ones it read) as our
- // current modes, until someone changes them. (they're all known to
- // exist at this point)
- metering_mode_name_ = config_.default_metering_mode;
- metering_mode_ = &config_.metering_modes[metering_mode_name_];
- exposure_mode_name_ = config_.default_exposure_mode;
- exposure_mode_ = &config_.exposure_modes[exposure_mode_name_];
- constraint_mode_name_ = config_.default_constraint_mode;
- constraint_mode_ = &config_.constraint_modes[constraint_mode_name_];
- // Set up the "last shutter/gain" values, in case AGC starts "disabled".
- status_.shutter_time = config_.default_exposure_time;
- status_.analogue_gain = config_.default_analogue_gain;
-}
-
-bool Agc::IsPaused() const
+
+ int ret = config_.read(params);
+ if (ret)
+ return ret;
+
+ /*
+ * Set the config's defaults (which are the first ones it read) as our
+ * current modes, until someone changes them. (they're all known to
+ * exist at this point)
+ */
+ meteringModeName_ = config_.defaultMeteringMode;
+ meteringMode_ = &config_.meteringModes[meteringModeName_];
+ exposureModeName_ = config_.defaultExposureMode;
+ exposureMode_ = &config_.exposureModes[exposureModeName_];
+ constraintModeName_ = config_.defaultConstraintMode;
+ constraintMode_ = &config_.constraintModes[constraintModeName_];
+ /* Set up the "last shutter/gain" values, in case AGC starts "disabled". */
+ status_.shutterTime = config_.defaultExposureTime;
+ status_.analogueGain = config_.defaultAnalogueGain;
+ return 0;
+}
+
+bool Agc::isPaused() const
{
return false;
}
-void Agc::Pause()
+void Agc::pause()
{
- fixed_shutter_ = status_.shutter_time;
- fixed_analogue_gain_ = status_.analogue_gain;
+ fixedShutter_ = status_.shutterTime;
+ fixedAnalogueGain_ = status_.analogueGain;
}
-void Agc::Resume()
+void Agc::resume()
{
- fixed_shutter_ = 0s;
- fixed_analogue_gain_ = 0;
+ fixedShutter_ = 0s;
+ fixedAnalogueGain_ = 0;
}
-unsigned int Agc::GetConvergenceFrames() const
+unsigned int Agc::getConvergenceFrames() const
{
- // If shutter and gain have been explicitly set, there is no
- // convergence to happen, so no need to drop any frames - return zero.
- if (fixed_shutter_ && fixed_analogue_gain_)
+ /*
+ * If shutter and gain have been explicitly set, there is no
+ * convergence to happen, so no need to drop any frames - return zero.
+ */
+ if (fixedShutter_ && fixedAnalogueGain_)
return 0;
else
- return config_.convergence_frames;
+ return config_.convergenceFrames;
}
-void Agc::SetEv(double ev)
+void Agc::setEv(double ev)
{
ev_ = ev;
}
-void Agc::SetFlickerPeriod(Duration flicker_period)
+void Agc::setFlickerPeriod(Duration flickerPeriod)
{
- flicker_period_ = flicker_period;
+ flickerPeriod_ = flickerPeriod;
}
-void Agc::SetMaxShutter(Duration max_shutter)
+void Agc::setMaxShutter(Duration maxShutter)
{
- max_shutter_ = max_shutter;
+ maxShutter_ = maxShutter;
}
-void Agc::SetFixedShutter(Duration fixed_shutter)
+void Agc::setFixedShutter(Duration fixedShutter)
{
- fixed_shutter_ = fixed_shutter;
- // Set this in case someone calls Pause() straight after.
- status_.shutter_time = clipShutter(fixed_shutter_);
+ fixedShutter_ = fixedShutter;
+ /* Set this in case someone calls pause() straight after. */
+ status_.shutterTime = clipShutter(fixedShutter_);
}
-void Agc::SetFixedAnalogueGain(double fixed_analogue_gain)
+void Agc::setFixedAnalogueGain(double fixedAnalogueGain)
{
- fixed_analogue_gain_ = fixed_analogue_gain;
- // Set this in case someone calls Pause() straight after.
- status_.analogue_gain = fixed_analogue_gain;
+ fixedAnalogueGain_ = fixedAnalogueGain;
+ /* Set this in case someone calls pause() straight after. */
+ status_.analogueGain = fixedAnalogueGain;
}
-void Agc::SetMeteringMode(std::string const &metering_mode_name)
+void Agc::setMeteringMode(std::string const &meteringModeName)
{
- metering_mode_name_ = metering_mode_name;
+ meteringModeName_ = meteringModeName;
}
-void Agc::SetExposureMode(std::string const &exposure_mode_name)
+void Agc::setExposureMode(std::string const &exposureModeName)
{
- exposure_mode_name_ = exposure_mode_name;
+ exposureModeName_ = exposureModeName;
}
-void Agc::SetConstraintMode(std::string const &constraint_mode_name)
+void Agc::setConstraintMode(std::string const &constraintModeName)
{
- constraint_mode_name_ = constraint_mode_name;
+ constraintModeName_ = constraintModeName;
}
-void Agc::SwitchMode(CameraMode const &camera_mode,
+void Agc::switchMode(CameraMode const &cameraMode,
Metadata *metadata)
{
/* AGC expects the mode sensitivity always to be non-zero. */
- ASSERT(camera_mode.sensitivity);
+ ASSERT(cameraMode.sensitivity);
housekeepConfig();
- Duration fixed_shutter = clipShutter(fixed_shutter_);
- if (fixed_shutter && fixed_analogue_gain_) {
- // We're going to reset the algorithm here with these fixed values.
+ Duration fixedShutter = clipShutter(fixedShutter_);
+ if (fixedShutter && fixedAnalogueGain_) {
+ /* We're going to reset the algorithm here with these fixed values. */
fetchAwbStatus(metadata);
- double min_colour_gain = std::min({ awb_.gain_r, awb_.gain_g, awb_.gain_b, 1.0 });
- ASSERT(min_colour_gain != 0.0);
+ double minColourGain = std::min({ awb_.gainR, awb_.gainG, awb_.gainB, 1.0 });
+ ASSERT(minColourGain != 0.0);
- // This is the equivalent of computeTargetExposure and applyDigitalGain.
- target_.total_exposure_no_dg = fixed_shutter * fixed_analogue_gain_;
- target_.total_exposure = target_.total_exposure_no_dg / min_colour_gain;
+ /* This is the equivalent of computeTargetExposure and applyDigitalGain. */
+ target_.totalExposureNoDG = fixedShutter * fixedAnalogueGain_;
+ target_.totalExposure = target_.totalExposureNoDG / minColourGain;
- // Equivalent of filterExposure. This resets any "history".
+ /* Equivalent of filterExposure. This resets any "history". */
filtered_ = target_;
- // Equivalent of divideUpExposure.
- filtered_.shutter = fixed_shutter;
- filtered_.analogue_gain = fixed_analogue_gain_;
- } else if (status_.total_exposure_value) {
- // On a mode switch, various things could happen:
- // - the exposure profile might change
- // - a fixed exposure or gain might be set
- // - the new mode's sensitivity might be different
- // We cope with the last of these by scaling the target values. After
- // that we just need to re-divide the exposure/gain according to the
- // current exposure profile, which takes care of everything else.
-
- double ratio = last_sensitivity_ / camera_mode.sensitivity;
- target_.total_exposure_no_dg *= ratio;
- target_.total_exposure *= ratio;
- filtered_.total_exposure_no_dg *= ratio;
- filtered_.total_exposure *= ratio;
+ /* Equivalent of divideUpExposure. */
+ filtered_.shutter = fixedShutter;
+ filtered_.analogueGain = fixedAnalogueGain_;
+ } else if (status_.totalExposureValue) {
+ /*
+ * On a mode switch, various things could happen:
+ * - the exposure profile might change
+ * - a fixed exposure or gain might be set
+ * - the new mode's sensitivity might be different
+ * We cope with the last of these by scaling the target values. After
+ * that we just need to re-divide the exposure/gain according to the
+ * current exposure profile, which takes care of everything else.
+ */
+
+ double ratio = lastSensitivity_ / cameraMode.sensitivity;
+ target_.totalExposureNoDG *= ratio;
+ target_.totalExposure *= ratio;
+ filtered_.totalExposureNoDG *= ratio;
+ filtered_.totalExposure *= ratio;
divideUpExposure();
} else {
- // We come through here on startup, when at least one of the shutter
- // or gain has not been fixed. We must still write those values out so
- // that they will be applied immediately. We supply some arbitrary defaults
- // for any that weren't set.
-
- // Equivalent of divideUpExposure.
- filtered_.shutter = fixed_shutter ? fixed_shutter : config_.default_exposure_time;
- filtered_.analogue_gain = fixed_analogue_gain_ ? fixed_analogue_gain_ : config_.default_analogue_gain;
+ /*
+ * We come through here on startup, when at least one of the shutter
+ * or gain has not been fixed. We must still write those values out so
+ * that they will be applied immediately. We supply some arbitrary defaults
+ * for any that weren't set.
+ */
+
+ /* Equivalent of divideUpExposure. */
+ filtered_.shutter = fixedShutter ? fixedShutter : config_.defaultExposureTime;
+ filtered_.analogueGain = fixedAnalogueGain_ ? fixedAnalogueGain_ : config_.defaultAnalogueGain;
}
writeAndFinish(metadata, false);
- // We must remember the sensitivity of this mode for the next SwitchMode.
- last_sensitivity_ = camera_mode.sensitivity;
-}
-
-void Agc::Prepare(Metadata *image_metadata)
-{
- status_.digital_gain = 1.0;
- fetchAwbStatus(image_metadata); // always fetch it so that Process knows it's been done
-
- if (status_.total_exposure_value) {
- // Process has run, so we have meaningful values.
- DeviceStatus device_status;
- if (image_metadata->Get("device.status", device_status) == 0) {
- Duration actual_exposure = device_status.shutter_speed *
- device_status.analogue_gain;
- if (actual_exposure) {
- status_.digital_gain =
- status_.total_exposure_value /
- actual_exposure;
- LOG(RPiAgc, Debug) << "Want total exposure " << status_.total_exposure_value;
- // Never ask for a gain < 1.0, and also impose
- // some upper limit. Make it customisable?
- status_.digital_gain = std::max(
- 1.0,
- std::min(status_.digital_gain, 4.0));
- LOG(RPiAgc, Debug) << "Actual exposure " << actual_exposure;
- LOG(RPiAgc, Debug) << "Use digital_gain " << status_.digital_gain;
+ /* We must remember the sensitivity of this mode for the next switchMode(). */
+ lastSensitivity_ = cameraMode.sensitivity;
+}
+
+void Agc::prepare(Metadata *imageMetadata)
+{
+ status_.digitalGain = 1.0;
+ fetchAwbStatus(imageMetadata); /* always fetch it so that process() knows it's been done */
+
+ if (status_.totalExposureValue) {
+ /* Process has run, so we have meaningful values. */
+ DeviceStatus deviceStatus;
+ if (imageMetadata->get("device.status", deviceStatus) == 0) {
+ Duration actualExposure = deviceStatus.shutterSpeed *
+ deviceStatus.analogueGain;
+ if (actualExposure) {
+ status_.digitalGain = status_.totalExposureValue / actualExposure;
+ LOG(RPiAgc, Debug) << "Want total exposure " << status_.totalExposureValue;
+ /*
+ * Never ask for a gain < 1.0, and also impose
+ * some upper limit. Make it customisable?
+ */
+ status_.digitalGain = std::max(1.0, std::min(status_.digitalGain, 4.0));
+ LOG(RPiAgc, Debug) << "Actual exposure " << actualExposure;
+ LOG(RPiAgc, Debug) << "Use digitalGain " << status_.digitalGain;
LOG(RPiAgc, Debug) << "Effective exposure "
- << actual_exposure * status_.digital_gain;
- // Decide whether AEC/AGC has converged.
- updateLockStatus(device_status);
+ << actualExposure * status_.digitalGain;
+ /* Decide whether AEC/AGC has converged. */
+ updateLockStatus(deviceStatus);
}
} else
- LOG(RPiAgc, Warning) << Name() << ": no device metadata";
- image_metadata->Set("agc.status", status_);
+ LOG(RPiAgc, Warning) << name() << ": no device metadata";
+ imageMetadata->set("agc.status", status_);
}
}
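/*
 * Illustrative sketch (editorial, not part of the patch): the digital gain
 * rule applied by prepare() above, with exposures expressed as plain doubles
 * in microseconds instead of libcamera::utils::Duration.
 */
#include <algorithm>

static double digitalGainFor(double wantedExposureUs, double shutterUs,
			     double analogueGain)
{
	double actualExposureUs = shutterUs * analogueGain;
	if (actualExposureUs == 0.0)
		return 1.0;

	/* Make up the shortfall digitally, but never below 1.0 or above 4.0. */
	return std::clamp(wantedExposureUs / actualExposureUs, 1.0, 4.0);
}

/* e.g. a 20000us target met by a 10000us shutter at gain 1.0 needs digital gain 2.0. */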
-void Agc::Process(StatisticsPtr &stats, Metadata *image_metadata)
+void Agc::process(StatisticsPtr &stats, Metadata *imageMetadata)
{
- frame_count_++;
- // First a little bit of housekeeping, fetching up-to-date settings and
- // configuration, that kind of thing.
+ frameCount_++;
+ /*
+ * First a little bit of housekeeping, fetching up-to-date settings and
+ * configuration, that kind of thing.
+ */
housekeepConfig();
- // Get the current exposure values for the frame that's just arrived.
- fetchCurrentExposure(image_metadata);
- // Compute the total gain we require relative to the current exposure.
- double gain, target_Y;
- computeGain(stats.get(), image_metadata, gain, target_Y);
- // Now compute the target (final) exposure which we think we want.
+ /* Get the current exposure values for the frame that's just arrived. */
+ fetchCurrentExposure(imageMetadata);
+ /* Compute the total gain we require relative to the current exposure. */
+ double gain, targetY;
+ computeGain(stats.get(), imageMetadata, gain, targetY);
+ /* Now compute the target (final) exposure which we think we want. */
computeTargetExposure(gain);
- // Some of the exposure has to be applied as digital gain, so work out
- // what that is. This function also tells us whether it's decided to
- // "desaturate" the image more quickly.
- bool desaturate = applyDigitalGain(gain, target_Y);
- // The results have to be filtered so as not to change too rapidly.
+ /*
+ * Some of the exposure has to be applied as digital gain, so work out
+ * what that is. This function also tells us whether it's decided to
+ * "desaturate" the image more quickly.
+ */
+ bool desaturate = applyDigitalGain(gain, targetY);
+ /* The results have to be filtered so as not to change too rapidly. */
filterExposure(desaturate);
- // The last thing is to divide up the exposure value into a shutter time
- // and analogue_gain, according to the current exposure mode.
+ /*
+ * The last thing is to divide up the exposure value into a shutter time
+ * and analogue gain, according to the current exposure mode.
+ */
divideUpExposure();
- // Finally advertise what we've done.
- writeAndFinish(image_metadata, desaturate);
-}
-
-void Agc::updateLockStatus(DeviceStatus const &device_status)
-{
- const double ERROR_FACTOR = 0.10; // make these customisable?
- const int MAX_LOCK_COUNT = 5;
- // Reset "lock count" when we exceed this multiple of ERROR_FACTOR
- const double RESET_MARGIN = 1.5;
-
- // Add 200us to the exposure time error to allow for line quantisation.
- Duration exposure_error = last_device_status_.shutter_speed * ERROR_FACTOR + 200us;
- double gain_error = last_device_status_.analogue_gain * ERROR_FACTOR;
- Duration target_error = last_target_exposure_ * ERROR_FACTOR;
-
- // Note that we don't know the exposure/gain limits of the sensor, so
- // the values we keep requesting may be unachievable. For this reason
- // we only insist that we're close to values in the past few frames.
- if (device_status.shutter_speed > last_device_status_.shutter_speed - exposure_error &&
- device_status.shutter_speed < last_device_status_.shutter_speed + exposure_error &&
- device_status.analogue_gain > last_device_status_.analogue_gain - gain_error &&
- device_status.analogue_gain < last_device_status_.analogue_gain + gain_error &&
- status_.target_exposure_value > last_target_exposure_ - target_error &&
- status_.target_exposure_value < last_target_exposure_ + target_error)
- lock_count_ = std::min(lock_count_ + 1, MAX_LOCK_COUNT);
- else if (device_status.shutter_speed < last_device_status_.shutter_speed - RESET_MARGIN * exposure_error ||
- device_status.shutter_speed > last_device_status_.shutter_speed + RESET_MARGIN * exposure_error ||
- device_status.analogue_gain < last_device_status_.analogue_gain - RESET_MARGIN * gain_error ||
- device_status.analogue_gain > last_device_status_.analogue_gain + RESET_MARGIN * gain_error ||
- status_.target_exposure_value < last_target_exposure_ - RESET_MARGIN * target_error ||
- status_.target_exposure_value > last_target_exposure_ + RESET_MARGIN * target_error)
- lock_count_ = 0;
-
- last_device_status_ = device_status;
- last_target_exposure_ = status_.target_exposure_value;
-
- LOG(RPiAgc, Debug) << "Lock count updated to " << lock_count_;
- status_.locked = lock_count_ == MAX_LOCK_COUNT;
-}
-
-static void copy_string(std::string const &s, char *d, size_t size)
+ /* Finally advertise what we've done. */
+ writeAndFinish(imageMetadata, desaturate);
+}
+
+void Agc::updateLockStatus(DeviceStatus const &deviceStatus)
+{
+ const double errorFactor = 0.10; /* make these customisable? */
+ const int maxLockCount = 5;
+ /* Reset "lock count" when we exceed this multiple of errorFactor */
+ const double resetMargin = 1.5;
+
+ /* Add 200us to the exposure time error to allow for line quantisation. */
+ Duration exposureError = lastDeviceStatus_.shutterSpeed * errorFactor + 200us;
+ double gainError = lastDeviceStatus_.analogueGain * errorFactor;
+ Duration targetError = lastTargetExposure_ * errorFactor;
+
+ /*
+ * Note that we don't know the exposure/gain limits of the sensor, so
+ * the values we keep requesting may be unachievable. For this reason
+ * we only insist that we're close to values in the past few frames.
+ */
+ if (deviceStatus.shutterSpeed > lastDeviceStatus_.shutterSpeed - exposureError &&
+ deviceStatus.shutterSpeed < lastDeviceStatus_.shutterSpeed + exposureError &&
+ deviceStatus.analogueGain > lastDeviceStatus_.analogueGain - gainError &&
+ deviceStatus.analogueGain < lastDeviceStatus_.analogueGain + gainError &&
+ status_.targetExposureValue > lastTargetExposure_ - targetError &&
+ status_.targetExposureValue < lastTargetExposure_ + targetError)
+ lockCount_ = std::min(lockCount_ + 1, maxLockCount);
+ else if (deviceStatus.shutterSpeed < lastDeviceStatus_.shutterSpeed - resetMargin * exposureError ||
+ deviceStatus.shutterSpeed > lastDeviceStatus_.shutterSpeed + resetMargin * exposureError ||
+ deviceStatus.analogueGain < lastDeviceStatus_.analogueGain - resetMargin * gainError ||
+ deviceStatus.analogueGain > lastDeviceStatus_.analogueGain + resetMargin * gainError ||
+ status_.targetExposureValue < lastTargetExposure_ - resetMargin * targetError ||
+ status_.targetExposureValue > lastTargetExposure_ + resetMargin * targetError)
+ lockCount_ = 0;
+
+ lastDeviceStatus_ = deviceStatus;
+ lastTargetExposure_ = status_.targetExposureValue;
+
+ LOG(RPiAgc, Debug) << "Lock count updated to " << lockCount_;
+ status_.locked = lockCount_ == maxLockCount;
+}
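/*
 * Illustrative sketch (editorial, not part of the patch): the hysteresis that
 * updateLockStatus() applies, shown for one scalar quantity. The real code
 * requires shutter, analogue gain and target exposure all to stay within
 * +/-10% of the previous frame (plus a small shutter offset) before the lock
 * counter advances; a deviation beyond 1.5x that margin resets it, and five
 * consecutive close frames report the AGC as locked.
 */
#include <algorithm>
#include <cmath>

static int updateLock(double value, double previous, int lockCount)
{
	const double errorFactor = 0.10;
	const double resetMargin = 1.5;
	const int maxLockCount = 5;

	double margin = errorFactor * previous;
	double error = std::abs(value - previous);

	if (error < margin)
		return std::min(lockCount + 1, maxLockCount);
	if (error > resetMargin * margin)
		return 0;
	return lockCount; /* in between: neither advance nor reset */
}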
+
+static void copyString(std::string const &s, char *d, size_t size)
{
size_t length = s.copy(d, size - 1);
d[length] = '\0';
@@ -439,359 +517,374 @@ static void copy_string(std::string const &s, char *d, size_t size)
void Agc::housekeepConfig()
{
- // First fetch all the up-to-date settings, so no one else has to do it.
+ /* First fetch all the up-to-date settings, so no one else has to do it. */
status_.ev = ev_;
- status_.fixed_shutter = clipShutter(fixed_shutter_);
- status_.fixed_analogue_gain = fixed_analogue_gain_;
- status_.flicker_period = flicker_period_;
- LOG(RPiAgc, Debug) << "ev " << status_.ev << " fixed_shutter "
- << status_.fixed_shutter << " fixed_analogue_gain "
- << status_.fixed_analogue_gain;
- // Make sure the "mode" pointers point to the up-to-date things, if
- // they've changed.
- if (strcmp(metering_mode_name_.c_str(), status_.metering_mode)) {
- auto it = config_.metering_modes.find(metering_mode_name_);
- if (it == config_.metering_modes.end())
- throw std::runtime_error("Agc: no metering mode " +
- metering_mode_name_);
- metering_mode_ = &it->second;
- copy_string(metering_mode_name_, status_.metering_mode,
- sizeof(status_.metering_mode));
+ status_.fixedShutter = clipShutter(fixedShutter_);
+ status_.fixedAnalogueGain = fixedAnalogueGain_;
+ status_.flickerPeriod = flickerPeriod_;
+ LOG(RPiAgc, Debug) << "ev " << status_.ev << " fixedShutter "
+ << status_.fixedShutter << " fixedAnalogueGain "
+ << status_.fixedAnalogueGain;
+ /*
+ * Make sure the "mode" pointers point to the up-to-date things, if
+ * they've changed.
+ */
+ if (strcmp(meteringModeName_.c_str(), status_.meteringMode)) {
+ auto it = config_.meteringModes.find(meteringModeName_);
+ if (it == config_.meteringModes.end())
+ LOG(RPiAgc, Fatal) << "No metering mode " << meteringModeName_;
+ meteringMode_ = &it->second;
+ copyString(meteringModeName_, status_.meteringMode,
+ sizeof(status_.meteringMode));
}
- if (strcmp(exposure_mode_name_.c_str(), status_.exposure_mode)) {
- auto it = config_.exposure_modes.find(exposure_mode_name_);
- if (it == config_.exposure_modes.end())
- throw std::runtime_error("Agc: no exposure profile " +
- exposure_mode_name_);
- exposure_mode_ = &it->second;
- copy_string(exposure_mode_name_, status_.exposure_mode,
- sizeof(status_.exposure_mode));
+ if (strcmp(exposureModeName_.c_str(), status_.exposureMode)) {
+ auto it = config_.exposureModes.find(exposureModeName_);
+ if (it == config_.exposureModes.end())
+ LOG(RPiAgc, Fatal) << "No exposure profile " << exposureModeName_;
+ exposureMode_ = &it->second;
+ copyString(exposureModeName_, status_.exposureMode,
+ sizeof(status_.exposureMode));
}
- if (strcmp(constraint_mode_name_.c_str(), status_.constraint_mode)) {
+ if (strcmp(constraintModeName_.c_str(), status_.constraintMode)) {
auto it =
- config_.constraint_modes.find(constraint_mode_name_);
- if (it == config_.constraint_modes.end())
- throw std::runtime_error("Agc: no constraint list " +
- constraint_mode_name_);
- constraint_mode_ = &it->second;
- copy_string(constraint_mode_name_, status_.constraint_mode,
- sizeof(status_.constraint_mode));
+ config_.constraintModes.find(constraintModeName_);
+ if (it == config_.constraintModes.end())
+ LOG(RPiAgc, Fatal) << "No constraint list " << constraintModeName_;
+ constraintMode_ = &it->second;
+ copyString(constraintModeName_, status_.constraintMode,
+ sizeof(status_.constraintMode));
}
- LOG(RPiAgc, Debug) << "exposure_mode "
- << exposure_mode_name_ << " constraint_mode "
- << constraint_mode_name_ << " metering_mode "
- << metering_mode_name_;
+ LOG(RPiAgc, Debug) << "exposureMode "
+ << exposureModeName_ << " constraintMode "
+ << constraintModeName_ << " meteringMode "
+ << meteringModeName_;
}
-void Agc::fetchCurrentExposure(Metadata *image_metadata)
+void Agc::fetchCurrentExposure(Metadata *imageMetadata)
{
- std::unique_lock<Metadata> lock(*image_metadata);
- DeviceStatus *device_status =
- image_metadata->GetLocked<DeviceStatus>("device.status");
- if (!device_status)
- throw std::runtime_error("Agc: no device metadata");
- current_.shutter = device_status->shutter_speed;
- current_.analogue_gain = device_status->analogue_gain;
- AgcStatus *agc_status =
- image_metadata->GetLocked<AgcStatus>("agc.status");
- current_.total_exposure = agc_status ? agc_status->total_exposure_value : 0s;
- current_.total_exposure_no_dg = current_.shutter * current_.analogue_gain;
+ std::unique_lock<Metadata> lock(*imageMetadata);
+ DeviceStatus *deviceStatus =
+ imageMetadata->getLocked<DeviceStatus>("device.status");
+ if (!deviceStatus)
+ LOG(RPiAgc, Fatal) << "No device metadata";
+ current_.shutter = deviceStatus->shutterSpeed;
+ current_.analogueGain = deviceStatus->analogueGain;
+ AgcStatus *agcStatus =
+ imageMetadata->getLocked<AgcStatus>("agc.status");
+ current_.totalExposure = agcStatus ? agcStatus->totalExposureValue : 0s;
+ current_.totalExposureNoDG = current_.shutter * current_.analogueGain;
}
-void Agc::fetchAwbStatus(Metadata *image_metadata)
+void Agc::fetchAwbStatus(Metadata *imageMetadata)
{
- awb_.gain_r = 1.0; // in case not found in metadata
- awb_.gain_g = 1.0;
- awb_.gain_b = 1.0;
- if (image_metadata->Get("awb.status", awb_) != 0)
- LOG(RPiAgc, Debug) << "Agc: no AWB status found";
+ awb_.gainR = 1.0; /* in case not found in metadata */
+ awb_.gainG = 1.0;
+ awb_.gainB = 1.0;
+ if (imageMetadata->get("awb.status", awb_) != 0)
+ LOG(RPiAgc, Debug) << "No AWB status found";
}
-static double compute_initial_Y(bcm2835_isp_stats *stats, AwbStatus const &awb,
- double weights[], double gain)
+static double computeInitialY(bcm2835_isp_stats *stats, AwbStatus const &awb,
+ double weights[], double gain)
{
bcm2835_isp_stats_region *regions = stats->agc_stats;
- // Note how the calculation below means that equal weights give you
- // "average" metering (i.e. all pixels equally important).
- double R_sum = 0, G_sum = 0, B_sum = 0, pixel_sum = 0;
- for (int i = 0; i < AGC_STATS_SIZE; i++) {
+ /*
+ * Note how the calculation below means that equal weights give you
+ * "average" metering (i.e. all pixels equally important).
+ */
+ double rSum = 0, gSum = 0, bSum = 0, pixelSum = 0;
+ for (unsigned int i = 0; i < AgcStatsSize; i++) {
double counted = regions[i].counted;
- double r_sum = std::min(regions[i].r_sum * gain, ((1 << PIPELINE_BITS) - 1) * counted);
- double g_sum = std::min(regions[i].g_sum * gain, ((1 << PIPELINE_BITS) - 1) * counted);
- double b_sum = std::min(regions[i].b_sum * gain, ((1 << PIPELINE_BITS) - 1) * counted);
- R_sum += r_sum * weights[i];
- G_sum += g_sum * weights[i];
- B_sum += b_sum * weights[i];
- pixel_sum += counted * weights[i];
+ double rAcc = std::min(regions[i].r_sum * gain, ((1 << PipelineBits) - 1) * counted);
+ double gAcc = std::min(regions[i].g_sum * gain, ((1 << PipelineBits) - 1) * counted);
+ double bAcc = std::min(regions[i].b_sum * gain, ((1 << PipelineBits) - 1) * counted);
+ rSum += rAcc * weights[i];
+ gSum += gAcc * weights[i];
+ bSum += bAcc * weights[i];
+ pixelSum += counted * weights[i];
}
- if (pixel_sum == 0.0) {
- LOG(RPiAgc, Warning) << "compute_initial_Y: pixel_sum is zero";
+ if (pixelSum == 0.0) {
+ LOG(RPiAgc, Warning) << "computeInitialY: pixelSum is zero";
return 0;
}
- double Y_sum = R_sum * awb.gain_r * .299 +
- G_sum * awb.gain_g * .587 +
- B_sum * awb.gain_b * .114;
- return Y_sum / pixel_sum / (1 << PIPELINE_BITS);
+ double ySum = rSum * awb.gainR * .299 +
+ gSum * awb.gainG * .587 +
+ bSum * awb.gainB * .114;
+ return ySum / pixelSum / (1 << PipelineBits);
}
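/*
 * Illustrative sketch (editorial, not part of the patch): the estimate that
 * computeInitialY() builds, written for a single statistics region. The
 * metering weights and the per-channel clamp against saturation are omitted;
 * what remains is white-balanced Rec.601 luma, normalised by the counted
 * pixels and the pipeline bit depth.
 */
static double regionLuma(double rSum, double gSum, double bSum, double counted,
			 double gainR, double gainG, double gainB,
			 unsigned int pipelineBits)
{
	if (counted == 0.0)
		return 0.0;

	double ySum = rSum * gainR * 0.299 +
		      gSum * gainG * 0.587 +
		      bSum * gainB * 0.114;

	return ySum / counted / (1 << pipelineBits);
}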
-// We handle extra gain through EV by adjusting our Y targets. However, you
-// simply can't monitor histograms once they get very close to (or beyond!)
-// saturation, so we clamp the Y targets to this value. It does mean that EV
-// increases don't necessarily do quite what you might expect in certain
-// (contrived) cases.
+/*
+ * We handle extra gain through EV by adjusting our Y targets. However, you
+ * simply can't monitor histograms once they get very close to (or beyond!)
+ * saturation, so we clamp the Y targets to this value. It does mean that EV
+ * increases don't necessarily do quite what you might expect in certain
+ * (contrived) cases.
+ */
-#define EV_GAIN_Y_TARGET_LIMIT 0.9
+static constexpr double EvGainYTargetLimit = 0.9;
-static double constraint_compute_gain(AgcConstraint &c, Histogram &h,
- double lux, double ev_gain,
- double &target_Y)
+static double constraintComputeGain(AgcConstraint &c, Histogram &h, double lux,
+ double evGain, double &targetY)
{
- target_Y = c.Y_target.Eval(c.Y_target.Domain().Clip(lux));
- target_Y = std::min(EV_GAIN_Y_TARGET_LIMIT, target_Y * ev_gain);
- double iqm = h.InterQuantileMean(c.q_lo, c.q_hi);
- return (target_Y * NUM_HISTOGRAM_BINS) / iqm;
+ targetY = c.yTarget.eval(c.yTarget.domain().clip(lux));
+ targetY = std::min(EvGainYTargetLimit, targetY * evGain);
+ double iqm = h.interQuantileMean(c.qLo, c.qHi);
+ return (targetY * NUM_HISTOGRAM_BINS) / iqm;
}
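/*
 * Illustrative sketch (editorial, not part of the patch): the value that
 * constraintComputeGain() returns. The inter-quantile mean of the luminance
 * histogram (in bins) is compared with the EV-adjusted, clamped target, and
 * the ratio is the gain that would move that mean onto the target.
 */
static double gainForTarget(double targetY, double interQuantileMeanBins,
			    unsigned int numHistogramBins)
{
	/* e.g. a target of 0.16 against a mean of 40 out of 128 bins gives ~0.51. */
	return targetY * numHistogramBins / interQuantileMeanBins;
}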
-void Agc::computeGain(bcm2835_isp_stats *statistics, Metadata *image_metadata,
- double &gain, double &target_Y)
+void Agc::computeGain(bcm2835_isp_stats *statistics, Metadata *imageMetadata,
+ double &gain, double &targetY)
{
struct LuxStatus lux = {};
- lux.lux = 400; // default lux level to 400 in case no metadata found
- if (image_metadata->Get("lux.status", lux) != 0)
- LOG(RPiAgc, Warning) << "Agc: no lux level found";
+ lux.lux = 400; /* default lux level to 400 in case no metadata found */
+ if (imageMetadata->get("lux.status", lux) != 0)
+ LOG(RPiAgc, Warning) << "No lux level found";
Histogram h(statistics->hist[0].g_hist, NUM_HISTOGRAM_BINS);
- double ev_gain = status_.ev * config_.base_ev;
- // The initial gain and target_Y come from some of the regions. After
- // that we consider the histogram constraints.
- target_Y =
- config_.Y_target.Eval(config_.Y_target.Domain().Clip(lux.lux));
- target_Y = std::min(EV_GAIN_Y_TARGET_LIMIT, target_Y * ev_gain);
-
- // Do this calculation a few times as brightness increase can be
- // non-linear when there are saturated regions.
+ double evGain = status_.ev * config_.baseEv;
+ /*
+ * The initial gain and targetY come from some of the regions. After
+ * that we consider the histogram constraints.
+ */
+ targetY = config_.yTarget.eval(config_.yTarget.domain().clip(lux.lux));
+ targetY = std::min(EvGainYTargetLimit, targetY * evGain);
+
+ /*
+ * Do this calculation a few times as brightness increase can be
+ * non-linear when there are saturated regions.
+ */
gain = 1.0;
for (int i = 0; i < 8; i++) {
- double initial_Y = compute_initial_Y(statistics, awb_,
- metering_mode_->weights, gain);
- double extra_gain = std::min(10.0, target_Y / (initial_Y + .001));
- gain *= extra_gain;
- LOG(RPiAgc, Debug) << "Initial Y " << initial_Y << " target " << target_Y
+ double initialY = computeInitialY(statistics, awb_, meteringMode_->weights, gain);
+ double extraGain = std::min(10.0, targetY / (initialY + .001));
+ gain *= extraGain;
+ LOG(RPiAgc, Debug) << "Initial Y " << initialY << " target " << targetY
<< " gives gain " << gain;
- if (extra_gain < 1.01) // close enough
+ if (extraGain < 1.01) /* close enough */
break;
}
- for (auto &c : *constraint_mode_) {
- double new_target_Y;
- double new_gain =
- constraint_compute_gain(c, h, lux.lux, ev_gain,
- new_target_Y);
+ for (auto &c : *constraintMode_) {
+ double newTargetY;
+ double newGain = constraintComputeGain(c, h, lux.lux, evGain, newTargetY);
LOG(RPiAgc, Debug) << "Constraint has target_Y "
- << new_target_Y << " giving gain " << new_gain;
- if (c.bound == AgcConstraint::Bound::LOWER &&
- new_gain > gain) {
+ << newTargetY << " giving gain " << newGain;
+ if (c.bound == AgcConstraint::Bound::LOWER && newGain > gain) {
LOG(RPiAgc, Debug) << "Lower bound constraint adopted";
- gain = new_gain, target_Y = new_target_Y;
- } else if (c.bound == AgcConstraint::Bound::UPPER &&
- new_gain < gain) {
+ gain = newGain;
+ targetY = newTargetY;
+ } else if (c.bound == AgcConstraint::Bound::UPPER && newGain < gain) {
LOG(RPiAgc, Debug) << "Upper bound constraint adopted";
- gain = new_gain, target_Y = new_target_Y;
+ gain = newGain;
+ targetY = newTargetY;
}
}
- LOG(RPiAgc, Debug) << "Final gain " << gain << " (target_Y " << target_Y << " ev "
- << status_.ev << " base_ev " << config_.base_ev
+ LOG(RPiAgc, Debug) << "Final gain " << gain << " (target_Y " << targetY << " ev "
+ << status_.ev << " base_ev " << config_.baseEv
<< ")";
}
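/*
 * Illustrative sketch (editorial, not part of the patch): how computeGain()
 * arbitrates between constraints above. A LOWER-bound constraint may only
 * raise the gain and an UPPER-bound constraint may only lower it; the Bound
 * enum matches the one declared in agc.h further down.
 */
enum class Bound { LOWER = 0, UPPER = 1 };

static double applyConstraint(double gain, double constraintGain, Bound bound)
{
	if (bound == Bound::LOWER && constraintGain > gain)
		return constraintGain;
	if (bound == Bound::UPPER && constraintGain < gain)
		return constraintGain;
	return gain;
}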
void Agc::computeTargetExposure(double gain)
{
- if (status_.fixed_shutter && status_.fixed_analogue_gain) {
- // When ag and shutter are both fixed, we need to drive the
- // total exposure so that we end up with a digital gain of at least
- // 1/min_colour_gain. Otherwise we'd desaturate channels causing
- // white to go cyan or magenta.
- double min_colour_gain = std::min({ awb_.gain_r, awb_.gain_g, awb_.gain_b, 1.0 });
- ASSERT(min_colour_gain != 0.0);
- target_.total_exposure =
- status_.fixed_shutter * status_.fixed_analogue_gain / min_colour_gain;
+ if (status_.fixedShutter && status_.fixedAnalogueGain) {
+ /*
+ * When ag and shutter are both fixed, we need to drive the
+ * total exposure so that we end up with a digital gain of at least
+ * 1/minColourGain. Otherwise we'd desaturate channels causing
+ * white to go cyan or magenta.
+ */
+ double minColourGain = std::min({ awb_.gainR, awb_.gainG, awb_.gainB, 1.0 });
+ ASSERT(minColourGain != 0.0);
+ target_.totalExposure =
+ status_.fixedShutter * status_.fixedAnalogueGain / minColourGain;
} else {
- // The statistics reflect the image without digital gain, so the final
- // total exposure we're aiming for is:
- target_.total_exposure = current_.total_exposure_no_dg * gain;
- // The final target exposure is also limited to what the exposure
- // mode allows.
- Duration max_shutter = status_.fixed_shutter
- ? status_.fixed_shutter
- : exposure_mode_->shutter.back();
- max_shutter = clipShutter(max_shutter);
- Duration max_total_exposure =
- max_shutter *
- (status_.fixed_analogue_gain != 0.0
- ? status_.fixed_analogue_gain
- : exposure_mode_->gain.back());
- target_.total_exposure = std::min(target_.total_exposure,
- max_total_exposure);
+ /*
+ * The statistics reflect the image without digital gain, so the final
+ * total exposure we're aiming for is:
+ */
+ target_.totalExposure = current_.totalExposureNoDG * gain;
+ /* The final target exposure is also limited to what the exposure mode allows. */
+ Duration maxShutter = status_.fixedShutter
+ ? status_.fixedShutter
+ : exposureMode_->shutter.back();
+ maxShutter = clipShutter(maxShutter);
+ Duration maxTotalExposure =
+ maxShutter *
+ (status_.fixedAnalogueGain != 0.0
+ ? status_.fixedAnalogueGain
+ : exposureMode_->gain.back());
+ target_.totalExposure = std::min(target_.totalExposure, maxTotalExposure);
}
- LOG(RPiAgc, Debug) << "Target total_exposure " << target_.total_exposure;
+ LOG(RPiAgc, Debug) << "Target totalExposure " << target_.totalExposure;
}
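/*
 * Illustrative sketch (editorial, not part of the patch): the limit applied by
 * computeTargetExposure() in the non-fixed case above. The requested total
 * exposure is the current digital-gain-free exposure scaled by the computed
 * gain, capped at what the exposure profile's longest shutter and highest
 * analogue gain can deliver.
 */
#include <algorithm>

static double targetTotalExposureUs(double currentExposureNoDgUs, double gain,
				    double maxShutterUs, double maxAnalogueGain)
{
	double target = currentExposureNoDgUs * gain;
	return std::min(target, maxShutterUs * maxAnalogueGain);
}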
-bool Agc::applyDigitalGain(double gain, double target_Y)
+bool Agc::applyDigitalGain(double gain, double targetY)
{
- double min_colour_gain = std::min({ awb_.gain_r, awb_.gain_g, awb_.gain_b, 1.0 });
- ASSERT(min_colour_gain != 0.0);
- double dg = 1.0 / min_colour_gain;
- // I think this pipeline subtracts black level and rescales before we
- // get the stats, so no need to worry about it.
+ double minColourGain = std::min({ awb_.gainR, awb_.gainG, awb_.gainB, 1.0 });
+ ASSERT(minColourGain != 0.0);
+ double dg = 1.0 / minColourGain;
+ /*
+ * I think this pipeline subtracts black level and rescales before we
+ * get the stats, so no need to worry about it.
+ */
LOG(RPiAgc, Debug) << "after AWB, target dg " << dg << " gain " << gain
- << " target_Y " << target_Y;
- // Finally, if we're trying to reduce exposure but the target_Y is
- // "close" to 1.0, then the gain computed for that constraint will be
- // only slightly less than one, because the measured Y can never be
- // larger than 1.0. When this happens, demand a large digital gain so
- // that the exposure can be reduced, de-saturating the image much more
- // quickly (and we then approach the correct value more quickly from
- // below).
- bool desaturate = target_Y > config_.fast_reduce_threshold &&
- gain < sqrt(target_Y);
+ << " target_Y " << targetY;
+ /*
+ * Finally, if we're trying to reduce exposure but the targetY is
+ * "close" to 1.0, then the gain computed for that constraint will be
+ * only slightly less than one, because the measured Y can never be
+ * larger than 1.0. When this happens, demand a large digital gain so
+ * that the exposure can be reduced, de-saturating the image much more
+ * quickly (and we then approach the correct value more quickly from
+ * below).
+ */
+ bool desaturate = targetY > config_.fastReduceThreshold &&
+ gain < sqrt(targetY);
if (desaturate)
- dg /= config_.fast_reduce_threshold;
+ dg /= config_.fastReduceThreshold;
LOG(RPiAgc, Debug) << "Digital gain " << dg << " desaturate? " << desaturate;
- target_.total_exposure_no_dg = target_.total_exposure / dg;
- LOG(RPiAgc, Debug) << "Target total_exposure_no_dg " << target_.total_exposure_no_dg;
+ target_.totalExposureNoDG = target_.totalExposure / dg;
+ LOG(RPiAgc, Debug) << "Target totalExposureNoDG " << target_.totalExposureNoDG;
return desaturate;
}
void Agc::filterExposure(bool desaturate)
{
double speed = config_.speed;
- // AGC adapts instantly if both shutter and gain are directly specified
- // or we're in the startup phase.
- if ((status_.fixed_shutter && status_.fixed_analogue_gain) ||
- frame_count_ <= config_.startup_frames)
+ /*
+ * AGC adapts instantly if both shutter and gain are directly specified
+ * or we're in the startup phase.
+ */
+ if ((status_.fixedShutter && status_.fixedAnalogueGain) ||
+ frameCount_ <= config_.startupFrames)
speed = 1.0;
- if (!filtered_.total_exposure) {
- filtered_.total_exposure = target_.total_exposure;
- filtered_.total_exposure_no_dg = target_.total_exposure_no_dg;
+ if (!filtered_.totalExposure) {
+ filtered_.totalExposure = target_.totalExposure;
+ filtered_.totalExposureNoDG = target_.totalExposureNoDG;
} else {
- // If close to the result go faster, to save making so many
- // micro-adjustments on the way. (Make this customisable?)
- if (filtered_.total_exposure < 1.2 * target_.total_exposure &&
- filtered_.total_exposure > 0.8 * target_.total_exposure)
+ /*
+ * If close to the result go faster, to save making so many
+ * micro-adjustments on the way. (Make this customisable?)
+ */
+ if (filtered_.totalExposure < 1.2 * target_.totalExposure &&
+ filtered_.totalExposure > 0.8 * target_.totalExposure)
speed = sqrt(speed);
- filtered_.total_exposure = speed * target_.total_exposure +
- filtered_.total_exposure * (1.0 - speed);
- // When desaturing, take a big jump down in exposure_no_dg,
- // which we'll hide with digital gain.
+ filtered_.totalExposure = speed * target_.totalExposure +
+ filtered_.totalExposure * (1.0 - speed);
+ /*
+ * When desaturating, take a big jump down in totalExposureNoDG,
+ * which we'll hide with digital gain.
+ */
if (desaturate)
- filtered_.total_exposure_no_dg =
- target_.total_exposure_no_dg;
+ filtered_.totalExposureNoDG =
+ target_.totalExposureNoDG;
else
- filtered_.total_exposure_no_dg =
- speed * target_.total_exposure_no_dg +
- filtered_.total_exposure_no_dg * (1.0 - speed);
+ filtered_.totalExposureNoDG =
+ speed * target_.totalExposureNoDG +
+ filtered_.totalExposureNoDG * (1.0 - speed);
}
- // We can't let the no_dg exposure deviate too far below the
- // total exposure, as there might not be enough digital gain available
- // in the ISP to hide it (which will cause nasty oscillation).
- if (filtered_.total_exposure_no_dg <
- filtered_.total_exposure * config_.fast_reduce_threshold)
- filtered_.total_exposure_no_dg = filtered_.total_exposure *
- config_.fast_reduce_threshold;
- LOG(RPiAgc, Debug) << "After filtering, total_exposure " << filtered_.total_exposure
- << " no dg " << filtered_.total_exposure_no_dg;
+ /*
+ * We can't let the totalExposureNoDG exposure deviate too far below the
+ * total exposure, as there might not be enough digital gain available
+ * in the ISP to hide it (which will cause nasty oscillation).
+ */
+ if (filtered_.totalExposureNoDG <
+ filtered_.totalExposure * config_.fastReduceThreshold)
+ filtered_.totalExposureNoDG = filtered_.totalExposure * config_.fastReduceThreshold;
+ LOG(RPiAgc, Debug) << "After filtering, totalExposure " << filtered_.totalExposure
+ << " no dg " << filtered_.totalExposureNoDG;
}
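/*
 * Illustrative sketch (editorial, not part of the patch): the first-order
 * filter run by filterExposure() each frame. speed becomes 1.0 during startup
 * or when shutter and gain are both fixed (instant adaptation), and its square
 * root is taken once the filtered value is within 20% of the target so the
 * last stretch converges faster.
 */
#include <cmath>

static double filterTowards(double filtered, double target, double speed)
{
	if (filtered > 0.8 * target && filtered < 1.2 * target)
		speed = std::sqrt(speed);

	return speed * target + filtered * (1.0 - speed);
}

/* e.g. filterTowards(10000.0, 20000.0, 0.2) asks for 12000.0 on the next frame. */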
void Agc::divideUpExposure()
{
- // Sending the fixed shutter/gain cases through the same code may seem
- // unnecessary, but it will make more sense when extend this to cover
- // variable aperture.
- Duration exposure_value = filtered_.total_exposure_no_dg;
- Duration shutter_time;
- double analogue_gain;
- shutter_time = status_.fixed_shutter
- ? status_.fixed_shutter
- : exposure_mode_->shutter[0];
- shutter_time = clipShutter(shutter_time);
- analogue_gain = status_.fixed_analogue_gain != 0.0
- ? status_.fixed_analogue_gain
- : exposure_mode_->gain[0];
- if (shutter_time * analogue_gain < exposure_value) {
+ /*
+ * Sending the fixed shutter/gain cases through the same code may seem
+ * unnecessary, but it will make more sense when we extend this to cover
+ * variable aperture.
+ */
+ Duration exposureValue = filtered_.totalExposureNoDG;
+ Duration shutterTime;
+ double analogueGain;
+ shutterTime = status_.fixedShutter ? status_.fixedShutter
+ : exposureMode_->shutter[0];
+ shutterTime = clipShutter(shutterTime);
+ analogueGain = status_.fixedAnalogueGain != 0.0 ? status_.fixedAnalogueGain
+ : exposureMode_->gain[0];
+ if (shutterTime * analogueGain < exposureValue) {
for (unsigned int stage = 1;
- stage < exposure_mode_->gain.size(); stage++) {
- if (!status_.fixed_shutter) {
- Duration stage_shutter =
- clipShutter(exposure_mode_->shutter[stage]);
- if (stage_shutter * analogue_gain >=
- exposure_value) {
- shutter_time =
- exposure_value / analogue_gain;
+ stage < exposureMode_->gain.size(); stage++) {
+ if (!status_.fixedShutter) {
+ Duration stageShutter =
+ clipShutter(exposureMode_->shutter[stage]);
+ if (stageShutter * analogueGain >= exposureValue) {
+ shutterTime = exposureValue / analogueGain;
break;
}
- shutter_time = stage_shutter;
+ shutterTime = stageShutter;
}
- if (status_.fixed_analogue_gain == 0.0) {
- if (exposure_mode_->gain[stage] *
- shutter_time >=
- exposure_value) {
- analogue_gain =
- exposure_value / shutter_time;
+ if (status_.fixedAnalogueGain == 0.0) {
+ if (exposureMode_->gain[stage] * shutterTime >= exposureValue) {
+ analogueGain = exposureValue / shutterTime;
break;
}
- analogue_gain = exposure_mode_->gain[stage];
+ analogueGain = exposureMode_->gain[stage];
}
}
}
- LOG(RPiAgc, Debug) << "Divided up shutter and gain are " << shutter_time << " and "
- << analogue_gain;
- // Finally adjust shutter time for flicker avoidance (require both
- // shutter and gain not to be fixed).
- if (!status_.fixed_shutter && !status_.fixed_analogue_gain &&
- status_.flicker_period) {
- int flicker_periods = shutter_time / status_.flicker_period;
- if (flicker_periods) {
- Duration new_shutter_time = flicker_periods * status_.flicker_period;
- analogue_gain *= shutter_time / new_shutter_time;
- // We should still not allow the ag to go over the
- // largest value in the exposure mode. Note that this
- // may force more of the total exposure into the digital
- // gain as a side-effect.
- analogue_gain = std::min(analogue_gain,
- exposure_mode_->gain.back());
- shutter_time = new_shutter_time;
+ LOG(RPiAgc, Debug) << "Divided up shutter and gain are " << shutterTime << " and "
+ << analogueGain;
+ /*
+ * Finally adjust shutter time for flicker avoidance (require both
+ * shutter and gain not to be fixed).
+ */
+ if (!status_.fixedShutter && !status_.fixedAnalogueGain &&
+ status_.flickerPeriod) {
+ int flickerPeriods = shutterTime / status_.flickerPeriod;
+ if (flickerPeriods) {
+ Duration newShutterTime = flickerPeriods * status_.flickerPeriod;
+ analogueGain *= shutterTime / newShutterTime;
+ /*
+ * We should still not allow the ag to go over the
+ * largest value in the exposure mode. Note that this
+ * may force more of the total exposure into the digital
+ * gain as a side-effect.
+ */
+ analogueGain = std::min(analogueGain, exposureMode_->gain.back());
+ shutterTime = newShutterTime;
}
LOG(RPiAgc, Debug) << "After flicker avoidance, shutter "
- << shutter_time << " gain " << analogue_gain;
+ << shutterTime << " gain " << analogueGain;
}
- filtered_.shutter = shutter_time;
- filtered_.analogue_gain = analogue_gain;
+ filtered_.shutter = shutterTime;
+ filtered_.analogueGain = analogueGain;
}
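/*
 * Illustrative sketch (editorial, not part of the patch): the flicker
 * avoidance step at the end of divideUpExposure(). The shutter is rounded down
 * to a whole number of flicker periods and the analogue gain is raised to keep
 * the total exposure constant, capped at the exposure profile's maximum gain.
 */
#include <algorithm>

struct ShutterGain {
	double shutterUs;
	double gain;
};

static ShutterGain avoidFlicker(double shutterUs, double gain,
				double flickerPeriodUs, double maxGain)
{
	int periods = static_cast<int>(shutterUs / flickerPeriodUs);
	if (!periods)
		return { shutterUs, gain }; /* shorter than one period: leave it alone */

	double newShutterUs = periods * flickerPeriodUs;
	gain = std::min(gain * shutterUs / newShutterUs, maxGain);

	return { newShutterUs, gain };
}

/* e.g. 25000us at gain 2.0 with a 10000us (100Hz) flicker period becomes 20000us at gain 2.5. */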
-void Agc::writeAndFinish(Metadata *image_metadata, bool desaturate)
+void Agc::writeAndFinish(Metadata *imageMetadata, bool desaturate)
{
- status_.total_exposure_value = filtered_.total_exposure;
- status_.target_exposure_value = desaturate ? 0s : target_.total_exposure_no_dg;
- status_.shutter_time = filtered_.shutter;
- status_.analogue_gain = filtered_.analogue_gain;
- // Write to metadata as well, in case anyone wants to update the camera
- // immediately.
- image_metadata->Set("agc.status", status_);
+ status_.totalExposureValue = filtered_.totalExposure;
+ status_.targetExposureValue = desaturate ? 0s : target_.totalExposureNoDG;
+ status_.shutterTime = filtered_.shutter;
+ status_.analogueGain = filtered_.analogueGain;
+ /*
+ * Write to metadata as well, in case anyone wants to update the camera
+ * immediately.
+ */
+ imageMetadata->set("agc.status", status_);
LOG(RPiAgc, Debug) << "Output written, total exposure requested is "
- << filtered_.total_exposure;
+ << filtered_.totalExposure;
LOG(RPiAgc, Debug) << "Camera exposure update: shutter time " << filtered_.shutter
- << " analogue gain " << filtered_.analogue_gain;
+ << " analogue gain " << filtered_.analogueGain;
}
Duration Agc::clipShutter(Duration shutter)
{
- if (max_shutter_)
- shutter = std::min(shutter, max_shutter_);
+ if (maxShutter_)
+ shutter = std::min(shutter, maxShutter_);
return shutter;
}
-// Register algorithm with the system.
-static Algorithm *Create(Controller *controller)
+/* Register algorithm with the system. */
+static Algorithm *create(Controller *controller)
{
return (Algorithm *)new Agc(controller);
}
-static RegisterAlgorithm reg(NAME, &Create);
+static RegisterAlgorithm reg(NAME, &create);
diff --git a/src/ipa/raspberrypi/controller/rpi/agc.h b/src/ipa/raspberrypi/controller/rpi/agc.h
new file mode 100644
index 00000000..6d6b0e5a
--- /dev/null
+++ b/src/ipa/raspberrypi/controller/rpi/agc.h
@@ -0,0 +1,141 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * agc.h - AGC/AEC control algorithm
+ */
+#pragma once
+
+#include <vector>
+#include <mutex>
+
+#include <libcamera/base/utils.h>
+
+#include "../agc_algorithm.h"
+#include "../agc_status.h"
+#include "../pwl.h"
+
+/* This is our implementation of AGC. */
+
+/*
+ * This is the number actually set up by the firmware, not the maximum possible
+ * number (which is 16).
+ */
+
+constexpr unsigned int AgcStatsSize = 15;
+
+namespace RPiController {
+
+struct AgcMeteringMode {
+ double weights[AgcStatsSize];
+ int read(const libcamera::YamlObject &params);
+};
+
+struct AgcExposureMode {
+ std::vector<libcamera::utils::Duration> shutter;
+ std::vector<double> gain;
+ int read(const libcamera::YamlObject &params);
+};
+
+struct AgcConstraint {
+ enum class Bound { LOWER = 0, UPPER = 1 };
+ Bound bound;
+ double qLo;
+ double qHi;
+ Pwl yTarget;
+ int read(const libcamera::YamlObject &params);
+};
+
+typedef std::vector<AgcConstraint> AgcConstraintMode;
+
+struct AgcConfig {
+ int read(const libcamera::YamlObject &params);
+ std::map<std::string, AgcMeteringMode> meteringModes;
+ std::map<std::string, AgcExposureMode> exposureModes;
+ std::map<std::string, AgcConstraintMode> constraintModes;
+ Pwl yTarget;
+ double speed;
+ uint16_t startupFrames;
+ unsigned int convergenceFrames;
+ double maxChange;
+ double minChange;
+ double fastReduceThreshold;
+ double speedUpThreshold;
+ std::string defaultMeteringMode;
+ std::string defaultExposureMode;
+ std::string defaultConstraintMode;
+ double baseEv;
+ libcamera::utils::Duration defaultExposureTime;
+ double defaultAnalogueGain;
+};
+
+class Agc : public AgcAlgorithm
+{
+public:
+ Agc(Controller *controller);
+ char const *name() const override;
+ int read(const libcamera::YamlObject &params) override;
+ /* AGC handles "pausing" for itself. */
+ bool isPaused() const override;
+ void pause() override;
+ void resume() override;
+ unsigned int getConvergenceFrames() const override;
+ void setEv(double ev) override;
+ void setFlickerPeriod(libcamera::utils::Duration flickerPeriod) override;
+ void setMaxShutter(libcamera::utils::Duration maxShutter) override;
+ void setFixedShutter(libcamera::utils::Duration fixedShutter) override;
+ void setFixedAnalogueGain(double fixedAnalogueGain) override;
+ void setMeteringMode(std::string const &meteringModeName) override;
+ void setExposureMode(std::string const &exposureModeName) override;
+ void setConstraintMode(std::string const &constraintModeName) override;
+ void switchMode(CameraMode const &cameraMode, Metadata *metadata) override;
+ void prepare(Metadata *imageMetadata) override;
+ void process(StatisticsPtr &stats, Metadata *imageMetadata) override;
+
+private:
+ void updateLockStatus(DeviceStatus const &deviceStatus);
+ AgcConfig config_;
+ void housekeepConfig();
+ void fetchCurrentExposure(Metadata *imageMetadata);
+ void fetchAwbStatus(Metadata *imageMetadata);
+ void computeGain(bcm2835_isp_stats *statistics, Metadata *imageMetadata,
+ double &gain, double &targetY);
+ void computeTargetExposure(double gain);
+ bool applyDigitalGain(double gain, double targetY);
+ void filterExposure(bool desaturate);
+ void divideUpExposure();
+ void writeAndFinish(Metadata *imageMetadata, bool desaturate);
+ libcamera::utils::Duration clipShutter(libcamera::utils::Duration shutter);
+ AgcMeteringMode *meteringMode_;
+ AgcExposureMode *exposureMode_;
+ AgcConstraintMode *constraintMode_;
+ uint64_t frameCount_;
+ AwbStatus awb_;
+ struct ExposureValues {
+ ExposureValues();
+
+ libcamera::utils::Duration shutter;
+ double analogueGain;
+ libcamera::utils::Duration totalExposure;
+ libcamera::utils::Duration totalExposureNoDG; /* without digital gain */
+ };
+ ExposureValues current_; /* values for the current frame */
+ ExposureValues target_; /* calculate the values we want here */
+ ExposureValues filtered_; /* these values are filtered towards target */
+ AgcStatus status_;
+ int lockCount_;
+ DeviceStatus lastDeviceStatus_;
+ libcamera::utils::Duration lastTargetExposure_;
+ double lastSensitivity_; /* sensitivity of the previous camera mode */
+ /* Below here the "settings" that applications can change. */
+ std::string meteringModeName_;
+ std::string exposureModeName_;
+ std::string constraintModeName_;
+ double ev_;
+ libcamera::utils::Duration flickerPeriod_;
+ libcamera::utils::Duration maxShutter_;
+ libcamera::utils::Duration fixedShutter_;
+ double fixedAnalogueGain_;
+};
+
+} /* namespace RPiController */
diff --git a/src/ipa/raspberrypi/controller/rpi/agc.hpp b/src/ipa/raspberrypi/controller/rpi/agc.hpp
deleted file mode 100644
index c100d312..00000000
--- a/src/ipa/raspberrypi/controller/rpi/agc.hpp
+++ /dev/null
@@ -1,139 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
- *
- * agc.hpp - AGC/AEC control algorithm
- */
-#pragma once
-
-#include <vector>
-#include <mutex>
-
-#include <libcamera/base/utils.h>
-
-#include "../agc_algorithm.hpp"
-#include "../agc_status.h"
-#include "../pwl.hpp"
-
-// This is our implementation of AGC.
-
-// This is the number actually set up by the firmware, not the maximum possible
-// number (which is 16).
-
-#define AGC_STATS_SIZE 15
-
-namespace RPiController {
-
-struct AgcMeteringMode {
- double weights[AGC_STATS_SIZE];
- void Read(boost::property_tree::ptree const &params);
-};
-
-struct AgcExposureMode {
- std::vector<libcamera::utils::Duration> shutter;
- std::vector<double> gain;
- void Read(boost::property_tree::ptree const &params);
-};
-
-struct AgcConstraint {
- enum class Bound { LOWER = 0, UPPER = 1 };
- Bound bound;
- double q_lo;
- double q_hi;
- Pwl Y_target;
- void Read(boost::property_tree::ptree const &params);
-};
-
-typedef std::vector<AgcConstraint> AgcConstraintMode;
-
-struct AgcConfig {
- void Read(boost::property_tree::ptree const &params);
- std::map<std::string, AgcMeteringMode> metering_modes;
- std::map<std::string, AgcExposureMode> exposure_modes;
- std::map<std::string, AgcConstraintMode> constraint_modes;
- Pwl Y_target;
- double speed;
- uint16_t startup_frames;
- unsigned int convergence_frames;
- double max_change;
- double min_change;
- double fast_reduce_threshold;
- double speed_up_threshold;
- std::string default_metering_mode;
- std::string default_exposure_mode;
- std::string default_constraint_mode;
- double base_ev;
- libcamera::utils::Duration default_exposure_time;
- double default_analogue_gain;
-};
-
-class Agc : public AgcAlgorithm
-{
-public:
- Agc(Controller *controller);
- char const *Name() const override;
- void Read(boost::property_tree::ptree const &params) override;
- // AGC handles "pausing" for itself.
- bool IsPaused() const override;
- void Pause() override;
- void Resume() override;
- unsigned int GetConvergenceFrames() const override;
- void SetEv(double ev) override;
- void SetFlickerPeriod(libcamera::utils::Duration flicker_period) override;
- void SetMaxShutter(libcamera::utils::Duration max_shutter) override;
- void SetFixedShutter(libcamera::utils::Duration fixed_shutter) override;
- void SetFixedAnalogueGain(double fixed_analogue_gain) override;
- void SetMeteringMode(std::string const &metering_mode_name) override;
- void SetExposureMode(std::string const &exposure_mode_name) override;
- void SetConstraintMode(std::string const &contraint_mode_name) override;
- void SwitchMode(CameraMode const &camera_mode, Metadata *metadata) override;
- void Prepare(Metadata *image_metadata) override;
- void Process(StatisticsPtr &stats, Metadata *image_metadata) override;
-
-private:
- void updateLockStatus(DeviceStatus const &device_status);
- AgcConfig config_;
- void housekeepConfig();
- void fetchCurrentExposure(Metadata *image_metadata);
- void fetchAwbStatus(Metadata *image_metadata);
- void computeGain(bcm2835_isp_stats *statistics, Metadata *image_metadata,
- double &gain, double &target_Y);
- void computeTargetExposure(double gain);
- bool applyDigitalGain(double gain, double target_Y);
- void filterExposure(bool desaturate);
- void divideUpExposure();
- void writeAndFinish(Metadata *image_metadata, bool desaturate);
- libcamera::utils::Duration clipShutter(libcamera::utils::Duration shutter);
- AgcMeteringMode *metering_mode_;
- AgcExposureMode *exposure_mode_;
- AgcConstraintMode *constraint_mode_;
- uint64_t frame_count_;
- AwbStatus awb_;
- struct ExposureValues {
- ExposureValues();
-
- libcamera::utils::Duration shutter;
- double analogue_gain;
- libcamera::utils::Duration total_exposure;
- libcamera::utils::Duration total_exposure_no_dg; // without digital gain
- };
- ExposureValues current_; // values for the current frame
- ExposureValues target_; // calculate the values we want here
- ExposureValues filtered_; // these values are filtered towards target
- AgcStatus status_;
- int lock_count_;
- DeviceStatus last_device_status_;
- libcamera::utils::Duration last_target_exposure_;
- double last_sensitivity_; // sensitivity of the previous camera mode
- // Below here the "settings" that applications can change.
- std::string metering_mode_name_;
- std::string exposure_mode_name_;
- std::string constraint_mode_name_;
- double ev_;
- libcamera::utils::Duration flicker_period_;
- libcamera::utils::Duration max_shutter_;
- libcamera::utils::Duration fixed_shutter_;
- double fixed_analogue_gain_;
-};
-
-} // namespace RPiController
diff --git a/src/ipa/raspberrypi/controller/rpi/alsc.cpp b/src/ipa/raspberrypi/controller/rpi/alsc.cpp
index e575c14a..a4afaf84 100644
--- a/src/ipa/raspberrypi/controller/rpi/alsc.cpp
+++ b/src/ipa/raspberrypi/controller/rpi/alsc.cpp
@@ -1,10 +1,11 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
+ * Copyright (C) 2019, Raspberry Pi Ltd
*
* alsc.cpp - ALSC (auto lens shading correction) control algorithm
*/
+#include <functional>
#include <math.h>
#include <numeric>
@@ -12,9 +13,9 @@
#include <libcamera/base/span.h>
#include "../awb_status.h"
-#include "alsc.hpp"
+#include "alsc.h"
-// Raspberry Pi ALSC (Auto Lens Shading Correction) algorithm.
+/* Raspberry Pi ALSC (Auto Lens Shading Correction) algorithm. */
using namespace RPiController;
using namespace libcamera;
@@ -23,41 +24,47 @@ LOG_DEFINE_CATEGORY(RPiAlsc)
#define NAME "rpi.alsc"
-static const int X = ALSC_CELLS_X;
-static const int Y = ALSC_CELLS_Y;
+static const int X = AlscCellsX;
+static const int Y = AlscCellsY;
static const int XY = X * Y;
-static const double INSUFFICIENT_DATA = -1.0;
+static const double InsufficientData = -1.0;
Alsc::Alsc(Controller *controller)
: Algorithm(controller)
{
- async_abort_ = async_start_ = async_started_ = async_finished_ = false;
- async_thread_ = std::thread(std::bind(&Alsc::asyncFunc, this));
+ asyncAbort_ = asyncStart_ = asyncStarted_ = asyncFinished_ = false;
+ asyncThread_ = std::thread(std::bind(&Alsc::asyncFunc, this));
}
Alsc::~Alsc()
{
{
std::lock_guard<std::mutex> lock(mutex_);
- async_abort_ = true;
+ asyncAbort_ = true;
}
- async_signal_.notify_one();
- async_thread_.join();
+ asyncSignal_.notify_one();
+ asyncThread_.join();
}
-char const *Alsc::Name() const
+char const *Alsc::name() const
{
return NAME;
}
-static void generate_lut(double *lut, boost::property_tree::ptree const &params)
+static int generateLut(double *lut, const libcamera::YamlObject &params)
{
- double cstrength = params.get<double>("corner_strength", 2.0);
- if (cstrength <= 1.0)
- throw std::runtime_error("Alsc: corner_strength must be > 1.0");
- double asymmetry = params.get<double>("asymmetry", 1.0);
- if (asymmetry < 0)
- throw std::runtime_error("Alsc: asymmetry must be >= 0");
+ double cstrength = params["corner_strength"].get<double>(2.0);
+ if (cstrength <= 1.0) {
+ LOG(RPiAlsc, Error) << "corner_strength must be > 1.0";
+ return -EINVAL;
+ }
+
+ double asymmetry = params["asymmetry"].get<double>(1.0);
+ if (asymmetry < 0) {
+ LOG(RPiAlsc, Error) << "asymmetry must be >= 0";
+ return -EINVAL;
+ }
+
double f1 = cstrength - 1, f2 = 1 + sqrt(cstrength);
double R2 = X * Y / 4 * (1 + asymmetry * asymmetry);
int num = 0;
@@ -68,305 +75,336 @@ static void generate_lut(double *lut, boost::property_tree::ptree const &params)
double r2 = (dx * dx + dy * dy) / R2;
lut[num++] =
(f1 * r2 + f2) * (f1 * r2 + f2) /
- (f2 * f2); // this reproduces the cos^4 rule
+ (f2 * f2); /* this reproduces the cos^4 rule */
}
}
+ return 0;
}
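
Aside (not part of the patch): the corner_strength/asymmetry parameters feed the cos^4-style falloff reproduced above. A minimal standalone sketch of that table generation follows; the exact way asymmetry weights the cell offsets is an assumption made for illustration, not the pipeline's definition.

	#include <cmath>
	#include <vector>

	std::vector<double> makeCos4Lut(int cellsX, int cellsY,
					double cornerStrength, double asymmetry)
	{
		/* Same f1/f2/R2 form as generateLut() above. */
		double f1 = cornerStrength - 1.0;
		double f2 = 1.0 + std::sqrt(cornerStrength);
		double r2Norm = cellsX * cellsY / 4.0 * (1.0 + asymmetry * asymmetry);
		std::vector<double> lut;
		lut.reserve(cellsX * cellsY);
		for (int j = 0; j < cellsY; j++) {
			double dy = j - cellsY / 2.0 + 0.5;
			for (int i = 0; i < cellsX; i++) {
				/* Assumed for this sketch: asymmetry stretches the horizontal offset. */
				double dx = (i - cellsX / 2.0 + 0.5) * asymmetry;
				double r2 = (dx * dx + dy * dy) / r2Norm;
				lut.push_back((f1 * r2 + f2) * (f1 * r2 + f2) / (f2 * f2));
			}
		}
		return lut;
	}

In this sketch a larger cornerStrength applies more gain towards the corners, while asymmetry skews how quickly that gain grows in x versus y.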
-static void read_lut(double *lut, boost::property_tree::ptree const &params)
+static int readLut(double *lut, const libcamera::YamlObject &params)
{
+ if (params.size() != XY) {
+ LOG(RPiAlsc, Error) << "Invalid number of entries in LSC table";
+ return -EINVAL;
+ }
+
int num = 0;
- const int max_num = XY;
- for (auto &p : params) {
- if (num == max_num)
- throw std::runtime_error(
- "Alsc: too many entries in LSC table");
- lut[num++] = p.second.get_value<double>();
+ for (const auto &p : params.asList()) {
+ auto value = p.get<double>();
+ if (!value)
+ return -EINVAL;
+ lut[num++] = *value;
}
- if (num < max_num)
- throw std::runtime_error("Alsc: too few entries in LSC table");
-}
-
-static void read_calibrations(std::vector<AlscCalibration> &calibrations,
- boost::property_tree::ptree const &params,
- std::string const &name)
-{
- if (params.get_child_optional(name)) {
- double last_ct = 0;
- for (auto &p : params.get_child(name)) {
- double ct = p.second.get<double>("ct");
- if (ct <= last_ct)
- throw std::runtime_error(
- "Alsc: entries in " + name +
- " must be in increasing ct order");
+
+ return 0;
+}
+
+static int readCalibrations(std::vector<AlscCalibration> &calibrations,
+ const libcamera::YamlObject &params,
+ std::string const &name)
+{
+ if (params.contains(name)) {
+ double lastCt = 0;
+ for (const auto &p : params[name].asList()) {
+ auto value = p["ct"].get<double>();
+ if (!value)
+ return -EINVAL;
+ double ct = *value;
+ if (ct <= lastCt) {
+ LOG(RPiAlsc, Error)
+ << "Entries in " << name << " must be in increasing ct order";
+ return -EINVAL;
+ }
AlscCalibration calibration;
- calibration.ct = last_ct = ct;
- boost::property_tree::ptree const &table =
- p.second.get_child("table");
+ calibration.ct = lastCt = ct;
+
+ const libcamera::YamlObject &table = p["table"];
+ if (table.size() != XY) {
+ LOG(RPiAlsc, Error)
+ << "Incorrect number of values for ct "
+ << ct << " in " << name;
+ return -EINVAL;
+ }
+
int num = 0;
- for (auto it = table.begin(); it != table.end(); it++) {
- if (num == XY)
- throw std::runtime_error(
- "Alsc: too many values for ct " +
- std::to_string(ct) + " in " +
- name);
- calibration.table[num++] =
- it->second.get_value<double>();
+ for (const auto &elem : table.asList()) {
+ value = elem.get<double>();
+ if (!value)
+ return -EINVAL;
+ calibration.table[num++] = *value;
}
- if (num != XY)
- throw std::runtime_error(
- "Alsc: too few values for ct " +
- std::to_string(ct) + " in " + name);
+
calibrations.push_back(calibration);
LOG(RPiAlsc, Debug)
<< "Read " << name << " calibration for ct " << ct;
}
}
-}
-
-void Alsc::Read(boost::property_tree::ptree const &params)
-{
- config_.frame_period = params.get<uint16_t>("frame_period", 12);
- config_.startup_frames = params.get<uint16_t>("startup_frames", 10);
- config_.speed = params.get<double>("speed", 0.05);
- double sigma = params.get<double>("sigma", 0.01);
- config_.sigma_Cr = params.get<double>("sigma_Cr", sigma);
- config_.sigma_Cb = params.get<double>("sigma_Cb", sigma);
- config_.min_count = params.get<double>("min_count", 10.0);
- config_.min_G = params.get<uint16_t>("min_G", 50);
- config_.omega = params.get<double>("omega", 1.3);
- config_.n_iter = params.get<uint32_t>("n_iter", X + Y);
- config_.luminance_strength =
- params.get<double>("luminance_strength", 1.0);
+ return 0;
+}
+
+int Alsc::read(const libcamera::YamlObject &params)
+{
+ config_.framePeriod = params["frame_period"].get<uint16_t>(12);
+ config_.startupFrames = params["startup_frames"].get<uint16_t>(10);
+ config_.speed = params["speed"].get<double>(0.05);
+ double sigma = params["sigma"].get<double>(0.01);
+ config_.sigmaCr = params["sigma_Cr"].get<double>(sigma);
+ config_.sigmaCb = params["sigma_Cb"].get<double>(sigma);
+ config_.minCount = params["min_count"].get<double>(10.0);
+ config_.minG = params["min_G"].get<uint16_t>(50);
+ config_.omega = params["omega"].get<double>(1.3);
+ config_.nIter = params["n_iter"].get<uint32_t>(X + Y);
+ config_.luminanceStrength =
+ params["luminance_strength"].get<double>(1.0);
for (int i = 0; i < XY; i++)
- config_.luminance_lut[i] = 1.0;
- if (params.get_child_optional("corner_strength"))
- generate_lut(config_.luminance_lut, params);
- else if (params.get_child_optional("luminance_lut"))
- read_lut(config_.luminance_lut,
- params.get_child("luminance_lut"));
+ config_.luminanceLut[i] = 1.0;
+
+ int ret = 0;
+
+ if (params.contains("corner_strength"))
+ ret = generateLut(config_.luminanceLut, params);
+ else if (params.contains("luminance_lut"))
+ ret = readLut(config_.luminanceLut, params["luminance_lut"]);
else
LOG(RPiAlsc, Warning)
<< "no luminance table - assume unity everywhere";
- read_calibrations(config_.calibrations_Cr, params, "calibrations_Cr");
- read_calibrations(config_.calibrations_Cb, params, "calibrations_Cb");
- config_.default_ct = params.get<double>("default_ct", 4500.0);
- config_.threshold = params.get<double>("threshold", 1e-3);
- config_.lambda_bound = params.get<double>("lambda_bound", 0.05);
-}
-
-static double get_ct(Metadata *metadata, double default_ct);
-static void get_cal_table(double ct,
- std::vector<AlscCalibration> const &calibrations,
- double cal_table[XY]);
-static void resample_cal_table(double const cal_table_in[XY],
- CameraMode const &camera_mode,
- double cal_table_out[XY]);
-static void compensate_lambdas_for_cal(double const cal_table[XY],
- double const old_lambdas[XY],
- double new_lambdas[XY]);
-static void add_luminance_to_tables(double results[3][Y][X],
- double const lambda_r[XY], double lambda_g,
- double const lambda_b[XY],
- double const luminance_lut[XY],
- double luminance_strength);
-
-void Alsc::Initialise()
-{
- frame_count2_ = frame_count_ = frame_phase_ = 0;
- first_time_ = true;
- ct_ = config_.default_ct;
- // The lambdas are initialised in the SwitchMode.
+ if (ret)
+ return ret;
+
+ ret = readCalibrations(config_.calibrationsCr, params, "calibrations_Cr");
+ if (ret)
+ return ret;
+ ret = readCalibrations(config_.calibrationsCb, params, "calibrations_Cb");
+ if (ret)
+ return ret;
+
+ config_.defaultCt = params["default_ct"].get<double>(4500.0);
+ config_.threshold = params["threshold"].get<double>(1e-3);
+ config_.lambdaBound = params["lambda_bound"].get<double>(0.05);
+
+ return 0;
+}
+
+static double getCt(Metadata *metadata, double defaultCt);
+static void getCalTable(double ct, std::vector<AlscCalibration> const &calibrations,
+ double calTable[XY]);
+static void resampleCalTable(double const calTableIn[XY], CameraMode const &cameraMode,
+ double calTableOut[XY]);
+static void compensateLambdasForCal(double const calTable[XY], double const oldLambdas[XY],
+ double newLambdas[XY]);
+static void addLuminanceToTables(double results[3][Y][X], double const lambdaR[XY], double lambdaG,
+ double const lambdaB[XY], double const luminanceLut[XY],
+ double luminanceStrength);
+
+void Alsc::initialise()
+{
+ frameCount2_ = frameCount_ = framePhase_ = 0;
+ firstTime_ = true;
+ ct_ = config_.defaultCt;
+	/* The lambdas are initialised in switchMode(). */
}
void Alsc::waitForAysncThread()
{
- if (async_started_) {
- async_started_ = false;
+ if (asyncStarted_) {
+ asyncStarted_ = false;
std::unique_lock<std::mutex> lock(mutex_);
- sync_signal_.wait(lock, [&] {
- return async_finished_;
+ syncSignal_.wait(lock, [&] {
+ return asyncFinished_;
});
- async_finished_ = false;
+ asyncFinished_ = false;
}
}
-static bool compare_modes(CameraMode const &cm0, CameraMode const &cm1)
+static bool compareModes(CameraMode const &cm0, CameraMode const &cm1)
{
- // Return true if the modes crop from the sensor significantly differently,
- // or if the user transform has changed.
+ /*
+ * Return true if the modes crop from the sensor significantly differently,
+ * or if the user transform has changed.
+ */
if (cm0.transform != cm1.transform)
return true;
- int left_diff = abs(cm0.crop_x - cm1.crop_x);
- int top_diff = abs(cm0.crop_y - cm1.crop_y);
- int right_diff = fabs(cm0.crop_x + cm0.scale_x * cm0.width -
- cm1.crop_x - cm1.scale_x * cm1.width);
- int bottom_diff = fabs(cm0.crop_y + cm0.scale_y * cm0.height -
- cm1.crop_y - cm1.scale_y * cm1.height);
- // These thresholds are a rather arbitrary amount chosen to trigger
- // when carrying on with the previously calculated tables might be
- // worse than regenerating them (but without the adaptive algorithm).
- int threshold_x = cm0.sensor_width >> 4;
- int threshold_y = cm0.sensor_height >> 4;
- return left_diff > threshold_x || right_diff > threshold_x ||
- top_diff > threshold_y || bottom_diff > threshold_y;
-}
-
-void Alsc::SwitchMode(CameraMode const &camera_mode,
+ int leftDiff = abs(cm0.cropX - cm1.cropX);
+ int topDiff = abs(cm0.cropY - cm1.cropY);
+ int rightDiff = fabs(cm0.cropX + cm0.scaleX * cm0.width -
+ cm1.cropX - cm1.scaleX * cm1.width);
+ int bottomDiff = fabs(cm0.cropY + cm0.scaleY * cm0.height -
+ cm1.cropY - cm1.scaleY * cm1.height);
+ /*
+ * These thresholds are a rather arbitrary amount chosen to trigger
+ * when carrying on with the previously calculated tables might be
+ * worse than regenerating them (but without the adaptive algorithm).
+ */
+ int thresholdX = cm0.sensorWidth >> 4;
+ int thresholdY = cm0.sensorHeight >> 4;
+ return leftDiff > thresholdX || rightDiff > thresholdX ||
+ topDiff > thresholdY || bottomDiff > thresholdY;
+}
+
+void Alsc::switchMode(CameraMode const &cameraMode,
[[maybe_unused]] Metadata *metadata)
{
- // We're going to start over with the tables if there's any "significant"
- // change.
- bool reset_tables = first_time_ || compare_modes(camera_mode_, camera_mode);
+ /*
+ * We're going to start over with the tables if there's any "significant"
+ * change.
+ */
+ bool resetTables = firstTime_ || compareModes(cameraMode_, cameraMode);
- // Believe the colour temperature from the AWB, if there is one.
- ct_ = get_ct(metadata, ct_);
+ /* Believe the colour temperature from the AWB, if there is one. */
+ ct_ = getCt(metadata, ct_);
- // Ensure the other thread isn't running while we do this.
+ /* Ensure the other thread isn't running while we do this. */
waitForAysncThread();
- camera_mode_ = camera_mode;
-
- // We must resample the luminance table like we do the others, but it's
- // fixed so we can simply do it up front here.
- resample_cal_table(config_.luminance_lut, camera_mode_, luminance_table_);
-
- if (reset_tables) {
- // Upon every "table reset", arrange for something sensible to be
- // generated. Construct the tables for the previous recorded colour
- // temperature. In order to start over from scratch we initialise
- // the lambdas, but the rest of this code then echoes the code in
- // doAlsc, without the adaptive algorithm.
+ cameraMode_ = cameraMode;
+
+ /*
+ * We must resample the luminance table like we do the others, but it's
+ * fixed so we can simply do it up front here.
+ */
+ resampleCalTable(config_.luminanceLut, cameraMode_, luminanceTable_);
+
+ if (resetTables) {
+ /*
+ * Upon every "table reset", arrange for something sensible to be
+ * generated. Construct the tables for the previous recorded colour
+ * temperature. In order to start over from scratch we initialise
+ * the lambdas, but the rest of this code then echoes the code in
+ * doAlsc, without the adaptive algorithm.
+ */
for (int i = 0; i < XY; i++)
- lambda_r_[i] = lambda_b_[i] = 1.0;
- double cal_table_r[XY], cal_table_b[XY], cal_table_tmp[XY];
- get_cal_table(ct_, config_.calibrations_Cr, cal_table_tmp);
- resample_cal_table(cal_table_tmp, camera_mode_, cal_table_r);
- get_cal_table(ct_, config_.calibrations_Cb, cal_table_tmp);
- resample_cal_table(cal_table_tmp, camera_mode_, cal_table_b);
- compensate_lambdas_for_cal(cal_table_r, lambda_r_,
- async_lambda_r_);
- compensate_lambdas_for_cal(cal_table_b, lambda_b_,
- async_lambda_b_);
- add_luminance_to_tables(sync_results_, async_lambda_r_, 1.0,
- async_lambda_b_, luminance_table_,
- config_.luminance_strength);
- memcpy(prev_sync_results_, sync_results_,
- sizeof(prev_sync_results_));
- frame_phase_ = config_.frame_period; // run the algo again asap
- first_time_ = false;
+ lambdaR_[i] = lambdaB_[i] = 1.0;
+ double calTableR[XY], calTableB[XY], calTableTmp[XY];
+ getCalTable(ct_, config_.calibrationsCr, calTableTmp);
+ resampleCalTable(calTableTmp, cameraMode_, calTableR);
+ getCalTable(ct_, config_.calibrationsCb, calTableTmp);
+ resampleCalTable(calTableTmp, cameraMode_, calTableB);
+ compensateLambdasForCal(calTableR, lambdaR_, asyncLambdaR_);
+ compensateLambdasForCal(calTableB, lambdaB_, asyncLambdaB_);
+ addLuminanceToTables(syncResults_, asyncLambdaR_, 1.0, asyncLambdaB_,
+ luminanceTable_, config_.luminanceStrength);
+ memcpy(prevSyncResults_, syncResults_, sizeof(prevSyncResults_));
+ framePhase_ = config_.framePeriod; /* run the algo again asap */
+ firstTime_ = false;
}
}
void Alsc::fetchAsyncResults()
{
LOG(RPiAlsc, Debug) << "Fetch ALSC results";
- async_finished_ = false;
- async_started_ = false;
- memcpy(sync_results_, async_results_, sizeof(sync_results_));
+ asyncFinished_ = false;
+ asyncStarted_ = false;
+ memcpy(syncResults_, asyncResults_, sizeof(syncResults_));
}
-double get_ct(Metadata *metadata, double default_ct)
+double getCt(Metadata *metadata, double defaultCt)
{
- AwbStatus awb_status;
- awb_status.temperature_K = default_ct; // in case nothing found
- if (metadata->Get("awb.status", awb_status) != 0)
+ AwbStatus awbStatus;
+ awbStatus.temperatureK = defaultCt; /* in case nothing found */
+ if (metadata->get("awb.status", awbStatus) != 0)
LOG(RPiAlsc, Debug) << "no AWB results found, using "
- << awb_status.temperature_K;
+ << awbStatus.temperatureK;
else
LOG(RPiAlsc, Debug) << "AWB results found, using "
- << awb_status.temperature_K;
- return awb_status.temperature_K;
+ << awbStatus.temperatureK;
+ return awbStatus.temperatureK;
}
-static void copy_stats(bcm2835_isp_stats_region regions[XY], StatisticsPtr &stats,
- AlscStatus const &status)
+static void copyStats(bcm2835_isp_stats_region regions[XY], StatisticsPtr &stats,
+ AlscStatus const &status)
{
- bcm2835_isp_stats_region *input_regions = stats->awb_stats;
- double *r_table = (double *)status.r;
- double *g_table = (double *)status.g;
- double *b_table = (double *)status.b;
+ bcm2835_isp_stats_region *inputRegions = stats->awb_stats;
+ double *rTable = (double *)status.r;
+ double *gTable = (double *)status.g;
+ double *bTable = (double *)status.b;
for (int i = 0; i < XY; i++) {
- regions[i].r_sum = input_regions[i].r_sum / r_table[i];
- regions[i].g_sum = input_regions[i].g_sum / g_table[i];
- regions[i].b_sum = input_regions[i].b_sum / b_table[i];
- regions[i].counted = input_regions[i].counted;
- // (don't care about the uncounted value)
+ regions[i].r_sum = inputRegions[i].r_sum / rTable[i];
+ regions[i].g_sum = inputRegions[i].g_sum / gTable[i];
+ regions[i].b_sum = inputRegions[i].b_sum / bTable[i];
+ regions[i].counted = inputRegions[i].counted;
+ /* (don't care about the uncounted value) */
}
}
-void Alsc::restartAsync(StatisticsPtr &stats, Metadata *image_metadata)
+void Alsc::restartAsync(StatisticsPtr &stats, Metadata *imageMetadata)
{
LOG(RPiAlsc, Debug) << "Starting ALSC calculation";
- // Get the current colour temperature. It's all we need from the
- // metadata. Default to the last CT value (which could be the default).
- ct_ = get_ct(image_metadata, ct_);
- // We have to copy the statistics here, dividing out our best guess of
- // the LSC table that the pipeline applied to them.
- AlscStatus alsc_status;
- if (image_metadata->Get("alsc.status", alsc_status) != 0) {
+ /*
+ * Get the current colour temperature. It's all we need from the
+ * metadata. Default to the last CT value (which could be the default).
+ */
+ ct_ = getCt(imageMetadata, ct_);
+ /*
+ * We have to copy the statistics here, dividing out our best guess of
+ * the LSC table that the pipeline applied to them.
+ */
+ AlscStatus alscStatus;
+ if (imageMetadata->get("alsc.status", alscStatus) != 0) {
LOG(RPiAlsc, Warning)
<< "No ALSC status found for applied gains!";
for (int y = 0; y < Y; y++)
for (int x = 0; x < X; x++) {
- alsc_status.r[y][x] = 1.0;
- alsc_status.g[y][x] = 1.0;
- alsc_status.b[y][x] = 1.0;
+ alscStatus.r[y][x] = 1.0;
+ alscStatus.g[y][x] = 1.0;
+ alscStatus.b[y][x] = 1.0;
}
}
- copy_stats(statistics_, stats, alsc_status);
- frame_phase_ = 0;
- async_started_ = true;
+ copyStats(statistics_, stats, alscStatus);
+ framePhase_ = 0;
+ asyncStarted_ = true;
{
std::lock_guard<std::mutex> lock(mutex_);
- async_start_ = true;
+ asyncStart_ = true;
}
- async_signal_.notify_one();
+ asyncSignal_.notify_one();
}
-void Alsc::Prepare(Metadata *image_metadata)
+void Alsc::prepare(Metadata *imageMetadata)
{
- // Count frames since we started, and since we last poked the async
- // thread.
- if (frame_count_ < (int)config_.startup_frames)
- frame_count_++;
- double speed = frame_count_ < (int)config_.startup_frames
+ /*
+ * Count frames since we started, and since we last poked the async
+ * thread.
+ */
+ if (frameCount_ < (int)config_.startupFrames)
+ frameCount_++;
+ double speed = frameCount_ < (int)config_.startupFrames
? 1.0
: config_.speed;
LOG(RPiAlsc, Debug)
- << "frame_count " << frame_count_ << " speed " << speed;
+ << "frame count " << frameCount_ << " speed " << speed;
{
std::unique_lock<std::mutex> lock(mutex_);
- if (async_started_ && async_finished_)
+ if (asyncStarted_ && asyncFinished_)
fetchAsyncResults();
}
- // Apply IIR filter to results and program into the pipeline.
- double *ptr = (double *)sync_results_,
- *pptr = (double *)prev_sync_results_;
- for (unsigned int i = 0;
- i < sizeof(sync_results_) / sizeof(double); i++)
+ /* Apply IIR filter to results and program into the pipeline. */
+ double *ptr = (double *)syncResults_,
+ *pptr = (double *)prevSyncResults_;
+ for (unsigned int i = 0; i < sizeof(syncResults_) / sizeof(double); i++)
pptr[i] = speed * ptr[i] + (1.0 - speed) * pptr[i];
- // Put output values into status metadata.
+ /* Put output values into status metadata. */
AlscStatus status;
- memcpy(status.r, prev_sync_results_[0], sizeof(status.r));
- memcpy(status.g, prev_sync_results_[1], sizeof(status.g));
- memcpy(status.b, prev_sync_results_[2], sizeof(status.b));
- image_metadata->Set("alsc.status", status);
-}
-
-void Alsc::Process(StatisticsPtr &stats, Metadata *image_metadata)
-{
- // Count frames since we started, and since we last poked the async
- // thread.
- if (frame_phase_ < (int)config_.frame_period)
- frame_phase_++;
- if (frame_count2_ < (int)config_.startup_frames)
- frame_count2_++;
- LOG(RPiAlsc, Debug) << "frame_phase " << frame_phase_;
- if (frame_phase_ >= (int)config_.frame_period ||
- frame_count2_ < (int)config_.startup_frames) {
- if (async_started_ == false)
- restartAsync(stats, image_metadata);
+ memcpy(status.r, prevSyncResults_[0], sizeof(status.r));
+ memcpy(status.g, prevSyncResults_[1], sizeof(status.g));
+ memcpy(status.b, prevSyncResults_[2], sizeof(status.b));
+ imageMetadata->set("alsc.status", status);
+}
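
Aside (not part of the patch): the loop above is a plain first-order IIR filter. A minimal sketch of just that step, under the assumption that both tables are flat arrays of the same length:

	#include <cstddef>

	/* Move each stored value a fraction "speed" of the way towards the new value. */
	static void iirFilter(double *prev, const double *next, size_t n, double speed)
	{
		for (size_t i = 0; i < n; i++)
			prev[i] = speed * next[i] + (1.0 - speed) * prev[i];
	}

With speed = 1.0 during the startup frames the filter simply copies the newest results; afterwards the configured speed gives slow, smooth convergence.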
+
+void Alsc::process(StatisticsPtr &stats, Metadata *imageMetadata)
+{
+ /*
+ * Count frames since we started, and since we last poked the async
+ * thread.
+ */
+ if (framePhase_ < (int)config_.framePeriod)
+ framePhase_++;
+ if (frameCount2_ < (int)config_.startupFrames)
+ frameCount2_++;
+	LOG(RPiAlsc, Debug) << "frame phase " << framePhase_;
+ if (framePhase_ >= (int)config_.framePeriod ||
+ frameCount2_ < (int)config_.startupFrames) {
+ if (asyncStarted_ == false)
+ restartAsync(stats, imageMetadata);
}
}
@@ -375,143 +413,142 @@ void Alsc::asyncFunc()
while (true) {
{
std::unique_lock<std::mutex> lock(mutex_);
- async_signal_.wait(lock, [&] {
- return async_start_ || async_abort_;
+ asyncSignal_.wait(lock, [&] {
+ return asyncStart_ || asyncAbort_;
});
- async_start_ = false;
- if (async_abort_)
+ asyncStart_ = false;
+ if (asyncAbort_)
break;
}
doAlsc();
{
std::lock_guard<std::mutex> lock(mutex_);
- async_finished_ = true;
+ asyncFinished_ = true;
}
- sync_signal_.notify_one();
+ syncSignal_.notify_one();
}
}
-void get_cal_table(double ct, std::vector<AlscCalibration> const &calibrations,
- double cal_table[XY])
+void getCalTable(double ct, std::vector<AlscCalibration> const &calibrations,
+ double calTable[XY])
{
if (calibrations.empty()) {
for (int i = 0; i < XY; i++)
- cal_table[i] = 1.0;
+ calTable[i] = 1.0;
LOG(RPiAlsc, Debug) << "no calibrations found";
} else if (ct <= calibrations.front().ct) {
- memcpy(cal_table, calibrations.front().table,
- XY * sizeof(double));
+ memcpy(calTable, calibrations.front().table, XY * sizeof(double));
LOG(RPiAlsc, Debug) << "using calibration for "
<< calibrations.front().ct;
} else if (ct >= calibrations.back().ct) {
- memcpy(cal_table, calibrations.back().table,
- XY * sizeof(double));
+ memcpy(calTable, calibrations.back().table, XY * sizeof(double));
LOG(RPiAlsc, Debug) << "using calibration for "
<< calibrations.back().ct;
} else {
int idx = 0;
while (ct > calibrations[idx + 1].ct)
idx++;
- double ct0 = calibrations[idx].ct,
- ct1 = calibrations[idx + 1].ct;
+ double ct0 = calibrations[idx].ct, ct1 = calibrations[idx + 1].ct;
LOG(RPiAlsc, Debug)
<< "ct is " << ct << ", interpolating between "
<< ct0 << " and " << ct1;
for (int i = 0; i < XY; i++)
- cal_table[i] =
+ calTable[i] =
(calibrations[idx].table[i] * (ct1 - ct) +
calibrations[idx + 1].table[i] * (ct - ct0)) /
(ct1 - ct0);
}
}
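
Aside (not part of the patch): getCalTable() clamps to the first/last calibration outside the covered colour-temperature range and blends the two bracketing tables linearly inside it. A self-contained sketch of that policy, with invented types:

	#include <cstddef>
	#include <vector>

	struct Calibration {
		double ct;
		std::vector<double> table;
	};

	std::vector<double> tableForCt(double ct, const std::vector<Calibration> &cals)
	{
		if (cals.empty())
			return {};
		if (ct <= cals.front().ct)
			return cals.front().table;
		if (ct >= cals.back().ct)
			return cals.back().table;

		size_t idx = 0;
		while (ct > cals[idx + 1].ct)
			idx++;

		double ct0 = cals[idx].ct, ct1 = cals[idx + 1].ct;
		std::vector<double> out(cals[idx].table.size());
		for (size_t i = 0; i < out.size(); i++)
			out[i] = (cals[idx].table[i] * (ct1 - ct) +
				  cals[idx + 1].table[i] * (ct - ct0)) /
				 (ct1 - ct0);
		return out;
	}

This relies on readCalibrations() having rejected tables that are not in strictly increasing ct order, so the bracketing search always terminates.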
-void resample_cal_table(double const cal_table_in[XY],
- CameraMode const &camera_mode, double cal_table_out[XY])
+void resampleCalTable(double const calTableIn[XY],
+ CameraMode const &cameraMode, double calTableOut[XY])
{
- // Precalculate and cache the x sampling locations and phases to save
- // recomputing them on every row.
- int x_lo[X], x_hi[X];
+ /*
+ * Precalculate and cache the x sampling locations and phases to save
+ * recomputing them on every row.
+ */
+ int xLo[X], xHi[X];
double xf[X];
- double scale_x = camera_mode.sensor_width /
- (camera_mode.width * camera_mode.scale_x);
- double x_off = camera_mode.crop_x / (double)camera_mode.sensor_width;
- double x = .5 / scale_x + x_off * X - .5;
- double x_inc = 1 / scale_x;
- for (int i = 0; i < X; i++, x += x_inc) {
- x_lo[i] = floor(x);
- xf[i] = x - x_lo[i];
- x_hi[i] = std::min(x_lo[i] + 1, X - 1);
- x_lo[i] = std::max(x_lo[i], 0);
- if (!!(camera_mode.transform & libcamera::Transform::HFlip)) {
- x_lo[i] = X - 1 - x_lo[i];
- x_hi[i] = X - 1 - x_hi[i];
+ double scaleX = cameraMode.sensorWidth /
+ (cameraMode.width * cameraMode.scaleX);
+ double xOff = cameraMode.cropX / (double)cameraMode.sensorWidth;
+ double x = .5 / scaleX + xOff * X - .5;
+ double xInc = 1 / scaleX;
+ for (int i = 0; i < X; i++, x += xInc) {
+ xLo[i] = floor(x);
+ xf[i] = x - xLo[i];
+ xHi[i] = std::min(xLo[i] + 1, X - 1);
+ xLo[i] = std::max(xLo[i], 0);
+ if (!!(cameraMode.transform & libcamera::Transform::HFlip)) {
+ xLo[i] = X - 1 - xLo[i];
+ xHi[i] = X - 1 - xHi[i];
}
}
- // Now march over the output table generating the new values.
- double scale_y = camera_mode.sensor_height /
- (camera_mode.height * camera_mode.scale_y);
- double y_off = camera_mode.crop_y / (double)camera_mode.sensor_height;
- double y = .5 / scale_y + y_off * Y - .5;
- double y_inc = 1 / scale_y;
- for (int j = 0; j < Y; j++, y += y_inc) {
- int y_lo = floor(y);
- double yf = y - y_lo;
- int y_hi = std::min(y_lo + 1, Y - 1);
- y_lo = std::max(y_lo, 0);
- if (!!(camera_mode.transform & libcamera::Transform::VFlip)) {
- y_lo = Y - 1 - y_lo;
- y_hi = Y - 1 - y_hi;
+ /* Now march over the output table generating the new values. */
+ double scaleY = cameraMode.sensorHeight /
+ (cameraMode.height * cameraMode.scaleY);
+ double yOff = cameraMode.cropY / (double)cameraMode.sensorHeight;
+ double y = .5 / scaleY + yOff * Y - .5;
+ double yInc = 1 / scaleY;
+ for (int j = 0; j < Y; j++, y += yInc) {
+ int yLo = floor(y);
+ double yf = y - yLo;
+ int yHi = std::min(yLo + 1, Y - 1);
+ yLo = std::max(yLo, 0);
+ if (!!(cameraMode.transform & libcamera::Transform::VFlip)) {
+ yLo = Y - 1 - yLo;
+ yHi = Y - 1 - yHi;
}
- double const *row_above = cal_table_in + X * y_lo;
- double const *row_below = cal_table_in + X * y_hi;
+ double const *rowAbove = calTableIn + X * yLo;
+ double const *rowBelow = calTableIn + X * yHi;
for (int i = 0; i < X; i++) {
- double above = row_above[x_lo[i]] * (1 - xf[i]) +
- row_above[x_hi[i]] * xf[i];
- double below = row_below[x_lo[i]] * (1 - xf[i]) +
- row_below[x_hi[i]] * xf[i];
- *(cal_table_out++) = above * (1 - yf) + below * yf;
+ double above = rowAbove[xLo[i]] * (1 - xf[i]) +
+ rowAbove[xHi[i]] * xf[i];
+ double below = rowBelow[xLo[i]] * (1 - xf[i]) +
+ rowBelow[xHi[i]] * xf[i];
+ *(calTableOut++) = above * (1 - yf) + below * yf;
}
}
}
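
Aside (not part of the patch): per output cell, resampleCalTable() reduces to a standard bilinear lookup once the sample positions and phases are known. A small sketch of that core operation (the flips and the cached x phases are omitted):

	/* table is row-major, width cells per row; xf/yf are the interpolation phases. */
	static double bilinear(const double *table, int width,
			       int xLo, int xHi, double xf,
			       int yLo, int yHi, double yf)
	{
		double above = table[yLo * width + xLo] * (1 - xf) +
			       table[yLo * width + xHi] * xf;
		double below = table[yHi * width + xLo] * (1 - xf) +
			       table[yHi * width + xHi] * xf;
		return above * (1 - yf) + below * yf;
	}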
-// Calculate chrominance statistics (R/G and B/G) for each region.
+/* Calculate chrominance statistics (R/G and B/G) for each region. */
static_assert(XY == AWB_REGIONS, "ALSC/AWB statistics region mismatch");
-static void calculate_Cr_Cb(bcm2835_isp_stats_region *awb_region, double Cr[XY],
- double Cb[XY], uint32_t min_count, uint16_t min_G)
+static void calculateCrCb(bcm2835_isp_stats_region *awbRegion, double cr[XY],
+ double cb[XY], uint32_t minCount, uint16_t minG)
{
for (int i = 0; i < XY; i++) {
- bcm2835_isp_stats_region &zone = awb_region[i];
- if (zone.counted <= min_count ||
- zone.g_sum / zone.counted <= min_G) {
- Cr[i] = Cb[i] = INSUFFICIENT_DATA;
+ bcm2835_isp_stats_region &zone = awbRegion[i];
+ if (zone.counted <= minCount ||
+ zone.g_sum / zone.counted <= minG) {
+ cr[i] = cb[i] = InsufficientData;
continue;
}
- Cr[i] = zone.r_sum / (double)zone.g_sum;
- Cb[i] = zone.b_sum / (double)zone.g_sum;
+ cr[i] = zone.r_sum / (double)zone.g_sum;
+ cb[i] = zone.b_sum / (double)zone.g_sum;
}
}
-static void apply_cal_table(double const cal_table[XY], double C[XY])
+static void applyCalTable(double const calTable[XY], double C[XY])
{
for (int i = 0; i < XY; i++)
- if (C[i] != INSUFFICIENT_DATA)
- C[i] *= cal_table[i];
+ if (C[i] != InsufficientData)
+ C[i] *= calTable[i];
}
-void compensate_lambdas_for_cal(double const cal_table[XY],
- double const old_lambdas[XY],
- double new_lambdas[XY])
+void compensateLambdasForCal(double const calTable[XY],
+ double const oldLambdas[XY],
+ double newLambdas[XY])
{
- double min_new_lambda = std::numeric_limits<double>::max();
+ double minNewLambda = std::numeric_limits<double>::max();
for (int i = 0; i < XY; i++) {
- new_lambdas[i] = old_lambdas[i] * cal_table[i];
- min_new_lambda = std::min(min_new_lambda, new_lambdas[i]);
+ newLambdas[i] = oldLambdas[i] * calTable[i];
+ minNewLambda = std::min(minNewLambda, newLambdas[i]);
}
for (int i = 0; i < XY; i++)
- new_lambdas[i] /= min_new_lambda;
+ newLambdas[i] /= minNewLambda;
}
-[[maybe_unused]] static void print_cal_table(double const C[XY])
+[[maybe_unused]] static void printCalTable(double const C[XY])
{
printf("table: [\n");
for (int j = 0; j < Y; j++) {
@@ -525,146 +562,140 @@ void compensate_lambdas_for_cal(double const cal_table[XY],
printf("]\n");
}
-// Compute weight out of 1.0 which reflects how similar we wish to make the
-// colours of these two regions.
-static double compute_weight(double C_i, double C_j, double sigma)
+/*
+ * Compute weight out of 1.0 which reflects how similar we wish to make the
+ * colours of these two regions.
+ */
+static double computeWeight(double Ci, double Cj, double sigma)
{
- if (C_i == INSUFFICIENT_DATA || C_j == INSUFFICIENT_DATA)
+ if (Ci == InsufficientData || Cj == InsufficientData)
return 0;
- double diff = (C_i - C_j) / sigma;
+ double diff = (Ci - Cj) / sigma;
return exp(-diff * diff / 2);
}
-// Compute all weights.
-static void compute_W(double const C[XY], double sigma, double W[XY][4])
+/* Compute all weights. */
+static void computeW(double const C[XY], double sigma, double W[XY][4])
{
for (int i = 0; i < XY; i++) {
- // Start with neighbour above and go clockwise.
- W[i][0] = i >= X ? compute_weight(C[i], C[i - X], sigma) : 0;
- W[i][1] = i % X < X - 1 ? compute_weight(C[i], C[i + 1], sigma)
- : 0;
- W[i][2] =
- i < XY - X ? compute_weight(C[i], C[i + X], sigma) : 0;
- W[i][3] = i % X ? compute_weight(C[i], C[i - 1], sigma) : 0;
+ /* Start with neighbour above and go clockwise. */
+ W[i][0] = i >= X ? computeWeight(C[i], C[i - X], sigma) : 0;
+ W[i][1] = i % X < X - 1 ? computeWeight(C[i], C[i + 1], sigma) : 0;
+ W[i][2] = i < XY - X ? computeWeight(C[i], C[i + X], sigma) : 0;
+ W[i][3] = i % X ? computeWeight(C[i], C[i - 1], sigma) : 0;
}
}
-// Compute M, the large but sparse matrix such that M * lambdas = 0.
-static void construct_M(double const C[XY], double const W[XY][4],
- double M[XY][4])
+/* Compute M, the large but sparse matrix such that M * lambdas = 0. */
+static void constructM(double const C[XY], double const W[XY][4],
+ double M[XY][4])
{
double epsilon = 0.001;
for (int i = 0; i < XY; i++) {
- // Note how, if C[i] == INSUFFICIENT_DATA, the weights will all
- // be zero so the equation is still set up correctly.
+ /*
+		 * Note how, if C[i] == InsufficientData, the weights will all
+ * be zero so the equation is still set up correctly.
+ */
int m = !!(i >= X) + !!(i % X < X - 1) + !!(i < XY - X) +
- !!(i % X); // total number of neighbours
- // we'll divide the diagonal out straight away
- double diagonal =
- (epsilon + W[i][0] + W[i][1] + W[i][2] + W[i][3]) *
- C[i];
- M[i][0] = i >= X ? (W[i][0] * C[i - X] + epsilon / m * C[i]) /
- diagonal
- : 0;
- M[i][1] = i % X < X - 1
- ? (W[i][1] * C[i + 1] + epsilon / m * C[i]) /
- diagonal
- : 0;
- M[i][2] = i < XY - X
- ? (W[i][2] * C[i + X] + epsilon / m * C[i]) /
- diagonal
- : 0;
- M[i][3] = i % X ? (W[i][3] * C[i - 1] + epsilon / m * C[i]) /
- diagonal
- : 0;
+ !!(i % X); /* total number of neighbours */
+ /* we'll divide the diagonal out straight away */
+ double diagonal = (epsilon + W[i][0] + W[i][1] + W[i][2] + W[i][3]) * C[i];
+ M[i][0] = i >= X ? (W[i][0] * C[i - X] + epsilon / m * C[i]) / diagonal : 0;
+ M[i][1] = i % X < X - 1 ? (W[i][1] * C[i + 1] + epsilon / m * C[i]) / diagonal : 0;
+ M[i][2] = i < XY - X ? (W[i][2] * C[i + X] + epsilon / m * C[i]) / diagonal : 0;
+ M[i][3] = i % X ? (W[i][3] * C[i - 1] + epsilon / m * C[i]) / diagonal : 0;
}
}
-// In the compute_lambda_ functions, note that the matrix coefficients for the
-// left/right neighbours are zero down the left/right edges, so we don't need
-// need to test the i value to exclude them.
-static double compute_lambda_bottom(int i, double const M[XY][4],
- double lambda[XY])
+/*
+ * In the computeLambda functions, note that the matrix coefficients for the
+ * left/right neighbours are zero down the left/right edges, so we don't
+ * need to test the i value to exclude them.
+ */
+static double computeLambdaBottom(int i, double const M[XY][4],
+ double lambda[XY])
{
return M[i][1] * lambda[i + 1] + M[i][2] * lambda[i + X] +
M[i][3] * lambda[i - 1];
}
-static double compute_lambda_bottom_start(int i, double const M[XY][4],
- double lambda[XY])
+static double computeLambdaBottomStart(int i, double const M[XY][4],
+ double lambda[XY])
{
return M[i][1] * lambda[i + 1] + M[i][2] * lambda[i + X];
}
-static double compute_lambda_interior(int i, double const M[XY][4],
- double lambda[XY])
+static double computeLambdaInterior(int i, double const M[XY][4],
+ double lambda[XY])
{
return M[i][0] * lambda[i - X] + M[i][1] * lambda[i + 1] +
M[i][2] * lambda[i + X] + M[i][3] * lambda[i - 1];
}
-static double compute_lambda_top(int i, double const M[XY][4],
- double lambda[XY])
+static double computeLambdaTop(int i, double const M[XY][4],
+ double lambda[XY])
{
return M[i][0] * lambda[i - X] + M[i][1] * lambda[i + 1] +
M[i][3] * lambda[i - 1];
}
-static double compute_lambda_top_end(int i, double const M[XY][4],
- double lambda[XY])
+static double computeLambdaTopEnd(int i, double const M[XY][4],
+ double lambda[XY])
{
return M[i][0] * lambda[i - X] + M[i][3] * lambda[i - 1];
}
-// Gauss-Seidel iteration with over-relaxation.
-static double gauss_seidel2_SOR(double const M[XY][4], double omega,
- double lambda[XY], double lambda_bound)
+/* Gauss-Seidel iteration with over-relaxation. */
+static double gaussSeidel2Sor(double const M[XY][4], double omega,
+ double lambda[XY], double lambdaBound)
{
- const double min = 1 - lambda_bound, max = 1 + lambda_bound;
- double old_lambda[XY];
+ const double min = 1 - lambdaBound, max = 1 + lambdaBound;
+ double oldLambda[XY];
int i;
for (i = 0; i < XY; i++)
- old_lambda[i] = lambda[i];
- lambda[0] = compute_lambda_bottom_start(0, M, lambda);
+ oldLambda[i] = lambda[i];
+ lambda[0] = computeLambdaBottomStart(0, M, lambda);
lambda[0] = std::clamp(lambda[0], min, max);
for (i = 1; i < X; i++) {
- lambda[i] = compute_lambda_bottom(i, M, lambda);
+ lambda[i] = computeLambdaBottom(i, M, lambda);
lambda[i] = std::clamp(lambda[i], min, max);
}
for (; i < XY - X; i++) {
- lambda[i] = compute_lambda_interior(i, M, lambda);
+ lambda[i] = computeLambdaInterior(i, M, lambda);
lambda[i] = std::clamp(lambda[i], min, max);
}
for (; i < XY - 1; i++) {
- lambda[i] = compute_lambda_top(i, M, lambda);
+ lambda[i] = computeLambdaTop(i, M, lambda);
lambda[i] = std::clamp(lambda[i], min, max);
}
- lambda[i] = compute_lambda_top_end(i, M, lambda);
+ lambda[i] = computeLambdaTopEnd(i, M, lambda);
lambda[i] = std::clamp(lambda[i], min, max);
- // Also solve the system from bottom to top, to help spread the updates
- // better.
- lambda[i] = compute_lambda_top_end(i, M, lambda);
+ /*
+ * Also solve the system from bottom to top, to help spread the updates
+ * better.
+ */
+ lambda[i] = computeLambdaTopEnd(i, M, lambda);
lambda[i] = std::clamp(lambda[i], min, max);
for (i = XY - 2; i >= XY - X; i--) {
- lambda[i] = compute_lambda_top(i, M, lambda);
+ lambda[i] = computeLambdaTop(i, M, lambda);
lambda[i] = std::clamp(lambda[i], min, max);
}
for (; i >= X; i--) {
- lambda[i] = compute_lambda_interior(i, M, lambda);
+ lambda[i] = computeLambdaInterior(i, M, lambda);
lambda[i] = std::clamp(lambda[i], min, max);
}
for (; i >= 1; i--) {
- lambda[i] = compute_lambda_bottom(i, M, lambda);
+ lambda[i] = computeLambdaBottom(i, M, lambda);
lambda[i] = std::clamp(lambda[i], min, max);
}
- lambda[0] = compute_lambda_bottom_start(0, M, lambda);
+ lambda[0] = computeLambdaBottomStart(0, M, lambda);
lambda[0] = std::clamp(lambda[0], min, max);
- double max_diff = 0;
+ double maxDiff = 0;
for (i = 0; i < XY; i++) {
- lambda[i] = old_lambda[i] + (lambda[i] - old_lambda[i]) * omega;
- if (fabs(lambda[i] - old_lambda[i]) > fabs(max_diff))
- max_diff = lambda[i] - old_lambda[i];
+ lambda[i] = oldLambda[i] + (lambda[i] - oldLambda[i]) * omega;
+ if (fabs(lambda[i] - oldLambda[i]) > fabs(maxDiff))
+ maxDiff = lambda[i] - oldLambda[i];
}
- return max_diff;
+ return maxDiff;
}
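
Aside (not part of the patch): gaussSeidel2Sor() is a sparse, clamped variant of Gauss-Seidel iteration with successive over-relaxation. A generic dense sketch of the same update rule on a tiny system, purely for illustration:

	#include <algorithm>
	#include <cmath>

	/* Solve A x = b approximately; omega in (1, 2) over-relaxes each update. */
	static void sorSolve(const double A[3][3], const double b[3], double x[3],
			     double omega, int maxIter, double threshold)
	{
		for (int it = 0; it < maxIter; it++) {
			double maxDiff = 0.0;
			for (int i = 0; i < 3; i++) {
				double sigma = 0.0;
				for (int j = 0; j < 3; j++)
					if (j != i)
						sigma += A[i][j] * x[j];
				double gs = (b[i] - sigma) / A[i][i]; /* plain Gauss-Seidel value */
				double next = x[i] + omega * (gs - x[i]); /* over-relaxed step */
				maxDiff = std::max(maxDiff, std::fabs(next - x[i]));
				x[i] = next;
			}
			if (maxDiff < threshold)
				break;
		}
	}

The ALSC version above differs in that the matrix is the sparse four-neighbour M with the diagonal already divided out, the sweep also runs bottom-to-top to spread updates, and each lambda is clamped to [1 - lambdaBound, 1 + lambdaBound].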
-// Normalise the values so that the smallest value is 1.
+/* Normalise the values so that the smallest value is 1. */
static void normalise(double *ptr, size_t n)
{
double minval = ptr[0];
@@ -674,7 +705,7 @@ static void normalise(double *ptr, size_t n)
ptr[i] /= minval;
}
-// Rescale the values so that the average value is 1.
+/* Rescale the values so that the average value is 1. */
static void reaverage(Span<double> data)
{
double sum = std::accumulate(data.begin(), data.end(), 0.0);
@@ -683,105 +714,109 @@ static void reaverage(Span<double> data)
d *= ratio;
}
-static void run_matrix_iterations(double const C[XY], double lambda[XY],
- double const W[XY][4], double omega,
- int n_iter, double threshold, double lambda_bound)
+static void runMatrixIterations(double const C[XY], double lambda[XY],
+ double const W[XY][4], double omega,
+ int nIter, double threshold, double lambdaBound)
{
double M[XY][4];
- construct_M(C, W, M);
- double last_max_diff = std::numeric_limits<double>::max();
- for (int i = 0; i < n_iter; i++) {
- double max_diff = fabs(gauss_seidel2_SOR(M, omega, lambda, lambda_bound));
- if (max_diff < threshold) {
+ constructM(C, W, M);
+ double lastMaxDiff = std::numeric_limits<double>::max();
+ for (int i = 0; i < nIter; i++) {
+ double maxDiff = fabs(gaussSeidel2Sor(M, omega, lambda, lambdaBound));
+ if (maxDiff < threshold) {
LOG(RPiAlsc, Debug)
<< "Stop after " << i + 1 << " iterations";
break;
}
- // this happens very occasionally (so make a note), though
- // doesn't seem to matter
- if (max_diff > last_max_diff)
+ /*
+		 * This happens very occasionally (so make a note), though it
+		 * doesn't seem to matter.
+ */
+ if (maxDiff > lastMaxDiff)
LOG(RPiAlsc, Debug)
- << "Iteration " << i << ": max_diff gone up "
- << last_max_diff << " to " << max_diff;
- last_max_diff = max_diff;
+ << "Iteration " << i << ": maxDiff gone up "
+ << lastMaxDiff << " to " << maxDiff;
+ lastMaxDiff = maxDiff;
}
- // We're going to normalise the lambdas so the total average is 1.
+ /* We're going to normalise the lambdas so the total average is 1. */
reaverage({ lambda, XY });
}
-static void add_luminance_rb(double result[XY], double const lambda[XY],
- double const luminance_lut[XY],
- double luminance_strength)
+static void addLuminanceRb(double result[XY], double const lambda[XY],
+ double const luminanceLut[XY],
+ double luminanceStrength)
{
for (int i = 0; i < XY; i++)
- result[i] = lambda[i] *
- ((luminance_lut[i] - 1) * luminance_strength + 1);
+ result[i] = lambda[i] * ((luminanceLut[i] - 1) * luminanceStrength + 1);
}
-static void add_luminance_g(double result[XY], double lambda,
- double const luminance_lut[XY],
- double luminance_strength)
+static void addLuminanceG(double result[XY], double lambda,
+ double const luminanceLut[XY],
+ double luminanceStrength)
{
for (int i = 0; i < XY; i++)
- result[i] = lambda *
- ((luminance_lut[i] - 1) * luminance_strength + 1);
+ result[i] = lambda * ((luminanceLut[i] - 1) * luminanceStrength + 1);
}
-void add_luminance_to_tables(double results[3][Y][X], double const lambda_r[XY],
- double lambda_g, double const lambda_b[XY],
- double const luminance_lut[XY],
- double luminance_strength)
+void addLuminanceToTables(double results[3][Y][X], double const lambdaR[XY],
+ double lambdaG, double const lambdaB[XY],
+ double const luminanceLut[XY],
+ double luminanceStrength)
{
- add_luminance_rb((double *)results[0], lambda_r, luminance_lut,
- luminance_strength);
- add_luminance_g((double *)results[1], lambda_g, luminance_lut,
- luminance_strength);
- add_luminance_rb((double *)results[2], lambda_b, luminance_lut,
- luminance_strength);
+ addLuminanceRb((double *)results[0], lambdaR, luminanceLut, luminanceStrength);
+ addLuminanceG((double *)results[1], lambdaG, luminanceLut, luminanceStrength);
+ addLuminanceRb((double *)results[2], lambdaB, luminanceLut, luminanceStrength);
normalise((double *)results, 3 * XY);
}
void Alsc::doAlsc()
{
- double Cr[XY], Cb[XY], Wr[XY][4], Wb[XY][4], cal_table_r[XY],
- cal_table_b[XY], cal_table_tmp[XY];
- // Calculate our R/B ("Cr"/"Cb") colour statistics, and assess which are
- // usable.
- calculate_Cr_Cb(statistics_, Cr, Cb, config_.min_count, config_.min_G);
- // Fetch the new calibrations (if any) for this CT. Resample them in
- // case the camera mode is not full-frame.
- get_cal_table(ct_, config_.calibrations_Cr, cal_table_tmp);
- resample_cal_table(cal_table_tmp, camera_mode_, cal_table_r);
- get_cal_table(ct_, config_.calibrations_Cb, cal_table_tmp);
- resample_cal_table(cal_table_tmp, camera_mode_, cal_table_b);
- // You could print out the cal tables for this image here, if you're
- // tuning the algorithm...
- // Apply any calibration to the statistics, so the adaptive algorithm
- // makes only the extra adjustments.
- apply_cal_table(cal_table_r, Cr);
- apply_cal_table(cal_table_b, Cb);
- // Compute weights between zones.
- compute_W(Cr, config_.sigma_Cr, Wr);
- compute_W(Cb, config_.sigma_Cb, Wb);
- // Run Gauss-Seidel iterations over the resulting matrix, for R and B.
- run_matrix_iterations(Cr, lambda_r_, Wr, config_.omega, config_.n_iter,
- config_.threshold, config_.lambda_bound);
- run_matrix_iterations(Cb, lambda_b_, Wb, config_.omega, config_.n_iter,
- config_.threshold, config_.lambda_bound);
- // Fold the calibrated gains into our final lambda values. (Note that on
- // the next run, we re-start with the lambda values that don't have the
- // calibration gains included.)
- compensate_lambdas_for_cal(cal_table_r, lambda_r_, async_lambda_r_);
- compensate_lambdas_for_cal(cal_table_b, lambda_b_, async_lambda_b_);
- // Fold in the luminance table at the appropriate strength.
- add_luminance_to_tables(async_results_, async_lambda_r_, 1.0,
- async_lambda_b_, luminance_table_,
- config_.luminance_strength);
-}
-
-// Register algorithm with the system.
-static Algorithm *Create(Controller *controller)
+ double cr[XY], cb[XY], wr[XY][4], wb[XY][4], calTableR[XY], calTableB[XY], calTableTmp[XY];
+ /*
+ * Calculate our R/B ("Cr"/"Cb") colour statistics, and assess which are
+ * usable.
+ */
+ calculateCrCb(statistics_, cr, cb, config_.minCount, config_.minG);
+ /*
+ * Fetch the new calibrations (if any) for this CT. Resample them in
+ * case the camera mode is not full-frame.
+ */
+ getCalTable(ct_, config_.calibrationsCr, calTableTmp);
+ resampleCalTable(calTableTmp, cameraMode_, calTableR);
+ getCalTable(ct_, config_.calibrationsCb, calTableTmp);
+ resampleCalTable(calTableTmp, cameraMode_, calTableB);
+ /*
+ * You could print out the cal tables for this image here, if you're
+ * tuning the algorithm...
+ * Apply any calibration to the statistics, so the adaptive algorithm
+ * makes only the extra adjustments.
+ */
+ applyCalTable(calTableR, cr);
+ applyCalTable(calTableB, cb);
+ /* Compute weights between zones. */
+ computeW(cr, config_.sigmaCr, wr);
+ computeW(cb, config_.sigmaCb, wb);
+ /* Run Gauss-Seidel iterations over the resulting matrix, for R and B. */
+ runMatrixIterations(cr, lambdaR_, wr, config_.omega, config_.nIter,
+ config_.threshold, config_.lambdaBound);
+ runMatrixIterations(cb, lambdaB_, wb, config_.omega, config_.nIter,
+ config_.threshold, config_.lambdaBound);
+ /*
+ * Fold the calibrated gains into our final lambda values. (Note that on
+ * the next run, we re-start with the lambda values that don't have the
+ * calibration gains included.)
+ */
+ compensateLambdasForCal(calTableR, lambdaR_, asyncLambdaR_);
+ compensateLambdasForCal(calTableB, lambdaB_, asyncLambdaB_);
+ /* Fold in the luminance table at the appropriate strength. */
+ addLuminanceToTables(asyncResults_, asyncLambdaR_, 1.0,
+ asyncLambdaB_, luminanceTable_,
+ config_.luminanceStrength);
+}
+
+/* Register algorithm with the system. */
+static Algorithm *create(Controller *controller)
{
return (Algorithm *)new Alsc(controller);
}
-static RegisterAlgorithm reg(NAME, &Create);
+static RegisterAlgorithm reg(NAME, &create);
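
Aside (not part of the patch): the final line uses the controller's usual self-registration idiom, where a file-scope object's constructor adds a named factory at program start-up. A generic sketch of the pattern with invented types (not the actual RegisterAlgorithm definition):

	#include <functional>
	#include <map>
	#include <string>
	#include <utility>

	struct Widget {
		virtual ~Widget() = default;
	};

	using WidgetFactory = std::function<Widget *()>;

	static std::map<std::string, WidgetFactory> &widgetRegistry()
	{
		/* Function-local static avoids static initialisation order problems. */
		static std::map<std::string, WidgetFactory> registry;
		return registry;
	}

	struct RegisterWidget {
		RegisterWidget(const std::string &name, WidgetFactory factory)
		{
			widgetRegistry()[name] = std::move(factory);
		}
	};

	struct MyWidget : Widget {
	};

	static RegisterWidget reg("my.widget", [] { return new MyWidget(); });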
diff --git a/src/ipa/raspberrypi/controller/rpi/alsc.h b/src/ipa/raspberrypi/controller/rpi/alsc.h
new file mode 100644
index 00000000..a858ef5a
--- /dev/null
+++ b/src/ipa/raspberrypi/controller/rpi/alsc.h
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * alsc.h - ALSC (auto lens shading correction) control algorithm
+ */
+#pragma once
+
+#include <mutex>
+#include <condition_variable>
+#include <thread>
+
+#include "../algorithm.h"
+#include "../alsc_status.h"
+
+namespace RPiController {
+
+/* Algorithm to generate automagic LSC (Lens Shading Correction) tables. */
+
+struct AlscCalibration {
+ double ct;
+ double table[AlscCellsX * AlscCellsY];
+};
+
+struct AlscConfig {
+ /* Only repeat the ALSC calculation every "this many" frames */
+ uint16_t framePeriod;
+ /* number of initial frames for which speed taken as 1.0 (maximum) */
+ uint16_t startupFrames;
+ /* IIR filter speed applied to algorithm results */
+ double speed;
+ double sigmaCr;
+ double sigmaCb;
+ double minCount;
+ uint16_t minG;
+ double omega;
+ uint32_t nIter;
+ double luminanceLut[AlscCellsX * AlscCellsY];
+ double luminanceStrength;
+ std::vector<AlscCalibration> calibrationsCr;
+ std::vector<AlscCalibration> calibrationsCb;
+ double defaultCt; /* colour temperature if no metadata found */
+ double threshold; /* iteration termination threshold */
+ double lambdaBound; /* upper/lower bound for lambda from a value of 1 */
+};
+
+class Alsc : public Algorithm
+{
+public:
+ Alsc(Controller *controller = NULL);
+ ~Alsc();
+ char const *name() const override;
+ void initialise() override;
+ void switchMode(CameraMode const &cameraMode, Metadata *metadata) override;
+ int read(const libcamera::YamlObject &params) override;
+ void prepare(Metadata *imageMetadata) override;
+ void process(StatisticsPtr &stats, Metadata *imageMetadata) override;
+
+private:
+ /* configuration is read-only, and available to both threads */
+ AlscConfig config_;
+ bool firstTime_;
+ CameraMode cameraMode_;
+ double luminanceTable_[AlscCellsX * AlscCellsY];
+ std::thread asyncThread_;
+ void asyncFunc(); /* asynchronous thread function */
+ std::mutex mutex_;
+ /* condvar for async thread to wait on */
+ std::condition_variable asyncSignal_;
+ /* condvar for synchronous thread to wait on */
+ std::condition_variable syncSignal_;
+ /* for sync thread to check if async thread finished (requires mutex) */
+ bool asyncFinished_;
+ /* for async thread to check if it's been told to run (requires mutex) */
+ bool asyncStart_;
+ /* for async thread to check if it's been told to quit (requires mutex) */
+ bool asyncAbort_;
+
+ /*
+ * The following are only for the synchronous thread to use:
+	 * for sync thread to note it has asked async thread to run
+ */
+ bool asyncStarted_;
+ /* counts up to framePeriod before restarting the async thread */
+ int framePhase_;
+ /* counts up to startupFrames */
+ int frameCount_;
+	/* counts up to startupFrames for the process() function */
+ int frameCount2_;
+ double syncResults_[3][AlscCellsY][AlscCellsX];
+ double prevSyncResults_[3][AlscCellsY][AlscCellsX];
+ void waitForAysncThread();
+ /*
+ * The following are for the asynchronous thread to use, though the main
+ * thread can set/reset them if the async thread is known to be idle:
+ */
+ void restartAsync(StatisticsPtr &stats, Metadata *imageMetadata);
+ /* copy out the results from the async thread so that it can be restarted */
+ void fetchAsyncResults();
+ double ct_;
+ bcm2835_isp_stats_region statistics_[AlscCellsY * AlscCellsX];
+ double asyncResults_[3][AlscCellsY][AlscCellsX];
+ double asyncLambdaR_[AlscCellsX * AlscCellsY];
+ double asyncLambdaB_[AlscCellsX * AlscCellsY];
+ void doAlsc();
+ double lambdaR_[AlscCellsX * AlscCellsY];
+ double lambdaB_[AlscCellsX * AlscCellsY];
+};
+
+} /* namespace RPiController */
diff --git a/src/ipa/raspberrypi/controller/rpi/alsc.hpp b/src/ipa/raspberrypi/controller/rpi/alsc.hpp
deleted file mode 100644
index d1dbe0d1..00000000
--- a/src/ipa/raspberrypi/controller/rpi/alsc.hpp
+++ /dev/null
@@ -1,106 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
- *
- * alsc.hpp - ALSC (auto lens shading correction) control algorithm
- */
-#pragma once
-
-#include <mutex>
-#include <condition_variable>
-#include <thread>
-
-#include "../algorithm.hpp"
-#include "../alsc_status.h"
-
-namespace RPiController {
-
-// Algorithm to generate automagic LSC (Lens Shading Correction) tables.
-
-struct AlscCalibration {
- double ct;
- double table[ALSC_CELLS_X * ALSC_CELLS_Y];
-};
-
-struct AlscConfig {
- // Only repeat the ALSC calculation every "this many" frames
- uint16_t frame_period;
- // number of initial frames for which speed taken as 1.0 (maximum)
- uint16_t startup_frames;
- // IIR filter speed applied to algorithm results
- double speed;
- double sigma_Cr;
- double sigma_Cb;
- double min_count;
- uint16_t min_G;
- double omega;
- uint32_t n_iter;
- double luminance_lut[ALSC_CELLS_X * ALSC_CELLS_Y];
- double luminance_strength;
- std::vector<AlscCalibration> calibrations_Cr;
- std::vector<AlscCalibration> calibrations_Cb;
- double default_ct; // colour temperature if no metadata found
- double threshold; // iteration termination threshold
- double lambda_bound; // upper/lower bound for lambda from a value of 1
-};
-
-class Alsc : public Algorithm
-{
-public:
- Alsc(Controller *controller = NULL);
- ~Alsc();
- char const *Name() const override;
- void Initialise() override;
- void SwitchMode(CameraMode const &camera_mode, Metadata *metadata) override;
- void Read(boost::property_tree::ptree const &params) override;
- void Prepare(Metadata *image_metadata) override;
- void Process(StatisticsPtr &stats, Metadata *image_metadata) override;
-
-private:
- // configuration is read-only, and available to both threads
- AlscConfig config_;
- bool first_time_;
- CameraMode camera_mode_;
- double luminance_table_[ALSC_CELLS_X * ALSC_CELLS_Y];
- std::thread async_thread_;
- void asyncFunc(); // asynchronous thread function
- std::mutex mutex_;
- // condvar for async thread to wait on
- std::condition_variable async_signal_;
- // condvar for synchronous thread to wait on
- std::condition_variable sync_signal_;
- // for sync thread to check if async thread finished (requires mutex)
- bool async_finished_;
- // for async thread to check if it's been told to run (requires mutex)
- bool async_start_;
- // for async thread to check if it's been told to quit (requires mutex)
- bool async_abort_;
-
- // The following are only for the synchronous thread to use:
- // for sync thread to note its has asked async thread to run
- bool async_started_;
- // counts up to frame_period before restarting the async thread
- int frame_phase_;
- // counts up to startup_frames
- int frame_count_;
- // counts up to startup_frames for Process function
- int frame_count2_;
- double sync_results_[3][ALSC_CELLS_Y][ALSC_CELLS_X];
- double prev_sync_results_[3][ALSC_CELLS_Y][ALSC_CELLS_X];
- void waitForAysncThread();
- // The following are for the asynchronous thread to use, though the main
- // thread can set/reset them if the async thread is known to be idle:
- void restartAsync(StatisticsPtr &stats, Metadata *image_metadata);
- // copy out the results from the async thread so that it can be restarted
- void fetchAsyncResults();
- double ct_;
- bcm2835_isp_stats_region statistics_[ALSC_CELLS_Y * ALSC_CELLS_X];
- double async_results_[3][ALSC_CELLS_Y][ALSC_CELLS_X];
- double async_lambda_r_[ALSC_CELLS_X * ALSC_CELLS_Y];
- double async_lambda_b_[ALSC_CELLS_X * ALSC_CELLS_Y];
- void doAlsc();
- double lambda_r_[ALSC_CELLS_X * ALSC_CELLS_Y];
- double lambda_b_[ALSC_CELLS_X * ALSC_CELLS_Y];
-};
-
-} // namespace RPiController
diff --git a/src/ipa/raspberrypi/controller/rpi/awb.cpp b/src/ipa/raspberrypi/controller/rpi/awb.cpp
index d4c93447..2b88c3b0 100644
--- a/src/ipa/raspberrypi/controller/rpi/awb.cpp
+++ b/src/ipa/raspberrypi/controller/rpi/awb.cpp
@@ -1,15 +1,18 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
+ * Copyright (C) 2019, Raspberry Pi Ltd
*
* awb.cpp - AWB control algorithm
*/
+#include <assert.h>
+#include <functional>
+
#include <libcamera/base/log.h>
#include "../lux_status.h"
-#include "awb.hpp"
+#include "awb.h"
using namespace RPiController;
using namespace libcamera;
@@ -18,318 +21,370 @@ LOG_DEFINE_CATEGORY(RPiAwb)
#define NAME "rpi.awb"
-#define AWB_STATS_SIZE_X DEFAULT_AWB_REGIONS_X
-#define AWB_STATS_SIZE_Y DEFAULT_AWB_REGIONS_Y
+static constexpr unsigned int AwbStatsSizeX = DEFAULT_AWB_REGIONS_X;
+static constexpr unsigned int AwbStatsSizeY = DEFAULT_AWB_REGIONS_Y;
-// todo - the locking in this algorithm needs some tidying up as has been done
-// elsewhere (ALSC and AGC).
+/*
+ * todo - the locking in this algorithm needs some tidying up as has been done
+ * elsewhere (ALSC and AGC).
+ */
-void AwbMode::Read(boost::property_tree::ptree const &params)
+int AwbMode::read(const libcamera::YamlObject &params)
{
- ct_lo = params.get<double>("lo");
- ct_hi = params.get<double>("hi");
+ auto value = params["lo"].get<double>();
+ if (!value)
+ return -EINVAL;
+ ctLo = *value;
+
+ value = params["hi"].get<double>();
+ if (!value)
+ return -EINVAL;
+ ctHi = *value;
+
+ return 0;
}
-void AwbPrior::Read(boost::property_tree::ptree const &params)
+int AwbPrior::read(const libcamera::YamlObject &params)
{
- lux = params.get<double>("lux");
- prior.Read(params.get_child("prior"));
+ auto value = params["lux"].get<double>();
+ if (!value)
+ return -EINVAL;
+ lux = *value;
+
+ return prior.read(params["prior"]);
+}
+
+static int readCtCurve(Pwl &ctR, Pwl &ctB, const libcamera::YamlObject &params)
+{
+ if (params.size() % 3) {
+ LOG(RPiAwb, Error) << "AwbConfig: incomplete CT curve entry";
+ return -EINVAL;
+ }
+
+ if (params.size() < 6) {
+ LOG(RPiAwb, Error) << "AwbConfig: insufficient points in CT curve";
+ return -EINVAL;
+ }
+
+ const auto &list = params.asList();
+
+ for (auto it = list.begin(); it != list.end(); it++) {
+ auto value = it->get<double>();
+ if (!value)
+ return -EINVAL;
+ double ct = *value;
+
+ assert(it == list.begin() || ct != ctR.domain().end);
+
+ value = (++it)->get<double>();
+ if (!value)
+ return -EINVAL;
+ ctR.append(ct, *value);
+
+ value = (++it)->get<double>();
+ if (!value)
+ return -EINVAL;
+ ctB.append(ct, *value);
+ }
+
+ return 0;
}
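
Aside (not part of the patch): the ct_curve list parsed above is a flat sequence of (ct, r, b) triplets that becomes two piecewise-linear curves, ct -> red gain and ct -> blue gain. A simplified sketch of that split, using plain vectors of points as stand-ins for Pwl:

	#include <cstddef>
	#include <utility>
	#include <vector>

	static int splitCtCurve(const std::vector<double> &flat,
				std::vector<std::pair<double, double>> &ctR,
				std::vector<std::pair<double, double>> &ctB)
	{
		if (flat.size() % 3 || flat.size() < 6)
			return -1; /* incomplete triplet, or fewer than two points */

		for (size_t i = 0; i < flat.size(); i += 3) {
			ctR.emplace_back(flat[i], flat[i + 1]);
			ctB.emplace_back(flat[i], flat[i + 2]);
		}
		return 0;
	}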
-static void read_ct_curve(Pwl &ct_r, Pwl &ct_b,
- boost::property_tree::ptree const &params)
+int AwbConfig::read(const libcamera::YamlObject &params)
{
- int num = 0;
- for (auto it = params.begin(); it != params.end(); it++) {
- double ct = it->second.get_value<double>();
- assert(it == params.begin() || ct != ct_r.Domain().end);
- if (++it == params.end())
- throw std::runtime_error(
- "AwbConfig: incomplete CT curve entry");
- ct_r.Append(ct, it->second.get_value<double>());
- if (++it == params.end())
- throw std::runtime_error(
- "AwbConfig: incomplete CT curve entry");
- ct_b.Append(ct, it->second.get_value<double>());
- num++;
+ int ret;
+
+ bayes = params["bayes"].get<int>(1);
+ framePeriod = params["frame_period"].get<uint16_t>(10);
+ startupFrames = params["startup_frames"].get<uint16_t>(10);
+ convergenceFrames = params["convergence_frames"].get<unsigned int>(3);
+ speed = params["speed"].get<double>(0.05);
+
+ if (params.contains("ct_curve")) {
+ ret = readCtCurve(ctR, ctB, params["ct_curve"]);
+ if (ret)
+ return ret;
}
- if (num < 2)
- throw std::runtime_error(
- "AwbConfig: insufficient points in CT curve");
-}
-
-void AwbConfig::Read(boost::property_tree::ptree const &params)
-{
- bayes = params.get<int>("bayes", 1);
- frame_period = params.get<uint16_t>("frame_period", 10);
- startup_frames = params.get<uint16_t>("startup_frames", 10);
- convergence_frames = params.get<unsigned int>("convergence_frames", 3);
- speed = params.get<double>("speed", 0.05);
- if (params.get_child_optional("ct_curve"))
- read_ct_curve(ct_r, ct_b, params.get_child("ct_curve"));
- if (params.get_child_optional("priors")) {
- for (auto &p : params.get_child("priors")) {
+
+ if (params.contains("priors")) {
+ for (const auto &p : params["priors"].asList()) {
AwbPrior prior;
- prior.Read(p.second);
- if (!priors.empty() && prior.lux <= priors.back().lux)
- throw std::runtime_error(
- "AwbConfig: Prior must be ordered in increasing lux value");
+ ret = prior.read(p);
+ if (ret)
+ return ret;
+ if (!priors.empty() && prior.lux <= priors.back().lux) {
+ LOG(RPiAwb, Error) << "AwbConfig: Prior must be ordered in increasing lux value";
+ return -EINVAL;
+ }
priors.push_back(prior);
}
- if (priors.empty())
- throw std::runtime_error(
- "AwbConfig: no AWB priors configured");
+ if (priors.empty()) {
+ LOG(RPiAwb, Error) << "AwbConfig: no AWB priors configured";
+ return -EINVAL;
+ }
}
- if (params.get_child_optional("modes")) {
- for (auto &p : params.get_child("modes")) {
- modes[p.first].Read(p.second);
- if (default_mode == nullptr)
- default_mode = &modes[p.first];
+ if (params.contains("modes")) {
+ for (const auto &[key, value] : params["modes"].asDict()) {
+ ret = modes[key].read(value);
+ if (ret)
+ return ret;
+ if (defaultMode == nullptr)
+ defaultMode = &modes[key];
+ }
+ if (defaultMode == nullptr) {
+ LOG(RPiAwb, Error) << "AwbConfig: no AWB modes configured";
+ return -EINVAL;
}
- if (default_mode == nullptr)
- throw std::runtime_error(
- "AwbConfig: no AWB modes configured");
}
- min_pixels = params.get<double>("min_pixels", 16.0);
- min_G = params.get<uint16_t>("min_G", 32);
- min_regions = params.get<uint32_t>("min_regions", 10);
- delta_limit = params.get<double>("delta_limit", 0.2);
- coarse_step = params.get<double>("coarse_step", 0.2);
- transverse_pos = params.get<double>("transverse_pos", 0.01);
- transverse_neg = params.get<double>("transverse_neg", 0.01);
- if (transverse_pos <= 0 || transverse_neg <= 0)
- throw std::runtime_error(
- "AwbConfig: transverse_pos/neg must be > 0");
- sensitivity_r = params.get<double>("sensitivity_r", 1.0);
- sensitivity_b = params.get<double>("sensitivity_b", 1.0);
+
+ minPixels = params["min_pixels"].get<double>(16.0);
+ minG = params["min_G"].get<uint16_t>(32);
+ minRegions = params["min_regions"].get<uint32_t>(10);
+ deltaLimit = params["delta_limit"].get<double>(0.2);
+ coarseStep = params["coarse_step"].get<double>(0.2);
+ transversePos = params["transverse_pos"].get<double>(0.01);
+ transverseNeg = params["transverse_neg"].get<double>(0.01);
+ if (transversePos <= 0 || transverseNeg <= 0) {
+ LOG(RPiAwb, Error) << "AwbConfig: transverse_pos/neg must be > 0";
+ return -EINVAL;
+ }
+
+ sensitivityR = params["sensitivity_r"].get<double>(1.0);
+ sensitivityB = params["sensitivity_b"].get<double>(1.0);
+
if (bayes) {
- if (ct_r.Empty() || ct_b.Empty() || priors.empty() ||
- default_mode == nullptr) {
+ if (ctR.empty() || ctB.empty() || priors.empty() ||
+ defaultMode == nullptr) {
LOG(RPiAwb, Warning)
<< "Bayesian AWB mis-configured - switch to Grey method";
bayes = false;
}
}
- fast = params.get<int>(
- "fast", bayes); // default to fast for Bayesian, otherwise slow
- whitepoint_r = params.get<double>("whitepoint_r", 0.0);
- whitepoint_b = params.get<double>("whitepoint_b", 0.0);
+ fast = params["fast"].get<int>(bayes); /* default to fast for Bayesian, otherwise slow */
+ whitepointR = params["whitepoint_r"].get<double>(0.0);
+ whitepointB = params["whitepoint_b"].get<double>(0.0);
if (bayes == false)
- sensitivity_r = sensitivity_b =
- 1.0; // nor do sensitivities make any sense
+ sensitivityR = sensitivityB = 1.0; /* nor do sensitivities make any sense */
+ return 0;
}
Awb::Awb(Controller *controller)
: AwbAlgorithm(controller)
{
- async_abort_ = async_start_ = async_started_ = async_finished_ = false;
+ asyncAbort_ = asyncStart_ = asyncStarted_ = asyncFinished_ = false;
mode_ = nullptr;
- manual_r_ = manual_b_ = 0.0;
- first_switch_mode_ = true;
- async_thread_ = std::thread(std::bind(&Awb::asyncFunc, this));
+ manualR_ = manualB_ = 0.0;
+ firstSwitchMode_ = true;
+ asyncThread_ = std::thread(std::bind(&Awb::asyncFunc, this));
}
Awb::~Awb()
{
{
std::lock_guard<std::mutex> lock(mutex_);
- async_abort_ = true;
+ asyncAbort_ = true;
}
- async_signal_.notify_one();
- async_thread_.join();
+ asyncSignal_.notify_one();
+ asyncThread_.join();
}
-char const *Awb::Name() const
+char const *Awb::name() const
{
return NAME;
}
-void Awb::Read(boost::property_tree::ptree const &params)
+int Awb::read(const libcamera::YamlObject &params)
{
- config_.Read(params);
+ return config_.read(params);
}
-void Awb::Initialise()
+void Awb::initialise()
{
- frame_count_ = frame_phase_ = 0;
- // Put something sane into the status that we are filtering towards,
- // just in case the first few frames don't have anything meaningful in
- // them.
- if (!config_.ct_r.Empty() && !config_.ct_b.Empty()) {
- sync_results_.temperature_K = config_.ct_r.Domain().Clip(4000);
- sync_results_.gain_r =
- 1.0 / config_.ct_r.Eval(sync_results_.temperature_K);
- sync_results_.gain_g = 1.0;
- sync_results_.gain_b =
- 1.0 / config_.ct_b.Eval(sync_results_.temperature_K);
+ frameCount_ = framePhase_ = 0;
+ /*
+ * Put something sane into the status that we are filtering towards,
+ * just in case the first few frames don't have anything meaningful in
+ * them.
+ */
+ if (!config_.ctR.empty() && !config_.ctB.empty()) {
+ syncResults_.temperatureK = config_.ctR.domain().clip(4000);
+ syncResults_.gainR = 1.0 / config_.ctR.eval(syncResults_.temperatureK);
+ syncResults_.gainG = 1.0;
+ syncResults_.gainB = 1.0 / config_.ctB.eval(syncResults_.temperatureK);
} else {
- // random values just to stop the world blowing up
- sync_results_.temperature_K = 4500;
- sync_results_.gain_r = sync_results_.gain_g =
- sync_results_.gain_b = 1.0;
+ /* random values just to stop the world blowing up */
+ syncResults_.temperatureK = 4500;
+ syncResults_.gainR = syncResults_.gainG = syncResults_.gainB = 1.0;
}
- prev_sync_results_ = sync_results_;
- async_results_ = sync_results_;
+ prevSyncResults_ = syncResults_;
+ asyncResults_ = syncResults_;
}
-bool Awb::IsPaused() const
+bool Awb::isPaused() const
{
return false;
}
-void Awb::Pause()
+void Awb::pause()
{
- // "Pause" by fixing everything to the most recent values.
- manual_r_ = sync_results_.gain_r = prev_sync_results_.gain_r;
- manual_b_ = sync_results_.gain_b = prev_sync_results_.gain_b;
- sync_results_.gain_g = prev_sync_results_.gain_g;
- sync_results_.temperature_K = prev_sync_results_.temperature_K;
+ /* "Pause" by fixing everything to the most recent values. */
+ manualR_ = syncResults_.gainR = prevSyncResults_.gainR;
+ manualB_ = syncResults_.gainB = prevSyncResults_.gainB;
+ syncResults_.gainG = prevSyncResults_.gainG;
+ syncResults_.temperatureK = prevSyncResults_.temperatureK;
}
-void Awb::Resume()
+void Awb::resume()
{
- manual_r_ = 0.0;
- manual_b_ = 0.0;
+ manualR_ = 0.0;
+ manualB_ = 0.0;
}
-unsigned int Awb::GetConvergenceFrames() const
+unsigned int Awb::getConvergenceFrames() const
{
- // If not in auto mode, there is no convergence
- // to happen, so no need to drop any frames - return zero.
+ /*
+ * If not in auto mode, there is no convergence
+ * to happen, so no need to drop any frames - return zero.
+ */
if (!isAutoEnabled())
return 0;
else
- return config_.convergence_frames;
+ return config_.convergenceFrames;
}
-void Awb::SetMode(std::string const &mode_name)
+void Awb::setMode(std::string const &modeName)
{
- mode_name_ = mode_name;
+ modeName_ = modeName;
}
-void Awb::SetManualGains(double manual_r, double manual_b)
+void Awb::setManualGains(double manualR, double manualB)
{
- // If any of these are 0.0, we swich back to auto.
- manual_r_ = manual_r;
- manual_b_ = manual_b;
- // If not in auto mode, set these values into the sync_results which
- // means that Prepare() will adopt them immediately.
+ /* If any of these are 0.0, we switch back to auto. */
+ manualR_ = manualR;
+ manualB_ = manualB;
+ /*
+ * If not in auto mode, set these values into the syncResults which
+ * means that prepare() will adopt them immediately.
+ */
if (!isAutoEnabled()) {
- sync_results_.gain_r = prev_sync_results_.gain_r = manual_r_;
- sync_results_.gain_g = prev_sync_results_.gain_g = 1.0;
- sync_results_.gain_b = prev_sync_results_.gain_b = manual_b_;
+ syncResults_.gainR = prevSyncResults_.gainR = manualR_;
+ syncResults_.gainG = prevSyncResults_.gainG = 1.0;
+ syncResults_.gainB = prevSyncResults_.gainB = manualB_;
}
}
-void Awb::SwitchMode([[maybe_unused]] CameraMode const &camera_mode,
+void Awb::switchMode([[maybe_unused]] CameraMode const &cameraMode,
Metadata *metadata)
{
- // On the first mode switch we'll have no meaningful colour
- // temperature, so try to dead reckon one if in manual mode.
- if (!isAutoEnabled() && first_switch_mode_ && config_.bayes) {
- Pwl ct_r_inverse = config_.ct_r.Inverse();
- Pwl ct_b_inverse = config_.ct_b.Inverse();
- double ct_r = ct_r_inverse.Eval(ct_r_inverse.Domain().Clip(1 / manual_r_));
- double ct_b = ct_b_inverse.Eval(ct_b_inverse.Domain().Clip(1 / manual_b_));
- prev_sync_results_.temperature_K = (ct_r + ct_b) / 2;
- sync_results_.temperature_K = prev_sync_results_.temperature_K;
+ /*
+ * On the first mode switch we'll have no meaningful colour
+ * temperature, so try to dead reckon one if in manual mode.
+ */
+ if (!isAutoEnabled() && firstSwitchMode_ && config_.bayes) {
+ Pwl ctRInverse = config_.ctR.inverse();
+ Pwl ctBInverse = config_.ctB.inverse();
+ double ctR = ctRInverse.eval(ctRInverse.domain().clip(1 / manualR_));
+ double ctB = ctBInverse.eval(ctBInverse.domain().clip(1 / manualB_));
+ prevSyncResults_.temperatureK = (ctR + ctB) / 2;
+ syncResults_.temperatureK = prevSyncResults_.temperatureK;
}
- // Let other algorithms know the current white balance values.
- metadata->Set("awb.status", prev_sync_results_);
- first_switch_mode_ = false;
+ /* Let other algorithms know the current white balance values. */
+ metadata->set("awb.status", prevSyncResults_);
+ firstSwitchMode_ = false;
}
bool Awb::isAutoEnabled() const
{
- return manual_r_ == 0.0 || manual_b_ == 0.0;
+ return manualR_ == 0.0 || manualB_ == 0.0;
}
void Awb::fetchAsyncResults()
{
LOG(RPiAwb, Debug) << "Fetch AWB results";
- async_finished_ = false;
- async_started_ = false;
- // It's possible manual gains could be set even while the async
- // thread was running, so only copy the results if still in auto mode.
+ asyncFinished_ = false;
+ asyncStarted_ = false;
+ /*
+ * It's possible manual gains could be set even while the async
+ * thread was running, so only copy the results if still in auto mode.
+ */
if (isAutoEnabled())
- sync_results_ = async_results_;
+ syncResults_ = asyncResults_;
}
void Awb::restartAsync(StatisticsPtr &stats, double lux)
{
LOG(RPiAwb, Debug) << "Starting AWB calculation";
- // this makes a new reference which belongs to the asynchronous thread
+ /* this makes a new reference which belongs to the asynchronous thread */
statistics_ = stats;
- // store the mode as it could technically change
- auto m = config_.modes.find(mode_name_);
+ /* store the mode as it could technically change */
+ auto m = config_.modes.find(modeName_);
mode_ = m != config_.modes.end()
? &m->second
- : (mode_ == nullptr ? config_.default_mode : mode_);
+ : (mode_ == nullptr ? config_.defaultMode : mode_);
lux_ = lux;
- frame_phase_ = 0;
- async_started_ = true;
- size_t len = mode_name_.copy(async_results_.mode,
- sizeof(async_results_.mode) - 1);
- async_results_.mode[len] = '\0';
+ framePhase_ = 0;
+ asyncStarted_ = true;
+ size_t len = modeName_.copy(asyncResults_.mode,
+ sizeof(asyncResults_.mode) - 1);
+ asyncResults_.mode[len] = '\0';
{
std::lock_guard<std::mutex> lock(mutex_);
- async_start_ = true;
+ asyncStart_ = true;
}
- async_signal_.notify_one();
+ asyncSignal_.notify_one();
}
-void Awb::Prepare(Metadata *image_metadata)
+void Awb::prepare(Metadata *imageMetadata)
{
- if (frame_count_ < (int)config_.startup_frames)
- frame_count_++;
- double speed = frame_count_ < (int)config_.startup_frames
+ if (frameCount_ < (int)config_.startupFrames)
+ frameCount_++;
+ double speed = frameCount_ < (int)config_.startupFrames
? 1.0
: config_.speed;
LOG(RPiAwb, Debug)
- << "frame_count " << frame_count_ << " speed " << speed;
+ << "frame_count " << frameCount_ << " speed " << speed;
{
std::unique_lock<std::mutex> lock(mutex_);
- if (async_started_ && async_finished_)
+ if (asyncStarted_ && asyncFinished_)
fetchAsyncResults();
}
- // Finally apply IIR filter to results and put into metadata.
- memcpy(prev_sync_results_.mode, sync_results_.mode,
- sizeof(prev_sync_results_.mode));
- prev_sync_results_.temperature_K =
- speed * sync_results_.temperature_K +
- (1.0 - speed) * prev_sync_results_.temperature_K;
- prev_sync_results_.gain_r = speed * sync_results_.gain_r +
- (1.0 - speed) * prev_sync_results_.gain_r;
- prev_sync_results_.gain_g = speed * sync_results_.gain_g +
- (1.0 - speed) * prev_sync_results_.gain_g;
- prev_sync_results_.gain_b = speed * sync_results_.gain_b +
- (1.0 - speed) * prev_sync_results_.gain_b;
- image_metadata->Set("awb.status", prev_sync_results_);
+ /* Finally apply IIR filter to results and put into metadata. */
+ memcpy(prevSyncResults_.mode, syncResults_.mode,
+ sizeof(prevSyncResults_.mode));
+ prevSyncResults_.temperatureK = speed * syncResults_.temperatureK +
+ (1.0 - speed) * prevSyncResults_.temperatureK;
+ prevSyncResults_.gainR = speed * syncResults_.gainR +
+ (1.0 - speed) * prevSyncResults_.gainR;
+ prevSyncResults_.gainG = speed * syncResults_.gainG +
+ (1.0 - speed) * prevSyncResults_.gainG;
+ prevSyncResults_.gainB = speed * syncResults_.gainB +
+ (1.0 - speed) * prevSyncResults_.gainB;
+ imageMetadata->set("awb.status", prevSyncResults_);
LOG(RPiAwb, Debug)
- << "Using AWB gains r " << prev_sync_results_.gain_r << " g "
- << prev_sync_results_.gain_g << " b "
- << prev_sync_results_.gain_b;
+ << "Using AWB gains r " << prevSyncResults_.gainR << " g "
+ << prevSyncResults_.gainG << " b "
+ << prevSyncResults_.gainB;
}
-void Awb::Process(StatisticsPtr &stats, Metadata *image_metadata)
+void Awb::process(StatisticsPtr &stats, Metadata *imageMetadata)
{
- // Count frames since we last poked the async thread.
- if (frame_phase_ < (int)config_.frame_period)
- frame_phase_++;
- LOG(RPiAwb, Debug) << "frame_phase " << frame_phase_;
- // We do not restart the async thread if we're not in auto mode.
+ /* Count frames since we last poked the async thread. */
+ if (framePhase_ < (int)config_.framePeriod)
+ framePhase_++;
+ LOG(RPiAwb, Debug) << "frame_phase " << framePhase_;
+ /* We do not restart the async thread if we're not in auto mode. */
if (isAutoEnabled() &&
- (frame_phase_ >= (int)config_.frame_period ||
- frame_count_ < (int)config_.startup_frames)) {
- // Update any settings and any image metadata that we need.
- struct LuxStatus lux_status = {};
- lux_status.lux = 400; // in case no metadata
- if (image_metadata->Get("lux.status", lux_status) != 0)
+ (framePhase_ >= (int)config_.framePeriod ||
+ frameCount_ < (int)config_.startupFrames)) {
+ /* Update any settings and any image metadata that we need. */
+ struct LuxStatus luxStatus = {};
+ luxStatus.lux = 400; /* in case no metadata */
+ if (imageMetadata->get("lux.status", luxStatus) != 0)
LOG(RPiAwb, Debug) << "No lux metadata found";
- LOG(RPiAwb, Debug) << "Awb lux value is " << lux_status.lux;
+ LOG(RPiAwb, Debug) << "Awb lux value is " << luxStatus.lux;
- if (async_started_ == false)
- restartAsync(stats, lux_status.lux);
+ if (asyncStarted_ == false)
+ restartAsync(stats, luxStatus.lux);
}
}
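prepare() above filters the asynchronously computed AWB results with a first-order IIR: during the startup frames the speed is forced to 1.0 so new values are adopted immediately, and afterwards config_.speed (default 0.05) blends them in gradually. A minimal standalone sketch of that smoothing step, using made-up gain values rather than anything from a real tuning file:

#include <cstdio>

/* One step of the exponential (IIR) smoothing used for the AWB results. */
static double smooth(double prev, double current, double speed)
{
	return speed * current + (1.0 - speed) * prev;
}

int main()
{
	double filteredGainR = 1.0;     /* last filtered red gain */
	const double targetGainR = 2.0; /* value the async thread just produced */
	const double speed = 0.05;      /* the default AwbConfig speed */

	/* Repeated frames converge the filtered gain onto the target. */
	for (int frame = 0; frame < 100; frame++)
		filteredGainR = smooth(filteredGainR, targetGainR, speed);

	printf("filtered red gain after 100 frames: %f\n", filteredGainR);
	return 0;
}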
@@ -338,32 +393,32 @@ void Awb::asyncFunc()
while (true) {
{
std::unique_lock<std::mutex> lock(mutex_);
- async_signal_.wait(lock, [&] {
- return async_start_ || async_abort_;
+ asyncSignal_.wait(lock, [&] {
+ return asyncStart_ || asyncAbort_;
});
- async_start_ = false;
- if (async_abort_)
+ asyncStart_ = false;
+ if (asyncAbort_)
break;
}
doAwb();
{
std::lock_guard<std::mutex> lock(mutex_);
- async_finished_ = true;
+ asyncFinished_ = true;
}
- sync_signal_.notify_one();
+ syncSignal_.notify_one();
}
}
-static void generate_stats(std::vector<Awb::RGB> &zones,
- bcm2835_isp_stats_region *stats, double min_pixels,
- double min_G)
+static void generateStats(std::vector<Awb::RGB> &zones,
+ bcm2835_isp_stats_region *stats, double minPixels,
+ double minG)
{
- for (int i = 0; i < AWB_STATS_SIZE_X * AWB_STATS_SIZE_Y; i++) {
+ for (unsigned int i = 0; i < AwbStatsSizeX * AwbStatsSizeY; i++) {
Awb::RGB zone;
double counted = stats[i].counted;
- if (counted >= min_pixels) {
+ if (counted >= minPixels) {
zone.G = stats[i].g_sum / counted;
- if (zone.G >= min_G) {
+ if (zone.G >= minG) {
zone.R = stats[i].r_sum / counted;
zone.B = stats[i].b_sum / counted;
zones.push_back(zone);
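asyncFunc() in the hunk above is the worker loop that the constructor starts and the destructor shuts down: the main thread sets a start or abort flag under the mutex and notifies the condition variable, and the worker re-checks the flags each time it wakes. A self-contained sketch of that handshake with illustrative names (not the ones from awb.h):

#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>

class Worker
{
public:
	Worker() : start_(false), abort_(false), thread_([this] { run(); }) {}

	~Worker()
	{
		{
			std::lock_guard<std::mutex> lock(mutex_);
			abort_ = true;
		}
		signal_.notify_one();
		thread_.join();
	}

	/* Ask the worker to run one calculation. */
	void kick()
	{
		{
			std::lock_guard<std::mutex> lock(mutex_);
			start_ = true;
		}
		signal_.notify_one();
	}

private:
	void run()
	{
		while (true) {
			{
				std::unique_lock<std::mutex> lock(mutex_);
				signal_.wait(lock, [this] { return start_ || abort_; });
				start_ = false;
				if (abort_)
					break;
			}
			std::cout << "one asynchronous AWB calculation" << std::endl;
		}
	}

	std::mutex mutex_;
	std::condition_variable signal_;
	bool start_;
	bool abort_;
	std::thread thread_;
};

int main()
{
	Worker worker;
	worker.kick();
	return 0;
}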
@@ -375,52 +430,63 @@ static void generate_stats(std::vector<Awb::RGB> &zones,
void Awb::prepareStats()
{
zones_.clear();
- // LSC has already been applied to the stats in this pipeline, so stop
- // any LSC compensation. We also ignore config_.fast in this version.
- generate_stats(zones_, statistics_->awb_stats, config_.min_pixels,
- config_.min_G);
- // we're done with these; we may as well relinquish our hold on the
- // pointer.
+ /*
+ * LSC has already been applied to the stats in this pipeline, so stop
+ * any LSC compensation. We also ignore config_.fast in this version.
+ */
+ generateStats(zones_, statistics_->awb_stats, config_.minPixels,
+ config_.minG);
+ /*
+ * we're done with these; we may as well relinquish our hold on the
+ * pointer.
+ */
statistics_.reset();
- // apply sensitivities, so values appear to come from our "canonical"
- // sensor.
- for (auto &zone : zones_)
- zone.R *= config_.sensitivity_r,
- zone.B *= config_.sensitivity_b;
+ /*
+ * apply sensitivities, so values appear to come from our "canonical"
+ * sensor.
+ */
+ for (auto &zone : zones_) {
+ zone.R *= config_.sensitivityR;
+ zone.B *= config_.sensitivityB;
+ }
}
-double Awb::computeDelta2Sum(double gain_r, double gain_b)
+double Awb::computeDelta2Sum(double gainR, double gainB)
{
- // Compute the sum of the squared colour error (non-greyness) as it
- // appears in the log likelihood equation.
- double delta2_sum = 0;
+ /*
+ * Compute the sum of the squared colour error (non-greyness) as it
+ * appears in the log likelihood equation.
+ */
+ double delta2Sum = 0;
for (auto &z : zones_) {
- double delta_r = gain_r * z.R - 1 - config_.whitepoint_r;
- double delta_b = gain_b * z.B - 1 - config_.whitepoint_b;
- double delta2 = delta_r * delta_r + delta_b * delta_b;
- //LOG(RPiAwb, Debug) << "delta_r " << delta_r << " delta_b " << delta_b << " delta2 " << delta2;
- delta2 = std::min(delta2, config_.delta_limit);
- delta2_sum += delta2;
+ double deltaR = gainR * z.R - 1 - config_.whitepointR;
+ double deltaB = gainB * z.B - 1 - config_.whitepointB;
+ double delta2 = deltaR * deltaR + deltaB * deltaB;
+ /* LOG(RPiAwb, Debug) << "deltaR " << deltaR << " deltaB " << deltaB << " delta2 " << delta2; */
+ delta2 = std::min(delta2, config_.deltaLimit);
+ delta2Sum += delta2;
}
- return delta2_sum;
+ return delta2Sum;
}
Pwl Awb::interpolatePrior()
{
- // Interpolate the prior log likelihood function for our current lux
- // value.
+ /*
+ * Interpolate the prior log likelihood function for our current lux
+ * value.
+ */
if (lux_ <= config_.priors.front().lux)
return config_.priors.front().prior;
else if (lux_ >= config_.priors.back().lux)
return config_.priors.back().prior;
else {
int idx = 0;
- // find which two we lie between
+ /* find which two we lie between */
while (config_.priors[idx + 1].lux < lux_)
idx++;
double lux0 = config_.priors[idx].lux,
lux1 = config_.priors[idx + 1].lux;
- return Pwl::Combine(config_.priors[idx].prior,
+ return Pwl::combine(config_.priors[idx].prior,
config_.priors[idx + 1].prior,
[&](double /*x*/, double y0, double y1) {
return y0 + (y1 - y0) *
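computeDelta2Sum() above scores a candidate pair of gains by how far each zone ends up from grey once they are applied, with deltaLimit clamping any single zone's contribution so strongly coloured zones cannot dominate. A standalone sketch of that score over a few fabricated zone averages (whitepoint offsets left at zero):

#include <algorithm>
#include <cstdio>
#include <vector>

struct Zone {
	double r, b; /* zone averages with G already divided out */
};

static double delta2Sum(const std::vector<Zone> &zones, double gainR,
			double gainB, double deltaLimit)
{
	double sum = 0.0;
	for (const Zone &zone : zones) {
		/* A perfectly grey zone satisfies gain * value == 1 on both axes. */
		double deltaR = gainR * zone.r - 1.0;
		double deltaB = gainB * zone.b - 1.0;
		double delta2 = deltaR * deltaR + deltaB * deltaB;
		sum += std::min(delta2, deltaLimit);
	}
	return sum;
}

int main()
{
	std::vector<Zone> zones = { { 0.45, 0.55 }, { 0.50, 0.50 }, { 0.90, 0.20 } };

	/* Gains of 2.0 make the first two zones nearly grey; the third hits the clamp. */
	printf("score: %f\n", delta2Sum(zones, 2.0, 2.0, 0.2));
	return 0;
}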
@@ -429,62 +495,64 @@ Pwl Awb::interpolatePrior()
}
}
-static double interpolate_quadatric(Pwl::Point const &A, Pwl::Point const &B,
- Pwl::Point const &C)
+static double interpolateQuadatric(Pwl::Point const &a, Pwl::Point const &b,
+ Pwl::Point const &c)
{
- // Given 3 points on a curve, find the extremum of the function in that
- // interval by fitting a quadratic.
+ /*
+ * Given 3 points on a curve, find the extremum of the function in that
+ * interval by fitting a quadratic.
+ */
const double eps = 1e-3;
- Pwl::Point CA = C - A, BA = B - A;
- double denominator = 2 * (BA.y * CA.x - CA.y * BA.x);
+ Pwl::Point ca = c - a, ba = b - a;
+ double denominator = 2 * (ba.y * ca.x - ca.y * ba.x);
if (abs(denominator) > eps) {
- double numerator = BA.y * CA.x * CA.x - CA.y * BA.x * BA.x;
- double result = numerator / denominator + A.x;
- return std::max(A.x, std::min(C.x, result));
+ double numerator = ba.y * ca.x * ca.x - ca.y * ba.x * ba.x;
+ double result = numerator / denominator + a.x;
+ return std::max(a.x, std::min(c.x, result));
}
- // has degenerated to straight line segment
- return A.y < C.y - eps ? A.x : (C.y < A.y - eps ? C.x : B.x);
+ /* has degenerated to straight line segment */
+ return a.y < c.y - eps ? a.x : (c.y < a.y - eps ? c.x : b.x);
}
double Awb::coarseSearch(Pwl const &prior)
{
- points_.clear(); // assume doesn't deallocate memory
- size_t best_point = 0;
- double t = mode_->ct_lo;
- int span_r = 0, span_b = 0;
- // Step down the CT curve evaluating log likelihood.
+ points_.clear(); /* assume doesn't deallocate memory */
+ size_t bestPoint = 0;
+ double t = mode_->ctLo;
+ int spanR = 0, spanB = 0;
+ /* Step down the CT curve evaluating log likelihood. */
while (true) {
- double r = config_.ct_r.Eval(t, &span_r);
- double b = config_.ct_b.Eval(t, &span_b);
- double gain_r = 1 / r, gain_b = 1 / b;
- double delta2_sum = computeDelta2Sum(gain_r, gain_b);
- double prior_log_likelihood =
- prior.Eval(prior.Domain().Clip(t));
- double final_log_likelihood = delta2_sum - prior_log_likelihood;
+ double r = config_.ctR.eval(t, &spanR);
+ double b = config_.ctB.eval(t, &spanB);
+ double gainR = 1 / r, gainB = 1 / b;
+ double delta2Sum = computeDelta2Sum(gainR, gainB);
+ double priorLogLikelihood = prior.eval(prior.domain().clip(t));
+ double finalLogLikelihood = delta2Sum - priorLogLikelihood;
LOG(RPiAwb, Debug)
- << "t: " << t << " gain_r " << gain_r << " gain_b "
- << gain_b << " delta2_sum " << delta2_sum
- << " prior " << prior_log_likelihood << " final "
- << final_log_likelihood;
- points_.push_back(Pwl::Point(t, final_log_likelihood));
- if (points_.back().y < points_[best_point].y)
- best_point = points_.size() - 1;
- if (t == mode_->ct_hi)
+ << "t: " << t << " gain R " << gainR << " gain B "
+ << gainB << " delta2_sum " << delta2Sum
+ << " prior " << priorLogLikelihood << " final "
+ << finalLogLikelihood;
+ points_.push_back(Pwl::Point(t, finalLogLikelihood));
+ if (points_.back().y < points_[bestPoint].y)
+ bestPoint = points_.size() - 1;
+ if (t == mode_->ctHi)
break;
- // for even steps along the r/b curve scale them by the current t
- t = std::min(t + t / 10 * config_.coarse_step,
- mode_->ct_hi);
+ /* for even steps along the r/b curve scale them by the current t */
+ t = std::min(t + t / 10 * config_.coarseStep, mode_->ctHi);
}
- t = points_[best_point].x;
+ t = points_[bestPoint].x;
LOG(RPiAwb, Debug) << "Coarse search found CT " << t;
- // We have the best point of the search, but refine it with a quadratic
- // interpolation around its neighbours.
+ /*
+ * We have the best point of the search, but refine it with a quadratic
+ * interpolation around its neighbours.
+ */
if (points_.size() > 2) {
- unsigned long bp = std::min(best_point, points_.size() - 2);
- best_point = std::max(1UL, bp);
- t = interpolate_quadatric(points_[best_point - 1],
- points_[best_point],
- points_[best_point + 1]);
+ unsigned long bp = std::min(bestPoint, points_.size() - 2);
+ bestPoint = std::max(1UL, bp);
+ t = interpolateQuadatric(points_[bestPoint - 1],
+ points_[bestPoint],
+ points_[bestPoint + 1]);
LOG(RPiAwb, Debug)
<< "After quadratic refinement, coarse search has CT "
<< t;
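interpolateQuadatric() above refines the coarse search result by fitting a parabola through the best sample and its two neighbours and taking the extremum of that parabola, clamped to the sampled interval. A standalone check of the same formula on three hypothetical (CT, log likelihood) samples taken from an exact parabola, so the known minimum at 4600 K should be recovered:

#include <algorithm>
#include <cmath>
#include <cstdio>

struct Point {
	double x, y;
};

/* Extremum of the quadratic through a, b and c, clamped to [a.x, c.x]. */
static double quadraticExtremum(Point a, Point b, Point c)
{
	const double eps = 1e-3;
	Point ca = { c.x - a.x, c.y - a.y };
	Point ba = { b.x - a.x, b.y - a.y };
	double denominator = 2 * (ba.y * ca.x - ca.y * ba.x);
	if (std::abs(denominator) > eps) {
		double numerator = ba.y * ca.x * ca.x - ca.y * ba.x * ba.x;
		return std::max(a.x, std::min(c.x, numerator / denominator + a.x));
	}
	/* Degenerate (nearly straight) case: fall back to the lowest endpoint. */
	return a.y < c.y - eps ? a.x : (c.y < a.y - eps ? c.x : b.x);
}

int main()
{
	/* Samples of y = (x - 4600)^2 / 1e6. */
	Point a = { 4500, 0.01 }, b = { 4600, 0.0 }, c = { 4800, 0.04 };
	printf("refined CT: %f\n", quadraticExtremum(a, b, c));
	return 0;
}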
@@ -494,174 +562,186 @@ double Awb::coarseSearch(Pwl const &prior)
void Awb::fineSearch(double &t, double &r, double &b, Pwl const &prior)
{
- int span_r = -1, span_b = -1;
- config_.ct_r.Eval(t, &span_r);
- config_.ct_b.Eval(t, &span_b);
- double step = t / 10 * config_.coarse_step * 0.1;
+ int spanR = -1, spanB = -1;
+ config_.ctR.eval(t, &spanR);
+ config_.ctB.eval(t, &spanB);
+ double step = t / 10 * config_.coarseStep * 0.1;
int nsteps = 5;
- double r_diff = config_.ct_r.Eval(t + nsteps * step, &span_r) -
- config_.ct_r.Eval(t - nsteps * step, &span_r);
- double b_diff = config_.ct_b.Eval(t + nsteps * step, &span_b) -
- config_.ct_b.Eval(t - nsteps * step, &span_b);
- Pwl::Point transverse(b_diff, -r_diff);
- if (transverse.Len2() < 1e-6)
+ double rDiff = config_.ctR.eval(t + nsteps * step, &spanR) -
+ config_.ctR.eval(t - nsteps * step, &spanR);
+ double bDiff = config_.ctB.eval(t + nsteps * step, &spanB) -
+ config_.ctB.eval(t - nsteps * step, &spanB);
+ Pwl::Point transverse(bDiff, -rDiff);
+ if (transverse.len2() < 1e-6)
return;
- // unit vector orthogonal to the b vs. r function (pointing outwards
- // with r and b increasing)
- transverse = transverse / transverse.Len();
- double best_log_likelihood = 0, best_t = 0, best_r = 0, best_b = 0;
- double transverse_range =
- config_.transverse_neg + config_.transverse_pos;
- const int MAX_NUM_DELTAS = 12;
- // a transverse step approximately every 0.01 r/b units
- int num_deltas = floor(transverse_range * 100 + 0.5) + 1;
- num_deltas = num_deltas < 3 ? 3 :
- (num_deltas > MAX_NUM_DELTAS ? MAX_NUM_DELTAS : num_deltas);
- // Step down CT curve. March a bit further if the transverse range is
- // large.
- nsteps += num_deltas;
+ /*
+ * unit vector orthogonal to the b vs. r function (pointing outwards
+ * with r and b increasing)
+ */
+ transverse = transverse / transverse.len();
+ double bestLogLikelihood = 0, bestT = 0, bestR = 0, bestB = 0;
+ double transverseRange = config_.transverseNeg + config_.transversePos;
+ const int maxNumDeltas = 12;
+ /* a transverse step approximately every 0.01 r/b units */
+ int numDeltas = floor(transverseRange * 100 + 0.5) + 1;
+ numDeltas = numDeltas < 3 ? 3 : (numDeltas > maxNumDeltas ? maxNumDeltas : numDeltas);
+ /*
+ * Step down CT curve. March a bit further if the transverse range is
+ * large.
+ */
+ nsteps += numDeltas;
for (int i = -nsteps; i <= nsteps; i++) {
- double t_test = t + i * step;
- double prior_log_likelihood =
- prior.Eval(prior.Domain().Clip(t_test));
- double r_curve = config_.ct_r.Eval(t_test, &span_r);
- double b_curve = config_.ct_b.Eval(t_test, &span_b);
- // x will be distance off the curve, y the log likelihood there
- Pwl::Point points[MAX_NUM_DELTAS];
- int best_point = 0;
- // Take some measurements transversely *off* the CT curve.
- for (int j = 0; j < num_deltas; j++) {
- points[j].x = -config_.transverse_neg +
- (transverse_range * j) / (num_deltas - 1);
- Pwl::Point rb_test = Pwl::Point(r_curve, b_curve) +
- transverse * points[j].x;
- double r_test = rb_test.x, b_test = rb_test.y;
- double gain_r = 1 / r_test, gain_b = 1 / b_test;
- double delta2_sum = computeDelta2Sum(gain_r, gain_b);
- points[j].y = delta2_sum - prior_log_likelihood;
+ double tTest = t + i * step;
+ double priorLogLikelihood =
+ prior.eval(prior.domain().clip(tTest));
+ double rCurve = config_.ctR.eval(tTest, &spanR);
+ double bCurve = config_.ctB.eval(tTest, &spanB);
+ /* x will be distance off the curve, y the log likelihood there */
+ Pwl::Point points[maxNumDeltas];
+ int bestPoint = 0;
+ /* Take some measurements transversely *off* the CT curve. */
+ for (int j = 0; j < numDeltas; j++) {
+ points[j].x = -config_.transverseNeg +
+ (transverseRange * j) / (numDeltas - 1);
+ Pwl::Point rbTest = Pwl::Point(rCurve, bCurve) +
+ transverse * points[j].x;
+ double rTest = rbTest.x, bTest = rbTest.y;
+ double gainR = 1 / rTest, gainB = 1 / bTest;
+ double delta2Sum = computeDelta2Sum(gainR, gainB);
+ points[j].y = delta2Sum - priorLogLikelihood;
LOG(RPiAwb, Debug)
- << "At t " << t_test << " r " << r_test << " b "
- << b_test << ": " << points[j].y;
- if (points[j].y < points[best_point].y)
- best_point = j;
+ << "At t " << tTest << " r " << rTest << " b "
+ << bTest << ": " << points[j].y;
+ if (points[j].y < points[bestPoint].y)
+ bestPoint = j;
}
- // We have NUM_DELTAS points transversely across the CT curve,
- // now let's do a quadratic interpolation for the best result.
- best_point = std::max(1, std::min(best_point, num_deltas - 2));
- Pwl::Point rb_test =
- Pwl::Point(r_curve, b_curve) +
- transverse *
- interpolate_quadatric(points[best_point - 1],
- points[best_point],
- points[best_point + 1]);
- double r_test = rb_test.x, b_test = rb_test.y;
- double gain_r = 1 / r_test, gain_b = 1 / b_test;
- double delta2_sum = computeDelta2Sum(gain_r, gain_b);
- double final_log_likelihood = delta2_sum - prior_log_likelihood;
+ /*
+ * We have numDeltas points transversely across the CT curve,
+ * now let's do a quadratic interpolation for the best result.
+ */
+ bestPoint = std::max(1, std::min(bestPoint, numDeltas - 2));
+ Pwl::Point rbTest = Pwl::Point(rCurve, bCurve) +
+ transverse * interpolateQuadatric(points[bestPoint - 1],
+ points[bestPoint],
+ points[bestPoint + 1]);
+ double rTest = rbTest.x, bTest = rbTest.y;
+ double gainR = 1 / rTest, gainB = 1 / bTest;
+ double delta2Sum = computeDelta2Sum(gainR, gainB);
+ double finalLogLikelihood = delta2Sum - priorLogLikelihood;
LOG(RPiAwb, Debug)
<< "Finally "
- << t_test << " r " << r_test << " b " << b_test << ": "
- << final_log_likelihood
- << (final_log_likelihood < best_log_likelihood ? " BEST" : "");
- if (best_t == 0 || final_log_likelihood < best_log_likelihood)
- best_log_likelihood = final_log_likelihood,
- best_t = t_test, best_r = r_test, best_b = b_test;
+ << tTest << " r " << rTest << " b " << bTest << ": "
+ << finalLogLikelihood
+ << (finalLogLikelihood < bestLogLikelihood ? " BEST" : "");
+ if (bestT == 0 || finalLogLikelihood < bestLogLikelihood)
+ bestLogLikelihood = finalLogLikelihood,
+ bestT = tTest, bestR = rTest, bestB = bTest;
}
- t = best_t, r = best_r, b = best_b;
+ t = bestT, r = bestR, b = bestB;
LOG(RPiAwb, Debug)
<< "Fine search found t " << t << " r " << r << " b " << b;
}
void Awb::awbBayes()
{
- // May as well divide out G to save computeDelta2Sum from doing it over
- // and over.
+ /*
+ * May as well divide out G to save computeDelta2Sum from doing it over
+ * and over.
+ */
for (auto &z : zones_)
z.R = z.R / (z.G + 1), z.B = z.B / (z.G + 1);
- // Get the current prior, and scale according to how many zones are
- // valid... not entirely sure about this.
+ /*
+ * Get the current prior, and scale according to how many zones are
+ * valid... not entirely sure about this.
+ */
Pwl prior = interpolatePrior();
- prior *= zones_.size() / (double)(AWB_STATS_SIZE_X * AWB_STATS_SIZE_Y);
- prior.Map([](double x, double y) {
+ prior *= zones_.size() / (double)(AwbStatsSizeX * AwbStatsSizeY);
+ prior.map([](double x, double y) {
LOG(RPiAwb, Debug) << "(" << x << "," << y << ")";
});
double t = coarseSearch(prior);
- double r = config_.ct_r.Eval(t);
- double b = config_.ct_b.Eval(t);
+ double r = config_.ctR.eval(t);
+ double b = config_.ctB.eval(t);
LOG(RPiAwb, Debug)
<< "After coarse search: r " << r << " b " << b << " (gains r "
<< 1 / r << " b " << 1 / b << ")";
- // Not entirely sure how to handle the fine search yet. Mostly the
- // estimated CT is already good enough, but the fine search allows us to
- // wander transverely off the CT curve. Under some illuminants, where
- // there may be more or less green light, this may prove beneficial,
- // though I probably need more real datasets before deciding exactly how
- // this should be controlled and tuned.
+ /*
+ * Not entirely sure how to handle the fine search yet. Mostly the
+ * estimated CT is already good enough, but the fine search allows us to
+ * wander transversely off the CT curve. Under some illuminants, where
+ * there may be more or less green light, this may prove beneficial,
+ * though I probably need more real datasets before deciding exactly how
+ * this should be controlled and tuned.
+ */
fineSearch(t, r, b, prior);
LOG(RPiAwb, Debug)
<< "After fine search: r " << r << " b " << b << " (gains r "
<< 1 / r << " b " << 1 / b << ")";
- // Write results out for the main thread to pick up. Remember to adjust
- // the gains from the ones that the "canonical sensor" would require to
- // the ones needed by *this* sensor.
- async_results_.temperature_K = t;
- async_results_.gain_r = 1.0 / r * config_.sensitivity_r;
- async_results_.gain_g = 1.0;
- async_results_.gain_b = 1.0 / b * config_.sensitivity_b;
+ /*
+ * Write results out for the main thread to pick up. Remember to adjust
+ * the gains from the ones that the "canonical sensor" would require to
+ * the ones needed by *this* sensor.
+ */
+ asyncResults_.temperatureK = t;
+ asyncResults_.gainR = 1.0 / r * config_.sensitivityR;
+ asyncResults_.gainG = 1.0;
+ asyncResults_.gainB = 1.0 / b * config_.sensitivityB;
}
void Awb::awbGrey()
{
LOG(RPiAwb, Debug) << "Grey world AWB";
- // Make a separate list of the derivatives for each of red and blue, so
- // that we can sort them to exclude the extreme gains. We could
- // consider some variations, such as normalising all the zones first, or
- // doing an L2 average etc.
- std::vector<RGB> &derivs_R(zones_);
- std::vector<RGB> derivs_B(derivs_R);
- std::sort(derivs_R.begin(), derivs_R.end(),
+ /*
+ * Make a separate list of the derivatives for each of red and blue, so
+ * that we can sort them to exclude the extreme gains. We could
+ * consider some variations, such as normalising all the zones first, or
+ * doing an L2 average etc.
+ */
+ std::vector<RGB> &derivsR(zones_);
+ std::vector<RGB> derivsB(derivsR);
+ std::sort(derivsR.begin(), derivsR.end(),
[](RGB const &a, RGB const &b) {
return a.G * b.R < b.G * a.R;
});
- std::sort(derivs_B.begin(), derivs_B.end(),
+ std::sort(derivsB.begin(), derivsB.end(),
[](RGB const &a, RGB const &b) {
return a.G * b.B < b.G * a.B;
});
- // Average the middle half of the values.
- int discard = derivs_R.size() / 4;
- RGB sum_R(0, 0, 0), sum_B(0, 0, 0);
- for (auto ri = derivs_R.begin() + discard,
- bi = derivs_B.begin() + discard;
- ri != derivs_R.end() - discard; ri++, bi++)
- sum_R += *ri, sum_B += *bi;
- double gain_r = sum_R.G / (sum_R.R + 1),
- gain_b = sum_B.G / (sum_B.B + 1);
- async_results_.temperature_K = 4500; // don't know what it is
- async_results_.gain_r = gain_r;
- async_results_.gain_g = 1.0;
- async_results_.gain_b = gain_b;
+ /* Average the middle half of the values. */
+ int discard = derivsR.size() / 4;
+ RGB sumR(0, 0, 0), sumB(0, 0, 0);
+ for (auto ri = derivsR.begin() + discard,
+ bi = derivsB.begin() + discard;
+ ri != derivsR.end() - discard; ri++, bi++)
+ sumR += *ri, sumB += *bi;
+ double gainR = sumR.G / (sumR.R + 1),
+ gainB = sumB.G / (sumB.B + 1);
+ asyncResults_.temperatureK = 4500; /* don't know what it is */
+ asyncResults_.gainR = gainR;
+ asyncResults_.gainG = 1.0;
+ asyncResults_.gainB = gainB;
}
void Awb::doAwb()
{
prepareStats();
LOG(RPiAwb, Debug) << "Valid zones: " << zones_.size();
- if (zones_.size() > config_.min_regions) {
+ if (zones_.size() > config_.minRegions) {
if (config_.bayes)
awbBayes();
else
awbGrey();
LOG(RPiAwb, Debug)
<< "CT found is "
- << async_results_.temperature_K
- << " with gains r " << async_results_.gain_r
- << " and b " << async_results_.gain_b;
+ << asyncResults_.temperatureK
+ << " with gains r " << asyncResults_.gainR
+ << " and b " << asyncResults_.gainB;
}
}
-// Register algorithm with the system.
-static Algorithm *Create(Controller *controller)
+/* Register algorithm with the system. */
+static Algorithm *create(Controller *controller)
{
return (Algorithm *)new Awb(controller);
}
-static RegisterAlgorithm reg(NAME, &Create);
+static RegisterAlgorithm reg(NAME, &create);
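awbGrey() above implements the fallback grey-world estimate: it sorts the zones by their red and blue derivatives, drops the top and bottom quarters to reject extremes, and derives the gains from the summed middle half. A minimal standalone sketch of that trimmed estimate over fabricated zone sums:

#include <algorithm>
#include <cstdio>
#include <vector>

struct RGB {
	double R, G, B;
};

int main()
{
	/* Fabricated per-zone sums; the real code uses the ISP AWB statistics. */
	std::vector<RGB> zonesR = {
		{ 100, 180, 90 }, { 120, 200, 110 }, { 80, 170, 100 }, { 300, 190, 60 }
	};
	std::vector<RGB> zonesB = zonesR;

	/* Order by R/G and B/G so the extreme zones can be discarded. */
	std::sort(zonesR.begin(), zonesR.end(),
		  [](const RGB &a, const RGB &b) { return a.G * b.R < b.G * a.R; });
	std::sort(zonesB.begin(), zonesB.end(),
		  [](const RGB &a, const RGB &b) { return a.G * b.B < b.G * a.B; });

	/* Average the middle half (with four zones, drop one at each end). */
	size_t discard = zonesR.size() / 4;
	RGB sumR = { 0, 0, 0 }, sumB = { 0, 0, 0 };
	for (size_t i = discard; i < zonesR.size() - discard; i++) {
		sumR.R += zonesR[i].R;
		sumR.G += zonesR[i].G;
		sumB.B += zonesB[i].B;
		sumB.G += zonesB[i].G;
	}

	printf("gain R %f gain B %f\n",
	       sumR.G / (sumR.R + 1), sumB.G / (sumB.B + 1));
	return 0;
}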
diff --git a/src/ipa/raspberrypi/controller/rpi/awb.h b/src/ipa/raspberrypi/controller/rpi/awb.h
new file mode 100644
index 00000000..cb4cfd1b
--- /dev/null
+++ b/src/ipa/raspberrypi/controller/rpi/awb.h
@@ -0,0 +1,191 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * awb.h - AWB control algorithm
+ */
+#pragma once
+
+#include <mutex>
+#include <condition_variable>
+#include <thread>
+
+#include "../awb_algorithm.h"
+#include "../pwl.h"
+#include "../awb_status.h"
+
+namespace RPiController {
+
+/* Control algorithm to perform AWB calculations. */
+
+struct AwbMode {
+ int read(const libcamera::YamlObject &params);
+ double ctLo; /* low CT value for search */
+ double ctHi; /* high CT value for search */
+};
+
+struct AwbPrior {
+ int read(const libcamera::YamlObject &params);
+ double lux; /* lux level */
+ Pwl prior; /* maps CT to prior log likelihood for this lux level */
+};
+
+struct AwbConfig {
+ AwbConfig() : defaultMode(nullptr) {}
+ int read(const libcamera::YamlObject &params);
+ /* Only repeat the AWB calculation every "this many" frames */
+ uint16_t framePeriod;
+ /* number of initial frames for which speed taken as 1.0 (maximum) */
+ uint16_t startupFrames;
+ unsigned int convergenceFrames; /* approx number of frames to converge */
+ double speed; /* IIR filter speed applied to algorithm results */
+ bool fast; /* "fast" mode uses a 16x16 rather than 32x32 grid */
+ Pwl ctR; /* function maps CT to r (= R/G) */
+ Pwl ctB; /* function maps CT to b (= B/G) */
+ /* table of illuminant priors at different lux levels */
+ std::vector<AwbPrior> priors;
+ /* AWB "modes" (determines the search range) */
+ std::map<std::string, AwbMode> modes;
+ AwbMode *defaultMode; /* mode used if no mode selected */
+ /*
+ * minimum proportion of pixels counted within AWB region for it to be
+ * "useful"
+ */
+ double minPixels;
+ /* minimum G value of those pixels, to be regarded as "useful" */
+ uint16_t minG;
+ /*
+ * number of AWB regions that must be "useful" in order to do the AWB
+ * calculation
+ */
+ uint32_t minRegions;
+ /* clamp on colour error term (so as not to penalise non-grey excessively) */
+ double deltaLimit;
+ /* step size control in coarse search */
+ double coarseStep;
+ /* how far to wander off CT curve towards "more purple" */
+ double transversePos;
+ /* how far to wander off CT curve towards "more green" */
+ double transverseNeg;
+ /*
+ * red sensitivity ratio (set to canonical sensor's R/G divided by this
+ * sensor's R/G)
+ */
+ double sensitivityR;
+ /*
+ * blue sensitivity ratio (set to canonical sensor's B/G divided by this
+ * sensor's B/G)
+ */
+ double sensitivityB;
+ /* The whitepoint (which we normally "aim" for) can be moved. */
+ double whitepointR;
+ double whitepointB;
+ bool bayes; /* use Bayesian algorithm */
+};
+
+class Awb : public AwbAlgorithm
+{
+public:
+ Awb(Controller *controller = NULL);
+ ~Awb();
+ char const *name() const override;
+ void initialise() override;
+ int read(const libcamera::YamlObject &params) override;
+ /* AWB handles "pausing" for itself. */
+ bool isPaused() const override;
+ void pause() override;
+ void resume() override;
+ unsigned int getConvergenceFrames() const override;
+ void setMode(std::string const &name) override;
+ void setManualGains(double manualR, double manualB) override;
+ void switchMode(CameraMode const &cameraMode, Metadata *metadata) override;
+ void prepare(Metadata *imageMetadata) override;
+ void process(StatisticsPtr &stats, Metadata *imageMetadata) override;
+ struct RGB {
+ RGB(double r = 0, double g = 0, double b = 0)
+ : R(r), G(g), B(b)
+ {
+ }
+ double R, G, B;
+ RGB &operator+=(RGB const &other)
+ {
+ R += other.R, G += other.G, B += other.B;
+ return *this;
+ }
+ };
+
+private:
+ bool isAutoEnabled() const;
+ /* configuration is read-only, and available to both threads */
+ AwbConfig config_;
+ std::thread asyncThread_;
+ void asyncFunc(); /* asynchronous thread function */
+ std::mutex mutex_;
+ /* condvar for async thread to wait on */
+ std::condition_variable asyncSignal_;
+ /* condvar for synchronous thread to wait on */
+ std::condition_variable syncSignal_;
+ /* for sync thread to check if async thread finished (requires mutex) */
+ bool asyncFinished_;
+ /* for async thread to check if it's been told to run (requires mutex) */
+ bool asyncStart_;
+ /* for async thread to check if it's been told to quit (requires mutex) */
+ bool asyncAbort_;
+
+ /*
+ * The following are only for the synchronous thread to use:
+ * for sync thread to note it has asked async thread to run
+ */
+ bool asyncStarted_;
+ /* counts up to framePeriod before restarting the async thread */
+ int framePhase_;
+ int frameCount_; /* counts up to startup_frames */
+ AwbStatus syncResults_;
+ AwbStatus prevSyncResults_;
+ std::string modeName_;
+ /*
+ * The following are for the asynchronous thread to use, though the main
+ * thread can set/reset them if the async thread is known to be idle:
+ */
+ void restartAsync(StatisticsPtr &stats, double lux);
+ /* copy out the results from the async thread so that it can be restarted */
+ void fetchAsyncResults();
+ StatisticsPtr statistics_;
+ AwbMode *mode_;
+ double lux_;
+ AwbStatus asyncResults_;
+ void doAwb();
+ void awbBayes();
+ void awbGrey();
+ void prepareStats();
+ double computeDelta2Sum(double gainR, double gainB);
+ Pwl interpolatePrior();
+ double coarseSearch(Pwl const &prior);
+ void fineSearch(double &t, double &r, double &b, Pwl const &prior);
+ std::vector<RGB> zones_;
+ std::vector<Pwl::Point> points_;
+ /* manual r setting */
+ double manualR_;
+ /* manual b setting */
+ double manualB_;
+ bool firstSwitchMode_; /* is this the first call to switchMode? */
+};
+
+static inline Awb::RGB operator+(Awb::RGB const &a, Awb::RGB const &b)
+{
+ return Awb::RGB(a.R + b.R, a.G + b.G, a.B + b.B);
+}
+static inline Awb::RGB operator-(Awb::RGB const &a, Awb::RGB const &b)
+{
+ return Awb::RGB(a.R - b.R, a.G - b.G, a.B - b.B);
+}
+static inline Awb::RGB operator*(double d, Awb::RGB const &rgb)
+{
+ return Awb::RGB(d * rgb.R, d * rgb.G, d * rgb.B);
+}
+static inline Awb::RGB operator*(Awb::RGB const &rgb, double d)
+{
+ return d * rgb;
+}
+
+} /* namespace RPiController */
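The priors table declared above feeds Awb::interpolatePrior(), which clamps below the first and above the last configured lux level and otherwise blends the two bracketing priors linearly in lux. A standalone sketch of that blending for a single CT sample, with fabricated lux levels and log likelihood values:

#include <cstdio>
#include <vector>

struct PriorSample {
	double lux;
	double logLikelihood; /* stand-in for the Pwl prior evaluated at one CT */
};

static double interpolatePrior(const std::vector<PriorSample> &priors, double lux)
{
	if (lux <= priors.front().lux)
		return priors.front().logLikelihood;
	if (lux >= priors.back().lux)
		return priors.back().logLikelihood;

	/* Find the two entries bracketing the current lux level. */
	size_t idx = 0;
	while (priors[idx + 1].lux < lux)
		idx++;

	double lux0 = priors[idx].lux, lux1 = priors[idx + 1].lux;
	double y0 = priors[idx].logLikelihood, y1 = priors[idx + 1].logLikelihood;
	return y0 + (y1 - y0) * (lux - lux0) / (lux1 - lux0);
}

int main()
{
	std::vector<PriorSample> priors = { { 100, 2.0 }, { 1000, 0.5 }, { 10000, 0.1 } };
	printf("prior at 400 lux: %f\n", interpolatePrior(priors, 400.0));
	return 0;
}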
diff --git a/src/ipa/raspberrypi/controller/rpi/awb.hpp b/src/ipa/raspberrypi/controller/rpi/awb.hpp
deleted file mode 100644
index ac3dca6f..00000000
--- a/src/ipa/raspberrypi/controller/rpi/awb.hpp
+++ /dev/null
@@ -1,179 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
- *
- * awb.hpp - AWB control algorithm
- */
-#pragma once
-
-#include <mutex>
-#include <condition_variable>
-#include <thread>
-
-#include "../awb_algorithm.hpp"
-#include "../pwl.hpp"
-#include "../awb_status.h"
-
-namespace RPiController {
-
-// Control algorithm to perform AWB calculations.
-
-struct AwbMode {
- void Read(boost::property_tree::ptree const &params);
- double ct_lo; // low CT value for search
- double ct_hi; // high CT value for search
-};
-
-struct AwbPrior {
- void Read(boost::property_tree::ptree const &params);
- double lux; // lux level
- Pwl prior; // maps CT to prior log likelihood for this lux level
-};
-
-struct AwbConfig {
- AwbConfig() : default_mode(nullptr) {}
- void Read(boost::property_tree::ptree const &params);
- // Only repeat the AWB calculation every "this many" frames
- uint16_t frame_period;
- // number of initial frames for which speed taken as 1.0 (maximum)
- uint16_t startup_frames;
- unsigned int convergence_frames; // approx number of frames to converge
- double speed; // IIR filter speed applied to algorithm results
- bool fast; // "fast" mode uses a 16x16 rather than 32x32 grid
- Pwl ct_r; // function maps CT to r (= R/G)
- Pwl ct_b; // function maps CT to b (= B/G)
- // table of illuminant priors at different lux levels
- std::vector<AwbPrior> priors;
- // AWB "modes" (determines the search range)
- std::map<std::string, AwbMode> modes;
- AwbMode *default_mode; // mode used if no mode selected
- // minimum proportion of pixels counted within AWB region for it to be
- // "useful"
- double min_pixels;
- // minimum G value of those pixels, to be regarded a "useful"
- uint16_t min_G;
- // number of AWB regions that must be "useful" in order to do the AWB
- // calculation
- uint32_t min_regions;
- // clamp on colour error term (so as not to penalise non-grey excessively)
- double delta_limit;
- // step size control in coarse search
- double coarse_step;
- // how far to wander off CT curve towards "more purple"
- double transverse_pos;
- // how far to wander off CT curve towards "more green"
- double transverse_neg;
- // red sensitivity ratio (set to canonical sensor's R/G divided by this
- // sensor's R/G)
- double sensitivity_r;
- // blue sensitivity ratio (set to canonical sensor's B/G divided by this
- // sensor's B/G)
- double sensitivity_b;
- // The whitepoint (which we normally "aim" for) can be moved.
- double whitepoint_r;
- double whitepoint_b;
- bool bayes; // use Bayesian algorithm
-};
-
-class Awb : public AwbAlgorithm
-{
-public:
- Awb(Controller *controller = NULL);
- ~Awb();
- char const *Name() const override;
- void Initialise() override;
- void Read(boost::property_tree::ptree const &params) override;
- // AWB handles "pausing" for itself.
- bool IsPaused() const override;
- void Pause() override;
- void Resume() override;
- unsigned int GetConvergenceFrames() const override;
- void SetMode(std::string const &name) override;
- void SetManualGains(double manual_r, double manual_b) override;
- void SwitchMode(CameraMode const &camera_mode, Metadata *metadata) override;
- void Prepare(Metadata *image_metadata) override;
- void Process(StatisticsPtr &stats, Metadata *image_metadata) override;
- struct RGB {
- RGB(double _R = 0, double _G = 0, double _B = 0)
- : R(_R), G(_G), B(_B)
- {
- }
- double R, G, B;
- RGB &operator+=(RGB const &other)
- {
- R += other.R, G += other.G, B += other.B;
- return *this;
- }
- };
-
-private:
- bool isAutoEnabled() const;
- // configuration is read-only, and available to both threads
- AwbConfig config_;
- std::thread async_thread_;
- void asyncFunc(); // asynchronous thread function
- std::mutex mutex_;
- // condvar for async thread to wait on
- std::condition_variable async_signal_;
- // condvar for synchronous thread to wait on
- std::condition_variable sync_signal_;
- // for sync thread to check if async thread finished (requires mutex)
- bool async_finished_;
- // for async thread to check if it's been told to run (requires mutex)
- bool async_start_;
- // for async thread to check if it's been told to quit (requires mutex)
- bool async_abort_;
-
- // The following are only for the synchronous thread to use:
- // for sync thread to note its has asked async thread to run
- bool async_started_;
- // counts up to frame_period before restarting the async thread
- int frame_phase_;
- int frame_count_; // counts up to startup_frames
- AwbStatus sync_results_;
- AwbStatus prev_sync_results_;
- std::string mode_name_;
- // The following are for the asynchronous thread to use, though the main
- // thread can set/reset them if the async thread is known to be idle:
- void restartAsync(StatisticsPtr &stats, double lux);
- // copy out the results from the async thread so that it can be restarted
- void fetchAsyncResults();
- StatisticsPtr statistics_;
- AwbMode *mode_;
- double lux_;
- AwbStatus async_results_;
- void doAwb();
- void awbBayes();
- void awbGrey();
- void prepareStats();
- double computeDelta2Sum(double gain_r, double gain_b);
- Pwl interpolatePrior();
- double coarseSearch(Pwl const &prior);
- void fineSearch(double &t, double &r, double &b, Pwl const &prior);
- std::vector<RGB> zones_;
- std::vector<Pwl::Point> points_;
- // manual r setting
- double manual_r_;
- // manual b setting
- double manual_b_;
- bool first_switch_mode_; // is this the first call to SwitchMode?
-};
-
-static inline Awb::RGB operator+(Awb::RGB const &a, Awb::RGB const &b)
-{
- return Awb::RGB(a.R + b.R, a.G + b.G, a.B + b.B);
-}
-static inline Awb::RGB operator-(Awb::RGB const &a, Awb::RGB const &b)
-{
- return Awb::RGB(a.R - b.R, a.G - b.G, a.B - b.B);
-}
-static inline Awb::RGB operator*(double d, Awb::RGB const &rgb)
-{
- return Awb::RGB(d * rgb.R, d * rgb.G, d * rgb.B);
-}
-static inline Awb::RGB operator*(Awb::RGB const &rgb, double d)
-{
- return d * rgb;
-}
-
-} // namespace RPiController
diff --git a/src/ipa/raspberrypi/controller/rpi/black_level.cpp b/src/ipa/raspberrypi/controller/rpi/black_level.cpp
index 6b3497f1..85baec3f 100644
--- a/src/ipa/raspberrypi/controller/rpi/black_level.cpp
+++ b/src/ipa/raspberrypi/controller/rpi/black_level.cpp
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
+ * Copyright (C) 2019, Raspberry Pi Ltd
*
* black_level.cpp - black level control algorithm
*/
@@ -12,7 +12,7 @@
#include "../black_level_status.h"
-#include "black_level.hpp"
+#include "black_level.h"
using namespace RPiController;
using namespace libcamera;
@@ -26,38 +26,41 @@ BlackLevel::BlackLevel(Controller *controller)
{
}
-char const *BlackLevel::Name() const
+char const *BlackLevel::name() const
{
return NAME;
}
-void BlackLevel::Read(boost::property_tree::ptree const &params)
+int BlackLevel::read(const libcamera::YamlObject &params)
{
- uint16_t black_level = params.get<uint16_t>(
- "black_level", 4096); // 64 in 10 bits scaled to 16 bits
- black_level_r_ = params.get<uint16_t>("black_level_r", black_level);
- black_level_g_ = params.get<uint16_t>("black_level_g", black_level);
- black_level_b_ = params.get<uint16_t>("black_level_b", black_level);
+ /* 64 in 10 bits scaled to 16 bits */
+ uint16_t blackLevel = params["black_level"].get<uint16_t>(4096);
+ blackLevelR_ = params["black_level_r"].get<uint16_t>(blackLevel);
+ blackLevelG_ = params["black_level_g"].get<uint16_t>(blackLevel);
+ blackLevelB_ = params["black_level_b"].get<uint16_t>(blackLevel);
LOG(RPiBlackLevel, Debug)
- << " Read black levels red " << black_level_r_
- << " green " << black_level_g_
- << " blue " << black_level_b_;
+ << " Read black levels red " << blackLevelR_
+ << " green " << blackLevelG_
+ << " blue " << blackLevelB_;
+ return 0;
}
-void BlackLevel::Prepare(Metadata *image_metadata)
+void BlackLevel::prepare(Metadata *imageMetadata)
{
- // Possibly we should think about doing this in a switch_mode or
- // something?
+ /*
+ * Possibly we should think about doing this in a switchMode or
+ * something?
+ */
struct BlackLevelStatus status;
- status.black_level_r = black_level_r_;
- status.black_level_g = black_level_g_;
- status.black_level_b = black_level_b_;
- image_metadata->Set("black_level.status", status);
+ status.blackLevelR = blackLevelR_;
+ status.blackLevelG = blackLevelG_;
+ status.blackLevelB = blackLevelB_;
+ imageMetadata->set("black_level.status", status);
}
-// Register algorithm with the system.
-static Algorithm *Create(Controller *controller)
+/* Register algorithm with the system. */
+static Algorithm *create(Controller *controller)
{
return new BlackLevel(controller);
}
-static RegisterAlgorithm reg(NAME, &Create);
+static RegisterAlgorithm reg(NAME, &create);
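The default of 4096 in the hunk above is the "64 in 10 bits scaled to 16 bits" mentioned in the comment: a 10-bit black level is multiplied by 2^(16 - 10) = 64 to express it in the controller's 16-bit range. A one-line illustration of that scaling, assuming the tuning value is given in 10-bit units:

#include <cstdint>
#include <cstdio>

/* Scale a 10-bit black level into the 16-bit range used by the controller. */
static uint16_t blackLevel16(uint16_t level10)
{
	return level10 << 6; /* multiply by 2^6 = 64 */
}

int main()
{
	printf("%u\n", (unsigned)blackLevel16(64)); /* prints 4096 */
	return 0;
}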
diff --git a/src/ipa/raspberrypi/controller/rpi/black_level.h b/src/ipa/raspberrypi/controller/rpi/black_level.h
new file mode 100644
index 00000000..2403f7f7
--- /dev/null
+++ b/src/ipa/raspberrypi/controller/rpi/black_level.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * black_level.h - black level control algorithm
+ */
+#pragma once
+
+#include "../algorithm.h"
+#include "../black_level_status.h"
+
+/* This is our implementation of the "black level algorithm". */
+
+namespace RPiController {
+
+class BlackLevel : public Algorithm
+{
+public:
+ BlackLevel(Controller *controller);
+ char const *name() const override;
+ int read(const libcamera::YamlObject &params) override;
+ void prepare(Metadata *imageMetadata) override;
+
+private:
+ double blackLevelR_;
+ double blackLevelG_;
+ double blackLevelB_;
+};
+
+} /* namespace RPiController */
diff --git a/src/ipa/raspberrypi/controller/rpi/black_level.hpp b/src/ipa/raspberrypi/controller/rpi/black_level.hpp
deleted file mode 100644
index 65ec4d0e..00000000
--- a/src/ipa/raspberrypi/controller/rpi/black_level.hpp
+++ /dev/null
@@ -1,30 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
- *
- * black_level.hpp - black level control algorithm
- */
-#pragma once
-
-#include "../algorithm.hpp"
-#include "../black_level_status.h"
-
-// This is our implementation of the "black level algorithm".
-
-namespace RPiController {
-
-class BlackLevel : public Algorithm
-{
-public:
- BlackLevel(Controller *controller);
- char const *Name() const override;
- void Read(boost::property_tree::ptree const &params) override;
- void Prepare(Metadata *image_metadata) override;
-
-private:
- double black_level_r_;
- double black_level_g_;
- double black_level_b_;
-};
-
-} // namespace RPiController
diff --git a/src/ipa/raspberrypi/controller/rpi/ccm.cpp b/src/ipa/raspberrypi/controller/rpi/ccm.cpp
index 821a4c7c..2e2e6664 100644
--- a/src/ipa/raspberrypi/controller/rpi/ccm.cpp
+++ b/src/ipa/raspberrypi/controller/rpi/ccm.cpp
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
+ * Copyright (C) 2019, Raspberry Pi Ltd
*
* ccm.cpp - CCM (colour correction matrix) control algorithm
*/
@@ -10,20 +10,22 @@
#include "../awb_status.h"
#include "../ccm_status.h"
#include "../lux_status.h"
-#include "../metadata.hpp"
+#include "../metadata.h"
-#include "ccm.hpp"
+#include "ccm.h"
using namespace RPiController;
using namespace libcamera;
LOG_DEFINE_CATEGORY(RPiCcm)
-// This algorithm selects a CCM (Colour Correction Matrix) according to the
-// colour temperature estimated by AWB (interpolating between known matricies as
-// necessary). Additionally the amount of colour saturation can be controlled
-// both according to the current estimated lux level and according to a
-// saturation setting that is exposed to applications.
+/*
+ * This algorithm selects a CCM (Colour Correction Matrix) according to the
+ * colour temperature estimated by AWB (interpolating between known matrices as
+ * necessary). Additionally the amount of colour saturation can be controlled
+ * both according to the current estimated lux level and according to a
+ * saturation setting that is exposed to applications.
+ */
#define NAME "rpi.ccm"
@@ -37,63 +39,91 @@ Matrix::Matrix(double m0, double m1, double m2, double m3, double m4, double m5,
m[0][0] = m0, m[0][1] = m1, m[0][2] = m2, m[1][0] = m3, m[1][1] = m4,
m[1][2] = m5, m[2][0] = m6, m[2][1] = m7, m[2][2] = m8;
}
-void Matrix::Read(boost::property_tree::ptree const &params)
+int Matrix::read(const libcamera::YamlObject &params)
{
double *ptr = (double *)m;
- int n = 0;
- for (auto it = params.begin(); it != params.end(); it++) {
- if (n++ == 9)
- throw std::runtime_error("Ccm: too many values in CCM");
- *ptr++ = it->second.get_value<double>();
+
+ if (params.size() != 9) {
+ LOG(RPiCcm, Error) << "Wrong number of values in CCM";
+ return -EINVAL;
+ }
+
+ for (const auto &param : params.asList()) {
+ auto value = param.get<double>();
+ if (!value)
+ return -EINVAL;
+ *ptr++ = *value;
}
- if (n < 9)
- throw std::runtime_error("Ccm: too few values in CCM");
+
+ return 0;
}
Ccm::Ccm(Controller *controller)
: CcmAlgorithm(controller), saturation_(1.0) {}
-char const *Ccm::Name() const
+char const *Ccm::name() const
{
return NAME;
}
-void Ccm::Read(boost::property_tree::ptree const &params)
+int Ccm::read(const libcamera::YamlObject &params)
{
- if (params.get_child_optional("saturation"))
- config_.saturation.Read(params.get_child("saturation"));
- for (auto &p : params.get_child("ccms")) {
- CtCcm ct_ccm;
- ct_ccm.ct = p.second.get<double>("ct");
- ct_ccm.ccm.Read(p.second.get_child("ccm"));
- if (!config_.ccms.empty() &&
- ct_ccm.ct <= config_.ccms.back().ct)
- throw std::runtime_error(
- "Ccm: CCM not in increasing colour temperature order");
- config_.ccms.push_back(std::move(ct_ccm));
+ int ret;
+
+ if (params.contains("saturation")) {
+ ret = config_.saturation.read(params["saturation"]);
+ if (ret)
+ return ret;
}
- if (config_.ccms.empty())
- throw std::runtime_error("Ccm: no CCMs specified");
+
+ for (auto &p : params["ccms"].asList()) {
+ auto value = p["ct"].get<double>();
+ if (!value)
+ return -EINVAL;
+
+ CtCcm ctCcm;
+ ctCcm.ct = *value;
+ ret = ctCcm.ccm.read(p["ccm"]);
+ if (ret)
+ return ret;
+
+ if (!config_.ccms.empty() && ctCcm.ct <= config_.ccms.back().ct) {
+ LOG(RPiCcm, Error)
+ << "CCM not in increasing colour temperature order";
+ return -EINVAL;
+ }
+
+ config_.ccms.push_back(std::move(ctCcm));
+ }
+
+ if (config_.ccms.empty()) {
+ LOG(RPiCcm, Error) << "No CCMs specified";
+ return -EINVAL;
+ }
+
+ return 0;
}
-void Ccm::SetSaturation(double saturation)
+void Ccm::setSaturation(double saturation)
{
saturation_ = saturation;
}
-void Ccm::Initialise() {}
+void Ccm::initialise()
+{
+}
template<typename T>
-static bool get_locked(Metadata *metadata, std::string const &tag, T &value)
+static bool getLocked(Metadata *metadata, std::string const &tag, T &value)
{
- T *ptr = metadata->GetLocked<T>(tag);
+ T *ptr = metadata->getLocked<T>(tag);
if (ptr == nullptr)
return false;
value = *ptr;
return true;
}
-Matrix calculate_ccm(std::vector<CtCcm> const &ccms, double ct)
+Matrix calculateCcm(std::vector<CtCcm> const &ccms, double ct)
{
if (ct <= ccms.front().ct)
return ccms.front().ccm;
@@ -109,7 +139,7 @@ Matrix calculate_ccm(std::vector<CtCcm> const &ccms, double ct)
}
}
-Matrix apply_saturation(Matrix const &ccm, double saturation)
+Matrix applySaturation(Matrix const &ccm, double saturation)
{
Matrix RGB2Y(0.299, 0.587, 0.114, -0.169, -0.331, 0.500, 0.500, -0.419,
-0.081);
@@ -119,51 +149,51 @@ Matrix apply_saturation(Matrix const &ccm, double saturation)
return Y2RGB * S * RGB2Y * ccm;
}
-void Ccm::Prepare(Metadata *image_metadata)
+void Ccm::prepare(Metadata *imageMetadata)
{
- bool awb_ok = false, lux_ok = false;
+ bool awbOk = false, luxOk = false;
struct AwbStatus awb = {};
- awb.temperature_K = 4000; // in case no metadata
+ awb.temperatureK = 4000; /* in case no metadata */
struct LuxStatus lux = {};
- lux.lux = 400; // in case no metadata
+ lux.lux = 400; /* in case no metadata */
{
- // grab mutex just once to get everything
- std::lock_guard<Metadata> lock(*image_metadata);
- awb_ok = get_locked(image_metadata, "awb.status", awb);
- lux_ok = get_locked(image_metadata, "lux.status", lux);
+ /* grab mutex just once to get everything */
+ std::lock_guard<Metadata> lock(*imageMetadata);
+ awbOk = getLocked(imageMetadata, "awb.status", awb);
+ luxOk = getLocked(imageMetadata, "lux.status", lux);
}
- if (!awb_ok)
+ if (!awbOk)
LOG(RPiCcm, Warning) << "no colour temperature found";
- if (!lux_ok)
+ if (!luxOk)
LOG(RPiCcm, Warning) << "no lux value found";
- Matrix ccm = calculate_ccm(config_.ccms, awb.temperature_K);
+ Matrix ccm = calculateCcm(config_.ccms, awb.temperatureK);
double saturation = saturation_;
- struct CcmStatus ccm_status;
- ccm_status.saturation = saturation;
- if (!config_.saturation.Empty())
- saturation *= config_.saturation.Eval(
- config_.saturation.Domain().Clip(lux.lux));
- ccm = apply_saturation(ccm, saturation);
+ struct CcmStatus ccmStatus;
+ ccmStatus.saturation = saturation;
+ if (!config_.saturation.empty())
+ saturation *= config_.saturation.eval(
+ config_.saturation.domain().clip(lux.lux));
+ ccm = applySaturation(ccm, saturation);
for (int j = 0; j < 3; j++)
for (int i = 0; i < 3; i++)
- ccm_status.matrix[j * 3 + i] =
+ ccmStatus.matrix[j * 3 + i] =
std::max(-8.0, std::min(7.9999, ccm.m[j][i]));
LOG(RPiCcm, Debug)
- << "colour temperature " << awb.temperature_K << "K";
+ << "colour temperature " << awb.temperatureK << "K";
LOG(RPiCcm, Debug)
- << "CCM: " << ccm_status.matrix[0] << " " << ccm_status.matrix[1]
- << " " << ccm_status.matrix[2] << " "
- << ccm_status.matrix[3] << " " << ccm_status.matrix[4]
- << " " << ccm_status.matrix[5] << " "
- << ccm_status.matrix[6] << " " << ccm_status.matrix[7]
- << " " << ccm_status.matrix[8];
- image_metadata->Set("ccm.status", ccm_status);
+ << "CCM: " << ccmStatus.matrix[0] << " " << ccmStatus.matrix[1]
+ << " " << ccmStatus.matrix[2] << " "
+ << ccmStatus.matrix[3] << " " << ccmStatus.matrix[4]
+ << " " << ccmStatus.matrix[5] << " "
+ << ccmStatus.matrix[6] << " " << ccmStatus.matrix[7]
+ << " " << ccmStatus.matrix[8];
+ imageMetadata->set("ccm.status", ccmStatus);
}
-// Register algorithm with the system.
-static Algorithm *Create(Controller *controller)
+/* Register algorithm with the system. */
+static Algorithm *create(Controller *controller)
{
return (Algorithm *)new Ccm(controller);
;
}
-static RegisterAlgorithm reg(NAME, &Create);
+static RegisterAlgorithm reg(NAME, &create);
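
The block comment above describes selecting a CCM by interpolating between calibrated matrices at known colour temperatures; the interpolation itself is largely unchanged context and not shown in full in this hunk. The following is a minimal sketch of that idea, assuming the Matrix and CtCcm types from ccm.h; it is illustrative only, not the upstream calculateCcm().

#include <algorithm>
#include <vector>

#include "ccm.h"

using namespace RPiController;

/* Illustrative only: blend a CCM for the estimated colour temperature. */
static Matrix interpolateCcm(const std::vector<CtCcm> &ccms, double ct)
{
	if (ct <= ccms.front().ct)
		return ccms.front().ccm;
	if (ct >= ccms.back().ct)
		return ccms.back().ccm;

	/* Ccm::read() guarantees the list is in increasing ct order. */
	auto hi = std::lower_bound(ccms.begin(), ccms.end(), ct,
				   [](const CtCcm &c, double t) { return c.ct < t; });
	auto lo = hi - 1;
	double alpha = (ct - lo->ct) / (hi->ct - lo->ct);

	Matrix result;
	for (int j = 0; j < 3; j++)
		for (int i = 0; i < 3; i++)
			result.m[j][i] = (1.0 - alpha) * lo->ccm.m[j][i] +
					 alpha * hi->ccm.m[j][i];
	return result;
}
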
diff --git a/src/ipa/raspberrypi/controller/rpi/ccm.hpp b/src/ipa/raspberrypi/controller/rpi/ccm.h
index 330ed51f..286d0b33 100644
--- a/src/ipa/raspberrypi/controller/rpi/ccm.hpp
+++ b/src/ipa/raspberrypi/controller/rpi/ccm.h
@@ -1,26 +1,26 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
+ * Copyright (C) 2019, Raspberry Pi Ltd
*
- * ccm.hpp - CCM (colour correction matrix) control algorithm
+ * ccm.h - CCM (colour correction matrix) control algorithm
*/
#pragma once
#include <vector>
-#include "../ccm_algorithm.hpp"
-#include "../pwl.hpp"
+#include "../ccm_algorithm.h"
+#include "../pwl.h"
namespace RPiController {
-// Algorithm to calculate colour matrix. Should be placed after AWB.
+/* Algorithm to calculate colour matrix. Should be placed after AWB. */
struct Matrix {
Matrix(double m0, double m1, double m2, double m3, double m4, double m5,
double m6, double m7, double m8);
Matrix();
double m[3][3];
- void Read(boost::property_tree::ptree const &params);
+ int read(const libcamera::YamlObject &params);
};
static inline Matrix operator*(double d, Matrix const &m)
{
@@ -61,15 +61,15 @@ class Ccm : public CcmAlgorithm
{
public:
Ccm(Controller *controller = NULL);
- char const *Name() const override;
- void Read(boost::property_tree::ptree const &params) override;
- void SetSaturation(double saturation) override;
- void Initialise() override;
- void Prepare(Metadata *image_metadata) override;
+ char const *name() const override;
+ int read(const libcamera::YamlObject &params) override;
+ void setSaturation(double saturation) override;
+ void initialise() override;
+ void prepare(Metadata *imageMetadata) override;
private:
CcmConfig config_;
double saturation_;
};
-} // namespace RPiController
+} /* namespace RPiController */
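
Throughout this series the throwing boost::property_tree Read() methods become read() overloads that report failure as negative error codes. A sketch of how a caller can drive that contract follows; the function and its algorithms vector are hypothetical, not the actual Controller code.

#include <vector>

#include "libcamera/internal/yaml_parser.h"

#include "algorithm.h"

/*
 * Hypothetical caller: read each algorithm's tuning section and propagate the
 * first error code instead of catching exceptions.
 */
static int readAllAlgorithms(const libcamera::YamlObject &root,
			     std::vector<RPiController::Algorithm *> &algorithms)
{
	for (RPiController::Algorithm *algo : algorithms) {
		int ret = algo->read(root[algo->name()]);
		if (ret)
			return ret;	/* the algorithm logs the specific failure */
	}
	return 0;
}
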
diff --git a/src/ipa/raspberrypi/controller/rpi/contrast.cpp b/src/ipa/raspberrypi/controller/rpi/contrast.cpp
index ae55aad5..5b37edcb 100644
--- a/src/ipa/raspberrypi/controller/rpi/contrast.cpp
+++ b/src/ipa/raspberrypi/controller/rpi/contrast.cpp
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
+ * Copyright (C) 2019, Raspberry Pi Ltd
*
* contrast.cpp - contrast (gamma) control algorithm
*/
@@ -9,20 +9,22 @@
#include <libcamera/base/log.h>
#include "../contrast_status.h"
-#include "../histogram.hpp"
+#include "../histogram.h"
-#include "contrast.hpp"
+#include "contrast.h"
using namespace RPiController;
using namespace libcamera;
LOG_DEFINE_CATEGORY(RPiContrast)
-// This is a very simple control algorithm which simply retrieves the results of
-// AGC and AWB via their "status" metadata, and applies digital gain to the
-// colour channels in accordance with those instructions. We take care never to
-// apply less than unity gains, as that would cause fully saturated pixels to go
-// off-white.
+/*
+ * This is a very simple control algorithm which simply retrieves the results of
+ * AGC and AWB via their "status" metadata, and applies digital gain to the
+ * colour channels in accordance with those instructions. We take care never to
+ * apply less than unity gains, as that would cause fully saturated pixels to go
+ * off-white.
+ */
#define NAME "rpi.contrast"
@@ -31,155 +33,167 @@ Contrast::Contrast(Controller *controller)
{
}
-char const *Contrast::Name() const
+char const *Contrast::name() const
{
return NAME;
}
-void Contrast::Read(boost::property_tree::ptree const &params)
+int Contrast::read(const libcamera::YamlObject &params)
{
// enable adaptive enhancement by default
- config_.ce_enable = params.get<int>("ce_enable", 1);
+ config_.ceEnable = params["ce_enable"].get<int>(1);
// the point near the bottom of the histogram to move
- config_.lo_histogram = params.get<double>("lo_histogram", 0.01);
+ config_.loHistogram = params["lo_histogram"].get<double>(0.01);
// where in the range to try and move it to
- config_.lo_level = params.get<double>("lo_level", 0.015);
+ config_.loLevel = params["lo_level"].get<double>(0.015);
// but don't move by more than this
- config_.lo_max = params.get<double>("lo_max", 500);
+ config_.loMax = params["lo_max"].get<double>(500);
// equivalent values for the top of the histogram...
- config_.hi_histogram = params.get<double>("hi_histogram", 0.95);
- config_.hi_level = params.get<double>("hi_level", 0.95);
- config_.hi_max = params.get<double>("hi_max", 2000);
- config_.gamma_curve.Read(params.get_child("gamma_curve"));
+ config_.hiHistogram = params["hi_histogram"].get<double>(0.95);
+ config_.hiLevel = params["hi_level"].get<double>(0.95);
+ config_.hiMax = params["hi_max"].get<double>(2000);
+ return config_.gammaCurve.read(params["gamma_curve"]);
}
-void Contrast::SetBrightness(double brightness)
+void Contrast::setBrightness(double brightness)
{
brightness_ = brightness;
}
-void Contrast::SetContrast(double contrast)
+void Contrast::setContrast(double contrast)
{
contrast_ = contrast;
}
-static void fill_in_status(ContrastStatus &status, double brightness,
- double contrast, Pwl &gamma_curve)
+static void fillInStatus(ContrastStatus &status, double brightness,
+ double contrast, Pwl &gammaCurve)
{
status.brightness = brightness;
status.contrast = contrast;
- for (int i = 0; i < CONTRAST_NUM_POINTS - 1; i++) {
+ for (unsigned int i = 0; i < ContrastNumPoints - 1; i++) {
int x = i < 16 ? i * 1024
: (i < 24 ? (i - 16) * 2048 + 16384
: (i - 24) * 4096 + 32768);
status.points[i].x = x;
- status.points[i].y = std::min(65535.0, gamma_curve.Eval(x));
+ status.points[i].y = std::min(65535.0, gammaCurve.eval(x));
}
- status.points[CONTRAST_NUM_POINTS - 1].x = 65535;
- status.points[CONTRAST_NUM_POINTS - 1].y = 65535;
+ status.points[ContrastNumPoints - 1].x = 65535;
+ status.points[ContrastNumPoints - 1].y = 65535;
}
-void Contrast::Initialise()
+void Contrast::initialise()
{
- // Fill in some default values as Prepare will run before Process gets
- // called.
- fill_in_status(status_, brightness_, contrast_, config_.gamma_curve);
+ /*
+ * Fill in some default values as Prepare will run before Process gets
+ * called.
+ */
+ fillInStatus(status_, brightness_, contrast_, config_.gammaCurve);
}
-void Contrast::Prepare(Metadata *image_metadata)
+void Contrast::prepare(Metadata *imageMetadata)
{
std::unique_lock<std::mutex> lock(mutex_);
- image_metadata->Set("contrast.status", status_);
+ imageMetadata->set("contrast.status", status_);
}
-Pwl compute_stretch_curve(Histogram const &histogram,
- ContrastConfig const &config)
+Pwl computeStretchCurve(Histogram const &histogram,
+ ContrastConfig const &config)
{
Pwl enhance;
- enhance.Append(0, 0);
- // If the start of the histogram is rather empty, try to pull it down a
- // bit.
- double hist_lo = histogram.Quantile(config.lo_histogram) *
- (65536 / NUM_HISTOGRAM_BINS);
- double level_lo = config.lo_level * 65536;
+ enhance.append(0, 0);
+ /*
+ * If the start of the histogram is rather empty, try to pull it down a
+ * bit.
+ */
+ double histLo = histogram.quantile(config.loHistogram) *
+ (65536 / NUM_HISTOGRAM_BINS);
+ double levelLo = config.loLevel * 65536;
LOG(RPiContrast, Debug)
- << "Move histogram point " << hist_lo << " to " << level_lo;
- hist_lo = std::max(
- level_lo,
- std::min(65535.0, std::min(hist_lo, level_lo + config.lo_max)));
+ << "Move histogram point " << histLo << " to " << levelLo;
+ histLo = std::max(levelLo,
+ std::min(65535.0, std::min(histLo, levelLo + config.loMax)));
LOG(RPiContrast, Debug)
- << "Final values " << hist_lo << " -> " << level_lo;
- enhance.Append(hist_lo, level_lo);
- // Keep the mid-point (median) in the same place, though, to limit the
- // apparent amount of global brightness shift.
- double mid = histogram.Quantile(0.5) * (65536 / NUM_HISTOGRAM_BINS);
- enhance.Append(mid, mid);
-
- // If the top to the histogram is empty, try to pull the pixel values
- // there up.
- double hist_hi = histogram.Quantile(config.hi_histogram) *
- (65536 / NUM_HISTOGRAM_BINS);
- double level_hi = config.hi_level * 65536;
+ << "Final values " << histLo << " -> " << levelLo;
+ enhance.append(histLo, levelLo);
+ /*
+ * Keep the mid-point (median) in the same place, though, to limit the
+ * apparent amount of global brightness shift.
+ */
+ double mid = histogram.quantile(0.5) * (65536 / NUM_HISTOGRAM_BINS);
+ enhance.append(mid, mid);
+
+ /*
+	 * If the top of the histogram is empty, try to pull the pixel values
+ * there up.
+ */
+ double histHi = histogram.quantile(config.hiHistogram) *
+ (65536 / NUM_HISTOGRAM_BINS);
+ double levelHi = config.hiLevel * 65536;
LOG(RPiContrast, Debug)
- << "Move histogram point " << hist_hi << " to " << level_hi;
- hist_hi = std::min(
- level_hi,
- std::max(0.0, std::max(hist_hi, level_hi - config.hi_max)));
+ << "Move histogram point " << histHi << " to " << levelHi;
+ histHi = std::min(levelHi,
+ std::max(0.0, std::max(histHi, levelHi - config.hiMax)));
LOG(RPiContrast, Debug)
- << "Final values " << hist_hi << " -> " << level_hi;
- enhance.Append(hist_hi, level_hi);
- enhance.Append(65535, 65535);
+ << "Final values " << histHi << " -> " << levelHi;
+ enhance.append(histHi, levelHi);
+ enhance.append(65535, 65535);
return enhance;
}
-Pwl apply_manual_contrast(Pwl const &gamma_curve, double brightness,
- double contrast)
+Pwl applyManualContrast(Pwl const &gammaCurve, double brightness,
+ double contrast)
{
- Pwl new_gamma_curve;
+ Pwl newGammaCurve;
LOG(RPiContrast, Debug)
<< "Manual brightness " << brightness << " contrast " << contrast;
- gamma_curve.Map([&](double x, double y) {
- new_gamma_curve.Append(
+ gammaCurve.map([&](double x, double y) {
+ newGammaCurve.append(
x, std::max(0.0, std::min(65535.0,
(y - 32768) * contrast +
32768 + brightness)));
});
- return new_gamma_curve;
+ return newGammaCurve;
}
-void Contrast::Process(StatisticsPtr &stats,
- [[maybe_unused]] Metadata *image_metadata)
+void Contrast::process(StatisticsPtr &stats,
+ [[maybe_unused]] Metadata *imageMetadata)
{
Histogram histogram(stats->hist[0].g_hist, NUM_HISTOGRAM_BINS);
- // We look at the histogram and adjust the gamma curve in the following
- // ways: 1. Adjust the gamma curve so as to pull the start of the
- // histogram down, and possibly push the end up.
- Pwl gamma_curve = config_.gamma_curve;
- if (config_.ce_enable) {
- if (config_.lo_max != 0 || config_.hi_max != 0)
- gamma_curve = compute_stretch_curve(histogram, config_)
- .Compose(gamma_curve);
- // We could apply other adjustments (e.g. partial equalisation)
- // based on the histogram...?
+ /*
+ * We look at the histogram and adjust the gamma curve in the following
+ * ways: 1. Adjust the gamma curve so as to pull the start of the
+ * histogram down, and possibly push the end up.
+ */
+ Pwl gammaCurve = config_.gammaCurve;
+ if (config_.ceEnable) {
+ if (config_.loMax != 0 || config_.hiMax != 0)
+ gammaCurve = computeStretchCurve(histogram, config_).compose(gammaCurve);
+ /*
+ * We could apply other adjustments (e.g. partial equalisation)
+ * based on the histogram...?
+ */
}
- // 2. Finally apply any manually selected brightness/contrast
- // adjustment.
+ /*
+ * 2. Finally apply any manually selected brightness/contrast
+ * adjustment.
+ */
if (brightness_ != 0 || contrast_ != 1.0)
- gamma_curve = apply_manual_contrast(gamma_curve, brightness_,
- contrast_);
- // And fill in the status for output. Use more points towards the bottom
- // of the curve.
+ gammaCurve = applyManualContrast(gammaCurve, brightness_, contrast_);
+ /*
+ * And fill in the status for output. Use more points towards the bottom
+ * of the curve.
+ */
ContrastStatus status;
- fill_in_status(status, brightness_, contrast_, gamma_curve);
+ fillInStatus(status, brightness_, contrast_, gammaCurve);
{
std::unique_lock<std::mutex> lock(mutex_);
status_ = status;
}
}
-// Register algorithm with the system.
-static Algorithm *Create(Controller *controller)
+/* Register algorithm with the system. */
+static Algorithm *create(Controller *controller)
{
return (Algorithm *)new Contrast(controller);
}
-static RegisterAlgorithm reg(NAME, &Create);
+static RegisterAlgorithm reg(NAME, &create);
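
For a concrete sense of the low-end stretch in computeStretchCurve() above, here is a small runnable worked example using the default tuning from Contrast::read() (loHistogram = 0.01, loLevel = 0.015, loMax = 500); the 1% quantile position used below is hypothetical.

#include <algorithm>
#include <cstdio>

int main()
{
	double histLo = 2600.0;		/* hypothetical 1% quantile, 16-bit scale */
	double levelLo = 0.015 * 65536;	/* ~983: where we would like it */
	double loMax = 500.0;		/* but move it by no more than this */

	histLo = std::max(levelLo,
			  std::min(65535.0, std::min(histLo, levelLo + loMax)));

	/* Prints "map 1483 -> 983": the knee moves by exactly loMax. */
	printf("map %.0f -> %.0f\n", histLo, levelLo);
	return 0;
}
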
diff --git a/src/ipa/raspberrypi/controller/rpi/contrast.h b/src/ipa/raspberrypi/controller/rpi/contrast.h
new file mode 100644
index 00000000..c68adbc9
--- /dev/null
+++ b/src/ipa/raspberrypi/controller/rpi/contrast.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * contrast.h - contrast (gamma) control algorithm
+ */
+#pragma once
+
+#include <mutex>
+
+#include "../contrast_algorithm.h"
+#include "../pwl.h"
+
+namespace RPiController {
+
+/*
+ * Back End algorithm to apply correct digital gain. Should be placed after
+ * Back End AWB.
+ */
+
+struct ContrastConfig {
+ bool ceEnable;
+ double loHistogram;
+ double loLevel;
+ double loMax;
+ double hiHistogram;
+ double hiLevel;
+ double hiMax;
+ Pwl gammaCurve;
+};
+
+class Contrast : public ContrastAlgorithm
+{
+public:
+ Contrast(Controller *controller = NULL);
+ char const *name() const override;
+ int read(const libcamera::YamlObject &params) override;
+ void setBrightness(double brightness) override;
+ void setContrast(double contrast) override;
+ void initialise() override;
+ void prepare(Metadata *imageMetadata) override;
+ void process(StatisticsPtr &stats, Metadata *imageMetadata) override;
+
+private:
+ ContrastConfig config_;
+ double brightness_;
+ double contrast_;
+ ContrastStatus status_;
+ std::mutex mutex_;
+};
+
+} /* namespace RPiController */
diff --git a/src/ipa/raspberrypi/controller/rpi/contrast.hpp b/src/ipa/raspberrypi/controller/rpi/contrast.hpp
deleted file mode 100644
index 85624539..00000000
--- a/src/ipa/raspberrypi/controller/rpi/contrast.hpp
+++ /dev/null
@@ -1,50 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
- *
- * contrast.hpp - contrast (gamma) control algorithm
- */
-#pragma once
-
-#include <mutex>
-
-#include "../contrast_algorithm.hpp"
-#include "../pwl.hpp"
-
-namespace RPiController {
-
-// Back End algorithm to appaly correct digital gain. Should be placed after
-// Back End AWB.
-
-struct ContrastConfig {
- bool ce_enable;
- double lo_histogram;
- double lo_level;
- double lo_max;
- double hi_histogram;
- double hi_level;
- double hi_max;
- Pwl gamma_curve;
-};
-
-class Contrast : public ContrastAlgorithm
-{
-public:
- Contrast(Controller *controller = NULL);
- char const *Name() const override;
- void Read(boost::property_tree::ptree const &params) override;
- void SetBrightness(double brightness) override;
- void SetContrast(double contrast) override;
- void Initialise() override;
- void Prepare(Metadata *image_metadata) override;
- void Process(StatisticsPtr &stats, Metadata *image_metadata) override;
-
-private:
- ContrastConfig config_;
- double brightness_;
- double contrast_;
- ContrastStatus status_;
- std::mutex mutex_;
-};
-
-} // namespace RPiController
diff --git a/src/ipa/raspberrypi/controller/rpi/dpc.cpp b/src/ipa/raspberrypi/controller/rpi/dpc.cpp
index 110f5056..be3871df 100644
--- a/src/ipa/raspberrypi/controller/rpi/dpc.cpp
+++ b/src/ipa/raspberrypi/controller/rpi/dpc.cpp
@@ -1,21 +1,23 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
+ * Copyright (C) 2019, Raspberry Pi Ltd
*
* dpc.cpp - DPC (defective pixel correction) control algorithm
*/
#include <libcamera/base/log.h>
-#include "dpc.hpp"
+#include "dpc.h"
using namespace RPiController;
using namespace libcamera;
LOG_DEFINE_CATEGORY(RPiDpc)
-// We use the lux status so that we can apply stronger settings in darkness (if
-// necessary).
+/*
+ * We use the lux status so that we can apply stronger settings in darkness (if
+ * necessary).
+ */
#define NAME "rpi.dpc"
@@ -24,30 +26,34 @@ Dpc::Dpc(Controller *controller)
{
}
-char const *Dpc::Name() const
+char const *Dpc::name() const
{
return NAME;
}
-void Dpc::Read(boost::property_tree::ptree const &params)
+int Dpc::read(const libcamera::YamlObject &params)
{
- config_.strength = params.get<int>("strength", 1);
- if (config_.strength < 0 || config_.strength > 2)
- throw std::runtime_error("Dpc: bad strength value");
+ config_.strength = params["strength"].get<int>(1);
+ if (config_.strength < 0 || config_.strength > 2) {
+ LOG(RPiDpc, Error) << "Bad strength value";
+ return -EINVAL;
+ }
+
+ return 0;
}
-void Dpc::Prepare(Metadata *image_metadata)
+void Dpc::prepare(Metadata *imageMetadata)
{
- DpcStatus dpc_status = {};
- // Should we vary this with lux level or analogue gain? TBD.
- dpc_status.strength = config_.strength;
- LOG(RPiDpc, Debug) << "strength " << dpc_status.strength;
- image_metadata->Set("dpc.status", dpc_status);
+ DpcStatus dpcStatus = {};
+ /* Should we vary this with lux level or analogue gain? TBD. */
+ dpcStatus.strength = config_.strength;
+ LOG(RPiDpc, Debug) << "strength " << dpcStatus.strength;
+ imageMetadata->set("dpc.status", dpcStatus);
}
-// Register algorithm with the system.
-static Algorithm *Create(Controller *controller)
+/* Register algorithm with the system. */
+static Algorithm *create(Controller *controller)
{
return (Algorithm *)new Dpc(controller);
}
-static RegisterAlgorithm reg(NAME, &Create);
+static RegisterAlgorithm reg(NAME, &create);
diff --git a/src/ipa/raspberrypi/controller/rpi/dpc.h b/src/ipa/raspberrypi/controller/rpi/dpc.h
new file mode 100644
index 00000000..84a05604
--- /dev/null
+++ b/src/ipa/raspberrypi/controller/rpi/dpc.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * dpc.h - DPC (defective pixel correction) control algorithm
+ */
+#pragma once
+
+#include "../algorithm.h"
+#include "../dpc_status.h"
+
+namespace RPiController {
+
+/* Back End algorithm to apply appropriate DPC settings. */
+
+struct DpcConfig {
+ int strength;
+};
+
+class Dpc : public Algorithm
+{
+public:
+ Dpc(Controller *controller);
+ char const *name() const override;
+ int read(const libcamera::YamlObject &params) override;
+ void prepare(Metadata *imageMetadata) override;
+
+private:
+ DpcConfig config_;
+};
+
+} /* namespace RPiController */
diff --git a/src/ipa/raspberrypi/controller/rpi/dpc.hpp b/src/ipa/raspberrypi/controller/rpi/dpc.hpp
deleted file mode 100644
index d90285c4..00000000
--- a/src/ipa/raspberrypi/controller/rpi/dpc.hpp
+++ /dev/null
@@ -1,32 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
- *
- * dpc.hpp - DPC (defective pixel correction) control algorithm
- */
-#pragma once
-
-#include "../algorithm.hpp"
-#include "../dpc_status.h"
-
-namespace RPiController {
-
-// Back End algorithm to apply appropriate GEQ settings.
-
-struct DpcConfig {
- int strength;
-};
-
-class Dpc : public Algorithm
-{
-public:
- Dpc(Controller *controller);
- char const *Name() const override;
- void Read(boost::property_tree::ptree const &params) override;
- void Prepare(Metadata *image_metadata) override;
-
-private:
- DpcConfig config_;
-};
-
-} // namespace RPiController
diff --git a/src/ipa/raspberrypi/controller/rpi/focus.cpp b/src/ipa/raspberrypi/controller/rpi/focus.cpp
index a87ec802..8c5029bd 100644
--- a/src/ipa/raspberrypi/controller/rpi/focus.cpp
+++ b/src/ipa/raspberrypi/controller/rpi/focus.cpp
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/*
- * Copyright (C) 2020, Raspberry Pi (Trading) Limited
+ * Copyright (C) 2020, Raspberry Pi Ltd
*
* focus.cpp - focus algorithm
*/
@@ -9,7 +9,7 @@
#include <libcamera/base/log.h>
#include "../focus_status.h"
-#include "focus.hpp"
+#include "focus.h"
using namespace RPiController;
using namespace libcamera;
@@ -23,28 +23,28 @@ Focus::Focus(Controller *controller)
{
}
-char const *Focus::Name() const
+char const *Focus::name() const
{
return NAME;
}
-void Focus::Process(StatisticsPtr &stats, Metadata *image_metadata)
+void Focus::process(StatisticsPtr &stats, Metadata *imageMetadata)
{
FocusStatus status;
unsigned int i;
for (i = 0; i < FOCUS_REGIONS; i++)
- status.focus_measures[i] = stats->focus_stats[i].contrast_val[1][1] / 1000;
+ status.focusMeasures[i] = stats->focus_stats[i].contrast_val[1][1] / 1000;
status.num = i;
- image_metadata->Set("focus.status", status);
+ imageMetadata->set("focus.status", status);
LOG(RPiFocus, Debug)
<< "Focus contrast measure: "
- << (status.focus_measures[5] + status.focus_measures[6]) / 10;
+ << (status.focusMeasures[5] + status.focusMeasures[6]) / 10;
}
/* Register algorithm with the system. */
-static Algorithm *Create(Controller *controller)
+static Algorithm *create(Controller *controller)
{
return new Focus(controller);
}
-static RegisterAlgorithm reg(NAME, &Create);
+static RegisterAlgorithm reg(NAME, &create);
diff --git a/src/ipa/raspberrypi/controller/rpi/focus.hpp b/src/ipa/raspberrypi/controller/rpi/focus.h
index 131b1d0f..8556039d 100644
--- a/src/ipa/raspberrypi/controller/rpi/focus.hpp
+++ b/src/ipa/raspberrypi/controller/rpi/focus.h
@@ -1,13 +1,13 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/*
- * Copyright (C) 2020, Raspberry Pi (Trading) Limited
+ * Copyright (C) 2020, Raspberry Pi Ltd
*
- * focus.hpp - focus algorithm
+ * focus.h - focus algorithm
*/
#pragma once
-#include "../algorithm.hpp"
-#include "../metadata.hpp"
+#include "../algorithm.h"
+#include "../metadata.h"
/*
* The "focus" algorithm. All it does it print out a version of the
@@ -21,8 +21,8 @@ class Focus : public Algorithm
{
public:
Focus(Controller *controller);
- char const *Name() const override;
- void Process(StatisticsPtr &stats, Metadata *image_metadata) override;
+ char const *name() const override;
+ void process(StatisticsPtr &stats, Metadata *imageMetadata) override;
};
} /* namespace RPiController */
diff --git a/src/ipa/raspberrypi/controller/rpi/geq.cpp b/src/ipa/raspberrypi/controller/rpi/geq.cpp
index 4530cb75..510870e9 100644
--- a/src/ipa/raspberrypi/controller/rpi/geq.cpp
+++ b/src/ipa/raspberrypi/controller/rpi/geq.cpp
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
+ * Copyright (C) 2019, Raspberry Pi Ltd
*
* geq.cpp - GEQ (green equalisation) control algorithm
*/
@@ -9,17 +9,19 @@
#include "../device_status.h"
#include "../lux_status.h"
-#include "../pwl.hpp"
+#include "../pwl.h"
-#include "geq.hpp"
+#include "geq.h"
using namespace RPiController;
using namespace libcamera;
LOG_DEFINE_CATEGORY(RPiGeq)
-// We use the lux status so that we can apply stronger settings in darkness (if
-// necessary).
+/*
+ * We use the lux status so that we can apply stronger settings in darkness (if
+ * necessary).
+ */
#define NAME "rpi.geq"
@@ -28,54 +30,60 @@ Geq::Geq(Controller *controller)
{
}
-char const *Geq::Name() const
+char const *Geq::name() const
{
return NAME;
}
-void Geq::Read(boost::property_tree::ptree const &params)
+int Geq::read(const libcamera::YamlObject &params)
{
- config_.offset = params.get<uint16_t>("offset", 0);
- config_.slope = params.get<double>("slope", 0.0);
- if (config_.slope < 0.0 || config_.slope >= 1.0)
- throw std::runtime_error("Geq: bad slope value");
- if (params.get_child_optional("strength"))
- config_.strength.Read(params.get_child("strength"));
+ config_.offset = params["offset"].get<uint16_t>(0);
+ config_.slope = params["slope"].get<double>(0.0);
+ if (config_.slope < 0.0 || config_.slope >= 1.0) {
+ LOG(RPiGeq, Error) << "Bad slope value";
+ return -EINVAL;
+ }
+
+ if (params.contains("strength")) {
+ int ret = config_.strength.read(params["strength"]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
-void Geq::Prepare(Metadata *image_metadata)
+void Geq::prepare(Metadata *imageMetadata)
{
- LuxStatus lux_status = {};
- lux_status.lux = 400;
- if (image_metadata->Get("lux.status", lux_status))
+ LuxStatus luxStatus = {};
+ luxStatus.lux = 400;
+ if (imageMetadata->get("lux.status", luxStatus))
LOG(RPiGeq, Warning) << "no lux data found";
- DeviceStatus device_status;
- device_status.analogue_gain = 1.0; // in case not found
- if (image_metadata->Get("device.status", device_status))
+ DeviceStatus deviceStatus;
+ deviceStatus.analogueGain = 1.0; /* in case not found */
+ if (imageMetadata->get("device.status", deviceStatus))
LOG(RPiGeq, Warning)
<< "no device metadata - use analogue gain of 1x";
- GeqStatus geq_status = {};
- double strength =
- config_.strength.Empty()
+ GeqStatus geqStatus = {};
+ double strength = config_.strength.empty()
? 1.0
- : config_.strength.Eval(config_.strength.Domain().Clip(
- lux_status.lux));
- strength *= device_status.analogue_gain;
+ : config_.strength.eval(config_.strength.domain().clip(luxStatus.lux));
+ strength *= deviceStatus.analogueGain;
double offset = config_.offset * strength;
double slope = config_.slope * strength;
- geq_status.offset = std::min(65535.0, std::max(0.0, offset));
- geq_status.slope = std::min(.99999, std::max(0.0, slope));
+ geqStatus.offset = std::min(65535.0, std::max(0.0, offset));
+ geqStatus.slope = std::min(.99999, std::max(0.0, slope));
LOG(RPiGeq, Debug)
- << "offset " << geq_status.offset << " slope "
- << geq_status.slope << " (analogue gain "
- << device_status.analogue_gain << " lux "
- << lux_status.lux << ")";
- image_metadata->Set("geq.status", geq_status);
+ << "offset " << geqStatus.offset << " slope "
+ << geqStatus.slope << " (analogue gain "
+ << deviceStatus.analogueGain << " lux "
+ << luxStatus.lux << ")";
+ imageMetadata->set("geq.status", geqStatus);
}
-// Register algorithm with the system.
-static Algorithm *Create(Controller *controller)
+/* Register algorithm with the system. */
+static Algorithm *create(Controller *controller)
{
return (Algorithm *)new Geq(controller);
}
-static RegisterAlgorithm reg(NAME, &Create);
+static RegisterAlgorithm reg(NAME, &create);
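
Geq::prepare() above multiplies the tuned offset and slope by a strength derived from lux and analogue gain, then clamps them. A compact sketch of that scaling follows, with hypothetical numbers in the comment showing why the slope clamp matters.

#include <algorithm>

/*
 * Sketch of the GEQ scaling; the numbers are hypothetical. A tuning of
 * offset 1000 / slope 0.2, with an evaluated strength of 1.5 at the current
 * lux and an analogue gain of 4.0, gives offset 6000 and a raw slope of 1.2,
 * which the clamp pulls back below 1.0 (to 0.99999).
 */
static void scaleGeq(double offset, double slope, double strengthFromLux,
		     double analogueGain, double &outOffset, double &outSlope)
{
	double strength = strengthFromLux * analogueGain;
	outOffset = std::min(65535.0, std::max(0.0, offset * strength));
	outSlope = std::min(.99999, std::max(0.0, slope * strength));
}
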
diff --git a/src/ipa/raspberrypi/controller/rpi/geq.h b/src/ipa/raspberrypi/controller/rpi/geq.h
new file mode 100644
index 00000000..ee3a52ff
--- /dev/null
+++ b/src/ipa/raspberrypi/controller/rpi/geq.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * geq.h - GEQ (green equalisation) control algorithm
+ */
+#pragma once
+
+#include "../algorithm.h"
+#include "../geq_status.h"
+
+namespace RPiController {
+
+/* Back End algorithm to apply appropriate GEQ settings. */
+
+struct GeqConfig {
+ uint16_t offset;
+ double slope;
+ Pwl strength; /* lux to strength factor */
+};
+
+class Geq : public Algorithm
+{
+public:
+ Geq(Controller *controller);
+ char const *name() const override;
+ int read(const libcamera::YamlObject &params) override;
+ void prepare(Metadata *imageMetadata) override;
+
+private:
+ GeqConfig config_;
+};
+
+} /* namespace RPiController */
diff --git a/src/ipa/raspberrypi/controller/rpi/geq.hpp b/src/ipa/raspberrypi/controller/rpi/geq.hpp
deleted file mode 100644
index 8ba3046b..00000000
--- a/src/ipa/raspberrypi/controller/rpi/geq.hpp
+++ /dev/null
@@ -1,34 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
- *
- * geq.hpp - GEQ (green equalisation) control algorithm
- */
-#pragma once
-
-#include "../algorithm.hpp"
-#include "../geq_status.h"
-
-namespace RPiController {
-
-// Back End algorithm to apply appropriate GEQ settings.
-
-struct GeqConfig {
- uint16_t offset;
- double slope;
- Pwl strength; // lux to strength factor
-};
-
-class Geq : public Algorithm
-{
-public:
- Geq(Controller *controller);
- char const *Name() const override;
- void Read(boost::property_tree::ptree const &params) override;
- void Prepare(Metadata *image_metadata) override;
-
-private:
- GeqConfig config_;
-};
-
-} // namespace RPiController
diff --git a/src/ipa/raspberrypi/controller/rpi/lux.cpp b/src/ipa/raspberrypi/controller/rpi/lux.cpp
index f77e9140..9759186a 100644
--- a/src/ipa/raspberrypi/controller/rpi/lux.cpp
+++ b/src/ipa/raspberrypi/controller/rpi/lux.cpp
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
+ * Copyright (C) 2019, Raspberry Pi Ltd
*
* lux.cpp - Lux control algorithm
*/
@@ -12,7 +12,7 @@
#include "../device_status.h"
-#include "lux.hpp"
+#include "lux.h"
using namespace RPiController;
using namespace libcamera;
@@ -25,82 +25,101 @@ LOG_DEFINE_CATEGORY(RPiLux)
Lux::Lux(Controller *controller)
: Algorithm(controller)
{
- // Put in some defaults as there will be no meaningful values until
- // Process has run.
+ /*
+ * Put in some defaults as there will be no meaningful values until
+ * Process has run.
+ */
status_.aperture = 1.0;
status_.lux = 400;
}
-char const *Lux::Name() const
+char const *Lux::name() const
{
return NAME;
}
-void Lux::Read(boost::property_tree::ptree const &params)
+int Lux::read(const libcamera::YamlObject &params)
{
- reference_shutter_speed_ =
- params.get<double>("reference_shutter_speed") * 1.0us;
- reference_gain_ = params.get<double>("reference_gain");
- reference_aperture_ = params.get<double>("reference_aperture", 1.0);
- reference_Y_ = params.get<double>("reference_Y");
- reference_lux_ = params.get<double>("reference_lux");
- current_aperture_ = reference_aperture_;
+ auto value = params["reference_shutter_speed"].get<double>();
+ if (!value)
+ return -EINVAL;
+ referenceShutterSpeed_ = *value * 1.0us;
+
+ value = params["reference_gain"].get<double>();
+ if (!value)
+ return -EINVAL;
+ referenceGain_ = *value;
+
+ referenceAperture_ = params["reference_aperture"].get<double>(1.0);
+
+ value = params["reference_Y"].get<double>();
+ if (!value)
+ return -EINVAL;
+ referenceY_ = *value;
+
+ value = params["reference_lux"].get<double>();
+ if (!value)
+ return -EINVAL;
+ referenceLux_ = *value;
+
+ currentAperture_ = referenceAperture_;
+ return 0;
}
-void Lux::SetCurrentAperture(double aperture)
+void Lux::setCurrentAperture(double aperture)
{
- current_aperture_ = aperture;
+ currentAperture_ = aperture;
}
-void Lux::Prepare(Metadata *image_metadata)
+void Lux::prepare(Metadata *imageMetadata)
{
std::unique_lock<std::mutex> lock(mutex_);
- image_metadata->Set("lux.status", status_);
+ imageMetadata->set("lux.status", status_);
}
-void Lux::Process(StatisticsPtr &stats, Metadata *image_metadata)
+void Lux::process(StatisticsPtr &stats, Metadata *imageMetadata)
{
- DeviceStatus device_status;
- if (image_metadata->Get("device.status", device_status) == 0) {
- double current_gain = device_status.analogue_gain;
- double current_aperture = device_status.aperture;
- if (current_aperture == 0)
- current_aperture = current_aperture_;
+ DeviceStatus deviceStatus;
+ if (imageMetadata->get("device.status", deviceStatus) == 0) {
+ double currentGain = deviceStatus.analogueGain;
+ double currentAperture = deviceStatus.aperture.value_or(currentAperture_);
uint64_t sum = 0;
uint32_t num = 0;
uint32_t *bin = stats->hist[0].g_hist;
- const int num_bins = sizeof(stats->hist[0].g_hist) /
- sizeof(stats->hist[0].g_hist[0]);
- for (int i = 0; i < num_bins; i++)
+ const int numBins = sizeof(stats->hist[0].g_hist) /
+ sizeof(stats->hist[0].g_hist[0]);
+ for (int i = 0; i < numBins; i++)
sum += bin[i] * (uint64_t)i, num += bin[i];
- // add .5 to reflect the mid-points of bins
- double current_Y = sum / (double)num + .5;
- double gain_ratio = reference_gain_ / current_gain;
- double shutter_speed_ratio =
- reference_shutter_speed_ / device_status.shutter_speed;
- double aperture_ratio = reference_aperture_ / current_aperture;
- double Y_ratio = current_Y * (65536 / num_bins) / reference_Y_;
- double estimated_lux = shutter_speed_ratio * gain_ratio *
- aperture_ratio * aperture_ratio *
- Y_ratio * reference_lux_;
+ /* add .5 to reflect the mid-points of bins */
+ double currentY = sum / (double)num + .5;
+ double gainRatio = referenceGain_ / currentGain;
+ double shutterSpeedRatio =
+ referenceShutterSpeed_ / deviceStatus.shutterSpeed;
+ double apertureRatio = referenceAperture_ / currentAperture;
+ double yRatio = currentY * (65536 / numBins) / referenceY_;
+ double estimatedLux = shutterSpeedRatio * gainRatio *
+ apertureRatio * apertureRatio *
+ yRatio * referenceLux_;
LuxStatus status;
- status.lux = estimated_lux;
- status.aperture = current_aperture;
- LOG(RPiLux, Debug) << ": estimated lux " << estimated_lux;
+ status.lux = estimatedLux;
+ status.aperture = currentAperture;
+ LOG(RPiLux, Debug) << ": estimated lux " << estimatedLux;
{
std::unique_lock<std::mutex> lock(mutex_);
status_ = status;
}
- // Overwrite the metadata here as well, so that downstream
- // algorithms get the latest value.
- image_metadata->Set("lux.status", status);
+ /*
+ * Overwrite the metadata here as well, so that downstream
+ * algorithms get the latest value.
+ */
+ imageMetadata->set("lux.status", status);
} else
LOG(RPiLux, Warning) << ": no device metadata";
}
-// Register algorithm with the system.
-static Algorithm *Create(Controller *controller)
+/* Register algorithm with the system. */
+static Algorithm *create(Controller *controller)
{
return (Algorithm *)new Lux(controller);
}
-static RegisterAlgorithm reg(NAME, &Create);
+static RegisterAlgorithm reg(NAME, &create);
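
Lux::process() above rebuilds an absolute lux estimate from a single calibrated reference frame. A self-contained restatement of that formula follows; the parameter names are local to this sketch.

#include <libcamera/base/utils.h>

using libcamera::utils::Duration;

/*
 * Illustrative only: scale the reference scene's lux by how much less
 * exposure (shutter, gain, aperture) the current frame needed and how much
 * brighter its mean Y is. The aperture ratio enters squared, as in
 * Lux::process().
 */
static double estimateLux(Duration refShutter, double refGain, double refAperture,
			  double refY, double refLux,
			  Duration shutter, double gain, double aperture,
			  double meanY /* already on the 0-65535 scale */)
{
	double shutterRatio = refShutter / shutter;
	double gainRatio = refGain / gain;
	double apertureRatio = refAperture / aperture;
	double yRatio = meanY / refY;

	return shutterRatio * gainRatio * apertureRatio * apertureRatio *
	       yRatio * refLux;
}
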
diff --git a/src/ipa/raspberrypi/controller/rpi/lux.h b/src/ipa/raspberrypi/controller/rpi/lux.h
new file mode 100644
index 00000000..89411a54
--- /dev/null
+++ b/src/ipa/raspberrypi/controller/rpi/lux.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * lux.h - Lux control algorithm
+ */
+#pragma once
+
+#include <mutex>
+
+#include <libcamera/base/utils.h>
+
+#include "../lux_status.h"
+#include "../algorithm.h"
+
+/* This is our implementation of the "lux control algorithm". */
+
+namespace RPiController {
+
+class Lux : public Algorithm
+{
+public:
+ Lux(Controller *controller);
+ char const *name() const override;
+ int read(const libcamera::YamlObject &params) override;
+ void prepare(Metadata *imageMetadata) override;
+ void process(StatisticsPtr &stats, Metadata *imageMetadata) override;
+ void setCurrentAperture(double aperture);
+
+private:
+ /*
+ * These values define the conditions of the reference image, against
+ * which we compare the new image.
+ */
+ libcamera::utils::Duration referenceShutterSpeed_;
+ double referenceGain_;
+ double referenceAperture_; /* units of 1/f */
+ double referenceY_; /* out of 65536 */
+ double referenceLux_;
+ double currentAperture_;
+ LuxStatus status_;
+ std::mutex mutex_;
+};
+
+} /* namespace RPiController */
diff --git a/src/ipa/raspberrypi/controller/rpi/lux.hpp b/src/ipa/raspberrypi/controller/rpi/lux.hpp
deleted file mode 100644
index 3ebd35d1..00000000
--- a/src/ipa/raspberrypi/controller/rpi/lux.hpp
+++ /dev/null
@@ -1,43 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
- *
- * lux.hpp - Lux control algorithm
- */
-#pragma once
-
-#include <mutex>
-
-#include <libcamera/base/utils.h>
-
-#include "../lux_status.h"
-#include "../algorithm.hpp"
-
-// This is our implementation of the "lux control algorithm".
-
-namespace RPiController {
-
-class Lux : public Algorithm
-{
-public:
- Lux(Controller *controller);
- char const *Name() const override;
- void Read(boost::property_tree::ptree const &params) override;
- void Prepare(Metadata *image_metadata) override;
- void Process(StatisticsPtr &stats, Metadata *image_metadata) override;
- void SetCurrentAperture(double aperture);
-
-private:
- // These values define the conditions of the reference image, against
- // which we compare the new image.
- libcamera::utils::Duration reference_shutter_speed_;
- double reference_gain_;
- double reference_aperture_; // units of 1/f
- double reference_Y_; // out of 65536
- double reference_lux_;
- double current_aperture_;
- LuxStatus status_;
- std::mutex mutex_;
-};
-
-} // namespace RPiController
diff --git a/src/ipa/raspberrypi/controller/rpi/noise.cpp b/src/ipa/raspberrypi/controller/rpi/noise.cpp
index 63cad639..bcd8b9ed 100644
--- a/src/ipa/raspberrypi/controller/rpi/noise.cpp
+++ b/src/ipa/raspberrypi/controller/rpi/noise.cpp
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
+ * Copyright (C) 2019, Raspberry Pi Ltd
*
* noise.cpp - Noise control algorithm
*/
@@ -12,7 +12,7 @@
#include "../device_status.h"
#include "../noise_status.h"
-#include "noise.hpp"
+#include "noise.h"
using namespace RPiController;
using namespace libcamera;
@@ -22,55 +22,68 @@ LOG_DEFINE_CATEGORY(RPiNoise)
#define NAME "rpi.noise"
Noise::Noise(Controller *controller)
- : Algorithm(controller), mode_factor_(1.0)
+ : Algorithm(controller), modeFactor_(1.0)
{
}
-char const *Noise::Name() const
+char const *Noise::name() const
{
return NAME;
}
-void Noise::SwitchMode(CameraMode const &camera_mode,
+void Noise::switchMode(CameraMode const &cameraMode,
[[maybe_unused]] Metadata *metadata)
{
- // For example, we would expect a 2x2 binned mode to have a "noise
- // factor" of sqrt(2x2) = 2. (can't be less than one, right?)
- mode_factor_ = std::max(1.0, camera_mode.noise_factor);
+ /*
+ * For example, we would expect a 2x2 binned mode to have a "noise
+ * factor" of sqrt(2x2) = 2. (can't be less than one, right?)
+ */
+ modeFactor_ = std::max(1.0, cameraMode.noiseFactor);
}
-void Noise::Read(boost::property_tree::ptree const &params)
+int Noise::read(const libcamera::YamlObject &params)
{
- reference_constant_ = params.get<double>("reference_constant");
- reference_slope_ = params.get<double>("reference_slope");
+ auto value = params["reference_constant"].get<double>();
+ if (!value)
+ return -EINVAL;
+ referenceConstant_ = *value;
+
+ value = params["reference_slope"].get<double>();
+ if (!value)
+ return -EINVAL;
+ referenceSlope_ = *value;
+
+ return 0;
}
-void Noise::Prepare(Metadata *image_metadata)
+void Noise::prepare(Metadata *imageMetadata)
{
- struct DeviceStatus device_status;
- device_status.analogue_gain = 1.0; // keep compiler calm
- if (image_metadata->Get("device.status", device_status) == 0) {
- // There is a slight question as to exactly how the noise
- // profile, specifically the constant part of it, scales. For
- // now we assume it all scales the same, and we'll revisit this
- // if it proves substantially wrong. NOTE: we may also want to
- // make some adjustments based on the camera mode (such as
- // binning), if we knew how to discover it...
- double factor = sqrt(device_status.analogue_gain) / mode_factor_;
+ struct DeviceStatus deviceStatus;
+ deviceStatus.analogueGain = 1.0; /* keep compiler calm */
+ if (imageMetadata->get("device.status", deviceStatus) == 0) {
+ /*
+ * There is a slight question as to exactly how the noise
+ * profile, specifically the constant part of it, scales. For
+ * now we assume it all scales the same, and we'll revisit this
+ * if it proves substantially wrong. NOTE: we may also want to
+ * make some adjustments based on the camera mode (such as
+ * binning), if we knew how to discover it...
+ */
+ double factor = sqrt(deviceStatus.analogueGain) / modeFactor_;
struct NoiseStatus status;
- status.noise_constant = reference_constant_ * factor;
- status.noise_slope = reference_slope_ * factor;
- image_metadata->Set("noise.status", status);
+ status.noiseConstant = referenceConstant_ * factor;
+ status.noiseSlope = referenceSlope_ * factor;
+ imageMetadata->set("noise.status", status);
LOG(RPiNoise, Debug)
- << "constant " << status.noise_constant
- << " slope " << status.noise_slope;
+ << "constant " << status.noiseConstant
+ << " slope " << status.noiseSlope;
} else
LOG(RPiNoise, Warning) << " no metadata";
}
-// Register algorithm with the system.
-static Algorithm *Create(Controller *controller)
+/* Register algorithm with the system. */
+static Algorithm *create(Controller *controller)
{
return new Noise(controller);
}
-static RegisterAlgorithm reg(NAME, &Create);
+static RegisterAlgorithm reg(NAME, &create);
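
Noise::prepare() above stretches the reference profile by the square root of the analogue gain and divides by the mode's noise factor. A small sketch, with hypothetical gains worked through in the comment:

#include <algorithm>
#include <cmath>

/*
 * The reference profile is measured at analogue gain 1.0, so it scales with
 * sqrt(gain) and is divided by the mode's noise factor. For instance, at gain
 * 4.0 in a 2x2 binned mode (noiseFactor 2) the factor is sqrt(4)/2 = 1 and the
 * profile is unchanged; at gain 8.0 unbinned it is scaled by ~2.83.
 */
static double noiseScaleFactor(double analogueGain, double modeFactor)
{
	return std::sqrt(analogueGain) / std::max(1.0, modeFactor);
}
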
diff --git a/src/ipa/raspberrypi/controller/rpi/noise.h b/src/ipa/raspberrypi/controller/rpi/noise.h
new file mode 100644
index 00000000..74c31e64
--- /dev/null
+++ b/src/ipa/raspberrypi/controller/rpi/noise.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * noise.h - Noise control algorithm
+ */
+#pragma once
+
+#include "../algorithm.h"
+#include "../noise_status.h"
+
+/* This is our implementation of the "noise algorithm". */
+
+namespace RPiController {
+
+class Noise : public Algorithm
+{
+public:
+ Noise(Controller *controller);
+ char const *name() const override;
+ void switchMode(CameraMode const &cameraMode, Metadata *metadata) override;
+ int read(const libcamera::YamlObject &params) override;
+ void prepare(Metadata *imageMetadata) override;
+
+private:
+ /* the noise profile for analogue gain of 1.0 */
+ double referenceConstant_;
+ double referenceSlope_;
+ double modeFactor_;
+};
+
+} /* namespace RPiController */
diff --git a/src/ipa/raspberrypi/controller/rpi/noise.hpp b/src/ipa/raspberrypi/controller/rpi/noise.hpp
deleted file mode 100644
index 1c9de5c8..00000000
--- a/src/ipa/raspberrypi/controller/rpi/noise.hpp
+++ /dev/null
@@ -1,32 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
- *
- * noise.hpp - Noise control algorithm
- */
-#pragma once
-
-#include "../algorithm.hpp"
-#include "../noise_status.h"
-
-// This is our implementation of the "noise algorithm".
-
-namespace RPiController {
-
-class Noise : public Algorithm
-{
-public:
- Noise(Controller *controller);
- char const *Name() const override;
- void SwitchMode(CameraMode const &camera_mode, Metadata *metadata) override;
- void Read(boost::property_tree::ptree const &params) override;
- void Prepare(Metadata *image_metadata) override;
-
-private:
- // the noise profile for analogue gain of 1.0
- double reference_constant_;
- double reference_slope_;
- double mode_factor_;
-};
-
-} // namespace RPiController
diff --git a/src/ipa/raspberrypi/controller/rpi/sdn.cpp b/src/ipa/raspberrypi/controller/rpi/sdn.cpp
index 93845509..b6b66251 100644
--- a/src/ipa/raspberrypi/controller/rpi/sdn.cpp
+++ b/src/ipa/raspberrypi/controller/rpi/sdn.cpp
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/*
- * Copyright (C) 2019-2021, Raspberry Pi (Trading) Limited
+ * Copyright (C) 2019-2021, Raspberry Pi Ltd
*
* sdn.cpp - SDN (spatial denoise) control algorithm
*/
@@ -10,15 +10,17 @@
#include "../denoise_status.h"
#include "../noise_status.h"
-#include "sdn.hpp"
+#include "sdn.h"
using namespace RPiController;
using namespace libcamera;
LOG_DEFINE_CATEGORY(RPiSdn)
-// Calculate settings for the spatial denoise block using the noise profile in
-// the image metadata.
+/*
+ * Calculate settings for the spatial denoise block using the noise profile in
+ * the image metadata.
+ */
#define NAME "rpi.sdn"
@@ -27,49 +29,52 @@ Sdn::Sdn(Controller *controller)
{
}
-char const *Sdn::Name() const
+char const *Sdn::name() const
{
return NAME;
}
-void Sdn::Read(boost::property_tree::ptree const &params)
+int Sdn::read(const libcamera::YamlObject &params)
{
- deviation_ = params.get<double>("deviation", 3.2);
- strength_ = params.get<double>("strength", 0.75);
+ deviation_ = params["deviation"].get<double>(3.2);
+ strength_ = params["strength"].get<double>(0.75);
+ return 0;
}
-void Sdn::Initialise() {}
+void Sdn::initialise()
+{
+}
-void Sdn::Prepare(Metadata *image_metadata)
+void Sdn::prepare(Metadata *imageMetadata)
{
- struct NoiseStatus noise_status = {};
- noise_status.noise_slope = 3.0; // in case no metadata
- if (image_metadata->Get("noise.status", noise_status) != 0)
+ struct NoiseStatus noiseStatus = {};
+ noiseStatus.noiseSlope = 3.0; /* in case no metadata */
+ if (imageMetadata->get("noise.status", noiseStatus) != 0)
LOG(RPiSdn, Warning) << "no noise profile found";
LOG(RPiSdn, Debug)
- << "Noise profile: constant " << noise_status.noise_constant
- << " slope " << noise_status.noise_slope;
+ << "Noise profile: constant " << noiseStatus.noiseConstant
+ << " slope " << noiseStatus.noiseSlope;
struct DenoiseStatus status;
- status.noise_constant = noise_status.noise_constant * deviation_;
- status.noise_slope = noise_status.noise_slope * deviation_;
+ status.noiseConstant = noiseStatus.noiseConstant * deviation_;
+ status.noiseSlope = noiseStatus.noiseSlope * deviation_;
status.strength = strength_;
status.mode = static_cast<std::underlying_type_t<DenoiseMode>>(mode_);
- image_metadata->Set("denoise.status", status);
+ imageMetadata->set("denoise.status", status);
LOG(RPiSdn, Debug)
- << "programmed constant " << status.noise_constant
- << " slope " << status.noise_slope
+ << "programmed constant " << status.noiseConstant
+ << " slope " << status.noiseSlope
<< " strength " << status.strength;
}
-void Sdn::SetMode(DenoiseMode mode)
+void Sdn::setMode(DenoiseMode mode)
{
- // We only distinguish between off and all other modes.
+ /* We only distinguish between off and all other modes. */
mode_ = mode;
}
-// Register algorithm with the system.
-static Algorithm *Create(Controller *controller)
+/* Register algorithm with the system. */
+static Algorithm *create(Controller *controller)
{
return (Algorithm *)new Sdn(controller);
}
-static RegisterAlgorithm reg(NAME, &Create);
+static RegisterAlgorithm reg(NAME, &create);
diff --git a/src/ipa/raspberrypi/controller/rpi/sdn.h b/src/ipa/raspberrypi/controller/rpi/sdn.h
new file mode 100644
index 00000000..9dd73c38
--- /dev/null
+++ b/src/ipa/raspberrypi/controller/rpi/sdn.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * sdn.h - SDN (spatial denoise) control algorithm
+ */
+#pragma once
+
+#include "../algorithm.h"
+#include "../denoise_algorithm.h"
+
+namespace RPiController {
+
+/* Algorithm to calculate correct spatial denoise (SDN) settings. */
+
+class Sdn : public DenoiseAlgorithm
+{
+public:
+ Sdn(Controller *controller = NULL);
+ char const *name() const override;
+ int read(const libcamera::YamlObject &params) override;
+ void initialise() override;
+ void prepare(Metadata *imageMetadata) override;
+ void setMode(DenoiseMode mode) override;
+
+private:
+ double deviation_;
+ double strength_;
+ DenoiseMode mode_;
+};
+
+} /* namespace RPiController */
diff --git a/src/ipa/raspberrypi/controller/rpi/sdn.hpp b/src/ipa/raspberrypi/controller/rpi/sdn.hpp
deleted file mode 100644
index 2371ce04..00000000
--- a/src/ipa/raspberrypi/controller/rpi/sdn.hpp
+++ /dev/null
@@ -1,32 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
- *
- * sdn.hpp - SDN (spatial denoise) control algorithm
- */
-#pragma once
-
-#include "../algorithm.hpp"
-#include "../denoise_algorithm.hpp"
-
-namespace RPiController {
-
-// Algorithm to calculate correct spatial denoise (SDN) settings.
-
-class Sdn : public DenoiseAlgorithm
-{
-public:
- Sdn(Controller *controller = NULL);
- char const *Name() const override;
- void Read(boost::property_tree::ptree const &params) override;
- void Initialise() override;
- void Prepare(Metadata *image_metadata) override;
- void SetMode(DenoiseMode mode) override;
-
-private:
- double deviation_;
- double strength_;
- DenoiseMode mode_;
-};
-
-} // namespace RPiController
diff --git a/src/ipa/raspberrypi/controller/rpi/sharpen.cpp b/src/ipa/raspberrypi/controller/rpi/sharpen.cpp
index 18825a43..4f6f020a 100644
--- a/src/ipa/raspberrypi/controller/rpi/sharpen.cpp
+++ b/src/ipa/raspberrypi/controller/rpi/sharpen.cpp
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
+ * Copyright (C) 2019, Raspberry Pi Ltd
*
* sharpen.cpp - sharpening control algorithm
*/
@@ -11,7 +11,7 @@
#include "../sharpen_status.h"
-#include "sharpen.hpp"
+#include "sharpen.h"
using namespace RPiController;
using namespace libcamera;
@@ -21,65 +21,72 @@ LOG_DEFINE_CATEGORY(RPiSharpen)
#define NAME "rpi.sharpen"
Sharpen::Sharpen(Controller *controller)
- : SharpenAlgorithm(controller), user_strength_(1.0)
+ : SharpenAlgorithm(controller), userStrength_(1.0)
{
}
-char const *Sharpen::Name() const
+char const *Sharpen::name() const
{
return NAME;
}
-void Sharpen::SwitchMode(CameraMode const &camera_mode,
+void Sharpen::switchMode(CameraMode const &cameraMode,
[[maybe_unused]] Metadata *metadata)
{
- // can't be less than one, right?
- mode_factor_ = std::max(1.0, camera_mode.noise_factor);
+ /* can't be less than one, right? */
+ modeFactor_ = std::max(1.0, cameraMode.noiseFactor);
}
-void Sharpen::Read(boost::property_tree::ptree const &params)
+int Sharpen::read(const libcamera::YamlObject &params)
{
- threshold_ = params.get<double>("threshold", 1.0);
- strength_ = params.get<double>("strength", 1.0);
- limit_ = params.get<double>("limit", 1.0);
+ threshold_ = params["threshold"].get<double>(1.0);
+ strength_ = params["strength"].get<double>(1.0);
+ limit_ = params["limit"].get<double>(1.0);
LOG(RPiSharpen, Debug)
<< "Read threshold " << threshold_
<< " strength " << strength_
<< " limit " << limit_;
+ return 0;
}
-void Sharpen::SetStrength(double strength)
+void Sharpen::setStrength(double strength)
{
- // Note that this function is how an application sets the overall
- // sharpening "strength". We call this the "user strength" field
- // as there already is a strength_ field - being an internal gain
- // parameter that gets passed to the ISP control code. Negative
- // values are not allowed - coerce them to zero (no sharpening).
- user_strength_ = std::max(0.0, strength);
+ /*
+ * Note that this function is how an application sets the overall
+ * sharpening "strength". We call this the "user strength" field
+ * as there already is a strength_ field - being an internal gain
+ * parameter that gets passed to the ISP control code. Negative
+ * values are not allowed - coerce them to zero (no sharpening).
+ */
+ userStrength_ = std::max(0.0, strength);
}
-void Sharpen::Prepare(Metadata *image_metadata)
+void Sharpen::prepare(Metadata *imageMetadata)
{
- // The user_strength_ affects the algorithm's internal gain directly, but
- // we adjust the limit and threshold less aggressively. Using a sqrt
- // function is an arbitrary but gentle way of accomplishing this.
- double user_strength_sqrt = sqrt(user_strength_);
+ /*
+ * The userStrength_ affects the algorithm's internal gain directly, but
+ * we adjust the limit and threshold less aggressively. Using a sqrt
+ * function is an arbitrary but gentle way of accomplishing this.
+ */
+ double userStrengthSqrt = sqrt(userStrength_);
struct SharpenStatus status;
- // Binned modes seem to need the sharpening toned down with this
- // pipeline, thus we use the mode_factor here. Also avoid
- // divide-by-zero with the user_strength_sqrt.
- status.threshold = threshold_ * mode_factor_ /
- std::max(0.01, user_strength_sqrt);
- status.strength = strength_ / mode_factor_ * user_strength_;
- status.limit = limit_ / mode_factor_ * user_strength_sqrt;
- // Finally, report any application-supplied parameters that were used.
- status.user_strength = user_strength_;
- image_metadata->Set("sharpen.status", status);
+ /*
+ * Binned modes seem to need the sharpening toned down with this
+ * pipeline, thus we use the modeFactor_ here. Also avoid
+ * divide-by-zero with the userStrengthSqrt.
+ */
+ status.threshold = threshold_ * modeFactor_ /
+ std::max(0.01, userStrengthSqrt);
+ status.strength = strength_ / modeFactor_ * userStrength_;
+ status.limit = limit_ / modeFactor_ * userStrengthSqrt;
+ /* Finally, report any application-supplied parameters that were used. */
+ status.userStrength = userStrength_;
+ imageMetadata->set("sharpen.status", status);
}
-// Register algorithm with the system.
-static Algorithm *Create(Controller *controller)
+/* Register algorithm with the system. */
+static Algorithm *create(Controller *controller)
{
return new Sharpen(controller);
}
-static RegisterAlgorithm reg(NAME, &Create);
+static RegisterAlgorithm reg(NAME, &create);
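
For reference, the user-strength scaling that prepare() now reports can be exercised in isolation. The following standalone sketch is not part of the patch: the tuning values, the camera-mode factor and the helper names are placeholders, and only the arithmetic mirrors the converted code above (negative strengths coerced to zero as in setStrength(), threshold and limit adjusted through a sqrt of the user strength, all three values scaled by the mode factor).

/*
 * Standalone sketch of the user-strength scaling performed in
 * Sharpen::prepare(). Tuning values and modeFactor are placeholders.
 */
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <initializer_list>

struct SharpenParams {
	double threshold; /* smallest level of detail picked up */
	double strength;  /* response ramp above the threshold */
	double limit;     /* upper bound on the sharpening response */
};

static SharpenParams applyUserStrength(const SharpenParams &tuning,
				       double modeFactor, double userStrength)
{
	/* Negative values are not allowed - coerce them to zero. */
	userStrength = std::max(0.0, userStrength);

	/* Threshold and limit are adjusted less aggressively, via sqrt. */
	double userStrengthSqrt = std::sqrt(userStrength);

	SharpenParams out;
	out.threshold = tuning.threshold * modeFactor /
			std::max(0.01, userStrengthSqrt);
	out.strength = tuning.strength / modeFactor * userStrength;
	out.limit = tuning.limit / modeFactor * userStrengthSqrt;
	return out;
}

int main()
{
	/* Defaults match the fallback values used by Sharpen::read(). */
	SharpenParams tuning{ 1.0, 1.0, 1.0 };

	for (double user : { 0.0, 0.5, 1.0, 2.0 }) {
		SharpenParams p = applyUserStrength(tuning, 1.0, user);
		std::printf("user %.1f -> threshold %.3f strength %.3f limit %.3f\n",
			    user, p.threshold, p.strength, p.limit);
	}

	return 0;
}

With a user strength of zero the reported strength and limit collapse to zero and the threshold grows large, i.e. sharpening is effectively disabled, which matches the intent described in the comments above.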
diff --git a/src/ipa/raspberrypi/controller/rpi/sharpen.h b/src/ipa/raspberrypi/controller/rpi/sharpen.h
new file mode 100644
index 00000000..8bb7631e
--- /dev/null
+++ b/src/ipa/raspberrypi/controller/rpi/sharpen.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * sharpen.h - sharpening control algorithm
+ */
+#pragma once
+
+#include "../sharpen_algorithm.h"
+#include "../sharpen_status.h"
+
+/* This is our implementation of the "sharpen algorithm". */
+
+namespace RPiController {
+
+class Sharpen : public SharpenAlgorithm
+{
+public:
+ Sharpen(Controller *controller);
+ char const *name() const override;
+ void switchMode(CameraMode const &cameraMode, Metadata *metadata) override;
+ int read(const libcamera::YamlObject &params) override;
+ void setStrength(double strength) override;
+ void prepare(Metadata *imageMetadata) override;
+
+private:
+ double threshold_;
+ double strength_;
+ double limit_;
+ double modeFactor_;
+ double userStrength_;
+};
+
+} /* namespace RPiController */
diff --git a/src/ipa/raspberrypi/controller/rpi/sharpen.hpp b/src/ipa/raspberrypi/controller/rpi/sharpen.hpp
deleted file mode 100644
index 13a076a8..00000000
--- a/src/ipa/raspberrypi/controller/rpi/sharpen.hpp
+++ /dev/null
@@ -1,34 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
- *
- * sharpen.hpp - sharpening control algorithm
- */
-#pragma once
-
-#include "../sharpen_algorithm.hpp"
-#include "../sharpen_status.h"
-
-// This is our implementation of the "sharpen algorithm".
-
-namespace RPiController {
-
-class Sharpen : public SharpenAlgorithm
-{
-public:
- Sharpen(Controller *controller);
- char const *Name() const override;
- void SwitchMode(CameraMode const &camera_mode, Metadata *metadata) override;
- void Read(boost::property_tree::ptree const &params) override;
- void SetStrength(double strength) override;
- void Prepare(Metadata *image_metadata) override;
-
-private:
- double threshold_;
- double strength_;
- double limit_;
- double mode_factor_;
- double user_strength_;
-};
-
-} // namespace RPiController
diff --git a/src/ipa/raspberrypi/controller/sharpen_algorithm.h b/src/ipa/raspberrypi/controller/sharpen_algorithm.h
new file mode 100644
index 00000000..3be21c32
--- /dev/null
+++ b/src/ipa/raspberrypi/controller/sharpen_algorithm.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2020, Raspberry Pi Ltd
+ *
+ * sharpen_algorithm.h - sharpness control algorithm interface
+ */
+#pragma once
+
+#include "algorithm.h"
+
+namespace RPiController {
+
+class SharpenAlgorithm : public Algorithm
+{
+public:
+ SharpenAlgorithm(Controller *controller) : Algorithm(controller) {}
+ /* A sharpness control algorithm must provide the following: */
+ virtual void setStrength(double strength) = 0;
+};
+
+} /* namespace RPiController */
diff --git a/src/ipa/raspberrypi/controller/sharpen_algorithm.hpp b/src/ipa/raspberrypi/controller/sharpen_algorithm.hpp
deleted file mode 100644
index ca800308..00000000
--- a/src/ipa/raspberrypi/controller/sharpen_algorithm.hpp
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2020, Raspberry Pi (Trading) Limited
- *
- * sharpen_algorithm.hpp - sharpness control algorithm interface
- */
-#pragma once
-
-#include "algorithm.hpp"
-
-namespace RPiController {
-
-class SharpenAlgorithm : public Algorithm
-{
-public:
- SharpenAlgorithm(Controller *controller) : Algorithm(controller) {}
- // A sharpness control algorithm must provide the following:
- virtual void SetStrength(double strength) = 0;
-};
-
-} // namespace RPiController
diff --git a/src/ipa/raspberrypi/controller/sharpen_status.h b/src/ipa/raspberrypi/controller/sharpen_status.h
index 7501b191..106166db 100644
--- a/src/ipa/raspberrypi/controller/sharpen_status.h
+++ b/src/ipa/raspberrypi/controller/sharpen_status.h
@@ -1,28 +1,20 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
+ * Copyright (C) 2019, Raspberry Pi Ltd
*
* sharpen_status.h - Sharpen control algorithm status
*/
#pragma once
-// The "sharpen" algorithm stores the strength to use.
-
-#ifdef __cplusplus
-extern "C" {
-#endif
+/* The "sharpen" algorithm stores the strength to use. */
struct SharpenStatus {
- // controls the smallest level of detail (or noise!) that sharpening will pick up
+ /* controls the smallest level of detail (or noise!) that sharpening will pick up */
double threshold;
- // the rate at which the sharpening response ramps once above the threshold
+ /* the rate at which the sharpening response ramps once above the threshold */
double strength;
- // upper limit of the allowed sharpening response
+ /* upper limit of the allowed sharpening response */
double limit;
- // The sharpening strength requested by the user or application.
- double user_strength;
+ /* The sharpening strength requested by the user or application. */
+ double userStrength;
};
-
-#ifdef __cplusplus
-}
-#endif
diff --git a/src/ipa/raspberrypi/data/imx219.json b/src/ipa/raspberrypi/data/imx219.json
index de59d936..efe7210a 100644
--- a/src/ipa/raspberrypi/data/imx219.json
+++ b/src/ipa/raspberrypi/data/imx219.json
@@ -1,412 +1,486 @@
{
- "rpi.black_level":
- {
- "black_level": 4096
- },
- "rpi.dpc":
- {
-
- },
- "rpi.lux":
- {
- "reference_shutter_speed": 27685,
- "reference_gain": 1.0,
- "reference_aperture": 1.0,
- "reference_lux": 998,
- "reference_Y": 12744
- },
- "rpi.noise":
- {
- "reference_constant": 0,
- "reference_slope": 3.67
- },
- "rpi.geq":
- {
- "offset": 204,
- "slope": 0.01633
- },
- "rpi.sdn":
- {
-
- },
- "rpi.awb":
- {
- "priors":
- [
- {
- "lux": 0, "prior":
- [
- 2000, 1.0, 3000, 0.0, 13000, 0.0
- ]
- },
- {
- "lux": 800, "prior":
- [
- 2000, 0.0, 6000, 2.0, 13000, 2.0
- ]
- },
+ "version": 2.0,
+ "target": "bcm2835",
+ "algorithms": [
+ {
+ "rpi.black_level":
{
- "lux": 1500, "prior":
- [
- 2000, 0.0, 4000, 1.0, 6000, 6.0, 6500, 7.0, 7000, 1.0, 13000, 1.0
- ]
+ "black_level": 4096
}
- ],
- "modes":
+ },
{
- "auto":
- {
- "lo": 2500,
- "hi": 8000
- },
- "incandescent":
- {
- "lo": 2500,
- "hi": 3000
- },
- "tungsten":
- {
- "lo": 3000,
- "hi": 3500
- },
- "fluorescent":
- {
- "lo": 4000,
- "hi": 4700
- },
- "indoor":
- {
- "lo": 3000,
- "hi": 5000
- },
- "daylight":
- {
- "lo": 5500,
- "hi": 6500
- },
- "cloudy":
+ "rpi.dpc": { }
+ },
+ {
+ "rpi.lux":
{
- "lo": 7000,
- "hi": 8600
+ "reference_shutter_speed": 27685,
+ "reference_gain": 1.0,
+ "reference_aperture": 1.0,
+ "reference_lux": 998,
+ "reference_Y": 12744
}
},
- "bayes": 1,
- "ct_curve":
- [
- 2498.0, 0.9309, 0.3599, 2911.0, 0.8682, 0.4283, 2919.0, 0.8358, 0.4621, 3627.0, 0.7646, 0.5327, 4600.0, 0.6079, 0.6721, 5716.0,
- 0.5712, 0.7017, 8575.0, 0.4331, 0.8037
- ],
- "sensitivity_r": 1.05,
- "sensitivity_b": 1.05,
- "transverse_pos": 0.04791,
- "transverse_neg": 0.04881
- },
- "rpi.agc":
- {
- "metering_modes":
{
- "centre-weighted":
- {
- "weights":
- [
- 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
- ]
- },
- "spot":
- {
- "weights":
- [
- 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
- ]
- },
- "matrix":
+ "rpi.noise":
{
- "weights":
- [
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
- ]
+ "reference_constant": 0,
+ "reference_slope": 3.67
}
},
- "exposure_modes":
{
- "normal":
+ "rpi.geq":
{
- "shutter":
- [
- 100, 10000, 30000, 60000, 66666
- ],
- "gain":
- [
- 1.0, 2.0, 4.0, 6.0, 8.0
- ]
- },
- "short":
+ "offset": 204,
+ "slope": 0.01633
+ }
+ },
+ {
+ "rpi.sdn": { }
+ },
+ {
+ "rpi.awb":
{
- "shutter":
- [
- 100, 5000, 10000, 20000, 33333
+ "priors": [
+ {
+ "lux": 0,
+ "prior":
+ [
+ 2000, 1.0,
+ 3000, 0.0,
+ 13000, 0.0
+ ]
+ },
+ {
+ "lux": 800,
+ "prior":
+ [
+ 2000, 0.0,
+ 6000, 2.0,
+ 13000, 2.0
+ ]
+ },
+ {
+ "lux": 1500,
+ "prior":
+ [
+ 2000, 0.0,
+ 4000, 1.0,
+ 6000, 6.0,
+ 6500, 7.0,
+ 7000, 1.0,
+ 13000, 1.0
+ ]
+ }
],
- "gain":
- [
- 1.0, 2.0, 4.0, 6.0, 8.0
- ]
- },
- "long":
- {
- "shutter":
+ "modes":
+ {
+ "auto":
+ {
+ "lo": 2500,
+ "hi": 8000
+ },
+ "incandescent":
+ {
+ "lo": 2500,
+ "hi": 3000
+ },
+ "tungsten":
+ {
+ "lo": 3000,
+ "hi": 3500
+ },
+ "fluorescent":
+ {
+ "lo": 4000,
+ "hi": 4700
+ },
+ "indoor":
+ {
+ "lo": 3000,
+ "hi": 5000
+ },
+ "daylight":
+ {
+ "lo": 5500,
+ "hi": 6500
+ },
+ "cloudy":
+ {
+ "lo": 7000,
+ "hi": 8600
+ }
+ },
+ "bayes": 1,
+ "ct_curve":
[
- 100, 10000, 30000, 60000, 120000
+ 2498.0, 0.9309, 0.3599,
+ 2911.0, 0.8682, 0.4283,
+ 2919.0, 0.8358, 0.4621,
+ 3627.0, 0.7646, 0.5327,
+ 4600.0, 0.6079, 0.6721,
+ 5716.0, 0.5712, 0.7017,
+ 8575.0, 0.4331, 0.8037
],
- "gain":
- [
- 1.0, 2.0, 4.0, 6.0, 12.0
- ]
+ "sensitivity_r": 1.05,
+ "sensitivity_b": 1.05,
+ "transverse_pos": 0.04791,
+ "transverse_neg": 0.04881
}
},
- "constraint_modes":
{
- "normal":
- [
+ "rpi.agc":
+ {
+ "metering_modes":
{
- "bound": "LOWER", "q_lo": 0.98, "q_hi": 1.0, "y_target":
- [
- 0, 0.5, 1000, 0.5
- ]
- }
- ],
- "highlight":
- [
+ "centre-weighted":
+ {
+ "weights": [ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0 ]
+ },
+ "spot":
+ {
+ "weights": [ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
+ },
+ "matrix":
+ {
+ "weights": [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
+ }
+ },
+ "exposure_modes":
{
- "bound": "LOWER", "q_lo": 0.98, "q_hi": 1.0, "y_target":
- [
- 0, 0.5, 1000, 0.5
- ]
+ "normal":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 66666 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 33333 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
},
+ "constraint_modes":
{
- "bound": "UPPER", "q_lo": 0.98, "q_hi": 1.0, "y_target":
- [
- 0, 0.8, 1000, 0.8
- ]
- }
- ],
- "shadows":
- [
- {
- "bound": "LOWER", "q_lo": 0.0, "q_hi": 0.5, "y_target":
- [
- 0, 0.17, 1000, 0.17
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ],
+ "shadows": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.0,
+ "q_hi": 0.5,
+ "y_target":
+ [
+ 0, 0.17,
+ 1000, 0.17
+ ]
+ }
]
- }
- ]
- },
- "y_target":
- [
- 0, 0.16, 1000, 0.165, 10000, 0.17
- ]
- },
- "rpi.alsc":
- {
- "omega": 1.3,
- "n_iter": 100,
- "luminance_strength": 0.7,
- "calibrations_Cr":
- [
- {
- "ct": 3000, "table":
- [
- 1.487, 1.481, 1.481, 1.445, 1.389, 1.327, 1.307, 1.307, 1.307, 1.309, 1.341, 1.405, 1.458, 1.494, 1.494, 1.497,
- 1.491, 1.481, 1.448, 1.397, 1.331, 1.275, 1.243, 1.229, 1.229, 1.249, 1.287, 1.349, 1.409, 1.463, 1.494, 1.497,
- 1.491, 1.469, 1.405, 1.331, 1.275, 1.217, 1.183, 1.172, 1.172, 1.191, 1.231, 1.287, 1.349, 1.424, 1.484, 1.499,
- 1.487, 1.444, 1.363, 1.283, 1.217, 1.183, 1.148, 1.138, 1.138, 1.159, 1.191, 1.231, 1.302, 1.385, 1.461, 1.492,
- 1.481, 1.423, 1.334, 1.253, 1.189, 1.148, 1.135, 1.119, 1.123, 1.137, 1.159, 1.203, 1.272, 1.358, 1.442, 1.488,
- 1.479, 1.413, 1.321, 1.236, 1.176, 1.139, 1.118, 1.114, 1.116, 1.123, 1.149, 1.192, 1.258, 1.344, 1.432, 1.487,
- 1.479, 1.413, 1.321, 1.236, 1.176, 1.139, 1.116, 1.114, 1.115, 1.123, 1.149, 1.192, 1.258, 1.344, 1.432, 1.487,
- 1.479, 1.425, 1.336, 1.251, 1.189, 1.149, 1.136, 1.118, 1.121, 1.138, 1.158, 1.206, 1.275, 1.358, 1.443, 1.488,
- 1.488, 1.448, 1.368, 1.285, 1.219, 1.189, 1.149, 1.139, 1.139, 1.158, 1.195, 1.235, 1.307, 1.387, 1.462, 1.493,
- 1.496, 1.475, 1.411, 1.337, 1.284, 1.219, 1.189, 1.176, 1.176, 1.195, 1.235, 1.296, 1.356, 1.429, 1.487, 1.501,
- 1.495, 1.489, 1.458, 1.407, 1.337, 1.287, 1.253, 1.239, 1.239, 1.259, 1.296, 1.356, 1.419, 1.472, 1.499, 1.499,
- 1.494, 1.489, 1.489, 1.453, 1.398, 1.336, 1.317, 1.317, 1.317, 1.321, 1.351, 1.416, 1.467, 1.501, 1.501, 1.499
- ]
- },
- {
- "ct": 3850, "table":
- [
- 1.694, 1.688, 1.688, 1.649, 1.588, 1.518, 1.495, 1.495, 1.495, 1.497, 1.532, 1.602, 1.659, 1.698, 1.698, 1.703,
- 1.698, 1.688, 1.653, 1.597, 1.525, 1.464, 1.429, 1.413, 1.413, 1.437, 1.476, 1.542, 1.606, 1.665, 1.698, 1.703,
- 1.697, 1.673, 1.605, 1.525, 1.464, 1.401, 1.369, 1.354, 1.354, 1.377, 1.417, 1.476, 1.542, 1.623, 1.687, 1.705,
- 1.692, 1.646, 1.561, 1.472, 1.401, 1.368, 1.337, 1.323, 1.324, 1.348, 1.377, 1.417, 1.492, 1.583, 1.661, 1.697,
- 1.686, 1.625, 1.528, 1.439, 1.372, 1.337, 1.321, 1.311, 1.316, 1.324, 1.348, 1.389, 1.461, 1.553, 1.642, 1.694,
- 1.684, 1.613, 1.514, 1.423, 1.359, 1.328, 1.311, 1.306, 1.306, 1.316, 1.339, 1.378, 1.446, 1.541, 1.633, 1.693,
- 1.684, 1.613, 1.514, 1.423, 1.359, 1.328, 1.311, 1.305, 1.305, 1.316, 1.339, 1.378, 1.446, 1.541, 1.633, 1.693,
- 1.685, 1.624, 1.529, 1.438, 1.372, 1.336, 1.324, 1.309, 1.314, 1.323, 1.348, 1.392, 1.462, 1.555, 1.646, 1.694,
- 1.692, 1.648, 1.561, 1.473, 1.403, 1.372, 1.336, 1.324, 1.324, 1.348, 1.378, 1.423, 1.495, 1.585, 1.667, 1.701,
- 1.701, 1.677, 1.608, 1.527, 1.471, 1.403, 1.375, 1.359, 1.359, 1.378, 1.423, 1.488, 1.549, 1.631, 1.694, 1.709,
- 1.702, 1.694, 1.656, 1.601, 1.527, 1.473, 1.441, 1.424, 1.424, 1.443, 1.488, 1.549, 1.621, 1.678, 1.706, 1.707,
- 1.699, 1.694, 1.694, 1.654, 1.593, 1.525, 1.508, 1.508, 1.508, 1.509, 1.546, 1.614, 1.674, 1.708, 1.708, 1.707
- ]
- },
- {
- "ct": 6000, "table":
+ },
+ "y_target":
[
- 2.179, 2.176, 2.176, 2.125, 2.048, 1.975, 1.955, 1.954, 1.954, 1.956, 1.993, 2.071, 2.141, 2.184, 2.185, 2.188,
- 2.189, 2.176, 2.128, 2.063, 1.973, 1.908, 1.872, 1.856, 1.856, 1.876, 1.922, 1.999, 2.081, 2.144, 2.184, 2.192,
- 2.187, 2.152, 2.068, 1.973, 1.907, 1.831, 1.797, 1.786, 1.786, 1.804, 1.853, 1.922, 1.999, 2.089, 2.166, 2.191,
- 2.173, 2.117, 2.013, 1.908, 1.831, 1.791, 1.755, 1.749, 1.749, 1.767, 1.804, 1.853, 1.939, 2.041, 2.135, 2.181,
- 2.166, 2.089, 1.975, 1.869, 1.792, 1.755, 1.741, 1.731, 1.734, 1.749, 1.767, 1.818, 1.903, 2.005, 2.111, 2.173,
- 2.165, 2.074, 1.956, 1.849, 1.777, 1.742, 1.729, 1.725, 1.729, 1.734, 1.758, 1.804, 1.884, 1.991, 2.099, 2.172,
- 2.165, 2.074, 1.956, 1.849, 1.777, 1.742, 1.727, 1.724, 1.725, 1.734, 1.758, 1.804, 1.884, 1.991, 2.099, 2.172,
- 2.166, 2.085, 1.975, 1.869, 1.791, 1.755, 1.741, 1.729, 1.733, 1.749, 1.769, 1.819, 1.904, 2.009, 2.114, 2.174,
- 2.174, 2.118, 2.015, 1.913, 1.831, 1.791, 1.755, 1.749, 1.749, 1.769, 1.811, 1.855, 1.943, 2.047, 2.139, 2.183,
- 2.187, 2.151, 2.072, 1.979, 1.911, 1.831, 1.801, 1.791, 1.791, 1.811, 1.855, 1.933, 2.006, 2.101, 2.173, 2.197,
- 2.189, 2.178, 2.132, 2.069, 1.979, 1.913, 1.879, 1.867, 1.867, 1.891, 1.933, 2.006, 2.091, 2.156, 2.195, 2.197,
- 2.181, 2.179, 2.178, 2.131, 2.057, 1.981, 1.965, 1.965, 1.965, 1.969, 1.999, 2.083, 2.153, 2.197, 2.197, 2.196
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
]
}
- ],
- "calibrations_Cb":
- [
- {
- "ct": 3000, "table":
- [
- 1.967, 1.961, 1.955, 1.953, 1.954, 1.957, 1.961, 1.963, 1.963, 1.961, 1.959, 1.957, 1.954, 1.951, 1.951, 1.955,
- 1.961, 1.959, 1.957, 1.956, 1.962, 1.967, 1.975, 1.979, 1.979, 1.975, 1.971, 1.967, 1.957, 1.952, 1.951, 1.951,
- 1.959, 1.959, 1.959, 1.966, 1.976, 1.989, 1.999, 2.004, 2.003, 1.997, 1.991, 1.981, 1.967, 1.956, 1.951, 1.951,
- 1.959, 1.962, 1.967, 1.978, 1.993, 2.009, 2.021, 2.028, 2.026, 2.021, 2.011, 1.995, 1.981, 1.964, 1.953, 1.951,
- 1.961, 1.965, 1.977, 1.993, 2.009, 2.023, 2.041, 2.047, 2.047, 2.037, 2.024, 2.011, 1.995, 1.975, 1.958, 1.953,
- 1.963, 1.968, 1.981, 2.001, 2.019, 2.039, 2.046, 2.052, 2.052, 2.051, 2.035, 2.021, 2.001, 1.978, 1.959, 1.955,
- 1.961, 1.966, 1.981, 2.001, 2.019, 2.038, 2.043, 2.051, 2.052, 2.042, 2.034, 2.019, 2.001, 1.978, 1.959, 1.954,
- 1.957, 1.961, 1.972, 1.989, 2.003, 2.021, 2.038, 2.039, 2.039, 2.034, 2.019, 2.004, 1.988, 1.971, 1.954, 1.949,
- 1.952, 1.953, 1.959, 1.972, 1.989, 2.003, 2.016, 2.019, 2.019, 2.014, 2.003, 1.988, 1.971, 1.955, 1.948, 1.947,
- 1.949, 1.948, 1.949, 1.957, 1.971, 1.978, 1.991, 1.994, 1.994, 1.989, 1.979, 1.967, 1.954, 1.946, 1.947, 1.947,
- 1.949, 1.946, 1.944, 1.946, 1.949, 1.954, 1.962, 1.967, 1.967, 1.963, 1.956, 1.948, 1.943, 1.943, 1.946, 1.949,
- 1.951, 1.946, 1.944, 1.942, 1.943, 1.943, 1.947, 1.948, 1.949, 1.947, 1.945, 1.941, 1.938, 1.939, 1.948, 1.952
- ]
- },
- {
- "ct": 3850, "table":
- [
- 1.726, 1.724, 1.722, 1.723, 1.731, 1.735, 1.743, 1.746, 1.746, 1.741, 1.735, 1.729, 1.725, 1.721, 1.721, 1.721,
- 1.724, 1.723, 1.723, 1.727, 1.735, 1.744, 1.749, 1.756, 1.756, 1.749, 1.744, 1.735, 1.727, 1.719, 1.719, 1.719,
- 1.723, 1.723, 1.724, 1.735, 1.746, 1.759, 1.767, 1.775, 1.775, 1.766, 1.758, 1.746, 1.735, 1.723, 1.718, 1.716,
- 1.723, 1.725, 1.732, 1.746, 1.759, 1.775, 1.782, 1.792, 1.792, 1.782, 1.772, 1.759, 1.745, 1.729, 1.718, 1.716,
- 1.725, 1.729, 1.738, 1.756, 1.775, 1.785, 1.796, 1.803, 1.804, 1.794, 1.783, 1.772, 1.757, 1.736, 1.722, 1.718,
- 1.728, 1.731, 1.741, 1.759, 1.781, 1.795, 1.803, 1.806, 1.808, 1.805, 1.791, 1.779, 1.762, 1.739, 1.722, 1.721,
- 1.727, 1.731, 1.741, 1.759, 1.781, 1.791, 1.799, 1.804, 1.806, 1.801, 1.791, 1.779, 1.762, 1.739, 1.722, 1.717,
- 1.722, 1.724, 1.733, 1.751, 1.768, 1.781, 1.791, 1.796, 1.799, 1.791, 1.781, 1.766, 1.754, 1.731, 1.717, 1.714,
- 1.718, 1.718, 1.724, 1.737, 1.752, 1.768, 1.776, 1.782, 1.784, 1.781, 1.766, 1.754, 1.737, 1.724, 1.713, 1.709,
- 1.716, 1.715, 1.716, 1.725, 1.737, 1.749, 1.756, 1.763, 1.764, 1.762, 1.749, 1.737, 1.724, 1.717, 1.709, 1.708,
- 1.715, 1.714, 1.712, 1.715, 1.722, 1.729, 1.736, 1.741, 1.742, 1.739, 1.731, 1.723, 1.717, 1.712, 1.711, 1.709,
- 1.716, 1.714, 1.711, 1.712, 1.715, 1.719, 1.723, 1.728, 1.731, 1.729, 1.723, 1.718, 1.711, 1.711, 1.713, 1.713
- ]
- },
+ },
+ {
+ "rpi.alsc":
{
- "ct": 6000, "table":
+ "omega": 1.3,
+ "n_iter": 100,
+ "luminance_strength": 0.7,
+ "calibrations_Cr": [
+ {
+ "ct": 3000,
+ "table":
+ [
+ 1.487, 1.481, 1.481, 1.445, 1.389, 1.327, 1.307, 1.307, 1.307, 1.309, 1.341, 1.405, 1.458, 1.494, 1.494, 1.497,
+ 1.491, 1.481, 1.448, 1.397, 1.331, 1.275, 1.243, 1.229, 1.229, 1.249, 1.287, 1.349, 1.409, 1.463, 1.494, 1.497,
+ 1.491, 1.469, 1.405, 1.331, 1.275, 1.217, 1.183, 1.172, 1.172, 1.191, 1.231, 1.287, 1.349, 1.424, 1.484, 1.499,
+ 1.487, 1.444, 1.363, 1.283, 1.217, 1.183, 1.148, 1.138, 1.138, 1.159, 1.191, 1.231, 1.302, 1.385, 1.461, 1.492,
+ 1.481, 1.423, 1.334, 1.253, 1.189, 1.148, 1.135, 1.119, 1.123, 1.137, 1.159, 1.203, 1.272, 1.358, 1.442, 1.488,
+ 1.479, 1.413, 1.321, 1.236, 1.176, 1.139, 1.118, 1.114, 1.116, 1.123, 1.149, 1.192, 1.258, 1.344, 1.432, 1.487,
+ 1.479, 1.413, 1.321, 1.236, 1.176, 1.139, 1.116, 1.114, 1.115, 1.123, 1.149, 1.192, 1.258, 1.344, 1.432, 1.487,
+ 1.479, 1.425, 1.336, 1.251, 1.189, 1.149, 1.136, 1.118, 1.121, 1.138, 1.158, 1.206, 1.275, 1.358, 1.443, 1.488,
+ 1.488, 1.448, 1.368, 1.285, 1.219, 1.189, 1.149, 1.139, 1.139, 1.158, 1.195, 1.235, 1.307, 1.387, 1.462, 1.493,
+ 1.496, 1.475, 1.411, 1.337, 1.284, 1.219, 1.189, 1.176, 1.176, 1.195, 1.235, 1.296, 1.356, 1.429, 1.487, 1.501,
+ 1.495, 1.489, 1.458, 1.407, 1.337, 1.287, 1.253, 1.239, 1.239, 1.259, 1.296, 1.356, 1.419, 1.472, 1.499, 1.499,
+ 1.494, 1.489, 1.489, 1.453, 1.398, 1.336, 1.317, 1.317, 1.317, 1.321, 1.351, 1.416, 1.467, 1.501, 1.501, 1.499
+ ]
+ },
+ {
+ "ct": 3850,
+ "table":
+ [
+ 1.694, 1.688, 1.688, 1.649, 1.588, 1.518, 1.495, 1.495, 1.495, 1.497, 1.532, 1.602, 1.659, 1.698, 1.698, 1.703,
+ 1.698, 1.688, 1.653, 1.597, 1.525, 1.464, 1.429, 1.413, 1.413, 1.437, 1.476, 1.542, 1.606, 1.665, 1.698, 1.703,
+ 1.697, 1.673, 1.605, 1.525, 1.464, 1.401, 1.369, 1.354, 1.354, 1.377, 1.417, 1.476, 1.542, 1.623, 1.687, 1.705,
+ 1.692, 1.646, 1.561, 1.472, 1.401, 1.368, 1.337, 1.323, 1.324, 1.348, 1.377, 1.417, 1.492, 1.583, 1.661, 1.697,
+ 1.686, 1.625, 1.528, 1.439, 1.372, 1.337, 1.321, 1.311, 1.316, 1.324, 1.348, 1.389, 1.461, 1.553, 1.642, 1.694,
+ 1.684, 1.613, 1.514, 1.423, 1.359, 1.328, 1.311, 1.306, 1.306, 1.316, 1.339, 1.378, 1.446, 1.541, 1.633, 1.693,
+ 1.684, 1.613, 1.514, 1.423, 1.359, 1.328, 1.311, 1.305, 1.305, 1.316, 1.339, 1.378, 1.446, 1.541, 1.633, 1.693,
+ 1.685, 1.624, 1.529, 1.438, 1.372, 1.336, 1.324, 1.309, 1.314, 1.323, 1.348, 1.392, 1.462, 1.555, 1.646, 1.694,
+ 1.692, 1.648, 1.561, 1.473, 1.403, 1.372, 1.336, 1.324, 1.324, 1.348, 1.378, 1.423, 1.495, 1.585, 1.667, 1.701,
+ 1.701, 1.677, 1.608, 1.527, 1.471, 1.403, 1.375, 1.359, 1.359, 1.378, 1.423, 1.488, 1.549, 1.631, 1.694, 1.709,
+ 1.702, 1.694, 1.656, 1.601, 1.527, 1.473, 1.441, 1.424, 1.424, 1.443, 1.488, 1.549, 1.621, 1.678, 1.706, 1.707,
+ 1.699, 1.694, 1.694, 1.654, 1.593, 1.525, 1.508, 1.508, 1.508, 1.509, 1.546, 1.614, 1.674, 1.708, 1.708, 1.707
+ ]
+ },
+ {
+ "ct": 6000,
+ "table":
+ [
+ 2.179, 2.176, 2.176, 2.125, 2.048, 1.975, 1.955, 1.954, 1.954, 1.956, 1.993, 2.071, 2.141, 2.184, 2.185, 2.188,
+ 2.189, 2.176, 2.128, 2.063, 1.973, 1.908, 1.872, 1.856, 1.856, 1.876, 1.922, 1.999, 2.081, 2.144, 2.184, 2.192,
+ 2.187, 2.152, 2.068, 1.973, 1.907, 1.831, 1.797, 1.786, 1.786, 1.804, 1.853, 1.922, 1.999, 2.089, 2.166, 2.191,
+ 2.173, 2.117, 2.013, 1.908, 1.831, 1.791, 1.755, 1.749, 1.749, 1.767, 1.804, 1.853, 1.939, 2.041, 2.135, 2.181,
+ 2.166, 2.089, 1.975, 1.869, 1.792, 1.755, 1.741, 1.731, 1.734, 1.749, 1.767, 1.818, 1.903, 2.005, 2.111, 2.173,
+ 2.165, 2.074, 1.956, 1.849, 1.777, 1.742, 1.729, 1.725, 1.729, 1.734, 1.758, 1.804, 1.884, 1.991, 2.099, 2.172,
+ 2.165, 2.074, 1.956, 1.849, 1.777, 1.742, 1.727, 1.724, 1.725, 1.734, 1.758, 1.804, 1.884, 1.991, 2.099, 2.172,
+ 2.166, 2.085, 1.975, 1.869, 1.791, 1.755, 1.741, 1.729, 1.733, 1.749, 1.769, 1.819, 1.904, 2.009, 2.114, 2.174,
+ 2.174, 2.118, 2.015, 1.913, 1.831, 1.791, 1.755, 1.749, 1.749, 1.769, 1.811, 1.855, 1.943, 2.047, 2.139, 2.183,
+ 2.187, 2.151, 2.072, 1.979, 1.911, 1.831, 1.801, 1.791, 1.791, 1.811, 1.855, 1.933, 2.006, 2.101, 2.173, 2.197,
+ 2.189, 2.178, 2.132, 2.069, 1.979, 1.913, 1.879, 1.867, 1.867, 1.891, 1.933, 2.006, 2.091, 2.156, 2.195, 2.197,
+ 2.181, 2.179, 2.178, 2.131, 2.057, 1.981, 1.965, 1.965, 1.965, 1.969, 1.999, 2.083, 2.153, 2.197, 2.197, 2.196
+ ]
+ }
+ ],
+ "calibrations_Cb": [
+ {
+ "ct": 3000,
+ "table":
+ [
+ 1.967, 1.961, 1.955, 1.953, 1.954, 1.957, 1.961, 1.963, 1.963, 1.961, 1.959, 1.957, 1.954, 1.951, 1.951, 1.955,
+ 1.961, 1.959, 1.957, 1.956, 1.962, 1.967, 1.975, 1.979, 1.979, 1.975, 1.971, 1.967, 1.957, 1.952, 1.951, 1.951,
+ 1.959, 1.959, 1.959, 1.966, 1.976, 1.989, 1.999, 2.004, 2.003, 1.997, 1.991, 1.981, 1.967, 1.956, 1.951, 1.951,
+ 1.959, 1.962, 1.967, 1.978, 1.993, 2.009, 2.021, 2.028, 2.026, 2.021, 2.011, 1.995, 1.981, 1.964, 1.953, 1.951,
+ 1.961, 1.965, 1.977, 1.993, 2.009, 2.023, 2.041, 2.047, 2.047, 2.037, 2.024, 2.011, 1.995, 1.975, 1.958, 1.953,
+ 1.963, 1.968, 1.981, 2.001, 2.019, 2.039, 2.046, 2.052, 2.052, 2.051, 2.035, 2.021, 2.001, 1.978, 1.959, 1.955,
+ 1.961, 1.966, 1.981, 2.001, 2.019, 2.038, 2.043, 2.051, 2.052, 2.042, 2.034, 2.019, 2.001, 1.978, 1.959, 1.954,
+ 1.957, 1.961, 1.972, 1.989, 2.003, 2.021, 2.038, 2.039, 2.039, 2.034, 2.019, 2.004, 1.988, 1.971, 1.954, 1.949,
+ 1.952, 1.953, 1.959, 1.972, 1.989, 2.003, 2.016, 2.019, 2.019, 2.014, 2.003, 1.988, 1.971, 1.955, 1.948, 1.947,
+ 1.949, 1.948, 1.949, 1.957, 1.971, 1.978, 1.991, 1.994, 1.994, 1.989, 1.979, 1.967, 1.954, 1.946, 1.947, 1.947,
+ 1.949, 1.946, 1.944, 1.946, 1.949, 1.954, 1.962, 1.967, 1.967, 1.963, 1.956, 1.948, 1.943, 1.943, 1.946, 1.949,
+ 1.951, 1.946, 1.944, 1.942, 1.943, 1.943, 1.947, 1.948, 1.949, 1.947, 1.945, 1.941, 1.938, 1.939, 1.948, 1.952
+ ]
+ },
+ {
+ "ct": 3850,
+ "table":
+ [
+ 1.726, 1.724, 1.722, 1.723, 1.731, 1.735, 1.743, 1.746, 1.746, 1.741, 1.735, 1.729, 1.725, 1.721, 1.721, 1.721,
+ 1.724, 1.723, 1.723, 1.727, 1.735, 1.744, 1.749, 1.756, 1.756, 1.749, 1.744, 1.735, 1.727, 1.719, 1.719, 1.719,
+ 1.723, 1.723, 1.724, 1.735, 1.746, 1.759, 1.767, 1.775, 1.775, 1.766, 1.758, 1.746, 1.735, 1.723, 1.718, 1.716,
+ 1.723, 1.725, 1.732, 1.746, 1.759, 1.775, 1.782, 1.792, 1.792, 1.782, 1.772, 1.759, 1.745, 1.729, 1.718, 1.716,
+ 1.725, 1.729, 1.738, 1.756, 1.775, 1.785, 1.796, 1.803, 1.804, 1.794, 1.783, 1.772, 1.757, 1.736, 1.722, 1.718,
+ 1.728, 1.731, 1.741, 1.759, 1.781, 1.795, 1.803, 1.806, 1.808, 1.805, 1.791, 1.779, 1.762, 1.739, 1.722, 1.721,
+ 1.727, 1.731, 1.741, 1.759, 1.781, 1.791, 1.799, 1.804, 1.806, 1.801, 1.791, 1.779, 1.762, 1.739, 1.722, 1.717,
+ 1.722, 1.724, 1.733, 1.751, 1.768, 1.781, 1.791, 1.796, 1.799, 1.791, 1.781, 1.766, 1.754, 1.731, 1.717, 1.714,
+ 1.718, 1.718, 1.724, 1.737, 1.752, 1.768, 1.776, 1.782, 1.784, 1.781, 1.766, 1.754, 1.737, 1.724, 1.713, 1.709,
+ 1.716, 1.715, 1.716, 1.725, 1.737, 1.749, 1.756, 1.763, 1.764, 1.762, 1.749, 1.737, 1.724, 1.717, 1.709, 1.708,
+ 1.715, 1.714, 1.712, 1.715, 1.722, 1.729, 1.736, 1.741, 1.742, 1.739, 1.731, 1.723, 1.717, 1.712, 1.711, 1.709,
+ 1.716, 1.714, 1.711, 1.712, 1.715, 1.719, 1.723, 1.728, 1.731, 1.729, 1.723, 1.718, 1.711, 1.711, 1.713, 1.713
+ ]
+ },
+ {
+ "ct": 6000,
+ "table":
+ [
+ 1.374, 1.372, 1.373, 1.374, 1.375, 1.378, 1.378, 1.381, 1.382, 1.382, 1.378, 1.373, 1.372, 1.369, 1.365, 1.365,
+ 1.371, 1.371, 1.372, 1.374, 1.378, 1.381, 1.384, 1.386, 1.388, 1.387, 1.384, 1.377, 1.372, 1.368, 1.364, 1.362,
+ 1.369, 1.371, 1.372, 1.377, 1.383, 1.391, 1.394, 1.396, 1.397, 1.395, 1.391, 1.382, 1.374, 1.369, 1.362, 1.361,
+ 1.369, 1.371, 1.375, 1.383, 1.391, 1.399, 1.402, 1.404, 1.405, 1.403, 1.398, 1.391, 1.379, 1.371, 1.363, 1.361,
+ 1.371, 1.373, 1.378, 1.388, 1.399, 1.407, 1.411, 1.413, 1.413, 1.411, 1.405, 1.397, 1.385, 1.374, 1.366, 1.362,
+ 1.371, 1.374, 1.379, 1.389, 1.405, 1.411, 1.414, 1.414, 1.415, 1.415, 1.411, 1.401, 1.388, 1.376, 1.367, 1.363,
+ 1.371, 1.373, 1.379, 1.389, 1.405, 1.408, 1.413, 1.414, 1.414, 1.413, 1.409, 1.401, 1.388, 1.376, 1.367, 1.362,
+ 1.366, 1.369, 1.374, 1.384, 1.396, 1.404, 1.407, 1.408, 1.408, 1.408, 1.401, 1.395, 1.382, 1.371, 1.363, 1.359,
+ 1.364, 1.365, 1.368, 1.375, 1.386, 1.396, 1.399, 1.401, 1.399, 1.399, 1.395, 1.385, 1.374, 1.365, 1.359, 1.357,
+ 1.361, 1.363, 1.365, 1.368, 1.377, 1.384, 1.388, 1.391, 1.391, 1.388, 1.385, 1.375, 1.366, 1.361, 1.358, 1.356,
+ 1.361, 1.362, 1.362, 1.364, 1.367, 1.373, 1.376, 1.377, 1.377, 1.375, 1.373, 1.366, 1.362, 1.358, 1.358, 1.358,
+ 1.361, 1.362, 1.362, 1.362, 1.363, 1.367, 1.369, 1.368, 1.367, 1.367, 1.367, 1.364, 1.358, 1.357, 1.358, 1.359
+ ]
+ }
+ ],
+ "luminance_lut":
[
- 1.374, 1.372, 1.373, 1.374, 1.375, 1.378, 1.378, 1.381, 1.382, 1.382, 1.378, 1.373, 1.372, 1.369, 1.365, 1.365,
- 1.371, 1.371, 1.372, 1.374, 1.378, 1.381, 1.384, 1.386, 1.388, 1.387, 1.384, 1.377, 1.372, 1.368, 1.364, 1.362,
- 1.369, 1.371, 1.372, 1.377, 1.383, 1.391, 1.394, 1.396, 1.397, 1.395, 1.391, 1.382, 1.374, 1.369, 1.362, 1.361,
- 1.369, 1.371, 1.375, 1.383, 1.391, 1.399, 1.402, 1.404, 1.405, 1.403, 1.398, 1.391, 1.379, 1.371, 1.363, 1.361,
- 1.371, 1.373, 1.378, 1.388, 1.399, 1.407, 1.411, 1.413, 1.413, 1.411, 1.405, 1.397, 1.385, 1.374, 1.366, 1.362,
- 1.371, 1.374, 1.379, 1.389, 1.405, 1.411, 1.414, 1.414, 1.415, 1.415, 1.411, 1.401, 1.388, 1.376, 1.367, 1.363,
- 1.371, 1.373, 1.379, 1.389, 1.405, 1.408, 1.413, 1.414, 1.414, 1.413, 1.409, 1.401, 1.388, 1.376, 1.367, 1.362,
- 1.366, 1.369, 1.374, 1.384, 1.396, 1.404, 1.407, 1.408, 1.408, 1.408, 1.401, 1.395, 1.382, 1.371, 1.363, 1.359,
- 1.364, 1.365, 1.368, 1.375, 1.386, 1.396, 1.399, 1.401, 1.399, 1.399, 1.395, 1.385, 1.374, 1.365, 1.359, 1.357,
- 1.361, 1.363, 1.365, 1.368, 1.377, 1.384, 1.388, 1.391, 1.391, 1.388, 1.385, 1.375, 1.366, 1.361, 1.358, 1.356,
- 1.361, 1.362, 1.362, 1.364, 1.367, 1.373, 1.376, 1.377, 1.377, 1.375, 1.373, 1.366, 1.362, 1.358, 1.358, 1.358,
- 1.361, 1.362, 1.362, 1.362, 1.363, 1.367, 1.369, 1.368, 1.367, 1.367, 1.367, 1.364, 1.358, 1.357, 1.358, 1.359
- ]
+ 2.716, 2.568, 2.299, 2.065, 1.845, 1.693, 1.605, 1.597, 1.596, 1.634, 1.738, 1.914, 2.145, 2.394, 2.719, 2.901,
+ 2.593, 2.357, 2.093, 1.876, 1.672, 1.528, 1.438, 1.393, 1.394, 1.459, 1.569, 1.731, 1.948, 2.169, 2.481, 2.756,
+ 2.439, 2.197, 1.922, 1.691, 1.521, 1.365, 1.266, 1.222, 1.224, 1.286, 1.395, 1.573, 1.747, 1.988, 2.299, 2.563,
+ 2.363, 2.081, 1.797, 1.563, 1.376, 1.244, 1.152, 1.099, 1.101, 1.158, 1.276, 1.421, 1.607, 1.851, 2.163, 2.455,
+ 2.342, 2.003, 1.715, 1.477, 1.282, 1.152, 1.074, 1.033, 1.035, 1.083, 1.163, 1.319, 1.516, 1.759, 2.064, 2.398,
+ 2.342, 1.985, 1.691, 1.446, 1.249, 1.111, 1.034, 1.004, 1.004, 1.028, 1.114, 1.274, 1.472, 1.716, 2.019, 2.389,
+ 2.342, 1.991, 1.691, 1.446, 1.249, 1.112, 1.034, 1.011, 1.005, 1.035, 1.114, 1.274, 1.472, 1.716, 2.019, 2.389,
+ 2.365, 2.052, 1.751, 1.499, 1.299, 1.171, 1.089, 1.039, 1.042, 1.084, 1.162, 1.312, 1.516, 1.761, 2.059, 2.393,
+ 2.434, 2.159, 1.856, 1.601, 1.403, 1.278, 1.166, 1.114, 1.114, 1.162, 1.266, 1.402, 1.608, 1.847, 2.146, 2.435,
+ 2.554, 2.306, 2.002, 1.748, 1.563, 1.396, 1.299, 1.247, 1.243, 1.279, 1.386, 1.551, 1.746, 1.977, 2.272, 2.518,
+ 2.756, 2.493, 2.195, 1.947, 1.739, 1.574, 1.481, 1.429, 1.421, 1.457, 1.559, 1.704, 1.929, 2.159, 2.442, 2.681,
+ 2.935, 2.739, 2.411, 2.151, 1.922, 1.749, 1.663, 1.628, 1.625, 1.635, 1.716, 1.872, 2.113, 2.368, 2.663, 2.824
+ ],
+ "sigma": 0.00381,
+ "sigma_Cb": 0.00216
}
- ],
- "luminance_lut":
- [
- 2.716, 2.568, 2.299, 2.065, 1.845, 1.693, 1.605, 1.597, 1.596, 1.634, 1.738, 1.914, 2.145, 2.394, 2.719, 2.901,
- 2.593, 2.357, 2.093, 1.876, 1.672, 1.528, 1.438, 1.393, 1.394, 1.459, 1.569, 1.731, 1.948, 2.169, 2.481, 2.756,
- 2.439, 2.197, 1.922, 1.691, 1.521, 1.365, 1.266, 1.222, 1.224, 1.286, 1.395, 1.573, 1.747, 1.988, 2.299, 2.563,
- 2.363, 2.081, 1.797, 1.563, 1.376, 1.244, 1.152, 1.099, 1.101, 1.158, 1.276, 1.421, 1.607, 1.851, 2.163, 2.455,
- 2.342, 2.003, 1.715, 1.477, 1.282, 1.152, 1.074, 1.033, 1.035, 1.083, 1.163, 1.319, 1.516, 1.759, 2.064, 2.398,
- 2.342, 1.985, 1.691, 1.446, 1.249, 1.111, 1.034, 1.004, 1.004, 1.028, 1.114, 1.274, 1.472, 1.716, 2.019, 2.389,
- 2.342, 1.991, 1.691, 1.446, 1.249, 1.112, 1.034, 1.011, 1.005, 1.035, 1.114, 1.274, 1.472, 1.716, 2.019, 2.389,
- 2.365, 2.052, 1.751, 1.499, 1.299, 1.171, 1.089, 1.039, 1.042, 1.084, 1.162, 1.312, 1.516, 1.761, 2.059, 2.393,
- 2.434, 2.159, 1.856, 1.601, 1.403, 1.278, 1.166, 1.114, 1.114, 1.162, 1.266, 1.402, 1.608, 1.847, 2.146, 2.435,
- 2.554, 2.306, 2.002, 1.748, 1.563, 1.396, 1.299, 1.247, 1.243, 1.279, 1.386, 1.551, 1.746, 1.977, 2.272, 2.518,
- 2.756, 2.493, 2.195, 1.947, 1.739, 1.574, 1.481, 1.429, 1.421, 1.457, 1.559, 1.704, 1.929, 2.159, 2.442, 2.681,
- 2.935, 2.739, 2.411, 2.151, 1.922, 1.749, 1.663, 1.628, 1.625, 1.635, 1.716, 1.872, 2.113, 2.368, 2.663, 2.824
- ],
- "sigma": 0.00381,
- "sigma_Cb": 0.00216
- },
- "rpi.contrast":
- {
- "ce_enable": 1,
- "gamma_curve":
- [
- 0, 0, 1024, 5040, 2048, 9338, 3072, 12356, 4096, 15312, 5120, 18051, 6144, 20790, 7168, 23193,
- 8192, 25744, 9216, 27942, 10240, 30035, 11264, 32005, 12288, 33975, 13312, 35815, 14336, 37600, 15360, 39168,
- 16384, 40642, 18432, 43379, 20480, 45749, 22528, 47753, 24576, 49621, 26624, 51253, 28672, 52698, 30720, 53796,
- 32768, 54876, 36864, 57012, 40960, 58656, 45056, 59954, 49152, 61183, 53248, 62355, 57344, 63419, 61440, 64476,
- 65535, 65535
- ]
- },
- "rpi.ccm":
- {
- "ccms":
- [
- {
- "ct": 2498, "ccm":
- [
- 1.58731, -0.18011, -0.40721, -0.60639, 2.03422, -0.42782, -0.19612, -1.69203, 2.88815
- ]
- },
- {
- "ct": 2811, "ccm":
- [
- 1.61593, -0.33164, -0.28429, -0.55048, 1.97779, -0.42731, -0.12042, -1.42847, 2.54889
- ]
- },
- {
- "ct": 2911, "ccm":
- [
- 1.62771, -0.41282, -0.21489, -0.57991, 2.04176, -0.46186, -0.07613, -1.13359, 2.20972
- ]
- },
- {
- "ct": 2919, "ccm":
- [
- 1.62661, -0.37736, -0.24925, -0.52519, 1.95233, -0.42714, -0.10842, -1.34929, 2.45771
- ]
- },
- {
- "ct": 3627, "ccm":
- [
- 1.70385, -0.57231, -0.13154, -0.47763, 1.85998, -0.38235, -0.07467, -0.82678, 1.90145
- ]
- },
- {
- "ct": 4600, "ccm":
- [
- 1.68486, -0.61085, -0.07402, -0.41927, 2.04016, -0.62089, -0.08633, -0.67672, 1.76305
- ]
- },
+ },
+ {
+ "rpi.contrast":
{
- "ct": 5716, "ccm":
+ "ce_enable": 1,
+ "gamma_curve":
[
- 1.80439, -0.73699, -0.06739, -0.36073, 1.83327, -0.47255, -0.08378, -0.56403, 1.64781
+ 0, 0,
+ 1024, 5040,
+ 2048, 9338,
+ 3072, 12356,
+ 4096, 15312,
+ 5120, 18051,
+ 6144, 20790,
+ 7168, 23193,
+ 8192, 25744,
+ 9216, 27942,
+ 10240, 30035,
+ 11264, 32005,
+ 12288, 33975,
+ 13312, 35815,
+ 14336, 37600,
+ 15360, 39168,
+ 16384, 40642,
+ 18432, 43379,
+ 20480, 45749,
+ 22528, 47753,
+ 24576, 49621,
+ 26624, 51253,
+ 28672, 52698,
+ 30720, 53796,
+ 32768, 54876,
+ 36864, 57012,
+ 40960, 58656,
+ 45056, 59954,
+ 49152, 61183,
+ 53248, 62355,
+ 57344, 63419,
+ 61440, 64476,
+ 65535, 65535
]
- },
+ }
+ },
+ {
+ "rpi.ccm":
{
- "ct": 8575, "ccm":
- [
- 1.89357, -0.76427, -0.12931, -0.27399, 2.15605, -0.88206, -0.12035, -0.68256, 1.80292
+ "ccms": [
+ {
+ "ct": 2498,
+ "ccm":
+ [
+ 1.58731, -0.18011, -0.40721,
+ -0.60639, 2.03422, -0.42782,
+ -0.19612, -1.69203, 2.88815
+ ]
+ },
+ {
+ "ct": 2811,
+ "ccm":
+ [
+ 1.61593, -0.33164, -0.28429,
+ -0.55048, 1.97779, -0.42731,
+ -0.12042, -1.42847, 2.54889
+ ]
+ },
+ {
+ "ct": 2911,
+ "ccm":
+ [
+ 1.62771, -0.41282, -0.21489,
+ -0.57991, 2.04176, -0.46186,
+ -0.07613, -1.13359, 2.20972
+ ]
+ },
+ {
+ "ct": 2919,
+ "ccm":
+ [
+ 1.62661, -0.37736, -0.24925,
+ -0.52519, 1.95233, -0.42714,
+ -0.10842, -1.34929, 2.45771
+ ]
+ },
+ {
+ "ct": 3627,
+ "ccm":
+ [
+ 1.70385, -0.57231, -0.13154,
+ -0.47763, 1.85998, -0.38235,
+ -0.07467, -0.82678, 1.90145
+ ]
+ },
+ {
+ "ct": 4600,
+ "ccm":
+ [
+ 1.68486, -0.61085, -0.07402,
+ -0.41927, 2.04016, -0.62089,
+ -0.08633, -0.67672, 1.76305
+ ]
+ },
+ {
+ "ct": 5716,
+ "ccm":
+ [
+ 1.80439, -0.73699, -0.06739,
+ -0.36073, 1.83327, -0.47255,
+ -0.08378, -0.56403, 1.64781
+ ]
+ },
+ {
+ "ct": 8575,
+ "ccm":
+ [
+ 1.89357, -0.76427, -0.12931,
+ -0.27399, 2.15605, -0.88206,
+ -0.12035, -0.68256, 1.80292
+ ]
+ }
]
}
- ]
- },
- "rpi.sharpen":
- {
-
- },
- "rpi.dpc":
- {
-
- }
-}
+ },
+ {
+ "rpi.sharpen": { }
+ }
+ ]
+} \ No newline at end of file
diff --git a/src/ipa/raspberrypi/data/imx219_noir.json b/src/ipa/raspberrypi/data/imx219_noir.json
index 9a3f03ec..cfedb943 100644
--- a/src/ipa/raspberrypi/data/imx219_noir.json
+++ b/src/ipa/raspberrypi/data/imx219_noir.json
@@ -1,344 +1,402 @@
{
- "rpi.black_level":
- {
- "black_level": 4096
- },
- "rpi.dpc":
- {
-
- },
- "rpi.lux":
- {
- "reference_shutter_speed": 27685,
- "reference_gain": 1.0,
- "reference_aperture": 1.0,
- "reference_lux": 998,
- "reference_Y": 12744
- },
- "rpi.noise":
- {
- "reference_constant": 0,
- "reference_slope": 3.67
- },
- "rpi.geq":
- {
- "offset": 204,
- "slope": 0.01633
- },
- "rpi.sdn":
- {
-
- },
- "rpi.awb":
- {
- "bayes": 0
- },
- "rpi.agc":
- {
- "metering_modes":
+ "version": 2.0,
+ "target": "bcm2835",
+ "algorithms": [
{
- "centre-weighted":
+ "rpi.black_level":
{
- "weights":
- [
- 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
- ]
- },
- "spot":
- {
- "weights":
- [
- 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
- ]
- },
- "matrix":
+ "black_level": 4096
+ }
+ },
+ {
+ "rpi.dpc": { }
+ },
+ {
+ "rpi.lux":
{
- "weights":
- [
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
- ]
+ "reference_shutter_speed": 27685,
+ "reference_gain": 1.0,
+ "reference_aperture": 1.0,
+ "reference_lux": 998,
+ "reference_Y": 12744
}
},
- "exposure_modes":
{
- "normal":
+ "rpi.noise":
{
- "shutter":
- [
- 100, 10000, 30000, 60000, 66666
- ],
- "gain":
- [
- 1.0, 2.0, 4.0, 6.0, 8.0
- ]
- },
- "short":
+ "reference_constant": 0,
+ "reference_slope": 3.67
+ }
+ },
+ {
+ "rpi.geq":
{
- "shutter":
- [
- 100, 5000, 10000, 20000, 33333
- ],
- "gain":
- [
- 1.0, 2.0, 4.0, 6.0, 8.0
- ]
- },
- "long":
+ "offset": 204,
+ "slope": 0.01633
+ }
+ },
+ {
+ "rpi.sdn": { }
+ },
+ {
+ "rpi.awb":
{
- "shutter":
- [
- 100, 10000, 30000, 60000, 120000
- ],
- "gain":
- [
- 1.0, 2.0, 4.0, 6.0, 12.0
- ]
+ "bayes": 0
}
},
- "constraint_modes":
{
- "normal":
- [
+ "rpi.agc":
+ {
+ "metering_modes":
{
- "bound": "LOWER", "q_lo": 0.98, "q_hi": 1.0, "y_target":
- [
- 0, 0.5, 1000, 0.5
- ]
- }
- ],
- "highlight":
- [
+ "centre-weighted":
+ {
+ "weights": [ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0 ]
+ },
+ "spot":
+ {
+ "weights": [ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
+ },
+ "matrix":
+ {
+ "weights": [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
+ }
+ },
+ "exposure_modes":
{
- "bound": "LOWER", "q_lo": 0.98, "q_hi": 1.0, "y_target":
- [
- 0, 0.5, 1000, 0.5
- ]
+ "normal":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 66666 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 33333 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
},
+ "constraint_modes":
{
- "bound": "UPPER", "q_lo": 0.98, "q_hi": 1.0, "y_target":
- [
- 0, 0.8, 1000, 0.8
- ]
- }
- ],
- "shadows":
- [
- {
- "bound": "LOWER", "q_lo": 0.0, "q_hi": 0.5, "y_target":
- [
- 0, 0.17, 1000, 0.17
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ],
+ "shadows": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.0,
+ "q_hi": 0.5,
+ "y_target":
+ [
+ 0, 0.17,
+ 1000, 0.17
+ ]
+ }
]
- }
- ]
- },
- "y_target":
- [
- 0, 0.16, 1000, 0.165, 10000, 0.17
- ]
- },
- "rpi.alsc":
- {
- "omega": 1.3,
- "n_iter": 100,
- "luminance_strength": 0.7,
- "calibrations_Cr":
- [
- {
- "ct": 3000, "table":
- [
- 1.487, 1.481, 1.481, 1.445, 1.389, 1.327, 1.307, 1.307, 1.307, 1.309, 1.341, 1.405, 1.458, 1.494, 1.494, 1.497,
- 1.491, 1.481, 1.448, 1.397, 1.331, 1.275, 1.243, 1.229, 1.229, 1.249, 1.287, 1.349, 1.409, 1.463, 1.494, 1.497,
- 1.491, 1.469, 1.405, 1.331, 1.275, 1.217, 1.183, 1.172, 1.172, 1.191, 1.231, 1.287, 1.349, 1.424, 1.484, 1.499,
- 1.487, 1.444, 1.363, 1.283, 1.217, 1.183, 1.148, 1.138, 1.138, 1.159, 1.191, 1.231, 1.302, 1.385, 1.461, 1.492,
- 1.481, 1.423, 1.334, 1.253, 1.189, 1.148, 1.135, 1.119, 1.123, 1.137, 1.159, 1.203, 1.272, 1.358, 1.442, 1.488,
- 1.479, 1.413, 1.321, 1.236, 1.176, 1.139, 1.118, 1.114, 1.116, 1.123, 1.149, 1.192, 1.258, 1.344, 1.432, 1.487,
- 1.479, 1.413, 1.321, 1.236, 1.176, 1.139, 1.116, 1.114, 1.115, 1.123, 1.149, 1.192, 1.258, 1.344, 1.432, 1.487,
- 1.479, 1.425, 1.336, 1.251, 1.189, 1.149, 1.136, 1.118, 1.121, 1.138, 1.158, 1.206, 1.275, 1.358, 1.443, 1.488,
- 1.488, 1.448, 1.368, 1.285, 1.219, 1.189, 1.149, 1.139, 1.139, 1.158, 1.195, 1.235, 1.307, 1.387, 1.462, 1.493,
- 1.496, 1.475, 1.411, 1.337, 1.284, 1.219, 1.189, 1.176, 1.176, 1.195, 1.235, 1.296, 1.356, 1.429, 1.487, 1.501,
- 1.495, 1.489, 1.458, 1.407, 1.337, 1.287, 1.253, 1.239, 1.239, 1.259, 1.296, 1.356, 1.419, 1.472, 1.499, 1.499,
- 1.494, 1.489, 1.489, 1.453, 1.398, 1.336, 1.317, 1.317, 1.317, 1.321, 1.351, 1.416, 1.467, 1.501, 1.501, 1.499
- ]
- },
- {
- "ct": 3850, "table":
- [
- 1.694, 1.688, 1.688, 1.649, 1.588, 1.518, 1.495, 1.495, 1.495, 1.497, 1.532, 1.602, 1.659, 1.698, 1.698, 1.703,
- 1.698, 1.688, 1.653, 1.597, 1.525, 1.464, 1.429, 1.413, 1.413, 1.437, 1.476, 1.542, 1.606, 1.665, 1.698, 1.703,
- 1.697, 1.673, 1.605, 1.525, 1.464, 1.401, 1.369, 1.354, 1.354, 1.377, 1.417, 1.476, 1.542, 1.623, 1.687, 1.705,
- 1.692, 1.646, 1.561, 1.472, 1.401, 1.368, 1.337, 1.323, 1.324, 1.348, 1.377, 1.417, 1.492, 1.583, 1.661, 1.697,
- 1.686, 1.625, 1.528, 1.439, 1.372, 1.337, 1.321, 1.311, 1.316, 1.324, 1.348, 1.389, 1.461, 1.553, 1.642, 1.694,
- 1.684, 1.613, 1.514, 1.423, 1.359, 1.328, 1.311, 1.306, 1.306, 1.316, 1.339, 1.378, 1.446, 1.541, 1.633, 1.693,
- 1.684, 1.613, 1.514, 1.423, 1.359, 1.328, 1.311, 1.305, 1.305, 1.316, 1.339, 1.378, 1.446, 1.541, 1.633, 1.693,
- 1.685, 1.624, 1.529, 1.438, 1.372, 1.336, 1.324, 1.309, 1.314, 1.323, 1.348, 1.392, 1.462, 1.555, 1.646, 1.694,
- 1.692, 1.648, 1.561, 1.473, 1.403, 1.372, 1.336, 1.324, 1.324, 1.348, 1.378, 1.423, 1.495, 1.585, 1.667, 1.701,
- 1.701, 1.677, 1.608, 1.527, 1.471, 1.403, 1.375, 1.359, 1.359, 1.378, 1.423, 1.488, 1.549, 1.631, 1.694, 1.709,
- 1.702, 1.694, 1.656, 1.601, 1.527, 1.473, 1.441, 1.424, 1.424, 1.443, 1.488, 1.549, 1.621, 1.678, 1.706, 1.707,
- 1.699, 1.694, 1.694, 1.654, 1.593, 1.525, 1.508, 1.508, 1.508, 1.509, 1.546, 1.614, 1.674, 1.708, 1.708, 1.707
- ]
- },
- {
- "ct": 6000, "table":
+ },
+ "y_target":
[
- 2.179, 2.176, 2.176, 2.125, 2.048, 1.975, 1.955, 1.954, 1.954, 1.956, 1.993, 2.071, 2.141, 2.184, 2.185, 2.188,
- 2.189, 2.176, 2.128, 2.063, 1.973, 1.908, 1.872, 1.856, 1.856, 1.876, 1.922, 1.999, 2.081, 2.144, 2.184, 2.192,
- 2.187, 2.152, 2.068, 1.973, 1.907, 1.831, 1.797, 1.786, 1.786, 1.804, 1.853, 1.922, 1.999, 2.089, 2.166, 2.191,
- 2.173, 2.117, 2.013, 1.908, 1.831, 1.791, 1.755, 1.749, 1.749, 1.767, 1.804, 1.853, 1.939, 2.041, 2.135, 2.181,
- 2.166, 2.089, 1.975, 1.869, 1.792, 1.755, 1.741, 1.731, 1.734, 1.749, 1.767, 1.818, 1.903, 2.005, 2.111, 2.173,
- 2.165, 2.074, 1.956, 1.849, 1.777, 1.742, 1.729, 1.725, 1.729, 1.734, 1.758, 1.804, 1.884, 1.991, 2.099, 2.172,
- 2.165, 2.074, 1.956, 1.849, 1.777, 1.742, 1.727, 1.724, 1.725, 1.734, 1.758, 1.804, 1.884, 1.991, 2.099, 2.172,
- 2.166, 2.085, 1.975, 1.869, 1.791, 1.755, 1.741, 1.729, 1.733, 1.749, 1.769, 1.819, 1.904, 2.009, 2.114, 2.174,
- 2.174, 2.118, 2.015, 1.913, 1.831, 1.791, 1.755, 1.749, 1.749, 1.769, 1.811, 1.855, 1.943, 2.047, 2.139, 2.183,
- 2.187, 2.151, 2.072, 1.979, 1.911, 1.831, 1.801, 1.791, 1.791, 1.811, 1.855, 1.933, 2.006, 2.101, 2.173, 2.197,
- 2.189, 2.178, 2.132, 2.069, 1.979, 1.913, 1.879, 1.867, 1.867, 1.891, 1.933, 2.006, 2.091, 2.156, 2.195, 2.197,
- 2.181, 2.179, 2.178, 2.131, 2.057, 1.981, 1.965, 1.965, 1.965, 1.969, 1.999, 2.083, 2.153, 2.197, 2.197, 2.196
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
]
}
- ],
- "calibrations_Cb":
- [
- {
- "ct": 3000, "table":
- [
- 1.967, 1.961, 1.955, 1.953, 1.954, 1.957, 1.961, 1.963, 1.963, 1.961, 1.959, 1.957, 1.954, 1.951, 1.951, 1.955,
- 1.961, 1.959, 1.957, 1.956, 1.962, 1.967, 1.975, 1.979, 1.979, 1.975, 1.971, 1.967, 1.957, 1.952, 1.951, 1.951,
- 1.959, 1.959, 1.959, 1.966, 1.976, 1.989, 1.999, 2.004, 2.003, 1.997, 1.991, 1.981, 1.967, 1.956, 1.951, 1.951,
- 1.959, 1.962, 1.967, 1.978, 1.993, 2.009, 2.021, 2.028, 2.026, 2.021, 2.011, 1.995, 1.981, 1.964, 1.953, 1.951,
- 1.961, 1.965, 1.977, 1.993, 2.009, 2.023, 2.041, 2.047, 2.047, 2.037, 2.024, 2.011, 1.995, 1.975, 1.958, 1.953,
- 1.963, 1.968, 1.981, 2.001, 2.019, 2.039, 2.046, 2.052, 2.052, 2.051, 2.035, 2.021, 2.001, 1.978, 1.959, 1.955,
- 1.961, 1.966, 1.981, 2.001, 2.019, 2.038, 2.043, 2.051, 2.052, 2.042, 2.034, 2.019, 2.001, 1.978, 1.959, 1.954,
- 1.957, 1.961, 1.972, 1.989, 2.003, 2.021, 2.038, 2.039, 2.039, 2.034, 2.019, 2.004, 1.988, 1.971, 1.954, 1.949,
- 1.952, 1.953, 1.959, 1.972, 1.989, 2.003, 2.016, 2.019, 2.019, 2.014, 2.003, 1.988, 1.971, 1.955, 1.948, 1.947,
- 1.949, 1.948, 1.949, 1.957, 1.971, 1.978, 1.991, 1.994, 1.994, 1.989, 1.979, 1.967, 1.954, 1.946, 1.947, 1.947,
- 1.949, 1.946, 1.944, 1.946, 1.949, 1.954, 1.962, 1.967, 1.967, 1.963, 1.956, 1.948, 1.943, 1.943, 1.946, 1.949,
- 1.951, 1.946, 1.944, 1.942, 1.943, 1.943, 1.947, 1.948, 1.949, 1.947, 1.945, 1.941, 1.938, 1.939, 1.948, 1.952
- ]
- },
- {
- "ct": 3850, "table":
- [
- 1.726, 1.724, 1.722, 1.723, 1.731, 1.735, 1.743, 1.746, 1.746, 1.741, 1.735, 1.729, 1.725, 1.721, 1.721, 1.721,
- 1.724, 1.723, 1.723, 1.727, 1.735, 1.744, 1.749, 1.756, 1.756, 1.749, 1.744, 1.735, 1.727, 1.719, 1.719, 1.719,
- 1.723, 1.723, 1.724, 1.735, 1.746, 1.759, 1.767, 1.775, 1.775, 1.766, 1.758, 1.746, 1.735, 1.723, 1.718, 1.716,
- 1.723, 1.725, 1.732, 1.746, 1.759, 1.775, 1.782, 1.792, 1.792, 1.782, 1.772, 1.759, 1.745, 1.729, 1.718, 1.716,
- 1.725, 1.729, 1.738, 1.756, 1.775, 1.785, 1.796, 1.803, 1.804, 1.794, 1.783, 1.772, 1.757, 1.736, 1.722, 1.718,
- 1.728, 1.731, 1.741, 1.759, 1.781, 1.795, 1.803, 1.806, 1.808, 1.805, 1.791, 1.779, 1.762, 1.739, 1.722, 1.721,
- 1.727, 1.731, 1.741, 1.759, 1.781, 1.791, 1.799, 1.804, 1.806, 1.801, 1.791, 1.779, 1.762, 1.739, 1.722, 1.717,
- 1.722, 1.724, 1.733, 1.751, 1.768, 1.781, 1.791, 1.796, 1.799, 1.791, 1.781, 1.766, 1.754, 1.731, 1.717, 1.714,
- 1.718, 1.718, 1.724, 1.737, 1.752, 1.768, 1.776, 1.782, 1.784, 1.781, 1.766, 1.754, 1.737, 1.724, 1.713, 1.709,
- 1.716, 1.715, 1.716, 1.725, 1.737, 1.749, 1.756, 1.763, 1.764, 1.762, 1.749, 1.737, 1.724, 1.717, 1.709, 1.708,
- 1.715, 1.714, 1.712, 1.715, 1.722, 1.729, 1.736, 1.741, 1.742, 1.739, 1.731, 1.723, 1.717, 1.712, 1.711, 1.709,
- 1.716, 1.714, 1.711, 1.712, 1.715, 1.719, 1.723, 1.728, 1.731, 1.729, 1.723, 1.718, 1.711, 1.711, 1.713, 1.713
- ]
- },
+ },
+ {
+ "rpi.alsc":
{
- "ct": 6000, "table":
+ "omega": 1.3,
+ "n_iter": 100,
+ "luminance_strength": 0.7,
+ "calibrations_Cr": [
+ {
+ "ct": 3000,
+ "table":
+ [
+ 1.487, 1.481, 1.481, 1.445, 1.389, 1.327, 1.307, 1.307, 1.307, 1.309, 1.341, 1.405, 1.458, 1.494, 1.494, 1.497,
+ 1.491, 1.481, 1.448, 1.397, 1.331, 1.275, 1.243, 1.229, 1.229, 1.249, 1.287, 1.349, 1.409, 1.463, 1.494, 1.497,
+ 1.491, 1.469, 1.405, 1.331, 1.275, 1.217, 1.183, 1.172, 1.172, 1.191, 1.231, 1.287, 1.349, 1.424, 1.484, 1.499,
+ 1.487, 1.444, 1.363, 1.283, 1.217, 1.183, 1.148, 1.138, 1.138, 1.159, 1.191, 1.231, 1.302, 1.385, 1.461, 1.492,
+ 1.481, 1.423, 1.334, 1.253, 1.189, 1.148, 1.135, 1.119, 1.123, 1.137, 1.159, 1.203, 1.272, 1.358, 1.442, 1.488,
+ 1.479, 1.413, 1.321, 1.236, 1.176, 1.139, 1.118, 1.114, 1.116, 1.123, 1.149, 1.192, 1.258, 1.344, 1.432, 1.487,
+ 1.479, 1.413, 1.321, 1.236, 1.176, 1.139, 1.116, 1.114, 1.115, 1.123, 1.149, 1.192, 1.258, 1.344, 1.432, 1.487,
+ 1.479, 1.425, 1.336, 1.251, 1.189, 1.149, 1.136, 1.118, 1.121, 1.138, 1.158, 1.206, 1.275, 1.358, 1.443, 1.488,
+ 1.488, 1.448, 1.368, 1.285, 1.219, 1.189, 1.149, 1.139, 1.139, 1.158, 1.195, 1.235, 1.307, 1.387, 1.462, 1.493,
+ 1.496, 1.475, 1.411, 1.337, 1.284, 1.219, 1.189, 1.176, 1.176, 1.195, 1.235, 1.296, 1.356, 1.429, 1.487, 1.501,
+ 1.495, 1.489, 1.458, 1.407, 1.337, 1.287, 1.253, 1.239, 1.239, 1.259, 1.296, 1.356, 1.419, 1.472, 1.499, 1.499,
+ 1.494, 1.489, 1.489, 1.453, 1.398, 1.336, 1.317, 1.317, 1.317, 1.321, 1.351, 1.416, 1.467, 1.501, 1.501, 1.499
+ ]
+ },
+ {
+ "ct": 3850,
+ "table":
+ [
+ 1.694, 1.688, 1.688, 1.649, 1.588, 1.518, 1.495, 1.495, 1.495, 1.497, 1.532, 1.602, 1.659, 1.698, 1.698, 1.703,
+ 1.698, 1.688, 1.653, 1.597, 1.525, 1.464, 1.429, 1.413, 1.413, 1.437, 1.476, 1.542, 1.606, 1.665, 1.698, 1.703,
+ 1.697, 1.673, 1.605, 1.525, 1.464, 1.401, 1.369, 1.354, 1.354, 1.377, 1.417, 1.476, 1.542, 1.623, 1.687, 1.705,
+ 1.692, 1.646, 1.561, 1.472, 1.401, 1.368, 1.337, 1.323, 1.324, 1.348, 1.377, 1.417, 1.492, 1.583, 1.661, 1.697,
+ 1.686, 1.625, 1.528, 1.439, 1.372, 1.337, 1.321, 1.311, 1.316, 1.324, 1.348, 1.389, 1.461, 1.553, 1.642, 1.694,
+ 1.684, 1.613, 1.514, 1.423, 1.359, 1.328, 1.311, 1.306, 1.306, 1.316, 1.339, 1.378, 1.446, 1.541, 1.633, 1.693,
+ 1.684, 1.613, 1.514, 1.423, 1.359, 1.328, 1.311, 1.305, 1.305, 1.316, 1.339, 1.378, 1.446, 1.541, 1.633, 1.693,
+ 1.685, 1.624, 1.529, 1.438, 1.372, 1.336, 1.324, 1.309, 1.314, 1.323, 1.348, 1.392, 1.462, 1.555, 1.646, 1.694,
+ 1.692, 1.648, 1.561, 1.473, 1.403, 1.372, 1.336, 1.324, 1.324, 1.348, 1.378, 1.423, 1.495, 1.585, 1.667, 1.701,
+ 1.701, 1.677, 1.608, 1.527, 1.471, 1.403, 1.375, 1.359, 1.359, 1.378, 1.423, 1.488, 1.549, 1.631, 1.694, 1.709,
+ 1.702, 1.694, 1.656, 1.601, 1.527, 1.473, 1.441, 1.424, 1.424, 1.443, 1.488, 1.549, 1.621, 1.678, 1.706, 1.707,
+ 1.699, 1.694, 1.694, 1.654, 1.593, 1.525, 1.508, 1.508, 1.508, 1.509, 1.546, 1.614, 1.674, 1.708, 1.708, 1.707
+ ]
+ },
+ {
+ "ct": 6000,
+ "table":
+ [
+ 2.179, 2.176, 2.176, 2.125, 2.048, 1.975, 1.955, 1.954, 1.954, 1.956, 1.993, 2.071, 2.141, 2.184, 2.185, 2.188,
+ 2.189, 2.176, 2.128, 2.063, 1.973, 1.908, 1.872, 1.856, 1.856, 1.876, 1.922, 1.999, 2.081, 2.144, 2.184, 2.192,
+ 2.187, 2.152, 2.068, 1.973, 1.907, 1.831, 1.797, 1.786, 1.786, 1.804, 1.853, 1.922, 1.999, 2.089, 2.166, 2.191,
+ 2.173, 2.117, 2.013, 1.908, 1.831, 1.791, 1.755, 1.749, 1.749, 1.767, 1.804, 1.853, 1.939, 2.041, 2.135, 2.181,
+ 2.166, 2.089, 1.975, 1.869, 1.792, 1.755, 1.741, 1.731, 1.734, 1.749, 1.767, 1.818, 1.903, 2.005, 2.111, 2.173,
+ 2.165, 2.074, 1.956, 1.849, 1.777, 1.742, 1.729, 1.725, 1.729, 1.734, 1.758, 1.804, 1.884, 1.991, 2.099, 2.172,
+ 2.165, 2.074, 1.956, 1.849, 1.777, 1.742, 1.727, 1.724, 1.725, 1.734, 1.758, 1.804, 1.884, 1.991, 2.099, 2.172,
+ 2.166, 2.085, 1.975, 1.869, 1.791, 1.755, 1.741, 1.729, 1.733, 1.749, 1.769, 1.819, 1.904, 2.009, 2.114, 2.174,
+ 2.174, 2.118, 2.015, 1.913, 1.831, 1.791, 1.755, 1.749, 1.749, 1.769, 1.811, 1.855, 1.943, 2.047, 2.139, 2.183,
+ 2.187, 2.151, 2.072, 1.979, 1.911, 1.831, 1.801, 1.791, 1.791, 1.811, 1.855, 1.933, 2.006, 2.101, 2.173, 2.197,
+ 2.189, 2.178, 2.132, 2.069, 1.979, 1.913, 1.879, 1.867, 1.867, 1.891, 1.933, 2.006, 2.091, 2.156, 2.195, 2.197,
+ 2.181, 2.179, 2.178, 2.131, 2.057, 1.981, 1.965, 1.965, 1.965, 1.969, 1.999, 2.083, 2.153, 2.197, 2.197, 2.196
+ ]
+ }
+ ],
+ "calibrations_Cb": [
+ {
+ "ct": 3000,
+ "table":
+ [
+ 1.967, 1.961, 1.955, 1.953, 1.954, 1.957, 1.961, 1.963, 1.963, 1.961, 1.959, 1.957, 1.954, 1.951, 1.951, 1.955,
+ 1.961, 1.959, 1.957, 1.956, 1.962, 1.967, 1.975, 1.979, 1.979, 1.975, 1.971, 1.967, 1.957, 1.952, 1.951, 1.951,
+ 1.959, 1.959, 1.959, 1.966, 1.976, 1.989, 1.999, 2.004, 2.003, 1.997, 1.991, 1.981, 1.967, 1.956, 1.951, 1.951,
+ 1.959, 1.962, 1.967, 1.978, 1.993, 2.009, 2.021, 2.028, 2.026, 2.021, 2.011, 1.995, 1.981, 1.964, 1.953, 1.951,
+ 1.961, 1.965, 1.977, 1.993, 2.009, 2.023, 2.041, 2.047, 2.047, 2.037, 2.024, 2.011, 1.995, 1.975, 1.958, 1.953,
+ 1.963, 1.968, 1.981, 2.001, 2.019, 2.039, 2.046, 2.052, 2.052, 2.051, 2.035, 2.021, 2.001, 1.978, 1.959, 1.955,
+ 1.961, 1.966, 1.981, 2.001, 2.019, 2.038, 2.043, 2.051, 2.052, 2.042, 2.034, 2.019, 2.001, 1.978, 1.959, 1.954,
+ 1.957, 1.961, 1.972, 1.989, 2.003, 2.021, 2.038, 2.039, 2.039, 2.034, 2.019, 2.004, 1.988, 1.971, 1.954, 1.949,
+ 1.952, 1.953, 1.959, 1.972, 1.989, 2.003, 2.016, 2.019, 2.019, 2.014, 2.003, 1.988, 1.971, 1.955, 1.948, 1.947,
+ 1.949, 1.948, 1.949, 1.957, 1.971, 1.978, 1.991, 1.994, 1.994, 1.989, 1.979, 1.967, 1.954, 1.946, 1.947, 1.947,
+ 1.949, 1.946, 1.944, 1.946, 1.949, 1.954, 1.962, 1.967, 1.967, 1.963, 1.956, 1.948, 1.943, 1.943, 1.946, 1.949,
+ 1.951, 1.946, 1.944, 1.942, 1.943, 1.943, 1.947, 1.948, 1.949, 1.947, 1.945, 1.941, 1.938, 1.939, 1.948, 1.952
+ ]
+ },
+ {
+ "ct": 3850,
+ "table":
+ [
+ 1.726, 1.724, 1.722, 1.723, 1.731, 1.735, 1.743, 1.746, 1.746, 1.741, 1.735, 1.729, 1.725, 1.721, 1.721, 1.721,
+ 1.724, 1.723, 1.723, 1.727, 1.735, 1.744, 1.749, 1.756, 1.756, 1.749, 1.744, 1.735, 1.727, 1.719, 1.719, 1.719,
+ 1.723, 1.723, 1.724, 1.735, 1.746, 1.759, 1.767, 1.775, 1.775, 1.766, 1.758, 1.746, 1.735, 1.723, 1.718, 1.716,
+ 1.723, 1.725, 1.732, 1.746, 1.759, 1.775, 1.782, 1.792, 1.792, 1.782, 1.772, 1.759, 1.745, 1.729, 1.718, 1.716,
+ 1.725, 1.729, 1.738, 1.756, 1.775, 1.785, 1.796, 1.803, 1.804, 1.794, 1.783, 1.772, 1.757, 1.736, 1.722, 1.718,
+ 1.728, 1.731, 1.741, 1.759, 1.781, 1.795, 1.803, 1.806, 1.808, 1.805, 1.791, 1.779, 1.762, 1.739, 1.722, 1.721,
+ 1.727, 1.731, 1.741, 1.759, 1.781, 1.791, 1.799, 1.804, 1.806, 1.801, 1.791, 1.779, 1.762, 1.739, 1.722, 1.717,
+ 1.722, 1.724, 1.733, 1.751, 1.768, 1.781, 1.791, 1.796, 1.799, 1.791, 1.781, 1.766, 1.754, 1.731, 1.717, 1.714,
+ 1.718, 1.718, 1.724, 1.737, 1.752, 1.768, 1.776, 1.782, 1.784, 1.781, 1.766, 1.754, 1.737, 1.724, 1.713, 1.709,
+ 1.716, 1.715, 1.716, 1.725, 1.737, 1.749, 1.756, 1.763, 1.764, 1.762, 1.749, 1.737, 1.724, 1.717, 1.709, 1.708,
+ 1.715, 1.714, 1.712, 1.715, 1.722, 1.729, 1.736, 1.741, 1.742, 1.739, 1.731, 1.723, 1.717, 1.712, 1.711, 1.709,
+ 1.716, 1.714, 1.711, 1.712, 1.715, 1.719, 1.723, 1.728, 1.731, 1.729, 1.723, 1.718, 1.711, 1.711, 1.713, 1.713
+ ]
+ },
+ {
+ "ct": 6000,
+ "table":
+ [
+ 1.374, 1.372, 1.373, 1.374, 1.375, 1.378, 1.378, 1.381, 1.382, 1.382, 1.378, 1.373, 1.372, 1.369, 1.365, 1.365,
+ 1.371, 1.371, 1.372, 1.374, 1.378, 1.381, 1.384, 1.386, 1.388, 1.387, 1.384, 1.377, 1.372, 1.368, 1.364, 1.362,
+ 1.369, 1.371, 1.372, 1.377, 1.383, 1.391, 1.394, 1.396, 1.397, 1.395, 1.391, 1.382, 1.374, 1.369, 1.362, 1.361,
+ 1.369, 1.371, 1.375, 1.383, 1.391, 1.399, 1.402, 1.404, 1.405, 1.403, 1.398, 1.391, 1.379, 1.371, 1.363, 1.361,
+ 1.371, 1.373, 1.378, 1.388, 1.399, 1.407, 1.411, 1.413, 1.413, 1.411, 1.405, 1.397, 1.385, 1.374, 1.366, 1.362,
+ 1.371, 1.374, 1.379, 1.389, 1.405, 1.411, 1.414, 1.414, 1.415, 1.415, 1.411, 1.401, 1.388, 1.376, 1.367, 1.363,
+ 1.371, 1.373, 1.379, 1.389, 1.405, 1.408, 1.413, 1.414, 1.414, 1.413, 1.409, 1.401, 1.388, 1.376, 1.367, 1.362,
+ 1.366, 1.369, 1.374, 1.384, 1.396, 1.404, 1.407, 1.408, 1.408, 1.408, 1.401, 1.395, 1.382, 1.371, 1.363, 1.359,
+ 1.364, 1.365, 1.368, 1.375, 1.386, 1.396, 1.399, 1.401, 1.399, 1.399, 1.395, 1.385, 1.374, 1.365, 1.359, 1.357,
+ 1.361, 1.363, 1.365, 1.368, 1.377, 1.384, 1.388, 1.391, 1.391, 1.388, 1.385, 1.375, 1.366, 1.361, 1.358, 1.356,
+ 1.361, 1.362, 1.362, 1.364, 1.367, 1.373, 1.376, 1.377, 1.377, 1.375, 1.373, 1.366, 1.362, 1.358, 1.358, 1.358,
+ 1.361, 1.362, 1.362, 1.362, 1.363, 1.367, 1.369, 1.368, 1.367, 1.367, 1.367, 1.364, 1.358, 1.357, 1.358, 1.359
+ ]
+ }
+ ],
+ "luminance_lut":
[
- 1.374, 1.372, 1.373, 1.374, 1.375, 1.378, 1.378, 1.381, 1.382, 1.382, 1.378, 1.373, 1.372, 1.369, 1.365, 1.365,
- 1.371, 1.371, 1.372, 1.374, 1.378, 1.381, 1.384, 1.386, 1.388, 1.387, 1.384, 1.377, 1.372, 1.368, 1.364, 1.362,
- 1.369, 1.371, 1.372, 1.377, 1.383, 1.391, 1.394, 1.396, 1.397, 1.395, 1.391, 1.382, 1.374, 1.369, 1.362, 1.361,
- 1.369, 1.371, 1.375, 1.383, 1.391, 1.399, 1.402, 1.404, 1.405, 1.403, 1.398, 1.391, 1.379, 1.371, 1.363, 1.361,
- 1.371, 1.373, 1.378, 1.388, 1.399, 1.407, 1.411, 1.413, 1.413, 1.411, 1.405, 1.397, 1.385, 1.374, 1.366, 1.362,
- 1.371, 1.374, 1.379, 1.389, 1.405, 1.411, 1.414, 1.414, 1.415, 1.415, 1.411, 1.401, 1.388, 1.376, 1.367, 1.363,
- 1.371, 1.373, 1.379, 1.389, 1.405, 1.408, 1.413, 1.414, 1.414, 1.413, 1.409, 1.401, 1.388, 1.376, 1.367, 1.362,
- 1.366, 1.369, 1.374, 1.384, 1.396, 1.404, 1.407, 1.408, 1.408, 1.408, 1.401, 1.395, 1.382, 1.371, 1.363, 1.359,
- 1.364, 1.365, 1.368, 1.375, 1.386, 1.396, 1.399, 1.401, 1.399, 1.399, 1.395, 1.385, 1.374, 1.365, 1.359, 1.357,
- 1.361, 1.363, 1.365, 1.368, 1.377, 1.384, 1.388, 1.391, 1.391, 1.388, 1.385, 1.375, 1.366, 1.361, 1.358, 1.356,
- 1.361, 1.362, 1.362, 1.364, 1.367, 1.373, 1.376, 1.377, 1.377, 1.375, 1.373, 1.366, 1.362, 1.358, 1.358, 1.358,
- 1.361, 1.362, 1.362, 1.362, 1.363, 1.367, 1.369, 1.368, 1.367, 1.367, 1.367, 1.364, 1.358, 1.357, 1.358, 1.359
- ]
+ 2.716, 2.568, 2.299, 2.065, 1.845, 1.693, 1.605, 1.597, 1.596, 1.634, 1.738, 1.914, 2.145, 2.394, 2.719, 2.901,
+ 2.593, 2.357, 2.093, 1.876, 1.672, 1.528, 1.438, 1.393, 1.394, 1.459, 1.569, 1.731, 1.948, 2.169, 2.481, 2.756,
+ 2.439, 2.197, 1.922, 1.691, 1.521, 1.365, 1.266, 1.222, 1.224, 1.286, 1.395, 1.573, 1.747, 1.988, 2.299, 2.563,
+ 2.363, 2.081, 1.797, 1.563, 1.376, 1.244, 1.152, 1.099, 1.101, 1.158, 1.276, 1.421, 1.607, 1.851, 2.163, 2.455,
+ 2.342, 2.003, 1.715, 1.477, 1.282, 1.152, 1.074, 1.033, 1.035, 1.083, 1.163, 1.319, 1.516, 1.759, 2.064, 2.398,
+ 2.342, 1.985, 1.691, 1.446, 1.249, 1.111, 1.034, 1.004, 1.004, 1.028, 1.114, 1.274, 1.472, 1.716, 2.019, 2.389,
+ 2.342, 1.991, 1.691, 1.446, 1.249, 1.112, 1.034, 1.011, 1.005, 1.035, 1.114, 1.274, 1.472, 1.716, 2.019, 2.389,
+ 2.365, 2.052, 1.751, 1.499, 1.299, 1.171, 1.089, 1.039, 1.042, 1.084, 1.162, 1.312, 1.516, 1.761, 2.059, 2.393,
+ 2.434, 2.159, 1.856, 1.601, 1.403, 1.278, 1.166, 1.114, 1.114, 1.162, 1.266, 1.402, 1.608, 1.847, 2.146, 2.435,
+ 2.554, 2.306, 2.002, 1.748, 1.563, 1.396, 1.299, 1.247, 1.243, 1.279, 1.386, 1.551, 1.746, 1.977, 2.272, 2.518,
+ 2.756, 2.493, 2.195, 1.947, 1.739, 1.574, 1.481, 1.429, 1.421, 1.457, 1.559, 1.704, 1.929, 2.159, 2.442, 2.681,
+ 2.935, 2.739, 2.411, 2.151, 1.922, 1.749, 1.663, 1.628, 1.625, 1.635, 1.716, 1.872, 2.113, 2.368, 2.663, 2.824
+ ],
+ "sigma": 0.00381,
+ "sigma_Cb": 0.00216
}
- ],
- "luminance_lut":
- [
- 2.716, 2.568, 2.299, 2.065, 1.845, 1.693, 1.605, 1.597, 1.596, 1.634, 1.738, 1.914, 2.145, 2.394, 2.719, 2.901,
- 2.593, 2.357, 2.093, 1.876, 1.672, 1.528, 1.438, 1.393, 1.394, 1.459, 1.569, 1.731, 1.948, 2.169, 2.481, 2.756,
- 2.439, 2.197, 1.922, 1.691, 1.521, 1.365, 1.266, 1.222, 1.224, 1.286, 1.395, 1.573, 1.747, 1.988, 2.299, 2.563,
- 2.363, 2.081, 1.797, 1.563, 1.376, 1.244, 1.152, 1.099, 1.101, 1.158, 1.276, 1.421, 1.607, 1.851, 2.163, 2.455,
- 2.342, 2.003, 1.715, 1.477, 1.282, 1.152, 1.074, 1.033, 1.035, 1.083, 1.163, 1.319, 1.516, 1.759, 2.064, 2.398,
- 2.342, 1.985, 1.691, 1.446, 1.249, 1.111, 1.034, 1.004, 1.004, 1.028, 1.114, 1.274, 1.472, 1.716, 2.019, 2.389,
- 2.342, 1.991, 1.691, 1.446, 1.249, 1.112, 1.034, 1.011, 1.005, 1.035, 1.114, 1.274, 1.472, 1.716, 2.019, 2.389,
- 2.365, 2.052, 1.751, 1.499, 1.299, 1.171, 1.089, 1.039, 1.042, 1.084, 1.162, 1.312, 1.516, 1.761, 2.059, 2.393,
- 2.434, 2.159, 1.856, 1.601, 1.403, 1.278, 1.166, 1.114, 1.114, 1.162, 1.266, 1.402, 1.608, 1.847, 2.146, 2.435,
- 2.554, 2.306, 2.002, 1.748, 1.563, 1.396, 1.299, 1.247, 1.243, 1.279, 1.386, 1.551, 1.746, 1.977, 2.272, 2.518,
- 2.756, 2.493, 2.195, 1.947, 1.739, 1.574, 1.481, 1.429, 1.421, 1.457, 1.559, 1.704, 1.929, 2.159, 2.442, 2.681,
- 2.935, 2.739, 2.411, 2.151, 1.922, 1.749, 1.663, 1.628, 1.625, 1.635, 1.716, 1.872, 2.113, 2.368, 2.663, 2.824
- ],
- "sigma": 0.00381,
- "sigma_Cb": 0.00216
- },
- "rpi.contrast":
- {
- "ce_enable": 1,
- "gamma_curve":
- [
- 0, 0, 1024, 5040, 2048, 9338, 3072, 12356, 4096, 15312, 5120, 18051, 6144, 20790, 7168, 23193,
- 8192, 25744, 9216, 27942, 10240, 30035, 11264, 32005, 12288, 33975, 13312, 35815, 14336, 37600, 15360, 39168,
- 16384, 40642, 18432, 43379, 20480, 45749, 22528, 47753, 24576, 49621, 26624, 51253, 28672, 52698, 30720, 53796,
- 32768, 54876, 36864, 57012, 40960, 58656, 45056, 59954, 49152, 61183, 53248, 62355, 57344, 63419, 61440, 64476,
- 65535, 65535
- ]
- },
- "rpi.ccm":
- {
- "ccms":
- [
- {
- "ct": 2498, "ccm":
- [
- 1.58731, -0.18011, -0.40721, -0.60639, 2.03422, -0.42782, -0.19612, -1.69203, 2.88815
- ]
- },
- {
- "ct": 2811, "ccm":
- [
- 1.61593, -0.33164, -0.28429, -0.55048, 1.97779, -0.42731, -0.12042, -1.42847, 2.54889
- ]
- },
- {
- "ct": 2911, "ccm":
- [
- 1.62771, -0.41282, -0.21489, -0.57991, 2.04176, -0.46186, -0.07613, -1.13359, 2.20972
- ]
- },
- {
- "ct": 2919, "ccm":
- [
- 1.62661, -0.37736, -0.24925, -0.52519, 1.95233, -0.42714, -0.10842, -1.34929, 2.45771
- ]
- },
- {
- "ct": 3627, "ccm":
- [
- 1.70385, -0.57231, -0.13154, -0.47763, 1.85998, -0.38235, -0.07467, -0.82678, 1.90145
- ]
- },
- {
- "ct": 4600, "ccm":
- [
- 1.68486, -0.61085, -0.07402, -0.41927, 2.04016, -0.62089, -0.08633, -0.67672, 1.76305
- ]
- },
+ },
+ {
+ "rpi.contrast":
{
- "ct": 5716, "ccm":
+ "ce_enable": 1,
+ "gamma_curve":
[
- 1.80439, -0.73699, -0.06739, -0.36073, 1.83327, -0.47255, -0.08378, -0.56403, 1.64781
+ 0, 0,
+ 1024, 5040,
+ 2048, 9338,
+ 3072, 12356,
+ 4096, 15312,
+ 5120, 18051,
+ 6144, 20790,
+ 7168, 23193,
+ 8192, 25744,
+ 9216, 27942,
+ 10240, 30035,
+ 11264, 32005,
+ 12288, 33975,
+ 13312, 35815,
+ 14336, 37600,
+ 15360, 39168,
+ 16384, 40642,
+ 18432, 43379,
+ 20480, 45749,
+ 22528, 47753,
+ 24576, 49621,
+ 26624, 51253,
+ 28672, 52698,
+ 30720, 53796,
+ 32768, 54876,
+ 36864, 57012,
+ 40960, 58656,
+ 45056, 59954,
+ 49152, 61183,
+ 53248, 62355,
+ 57344, 63419,
+ 61440, 64476,
+ 65535, 65535
]
- },
+ }
+ },
+ {
+ "rpi.ccm":
{
- "ct": 8575, "ccm":
- [
- 1.89357, -0.76427, -0.12931, -0.27399, 2.15605, -0.88206, -0.12035, -0.68256, 1.80292
+ "ccms": [
+ {
+ "ct": 2498,
+ "ccm":
+ [
+ 1.58731, -0.18011, -0.40721,
+ -0.60639, 2.03422, -0.42782,
+ -0.19612, -1.69203, 2.88815
+ ]
+ },
+ {
+ "ct": 2811,
+ "ccm":
+ [
+ 1.61593, -0.33164, -0.28429,
+ -0.55048, 1.97779, -0.42731,
+ -0.12042, -1.42847, 2.54889
+ ]
+ },
+ {
+ "ct": 2911,
+ "ccm":
+ [
+ 1.62771, -0.41282, -0.21489,
+ -0.57991, 2.04176, -0.46186,
+ -0.07613, -1.13359, 2.20972
+ ]
+ },
+ {
+ "ct": 2919,
+ "ccm":
+ [
+ 1.62661, -0.37736, -0.24925,
+ -0.52519, 1.95233, -0.42714,
+ -0.10842, -1.34929, 2.45771
+ ]
+ },
+ {
+ "ct": 3627,
+ "ccm":
+ [
+ 1.70385, -0.57231, -0.13154,
+ -0.47763, 1.85998, -0.38235,
+ -0.07467, -0.82678, 1.90145
+ ]
+ },
+ {
+ "ct": 4600,
+ "ccm":
+ [
+ 1.68486, -0.61085, -0.07402,
+ -0.41927, 2.04016, -0.62089,
+ -0.08633, -0.67672, 1.76305
+ ]
+ },
+ {
+ "ct": 5716,
+ "ccm":
+ [
+ 1.80439, -0.73699, -0.06739,
+ -0.36073, 1.83327, -0.47255,
+ -0.08378, -0.56403, 1.64781
+ ]
+ },
+ {
+ "ct": 8575,
+ "ccm":
+ [
+ 1.89357, -0.76427, -0.12931,
+ -0.27399, 2.15605, -0.88206,
+ -0.12035, -0.68256, 1.80292
+ ]
+ }
]
}
- ]
- },
- "rpi.sharpen":
- {
-
- },
- "rpi.dpc":
- {
-
- }
-}
+ },
+ {
+ "rpi.sharpen": { }
+ }
+ ]
+} \ No newline at end of file
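The hunks above and below migrate the Raspberry Pi tuning files from the flat version-1 layout to the version-2 layout, in which the file carries "version" and "target" fields plus an ordered "algorithms" list whose entries are single-key objects ("rpi.black_level", "rpi.agc", and so on). As a minimal sketch (not part of this patch; the file path and helper name are illustrative), the following Python uses only the standard json module to load a converted file and print the algorithm order:

    import json

    def list_algorithms(path):
        """Load a version-2 tuning file and return its algorithm names in order."""
        with open(path) as f:
            data = json.load(f)

        # Version-2 files carry "version", "target" and an ordered "algorithms"
        # list; each list entry is an object with a single "rpi.*" key.
        if data.get("version") != 2.0:
            raise ValueError("not a version-2 tuning file")

        names = []
        for entry in data["algorithms"]:
            names.extend(entry.keys())
        return names

    if __name__ == "__main__":
        # Path is illustrative; any converted tuning file from this patch would do.
        for name in list_algorithms("imx290.json"):
            print(name)

Because the algorithms now live in a list rather than in object keys, their run order is explicit in the file itself, which is the structural point of the conversion shown in these diffs.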
diff --git a/src/ipa/raspberrypi/data/imx290.json b/src/ipa/raspberrypi/data/imx290.json
index 20b45c16..bfb9c609 100644
--- a/src/ipa/raspberrypi/data/imx290.json
+++ b/src/ipa/raspberrypi/data/imx290.json
@@ -1,165 +1,203 @@
{
- "rpi.black_level":
- {
- "black_level": 3840
- },
- "rpi.dpc":
- {
- },
- "rpi.lux":
- {
- "reference_shutter_speed": 6813,
- "reference_gain": 1.0,
- "reference_aperture": 1.0,
- "reference_lux": 890,
- "reference_Y": 12900
- },
- "rpi.noise":
- {
- "reference_constant": 0,
- "reference_slope": 2.67
- },
- "rpi.geq":
- {
- "offset": 187,
- "slope": 0.00842
- },
- "rpi.sdn":
- {
- },
- "rpi.awb":
- {
- "bayes": 0
- },
- "rpi.agc":
- {
- "speed": 0.2,
- "metering_modes":
+ "version": 2.0,
+ "target": "bcm2835",
+ "algorithms": [
{
- "matrix":
+ "rpi.black_level":
{
- "weights":
- [
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
- ]
- },
- "centre-weighted":
+ "black_level": 3840
+ }
+ },
+ {
+ "rpi.dpc": { }
+ },
+ {
+ "rpi.lux":
{
- "weights":
- [
- 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
- ]
- },
- "spot":
+ "reference_shutter_speed": 6813,
+ "reference_gain": 1.0,
+ "reference_aperture": 1.0,
+ "reference_lux": 890,
+ "reference_Y": 12900
+ }
+ },
+ {
+ "rpi.noise":
{
- "weights":
- [
- 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
- ]
+ "reference_constant": 0,
+ "reference_slope": 2.67
}
},
- "exposure_modes":
{
- "normal":
+ "rpi.geq":
{
- "shutter":
- [
- 10, 30000, 60000
- ],
- "gain":
+ "offset": 187,
+ "slope": 0.00842
+ }
+ },
+ {
+ "rpi.sdn": { }
+ },
+ {
+ "rpi.awb":
+ {
+ "bayes": 0
+ }
+ },
+ {
+ "rpi.agc":
+ {
+ "speed": 0.2,
+ "metering_modes":
+ {
+ "matrix":
+ {
+ "weights": [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
+ },
+ "centre-weighted":
+ {
+ "weights": [ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0 ]
+ },
+ "spot":
+ {
+ "weights": [ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 10, 30000, 60000 ],
+ "gain": [ 1.0, 2.0, 8.0 ]
+ },
+ "sport":
+ {
+ "shutter": [ 10, 5000, 10000, 20000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ]
+ },
+ "y_target":
[
- 1.0, 2.0, 8.0
+ 0, 0.16,
+ 1000, 0.16,
+ 10000, 0.16
]
- },
- "sport":
+ }
+ },
+ {
+ "rpi.alsc":
{
- "shutter":
+ "omega": 1.3,
+ "n_iter": 100,
+ "luminance_strength": 0.7,
+ "luminance_lut":
[
- 10, 5000, 10000, 20000, 120000
+ 2.844, 2.349, 2.018, 1.775, 1.599, 1.466, 1.371, 1.321, 1.306, 1.316, 1.357, 1.439, 1.552, 1.705, 1.915, 2.221,
+ 2.576, 2.151, 1.851, 1.639, 1.478, 1.358, 1.272, 1.231, 1.218, 1.226, 1.262, 1.335, 1.438, 1.571, 1.766, 2.067,
+ 2.381, 2.005, 1.739, 1.545, 1.389, 1.278, 1.204, 1.166, 1.153, 1.161, 1.194, 1.263, 1.356, 1.489, 1.671, 1.943,
+ 2.242, 1.899, 1.658, 1.481, 1.329, 1.225, 1.156, 1.113, 1.096, 1.107, 1.143, 1.201, 1.289, 1.423, 1.607, 1.861,
+ 2.152, 1.831, 1.602, 1.436, 1.291, 1.193, 1.121, 1.069, 1.047, 1.062, 1.107, 1.166, 1.249, 1.384, 1.562, 1.801,
+ 2.104, 1.795, 1.572, 1.407, 1.269, 1.174, 1.099, 1.041, 1.008, 1.029, 1.083, 1.146, 1.232, 1.364, 1.547, 1.766,
+ 2.104, 1.796, 1.572, 1.403, 1.264, 1.171, 1.097, 1.036, 1.001, 1.025, 1.077, 1.142, 1.231, 1.363, 1.549, 1.766,
+ 2.148, 1.827, 1.594, 1.413, 1.276, 1.184, 1.114, 1.062, 1.033, 1.049, 1.092, 1.153, 1.242, 1.383, 1.577, 1.795,
+ 2.211, 1.881, 1.636, 1.455, 1.309, 1.214, 1.149, 1.104, 1.081, 1.089, 1.125, 1.184, 1.273, 1.423, 1.622, 1.846,
+ 2.319, 1.958, 1.698, 1.516, 1.362, 1.262, 1.203, 1.156, 1.137, 1.142, 1.171, 1.229, 1.331, 1.484, 1.682, 1.933,
+ 2.459, 2.072, 1.789, 1.594, 1.441, 1.331, 1.261, 1.219, 1.199, 1.205, 1.232, 1.301, 1.414, 1.571, 1.773, 2.052,
+ 2.645, 2.206, 1.928, 1.728, 1.559, 1.451, 1.352, 1.301, 1.282, 1.289, 1.319, 1.395, 1.519, 1.685, 1.904, 2.227
],
- "gain":
+ "sigma": 0.005,
+ "sigma_Cb": 0.005
+ }
+ },
+ {
+ "rpi.contrast":
+ {
+ "ce_enable": 1,
+ "gamma_curve":
[
- 1.0, 2.0, 4.0, 6.0, 8.0
+ 0, 0,
+ 1024, 5040,
+ 2048, 9338,
+ 3072, 12356,
+ 4096, 15312,
+ 5120, 18051,
+ 6144, 20790,
+ 7168, 23193,
+ 8192, 25744,
+ 9216, 27942,
+ 10240, 30035,
+ 11264, 32005,
+ 12288, 33975,
+ 13312, 35815,
+ 14336, 37600,
+ 15360, 39168,
+ 16384, 40642,
+ 18432, 43379,
+ 20480, 45749,
+ 22528, 47753,
+ 24576, 49621,
+ 26624, 51253,
+ 28672, 52698,
+ 30720, 53796,
+ 32768, 54876,
+ 36864, 57012,
+ 40960, 58656,
+ 45056, 59954,
+ 49152, 61183,
+ 53248, 62355,
+ 57344, 63419,
+ 61440, 64476,
+ 65535, 65535
]
}
},
- "constraint_modes":
{
- "normal":
- [
- ],
- "highlight":
- [
- {
- "bound": "LOWER", "q_lo": 0.98, "q_hi": 1.0, "y_target":
- [
- 0, 0.5, 1000, 0.5
- ]
- },
- {
- "bound": "UPPER", "q_lo": 0.98, "q_hi": 1.0, "y_target":
- [
- 0, 0.8, 1000, 0.8
- ]
- }
- ]
+ "rpi.sharpen": { }
},
- "y_target":
-